repo stringlengths 1 99 | file stringlengths 13 215 | code stringlengths 12 59.2M | file_length int64 12 59.2M | avg_line_length float64 3.82 1.48M | max_line_length int64 12 2.51M | extension_type stringclasses 1
value |
|---|---|---|---|---|---|---|
adversarial_ntk_evolution | adversarial_ntk_evolution-master/data.py | import torch
import numpy as np
import torchvision
# Training-time preprocessing: PIL image -> float tensor scaled to [0, 1].
# No augmentation (crop/flip) is applied here.
transform_train = torchvision.transforms.Compose([
    torchvision.transforms.ToTensor(),
])
def get_loader(dataset_name, train=True, batch_size=128, shuffle=True):
    """Build a DataLoader over the requested CIFAR split.

    Args:
        dataset_name: 'cifar10' or 'cifar100'.
        train: whether to load the training split (else the test split).
        batch_size: samples per batch.
        shuffle: whether to reshuffle every epoch.

    Returns:
        A ``torch.utils.data.DataLoader`` over the requested split.

    Raises:
        ValueError: if ``dataset_name`` is not recognized (previously the
            function silently fell through and returned ``None``).
    """
    dataset_classes = {
        'cifar10': torchvision.datasets.CIFAR10,
        'cifar100': torchvision.datasets.CIFAR100,
    }
    if dataset_name not in dataset_classes:
        raise ValueError('Unknown dataset: {}'.format(dataset_name))
    dataset = dataset_classes[dataset_name](root='./data', train=train, download=True, transform=transform_train)
    return torch.utils.data.DataLoader(dataset, batch_size=batch_size, shuffle=shuffle, num_workers=4)
def get_data_and_labels(dataset_name):
    """Return the full training split of a CIFAR dataset as tensors.

    Args:
        dataset_name: 'cifar10' or 'cifar100'.

    Returns:
        ``(images, labels)``: images as a float tensor of shape (N, C, H, W)
        rescaled to [0, 1]; labels as a 1-D integer tensor.

    Raises:
        ValueError: if ``dataset_name`` is not recognized (previously
            ``dataset`` would be unbound, raising a confusing NameError).
    """
    if dataset_name == 'cifar10':
        dataset = torchvision.datasets.CIFAR10(root='./data', train=True, download=True, transform=transform_train)
    elif dataset_name == 'cifar100':
        dataset = torchvision.datasets.CIFAR100(root='./data', train=True, download=True, transform=transform_train)
    else:
        raise ValueError('Unknown dataset: {}'.format(dataset_name))
    # HWC uint8 -> CHW float in [0, 1] to match the layout the models expect.
    return torch.tensor(np.transpose(dataset.data, [0, 3, 1, 2]) / 255), torch.tensor(dataset.targets)
def get_n_classes(dataset_name):
    """Return the number of target classes for the named dataset.

    Args:
        dataset_name: 'cifar10' or 'cifar100'.

    Returns:
        10 for cifar10, 100 for cifar100.

    Raises:
        ValueError: if ``dataset_name`` is not recognized (previously the
            function silently returned ``None``).
    """
    if dataset_name == 'cifar10':
        return 10
    elif dataset_name == 'cifar100':
        return 100
    raise ValueError('Unknown dataset: {}'.format(dataset_name))
adversarial_ntk_evolution | adversarial_ntk_evolution-master/eval_and_make_adv.py | import jax
import haiku as hk
import jax.numpy as jnp
from jax.example_libraries import optimizers
import torch
import torchvision
import torchvision.transforms as transforms
from torch.utils.data import Dataset
import numpy as np
import neural_tangents as nt
import functools
import operator
import optax
import copy
import models
import pickle
from utils import bind, _add, _sub
import os
from test_functions import do_perturbation_step_l_inf, do_perturbation_step_l_2, perturb, test, loss_fn
import numpy as np
import argparse
import time
import data
def main():
    """Evaluate a saved (optionally linearized/centered) model on a test set
    and, unless disabled, craft l-inf adversarial examples against it.

    Results (accuracies, predictions, linearization components) are pickled
    next to the checkpoint; adversarial examples are optionally saved too.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--model_path', type=str, default='', help='where the model is saved')
    parser.add_argument('--save_name', type=str, default='test_results', help='extra specifier to include when saving results')
    parser.add_argument('--linear', action='store_true', help='whether the loaded model is in linearized dynamics')
    parser.add_argument('--centering', action='store_true', help='whether the loaded model is in centered dynamics')
    parser.add_argument('--test_path', type=str, default='', help='where the test images are. Set it to empty to use standard cifar10/cifar100 images. This is used for adversarial transferability testing')
    parser.add_argument('--bonus_dir', type=str, default='.', help='include an extra dir the save path for more specific save locations')
    parser.add_argument('--no_adv', action='store_true', help='whether to skip making adversarial examples')
    parser.add_argument('--save_examples', action='store_true', help='whether to save the adversarial examples')
    parser.add_argument('--short', action='store_true', help='dont set this. basically just for debugging')
    parser.add_argument('--random_seed', type=int, default=0, help='random seed')
    parser.add_argument('--dataset', type=str, default='cifar10', help='cifar10/cifar100')
    parser.add_argument('--model', type=str, default='resnet18', help='model')
    parser.add_argument('--eps', type=float, default=4.00, help='eps value for l-inf adversarial attacks. scaled by 1/255')
    args = parser.parse_args()

    transform_test = transforms.Compose([
        transforms.ToTensor(),
    ])

    class TensorDataset(Dataset):
        """Dataset over pre-loaded tensors, optionally applying a transform."""

        def __init__(self, *tensors, transform=None):
            assert all(tensors[0].size(0) == tensor.size(0) for tensor in tensors)
            self.tensors = tensors
            self.transform = transform

        def __getitem__(self, index):
            im, targ = tuple(tensor[index] for tensor in self.tensors)
            if self.transform:
                # ToTensor-style transforms expect a PIL image, so convert first.
                real_transform = transforms.Compose([
                    transforms.ToPILImage(),
                    self.transform
                ])
                im = real_transform(im)
            return im, targ

        def __len__(self):
            return self.tensors[0].size(0)

    rng = jax.random.PRNGKey(args.random_seed)
    net_forward_init, net_forward_apply = models.get_model(args.model, data.get_n_classes(args.dataset))

    # Load the checkpoint: current params, the linearization anchor params,
    # and the (batch-norm-style) network state. Use `with` so the handle is
    # closed promptly (the original leaked the open file object).
    with open('./{}'.format(args.model_path), 'rb') as f:
        checkpoint = pickle.load(f)
    params = checkpoint['params']
    lin_params = checkpoint['lin_params']
    net_state = checkpoint['net_state']

    if len(args.test_path) > 0:
        # Pre-generated images (e.g. adversarial examples made by another
        # model) for transferability testing.
        with open('./{}'.format(args.test_path), 'rb') as f:
            test_stuff = pickle.load(f)
        test_data = torch.tensor(test_stuff['images']).cpu()
        print(test_data.shape)
        test_labels = torch.tensor(test_stuff['labels']).cpu()
        print(test_labels)
        test_dataset = TensorDataset(test_data, test_labels, transform=transform_test)
    elif args.dataset == 'cifar10':
        test_dataset = torchvision.datasets.CIFAR10(root='./data', train=False, download=True, transform=transform_test)
        test_labels = np.array(test_dataset.targets)
        print(test_labels)
    elif args.dataset == 'cifar100':
        test_dataset = torchvision.datasets.CIFAR100(root='./data', train=False, download=True, transform=transform_test)
        test_labels = np.array(test_dataset.targets)
        print(test_labels)
    else:
        # Previously an unknown dataset fell through with `test_dataset`
        # unbound; fail loudly instead.
        raise ValueError('Unknown dataset: {}'.format(args.dataset))

    test_loader = torch.utils.data.DataLoader(test_dataset, batch_size=100, shuffle=False, num_workers=4)

    acc_clean, acc_dirty, adv_examples, predictions_clean, predictions_dirty, components_clean, components_dirty = test(
        params, lin_params, net_state, net_forward_apply, rng, test_loader,
        make_adv_examples=not args.no_adv, linear=args.linear,
        centering=args.centering, attack='linf', return_examples=True,
        short=args.short, return_components=True, adv_eps=args.eps)

    base_path = os.path.dirname(args.model_path)
    results_dict = {
        'acc_clean': acc_clean,
        'acc_dirty': acc_dirty,
        'predictions_clean': predictions_clean,
        'predictions_dirty': predictions_dirty,
        'components_clean': components_clean,
        'components_dirty': components_dirty,
    }
    print('clean: {:.2f} dirty: {:.2f}'.format(100 * acc_clean, 100 * acc_dirty))

    # makedirs(exist_ok=True) avoids the check-then-create race of isdir+mkdir.
    os.makedirs('./{}/{}/'.format(base_path, args.bonus_dir), exist_ok=True)
    with open('./{}/{}/test_results_{}.pkl'.format(base_path, args.bonus_dir, args.save_name), 'wb') as f:
        pickle.dump(results_dict, f)
    if args.save_examples:
        # Store examples in CHW layout alongside the labels they were made from.
        with open('./{}/{}/adv_examples_{}.pkl'.format(base_path, args.bonus_dir, args.save_name), 'wb') as f:
            pickle.dump({'images': np.transpose(adv_examples, [0, 3, 1, 2]), 'labels': test_labels[:adv_examples.shape[0]]}, f)


if __name__ == '__main__':
    main()
| 5,638 | 43.401575 | 381 | py |
adversarial_ntk_evolution | adversarial_ntk_evolution-master/models.py | import jax
import haiku as hk
import jax.numpy as jnp
from jax.example_libraries import optimizers
import torch
import torchvision
import torchvision.transforms as transforms
from torch.utils.data import Dataset
import numpy as np
import neural_tangents as nt
import functools
import operator
import optax
import copy
import models
import sys
from utils import bind, _sub, _add
import modified_resnets
@functools.partial(jax.jit, static_argnums=(3, 6, 7, 8))
def linear_forward(params, params2, state, net_fn, rng, images, is_training = False, centering = False, return_components = False):
    """Evaluate the model linearized around `params` at the point `params2`.

    Uses one jvp pass to get both the anchor output f_0 = f(params) and the
    first-order change df_0 = J_f(params) @ (params2 - params), i.e. the
    Taylor expansion f(params2) ~= f_0 + df_0.

    Args:
        params: anchor parameters the network is linearized around.
        params2: current parameters; only the difference from `params` matters.
        state: mutable network state (e.g. batch-norm statistics).
        net_fn: Haiku apply function `(params, state, rng, x, is_training)`.
        rng: PRNG key passed through to `net_fn`.
        images: input batch.
        is_training: forwarded to `net_fn` (static for jit).
        centering: if True, return only the linear change df_0, dropping f_0.
        return_components: if True, also return a dict with state/f/df pieces.

    Returns:
        (output, state) or (output, {'state', 'f', 'df'}) when
        `return_components` is set.
    """
    dparams = _sub(params2, params)
    # has_aux=True threads the updated network state out of net_fn alongside
    # the primal (f_0) and tangent (df_0) outputs.
    f_0, df_0, state = jax.jvp(lambda param: net_fn(param, state, rng, images, is_training = is_training), (params,), (dparams,), has_aux = True)
    if return_components:
        if centering:
            # Centered dynamics: report only the linear change.
            return df_0, {'state': state, 'f': f_0, 'df': df_0}
        return _add(f_0, df_0), {'state': state, 'f': f_0, 'df': df_0}
    if centering:
        return df_0, state
    return _add(f_0, df_0), state
def get_resnet(n_classes):
    """Build a CIFAR-sized ResNet18 and return its Haiku (init, apply) pair."""
    # 32x32 inputs: shrink the stem to a 3x3 / stride-1 conv instead of the
    # ImageNet-style 7x7 / stride-2 first layer.
    stem_config = {'kernel_shape': 3, 'stride': 1}

    def forward(x, is_training):
        model = modified_resnets.ResNet18(n_classes, initial_conv_config=stem_config)
        return model(x, is_training)

    transformed = hk.transform_with_state(forward)
    return transformed.init, transformed.apply
def _forward_narrow_mlp(x, is_training):
    """Two hidden ReLU layers of width 256 plus a 10-way linear head.

    `is_training` is accepted only for signature parity with the ResNet
    forward functions; it is unused here.
    """
    layers = [hk.Flatten()]
    for width in (256, 256):
        layers.append(hk.Linear(width))
        layers.append(jax.nn.relu)
    layers.append(hk.Linear(10))
    return hk.Sequential(layers)(x)
def _forward_wide_mlp(x, is_training):
    """Two hidden ReLU layers of width 2048 plus a 10-way linear head.

    `is_training` is accepted only for signature parity with the ResNet
    forward functions; it is unused here.
    """
    layers = [hk.Flatten()]
    for width in (2048, 2048):
        layers.append(hk.Linear(width))
        layers.append(jax.nn.relu)
    layers.append(hk.Linear(10))
    return hk.Sequential(layers)(x)
def get_narrow_mlp():
    """Return the Haiku (init, apply) pair for the narrow (width-256) MLP."""
    init_fn, apply_fn = hk.transform_with_state(_forward_narrow_mlp)
    return init_fn, apply_fn
def get_wide_mlp():
    """Return the Haiku (init, apply) pair for the wide (width-2048) MLP."""
    init_fn, apply_fn = hk.transform_with_state(_forward_wide_mlp)
    return init_fn, apply_fn
def get_model(model_name, n_classes):
    """Look up a model builder by name and return its (init, apply) pair.

    Unknown names print a message and terminate the process, matching the
    script-style error handling used elsewhere.
    """
    builders = {
        'resnet18': lambda: get_resnet(n_classes),
        'mlp_skinny': get_narrow_mlp,
        'mlp_wide': get_wide_mlp,
    }
    builder = builders.get(model_name)
    if builder is None:
        print("Invalid model: {}".format(model_name))
        sys.exit()
    return builder()
adversarial_ntk_evolution | adversarial_ntk_evolution-master/modified_resnets.py | #Copied from Deepmind Haiku library
"""Resnet."""
import types
from typing import Mapping, Optional, Sequence, Union, Any
from haiku._src import basic
from haiku._src import batch_norm
from haiku._src import conv
from haiku._src import module
from haiku._src import pool
import jax
import jax.numpy as jnp
# If forking replace this block with `import haiku as hk`.
# Build a minimal stand-in `haiku` namespace from haiku's private submodules
# so the vendored code below can keep using the familiar `hk.` prefix.
hk = types.ModuleType("haiku")
hk.Module = module.Module
hk.BatchNorm = batch_norm.BatchNorm
hk.Conv2D = conv.Conv2D
hk.Linear = basic.Linear
hk.max_pool = pool.max_pool
# Drop the private submodule names so only `hk` is used from here on.
del basic, batch_norm, conv, module, pool

# Value types accepted by the BatchNorm/Conv config mappings below.
FloatStrOrBool = Union[str, float, bool]
class BlockV1(hk.Module):
  """ResNet V1 block with optional bottleneck.

  Post-activation residual block: conv/batch-norm pairs with ReLU applied
  after the shortcut addition.
  """

  def __init__(
      self,
      channels: int,
      stride: Union[int, Sequence[int]],
      use_projection: bool,
      bn_config: Mapping[str, FloatStrOrBool],
      bottleneck: bool,
      name: Optional[str] = None,
  ):
    super().__init__(name=name)
    self.use_projection = use_projection

    bn_config = dict(bn_config)
    bn_config.setdefault("create_scale", True)
    bn_config.setdefault("create_offset", True)
    bn_config.setdefault("decay_rate", 0.999)

    if self.use_projection:
      # 1x1 projection so the shortcut matches the main path's channels/stride.
      self.proj_conv = hk.Conv2D(
          output_channels=channels,
          kernel_shape=1,
          stride=stride,
          with_bias=False,
          padding="SAME",
          name="shortcut_conv",
      )

      self.proj_batchnorm = hk.BatchNorm(name="shortcut_batchnorm", **bn_config)

    # Bottleneck blocks use 1x1 -> 3x3 -> 1x1 with channels/4 in the middle;
    # basic blocks use two 3x3 convs at full width.
    channel_div = 4 if bottleneck else 1
    conv_0 = hk.Conv2D(
        output_channels=channels // channel_div,
        kernel_shape=1 if bottleneck else 3,
        stride=1 if bottleneck else stride,
        with_bias=False,
        padding="SAME",
        name="conv_0",
    )
    bn_0 = hk.BatchNorm(name="batchnorm_0", **bn_config)

    conv_1 = hk.Conv2D(
        output_channels=channels // channel_div,
        kernel_shape=3,
        stride=stride if bottleneck else 1,
        with_bias=False,
        padding="SAME",
        name="conv_1",
    )

    bn_1 = hk.BatchNorm(name="batchnorm_1", **bn_config)
    layers = ((conv_0, bn_0), (conv_1, bn_1))

    if bottleneck:
      conv_2 = hk.Conv2D(
          output_channels=channels,
          kernel_shape=1,
          stride=1,
          with_bias=False,
          padding="SAME",
          name="conv_2",
      )

      # Zero-init the last BN scale so the block initially acts as identity.
      bn_2 = hk.BatchNorm(name="batchnorm_2", scale_init=jnp.zeros, **bn_config)
      layers = layers + ((conv_2, bn_2),)

    self.layers = layers

  def __call__(self, inputs, is_training, test_local_stats):
    out = shortcut = inputs

    if self.use_projection:
      shortcut = self.proj_conv(shortcut)
      shortcut = self.proj_batchnorm(shortcut, is_training, test_local_stats)

    for i, (conv_i, bn_i) in enumerate(self.layers):
      out = conv_i(out)
      out = bn_i(out, is_training, test_local_stats)
      if i < len(self.layers) - 1:  # Don't apply relu on last layer
        out = jax.nn.relu(out)

    # V1: activation comes after the residual addition.
    return jax.nn.relu(out + shortcut)
class BlockV2(hk.Module):
  """ResNet V2 block with optional bottleneck.

  Pre-activation residual block: batch-norm and ReLU precede each conv, and
  the residual addition is left unactivated.
  """

  def __init__(
      self,
      channels: int,
      stride: Union[int, Sequence[int]],
      use_projection: bool,
      bn_config: Mapping[str, FloatStrOrBool],
      bottleneck: bool,
      name: Optional[str] = None,
  ):
    super().__init__(name=name)
    self.use_projection = use_projection

    bn_config = dict(bn_config)
    bn_config.setdefault("create_scale", True)
    bn_config.setdefault("create_offset", True)

    if self.use_projection:
      # 1x1 projection so the shortcut matches the main path's channels/stride.
      self.proj_conv = hk.Conv2D(
          output_channels=channels,
          kernel_shape=1,
          stride=stride,
          with_bias=False,
          padding="SAME",
          name="shortcut_conv",
      )

    # Bottleneck blocks use 1x1 -> 3x3 -> 1x1 with channels/4 in the middle;
    # basic blocks use two 3x3 convs at full width.
    channel_div = 4 if bottleneck else 1
    conv_0 = hk.Conv2D(
        output_channels=channels // channel_div,
        kernel_shape=1 if bottleneck else 3,
        stride=1 if bottleneck else stride,
        with_bias=False,
        padding="SAME",
        name="conv_0",
    )

    bn_0 = hk.BatchNorm(name="batchnorm_0", **bn_config)

    conv_1 = hk.Conv2D(
        output_channels=channels // channel_div,
        kernel_shape=3,
        stride=stride if bottleneck else 1,
        with_bias=False,
        padding="SAME",
        name="conv_1",
    )

    bn_1 = hk.BatchNorm(name="batchnorm_1", **bn_config)
    layers = ((conv_0, bn_0), (conv_1, bn_1))

    if bottleneck:
      conv_2 = hk.Conv2D(
          output_channels=channels,
          kernel_shape=1,
          stride=1,
          with_bias=False,
          padding="SAME",
          name="conv_2",
      )

      # NOTE: Some implementations of ResNet50 v2 suggest initializing
      # gamma/scale here to zeros.
      bn_2 = hk.BatchNorm(name="batchnorm_2", **bn_config)
      layers = layers + ((conv_2, bn_2),)

    self.layers = layers

  def __call__(self, inputs, is_training, test_local_stats):
    x = shortcut = inputs

    for i, (conv_i, bn_i) in enumerate(self.layers):
      x = bn_i(x, is_training, test_local_stats)
      x = jax.nn.relu(x)
      if i == 0 and self.use_projection:
        # Project the *pre-activated* input so both paths see the same norm.
        shortcut = self.proj_conv(x)
      x = conv_i(x)

    # V2: no activation after the residual addition.
    return x + shortcut
class BlockGroup(hk.Module):
  """Higher level block for ResNet implementation.

  A sequence of residual blocks at one resolution: only the first block may
  stride (downsample) and project the shortcut.
  """

  def __init__(
      self,
      channels: int,
      num_blocks: int,
      stride: Union[int, Sequence[int]],
      bn_config: Mapping[str, FloatStrOrBool],
      resnet_v2: bool,
      bottleneck: bool,
      use_projection: bool,
      name: Optional[str] = None,
  ):
    super().__init__(name=name)

    block_cls = BlockV2 if resnet_v2 else BlockV1

    self.blocks = []
    for i in range(num_blocks):
      self.blocks.append(
          block_cls(
              channels=channels,
              # Only the first block in the group downsamples.
              stride=(1 if i else stride),
              use_projection=(i == 0 and use_projection),
              bottleneck=bottleneck,
              bn_config=bn_config,
              name="block_%d" % (i),
          )
      )

  def __call__(self, inputs, is_training, test_local_stats):
    out = inputs
    for block in self.blocks:
      out = block(out, is_training, test_local_stats)
    return out
def check_length(length, value, name):
  """Raise ValueError unless ``value`` has exactly ``length`` elements.

  Args:
    length: expected number of elements.
    value: sized sequence to validate.
    name: parameter name to mention in the error message.

  Raises:
    ValueError: if ``len(value) != length``.
  """
  if len(value) != length:
    # Interpolate the expected length instead of hard-coding "4", so the
    # message is correct for any `length` argument.
    raise ValueError(f"`{name}` must be of length {length} not {len(value)}")
class ResNet(hk.Module):
  """ResNet model.

  Configurable V1/V2 ResNet; the standard depths are available as presets in
  ``CONFIGS`` and as the ``ResNet18``..``ResNet200`` subclasses below.
  """

  # Per-depth architecture presets, keyed by total layer count.
  CONFIGS = {
      18: {
          "blocks_per_group": (2, 2, 2, 2),
          "bottleneck": False,
          "channels_per_group": (64, 128, 256, 512),
          "use_projection": (False, True, True, True),
      },
      34: {
          "blocks_per_group": (3, 4, 6, 3),
          "bottleneck": False,
          "channels_per_group": (64, 128, 256, 512),
          "use_projection": (False, True, True, True),
      },
      50: {
          "blocks_per_group": (3, 4, 6, 3),
          "bottleneck": True,
          "channels_per_group": (256, 512, 1024, 2048),
          "use_projection": (True, True, True, True),
      },
      101: {
          "blocks_per_group": (3, 4, 23, 3),
          "bottleneck": True,
          "channels_per_group": (256, 512, 1024, 2048),
          "use_projection": (True, True, True, True),
      },
      152: {
          "blocks_per_group": (3, 8, 36, 3),
          "bottleneck": True,
          "channels_per_group": (256, 512, 1024, 2048),
          "use_projection": (True, True, True, True),
      },
      200: {
          "blocks_per_group": (3, 24, 36, 3),
          "bottleneck": True,
          "channels_per_group": (256, 512, 1024, 2048),
          "use_projection": (True, True, True, True),
      },
  }

  # Expose the building blocks as class attributes for subclass overrides.
  BlockGroup = BlockGroup  # pylint: disable=invalid-name
  BlockV1 = BlockV1  # pylint: disable=invalid-name
  BlockV2 = BlockV2  # pylint: disable=invalid-name

  def __init__(
      self,
      blocks_per_group: Sequence[int],
      num_classes: int,
      bn_config: Optional[Mapping[str, FloatStrOrBool]] = None,
      resnet_v2: bool = False,
      bottleneck: bool = True,
      channels_per_group: Sequence[int] = (256, 512, 1024, 2048),
      use_projection: Sequence[bool] = (True, True, True, True),
      logits_config: Optional[Mapping[str, Any]] = None,
      name: Optional[str] = None,
      initial_conv_config: Optional[Mapping[str, FloatStrOrBool]] = None,
      strides: Sequence[int] = (1, 2, 2, 2),
  ):
    """Constructs a ResNet model.

    Args:
      blocks_per_group: A sequence of length 4 that indicates the number of
        blocks created in each group.
      num_classes: The number of classes to classify the inputs into.
      bn_config: A dictionary of two elements, ``decay_rate`` and ``eps`` to be
        passed on to the :class:`~haiku.BatchNorm` layers. By default the
        ``decay_rate`` is ``0.9`` and ``eps`` is ``1e-5``.
      resnet_v2: Whether to use the v1 or v2 ResNet implementation. Defaults to
        ``False``.
      bottleneck: Whether the block should bottleneck or not. Defaults to
        ``True``.
      channels_per_group: A sequence of length 4 that indicates the number
        of channels used for each block in each group.
      use_projection: A sequence of length 4 that indicates whether each
        residual block should use projection.
      logits_config: A dictionary of keyword arguments for the logits layer.
      name: Name of the module.
      initial_conv_config: Keyword arguments passed to the constructor of the
        initial :class:`~haiku.Conv2D` module.
      strides: A sequence of length 4 that indicates the size of stride
        of convolutions for each block in each group.
    """
    super().__init__(name=name)
    self.resnet_v2 = resnet_v2

    bn_config = dict(bn_config or {})
    bn_config.setdefault("decay_rate", 0.9)
    bn_config.setdefault("eps", 1e-5)
    bn_config.setdefault("create_scale", True)
    bn_config.setdefault("create_offset", True)

    logits_config = dict(logits_config or {})
    # Zero-initialized head: the untrained model predicts uniformly.
    logits_config.setdefault("w_init", jnp.zeros)
    logits_config.setdefault("name", "logits")

    # Number of blocks in each group for ResNet.
    check_length(4, blocks_per_group, "blocks_per_group")
    check_length(4, channels_per_group, "channels_per_group")
    check_length(4, strides, "strides")

    initial_conv_config = dict(initial_conv_config or {})
    initial_conv_config.setdefault("output_channels", 64)
    initial_conv_config.setdefault("kernel_shape", 7)
    initial_conv_config.setdefault("stride", 2)
    initial_conv_config.setdefault("with_bias", False)
    initial_conv_config.setdefault("padding", "SAME")
    initial_conv_config.setdefault("name", "initial_conv")

    self.initial_conv = hk.Conv2D(**initial_conv_config)

    if not self.resnet_v2:
      self.initial_batchnorm = hk.BatchNorm(name="initial_batchnorm", **bn_config)

    self.block_groups = []
    for i, stride in enumerate(strides):
      self.block_groups.append(
          BlockGroup(
              channels=channels_per_group[i],
              num_blocks=blocks_per_group[i],
              stride=stride,
              bn_config=bn_config,
              resnet_v2=resnet_v2,
              bottleneck=bottleneck,
              use_projection=use_projection[i],
              name="block_group_%d" % (i),
          )
      )

    if self.resnet_v2:
      self.final_batchnorm = hk.BatchNorm(name="final_batchnorm", **bn_config)

    self.logits = hk.Linear(num_classes, **logits_config)

  def __call__(self, inputs, is_training, test_local_stats=False):
    out = inputs
    out = self.initial_conv(out)
    if not self.resnet_v2:
      out = self.initial_batchnorm(out, is_training, test_local_stats)
      out = jax.nn.relu(out)

    # We remove the final max pool because we are dealing with smaller 32x32
    # images instead of the standard 256x256 imagenet images that this model
    # was designed for.
    # out = hk.max_pool(out,
    #                   window_shape=(1, 3, 3, 1),
    #                   strides=(1, 2, 2, 1),
    #                   padding="SAME")

    for block_group in self.block_groups:
      out = block_group(out, is_training, test_local_stats)

    if self.resnet_v2:
      out = self.final_batchnorm(out, is_training, test_local_stats)
      out = jax.nn.relu(out)
    # Global average pool over the spatial dimensions, then classify.
    out = jnp.mean(out, axis=(1, 2))
    return self.logits(out)
class ResNet18(ResNet):
  """ResNet18."""

  # Thin preset wrapper: forwards all arguments to ResNet plus CONFIGS[18].
  def __init__(
      self,
      num_classes: int,
      bn_config: Optional[Mapping[str, FloatStrOrBool]] = None,
      resnet_v2: bool = False,
      logits_config: Optional[Mapping[str, Any]] = None,
      name: Optional[str] = None,
      initial_conv_config: Optional[Mapping[str, FloatStrOrBool]] = None,
      strides: Sequence[int] = (1, 2, 2, 2),
  ):
    """Constructs a ResNet model.

    Args:
      num_classes: The number of classes to classify the inputs into.
      bn_config: A dictionary of two elements, ``decay_rate`` and ``eps`` to be
        passed on to the :class:`~haiku.BatchNorm` layers.
      resnet_v2: Whether to use the v1 or v2 ResNet implementation. Defaults
        to ``False``.
      logits_config: A dictionary of keyword arguments for the logits layer.
      name: Name of the module.
      initial_conv_config: Keyword arguments passed to the constructor of the
        initial :class:`~haiku.Conv2D` module.
      strides: A sequence of length 4 that indicates the size of stride
        of convolutions for each block in each group.
    """
    super().__init__(
        num_classes=num_classes,
        bn_config=bn_config,
        initial_conv_config=initial_conv_config,
        resnet_v2=resnet_v2,
        strides=strides,
        logits_config=logits_config,
        name=name,
        **ResNet.CONFIGS[18],
    )
class ResNet34(ResNet):
  """ResNet34."""

  # Thin preset wrapper: forwards all arguments to ResNet plus CONFIGS[34].
  def __init__(
      self,
      num_classes: int,
      bn_config: Optional[Mapping[str, FloatStrOrBool]] = None,
      resnet_v2: bool = False,
      logits_config: Optional[Mapping[str, Any]] = None,
      name: Optional[str] = None,
      initial_conv_config: Optional[Mapping[str, FloatStrOrBool]] = None,
      strides: Sequence[int] = (1, 2, 2, 2),
  ):
    """Constructs a ResNet model.

    Args:
      num_classes: The number of classes to classify the inputs into.
      bn_config: A dictionary of two elements, ``decay_rate`` and ``eps`` to be
        passed on to the :class:`~haiku.BatchNorm` layers.
      resnet_v2: Whether to use the v1 or v2 ResNet implementation. Defaults
        to ``False``.
      logits_config: A dictionary of keyword arguments for the logits layer.
      name: Name of the module.
      initial_conv_config: Keyword arguments passed to the constructor of the
        initial :class:`~haiku.Conv2D` module.
      strides: A sequence of length 4 that indicates the size of stride
        of convolutions for each block in each group.
    """
    super().__init__(
        num_classes=num_classes,
        bn_config=bn_config,
        initial_conv_config=initial_conv_config,
        resnet_v2=resnet_v2,
        strides=strides,
        logits_config=logits_config,
        name=name,
        **ResNet.CONFIGS[34],
    )
class ResNet50(ResNet):
  """ResNet50."""

  # Thin preset wrapper: forwards all arguments to ResNet plus CONFIGS[50].
  def __init__(
      self,
      num_classes: int,
      bn_config: Optional[Mapping[str, FloatStrOrBool]] = None,
      resnet_v2: bool = False,
      logits_config: Optional[Mapping[str, Any]] = None,
      name: Optional[str] = None,
      initial_conv_config: Optional[Mapping[str, FloatStrOrBool]] = None,
      strides: Sequence[int] = (1, 2, 2, 2),
  ):
    """Constructs a ResNet model.

    Args:
      num_classes: The number of classes to classify the inputs into.
      bn_config: A dictionary of two elements, ``decay_rate`` and ``eps`` to be
        passed on to the :class:`~haiku.BatchNorm` layers.
      resnet_v2: Whether to use the v1 or v2 ResNet implementation. Defaults
        to ``False``.
      logits_config: A dictionary of keyword arguments for the logits layer.
      name: Name of the module.
      initial_conv_config: Keyword arguments passed to the constructor of the
        initial :class:`~haiku.Conv2D` module.
      strides: A sequence of length 4 that indicates the size of stride
        of convolutions for each block in each group.
    """
    super().__init__(
        num_classes=num_classes,
        bn_config=bn_config,
        initial_conv_config=initial_conv_config,
        resnet_v2=resnet_v2,
        strides=strides,
        logits_config=logits_config,
        name=name,
        **ResNet.CONFIGS[50],
    )
class ResNet101(ResNet):
  """ResNet101."""

  # Thin preset wrapper: forwards all arguments to ResNet plus CONFIGS[101].
  def __init__(
      self,
      num_classes: int,
      bn_config: Optional[Mapping[str, FloatStrOrBool]] = None,
      resnet_v2: bool = False,
      logits_config: Optional[Mapping[str, Any]] = None,
      name: Optional[str] = None,
      initial_conv_config: Optional[Mapping[str, FloatStrOrBool]] = None,
      strides: Sequence[int] = (1, 2, 2, 2),
  ):
    """Constructs a ResNet model.

    Args:
      num_classes: The number of classes to classify the inputs into.
      bn_config: A dictionary of two elements, ``decay_rate`` and ``eps`` to be
        passed on to the :class:`~haiku.BatchNorm` layers.
      resnet_v2: Whether to use the v1 or v2 ResNet implementation. Defaults
        to ``False``.
      logits_config: A dictionary of keyword arguments for the logits layer.
      name: Name of the module.
      initial_conv_config: Keyword arguments passed to the constructor of the
        initial :class:`~haiku.Conv2D` module.
      strides: A sequence of length 4 that indicates the size of stride
        of convolutions for each block in each group.
    """
    super().__init__(
        num_classes=num_classes,
        bn_config=bn_config,
        initial_conv_config=initial_conv_config,
        resnet_v2=resnet_v2,
        strides=strides,
        logits_config=logits_config,
        name=name,
        **ResNet.CONFIGS[101],
    )
class ResNet152(ResNet):
  """ResNet152."""

  # Thin preset wrapper: forwards all arguments to ResNet plus CONFIGS[152].
  def __init__(
      self,
      num_classes: int,
      bn_config: Optional[Mapping[str, FloatStrOrBool]] = None,
      resnet_v2: bool = False,
      logits_config: Optional[Mapping[str, Any]] = None,
      name: Optional[str] = None,
      initial_conv_config: Optional[Mapping[str, FloatStrOrBool]] = None,
      strides: Sequence[int] = (1, 2, 2, 2),
  ):
    """Constructs a ResNet model.

    Args:
      num_classes: The number of classes to classify the inputs into.
      bn_config: A dictionary of two elements, ``decay_rate`` and ``eps`` to be
        passed on to the :class:`~haiku.BatchNorm` layers.
      resnet_v2: Whether to use the v1 or v2 ResNet implementation. Defaults
        to ``False``.
      logits_config: A dictionary of keyword arguments for the logits layer.
      name: Name of the module.
      initial_conv_config: Keyword arguments passed to the constructor of the
        initial :class:`~haiku.Conv2D` module.
      strides: A sequence of length 4 that indicates the size of stride
        of convolutions for each block in each group.
    """
    super().__init__(
        num_classes=num_classes,
        bn_config=bn_config,
        initial_conv_config=initial_conv_config,
        resnet_v2=resnet_v2,
        strides=strides,
        logits_config=logits_config,
        name=name,
        **ResNet.CONFIGS[152],
    )
class ResNet200(ResNet):
  """ResNet200."""

  # Thin preset wrapper: forwards all arguments to ResNet plus CONFIGS[200].
  def __init__(
      self,
      num_classes: int,
      bn_config: Optional[Mapping[str, FloatStrOrBool]] = None,
      resnet_v2: bool = False,
      logits_config: Optional[Mapping[str, Any]] = None,
      name: Optional[str] = None,
      initial_conv_config: Optional[Mapping[str, FloatStrOrBool]] = None,
      strides: Sequence[int] = (1, 2, 2, 2),
  ):
    """Constructs a ResNet model.

    Args:
      num_classes: The number of classes to classify the inputs into.
      bn_config: A dictionary of two elements, ``decay_rate`` and ``eps`` to be
        passed on to the :class:`~haiku.BatchNorm` layers.
      resnet_v2: Whether to use the v1 or v2 ResNet implementation. Defaults
        to ``False``.
      logits_config: A dictionary of keyword arguments for the logits layer.
      name: Name of the module.
      initial_conv_config: Keyword arguments passed to the constructor of the
        initial :class:`~haiku.Conv2D` module.
      strides: A sequence of length 4 that indicates the size of stride
        of convolutions for each block in each group.
    """
    super().__init__(
        num_classes=num_classes,
        bn_config=bn_config,
        initial_conv_config=initial_conv_config,
        resnet_v2=resnet_v2,
        strides=strides,
        logits_config=logits_config,
        name=name,
        **ResNet.CONFIGS[200],
    )
| 22,689 | 34.676101 | 167 | py |
big_transfer | big_transfer-master/bit_jax/models.py | # Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import jax
import jax.numpy as jnp
import flax.nn as nn
def fixed_padding(x, kernel_size):
  """Zero-pad the spatial dims of an NHWC tensor for a VALID convolution.

  Pads a total of ``kernel_size - 1`` zeros per spatial axis, split as evenly
  as possible (extra zero on the high side for even totals).
  """
  total = kernel_size - 1
  low = total // 2
  high = total - low
  untouched = (0, 0, 0)          # (low, high, interior) padding: none
  spatial = (low, high, 0)       # symmetric-ish zero padding, no interior
  return jax.lax.pad(x, 0.0, (untouched, spatial, spatial, untouched))
def standardize(x, axis, eps):
  """Shift and scale `x` to zero mean and (near-)unit variance along `axis`."""
  centered = x - jnp.mean(x, axis=axis, keepdims=True)
  variance = jnp.mean(jnp.square(centered), axis=axis, keepdims=True)
  return centered / jnp.sqrt(variance + eps)
class GroupNorm(nn.Module):
  """Group normalization (arxiv.org/abs/1803.08494)."""

  def apply(self, x, num_groups=32):
    input_shape = x.shape
    # Split the channel (last) axis into `num_groups` groups of size
    # C // num_groups; assumes C is divisible by num_groups.
    group_shape = x.shape[:-1] + (num_groups, x.shape[-1] // num_groups)
    x = x.reshape(group_shape)

    # Standardize along spatial and group dimensions
    x = standardize(x, axis=[1, 2, 4], eps=1e-5)
    x = x.reshape(input_shape)

    # Learned per-channel affine transform, broadcast over batch and space.
    bias_scale_shape = tuple([1, 1, 1] + [input_shape[-1]])
    x = x * self.param('scale', bias_scale_shape, nn.initializers.ones)
    x = x + self.param('bias', bias_scale_shape, nn.initializers.zeros)
    return x
class StdConv(nn.Conv):
  """Conv with weight standardization: the kernel is re-standardized over its
  spatial and input-channel axes every time it is fetched, so each output
  filter has zero mean and unit variance."""

  def param(self, name, shape, initializer):
    param = super().param(name, shape, initializer)
    if name == 'kernel':
      # Standardize over (H, W, C_in); leaves the bias (if any) untouched.
      param = standardize(param, axis=[0, 1, 2], eps=1e-10)
    return param
class RootBlock(nn.Module):
  """Network stem: 7x7/2 standardized conv followed by a 3x3/2 max pool.

  Explicit `fixed_padding` + VALID padding is used instead of SAME so the
  padding is independent of the input size.
  """

  def apply(self, x, width):
    x = fixed_padding(x, 7)
    x = StdConv(x, width, (7, 7), (2, 2),
                padding="VALID",
                bias=False,
                name="conv_root")

    x = fixed_padding(x, 3)
    x = nn.max_pool(x, (3, 3), strides=(2, 2), padding="VALID")

    return x
class ResidualUnit(nn.Module):
  """Bottleneck ResNet block."""

  def apply(self, x, nout, strides=(1, 1)):
    x_shortcut = x
    # Project the shortcut when the channel count or spatial size changes.
    needs_projection = x.shape[-1] != nout * 4 or strides != (1, 1)

    group_norm = GroupNorm
    conv = StdConv.partial(bias=False)

    # Pre-activation: GN + ReLU before each conv (ResNet v2 ordering).
    x = group_norm(x, name="gn1")
    x = nn.relu(x)
    if needs_projection:
      # Projection sees the pre-activated input, matching the main path.
      x_shortcut = conv(x, nout * 4, (1, 1), strides, name="conv_proj")
    x = conv(x, nout, (1, 1), name="conv1")

    x = group_norm(x, name="gn2")
    x = nn.relu(x)
    x = fixed_padding(x, 3)
    x = conv(x, nout, (3, 3), strides, name="conv2", padding='VALID')

    x = group_norm(x, name="gn3")
    x = nn.relu(x)
    # Expand back to 4*nout channels (bottleneck output width).
    x = conv(x, nout * 4, (1, 1), name="conv3")

    return x + x_shortcut
class ResidualBlock(nn.Module):
  """A group of `block_size` residual units at one resolution.

  Only the first unit applies `first_stride` (and hence any downsampling).
  """

  def apply(self, x, block_size, nout, first_stride):
    x = ResidualUnit(
        x, nout, strides=first_stride,
        name="unit01")
    for i in range(1, block_size):
      x = ResidualUnit(
          x, nout, strides=(1, 1),
          name=f"unit{i+1:02d}")
    return x
class ResNet(nn.Module):
  """ResNetV2.

  Pre-activation ResNet with GroupNorm + weight-standardized convs, as used
  by Big Transfer (BiT). Depth is selected via `num_layers` (50/101/152) and
  width via `width_factor`.
  """

  def apply(self, x, num_classes=1000,
            width_factor=1, num_layers=50):
    block_sizes = _block_sizes[num_layers]

    width = 64 * width_factor

    root_block = RootBlock.partial(width=width)
    x = root_block(x, name='root_block')

    # Blocks
    for i, block_size in enumerate(block_sizes):
      # Channel width doubles each group; all groups after the first stride.
      x = ResidualBlock(x, block_size, width * 2 ** i,
                        first_stride=(1, 1) if i == 0 else (2, 2),
                        name=f"block{i + 1}")

    # Pre-head
    x = GroupNorm(x, name='norm-pre-head')
    x = nn.relu(x)
    x = jnp.mean(x, axis=(1, 2))

    # Head (zero-initialized, so the untrained model predicts uniformly).
    x = nn.Dense(x, num_classes, name="conv_head",
                 kernel_init=nn.initializers.zeros)

    # Cast back to float32 in case upstream computation ran in lower precision.
    return x.astype(jnp.float32)
# Number of residual units per block group, keyed by total layer count.
_block_sizes = {
    50: [3, 4, 6, 3],
    101: [3, 4, 23, 3],
    152: [3, 8, 36, 3],
}

# Model registry: names like 'BiT-M-R50x1' map to a ResNet partial with the
# corresponding depth (R50/R101/R152) and width multiplier (x1/x2/x3/x4).
KNOWN_MODELS = dict(
    [(bit + f'-R{l}x{w}', ResNet.partial(num_layers=l, width_factor=w))
     for bit in ['BiT-S', 'BiT-M']
     for l, w in [(50, 1), (50, 3), (101, 1), (152, 2), (101, 3), (152, 4)]]
)
| 4,397 | 25.493976 | 75 | py |
big_transfer | big_transfer-master/bit_jax/train.py | # Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from functools import partial
import numpy as np
import jax
import jax.numpy as jnp
import flax.optim as optim
import flax.jax_utils as flax_utils
import input_pipeline_tf2_or_jax as input_pipeline
import bit_jax.models as models
import bit_jax.tf2jax as tf2jax
import bit_common
import bit_hyperrule
def main(args):
  """Fine-tunes a pretrained BiT model on the dataset selected via `args`."""
  logger = bit_common.setup_logger(args)

  logger.info(f'Available devices: {jax.devices()}')

  model = models.KNOWN_MODELS[args.model]

  # Load weights of a BiT model
  bit_model_file = os.path.join(args.bit_pretrained_dir, f'{args.model}.npz')
  if not os.path.exists(bit_model_file):
    raise FileNotFoundError(
        f'Model file is not found in "{args.bit_pretrained_dir}" directory.')
  with open(bit_model_file, 'rb') as f:
    params_tf = np.load(f)
    # Materialize the lazy npz mapping into a plain dict of numpy arrays.
    params_tf = dict(zip(params_tf.keys(), params_tf.values()))

  # BiT-HyperRule: resize/crop resolution depends on the dataset.
  resize_size, crop_size = bit_hyperrule.get_resolution_from_dataset(
      args.dataset)

  # Setup input pipeline
  dataset_info = input_pipeline.get_dataset_info(
      args.dataset, 'train', args.examples_per_class)

  data_train = input_pipeline.get_data(
      dataset=args.dataset,
      mode='train',
      repeats=None, batch_size=args.batch,
      resize_size=resize_size, crop_size=crop_size,
      examples_per_class=args.examples_per_class,
      examples_per_class_seed=args.examples_per_class_seed,
      mixup_alpha=bit_hyperrule.get_mixup(dataset_info['num_examples']),
      num_devices=jax.local_device_count(),
      tfds_manual_dir=args.tfds_manual_dir)
  logger.info(data_train)
  data_test = input_pipeline.get_data(
      dataset=args.dataset,
      mode='test',
      repeats=1, batch_size=args.batch_eval,
      resize_size=resize_size, crop_size=crop_size,
      examples_per_class=None, examples_per_class_seed=0,
      mixup_alpha=None,
      num_devices=jax.local_device_count(),
      tfds_manual_dir=args.tfds_manual_dir)
  logger.info(data_test)

  # Build ResNet architecture
  ResNet = model.partial(num_classes=dataset_info['num_classes'])
  # Shape-only initialization: no real batch needed to create the params.
  _, params = ResNet.init_by_shape(
      jax.random.PRNGKey(0),
      [([1, crop_size, crop_size, 3], jnp.float32)])

  resnet_fn = ResNet.call

  # pmap replicates the models over all GPUs
  resnet_fn_repl = jax.pmap(ResNet.call)

  def cross_entropy_loss(*, logits, labels):
    # `labels` are dense one-hot (possibly mixup-blended) vectors.
    logp = jax.nn.log_softmax(logits)
    return -jnp.mean(jnp.sum(logp * labels, axis=1))

  def loss_fn(params, images, labels):
    logits = resnet_fn(params, images)
    return cross_entropy_loss(logits=logits, labels=labels)

  # Update step, replicated over all GPUs
  @partial(jax.pmap, axis_name='batch')
  def update_fn(opt, lr, batch):
    l, g = jax.value_and_grad(loss_fn)(opt.target,
                                       batch['image'],
                                       batch['label'])
    # Average gradients across devices before applying them.
    g = jax.tree_map(lambda x: jax.lax.pmean(x, axis_name='batch'), g)
    opt = opt.apply_gradient(g, learning_rate=lr)
    return opt

  # In-place update of randomly initialized weights by BiT weights
  tf2jax.transform_params(params, params_tf,
                          num_classes=dataset_info['num_classes'])

  # Create optimizer and replicate it over all GPUs
  opt = optim.Momentum(beta=0.9).create(params)
  opt_repl = flax_utils.replicate(opt)

  # Delete references to the objects that are not needed anymore
  del opt
  del params

  total_steps = bit_hyperrule.get_schedule(dataset_info['num_examples'])[-1]

  # Run training loop
  for step, batch in zip(range(1, total_steps + 1),
                         data_train.as_numpy_iterator()):

    lr = bit_hyperrule.get_lr(step - 1,
                              dataset_info['num_examples'],
                              args.base_lr)
    opt_repl = update_fn(opt_repl, flax_utils.replicate(lr), batch)

    # Run eval step
    if ((args.eval_every and step % args.eval_every == 0)
        or (step == total_steps)):

      # Outputs carry a leading device axis from pmap, hence argmax on axis=2.
      accuracy_test = np.mean([
          c
          for batch in data_test.as_numpy_iterator()
          for c in (
              np.argmax(resnet_fn_repl(opt_repl.target, batch['image']), axis=2) ==
              np.argmax(batch['label'], axis=2)).ravel()])

      logger.info(
          f'Step: {step}, '
          f'learning rate: {lr:.07f}, '
          f'Test accuracy: {accuracy_test:0.3f}')
if __name__ == "__main__":
  # Extend the shared BiT argument parser with JAX-pipeline-specific flags.
  parser = bit_common.argparser(models.KNOWN_MODELS.keys())
  # BUGFIX: corrected typo in the user-visible help text
  # ("maually" -> "manually").
  parser.add_argument("--tfds_manual_dir", default=None,
                      help="Path to manually downloaded dataset.")
  parser.add_argument("--batch_eval", default=32, type=int,
                      help="Eval batch size.")
  main(parser.parse_args())
| 5,183 | 32.662338 | 79 | py |
big_transfer | big_transfer-master/bit_pytorch/models.py | # Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Bottleneck ResNet v2 with GroupNorm and Weight Standardization."""
from collections import OrderedDict # pylint: disable=g-importing-member
import torch
import torch.nn as nn
import torch.nn.functional as F
class StdConv2d(nn.Conv2d):
  """Conv2d with Weight Standardization (abs/1903.10520).

  On every forward pass the kernel is normalized to zero mean and unit
  variance over its (in_channels, kH, kW) axes before the convolution is
  applied; the stored `self.weight` itself is left untouched.
  """

  def forward(self, x):
    kernel = self.weight
    var, mean = torch.var_mean(kernel, dim=[1, 2, 3], keepdim=True,
                               unbiased=False)
    # 1e-10 guards against division by zero for an all-constant kernel.
    standardized = (kernel - mean) / torch.sqrt(var + 1e-10)
    return F.conv2d(x, standardized, self.bias, self.stride, self.padding,
                    self.dilation, self.groups)
def conv3x3(cin, cout, stride=1, groups=1, bias=False):
  """3x3 weight-standardized convolution with padding 1 ('same' for stride 1)."""
  return StdConv2d(cin, cout, kernel_size=3, stride=stride, padding=1,
                   bias=bias, groups=groups)
def conv1x1(cin, cout, stride=1, bias=False):
  """1x1 weight-standardized (pointwise) convolution, no padding."""
  return StdConv2d(cin, cout, kernel_size=1, stride=stride, padding=0,
                   bias=bias)
def tf2th(conv_weights):
  """Convert TF conv kernels (HWIO layout) to PyTorch tensors (OIHW).

  Non-4D arrays (biases, norm gammas/betas, ...) pass through unchanged
  apart from the numpy -> torch conversion.
  """
  arr = conv_weights
  if arr.ndim == 4:
    arr = arr.transpose([3, 2, 0, 1])  # HWIO -> OIHW
  return torch.from_numpy(arr)
class PreActBottleneck(nn.Module):
  """Pre-activation (v2) bottleneck block.

  Follows the implementation of "Identity Mappings in Deep Residual Networks":
  https://github.com/KaimingHe/resnet-1k-layers/blob/master/resnet-pre-act.lua

  Except it puts the stride on 3x3 conv when available.
  """

  def __init__(self, cin, cout=None, cmid=None, stride=1):
    """Args:
      cin: number of input channels.
      cout: number of output channels; defaults to cin.
      cmid: bottleneck (middle) channels; defaults to cout // 4.
      stride: spatial stride, applied on the 3x3 conv (see class docstring).
    """
    super().__init__()
    cout = cout or cin
    cmid = cmid or cout//4

    self.gn1 = nn.GroupNorm(32, cin)
    self.conv1 = conv1x1(cin, cmid)
    self.gn2 = nn.GroupNorm(32, cmid)
    self.conv2 = conv3x3(cmid, cmid, stride)  # Original code has it on conv1!!
    self.gn3 = nn.GroupNorm(32, cmid)
    self.conv3 = conv1x1(cmid, cout)
    self.relu = nn.ReLU(inplace=True)

    if (stride != 1 or cin != cout):
      # Projection also with pre-activation according to paper.
      self.downsample = conv1x1(cin, cout, stride)

  def forward(self, x):
    # Pre-activation: norm + ReLU come before the convolutions.
    out = self.relu(self.gn1(x))

    # Residual branch
    residual = x
    if hasattr(self, 'downsample'):
      # When shape changes, project the *pre-activated* input.
      residual = self.downsample(out)

    # Unit's branch
    out = self.conv1(out)
    out = self.conv2(self.relu(self.gn2(out)))
    out = self.conv3(self.relu(self.gn3(out)))

    return out + residual

  def load_from(self, weights, prefix=''):
    """Copies this unit's weights from a TF BiT checkpoint dict.

    `weights` maps TF variable names to numpy arrays; `prefix` selects the
    unit (e.g. 'resnet/block1/unit01/'). Conv kernels are converted from
    HWIO to OIHW by `tf2th`.
    """
    convname = 'standardized_conv2d'
    with torch.no_grad():
      self.conv1.weight.copy_(tf2th(weights[f'{prefix}a/{convname}/kernel']))
      self.conv2.weight.copy_(tf2th(weights[f'{prefix}b/{convname}/kernel']))
      self.conv3.weight.copy_(tf2th(weights[f'{prefix}c/{convname}/kernel']))
      self.gn1.weight.copy_(tf2th(weights[f'{prefix}a/group_norm/gamma']))
      self.gn2.weight.copy_(tf2th(weights[f'{prefix}b/group_norm/gamma']))
      self.gn3.weight.copy_(tf2th(weights[f'{prefix}c/group_norm/gamma']))
      self.gn1.bias.copy_(tf2th(weights[f'{prefix}a/group_norm/beta']))
      self.gn2.bias.copy_(tf2th(weights[f'{prefix}b/group_norm/beta']))
      self.gn3.bias.copy_(tf2th(weights[f'{prefix}c/group_norm/beta']))
      if hasattr(self, 'downsample'):
        w = weights[f'{prefix}a/proj/{convname}/kernel']
        self.downsample.weight.copy_(tf2th(w))
class ResNetV2(nn.Module):
  """Implementation of Pre-activation (v2) ResNet mode."""

  def __init__(self, block_units, width_factor, head_size=21843, zero_head=False):
    """Args:
      block_units: units per stage, e.g. [3, 4, 6, 3] for a ResNet-50.
      width_factor: channel-width multiplier (BiT x1/x2/x3/x4).
      head_size: number of output classes (default matches ImageNet-21k).
      zero_head: if True, zero-initialize the classification head instead of
        loading it from the checkpoint (used for fine-tuning on a new task).
    """
    super().__init__()
    wf = width_factor  # shortcut 'cause we'll use it a lot.

    # The following will be unreadable if we split lines.
    # pylint: disable=line-too-long
    self.root = nn.Sequential(OrderedDict([
        ('conv', StdConv2d(3, 64*wf, kernel_size=7, stride=2, padding=3, bias=False)),
        ('pad', nn.ConstantPad2d(1, 0)),
        ('pool', nn.MaxPool2d(kernel_size=3, stride=2, padding=0)),
        # The following is subtly not the same!
        # ('pool', nn.MaxPool2d(kernel_size=3, stride=2, padding=1)),
    ]))

    self.body = nn.Sequential(OrderedDict([
        ('block1', nn.Sequential(OrderedDict(
            [('unit01', PreActBottleneck(cin=64*wf, cout=256*wf, cmid=64*wf))] +
            [(f'unit{i:02d}', PreActBottleneck(cin=256*wf, cout=256*wf, cmid=64*wf)) for i in range(2, block_units[0] + 1)],
        ))),
        ('block2', nn.Sequential(OrderedDict(
            [('unit01', PreActBottleneck(cin=256*wf, cout=512*wf, cmid=128*wf, stride=2))] +
            [(f'unit{i:02d}', PreActBottleneck(cin=512*wf, cout=512*wf, cmid=128*wf)) for i in range(2, block_units[1] + 1)],
        ))),
        ('block3', nn.Sequential(OrderedDict(
            [('unit01', PreActBottleneck(cin=512*wf, cout=1024*wf, cmid=256*wf, stride=2))] +
            [(f'unit{i:02d}', PreActBottleneck(cin=1024*wf, cout=1024*wf, cmid=256*wf)) for i in range(2, block_units[2] + 1)],
        ))),
        ('block4', nn.Sequential(OrderedDict(
            [('unit01', PreActBottleneck(cin=1024*wf, cout=2048*wf, cmid=512*wf, stride=2))] +
            [(f'unit{i:02d}', PreActBottleneck(cin=2048*wf, cout=2048*wf, cmid=512*wf)) for i in range(2, block_units[3] + 1)],
        ))),
    ]))
    # pylint: enable=line-too-long

    self.zero_head = zero_head
    # Head implemented as GN + ReLU + global pool + 1x1 conv (acts as Dense).
    self.head = nn.Sequential(OrderedDict([
        ('gn', nn.GroupNorm(32, 2048*wf)),
        ('relu', nn.ReLU(inplace=True)),
        ('avg', nn.AdaptiveAvgPool2d(output_size=1)),
        ('conv', nn.Conv2d(2048*wf, head_size, kernel_size=1, bias=True)),
    ]))

  def forward(self, x):
    x = self.head(self.body(self.root(x)))
    assert x.shape[-2:] == (1, 1)  # We should have no spatial shape left.
    return x[...,0,0]

  def load_from(self, weights, prefix='resnet/'):
    """Copies all model weights from a TF BiT checkpoint dict (numpy arrays)."""
    with torch.no_grad():
      self.root.conv.weight.copy_(tf2th(weights[f'{prefix}root_block/standardized_conv2d/kernel']))  # pylint: disable=line-too-long
      self.head.gn.weight.copy_(tf2th(weights[f'{prefix}group_norm/gamma']))
      self.head.gn.bias.copy_(tf2th(weights[f'{prefix}group_norm/beta']))
      if self.zero_head:
        nn.init.zeros_(self.head.conv.weight)
        nn.init.zeros_(self.head.conv.bias)
      else:
        self.head.conv.weight.copy_(tf2th(weights[f'{prefix}head/conv2d/kernel']))  # pylint: disable=line-too-long
        self.head.conv.bias.copy_(tf2th(weights[f'{prefix}head/conv2d/bias']))

      # Delegate per-unit loading to each bottleneck block.
      for bname, block in self.body.named_children():
        for uname, unit in block.named_children():
          unit.load_from(weights, prefix=f'{prefix}{bname}/{uname}/')
# Architectures for all released BiT checkpoints: (depth, units per stage,
# width factor). The 'BiT-M' (ImageNet-21k) and 'BiT-S' (ILSVRC-2012)
# upstream variants share the same set of architectures.
_CONFIGS = (
    (50, [3, 4, 6, 3], 1),
    (50, [3, 4, 6, 3], 3),
    (101, [3, 4, 23, 3], 1),
    (101, [3, 4, 23, 3], 3),
    (152, [3, 8, 36, 3], 2),
    (152, [3, 8, 36, 3], 4),
)


def _constructor(block_units, width_factor):
  """Bind architecture hyper-parameters into a ResNetV2 factory."""
  return lambda *args, **kwargs: ResNetV2(block_units, width_factor, *args, **kwargs)


# Model name (e.g. 'BiT-M-R50x1') -> ResNetV2 factory accepting the
# remaining constructor arguments (head_size, zero_head, ...).
KNOWN_MODELS = OrderedDict(
    (f'BiT-{upstream}-R{depth}x{width}', _constructor(units, width))
    for upstream in ('M', 'S')
    for depth, units, width in _CONFIGS
)
| 7,962 | 40.691099 | 132 | py |
big_transfer | big_transfer-master/bit_pytorch/fewshot.py | # Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Utility to find k-shot dataset indices, outputs the indices on stdout."""
#!/usr/bin/env python3
# coding: utf-8
from collections import *
from functools import *
import random
import sys
import torch
import torchvision as tv
class AddIndexIter(torch.utils.data.dataloader._SingleProcessDataLoaderIter):
  """DataLoader iterator that also yields the sampled dataset indices.

  NOTE(review): subclasses a private torch internal
  (_SingleProcessDataLoaderIter) and mirrors its _next_data implementation;
  this may break across torch versions — verify on upgrade.
  """

  def _next_data(self):
    index = self._next_index()  # may raise StopIteration
    data = self._dataset_fetcher.fetch(index)  # may raise StopIteration
    if self._pin_memory:
      data = torch.utils.data._utils.pin_memory.pin_memory(data)
    # Unlike the base class, return the indices alongside the batch.
    return index, data
def find_indices_loader(loader, n_shots, n_classes):
  """Scans `loader` until `n_shots` dataset indices are found per class.

  Args:
    loader: DataLoader yielding (images, labels); wrapped in AddIndexIter so
      the dataset index of every example is recovered alongside the batch.
    n_shots: number of example indices to collect for each class.
    n_classes: total number of classes expected in the dataset.

  Returns:
    dict mapping label -> deque of at most `n_shots` dataset indices.

  Raises:
    RuntimeError: if the loader is exhausted before enough examples are found.
  """
  per_label_indices = defaultdict(partial(deque, maxlen=n_shots))

  # FIX: dropped a needless `enumerate` wrapper whose batch counter was
  # unused, and the unused `images` unpacking; only (index, label) pairs
  # are needed here.
  for indices, (_, labels) in AddIndexIter(loader):
    for idx, lbl in zip(indices, labels):
      per_label_indices[lbl.item()].append(idx)

    findings = sum(map(len, per_label_indices.values()))
    if findings == n_shots * n_classes:
      return per_label_indices

  raise RuntimeError("Unable to find enough examples!")
def find_fewshot_indices(dataset, n_shots):
  """Returns a shuffled list of dataset indices, n_shots per class."""
  num_classes = len(dataset.classes)

  # Temporarily install a minimal transform: only the labels matter for the
  # scan, so shrink images to 1x1 pixels to make iteration cheap.
  saved_transform = dataset.transform
  dataset.transform = tv.transforms.Compose([
      tv.transforms.CenterCrop(1),
      tv.transforms.ToTensor()
  ])

  # TODO(lbeyer): if dataset isinstance DatasetFolder, we can (maybe?) do much better!
  loader = torch.utils.data.DataLoader(dataset, batch_size=1024, shuffle=True, num_workers=0)
  per_label_indices = find_indices_loader(loader, n_shots, num_classes)

  selected = [i for indices in per_label_indices.values() for i in indices]
  random.shuffle(selected)

  # Restore the caller's transform before handing the indices back.
  dataset.transform = saved_transform
  return selected
if __name__ == "__main__":
  # Usage: fewshot.py N_SHOTS DATASET_DIR
  # BUGFIX: the original passed an undefined name `preprocess` as the
  # transform, which raised NameError on launch. No transform is needed
  # here, because find_fewshot_indices installs its own cheap transform
  # while scanning and restores the original afterwards.
  dataset = tv.datasets.ImageFolder(sys.argv[2])
  all_indices = find_fewshot_indices(dataset, int(sys.argv[1]))
  for i in all_indices:
    print(i)
| 2,508 | 31.584416 | 93 | py |
big_transfer | big_transfer-master/bit_pytorch/train.py | # Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Fine-tune a BiT model on some downstream dataset."""
#!/usr/bin/env python3
# coding: utf-8
from os.path import join as pjoin # pylint: disable=g-importing-member
import time
import numpy as np
import torch
import torchvision as tv
import bit_pytorch.fewshot as fs
import bit_pytorch.lbtoolbox as lb
import bit_pytorch.models as models
import bit_common
import bit_hyperrule
def topk(output, target, ks=(1,)):
  """Returns one boolean vector for each k, whether the target is within the output's top-k."""
  k_max = max(ks)
  _, top_preds = output.topk(k_max, 1, True, True)
  top_preds = top_preds.t()  # (k_max, batch)
  hits = top_preds.eq(target.view(1, -1).expand_as(top_preds))
  # For each requested k, collapse the first k prediction rows per example.
  return [hits[:k].max(0)[0] for k in ks]
def recycle(iterable):
  """Variant of itertools.cycle that does not save iterates."""
  # Re-iterates `iterable` from scratch each round instead of caching the
  # yielded items, so fresh shuffling/augmentation happens every epoch.
  while True:
    yield from iterable
def mktrainval(args, logger):
  """Returns train and validation datasets."""
  # BiT-HyperRule resolutions: resize to `precrop`, then random-crop to `crop`.
  precrop, crop = bit_hyperrule.get_resolution_from_dataset(args.dataset)
  train_tx = tv.transforms.Compose([
      tv.transforms.Resize((precrop, precrop)),
      tv.transforms.RandomCrop((crop, crop)),
      tv.transforms.RandomHorizontalFlip(),
      tv.transforms.ToTensor(),
      tv.transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
  ])
  val_tx = tv.transforms.Compose([
      tv.transforms.Resize((crop, crop)),
      tv.transforms.ToTensor(),
      tv.transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
  ])

  if args.dataset == "cifar10":
    train_set = tv.datasets.CIFAR10(args.datadir, transform=train_tx, train=True, download=True)
    valid_set = tv.datasets.CIFAR10(args.datadir, transform=val_tx, train=False, download=True)
  elif args.dataset == "cifar100":
    train_set = tv.datasets.CIFAR100(args.datadir, transform=train_tx, train=True, download=True)
    valid_set = tv.datasets.CIFAR100(args.datadir, transform=val_tx, train=False, download=True)
  elif args.dataset == "imagenet2012":
    train_set = tv.datasets.ImageFolder(pjoin(args.datadir, "train"), train_tx)
    valid_set = tv.datasets.ImageFolder(pjoin(args.datadir, "val"), val_tx)
  else:
    raise ValueError(f"Sorry, we have not spent time implementing the "
                     f"{args.dataset} dataset in the PyTorch codebase. "
                     f"In principle, it should be easy to add :)")

  if args.examples_per_class is not None:
    # Few-shot mode: restrict the training set to N examples per class.
    logger.info(f"Looking for {args.examples_per_class} images per class...")
    indices = fs.find_fewshot_indices(train_set, args.examples_per_class)
    train_set = torch.utils.data.Subset(train_set, indices=indices)

  logger.info(f"Using a training set with {len(train_set)} images.")
  logger.info(f"Using a validation set with {len(valid_set)} images.")

  # The logical batch is split into `batch_split` micro-batches for
  # gradient accumulation (see main()).
  micro_batch_size = args.batch // args.batch_split

  valid_loader = torch.utils.data.DataLoader(
      valid_set, batch_size=micro_batch_size, shuffle=False,
      num_workers=args.workers, pin_memory=True, drop_last=False)

  if micro_batch_size <= len(train_set):
    train_loader = torch.utils.data.DataLoader(
        train_set, batch_size=micro_batch_size, shuffle=True,
        num_workers=args.workers, pin_memory=True, drop_last=False)
  else:
    # In the few-shot cases, the total dataset size might be smaller than the batch-size.
    # In these cases, the default sampler doesn't repeat, so we need to make it do that
    # if we want to match the behaviour from the paper.
    train_loader = torch.utils.data.DataLoader(
        train_set, batch_size=micro_batch_size, num_workers=args.workers, pin_memory=True,
        sampler=torch.utils.data.RandomSampler(train_set, replacement=True, num_samples=micro_batch_size))

  return train_set, valid_set, train_loader, valid_loader
def run_eval(model, data_loader, device, chrono, logger, step):
  """Evaluates `model` on `data_loader`; returns per-example loss/top1/top5.

  Puts the model into eval mode for the duration and back into train mode
  before returning. `step` is only used for logging.
  """
  # switch to evaluate mode
  model.eval()

  logger.info("Running validation...")
  logger.flush()

  all_c, all_top1, all_top5 = [], [], []
  end = time.time()
  for b, (x, y) in enumerate(data_loader):
    with torch.no_grad():
      x = x.to(device, non_blocking=True)
      y = y.to(device, non_blocking=True)

      # measure data loading time
      chrono._done("eval load", time.time() - end)

      # compute output, measure accuracy and record loss.
      with chrono.measure("eval fprop"):
        logits = model(x)
        # reduction='none' keeps the per-example losses for later averaging.
        c = torch.nn.CrossEntropyLoss(reduction='none')(logits, y)
        top1, top5 = topk(logits, y, ks=(1, 5))
        all_c.extend(c.cpu())  # Also ensures a sync point.
        all_top1.extend(top1.cpu())
        all_top5.extend(top5.cpu())

    # measure elapsed time
    end = time.time()

  model.train()
  logger.info(f"Validation@{step} loss {np.mean(all_c):.5f}, "
              f"top1 {np.mean(all_top1):.2%}, "
              f"top5 {np.mean(all_top5):.2%}")
  logger.flush()
  return all_c, all_top1, all_top5
def mixup_data(x, y, l):
  """Mixup: blend each input with a randomly chosen partner from the batch.

  Returns the mixed inputs plus both label sets (original and permuted);
  the loss is combined later via mixup_criterion with the same `l`.
  """
  perm = torch.randperm(x.shape[0]).to(x.device)
  mixed = l * x + (1 - l) * x[perm]
  return mixed, y, y[perm]
def mixup_criterion(criterion, pred, y_a, y_b, l):
  """Mixup loss: convex combination of the losses against both label sets."""
  loss_a = criterion(pred, y_a)
  loss_b = criterion(pred, y_b)
  return l * loss_a + (1 - l) * loss_b
def main(args):
  """Fine-tunes a BiT model per the BiT-HyperRule, with optional resume."""
  logger = bit_common.setup_logger(args)

  # Lets cuDNN benchmark conv implementations and choose the fastest.
  # Only good if sizes stay the same within the main loop!
  torch.backends.cudnn.benchmark = True

  device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
  logger.info(f"Going to train on {device}")

  train_set, valid_set, train_loader, valid_loader = mktrainval(args, logger)

  logger.info(f"Loading model from {args.model}.npz")
  model = models.KNOWN_MODELS[args.model](head_size=len(valid_set.classes), zero_head=True)
  model.load_from(np.load(f"{args.model}.npz"))

  logger.info("Moving model onto all GPUs")
  model = torch.nn.DataParallel(model)

  # Optionally resume from a checkpoint.
  # Load it to CPU first as we'll move the model to GPU later.
  # This way, we save a little bit of GPU memory when loading.
  step = 0

  # Note: no weight-decay!
  optim = torch.optim.SGD(model.parameters(), lr=0.003, momentum=0.9)

  # Resume fine-tuning if we find a saved model.
  savename = pjoin(args.logdir, args.name, "bit.pth.tar")
  try:
    logger.info(f"Model will be saved in '{savename}'")
    checkpoint = torch.load(savename, map_location="cpu")
    logger.info(f"Found saved model to resume from at '{savename}'")
    step = checkpoint["step"]
    model.load_state_dict(checkpoint["model"])
    optim.load_state_dict(checkpoint["optim"])
    logger.info(f"Resumed at step {step}")
  except FileNotFoundError:
    logger.info("Fine-tuning from BiT")

  model = model.to(device)
  optim.zero_grad()

  model.train()
  # mixup alpha per BiT-HyperRule; 0 disables mixup entirely.
  mixup = bit_hyperrule.get_mixup(len(train_set))
  cri = torch.nn.CrossEntropyLoss().to(device)

  logger.info("Starting training!")
  chrono = lb.Chrono()
  accum_steps = 0
  mixup_l = np.random.beta(mixup, mixup) if mixup > 0 else 1
  end = time.time()

  with lb.Uninterrupt() as u:
    for x, y in recycle(train_loader):
      # measure data loading time, which is spent in the `for` statement.
      chrono._done("load", time.time() - end)

      if u.interrupted:
        break

      # Schedule sending to GPU(s)
      x = x.to(device, non_blocking=True)
      y = y.to(device, non_blocking=True)

      # Update learning-rate, including stop training if over.
      lr = bit_hyperrule.get_lr(step, len(train_set), args.base_lr)
      if lr is None:
        break
      for param_group in optim.param_groups:
        param_group["lr"] = lr

      if mixup > 0.0:
        x, y_a, y_b = mixup_data(x, y, mixup_l)

      # compute output
      with chrono.measure("fprop"):
        logits = model(x)
        if mixup > 0.0:
          c = mixup_criterion(cri, logits, y_a, y_b, mixup_l)
        else:
          c = cri(logits, y)
        c_num = float(c.data.cpu().numpy())  # Also ensures a sync point.

      # Accumulate grads: each micro-batch contributes 1/batch_split of the
      # logical batch's gradient; optim.step() only runs once per full batch.
      with chrono.measure("grads"):
        (c / args.batch_split).backward()
        accum_steps += 1

      accstep = f" ({accum_steps}/{args.batch_split})" if args.batch_split > 1 else ""
      logger.info(f"[step {step}{accstep}]: loss={c_num:.5f} (lr={lr:.1e})")  # pylint: disable=logging-format-interpolation
      logger.flush()

      # Update params
      if accum_steps == args.batch_split:
        with chrono.measure("update"):
          optim.step()
          optim.zero_grad()
        step += 1
        accum_steps = 0
        # Sample new mixup ratio for next batch
        mixup_l = np.random.beta(mixup, mixup) if mixup > 0 else 1

        # Run evaluation and save the model.
        if args.eval_every and step % args.eval_every == 0:
          run_eval(model, valid_loader, device, chrono, logger, step)
          if args.save:
            torch.save({
                "step": step,
                "model": model.state_dict(),
                "optim" : optim.state_dict(),
            }, savename)

      end = time.time()

    # Final eval at end of training.
    run_eval(model, valid_loader, device, chrono, logger, step='end')

  logger.info(f"Timings:\n{chrono}")
if __name__ == "__main__":
  # Extend the shared BiT argument parser with PyTorch-specific flags.
  parser = bit_common.argparser(models.KNOWN_MODELS.keys())
  parser.add_argument("--datadir", required=True,
                      help="Path to the ImageNet data folder, preprocessed for torchvision.")
  parser.add_argument("--workers", type=int, default=8,
                      help="Number of background threads used to load data.")
  # --no-save stores False into args.save (checkpointing is on by default).
  parser.add_argument("--no-save", dest="save", action="store_false")
  main(parser.parse_args())
| 10,239 | 34.555556 | 124 | py |
big_transfer | big_transfer-master/bit_tf2/models.py | # Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""ResNet architecture as used in BiT."""
import tensorflow.compat.v2 as tf
from . import normalization
def add_name_prefix(name, prefix=None):
  """Returns "<prefix>/<name>" when a truthy prefix is given, else `name`."""
  if prefix:
    return prefix + "/" + name
  return name
class ReLU(tf.keras.layers.ReLU):
  """ReLU that reports its output shape (== input shape) for shape inference."""

  def compute_output_shape(self, input_shape):
    # Element-wise op: the shape passes through unchanged.
    return tf.TensorShape(input_shape)
class PaddingFromKernelSize(tf.keras.layers.Layer):
  """Layer that adds padding to an image taking into a given kernel size."""

  def __init__(self, kernel_size, **kwargs):
    super(PaddingFromKernelSize, self).__init__(**kwargs)
    # SAME-style split: total padding is kernel_size - 1, with the extra
    # pixel (for even totals) going to the end.
    pad_total = kernel_size - 1
    self._pad_beg = pad_total // 2
    self._pad_end = pad_total - self._pad_beg

  def compute_output_shape(self, input_shape):
    batch_size, height, width, channels = tf.TensorShape(input_shape).as_list()
    # Dynamic (None) spatial dims stay unknown.
    if height is not None:
      height = height + self._pad_beg + self._pad_end
    if width is not None:
      width = width + self._pad_beg + self._pad_end
    return tf.TensorShape((batch_size, height, width, channels))

  def call(self, x):
    # Zero-pad only the spatial (H, W) dimensions of the NHWC input.
    padding = [
        [0, 0],
        [self._pad_beg, self._pad_end],
        [self._pad_beg, self._pad_end],
        [0, 0]]
    return tf.pad(x, padding)
class StandardizedConv2D(tf.keras.layers.Conv2D):
  """Implements the abs/1903.10520 technique (see go/dune-gn).

  You can simply replace any Conv2D with this one to use re-parametrized
  convolution operation in which the kernels are standardized before conv.

  Note that it does not come with extra learnable scale/bias parameters,
  as those used in "Weight normalization" (abs/1602.07868). This does not
  matter if combined with BN/GN/..., but it would matter if the convolution
  was used standalone.

  Author: Lucas Beyer
  """

  def build(self, input_shape):
    super(StandardizedConv2D, self).build(input_shape)
    # Wrap a standardization around the conv OP.
    default_conv_op = self._convolution_op

    def standardized_conv_op(inputs, kernel):
      # Kernel has shape HWIO, normalize over HWI
      mean, var = tf.nn.moments(kernel, axes=[0, 1, 2], keepdims=True)
      # Author code uses std + 1e-5
      return default_conv_op(inputs, (kernel - mean) / tf.sqrt(var + 1e-10))

    self._convolution_op = standardized_conv_op
    self.built = True
class BottleneckV2Unit(tf.keras.layers.Layer):
  """Implements a standard ResNet's unit (version 2).
  """

  def __init__(self, num_filters, stride=1, **kwargs):
    """Initializer.

    Args:
      num_filters: number of filters in the bottleneck.
      stride: specifies block's stride.
      **kwargs: other tf.keras.layers.Layer keyword arguments.
    """
    super(BottleneckV2Unit, self).__init__(**kwargs)
    self._num_filters = num_filters
    self._stride = stride

    # Projection shortcut; created lazily in build() only when needed.
    self._proj = None

    # Pre-activation (GN + ReLU) for sub-unit "a"; its conv is kept separate
    # because the projection shortcut branches off after the pre-activation.
    self._unit_a = tf.keras.Sequential([
        normalization.GroupNormalization(name="group_norm"),
        ReLU(),
    ], name="a")
    self._unit_a_conv = StandardizedConv2D(
        filters=num_filters,
        kernel_size=1,
        use_bias=False,
        padding="VALID",
        trainable=self.trainable,
        name="a/standardized_conv2d")

    # Sub-unit "b": 3x3 conv carrying the unit's stride.
    self._unit_b = tf.keras.Sequential([
        normalization.GroupNormalization(name="group_norm"),
        ReLU(),
        PaddingFromKernelSize(kernel_size=3),
        StandardizedConv2D(
            filters=num_filters,
            kernel_size=3,
            strides=stride,
            use_bias=False,
            padding="VALID",
            trainable=self.trainable,
            name="standardized_conv2d")
    ], name="b")

    # Sub-unit "c": 1x1 conv expanding back to 4x the bottleneck width.
    self._unit_c = tf.keras.Sequential([
        normalization.GroupNormalization(name="group_norm"),
        ReLU(),
        StandardizedConv2D(
            filters=4 * num_filters,
            kernel_size=1,
            use_bias=False,
            padding="VALID",
            trainable=self.trainable,
            name="standardized_conv2d")
    ], name="c")

  def build(self, input_shape):
    input_shape = tf.TensorShape(input_shape).as_list()

    # Add projection layer if necessary.
    if (self._stride > 1) or (4 * self._num_filters != input_shape[-1]):
      self._proj = StandardizedConv2D(
          filters=4 * self._num_filters,
          kernel_size=1,
          strides=self._stride,
          use_bias=False,
          padding="VALID",
          trainable=self.trainable,
          name="a/proj/standardized_conv2d")
    self.built = True

  def compute_output_shape(self, input_shape):
    # Chain the sub-units' shape inference in forward order.
    current_shape = self._unit_a.compute_output_shape(input_shape)
    current_shape = self._unit_a_conv.compute_output_shape(current_shape)
    current_shape = self._unit_b.compute_output_shape(current_shape)
    current_shape = self._unit_c.compute_output_shape(current_shape)
    return current_shape

  def call(self, x):
    x_shortcut = x
    # Unit "a".
    x = self._unit_a(x)
    if self._proj is not None:
      # v2 style: the projection consumes the *pre-activated* input.
      x_shortcut = self._proj(x)
    x = self._unit_a_conv(x)
    # Unit "b".
    x = self._unit_b(x)
    # Unit "c".
    x = self._unit_c(x)

    return x + x_shortcut
class ResnetV2(tf.keras.Model):
  """Generic ResnetV2 architecture, as used in the BiT paper."""

  def __init__(self,
               num_units=(3, 4, 6, 3),
               num_outputs=1000,
               filters_factor=4,
               strides=(1, 2, 2, 2),
               **kwargs):
    """Args:
      num_units: residual units per stage (default is ResNet-50's layout).
      num_outputs: size of the dense head; falsy value omits the head and
        the model returns pooled features instead.
      filters_factor: base-width multiplier (4 gives the standard 64-channel
        root; BiT width variants scale this).
      strides: per-stage stride for the first unit of each stage.
      **kwargs: other tf.keras.Model keyword arguments.
    """
    super(ResnetV2, self).__init__(**kwargs)

    num_blocks = len(num_units)
    # Channel width doubles each stage.
    num_filters = tuple(16 * filters_factor * 2**b for b in range(num_blocks))

    self._root = self._create_root_block(num_filters=num_filters[0])
    self._blocks = []
    for b, (f, u, s) in enumerate(zip(num_filters, num_units, strides), 1):
      n = "block{}".format(b)
      self._blocks.append(
          self._create_block(num_units=u, num_filters=f, stride=s, name=n))
    # Final norm + ReLU + global average pool before the head.
    self._pre_head = [
        normalization.GroupNormalization(name="group_norm"),
        ReLU(),
        tf.keras.layers.GlobalAveragePooling2D()
    ]
    self._head = None
    if num_outputs:
      # Zero-init head: fine-tuning starts from the pretrained features.
      self._head = tf.keras.layers.Dense(
          units=num_outputs,
          use_bias=True,
          kernel_initializer="zeros",
          trainable=self.trainable,
          name="head/dense")

  def _create_root_block(self,
                         num_filters,
                         conv_size=7,
                         conv_stride=2,
                         pool_size=3,
                         pool_stride=2):
    """Stem: padded 7x7 standardized conv followed by 3x3 max-pooling."""
    layers = [
        PaddingFromKernelSize(conv_size),
        StandardizedConv2D(
            filters=num_filters,
            kernel_size=conv_size,
            strides=conv_stride,
            trainable=self.trainable,
            use_bias=False,
            name="standardized_conv2d"),
        PaddingFromKernelSize(pool_size),
        tf.keras.layers.MaxPool2D(
            pool_size=pool_size, strides=pool_stride, padding="valid")
    ]
    return tf.keras.Sequential(layers, name="root_block")

  def _create_block(self, num_units, num_filters, stride, name):
    """One residual stage; only its first unit carries the stage stride."""
    layers = []
    for i in range(1, num_units + 1):
      layers.append(
          BottleneckV2Unit(
              num_filters=num_filters,
              stride=(stride if i == 1 else 1),
              name="unit%02d" % i))
    return tf.keras.Sequential(layers, name=name)

  def compute_output_shape(self, input_shape):
    current_shape = self._root.compute_output_shape(input_shape)
    for block in self._blocks:
      current_shape = block.compute_output_shape(current_shape)
    for layer in self._pre_head:
      current_shape = layer.compute_output_shape(current_shape)
    if self._head is not None:
      # Dense expects spatial dims; re-insert dummy 1x1 dims for inference,
      # then drop them again from the result.
      batch_size, features = current_shape.as_list()
      current_shape = (batch_size, 1, 1, features)
      current_shape = self._head.compute_output_shape(current_shape).as_list()
      current_shape = (current_shape[0], current_shape[3])
    return tf.TensorShape(current_shape)

  def call(self, x):
    x = self._root(x)
    for block in self._blocks:
      x = block(x)
    for layer in self._pre_head:
      x = layer(x)
    if self._head is not None:
      x = self._head(x)
    return x
# (depth, width-factor) pairs of the released TF2 BiT checkpoints.
_DEPTH_WIDTHS = ((50, 1), (50, 3), (101, 1), (101, 3), (152, 4))

# Model name -> location of its weight file on Google Cloud Storage.
KNOWN_MODELS = {
    f'{upstream}-R{depth}x{width}': f'gs://bit_models/{upstream}-R{depth}x{width}.h5'
    for upstream in ('BiT-S', 'BiT-M')
    for depth, width in _DEPTH_WIDTHS
}

# Depth tag -> residual units per stage; checked in order against the name.
_UNITS_BY_DEPTH = (
    ('R50', (3, 4, 6, 3)),
    ('R101', (3, 4, 23, 3)),
    ('R152', (3, 8, 36, 3)),
)

NUM_UNITS = {
    name: next(units for tag, units in _UNITS_BY_DEPTH if tag in name)
    for name in KNOWN_MODELS
}
| 9,000 | 31.261649 | 79 | py |
big_transfer | big_transfer-master/bit_tf2/normalization.py | # Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Group normalization."""
import tensorflow.compat.v2 as tf
def group_normalize(x, gamma, beta, num_groups=None, group_size=None, eps=1e-5):
  """Applies group-normalization to NHWC `x` (see abs/1803.08494, go/dune-gn).

  This function just does the math, if you want a "layer" that creates the
  necessary variables etc., see `group_norm` below.

  You must either specify a fixed number of groups `num_groups`, which will
  automatically select a corresponding group size depending on the input's
  number of channels, or you must specify a `group_size`, which leads to an
  automatic number of groups depending on the input's number of channels.

  Args:
    x: N..C-tensor, the input to group-normalize. For images, this would be a
      NHWC-tensor, for time-series a NTC, for videos a NHWTC or NTHWC, all of
      them work, as normalization includes everything between N and C. Even just
      NC shape works, as C is grouped and normalized.
    gamma: tensor with C entries, learnable scale after normalization.
    beta: tensor with C entries, learnable bias after normalization.
    num_groups: int, number of groups to normalize over (divides C).
    group_size: int, size of the groups to normalize over (divides C).
    eps: float, a small additive constant to avoid /sqrt(0).

  Returns:
    Group-normalized `x`, of the same shape and type as `x`.

  Author: Lucas Beyer
  """
  assert x.shape.ndims >= 2, (
      "Less than 2-dim Tensor passed to GroupNorm. Something's fishy.")
  num_channels = x.shape[-1]
  assert num_channels is not None, "Cannot apply GroupNorm on dynamic channels."
  # Exactly one of the two group arguments must be given; the other is derived.
  assert (num_groups is None) != (group_size is None), (
      "You must specify exactly one of `num_groups`, `group_size`")

  if group_size is not None:
    num_groups = num_channels // group_size

  assert num_channels % num_groups == 0, (
      "GroupNorm: {} not divisible by {}".format(num_channels, num_groups))

  orig_shape = tf.shape(x)

  # This shape is NHWGS where G is #groups and S is group-size.
  extra_shape = [num_groups, num_channels // num_groups]
  group_shape = tf.concat([orig_shape[:-1], extra_shape], axis=-1)
  x = tf.reshape(x, group_shape)

  # The dimensions to normalize over: HWS for images, but more generally all
  # dimensions except N (batch, first) and G (cross-groups, next-to-last).
  # So more visually, normdims are the dots in N......G. (note the last one is
  # also a dot, not a full-stop, argh!)
  normdims = list(range(1, x.shape.ndims - 2)) + [x.shape.ndims - 1]
  mean, var = tf.nn.moments(x, normdims, keepdims=True)

  # Interestingly, we don't have a beta/gamma per group, but still one per
  # channel, at least according to the original paper. Reshape such that they
  # broadcast correctly.
  beta = tf.reshape(beta, extra_shape)
  gamma = tf.reshape(gamma, extra_shape)
  # batch_normalization's positional signature is (x, mean, variance, offset,
  # scale, eps) -- so `beta` is the offset and `gamma` the scale here.
  x = tf.nn.batch_normalization(x, mean, var, beta, gamma, eps)
  return tf.reshape(x, orig_shape)
class GroupNormalization(tf.keras.layers.Layer):
  """A group-norm "layer" (see abs/1803.08494 go/dune-gn).

  This function creates beta/gamma variables in a name_scope, and uses them to
  apply `group_normalize` on the input `x`.

  You can either specify a fixed number of groups `num_groups`, which will
  automatically select a corresponding group size depending on the input's
  number of channels, or you must specify a `group_size`, which leads to an
  automatic number of groups depending on the input's number of channels.

  If you specify neither, the paper's recommended `num_groups=32` is used.

  Authors: Lucas Beyer, Joan Puigcerver.
  """

  def __init__(self,
               num_groups=None,
               group_size=None,
               eps=1e-5,
               beta_init=tf.zeros_initializer(),
               gamma_init=tf.ones_initializer(),
               **kwargs):
    """Initializer.

    Args:
      num_groups: int, the number of channel-groups to normalize over.
      group_size: int, size of the groups to normalize over.
      eps: float, a small additive constant to avoid /sqrt(0).
      beta_init: initializer for bias, defaults to zeros.
      gamma_init: initializer for scale, defaults to ones.
      **kwargs: other tf.keras.layers.Layer arguments.
    """
    super(GroupNormalization, self).__init__(**kwargs)
    if num_groups is None and group_size is None:
      num_groups = 32  # Paper-recommended default when neither is given.
    self._num_groups = num_groups
    self._group_size = group_size
    self._eps = eps
    self._beta_init = beta_init
    self._gamma_init = gamma_init

  def build(self, input_size):
    """Creates one per-channel `gamma` (scale) and `beta` (offset) weight."""
    channels = input_size[-1]
    assert channels is not None, "Cannot apply GN on dynamic channels."
    self._gamma = self.add_weight(
        name="gamma", shape=(channels,), initializer=self._gamma_init,
        dtype=self.dtype)
    self._beta = self.add_weight(
        name="beta", shape=(channels,), initializer=self._beta_init,
        dtype=self.dtype)
    super(GroupNormalization, self).build(input_size)

  def call(self, x):
    """Applies `group_normalize` using this layer's weights and settings."""
    return group_normalize(x, self._gamma, self._beta, self._num_groups,
                           self._group_size, self._eps)
| 5,692 | 39.664286 | 80 | py |
big_transfer | big_transfer-master/bit_tf2/train.py | # Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
# coding: utf-8
from functools import partial
import time
import os
import tensorflow.compat.v2 as tf
tf.enable_v2_behavior()
import bit_common
import bit_hyperrule
import bit_tf2.models as models
import input_pipeline_tf2_or_jax as input_pipeline
def reshape_for_keras(features, batch_size, crop_size):
  """Turns a features dict into the (images, labels) pair Keras expects.

  Gives the image tensor a fully static NHWC shape and the label tensor a
  static (batch, -1) shape; the `features` dict is updated in place as well.
  """
  static_image_shape = (batch_size, crop_size, crop_size, 3)
  features["image"] = tf.reshape(features["image"], static_image_shape)
  features["label"] = tf.reshape(features["label"], (batch_size, -1))
  return (features["image"], features["label"])
class BiTLRSched(tf.keras.callbacks.Callback):
  """Keras callback implementing the BiT hyper-rule learning-rate schedule.

  Before every training batch, the learning rate for the current global step
  is obtained from `bit_hyperrule.get_lr` and written into the optimizer.
  """

  def __init__(self, base_lr, num_samples):
    self.base_lr = base_lr
    self.num_samples = num_samples
    self.step = 0  # global step, incremented once per batch

  def on_train_batch_begin(self, batch, logs=None):
    new_lr = bit_hyperrule.get_lr(self.step, self.num_samples, self.base_lr)
    tf.keras.backend.set_value(self.model.optimizer.lr, new_lr)
    self.step += 1
def main(args):
  """Fine-tunes a pretrained BiT model on the requested dataset.

  Downloads the pretrained weights if needed, builds the distributed input
  pipeline, loads the model inside a MirroredStrategy scope, swaps in a fresh
  classification head, and runs Keras `fit` with the BiT LR schedule.
  """
  tf.io.gfile.makedirs(args.logdir)
  logger = bit_common.setup_logger(args)
  logger.info(f'Available devices: {tf.config.list_physical_devices()}')

  # Fetch the pretrained checkpoint once and cache it locally.
  tf.io.gfile.makedirs(args.bit_pretrained_dir)
  bit_model_file = os.path.join(args.bit_pretrained_dir, f'{args.model}.h5')
  if not tf.io.gfile.exists(bit_model_file):
    model_url = models.KNOWN_MODELS[args.model]
    logger.info(f'Downloading the model from {model_url}...')
    tf.io.gfile.copy(model_url, bit_model_file)

  # Set up input pipeline
  dataset_info = input_pipeline.get_dataset_info(
      args.dataset, 'train', args.examples_per_class)

  # Distribute training
  strategy = tf.distribute.MirroredStrategy()
  num_devices = strategy.num_replicas_in_sync
  print('Number of devices: {}'.format(num_devices))

  # Resolution, mixup strength and schedule all come from the BiT hyper-rule.
  resize_size, crop_size = bit_hyperrule.get_resolution_from_dataset(args.dataset)
  data_train = input_pipeline.get_data(
      dataset=args.dataset, mode='train',
      repeats=None, batch_size=args.batch,
      resize_size=resize_size, crop_size=crop_size,
      examples_per_class=args.examples_per_class,
      examples_per_class_seed=args.examples_per_class_seed,
      mixup_alpha=bit_hyperrule.get_mixup(dataset_info['num_examples']),
      num_devices=num_devices,
      tfds_manual_dir=args.tfds_manual_dir)
  data_test = input_pipeline.get_data(
      dataset=args.dataset, mode='test',
      repeats=1, batch_size=args.batch,
      resize_size=resize_size, crop_size=crop_size,
      examples_per_class=1, examples_per_class_seed=0,
      mixup_alpha=None,
      num_devices=num_devices,
      tfds_manual_dir=args.tfds_manual_dir)

  # Keras wants (images, labels) tuples with static shapes, not dicts.
  data_train = data_train.map(lambda x: reshape_for_keras(
      x, batch_size=args.batch, crop_size=crop_size))
  data_test = data_test.map(lambda x: reshape_for_keras(
      x, batch_size=args.batch, crop_size=crop_size))

  with strategy.scope():
    filters_factor = int(args.model[-1])*4  # width multiplier from model name
    # num_outputs=21843 matches the upstream pretraining head (presumably
    # ImageNet-21k -- TODO confirm); it is replaced below for fine-tuning.
    model = models.ResnetV2(
        num_units=models.NUM_UNITS[args.model],
        num_outputs=21843,
        filters_factor=filters_factor,
        name="resnet",
        trainable=True,
        dtype=tf.float32)

    model.build((None, None, None, 3))
    logger.info(f'Loading weights...')
    model.load_weights(bit_model_file)
    logger.info(f'Weights loaded into model!')

    # Replace the pretraining head with a zero-initialized head sized for the
    # target dataset (zero init is the BiT recipe for fine-tuning).
    model._head = tf.keras.layers.Dense(
        units=dataset_info['num_classes'],
        use_bias=True,
        kernel_initializer="zeros",
        trainable=True,
        name="head/dense")

    lr_supports = bit_hyperrule.get_schedule(dataset_info['num_examples'])
    schedule_length = lr_supports[-1]
    # NOTE: Let's not do that unless verified necessary and we do the same
    # across all three codebases.
    # schedule_length = schedule_length * 512 / args.batch

    optimizer = tf.keras.optimizers.SGD(momentum=0.9)
    loss_fn = tf.keras.losses.CategoricalCrossentropy(from_logits=True)
    model.compile(optimizer=optimizer, loss=loss_fn, metrics=['accuracy'])

  logger.info(f'Fine-tuning the model...')
  steps_per_epoch = args.eval_every or schedule_length
  history = model.fit(
      data_train,
      steps_per_epoch=steps_per_epoch,
      epochs=schedule_length // steps_per_epoch,
      validation_data=data_test,  # here we are only using
                                  # this data to evaluate our performance
      callbacks=[BiTLRSched(args.base_lr, dataset_info['num_examples'])],
  )

  for epoch, accu in enumerate(history.history['val_accuracy']):
    logger.info(
        f'Step: {epoch * args.eval_every}, '
        f'Test accuracy: {accu:0.3f}')
if __name__ == "__main__":
  # Extend the shared BiT argument parser with the flags specific to the
  # TF2 trainer.
  parser = bit_common.argparser(models.KNOWN_MODELS.keys())
  # BUGFIX: help text previously read "maually" instead of "manually".
  parser.add_argument("--tfds_manual_dir", default=None,
                      help="Path to manually downloaded dataset.")
  parser.add_argument("--batch_eval", default=32, type=int,
                      help="Eval batch size.")
  main(parser.parse_args())
| 5,417 | 34.181818 | 90 | py |
tirg | tirg-master/main.py | # Copyright 2019 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Main method to train the model."""
#!/usr/bin/python
import argparse
import sys
import time
import datasets
import img_text_composition_models
import numpy as np
from tensorboardX import SummaryWriter
import test_retrieval
import torch
import torch.utils.data
import torchvision
from tqdm import tqdm as tqdm
torch.set_num_threads(3)
def parse_opt():
  """Parses the input arguments."""
  parser = argparse.ArgumentParser()
  # (flag, type, default) triples, registered in this exact order so the
  # generated --help output is unchanged.
  option_specs = [
      ('-f', str, ''),
      ('--comment', str, 'test_notebook'),
      ('--dataset', str, 'css3d'),
      ('--dataset_path', str, '../imgcomsearch/CSSDataset/output'),
      ('--model', str, 'tirg'),
      ('--embed_dim', int, 512),
      ('--learning_rate', float, 1e-2),
      ('--learning_rate_decay_frequency', int, 9999999),
      ('--batch_size', int, 32),
      ('--weight_decay', float, 1e-6),
      ('--num_iters', int, 210000),
      ('--loss', str, 'soft_triplet'),
      ('--loader_num_workers', int, 4),
  ]
  for flag, arg_type, default in option_specs:
    parser.add_argument(flag, type=arg_type, default=default)
  return parser.parse_args()
def load_dataset(opt):
  """Loads the train and test splits of the dataset named by `opt.dataset`.

  Supported datasets: 'css3d', 'fashion200k', 'mitstates'. On an unknown
  name the process exits (matching the original behavior).

  Args:
    opt: parsed options; `opt.dataset` and `opt.dataset_path` are read.

  Returns:
    (trainset, testset) tuple of dataset objects.
  """
  print('Reading dataset ', opt.dataset)

  def make_transform(resize_and_crop):
    """Builds the image pipeline; real-photo datasets get resize + crop.

    The original inlined this Compose six times; the ops are identical
    except that css3d skips the 224px Resize/CenterCrop.
    """
    ops = []
    if resize_and_crop:
      ops += [
          torchvision.transforms.Resize(224),
          torchvision.transforms.CenterCrop(224),
      ]
    ops += [
        torchvision.transforms.ToTensor(),
        torchvision.transforms.Normalize([0.485, 0.456, 0.406],
                                         [0.229, 0.224, 0.225]),
    ]
    return torchvision.transforms.Compose(ops)

  if opt.dataset == 'css3d':
    dataset_class = datasets.CSSDataset
    transform = make_transform(resize_and_crop=False)
  elif opt.dataset == 'fashion200k':
    dataset_class = datasets.Fashion200k
    transform = make_transform(resize_and_crop=True)
  elif opt.dataset == 'mitstates':
    dataset_class = datasets.MITStates
    transform = make_transform(resize_and_crop=True)
  else:
    print('Invalid dataset', opt.dataset)
    sys.exit()

  trainset = dataset_class(
      path=opt.dataset_path, split='train', transform=transform)
  testset = dataset_class(
      path=opt.dataset_path, split='test', transform=transform)

  print('trainset size:', len(trainset))
  print('testset size:', len(testset))
  return trainset, testset
def create_model_and_optimizer(opt, texts):
  """Builds the model and related optimizer.

  Args:
    opt: parsed options; reads `model`, `embed_dim`, `dataset`,
      `learning_rate` and `weight_decay`.
    texts: list of strings used to build the text model's vocabulary.

  Returns:
    (model, optimizer) tuple; the model is moved to GPU.
  """
  print('Creating model and optimizer for', opt.model)
  if opt.model == 'imgonly':
    model = img_text_composition_models.SimpleModelImageOnly(
        texts, embed_dim=opt.embed_dim)
  elif opt.model == 'textonly':
    model = img_text_composition_models.SimpleModelTextOnly(
        texts, embed_dim=opt.embed_dim)
  elif opt.model == 'concat':
    model = img_text_composition_models.Concat(texts, embed_dim=opt.embed_dim)
  elif opt.model == 'tirg':
    model = img_text_composition_models.TIRG(texts, embed_dim=opt.embed_dim)
  elif opt.model == 'tirg_lastconv':
    model = img_text_composition_models.TIRGLastConv(
        texts, embed_dim=opt.embed_dim)
  else:
    print('Invalid model', opt.model)
    print('available: imgonly, textonly, concat, tirg or tirg_lastconv')
    sys.exit()
  model = model.cuda()

  # create optimizer
  params = []
  # low learning rate for pretrained layers on real image datasets
  if opt.dataset != 'css3d':
    # Earlier groups take precedence for a parameter that appears in several
    # groups (duplicates are neutralized below): fc gets the full LR, the
    # rest of the pretrained image backbone gets 0.1x.
    params.append({
        'params': [p for p in model.img_model.fc.parameters()],
        'lr': opt.learning_rate
    })
    params.append({
        'params': [p for p in model.img_model.parameters()],
        'lr': 0.1 * opt.learning_rate
    })
  params.append({'params': [p for p in model.parameters()]})
  # SGD rejects a parameter that occurs in more than one group, so each
  # later duplicate is replaced by a throwaway scalar tensor; the earlier
  # occurrence (with its intended LR) is the one that stays effective.
  for _, p1 in enumerate(params):  # remove duplicated params
    for _, p2 in enumerate(params):
      if p1 is not p2:
        for p11 in p1['params']:
          for j, p22 in enumerate(p2['params']):
            if p11 is p22:
              p2['params'][j] = torch.tensor(0.0, requires_grad=True)
  optimizer = torch.optim.SGD(
      params, lr=opt.learning_rate, momentum=0.9, weight_decay=opt.weight_decay)
  return model, optimizer
def train_loop(opt, logger, trainset, testset, model, optimizer):
  """Function for train loop.

  Runs epochs until `opt.num_iters` iterations are done. Each epoch: log the
  running losses, every third epoch evaluate retrieval on train and test,
  save a checkpoint, then train one pass over the train loader.
  """
  print('Begin training')
  losses_tracking = {}  # loss name -> list of per-iteration float values
  it = 0
  epoch = -1
  tic = time.time()
  while it < opt.num_iters:
    epoch += 1

    # show/log stats
    print('It', it, 'epoch', epoch, 'Elapsed time', round(time.time() - tic, 4), opt.comment)
    tic = time.time()
    # NOTE: empty on the first epoch, so `trainloader` (defined further down)
    # is never referenced before assignment.
    for loss_name in losses_tracking:
      avg_loss = np.mean(losses_tracking[loss_name][-len(trainloader):])
      print('    Loss', loss_name, round(avg_loss, 4))
      logger.add_scalar(loss_name, avg_loss, it)
    logger.add_scalar('learning_rate', optimizer.param_groups[0]['lr'], it)

    # test
    if epoch % 3 == 1:
      tests = []
      for name, dataset in [('train', trainset), ('test', testset)]:
        t = test_retrieval.test(opt, model, dataset)
        tests += [(name + ' ' + metric_name, metric_value)
                  for metric_name, metric_value in t]
      for metric_name, metric_value in tests:
        logger.add_scalar(metric_name, metric_value, it)
        print('    ', metric_name, round(metric_value, 4))

    # save checkpoint
    torch.save({
        'it': it,
        'opt': opt,
        'model_state_dict': model.state_dict(),
    },
               logger.file_writer.get_logdir() + '/latest_checkpoint.pth')

    # run trainning for 1 epoch
    model.train()
    trainloader = trainset.get_loader(
        batch_size=opt.batch_size,
        shuffle=True,
        drop_last=True,
        num_workers=opt.loader_num_workers)

    def training_1_iter(data):
      # `data` is a list of per-example dicts (custom collate).
      assert type(data) is list
      img1 = np.stack([d['source_img_data'] for d in data])
      img1 = torch.from_numpy(img1).float()
      img1 = torch.autograd.Variable(img1).cuda()
      img2 = np.stack([d['target_img_data'] for d in data])
      img2 = torch.from_numpy(img2).float()
      img2 = torch.autograd.Variable(img2).cuda()
      mods = [str(d['mod']['str']) for d in data]

      # compute loss
      losses = []
      if opt.loss == 'soft_triplet':
        loss_value = model.compute_loss(
            img1, mods, img2, soft_triplet_loss=True)
      elif opt.loss == 'batch_based_classification':
        loss_value = model.compute_loss(
            img1, mods, img2, soft_triplet_loss=False)
      else:
        print('Invalid loss function', opt.loss)
        sys.exit()
      loss_name = opt.loss
      loss_weight = 1.0
      losses += [(loss_name, loss_weight, loss_value)]
      total_loss = sum([
          loss_weight * loss_value
          for loss_name, loss_weight, loss_value in losses
      ])
      assert not torch.isnan(total_loss)
      losses += [('total training loss', None, total_loss)]

      # track losses
      for loss_name, loss_weight, loss_value in losses:
        if loss_name not in losses_tracking:
          losses_tracking[loss_name] = []
        losses_tracking[loss_name].append(float(loss_value))

      # gradient descend
      optimizer.zero_grad()
      total_loss.backward()
      optimizer.step()

    for data in tqdm(trainloader, desc='Training for epoch ' + str(epoch)):
      it += 1
      training_1_iter(data)

      # decay learing rate
      if it >= opt.learning_rate_decay_frequency and it % opt.learning_rate_decay_frequency == 0:
        for g in optimizer.param_groups:
          g['lr'] *= 0.1

  print('Finished training')
def main():
  """Entry point: parse options, set up logging, then run training."""
  opt = parse_opt()
  print('Arguments:')
  for name, value in opt.__dict__.items():
    print('    ', name, ':', str(value))

  logger = SummaryWriter(comment=opt.comment)
  print('Log files saved to', logger.file_writer.get_logdir())
  for name, value in opt.__dict__.items():
    logger.add_text(name, str(value))

  trainset, testset = load_dataset(opt)
  model, optimizer = create_model_and_optimizer(opt, trainset.get_all_texts())
  train_loop(opt, logger, trainset, testset, model, optimizer)
  logger.close()
if __name__ == '__main__':
  # Run training only when invoked as a script, not on import.
  main()
| 10,540 | 34.372483 | 97 | py |
tirg | tirg-master/test_retrieval.py | # Copyright 2018 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Evaluates the retrieval model."""
import numpy as np
import torch
from tqdm import tqdm as tqdm
def test(opt, model, testset):
  """Tests a model over the given testset.

  Computes query features (image composed with modification text) and
  gallery image features in mini-batches, normalizes both, ranks gallery
  images by dot-product similarity, and reports recall@k on captions.

  Returns:
    List of (metric_name, value) pairs, e.g.
    ('recall_top1_correct_composition', 0.12).
  """
  model.eval()
  test_queries = testset.get_test_queries()

  all_imgs = []
  all_captions = []
  all_queries = []
  all_target_captions = []
  if test_queries:
    # compute test query features
    imgs = []
    mods = []
    for t in tqdm(test_queries):
      imgs += [testset.get_img(t['source_img_id'])]
      mods += [t['mod']['str']]
      # Flush a batch when full, or on the very last query.
      if len(imgs) >= opt.batch_size or t is test_queries[-1]:
        if 'torch' not in str(type(imgs[0])):
          imgs = [torch.from_numpy(d).float() for d in imgs]
        imgs = torch.stack(imgs).float()
        imgs = torch.autograd.Variable(imgs).cuda()
        f = model.compose_img_text(imgs, mods).data.cpu().numpy()
        all_queries += [f]
        imgs = []
        mods = []
    all_queries = np.concatenate(all_queries)
    all_target_captions = [t['target_caption'] for t in test_queries]

    # compute all image features
    imgs = []
    for i in tqdm(range(len(testset.imgs))):
      imgs += [testset.get_img(i)]
      if len(imgs) >= opt.batch_size or i == len(testset.imgs) - 1:
        if 'torch' not in str(type(imgs[0])):
          imgs = [torch.from_numpy(d).float() for d in imgs]
        imgs = torch.stack(imgs).float()
        imgs = torch.autograd.Variable(imgs).cuda()
        imgs = model.extract_img_feature(imgs).data.cpu().numpy()
        all_imgs += [imgs]
        imgs = []
    all_imgs = np.concatenate(all_imgs)
    all_captions = [img['captions'][0] for img in testset.imgs]
  else:
    # use training queries to approximate training retrieval performance
    # (first 10000 training examples; targets double as the gallery).
    imgs0 = []
    imgs = []
    mods = []
    for i in range(10000):
      item = testset[i]
      imgs += [item['source_img_data']]
      mods += [item['mod']['str']]
      if len(imgs) > opt.batch_size or i == 9999:
        imgs = torch.stack(imgs).float()
        imgs = torch.autograd.Variable(imgs)
        f = model.compose_img_text(imgs.cuda(), mods).data.cpu().numpy()
        all_queries += [f]
        imgs = []
        mods = []
      imgs0 += [item['target_img_data']]
      if len(imgs0) > opt.batch_size or i == 9999:
        imgs0 = torch.stack(imgs0).float()
        imgs0 = torch.autograd.Variable(imgs0)
        imgs0 = model.extract_img_feature(imgs0.cuda()).data.cpu().numpy()
        all_imgs += [imgs0]
        imgs0 = []
      all_captions += [item['target_caption']]
      all_target_captions += [item['target_caption']]
    all_imgs = np.concatenate(all_imgs)
    all_queries = np.concatenate(all_queries)

  # feature normalization
  for i in range(all_queries.shape[0]):
    all_queries[i, :] /= np.linalg.norm(all_queries[i, :])
  for i in range(all_imgs.shape[0]):
    all_imgs[i, :] /= np.linalg.norm(all_imgs[i, :])

  # match test queries to target images, get nearest neighbors
  nn_result = []
  for i in tqdm(range(all_queries.shape[0])):
    sims = all_queries[i:(i+1), :].dot(all_imgs.T)
    if test_queries:
      sims[0, test_queries[i]['source_img_id']] = -10e10  # remove query image
    nn_result.append(np.argsort(-sims[0, :])[:110])

  # compute recalls
  out = []
  # Matching is done on captions, so distinct images with the same caption
  # count as correct retrievals.
  nn_result = [[all_captions[nn] for nn in nns] for nns in nn_result]
  for k in [1, 5, 10, 50, 100]:
    r = 0.0
    for i, nns in enumerate(nn_result):
      if all_target_captions[i] in nns[:k]:
        r += 1
    r /= len(nn_result)
    out += [('recall_top' + str(k) + '_correct_composition', r)]

    if opt.dataset == 'mitstates':
      # MITStates captions are "adjective noun"; also score each word alone.
      r = 0.0
      for i, nns in enumerate(nn_result):
        if all_target_captions[i].split()[0] in [c.split()[0] for c in nns[:k]]:
          r += 1
      r /= len(nn_result)
      out += [('recall_top' + str(k) + '_correct_adj', r)]

      r = 0.0
      for i, nns in enumerate(nn_result):
        if all_target_captions[i].split()[1] in [c.split()[1] for c in nns[:k]]:
          r += 1
      r /= len(nn_result)
      out += [('recall_top' + str(k) + '_correct_noun', r)]

  return out
| 4,735 | 34.343284 | 80 | py |
tirg | tirg-master/text_model.py | # Copyright 2018 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Class for text data."""
import string
import numpy as np
import torch
class SimpleVocab(object):
  """Word-level vocabulary with a reserved <UNK> token at index 0."""

  def __init__(self):
    super(SimpleVocab, self).__init__()
    self.word2id = {}
    self.wordcount = {}
    # Reserve id 0 for unknown words. The huge count guarantees <UNK> can
    # never be thresholded away by threshold_rare_words.
    self.word2id['<UNK>'] = 0
    self.wordcount['<UNK>'] = 9e9

  def tokenize_text(self, text):
    """ASCII-folds, lowercases, strips punctuation, splits on whitespace."""
    ascii_text = text.encode('ascii', 'ignore').decode('ascii')
    lowered = str(ascii_text).lower()
    no_punct = lowered.translate(str.maketrans('', '', string.punctuation))
    return no_punct.strip().split()

  def build(self, texts):
    """Counts tokens in `texts` and assigns ids in sorted-token order."""
    for text in texts:
      for token in self.tokenize_text(text):
        self.wordcount[token] = self.wordcount.get(token, 0) + 1
    # Deterministic ids: new tokens are numbered in sorted order; tokens
    # already present (e.g. <UNK>) keep their id.
    for token in sorted(self.wordcount):
      self.word2id.setdefault(token, len(self.word2id))

  def threshold_rare_words(self, wordcount_threshold=5):
    """Remaps words seen fewer than `wordcount_threshold` times to <UNK>."""
    for word in self.word2id:
      if self.wordcount[word] < wordcount_threshold:
        self.word2id[word] = 0

  def encode_text(self, text):
    """Returns the list of token ids for `text`; unknown tokens map to 0."""
    return [self.word2id.get(token, 0) for token in self.tokenize_text(text)]

  def get_size(self):
    """Number of vocabulary entries, including <UNK>."""
    return len(self.word2id)
class TextLSTMModel(torch.nn.Module):
  """LSTM text encoder: embedding -> LSTM -> dropout + linear projection."""

  def __init__(self,
               texts_to_build_vocab,
               word_embed_dim=512,
               lstm_hidden_dim=512):
    """Builds the vocabulary from `texts_to_build_vocab` and the layers.

    Args:
      texts_to_build_vocab: iterable of strings used to build the vocab.
      word_embed_dim: int, word embedding dimensionality.
      lstm_hidden_dim: int, LSTM hidden size (also the output feature size).
    """
    super(TextLSTMModel, self).__init__()

    self.vocab = SimpleVocab()
    self.vocab.build(texts_to_build_vocab)
    vocab_size = self.vocab.get_size()

    self.word_embed_dim = word_embed_dim
    self.lstm_hidden_dim = lstm_hidden_dim
    self.embedding_layer = torch.nn.Embedding(vocab_size, word_embed_dim)
    self.lstm = torch.nn.LSTM(word_embed_dim, lstm_hidden_dim)
    self.fc_output = torch.nn.Sequential(
        torch.nn.Dropout(p=0.1),
        torch.nn.Linear(lstm_hidden_dim, lstm_hidden_dim),
    )

  def forward(self, x):
    """input x: list of strings, or list of already-encoded token-id lists."""
    if type(x) is list:
      # BUGFIX: the original checked `type(x[0]) is str or type(x[0]) is
      # unicode`. `unicode` does not exist on Python 3, so passing a list of
      # pre-encoded texts raised NameError. isinstance(..., str) is the
      # correct Python 3 check and also accepts str subclasses.
      if isinstance(x[0], str):
        x = [self.vocab.encode_text(text) for text in x]

    assert type(x) is list
    assert type(x[0]) is list
    assert type(x[0][0]) is int
    return self.forward_encoded_texts(x)

  def forward_encoded_texts(self, texts):
    """Encodes a batch of variable-length token-id lists.

    Returns a (batch, lstm_hidden_dim) tensor built from each sequence's
    last valid LSTM step, passed through the output projection.
    """
    # to tensor: (max_len, batch), zero-padded after each sequence's end
    lengths = [len(t) for t in texts]
    itexts = torch.zeros((np.max(lengths), len(texts))).long()
    for i in range(len(texts)):
      itexts[:lengths[i], i] = torch.tensor(texts[i])

    # embed words
    itexts = torch.autograd.Variable(itexts).cuda()
    etexts = self.embedding_layer(itexts)

    # lstm
    lstm_output, _ = self.forward_lstm_(etexts)

    # get last output (using length)
    text_features = []
    for i in range(len(texts)):
      text_features.append(lstm_output[lengths[i] - 1, i, :])

    # output
    text_features = torch.stack(text_features)
    text_features = self.fc_output(text_features)
    return text_features

  def forward_lstm_(self, etexts):
    """Runs the LSTM from a zero initial state; returns (outputs, last state)."""
    batch_size = etexts.shape[1]
    first_hidden = (torch.zeros(1, batch_size, self.lstm_hidden_dim),
                    torch.zeros(1, batch_size, self.lstm_hidden_dim))
    first_hidden = (first_hidden[0].cuda(), first_hidden[1].cuda())
    lstm_output, last_hidden = self.lstm(etexts, first_hidden)
    return lstm_output, last_hidden
| 4,013 | 30.359375 | 80 | py |
tirg | tirg-master/img_text_composition_models.py | # Copyright 2019 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Models for Text and Image Composition."""
import numpy as np
import torch
import torchvision
import torch.nn.functional as F
import text_model
import torch_functions
class ConCatModule(torch.nn.Module):
  """Module wrapper around torch.cat along the feature dimension."""

  def __init__(self):
    super(ConCatModule, self).__init__()

  def forward(self, x):
    """Concatenates the sequence of tensors `x` along dim 1."""
    return torch.cat(x, dim=1)
class ImgTextCompositionBase(torch.nn.Module):
  """Base class for image + text composition.

  Subclasses implement the three `extract_*`/`compose_*` hooks; this class
  provides the two training losses over normalized composed/target features.
  """

  def __init__(self):
    super(ImgTextCompositionBase, self).__init__()
    self.normalization_layer = torch_functions.NormalizationLayer(
        normalize_scale=4.0, learn_scale=True)
    self.soft_triplet_loss = torch_functions.TripletLoss()

  def extract_img_feature(self, imgs):
    raise NotImplementedError

  def extract_text_feature(self, texts):
    raise NotImplementedError

  def compose_img_text(self, imgs, texts):
    raise NotImplementedError

  def compute_loss(self,
                   imgs_query,
                   modification_texts,
                   imgs_target,
                   soft_triplet_loss=True):
    """Loss between composed (query+text) features and target image features."""
    mod_img1 = self.compose_img_text(imgs_query, modification_texts)
    mod_img1 = self.normalization_layer(mod_img1)
    img2 = self.extract_img_feature(imgs_target)
    img2 = self.normalization_layer(img2)
    assert (mod_img1.shape[0] == img2.shape[0] and
            mod_img1.shape[1] == img2.shape[1])
    if soft_triplet_loss:
      return self.compute_soft_triplet_loss_(mod_img1, img2)
    else:
      return self.compute_batch_based_classification_loss_(mod_img1, img2)

  def compute_soft_triplet_loss_(self, mod_img1, img2):
    """Soft triplet loss; query i and target i share label i (up to 3 random
    triplets per anchor)."""
    triplets = []
    # BUGFIX: the original used `range(...) + range(...)`, which is Python 2
    # syntax -- on Python 3 range objects do not support `+` and this line
    # raised TypeError. Materialize both as lists before concatenating.
    labels = list(range(mod_img1.shape[0])) + list(range(img2.shape[0]))
    for i in range(len(labels)):
      triplets_i = []
      for j in range(len(labels)):
        if labels[i] == labels[j] and i != j:
          for k in range(len(labels)):
            if labels[i] != labels[k]:
              triplets_i.append([i, j, k])
      np.random.shuffle(triplets_i)
      triplets += triplets_i[:3]  # cap at 3 triplets per anchor
    assert (triplets and len(triplets) < 2000)
    return self.soft_triplet_loss(torch.cat([mod_img1, img2]), triplets)

  def compute_batch_based_classification_loss_(self, mod_img1, img2):
    """Cross-entropy over in-batch similarities; the diagonal is correct."""
    x = torch.mm(mod_img1, img2.transpose(0, 1))
    labels = torch.tensor(range(x.shape[0])).long()
    labels = torch.autograd.Variable(labels).cuda()
    return F.cross_entropy(x, labels)
class ImgEncoderTextEncoderBase(ImgTextCompositionBase):
  """Base class for image and text encoder.

  Provides a pretrained ResNet-18 image encoder and an LSTM text encoder,
  both projecting into the same `embed_dim`-dimensional space.
  """

  def __init__(self, texts, embed_dim):
    super(ImgEncoderTextEncoderBase, self).__init__()

    # img model: ImageNet-pretrained ResNet-18 whose average pool is made
    # input-size agnostic and whose classifier head is replaced by a linear
    # projection (ResNet-18's final feature size is 512) to embed_dim.
    img_model = torchvision.models.resnet18(pretrained=True)

    class GlobalAvgPool2d(torch.nn.Module):

      def forward(self, x):
        # Pools any spatial size down to 1x1, so inputs need not be 224x224.
        return F.adaptive_avg_pool2d(x, (1, 1))

    img_model.avgpool = GlobalAvgPool2d()
    img_model.fc = torch.nn.Sequential(torch.nn.Linear(512, embed_dim))
    self.img_model = img_model

    # text model: LSTM encoder with a vocabulary built from `texts`.
    self.text_model = text_model.TextLSTMModel(
        texts_to_build_vocab=texts,
        word_embed_dim=embed_dim,
        lstm_hidden_dim=embed_dim)

  def extract_img_feature(self, imgs):
    """Returns (batch, embed_dim) embeddings for a batch of image tensors."""
    return self.img_model(imgs)

  def extract_text_feature(self, texts):
    """Returns (batch, embed_dim) embeddings for a list of strings."""
    return self.text_model(texts)
class SimpleModelImageOnly(ImgEncoderTextEncoderBase):
  """Baseline: the composed query is just the image embedding."""

  def compose_img_text(self, imgs, texts):
    # `texts` is intentionally ignored.
    return self.extract_img_feature(imgs)
class SimpleModelTextOnly(ImgEncoderTextEncoderBase):
  """Baseline: the composed query is just the text embedding."""

  def compose_img_text(self, imgs, texts):
    # `imgs` is intentionally ignored.
    return self.extract_text_feature(texts)
class Concat(ImgEncoderTextEncoderBase):
  """Concatenation model: MLP over concatenated image and text features."""

  def __init__(self, texts, embed_dim):
    super(Concat, self).__init__(texts, embed_dim)

    # composer
    class Composer(torch.nn.Module):
      """Inner composer class: 2*embed_dim -> embed_dim MLP."""

      def __init__(self):
        super(Composer, self).__init__()
        self.m = torch.nn.Sequential(
            torch.nn.BatchNorm1d(2 * embed_dim), torch.nn.ReLU(),
            torch.nn.Linear(2 * embed_dim, 2 * embed_dim),
            torch.nn.BatchNorm1d(2 * embed_dim), torch.nn.ReLU(),
            torch.nn.Dropout(0.1), torch.nn.Linear(2 * embed_dim, embed_dim))

      def forward(self, x):
        # `x` is a tuple/list of (img_features, text_features).
        f = torch.cat(x, dim=1)
        f = self.m(f)
        return f

    self.composer = Composer()

  def compose_img_text(self, imgs, texts):
    """Encodes both modalities and feeds them through the composer MLP."""
    img_features = self.extract_img_feature(imgs)
    text_features = self.extract_text_feature(texts)
    return self.compose_img_text_features(img_features, text_features)

  def compose_img_text_features(self, img_features, text_features):
    return self.composer((img_features, text_features))
class TIRG(ImgEncoderTextEncoderBase):
  """The TIGR model.

  The method is described in
  Nam Vo, Lu Jiang, Chen Sun, Kevin Murphy, Li-Jia Li, Li Fei-Fei, James Hays.
  "Composing Text and Image for Image Retrieval - An Empirical Odyssey"
  CVPR 2019. arXiv:1812.07119
  """

  def __init__(self, texts, embed_dim):
    super(TIRG, self).__init__(texts, embed_dim)

    # Learnable mixing weights; only a[0] (gated identity path) and a[1]
    # (residual path) are used below. The parameter keeps its original
    # 4-element shape so existing checkpoints still load.
    self.a = torch.nn.Parameter(torch.tensor([1.0, 10.0, 1.0, 1.0]))
    self.gated_feature_composer = torch.nn.Sequential(
        ConCatModule(), torch.nn.BatchNorm1d(2 * embed_dim), torch.nn.ReLU(),
        torch.nn.Linear(2 * embed_dim, embed_dim))
    self.res_info_composer = torch.nn.Sequential(
        ConCatModule(), torch.nn.BatchNorm1d(2 * embed_dim), torch.nn.ReLU(),
        torch.nn.Linear(2 * embed_dim, 2 * embed_dim), torch.nn.ReLU(),
        torch.nn.Linear(2 * embed_dim, embed_dim))

  def compose_img_text(self, imgs, texts):
    """Encodes both modalities, then fuses them with the TIRG gating."""
    img_features = self.extract_img_feature(imgs)
    text_features = self.extract_text_feature(texts)
    return self.compose_img_text_features(img_features, text_features)

  def compose_img_text_features(self, img_features, text_features):
    """Gated residual fusion: sigmoid(f1) * img * a[0] + f2 * a[1]."""
    f1 = self.gated_feature_composer((img_features, text_features))
    f2 = self.res_info_composer((img_features, text_features))
    # `F.sigmoid` has been deprecated since PyTorch 0.4.1; `torch.sigmoid`
    # is the supported, numerically identical replacement.
    f = torch.sigmoid(f1) * img_features * self.a[0] + f2 * self.a[1]
    return f
class TIRGLastConv(ImgEncoderTextEncoderBase):
  """The TIGR model with spatial modification over the last conv layer.

  The method is described in
  Nam Vo, Lu Jiang, Chen Sun, Kevin Murphy, Li-Jia Li, Li Fei-Fei, James Hays.
  "Composing Text and Image for Image Retrieval - An Empirical Odyssey"
  CVPR 2019. arXiv:1812.07119
  """
  def __init__(self, texts, embed_dim):
    super(TIRGLastConv, self).__init__(texts, embed_dim)
    # Learned mixing weights; only a[0] and a[1] are used below.
    self.a = torch.nn.Parameter(torch.tensor([1.0, 10.0, 1.0, 1.0]))
    # Residual branch over the concatenated (conv feature, tiled text) map.
    # NOTE(review): 512 assumes the img_model's layer4 output has 512
    # channels (e.g. resnet18/34) — confirm against the encoder used.
    self.mod2d = torch.nn.Sequential(
        torch.nn.BatchNorm2d(512 + embed_dim),
        torch.nn.Conv2d(512 + embed_dim, 512 + embed_dim, [3, 3], padding=1),
        torch.nn.ReLU(),
        torch.nn.Conv2d(512 + embed_dim, 512, [3, 3], padding=1),
    )
    # Gate branch with the same shape as the residual branch.
    self.mod2d_gate = torch.nn.Sequential(
        torch.nn.BatchNorm2d(512 + embed_dim),
        torch.nn.Conv2d(512 + embed_dim, 512 + embed_dim, [3, 3], padding=1),
        torch.nn.ReLU(),
        torch.nn.Conv2d(512 + embed_dim, 512, [3, 3], padding=1),
    )
  def compose_img_text(self, imgs, texts):
    # Runs the image encoder manually layer by layer so the text
    # modification can be injected after layer4, before pooling.
    text_features = self.extract_text_feature(texts)
    x = imgs
    x = self.img_model.conv1(x)
    x = self.img_model.bn1(x)
    x = self.img_model.relu(x)
    x = self.img_model.maxpool(x)
    x = self.img_model.layer1(x)
    x = self.img_model.layer2(x)
    x = self.img_model.layer3(x)
    x = self.img_model.layer4(x)
    # mod
    y = text_features
    # Tile the text vector over every spatial position of the conv map.
    y = y.reshape((y.shape[0], y.shape[1], 1, 1)).repeat(
        1, 1, x.shape[2], x.shape[3])
    z = torch.cat((x, y), dim=1)
    t = self.mod2d(z)
    tgate = self.mod2d_gate(z)
    # Gated residual composition (same form as TIRG, but spatial).
    x = self.a[0] * F.sigmoid(tgate) * x + self.a[1] * t
    x = self.img_model.avgpool(x)
    x = x.view(x.size(0), -1)
    x = self.img_model.fc(x)
    return x
| 8,478 | 32.121094 | 80 | py |
tirg | tirg-master/torch_functions.py |
# TODO(lujiang): put it into the third-party
# MIT License
# Copyright (c) 2018 Nam Vo
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
"""Metric learning functions.
Codes are modified from:
https://github.com/lugiavn/generalization-dml/blob/master/nams.py
"""
import numpy as np
import torch
import torchvision
def pairwise_distances(x, y=None):
  """
  Input: x is a Nxd matrix
         y is an optional Mxd matirx
  Output: dist is a NxM matrix where dist[i,j] is the square norm between
  x[i,:] and y[j,:]
  if y is not given then use 'y=x'.
  i.e. dist[i,j] = ||x[i,:]-y[j,:]||^2
  source:
  https://discuss.pytorch.org/t/efficient-distance-matrix-computation/9065/2
  """
  # Expand ||a-b||^2 = ||a||^2 + ||b||^2 - 2<a,b> and clamp tiny negatives.
  x_sq = (x**2).sum(1).view(-1, 1)
  if y is None:
    other_t = torch.transpose(x, 0, 1)
    other_sq = x_sq.view(1, -1)
  else:
    other_t = torch.transpose(y, 0, 1)
    other_sq = (y**2).sum(1).view(1, -1)
  dist = x_sq + other_sq - 2.0 * torch.mm(x, other_t)
  return torch.clamp(dist, 0.0, np.inf)
class MyTripletLossFunc(torch.autograd.Function):
  """Soft triplet loss with a hand-written backward pass.

  For each triplet (i, j, k): loss += log(1 + exp(d(i,j) - d(i,k))),
  averaged over all triplets, where d is squared Euclidean distance.

  NOTE(review): this is the legacy stateful autograd.Function style
  (instance __init__ plus non-static forward/backward); modern PyTorch
  requires static forward/backward methods — confirm the pinned torch
  version before upgrading.
  """
  def __init__(self, triplets):
    super(MyTripletLossFunc, self).__init__()
    # triplets: iterable of (anchor, positive, negative) row indices.
    self.triplets = triplets
    self.triplet_count = len(triplets)
  def forward(self, features):
    self.save_for_backward(features)
    # Cache the full pairwise distance matrix for reuse in backward.
    self.distances = pairwise_distances(features).cpu().numpy()
    loss = 0.0
    triplet_count = 0.0
    correct_count = 0.0
    for i, j, k in self.triplets:
      w = 1.0  # per-triplet weight (uniform here)
      triplet_count += w
      loss += w * np.log(1 +
                         np.exp(self.distances[i, j] - self.distances[i, k]))
      # Count triplets already satisfied (computed but not returned).
      if self.distances[i, j] < self.distances[i, k]:
        correct_count += 1
    loss /= triplet_count
    return torch.FloatTensor((loss,))
  def backward(self, grad_output):
    features, = self.saved_tensors
    features_np = features.cpu().numpy()
    grad_features = features.clone() * 0.0
    grad_features_np = grad_features.cpu().numpy()
    for i, j, k in self.triplets:
      w = 1.0
      # f is the derivative of log(1+exp(.)) wrt the distance gap.
      f = 1.0 - 1.0 / (
          1.0 + np.exp(self.distances[i, j] - self.distances[i, k]))
      grad_features_np[i, :] += w * f * (
          features_np[i, :] - features_np[j, :]) / self.triplet_count
      grad_features_np[j, :] += w * f * (
          features_np[j, :] - features_np[i, :]) / self.triplet_count
      grad_features_np[i, :] += -w * f * (
          features_np[i, :] - features_np[k, :]) / self.triplet_count
      grad_features_np[k, :] += -w * f * (
          features_np[k, :] - features_np[i, :]) / self.triplet_count
    # Copy the numpy gradient back into the torch tensor row by row.
    for i in range(features_np.shape[0]):
      grad_features[i, :] = torch.from_numpy(grad_features_np[i, :])
    grad_features *= float(grad_output.data[0])
    return grad_features
class TripletLoss(torch.nn.Module):
  """Class for the triplet loss.

  Wraps MyTripletLossFunc as a Module; optionally runs the embeddings
  through pre_layer first.
  """
  def __init__(self, pre_layer=None):
    super(TripletLoss, self).__init__()
    # pre_layer: optional module applied to x before computing the loss.
    self.pre_layer = pre_layer
  def forward(self, x, triplets):
    if self.pre_layer is not None:
      x = self.pre_layer(x)
    loss = MyTripletLossFunc(triplets)(x)
    return loss
class NormalizationLayer(torch.nn.Module):
  """L2-normalizes each row of the input and rescales it by a scale factor.

  The scale is a learnable Parameter when learn_scale is True, otherwise a
  fixed float.
  """
  def __init__(self, normalize_scale=1.0, learn_scale=True):
    super(NormalizationLayer, self).__init__()
    self.norm_s = float(normalize_scale)
    if learn_scale:
      self.norm_s = torch.nn.Parameter(torch.FloatTensor((self.norm_s,)))
  def forward(self, x):
    # Per-row L2 norm, broadcast back to x's shape for the division.
    row_norms = torch.norm(x, dim=1, keepdim=True).expand_as(x)
    return self.norm_s * x / row_norms
| 4,133 | 31.046512 | 80 | py |
tirg | tirg-master/datasets.py | # Copyright 2019 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Provides data for training and testing."""
import numpy as np
import PIL
import skimage.io
import torch
import json
import torch.utils.data
import torchvision
import warnings
import random
class BaseDataset(torch.utils.data.Dataset):
  """Abstract dataset base: subclasses populate imgs and test_queries."""
  def __init__(self):
    super(BaseDataset, self).__init__()
    self.test_queries = []
    self.imgs = []
  def get_loader(self,
                 batch_size,
                 shuffle=False,
                 drop_last=False,
                 num_workers=0):
    # Identity collate: each batch is the raw list of sample dicts.
    return torch.utils.data.DataLoader(
        self,
        collate_fn=lambda i: i,
        batch_size=batch_size,
        shuffle=shuffle,
        num_workers=num_workers,
        drop_last=drop_last)
  def get_test_queries(self):
    return self.test_queries
  def get_all_texts(self):
    raise NotImplementedError
  def __getitem__(self, idx):
    # Training samples are generated on the fly; idx is ignored.
    return self.generate_random_query_target()
  def generate_random_query_target(self):
    raise NotImplementedError
  def get_img(self, idx, raw_img=False):
    raise NotImplementedError
class CSSDataset(BaseDataset):
  """CSS dataset.

  Loads a precomputed .npy dict with per-split object lists and
  modification triplets ('from' image -> text mod -> 'to' image).
  """
  def __init__(self, path, split='train', transform=None):
    super(CSSDataset, self).__init__()
    self.img_path = path + '/images/'
    self.transform = transform
    self.split = split
    # NOTE(review): np.load of a pickled dict needs allow_pickle=True on
    # newer numpy — confirm the pinned numpy version.
    self.data = np.load(path + '/css_toy_dataset_novel2_small.dup.npy').item()
    self.mods = self.data[self.split]['mods']
    self.imgs = []
    for objects in self.data[self.split]['objects_img']:
      label = len(self.imgs)
      if 'labels' in self.data[self.split]:
        label = self.data[self.split]['labels'][label]
      self.imgs += [{
          'objects': objects,
          'label': label,
          'captions': [str(label)]
      }]
    # Map image id -> list of (mod id, target image id) pairs.
    self.imgid2modtarget = {}
    for i in range(len(self.imgs)):
      self.imgid2modtarget[i] = []
    for i, mod in enumerate(self.mods):
      for k in range(len(mod['from'])):
        f = mod['from'][k]
        t = mod['to'][k]
        self.imgid2modtarget[f] += [(i, t)]
    self.generate_test_queries_()
  def generate_test_queries_(self):
    # One test query per (source image, modification) pair.
    test_queries = []
    for mod in self.mods:
      for i, j in zip(mod['from'], mod['to']):
        test_queries += [{
            'source_img_id': i,
            'target_caption': self.imgs[j]['captions'][0],
            'mod': {
                'str': mod['to_str']
            }
        }]
    self.test_queries = test_queries
  def get_1st_training_query(self):
    # Sample a random mod and a random (from, to) pair within it;
    # remembers the source image for a follow-up second query.
    i = np.random.randint(0, len(self.mods))
    mod = self.mods[i]
    j = np.random.randint(0, len(mod['from']))
    self.last_from = mod['from'][j]
    self.last_mod = [i]
    return mod['from'][j], i, mod['to'][j]
  def get_2nd_training_query(self):
    # Re-use the previous source image with a different modification.
    modid, new_to = random.choice(self.imgid2modtarget[self.last_from])
    while modid in self.last_mod:
      modid, new_to = random.choice(self.imgid2modtarget[self.last_from])
    self.last_mod += [modid]
    # mod = self.mods[modid]
    return self.last_from, modid, new_to
  def generate_random_query_target(self):
    # Alternate between a fresh query and a second query on the same
    # source; the bare except falls back to a fresh query when no second
    # mod exists (or on the very first call, before last_mod is set).
    try:
      if len(self.last_mod) < 2:
        img1id, modid, img2id = self.get_2nd_training_query()
      else:
        img1id, modid, img2id = self.get_1st_training_query()
    except:
      img1id, modid, img2id = self.get_1st_training_query()
    out = {}
    out['source_img_id'] = img1id
    out['source_img_data'] = self.get_img(img1id)
    out['target_img_id'] = img2id
    out['target_img_data'] = self.get_img(img2id)
    out['mod'] = {'id': modid, 'str': self.mods[modid]['to_str']}
    return out
  def __len__(self):
    return len(self.imgs)
  def get_all_texts(self):
    return [mod['to_str'] for mod in self.mods]
  def get_img(self, idx, raw_img=False, get_2d=False):
    """Gets CSS images."""
    def generate_2d_image(objects):
      # Rasterize the object list into a 64x64 RGB float image in [0, 1].
      img = np.ones((64, 64, 3))
      colortext2values = {
          'gray': [87, 87, 87],
          'red': [244, 35, 35],
          'blue': [42, 75, 215],
          'green': [29, 205, 20],
          'brown': [129, 74, 25],
          'purple': [129, 38, 192],
          'cyan': [41, 208, 208],
          'yellow': [255, 238, 51]
      }
      for obj in objects:
        s = 4.0  # half-size of the drawn shape, doubled for 'large'
        if obj['size'] == 'large':
          s *= 2
        c = [0, 0, 0]
        for j in range(3):
          c[j] = 1.0 * colortext2values[obj['color']][j] / 255.0
        y = obj['pos'][0] * img.shape[0]
        x = obj['pos'][1] * img.shape[1]
        if obj['shape'] == 'rectangle':
          img[int(y - s):int(y + s), int(x - s):int(x + s), :] = c
        if obj['shape'] == 'circle':
          # Draw as a diamond: row width shrinks with distance from center.
          for y0 in range(int(y - s), int(y + s) + 1):
            x0 = x + (abs(y0 - y) - s)
            x1 = 2 * x - x0
            img[y0, int(x0):int(x1), :] = c
        if obj['shape'] == 'triangle':
          for y0 in range(int(y - s), int(y + s)):
            x0 = x + (y0 - y + s) / 2
            x1 = 2 * x - x0
            x0, x1 = min(x0, x1), max(x0, x1)
            img[y0, int(x0):int(x1), :] = c
      return img
    if self.img_path is None or get_2d:
      img = generate_2d_image(self.imgs[idx]['objects'])
    else:
      img_path = self.img_path + ('/css_%s_%06d.png' % (self.split, int(idx)))
      with open(img_path, 'rb') as f:
        img = PIL.Image.open(f)
        img = img.convert('RGB')
    if raw_img:
      return img
    if self.transform:
      img = self.transform(img)
    return img
class Fashion200k(BaseDataset):
  """Fashion200k dataset.

  Training pairs are generated on the fly from caption structure: two
  images whose captions share a "parent" (caption minus one word) form a
  (source, target) pair, and the mod string is the replaced word.
  """
  def __init__(self, path, split='train', transform=None):
    super(Fashion200k, self).__init__()
    self.split = split
    self.transform = transform
    self.img_path = path + '/'
    # get label files for the split
    label_path = path + '/labels/'
    from os import listdir
    from os.path import isfile
    from os.path import join
    label_files = [
        f for f in listdir(label_path) if isfile(join(label_path, f))
    ]
    label_files = [f for f in label_files if split in f]
    # read image info from label files
    self.imgs = []
    def caption_post_process(s):
      # Replace punctuation with word tokens so captions stay whitespace-
      # splittable.
      return s.strip().replace('.',
                               'dotmark').replace('?', 'questionmark').replace(
                                   '&', 'andmark').replace('*', 'starmark')
    for filename in label_files:
      print('read ' + filename)
      with open(label_path + '/' + filename) as f:
        lines = f.readlines()
      for line in lines:
        # NOTE(review): the delimiter here looks whitespace-mangled in this
        # copy — confirm against the original label file format (tab?).
        line = line.split(' ')
        img = {
            'file_path': line[0],
            'detection_score': line[1],
            'captions': [caption_post_process(line[2])],
            'split': split,
            'modifiable': False
        }
        self.imgs += [img]
    print('Fashion200k:', len(self.imgs), 'images')
    # generate query for training or testing
    if split == 'train':
      self.caption_index_init_()
    else:
      self.generate_test_queries_()
  def get_different_word(self, source_caption, target_caption):
    # Find the first word unique to each caption; relies on Python's
    # for-loop variable surviving after break. NOTE(review): if the
    # captions share all words, the last word of each is used — assumes
    # captions always differ by at least one word.
    source_words = source_caption.split()
    target_words = target_caption.split()
    for source_word in source_words:
      if source_word not in target_words:
        break
    for target_word in target_words:
      if target_word not in source_words:
        break
    mod_str = 'replace ' + source_word + ' with ' + target_word
    return source_word, target_word, mod_str
  def generate_test_queries_(self):
    # Build test queries from the fixed (source, target) file pairs.
    file2imgid = {}
    for i, img in enumerate(self.imgs):
      file2imgid[img['file_path']] = i
    with open(self.img_path + '/test_queries.txt') as f:
      lines = f.readlines()
    self.test_queries = []
    for line in lines:
      source_file, target_file = line.split()
      idx = file2imgid[source_file]
      target_idx = file2imgid[target_file]
      source_caption = self.imgs[idx]['captions'][0]
      target_caption = self.imgs[target_idx]['captions'][0]
      source_word, target_word, mod_str = self.get_different_word(
          source_caption, target_caption)
      self.test_queries += [{
          'source_img_id': idx,
          'source_caption': source_caption,
          'target_caption': target_caption,
          'mod': {
              'str': mod_str
          }
      }]
  def caption_index_init_(self):
    """ index caption to generate training query-target example on the fly later"""
    # index caption 2 caption_id and caption 2 image_ids
    caption2id = {}
    id2caption = {}
    caption2imgids = {}
    for i, img in enumerate(self.imgs):
      for c in img['captions']:
        if c not in caption2id:
          id2caption[len(caption2id)] = c
          caption2id[c] = len(caption2id)
          caption2imgids[c] = []
        caption2imgids[c].append(i)
    self.caption2imgids = caption2imgids
    print(len(caption2imgids), 'unique cations')
    # parent captions are 1-word shorter than their children
    parent2children_captions = {}
    for c in caption2id.keys():
      for w in c.split():
        p = c.replace(w, '')
        # NOTE(review): this replace looks whitespace-mangled in this copy
        # (likely meant to collapse double spaces) — confirm upstream.
        p = p.replace(' ', ' ').strip()
        if p not in parent2children_captions:
          parent2children_captions[p] = []
        if c not in parent2children_captions[p]:
          parent2children_captions[p].append(c)
    self.parent2children_captions = parent2children_captions
    # identify parent captions for each image
    for img in self.imgs:
      img['modifiable'] = False
      img['parent_captions'] = []
    for p in parent2children_captions:
      if len(parent2children_captions[p]) >= 2:
        for c in parent2children_captions[p]:
          for imgid in caption2imgids[c]:
            self.imgs[imgid]['modifiable'] = True
            self.imgs[imgid]['parent_captions'] += [p]
    num_modifiable_imgs = 0
    for img in self.imgs:
      if img['modifiable']:
        num_modifiable_imgs += 1
    print('Modifiable images', num_modifiable_imgs)
  def caption_index_sample_(self, idx):
    # Resample until we hit an image that has at least one sibling caption.
    while not self.imgs[idx]['modifiable']:
      idx = np.random.randint(0, len(self.imgs))
    # find random target image (same parent)
    img = self.imgs[idx]
    while True:
      p = random.choice(img['parent_captions'])
      c = random.choice(self.parent2children_captions[p])
      if c not in img['captions']:
        break
    target_idx = random.choice(self.caption2imgids[c])
    # find the word difference between query and target (not in parent caption)
    source_caption = self.imgs[idx]['captions'][0]
    target_caption = self.imgs[target_idx]['captions'][0]
    source_word, target_word, mod_str = self.get_different_word(
        source_caption, target_caption)
    return idx, target_idx, source_word, target_word, mod_str
  def get_all_texts(self):
    texts = []
    for img in self.imgs:
      for c in img['captions']:
        texts.append(c)
    return texts
  def __len__(self):
    return len(self.imgs)
  def __getitem__(self, idx):
    idx, target_idx, source_word, target_word, mod_str = self.caption_index_sample_(
        idx)
    out = {}
    out['source_img_id'] = idx
    out['source_img_data'] = self.get_img(idx)
    out['source_caption'] = self.imgs[idx]['captions'][0]
    out['target_img_id'] = target_idx
    out['target_img_data'] = self.get_img(target_idx)
    out['target_caption'] = self.imgs[target_idx]['captions'][0]
    out['mod'] = {'str': mod_str}
    return out
  def get_img(self, idx, raw_img=False):
    img_path = self.img_path + self.imgs[idx]['file_path']
    with open(img_path, 'rb') as f:
      img = PIL.Image.open(f)
      img = img.convert('RGB')
    if raw_img:
      return img
    if self.transform:
      img = self.transform(img)
    return img
class MITStates(BaseDataset):
  """MITStates dataset.

  Images live under path/images/"<adj> <noun>"/; pairs share a noun and the
  mod string is the target adjective. A fixed noun list defines the test
  split.
  """
  def __init__(self, path, split='train', transform=None):
    super(MITStates, self).__init__()
    self.path = path
    self.transform = transform
    self.split = split
    self.imgs = []
    test_nouns = [
        u'armor', u'bracelet', u'bush', u'camera', u'candy', u'castle',
        u'ceramic', u'cheese', u'clock', u'clothes', u'coffee', u'fan', u'fig',
        u'fish', u'foam', u'forest', u'fruit', u'furniture', u'garden', u'gate',
        u'glass', u'horse', u'island', u'laptop', u'lead', u'lightning',
        u'mirror', u'orange', u'paint', u'persimmon', u'plastic', u'plate',
        u'potato', u'road', u'rubber', u'sand', u'shell', u'sky', u'smoke',
        u'steel', u'stream', u'table', u'tea', u'tomato', u'vacuum', u'wax',
        u'wheel', u'window', u'wool'
    ]
    from os import listdir
    for f in listdir(path + '/images'):
      if ' ' not in f:
        continue
      adj, noun = f.split()
      if adj == 'adj':
        continue
      if split == 'train' and noun in test_nouns:
        continue
      if split == 'test' and noun not in test_nouns:
        continue
      for file_path in listdir(path + '/images/' + f):
        assert (file_path.endswith('jpg'))
        self.imgs += [{
            'file_path': path + '/images/' + f + '/' + file_path,
            'captions': [f],
            'adj': adj,
            'noun': noun
        }]
    self.caption_index_init_()
    if split == 'test':
      self.generate_test_queries_()
  def get_all_texts(self):
    texts = []
    for img in self.imgs:
      texts += img['captions']
    return texts
  def __getitem__(self, idx):
    # Emits samples in pairs: each sampled pair of targets with distinct
    # adjectives yields two items, the second cached in self.saved_item.
    try:
      self.saved_item
    except:
      self.saved_item = None
    if self.saved_item is None:
      while True:
        idx, target_idx1 = self.caption_index_sample_(idx)
        idx, target_idx2 = self.caption_index_sample_(idx)
        if self.imgs[target_idx1]['adj'] != self.imgs[target_idx2]['adj']:
          break
      idx, target_idx = [idx, target_idx1]
      self.saved_item = [idx, target_idx2]
    else:
      idx, target_idx = self.saved_item
      self.saved_item = None
    mod_str = self.imgs[target_idx]['adj']
    return {
        'source_img_id': idx,
        'source_img_data': self.get_img(idx),
        'source_caption': self.imgs[idx]['captions'][0],
        'target_img_id': target_idx,
        'target_img_data': self.get_img(target_idx),
        'target_caption': self.imgs[target_idx]['captions'][0],
        'mod': {
            'str': mod_str
        }
    }
  def caption_index_init_(self):
    # Build caption -> image ids and noun -> adjectives indexes.
    self.caption2imgids = {}
    self.noun2adjs = {}
    for i, img in enumerate(self.imgs):
      cap = img['captions'][0]
      adj = img['adj']
      noun = img['noun']
      if cap not in self.caption2imgids.keys():
        self.caption2imgids[cap] = []
      if noun not in self.noun2adjs.keys():
        self.noun2adjs[noun] = []
      self.caption2imgids[cap].append(i)
      if adj not in self.noun2adjs[noun]:
        self.noun2adjs[noun].append(adj)
    # Fix: dict.iteritems() is Python-2-only; .items() works on both.
    for noun, adjs in self.noun2adjs.items():
      assert len(adjs) >= 2
  def caption_index_sample_(self, idx):
    # Pick a random target sharing the source's noun.
    noun = self.imgs[idx]['noun']
    # adj = self.imgs[idx]['adj']
    target_adj = random.choice(self.noun2adjs[noun])
    target_caption = target_adj + ' ' + noun
    target_idx = random.choice(self.caption2imgids[target_caption])
    return idx, target_idx
  def generate_test_queries_(self):
    # One query per image per alternative adjective of its noun.
    self.test_queries = []
    for idx, img in enumerate(self.imgs):
      adj = img['adj']
      noun = img['noun']
      for target_adj in self.noun2adjs[noun]:
        if target_adj != adj:
          mod_str = target_adj
          self.test_queries += [{
              'source_img_id': idx,
              'source_caption': adj + ' ' + noun,
              'target_caption': target_adj + ' ' + noun,
              'mod': {
                  'str': mod_str
              }
          }]
    print(len(self.test_queries), 'test queries')
  def __len__(self):
    return len(self.imgs)
  def get_img(self, idx, raw_img=False):
    img_path = self.imgs[idx]['file_path']
    with open(img_path, 'rb') as f:
      img = PIL.Image.open(f)
      img = img.convert('RGB')
    if raw_img:
      return img
    if self.transform:
      img = self.transform(img)
    return img
| 16,642 | 30.520833 | 84 | py |
tirg | tirg-master/third_party/torch_functions.py | # Copyright (c) 2018 Nam Vo
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
"""Metric learning functions.
Codes are modified from:
https://github.com/lugiavn/generalization-dml/blob/master/nams.py
"""
import numpy as np
import torch
import torchvision
def pairwise_distances(x, y=None):
  """
  Input: x is a Nxd matrix
         y is an optional Mxd matirx
  Output: dist is a NxM matrix where dist[i,j] is the square norm between
  x[i,:] and y[j,:]
  if y is not given then use 'y=x'.
  i.e. dist[i,j] = ||x[i,:]-y[j,:]||^2
  source:
  https://discuss.pytorch.org/t/efficient-distance-matrix-computation/9065/2
  """
  # Expand ||a-b||^2 = ||a||^2 + ||b||^2 - 2<a,b>; clamp tiny negatives.
  left_sq = (x**2).sum(1).view(-1, 1)
  if y is None:
    right_t = torch.transpose(x, 0, 1)
    right_sq = left_sq.view(1, -1)
  else:
    right_t = torch.transpose(y, 0, 1)
    right_sq = (y**2).sum(1).view(1, -1)
  dist = left_sq + right_sq - 2.0 * torch.mm(x, right_t)
  return torch.clamp(dist, 0.0, np.inf)
class MyTripletLossFunc(torch.autograd.Function):
  """Soft triplet loss with a hand-written backward pass.

  For each triplet (i, j, k): loss += log(1 + exp(d(i,j) - d(i,k))),
  averaged over all triplets, where d is squared Euclidean distance.

  NOTE(review): this is the legacy stateful autograd.Function style
  (instance __init__ plus non-static forward/backward); modern PyTorch
  requires static forward/backward methods — confirm the pinned torch
  version before upgrading.
  """
  def __init__(self, triplets):
    super(MyTripletLossFunc, self).__init__()
    # triplets: iterable of (anchor, positive, negative) row indices.
    self.triplets = triplets
    self.triplet_count = len(triplets)
  def forward(self, features):
    self.save_for_backward(features)
    # Cache the full pairwise distance matrix for reuse in backward.
    self.distances = pairwise_distances(features).cpu().numpy()
    loss = 0.0
    triplet_count = 0.0
    correct_count = 0.0
    for i, j, k in self.triplets:
      w = 1.0  # per-triplet weight (uniform here)
      triplet_count += w
      loss += w * np.log(1 +
                         np.exp(self.distances[i, j] - self.distances[i, k]))
      # Count triplets already satisfied (computed but not returned).
      if self.distances[i, j] < self.distances[i, k]:
        correct_count += 1
    loss /= triplet_count
    return torch.FloatTensor((loss,))
  def backward(self, grad_output):
    features, = self.saved_tensors
    features_np = features.cpu().numpy()
    grad_features = features.clone() * 0.0
    grad_features_np = grad_features.cpu().numpy()
    for i, j, k in self.triplets:
      w = 1.0
      # f is the derivative of log(1+exp(.)) wrt the distance gap.
      f = 1.0 - 1.0 / (
          1.0 + np.exp(self.distances[i, j] - self.distances[i, k]))
      grad_features_np[i, :] += w * f * (
          features_np[i, :] - features_np[j, :]) / self.triplet_count
      grad_features_np[j, :] += w * f * (
          features_np[j, :] - features_np[i, :]) / self.triplet_count
      grad_features_np[i, :] += -w * f * (
          features_np[i, :] - features_np[k, :]) / self.triplet_count
      grad_features_np[k, :] += -w * f * (
          features_np[k, :] - features_np[i, :]) / self.triplet_count
    # Copy the numpy gradient back into the torch tensor row by row.
    for i in range(features_np.shape[0]):
      grad_features[i, :] = torch.from_numpy(grad_features_np[i, :])
    grad_features *= float(grad_output.data[0])
    return grad_features
class TripletLoss(torch.nn.Module):
  """Class for the triplet loss.

  Wraps MyTripletLossFunc as a Module; optionally runs the embeddings
  through pre_layer first.
  """
  def __init__(self, pre_layer=None):
    super(TripletLoss, self).__init__()
    # pre_layer: optional module applied to x before computing the loss.
    self.pre_layer = pre_layer
  def forward(self, x, triplets):
    if self.pre_layer is not None:
      x = self.pre_layer(x)
    loss = MyTripletLossFunc(triplets)(x)
    return loss
class NormalizationLayer(torch.nn.Module):
  """L2-normalizes each row of the input and rescales it by a scale factor.

  The scale is a learnable Parameter when learn_scale is True, otherwise a
  fixed float.
  """
  def __init__(self, normalize_scale=1.0, learn_scale=True):
    super(NormalizationLayer, self).__init__()
    self.norm_s = float(normalize_scale)
    if learn_scale:
      self.norm_s = torch.nn.Parameter(torch.FloatTensor((self.norm_s,)))
  def forward(self, x):
    # Per-row L2 norm, broadcast back to x's shape for the division.
    row_norms = torch.norm(x, dim=1, keepdim=True).expand_as(x)
    return self.norm_s * x / row_norms
| 4,072 | 31.584 | 80 | py |
2s-AGCN | 2s-AGCN-master/main.py | #!/usr/bin/env python
from __future__ import print_function
import argparse
import inspect
import os
import pickle
import random
import shutil
import time
from collections import OrderedDict
import numpy as np
# torch
import torch
import torch.backends.cudnn as cudnn
import torch.nn as nn
import torch.optim as optim
import yaml
from tensorboardX import SummaryWriter
from torch.autograd import Variable
from torch.optim.lr_scheduler import _LRScheduler
from tqdm import tqdm
class GradualWarmupScheduler(_LRScheduler):
    """Linear LR warm-up for total_epoch epochs, then hands off to
    after_scheduler.

    NOTE(review): get_lr divides by total_epoch, so total_epoch == 0
    (the default warm_up_epoch) would raise ZeroDivisionError — confirm
    configs always set a positive warm-up. after_scheduler is assumed
    non-None once warm-up ends.
    """
    def __init__(self, optimizer, total_epoch, after_scheduler=None):
        self.total_epoch = total_epoch
        self.after_scheduler = after_scheduler
        self.finished = False
        self.last_epoch = -1
        super().__init__(optimizer)
    def get_lr(self):
        # Linear ramp: (epoch+1)/total_epoch of each base LR.
        return [base_lr * (self.last_epoch + 1) / self.total_epoch for base_lr in self.base_lrs]
    def step(self, epoch=None, metric=None):
        # After warm-up, delegate stepping to the wrapped scheduler;
        # metric is forwarded for metric-driven schedulers.
        if self.last_epoch >= self.total_epoch - 1:
            if metric is None:
                return self.after_scheduler.step(epoch)
            else:
                return self.after_scheduler.step(metric, epoch)
        else:
            return super(GradualWarmupScheduler, self).step(epoch)
def init_seed(_):
    """Reset every RNG to a fixed seed for reproducibility.

    The argument is ignored; it exists so this can serve as a DataLoader
    worker_init_fn (which passes the worker id).
    """
    random.seed(1)
    np.random.seed(1)
    torch.manual_seed(1)
    torch.cuda.manual_seed_all(1)
    # Trade cuDNN autotuning speed for deterministic kernels.
    torch.backends.cudnn.benchmark = False
    torch.backends.cudnn.deterministic = True
def get_parser():
    """Build the CLI argument parser for training/testing.

    NOTE(review): str2bool is defined elsewhere in this file (not visible
    in this chunk).
    """
    # parameter priority: command line > config > default
    parser = argparse.ArgumentParser(
        description='Spatial Temporal Graph Convolution Network')
    parser.add_argument(
        '--work-dir',
        default='./work_dir/temp',
        help='the work folder for storing results')
    parser.add_argument('-model_saved_name', default='')
    parser.add_argument(
        '--config',
        default='./config/nturgbd-cross-view/test_bone.yaml',
        help='path to the configuration file')
    # processor
    parser.add_argument(
        '--phase', default='train', help='must be train or test')
    parser.add_argument(
        '--save-score',
        type=str2bool,
        default=False,
        help='if ture, the classification score will be stored')
    # visulize and debug
    parser.add_argument(
        '--seed', type=int, default=1, help='random seed for pytorch')
    parser.add_argument(
        '--log-interval',
        type=int,
        default=100,
        help='the interval for printing messages (#iteration)')
    parser.add_argument(
        '--save-interval',
        type=int,
        default=2,
        help='the interval for storing models (#iteration)')
    parser.add_argument(
        '--eval-interval',
        type=int,
        default=5,
        help='the interval for evaluating models (#iteration)')
    parser.add_argument(
        '--print-log',
        type=str2bool,
        default=True,
        help='print logging or not')
    parser.add_argument(
        '--show-topk',
        type=int,
        default=[1, 5],
        nargs='+',
        help='which Top K accuracy will be shown')
    # feeder
    parser.add_argument(
        '--feeder', default='feeder.feeder', help='data loader will be used')
    parser.add_argument(
        '--num-worker',
        type=int,
        default=32,
        help='the number of worker for data loader')
    parser.add_argument(
        '--train-feeder-args',
        default=dict(),
        help='the arguments of data loader for training')
    parser.add_argument(
        '--test-feeder-args',
        default=dict(),
        help='the arguments of data loader for test')
    # model
    parser.add_argument('--model', default=None, help='the model will be used')
    parser.add_argument(
        '--model-args',
        type=dict,
        default=dict(),
        help='the arguments of model')
    parser.add_argument(
        '--weights',
        default=None,
        help='the weights for network initialization')
    parser.add_argument(
        '--ignore-weights',
        type=str,
        default=[],
        nargs='+',
        help='the name of weights which will be ignored in the initialization')
    # optim
    parser.add_argument(
        '--base-lr', type=float, default=0.01, help='initial learning rate')
    parser.add_argument(
        '--step',
        type=int,
        default=[20, 40, 60],
        nargs='+',
        help='the epoch where optimizer reduce the learning rate')
    parser.add_argument(
        '--device',
        type=int,
        default=0,
        nargs='+',
        help='the indexes of GPUs for training or testing')
    parser.add_argument('--optimizer', default='SGD', help='type of optimizer')
    parser.add_argument(
        '--nesterov', type=str2bool, default=False, help='use nesterov or not')
    parser.add_argument(
        '--batch-size', type=int, default=256, help='training batch size')
    parser.add_argument(
        '--test-batch-size', type=int, default=256, help='test batch size')
    parser.add_argument(
        '--start-epoch',
        type=int,
        default=0,
        help='start training from which epoch')
    parser.add_argument(
        '--num-epoch',
        type=int,
        default=80,
        help='stop training in which epoch')
    parser.add_argument(
        '--weight-decay',
        type=float,
        default=0.0005,
        help='weight decay for optimizer')
    # fine-tuning / warm-up switches (consumed by Processor.train)
    parser.add_argument('--only_train_part', default=False)
    parser.add_argument('--only_train_epoch', default=0)
    parser.add_argument('--warm_up_epoch', default=0)
    return parser
class Processor():
"""
Processor for Skeleton-based Action Recgnition
"""
    def __init__(self, arg):
        """Set up writers, model, optimizer and data loaders from parsed args."""
        self.arg = arg
        self.save_arg()
        if arg.phase == 'train':
            if not arg.train_feeder_args['debug']:
                # Interactively offer to wipe a stale tensorboard log dir.
                if os.path.isdir(arg.model_saved_name):
                    print('log_dir: ', arg.model_saved_name, 'already exist')
                    answer = input('delete it? y/n:')
                    if answer == 'y':
                        shutil.rmtree(arg.model_saved_name)
                        print('Dir removed: ', arg.model_saved_name)
                        input('Refresh the website of tensorboard by pressing any keys')
                    else:
                        print('Dir not removed: ', arg.model_saved_name)
                self.train_writer = SummaryWriter(os.path.join(arg.model_saved_name, 'train'), 'train')
                self.val_writer = SummaryWriter(os.path.join(arg.model_saved_name, 'val'), 'val')
            else:
                # Debug mode: a single shared writer.
                self.train_writer = self.val_writer = SummaryWriter(os.path.join(arg.model_saved_name, 'test'), 'test')
        self.global_step = 0
        self.load_model()
        self.load_optimizer()
        self.load_data()
        self.lr = self.arg.base_lr
        self.best_acc = 0
    def load_data(self):
        """Build train (phase=='train' only) and test DataLoaders."""
        Feeder = import_class(self.arg.feeder)
        self.data_loader = dict()
        if self.arg.phase == 'train':
            self.data_loader['train'] = torch.utils.data.DataLoader(
                dataset=Feeder(**self.arg.train_feeder_args),
                batch_size=self.arg.batch_size,
                shuffle=True,
                num_workers=self.arg.num_worker,
                drop_last=True,
                worker_init_fn=init_seed)
        self.data_loader['test'] = torch.utils.data.DataLoader(
            dataset=Feeder(**self.arg.test_feeder_args),
            batch_size=self.arg.test_batch_size,
            shuffle=False,
            num_workers=self.arg.num_worker,
            drop_last=False,
            worker_init_fn=init_seed)
def load_model(self):
output_device = self.arg.device[0] if type(self.arg.device) is list else self.arg.device
self.output_device = output_device
Model = import_class(self.arg.model)
shutil.copy2(inspect.getfile(Model), self.arg.work_dir)
print(Model)
self.model = Model(**self.arg.model_args).cuda(output_device)
print(self.model)
self.loss = nn.CrossEntropyLoss().cuda(output_device)
if self.arg.weights:
self.global_step = int(arg.weights[:-3].split('-')[-1])
self.print_log('Load weights from {}.'.format(self.arg.weights))
if '.pkl' in self.arg.weights:
with open(self.arg.weights, 'r') as f:
weights = pickle.load(f)
else:
weights = torch.load(self.arg.weights)
weights = OrderedDict(
[[k.split('module.')[-1],
v.cuda(output_device)] for k, v in weights.items()])
keys = list(weights.keys())
for w in self.arg.ignore_weights:
for key in keys:
if w in key:
if weights.pop(key, None) is not None:
self.print_log('Sucessfully Remove Weights: {}.'.format(key))
else:
self.print_log('Can Not Remove Weights: {}.'.format(key))
try:
self.model.load_state_dict(weights)
except:
state = self.model.state_dict()
diff = list(set(state.keys()).difference(set(weights.keys())))
print('Can not find these weights:')
for d in diff:
print(' ' + d)
state.update(weights)
self.model.load_state_dict(state)
if type(self.arg.device) is list:
if len(self.arg.device) > 1:
self.model = nn.DataParallel(
self.model,
device_ids=self.arg.device,
output_device=output_device)
    def load_optimizer(self):
        """Create the SGD/Adam optimizer plus warm-up + multi-step LR schedule."""
        if self.arg.optimizer == 'SGD':
            self.optimizer = optim.SGD(
                self.model.parameters(),
                lr=self.arg.base_lr,
                momentum=0.9,
                nesterov=self.arg.nesterov,
                weight_decay=self.arg.weight_decay)
        elif self.arg.optimizer == 'Adam':
            self.optimizer = optim.Adam(
                self.model.parameters(),
                lr=self.arg.base_lr,
                weight_decay=self.arg.weight_decay)
        else:
            raise ValueError()
        # Step decay (x0.1 at each milestone), wrapped in linear warm-up.
        lr_scheduler_pre = optim.lr_scheduler.MultiStepLR(
            self.optimizer, milestones=self.arg.step, gamma=0.1)
        self.lr_scheduler = GradualWarmupScheduler(self.optimizer, total_epoch=self.arg.warm_up_epoch,
                                                   after_scheduler=lr_scheduler_pre)
        self.print_log('using warm up, epoch: {}'.format(self.arg.warm_up_epoch))
def save_arg(self):
# save arg
arg_dict = vars(self.arg)
if not os.path.exists(self.arg.work_dir):
os.makedirs(self.arg.work_dir)
with open('{}/config.yaml'.format(self.arg.work_dir), 'w') as f:
yaml.dump(arg_dict, f)
def adjust_learning_rate(self, epoch):
if self.arg.optimizer == 'SGD' or self.arg.optimizer == 'Adam':
if epoch < self.arg.warm_up_epoch:
lr = self.arg.base_lr * (epoch + 1) / self.arg.warm_up_epoch
else:
lr = self.arg.base_lr * (
0.1 ** np.sum(epoch >= np.array(self.arg.step)))
for param_group in self.optimizer.param_groups:
param_group['lr'] = lr
return lr
else:
raise ValueError()
def print_time(self):
localtime = time.asctime(time.localtime(time.time()))
self.print_log("Local current time : " + localtime)
def print_log(self, str, print_time=True):
if print_time:
localtime = time.asctime(time.localtime(time.time()))
str = "[ " + localtime + ' ] ' + str
print(str)
if self.arg.print_log:
with open('{}/log.txt'.format(self.arg.work_dir), 'a') as f:
print(str, file=f)
def record_time(self):
self.cur_time = time.time()
return self.cur_time
def split_time(self):
split_time = time.time() - self.cur_time
self.record_time()
return split_time
    def train(self, epoch, save_model=False):
        """Run one training epoch over self.data_loader['train'].

        Logs loss/accuracy/lr to self.train_writer per batch, prints an
        epoch summary, and optionally saves a CPU-side state dict named
        '<model_saved_name>-<epoch>-<global_step>.pt'.
        """
        self.model.train()
        self.print_log('Training epoch: {}'.format(epoch + 1))
        loader = self.data_loader['train']
        self.adjust_learning_rate(epoch)
        loss_value = []
        self.train_writer.add_scalar('epoch', epoch, self.global_step)
        self.record_time()
        # Seeded with a tiny epsilon so the percentage split below never
        # divides by zero on an empty loader.
        timer = dict(dataloader=0.001, model=0.001, statistics=0.001)
        process = tqdm(loader)
        if self.arg.only_train_part:
            # Freeze/unfreeze only the adaptive-adjacency ('PA') parameters
            # depending on whether we are past `only_train_epoch`.
            if epoch > self.arg.only_train_epoch:
                print('only train part, require grad')
                for key, value in self.model.named_parameters():
                    if 'PA' in key:
                        value.requires_grad = True
            else:
                print('only train part, do not require grad')
                for key, value in self.model.named_parameters():
                    if 'PA' in key:
                        value.requires_grad = False
        for batch_idx, (data, label, index) in enumerate(process):
            self.global_step += 1
            # get data
            data = Variable(data.float().cuda(self.output_device), requires_grad=False)
            label = Variable(label.long().cuda(self.output_device), requires_grad=False)
            timer['dataloader'] += self.split_time()
            # forward; the model may return (logits, l1_regularizer)
            output = self.model(data)
            if isinstance(output, tuple):
                output, l1 = output
                l1 = l1.mean()
            else:
                l1 = 0
            loss = self.loss(output, label) + l1
            # backward
            self.optimizer.zero_grad()
            loss.backward()
            self.optimizer.step()
            loss_value.append(loss.data.item())
            timer['model'] += self.split_time()
            value, predict_label = torch.max(output.data, 1)
            acc = torch.mean((predict_label == label.data).float())
            self.train_writer.add_scalar('acc', acc, self.global_step)
            self.train_writer.add_scalar('loss', loss.data.item(), self.global_step)
            self.train_writer.add_scalar('loss_l1', l1, self.global_step)
            # statistics
            self.lr = self.optimizer.param_groups[0]['lr']
            self.train_writer.add_scalar('lr', self.lr, self.global_step)
            timer['statistics'] += self.split_time()
        # statistics of time consumption and loss
        proportion = {
            k: '{:02d}%'.format(int(round(v * 100 / sum(timer.values()))))
            for k, v in timer.items()
        }
        self.print_log(
            '\tMean training loss: {:.4f}.'.format(np.mean(loss_value)))
        self.print_log(
            '\tTime consumption: [Data]{dataloader}, [Network]{model}'.format(
                **proportion))
        if save_model:
            # Strip any 'module.' DataParallel prefix and move weights to CPU
            # so the checkpoint loads regardless of device configuration.
            state_dict = self.model.state_dict()
            weights = OrderedDict([[k.split('module.')[-1],
                                    v.cpu()] for k, v in state_dict.items()])
            torch.save(weights, self.arg.model_saved_name + '-' + str(epoch) + '-' + str(int(self.global_step)) + '.pt')
    def eval(self, epoch, save_score=False, loader_name=['test'], wrong_file=None, result_file=None):
        """Evaluate the model on each loader in `loader_name`.

        Optionally writes per-sample predictions to `result_file` and
        misclassified samples to `wrong_file`, tracks self.best_acc, and
        can pickle the per-sample score dict.

        NOTE(review): `f_w`/`f_r` are never closed, `volatile=True` is a
        removed PyTorch<0.4 idiom (redundant inside torch.no_grad()), and
        the `l1` logged to val_writer is whatever the LAST batch produced —
        all pre-existing quirks left untouched here.
        """
        if wrong_file is not None:
            f_w = open(wrong_file, 'w')
        if result_file is not None:
            f_r = open(result_file, 'w')
        self.model.eval()
        self.print_log('Eval epoch: {}'.format(epoch + 1))
        for ln in loader_name:
            loss_value = []
            score_frag = []
            right_num_total = 0
            total_num = 0
            loss_total = 0
            step = 0
            process = tqdm(self.data_loader[ln])
            for batch_idx, (data, label, index) in enumerate(process):
                with torch.no_grad():
                    data = Variable(
                        data.float().cuda(self.output_device),
                        requires_grad=False,
                        volatile=True)
                    label = Variable(
                        label.long().cuda(self.output_device),
                        requires_grad=False,
                        volatile=True)
                    output = self.model(data)
                    # The model may return (logits, l1_regularizer).
                    if isinstance(output, tuple):
                        output, l1 = output
                        l1 = l1.mean()
                    else:
                        l1 = 0
                    loss = self.loss(output, label)
                    score_frag.append(output.data.cpu().numpy())
                    loss_value.append(loss.data.item())
                    _, predict_label = torch.max(output.data, 1)
                    step += 1
                if wrong_file is not None or result_file is not None:
                    predict = list(predict_label.cpu().numpy())
                    true = list(label.data.cpu().numpy())
                    for i, x in enumerate(predict):
                        if result_file is not None:
                            f_r.write(str(x) + ',' + str(true[i]) + '\n')
                        if x != true[i] and wrong_file is not None:
                            f_w.write(str(index[i]) + ',' + str(x) + ',' + str(true[i]) + '\n')
            score = np.concatenate(score_frag)
            loss = np.mean(loss_value)
            # top_k is implemented by the dataset (see feeders.Feeder.top_k).
            accuracy = self.data_loader[ln].dataset.top_k(score, 1)
            if accuracy > self.best_acc:
                self.best_acc = accuracy
            # self.lr_scheduler.step(loss)
            print('Accuracy: ', accuracy, ' model: ', self.arg.model_saved_name)
            if self.arg.phase == 'train':
                self.val_writer.add_scalar('loss', loss, self.global_step)
                self.val_writer.add_scalar('loss_l1', l1, self.global_step)
                self.val_writer.add_scalar('acc', accuracy, self.global_step)
            score_dict = dict(
                zip(self.data_loader[ln].dataset.sample_name, score))
            self.print_log('\tMean {} loss of {} batches: {}.'.format(
                ln, len(self.data_loader[ln]), np.mean(loss_value)))
            for k in self.arg.show_topk:
                self.print_log('\tTop{}: {:.2f}%'.format(
                    k, 100 * self.data_loader[ln].dataset.top_k(score, k)))
            if save_score:
                with open('{}/epoch{}_{}_score.pkl'.format(
                        self.arg.work_dir, epoch + 1, ln), 'wb') as f:
                    pickle.dump(score_dict, f)
    def start(self):
        """Entry point: run the full train/eval loop ('train' phase) or a
        single evaluation pass ('test' phase) according to self.arg.phase.

        NOTE(review): `global_step` divides len(train loader) — already a
        batch count — by batch_size again; looks off but is preserved as-is.
        Training also stops early once the LR decays below 1e-3.
        """
        if self.arg.phase == 'train':
            self.print_log('Parameters:\n{}\n'.format(str(vars(self.arg))))
            self.global_step = self.arg.start_epoch * len(self.data_loader['train']) / self.arg.batch_size
            for epoch in range(self.arg.start_epoch, self.arg.num_epoch):
                if self.lr < 1e-3:
                    break
                # Save on the checkpoint interval and always at the last epoch.
                save_model = ((epoch + 1) % self.arg.save_interval == 0) or (
                        epoch + 1 == self.arg.num_epoch)
                self.train(epoch, save_model=save_model)
                self.eval(
                    epoch,
                    save_score=self.arg.save_score,
                    loader_name=['test'])
            print('best accuracy: ', self.best_acc, ' model_name: ', self.arg.model_saved_name)
        elif self.arg.phase == 'test':
            # In test phase, dump per-sample right/wrong files unless debugging.
            if not self.arg.test_feeder_args['debug']:
                wf = self.arg.model_saved_name + '_wrong.txt'
                rf = self.arg.model_saved_name + '_right.txt'
            else:
                wf = rf = None
            if self.arg.weights is None:
                raise ValueError('Please appoint --weights.')
            self.arg.print_log = False
            self.print_log('Model: {}.'.format(self.arg.model))
            self.print_log('Weights: {}.'.format(self.arg.weights))
            self.eval(epoch=0, save_score=self.arg.save_score, loader_name=['test'], wrong_file=wf, result_file=rf)
            self.print_log('Done.\n')
def str2bool(v):
    """Parse a human-friendly boolean string for argparse.

    Accepts yes/true/t/y/1 (True) and no/false/f/n/0 (False),
    case-insensitively; anything else raises ArgumentTypeError.
    """
    lowered = v.lower()
    if lowered in ('yes', 'true', 't', 'y', '1'):
        return True
    if lowered in ('no', 'false', 'f', 'n', '0'):
        return False
    raise argparse.ArgumentTypeError('Boolean value expected.')
def import_class(name):
    """Resolve a dotted path like 'feeders.feeder.Feeder' to the named object."""
    head, *attrs = name.split('.')
    target = __import__(head)  # import returns the top-level package/module
    while attrs:
        target = getattr(target, attrs.pop(0))
    return target
if __name__ == '__main__':
    parser = get_parser()
    # Load defaults from the YAML config file, if one was given, then
    # re-parse so command-line flags still override config values.
    p = parser.parse_args()
    if p.config is not None:
        with open(p.config, 'r') as f:
            # BUG FIX: yaml.load() without an explicit Loader is unsafe on
            # untrusted input and raises TypeError under PyYAML >= 6.
            # FullLoader keeps the previous semantics for trusted configs.
            default_arg = yaml.load(f, Loader=yaml.FullLoader)
        key = vars(p).keys()
        for k in default_arg.keys():
            if k not in key:
                print('WRONG ARG: {}'.format(k))
                assert (k in key)
        parser.set_defaults(**default_arg)

    # BUG FIX: bind `arg` unconditionally — the original only assigned it
    # inside the config branch, so running without --config raised
    # NameError at Processor(arg).
    arg = parser.parse_args()
    init_seed(0)
    processor = Processor(arg)
    processor.start()
| 22,084 | 37.143351 | 120 | py |
2s-AGCN | 2s-AGCN-master/model/agcn.py | import math
import numpy as np
import torch
import torch.nn as nn
from torch.autograd import Variable
def import_class(name):
    """Import and return the object named by a dotted path,
    e.g. 'graph.ntu_rgb_d.Graph'."""
    parts = name.split('.')
    obj = __import__(parts[0])
    index = 1
    while index < len(parts):
        obj = getattr(obj, parts[index])
        index += 1
    return obj
def conv_branch_init(conv, branches):
    """He-style normal init for one branch conv, with the variance shared
    across `branches` parallel branches; bias is zeroed."""
    w = conv.weight
    fan = w.size(0) * w.size(1) * w.size(2) * branches
    nn.init.normal_(w, 0, math.sqrt(2. / fan))
    nn.init.constant_(conv.bias, 0)
def conv_init(conv):
    """Kaiming (fan-out) init for conv weights; zero bias."""
    nn.init.constant_(conv.bias, 0)
    nn.init.kaiming_normal_(conv.weight, mode='fan_out')
def bn_init(bn, scale):
    """Initialise BatchNorm affine params: weight=scale, bias=0."""
    nn.init.constant_(bn.bias, 0)
    nn.init.constant_(bn.weight, scale)
class unit_tcn(nn.Module):
    """Temporal convolution unit: a (kernel_size x 1) conv over the frame
    axis of an (N, C, T, V) tensor, followed by BatchNorm (no activation)."""

    def __init__(self, in_channels, out_channels, kernel_size=9, stride=1):
        super(unit_tcn, self).__init__()
        padding = ((kernel_size - 1) // 2, 0)  # "same" padding along T
        self.conv = nn.Conv2d(in_channels, out_channels,
                              kernel_size=(kernel_size, 1),
                              padding=padding,
                              stride=(stride, 1))
        self.bn = nn.BatchNorm2d(out_channels)
        self.relu = nn.ReLU()
        conv_init(self.conv)
        bn_init(self.bn, 1)

    def forward(self, x):
        return self.bn(self.conv(x))
class unit_gcn(nn.Module):
    """Adaptive graph convolution over the joint axis (2s-AGCN).

    Combines a learnable adjacency (PA, initialised near zero), the fixed
    skeleton adjacency A, and a data-dependent adjacency computed from
    embedded features, per subset.  Input/output: (N, C, T, V).
    """

    def __init__(self, in_channels, out_channels, A, coff_embedding=4, num_subset=3):
        super(unit_gcn, self).__init__()
        inter_channels = out_channels // coff_embedding
        self.inter_c = inter_channels
        # Learnable additive adjacency, near-zero so training starts from A.
        self.PA = nn.Parameter(torch.from_numpy(A.astype(np.float32)))
        nn.init.constant_(self.PA, 1e-6)
        self.A = Variable(torch.from_numpy(A.astype(np.float32)), requires_grad=False)
        self.num_subset = num_subset

        # conv_a/conv_b embed features for the data-dependent adjacency;
        # conv_d projects the aggregated features per subset.
        self.conv_a = nn.ModuleList()
        self.conv_b = nn.ModuleList()
        self.conv_d = nn.ModuleList()
        for i in range(self.num_subset):
            self.conv_a.append(nn.Conv2d(in_channels, inter_channels, 1))
            self.conv_b.append(nn.Conv2d(in_channels, inter_channels, 1))
            self.conv_d.append(nn.Conv2d(in_channels, out_channels, 1))

        # 1x1 + BN shortcut when the channel count changes.
        if in_channels != out_channels:
            self.down = nn.Sequential(
                nn.Conv2d(in_channels, out_channels, 1),
                nn.BatchNorm2d(out_channels)
            )
        else:
            self.down = lambda x: x

        self.bn = nn.BatchNorm2d(out_channels)
        self.soft = nn.Softmax(-2)
        self.relu = nn.ReLU()

        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                conv_init(m)
            elif isinstance(m, nn.BatchNorm2d):
                bn_init(m, 1)
        # Near-zero BN scale so the residual branch dominates at init.
        bn_init(self.bn, 1e-6)
        for i in range(self.num_subset):
            conv_branch_init(self.conv_d[i], self.num_subset)

    def forward(self, x):
        N, C, T, V = x.size()
        A = self.A.cuda(x.get_device())
        A = A + self.PA

        y = None
        for i in range(self.num_subset):
            # Embed x twice and correlate across joints -> softmax-normalised
            # N x V x V data-dependent adjacency.
            A1 = self.conv_a[i](x).permute(0, 3, 1, 2).contiguous().view(N, V, self.inter_c * T)
            A2 = self.conv_b[i](x).view(N, self.inter_c * T, V)
            A1 = self.soft(torch.matmul(A1, A2) / A1.size(-1))  # N V V
            A1 = A1 + A[i]
            A2 = x.view(N, C * T, V)
            z = self.conv_d[i](torch.matmul(A2, A1).view(N, C, T, V))
            y = z + y if y is not None else z

        y = self.bn(y)
        y += self.down(x)
        return self.relu(y)
class TCN_GCN_unit(nn.Module):
    """One ST-GCN block: spatial graph conv followed by temporal conv,
    with a residual connection around the pair."""

    def __init__(self, in_channels, out_channels, A, stride=1, residual=True):
        super(TCN_GCN_unit, self).__init__()
        self.gcn1 = unit_gcn(in_channels, out_channels, A)
        self.tcn1 = unit_tcn(out_channels, out_channels, stride=stride)
        self.relu = nn.ReLU()
        if not residual:
            self.residual = lambda x: 0
        elif in_channels == out_channels and stride == 1:
            self.residual = lambda x: x
        else:
            # 1x1 temporal conv matches channels/stride on the skip path.
            self.residual = unit_tcn(in_channels, out_channels, kernel_size=1, stride=stride)

    def forward(self, x):
        out = self.tcn1(self.gcn1(x))
        out = out + self.residual(x)
        return self.relu(out)
class Model(nn.Module):
    """2s-AGCN backbone: 10 stacked TCN_GCN_unit blocks + global average
    pooling + a linear classifier.

    Input: (N, C, T, V, M) skeleton tensor; output: (N, num_class) logits.
    NOTE(review): the first block hard-codes 3 input channels, so the
    `in_channels` argument only affects data_bn — confirm before passing
    in_channels != 3.
    """

    def __init__(self, num_class=60, num_point=25, num_person=2, graph=None, graph_args=dict(), in_channels=3):
        super(Model, self).__init__()

        if graph is None:
            raise ValueError()
        else:
            # `graph` is a dotted class path, e.g. 'graph.ntu_rgb_d.Graph'.
            Graph = import_class(graph)
            self.graph = Graph(**graph_args)

        A = self.graph.A
        # Normalises over persons*channels*joints before reshaping to N*M.
        self.data_bn = nn.BatchNorm1d(num_person * in_channels * num_point)

        self.l1 = TCN_GCN_unit(3, 64, A, residual=False)
        self.l2 = TCN_GCN_unit(64, 64, A)
        self.l3 = TCN_GCN_unit(64, 64, A)
        self.l4 = TCN_GCN_unit(64, 64, A)
        self.l5 = TCN_GCN_unit(64, 128, A, stride=2)
        self.l6 = TCN_GCN_unit(128, 128, A)
        self.l7 = TCN_GCN_unit(128, 128, A)
        self.l8 = TCN_GCN_unit(128, 256, A, stride=2)
        self.l9 = TCN_GCN_unit(256, 256, A)
        self.l10 = TCN_GCN_unit(256, 256, A)

        self.fc = nn.Linear(256, num_class)
        nn.init.normal_(self.fc.weight, 0, math.sqrt(2. / num_class))
        bn_init(self.data_bn, 1)

    def forward(self, x):
        N, C, T, V, M = x.size()

        # Fold persons into the batch: (N, C, T, V, M) -> (N*M, C, T, V).
        x = x.permute(0, 4, 3, 1, 2).contiguous().view(N, M * V * C, T)
        x = self.data_bn(x)
        x = x.view(N, M, V, C, T).permute(0, 1, 3, 4, 2).contiguous().view(N * M, C, T, V)

        x = self.l1(x)
        x = self.l2(x)
        x = self.l3(x)
        x = self.l4(x)
        x = self.l5(x)
        x = self.l6(x)
        x = self.l7(x)
        x = self.l8(x)
        x = self.l9(x)
        x = self.l10(x)

        # N*M,C,T,V
        c_new = x.size(1)
        x = x.view(N, M, c_new, -1)
        # Average over time*joints, then over persons.
        x = x.mean(3).mean(1)

        return self.fc(x)
| 5,882 | 30.972826 | 111 | py |
2s-AGCN | 2s-AGCN-master/model/aagcn.py | import math
import numpy as np
import torch
import torch.nn as nn
from torch.autograd import Variable
def import_class(name):
    """Resolve a dotted module/attribute path to the object it names."""
    segments = name.split('.')
    resolved = __import__(segments[0])
    for attr_name in segments[1:]:
        resolved = getattr(resolved, attr_name)
    return resolved
def conv_branch_init(conv, branches):
    """Normal init for a branch conv: std = sqrt(2 / (out*in*kt*branches))
    so the summed variance over `branches` parallel branches matches
    He init; bias is zeroed."""
    weight = conv.weight
    out_ch, in_ch, kt = weight.size(0), weight.size(1), weight.size(2)
    std = math.sqrt(2. / (out_ch * in_ch * kt * branches))
    nn.init.normal_(weight, 0, std)
    nn.init.constant_(conv.bias, 0)
def conv_init(conv):
    """Kaiming fan-out init for the conv weight; zero the bias."""
    weight, bias = conv.weight, conv.bias
    nn.init.kaiming_normal_(weight, mode='fan_out')
    nn.init.constant_(bias, 0)
def bn_init(bn, scale):
    """Set a BatchNorm layer's affine weight to `scale` and its bias to 0."""
    weight, bias = bn.weight, bn.bias
    nn.init.constant_(weight, scale)
    nn.init.constant_(bias, 0)
class unit_tcn(nn.Module):
    """Temporal conv unit: (kernel_size x 1) convolution along the frame
    axis of an (N, C, T, V) tensor, then BatchNorm.  The ReLU member is
    created but not applied in forward (callers apply it)."""

    def __init__(self, in_channels, out_channels, kernel_size=9, stride=1):
        super(unit_tcn, self).__init__()
        half = int((kernel_size - 1) / 2)  # "same" padding along T
        self.conv = nn.Conv2d(in_channels, out_channels, kernel_size=(kernel_size, 1),
                              padding=(half, 0), stride=(stride, 1))
        self.bn = nn.BatchNorm2d(out_channels)
        self.relu = nn.ReLU(inplace=True)
        conv_init(self.conv)
        bn_init(self.bn, 1)

    def forward(self, x):
        out = self.conv(x)
        out = self.bn(out)
        return out
class unit_gcn(nn.Module):
    """Graph convolution over the joint axis with an optional learned,
    data-dependent adjacency and STC (spatial/temporal/channel) attention
    (the AAGCN block).

    Input/output are (N, C, T, V) feature maps; `A` is the
    (num_subset, V, V) skeleton adjacency used to initialise the graph.
    """

    def __init__(self, in_channels, out_channels, A, coff_embedding=4, num_subset=3, adaptive=True, attention=True):
        super(unit_gcn, self).__init__()
        inter_channels = out_channels // coff_embedding
        self.inter_c = inter_channels
        self.out_c = out_channels
        self.in_c = in_channels
        self.num_subset = num_subset
        num_jpts = A.shape[-1]

        # One 1x1 projection per adjacency subset, applied after aggregation.
        self.conv_d = nn.ModuleList()
        for i in range(self.num_subset):
            self.conv_d.append(nn.Conv2d(in_channels, out_channels, 1))

        if adaptive:
            # Learnable adjacency initialised from the skeleton graph, plus
            # embedding convs producing a per-sample adjacency; `alpha`
            # scales the data-dependent part (starts at 0, i.e. disabled).
            self.PA = nn.Parameter(torch.from_numpy(A.astype(np.float32)))
            self.alpha = nn.Parameter(torch.zeros(1))
            self.conv_a = nn.ModuleList()
            self.conv_b = nn.ModuleList()
            for i in range(self.num_subset):
                self.conv_a.append(nn.Conv2d(in_channels, inter_channels, 1))
                self.conv_b.append(nn.Conv2d(in_channels, inter_channels, 1))
        else:
            self.A = Variable(torch.from_numpy(A.astype(np.float32)), requires_grad=False)
        self.adaptive = adaptive

        if attention:
            # Temporal attention: 1-D conv over T giving one weight per frame.
            self.conv_ta = nn.Conv1d(out_channels, 1, 9, padding=4)
            nn.init.constant_(self.conv_ta.weight, 0)
            nn.init.constant_(self.conv_ta.bias, 0)
            # Spatial attention: 1-D conv over the V joints (odd kernel
            # spanning all joints).
            ker_jpt = num_jpts - 1 if not num_jpts % 2 else num_jpts
            pad = (ker_jpt - 1) // 2
            self.conv_sa = nn.Conv1d(out_channels, 1, ker_jpt, padding=pad)
            nn.init.xavier_normal_(self.conv_sa.weight)
            nn.init.constant_(self.conv_sa.bias, 0)
            # Channel attention: squeeze-and-excitation with reduction rr.
            rr = 2
            self.fc1c = nn.Linear(out_channels, out_channels // rr)
            self.fc2c = nn.Linear(out_channels // rr, out_channels)
            nn.init.kaiming_normal_(self.fc1c.weight)
            nn.init.constant_(self.fc1c.bias, 0)
            nn.init.constant_(self.fc2c.weight, 0)
            nn.init.constant_(self.fc2c.bias, 0)
        self.attention = attention

        # 1x1 + BN shortcut when the channel count changes.
        if in_channels != out_channels:
            self.down = nn.Sequential(
                nn.Conv2d(in_channels, out_channels, 1),
                nn.BatchNorm2d(out_channels)
            )
        else:
            self.down = lambda x: x
        self.bn = nn.BatchNorm2d(out_channels)
        self.soft = nn.Softmax(-2)
        self.tan = nn.Tanh()
        self.sigmoid = nn.Sigmoid()
        self.relu = nn.ReLU(inplace=True)
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                conv_init(m)
            elif isinstance(m, nn.BatchNorm2d):
                bn_init(m, 1)
        # Near-zero BN scale so the block starts close to the shortcut path.
        bn_init(self.bn, 1e-6)
        for i in range(self.num_subset):
            conv_branch_init(self.conv_d[i], self.num_subset)

    def forward(self, x):
        N, C, T, V = x.size()
        y = None
        if self.adaptive:
            A = self.PA
            for i in range(self.num_subset):
                # Embed x twice and correlate across joints -> N x V x V
                # data-dependent adjacency, blended in via alpha.
                A1 = self.conv_a[i](x).permute(0, 3, 1, 2).contiguous().view(N, V, self.inter_c * T)
                A2 = self.conv_b[i](x).view(N, self.inter_c * T, V)
                A1 = self.tan(torch.matmul(A1, A2) / A1.size(-1))  # N V V
                A1 = A[i] + A1 * self.alpha
                A2 = x.view(N, C * T, V)
                z = self.conv_d[i](torch.matmul(A2, A1).view(N, C, T, V))
                y = z + y if y is not None else z
        else:
            # BUG FIX: the original computed `self.A.cuda(...) * self.mask`,
            # but `self.mask` is never defined anywhere in this class, so
            # every forward with adaptive=False raised AttributeError.
            # Use the fixed skeleton graph directly.
            A = self.A.cuda(x.get_device())
            for i in range(self.num_subset):
                A1 = A[i]
                A2 = x.view(N, C * T, V)
                z = self.conv_d[i](torch.matmul(A2, A1).view(N, C, T, V))
                y = z + y if y is not None else z
        y = self.bn(y)
        y += self.down(x)
        y = self.relu(y)

        if self.attention:
            # Spatial attention (over joints).
            se = y.mean(-2)  # N C V
            se1 = self.sigmoid(self.conv_sa(se))
            y = y * se1.unsqueeze(-2) + y
            # Temporal attention (over frames).
            se = y.mean(-1)
            se1 = self.sigmoid(self.conv_ta(se))
            y = y * se1.unsqueeze(-1) + y
            # Channel attention (squeeze-and-excitation).
            se = y.mean(-1).mean(-1)
            se1 = self.relu(self.fc1c(se))
            se2 = self.sigmoid(self.fc2c(se1))
            y = y * se2.unsqueeze(-1).unsqueeze(-1) + y
        return y
class TCN_GCN_unit(nn.Module):
    """One AAGCN block: adaptive graph conv (with optional attention)
    followed by a temporal conv, with a residual connection around both."""

    def __init__(self, in_channels, out_channels, A, stride=1, residual=True, adaptive=True, attention=True):
        super(TCN_GCN_unit, self).__init__()
        self.gcn1 = unit_gcn(in_channels, out_channels, A, adaptive=adaptive, attention=attention)
        self.tcn1 = unit_tcn(out_channels, out_channels, stride=stride)
        self.relu = nn.ReLU(inplace=True)
        self.attention = attention
        if not residual:
            self.residual = lambda x: 0
        elif (in_channels == out_channels) and (stride == 1):
            self.residual = lambda x: x
        else:
            # 1x1 temporal conv matches channels/stride on the skip path.
            self.residual = unit_tcn(in_channels, out_channels, kernel_size=1, stride=stride)

    def forward(self, x):
        # NOTE: the original `if self.attention: ... else: ...` executed
        # byte-identical code in both branches (the attention-specific code
        # was all commented out), so the dead conditional and the large
        # commented-out experiments were removed.
        return self.relu(self.tcn1(self.gcn1(x)) + self.residual(x))
class Model(nn.Module):
    """AAGCN backbone: 10 stacked TCN_GCN_unit blocks + global average
    pooling + dropout + linear classifier.

    Input: (N, C, T, V, M) skeleton tensor; output: (N, num_class) logits.
    NOTE(review): the first block hard-codes 3 input channels, so the
    `in_channels` argument only affects data_bn — confirm before passing
    in_channels != 3.  `graph_args=dict()` is a shared mutable default,
    harmless only because it is never mutated.
    """

    def __init__(self, num_class=60, num_point=25, num_person=2, graph=None, graph_args=dict(), in_channels=3,
                 drop_out=0, adaptive=True, attention=True):
        super(Model, self).__init__()

        if graph is None:
            raise ValueError()
        else:
            # `graph` is a dotted class path, e.g. 'graph.ntu_rgb_d.Graph'.
            Graph = import_class(graph)
            self.graph = Graph(**graph_args)

        A = self.graph.A
        self.num_class = num_class

        # Normalises over persons*channels*joints before reshaping to N*M.
        self.data_bn = nn.BatchNorm1d(num_person * in_channels * num_point)
        self.l1 = TCN_GCN_unit(3, 64, A, residual=False, adaptive=adaptive, attention=attention)
        self.l2 = TCN_GCN_unit(64, 64, A, adaptive=adaptive, attention=attention)
        self.l3 = TCN_GCN_unit(64, 64, A, adaptive=adaptive, attention=attention)
        self.l4 = TCN_GCN_unit(64, 64, A, adaptive=adaptive, attention=attention)
        self.l5 = TCN_GCN_unit(64, 128, A, stride=2, adaptive=adaptive, attention=attention)
        self.l6 = TCN_GCN_unit(128, 128, A, adaptive=adaptive, attention=attention)
        self.l7 = TCN_GCN_unit(128, 128, A, adaptive=adaptive, attention=attention)
        self.l8 = TCN_GCN_unit(128, 256, A, stride=2, adaptive=adaptive, attention=attention)
        self.l9 = TCN_GCN_unit(256, 256, A, adaptive=adaptive, attention=attention)
        self.l10 = TCN_GCN_unit(256, 256, A, adaptive=adaptive, attention=attention)

        self.fc = nn.Linear(256, num_class)
        nn.init.normal_(self.fc.weight, 0, math.sqrt(2. / num_class))
        bn_init(self.data_bn, 1)
        if drop_out:
            self.drop_out = nn.Dropout(drop_out)
        else:
            self.drop_out = lambda x: x

    def forward(self, x):
        N, C, T, V, M = x.size()

        # Fold persons into the batch: (N, C, T, V, M) -> (N*M, C, T, V).
        x = x.permute(0, 4, 3, 1, 2).contiguous().view(N, M * V * C, T)
        x = self.data_bn(x)
        x = x.view(N, M, V, C, T).permute(0, 1, 3, 4, 2).contiguous().view(N * M, C, T, V)

        x = self.l1(x)
        x = self.l2(x)
        x = self.l3(x)
        x = self.l4(x)
        x = self.l5(x)
        x = self.l6(x)
        x = self.l7(x)
        x = self.l8(x)
        x = self.l9(x)
        x = self.l10(x)

        # N*M,C,T,V
        c_new = x.size(1)
        x = x.view(N, M, c_new, -1)
        # Average over time*joints, then over persons.
        x = x.mean(3).mean(1)
        x = self.drop_out(x)

        return self.fc(x)
| 13,013 | 36.504323 | 116 | py |
2s-AGCN | 2s-AGCN-master/data_gen/kinetics_gendata.py | import argparse
import os
import numpy as np
import json
from torch.utils.data import Dataset
import pickle
from tqdm import tqdm
num_joint = 18  # OpenPose joints per skeleton (see Feeder_kinetics joint table)
max_frame = 300  # fixed temporal length of every output sequence
num_person_out = 2  # persons kept in the final tensor (highest-score first)
num_person_in = 5  # persons read from each raw frame before score ranking
class Feeder_kinetics(Dataset):
    """ Feeder for skeleton-based action recognition in kinetics-skeleton dataset
    # Joint index:
    # {0,  "Nose"}
    # {1,  "Neck"},
    # {2,  "RShoulder"},
    # {3,  "RElbow"},
    # {4,  "RWrist"},
    # {5,  "LShoulder"},
    # {6,  "LElbow"},
    # {7,  "LWrist"},
    # {8,  "RHip"},
    # {9,  "RKnee"},
    # {10, "RAnkle"},
    # {11, "LHip"},
    # {12, "LKnee"},
    # {13, "LAnkle"},
    # {14, "REye"},
    # {15, "LEye"},
    # {16, "REar"},
    # {17, "LEar"},
    Arguments:
        data_path: the path to '.npy' data, the shape of data should be (N, C, T, V, M)
        label_path: the path to label
        window_size: The length of the output sequence
        num_person_in: The number of people the feeder can observe in the input sequence
        num_person_out: The number of people the feeder in the output sequence
        debug: If true, only use the first 100 samples
    """

    def __init__(self,
                 data_path,
                 label_path,
                 ignore_empty_sample=True,
                 window_size=-1,
                 num_person_in=5,
                 num_person_out=2):
        self.data_path = data_path
        self.label_path = label_path
        self.window_size = window_size
        self.num_person_in = num_person_in
        self.num_person_out = num_person_out
        self.ignore_empty_sample = ignore_empty_sample

        self.load_data()

    def load_data(self):
        """Index the per-video skeleton JSON files and their labels."""
        # load file list
        self.sample_name = os.listdir(self.data_path)

        # load label
        label_path = self.label_path
        with open(label_path) as f:
            label_info = json.load(f)

        sample_id = [name.split('.')[0] for name in self.sample_name]
        self.label = np.array([label_info[id]['label_index'] for id in sample_id])
        has_skeleton = np.array([label_info[id]['has_skeleton'] for id in sample_id])

        # ignore the samples which does not has skeleton sequence
        if self.ignore_empty_sample:
            self.sample_name = [s for h, s in zip(has_skeleton, self.sample_name) if h]
            self.label = self.label[has_skeleton]

        # output data shape (N, C, T, V, M)
        self.N = len(self.sample_name)  # sample
        self.C = 3  # channel (x, y, confidence)
        self.T = max_frame  # frame
        self.V = num_joint  # joint
        self.M = self.num_person_out  # person

    def __len__(self):
        return len(self.sample_name)

    def __iter__(self):
        # NOTE(review): returning self without defining __next__ makes this
        # object claim to be an iterator it is not; left as-is.
        return self

    def __getitem__(self, index):
        """Load one video's skeleton JSON and return (data_numpy, label).

        data_numpy has shape (C, T, V, num_person_out); channel 0/1 are
        centred x/y coordinates (y flipped), channel 2 is the confidence.
        """
        # output shape (C, T, V, M)
        # get data
        sample_name = self.sample_name[index]
        sample_path = os.path.join(self.data_path, sample_name)
        with open(sample_path, 'r') as f:
            video_info = json.load(f)

        # fill data_numpy: keep at most num_person_in skeletons per frame
        data_numpy = np.zeros((self.C, self.T, self.V, self.num_person_in))
        for frame_info in video_info['data']:
            frame_index = frame_info['frame_index']
            for m, skeleton_info in enumerate(frame_info["skeleton"]):
                if m >= self.num_person_in:
                    break
                pose = skeleton_info['pose']
                score = skeleton_info['score']
                data_numpy[0, frame_index, :, m] = pose[0::2]
                data_numpy[1, frame_index, :, m] = pose[1::2]
                data_numpy[2, frame_index, :, m] = score

        # centralization: shift coords to [-0.5, 0.5] and flip y upward;
        # zero-confidence joints are zeroed out entirely.
        data_numpy[0:2] = data_numpy[0:2] - 0.5
        data_numpy[1:2] = -data_numpy[1:2]
        data_numpy[0][data_numpy[2] == 0] = 0
        data_numpy[1][data_numpy[2] == 0] = 0

        # get & check label index
        label = video_info['label_index']
        assert (self.label[index] == label)

        # sort persons per frame by total confidence (descending), then keep
        # only the num_person_out most confident skeletons.
        sort_index = (-data_numpy[2, :, :, :].sum(axis=1)).argsort(axis=1)
        for t, s in enumerate(sort_index):
            data_numpy[:, t, :, :] = data_numpy[:, t, :, s].transpose((1, 2, 0))
        data_numpy = data_numpy[:, :, :, 0:self.num_person_out]

        return data_numpy, label
def gendata(data_path, label_path,
            data_out_path, label_out_path,
            num_person_in=num_person_in,  # observe the first 5 persons
            num_person_out=num_person_out,  # then choose 2 persons with the highest score
            max_frame=max_frame):
    """Convert a directory of raw kinetics skeleton JSONs into one dense
    (N, 3, max_frame, num_joint, num_person_out) float32 .npy array plus a
    pickled (sample_name, labels) pair."""
    feeder = Feeder_kinetics(
        data_path=data_path,
        label_path=label_path,
        num_person_in=num_person_in,
        num_person_out=num_person_out,
        window_size=max_frame)

    names = feeder.sample_name
    labels = []
    out = np.zeros((len(names), 3, max_frame, num_joint, num_person_out), dtype=np.float32)
    for idx, _ in enumerate(tqdm(names)):
        sample, lbl = feeder[idx]
        # Short clips are left zero-padded at the tail along the frame axis.
        out[idx, :, 0:sample.shape[1], :, :] = sample
        labels.append(lbl)

    with open(label_out_path, 'wb') as f:
        pickle.dump((names, list(labels)), f)

    np.save(data_out_path, out)
if __name__ == '__main__':
    parser = argparse.ArgumentParser(
        description='Kinetics-skeleton Data Converter.')
    parser.add_argument(
        '--data_path', default='../data/kinetics_raw')
    parser.add_argument(
        '--out_folder', default='../data/kinetics')
    arg = parser.parse_args()

    # Convert both the validation and the training split.
    part = ['val', 'train']
    for p in part:
        print('kinetics ', p)
        if not os.path.exists(arg.out_folder):
            os.makedirs(arg.out_folder)
        # Expected layout: <data_path>/kinetics_<split>/ with one JSON per
        # clip, plus <data_path>/kinetics_<split>_label.json.
        data_path = '{}/kinetics_{}'.format(arg.data_path, p)
        label_path = '{}/kinetics_{}_label.json'.format(arg.data_path, p)
        data_out_path = '{}/{}_data_joint.npy'.format(arg.out_folder, p)
        label_out_path = '{}/{}_label.pkl'.format(arg.out_folder, p)

        gendata(data_path, label_path, data_out_path, label_out_path)
| 6,090 | 32.467033 | 96 | py |
2s-AGCN | 2s-AGCN-master/feeders/feeder.py | import numpy as np
import pickle
import torch
from torch.utils.data import Dataset
import sys
sys.path.extend(['../'])
from feeders import tools
class Feeder(Dataset):
    """Dataset over preprocessed (N, C, T, V, M) skeleton arrays with
    optional augmentation (random choose/shift/move) and normalization."""

    def __init__(self, data_path, label_path,
                 random_choose=False, random_shift=False, random_move=False,
                 window_size=-1, normalization=False, debug=False, use_mmap=True):
        """
        :param data_path: path to the '.npy' data of shape (N, C, T, V, M)
        :param label_path: path to the pickled (sample_name, label) pair
        :param random_choose: If true, randomly choose a portion of the input sequence
        :param random_shift: If true, randomly pad zeros at the beginning or end of sequence
        :param random_move: If true, apply a random affine jitter (see feeders.tools)
        :param window_size: The length of the output sequence
        :param normalization: If true, normalize input sequence
        :param debug: If true, only use the first 100 samples
        :param use_mmap: If true, use mmap mode to load data, which can save the running memory
        """

        self.debug = debug
        self.data_path = data_path
        self.label_path = label_path
        self.random_choose = random_choose
        self.random_shift = random_shift
        self.random_move = random_move
        self.window_size = window_size
        self.normalization = normalization
        self.use_mmap = use_mmap
        self.load_data()
        if normalization:
            self.get_mean_map()

    def load_data(self):
        """Load labels (python2 or python3 pickle) and the data array."""
        # data: N C V T M
        try:
            with open(self.label_path) as f:
                self.sample_name, self.label = pickle.load(f)
        except:
            # for pickle file from python2
            # NOTE(review): bare except also hides unrelated errors (e.g. a
            # missing file) behind the latin1 retry; left as-is.
            with open(self.label_path, 'rb') as f:
                self.sample_name, self.label = pickle.load(f, encoding='latin1')

        # load data
        if self.use_mmap:
            self.data = np.load(self.data_path, mmap_mode='r')
        else:
            self.data = np.load(self.data_path)
        if self.debug:
            self.label = self.label[0:100]
            self.data = self.data[0:100]
            self.sample_name = self.sample_name[0:100]

    def get_mean_map(self):
        """Precompute per-channel/joint mean and std maps for normalization."""
        data = self.data
        N, C, T, V, M = data.shape
        self.mean_map = data.mean(axis=2, keepdims=True).mean(axis=4, keepdims=True).mean(axis=0)
        self.std_map = data.transpose((0, 2, 4, 1, 3)).reshape((N * T * M, C * V)).std(axis=0).reshape((C, 1, V, 1))

    def __len__(self):
        return len(self.label)

    def __iter__(self):
        # NOTE(review): returning self without defining __next__ makes this
        # object claim to be an iterator it is not; left as-is.
        return self

    def __getitem__(self, index):
        """Return (data_numpy, label, index) with augmentation applied."""
        data_numpy = self.data[index]
        label = self.label[index]
        # copy out of the (possibly mmapped) array before mutating
        data_numpy = np.array(data_numpy)

        if self.normalization:
            data_numpy = (data_numpy - self.mean_map) / self.std_map
        if self.random_shift:
            data_numpy = tools.random_shift(data_numpy)
        if self.random_choose:
            data_numpy = tools.random_choose(data_numpy, self.window_size)
        elif self.window_size > 0:
            data_numpy = tools.auto_pading(data_numpy, self.window_size)
        if self.random_move:
            data_numpy = tools.random_move(data_numpy)

        return data_numpy, label, index

    def top_k(self, score, top_k):
        """Fraction of samples whose true label is in the top_k scores."""
        rank = score.argsort()
        hit_top_k = [l in rank[i, -top_k:] for i, l in enumerate(self.label)]
        return sum(hit_top_k) * 1.0 / len(hit_top_k)
def import_class(name):
    """Turn a dotted path string into the object it refers to."""
    first, *remainder = name.split('.')
    current = __import__(first)
    for piece in remainder:
        current = getattr(current, piece)
    return current
def test(data_path, label_path, vid=None, graph=None, is_3d=False):
    '''
    vis the samples using matplotlib
    :param data_path:
    :param label_path:
    :param vid: the id of sample
    :param graph: dotted path to a graph class (its `inward` edge list is drawn)
    :param is_3d: when vis NTU, set it True
    :return:
    '''
    import matplotlib.pyplot as plt
    loader = torch.utils.data.DataLoader(
        dataset=Feeder(data_path, label_path),
        batch_size=64,
        shuffle=False,
        num_workers=2)

    if vid is not None:
        # Look up the requested sample by id and add a leading batch dim.
        sample_name = loader.dataset.sample_name
        sample_id = [name.split('.')[0] for name in sample_name]
        index = sample_id.index(vid)
        data, label, index = loader.dataset[index]
        data = data.reshape((1,) + data.shape)

        # for batch_idx, (data, label) in enumerate(loader):
        N, C, T, V, M = data.shape

        plt.ion()
        fig = plt.figure()
        if is_3d:
            from mpl_toolkits.mplot3d import Axes3D
            ax = fig.add_subplot(111, projection='3d')
        else:
            ax = fig.add_subplot(111)

        if graph is None:
            # No skeleton graph: scatter the raw joints frame by frame.
            p_type = ['b.', 'g.', 'r.', 'c.', 'm.', 'y.', 'k.', 'k.', 'k.', 'k.']
            pose = [
                ax.plot(np.zeros(V), np.zeros(V), p_type[m])[0] for m in range(M)
            ]
            ax.axis([-1, 1, -1, 1])
            for t in range(T):
                for m in range(M):
                    pose[m].set_xdata(data[0, 0, t, :, m])
                    pose[m].set_ydata(data[0, 1, t, :, m])
                fig.canvas.draw()
                plt.pause(0.001)
        else:
            # With a graph: draw one line per skeleton edge per person.
            p_type = ['b-', 'g-', 'r-', 'c-', 'm-', 'y-', 'k-', 'k-', 'k-', 'k-']
            import sys
            from os import path
            sys.path.append(
                path.dirname(path.dirname(path.dirname(path.abspath(__file__)))))
            G = import_class(graph)()
            edge = G.inward
            pose = []
            for m in range(M):
                a = []
                for i in range(len(edge)):
                    if is_3d:
                        a.append(ax.plot(np.zeros(3), np.zeros(3), p_type[m])[0])
                    else:
                        a.append(ax.plot(np.zeros(2), np.zeros(2), p_type[m])[0])
                pose.append(a)
            ax.axis([-1, 1, -1, 1])
            if is_3d:
                ax.set_zlim3d(-1, 1)
            for t in range(T):
                for m in range(M):
                    for i, (v1, v2) in enumerate(edge):
                        # Skip edges where both endpoints are missing (all
                        # zeros), except those touching the neck joint (1).
                        x1 = data[0, :2, t, v1, m]
                        x2 = data[0, :2, t, v2, m]
                        if (x1.sum() != 0 and x2.sum() != 0) or v1 == 1 or v2 == 1:
                            pose[m][i].set_xdata(data[0, 0, t, [v1, v2], m])
                            pose[m][i].set_ydata(data[0, 1, t, [v1, v2], m])
                            if is_3d:
                                pose[m][i].set_3d_properties(data[0, 2, t, [v1, v2], m])
                fig.canvas.draw()
                # plt.savefig('/home/lshi/Desktop/skeleton_sequence/' + str(t) + '.jpg')
                plt.pause(0.01)
if __name__ == '__main__':
    import os
    # Route matplotlib rendering through X11 forwarding (remote display).
    os.environ['DISPLAY'] = 'localhost:10.0'
    data_path = "../data/ntu/xview/val_data_joint.npy"
    label_path = "../data/ntu/xview/val_label.pkl"
    graph = 'graph.ntu_rgb_d.Graph'
    # NTU skeletons are 3D, hence is_3d=True.
    test(data_path, label_path, vid='S004C001P003R001A032', graph=graph, is_3d=True)
    # data_path = "../data/kinetics/val_data.npy"
    # label_path = "../data/kinetics/val_label.pkl"
    # graph = 'graph.Kinetics'
    # test(data_path, label_path, vid='UOD7oll3Kqo', graph=graph)
| 7,161 | 34.81 | 116 | py |
GA_CARS_2020 | GA_CARS_2020-master/GA_Family_Selection.py | from typing import List
import tensorflow as tf
import numpy as np
import os
from deap import base
from deap import creator
from deap import tools
from keras.callbacks import EarlyStopping, ModelCheckpoint
from sklearn.model_selection import train_test_split
from tqdm import tqdm
import time
from sklearn.metrics import roc_auc_score
import json
from operator import attrgetter
from keras.models import load_model
import random
import gc
import keras.backend as K
# Enable tqdm's pandas integration (progress_apply etc.).
tqdm.pandas()
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"  # The GPU id to use, usually either "0" or "1";
os.environ["CUDA_VISIBLE_DEVICES"] = "1"  # Do other imports now...
# Let TensorFlow grow GPU memory on demand instead of reserving it all upfront.
config = tf.compat.v1.ConfigProto()
config.gpu_options.allow_growth = True
sess = tf.compat.v1.Session(config=config)
family_exp_weights = lambda x: (1 - np.exp(-x)) / (1 - np.exp(-1))
def create_output_folder(path: str) -> None:
    """
    Create the output folder at *path*; a no-op if it already exists.

    Uses os.makedirs with exist_ok=True, which also creates missing parent
    directories and avoids the try/except race of os.mkdir.
    :param path: directory path to create
    :return: None
    """
    os.makedirs(path, exist_ok=True)
def read_dict(name: str):
    """Load and return the JSON object stored in the file at *name*."""
    with open(name) as handle:
        return json.load(handle)
def single_point_mutation(individual):
    """Flip one uniformly chosen bit of *individual* in place (currently unused).

    :param individual: binary GA individual (list of 0/1)
    :return: one-tuple containing the mutated individual (DEAP convention)
    """
    position = random.randrange(len(individual))
    individual[position] = abs(individual[position] - 1)
    return individual,
def generate_child(ind1, ind2):
    """Uniform crossover: each gene is taken from parent 1 or parent 2
    with equal probability (parents are assumed to have equal length).

    :param ind1: GA Parent 1
    :param ind2: GA Parent 2
    :return: one new child (list)
    """
    return [g1 if random.random() < 0.5 else g2 for g1, g2 in zip(ind1, ind2)]
# Produce two offspring from the same parent pair, each via an independent
# uniform crossover draw.
def n_point_crossover(ind1, ind2):
    first = generate_child(ind1, ind2)
    second = generate_child(ind1, ind2)
    return first, second
def five_point_crossover(ind1, ind2):
    """Swap two disjoint segments between the parents, in place.

    Four distinct cut points are sampled and sorted; the segments
    [i1:i2) and [i3:i4) are exchanged between the two individuals.

    :param ind1: Parent-1 (mutated in place)
    :param ind2: Parent-2 (mutated in place)
    :return: the two modified individuals
    """
    i1, i2, i3, i4 = sorted(random.sample(range(1, len(ind1)), 4))
    # Segments are disjoint (i1 < i2 < i3 < i4), so two sequential swaps
    # are equivalent to one simultaneous four-way assignment.
    ind1[i1:i2], ind2[i1:i2] = ind2[i1:i2], ind1[i1:i2]
    ind1[i3:i4], ind2[i3:i4] = ind2[i3:i4], ind1[i3:i4]
    return ind1, ind2
def mixedCrossovers(ind1, ind2):
    """Crossover operator used in this experiment.

    Picks, with equal probability, either five-point or uniform (n-point)
    crossover and applies it to the feature vectors only; each child keeps
    its parent's family-mask vector untouched.

    :param ind1: Parent-1 as [feature_vector, family_mask]
    :param ind2: Parent-2 as [feature_vector, family_mask]
    :return: two children in the same [features, family] layout
    """
    feats_a, family_a = ind1
    feats_b, family_b = ind2
    if random.random() < 0.5:
        child_feats_a, child_feats_b = five_point_crossover(feats_a, feats_b)
    else:
        child_feats_a, child_feats_b = n_point_crossover(feats_a, feats_b)
    return [child_feats_a, family_a], [child_feats_b, family_b]
def save_list(lst, path):
    """Write one str()-converted item per line so results can be read mid-run.

    :param lst: any iterable of items
    :param path: destination text file
    :return: None
    """
    with open(path, "w") as handle:
        handle.writelines(str(item) + "\n" for item in lst)
def get_active_families(ind):
    """Return how many families are enabled in the individual's family mask.

    :param ind: [feature_vector, family_mask] pair
    :return: number of 1-bits in the family mask
    """
    return sum(ind[1])
def set_fits_for_inds(inds, max_ones, max_family, auc_func):
    """Assign a (minimized) fitness to every individual in the generation.

    Fitness blends three normalized terms: 1 - AUC (0.8), the fraction of
    selected features (0.15), and an exponentially weighted fraction of
    active families (0.05).

    :param inds: GA individuals ([features, family] pairs with .fitness)
    :param max_ones: max number of selected features in this generation
    :param max_family: max number of active families in this generation
    :param auc_func: maps an individual to its AUC score
    :return: None (fitness is written onto each individual)
    """
    auc_w, family_w, ones_w = 0.8, 0.05, 0.15
    errors = [1 - auc_func(ind) for ind in inds]
    worst_error = max(errors)
    for ind, err in zip(inds, errors):
        fit = auc_w * (err / worst_error) + ones_w * (sum(ind[0]) / max_ones)
        fit += family_w * family_exp_weights(sum(ind[1]) / max_family)
        ind.fitness.values = (fit,)
def mutate_family_ind(ind, low, up, indpb_features, indpb_family):
    """Mutate an individual in place by redrawing bits with a given probability.

    :param ind: [feature_vector, family_mask] pair
    :param low: minimum value to draw (0 for binary vectors)
    :param up: maximum value to draw (1 for binary vectors)
    :param indpb_features: per-bit redraw probability for the feature vector
    :param indpb_family: per-bit redraw probability for the family mask
    :return: one-tuple containing the mutated individual (DEAP convention)
    """
    features, family = ind
    for vector, redraw_prob in ((features, indpb_features), (family, indpb_family)):
        for i in range(len(vector)):
            if random.random() < redraw_prob:
                vector[i] = random.randint(low, up)
    return [features, family],
def get_ind_code(ind):
    """Return a hashable cache key for an individual.

    :param ind: [feature_vector, family_mask] pair
    :return: (tuple(features), tuple(family)) used as a fitness-cache key
    """
    return tuple(map(tuple, ind))
def opt_tournament(individuals, k, auc_func, tournsize, fit_attr="fitness"):
    """Tournament selection with lazy fitness evaluation.

    Fitness is computed only for the individuals that are actually sampled
    into a tournament, which avoids evaluating the whole population.

    :param individuals: population
    :param k: how many individuals to select
    :param auc_func: maps an individual to its AUC score
    :param tournsize: tournament size
    :param fit_attr: attribute used to rank contenders
    :return: list of k selected individuals
    """
    max_ones = max(sum(ind[0]) for ind in individuals)
    max_family = max(sum(ind[1]) for ind in individuals)
    winners = []
    for _ in range(k):
        contenders = tools.selRandom(individuals, tournsize)
        set_fits_for_inds(contenders, max_ones, max_family, auc_func)
        winners.append(max(contenders, key=attrgetter(fit_attr)))
    return winners
class FeatureFamilySelectionGA:
    """Genetic algorithm that jointly selects context features and feature families.

    Individuals are pairs ``[feature_vector, family_mask]`` of binary lists.
    Fitness (minimized, DEAP weight -1) blends model error with sparsity
    terms; it is evaluated lazily inside tournament selection and cached
    per individual in ``fitness_dict``.
    """
    def __init__(self, n_generations: int, population_size: int, mu_for_sampling: float, sigma_for_sampling: float,
                 crossover_prob: float, mutation_prob: float, ind_length, family_length, family_intervals,
                 random_state=42):
        """
        :param n_generations: Number of generations to run
        :param population_size: Population Size
        :param mu_for_sampling: Used to initialize a normal distribution
        :param sigma_for_sampling: Used to initialize a normal distribution
        :param crossover_prob: Crossover probability
        :param mutation_prob: Mutation probability
        :param ind_length: Individual length
        :param family_length: Number of feature families (length of the family mask)
        :param family_intervals: Per-family lists of the feature indexes it covers
        :param random_state: Initial random seed
        """
        assert 0 <= crossover_prob <= 1, "ILLEGAL CROSSOVER PROBABILITY"
        assert 0 <= mutation_prob <= 1, "ILLEGAL MUTATION PROBABILITY"
        assert population_size > 0, "Population size must be a positive integer"
        assert n_generations > 0, "Number of generations must be a positive integer"
        assert mu_for_sampling > 0 and sigma_for_sampling > 0, 'Illegal selection params'
        # params
        self.n_generations = n_generations
        self.population_size = population_size
        self.mu_for_sampling = mu_for_sampling
        self.sigma_for_sampling = sigma_for_sampling
        self.crossover_prob = crossover_prob
        self.mutation_prob = mutation_prob
        self.random_state = random_state
        self.family_length = family_length
        self.family_intervals = family_intervals
        # params
        # GA toolbox
        self.toolbox = base.Toolbox()
        # solving for minimum fitness
        creator.create("FitnessMin", base.Fitness, weights=(-1.0,))
        creator.create("Individual", list, fitness=creator.FitnessMin)
        # probability to generate '1' in a random selection
        self.one_prob = None
        # fitness_dict: per-individual metric cache; mask_dicts: per-family-mask
        # precomputed feature masks; pop_dict: population snapshot per generation.
        self.fitness_dict = {}
        self.mask_dicts = {}
        self.pop_dict = {}
        # Per-generation fitness statistics, appended by update_progress_arrays.
        self.std_gen = []
        self.mean_gen = []
        self.median_gen = []
        self.max_gen = []
        self.min_gen = []
        self.time_gen = []
        self.__set_ind_generator(max_ind_size=ind_length, family_size=self.family_length)
        self.toolbox.register("individual", tools.initIterate, creator.Individual, self.toolbox.gen_ind)
        self.toolbox.register("population", tools.initRepeat, list, self.toolbox.individual)
        self.toolbox.register("evaluate", self.evalInd)
        self.toolbox.register("mate", mixedCrossovers)
        self.toolbox.register("mutate", mutate_family_ind, low=0, up=1,
                              indpb_features=0.005, indpb_family=0.01)
        self.toolbox.register("select", opt_tournament, tournsize=5)
    def reset_stats(self):
        """
        Resets saved-metrics.
        :return: None
        """
        self.fitness_dict = {}
        self.pop_dict = {}
        self.std_gen = []
        self.mean_gen = []
        self.median_gen = []
        self.max_gen = []
        self.min_gen = []
        self.time_gen = []
    def save_all_data(self, curr_generation, output_folder):
        """
        Saves metrics for the given generations in a given path.
        :param curr_generation: Current generation (int)
        :param output_folder: folder to save data in.
        :return: None
        """
        # JSON keys must be strings, so the tuple cache keys are stringified.
        written_dict = {str(k): v for k, v in self.fitness_dict.items()}
        with open(output_folder + "fitness_dict.json", 'w') as file:
            json.dump(written_dict, file)
        with open(output_folder + "gens_dict.json", 'w') as file:
            json.dump(self.pop_dict, file)
        # Heartbeat file so a long run can be monitored externally.
        with open(output_folder + "am_alive", "w") as f:
            f.write("Still running at generation :" + str(curr_generation) + "\n")
        save_list(self.std_gen, output_folder + "std.txt")
        save_list(self.mean_gen, output_folder + "mean.txt")
        save_list(self.median_gen, output_folder + "median.txt")
        save_list(self.max_gen, output_folder + "max.txt")
        save_list(self.min_gen, output_folder + "min.txt")
        save_list(self.time_gen, output_folder + "time.txt")
    def __init_normal_dist(self, length_to_gen: int, family_len: int) -> List[list]:
        """
        Creates a random individual with a given length
        :param length_to_gen: How many '1/0' to generate (feature vector length)
        :param family_len: length of the family-mask vector
        :return: A random [feature_vector, family_mask] individual
        """
        family = []
        feature_arr = []
        while len(feature_arr) < length_to_gen:
            feature_arr += [int(random.random() <= self.one_prob)]
        # Family bits start mostly enabled (80% chance of 1).
        family_one_prob = 0.8
        while len(family) < family_len:
            family += [int(random.random() <= family_one_prob)]
        return [feature_arr, family]
    def __set_ind_generator(self, max_ind_size, family_size) -> None:
        """
        Sets the individual generator. This is done by sampling from a normal distribution (from the given __init__
        params) the probability of '1' to generate. For example:
        max_ind_size = 480
        mu = 120
        sigma = 40
        Assume randomly chose 130, then one_probability = 0.2708 = (130 / 480) = (normal_dist_sample / max_ind_size)
        :param max_ind_size: Size of the individual to generate
        :param family_size: length of the family-mask vector
        :return: None
        """
        normal_dist_sample = np.random.normal(self.mu_for_sampling, self.sigma_for_sampling, 1000)
        self.one_prob = random.choice(normal_dist_sample) / max_ind_size
        self.toolbox.register("gen_ind", self.__init_normal_dist, max_ind_size, family_size)
    def __init_seeds(self) -> None:
        """
        Resets seeds back to the initial seed
        :return: None
        """
        # NOTE(review): setting PYTHONHASHSEED after interpreter start does not
        # change str hashing for the current process — confirm intent.
        tf.random.set_seed(self.random_state)
        np.random.seed(self.random_state)
        random.seed(self.random_state)
        os.environ['PYTHONHASHSEED'] = str(self.random_state)
    def learn_only_pred(self, output_folder, context_val, y_val, classifier_path) -> None:
        """
        :param output_folder: Where to write the output to
        :param context_val: Validation Context [matrix]
        :param y_val: Target values for validation
        :param classifier_path: Path to a keras classifier that will be used
        used to load fresh weights for every fitness calculation (for retraining the network from scratch)
        :return: None
        """
        self.reset_stats()
        self.__init_seeds()
        create_output_folder(output_folder)
        assert len(context_val) == len(y_val)
        classifier = load_model(classifier_path)
        get_auc_func = lambda ind: self.only_predict_get_auc(ind, context_val, y_val, classifier)
        self.__run_gens(output_folder, get_auc_func)
    def learn_only_pred_full_arch(self, output_folder: str, val_input, y_val, classifier_path: str) -> None:
        """
        Starts the learning process to the only-predict algorithm
        :param output_folder: Folder to save results in
        :param val_input: Validation Data ([item, user, context] triple)
        :param y_val: Validation Labels
        :param classifier_path: Path to the Keras MLP classifier.
        :return: None
        """
        self.reset_stats()
        self.__init_seeds()
        create_output_folder(output_folder)
        input_size = 3
        # item, user and context (3)
        assert len(val_input) == input_size
        classifier = load_model(classifier_path)
        get_auc_func = lambda ind: self.only_predict_get_auc_full_arch(ind, val_input, y_val, classifier)
        self.__run_gens(output_folder, get_auc_func)
    def learn_full_train(self, output_folder, context_train, y_train, context_val, y_val, classifier_path,
                         fresh_weights_path):
        """
        Starts the learning process for the full-train algorithm
        :param output_folder: Folder to save results in
        :param context_train: Training Data
        :param y_train: Training labels
        :param context_val: Validation Data
        :param y_val: Validation Labels
        :param classifier_path: Path to the keras MLP classifier
        :param fresh_weights_path: Path to fresh-weights (non-trained) for the classifier.
        :return: None
        """
        self.reset_stats()
        self.__init_seeds()
        create_output_folder(output_folder)
        assert len(context_val) == len(y_val) and len(context_train) == len(y_train)
        # classifier = load_model(classifier_path)
        # Re-balance classes: weight positives inversely to their prevalence.
        density = sum(y_train) / len(y_train)
        ones_weight = (1 - density) / density
        zero_weight = 1
        class_weights = {0: zero_weight, 1: ones_weight}
        get_auc_func = lambda ind: self.train_mlp_net_get_auc(ind, context_train, y_train, context_val, y_val,
                                                              classifier_path, fresh_weights_path, class_weights)
        self.__run_gens(output_folder, get_auc_func)
    def transform_array_by_ind_and_family(self, ind, arr):
        """
        Returns the input-arr (training/validation data) after applying the feature-selection via the feature-vector
        and the mask-vector.
        - Selected features are not modified.
        - Non selected features are replaced with 0 (Not removed to retain the architecture input dimensions)
        :param ind: Binary Vector for feature selection & family mask
        :param arr: Feature vector
        :return: The feature vector after performing feature-selection by the family mask and feature-selection vector
        """
        # saving masks, in order to not recalculate it again
        features, family = ind
        code = tuple(family)
        if code in self.mask_dicts:
            mask = self.mask_dicts[code]
        else:
            # Expand the per-family bit to every feature index that family covers.
            mask = np.array([0] * len(features))
            for family_mask_index in range(len(family)):
                family_interval = self.family_intervals[family_mask_index]
                for feature_index in family_interval:
                    mask[feature_index] = family[family_mask_index]
            self.mask_dicts[code] = mask
        # A feature survives only if both its own bit AND its family's bit are 1.
        return np.multiply(np.multiply(features, arr), mask)
    def train_mlp_net_get_auc(self, individual, context_train, y_train, context_val, y_val, classifier_path,
                              fresh_weights, class_weights):
        """
        Retrains the MLP from fresh weights on the masked features and returns
        the validation AUC of the best (val_loss) checkpoint.
        :param individual: Ind to evaluate
        :param context_train: Training data
        :param y_train: Training labels
        :param context_val: Validation Data
        :param y_val: Validation Labels
        :param classifier_path: Path to a MLP classifier
        :param fresh_weights: Path to fresh weights
        :param class_weights: Class weights for training
        :return: AUC Score
        """
        if self.is_in_dict(individual):
            return self.get_fit(individual)
        best_model_path = '../data/expr_running_best_full_train.h5'
        es = EarlyStopping(monitor='val_loss', min_delta=0, patience=5, verbose=0, mode='min')
        mcp = ModelCheckpoint(best_model_path, save_best_only=True, mode='min')
        make_zero = lambda element: self.transform_array_by_ind_and_family(individual, element)
        x_val = np.array(list(map(make_zero, context_val)))
        x_train = np.array(list(map(make_zero, context_train)))
        classifier = load_model(classifier_path)
        classifier.load_weights(fresh_weights)
        classifier.fit(x_train, y_train, validation_data=(x_val, y_val),
                       epochs=100, batch_size=256, verbose=0, callbacks=[es, mcp], class_weight=class_weights)
        best_classifier = load_model(best_model_path)
        val_predict = best_classifier.predict(x_val, batch_size=256)
        res_auc = roc_auc_score(y_val, val_predict)
        self.save_fitness(individual, res_auc)
        # Clearing model, to avoid a memory leak.
        del best_classifier, es, mcp, x_val, x_train, classifier, val_predict
        gc.collect()
        K.clear_session()
        tf.compat.v1.reset_default_graph()
        return res_auc
    def __run_gens(self, outputFolder: str, get_auc_func) -> None:
        """
        Main GA loop: select -> record stats -> crossover -> mutate, per generation.
        :param outputFolder: Folder to save output data
        :param get_auc_func: Function that receives an individual and returns his AUC score
        :return: None
        """
        # Crossover probability & Mutation Probability
        CXPB, MUTPB = self.crossover_prob, self.mutation_prob
        pop = self.toolbox.population(n=self.population_size)
        for g in tqdm(range(0, self.n_generations)):
            start_time = time.time()
            self.pop_dict[g] = [list(x).copy() for x in pop]
            # A new generation
            print("-- Generation %i --" % g)
            # Note : The fitness is calculated inside of the select function, for better performance.
            # Meaning that the fitness is calculated only for the selected individuals in the tournament.
            # Select the next generation individuals
            offspring = self.toolbox.select(pop, len(pop), get_auc_func)
            # Clone the selected individuals
            offspring = list(map(self.toolbox.clone, offspring))
            fits = [ind.fitness.values[0] for ind in offspring]
            self.update_progress_arrays(offspring, fits, g, start_time, outputFolder)
            # Apply crossover and mutation on the offspring
            for child1, child2 in zip(offspring[::2], offspring[1::2]):
                if random.random() < CXPB:
                    self.toolbox.mate(child1, child2)
                    # Invalidate fitness: the children are new individuals.
                    del child1.fitness.values
                    del child2.fitness.values
            for mutant in offspring:
                if random.random() < MUTPB:
                    self.toolbox.mutate(mutant)
                    del mutant.fitness.values
            # Changed to offsprings because fitness changes for the entire generation
            pop[:] = offspring
    def only_predict_get_auc_full_arch(self, individual, input_val, y_val, fully_trained_mlp) -> float:
        """
        Calculates the fitness of a given individual by predicting on the fully trained arch
        :param individual: Ind to predict on
        :param input_val: a tuple of 3 elements - [item_val, user_val, context_val]
        :param y_val: Validation Target
        :param fully_trained_mlp: Fully trained arch classifier
        :return: AUC of the prediction
        """
        if self.is_in_dict(individual):
            return self.get_fit(individual)
        item_val, user_val, con_val = input_val
        # Only the context input is masked; item/user inputs pass through as-is.
        make_zero = lambda element: self.transform_array_by_ind_and_family(individual, element)
        x_cv = np.array(list(map(make_zero, con_val)))
        input_val_ind = [item_val, user_val, x_cv]
        val_predict = fully_trained_mlp.predict(input_val_ind)
        res_auc = roc_auc_score(y_val, val_predict)
        self.save_fitness(individual, res_auc)
        return res_auc
    def only_predict_get_auc(self, individual, context_val, y_val, fully_trained_mlp) -> float:
        """
        Calculates the fitness of a given individual by predicting on the fully trained mlp
        :param individual: Ind to predict on
        :param context_val: Validation context
        :param y_val: Validation Target
        :param fully_trained_mlp: Fully trained classifier
        :return: AUC of the prediction
        """
        if self.is_in_dict(individual):
            return self.get_fit(individual)
        make_zero = lambda element: self.transform_array_by_ind_and_family(individual, element)
        x_cv = np.array(list(map(make_zero, context_val)))
        val_predict = fully_trained_mlp.predict(x_cv)
        res_auc = roc_auc_score(y_val, val_predict)
        self.save_fitness(individual, res_auc)
        return res_auc
    def save_fitness(self, ind, val):
        """
        :param ind: GA individual to save
        :param val: metric to save
        :return: None
        """
        code = get_ind_code(ind)
        self.fitness_dict[code] = val
    def is_in_dict(self, ind) -> bool:
        """
        :param ind: GA individual
        :return: True iff the given individual has a value store in the cache (past metric)
        """
        code = get_ind_code(ind)
        return code in self.fitness_dict
    def get_fit(self, ind):
        """
        :param ind: GA individual
        :return: Saved metric (None if the individual was never evaluated)
        """
        code = get_ind_code(ind)
        if code in self.fitness_dict:
            return self.fitness_dict[code]
    """
    Not used on purpose, evaluation of individuals is done inside the selection function (for performance reasons).
    Instead of evaluating the entire generation, we evaluate only the individuals that were randomly
    selected in the tournament selection, thus saving unnecessary training of the MLP network .
    """
    def evalInd(self, individual, METRIC):
        # Intentionally a no-op; see the note above.
        pass
    def update_progress_arrays(self, pop, fits, g, gen_start, outputpath):
        """
        Records per-generation fitness statistics and persists them to disk.
        :param pop: current generation of GA individuals
        :param fits: list of fitness scores
        :param g: current generation number
        :param gen_start: time that this generation started
        :param outputpath: output to save the data
        :return: None
        """
        length = len(pop)
        mean = sum(fits) / length
        # Population std via E[x^2] - E[x]^2.
        sum2 = sum(x * x for x in fits)
        std = abs(sum2 / length - mean ** 2) ** 0.5
        min_feat = min(fits)
        max_feat = max(fits)
        median_feat = np.median(fits)
        self.min_gen += [min_feat]
        self.max_gen += [max_feat]
        self.mean_gen += [mean]
        self.std_gen += [std]
        self.median_gen += [median_feat]
        self.time_gen += [time.time() - gen_start]
        self.save_all_data(g, outputpath)
def get_train_test_val_split(arr):
    """Chronological split (no shuffling): ~70% train, ~20% test, ~10% val.

    :param arr: array-like to split
    :return: (train, test, val) partitions
    """
    train_part, holdout = train_test_split(arr, test_size=0.3, shuffle=False)
    val_part, test_part = train_test_split(holdout, test_size=0.66, shuffle=False)
    return train_part, test_part, val_part
if __name__ == '__main__':
    # Family intervals for dataset
    family_intervals_ = read_dict('../data_hero_ds/family_intervals.json')
    index_to_name = read_dict('../data_hero_ds/Family_Indexes.json')
    # Order the per-family feature-index lists so that position i of the
    # family mask maps to index_to_interval[i].
    index_to_interval = []
    for key in index_to_name.keys():
        index_to_interval += [family_intervals_[index_to_name[key]]]
    # GA hyper-parameters for this run.
    n_generations_ = 300
    n_population_size = 100
    sigma_for_init = 40
    CXPB_ = 0.65
    MUTPB_ = 1
    ind_length_ = 661  # total number of contextual features in this dataset
    number_of_families = len(family_intervals_.keys())
    mu_for_init = 0.25 * ind_length_  # expect ~25% of features active at init
    fs = FeatureFamilySelectionGA(n_generations_, n_population_size, mu_for_init, sigma_for_init, CXPB_, MUTPB_,
                                  ind_length_, number_of_families, index_to_interval)
    context_train_, _, context_val_ = get_train_test_val_split(np.load('../data_hero_ds/X.npy'))
    y_train_, _, y_val_ = get_train_test_val_split(np.load('../data_hero_ds/y.npy'))
    assert len(context_train_) == len(y_train_) and len(context_val_) == len(y_val_)
    mlp_path = '../fresh_mlp_data/HEARO_fresh_mlp_200_100_50_1.h5'
    fresh_weights_ = '../fresh_mlp_data/HEARO_fresh_weights_200_100_50_1.h5'
    output_folder_ = '../RESULTS_HERO_DS/GA_FAMILY/GA_RES_20_5_FULL_TRAIN/'
    fs.learn_full_train(output_folder_, context_train_, y_train_, context_val_, y_val_, mlp_path, fresh_weights_)
| 26,115 | 34.196765 | 118 | py |
GA_CARS_2020 | GA_CARS_2020-master/Feature_Selection_Baselines.py | import os
import tensorflow as tf
import numpy as np
import random
from keras import Sequential
from keras.layers import Dense
from keras.callbacks import EarlyStopping, ModelCheckpoint
from keras.models import load_model
from sklearn.metrics import roc_auc_score
from tqdm import tqdm
from sklearn.model_selection import train_test_split
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"  # The GPU id to use, usually either "0" or "1";
os.environ["CUDA_VISIBLE_DEVICES"] = "1"  # Do other imports now...
# Let TensorFlow grow GPU memory on demand instead of reserving it all upfront.
config = tf.compat.v1.ConfigProto()
config.gpu_options.allow_growth = True
sess = tf.compat.v1.Session(config=config)
# Global random seed used by init_seeds().
seed_value = 42
def get_mlp_model(save_initial_weights_path=None, save_fresh_mlp_path=None, input_shape=480):
    """
    Build and compile a fresh 200-100-50-1 MLP for binary classification.

    :param save_initial_weights_path: if given, persist the untrained weights here
    :param save_fresh_mlp_path: if given, persist the full untrained model here
    :param input_shape: number of input features
    :return: the compiled Keras model
    """
    model = Sequential()
    model.add(Dense(200, activation='relu', kernel_initializer='random_normal', input_dim=input_shape))
    model.add(Dense(100, activation='relu', kernel_initializer='random_normal', input_dim=200))
    model.add(Dense(50, activation='relu', kernel_initializer='random_normal', input_dim=100))
    model.add(Dense(1, activation='sigmoid', kernel_initializer='random_normal'))
    model.compile(optimizer='ADAM', loss='binary_crossentropy', metrics=['mse', 'accuracy'])
    if save_initial_weights_path is not None:
        model.save_weights(save_initial_weights_path)
    if save_fresh_mlp_path is not None:
        model.save(save_fresh_mlp_path)
    return model
def init_seeds():
    """Seed Python, NumPy and TensorFlow RNGs with the module-level seed_value."""
    # NOTE(review): setting PYTHONHASHSEED after interpreter start does not
    # affect str hashing in this process — confirm intent.
    os.environ['PYTHONHASHSEED'] = str(seed_value)
    random.seed(seed_value)
    np.random.seed(seed_value)
    tf.random.set_seed(seed_value)
def reduce_by_indexes(arr, indexes):
    """
    Zero out every feature of *arr* whose position is not in *indexes*.

    :param arr: Context-Feature vector
    :param indexes: positions of chosen features
    :return: np.ndarray of the same length with non-selected entries set to 0
    """
    # Hoist membership into a set once: O(1) lookups instead of an O(len(indexes))
    # list scan per element.
    selected = set(indexes)
    return np.array([value if i in selected else 0 for i, value in enumerate(arr)])
def reduce_all_by_indexes(matrix, indexes):
    """
    Apply reduce_by_indexes to every row of the matrix.

    :param matrix: matrix of contextual features
    :param indexes: indexes of chosen features
    :return: np.ndarray of reduced rows
    """
    return np.array([reduce_by_indexes(row, indexes) for row in matrix])
def run_feed_forward_selection(context_train, context_val, class_weights, model, fresh_weights_path, y_train, y_val,
                               alive_path):
    """
    Greedy forward feature selection: repeatedly add the single feature whose
    inclusion yields the best validation AUC of a freshly retrained MLP; stop
    when no candidate improves the best score seen so far.

    :param context_train: training data
    :param context_val: validation data
    :param class_weights: class-weights for training
    :param model: Model for evaluation
    :param fresh_weights_path: Fresh Weights for initialization
    :param y_train: training labels
    :param y_val: validation labels
    :param alive_path: Path to write intermediate results to
    :return: Selected indexes
    """
    selected_features = []
    sample = context_train[0]
    best_score = -float('inf')
    while True:
        print(f"Starting iteration : Best AUC {best_score} , len(featurs) = {len(selected_features)}")
        best_feature = -1
        current_size = len(selected_features)
        for i in tqdm(range(len(sample))):
            # Persist progress so a crashed/killed run can be inspected.
            np.save(alive_path, np.array(selected_features))
            model.load_weights(fresh_weights_path)
            assert len(selected_features) == current_size
            if i in selected_features:
                continue
            current_features = selected_features.copy()
            current_features += [i]
            assert len(current_features) == current_size + 1
            x_train = reduce_all_by_indexes(context_train, current_features)
            x_val = reduce_all_by_indexes(context_val, current_features)
            es = EarlyStopping(monitor='val_loss', min_delta=0, patience=5, verbose=0, mode='min')
            mcp = ModelCheckpoint('../data_cars_ds/FF_best_mlp_feature_selection.h5', save_best_only=True, mode='min')
            # BUG FIX: Keras Model.fit expects `class_weight` (singular); the old
            # `class_weights=` keyword raised a TypeError on every call.
            model.fit(x_train, y_train, validation_data=(x_val, y_val),
                      epochs=100, batch_size=256, verbose=0, callbacks=[es, mcp], class_weight=class_weights)
            model = load_model('../data_cars_ds/FF_best_mlp_feature_selection.h5')
            val_predict = model.predict(x_val, batch_size=256)
            current_auc = roc_auc_score(y_val, val_predict)
            if current_auc > best_score:
                print(f"{current_auc} took over {best_score} , with index {i}")
                best_feature = i
                best_score = current_auc
            del es, mcp, current_features
        # no improvement from any candidate -> greedy search is done
        if best_feature == -1:
            break
        selected_features.append(best_feature)
    return selected_features
def run_backwards_elimination_selection(context_train, context_val, class_weights, model, fresh_weights_path, y_train,
                                        y_val, alive_path):
    """
    Greedy backward elimination: start from all features and repeatedly remove
    the single feature whose removal yields the best validation AUC of a freshly
    retrained MLP; stop when no removal improves the best score.

    :param context_train: training data
    :param context_val: validation data
    :param class_weights: class-weights for training
    :param model: Model for evaluation
    :param fresh_weights_path: Fresh Weights for initialization
    :param y_train: training labels
    :param y_val: validation labels
    :param alive_path: Path to write intermediate results to
    :return: Selected indexes
    """
    sample = context_train[0]
    dims = len(sample)
    selected_features = list(range(dims))
    best_score = -float('inf')
    while True:
        print(f"Starting iteration : Best AUC {best_score} , len(featurs) = {len(selected_features)}")
        worst_feature = -1
        current_size = len(selected_features)
        for i in tqdm(range(len(sample))):
            # Persist progress so a crashed/killed run can be inspected.
            np.save(alive_path, np.array(selected_features))
            model.load_weights(fresh_weights_path)
            assert len(selected_features) == current_size
            if i not in selected_features:
                continue
            current_features = selected_features.copy()
            current_features.remove(i)
            assert len(current_features) == current_size - 1
            x_train = reduce_all_by_indexes(context_train, current_features)
            x_val = reduce_all_by_indexes(context_val, current_features)
            es = EarlyStopping(monitor='val_loss', min_delta=0, patience=5, verbose=0, mode='min')
            mcp = ModelCheckpoint('../data_cars_ds/BE_best_mlp_feature_selection.h5', save_best_only=True, mode='min')
            # BUG FIX: Keras Model.fit expects `class_weight` (singular); the old
            # `class_weights=` keyword raised a TypeError on every call.
            model.fit(x_train, y_train, validation_data=(x_val, y_val),
                      epochs=100, batch_size=256, verbose=0, callbacks=[es, mcp], class_weight=class_weights)
            model = load_model('../data_cars_ds/BE_best_mlp_feature_selection.h5')
            val_predict = model.predict(x_val, batch_size=256)
            current_auc = roc_auc_score(y_val, val_predict)
            if current_auc > best_score:
                print(f"{current_auc} took over {best_score} , with index {i}")
                worst_feature = i
                best_score = current_auc
            del es, mcp, current_features
        # no improvement from any removal -> elimination is done
        if worst_feature == -1:
            break
        selected_features.remove(worst_feature)
    return selected_features
def get_train_test_val_split(arr):
    """Chronological split (no shuffling): ~70% train, ~20% test, ~10% val.

    :param arr: array-like to split
    :return: (train, test, val) partitions
    """
    train_part, holdout = train_test_split(arr, test_size=0.3, shuffle=False)
    val_part, test_part = train_test_split(holdout, test_size=0.66, shuffle=False)
    return train_part, test_part, val_part
if __name__ == '__main__':
    init_seeds()
    Y_train, _, Y_val = get_train_test_val_split(np.load('../data_hero_ds/y.npy'))
    X_train, _, X_val = get_train_test_val_split(np.load('../data_hero_ds/X.npy'))
    # Weight the positive class inversely to its prevalence to counter imbalance.
    density = sum(Y_train) / len(Y_train)
    ones_weight = (1 - density) / density
    zero_weight = 1
    print(ones_weight, density)
    class_Weights = {0: zero_weight, 1: ones_weight}
    fresh_weights_Path = '../feature_selections/FF_mlp_initial.h5'
    model_ = get_mlp_model(fresh_weights_Path, None, len(X_train[0]))
    save_path = '../feature_selections/FF_selection.npy'
    # Run greedy forward selection; progress is checkpointed to save_path.
    features = run_feed_forward_selection(X_train, X_val, class_Weights, model_, fresh_weights_Path, Y_train,
                                          Y_val, save_path)
    np.save(save_path, features)
| 8,391 | 33.821577 | 118 | py |
GA_CARS_2020 | GA_CARS_2020-master/base_solution.py | import gc
import json
import os
import random
import time
from operator import attrgetter
from typing import List
import keras.backend as K
import numpy as np
import tensorflow as tf
from deap import base
from deap import creator
from deap import tools
from keras.callbacks import EarlyStopping, ModelCheckpoint
from keras.models import load_model
from sklearn.metrics import roc_auc_score
from sklearn.model_selection import cross_val_score, train_test_split
from tqdm import tqdm
# Enable tqdm's pandas integration (progress_apply etc.).
tqdm.pandas()
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"  # The GPU id to use, usually either "0" or "1";
os.environ["CUDA_VISIBLE_DEVICES"] = "1"  # Do other imports now...
# Let TensorFlow grow GPU memory on demand instead of reserving it all upfront.
config = tf.compat.v1.ConfigProto()
config.gpu_options.allow_growth = True
sess = tf.compat.v1.Session(config=config)
def create_output_folder(path: str) -> None:
    """
    Creates an output folder in the given path.

    Improvement over the previous os.mkdir + swallowed FileExistsError:
    makedirs also creates missing parent directories, exist_ok=True makes
    the call idempotent, and an existing non-directory at *path* now raises
    instead of being silently accepted.
    :param path: directory path to create
    :return: None
    """
    os.makedirs(path, exist_ok=True)
def read_dict(name: str):
    """
    :param name: path to a json dict
    :return: the parsed JSON object (Dict)
    """
    with open(name, 'r') as handle:
        raw_text = handle.read()
    return json.loads(raw_text)
def opt_tournament(individuals, k, auc_func, tournsize, fit_attr="fitness"):
    """
    Tournament selection that evaluates fitness lazily: only the randomly
    drawn tournament contenders are scored, not the whole population.
    :param individuals: Population
    :param k: How many individuals to select
    :param auc_func: function mapping an individual to its AUC score
    :param tournsize: How many individuals per tournament
    :param fit_attr: attribute used to rank contenders
    :return: list of k selected individuals
    """
    winners = []
    # largest number of selected features in the population (fitness normalizer)
    top_ones = max(sum(ind) for ind in individuals)
    for _ in range(k):
        contenders = tools.selRandom(individuals, tournsize)
        set_fits_for_inds(contenders, top_ones, auc_func)
        winners.append(max(contenders, key=attrgetter(fit_attr)))
    return winners
def single_point_mutation(individual):
    """
    Currently not used.
    Flips one randomly chosen bit of the individual in place.
    :param individual: GA Individual (binary gene list)
    :return: one-tuple containing the mutated individual
    """
    position = random.randint(0, len(individual) - 1)
    # abs(g - 1) toggles a 0/1 gene
    individual[position] = abs(individual[position] - 1)
    return individual,
def generate_child(ind1, ind2):
    """
    Uniform crossover: each gene is copied from parent 1 with probability 0.5,
    otherwise from parent 2 (parents are assumed to have equal length).
    :param ind1: GA Parent 1
    :param ind2: GA Parent 2
    :return: new child gene list
    """
    genes = []
    for position in range(len(ind1)):
        donor = ind1 if random.random() < 0.5 else ind2
        genes.append(donor[position])
    return genes
# Uniform ("n point") crossover producing two independently sampled children.
def n_point_crossover(ind1, ind2):
    """Return two children, each drawn gene-by-gene from the two parents."""
    first = generate_child(ind1, ind2)
    second = generate_child(ind1, ind2)
    return first, second
def five_point_crossover(ind1, ind2):
    """
    5 Point crossover between two individuals (in place).
    Four distinct cut points are drawn; the two enclosed slices are
    exchanged between the parents.
    :param ind1: Parent-1
    :param ind2: Parent-2
    :return: The two modified parents
    """
    cut_a, cut_b, cut_c, cut_d = sorted(random.sample(range(1, len(ind1)), 4))
    ind1[cut_a:cut_b], ind2[cut_a:cut_b] = ind2[cut_a:cut_b], ind1[cut_a:cut_b]
    ind1[cut_c:cut_d], ind2[cut_c:cut_d] = ind2[cut_c:cut_d], ind1[cut_c:cut_d]
    return ind1, ind2
def mixedCrossovers(ind1, ind2):
    """
    Crossover used in this Expr.
    50% chance for a 5-point crossover and 50% chance for n-point crossover.
    Notice that family-vector is not used in the crossover (stays as it was).
    :param ind1: Parent-1
    :param ind2: Parent-2
    :return: the two offspring
    """
    # single coin flip decides which crossover operator is applied
    if random.random() < 0.5:
        return five_point_crossover(ind1, ind2)
    return n_point_crossover(ind1, ind2)
def save_list(lst, path):
    """
    Saves a list in text-form (one element per line), so results can be
    read mid-expr.
    :param lst: Some list
    :param path: path to save
    :return: None
    """
    with open(path, "w") as handle:
        handle.writelines(str(item) + "\n" for item in lst)
def set_fits_for_inds(inds, max_ones, auc_func):
    """
    Sets the fitness for the given generation of individuals.
    The minimized score blends (1 - AUC), normalized by the worst value in
    the group, with the relative number of selected features.
    :param inds: GA Individuals
    :param max_ones: Max number of 1's in the given generation
    :param auc_func: Function that maps inds to their AUC score.
    :return: None
    """
    auc_weight = 0.8
    metrics = [1 - auc_func(ind) for ind in inds]
    top_metric = max(metrics)
    for ind, metric in zip(inds, metrics):
        score = auc_weight * (metric / top_metric) + (1 - auc_weight) * (sum(ind) / max_ones)
        ind.fitness.values = (score,)
class FeatureSelectionGA:
    """GA wrapper (DEAP-based) that evolves binary feature-selection masks.

    Fitness combines validation AUC with the number of selected features
    (see ``set_fits_for_inds``); the search minimizes the blended score.
    Fitness values are memoized in ``fitness_dict`` so an identical mask is
    never evaluated (i.e. trained) twice.
    """

    def __init__(self, n_generations: int, population_size: int, mu_for_sampling: float, sigma_for_sampling: float,
                 crossover_prob: float, mutation_prob: float, ind_length, random_state=42):
        """
        :param n_generations: Number of generations to run
        :param population_size: Population Size
        :param mu_for_sampling: Used to initialize a normal distribution
        :param sigma_for_sampling: Used to initialize a normal distribution
        :param crossover_prob: Crossover probability
        :param mutation_prob: Mutation probability
        :param ind_length: Individual length
        :param random_state: Initial random seed
        """
        assert 0 <= crossover_prob <= 1, "ILLEGAL CROSSOVER PROBABILITY"
        assert 0 <= mutation_prob <= 1, "ILLEGAL MUTATION PROBABILITY"
        assert population_size > 0, "Population size must be a positive integer"
        assert n_generations > 0, "Number of generations must be a positive integer"
        assert mu_for_sampling > 0 and sigma_for_sampling > 0, 'Illegal selection params'
        # params
        self.n_generations = n_generations
        self.population_size = population_size
        self.mu_for_sampling = mu_for_sampling
        self.sigma_for_sampling = sigma_for_sampling
        self.crossover_prob = crossover_prob
        self.mutation_prob = mutation_prob
        self.random_state = random_state
        # params
        # GA toolbox
        self.toolbox = base.Toolbox()
        # Trained mlp, used in 'only_pred' method
        self.fully_trained_mlp = None
        # solving for minimum fitness
        creator.create("FitnessMin", base.Fitness, weights=(-1.0,))
        creator.create("Individual", list, fitness=creator.FitnessMin)
        # probability to generate '1' in a random selection
        self.one_prob = None
        # memoization cache: tuple(mask) -> AUC, plus per-generation metrics
        self.fitness_dict = {}
        self.pop_dict = {}
        self.std_gen = []
        self.mean_gen = []
        self.median_gen = []
        self.max_gen = []
        self.min_gen = []
        self.time_gen = []
        self.__set_ind_generator(max_ind_size=ind_length)
        self.toolbox.register("individual", tools.initIterate, creator.Individual, self.toolbox.gen_ind)
        self.toolbox.register("population", tools.initRepeat, list, self.toolbox.individual)
        self.toolbox.register("evaluate", self.evalInd)
        self.toolbox.register("mate", mixedCrossovers)
        self.toolbox.register("mutate", tools.mutUniformInt, low=0, up=1,
                              indpb=0.005) # between 0 and 1 , with probability of 0.005
        self.toolbox.register("select", opt_tournament, tournsize=5)

    def reset_stats(self):
        """
        Resets saved-metrics.
        :return: None
        """
        self.fitness_dict = {}
        self.pop_dict = {}
        self.std_gen = []
        self.mean_gen = []
        self.median_gen = []
        self.max_gen = []
        self.min_gen = []
        self.time_gen = []

    def save_all_data(self, curr_generation, output_folder):
        """
        Saves metrics for the given generations in a given path.
        :param curr_generation: Current generation (int)
        :param output_folder: folder to save data in.
        :return: None
        """
        written_dict = {str(k): v for k, v in self.fitness_dict.items()}
        with open(output_folder + "fitness_dict.json", 'w') as file:
            json.dump(written_dict, file)
        with open(output_folder + "gens_dict.json", 'w') as file:
            json.dump(self.pop_dict, file)
        with open(output_folder + "am_alive", "w") as f:
            f.write("Still running at generation :" + str(curr_generation) + "\n")
        save_list(self.std_gen, output_folder + "std.txt")
        save_list(self.mean_gen, output_folder + "mean.txt")
        save_list(self.median_gen, output_folder + "median.txt")
        save_list(self.max_gen, output_folder + "max.txt")
        save_list(self.min_gen, output_folder + "min.txt")
        save_list(self.time_gen, output_folder + "time.txt")

    def __init_normal_dist(self, length_to_gen: int) -> List[int]:
        """
        Creates a random individual with a given length
        :param length_to_gen: How many '1/0' to generate
        :return: A random binary array of the given size
        """
        ret_arr = []
        while len(ret_arr) < length_to_gen:
            ret_arr += [int(random.random() <= self.one_prob)]
        return ret_arr

    def __set_ind_generator(self, max_ind_size) -> None:
        """
        Sets the individual generator. This is done by sampling from a normal distribution (from the given __init__
        params) the probability of '1' to generate. For example:
        max_ind_size = 480
        mu = 120
        sigma = 40
        Assuming 130 was randomly chosen , then one_probability = 0.2708 = (130 / 480) = (normal_dist_sample /
        max_ind_size)
        :param max_ind_size: Size of the individual to generate
        :return: None
        """
        normal_dist_sample = np.random.normal(self.mu_for_sampling, self.sigma_for_sampling, 1000)
        self.one_prob = random.choice(normal_dist_sample) / max_ind_size
        self.toolbox.register("gen_ind", self.__init_normal_dist, max_ind_size)

    def __init_seeds(self) -> None:
        """
        Resets seeds back to the initial seed
        :return: None
        """
        tf.random.set_seed(self.random_state)
        np.random.seed(self.random_state)
        random.seed(self.random_state)
        os.environ['PYTHONHASHSEED'] = str(self.random_state)

    def learn_only_pred(self, output_folder, context_val, y_val, classifier_path) -> None:
        """
        :param output_folder: Where to write the output to
        :param context_val: Validation Context [matrix]
        :param y_val: Target values for validation
        :param classifier_path: Path to a keras classifier that will be used
        used to load fresh weights for every fitness calculation (for retraining the network from scratch)
        :return:
        """
        self.reset_stats()
        self.__init_seeds()
        create_output_folder(output_folder)
        assert len(context_val) == len(y_val)
        classifier = load_model(classifier_path)
        get_auc_func = lambda ind: self.only_predict_get_auc(ind, context_val, y_val, classifier)
        self.__run_gens(output_folder, get_auc_func)

    def learn_only_pred_full_arch(self, output_folder: str, val_input, y_val, classifier_path: str) -> None:
        """Same as ``learn_only_pred`` but for the 3-input (item/user/context) arch."""
        self.reset_stats()
        self.__init_seeds()
        create_output_folder(output_folder)
        input_size = 3
        # item, user and context (3)
        assert len(val_input) == input_size
        classifier = load_model(classifier_path)
        get_auc_func = lambda ind: self.only_predict_get_auc_full_arch(ind, val_input, y_val, classifier)
        self.__run_gens(output_folder, get_auc_func)

    def learn_full_train(self, output_folder, context_train, y_train, context_val, y_val, classifier_path,
                         fresh_weights_path):
        """
        Starts the learning process for the full-train algorithm
        :param output_folder: Folder to save results in
        :param context_train: Training Data
        :param y_train: Training labels
        :param context_val: Validation Data
        :param y_val: Validation Labels
        :param classifier_path: Path to the keras MLP classifier
        :param fresh_weights_path: Path to fresh-weights (non-trained) for the classifier.
        :return: None
        """
        self.reset_stats()
        self.__init_seeds()
        create_output_folder(output_folder)
        assert len(context_val) == len(y_val) and len(context_train) == len(y_train)
        # inverse-frequency class weighting for the positive class
        density = sum(y_train) / len(y_train)
        ones_weight = (1 - density) / density
        zero_weight = 1
        class_weights = {0: zero_weight, 1: ones_weight}
        get_auc_func = lambda ind: self.train_mlp_net_get_auc(ind, context_train, y_train, context_val, y_val,
                                                              classifier_path, fresh_weights_path, class_weights)
        self.__run_gens(output_folder, get_auc_func)

    def learn_full_train_sklearn_model(self, output_folder, X, y, model_class, params, cv_split=3):
        """Full-train search where fitness is cross-validated AUC of a sklearn-style model."""
        self.reset_stats()
        self.__init_seeds()
        create_output_folder(output_folder)
        assert len(X) == len(y)
        get_auc_func = lambda ind: self.train_model_get_auc(ind, X, y, model_class, params, cv_split)
        self.__run_gens(output_folder, get_auc_func)

    def train_mlp_net_get_auc(self, individual, context_train, y_train, context_val, y_val, classifier_path,
                              fresh_weights, class_weights):
        """
        Trains an MLP network and returns the resulting AUC
        :param individual: Ind to evaluate
        :param context_train: Training data
        :param y_train: Training labels
        :param context_val: Validation Data
        :param y_val: Validation Labels
        :param classifier_path: Path to a MLP classifier
        :param fresh_weights: Path to fresh weights
        :param class_weights: Class weights for training
        :return: AUC Score
        """
        # cache hit: this exact mask was already trained and scored
        if self.is_in_dict(individual):
            return self.get_fit(individual)
        best_model_path = '../data/expr_running_best.h5'
        es = EarlyStopping(monitor='val_loss', min_delta=0, patience=5, verbose=0, mode='min')
        mcp = ModelCheckpoint(best_model_path, save_best_only=True, mode='min')
        npIndividual = np.array(individual)
        # zero-out the features the mask deselects
        make_zero = lambda element: np.multiply(npIndividual, element)
        x_val = np.array(list(map(make_zero, context_val)))
        x_train = np.array(list(map(make_zero, context_train)))
        classifier = load_model(classifier_path)
        classifier.load_weights(fresh_weights)
        classifier.fit(x_train, y_train, validation_data=(x_val, y_val),
                       epochs=100, batch_size=256, verbose=0, callbacks=[es, mcp], class_weight=class_weights)
        best_classifier = load_model(best_model_path)
        val_predict = best_classifier.predict(x_val, batch_size=256)
        res_auc = roc_auc_score(y_val, val_predict)
        self.save_fitness(individual, res_auc)
        # free keras/TF state so repeated trainings do not leak memory
        del best_classifier, es, mcp, x_val, x_train, classifier, val_predict
        gc.collect()
        K.clear_session()
        tf.compat.v1.reset_default_graph()
        return res_auc

    def train_model_get_auc(self, individual, X, y, model_class, model_params, cv_split=3):
        """
        Trains a given model class and returns the resulting AUC
        :param model_class: Class of model to create
        :param model_params: Params for the constructed model
        :param cv_split: Cross validation folds
        :param individual: Ind to evaluate
        :param X: Training data
        :param y: Training labels
        :return: AUC Score
        """
        model = model_class(**model_params)
        if self.is_in_dict(individual):
            return self.get_fit(individual)
        npIndividual = np.array(individual)
        make_zero = lambda element: np.multiply(npIndividual, element)
        x_selection = np.array(list(map(make_zero, X)))
        scores = cross_val_score(model, x_selection, y, cv=cv_split, scoring='roc_auc')
        auc_score = np.mean(scores)
        self.save_fitness(individual, auc_score)
        del model, npIndividual, x_selection
        return auc_score

    def __run_gens(self, outputFolder, get_auc_func):
        """
        :param outputFolder: Folder to save output data
        :param get_auc_func: Function that receives an individual and returns his AUC score
        :return: None
        """
        CXPB, MUTPB = self.crossover_prob, self.mutation_prob
        pop = self.toolbox.population(n=self.population_size)
        start_time = time.time()
        for g in tqdm(range(0, self.n_generations)):
            self.pop_dict[g] = [list(x).copy() for x in pop]
            # A new generation
            print("-- Generation %i --" % g)
            # Note : The fitness is calculated inside of the select function, for better performance.
            # Meaning that the fitness is calculated only for the selected individuals in the tournament.
            # Select the next generation individuals
            offspring = self.toolbox.select(pop, len(pop), get_auc_func)
            # Clone the selected individuals
            offspring = list(map(self.toolbox.clone, offspring))
            fits = [ind.fitness.values[0] for ind in offspring]
            self.update_progress_arrays(offspring, fits, g, start_time, outputFolder)
            # Apply crossover and mutation on the offspring
            for child1, child2 in zip(offspring[::2], offspring[1::2]):
                if random.random() < CXPB:
                    self.toolbox.mate(child1, child2)
                    del child1.fitness.values
                    del child2.fitness.values
            for mutant in offspring:
                if random.random() < MUTPB:
                    self.toolbox.mutate(mutant)
                    del mutant.fitness.values
            # Changed to offsprings because fitness changes for the entire generation
            pop[:] = offspring

    def only_predict_get_auc_full_arch(self, individual, input_val, y_val, fully_trained_mlp) -> float:
        """
        Calculates the fitness of a given individual by predicting on the fully trained arch
        :param individual: Ind to predict on
        :param input_val: a tuple of 3 elements - [item_val, user_val, context_val]
        :param y_val: Validation Target
        :param fully_trained_mlp: Fully trained arch classifier
        :return: AUC of the prediction
        """
        if self.is_in_dict(individual):
            return self.get_fit(individual)
        item_val, user_val, con_val = input_val
        npIndividual = np.array(individual)
        make_zero = lambda element: np.multiply(npIndividual, element)
        x_cv = np.array(list(map(make_zero, con_val)))
        input_val_ind = [item_val, user_val, x_cv]
        val_predict = fully_trained_mlp.predict(input_val_ind)
        res_auc = roc_auc_score(y_val, val_predict)
        self.save_fitness(individual, res_auc)
        return res_auc

    def only_predict_get_auc(self, individual, context_val, y_val, fully_trained_mlp) -> float:
        """
        Calculates the fitness of a given individual by predicting on the fully trained mlp
        :param individual: Ind to predict on
        :param context_val: Validation context
        :param y_val: Validation Target
        :param fully_trained_mlp: Fully trained classifier
        :return: AUC of the prediction
        """
        if self.is_in_dict(individual):
            return self.get_fit(individual)
        npIndividual = np.array(individual)
        make_zero = lambda element: np.multiply(npIndividual, element)
        x_cv = np.array(list(map(make_zero, context_val)))
        val_predict = fully_trained_mlp.predict(x_cv)
        res_auc = roc_auc_score(y_val, val_predict)
        self.save_fitness(individual, res_auc)
        return res_auc

    def save_fitness(self, ind, val):
        """
        :param ind: GA individual to save
        :param val: metric to save
        :return: None
        """
        code = tuple(ind)
        self.fitness_dict[code] = val

    def is_in_dict(self, ind):
        """
        :param ind: GA individual
        :return: True iff the given individual has a value store in the cache (past metric)
        """
        code = tuple(ind)
        return code in self.fitness_dict

    def get_fit(self, ind):
        """
        :param ind: GA individual
        :return: Saved metric
        """
        code = tuple(ind)
        if code in self.fitness_dict:
            return self.fitness_dict[code]

    """
    Not used on purpose, evaluation of individuals is done inside the selection function (for performance reasons).
    Instead of evaluating the entire generation, we evaluate only the individuals that were randomly
    selected in the tournament selection, thus saving unnecessary training of the MLP network .
    """

    def evalInd(self, individual, METRIC):
        pass

    def update_progress_arrays(self, pop, fits, g, gen_start, outputpath):
        """
        :param pop: current generation of GA individuals
        :param fits: list of fitness scores
        :param g: current generation number
        :param gen_start: time that this generation started
        :param outputpath: output to save the data
        :return: None
        """
        length = len(pop)
        mean = sum(fits) / length
        sum2 = sum(x * x for x in fits)
        std = abs(sum2 / length - mean ** 2) ** 0.5
        min_feat = min(fits)
        max_feat = max(fits)
        median_feat = np.median(fits)
        self.min_gen += [min_feat]
        self.max_gen += [max_feat]
        self.mean_gen += [mean]
        self.std_gen += [std]
        self.median_gen += [median_feat]
        self.time_gen += [time.time() - gen_start]
        self.save_all_data(g, outputpath)
def get_train_test_val_split(arr):
    """
    Sequentially split *arr* into train (~70%), test (~20%) and val (~10%)
    chunks; shuffle=False keeps the original row order deterministic.
    :param arr: array-like to split
    :return: (train, test, val) tuple
    """
    train_part, holdout = train_test_split(arr, test_size=0.3, shuffle=False)
    val_part, test_part = train_test_split(holdout, test_size=0.66, shuffle=False)
    return train_part, test_part, val_part
if __name__ == '__main__':
    # Entry point: configure and run the GA feature-selection search with the
    # "full train" fitness (the MLP is retrained from fresh weights per mask).
    n_generations_ = 300
    n_population_size = 100
    sigma_for_init = 40
    CXPB_ = 0.65
    MUTPB_ = 1
    ind_length_ = 661
    # expected initial density of selected features: ~25% of the mask
    mu_for_init = 0.25 * ind_length_
    fs = FeatureSelectionGA(n_generations_, n_population_size, mu_for_init, sigma_for_init, CXPB_, MUTPB_,
                            ind_length_)
    context_train_, _, context_val_ = get_train_test_val_split(np.load('../data_hero_ds/X.npy'))
    y_train_, _, y_val_ = get_train_test_val_split(np.load('../data_hero_ds/y.npy'))
    assert len(context_train_) == len(y_train_) and len(context_val_) == len(y_val_)
    mlp_path = '../fresh_mlp_data/HEARO_fresh_mlp_200_100_50_1.h5'
    fresh_weights_ = '../fresh_mlp_data/HEARO_fresh_weights_200_100_50_1.h5'
    output_folder_ = '../RESULTS_HERO_DS/GA_FAMILY/GA_RES_20_5_FULL_TRAIN/'
    fs.learn_full_train(output_folder_, context_train_, y_train_, context_val_, y_val_, mlp_path, fresh_weights_)
| 23,227 | 34.462595 | 115 | py |
GRAND-plus | GRAND-plus-main/model_mag.py | from __future__ import division
from __future__ import print_function
import sys
import time
import argparse
import numpy as np
import scipy.sparse as sp
from precompute import propagation
import torch
import torch.nn.functional as F
import torch.nn as nn
import torch.optim as optim
from torch_scatter import scatter
from utils.data_loader import load_data, accuracy
class MLP(nn.Module):
    """MLP over sparse bag-of-words node attributes (MAG-scale variant).

    Node features are looked up through an nn.Embedding and aggregated with
    torch_scatter (see ``emb``), so the sparse attribute matrix never has to
    be densified; ``forward`` maps the aggregated embeddings to class logits.
    NOTE(review): hidden layers and the embedding output are moved to the
    default GPU with .cuda(), so this module assumes CUDA is available.
    """
    def __init__(self, num_features, num_classes, hidden_size, nlayers, use_bn, input_dropout, hidden_dropout, node_norm):
        super().__init__()
        if nlayers == 1:
            # single-layer case: embedding maps directly to class logits
            self.embeds = torch.nn.Embedding(num_features, num_classes)
            self.fcs = nn.ModuleList([])
            self.bns = nn.ModuleList([])
        else:
            fcs = []
            bns = []
            self.embeds = torch.nn.Embedding(num_features, hidden_size)
            for i in range(nlayers - 2):
                fcs.append(nn.Linear(hidden_size, hidden_size, bias=True))
                bns.append(nn.BatchNorm1d(hidden_size))
            bns.append(nn.BatchNorm1d(hidden_size))
            fcs.append(nn.Linear(hidden_size, num_classes, bias=True))
            self.fcs = nn.ModuleList(fcs).cuda()
            self.bns = nn.ModuleList(bns).cuda()
        self.input_droprate = input_dropout
        self.hidden_droprate = hidden_dropout
        self.use_bn = use_bn
        self.node_norm = node_norm
        self.reset_param()
    def reset_param(self):
        # re-initialize all linear layers
        for lin in self.fcs:
            lin.reset_parameters()
    def normalize(self, embedding):
        # L2-normalize rows (epsilon-guarded against zero vectors)
        return embedding / (1e-12 + torch.norm(embedding, p=2, dim=-1, keepdim=True))
    def emb(self, attr_idx, node_idx, attr_data):
        """Weighted-mean embedding per node from sparse (node_idx, attr_idx, attr_data) COO triples."""
        feat_embeds = self.embeds(attr_idx).cuda()
        feat_embeds = F.dropout(feat_embeds, self.input_droprate, training=self.training)
        # node_idx is assumed sorted ascending: last entry + 1 = number of nodes
        dim_size = node_idx[-1] + 1
        node_embeds = scatter(feat_embeds * attr_data[:, None], node_idx[:,None], dim=0, dim_size=dim_size, reduce='sum')
        node_s_sum = scatter(attr_data[:, None], node_idx[:, None], dim=0, dim_size=dim_size, reduce='sum')
        embs = node_embeds / (node_s_sum + 1e-10)
        return embs
    def forward(self, X):
        embs = X
        for fc, bn in zip(self.fcs, self.bns):
            embs = F.relu(embs)
            if self.node_norm:
                embs = self.normalize(embs)
            if self.use_bn:
                embs = bn(embs)
            embs = F.dropout(embs, self.hidden_droprate, training=self.training)
            embs = fc(embs)
        return embs
class Grand_Plus(nn.Module):
    """GRAND+ model: an attribute-embedding MLP plus DropNode random propagation."""
    def __init__(self, num_features, num_classes, hidden_size, nlayers, use_bn, input_dropout, hidden_dropout, dropnode_rate=0.5, node_norm=False):
        super().__init__()
        self.mlp = MLP(num_features, num_classes,
                       hidden_size, nlayers, use_bn, input_dropout, hidden_dropout, node_norm = node_norm)
        self.dropnode_rate = dropnode_rate
    def forward(self, X):
        # classification head over (propagated) node representations
        logits = self.mlp(X)
        return logits
    def random_prop(self, feats, mat_scores, mat_idx, dropnode_rate):
        """DropNode propagation: randomly zero propagation weights, then take the weighted mean per target node."""
        # F.dropout only drops (and rescales) weights in training mode
        mat_scores = F.dropout(mat_scores, p=dropnode_rate, training=self.training)
        propagated_logits = scatter(feats * mat_scores[:, None], mat_idx[:, None],
                                    dim=0, dim_size=mat_idx[-1] + 1, reduce='sum')
        mat_sum_s = scatter(mat_scores[:,None], mat_idx[:,None],
                            dim=0, dim_size=mat_idx[-1] + 1, reduce='sum')
        return propagated_logits / (mat_sum_s + 1e-12)
    def emb(self, attr_idx, node_idx, attr_data, cuda= True):
        # thin delegation to the MLP's sparse attribute embedding
        feat_embeds = self.mlp.emb(attr_idx, node_idx, attr_data)
        return feat_embeds
def iterate_minibatches_listinputs(index, batch_size, shuffle=False):
    """Yield successive mini-batches of *index*, optionally in random order.

    The final batch may be smaller than *batch_size*.
    """
    total = len(index)
    if shuffle:
        order = np.arange(total)
        np.random.shuffle(order)
    for start in range(0, total, batch_size):
        stop = min(start + batch_size, total)
        if shuffle:
            yield index[order[start:stop]]
        else:
            yield index[start:stop]
def sample_unlabel(idx_unlabel, unlabel_batch_size, shuffle=False):
    """Return *unlabel_batch_size* entries of *idx_unlabel* (random ones when shuffle=True)."""
    order = np.arange(idx_unlabel.shape[0])
    if shuffle:
        np.random.shuffle(order)
    return idx_unlabel[order[:unlabel_batch_size]]
def clip_grad_norm(params, max_norm):
    """Clip gradients to *max_norm* (if positive) or just report the total gradient norm.

    :return: total gradient norm (tensor), before any clipping
    """
    if max_norm <= 0:
        # no clipping requested: only compute the global L2 norm of all grads
        squared = sum(p.grad.data.norm() ** 2 for p in params if p.grad is not None)
        return torch.sqrt(squared)
    return torch.nn.utils.clip_grad_norm_(params, max_norm)
def consis_loss(args, logps, tem, conf):
    """Consistency regularization over S augmented predictions.

    The average prediction is temperature-sharpened (exponent 1/tem) and each
    prediction is pulled toward it, restricted to rows whose average max
    probability exceeds *conf*.
    :param args: namespace with a ``loss`` field, 'kl' or 'l2'
    :param logps: list of log-probability tensors, one per augmentation
    :raises ValueError: on an unknown ``args.loss``
    """
    probs = [torch.exp(lp) for lp in logps]
    avg_p = sum(probs) / len(probs)
    powered = torch.pow(avg_p, 1. / tem)
    sharp_p = (powered / torch.sum(powered, dim=1, keepdim=True)).detach()
    confident = avg_p.max(1)[0] > conf
    loss = 0.
    for p in probs:
        if args.loss == 'kl':
            loss += torch.mean((-sharp_p * torch.log(p)).sum(1)[confident])
        elif args.loss == 'l2':
            loss += torch.mean((p - sharp_p).pow(2).sum(1)[confident])
        else:
            raise ValueError(f"Unknown loss type: {args.loss}")
    return loss / len(probs)
def valid(args, model, topk_adj, features, idx_val, labels, batch_size=100):
    """
    Evaluate the model on the validation indices in mini-batches.

    For each batch: gather the top-k propagation rows of the validation
    nodes from ``topk_adj``, embed the sparse attributes of the touched
    neighbor nodes, propagate the embeddings with ``model.random_prop`` and
    score the log-softmax outputs.
    :return: (validation NLL loss, validation accuracy) as python floats
    """
    model.eval()
    outputs = []
    for idx in iterate_minibatches_listinputs(idx_val, batch_size):
        # sparse top-k propagation rows for this batch, in COO pieces
        val_topk_adj = topk_adj[idx]
        source_idx, neighbor_idx = val_topk_adj.nonzero()
        mat_scores = val_topk_adj.data
        val_feat = features[neighbor_idx]
        mat_scores = torch.tensor(mat_scores, dtype=torch.float32)
        source_idx = torch.tensor(source_idx, dtype=torch.long)
        y_val = labels[idx]  # NOTE(review): computed but never used below
        # sparse attribute matrix of the gathered neighbors, in COO form
        node_idx, attr_idx = val_feat.nonzero()
        attr_data = val_feat.data
        attr_data = torch.tensor(attr_data, dtype=torch.float32)
        node_idx = torch.tensor(node_idx, dtype=torch.long)
        attr_idx = torch.tensor(attr_idx, dtype=torch.long)
        if args.cuda:
            mat_scores = mat_scores.cuda()
            source_idx = source_idx.cuda()
            node_idx = node_idx.cuda()
            attr_data = attr_data.cuda()
        with torch.no_grad():
            batch_emb = model.emb(attr_idx, node_idx, attr_data)
            batch_feat_aug = model.random_prop(batch_emb, mat_scores, source_idx, args.dropnode_rate)#.detach()
            output = model(batch_feat_aug)
            output = torch.log_softmax(output, dim=-1)
        outputs.append(output)
    outputs = torch.cat(outputs, dim=0)
    loss_test = F.nll_loss(outputs, labels[idx_val])
    acc_test = accuracy(outputs, labels[idx_val])
    return loss_test.item(), acc_test.item()
def get_local_logits(model, attr_mat, batch_size=10000):
    """
    Run *model* over the rows of *attr_mat* in mini-batches and return the
    stacked logits as a numpy array.

    Fix: batches are now moved to the model's own device instead of the
    previous unconditional .cuda() call (the computed ``device`` variable
    was never used), so this also works for CPU-resident models; behavior on
    CUDA models is unchanged. ``np.vstack`` replaces the deprecated
    ``np.row_stack`` alias.
    :param model: torch module mapping a float batch to logits
    :param attr_mat: 2-D array-like of node attributes
    :param batch_size: rows per forward pass
    :return: numpy array of shape (n_rows, n_logits)
    """
    device = next(model.parameters()).device
    nnodes = attr_mat.shape[0]
    logits = []
    with torch.no_grad():
        for i in range(0, nnodes, batch_size):
            batch_attr = torch.FloatTensor(attr_mat[i:i + batch_size]).to(device)
            logits.append(model(batch_attr).to('cpu').numpy())
    return np.vstack(logits)
def predict(args, adj, features_np, model, idx_test, labels_org, mode='ppr', batch_size_logits=10000):
    """
    Full-graph inference: embed every node's sparse attributes, propagate
    the embeddings over *adj* for ``args.order`` hops, run the MLP head on
    the propagated features and print/return the test accuracy.
    :param mode: propagation variant — 'ppr' (alpha-weighted hop sum),
        'avg' (mean over all hops) or 'single' (last hop only)
    :raises ValueError: on an unknown *mode*
    :return: test accuracy of the (last) propagated representation
    """
    model.eval()
    nprop = args.order
    embs, feats = [], []
    nnodes = features_np.shape[0]
    # 1) embed all node attributes batch-wise (sparse COO pieces per batch)
    with torch.set_grad_enabled(False):
        for i in range(0, nnodes, batch_size_logits):
            batch_feat = features_np[i: i + batch_size_logits]
            node_idx, attr_idx = batch_feat.nonzero()
            attr_data = batch_feat.data
            attr_data = torch.tensor(attr_data, dtype=torch.float32).cuda()
            node_idx = torch.tensor(node_idx, dtype=torch.long).cuda()
            attr_idx = torch.tensor(attr_idx, dtype=torch.long)
            batch_embs = model.emb(attr_idx, node_idx, attr_data).to('cpu').numpy()
            embs.append(batch_embs)
    embs = np.row_stack(embs)
    # 2) propagate embeddings over the degree-normalized adjacency
    if mode == 'ppr':
        embs = args.alpha * embs
        embs_prop = embs.copy()
        deg_row = adj.sum(1).A1
        deg_row_inv_alpha = np.asarray((1 - args.alpha) / np.maximum(deg_row, 1e-12))
        for _ in range(nprop):
            embs = np.multiply(deg_row_inv_alpha[:, None], (adj.dot(embs)))
            embs_prop += embs
        feats.append(embs_prop.copy())
    elif mode == 'avg':
        embs_prop = embs.copy()
        deg_row = adj.sum(1).A1
        deg_row_inv = 1 / np.maximum(deg_row, 1e-12)
        for _ in range(nprop):
            embs = np.multiply(deg_row_inv[:,None], (adj.dot(embs)))
            embs_prop += embs
        embs_prop = embs_prop/(nprop + 1)
        feats.append(embs_prop)
    elif mode == 'single':
        deg_row = adj.sum(1).A1
        deg_row_inv = 1 / np.maximum(deg_row, 1e-12)
        for _ in range(nprop):
            embs = np.multiply(deg_row_inv[:,None], (adj.dot(embs)))
        feats.append(embs)
    else:
        raise ValueError(f"Unknown propagation mode: {mode}")
    # 3) score the propagated features with the MLP head
    for feat in feats:
        logits = get_local_logits(
            model.mlp, feat, batch_size_logits)
        preds = logits.argmax(1)
        correct = np.equal(preds[idx_test],
                           labels_org.cpu().numpy()[idx_test]).astype(float)
        correct = correct.sum()
        acc_test = correct / len(idx_test)
        print(acc_test)
    return acc_test
def main_mag(args):
    """
    End-to-end GRAND+ pipeline for the MAG-scale dataset: precompute a
    sparse top-k propagation matrix with GFPush, train the MLP with
    consistency regularization on labeled+unlabeled mini-batches,
    early-stop on validation accuracy, then evaluate on the test set.
    :param args: parsed CLI namespace (seeds, propagation, training and
        early-stopping hyper-parameters)
    :return: (total wall time, test accuracy, mean batch time, #batches run)
    """
    args.cuda = not args.no_cuda and torch.cuda.is_available()
    torch.cuda.set_device(args.cuda_device)
    device = args.cuda_device
    torch.manual_seed(args.seed2)
    if args.cuda:
        torch.cuda.manual_seed(args.seed2)
    np.random.seed(args.seed2)
    dataset = args.dataset
    adj, features, labels, idx_train, idx_val, idx_test, _ = load_data(dataset_str=dataset, split_seed=args.seed1)
    unlabel_num = args.unlabel_num
    time_s1 = time.time()
    # add self loops before propagation
    adj = adj + sp.eye(features.shape[0])
    # sample a fixed pool of unlabeled nodes from the test split
    idx_sample = np.random.permutation(
        idx_test)[:unlabel_num]
    idx_unlabel = np.concatenate([idx_val, idx_sample])
    idx_train_unlabel = np.concatenate(
        [idx_train, idx_unlabel])
    indptr = np.array(adj.indptr, dtype=np.int32)
    indices = np.array(adj.indices, dtype=np.int32)
    graph = propagation.Graph(indptr, indices, args.seed2)
    # output buffers filled in-place by the C++ GFPush routine
    row_idx = np.zeros((idx_train_unlabel.shape[0] * args.top_k), dtype=np.int32)
    col_idx = np.zeros((idx_train_unlabel.shape[0] * args.top_k), dtype=np.int32)
    mat_value = np.zeros((idx_train_unlabel.shape[0] * args.top_k), dtype=np.float64)
    # per-hop propagation coefficients, later normalized to sum to 1
    if args.prop_mode == 'avg':
        coef = list(np.ones(args.order + 1, dtype=np.float64))
    elif args.prop_mode == 'ppr':
        coef = [args.alpha]
        for i in range(args.order):
            coef.append(coef[-1] * (1-args.alpha))
    elif args.prop_mode == 'single':
        coef = list(np.zeros(args.order + 1, dtype=np.float64))
        coef[-1] = 1.
    else:
        raise ValueError(f"Unknown propagation mode: {args.prop_mode}")
    print(f"propagation matrix: {args.prop_mode}")
    coef = np.asarray(coef) / np.sum(coef)
    graph.gfpush_omp(idx_train_unlabel, row_idx, col_idx, mat_value, coef, args.rmax, args.top_k)
    #print(row_idx.astype(np.int32).max(), col_idx.astype(np.int32).max(), features.shape[0])
    topk_adj = sp.coo_matrix((mat_value, (row_idx, col_idx)), (
        features.shape[0], features.shape[0]))
    topk_adj = topk_adj.tocsr()
    time_preprocessing = time.time() - time_s1
    print(f"preprocessing done, time: {time_preprocessing}")
    features_np = features
    n_class = labels.shape[1]
    labels = torch.LongTensor(np.argmax(labels, -1))
    model = Grand_Plus(num_features=features.shape[1],
                       num_classes=labels.max().item() + 1,
                       hidden_size=args.hidden,
                       nlayers=args.nlayers,
                       use_bn = args.use_bn,
                       input_dropout=args.input_droprate,
                       hidden_dropout=args.hidden_droprate,
                       dropnode_rate=args.dropnode_rate,
                       node_norm = args.node_norm)
    optimizer = optim.Adam(model.parameters(), lr=args.lr,
                           weight_decay=args.weight_decay)
    if args.cuda:
        labels = labels.cuda()
    t_begin = time.time()
    loss_values = []
    acc_values = []
    batch_time = []
    bad_counter = 0
    loss_best = np.inf
    acc_best = 0.0
    loss_mn = np.inf
    acc_mx = 0.0
    best_epoch = 0
    num_batch = 0
    for epoch in range(args.epochs):
        for train_index in iterate_minibatches_listinputs(idx_train, batch_size=args.batch_size, shuffle=True):
            batch_t_s = time.time()
            model.train()
            optimizer.zero_grad()
            # each batch mixes labeled nodes with a random unlabeled sample
            unlabel_index_batch = sample_unlabel(
                idx_sample, args.unlabel_batch_size, shuffle=True)
            batch_index = np.concatenate((train_index, unlabel_index_batch))
            batch_topk_adj = topk_adj[batch_index]
            source_idx, neighbor_idx = batch_topk_adj.nonzero()
            mat_scores = batch_topk_adj.data
            batch_feat = features[neighbor_idx]#.to(device)
            mat_scores = torch.tensor(mat_scores, dtype=torch.float32).to(device)
            source_idx = torch.tensor(source_idx, dtype=torch.long).to(device)
            y_train_batch = labels[train_index]
            node_idx, attr_idx = batch_feat.nonzero()
            attr_data = batch_feat.data
            attr_data = torch.tensor(attr_data, dtype=torch.float32).to(device)
            node_idx = torch.tensor(node_idx, dtype=torch.long).to(device)
            attr_idx = torch.tensor(attr_idx, dtype=torch.long)
            output_list = []
            K = args.sample
            loss_train = 0.
            # K DropNode augmentations: supervised loss on labeled rows,
            # unlabeled rows collected for the consistency loss below
            for i in range(K):
                batch_emb = model.emb(attr_idx, node_idx, attr_data)
                batch_feat_aug = model.random_prop(batch_emb, mat_scores, source_idx, args.dropnode_rate)#.detach()
                output_aug = model(batch_feat_aug)
                output_aug = torch.log_softmax(output_aug, dim=-1)
                output_list.append(output_aug[len(train_index):])
                loss_train += F.nll_loss(output_aug[:len(train_index)], y_train_batch)
            loss_train = loss_train/K
            args.conf = 2./n_class
            # consistency weight ramps up linearly over the warmup batches
            loss_train += min(1.0 , float(num_batch)/args.warmup) * args.lam * consis_loss(args, output_list, args.tem, args.conf)
            acc_train = accuracy(output_aug[:len(train_index)], y_train_batch)
            loss_train.backward()
            grad_norm = clip_grad_norm(model.parameters(), args.clip_norm)
            optimizer.step()
            batch_time.append(time.time() - batch_t_s)
            if num_batch % args.eval_batch == 0:
                loss_val, acc_val = valid(
                    args, model, topk_adj, features, idx_val, labels)
                loss_values.append(loss_val)
                acc_values.append(acc_val)
                if args.visible:
                    print(
                        f'epoch {epoch}, batch {num_batch}, validation loss {loss_val}, validation acc {acc_val}')
                if acc_values[-1] >= acc_mx:
                    if args.stop_mode == 'acc' or (args.stop_mode == 'both' and loss_values[-1]<= loss_mn):
                        loss_mn = loss_values[-1]
                        acc_mx = acc_values[-1]
                        best_epoch = epoch
                        best_batch = num_batch
                        torch.save(model.state_dict(),
                                   f"{args.model}_{dataset}.pkl")
                    bad_counter = 0
                else:
                    bad_counter += 1
                if bad_counter >= args.patience:
                    if args.visible:
                        print(
                            f'Early stop! Min loss: {loss_mn}, Max accuracy: {acc_mx}, num batch: {num_batch} num epoch: {epoch}')
                    break
            num_batch += 1
        if bad_counter >= args.patience:
            break
    if args.visible:
        print(
            f'Optimization Finished! Min loss: {loss_mn}, Max accuracy: {acc_mx}, num batch: {num_batch} num epoch: {epoch}')
    if args.visible:
        print('Loading {}th epoch'.format(best_epoch))
    # reload the best checkpoint and evaluate on the test split
    model.load_state_dict(torch.load(f"{args.model}_{dataset}.pkl"))
    test_acc = predict(args, adj, features_np, model, idx_test, labels, mode = args.prop_mode)
    t_total = time.time() - time_s1
    print("Total time elapsed: {:.4f}s".format(t_total))
    print(f"Test Accuracy {test_acc}")
    return t_total, test_acc, np.mean(batch_time), num_batch
| 17,036 | 39.954327 | 147 | py |
GRAND-plus | GRAND-plus-main/model.py | from __future__ import division
from __future__ import print_function
import sys
sys.path.append("..")
import time
import argparse
import numpy as np
import scipy.sparse as sp
from precompute import propagation
import torch
import torch.nn.functional as F
import torch.nn as nn
import torch.optim as optim
from torch_scatter import scatter
from utils.data_loader import load_data, accuracy, totensor
class MLP(nn.Module):
    """Feed-forward classifier backbone of GRAND+.

    Optionally applies batch normalization, input/hidden dropout, and
    row-wise L2 normalization of (detached) features between layers.
    """

    def __init__(self, num_features, num_classes, hidden_size, nlayers, use_bn, input_dropout, hidden_dropout, node_norm):
        super().__init__()
        # Build the linear stack; bns[i] normalizes the input of fcs[i].
        if nlayers == 1:
            fcs = [nn.Linear(num_features, num_classes, bias=True)]
            bns = [nn.BatchNorm1d(num_features)]
        else:
            fcs = [nn.Linear(num_features, hidden_size, bias=True)]
            bns = [nn.BatchNorm1d(num_features)]
            for _ in range(nlayers - 2):
                fcs.append(nn.Linear(hidden_size, hidden_size, bias=True))
                bns.append(nn.BatchNorm1d(hidden_size))
            bns.append(nn.BatchNorm1d(hidden_size))
            fcs.append(nn.Linear(hidden_size, num_classes, bias=True))
        self.fcs = nn.ModuleList(fcs)
        self.bns = nn.ModuleList(bns)
        self.input_droprate = input_dropout
        self.hidden_droprate = hidden_dropout
        self.use_bn = use_bn
        self.node_norm = node_norm
        self.reset_param()

    def reset_param(self):
        """Re-initialize every linear layer."""
        for layer in self.fcs:
            layer.reset_parameters()

    def normalize(self, embedding):
        # Row-wise L2 normalization with an epsilon guard against zero rows.
        return embedding / (1e-12 + torch.norm(embedding, p=2, dim=-1, keepdim=True))

    def forward(self, X):
        if self.node_norm:
            X = self.normalize(X).detach()
        if self.use_bn:
            X = self.bns[0](X)
        hidden = F.dropout(X, self.input_droprate, training=self.training)
        hidden = self.fcs[0](hidden)
        for linear, norm in zip(self.fcs[1:], self.bns[1:]):
            hidden = F.relu(hidden)
            if self.node_norm:
                hidden = self.normalize(hidden)
            if self.use_bn:
                hidden = norm(hidden)
            hidden = F.dropout(hidden, self.hidden_droprate, training=self.training)
            hidden = linear(hidden)
        return hidden
class Grand_Plus(nn.Module):
    """GRAND+ model: random feature propagation followed by an MLP classifier."""

    def __init__(self, num_features, num_classes, hidden_size, nlayers, use_bn, input_dropout, hidden_dropout, dropnode_rate=0.5, node_norm=False):
        super().__init__()
        self.mlp = MLP(num_features, num_classes, hidden_size, nlayers,
                       use_bn, input_dropout, hidden_dropout, node_norm=node_norm)
        self.dropnode_rate = dropnode_rate

    def forward(self, X):
        return self.mlp(X)

    def random_prop(self, feats, mat_scores, mat_idx, dropnode_rate):
        """DropNode + weighted aggregation of neighbor rows.

        `mat_scores` holds sparse propagation weights and `mat_idx` maps each
        row of `feats` to its target node; rows are averaged by the
        dropout-surviving weight mass per target node.
        """
        kept = F.dropout(mat_scores, p=dropnode_rate, training=self.training)
        weighted = scatter(feats * kept[:, None], mat_idx[:, None],
                           dim=0, dim_size=mat_idx[-1] + 1, reduce='sum')
        mass = scatter(kept[:, None], mat_idx[:, None],
                       dim=0, dim_size=mat_idx[-1] + 1, reduce='sum')
        # Epsilon avoids 0/0 for nodes whose whole row was dropped.
        return weighted / (mass + 1e-12)
def iterate_minibatches_listinputs(index, batch_size, shuffle=False):
    """Yield successive mini-batches drawn from `index`.

    With shuffle=True the order is randomized (np.random) before batching;
    the final batch may be smaller than `batch_size`.
    """
    total = len(index)
    if shuffle:
        order = np.arange(total)
        np.random.shuffle(order)
    for lo in range(0, total, batch_size):
        hi = min(lo + batch_size, total)
        picked = order[lo:hi] if shuffle else slice(lo, hi)
        yield index[picked]
def sample_unlabel(idx_unlabel, unlabel_batch_size, shuffle=False):
    """Return up to `unlabel_batch_size` ids from `idx_unlabel`.

    With shuffle=True a random subset is drawn; otherwise the first ids
    are taken in order.
    """
    positions = np.arange(idx_unlabel.shape[0])
    if shuffle:
        np.random.shuffle(positions)
    return idx_unlabel[positions[:unlabel_batch_size]]
def clip_grad_norm(params, max_norm):
    """Clip gradients in place when max_norm > 0; otherwise only report.

    Returns the total gradient norm (pre-clipping when clipping is applied).
    """
    if max_norm > 0:
        return torch.nn.utils.clip_grad_norm_(params, max_norm)
    squared = sum(p.grad.data.norm() ** 2 for p in params if p.grad is not None)
    return torch.sqrt(squared)
def consis_loss(args, logps, tem, conf):
    """Consistency regularizer across augmentations (GRAND sharpening loss).

    Args:
        args: namespace with `loss` in {'kl', 'l2'}.
        logps: list of per-augmentation log-probability tensors.
        tem: sharpening temperature.
        conf: confidence threshold selecting which rows contribute.

    Raises:
        ValueError: if args.loss is neither 'kl' nor 'l2'.
    """
    probs = [logp.exp() for logp in logps]
    avg_p = sum(probs) / len(probs)
    # Sharpen the averaged distribution; detach so gradients flow only
    # through the per-augmentation predictions.
    powered = torch.pow(avg_p, 1. / tem)
    sharp_p = (powered / powered.sum(dim=1, keepdim=True)).detach()
    mask = avg_p.max(1)[0] > conf
    total = 0.
    for p in probs:
        if args.loss == 'kl':
            total += torch.mean((-sharp_p * torch.log(p)).sum(1)[mask])
        elif args.loss == 'l2':
            total += torch.mean((p - sharp_p).pow(2).sum(1)[mask])
        else:
            raise ValueError(f"Unknown loss type: {args.loss}")
    return total / len(probs)
def valid(args, model, topk_adj, features, idx_val, labels, batch_size=10000):
    """Evaluate the model on the validation nodes in mini-batches.

    For each batch of validation nodes, gathers their precomputed top-k
    propagation rows from `topk_adj`, aggregates the corresponding neighbor
    features with `model.random_prop`, and scores the result with the model.
    Returns (nll_loss, accuracy) over all of idx_val.
    """
    model.eval()
    outputs = []
    for idx in iterate_minibatches_listinputs(idx_val, batch_size):
        val_topk_adj = topk_adj[idx]
        # Sparse entries: source_idx maps each entry back to its batch row,
        # neighbor_idx selects the feature rows to aggregate.
        source_idx, neighbor_idx = val_topk_adj.nonzero()
        mat_scores = val_topk_adj.data
        val_feat = features[neighbor_idx]
        mat_scores = torch.tensor(mat_scores, dtype=torch.float32)
        source_idx = torch.tensor(source_idx, dtype=torch.long)
        # NOTE(review): y_val is never used below; per-batch labels are
        # recovered from idx_val at the end instead.
        y_val = labels[idx]
        if args.cuda:
            val_feat = val_feat.cuda()
            mat_scores = mat_scores.cuda()
            source_idx = source_idx.cuda()
        with torch.no_grad():
            # Dropout is inactive in eval mode, so random_prop reduces to a
            # plain weighted mean over the top-k entries.
            val_feat = model.random_prop(val_feat, mat_scores, source_idx, args.dropnode_rate).detach()
            output = model(val_feat)
            output = torch.log_softmax(output, dim=-1)
        outputs.append(output)
    outputs = torch.cat(outputs, dim=0)
    loss_test = F.nll_loss(outputs, labels[idx_val])
    acc_test = accuracy(outputs, labels[idx_val])
    return loss_test.item(), acc_test.item()
def get_local_logits(model, attr_mat, batch_size=10000):
    """Score every row of `attr_mat` with `model`, in mini-batches.

    Args:
        model: torch module mapping a float feature batch to logits.
        attr_mat: numpy array of shape (n_nodes, n_features).
        batch_size: rows per forward pass (bounds peak memory).

    Returns:
        numpy array (n_nodes, n_classes) of logits on the CPU.
    """
    device = next(model.parameters()).device
    nnodes = attr_mat.shape[0]
    logits = []
    with torch.set_grad_enabled(False):
        for i in range(0, nnodes, batch_size):
            batch_attr = torch.FloatTensor(attr_mat[i:i + batch_size]).to(device)
            logits.append(model(batch_attr).to('cpu').numpy())
    # np.row_stack is deprecated (NumPy >= 1.24); np.vstack is the exact
    # drop-in replacement.
    logits = np.vstack(logits)
    return logits
def predict(args, adj, features_np, model, idx_test, labels_org, mode='ppr', batch_size_logits=10000):
    """Full-graph inference: propagate features, then score with the MLP.

    Propagation is done in numpy over the whole (self-looped) adjacency:
    'ppr'    - truncated personalized-PageRank series with factor alpha,
    'avg'    - average of the 0..order step feature matrices,
    'single' - only the order-th propagation step.
    Returns the test accuracy (also printed).
    """
    model.eval()
    nprop = args.order
    feats = []
    if mode == 'ppr':
        features_np = args.alpha * features_np
        features_np_prop = features_np.copy()
        deg_row = adj.sum(1).A1
        # (1 - alpha) / deg, guarded against isolated (zero-degree) nodes.
        deg_row_inv_alpha = np.asarray((1 - args.alpha) / np.maximum(deg_row, 1e-12))
        for _ in range(nprop):
            features_np = np.multiply(deg_row_inv_alpha[:, None], (adj.dot(features_np)))
            features_np_prop += features_np
        feats.append(features_np_prop.copy())
    elif mode == 'avg':
        features_np_prop = features_np.copy()
        deg_row = adj.sum(1).A1
        deg_row_inv = 1 / np.maximum(deg_row, 1e-12)
        for _ in range(nprop):
            features_np = np.multiply(deg_row_inv[:, None], (adj.dot(features_np)))
            features_np_prop += features_np
        features_np_prop = features_np_prop/(nprop + 1)
        feats.append(features_np_prop)
    elif mode == 'single':
        deg_row = adj.sum(1).A1
        deg_row_inv = 1 / np.maximum(deg_row, 1e-12)
        # NOTE(review): with args.order == 0 this loop never runs and
        # features_np_prop is unbound -- confirm order >= 1 for 'single'.
        for _ in range(nprop):
            features_np = np.multiply(deg_row_inv[:, None], (adj.dot(features_np)))
            features_np_prop = features_np
        feats = [features_np_prop]
    else:
        raise ValueError(f"Unknown propagation mode: {mode}")
    for feat in feats:
        logits = get_local_logits(
            model.mlp, feat, batch_size_logits)
        preds = logits.argmax(1)
        correct = np.equal(preds[idx_test],
                           labels_org.cpu().numpy()[idx_test]).astype(float)
        correct = correct.sum()
        acc_test = correct / len(idx_test)
        print(acc_test)
    return acc_test
def main(args):
    """Train and evaluate GRAND+ end to end.

    Pipeline: load data -> precompute a sparse top-k propagation matrix with
    the C++ GFPush routine -> mini-batch training with DropNode augmentation
    and a consistency loss on unlabeled nodes -> early stopping on validation
    accuracy -> full-graph prediction with the best checkpoint.
    Returns (total_time, test_acc, mean_batch_time, num_batches).
    """
    args.cuda = not args.no_cuda and torch.cuda.is_available()
    torch.cuda.set_device(args.cuda_device)
    device = args.cuda_device
    torch.manual_seed(args.seed2)
    if args.cuda:
        torch.cuda.manual_seed(args.seed2)
    np.random.seed(args.seed2)
    dataset = args.dataset
    adj, features, labels, idx_train, idx_val, idx_test, _ = load_data(
        dataset_str=dataset, split_seed=args.seed1)
    unlabel_num = args.unlabel_num
    time_s1 = time.time()
    # Add self-loops before propagation.
    adj = adj + sp.eye(features.shape[0])
    # Unlabeled pool = validation nodes + a random sample of test nodes.
    idx_sample = np.random.permutation(
        idx_test)[:unlabel_num]
    idx_unlabel = np.concatenate([idx_val, idx_sample])
    idx_train_unlabel = np.concatenate(
        [idx_train, idx_unlabel])
    indptr = np.array(adj.indptr, dtype=np.int32)
    indices = np.array(adj.indices, dtype=np.int32)
    graph = propagation.Graph(indptr, indices, args.seed2)
    # Preallocated COO buffers filled in-place by gfpush_omp.
    row_idx = np.zeros((idx_train_unlabel.shape[0] * args.top_k), dtype=np.int32)
    col_idx = np.zeros((idx_train_unlabel.shape[0] * args.top_k), dtype=np.int32)
    mat_value = np.zeros((idx_train_unlabel.shape[0] * args.top_k), dtype=np.float64)
    # Step coefficients of the propagation polynomial (normalized below).
    if args.prop_mode == 'avg':
        coef = list(np.ones(args.order + 1, dtype=np.float64))
    elif args.prop_mode == 'ppr':
        coef = [args.alpha]
        for i in range(args.order):
            coef.append(coef[-1] * (1 - args.alpha))
    elif args.prop_mode == 'single':
        coef = list(np.zeros(args.order + 1, dtype=np.float64))
        coef[-1] = 1.0
    else:
        raise ValueError(f"Unknown propagation mode: {args.prop_mode}")
    print(f"propagation matrix: {args.prop_mode}")
    coef = np.asarray(coef) / np.sum(coef)
    graph.gfpush_omp(idx_train_unlabel, row_idx, col_idx, mat_value, coef, args.rmax, args.top_k)
    topk_adj = sp.coo_matrix((mat_value, (row_idx, col_idx)), (
        features.shape[0], features.shape[0]))
    topk_adj = topk_adj.tocsr()
    time_preprocessing = time.time() - time_s1
    print(f"preprocessing done, time: {time_preprocessing}")
    # Keep a numpy copy for final full-graph prediction.
    features_np = features
    features, labels = totensor(features, labels)
    n_class = labels.max().item() + 1
    model = Grand_Plus(num_features=features.shape[1],
                       num_classes=n_class,
                       hidden_size=args.hidden,
                       nlayers=args.nlayers,
                       use_bn=args.use_bn,
                       input_dropout=args.input_droprate,
                       hidden_dropout=args.hidden_droprate,
                       dropnode_rate=args.dropnode_rate,
                       node_norm=args.node_norm)
    optimizer = optim.Adam(model.parameters(), lr=args.lr,
                           weight_decay=args.weight_decay)
    if args.cuda:
        model.cuda()
        labels = labels.cuda()
    loss_values = []
    acc_values = []
    batch_time = []
    bad_counter = 0
    loss_mn = np.inf
    acc_mx = 0.0
    best_epoch = 0
    num_batch = 0
    for epoch in range(args.epochs):
        for train_index in iterate_minibatches_listinputs(idx_train, batch_size=args.batch_size, shuffle=True):
            batch_t_s = time.time()
            model.train()
            optimizer.zero_grad()
            unlabel_index_batch = sample_unlabel(
                idx_sample, args.unlabel_batch_size, shuffle=True)
            # Batch = labeled nodes followed by sampled unlabeled nodes.
            batch_index = np.concatenate((train_index, unlabel_index_batch))
            batch_topk_adj = topk_adj[batch_index]
            source_idx, neighbor_idx = batch_topk_adj.nonzero()
            mat_scores = batch_topk_adj.data
            batch_feat = features[neighbor_idx].to(device)
            mat_scores = torch.tensor(mat_scores, dtype=torch.float32).to(device)
            source_idx = torch.tensor(source_idx, dtype=torch.long).to(device)
            y_train_batch = labels[train_index]
            output_list = []
            K = args.sample
            loss_train = 0.
            # K DropNode augmentations: supervised NLL on the labeled slice,
            # consistency regularization on the unlabeled slice.
            for i in range(K):
                batch_feat_aug = model.random_prop(batch_feat, mat_scores, source_idx, args.dropnode_rate).detach()
                output_aug = model(batch_feat_aug)
                output_aug = torch.log_softmax(output_aug, dim=-1)
                output_list.append(output_aug[len(train_index):])
                loss_train += F.nll_loss(output_aug[:len(train_index)], y_train_batch)
            loss_train = loss_train/K
            args.conf = 2./n_class
            # Consistency weight warms up linearly to args.lam.
            loss_train += min(args.lam, (args.lam * float(num_batch)/args.warmup)) * consis_loss(args, output_list, args.tem, args.conf)
            acc_train = accuracy(output_aug[:len(train_index)], y_train_batch)
            loss_train.backward()
            grad_norm = clip_grad_norm(model.parameters(), args.clip_norm)
            optimizer.step()
            batch_time.append(time.time() - batch_t_s)
            if num_batch % args.eval_batch == 0:
                loss_val, acc_val = valid(
                    args, model, topk_adj, features, idx_val, labels, args.batch_size)
                loss_values.append(loss_val)
                acc_values.append(acc_val)
                if args.visible:
                    print(
                        f'epoch {epoch}, batch {num_batch}, validation loss {loss_val}, validation acc {acc_val}')
                # Checkpoint on new best validation accuracy (optionally
                # also requiring a new best loss in 'both' mode).
                if acc_values[-1] >= acc_mx:
                    if args.stop_mode == 'acc' or (args.stop_mode == 'both' and loss_values[-1] <= loss_mn):
                        loss_mn = loss_values[-1]
                        acc_mx = acc_values[-1]
                        best_epoch = epoch
                        best_batch = num_batch
                        torch.save(model.state_dict(),
                                   f"{args.model}_{dataset}.pkl")
                        bad_counter = 0
                else:
                    bad_counter += 1
                    if bad_counter >= args.patience:
                        if args.visible:
                            print(
                                f'Early stop! Min loss: {loss_mn}, Max accuracy: {acc_mx}, num batch: {num_batch} num epoch: {epoch}')
                        break
            num_batch += 1
        if bad_counter >= args.patience:
            break
    if args.visible:
        print(
            f'Optimization Finished! Min loss: {loss_mn}, Max accuracy: {acc_mx}, num batch: {num_batch} num epoch: {epoch}')
    if args.visible:
        print('Loading {}th epoch'.format(best_epoch))
    model.load_state_dict(torch.load(f"{args.model}_{dataset}.pkl"))
    test_acc = predict(args, adj, features_np, model, idx_test, labels, mode=args.prop_mode)
    t_total = time.time() - time_s1
    print("Total time elapsed: {:.4f}s".format(t_total))
    print(f"Test Accuracy {test_acc}")
    return t_total, test_acc, np.mean(batch_time), num_batch
| 15,190 | 39.401596 | 147 | py |
GRAND-plus | GRAND-plus-main/utils/data_loader.py | import sys
import pickle as pkl
import networkx as nx
import numpy as np
import scipy.sparse as sp
from sklearn.preprocessing import StandardScaler
import torch
from utils.make_dataset import get_dataset, get_train_val_test_split
import os
from sklearn import metrics
def load_data(dataset_str='cora', split_seed=0, renormalize=False):
    """Load data.

    Dispatches on `dataset_str` to the matching on-disk format (pickle,
    npz, Planetoid citation files, ...) and returns
    (adj, features, labels, idx_train, idx_val, idx_test, idx_unlabel).
    With renormalize=True, returns the symmetrically normalized
    adjacency D^{-1/2} (A + I) D^{-1/2} instead of the raw one.
    """
    if os.path.exists("dataset/{}".format(dataset_str)):
        path = "dataset/{}".format(dataset_str)
    else:
        path = "dataset/"
    if dataset_str == 'aminer':
        # Pre-pickled sparse adjacency, features and one-hot labels.
        adj = pkl.load(open(os.path.join(path, "{}.adj.sp.pkl".format(dataset_str)), "rb"))
        features = pkl.load(
            open(os.path.join(path, "{}.features.pkl".format(dataset_str)), "rb"))
        labels = pkl.load(
            open(os.path.join(path, "{}.labels.pkl".format(dataset_str)), "rb"))
        random_state = np.random.RandomState(split_seed)
        idx_train, idx_val, idx_test = get_train_val_test_split(
            random_state, labels, train_examples_per_class=20, val_examples_per_class=30)
        idx_unlabel = np.concatenate((idx_val, idx_test))
        features = col_normalize(features)
    elif dataset_str in ['ms_academic_cs', 'ms_academic_phy', 'amazon_electronics_photo', 'amazon_electronics_computers', 'cora_full']:
        # GNN-benchmark style .npz archives.
        datapath = os.path.join(path, dataset_str + '.npz')
        adj, features, labels = get_dataset(
            dataset_str, datapath, True, train_examples_per_class=20, val_examples_per_class=30)
        random_state = np.random.RandomState(split_seed)
        idx_train, idx_val, idx_test = get_train_val_test_split(
            random_state, labels, train_examples_per_class=20, val_examples_per_class=30)
        idx_unlabel = np.concatenate((idx_val, idx_test))
        features = features.todense()
    elif dataset_str in ['reddit']:
        adj = sp.load_npz(os.path.join(path, '{}_adj.npz'.format(dataset_str)))
        features = np.load(os.path.join(path, '{}_feat.npy'.format(dataset_str)))
        labels = np.load(os.path.join(path, '{}_labels.npy'.format(dataset_str)))
        print(labels.shape, list(np.sum(labels, axis=0)))
        random_state = np.random.RandomState(split_seed)
        idx_train, idx_val, idx_test = get_train_val_test_split(
            random_state, labels, train_examples_per_class=20, val_examples_per_class=30)
        idx_unlabel = np.concatenate((idx_val, idx_test))
        print(dataset_str, features.shape)
    elif dataset_str in ['Amazon2M']:
        adj = sp.load_npz(os.path.join(path, '{}_adj.npz'.format(dataset_str)))
        features = np.load(os.path.join(path, '{}_feat.npy'.format(dataset_str)))
        labels = np.load(os.path.join(path, '{}_labels.npy'.format(dataset_str)))
        print(labels.shape, list(np.sum(labels, axis=0)))
        random_state = np.random.RandomState(split_seed)
        class_num = labels.shape[1]
        # Split by total counts rather than per-class counts here.
        idx_train, idx_val, idx_test = get_train_val_test_split(random_state, labels, train_size=20 * class_num, val_size=30 * class_num)
        idx_unlabel = np.concatenate((idx_val, idx_test))
    elif dataset_str in ['mag_scholar_c', 'mag_scholar_f']:
        # Raw CSR components stored under flat keys in one .npz file.
        data_set = np.load(path + dataset_str + '.npz')
        adj_data = data_set['adj_matrix.data']
        adj_indices = data_set['adj_matrix.indices']
        adj_indptr = data_set['adj_matrix.indptr']
        adj_shape = data_set['adj_matrix.shape']
        feat_data = data_set['attr_matrix.data']
        feat_indices = data_set['attr_matrix.indices']
        feat_indptr = data_set['attr_matrix.indptr']
        feat_shape = data_set['attr_matrix.shape']
        labels_num = data_set['labels']
        features = sp.csr_matrix((feat_data, feat_indices, feat_indptr), shape=feat_shape)
        adj = sp.csr_matrix((adj_data, adj_indices, adj_indptr), shape=adj_shape)
        random_state = np.random.RandomState(split_seed)
        label_count = labels_num.max() + 1
        # Convert integer labels to one-hot.
        labels = np.eye(label_count)[labels_num]
        idx_train, idx_val, idx_test = get_train_val_test_split(random_state, labels, train_examples_per_class=20, val_examples_per_class=30)
        idx_unlabel = np.concatenate((idx_val, idx_test))
    elif dataset_str in ['cora', 'citeseer', 'pubmed']:
        # Planetoid pickle format (x/y/tx/ty/allx/ally/graph + test index).
        if os.path.exists("dataset/citation"):
            path = 'dataset/citation'
        names = ['x', 'y', 'tx', 'ty', 'allx', 'ally', 'graph']
        objects = []
        for i in range(len(names)):
            with open(os.path.join(path, "ind.{}.{}".format(dataset_str, names[i])), 'rb') as f:
                if sys.version_info > (3, 0):
                    objects.append(pkl.load(f, encoding='latin1'))
                else:
                    objects.append(pkl.load(f))
        x, y, tx, ty, allx, ally, graph = tuple(objects)
        test_idx_reorder = parse_index_file(
            os.path.join(path, "ind.{}.test.index".format(dataset_str)))
        test_idx_range = np.sort(test_idx_reorder)
        if dataset_str == 'citeseer':
            # Citeseer has isolated test nodes: pad tx/ty to the full
            # contiguous test-id range.
            test_idx_range_full = range(
                min(test_idx_reorder), max(test_idx_reorder)+1)
            tx_extended = sp.lil_matrix((len(test_idx_range_full), x.shape[1]))
            tx_extended[test_idx_range-min(test_idx_range), :] = tx
            tx = tx_extended
            ty_extended = np.zeros((len(test_idx_range_full), y.shape[1]))
            ty_extended[test_idx_range-min(test_idx_range), :] = ty
            ty = ty_extended
        features = sp.vstack((allx, tx)).tolil()
        # Restore the on-disk test ordering.
        features[test_idx_reorder, :] = features[test_idx_range, :]
        # normalize
        features = normalize(features)
        features = features.todense()
        adj = nx.adjacency_matrix(nx.from_dict_of_lists(graph))
        # build symmetric adjacency matrix
        adj = adj + adj.T.multiply(adj.T > adj) - adj.multiply(adj.T > adj)
        labels = np.vstack((ally, ty))
        labels[test_idx_reorder, :] = labels[test_idx_range, :]
        idx_train = np.arange(len(y))
        idx_val = np.arange(len(y), len(y)+500)
        idx_test = np.asarray(test_idx_range.tolist())
        idx_unlabel = np.arange(len(y), labels.shape[0])
    else:
        raise NotImplementedError
    if renormalize:
        # Symmetric normalization with self-loops: D^{-1/2}(A+I)D^{-1/2}.
        adj = adj + sp.eye(adj.shape[0])
        D1 = np.array(adj.sum(axis=1))**(-0.5)
        D2 = np.array(adj.sum(axis=0))**(-0.5)
        D1 = sp.diags(D1[:, 0], format='csr')
        D2 = sp.diags(D2[0, :], format='csr')
        A = adj.dot(D1)
        A = D2.dot(A)
        adj = A
    return adj, features, labels, idx_train, idx_val, idx_test, idx_unlabel
def totensor(features, labels):
    """Convert dense features and one-hot labels into torch tensors."""
    feature_tensor = torch.FloatTensor(features)
    label_tensor = torch.LongTensor(np.argmax(labels, -1))
    return feature_tensor, label_tensor
def parse_index_file(filename):
    """Parse index file.

    Reads one integer per line and returns them as a list.
    Fix: the file is opened in a `with` block so the handle is closed
    deterministically instead of leaking until garbage collection.
    """
    with open(filename) as handle:
        return [int(line.strip()) for line in handle]
def accuracy(output, labels):
    """Fraction of rows whose argmax prediction matches `labels`."""
    predictions = output.max(1)[1].type_as(labels)
    hits = predictions.eq(labels).double().sum()
    return hits / len(labels)
def normalize(mx):
    """Row-normalize sparse matrix"""
    row_sums = np.array(mx.sum(1))
    inv = np.power(row_sums, -1).flatten()
    # Zero rows produce inf; map them back to zero instead.
    inv[np.isinf(inv)] = 0.
    return sp.diags(inv).dot(mx)
def col_normalize(mx):
    """Column-normalize sparse matrix"""
    # Standardize each feature column (zero mean, unit variance).
    return StandardScaler().fit_transform(mx)
| 7,494 | 39.295699 | 141 | py |
BiGCN | BiGCN-master/tools/earlystopping2class.py | import numpy as np
import torch
class EarlyStopping:
    """Early stops the training if validation loss doesn't improve after a given patience."""

    def __init__(self, patience=7, verbose=False):
        """
        Args:
            patience (int): How long to wait after last time validation loss improved.
                            Default: 7
            verbose (bool): If True, prints a message for each validation loss improvement.
                            Default: False
        """
        self.patience = patience
        self.verbose = verbose
        self.counter = 0
        self.best_score = None
        self.early_stop = False
        self.accs = 0
        self.F1 = 0
        self.F2 = 0
        self.F3 = 0
        self.F4 = 0
        self.val_loss_min = np.Inf

    def __call__(self, val_loss, accs, acc1, acc2, pre1, pre2, rec1, rec2, F1, F2, model, modelname, str):
        score = -val_loss
        if self.best_score is None or score >= self.best_score:
            # New best (or first) validation loss: remember every metric and
            # checkpoint the model.
            self.best_score = score
            self.accs = accs
            self.acc1 = acc1
            self.acc2 = acc2
            self.pre1 = pre1
            self.pre2 = pre2
            self.rec1 = rec1
            self.rec2 = rec2
            self.F1 = F1
            self.F2 = F2
            self.save_checkpoint(val_loss, model, modelname, str)
            self.counter = 0
        else:
            self.counter += 1
            if self.counter >= self.patience:
                self.early_stop = True
                print("BEST LOSS:{:.4f}| Accuracy: {:.4f}|acc1: {:.4f}|acc2: {:.4f}|pre1: {:.4f}|pre2: {:.4f}"
                      "|rec1: {:.4f}|rec2: {:.4f}|F1: {:.4f}|F2: {:.4f}"
                      .format(-self.best_score, self.accs, self.acc1, self.acc2, self.pre1, self.pre2, self.rec1, self.rec2, self.F1, self.F2))

    def save_checkpoint(self, val_loss, model, modelname, str):
        '''Saves model when validation loss decrease.'''
        torch.save(model.state_dict(), modelname + str + '.m')
        self.val_loss_min = val_loss
BiGCN | BiGCN-master/tools/earlystopping.py | import numpy as np
import torch
class EarlyStopping:
    """Early stops the training if validation loss doesn't improve after a given patience."""

    def __init__(self, patience=7, verbose=False):
        """
        Args:
            patience (int): How long to wait after last time validation loss improved.
                            Default: 7
            verbose (bool): If True, prints a message for each validation loss improvement.
                            Default: False
        """
        self.patience = patience
        self.verbose = verbose
        self.counter = 0
        self.best_score = None
        self.early_stop = False
        self.accs = 0
        self.F1 = 0
        self.F2 = 0
        self.F3 = 0
        self.F4 = 0
        self.val_loss_min = np.Inf

    def __call__(self, val_loss, accs, F1, F2, F3, F4, model, modelname, str):
        score = -val_loss
        if self.best_score is None or score >= self.best_score:
            # New best (or first) validation loss: remember the per-class F1
            # scores and checkpoint the model.
            self.best_score = score
            self.accs = accs
            self.F1 = F1
            self.F2 = F2
            self.F3 = F3
            self.F4 = F4
            self.save_checkpoint(val_loss, model, modelname, str)
            self.counter = 0
        else:
            self.counter += 1
            if self.counter >= self.patience:
                self.early_stop = True
                print("BEST Accuracy: {:.4f}|NR F1: {:.4f}|FR F1: {:.4f}|TR F1: {:.4f}|UR F1: {:.4f}"
                      .format(self.accs, self.F1, self.F2, self.F3, self.F4))

    def save_checkpoint(self, val_loss, model, modelname, str):
        '''Saves model when validation loss decrease.'''
        torch.save(model.state_dict(), modelname + str + '.m')
        self.val_loss_min = val_loss
BiGCN | BiGCN-master/Process/dataset.py | import os
import numpy as np
import torch
import random
from torch.utils.data import Dataset
from torch_geometric.data import Data
class GraphDataset(Dataset):
    """Rumor propagation graphs, loaded lazily from per-event .npz files."""

    def __init__(self, fold_x, treeDic, lower=2, upper=100000, droprate=0,
                 data_path=os.path.join('..', '..', 'data', 'Weibograph')):
        # Keep only event ids whose propagation tree exists and whose size
        # falls in [lower, upper].
        self.fold_x = [eid for eid in fold_x
                       if eid in treeDic and lower <= len(treeDic[eid]) <= upper]
        self.treeDic = treeDic
        self.data_path = data_path
        self.droprate = droprate

    def __len__(self):
        return len(self.fold_x)

    def __getitem__(self, index):
        eid = self.fold_x[index]
        data = np.load(os.path.join(self.data_path, eid + ".npz"), allow_pickle=True)
        edgeindex = data['edgeindex']
        if self.droprate > 0:
            # DropEdge: keep a random (1 - droprate) fraction of the edges,
            # preserving their original order.
            src = list(edgeindex[0])
            dst = list(edgeindex[1])
            keep = random.sample(range(len(src)), int(len(src) * (1 - self.droprate)))
            keep = sorted(keep)
            new_edgeindex = [list(np.array(src)[keep]), list(np.array(dst)[keep])]
        else:
            new_edgeindex = edgeindex
        return Data(x=torch.tensor(data['x'], dtype=torch.float32),
                    edge_index=torch.LongTensor(new_edgeindex),
                    y=torch.LongTensor([int(data['y'])]),
                    root=torch.LongTensor(data['root']),
                    rootindex=torch.LongTensor([int(data['rootindex'])]))
def collate_fn(data):
    """Identity collate: hand the sampled batch through unchanged."""
    return data
class BiGraphDataset(Dataset):
    """Graphs with both top-down and bottom-up edge sets (for BiGCN)."""

    def __init__(self, fold_x, treeDic, lower=2, upper=100000, tddroprate=0, budroprate=0,
                 data_path=os.path.join('..', '..', 'data', 'Weibograph')):
        # Keep only event ids with a known tree of size in [lower, upper].
        self.fold_x = [eid for eid in fold_x
                       if eid in treeDic and lower <= len(treeDic[eid]) <= upper]
        self.treeDic = treeDic
        self.data_path = data_path
        self.tddroprate = tddroprate
        self.budroprate = budroprate

    def __len__(self):
        return len(self.fold_x)

    def __getitem__(self, index):
        eid = self.fold_x[index]
        data = np.load(os.path.join(self.data_path, eid + ".npz"), allow_pickle=True)
        edgeindex = data['edgeindex']
        # Top-down edges (stored direction), optionally edge-dropped.
        if self.tddroprate > 0:
            src = list(edgeindex[0])
            dst = list(edgeindex[1])
            keep = random.sample(range(len(src)), int(len(src) * (1 - self.tddroprate)))
            keep = sorted(keep)
            new_edgeindex = [list(np.array(src)[keep]), list(np.array(dst)[keep])]
        else:
            new_edgeindex = edgeindex
        # Bottom-up edges: reversed direction, dropped independently.
        burow = list(edgeindex[1])
        bucol = list(edgeindex[0])
        if self.budroprate > 0:
            keep = random.sample(range(len(burow)), int(len(burow) * (1 - self.budroprate)))
            keep = sorted(keep)
            bunew_edgeindex = [list(np.array(burow)[keep]), list(np.array(bucol)[keep])]
        else:
            bunew_edgeindex = [burow, bucol]
        return Data(x=torch.tensor(data['x'], dtype=torch.float32),
                    edge_index=torch.LongTensor(new_edgeindex),
                    BU_edge_index=torch.LongTensor(bunew_edgeindex),
                    y=torch.LongTensor([int(data['y'])]),
                    root=torch.LongTensor(data['root']),
                    rootindex=torch.LongTensor([int(data['rootindex'])]))
class UdGraphDataset(Dataset):
    """Undirected variant: both edge directions merged into one edge set.

    Bug fix: `new_edgeindex` was only assigned inside the `droprate > 0`
    branch, so `__getitem__` raised NameError whenever droprate == 0
    (unlike GraphDataset, which has an `else`). The undropped merged edge
    list is now returned in that case.
    """

    def __init__(self, fold_x, treeDic, lower=2, upper=100000, droprate=0,
                 data_path=os.path.join('..', '..', 'data', 'Weibograph')):
        # Keep only event ids with a known tree of size in [lower, upper].
        self.fold_x = list(filter(lambda id: id in treeDic and len(treeDic[id]) >= lower and len(treeDic[id]) <= upper, fold_x))
        self.treeDic = treeDic
        self.data_path = data_path
        self.droprate = droprate

    def __len__(self):
        return len(self.fold_x)

    def __getitem__(self, index):
        id = self.fold_x[index]
        data = np.load(os.path.join(self.data_path, id + ".npz"), allow_pickle=True)
        edgeindex = data['edgeindex']
        # Symmetrize: append the reversed edges to obtain an undirected graph.
        row = list(edgeindex[0])
        col = list(edgeindex[1])
        burow = list(edgeindex[1])
        bucol = list(edgeindex[0])
        row.extend(burow)
        col.extend(bucol)
        if self.droprate > 0:
            # DropEdge on the merged edge list.
            length = len(row)
            poslist = random.sample(range(length), int(length * (1 - self.droprate)))
            poslist = sorted(poslist)
            row = list(np.array(row)[poslist])
            col = list(np.array(col)[poslist])
        # FIX: assign unconditionally so droprate == 0 no longer crashes.
        new_edgeindex = [row, col]
        return Data(x=torch.tensor(data['x'], dtype=torch.float32),
                    edge_index=torch.LongTensor(new_edgeindex),
                    y=torch.LongTensor([int(data['y'])]),
                    root=torch.LongTensor(data['root']),
                    rootindex=torch.LongTensor([int(data['rootindex'])]))
| 4,936 | 40.141667 | 128 | py |
BiGCN | BiGCN-master/model/Weibo/BiGCN_Weibo.py | import sys,os
sys.path.append(os.getcwd())
from Process.process import *
import torch as th
from torch_scatter import scatter_mean
import torch.nn.functional as F
import numpy as np
from tools.earlystopping2class import EarlyStopping
from torch_geometric.data import DataLoader
from tqdm import tqdm
from Process.rand5fold import *
from tools.evaluate import *
from torch_geometric.nn import GCNConv
import copy
class TDrumorGCN(th.nn.Module):
    """Top-down GCN branch: two GCN layers with root-feature enhancement."""

    def __init__(self, in_feats, hid_feats, out_feats):
        super(TDrumorGCN, self).__init__()
        self.conv1 = GCNConv(in_feats, hid_feats)
        # Second layer consumes the hidden state concatenated with the
        # root's raw input features.
        self.conv2 = GCNConv(hid_feats + in_feats, out_feats)

    def forward(self, data):
        x, edge_index = data.x, data.edge_index
        raw = copy.copy(x.float())
        x = self.conv1(x, edge_index)
        hidden1 = copy.copy(x)
        rootindex = data.rootindex
        batch_size = max(data.batch) + 1
        # Concatenate every node with its tree root's *input* features.
        root_extend = th.zeros(len(data.batch), raw.size(1)).to(device)
        for b in range(batch_size):
            mask = th.eq(data.batch, b)
            root_extend[mask] = raw[rootindex[b]]
        x = th.cat((x, root_extend), 1)
        x = F.relu(x)
        x = F.dropout(x, training=self.training)
        x = self.conv2(x, edge_index)
        x = F.relu(x)
        # Concatenate again with the root's first-layer *hidden* features.
        root_extend = th.zeros(len(data.batch), hidden1.size(1)).to(device)
        for b in range(batch_size):
            mask = th.eq(data.batch, b)
            root_extend[mask] = hidden1[rootindex[b]]
        x = th.cat((x, root_extend), 1)
        # Mean-pool node embeddings per graph.
        return scatter_mean(x, data.batch, dim=0)
class BUrumorGCN(th.nn.Module):
    """Bottom-up GCN branch: same as TDrumorGCN but on reversed edges."""

    def __init__(self, in_feats, hid_feats, out_feats):
        super(BUrumorGCN, self).__init__()
        self.conv1 = GCNConv(in_feats, hid_feats)
        # Second layer consumes the hidden state concatenated with the
        # root's raw input features.
        self.conv2 = GCNConv(hid_feats + in_feats, out_feats)

    def forward(self, data):
        x, edge_index = data.x, data.BU_edge_index
        raw = copy.copy(x.float())
        x = self.conv1(x, edge_index)
        hidden1 = copy.copy(x)
        rootindex = data.rootindex
        batch_size = max(data.batch) + 1
        # Concatenate every node with its tree root's *input* features.
        root_extend = th.zeros(len(data.batch), raw.size(1)).to(device)
        for b in range(batch_size):
            mask = th.eq(data.batch, b)
            root_extend[mask] = raw[rootindex[b]]
        x = th.cat((x, root_extend), 1)
        x = F.relu(x)
        x = F.dropout(x, training=self.training)
        x = self.conv2(x, edge_index)
        x = F.relu(x)
        # Concatenate again with the root's first-layer *hidden* features.
        root_extend = th.zeros(len(data.batch), hidden1.size(1)).to(device)
        for b in range(batch_size):
            mask = th.eq(data.batch, b)
            root_extend[mask] = hidden1[rootindex[b]]
        x = th.cat((x, root_extend), 1)
        # Mean-pool node embeddings per graph.
        return scatter_mean(x, data.batch, dim=0)
class Net(th.nn.Module):
    """BiGCN rumor classifier: fuses top-down and bottom-up branch outputs."""

    def __init__(self, in_feats, hid_feats, out_feats):
        super(Net, self).__init__()
        self.TDrumorGCN = TDrumorGCN(in_feats, hid_feats, out_feats)
        self.BUrumorGCN = BUrumorGCN(in_feats, hid_feats, out_feats)
        # Each branch emits out_feats + hid_feats features per graph.
        self.fc = th.nn.Linear((out_feats + hid_feats) * 2, 2)

    def forward(self, data):
        td = self.TDrumorGCN(data)
        bu = self.BUrumorGCN(data)
        fused = th.cat((bu, td), 1)
        return F.log_softmax(self.fc(fused), dim=1)
def train_GCN(treeDic, x_test, x_train,TDdroprate,BUdroprate,lr, weight_decay,patience,n_epochs,batchsize,dataname,iter):
    """Train and evaluate a BiGCN model on one Weibo train/test fold.

    Returns (train_losses, val_losses, train_accs, val_accs, accs, acc1, pre1,
    rec1, F1, acc2, pre2, rec2, F2) where the per-class metrics come either
    from the last epoch or from the early-stopping cache.
    """
    model = Net(5000,64,64).to(device)
    # The bottom-up branch's conv layers are optimized with a 5x smaller
    # learning rate than the rest of the model.
    BU_params = list(map(id, model.BUrumorGCN.conv1.parameters()))
    BU_params += list(map(id, model.BUrumorGCN.conv2.parameters()))
    base_params = filter(lambda p: id(p) not in BU_params, model.parameters())
    optimizer = th.optim.Adam([
        {'params': base_params},
        {'params': model.BUrumorGCN.conv1.parameters(), 'lr': lr / 5},
        {'params': model.BUrumorGCN.conv2.parameters(), 'lr': lr / 5}
    ], lr=lr, weight_decay=weight_decay)
    train_losses, val_losses, train_accs, val_accs = [], [], [], []
    early_stopping = EarlyStopping(patience=patience, verbose=True)
    for epoch in range(n_epochs):
        # BUG FIX: re-enable training mode at the start of every epoch. The
        # original called model.train() once before the loop, so after the
        # first epoch's model.eval() dropout stayed disabled for the rest of
        # training.
        model.train()
        # Data is re-sampled every epoch so the edge-dropout masks differ.
        traindata_list, testdata_list = loadBiData(dataname, treeDic, x_train, x_test, TDdroprate, BUdroprate)
        train_loader = DataLoader(traindata_list, batch_size=batchsize,
                                  shuffle=False, num_workers=10)
        test_loader = DataLoader(testdata_list, batch_size=batchsize,
                                 shuffle=True, num_workers=10)
        avg_loss, avg_acc = [], []
        batch_idx = 0
        tqdm_train_loader = tqdm(train_loader)
        for Batch_data in tqdm_train_loader:
            Batch_data.to(device)
            out_labels = model(Batch_data)
            loss = F.nll_loss(out_labels, Batch_data.y)
            optimizer.zero_grad()
            loss.backward()
            avg_loss.append(loss.item())
            optimizer.step()
            _, pred = out_labels.max(dim=-1)
            correct = pred.eq(Batch_data.y).sum().item()
            train_acc = correct / len(Batch_data.y)
            avg_acc.append(train_acc)
            postfix = "Iter {:03d} | Epoch {:05d} | Batch{:02d} | Train_Loss {:.4f}| Train_Accuracy {:.4f}".format(
                iter, epoch, batch_idx, loss.item(), train_acc)
            tqdm_train_loader.set_postfix_str(postfix)
            batch_idx = batch_idx + 1
        train_losses.append(np.mean(avg_loss))
        train_accs.append(np.mean(avg_acc))
        # Per-class validation accumulators (two classes for Weibo).
        temp_val_losses, temp_val_accs = [], []
        temp_val_Acc_all = []
        temp_val_Acc1, temp_val_Prec1, temp_val_Recll1, temp_val_F1 = [], [], [], []
        temp_val_Acc2, temp_val_Prec2, temp_val_Recll2, temp_val_F2 = [], [], [], []
        model.eval()
        tqdm_test_loader = tqdm(test_loader)
        for Batch_data in tqdm_test_loader:
            Batch_data.to(device)
            val_out = model(Batch_data)
            val_loss = F.nll_loss(val_out, Batch_data.y)
            temp_val_losses.append(val_loss.item())
            _, val_pred = val_out.max(dim=1)
            correct = val_pred.eq(Batch_data.y).sum().item()
            val_acc = correct / len(Batch_data.y)
            Acc_all, Acc1, Prec1, Recll1, F1, Acc2, Prec2, Recll2, F2 = evaluationclass(
                val_pred, Batch_data.y)
            temp_val_Acc_all.append(Acc_all)
            temp_val_Acc1.append(Acc1)
            temp_val_Prec1.append(Prec1)
            temp_val_Recll1.append(Recll1)
            temp_val_F1.append(F1)
            temp_val_Acc2.append(Acc2)
            temp_val_Prec2.append(Prec2)
            temp_val_Recll2.append(Recll2)
            temp_val_F2.append(F2)
            temp_val_accs.append(val_acc)
        val_losses.append(np.mean(temp_val_losses))
        val_accs.append(np.mean(temp_val_accs))
        print("Epoch {:05d} | Val_Loss {:.4f}| Val_Accuracy {:.4f}".format(epoch, np.mean(temp_val_losses),
                                                                           np.mean(temp_val_accs)))
        res = ['acc:{:.4f}'.format(np.mean(temp_val_Acc_all)),
               'C1:{:.4f},{:.4f},{:.4f},{:.4f}'.format(np.mean(temp_val_Acc1), np.mean(temp_val_Prec1),
                                                       np.mean(temp_val_Recll1), np.mean(temp_val_F1)),
               'C2:{:.4f},{:.4f},{:.4f},{:.4f}'.format(np.mean(temp_val_Acc2), np.mean(temp_val_Prec2),
                                                       np.mean(temp_val_Recll2), np.mean(temp_val_F2))]
        print('results:', res)
        # Early stopping monitors validation loss and caches the best metrics.
        early_stopping(np.mean(temp_val_losses), np.mean(temp_val_Acc_all), np.mean(temp_val_Acc1),
                       np.mean(temp_val_Acc2), np.mean(temp_val_Prec1),
                       np.mean(temp_val_Prec2), np.mean(temp_val_Recll1), np.mean(temp_val_Recll2),
                       np.mean(temp_val_F1),
                       np.mean(temp_val_F2), model, 'BiGCN', "weibo")
        accs = np.mean(temp_val_Acc_all)
        acc1 = np.mean(temp_val_Acc1)
        acc2 = np.mean(temp_val_Acc2)
        pre1 = np.mean(temp_val_Prec1)
        pre2 = np.mean(temp_val_Prec2)
        rec1 = np.mean(temp_val_Recll1)
        rec2 = np.mean(temp_val_Recll2)
        F1 = np.mean(temp_val_F1)
        F2 = np.mean(temp_val_F2)
        if early_stopping.early_stop:
            print("Early stopping")
            # Report the metrics cached at the best-validation-loss epoch.
            accs = early_stopping.accs
            acc1 = early_stopping.acc1
            acc2 = early_stopping.acc2
            pre1 = early_stopping.pre1
            pre2 = early_stopping.pre2
            rec1 = early_stopping.rec1
            rec2 = early_stopping.rec2
            F1 = early_stopping.F1
            F2 = early_stopping.F2
            break
    return train_losses, val_losses, train_accs, val_accs, accs, acc1, pre1, rec1, F1, acc2, pre2, rec2, F2
# Hyper-parameters and experiment configuration (Weibo, binary classification).
lr = 0.0005
weight_decay = 1e-4
patience = 10            # early-stopping patience, in epochs
n_epochs = 200
batchsize = 16
tddroprate = 0           # no edge dropout on the top-down tree
budroprate = 0           # no edge dropout on the bottom-up tree
datasetname = "Weibo"
iterations = int(sys.argv[1])
model = "BiGCN"
device = th.device('cuda:1' if th.cuda.is_available() else 'cpu')
test_accs, ACC1, ACC2, PRE1, PRE2, REC1, REC2, F1, F2 = [], [], [], [], [], [], [], [], []
for iter in range(iterations):
    # load5foldData returns (test, train) pairs for 5 folds, flattened:
    # fold k lives at indices 2k (test) and 2k+1 (train).
    folds = load5foldData(datasetname)
    treeDic = loadTree(datasetname)
    # Run the 5 folds and average their metrics. This refactors the original
    # five copy-pasted train_GCN calls into a loop; the computation is the same.
    # Per-fold metric sums, in return order:
    # accs, acc1, pre1, rec1, F1, acc2, pre2, rec2, F2
    sums = [0.0] * 9
    for k in range(5):
        results = train_GCN(treeDic, folds[2 * k], folds[2 * k + 1],
                            tddroprate, budroprate, lr, weight_decay,
                            patience, n_epochs, batchsize, datasetname, iter)
        for j, v in enumerate(results[4:]):
            sums[j] += v
    test_accs.append(sums[0] / 5)
    ACC1.append(sums[1] / 5)
    PRE1.append(sums[2] / 5)
    REC1.append(sums[3] / 5)
    F1.append(sums[4] / 5)
    ACC2.append(sums[5] / 5)
    PRE2.append(sums[6] / 5)
    REC2.append(sums[7] / 5)
    F2.append(sums[8] / 5)
print("weibo:|Total_Test_ Accuracy: {:.4f}|acc1: {:.4f}|acc2: {:.4f}|pre1: {:.4f}|pre2: {:.4f}"
      "|rec1: {:.4f}|rec2: {:.4f}|F1: {:.4f}|F2: {:.4f}".format(
          sum(test_accs) / iterations, sum(ACC1) / iterations,
          sum(ACC2) / iterations, sum(PRE1) / iterations, sum(PRE2) / iterations,
          sum(REC1) / iterations, sum(REC2) / iterations,
          sum(F1) / iterations, sum(F2) / iterations))
| 16,447 | 58.379061 | 168 | py |
BiGCN | BiGCN-master/model/Twitter/BiGCN_Twitter.py | import sys,os
sys.path.append(os.getcwd())
from Process.process import *
import torch as th
from torch_scatter import scatter_mean
import torch.nn.functional as F
import numpy as np
from tools.earlystopping import EarlyStopping
from torch_geometric.data import DataLoader
from tqdm import tqdm
from Process.rand5fold import *
from tools.evaluate import *
from torch_geometric.nn import GCNConv
import copy
class TDrumorGCN(th.nn.Module):
    """Top-down rumor GCN branch.

    Two-layer GCN over the propagation tree (``data.edge_index``), with
    "root feature enhancement": the source post's features are concatenated
    to every node before each pooling step.
    """
    def __init__(self,in_feats,hid_feats,out_feats):
        super(TDrumorGCN, self).__init__()
        self.conv1 = GCNConv(in_feats, hid_feats)
        # Second conv consumes hid_feats + in_feats because the raw root
        # features are concatenated to the first conv's output.
        self.conv2 = GCNConv(hid_feats+in_feats, out_feats)
    def forward(self, data):
        x, edge_index = data.x, data.edge_index
        x1=copy.copy(x.float())            # raw input features, kept for root enhancement
        x = self.conv1(x, edge_index)
        x2=copy.copy(x)                    # first-layer output, kept for root enhancement
        rootindex = data.rootindex
        # Broadcast each graph's root-node raw features to all of its nodes.
        root_extend = th.zeros(len(data.batch), x1.size(1)).to(device)
        batch_size = max(data.batch) + 1
        for num_batch in range(batch_size):
            index = (th.eq(data.batch, num_batch))
            root_extend[index] = x1[rootindex[num_batch]]
        x = th.cat((x,root_extend), 1)
        x = F.relu(x)
        x = F.dropout(x, training=self.training)
        x = self.conv2(x, edge_index)
        x = F.relu(x)
        # Second root enhancement, this time with the first conv's output.
        root_extend = th.zeros(len(data.batch), x2.size(1)).to(device)
        for num_batch in range(batch_size):
            index = (th.eq(data.batch, num_batch))
            root_extend[index] = x2[rootindex[num_batch]]
        x = th.cat((x,root_extend), 1)
        # Mean-pool node features per graph -> one vector per tree.
        x= scatter_mean(x, data.batch, dim=0)
        return x
class BUrumorGCN(th.nn.Module):
    """Bottom-up rumor GCN branch: same structure as TDrumorGCN but runs on
    the reversed (child -> parent) edges ``data.BU_edge_index``."""
    def __init__(self,in_feats,hid_feats,out_feats):
        super(BUrumorGCN, self).__init__()
        self.conv1 = GCNConv(in_feats, hid_feats)
        # hid_feats + in_feats: raw root features are concatenated before conv2.
        self.conv2 = GCNConv(hid_feats+in_feats, out_feats)
    def forward(self, data):
        x, edge_index = data.x, data.BU_edge_index
        x1 = copy.copy(x.float())          # raw input features, for root enhancement
        x = self.conv1(x, edge_index)
        x2 = copy.copy(x)                  # first-layer output, for root enhancement
        rootindex = data.rootindex
        # Broadcast each graph's root-node raw features to all of its nodes.
        root_extend = th.zeros(len(data.batch), x1.size(1)).to(device)
        batch_size = max(data.batch) + 1
        for num_batch in range(batch_size):
            index = (th.eq(data.batch, num_batch))
            root_extend[index] = x1[rootindex[num_batch]]
        x = th.cat((x,root_extend), 1)
        x = F.relu(x)
        x = F.dropout(x, training=self.training)
        x = self.conv2(x, edge_index)
        x = F.relu(x)
        # Second root enhancement with the first conv's output.
        root_extend = th.zeros(len(data.batch), x2.size(1)).to(device)
        for num_batch in range(batch_size):
            index = (th.eq(data.batch, num_batch))
            root_extend[index] = x2[rootindex[num_batch]]
        x = th.cat((x,root_extend), 1)
        # Mean-pool node features per graph -> one vector per tree.
        x= scatter_mean(x, data.batch, dim=0)
        return x
class Net(th.nn.Module):
    """BiGCN classifier (Twitter): concatenates the top-down and bottom-up
    branch embeddings and maps them to 4-class log-probabilities
    (non-rumor / false / true / unverified)."""
    def __init__(self,in_feats,hid_feats,out_feats):
        super(Net, self).__init__()
        self.TDrumorGCN = TDrumorGCN(in_feats, hid_feats, out_feats)
        self.BUrumorGCN = BUrumorGCN(in_feats, hid_feats, out_feats)
        # Each branch outputs out_feats + hid_feats features (root enhancement),
        # hence the *2 after concatenation; 4 output classes for Twitter.
        self.fc=th.nn.Linear((out_feats+hid_feats)*2,4)
    def forward(self, data):
        TD_x = self.TDrumorGCN(data)
        BU_x = self.BUrumorGCN(data)
        x = th.cat((BU_x,TD_x), 1)
        x=self.fc(x)
        # Log-probabilities, to be consumed by F.nll_loss.
        x = F.log_softmax(x, dim=1)
        return x
def train_GCN(treeDic, x_test, x_train,TDdroprate,BUdroprate,lr, weight_decay,patience,n_epochs,batchsize,dataname,iter):
    """Train and evaluate a 4-class BiGCN model on one Twitter train/test fold.

    Returns (train_losses, val_losses, train_accs, val_accs, accs, F1, F2,
    F3, F4) where the metrics come either from the last epoch or from the
    early-stopping cache.
    """
    model = Net(5000,64,64).to(device)
    # The bottom-up branch's conv layers are optimized with a 5x smaller
    # learning rate than the rest of the model.
    BU_params = list(map(id, model.BUrumorGCN.conv1.parameters()))
    BU_params += list(map(id, model.BUrumorGCN.conv2.parameters()))
    base_params = filter(lambda p: id(p) not in BU_params, model.parameters())
    optimizer = th.optim.Adam([
        {'params': base_params},
        {'params': model.BUrumorGCN.conv1.parameters(), 'lr': lr / 5},
        {'params': model.BUrumorGCN.conv2.parameters(), 'lr': lr / 5}
    ], lr=lr, weight_decay=weight_decay)
    train_losses = []
    val_losses = []
    train_accs = []
    val_accs = []
    early_stopping = EarlyStopping(patience=patience, verbose=True)
    for epoch in range(n_epochs):
        # BUG FIX: re-enable training mode at the start of every epoch. The
        # original called model.train() once before the loop, so after the
        # first epoch's model.eval() dropout stayed disabled for the rest of
        # training.
        model.train()
        # Data is re-sampled every epoch so the edge-dropout masks differ.
        traindata_list, testdata_list = loadBiData(dataname, treeDic, x_train, x_test, TDdroprate, BUdroprate)
        train_loader = DataLoader(traindata_list, batch_size=batchsize, shuffle=True, num_workers=5)
        test_loader = DataLoader(testdata_list, batch_size=batchsize, shuffle=True, num_workers=5)
        avg_loss = []
        avg_acc = []
        batch_idx = 0
        tqdm_train_loader = tqdm(train_loader)
        for Batch_data in tqdm_train_loader:
            Batch_data.to(device)
            out_labels = model(Batch_data)
            finalloss = F.nll_loss(out_labels, Batch_data.y)
            loss = finalloss
            optimizer.zero_grad()
            loss.backward()
            avg_loss.append(loss.item())
            optimizer.step()
            _, pred = out_labels.max(dim=-1)
            correct = pred.eq(Batch_data.y).sum().item()
            train_acc = correct / len(Batch_data.y)
            avg_acc.append(train_acc)
            print("Iter {:03d} | Epoch {:05d} | Batch{:02d} | Train_Loss {:.4f}| Train_Accuracy {:.4f}".format(
                iter, epoch, batch_idx, loss.item(), train_acc))
            batch_idx = batch_idx + 1
        train_losses.append(np.mean(avg_loss))
        train_accs.append(np.mean(avg_acc))
        # Per-class validation accumulators (NR / FR / TR / UR).
        temp_val_losses = []
        temp_val_accs = []
        temp_val_Acc_all = []
        temp_val_Acc1, temp_val_Prec1, temp_val_Recll1, temp_val_F1 = [], [], [], []
        temp_val_Acc2, temp_val_Prec2, temp_val_Recll2, temp_val_F2 = [], [], [], []
        temp_val_Acc3, temp_val_Prec3, temp_val_Recll3, temp_val_F3 = [], [], [], []
        temp_val_Acc4, temp_val_Prec4, temp_val_Recll4, temp_val_F4 = [], [], [], []
        model.eval()
        tqdm_test_loader = tqdm(test_loader)
        for Batch_data in tqdm_test_loader:
            Batch_data.to(device)
            val_out = model(Batch_data)
            val_loss = F.nll_loss(val_out, Batch_data.y)
            temp_val_losses.append(val_loss.item())
            _, val_pred = val_out.max(dim=1)
            correct = val_pred.eq(Batch_data.y).sum().item()
            val_acc = correct / len(Batch_data.y)
            Acc_all, Acc1, Prec1, Recll1, F1, Acc2, Prec2, Recll2, F2, Acc3, Prec3, Recll3, F3, Acc4, Prec4, Recll4, F4 = evaluation4class(
                val_pred, Batch_data.y)
            temp_val_Acc_all.append(Acc_all)
            temp_val_Acc1.append(Acc1)
            temp_val_Prec1.append(Prec1)
            temp_val_Recll1.append(Recll1)
            temp_val_F1.append(F1)
            temp_val_Acc2.append(Acc2)
            temp_val_Prec2.append(Prec2)
            temp_val_Recll2.append(Recll2)
            temp_val_F2.append(F2)
            temp_val_Acc3.append(Acc3)
            temp_val_Prec3.append(Prec3)
            temp_val_Recll3.append(Recll3)
            temp_val_F3.append(F3)
            temp_val_Acc4.append(Acc4)
            temp_val_Prec4.append(Prec4)
            temp_val_Recll4.append(Recll4)
            temp_val_F4.append(F4)
            temp_val_accs.append(val_acc)
        val_losses.append(np.mean(temp_val_losses))
        val_accs.append(np.mean(temp_val_accs))
        print("Epoch {:05d} | Val_Loss {:.4f}| Val_Accuracy {:.4f}".format(epoch, np.mean(temp_val_losses),
                                                                           np.mean(temp_val_accs)))
        res = ['acc:{:.4f}'.format(np.mean(temp_val_Acc_all)),
               'C1:{:.4f},{:.4f},{:.4f},{:.4f}'.format(np.mean(temp_val_Acc1), np.mean(temp_val_Prec1),
                                                       np.mean(temp_val_Recll1), np.mean(temp_val_F1)),
               'C2:{:.4f},{:.4f},{:.4f},{:.4f}'.format(np.mean(temp_val_Acc2), np.mean(temp_val_Prec2),
                                                       np.mean(temp_val_Recll2), np.mean(temp_val_F2)),
               'C3:{:.4f},{:.4f},{:.4f},{:.4f}'.format(np.mean(temp_val_Acc3), np.mean(temp_val_Prec3),
                                                       np.mean(temp_val_Recll3), np.mean(temp_val_F3)),
               'C4:{:.4f},{:.4f},{:.4f},{:.4f}'.format(np.mean(temp_val_Acc4), np.mean(temp_val_Prec4),
                                                       np.mean(temp_val_Recll4), np.mean(temp_val_F4))]
        print('results:', res)
        # Early stopping monitors validation loss and caches the best metrics.
        early_stopping(np.mean(temp_val_losses), np.mean(temp_val_accs), np.mean(temp_val_F1), np.mean(temp_val_F2),
                       np.mean(temp_val_F3), np.mean(temp_val_F4), model, 'BiGCN', dataname)
        accs = np.mean(temp_val_accs)
        F1 = np.mean(temp_val_F1)
        F2 = np.mean(temp_val_F2)
        F3 = np.mean(temp_val_F3)
        F4 = np.mean(temp_val_F4)
        if early_stopping.early_stop:
            print("Early stopping")
            # Report the metrics cached at the best-validation-loss epoch.
            accs = early_stopping.accs
            F1 = early_stopping.F1
            F2 = early_stopping.F2
            F3 = early_stopping.F3
            F4 = early_stopping.F4
            break
    return train_losses, val_losses, train_accs, val_accs, accs, F1, F2, F3, F4
# Hyper-parameters and experiment configuration (Twitter, 4-class).
lr = 0.0005
weight_decay = 1e-4
patience = 10            # early-stopping patience, in epochs
n_epochs = 200
batchsize = 128
TDdroprate = 0.2         # edge dropout on the top-down tree
BUdroprate = 0.2         # edge dropout on the bottom-up tree
datasetname = sys.argv[1]  # "Twitter15" or "Twitter16"
iterations = int(sys.argv[2])
model = "GCN"
device = th.device('cuda:0' if th.cuda.is_available() else 'cpu')
test_accs = []
NR_F1 = []
FR_F1 = []
TR_F1 = []
UR_F1 = []
for iter in range(iterations):
    # load5foldData returns (test, train) pairs for 5 folds, flattened:
    # fold k lives at indices 2k (test) and 2k+1 (train).
    folds = load5foldData(datasetname)
    treeDic = loadTree(datasetname)
    # Run the 5 folds and average their metrics. This refactors the original
    # five copy-pasted train_GCN calls into a loop; the computation is the same.
    # Per-fold metric sums, in return order: accs, F1 (NR), F2 (FR), F3 (TR), F4 (UR)
    sums = [0.0] * 5
    for k in range(5):
        results = train_GCN(treeDic, folds[2 * k], folds[2 * k + 1],
                            TDdroprate, BUdroprate, lr, weight_decay,
                            patience, n_epochs, batchsize, datasetname, iter)
        for j, v in enumerate(results[4:]):
            sums[j] += v
    test_accs.append(sums[0] / 5)
    NR_F1.append(sums[1] / 5)
    FR_F1.append(sums[2] / 5)
    TR_F1.append(sums[3] / 5)
    UR_F1.append(sums[4] / 5)
print("Total_Test_Accuracy: {:.4f}|NR F1: {:.4f}|FR F1: {:.4f}|TR F1: {:.4f}|UR F1: {:.4f}".format(
    sum(test_accs) / iterations, sum(NR_F1) / iterations, sum(FR_F1) / iterations,
    sum(TR_F1) / iterations, sum(UR_F1) / iterations))
| 15,749 | 54.65371 | 139 | py |
segodsidb | segodsidb-main/setup.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import setuptools
import unittest

# Long description for PyPI is taken verbatim from the README.
from os import path
this_directory = path.abspath(path.dirname(__file__))
readme_path = path.join(this_directory, 'README.md')
with open(readme_path, encoding='utf-8') as readme_file:
    long_description = readme_file.read()

# Subpackages of torchseg and where their sources live.
_packages = {
    'torchseg':               'src',
    'torchseg.base':          'src/base',
    'torchseg.config':        'src/config',
    'torchseg.data_loader':   'src/data_loader',
    'torchseg.logger':        'src/logger',
    'torchseg.model':         'src/model',
    'torchseg.machine':       'src/machine',
    'torchseg.utils':         'src/utils',
    'torchseg.visualization': 'src/visualization',
}

setuptools.setup(
    name='torchseg',
    version='0.1.0',
    description='Python module to segment hyperspectral dental images.',
    author='Luis C. Garcia-Peraza Herrera',
    author_email='luiscarlos.gph@gmail.com',
    license='MIT License',
    url='https://github.com/luiscarlosgph/dentalseg',
    packages=list(_packages.keys()),
    package_dir=_packages,
    install_requires=[
        'numpy',
        'opencv-python',
        'torch',
        'torchvision',
        'tensorboard',
        'tqdm',
        'tifffile',
        'monai',
        'pandas',
        'colour-science',
        'matplotlib',
    ],
    long_description=long_description,
    long_description_content_type='text/markdown',
    test_suite='test',
)
| 1,699 | 28.310345 | 73 | py |
segodsidb | segodsidb-main/src/generate_odsi_db_split.py | """
@brief Script that generates a convenient split of training and testing
images for the ODSI-DB dataset. Then you can split the training fold
into training and validation however you want, but this is expected to
be done in your dataloader.
@details A convenient dataset must have at least pixels of all the classes in all
the splits. An additional desirable feature is to have an even balance
of pixels per class in all the splits.
@author Luis Carlos Garcia Peraza Herrera (luiscarlos.gph@gmail.com).
@date 1 Jun 2021.
"""
import argparse
import numpy as np
import os
import shutil
import sys
import tqdm
import json
import random
# My imports
import torchseg.data_loader
import torchseg.utils
# Get dictionary from class index to class string, e.g. 0 -> u'Attached gingiva'
classnames = torchseg.data_loader.OdsiDbDataLoader.OdsiDbDataset.classnames
def help(short_option):
    """
    @brief Look up the help text for one command line flag.
    @param[in] short_option Short flag name, e.g. '-i'.
    @returns The string with the help information for each command line option.
    @raises KeyError if the flag is unknown.
    """
    # NOTE: this shadows the builtin help(); kept for compatibility with the
    # existing call sites in parse_cmdline_params().
    help_msg = {
        '-i': 'Path to the root of the ODSI-DB dataset (required: True)',
        '-o': 'Path to the output folder where the splits will be saved \
            (required: True)',
        '-t': 'Portion of images to be used for training (default: 0.9)',
        '-f': 'Number of cross-validation folds (default: 1).',
    }
    return help_msg[short_option]
def parse_cmdline_params():
    """Declare the command line interface and return the parsed options."""
    parser = argparse.ArgumentParser(description='PyTorch segmenter.')
    # Each flag's help text is provided by the module-level help() lookup.
    parser.add_argument('-i', '--input', required=True, type=str,
                        help=help('-i'))
    parser.add_argument('-o', '--output', required=True, type=str,
                        help=help('-o'))
    parser.add_argument('-t', '--train', default=0.9, type=float,
                        help=help('-t'))
    parser.add_argument('-f', '--folds', default=1, type=int,
                        help=help('-f'))
    return parser.parse_args()
def validate_cmdline_params(args):
    """Sanity-check the parsed CLI options.

    Raises RuntimeError when the input folder is missing or the output
    folder already exists; otherwise returns *args* unchanged.
    """
    input_is_dir = os.path.isdir(args.input)
    if not input_is_dir:
        raise RuntimeError('[ERROR] Input directory does not exist.')
    output_present = os.path.exists(args.output)
    if output_present:
        raise RuntimeError('[ERROR] Output directory already exists.')
    return args
def read_image(self, path, bands=np.linspace(450, 950, 51)):
    """
    @brief Load a hyperspectral TIFF and resample it to the requested bands.
    @details NOTE(review): this is a module-level function but takes an unused
             'self' first parameter -- it looks copy-pasted from a method;
             callers must pass a dummy value. Confirm before relying on it.
    @param[in] path String with the path to the hyperspectral image.
    @param[in] bands Wavelengths (nm) to extract; each is matched to the
               nearest available wavelength in the file.
    @returns an image with shape (C, H, W) containing values in the range
             [0, 1].
    """
    # Read raw TIFF file
    raw_tiff = torchseg.data_loader.read_stiff(path, silent=True,
                                               rgb_only=False)
    # In the ODSI-DB dataset there are images with 51 bands
    # (450-950nm, both inclusive) and images with 204 bands
    # (400-1000nm)
    # Create empty image
    h = raw_tiff[0].shape[0]
    w = raw_tiff[0].shape[1]
    chan = bands.shape[0]
    im = np.empty((chan, h, w), dtype=np.float32)
    # Populate image with the wanted bands (bands parameter of this
    # function)
    raw_im = raw_tiff[0].transpose((2, 0, 1)).astype(np.float32)
    wavelengths = raw_tiff[1]
    for idx, wl in enumerate(bands.tolist()):
        # Nearest-neighbour match of the requested wavelength.
        matched_idx = np.abs(wavelengths - wl).argmin()
        im[idx, ...] = raw_im[matched_idx, ...].copy()
    return im
def read_label(path):
    """
    @brief Load a multi-page annotation TIFF into a one-hot label volume.
    @param[in] path String with the path to the annotation file.
    @returns a numpy.ndarray with shape (C, H, W) containing values in the range
             [0, 1], one binary mask per class; classes absent from the file
             stay all-zero.
    @raises ValueError if the file contains a class name not in 'classnames'.
    """
    # read_mtiff returns a dict: class name -> binary mask.
    raw_tiff = torchseg.data_loader.read_mtiff(path)
    # Get the shape of the labels (which should be identical to the
    # height and width of the image)
    shape = raw_tiff[list(raw_tiff.keys())[0]].shape
    # Create tensor of labels
    n_classes = len(classnames)
    label = np.zeros((n_classes, *shape), dtype=np.float32)
    # Build dictionaries to convert quickly from index to class name
    # and vice versa
    idx2class = classnames
    class2idx = {y: x for x, y in idx2class.items()}
    # Populate the binary array for each class
    for k, gt in raw_tiff.items():
        if k not in class2idx:
            raise ValueError('[ERROR] Unknown <' + k + '> label.')
        # Find the index of the class with name 'k'
        idx = class2idx[k]
        # Set the label for the class with name 'k'
        label[idx] = gt.astype(np.float32)
    return label
def read_files(path):
    """
    @brief List the annotated image/label file pairs of the ODSI-DB folder.
    @param[in] path Path to the folder with the ODSI-DB data.
    @returns a tuple (imgs, segs) containing two lists of full paths to images
             and segmentation labels; images without a '_masks.tif' annotation
             are ignored.
    """
    # Get list of TIFF files
    tiffs = [f for f in torchseg.utils.listdir(path) if '.tif' in f]
    # Get list of segmentation files (full paths)
    segs = [os.path.join(path, f) for f in tiffs if f.endswith('_masks.tif')]
    # Get list of image files (we ignore images without segmentation)
    imgs = [f.replace('_masks.tif', '.tif') for f in segs]
    for im in imgs:
        # BUG FIX: 'im' is already a full path. The original asserted on
        # os.path.join(path, im), which double-prefixes 'path' and always
        # fails when the dataset path is relative.
        assert(os.path.isfile(im))
    return imgs, segs
def generate_random_split(images, labels, train_ratio,
                          force_train=['Fibroma', 'Makeup', 'Malignant lesion']):
    """
    @brief Generates a split of the data for training and testing.
    @details Every element in the list 'images' must correspond to a label in the
             same index of the list 'labels'. One image of every class is put in
             the training set first, then the rest are assigned at random.
             NOTE(review): the 'force_train' parameter is never used in the
             body (and it is a mutable default argument); the first loop below
             already guarantees one image per class in the training set.
    @param[in] images List of paths to the images.
    @param[in] labels List of paths to the labels.
    @param[in] train_ratio Portion of the images that will go for training.
    @param[in] force_train List of classes. Images containing pixels of these
                           classes will be automatically put in the training
                           set. The reason for this is that there is only one
                           image in the dataset containing pixels of these
                           classes.
    @returns a dictionary of dictionaries. Keys 'train' and 'test' in the
             upper level. Keys 'images' and 'labels' in the lower level.
    """
    # Re-seed from the OS entropy source so repeated calls differ.
    np.random.seed()
    class2idx = {y: x for x, y in classnames.items()}
    split_dic = {
        'train': {
            'images': [],
            'labels': [],
        },
        'test': {
            'images': [],
            'labels': [],
        }
    }
    # Make a list of the indices of all the images
    remaining = list(range(len(images)))
    # Make a dictionary from each class to the indices of the images that
    # contain pixels of such class
    class2img = {c: [] for c in range(len(classnames))}
    for im_idx in remaining:
        label_path = labels[im_idx]
        # Per-class pixel counts for this image.
        label = read_label(label_path).sum(axis=(1, 2)).astype(np.int64)
        for c in label.nonzero()[0].tolist():
            class2img[c].append(im_idx)
    # Put an image of each class in the training set
    for c in class2img:
        random.shuffle(class2img[c])
        im_idx = class2img[c][0]
        if im_idx in remaining:
            split_dic['train']['images'].append(images[im_idx])
            split_dic['train']['labels'].append(labels[im_idx])
            remaining.remove(im_idx)
    # Split the rest of the images according to the given probability
    while remaining:
        im_idx = remaining.pop()
        # Bernoulli draw: train with probability 'train_ratio'.
        dst = 'train' if np.random.binomial(1, train_ratio) else 'test'
        split_dic[dst]['images'].append(images[im_idx])
        split_dic[dst]['labels'].append(labels[im_idx])
    return split_dic
def validate_random_split(split_dic,
                          force_train=['Fibroma', 'Makeup', 'Malignant lesion']):
    """
    @brief Checks that all the classes are represented in both training and
           testing splits.
    @details NOTE(review): the 'force_train' parameter is never used in the
             body; only the train-superset-of-test check below is performed.
    @param[in] split_dic Dictionary of dictionaries. Keys 'train' and 'test'
                         in the upper level. Keys 'images' and 'labels' in the
                         lower level.
    @returns True if the split is good. Otherwise, returns False.
    """
    classes = len(classnames)
    train_classes = np.zeros((classes,), dtype=np.int64)
    test_classes = np.zeros((classes,), dtype=np.int64)
    # Find the classes present in the training set
    for label_path in split_dic['train']['labels']:
        train_classes += read_label(label_path).sum(axis=(1, 2)).astype(np.int64)
    # Find the classes present in the testing set
    for label_path in split_dic['test']['labels']:
        test_classes += read_label(label_path).sum(axis=(1, 2)).astype(np.int64)
    # All the classes in the testing set must be present in the training set
    for idx, count in enumerate(test_classes):
        if count > 0 and train_classes[idx] == 0:
            return False
    return True
def copy_into_dir(file_list, dst_dir):
    """
    @brief Copies the files in the list into the provided destination directory,
           keeping each file's basename.
    @param[in] file_list List of paths to files.
    @param[in] dst_dir Path to the destination directory.
    @returns nothing.
    """
    for src in file_list:
        shutil.copyfile(src, os.path.join(dst_dir, os.path.basename(src)))
def copy_data(split_dic, train_path, test_path):
    """
    @brief Gets a split of training/testing files and copies them into separate
           folders.
    @param[in] split_dic Dictionary of dictionaries. Keys 'train' and 'test'
                         in the upper level. Keys 'images' and 'labels' in
                         the lower level.
    @returns nothing.
    """
    # For each split, copy both the images and their label files.
    for split_name, dst_dir in (('train', train_path), ('test', test_path)):
        copy_into_dir(split_dic[split_name]['images'], dst_dir)
        copy_into_dir(split_dic[split_name]['labels'], dst_dir)
def compute_dataset_stats(images, labels):
    """
    @brief Count pixels and images per class over the whole dataset.
    @details NOTE(review): the 'images' parameter is not used; only the label
             files are read.
    @param[in] images List of image paths (unused).
    @param[in] labels List of label file paths.
    @returns a tuple (no_pixels, no_images) of int64 arrays of shape (C,).
    """
    # Gather data for report
    no_classes = len(classnames)
    no_pixels = np.zeros((no_classes,), dtype=np.int64)
    no_images = np.zeros((no_classes,), dtype=np.int64)
    # Find the number of pixels and images per class
    for label_path in labels:
        label = read_label(label_path).sum(axis=(1, 2)).astype(np.int64)
        no_pixels += label
        # Binarize the per-class counts so each image contributes at most 1.
        label[label > 0] = 1
        no_images += label
    return no_pixels, no_images
def print_dataset_stats(no_pixels, no_images, labels):
    """
    @brief Prints the number of pixels and images per class.
    @param[in] no_pixels Number of pixels of each class, np.ndarray, shape (C,).
    @param[in] no_images Number of images of each class, np.ndarray, shape (C,).
    @param[in] labels List of label file paths (only its length is printed).
    @returns nothing.
    """
    no_classes = len(classnames)
    print('Number of classes:', no_classes)
    print('Number of annotated images in the dataset:', len(labels))
    # BUG FIX: the original sliced a single-space 'margin' string with a
    # negative length, which always produced an empty padding, so the class
    # name and the count were glued together. Pad each "<name> [<idx>]:"
    # prefix to a common width instead.
    width = 4 + max(len(classnames[c]) + len(str(c)) + 4
                    for c in range(no_classes))
    print('Number of pixels per class:')
    for c in range(no_classes):
        prefix = '    ' + str(classnames[c]) + ' [' + str(c) + ']:'
        print(prefix.ljust(width) + str(no_pixels[c]))
    print('Number of images per class:')
    for c in range(no_classes):
        prefix = '    ' + str(classnames[c]) + ' [' + str(c) + ']:'
        print(prefix.ljust(width) + str(no_images[c]))
def singular_classes(no_images):
    """
    @brief Get a list of classes that are present in only one image of the
           dataset (or in none at all).
    @param[in] no_images Number of images per class, shape (C,).
    @returns a list of the class indices.
    """
    return np.flatnonzero(no_images < 2).tolist()
def save_dataset_metadata(images, labels, path):
    """
    @brief Save JSON file with the information of the dataset: class maps,
           file lists, per-class image lists/pixel counts, and the list of
           classes absent from the dataset ('ignore_labels').
    @param[in] images List of image paths.
    @param[in] labels List of label image paths.
    @param[in] path String with the path of the output JSON metadata file.
    @returns nothing.
    """
    data = {}
    # Append class indices
    idx2class = classnames
    class2idx = {y: x for x, y in idx2class.items()}
    data['idx2class'] = idx2class
    data['class2idx'] = class2idx
    # Append a list of the images that should be found in the dataset
    data['images'] = images
    # Append a list of the labels that should be found in the dataset
    data['labels'] = labels
    # Append the number of images
    data['number_of_images'] = len(images)
    # Append the list of images per class and the number of pixels per class
    data['images_per_class'] = {c: [] for c in class2idx}
    data['number_pixels_per_class'] = {c: 0 for c in class2idx}
    for image_path, label_path in zip(images, labels):
        # Per-class pixel counts for this image.
        label = read_label(label_path).sum(axis=(1, 2)).astype(np.int64)
        for class_name in data['images_per_class']:
            class_id = class2idx[class_name]
            if label[class_id] > 0:
                data['images_per_class'][class_name].append(image_path)
                data['number_pixels_per_class'][class_name] += int(label[class_id])
    # Append the number of images per class
    data['number_images_per_class'] = {c: len(data['images_per_class'][c]) \
                                       for c in class2idx}
    # Append ignore labels, that is, those classes that are not present
    # in the dataset
    data['ignore_labels'] = [c for c in data['number_images_per_class'] \
                             if data['number_images_per_class'][c] == 0]
    # Create metadata file
    with open(path, 'w', encoding='utf-8') as f:
        json.dump(data, f, ensure_ascii=False, indent=4)
def main():
    """Generate train/test splits of ODSI-DB: one output folder per fold,
    each containing 'train' and 'test' subfolders plus metadata JSON files.
    A candidate split is re-drawn until every test class also appears in
    the training set."""
    # Read command line parameters
    args = parse_cmdline_params()
    validate_cmdline_params(args)
    # Create output folder
    os.mkdir(args.output)
    # Get list of image and segmentation files
    images, labels = read_files(args.input)
    no_pixels, no_images = compute_dataset_stats(images, labels)
    #print_dataset_stats(no_pixels, no_images, labels)
    # Get the list of singular classes, i.e. those that are only present in one
    # image, we will put these images in the training set
    #sc = [classnames[c] for c in singular_classes(no_images)]
    #print('Classes that are only in one image:', sc)
    # TODO: Save dataset metadata
    dataset_metadata_path = os.path.join(args.output, 'metadata.txt')
    save_dataset_metadata(images, labels, dataset_metadata_path)
    # Generate the split for each fold
    for i in tqdm.tqdm(range(args.folds)):
        # Get a valid split of the data: keep redrawing until every class in
        # the test set is also represented in the training set.
        valid_split = False
        split_dic = None
        attempt = 0
        while not valid_split:
            attempt += 1
            sys.stdout.write('Attempt to generate a valid split... ')
            sys.stdout.flush()
            split_dic = generate_random_split(images, labels, args.train)
            valid_split = validate_random_split(split_dic)
            if valid_split:
                sys.stdout.write("[OK]\n")
            else:
                sys.stdout.write("[FAIL]\n")
        # Create output folders
        sys.stdout.write('Creating output folders... ')
        sys.stdout.flush()
        fold_path = os.path.join(args.output, 'fold_' + str(i))
        fold_train_path = os.path.join(fold_path, 'train')
        fold_test_path = os.path.join(fold_path, 'test')
        os.mkdir(fold_path)
        os.mkdir(fold_train_path)
        os.mkdir(fold_test_path)
        sys.stdout.write("[OK]\n")
        # Save fold metadata
        train_metadata_path = os.path.join(fold_path, 'train_metadata.txt')
        test_metadata_path = os.path.join(fold_path, 'test_metadata.txt')
        save_dataset_metadata(split_dic['train']['images'],
                              split_dic['train']['labels'], train_metadata_path)
        save_dataset_metadata(split_dic['test']['images'],
                              split_dic['test']['labels'], test_metadata_path)
        # Copy data to the fold
        sys.stdout.write('Copying data to the fold... ')
        sys.stdout.flush()
        copy_data(split_dic, fold_train_path, fold_test_path)
        sys.stdout.write("[OK]\n")
# Script entry point.
if __name__ == '__main__':
    main()
| 16,274 | 34.927152 | 82 | py |
segodsidb | segodsidb-main/src/test.py | """
@brief Main script to launch the testing process.
@author Luis Carlos Garcia Peraza Herrera (luiscarlos.gph@gmail.com).
@date 5 Jun 2021.
"""
import argparse
import collections
import torch
import numpy as np
import tqdm
import json
import cv2
import skimage.color
# My imports
import torchseg.config.parser
import torchseg.data_loader as dl
import torchseg.model
import torchseg.machine
# There are 35 classes plus background in ODSI-DB.
# Entry 0 is black (used for background/non-annotated pixels); the remaining
# 35 entries are the RGB colours (converted to 0-255 uint8) used to render
# the annotated classes.
_PALETTE_RGB_FLOAT = [
    (0.0,        0.0,        0.0       ),
    (0.73910129, 0.54796227, 0.70659469),
    (0.07401779, 0.48485457, 0.2241555 ),
    (0.35201324, 0.9025658 , 0.81062183),
    (0.08126211, 0.23986311, 0.54880697),
    (0.33267484, 0.6119932 , 0.30272535),
    (0.45419585, 0.1818727 , 0.5877175 ),
    (0.1239585 , 0.17862775, 0.6892662 ),
    (0.20556493, 0.44462774, 0.38364081),
    (0.18754881, 0.1831789 , 0.00863592),
    (0.37702173, 0.075744  , 0.07170247),
    (0.9487006 , 0.90159635, 0.26639963),
    (0.8954375 , 0.58731839, 0.87918311),
    (0.83980577, 0.77131811, 0.02192928),
    (0.47681103, 0.72962211, 0.96439166),
    (0.44293943, 0.60166042, 0.5879358 ),
    (0.52419707, 0.18690438, 0.69027514),
    (0.34720014, 0.57450984, 0.96570434),
    (0.78380941, 0.2237716 , 0.52199938),
    (0.98170786, 0.61735585, 0.73834123),
    (0.44000012, 0.06259595, 0.76726459),
    (0.47754739, 0.13137904, 0.04615173),
    (0.65486219, 0.24028978, 0.75424866),
    (0.79301129, 0.75970907, 0.06562084),
    (0.14864707, 0.55623561, 0.80328385),
    (0.54439947, 0.234355  , 0.81248573),
    (0.24443958, 0.00697174, 0.59921356),
    (0.76808718, 0.56387681, 0.52199431),
    (0.69855907, 0.73646473, 0.8320837 ),
    (0.85436454, 0.86456808, 0.61494475),
    (0.34944949, 0.79188401, 0.8251793 ),
    (0.43554137, 0.18054355, 0.80210866),
    (0.76501493, 0.38795293, 0.49637574),
    (0.31552006, 0.3704537 , 0.90083695),
    (0.26176471, 0.66781917, 0.65375891),
    (0.21141543, 0.16505171, 0.53799316),
]
palette = np.round(np.array(_PALETTE_RGB_FLOAT) * 255.).astype(np.uint8)
def help(short_option):
    """
    @brief Look up the command-line help string for a given short flag.
    @param[in] short_option  Flag string, e.g. '-c'.
    @returns the help message for the given flag.
    @raises KeyError for unknown flags.
    """
    if short_option == '-c':
        return 'config file path (default: None)'
    if short_option == '-l':
        return 'logger config file path (default: None)'
    if short_option == '-r':
        return 'path to latest checkpoint (default: None)'
    if short_option == '-d':
        return 'indices of GPUs to enable (default: all)'
    # '-o' (output JSON file path) is injected via the config parser options
    raise KeyError(short_option)


def parse_cmdline_params():
    """
    @brief Build the command-line parser of the testing script.
    @returns the argparse.ArgumentParser itself (NOT a parsed namespace):
             the actual parsing is deferred to the configuration parser.
    """
    parser = argparse.ArgumentParser(description='PyTorch segmenter.')
    for short_flag, long_flag in (('-c', '--conf'), ('-l', '--logconf'),
                                  ('-r', '--resume'), ('-d', '--device')):
        parser.add_argument(short_flag, long_flag, default=None, type=str,
                            help=help(short_flag))
    return parser


def parse_config(args):
    """
    @brief Combine the parameters of the JSON config file and the command line.
    @param[in] args  Argparse parser object built by parse_cmdline_params().
    @returns A torchseg.config.parser.ConfigParser object.
    """
    # Extra CLI options that modify the values provided in the JSON config
    CustomArgs = collections.namedtuple('CustomArgs', 'flags type target')
    extra_options = [CustomArgs(['-o', '--output'], type=str, target='output')]
    return torchseg.config.parser.ConfigParser.from_args(args, extra_options)
def image_based_accuracy(log_pred, gt, nclasses=35):
    """
    @brief Accuracy over the annotated pixels of a single image, restricted
           to a fixed subset of relevant classes.
    @param[in] log_pred  Log-probabilities, torch tensor (1, nclasses, H, W).
    @param[in] gt        One-hot ground truth, torch tensor (1, nclasses, H, W).
    @param[in] nclasses  Number of classes in the label tensors.
    @returns the accuracy as a Python float.
    """
    # Log-probabilities -> probabilities, drop the batch dimension
    prob = torch.exp(log_pred)[0]
    label = gt[0]
    # Flatten the spatial dimensions: (nclasses, H*W)
    prob = prob.reshape(nclasses, -1)
    label = label.reshape(nclasses, -1)
    # Indices of the classes the accuracy is computed over
    idx2class = dl.OdsiDbDataLoader.OdsiDbDataset.classnames
    class2idx = {v: k for k, v in idx2class.items()}
    relevant_class = ["Skin", "Oral mucosa", "Enamel", "Tongue", "Lip", "Hard palate",
                      "Attached gingiva", "Soft palate", "Hair"]
    relevant_class_idx = torch.tensor([class2idx[c] for c in relevant_class])
    # Keep only the annotated pixels (their one-hot column sums to exactly one)
    annotated = torch.sum(label, dim=0) == 1
    y_pred = prob[:, annotated][relevant_class_idx, :]
    y_true = label[:, annotated][relevant_class_idx, :]
    # Class predictions within the relevant subset
    y_pred = torch.argmax(y_pred, dim=0)
    y_true = torch.argmax(y_true, dim=0)
    # Fraction of correctly classified pixels
    correct = (y_pred == y_true).float().sum()
    return (correct / y_true.shape[0]).item()
def patch_based_prediction(data, gt, model, device, patch_size=512):
    """
    @brief Run inference over a large image by tiling it into patches of at
           most patch_size x patch_size pixels (so big images fit in GPU
           memory) and stitching the per-patch predictions together.
    @param[in] data        Input tensor, shape (B, C, H, W).
    @param[in] gt          Ground-truth tensor; only its shape is used to
                           allocate the output.
    @param[in] model       Callable mapping an input patch to a prediction of
                           the same spatial size.
    @param[in] device      Device the patches and the output are placed on.
    @param[in] patch_size  Maximum side length of each square patch.
    @returns a tensor of shape gt.shape on 'device' holding the stitched
             model predictions.
    """
    # Create full prediction
    output = torch.zeros_like(gt).to(device)
    im_height = data.shape[2]
    im_width = data.shape[3]
    # BUG FIX: the original code initialised the column index only once
    # (before the row loop), so every row of patches after the first one was
    # silently left as zeros; iterating with range() resets it per row
    for j in range(0, im_height, patch_size):
        j_end = min(j + patch_size, im_height)
        for i in range(0, im_width, patch_size):
            i_end = min(i + patch_size, im_width)
            # Run inference on this patch and write it into the mosaic
            patch = data[:, :, j:j_end, i:i_end].to(device)
            output[:, :, j:j_end, i:i_end] = model(patch)
    return output
def label2bgr(im, pred, gt):
    """
    @brief Convert one-hot segmentation labels into colour images for
           visualisation.
    @param[in] im    Input image tensor (currently unused, kept for interface
                     compatibility).
    @param[in] pred  Predicted scores/one-hot label, torch tensor (C, H, W).
    @param[in] gt    Ground-truth one-hot label, torch tensor (C, H, W).
    @returns a tuple (pred_bgr, gt_bgr) of colour-coded label images.
    """
    # Collapse the class dimension into single-channel label maps
    pred_sinchan = torch.argmax(pred, dim=0).numpy()
    gt_sinchan = torch.argmax(gt, dim=0).numpy()
    # Shift all the classes by one so that 0 can encode non-annotated pixels
    pred_sinchan += 1
    gt_sinchan += 1
    # Black out the non-annotated pixels (their one-hot column does not sum
    # to exactly one); convert the torch mask to numpy before indexing
    nan_idx = (torch.sum(gt, dim=0) != 1).numpy()
    pred_sinchan[nan_idx] = 0
    gt_sinchan[nan_idx] = 0
    # Classes that should remain visible in the rendering
    idx2class = dl.OdsiDbDataLoader.OdsiDbDataset.classnames
    class2idx = {y: x for x, y in idx2class.items()}
    relevant_class = ["Skin", "Oral mucosa", "Enamel", "Tongue", "Lip", "Hard palate",
                      "Attached gingiva", "Soft palate", "Hair"]
    relevant_labels = np.array([class2idx[x] + 1 for x in relevant_class])
    # BUG FIX: the original loop blacked out the pixels of the *relevant*
    # classes, which contradicts its own comment ("Black out the non-relevant
    # classes"); here every pixel whose ground-truth class is NOT in the
    # relevant set is blacked out instead
    irrelevant = ~np.isin(gt_sinchan, relevant_labels)
    pred_sinchan[irrelevant] = 0
    gt_sinchan[irrelevant] = 0
    # Convert single-channel label prediction and ground truth to BGR
    pred_bgr = skimage.color.label2rgb(pred_sinchan, colors=palette)
    gt_bgr = skimage.color.label2rgb(gt_sinchan, colors=palette)
    return pred_bgr, gt_bgr
def main():
    """
    @brief Entry point of the testing script: loads a trained model and the
           testing configuration, evaluates the model on every testing
           dataset, prints image-based accuracy statistics and dumps the
           per-class sensitivity/specificity/accuracy/balanced accuracy
           (derived from an accumulated confusion matrix) to JSON files.
    """
    # Read command line arguments and combine them with the JSON config
    args = parse_cmdline_params()
    config = parse_config(args)
    # Hack config file to add 'odsi_db_conf_mat' as a metric
    config['metrics'].append('odsi_db_conf_mat')
    # Get testing logger (NOTE: get_logger was previously called twice with
    # identical arguments; a single call suffices, assuming it is idempotent)
    logger = config.get_logger('test')
    # Build network architecture
    model = config.init_obj('model', torchseg.model)
    logger.info(model)
    # Load the weights of a trained architecture (typically model_best.pth)
    logger.info('Loading checkpoint: {} ...'.format(config.resume))
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    checkpoint = torch.load(config.resume, map_location=device)
    state_dict = checkpoint['state_dict']
    if torch.cuda.is_available() and config['n_gpu'] > 1:
        model = torch.nn.DataParallel(model)
    model.load_state_dict(state_dict)
    model = model.to(device)
    # Put model in test mode (disables dropout, freezes batchnorm statistics)
    model.eval()
    # Get function handles of loss and metrics
    loss_fn = getattr(torchseg.model.loss, config['loss'])
    metric_fns = [getattr(torchseg.model.metric, met) \
                  for met in config['metrics']]
    # Loop over all testing datasets
    for dataset in config['testing']['datasets']:
        # Create dataloader for this particular testing dataset
        data_loader = getattr(torchseg.data_loader,
                              dataset['type'])(**dataset['args'])
        data_loader.training = False
        # Per-image accumulators: input, prediction, ground truth, accuracy,
        # and image path
        im_data = []
        im_pred = []
        im_gt = []
        im_acc = []
        im_path = []
        # Run inference on all the samples in the dataset
        total_loss = 0.0
        total_metrics = [None] * len(metric_fns)
        counter = 0
        with torch.no_grad():
            for raw_data in tqdm.tqdm(data_loader):
                counter += 1
                # Load input data
                data = raw_data['image']
                gt = raw_data['label']
                path = raw_data['path']
                # Wide images of the pixel-wise modes are processed
                # patch-wise so they fit in GPU memory
                if data.shape[3] > 512 and data_loader.mode in ['rgbpixel_test', 'spixel_170_test']:
                    output = patch_based_prediction(data, gt, model, device)
                    gt = gt.to(device)
                else:
                    # Send image and label to GPU
                    data, gt = data.to(device), gt.to(device)
                    # Perform inference on the input
                    output = model(data)
                # Update the per-image accumulators
                im_data.append(data.detach().cpu()[0, ...])
                im_pred.append(output.detach().cpu()[0, ...])
                im_gt.append(gt.detach().cpu()[0, ...])
                im_acc.append(image_based_accuracy(output.detach().cpu(), gt.detach().cpu()))
                im_path.append(path)
                # Update accumulated loss
                loss = loss_fn(output, gt)
                batch_size = data.shape[0]
                total_loss += loss.item() * batch_size
                # Update accumulated metrics (NOTE: loop variable renamed so
                # it no longer shadows the dataloader loop index)
                for m, metric in enumerate(metric_fns):
                    metric_value = metric(output, gt)
                    if total_metrics[m] is None:
                        total_metrics[m] = metric_value * batch_size
                    else:
                        total_metrics[m] += metric_value * batch_size
        # Get best, median, worst image index
        best_idx = np.argmax(im_acc)
        median_idx = np.argsort(im_acc)[len(im_acc) // 2]
        worst_idx = np.argmin(im_acc)
        # FIXME: debugging, save a visualisation of the best prediction
        pred_bgr, gt_bgr = label2bgr(im_data[best_idx], im_pred[best_idx], im_gt[best_idx])
        cv2.imwrite('/tmp/pred_bgr.png', pred_bgr)
        cv2.imwrite('/tmp/gt_bgr.png', gt_bgr)
        # Report the image-based accuracy statistics
        print('Image-based minimum accuracy:', im_acc[worst_idx])
        print('Image-based median accuracy:', im_acc[median_idx])
        print('Image-based maximum accuracy:', im_acc[best_idx])
        print('Image-based average accuracy:', sum(im_acc) / len(im_acc))
        # Find the index of the 'confusion matrix' metric
        conf_mat_idx = None
        for m, met in enumerate(metric_fns):
            if met.__name__ == 'odsi_db_conf_mat':
                conf_mat_idx = m
        # BUG FIX: the original test was 'if not conf_mat_idx', which also
        # triggers when the confusion matrix happens to be the first metric
        # (index 0); compare against None explicitly
        if conf_mat_idx is None:
            raise RuntimeError('Confusion matrix fake metric not found.')
        # Get confusion matrix (accumulated over all the testing pixels)
        conf_mat = total_metrics[conf_mat_idx][0].detach().cpu().numpy()
        # Compute sensitivity, specificity, accuracy and balanced accuracy
        # per class; each conf_mat row is (tp, fp, tn, fn) for one class
        bal_acc = {}
        sensitivity = {}
        specificity = {}
        accuracy = {}
        i2c = torchseg.data_loader.OdsiDbDataLoader.OdsiDbDataset.classnames
        for c in range(conf_mat.shape[0]):
            tp, fp, tn, fn = conf_mat[c]
            sensitivity[i2c[c]] = 1. * tp / (tp + fn)
            specificity[i2c[c]] = 1. * tn / (tn + fp)
            accuracy[i2c[c]] = 1. * (tp + tn) / (tp + tn + fp + fn)
            bal_acc[i2c[c]] = .5 * sensitivity[i2c[c]] + .5 * specificity[i2c[c]]
        # Save JSON files with the per-class results
        with open(config['output'] + '_sensitivity.json', 'w') as f:
            json.dump(sensitivity, f)
        with open(config['output'] + '_specificity.json', 'w') as f:
            json.dump(specificity, f)
        with open(config['output'] + '_accuracy.json', 'w') as f:
            json.dump(accuracy, f)
        with open(config['output'] + '_balanced_accuracy.json', 'w') as f:
            json.dump(bal_acc, f)


if __name__ == '__main__':
    main()
| 13,865 | 34.92228 | 100 | py |
segodsidb | segodsidb-main/src/generate_rgb_recon.py | """
@brief Script that reconstructs the RGB images of the ODSI-DB dataset and
saves them in output folders, one per camera.
- Nuance EX (1392x1040 pixels, 450-950nm, 10nm steps).
- Specim IQ (512x512 pixels, 400-1000nm, 3nm steps).
@author Luis Carlos Garcia Peraza Herrera (luiscarlos.gph@gmail.com).
@date 16 Feb 2022.
"""
import argparse
import numpy as np
import os
import ntpath
import shutil
import tqdm
import cv2
# My imports
import torchseg.data_loader as dl
import torchseg.utils
# Get dictionary from class index to class string, e.g. 0 -> u'Attached gingiva'
idx2class = dl.OdsiDbDataLoader.OdsiDbDataset.classnames
def help(short_option):
    """
    @brief Look up the help string for each command line option.
    @param[in] short_option  Flag string, e.g. '-i'.
    @returns the help message for the given flag.
    @raises KeyError for unknown flags.
    """
    help_msg = {
        '-i': 'Path to the root of the ODSI-DB dataset (required: True)',
        '-o': 'Path to the output directory (required: True)',
    }
    return help_msg[short_option]


def parse_cmdline_params():
    """
    @brief Build the command line parser and parse sys.argv.
    @returns the argparse.Namespace with 'input' and 'output' attributes.
    """
    parser = argparse.ArgumentParser(description='PyTorch segmenter.')
    parser.add_argument('-i', '--input', required=True, type=str,
                        help=help('-i'))
    parser.add_argument('-o', '--output', required=True, type=str,
                        help=help('-o'))
    return parser.parse_args()


def validate_cmdline_params(args):
    """
    @brief Sanity-check the parsed command line parameters.
    @param[in] args  argparse.Namespace with 'input' and 'output' attributes.
    @returns args unchanged.
    @raises RuntimeError if the input directory is missing or the output
            directory already exists.
    """
    # Input directory must exist
    if not os.path.isdir(args.input):
        raise RuntimeError('[ERROR] Input directory does not exist.')
    # Output directory should not exist, we will create it
    if os.path.exists(args.output):
        raise RuntimeError('[ERROR] Output directory already exists.')
    return args


def read_files(path: str):
    """
    @param[in] path Path to the folder with the ODSI-DB data.
    @returns a tuple (imgs, segs) containing two lists of paths to images and
             segmentation labels.
    """
    # Get list of TIFF files
    tiffs = [f for f in torchseg.utils.listdir(path) if '.tif' in f]
    # Get list of segmentation files
    segs = [os.path.join(path, f) for f in tiffs if f.endswith('_masks.tif')]
    # Get list of image files (we ignore images without segmentation)
    imgs = [f.replace('_masks.tif', '.tif') for f in segs]
    # BUG FIX: each 'im' already carries the 'path' prefix (it is derived
    # from 'segs'), so re-joining it with 'path' would duplicate the prefix
    # for relative input paths; check the path exactly as it is returned
    for im in imgs:
        assert(os.path.isfile(im))
    return imgs, segs
def main():
    """
    @brief Reconstruct an RGB image from every annotated hyperspectral image
           in ODSI-DB and save it (together with the RGB image shipped with
           the dataset) into one output subfolder per camera type.
    """
    # Parse and sanity-check the command line parameters
    params = parse_cmdline_params()
    validate_cmdline_params(params)
    # Create the output folder hierarchy, one subfolder per camera
    nuance_ex_path = os.path.join(params.output, 'nuance_ex')
    specim_iq_path = os.path.join(params.output, 'specim_iq')
    for folder in (params.output, nuance_ex_path, specim_iq_path):
        os.mkdir(folder)
    # Collect the annotated images and their segmentation masks
    image_paths, label_paths = read_files(params.input)
    jpeg_quality = [int(cv2.IMWRITE_JPEG_QUALITY), 100]
    for im_path, label_path in tqdm.tqdm(zip(image_paths, label_paths)):
        im_fname = os.path.splitext(ntpath.basename(im_path))[0]
        label_fname = ntpath.basename(label_path)
        # Read the hyperspectral cube, wavelengths, original RGB and metadata
        im_hyper, wl, im_rgb_orig, metadata = dl.read_stiff(im_path,
            silent=True, rgb_only=False)
        nbands = im_hyper.shape[2]
        # The number of bands identifies the camera that took the image; the
        # per-camera folders let us compare the reconstructions of each camera
        if nbands == 51:
            # Nuance EX
            camera_path = nuance_ex_path
        elif nbands == 204:
            # Specim IQ
            camera_path = specim_iq_path
        else:
            raise ValueError('[ERROR] The image {} has {} bands.'.format(im_fname, nbands))
        im_dpath = os.path.join(camera_path, im_fname + '.jpg')
        im_dpath_orig = os.path.join(camera_path, im_fname + '_orig.jpg')
        # Reconstruct the RGB image from the hyperspectral bands
        im_rgb = dl.OdsiDbDataLoader.LoadImage.hyper2rgb(im_hyper, wl)
        # RGB -> BGR so that OpenCV writes the channels in the right order
        cv2.imwrite(im_dpath, im_rgb[..., ::-1].copy(), jpeg_quality)
        # Save the RGB image provided in ODSI-DB into the output folder too
        cv2.imwrite(im_dpath_orig, im_rgb_orig[..., ::-1].copy(), jpeg_quality)


if __name__ == '__main__':
    main()
| 4,659 | 32.52518 | 93 | py |
segodsidb | segodsidb-main/src/compute_specim_iq_mean.py | """
@brief Compute the channel mean of the Specim IQ images.
@author Luis Carlos Garcia Peraza Herrera (luiscarlos.gph@gmail.com).
@date 16 Feb 2022.
"""
import argparse
import numpy as np
import os
import ntpath
import shutil
import tqdm
import cv2
import sys
# My imports
import torchseg.data_loader as dl
import torchseg.utils
# Get dictionary from class index to class string, e.g. 0 -> u'Attached gingiva'
idx2class = dl.OdsiDbDataLoader.OdsiDbDataset.classnames
def help(short_option):
    """
    @brief Look up the help string for each command line option.
    @param[in] short_option  Flag string, e.g. '-i'.
    @returns the help message for the given flag.
    @raises KeyError for unknown flags.
    """
    help_msg = {
        '-i': 'Path to the root of the ODSI-DB dataset (required: True)',
    }
    return help_msg[short_option]


def parse_cmdline_params():
    """
    @brief Build the command line parser and parse sys.argv.
    @returns the argparse.Namespace with the 'input' attribute.
    """
    parser = argparse.ArgumentParser(description='PyTorch segmenter.')
    parser.add_argument('-i', '--input', required=True, type=str,
                        help=help('-i'))
    return parser.parse_args()


def validate_cmdline_params(args):
    """
    @brief Make sure that input directory exists.
    @param[in] args  argparse.Namespace with the 'input' attribute.
    @returns nothing.
    @raises RuntimeError when args.input is not an existing directory.
    """
    if not os.path.isdir(args.input):
        raise RuntimeError('[ERROR] Input directory does not exist.')


def read_files(path: str):
    """
    @param[in] path Path to the folder with the ODSI-DB data.
    @returns a tuple (imgs, segs) containing two lists of paths to images and
             segmentation labels.
    """
    # Get list of TIFF files
    tiffs = [f for f in torchseg.utils.listdir(path) if '.tif' in f]
    # Get list of segmentation files
    segs = [os.path.join(path, f) for f in tiffs if f.endswith('_masks.tif')]
    # Get list of image files (we ignore images without segmentation)
    imgs = [f.replace('_masks.tif', '.tif') for f in segs]
    # BUG FIX: each 'im' already carries the 'path' prefix (it is derived
    # from 'segs'), so re-joining it with 'path' would duplicate the prefix
    # for relative input paths; check the path exactly as it is returned
    for im in imgs:
        assert(os.path.isfile(im))
    return imgs, segs
def write_band_means(prefix, hyper_sums, pixel_count, newline):
    """
    @brief Print 'prefix' followed by the per-band means in Python list
           notation, i.e. 'prefix[m0, m1, ...]'.
    @param[in] prefix       String printed before the list of means.
    @param[in] hyper_sums   Per-band accumulated sums (iterable of floats).
    @param[in] pixel_count  Number of pixels accumulated so far.
    @param[in] newline      Whether to finish the line with a newline.
    @returns nothing.
    """
    sys.stdout.write(prefix + '[')
    sys.stdout.write(', '.join(str(s / pixel_count) for s in hyper_sums))
    sys.stdout.write(']\n' if newline else ']')


def main():
    """
    @brief Compute and print the per-band mean over all the Specim IQ
           (204-band) images of the annotated ODSI-DB dataset.
    """
    # Read and validate command line parameters
    args = parse_cmdline_params()
    validate_cmdline_params(args)
    # Get list of image and segmentation files
    image_paths, label_paths = read_files(args.input)
    # Per-band accumulators of the running mean
    nbands = 204
    hyper_sums = np.zeros((nbands,), dtype=np.float64)
    pixel_count = 0
    image_count = 0
    # Progress is reported every 'interval' accepted images
    interval = 10
    # Loop over the images
    for im_path in tqdm.tqdm(image_paths):
        im_fname = os.path.splitext(ntpath.basename(im_path))[0]
        # Read image
        im_hyper, wl, im_rgb_orig, metadata = dl.read_stiff(im_path,
            silent=True, rgb_only=False)
        # We are only interested in Specim IQ images (204 bands)
        if im_hyper.shape[2] != nbands:
            continue
        # Vectorised per-band accumulation in float64 (replaces the original
        # Python loop over bands; at least as accurate, much faster)
        hyper_sums += im_hyper.sum(axis=(0, 1), dtype=np.float64)
        pixel_count += im_hyper.shape[0] * im_hyper.shape[1]
        image_count += 1
        # Print intermediate stats
        if image_count % interval == 0:
            print('Image count:', image_count)
            write_band_means('Specim IQ average: ', hyper_sums, pixel_count,
                             newline=True)
    # Print final stats (NOTE: the final line has no trailing newline,
    # matching the original output format)
    print('Image count:', image_count)
    write_band_means('Specim IQ average: ', hyper_sums, pixel_count,
                     newline=False)


if __name__ == '__main__':
    main()
| 3,733 | 26.659259 | 80 | py |
segodsidb | segodsidb-main/src/validate_dataset.py | """
@brief Script to validate the hyperspectral data of ODSI-DB along with the
generation of RGB images from hyperspectral data.
@author Luis Carlos Garcia Peraza Herrera (luiscarlos.gph@gmail.com).
@date 27 Sep 2021.
"""
import argparse
import numpy as np
import os
import tqdm
import random
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
import sklearn.manifold
import multiprocessing
import colour
import sys
import cv2
import scipy
# My imports
import torchseg.data_loader
import torchseg.utils
# Get dictionary from class index to class string, e.g. 0 -> u'Attached gingiva'
idx2class = torchseg.data_loader.OdsiDbDataLoader.OdsiDbDataset.classnames
def help(short_option):
    """
    @brief Look up the help string for each command line option.
    @param[in] short_option  Flag string, e.g. '-i'.
    @returns the help message for the given flag.
    @raises KeyError for unknown flags.
    """
    help_msg = {
        '-i': 'Path to the root of the ODSI-DB dataset (required: True)',
        '-o': 'Path to the output directory (required: True)',
        '-n': 'Max. no. of randomly selected pixels for t-SNE (required: True)',
    }
    return help_msg[short_option]


def parse_cmdline_params():
    """
    @brief Build the command line parser and parse sys.argv.
    @returns the argparse.Namespace with 'input' and 'output' attributes.
            NOTE(review): '-n' has a help string above but is not exposed
            here — presumably a leftover; confirm before adding it.
    """
    parser = argparse.ArgumentParser(description='PyTorch segmenter.')
    parser.add_argument('-i', '--input', required=True, type=str,
                        help=help('-i'))
    parser.add_argument('-o', '--output', required=True, type=str,
                        help=help('-o'))
    return parser.parse_args()


def validate_cmdline_params(args):
    """
    @brief Sanity-check the parsed command line parameters.
    @param[in] args  argparse.Namespace with 'input' and 'output' attributes.
    @returns args unchanged.
    @raises RuntimeError if the input directory is missing or the output
            directory already exists.
    """
    # Input directory must exist
    if not os.path.isdir(args.input):
        raise RuntimeError('[ERROR] Input directory does not exist.')
    # Output directory should not exist, we will create it
    if os.path.exists(args.output):
        raise RuntimeError('[ERROR] Output directory already exists.')
    return args


def read_files(path: str):
    """
    @param[in] path Path to the folder with the ODSI-DB data.
    @returns a tuple (imgs, segs) containing two lists of paths to images and
             segmentation labels.
    """
    # Get list of TIFF files
    tiffs = [f for f in torchseg.utils.listdir(path) if '.tif' in f]
    # Get list of segmentation files
    segs = [os.path.join(path, f) for f in tiffs if f.endswith('_masks.tif')]
    # Get list of image files (we ignore images without segmentation)
    imgs = [f.replace('_masks.tif', '.tif') for f in segs]
    # BUG FIX: each 'im' already carries the 'path' prefix (it is derived
    # from 'segs'), so re-joining it with 'path' would duplicate the prefix
    # for relative input paths; check the path exactly as it is returned
    for im in imgs:
        assert(os.path.isfile(im))
    return imgs, segs
def main():
    """
    @brief For every annotated ODSI-DB image, reconstruct the RGB image from
           the hyperspectral cube and save both the reconstruction and the
           RGB image shipped with the dataset, for visual validation.
    """
    # Parse and sanity-check the command line parameters
    params = parse_cmdline_params()
    validate_cmdline_params(params)
    # Create output folder
    os.mkdir(params.output)
    # Get list of image and segmentation files
    sys.stdout.write('[INFO] Reading list of input files... ')
    sys.stdout.flush()
    image_paths, label_paths = read_files(params.input)
    print('OK')
    # Loop over the images
    for im_path, label_path in zip(image_paths, label_paths):
        # Read the hyperspectral cube, wavelengths, original RGB and metadata
        im_hyper, wl, im_rgb, metadata = torchseg.data_loader.read_stiff(im_path,
            silent=True, rgb_only=False)
        # Reconstruct an RGB image from the hyperspectral bands
        fname = os.path.basename(os.path.splitext(im_path)[0])
        sys.stdout.write("[INFO] Converting {} to RGB... ".format(fname))
        image_loader = torchseg.data_loader.OdsiDbDataLoader.LoadImage
        im_recon_rgb = image_loader.hyper2rgb(im_hyper, wl)
        sys.stdout.flush()
        print('OK')
        # RGB -> BGR so that OpenCV writes the channels in the right order,
        # then save the reconstruction to the output folder
        im_recon_path = os.path.join(params.output, fname + '_reconstructed.png')
        cv2.imwrite(im_recon_path, im_recon_rgb[..., ::-1])
        # Save the RGB image provided by the dataset to the output folder
        im_orig_path = os.path.join(params.output, fname + '_original.png')
        cv2.imwrite(im_orig_path, im_rgb[..., ::-1])


if __name__ == '__main__':
    main()
| 4,126 | 29.57037 | 82 | py |
segodsidb | segodsidb-main/src/compute_all_hyper_450_950_51_mean.py | """
@brief Compute the channel mean of the 51 interpolated bands from
450-950nm.
@author Luis Carlos Garcia Peraza Herrera (luiscarlos.gph@gmail.com).
@date 21 Jun 2022.
"""
import argparse
import numpy as np
import os
import ntpath
import shutil
import tqdm
import cv2
import sys
# My imports
import torchseg.data_loader as dl
import torchseg.utils
# Get dictionary from class index to class string, e.g. 0 -> u'Attached gingiva'
idx2class = dl.OdsiDbDataLoader.OdsiDbDataset.classnames
def help(short_option):
    """
    @brief Look up the help string for each command line option.
    @param[in] short_option  Flag string, e.g. '-i'.
    @returns the help message for the given flag.
    @raises KeyError for unknown flags.
    """
    help_msg = {
        '-i': 'Path to the root of the ODSI-DB dataset (required: True)',
    }
    return help_msg[short_option]


def parse_cmdline_params():
    """
    @brief Build the command line parser and parse sys.argv.
    @returns the argparse.Namespace with the 'input' attribute.
    """
    parser = argparse.ArgumentParser(description='PyTorch segmenter.')
    parser.add_argument('-i', '--input', required=True, type=str,
                        help=help('-i'))
    return parser.parse_args()


def validate_cmdline_params(args):
    """
    @brief Make sure that input directory exists.
    @param[in] args  argparse.Namespace with the 'input' attribute.
    @returns nothing.
    @raises RuntimeError when args.input is not an existing directory.
    """
    if not os.path.isdir(args.input):
        raise RuntimeError('[ERROR] Input directory does not exist.')


def read_files(path: str):
    """
    @param[in] path Path to the folder with the ODSI-DB data.
    @returns a tuple (imgs, segs) containing two lists of paths to images and
             segmentation labels.
    """
    # Get list of TIFF files
    tiffs = [f for f in torchseg.utils.listdir(path) if '.tif' in f]
    # Get list of segmentation files
    segs = [os.path.join(path, f) for f in tiffs if f.endswith('_masks.tif')]
    # Get list of image files (we ignore images without segmentation)
    imgs = [f.replace('_masks.tif', '.tif') for f in segs]
    # BUG FIX: each 'im' already carries the 'path' prefix (it is derived
    # from 'segs'), so re-joining it with 'path' would duplicate the prefix
    # for relative input paths; check the path exactly as it is returned
    for im in imgs:
        assert(os.path.isfile(im))
    return imgs, segs
def write_band_means(prefix, hyper_sums, pixel_count, newline):
    """
    @brief Print 'prefix' followed by the per-band means in Python list
           notation, i.e. 'prefix[m0, m1, ...]'.
    @param[in] prefix       String printed before the list of means.
    @param[in] hyper_sums   Per-band accumulated sums (iterable of floats).
    @param[in] pixel_count  Number of pixels accumulated so far.
    @param[in] newline      Whether to finish the line with a newline.
    @returns nothing.
    """
    sys.stdout.write(prefix + '[')
    sys.stdout.write(', '.join(str(s / pixel_count) for s in hyper_sums))
    sys.stdout.write(']\n' if newline else ']')


def main():
    """
    @brief Compute and print the per-band mean over all the annotated
           ODSI-DB images after interpolating every image to 51 bands in
           the 450-950nm range.
    """
    # Read and validate command line parameters
    args = parse_cmdline_params()
    validate_cmdline_params(args)
    # Get list of image and segmentation files
    image_paths, label_paths = read_files(args.input)
    # Per-band accumulators of the running mean
    nbands = 51
    new_wl = np.linspace(450, 950, nbands)
    hyper_sums = np.zeros((nbands,), dtype=np.float64)
    pixel_count = 0
    image_count = 0
    # Progress is reported every 'interval' images
    interval = 10
    # Loop over the images
    for im_path in tqdm.tqdm(image_paths):
        im_fname = os.path.splitext(ntpath.basename(im_path))[0]
        # Read image
        im_hyper, wl, _, _ = dl.read_stiff(im_path, silent=True,
            rgb_only=False)
        # Interpolate hyperspectral image to the 450-950nm range (51 bands)
        interp_func = dl.OdsiDbDataLoader.LoadImage.interp_spectra
        im_hyper = interp_func(im_hyper, wl, new_wl)
        # Vectorised per-band accumulation in float64 (replaces the original
        # Python loop over bands; at least as accurate, much faster)
        hyper_sums += im_hyper.sum(axis=(0, 1), dtype=np.float64)
        pixel_count += im_hyper.shape[0] * im_hyper.shape[1]
        image_count += 1
        # Print intermediate stats
        if image_count % interval == 0:
            print('Image count:', image_count)
            write_band_means('All hyper 450-950nm (51 bands) average: ',
                             hyper_sums, pixel_count, newline=True)
    # Print final stats (NOTE: the final line has no trailing newline,
    # matching the original output format)
    print('Image count:', image_count)
    write_band_means('All hyper 450-950nm (51 bands) average: ',
                     hyper_sums, pixel_count, newline=False)


if __name__ == '__main__':
    main()
| 3,960 | 27.912409 | 80 | py |
segodsidb | segodsidb-main/src/compute_all_hyper_450_950_170_mean.py | """
@brief Compute the channel mean of the 170 interpolated bands from
450-950nm.
@author Luis Carlos Garcia Peraza Herrera (luiscarlos.gph@gmail.com).
@date 21 Jun 2022.
"""
import argparse
import numpy as np
import os
import ntpath
import shutil
import tqdm
import cv2
import sys
# My imports
import torchseg.data_loader as dl
import torchseg.utils
# Get dictionary from class index to class string, e.g. 0 -> u'Attached gingiva'
idx2class = dl.OdsiDbDataLoader.OdsiDbDataset.classnames
def help(short_option):
    """
    @brief Look up the help string for each command line option.
    @param[in] short_option  Flag string, e.g. '-i'.
    @returns the help message for the given flag.
    @raises KeyError for unknown flags.
    """
    help_msg = {
        '-i': 'Path to the root of the ODSI-DB dataset (required: True)',
    }
    return help_msg[short_option]


def parse_cmdline_params():
    """
    @brief Build the command line parser and parse sys.argv.
    @returns the argparse.Namespace with the 'input' attribute.
    """
    parser = argparse.ArgumentParser(description='PyTorch segmenter.')
    parser.add_argument('-i', '--input', required=True, type=str,
                        help=help('-i'))
    return parser.parse_args()


def validate_cmdline_params(args):
    """
    @brief Make sure that input directory exists.
    @param[in] args  argparse.Namespace with the 'input' attribute.
    @returns nothing.
    @raises RuntimeError when args.input is not an existing directory.
    """
    if not os.path.isdir(args.input):
        raise RuntimeError('[ERROR] Input directory does not exist.')


def read_files(path: str):
    """
    @param[in] path Path to the folder with the ODSI-DB data.
    @returns a tuple (imgs, segs) containing two lists of paths to images and
             segmentation labels.
    """
    # Get list of TIFF files
    tiffs = [f for f in torchseg.utils.listdir(path) if '.tif' in f]
    # Get list of segmentation files
    segs = [os.path.join(path, f) for f in tiffs if f.endswith('_masks.tif')]
    # Get list of image files (we ignore images without segmentation)
    imgs = [f.replace('_masks.tif', '.tif') for f in segs]
    # BUG FIX: each 'im' already carries the 'path' prefix (it is derived
    # from 'segs'), so re-joining it with 'path' would duplicate the prefix
    # for relative input paths; check the path exactly as it is returned
    for im in imgs:
        assert(os.path.isfile(im))
    return imgs, segs
def write_band_means(prefix, hyper_sums, pixel_count, newline):
    """
    @brief Print 'prefix' followed by the per-band means in Python list
           notation, i.e. 'prefix[m0, m1, ...]'.
    @param[in] prefix       String printed before the list of means.
    @param[in] hyper_sums   Per-band accumulated sums (iterable of floats).
    @param[in] pixel_count  Number of pixels accumulated so far.
    @param[in] newline      Whether to finish the line with a newline.
    @returns nothing.
    """
    sys.stdout.write(prefix + '[')
    sys.stdout.write(', '.join(str(s / pixel_count) for s in hyper_sums))
    sys.stdout.write(']\n' if newline else ']')


def main():
    """
    @brief Compute and print the per-band mean over all the annotated
           ODSI-DB images after interpolating every image to 170 bands in
           the 450-950nm range.
    """
    # Read and validate command line parameters
    args = parse_cmdline_params()
    validate_cmdline_params(args)
    # Get list of image and segmentation files
    image_paths, label_paths = read_files(args.input)
    # Per-band accumulators of the running mean
    nbands = 170
    new_wl = np.linspace(450, 950, nbands)
    hyper_sums = np.zeros((nbands,), dtype=np.float64)
    pixel_count = 0
    image_count = 0
    # Progress is reported every 'interval' images
    interval = 10
    # Loop over the images
    for im_path in tqdm.tqdm(image_paths):
        im_fname = os.path.splitext(ntpath.basename(im_path))[0]
        # Read image
        im_hyper, wl, _, _ = dl.read_stiff(im_path, silent=True,
            rgb_only=False)
        # Interpolate hyperspectral image to the 450-950nm range (170 bands)
        interp_func = dl.OdsiDbDataLoader.LoadImage.interp_spectra
        im_hyper = interp_func(im_hyper, wl, new_wl)
        # Vectorised per-band accumulation in float64 (replaces the original
        # Python loop over bands; at least as accurate, much faster)
        hyper_sums += im_hyper.sum(axis=(0, 1), dtype=np.float64)
        pixel_count += im_hyper.shape[0] * im_hyper.shape[1]
        image_count += 1
        # Print intermediate stats
        if image_count % interval == 0:
            print('Image count:', image_count)
            write_band_means('All hyper 450-950nm (170 bands) average: ',
                             hyper_sums, pixel_count, newline=True)
    # Print final stats (NOTE: the final line has no trailing newline,
    # matching the original output format)
    print('Image count:', image_count)
    write_band_means('All hyper 450-950nm (170 bands) average: ',
                     hyper_sums, pixel_count, newline=False)


if __name__ == '__main__':
    main()
| 3,964 | 27.941606 | 80 | py |
segodsidb | segodsidb-main/src/generate_tsne.py | """
@brief Script that generates a t-SNE of all the pixels in the ODSI-DB dataset,
comparing RGB pixels with hyperspectral pixels.
@author Luis Carlos Garcia Peraza Herrera (luiscarlos.gph@gmail.com).
@date 27 Sep 2021.
"""
import argparse
import numpy as np
import os
import tqdm
import random
import pandas as pd
import seaborn as sns
import matplotlib
import matplotlib.pyplot as plt
import sklearn.manifold
import multiprocessing
import scipy.spatial
#import colour
# My imports
import torchseg.data_loader
import torchseg.utils
# Get dictionary from class index to class string, e.g. 0 -> u'Attached gingiva'
idx2class = torchseg.data_loader.OdsiDbDataLoader.OdsiDbDataset.classnames
def help(short_option):
    """
    @brief Look up the help string for each command line option.
    @param[in] short_option  Flag string, e.g. '-i'.
    @returns the help message for the given flag.
    @raises KeyError for unknown flags.
    """
    help_msg = {
        '-i': 'Path to the root of the ODSI-DB dataset (required: True)',
        '-o': 'Path to the output directory (required: True)',
        '-n': 'Max. no. of selected pixels for t-SNE (required: True)',
        '-r': 'Use reconstructed RGB images (required: True)',
        '-e': 'Convert Specim IQ images to Nuance EX style of 51 bands (required: True)',
        '-v': 'Use only the visible spectrum (required: True)',
    }
    return help_msg[short_option]


def _str2bool(value):
    """
    @brief argparse 'type' callable that parses boolean-looking strings.
    @param[in] value  String provided on the command line (or a bool).
    @returns True or False.
    @raises argparse.ArgumentTypeError for unrecognised values.
    """
    if isinstance(value, bool):
        return value
    if value.lower() in ('true', 't', 'yes', 'y', '1'):
        return True
    if value.lower() in ('false', 'f', 'no', 'n', '0'):
        return False
    raise argparse.ArgumentTypeError('Boolean value expected, got %s' % value)


def parse_cmdline_params():
    """
    @brief Build the command line parser and parse sys.argv.
    @returns the argparse.Namespace with attributes input, output, npixels,
             recon, visible and nuance.
    """
    parser = argparse.ArgumentParser(description='PyTorch segmenter.')
    parser.add_argument('-i', '--input', required=True, type=str,
                        help=help('-i'))
    parser.add_argument('-o', '--output', required=True, type=str,
                        help=help('-o'))
    parser.add_argument('-n', '--npixels', required=True, type=int,
                        help=help('-n'))
    # BUG FIX: the original used 'type=bool', but argparse applies type to a
    # string and bool('False') == True, so '-r False'/'-v False' were
    # impossible to express; parse the boolean strings explicitly instead
    parser.add_argument('-r', '--recon', required=True, type=_str2bool,
                        help=help('-r'))
    parser.add_argument('-v', '--visible', required=True, type=_str2bool,
                        help=help('-v'))
    parser.add_argument('-e', '--nuance', required=True, type=str,
                        help=help('-e'))
    return parser.parse_args()


def validate_cmdline_params(args):
    """
    @brief Sanity-check the parsed command line parameters.
    @param[in] args  argparse.Namespace with input, output and nuance.
    @returns args unchanged.
    @raises ValueError if the input dir is missing, the output dir already
            exists, or the Specim -> Nuance mode is not recognised.
    """
    # Input directory must exist
    if not os.path.isdir(args.input):
        raise ValueError('[ERROR] Input directory does not exist.')
    # Output directory should not exist, we will create it
    if os.path.exists(args.output):
        raise ValueError('[ERROR] Output directory already exists.')
    if args.nuance not in ['none', 'nearest', 'linear']:
        raise ValueError('[ERROR] Specim -> Nuance conversion mode not recognised.')
    return args


def read_files(path: str):
    """
    @param[in] path Path to the folder with the ODSI-DB data.
    @returns a tuple (imgs, segs) containing two lists of paths to images and
             segmentation labels.
    """
    # Get list of TIFF files
    tiffs = [f for f in torchseg.utils.listdir(path) if '.tif' in f]
    # Get list of segmentation files
    segs = [os.path.join(path, f) for f in tiffs if f.endswith('_masks.tif')]
    # Get list of image files (we ignore images without segmentation)
    imgs = [f.replace('_masks.tif', '.tif') for f in segs]
    # BUG FIX: each 'im' already carries the 'path' prefix (it is derived
    # from 'segs'), so re-joining it with 'path' would duplicate the prefix
    # for relative input paths; check the path exactly as it is returned
    for im in imgs:
        assert(os.path.isfile(im))
    return imgs, segs
def specim2nuance(im_specim, specim_wl=None, nuance_wl=None, mode='linear'):
    """
    @brief Convert a hyperspectral image from the Specim IQ camera (204 bands)
           to the Nuance EX (51 bands).
    @param[in] im_specim  Numpy ndarray, shape (h, w, c). If c == 51 the image
                          is assumed to already be Nuance EX style and is
                          returned untouched; otherwise c must be 204.
    @param[in] specim_wl  Array of wavelengths of the Specim IQ bands. If None,
                          the hard-coded factory wavelengths below are used.
    @param[in] nuance_wl  Array of wavelengths of the Nuance EX bands. If None,
                          51 bands evenly spaced over 450-950nm are used.
    @param[in] mode       Two modes are available: 'nearest' and 'linear'.
                          nearest: the band with nearest wavelength of
                                   the Specim IQ is used.
                          linear:  linear interpolation of the two
                                   surrounding wavelengths.
    @returns a tuple (im_nuance, nuance_wl) with the synthesized image, shape
             (h, w, 51) and dtype float32, and the Nuance band wavelengths.
    @raises ValueError if mode is not 'nearest' or 'linear'.
    """
    # Get an array of the wavelengths of each camera
    if specim_wl is None:
        specim_wl = np.array([
            397.32000732, 400.20001221, 403.08999634, 405.97000122, 408.8500061 ,
            411.73999023, 414.63000488, 417.51998901, 420.3999939 , 423.29000854,
            426.19000244, 429.07998657, 431.97000122, 434.86999512, 437.76000977,
            440.66000366, 443.55999756, 446.45001221, 449.3500061 , 452.25      ,
            455.16000366, 458.05999756, 460.95999146, 463.86999512, 466.76998901,
            469.67999268, 472.58999634, 475.5       , 478.41000366, 481.32000732,
            484.23001099, 487.14001465, 490.05999756, 492.97000122, 495.89001465,
            498.79998779, 501.72000122, 504.64001465, 507.55999756, 510.48001099,
            513.40002441, 516.33001709, 519.25      , 522.17999268, 525.09997559,
            528.0300293 , 530.96002197, 533.89001465, 536.82000732, 539.75      ,
            542.67999268, 545.61999512, 548.54998779, 551.48999023, 554.42999268,
            557.35998535, 560.29998779, 563.23999023, 566.17999268, 569.11999512,
            572.07000732, 575.01000977, 577.96002197, 580.90002441, 583.84997559,
            586.79998779, 589.75      , 592.70001221, 595.65002441, 598.59997559,
            601.54998779, 604.51000977, 607.46002197, 610.41998291, 613.38000488,
            616.34002686, 619.29998779, 622.26000977, 625.2199707 , 628.17999268,
            631.15002441, 634.10998535, 637.08001709, 640.03997803, 643.01000977,
            645.97998047, 648.95001221, 651.91998291, 654.89001465, 657.86999512,
            660.84002686, 663.80999756, 666.78997803, 669.77001953, 672.75      ,
            675.72998047, 678.71002197, 681.69000244, 684.66998291, 687.65002441,
            690.64001465, 693.61999512, 696.60998535, 699.59997559, 702.58001709,
            705.57000732, 708.57000732, 711.55999756, 714.54998779, 717.53997803,
            720.53997803, 723.5300293 , 726.5300293 , 729.5300293 , 732.5300293 ,
            735.5300293 , 738.5300293 , 741.5300293 , 744.5300293 , 747.53997803,
            750.53997803, 753.54998779, 756.55999756, 759.55999756, 762.57000732,
            765.58001709, 768.59997559, 771.60998535, 774.61999512, 777.64001465,
            780.65002441, 783.66998291, 786.67999268, 789.70001221, 792.7199707 ,
            795.73999023, 798.77001953, 801.78997803, 804.80999756, 807.84002686,
            810.85998535, 813.89001465, 816.91998291, 819.95001221, 822.97998047,
            826.01000977, 829.03997803, 832.07000732, 835.10998535, 838.14001465,
            841.17999268, 844.2199707 , 847.25      , 850.28997803, 853.33001709,
            856.36999512, 859.41998291, 862.46002197, 865.5       , 868.54998779,
            871.59997559, 874.64001465, 877.69000244, 880.73999023, 883.78997803,
            886.84002686, 889.90002441, 892.95001221, 896.01000977, 899.05999756,
            902.11999512, 905.17999268, 908.23999023, 911.29998779, 914.35998535,
            917.41998291, 920.47998047, 923.54998779, 926.60998535, 929.67999268,
            932.73999023, 935.80999756, 938.88000488, 941.95001221, 945.02001953,
            948.09997559, 951.16998291, 954.23999023, 957.32000732, 960.40002441,
            963.4699707 , 966.54998779, 969.63000488, 972.71002197, 975.78997803,
            978.88000488, 981.96002197, 985.04998779, 988.13000488, 991.2199707 ,
            994.30999756, 997.40002441, 1000.48999023, 1003.58001709,
        ])
    if nuance_wl is None:
        nuance_wl = np.linspace(450, 950, 51)
    # If we get a Nuance EX image, we don't need to convert anything
    if im_specim.shape[2] == 51:
        return im_specim, nuance_wl
    assert(im_specim.shape[2] == 204)
    # Pairwise (204 x 51) distance between the wavelengths of the two cameras
    dist = scipy.spatial.distance.cdist(specim_wl.reshape((204, 1)),
        nuance_wl.reshape((51, 1)), metric='euclidean')
    # For each Nuance band, index of the Specim band with closest wavelength
    nearest = np.argmin(dist, axis=0)
    # Synthesize Nuance-like image using the nearest bands
    h, w, _ = im_specim.shape
    im_nuance = np.empty((h, w, nuance_wl.shape[0]), dtype=np.float32)
    if mode == 'nearest':
        for i in range(nuance_wl.shape[0]):
            im_nuance[:, :, i] = im_specim[:, :, nearest[i]].copy()
    elif mode == 'linear':
        for i in range(nuance_wl.shape[0]):
            # Find the bands before and after the target Nuance wavelength
            if specim_wl[nearest[i]] > nuance_wl[i]:
                j = nearest[i] - 1
                k = nearest[i]
            else:
                j = nearest[i]
                k = nearest[i] + 1
            # Compute the weights of the bands before and after: the closer
            # a Specim band is to the target wavelength, the larger its weight
            d1 = nuance_wl[i] - specim_wl[j]
            d2 = specim_wl[k] - nuance_wl[i]
            norm = d1 + d2
            w1 = d2 / norm
            w2 = d1 / norm
            # Add interpolated channel to the synthetic Nuance EX image
            im_nuance[:, :, i] = w1 * im_specim[:, :, j] + w2 * im_specim[:, :, k]
    else:
        raise ValueError('[ERROR] Unknown interpolation mode.')
    assert(im_nuance.shape == (h, w, 51))
    return im_nuance, nuance_wl
def process_image(im_path: str, label_path: str, no_pixels: int,
                  rgb_recon: bool, visible: bool, nuance_recon: str):
    """
    @brief Get the annotated pixels from an image of the ODSI-DB dataset.
    @param[in] im_path      Path to the image file.
    @param[in] label_path   Path to the annotation file.
    @param[in] no_pixels    Max number of annotated pixels to be sampled from
                            the image (sampling is random, via shuffle).
    @param[in] rgb_recon    Reconstruct RGB image from hyperspectral.
    @param[in] visible      Only use visible wavelengths (380-740nm).
    @param[in] nuance_recon Specim IQ -> Nuance EX conversion mode (passed to
                            specim2nuance); None disables the conversion.
    @returns a tuple (hyper_pixels, rgb_pixels, labels) of three index-aligned
             lists: hyperspectral pixel values, RGB pixel values and the class
             index (argmax over the 35 label channels) of each sampled pixel.
    """
    # Read raw hyperspectral image
    im_hyper, wl, im_rgb, metadata = torchseg.data_loader.read_stiff(im_path,
        silent=True, rgb_only=False)
    # Convert image to Nuance EX (in case it is Specim IQ)
    if nuance_recon is not None:
        im_hyper, wl = specim2nuance(im_hyper, mode=nuance_recon)
    # Remove the non-visible part of the spectrum if requested
    if visible:
        im_hyper, wl = torchseg.data_loader.OdsiDbDataLoader.LoadImage.filter_bands(im_hyper, wl,
            380., 740.)
    # Reconstruct RGB image if requested (overrides the RGB stored in ODSI-DB)
    if rgb_recon:
        im_rgb = torchseg.data_loader.OdsiDbDataLoader.LoadImage.hyper2rgb(im_hyper, wl)
    # Read label, reshaped from (classes, h, w) to (h, w, classes)
    label = torchseg.data_loader.OdsiDbDataLoader.LoadImage.read_label(
        label_path).transpose((1, 2, 0))
    # Make sure that image and label have the same height and width
    assert(label.shape[2] == 35)  # Labels are supposed to have 35 classes
    assert(im_hyper.shape[0] == label.shape[0])
    assert(im_hyper.shape[1] == label.shape[1])
    assert(im_rgb.shape[0] == label.shape[0])
    assert(im_rgb.shape[1] == label.shape[1])
    # Collect the labelled pixels: only those annotated with EXACTLY one class
    ann = np.where(np.sum(label, axis=2) == 1)
    ann = list(zip(ann[0], ann[1]))
    # Downsample the number of pixels (random subset of size <= no_pixels)
    random.shuffle(ann)
    ann = ann[:no_pixels]
    # Save the randomly selected pixels
    hyper_pixels = []
    rgb_pixels = []
    labels = []
    for i, j in ann:
        hyper_pixels.append(im_hyper[i, j, :].tolist())
        rgb_pixels.append(im_rgb[i, j, :].tolist())
        labels.append(np.argmax(label[i, j, :]))
    return hyper_pixels, rgb_pixels, labels
def get_pixels(image_paths, label_paths, no_pixels=1000000, rgb_recon=False,
               visible=False, nuance_recon=None):
    """
    @brief Get all the labelled pixels of the ODSI-DB dataset.
    @param[in] image_paths  List of image paths.
    @param[in] label_paths  List of label paths in sync with the list of
                            images.
    @param[in] no_pixels    Max total number of pixels to sample, spread
                            evenly over the images.
    @param[in] rgb_recon    True if you want to reconstruct the RGB images
                            from the hyperspectral images and not use the
                            ones already provided in ODSI-DB.
    @param[in] visible      True if you want to use just the visible part
                            of the spectrum.
    @param[in] nuance_recon Specim IQ -> Nuance EX conversion mode, or None
                            to disable the conversion.
                            BUGFIX: default changed from False to None,
                            because process_image() treats any non-None value
                            as a conversion mode, so the old default wrongly
                            triggered a conversion with mode=False.
    @returns a tuple (hyper_pixels, rgb_pixels, labels) of three index-aligned
             lists aggregated over ALL the images.
    """
    assert(len(image_paths) == len(label_paths))
    nimages = len(image_paths)
    # Pixels sampled per image so the total stays within no_pixels
    ppi = no_pixels // nimages
    with multiprocessing.Pool() as pool:
        data_input = list(zip(image_paths, label_paths, [ppi] * nimages,
                              [rgb_recon] * nimages, [visible] * nimages,
                              [nuance_recon] * nimages))
        # BUGFIX: starmap() returns one (hyper, rgb, labels) tuple per image;
        # the previous code kept only the first image's pixels ([0]) and
        # silently discarded the rest of the dataset.
        per_image = pool.starmap(process_image, data_input)
    # Aggregate the per-image results into three flat lists
    hyper_pixels = []
    rgb_pixels = []
    labels = []
    for hyper, rgb, lab in per_image:
        hyper_pixels.extend(hyper)
        rgb_pixels.extend(rgb)
        labels.extend(lab)
    return hyper_pixels, rgb_pixels, labels
def plot(rgb_embed, hyper_embed, labels):
    """
    @brief Plots the t-SNE of RGB and hyperspectral pixels side by side.
    @param[in] rgb_embed    Embedding of RGB pixels (N, 2).
    @param[in] hyper_embed  Embedding of hyperspectral pixels (N, 2).
    @param[in] labels       List of class-name strings (N,).
    @returns a tuple (fig, axes, lgd) where fig and ax are the usual matplotlib
             objects and lgd is the fig.legend.
    """
    # Prepare data to plot with seaborn (column 0 -> y axis, column 1 -> x)
    rgb_data = {'y': rgb_embed[:, 0], 'x': rgb_embed[:, 1], 'label': labels}
    hyper_data = {'y': hyper_embed[:, 0], 'x': hyper_embed[:, 1], 'label': labels}
    # Create figure with two subplots: RGB (left) and hyperspectral (right)
    sns.set()
    fig, axes = plt.subplots(1, 2, figsize=(16, 8))
    # Generate subplots
    # NOTE(review): 'palette' is built but never passed to the scatterplot
    # calls below, so seaborn uses its default colours -- confirm intent.
    palette = dict(zip(idx2class.values(),
                   sns.color_palette('husl', n_colors=len(idx2class.values()))))
                   # sns.color_palette(n_colors=len(idx2class.values()))))
    rgb_plot = sns.scatterplot(ax=axes[0], data=rgb_data, x='x', y='y',
                               hue='label', s=5)
    hyper_plot = sns.scatterplot(ax=axes[1], data=hyper_data, x='x', y='y',
                                 hue='label', s=5)
    # Shared axis limits so both subplots are directly comparable
    ymax = max(rgb_embed[:, 0].max(), hyper_embed[:, 0].max())
    ymin = min(rgb_embed[:, 0].min(), hyper_embed[:, 0].min())
    xmax = max(rgb_embed[:, 1].max(), hyper_embed[:, 1].max())
    xmin = min(rgb_embed[:, 1].min(), hyper_embed[:, 1].min())
    # Single figure-level legend built from the last axes' handles
    handles, figlabels = fig.axes[-1].get_legend_handles_labels()
    lgd = fig.legend(handles, figlabels, loc='lower center',
                     bbox_to_anchor=(0.5, -0.15), ncol=10)
    #lgd = fig.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)
    axes[0].set_title('RGB', pad=20)
    #rgb_plot.set(title='RGB')
    rgb_plot.set(xlabel=None)
    rgb_plot.set(ylabel=None)
    rgb_plot.set(ylim=[ymin, ymax])
    rgb_plot.set(xlim=[xmin, xmax])
    rgb_plot.set(xticklabels=[])
    rgb_plot.set(yticklabels=[])
    rgb_plot.set(facecolor='white')
    rgb_plot.get_legend().remove()
    axes[1].set_title('Hyperspectral', pad=20)
    #hyper_plot.set(title='Hyperspectral')
    hyper_plot.set(xlabel=None)
    hyper_plot.set(ylabel=None)
    hyper_plot.set(ylim=[ymin, ymax])
    hyper_plot.set(xlim=[xmin, xmax])
    hyper_plot.set(xticklabels=[])
    hyper_plot.set(yticklabels=[])
    hyper_plot.set(facecolor='white')
    hyper_plot.get_legend().remove()
    return fig, axes, lgd
def savefig(fig: matplotlib.figure.Figure, lgd: matplotlib.legend.Legend,
            path: str):
    """
    @brief Save figure to file, keeping the out-of-plot legend visible.
    @param[in] fig   Seaborn/matplotlib figure to be written to disk.
    @param[in] lgd   Figure legend placed outside the plotting area; it must
                     be listed in 'bbox_extra_artists' so that savefig()
                     does not crop it away.
    @param[in] path  Path to the output file where the figure will be saved.
    """
    plt.tight_layout()
    save_opts = {'bbox_extra_artists': (lgd,), 'bbox_inches': 'tight'}
    fig.savefig(path, **save_opts)
def specimen_iq_visble_rgb_metric(pixa, pixb):
    """
    @brief Distance between two Specim IQ visible-spectrum pixels, measured
           as the Euclidean distance of their RGB projections.
    @details The two spectra are stacked into a tiny 1x2 hyperspectral
             "image", projected to RGB with the ODSI-DB loader, and the
             L2 norm between the two resulting RGB pixels is returned.
             NOTE(review): the misspelling 'visble' in the function name is
             kept for backwards compatibility with existing callers.
    @param[in] pixa  Spectrum of the first pixel; must have one value per
                     wavelength in 'wl' below (397-738nm, visible subset of
                     the Specim IQ bands).
    @param[in] pixb  Spectrum of the second pixel, same length as pixa.
    @returns the scalar Euclidean distance in RGB space.
    """
    # Visible-range wavelengths of the Specim IQ camera
    wl = np.array([
        397.32000732, 400.20001221, 403.08999634, 405.97000122, 408.8500061,
        411.73999023, 414.63000488, 417.51998901, 420.3999939 , 423.29000854,
        426.19000244, 429.07998657, 431.97000122, 434.86999512, 437.76000977,
        440.66000366, 443.55999756, 446.45001221, 449.3500061, 452.25,
        455.16000366, 458.05999756, 460.95999146, 463.86999512, 466.76998901,
        469.67999268, 472.58999634, 475.5, 478.41000366, 481.32000732,
        484.23001099, 487.14001465, 490.05999756, 492.97000122, 495.89001465,
        498.79998779, 501.72000122, 504.64001465, 507.55999756, 510.48001099,
        513.40002441, 516.33001709, 519.25, 522.17999268, 525.09997559,
        528.0300293, 530.96002197, 533.89001465, 536.82000732, 539.75,
        542.67999268, 545.61999512, 548.54998779, 551.48999023, 554.42999268,
        557.35998535, 560.29998779, 563.23999023, 566.17999268, 569.11999512,
        572.07000732, 575.01000977, 577.96002197, 580.90002441, 583.84997559,
        586.79998779, 589.75, 592.70001221, 595.65002441, 598.59997559,
        601.54998779, 604.51000977, 607.46002197, 610.41998291, 613.38000488,
        616.34002686, 619.29998779, 622.26000977, 625.2199707 , 628.17999268,
        631.15002441, 634.10998535, 637.08001709, 640.03997803, 643.01000977,
        645.97998047, 648.95001221, 651.91998291, 654.89001465, 657.86999512,
        660.84002686, 663.80999756, 666.78997803, 669.77001953, 672.75,
        675.72998047, 678.71002197, 681.69000244, 684.66998291, 687.65002441,
        690.64001465, 693.61999512, 696.60998535, 699.59997559, 702.58001709,
        705.57000732, 708.57000732, 711.55999756, 714.54998779, 717.53997803,
        720.53997803, 723.5300293, 726.5300293, 729.5300293, 732.5300293,
        735.5300293, 738.5300293,
    ])
    # Create a (1, 2, nbands) image holding the two pixels side by side
    im_hyper = np.dstack((pixa, pixb)).transpose((0, 2, 1))
    # Convert image to RGB
    image_loader = torchseg.data_loader.OdsiDbDataLoader.LoadImage
    im = image_loader.hyper2rgb(im_hyper, wl)
    # Compute Euclidean distance in RGB
    dist = np.linalg.norm(im[0, 0] - im[0, 1])
    return dist
def main():
    """Entry point: sample labelled ODSI-DB pixels, run t-SNE on the RGB and
    hyperspectral versions for several perplexities, and save embeddings and
    figures to the output folder."""
    # Read command line parameters
    args = parse_cmdline_params()
    validate_cmdline_params(args)
    # Create output folder
    os.mkdir(args.output)
    # Get list of image and segmentation files
    image_paths, label_paths = read_files(args.input)
    # FIXME: this is for debugging
    #image_paths = image_paths[:1]
    #label_paths = label_paths[:1]
    # Read pixels from the dataset; 'none' on the CLI means no Specim->Nuance
    # conversion, encoded as None for get_pixels()
    args.nuance = None if args.nuance == 'none' else args.nuance
    hyper_pixels, rgb_pixels, labels = get_pixels(image_paths, label_paths,
                                                  args.npixels,
                                                  rgb_recon=args.recon,
                                                  visible=args.visible,
                                                  nuance_recon=args.nuance)
    #rgb_pixels = np.array(rgb_pixels)
    #hyper_pixels = np.array(hyper_pixels)
    #assert(rgb_pixels.shape[0] == hyper_pixels.shape[0])
    assert(len(hyper_pixels) == len(rgb_pixels))
    assert(len(rgb_pixels) == len(labels))
    # Convert labels from class indices to class names
    labels = [idx2class[l] for l in labels]
    # t-SNE: sweep perplexity from 10 to 200 in steps of 10
    for perp in list(range(10, 210, 10)):
        print("Running t-SNE with perplexity = {}...".format(perp))
        rgb_tsne = sklearn.manifold.TSNE(n_components=2, verbose=0,
                                         perplexity=perp,
                                         n_iter=1000, learning_rate=200,
                                         init='pca')
        hyper_tsne = sklearn.manifold.TSNE(n_components=2, verbose=0,
                                           perplexity=perp,
                                           n_iter=1000, learning_rate=200,
                                           init='pca')
        rgb_embed = rgb_tsne.fit_transform(rgb_pixels)
        hyper_embed = hyper_tsne.fit_transform(hyper_pixels)
        # Save embeddings as .npy files, one pair per perplexity value
        with open(os.path.join(args.output, "rgb_embed_perplexity_{}.npy".format(perp)), 'wb') as f:
            np.save(f, rgb_embed)
        with open(os.path.join(args.output, "hyper_embed_perplexity_{}.npy".format(perp)), 'wb') as f:
            np.save(f, hyper_embed)
        # Plot RGB and hyperspectral t-SNE subplots
        fig, axes, lgd = plot(rgb_embed, hyper_embed, labels)
        # Save figure inside the output folder
        savefig(fig, lgd, os.path.join(args.output, "figure_perplexity_{}.png".format(perp)))
if __name__ == '__main__':
    main()
| 20,785 | 42.668067 | 102 | py |
segodsidb | segodsidb-main/src/generate_results_latex.py | """
@brief Script that generates a Latex table where every row represents a class
and every column represents a cross-validation fold. The cells will
contain the metric, which for ODSI-DB we have decided will be balanced
accuracy.
@author Luis Carlos Garcia Peraza Herrera (luiscarlos.gph@gmail.com).
@date 5 Aug 2021.
"""
import argparse
import numpy as np
import os
import json
import numpy as np
# My imports
import torchseg.data_loader as dl
def help(short_option):
    """
    @returns The string with the help information for each command line option.
    """
    # NOTE: this function shadows the builtin help(); name kept because the
    # parse_cmdline_params() helpers in this repo all rely on it.
    messages = {
        '-i': 'Dictionary of fold names to paths.',
        '-c': 'Caption of the table of results.',
        '-l': 'Table label.',
        '-s': 'List containing a subset of classes to display.',
    }
    return messages[short_option]
def parse_cmdline_params():
    """
    @brief Parse the command line options of this script.
    @returns The argparse args object, with 'input' evaluated to a dict and
             'classes' evaluated to a list (or None).
    """
    # Create command line parser
    parser = argparse.ArgumentParser(description='PyTorch segmenter.')
    parser.add_argument('-i', '--input', required=True, type=str,
                        help=help('-i'))
    parser.add_argument('-c', '--caption', required=True, type=str,
                        help=help('-c'))
    parser.add_argument('-l', '--label', required=True, type=str,
                        help=help('-l'))
    # BUGFIX: the help text of '-s' was wrongly taken from help('-l')
    # (copy-paste error), so --help displayed the label description twice.
    parser.add_argument('-s', '--classes', required=False, default='None',
                        type=str, help=help('-s'))
    # Read parameters
    args = parser.parse_args()
    # SECURITY NOTE(review): eval() executes arbitrary code coming from the
    # command line; consider ast.literal_eval() since only a dict/list
    # literal is expected here.
    args.input = eval(args.input)
    args.classes = eval(args.classes)
    assert(type(args.input) == dict)
    return args
def validate_cmdline_params(args):
    """
    @brief Validation step for the parsed arguments; currently a no-op.
    @details The per-file existence check is disabled because the values in
             args.input are path PREFIXES: a metric-specific suffix (e.g.
             '_accuracy.json') is appended to them later in main().
    @returns the args object unchanged.
    """
    return args
def read_json(path):
    """Load the JSON file at *path* and return its deserialised content."""
    with open(path) as json_file:
        return json.load(json_file)
def print_latex_table(metrics, caption, label, class_subset=None):
    """
    @brief Print a LaTeX table with classes as rows and metrics as columns.
    @param[in] metrics       Dict: metric name -> (class name -> value in
                             [0, 1]). Values are printed as percentages.
    @param[in] caption       Caption of the table of results.
    @param[in] label         LaTeX label of the table.
    @param[in] class_subset  Optional list with the subset of classes to
                             display; if None or empty, all the classes of
                             the first metric are displayed.
    @returns Nothing.
    """
    # Get the list of classes that the user wants to consider
    if class_subset:
        list_of_classes = class_subset
    else:
        # BUGFIX: dict.keys() is a view and is not subscriptable in Python 3
        # (the old metrics.keys()[0] raised TypeError); grab the class names
        # of the first metric with next(iter(...)) instead.
        list_of_classes = list(next(iter(metrics.values())).keys())
    # Compute average over classes for each metric
    average = {}
    for m in metrics:
        acc = 0.
        for c in metrics[m]:
            if c not in list_of_classes:
                continue
            acc += metrics[m][c]
        avg = acc / len(list_of_classes)
        average[m] = avg
    # Table header
    print()
    print("\\begin{table*}[!htb]")
    print(" \\centering")
    print(" \\caption{" + caption + "}")
    print(" \\vspace{0.2cm}")
    print(" \\begin{tabular}{l" + "c" * len(metrics) + "}")
    print(" \\hline")
    print(" \\multicolumn{1}{c}{\\bfseries Class} ")
    # Print column names (one per metric, underscores become spaces)
    for m in metrics:
        print(" & \\multicolumn{1}{c}{\\bfseries " \
            + m.capitalize().replace('_', ' ') + "}")
    print(" \\\\")
    print(" \\hline")
    # Print all the metrics for each class, in alphabetical order
    for c in sorted(list_of_classes):
        line_str = ' ' + c
        for m in metrics:
            line_str += " & {:.2f}".format(metrics[m][c] * 100)
        line_str += " \\\\"
        print(line_str)
    print(" \\hline")
    # Print the average for each metric
    line_str = " \\multicolumn{1}{c}{\\bfseries Average} "
    for m in metrics:
        line_str += " & {:.2f}".format(average[m] * 100)
    line_str += " \\\\"
    print(line_str)
    # Table footer
    print(" \\end{tabular}")
    print(" \\vspace{0.2cm}")
    print(" \\label{" + label + "}")
    print("\\end{table*}")
def main():
    """Entry point: average each metric's per-class JSON results over the
    cross-validation folds given in --input and print one LaTeX table."""
    # Read command line parameters
    args = parse_cmdline_params()
    validate_cmdline_params(args)
    classnames = dl.OdsiDbDataLoader.OdsiDbDataset.classnames
    # List of suffixes, one per metric; appended to each fold's path prefix
    lsuffix = ['_sensitivity.json', '_specificity.json',
               '_accuracy.json', '_balanced_accuracy.json']
    # Produce latex table for all the metrics
    metrics = {}
    for suffix in lsuffix:
        # Read JSON files with the results for each class
        fold_results = {fold_id: read_json(path + suffix) for fold_id, path in args.input.items()}
        # Average over folds (nanmean ignores folds without the class)
        metric_per_class = {classnames[k]: None for k in classnames}
        for class_name in metric_per_class:
            values = [fold_results[fold_id][class_name] for fold_id in fold_results]
            metric_per_class[class_name] = np.nanmean(values)
        # Store the per-class results for this metric, keyed by the metric
        # name derived from the suffix (e.g. 'balanced_accuracy')
        metrics[suffix.split('.')[0][1:]] = metric_per_class
    # Print Latex table formated for Photonics West
    print_latex_table(metrics, caption=args.caption, label=args.label, class_subset=args.classes)
    #metric_name="Balanced accuracy (\\%)",
if __name__ == '__main__':
    main()
| 5,686 | 30.41989 | 98 | py |
segodsidb | segodsidb-main/src/split_images_per_camera_model.py | """
@brief Script that splits ODSI-DB dataset into two parts: images generated
by the Nuance EX (1392x1040 pixels, 450-950nm, 10nm steps) and the
Specim IQ (512x512 pixels, 400-1000nm, 3nm steps).
@author Luis Carlos Garcia Peraza Herrera (luiscarlos.gph@gmail.com).
@date 27 Sep 2021.
"""
import argparse
import numpy as np
import os
import ntpath
import shutil
import tqdm
# My imports
import torchseg.data_loader
# Get dictionary from class index to class string, e.g. 0 -> u'Attached gingiva'
idx2class = torchseg.data_loader.OdsiDbDataLoader.OdsiDbDataset.classnames
def help(short_option):
    """
    @returns The string with the help information for each command line option.
    """
    # NOTE: shadows the builtin help(); kept for consistency with the other
    # scripts of this repo.
    messages = {
        '-i': 'Path to the root of the ODSI-DB dataset (required: True)',
        '-o': 'Path to the output directory (required: True)',
    }
    return messages[short_option]
def parse_cmdline_params():
    """Build the CLI parser and return the parsed argparse namespace."""
    parser = argparse.ArgumentParser(description='PyTorch segmenter.')
    # Both options are mandatory string paths
    for short_flag, long_flag in (('-i', '--input'), ('-o', '--output')):
        parser.add_argument(short_flag, long_flag, required=True, type=str,
                            help=help(short_flag))
    return parser.parse_args()
def validate_cmdline_params(args):
    """
    @brief Check that the input folder exists and the output one does not.
    @returns the args object if valid; raises RuntimeError otherwise.
    """
    input_exists = os.path.isdir(args.input)
    output_exists = os.path.exists(args.output)
    if not input_exists:
        raise RuntimeError('[ERROR] Input directory does not exist.')
    # The output folder is created later by main(), so it must be new
    if output_exists:
        raise RuntimeError('[ERROR] Output directory already exists.')
    return args
def read_files(path: str):
    """
    @brief Collect the (image, segmentation) file pairs of an ODSI-DB folder.
    @param[in] path  Path to the folder with the ODSI-DB data.
    @returns a tuple (imgs, segs) of two index-aligned lists with the paths
             to the images and their segmentation labels. Images without a
             segmentation file are ignored.
    """
    # Get list of TIFF files (assumes torchseg.utils.listdir() yields names
    # relative to 'path', as implied by the os.path.join below -- TODO confirm)
    tiffs = [f for f in torchseg.utils.listdir(path) if '.tif' in f]
    # Get list of segmentation files
    segs = [os.path.join(path, f) for f in tiffs if f.endswith('_masks.tif')]
    # Get list of image files (we ignore images without segmentation)
    imgs = [f.replace('_masks.tif', '.tif') for f in segs]
    # BUGFIX: each 'im' is already joined with 'path' above; joining it with
    # 'path' a second time produced 'path/path/...' and made this sanity
    # check fail whenever 'path' was a relative directory.
    for im in imgs:
        assert(os.path.isfile(im))
    return imgs, segs
def main():
    """Entry point: split ODSI-DB into two folders according to the number of
    spectral bands of each image: 51 bands -> Nuance EX, 204 -> Specim IQ."""
    # Read command line parameters
    args = parse_cmdline_params()
    validate_cmdline_params(args)
    # Create output folders, one per camera model
    nuance_ex_path = os.path.join(args.output, 'nuance_ex')
    specim_iq_path = os.path.join(args.output, 'specim_iq')
    os.mkdir(args.output)
    os.mkdir(nuance_ex_path)
    os.mkdir(specim_iq_path)
    # Get list of image and segmentation files
    image_paths, label_paths = read_files(args.input)
    for im_path, label_path in tqdm.tqdm(zip(image_paths, label_paths)):
        im_fname = ntpath.basename(im_path)
        label_fname = ntpath.basename(label_path)
        # Read image (only the band count is used here)
        im_hyper, wl, im_rgb, metadata = torchseg.data_loader.read_stiff(im_path,
            silent=True, rgb_only=False)
        nbands = im_hyper.shape[2]
        # Copy image to the right folder depending on the type of camera
        if nbands == 51:  # Nuance EX
            im_dpath = os.path.join(nuance_ex_path, im_fname)
            label_dpath = os.path.join(nuance_ex_path, label_fname)
        elif nbands == 204:  # Specim IQ
            im_dpath = os.path.join(specim_iq_path, im_fname)
            label_dpath = os.path.join(specim_iq_path, label_fname)
        else:
            raise ValueError('[ERROR] The image {} has {} bands.'.format(im_fname,
                nbands))
        shutil.copyfile(im_path, im_dpath)
        shutil.copyfile(label_path, label_dpath)
if __name__ == '__main__':
    main()
| 3,860 | 30.137097 | 82 | py |
segodsidb | segodsidb-main/src/compute_all_hyper_400_1000_204_std.py | """
@brief Compute the channel std of the 204 interpolated bands from
400-1000nm.
@author Luis Carlos Garcia Peraza Herrera (luiscarlos.gph@gmail.com).
@date 8 Jul 2022.
"""
import argparse
import numpy as np
import os
import ntpath
import shutil
import tqdm
import cv2
import sys
# My imports
import torchseg.data_loader as dl
import torchseg.utils
# Get dictionary from class index to class string,
# e.g. 0 -> u'Attached gingiva'
idx2class = dl.OdsiDbDataLoader.OdsiDbDataset.classnames
def help(short_option):
    """
    @returns The string with the help information for each command
             line option.
    """
    # NOTE: shadows the builtin help(); name kept for repo-wide consistency.
    messages = {
        '-i': 'Path to the root of the ODSI-DB dataset (required: True)',
        '-m': 'Channel means normalised to the range [0, 1] (required: True)',
    }
    return messages[short_option]
def parse_cmdline_params():
    """Build the CLI parser, parse argv and return the namespace; the
    '--means' string is evaluated into a Python list."""
    cli = argparse.ArgumentParser(description='PyTorch segmenter.')
    cli.add_argument('-i', '--input', required=True, type=str,
                     help=help('-i'))
    cli.add_argument('-m', '--means', required=True, type=str,
                     help=help('-m'))
    args = cli.parse_args()
    # SECURITY NOTE(review): eval() on a command-line string executes
    # arbitrary code; ast.literal_eval() would be the safe choice if only a
    # list literal of floats is expected here.
    args.means = eval(args.means)
    return args
def validate_cmdline_params(args):
    """
    @brief Raise RuntimeError if the input directory is missing.
    @returns nothing.
    """
    input_dir_exists = os.path.isdir(args.input)
    if not input_dir_exists:
        raise RuntimeError('[ERROR] Input directory does not exist.')
def read_files(path: str):
    """
    @brief Collect the (image, segmentation) file pairs of an ODSI-DB folder.
    @param[in] path  Path to the folder with the ODSI-DB data.
    @returns a tuple (imgs, segs) of two index-aligned lists with the paths
             to the images and their segmentation labels. Images without a
             segmentation file are ignored.
    """
    # Get list of TIFF files (assumes torchseg.utils.listdir() yields names
    # relative to 'path', as implied by the os.path.join below -- TODO confirm)
    tiffs = [f for f in torchseg.utils.listdir(path) if '.tif' in f]
    # Get list of segmentation files
    segs = [os.path.join(path, f) for f in tiffs if f.endswith('_masks.tif')]
    # Get list of image files (we ignore images without segmentation)
    imgs = [f.replace('_masks.tif', '.tif') for f in segs]
    # BUGFIX: each 'im' is already joined with 'path' above; joining it with
    # 'path' a second time produced 'path/path/...' and made this sanity
    # check fail whenever 'path' was a relative directory.
    for im in imgs:
        assert(os.path.isfile(im))
    return imgs, segs
def main():
    """Entry point: accumulate sum(x^2 - mean^2) per band over the whole
    dataset (both cameras, resampled to 204 bands over 400-1000nm) and print
    the running per-band statistic."""
    # Read command line parameters
    args = parse_cmdline_params()
    validate_cmdline_params(args)
    # Get list of image and segmentation files
    image_paths, _ = read_files(args.input)
    # Read means (squared, since the accumulator below needs mean^2)
    mean_sq = np.array(args.means) * np.array(args.means)
    # Hyperspectral std computation stuff: common 204-band grid in 400-1000nm
    min_wl = 400
    max_wl = 1000
    nbands = 204
    new_wl = np.linspace(min_wl, max_wl, nbands)
    hyper_sums = np.zeros((new_wl.shape[0],), dtype=np.float64)
    pixel_count = 0
    image_count = 0
    interp_func = dl.OdsiDbDataLoader.LoadImage.interp_spectra
    # Get the indices of the wavelengths that are shared among both cameras
    # (the Nuance EX only covers 450-950nm)
    inside_indices = np.array([idx for idx, x in enumerate(new_wl.tolist()) \
        if x > 450. and x < 950.])
    inside_wl = new_wl[inside_indices]
    # Print interval (in number of images)
    interval = 10
    # Loop over the images
    for im_path in tqdm.tqdm(image_paths):
        im_fname = os.path.splitext(ntpath.basename(im_path))[0]
        # Read image
        im_hyper, wl, _, _ = dl.read_stiff(im_path, silent=True,
            rgb_only=False)
        npixels = im_hyper.shape[0] * im_hyper.shape[1]
        # Handle Nuance EX images (51 bands)
        if im_hyper.shape[2] == 51:
            # Interpolate to bands in the range [450-950]
            im_hyper_inside = interp_func(im_hyper, wl, inside_wl)
            # Extrapolate to 204 bands: bands outside 450-950nm are filled
            # with the first/last available band (constant extrapolation)
            im_hyper = np.empty((im_hyper_inside.shape[0],
                                 im_hyper_inside.shape[1],
                                 nbands),
                                dtype=im_hyper_inside.dtype)
            j = 0
            for i in range(nbands):
                if i < np.min(inside_indices):
                    im_hyper[:, :, i] = im_hyper_inside[:, :, 0]
                elif i > np.max(inside_indices):
                    im_hyper[:, :, i] = im_hyper_inside[:, :, -1]
                else:
                    im_hyper[:, :, i] = im_hyper_inside[:, :, j]
                    j += 1
        # Handle Specim IQ images (204 bands, resampled onto the common grid)
        else:
            im_hyper = interp_func(im_hyper, wl, new_wl)
        # Update std computation.
        # NOTE(review): hyper_sums accumulates sum(x^2 - mean^2); dividing by
        # pixel_count below yields the per-band VARIANCE (E[x^2] - mean^2),
        # yet it is printed as "std" without a sqrt -- confirm what the
        # downstream consumers of these numbers expect.
        im_hyper_sq = im_hyper * im_hyper
        for i in range(hyper_sums.shape[0]):
            hyper_sums[i] += (im_hyper_sq[:, :, i] - mean_sq[i]).sum()
        # Update counters
        pixel_count += npixels
        image_count += 1
        # Print intermediate stats
        if image_count % interval == 0:
            print('Image count:', image_count)
            sys.stdout.write('All hyper 400-1000nm (204 bands) std: [')
            for i in range(nbands):
                sys.stdout.write(str(hyper_sums[i] / pixel_count))
                if i < nbands - 1:
                    sys.stdout.write(', ')
            sys.stdout.write(']\n')
    # Print final stats
    print('Image count:', image_count)
    sys.stdout.write('All hyper 400-1000nm (204 bands) std: [')
    for i in range(nbands):
        sys.stdout.write(str(hyper_sums[i] / pixel_count))
        if i < nbands - 1:
            sys.stdout.write(', ')
    sys.stdout.write(']')
if __name__ == '__main__':
    main()
| 5,487 | 29.831461 | 78 | py |
segodsidb | segodsidb-main/src/compute_specim_iq_std.py | """
@brief Compute the channel std of the Specim IQ images.
@author Luis Carlos Garcia Peraza Herrera (luiscarlos.gph@gmail.com).
@date 16 Feb 2022.
"""
import argparse
import numpy as np
import os
import ntpath
import shutil
import tqdm
import cv2
import sys
# My imports
import torchseg.data_loader as dl
import torchseg.utils
# Get dictionary from class index to class string, e.g. 0 -> u'Attached gingiva'
idx2class = dl.OdsiDbDataLoader.OdsiDbDataset.classnames
def help(short_option):
    """
    @returns The string with the help information for each command line option.
    """
    # NOTE: shadows the builtin help(); name kept for repo-wide consistency.
    messages = {
        '-i': 'Path to the root of the ODSI-DB dataset (required: True)',
        '-m': 'Channel means normalised to the range [0, 1] (required: True)',
    }
    return messages[short_option]
def parse_cmdline_params():
    """Parse the command line; '--means' is evaluated into a Python list."""
    parser = argparse.ArgumentParser(description='PyTorch segmenter.')
    for short_flag, long_flag in (('-i', '--input'), ('-m', '--means')):
        parser.add_argument(short_flag, long_flag, required=True, type=str,
                            help=help(short_flag))
    args = parser.parse_args()
    # SECURITY NOTE(review): eval() on a command-line string executes
    # arbitrary code; ast.literal_eval() would be the safe choice if only a
    # list literal of floats is expected here.
    args.means = eval(args.means)
    return args
def validate_cmdline_params(args):
    """
    @brief Raise RuntimeError if the input directory is missing.
    @returns nothing.
    """
    input_dir_exists = os.path.isdir(args.input)
    if not input_dir_exists:
        raise RuntimeError('[ERROR] Input directory does not exist.')
def read_files(path: str):
    """
    @brief Collect the (image, segmentation) file pairs of an ODSI-DB folder.
    @param[in] path  Path to the folder with the ODSI-DB data.
    @returns a tuple (imgs, segs) of two index-aligned lists with the paths
             to the images and their segmentation labels. Images without a
             segmentation file are ignored.
    """
    # Get list of TIFF files (assumes torchseg.utils.listdir() yields names
    # relative to 'path', as implied by the os.path.join below -- TODO confirm)
    tiffs = [f for f in torchseg.utils.listdir(path) if '.tif' in f]
    # Get list of segmentation files
    segs = [os.path.join(path, f) for f in tiffs if f.endswith('_masks.tif')]
    # Get list of image files (we ignore images without segmentation)
    imgs = [f.replace('_masks.tif', '.tif') for f in segs]
    # BUGFIX: each 'im' is already joined with 'path' above; joining it with
    # 'path' a second time produced 'path/path/...' and made this sanity
    # check fail whenever 'path' was a relative directory.
    for im in imgs:
        assert(os.path.isfile(im))
    return imgs, segs
def main():
    """Entry point: accumulate sum(x^2 - mean^2) per band over the Specim IQ
    images of the dataset (204 bands) and print the running per-band
    statistic."""
    # Read command line parameters
    args = parse_cmdline_params()
    validate_cmdline_params(args)
    # Get list of image and segmentation files
    image_paths, label_paths = read_files(args.input)
    # Read means (squared, since the accumulator below needs mean^2)
    mean_sq = np.array(args.means) * np.array(args.means)
    # Hyperspectral std computation stuff
    nbands = 204
    hyper_sums = np.zeros((nbands,), dtype=np.float64)
    pixel_count = 0
    image_count = 0
    # Print interval (in number of Specim IQ images processed)
    interval = 10
    # Loop over the images
    for im_path in tqdm.tqdm(image_paths):
        im_fname = os.path.splitext(ntpath.basename(im_path))[0]
        # Read image
        im_hyper, wl, im_rgb_orig, metadata = dl.read_stiff(im_path,
            silent=True, rgb_only=False)
        # We are only interested in Specim IQ images (204 bands)
        if im_hyper.shape[2] != nbands:
            continue
        # Update std computation.
        # NOTE(review): hyper_sums accumulates sum(x^2 - mean^2); dividing by
        # pixel_count below yields the per-band VARIANCE (E[x^2] - mean^2),
        # yet it is printed as "std" without a sqrt -- confirm what the
        # downstream consumers of these numbers expect.
        im_hyper_sq = im_hyper * im_hyper
        for i in range(nbands):
            hyper_sums[i] += (im_hyper_sq[:, :, i] - mean_sq[i]).sum()
        pixel_count += im_hyper.shape[0] * im_hyper.shape[1]
        image_count += 1
        # Print intermediate stats
        if image_count % interval == 0:
            print('Image count:', image_count)
            sys.stdout.write('Specim IQ std: [')
            for i in range(nbands):
                sys.stdout.write(str(hyper_sums[i] / pixel_count))
                if i < nbands - 1:
                    sys.stdout.write(', ')
            sys.stdout.write(']\n')
    # Print final stats
    print('Image count:', image_count)
    sys.stdout.write('Specim IQ std: [')
    for i in range(nbands):
        sys.stdout.write(str(hyper_sums[i] / pixel_count))
        if i < nbands - 1:
            sys.stdout.write(', ')
    sys.stdout.write(']')
if __name__ == '__main__':
    main()
| 4,105 | 27.713287 | 80 | py |
segodsidb | segodsidb-main/src/compute_all_hyper_400_1000_204_mean.py | """
@brief Compute the channel mean of the 204 interpolated bands from
400-1000nm. This script handles hyperspectral images from both
cameras, Nuance EX (450-950nm) and Specim IQ (400-1000nm).
@details The channels of both cameras are interpolated to a fixed range
define as:
np.linspace(400, 1000, 204)
However, the number of pixels within the range [450-950] is higher
than that in the ranges [400-450] and [950-1000]. This is because
for the ranges [400-450] and [950-1000] only the pixels of the
Specim IQ contribute, whereas for the range in-between, the images
of both cameras contribute.
@author Luis Carlos Garcia Peraza Herrera (luiscarlos.gph@gmail.com).
@date 7 Jul 2022.
"""
import argparse
import numpy as np
import os
import ntpath
import shutil
import tqdm
import cv2
import sys
# My imports
import torchseg.data_loader as dl
import torchseg.utils
# Get dictionary from class index to class string, e.g. 0 -> u'Attached gingiva'
idx2class = dl.OdsiDbDataLoader.OdsiDbDataset.classnames
def help(short_option):
    """
    @returns The string with the help information for each command line option.
    """
    # NOTE: shadows the builtin help(); name kept for repo-wide consistency.
    messages = {
        '-i': 'Path to the root of the ODSI-DB dataset (required: True)',
    }
    return messages[short_option]
def parse_cmdline_params():
    """
    @brief Define and parse the command line options of this script.
    @returns the argparse args object.
    """
    parser = argparse.ArgumentParser(description='PyTorch segmenter.')
    parser.add_argument('-i', '--input', required=True, type=str,
                        help=help('-i'))
    return parser.parse_args()
def validate_cmdline_params(args):
    """
    @brief Sanity-check the parsed command line options.
    @param[in] args  Argparse namespace with an 'input' attribute.
    @returns nothing; raises RuntimeError if the input dir is missing.
    """
    if os.path.isdir(args.input):
        return
    raise RuntimeError('[ERROR] Input directory does not exist.')
def read_files(path: str):
    """
    @brief Collect the annotated image/label file pairs of ODSI-DB.
    @param[in] path  Path to the folder with the ODSI-DB data.
    @returns a tuple (imgs, segs) with the list of image paths and the list
             of segmentation label paths; images without labels are skipped.
    """
    # Annotation files end with '_masks.tif'; each image shares the stem
    segs = [os.path.join(path, f) for f in torchseg.utils.listdir(path)
            if '.tif' in f and f.endswith('_masks.tif')]
    imgs = [s.replace('_masks.tif', '.tif') for s in segs]
    # Sanity check: every mask must have its image next to it.
    # NOTE(review): 'im' is already a joined path, so this second join is
    # only a no-op if torchseg.utils.listdir() returns absolute paths --
    # confirm against torchseg.utils.
    for im in imgs:
        assert(os.path.isfile(os.path.join(path, im)))
    return imgs, segs
def main():
    """
    @brief Compute the per-band mean over all annotated ODSI-DB images after
           resampling every image to 204 bands in the 400-1000nm range.
           Nuance EX (51-band, 450-950nm) images are interpolated within
           their native range and edge-replicated outside it.
    """
    # Read command line parameters
    args = parse_cmdline_params()
    validate_cmdline_params(args)
    # Get list of image and segmentation files
    image_paths, label_paths = read_files(args.input)
    # Hyperspectral mean computation stuff
    min_wl = 400
    max_wl = 1000
    nbands = 204
    new_wl = np.linspace(min_wl, max_wl, nbands)
    # Per-band running sum of pixel values (float64 to limit rounding error)
    hyper_sums = np.zeros((new_wl.shape[0],), dtype=np.float64)
    #inside_pixel_count = 0
    #outside_pixel_count = 0
    pixel_count = 0
    image_count = 0
    interp_func = dl.OdsiDbDataLoader.LoadImage.interp_spectra
    # Get the indices of the wavelengths that are shared among both cameras
    inside_indices = np.array([idx for idx, x in enumerate(new_wl.tolist()) \
        if x > 450. and x < 950.])
    inside_wl = new_wl[inside_indices]
    # Print interval
    interval = 10
    # Loop over the images
    for im_path in tqdm.tqdm(image_paths):
        im_fname = os.path.splitext(ntpath.basename(im_path))[0]
        # Read image
        im_hyper, wl, _, _ = dl.read_stiff(im_path, silent=True,
                                           rgb_only=False)
        npixels = im_hyper.shape[0] * im_hyper.shape[1]
        # Handle Nuance EX images
        if im_hyper.shape[2] == 51:
            # Interpolate to bands in the range [450-950]
            im_hyper_inside = interp_func(im_hyper, wl, inside_wl)
            # Extrapolate to 204 bands: wavelengths below/above the camera
            # range are filled by replicating the first/last inside band
            im_hyper = np.empty((im_hyper_inside.shape[0],
                                 im_hyper_inside.shape[1],
                                 nbands),
                                dtype=im_hyper_inside.dtype)
            j = 0
            for i in range(nbands):
                if i < np.min(inside_indices):
                    im_hyper[:, :, i] = im_hyper_inside[:, :, 0]
                elif i > np.max(inside_indices):
                    im_hyper[:, :, i] = im_hyper_inside[:, :, -1]
                else:
                    # Consume the interpolated inside bands sequentially
                    im_hyper[:, :, i] = im_hyper_inside[:, :, j]
                    j += 1
        # Handle Specim IQ images
        else:
            im_hyper = interp_func(im_hyper, wl, new_wl)
        # Update average computation
        for i in range(hyper_sums.shape[0]):
            hyper_sums[i] += im_hyper[:, :, i].sum()
        # Update counters
        pixel_count += npixels
        image_count += 1
        # Print intermediate stats
        if image_count % interval == 0:
            print('Image count:', image_count)
            sys.stdout.write('All hyper 400-1000nm (204 bands) average: [')
            for i in range(nbands):
                sys.stdout.write(str(hyper_sums[i] / pixel_count))
                if i < nbands - 1:
                    sys.stdout.write(', ')
            sys.stdout.write(']\n')
    # Print final stats
    print('Image count:', image_count)
    sys.stdout.write('All hyper 400-1000nm (204 bands) average: [')
    for i in range(nbands):
        sys.stdout.write(str(hyper_sums[i] / pixel_count))
        if i < nbands - 1:
            sys.stdout.write(', ')
    sys.stdout.write(']\n')
if __name__ == '__main__':
main()
| 5,821 | 30.989011 | 80 | py |
segodsidb | segodsidb-main/src/odsi_db_stats.py | """
@brief Script that show some ODSI-DB stats that are relevant for the paper:
- Number of images of 51 bands
- Number of images of 204 bands
- Number of pixels per class
- Number of images in which a class is annotated
@author Luis Carlos Garcia Peraza Herrera (luiscarlos.gph@gmail.com).
@date 3 Sep 2021.
"""
import argparse
import numpy as np
import os
import shutil
import sys
import tqdm
import json
import random
# My imports
import torchseg.data_loader as dl
import torchseg.utils
# Get dictionary from class index to class string, e.g. 0 -> u'Attached gingiva'
classnames = torchseg.data_loader.OdsiDbDataLoader.OdsiDbDataset.classnames
def help(short_option):
    """
    @brief Look up the CLI help message for a short option flag.
    @param[in] short_option  Short option string, e.g. '-i'.
    @returns the help string for the given option.
    """
    # NOTE: deliberately shadows the builtin help(); kept for compatibility.
    return {
        '-i': 'Path to the root of the ODSI-DB dataset (required: True)',
    }[short_option]
def parse_cmdline_params():
    """
    @brief Define and parse the command line options of this script.
    @returns the argparse args object.
    """
    parser = argparse.ArgumentParser(description='PyTorch segmenter.')
    parser.add_argument('-i', '--input', required=True, type=str,
                        help=help('-i'))
    return parser.parse_args()
def validate_cmdline_params(args):
    """
    @brief Ensure that the input directory exists.
    @returns the validated args object unchanged.
    """
    input_exists = os.path.isdir(args.input)
    if not input_exists:
        raise RuntimeError('[ERROR] Input directory does not exist.')
    return args
def read_files(path):
    """
    @brief Collect the annotated image/label file pairs of ODSI-DB.
    @param[in] path  Path to the folder with the ODSI-DB data.
    @returns a tuple (imgs, segs) with the list of image paths and the list
             of segmentation label paths; images without labels are skipped.
    """
    # Annotation files end with '_masks.tif'; each image shares the stem
    segs = [os.path.join(path, f) for f in torchseg.utils.listdir(path)
            if '.tif' in f and f.endswith('_masks.tif')]
    imgs = [s.replace('_masks.tif', '.tif') for s in segs]
    # Sanity check: every mask must have its image next to it.
    # NOTE(review): 'im' is already a joined path; the second join is a
    # no-op only if torchseg.utils.listdir() returns absolute paths.
    for im in imgs:
        assert(os.path.isfile(os.path.join(path, im)))
    return imgs, segs
def num_images_per_camera(images):
    """
    @brief Count images per camera: Nuance EX produces 51 bands and
           Specim IQ produces 204 bands.
    @param[in] images  List of paths to the hyperspectral TIFF images.
    @returns a tuple (bands_51, bands_204) with the per-camera counts.
    """
    counts = {51: 0, 204: 0}
    for im_path in images:
        im, wl, im_rgb, metadata = dl.read_stiff(im_path, silent=True,
                                                 rgb_only=False)
        nbands = im.shape[2]
        if nbands not in counts:
            raise ValueError('[ERROR] The image ' + im_path + ' has ' \
                + str(nbands) + ' bands.')
        counts[nbands] += 1
    return counts[51], counts[204]
def get_latex_table(label, columns, data_pix, data_im, caption):
"""
@brief Get a table with the stats for each class in the dataset.
@param[in] label Latex label of the table.
@param[in] columns List of column names.
@param[in] data_pix Class -> number of pixels.
@param[in] data_im Class -> number of images.
@param[in] caption Caption of the figure.
@returns the string with the latex code.
"""
latex = "\\begin{table*}[htb!]\n"
latex += " \centering\n"
latex += " \caption{" + caption + "}\n"
latex += " \\vspace{0.2cm}\n"
latex += " \\begin{tabular}{lrr}\n"
latex += " \\hline\n"
latex += " \multicolumn{1}{c}{\\bfseries " + columns[0] + "} &\n"
latex += " \multicolumn{1}{c}{\\bfseries " + columns[1] + "} &\n"
latex += " \multicolumn{1}{c}{\\bfseries " + columns[2] + "} \\\\ \n"
latex += " \hline\n"
# Print rows here
data_pix = dict(sorted(data_pix.items(), key=lambda item: item[1],
reverse=True))
for k in data_pix:
latex += ' ' + k + ' & ' + str(data_pix[k]) + ' & ' + str(data_im[k]) + " \\\\ \n"
latex += " \end{tabular}\n"
latex += " \\vspace{0.2cm}\n"
latex += " \label{tab:" + label + "}\n"
latex += "\end{table*}"
return latex
def num_pixels_per_class(images, labels):
    """
    @brief Count annotated pixels and images per class.
    @param[in] images  List of image paths (unused; kept for interface
                       compatibility with the caller).
    @param[in] labels  List of paths to the '_masks.tif' annotation files.
    @returns a tuple (class2numpix, class2numim) mapping each class name to
             its number of annotated pixels and of images containing it.
    """
    class_names = dl.OdsiDbDataLoader.OdsiDbDataset.classnames.values()
    class2numpix = {name: 0 for name in class_names}
    class2numim = {name: 0 for name in class_names}
    for label_path in labels:
        # Each mask file stores one binary mask per annotated class.
        # Use the 'dl' alias for consistency with the rest of this script
        # (was 'torchseg.data_loader.read_mtiff'; same module object).
        label = dl.read_mtiff(label_path)
        for c in label:
            class2numim[c] += 1
            class2numpix[c] += np.count_nonzero(label[c])
    return class2numpix, class2numim
def main():
    """
    @brief Print ODSI-DB dataset statistics: a LaTeX table with per-class
           pixel/image counts and the number of images per camera.
    """
    # Read command line parameters
    args = parse_cmdline_params()
    validate_cmdline_params(args)
    # Get list of image and segmentation files
    images, labels = read_files(args.input)
    # Compute the number of pixels per class
    class2numpix, class2numim = num_pixels_per_class(images, labels)
    # NOTE(review): get_latex_table() already prepends 'tab:' to the label,
    # so this call produces the label 'tab:tab:class_to_numpix' -- confirm.
    print(get_latex_table('tab:class_to_numpix',
                          ['Class', 'Number of pixels', 'Number of images'],
                          class2numpix, class2numim,
                          'Number of pixels per class.'))
    # Compute the number of images recorded by Nuance EX (51 bands) and
    # the number of images recorded by Specim IQ (204 bands)
    bands_51, bands_204 = num_images_per_camera(images)
    print('Number of images with 51 bands:', bands_51)
    print('Number of images with 204 bands:', bands_204)
    print('Total number of annotated images:', len(images))
if __name__ == '__main__':
main()
| 5,669 | 31.4 | 97 | py |
segodsidb | segodsidb-main/src/compute_class_loss_weights.py | """
@brief Compute the weights for each class of the ODSI-DB dataset based on the
number of pixels of each class.
@author Luis Carlos Garcia Peraza Herrera (luiscarlos.gph@gmail.com).
@date 21 Jun 2022.
"""
# My imports
import torchseg.data_loader as dl
# Number of annotated pixels per class in ODSI-DB, as reported by
# odsi_db_stats.py; used below to derive the per-class loss weights.
pixels_per_class = {
    u'Skin': 18993686,
    u'Out of focus area': 8454491,
    u'Oral mucosa': 7917543,
    u'Enamel': 4913805,
    u'Tongue': 4081689,
    u'Lip': 3920600,
    u'Hard palate': 2375998,
    u'Specular reflection': 1931845,
    u'Attached gingiva': 1922545,
    u'Soft palate': 1398594,
    u'Hair': 1383970,
    u'Marginal gingiva': 804393,
    u'Prosthetics': 755554,
    u'Shadow/Noise': 732209,
    u'Plastic': 255017,
    u'Metal': 196682,
    u'Gingivitis': 161874,
    u'Attrition/Erosion': 100919,
    u'Inflammation': 81098,
    u'Pigmentation': 43144,
    u'Calculus': 28615,
    u'Initial caries': 22008,
    u'Stain': 19428,
    u'Fluorosis': 17872,
    u'Microfracture': 14759,
    u'Root': 13962,
    u'Plaque': 10024,
    u'Dentine caries': 6616,
    u'Ulcer': 5552,
    u'Leukoplakia': 4623,
    u'Blood vessel': 3667,
    u'Mole': 2791,
    u'Malignant lesion': 1304,
    u'Fibroma': 593,
    u'Makeup': 406,
}
def main():
    """
    @brief Compute and print per-class loss weights, defined for each class
           as 1 - (class pixel count / total pixel count). The weights are
           printed in three formats: name-keyed dict, index-keyed dict and
           an index-ordered vector.
    @raises ValueError if pixels_per_class and the dataloader class list
            do not contain exactly the same classes.
    """
    classnames = dl.OdsiDbDataLoader.OdsiDbDataset.classnames
    # Make sure that all the classes of ODSI-DB are present in the dic
    dataloader_classes = [classnames[k] for k in classnames]
    for k in dataloader_classes:
        if k not in pixels_per_class:
            raise ValueError('[ERROR] The number of pixels of the class ' \
                + k + ' is unknown.')
    # Make sure that the classes present in the dic are in ODSI-DB
    for k in pixels_per_class:
        if k not in dataloader_classes:
            raise ValueError('[ERROR] The class ' + k + ' is not known' \
                + ' to the dataloader.')
    # Count the total number of pixels (was a manual accumulation loop)
    total = sum(pixels_per_class.values())
    # Compute 1 - weight of each of the classes
    weights = {k: 1 - (float(v) / total) for k, v in pixels_per_class.items()}
    # Print result in Python format
    print('class_weights = {')
    for k, v in weights.items():
        print(" '" + k + "': " + "{:.9f}".format(v) + ',')
    print('}')
    # Print weights using class indices
    inverted = {v: k for k, v in classnames.items()}
    print('class_weights = {')
    for k, v in weights.items():
        number = inverted[k]
        print(" '" + "{}".format(number) + "': " + "{:.9f}".format(v) + ',')
    print('}')
    # Print weights as a vector in class-index order; use the actual number
    # of classes instead of the previously hard-coded range(35)
    vector = [weights[classnames[k]] for k in range(len(classnames))]
    print('class_weights = np.array(', vector, ')')
if __name__ == '__main__':
main()
| 3,193 | 32.621053 | 90 | py |
segodsidb | segodsidb-main/src/compute_all_hyper_450_950_51_std.py | """
@brief Compute the channel std of the 51 interpolated bands from
450-950nm.
@author Luis Carlos Garcia Peraza Herrera (luiscarlos.gph@gmail.com).
@date 21 Jun 2022.
"""
import argparse
import numpy as np
import os
import ntpath
import shutil
import tqdm
import cv2
import sys
# My imports
import torchseg.data_loader as dl
import torchseg.utils
# Get dictionary from class index to class string,
# e.g. 0 -> u'Attached gingiva'
idx2class = dl.OdsiDbDataLoader.OdsiDbDataset.classnames
def help(short_option):
    """
    @brief Look up the CLI help message for a short option flag.
    @param[in] short_option  Short option string, e.g. '-i' or '-m'.
    @returns the help string for the given option.
    """
    # NOTE: deliberately shadows the builtin help(); kept for compatibility.
    return {
        '-i': 'Path to the root of the ODSI-DB dataset (required: True)',
        '-m': 'Channel means normalised to the range [0, 1] (required: True)',
    }[short_option]
def parse_cmdline_params():
    """
    @brief Define and parse the command line options of this script.
    @returns the argparse args object, with args.means converted from a
             string to a Python list of per-channel means.
    """
    parser = argparse.ArgumentParser(description='PyTorch segmenter.')
    parser.add_argument('-i', '--input', required=True, type=str,
                        help=help('-i'))
    parser.add_argument('-m', '--means', required=True, type=str,
                        help=help('-m'))
    args = parser.parse_args()
    # SECURITY: eval() executes arbitrary code from the command line; the
    # expected input is just a list literal, so ast.literal_eval() would be
    # a safer drop-in -- flagged for review, behaviour kept as-is.
    args.means = eval(args.means)
    return args
def validate_cmdline_params(args):
    """
    @brief Sanity-check the parsed command line options.
    @param[in] args  Argparse namespace with an 'input' attribute.
    @returns nothing; raises RuntimeError if the input dir is missing.
    """
    if os.path.isdir(args.input):
        return
    raise RuntimeError('[ERROR] Input directory does not exist.')
def read_files(path: str):
    """
    @brief Collect the annotated image/label file pairs of ODSI-DB.
    @param[in] path  Path to the folder with the ODSI-DB data.
    @returns a tuple (imgs, segs) with the list of image paths and the list
             of segmentation label paths; images without labels are skipped.
    """
    # Annotation files end with '_masks.tif'; each image shares the stem
    segs = [os.path.join(path, f) for f in torchseg.utils.listdir(path)
            if '.tif' in f and f.endswith('_masks.tif')]
    imgs = [s.replace('_masks.tif', '.tif') for s in segs]
    # Sanity check: every mask must have its image next to it.
    # NOTE(review): 'im' is already a joined path; the second join is a
    # no-op only if torchseg.utils.listdir() returns absolute paths.
    for im in imgs:
        assert(os.path.isfile(os.path.join(path, im)))
    return imgs, segs
def main():
    """
    @brief Compute and print the per-band standard deviation over all
           annotated ODSI-DB images, after resampling every image to 51
           bands in the 450-950nm range. Requires the per-band means
           (computed beforehand) via the --means option.
    """
    # Read command line parameters
    args = parse_cmdline_params()
    validate_cmdline_params(args)
    # Get list of image and segmentation files
    image_paths, _ = read_files(args.input)
    # Squared per-band means, provided by the user via --means
    mean_sq = np.array(args.means) * np.array(args.means)
    # Running sum of (x^2 - mean^2) per band; its per-pixel mean is the
    # variance, since E[x^2] - mean^2 = var
    nbands = 51
    new_wl = np.linspace(450, 950, nbands)
    hyper_sums = np.zeros((nbands,), dtype=np.float64)
    pixel_count = 0
    image_count = 0
    # Print progress every 'interval' images
    interval = 10
    # Hoisted out of the loop (loop-invariant)
    interp_func = dl.OdsiDbDataLoader.LoadImage.interp_spectra
    # Loop over the images
    for im_path in tqdm.tqdm(image_paths):
        # Read image
        im_hyper, wl, _, _ = dl.read_stiff(im_path, silent=True,
                                           rgb_only=False)
        # Interpolate hyperspectral image to the 450-950nm range (51 bands)
        im_hyper = interp_func(im_hyper, wl, new_wl)
        # Accumulate sum of (x^2 - mean^2) over all pixels of each band
        im_hyper_sq = im_hyper * im_hyper
        for i in range(nbands):
            hyper_sums[i] += (im_hyper_sq[:, :, i] - mean_sq[i]).sum()
        pixel_count += im_hyper.shape[0] * im_hyper.shape[1]
        image_count += 1
        # Print intermediate stats
        if image_count % interval == 0:
            print('Image count:', image_count)
            sys.stdout.write('All hyper 450-950nm (51 bands) std: [')
            for i in range(nbands):
                # BUGFIX: apply np.sqrt(); hyper_sums/pixel_count is the
                # variance, not the std (cf. compute_rgb_std.py)
                sys.stdout.write(str(np.sqrt(hyper_sums[i] / pixel_count)))
                if i < nbands - 1:
                    sys.stdout.write(', ')
            sys.stdout.write(']\n')
    # Print final stats
    print('Image count:', image_count)
    sys.stdout.write('All hyper 450-950nm (51 bands) std: [')
    for i in range(nbands):
        # BUGFIX: apply np.sqrt(); see note above
        sys.stdout.write(str(np.sqrt(hyper_sums[i] / pixel_count)))
        if i < nbands - 1:
            sys.stdout.write(', ')
    sys.stdout.write(']')
if __name__ == '__main__':
main()
| 4,314 | 28.353741 | 78 | py |
segodsidb | segodsidb-main/src/compute_rgb_mean.py | """
@brief Compute the RGB channel mean of the ODSI-DB reconstructed images.
@author Luis Carlos Garcia Peraza Herrera (luiscarlos.gph@gmail.com).
@date 16 Feb 2022.
"""
import argparse
import numpy as np
import os
import ntpath
import shutil
import tqdm
import cv2
# My imports
import torchseg.data_loader as dl
import torchseg.utils
# Get dictionary from class index to class string, e.g. 0 -> u'Attached gingiva'
idx2class = dl.OdsiDbDataLoader.OdsiDbDataset.classnames
def help(short_option):
    """
    @brief Look up the CLI help message for a short option flag.
    @param[in] short_option  Short option string, e.g. '-i'.
    @returns the help string for the given option.
    """
    # NOTE: deliberately shadows the builtin help(); kept for compatibility.
    return {
        '-i': 'Path to the root of the ODSI-DB dataset (required: True)',
    }[short_option]
def parse_cmdline_params():
    """
    @brief Define and parse the command line options of this script.
    @returns the argparse args object.
    """
    parser = argparse.ArgumentParser(description='PyTorch segmenter.')
    parser.add_argument('-i', '--input', required=True, type=str,
                        help=help('-i'))
    return parser.parse_args()
def validate_cmdline_params(args):
    """
    @brief Sanity-check the parsed command line options.
    @param[in] args  Argparse namespace with an 'input' attribute.
    @returns nothing; raises RuntimeError if the input dir is missing.
    """
    if os.path.isdir(args.input):
        return
    raise RuntimeError('[ERROR] Input directory does not exist.')
def read_files(path: str):
    """
    @brief Collect the annotated image/label file pairs of ODSI-DB.
    @param[in] path  Path to the folder with the ODSI-DB data.
    @returns a tuple (imgs, segs) with the list of image paths and the list
             of segmentation label paths; images without labels are skipped.
    """
    # Annotation files end with '_masks.tif'; each image shares the stem
    segs = [os.path.join(path, f) for f in torchseg.utils.listdir(path)
            if '.tif' in f and f.endswith('_masks.tif')]
    imgs = [s.replace('_masks.tif', '.tif') for s in segs]
    # Sanity check: every mask must have its image next to it.
    # NOTE(review): 'im' is already a joined path; the second join is a
    # no-op only if torchseg.utils.listdir() returns absolute paths.
    for im in imgs:
        assert(os.path.isfile(os.path.join(path, im)))
    return imgs, segs
def main():
    """
    @brief Compute and print the RGB channel means over all annotated
           ODSI-DB images, reconstructing an RGB image from each
           hyperspectral cube and normalising it to [0, 1].
    """
    # Read command line parameters
    args = parse_cmdline_params()
    validate_cmdline_params(args)
    # Get list of image and segmentation files
    image_paths, label_paths = read_files(args.input)
    # Per-channel running sums for the RGB average
    red_sum = 0.0
    green_sum = 0.0
    blue_sum = 0.0
    pixel_count = 0
    image_count = 0
    # Print interval
    interval = 10
    # Loop over the images
    for im_path in tqdm.tqdm(image_paths):
        im_fname = os.path.splitext(ntpath.basename(im_path))[0]
        # Read image
        im_hyper, wl, im_rgb_orig, metadata = dl.read_stiff(im_path,
            silent=True, rgb_only=False)
        nbands = im_hyper.shape[2]
        # Convert hyperspectral image to RGB (H, W, 3)
        im_rgb = dl.OdsiDbDataLoader.LoadImage.hyper2rgb(im_hyper, wl)
        # Normalise the image in the range [0., 1.]
        im_rgb = im_rgb.astype(np.float64) / 255.
        # Update average computation
        red_sum += im_rgb[:, :, 0].sum()
        green_sum += im_rgb[:, :, 1].sum()
        blue_sum += im_rgb[:, :, 2].sum()
        pixel_count += im_rgb.shape[0] * im_rgb.shape[1]
        image_count += 1
        # Print intermediate stats
        if image_count % interval == 0:
            print('Image count:', image_count)
            print('Red average:', red_sum / pixel_count)
            print('Green average:', green_sum / pixel_count)
            print('Blue average:', blue_sum / pixel_count)
    # Print final stats
    print('Image count:', image_count)
    print('Red average:', red_sum / pixel_count)
    print('Green average:', green_sum / pixel_count)
    print('Blue average:', blue_sum / pixel_count)
if __name__ == '__main__':
main()
| 3,891 | 27.617647 | 80 | py |
segodsidb | segodsidb-main/src/compute_all_hyper_450_950_170_std.py | """
@brief Compute the channel std of the 170 interpolated bands from
450-950nm.
@author Luis Carlos Garcia Peraza Herrera (luiscarlos.gph@gmail.com).
@date 21 Jun 2022.
"""
import argparse
import numpy as np
import os
import ntpath
import shutil
import tqdm
import cv2
import sys
# My imports
import torchseg.data_loader as dl
import torchseg.utils
# Get dictionary from class index to class string,
# e.g. 0 -> u'Attached gingiva'
idx2class = dl.OdsiDbDataLoader.OdsiDbDataset.classnames
def help(short_option):
    """
    @brief Look up the CLI help message for a short option flag.
    @param[in] short_option  Short option string, e.g. '-i' or '-m'.
    @returns the help string for the given option.
    """
    # NOTE: deliberately shadows the builtin help(); kept for compatibility.
    return {
        '-i': 'Path to the root of the ODSI-DB dataset (required: True)',
        '-m': 'Channel means normalised to the range [0, 1] (required: True)',
    }[short_option]
def parse_cmdline_params():
    """
    @brief Define and parse the command line options of this script.
    @returns the argparse args object, with args.means converted from a
             string to a Python list of per-channel means.
    """
    parser = argparse.ArgumentParser(description='PyTorch segmenter.')
    parser.add_argument('-i', '--input', required=True, type=str,
                        help=help('-i'))
    parser.add_argument('-m', '--means', required=True, type=str,
                        help=help('-m'))
    args = parser.parse_args()
    # SECURITY: eval() executes arbitrary code from the command line; the
    # expected input is just a list literal, so ast.literal_eval() would be
    # a safer drop-in -- flagged for review, behaviour kept as-is.
    args.means = eval(args.means)
    return args
def validate_cmdline_params(args):
    """
    @brief Sanity-check the parsed command line options.
    @param[in] args  Argparse namespace with an 'input' attribute.
    @returns nothing; raises RuntimeError if the input dir is missing.
    """
    if os.path.isdir(args.input):
        return
    raise RuntimeError('[ERROR] Input directory does not exist.')
def read_files(path: str):
    """
    @brief Collect the annotated image/label file pairs of ODSI-DB.
    @param[in] path  Path to the folder with the ODSI-DB data.
    @returns a tuple (imgs, segs) with the list of image paths and the list
             of segmentation label paths; images without labels are skipped.
    """
    # Annotation files end with '_masks.tif'; each image shares the stem
    segs = [os.path.join(path, f) for f in torchseg.utils.listdir(path)
            if '.tif' in f and f.endswith('_masks.tif')]
    imgs = [s.replace('_masks.tif', '.tif') for s in segs]
    # Sanity check: every mask must have its image next to it.
    # NOTE(review): 'im' is already a joined path; the second join is a
    # no-op only if torchseg.utils.listdir() returns absolute paths.
    for im in imgs:
        assert(os.path.isfile(os.path.join(path, im)))
    return imgs, segs
def main():
    """
    @brief Compute and print the per-band standard deviation over all
           annotated ODSI-DB images, after resampling every image to 170
           bands in the 450-950nm range. Requires the per-band means
           (computed beforehand) via the --means option.
    """
    # Read command line parameters
    args = parse_cmdline_params()
    validate_cmdline_params(args)
    # Get list of image and segmentation files
    image_paths, _ = read_files(args.input)
    # Squared per-band means, provided by the user via --means
    mean_sq = np.array(args.means) * np.array(args.means)
    # Running sum of (x^2 - mean^2) per band; its per-pixel mean is the
    # variance, since E[x^2] - mean^2 = var
    nbands = 170
    new_wl = np.linspace(450, 950, nbands)
    hyper_sums = np.zeros((nbands,), dtype=np.float64)
    pixel_count = 0
    image_count = 0
    # Print progress every 'interval' images
    interval = 10
    # Hoisted out of the loop (loop-invariant)
    interp_func = dl.OdsiDbDataLoader.LoadImage.interp_spectra
    # Loop over the images
    for im_path in tqdm.tqdm(image_paths):
        # Read image
        im_hyper, wl, _, _ = dl.read_stiff(im_path, silent=True,
                                           rgb_only=False)
        # Interpolate hyperspectral image to the 450-950nm range (170 bands)
        im_hyper = interp_func(im_hyper, wl, new_wl)
        # Accumulate sum of (x^2 - mean^2) over all pixels of each band
        im_hyper_sq = im_hyper * im_hyper
        for i in range(nbands):
            hyper_sums[i] += (im_hyper_sq[:, :, i] - mean_sq[i]).sum()
        pixel_count += im_hyper.shape[0] * im_hyper.shape[1]
        image_count += 1
        # Print intermediate stats
        if image_count % interval == 0:
            print('Image count:', image_count)
            sys.stdout.write('All hyper 450-950nm (170 bands) std: [')
            for i in range(nbands):
                # BUGFIX: apply np.sqrt(); hyper_sums/pixel_count is the
                # variance, not the std (cf. compute_rgb_std.py)
                sys.stdout.write(str(np.sqrt(hyper_sums[i] / pixel_count)))
                if i < nbands - 1:
                    sys.stdout.write(', ')
            sys.stdout.write(']\n')
    # Print final stats
    print('Image count:', image_count)
    sys.stdout.write('All hyper 450-950nm (170 bands) std: [')
    for i in range(nbands):
        # BUGFIX: apply np.sqrt(); see note above
        sys.stdout.write(str(np.sqrt(hyper_sums[i] / pixel_count)))
        if i < nbands - 1:
            sys.stdout.write(', ')
    sys.stdout.write(']')
if __name__ == '__main__':
main()
| 4,318 | 28.380952 | 78 | py |
segodsidb | segodsidb-main/src/compute_nuance_ex_mean.py | """
@brief Compute the channel mean of the Nuance EX images.
@author Luis Carlos Garcia Peraza Herrera (luiscarlos.gph@gmail.com).
@date 16 Feb 2022.
"""
import argparse
import numpy as np
import os
import ntpath
import shutil
import tqdm
import cv2
import sys
# My imports
import torchseg.data_loader as dl
import torchseg.utils
# Get dictionary from class index to class string, e.g. 0 -> u'Attached gingiva'
idx2class = dl.OdsiDbDataLoader.OdsiDbDataset.classnames
def help(short_option):
    """
    @brief Look up the CLI help message for a short option flag.
    @param[in] short_option  Short option string, e.g. '-i'.
    @returns the help string for the given option.
    """
    # NOTE: deliberately shadows the builtin help(); kept for compatibility.
    return {
        '-i': 'Path to the root of the ODSI-DB dataset (required: True)',
    }[short_option]
def parse_cmdline_params():
    """
    @brief Define and parse the command line options of this script.
    @returns the argparse args object.
    """
    parser = argparse.ArgumentParser(description='PyTorch segmenter.')
    parser.add_argument('-i', '--input', required=True, type=str,
                        help=help('-i'))
    return parser.parse_args()
def validate_cmdline_params(args):
    """
    @brief Sanity-check the parsed command line options.
    @param[in] args  Argparse namespace with an 'input' attribute.
    @returns nothing; raises RuntimeError if the input dir is missing.
    """
    if os.path.isdir(args.input):
        return
    raise RuntimeError('[ERROR] Input directory does not exist.')
def read_files(path: str):
    """
    @brief Collect the annotated image/label file pairs of ODSI-DB.
    @param[in] path  Path to the folder with the ODSI-DB data.
    @returns a tuple (imgs, segs) with the list of image paths and the list
             of segmentation label paths; images without labels are skipped.
    """
    # Annotation files end with '_masks.tif'; each image shares the stem
    segs = [os.path.join(path, f) for f in torchseg.utils.listdir(path)
            if '.tif' in f and f.endswith('_masks.tif')]
    imgs = [s.replace('_masks.tif', '.tif') for s in segs]
    # Sanity check: every mask must have its image next to it.
    # NOTE(review): 'im' is already a joined path; the second join is a
    # no-op only if torchseg.utils.listdir() returns absolute paths.
    for im in imgs:
        assert(os.path.isfile(os.path.join(path, im)))
    return imgs, segs
def main():
    """
    @brief Compute and print the per-band mean over the Nuance EX (51-band)
           images of ODSI-DB; images with a different band count are skipped.
    """
    # Read command line parameters
    args = parse_cmdline_params()
    validate_cmdline_params(args)
    # Get list of image and segmentation files
    image_paths, label_paths = read_files(args.input)
    # Per-band running sums for the channel averages
    nbands = 51
    hyper_sums = np.zeros((nbands,), dtype=np.float64)
    pixel_count = 0
    image_count = 0
    # Print interval
    interval = 10
    # Loop over the images
    for im_path in tqdm.tqdm(image_paths):
        im_fname = os.path.splitext(ntpath.basename(im_path))[0]
        # Read image
        im_hyper, wl, im_rgb_orig, metadata = dl.read_stiff(im_path,
            silent=True, rgb_only=False)
        # We are only interested in Nuance EX images
        if im_hyper.shape[2] != nbands:
            continue
        # Update average computation
        for i in range(nbands):
            hyper_sums[i] += im_hyper[:, :, i].sum()
        pixel_count += im_hyper.shape[0] * im_hyper.shape[1]
        image_count += 1
        # Print intermediate stats
        if image_count % interval == 0:
            print('Image count:', image_count)
            sys.stdout.write('Nuance EX average: [')
            for i in range(nbands):
                sys.stdout.write(str(hyper_sums[i] / pixel_count))
                if i < nbands - 1:
                    sys.stdout.write(', ')
            sys.stdout.write(']\n')
    # Print final stats
    print('Image count:', image_count)
    sys.stdout.write('Nuance EX average: [')
    for i in range(nbands):
        sys.stdout.write(str(hyper_sums[i] / pixel_count))
        if i < nbands - 1:
            sys.stdout.write(', ')
    sys.stdout.write(']')
if __name__ == '__main__':
main()
| 3,875 | 27.086957 | 80 | py |
segodsidb | segodsidb-main/src/compute_nuance_ex_std.py | """
@brief Compute the channel std of the Nuance EX images.
@author Luis Carlos Garcia Peraza Herrera (luiscarlos.gph@gmail.com).
@date 16 Feb 2022.
"""
import argparse
import numpy as np
import os
import ntpath
import shutil
import tqdm
import cv2
import sys
# My imports
import torchseg.data_loader as dl
import torchseg.utils
# Get dictionary from class index to class string, e.g. 0 -> u'Attached gingiva'
idx2class = dl.OdsiDbDataLoader.OdsiDbDataset.classnames
def help(short_option):
    """
    @brief Look up the CLI help message for a short option flag.
    @param[in] short_option  Short option string, e.g. '-i' or '-m'.
    @returns the help string for the given option.
    """
    # NOTE: deliberately shadows the builtin help(); kept for compatibility.
    return {
        '-i': 'Path to the root of the ODSI-DB dataset (required: True)',
        '-m': 'Channel means normalised to the range [0, 1] (required: True)',
    }[short_option]
def parse_cmdline_params():
    """
    @brief Define and parse the command line options of this script.
    @returns the argparse args object, with args.means converted from a
             string to a Python list of per-channel means.
    """
    parser = argparse.ArgumentParser(description='PyTorch segmenter.')
    parser.add_argument('-i', '--input', required=True, type=str,
                        help=help('-i'))
    parser.add_argument('-m', '--means', required=True, type=str,
                        help=help('-m'))
    args = parser.parse_args()
    # SECURITY: eval() executes arbitrary code from the command line; the
    # expected input is just a list literal, so ast.literal_eval() would be
    # a safer drop-in -- flagged for review, behaviour kept as-is.
    args.means = eval(args.means)
    return args
def validate_cmdline_params(args):
    """
    @brief Sanity-check the parsed command line options.
    @param[in] args  Argparse namespace with an 'input' attribute.
    @returns nothing; raises RuntimeError if the input dir is missing.
    """
    if os.path.isdir(args.input):
        return
    raise RuntimeError('[ERROR] Input directory does not exist.')
def read_files(path: str):
    """
    @brief Collect the annotated image/label file pairs of ODSI-DB.
    @param[in] path  Path to the folder with the ODSI-DB data.
    @returns a tuple (imgs, segs) with the list of image paths and the list
             of segmentation label paths; images without labels are skipped.
    """
    # Annotation files end with '_masks.tif'; each image shares the stem
    segs = [os.path.join(path, f) for f in torchseg.utils.listdir(path)
            if '.tif' in f and f.endswith('_masks.tif')]
    imgs = [s.replace('_masks.tif', '.tif') for s in segs]
    # Sanity check: every mask must have its image next to it.
    # NOTE(review): 'im' is already a joined path; the second join is a
    # no-op only if torchseg.utils.listdir() returns absolute paths.
    for im in imgs:
        assert(os.path.isfile(os.path.join(path, im)))
    return imgs, segs
def main():
    """
    @brief Compute and print the per-band standard deviation over the
           Nuance EX (51-band) images of ODSI-DB; images with a different
           band count are skipped. Requires the per-band means (computed
           beforehand) via the --means option.
    """
    # Read command line parameters
    args = parse_cmdline_params()
    validate_cmdline_params(args)
    # Get list of image and segmentation files
    image_paths, label_paths = read_files(args.input)
    # Squared per-band means, provided by the user via --means
    mean_sq = np.array(args.means) * np.array(args.means)
    # Running sum of (x^2 - mean^2) per band; its per-pixel mean is the
    # variance, since E[x^2] - mean^2 = var
    nbands = 51
    hyper_sums = np.zeros((nbands,), dtype=np.float64)
    pixel_count = 0
    image_count = 0
    # Print progress every 'interval' images
    interval = 10
    # Loop over the images
    for im_path in tqdm.tqdm(image_paths):
        # Read image
        im_hyper, wl, im_rgb_orig, metadata = dl.read_stiff(im_path,
            silent=True, rgb_only=False)
        # We are only interested in Nuance EX images
        if im_hyper.shape[2] != nbands:
            continue
        # Accumulate sum of (x^2 - mean^2) over all pixels of each band
        im_hyper_sq = im_hyper * im_hyper
        for i in range(nbands):
            hyper_sums[i] += (im_hyper_sq[:, :, i] - mean_sq[i]).sum()
        pixel_count += im_hyper.shape[0] * im_hyper.shape[1]
        image_count += 1
        # Print intermediate stats
        if image_count % interval == 0:
            print('Image count:', image_count)
            sys.stdout.write('Nuance EX std: [')
            for i in range(nbands):
                # BUGFIX: apply np.sqrt(); hyper_sums/pixel_count is the
                # variance, not the std (cf. compute_rgb_std.py)
                sys.stdout.write(str(np.sqrt(hyper_sums[i] / pixel_count)))
                if i < nbands - 1:
                    sys.stdout.write(', ')
            sys.stdout.write(']\n')
    # Print final stats
    print('Image count:', image_count)
    sys.stdout.write('Nuance EX std: [')
    for i in range(nbands):
        # BUGFIX: apply np.sqrt(); see note above
        sys.stdout.write(str(np.sqrt(hyper_sums[i] / pixel_count)))
        if i < nbands - 1:
            sys.stdout.write(', ')
    sys.stdout.write(']')
if __name__ == '__main__':
main()
| 4,104 | 27.706294 | 80 | py |
segodsidb | segodsidb-main/src/compute_rgb_std.py | """
@brief Compute the std of the RGB images in the ODSI-DB dataset,
normalised to the range [0, 1].
@author Luis Carlos Garcia Peraza Herrera (luiscarlos.gph@gmail.com).
@date 16 Feb 2022.
"""
import argparse
import numpy as np
import os
import ntpath
import shutil
import tqdm
import cv2
# My imports
import torchseg.data_loader as dl
import torchseg.utils
# Get dictionary from class index to class string, e.g. 0 -> u'Attached gingiva'
idx2class = dl.OdsiDbDataLoader.OdsiDbDataset.classnames
def help(short_option):
    """
    @brief Look up the CLI help message for a short option flag.
    @param[in] short_option  Short option string, e.g. '-i' or '-m'.
    @returns the help string for the given option.
    """
    # NOTE: deliberately shadows the builtin help(); kept for compatibility.
    return {
        '-i': 'Path to the root of the ODSI-DB dataset (required: True)',
        '-m': 'RGB means normalised to the range [0, 1] (required: True)',
    }[short_option]
def parse_cmdline_params():
    """
    @brief Define and parse the command line options of this script.
    @returns the argparse args object, with args.means converted from a
             string to a Python list of per-channel means.
    """
    parser = argparse.ArgumentParser(description='PyTorch segmenter.')
    parser.add_argument('-i', '--input', required=True, type=str,
                        help=help('-i'))
    parser.add_argument('-m', '--means', required=True, type=str,
                        help=help('-m'))
    args = parser.parse_args()
    # SECURITY: eval() executes arbitrary code from the command line; the
    # expected input is just a list literal, so ast.literal_eval() would be
    # a safer drop-in -- flagged for review, behaviour kept as-is.
    args.means = eval(args.means)
    return args
def validate_cmdline_params(args):
    """
    @brief Sanity-check the parsed command line options.
    @param[in] args  Argparse namespace with an 'input' attribute.
    @returns nothing; raises RuntimeError if the input dir is missing.
    """
    if os.path.isdir(args.input):
        return
    raise RuntimeError('[ERROR] Input directory does not exist.')
def read_files(path: str):
    """
    @brief Collect the annotated image/label file pairs of ODSI-DB.
    @param[in] path  Path to the folder with the ODSI-DB data.
    @returns a tuple (imgs, segs) with the list of image paths and the list
             of segmentation label paths; images without labels are skipped.
    """
    # Annotation files end with '_masks.tif'; each image shares the stem
    segs = [os.path.join(path, f) for f in torchseg.utils.listdir(path)
            if '.tif' in f and f.endswith('_masks.tif')]
    imgs = [s.replace('_masks.tif', '.tif') for s in segs]
    # Sanity check: every mask must have its image next to it.
    # NOTE(review): 'im' is already a joined path; the second join is a
    # no-op only if torchseg.utils.listdir() returns absolute paths.
    for im in imgs:
        assert(os.path.isfile(os.path.join(path, im)))
    return imgs, segs
def main():
    """
    @brief Compute and print the RGB channel standard deviations over all
           annotated ODSI-DB images, given the channel means via --means.
           Uses var = E[x^2] - mean^2 and applies sqrt at print time.
    """
    # Read command line parameters
    args = parse_cmdline_params()
    validate_cmdline_params(args)
    # Get list of image and segmentation files
    image_paths, label_paths = read_files(args.input)
    # Squared channel means, provided by the user via --means
    red_mean_sq = args.means[0] * args.means[0]
    green_mean_sq = args.means[1] * args.means[1]
    blue_mean_sq = args.means[2] * args.means[2]
    # Per-channel running sums of (x^2 - mean^2)
    red_sum = 0.0
    green_sum = 0.0
    blue_sum = 0.0
    pixel_count = 0
    image_count = 0
    # Print interval
    interval = 10
    # Loop over the images
    for im_path in tqdm.tqdm(image_paths):
        im_fname = os.path.splitext(ntpath.basename(im_path))[0]
        # Read image
        im_hyper, wl, im_rgb_orig, metadata = dl.read_stiff(im_path,
                                                            silent=True,
                                                            rgb_only=False)
        nbands = im_hyper.shape[2]
        # Convert hyperspectral image to RGB (H, W, 3)
        im_rgb = dl.OdsiDbDataLoader.LoadImage.hyper2rgb(im_hyper, wl)
        # Normalise the image in the range [0., 1.]
        im_rgb = im_rgb.astype(np.float64) / 255.
        # Update std computation
        im_rgb_sq = im_rgb * im_rgb
        red_sum += (im_rgb_sq[:, :, 0] - red_mean_sq).sum()
        green_sum += (im_rgb_sq[:, :, 1] - green_mean_sq).sum()
        blue_sum += (im_rgb_sq[:, :, 2] - blue_mean_sq).sum()
        pixel_count += im_rgb.shape[0] * im_rgb.shape[1]
        image_count += 1
        # Print intermediate stats
        if image_count % interval == 0:
            print('Image count:', image_count)
            print('Red std:', np.sqrt(red_sum / pixel_count))
            print('Green std:', np.sqrt(green_sum / pixel_count))
            print('Blue std:', np.sqrt(blue_sum / pixel_count))
    # Print final stats
    print('Image count:', image_count)
    print('Red std:', np.sqrt(red_sum / pixel_count))
    print('Green std:', np.sqrt(green_sum / pixel_count))
    print('Blue std:', np.sqrt(blue_sum / pixel_count))
if __name__ == '__main__':
main()
| 4,390 | 29.282759 | 80 | py |
segodsidb | segodsidb-main/src/validate_class_presence.py | """
@brief We want to use image-level annotations to learn the segmentation.
Therefore, we need to make sure that there is at least one image
(ideally more) where each of the classes is not present.
The opposite is not a concern, as we know that all the classes are
present at least in one image.
@author Luis Carlos Garcia Peraza Herrera (luiscarlos.gph@gmail.com).
@date 27 Apr 2022.
"""
import argparse
import numpy as np
import os
import sys
# My imports
import torchseg.data_loader as dl
import torchseg.utils
def help(short_option):
    """Return the help text for the given short command line option.

    @param[in] short_option  Option flag, e.g. '-i'.
    @returns the help string registered for the flag.
    @raises KeyError when the flag is unknown.
    """
    if short_option == '-i':
        return 'Path to the root of the ODSI-DB dataset (required: True)'
    if short_option == '-o':
        return 'Path to the output directory (required: True)'
    raise KeyError(short_option)
def parse_cmdline_params():
    """Parse the command line options of this script.

    @returns the argparse namespace with the parsed options.
    """
    cli = argparse.ArgumentParser(description='PyTorch segmenter.')
    cli.add_argument('-i', '--input', required=True, type=str, help=help('-i'))
    return cli.parse_args()
def validate_cmdline_params(args):
    """Sanity-check the parsed command line options.

    @param[in] args  Argparse namespace with an `input` attribute.
    @returns the same namespace when validation succeeds.
    @raises RuntimeError when the input directory does not exist.
    """
    input_dir_exists = os.path.isdir(args.input)
    if input_dir_exists:
        return args
    raise RuntimeError('[ERROR] Input directory does not exist.')
def read_files(path: str):
    """
    @brief Collect the annotated images of an ODSI-DB folder.
    @param[in] path Path to the folder with the ODSI-DB data.
    @returns a tuple (imgs, segs) containing two lists of paths to images and
             segmentation labels.
    """
    # Get list of TIFF files
    tiffs = [f for f in torchseg.utils.listdir(path) if '.tif' in f]
    # Get list of segmentation files
    segs = [os.path.join(path, f) for f in tiffs if f.endswith('_masks.tif')]
    # Get list of image files (we ignore images without segmentation)
    imgs = [f.replace('_masks.tif', '.tif') for f in segs]
    # FIX: `imgs` entries already contain `path` (they derive from `segs`,
    # which were joined above), so joining them with `path` a second time
    # double-prefixed relative inputs; check the paths directly instead.
    for im in imgs:
        assert(os.path.isfile(im))
    return imgs, segs
def class_in_label(class_name: str, label: np.ndarray) -> bool:
    """Tell whether any pixel of the given class appears in a label map.

    @param[in] class_name  Human-readable class name, e.g. u'Attached gingiva'.
    @param[in] label       One-hot label array indexed as (H, W, class).
    @returns True when at least one pixel of the class is annotated.
    """
    # Invert the index -> name mapping to resolve the class channel
    name2idx = {name: idx
                for idx, name in dl.OdsiDbDataLoader.OdsiDbDataset.classnames.items()}
    channel = label[:, :, name2idx[class_name]]
    return bool(np.count_nonzero(channel))
def main():
    """Check that image-level labels are usable to learn segmentation.

    For every class of ODSI-DB, verify that at least one annotated image does
    NOT contain the class; if some class appears in every image, image-level
    presence labels carry no discriminative signal for it.
    """
    # Read command line parameters
    args = parse_cmdline_params()
    validate_cmdline_params(args)
    # Get list of image and segmentation files
    sys.stdout.write('[INFO] Reading list of input files... ')
    sys.stdout.flush()
    image_paths, label_paths = read_files(args.input)
    print('OK')
    # Initially we assume that all the classes are in all the images
    present_in_all = {v: True for v in dl.OdsiDbDataLoader.OdsiDbDataset.classnames.values()}
    # Loop over the images
    for im_path, label_path in zip(image_paths, label_paths):
        # Real label
        # NOTE(review): read_label appears to return (class, H, W); it is
        # transposed here to (H, W, class) as class_in_label expects --
        # confirm against the loader.
        label = dl.OdsiDbDataLoader.LoadImage.read_label(
            label_path).transpose((1, 2, 0))
        # Loop over the classes and find out which are not present
        for c in present_in_all:
            # If we know that this class was not present in a previous image
            # we do not need to check this image
            if not present_in_all[c]:
                continue
            # If the class is not present in the image, we write it down
            if not class_in_label(c, label):
                present_in_all[c] = False
    # Make sure that all the classes are missing from at least one image
    success = True
    for c in present_in_all:
        if present_in_all[c]:
            success = False
            break
    if success:
        print("[OK] You can use image-level presence labels with the ODSI-DB dataset.")
    else:
        print("[ERROR] You cannot use image-level presence labels with the ODSI_DB dataset.")
        print("Check the classes that are present in ALL the images below:")
        print(present_in_all)


if __name__ == '__main__':
    main()
| 4,147 | 30.664122 | 93 | py |
segodsidb | segodsidb-main/src/produce_cmf_plot.py | #!/usr/bin/env python3
#
# @brief Script to produce HSI to XYZ conversion colour matching function.
#
# @author Luis C. Garcia Peraza Herrera (luiscarlos.gph@gmail.com).
# @date 23 Aug 2022.
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
# My imports
import torchseg.data_loader as dl
def _plot_and_save_cmf(min_wl, max_wl, stats_prefix, title, out_fname):
    """Plot the (possibly band-limited) CIE 1931 CMFs and save them as SVG.

    @param[in] min_wl        Lower wavelength bound in nm.
    @param[in] max_wl        Upper wavelength bound in nm.
    @param[in] stats_prefix  Prefix used when printing the CMF integrals.
    @param[in] title         Title of the plot.
    @param[in] out_fname     Output SVG file path.
    """
    # Start from a clean figure so consecutive plots do not overlap
    plt.clf()
    # Sample the corrected CMFs over the requested range with a 1nm step
    steps = max_wl - min_wl + 1
    wl = np.linspace(min_wl, max_wl, steps)
    f_xbar, f_ybar, f_zbar = \
        dl.OdsiDbDataLoader.LoadImage.get_corrected_cmf('cie_2_1931', wl)
    xbar = f_xbar(wl)
    ybar = f_ybar(wl)
    zbar = f_zbar(wl)
    # Plot the three colour matching functions
    cmf_plot = sns.lineplot(x=wl, y=xbar, color='red', label='X')
    cmf_plot = sns.lineplot(x=wl, y=ybar, color='green', label='Y')
    cmf_plot = sns.lineplot(x=wl, y=zbar, color='blue', label='Z')
    # Report the integral of each CMF (sanity check of the correction)
    print(stats_prefix + ' CMF xbar integral:', np.trapz(xbar, wl))
    print(stats_prefix + ' CMF ybar integral:', np.trapz(ybar, wl))
    print(stats_prefix + ' CMF zbar integral:', np.trapz(zbar, wl))
    print()
    # Decorate and save the plot to disk
    plt.title(title)
    plt.xlim([380, 780])
    plt.ylim([0., 4.])
    # Tick every 10nm but label only every 40nm
    plt.xticks([x for x in range(380, 790, 10)],
               ['' if x not in [y for y in range(380, 800, 40)] else x
                for x in range(380, 790, 10)])
    plt.legend()
    plt.xlabel('Wavelength (nm)')
    fig = cmf_plot.get_figure()
    fig.savefig(out_fname, format='svg', dpi=600)


def main():
    """Produce the HSI -> XYZ colour matching function plots.

    Generates three SVG files: the full CIE 1931 2-degree standard observer
    CMFs (380-780nm) and two band-limited versions matching the spectral
    ranges of the Specim IQ (400-780nm) and Nuance EX (450-780nm) cameras.

    Note: the original implementation repeated the same ~30-line
    plot/print/save section three times (and contained a stray
    `wl = new_wl = np.linspace(...)` chained assignment whose `new_wl` was
    never used); the logic is now factored into _plot_and_save_cmf().
    """
    plt.rc('font', size=12)
    _plot_and_save_cmf(380, 780, 'Original',
                       'CIE XYZ standard observer CMFs',
                       'cie_2_1931.svg')
    _plot_and_save_cmf(400, 780, 'Specim IQ',
                       'Modified CIE XYZ CMFs for Specim IQ',
                       'cie_2_1931_specim_iq.svg')
    _plot_and_save_cmf(450, 780, 'Nuance EX',
                       'Modified CIE XYZ CMFs for Nuance EX',
                       'cie_2_1931_nuance_ex.svg')


if __name__ == '__main__':
    main()
| 4,371 | 31.626866 | 135 | py |
segodsidb | segodsidb-main/src/train.py | """
@brief Main script to kick off the training.
@author Luis Carlos Garcia Peraza Herrera (luiscarlos.gph@gmail.com).
@date 1 Jun 2021.
"""
import argparse
import collections
import torch
import numpy as np
# My imports
import torchseg.config.parser
import torchseg.data_loader
import torchseg.model
import torchseg.machine
# Fix random seeds for reproducibility.
# cudnn.deterministic=True / benchmark=False force deterministic cuDNN
# algorithm selection (at some speed cost) so runs are repeatable.
SEED = 18303
torch.manual_seed(SEED)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
np.random.seed(SEED)
def help(short_option):
    """Return the help text associated with a short command line option.

    @param[in] short_option  Flag such as '-c' or '-r'.
    @returns the corresponding help string.
    @raises KeyError when the flag is not recognised.
    """
    if short_option == '-c':
        return 'config file path (default: None)'
    if short_option == '-l':
        return 'logger config file path (default: None)'
    if short_option == '-r':
        return 'path to latest checkpoint (default: None)'
    if short_option == '-d':
        return 'indices of GPUs to enable (default: all)'
    raise KeyError(short_option)
def parse_cmdline_params():
    """Build the command line parser of the training script.

    @returns the argparse.ArgumentParser itself; actual parsing is deferred
             to the caller (see ConfigParser.from_args in parse_config).
    """
    parser = argparse.ArgumentParser(description='PyTorch segmenter.')
    for short, long in (('-c', '--conf'), ('-l', '--logconf'),
                        ('-r', '--resume'), ('-d', '--device')):
        parser.add_argument(short, long, default=None, type=str,
                            help=help(short))
    return parser
def parse_config(args):
    """Merge JSON configuration values with command line overrides.

    @param[in] args  Argparse parser as produced by parse_cmdline_params().
    @returns A torchseg.config.parser.ConfigParser object.
    """
    # Extra CLI flags that override entries of the JSON configuration;
    # 'target' is the semicolon-separated path of the entry to replace.
    CustomArgs = collections.namedtuple('CustomArgs', 'flags type target')
    overrides = [
        CustomArgs(flags=['--lr', '--learning-rate'], type=float,
                   target='optimizer;args;lr'),
        CustomArgs(flags=['--bs', '--batch-size'], type=int,
                   target='data_loader;args;batch_size'),
        CustomArgs(flags=['--data-dir'], type=str,
                   target='data_loader;args;data_dir'),
        CustomArgs(flags=['--save-dir'], type=str,
                   target='machine;args;save_dir'),
    ]
    return torchseg.config.parser.ConfigParser.from_args(args, overrides)
def main():
    """Entry point: wire up data, model, optimizer and trainer, then train."""
    args = parse_cmdline_params()
    config = parse_config(args)
    # Note: the 'type' in config.json indicates the class, and the 'args' in
    # config.json will be passed as parameters to the constructor of that class
    # Get training logger
    logger = config.get_logger('train')
    # Setup data loader
    data_loader = config.init_obj('data_loader', torchseg.data_loader)
    valid_data_loader = data_loader.split_validation()
    # Create model
    model = config.init_obj('model', torchseg.model)
    logger.info(model)
    # Prepare for (multi-device) GPU training
    device, device_ids = torchseg.utils.setup_gpu_devices(config['n_gpu'])
    model = model.to(device)
    if len(device_ids) > 1:
        # Replicate the model across the selected GPUs
        model = torch.nn.DataParallel(model, device_ids=device_ids)
    # Get function handles of loss and evaluation metrics
    criterion = getattr(torchseg.model.loss, config['loss'])
    metrics = [getattr(torchseg.model.metric, m) for m in config['metrics']]
    # Create optimizer over the trainable parameters only, so that frozen
    # layers (requires_grad == False) are left untouched
    trainable_params = filter(lambda p: p.requires_grad, model.parameters())
    optimizer = config.init_obj('optimizer', torch.optim, trainable_params)
    # Learning rate scheduler
    lr_scheduler = config.init_obj('lr_scheduler', torch.optim.lr_scheduler,
                                   optimizer)
    # Create learning machine
    trainer = torchseg.machine.GenericMachine(model, criterion, metrics,
                                              optimizer,
                                              config=config,
                                              device=device,
                                              data_loader=data_loader,
                                              valid_data_loader=valid_data_loader,
                                              lr_scheduler=lr_scheduler,
                                              acc_steps=config['machine']['args']['acc_steps'])
    # TODO: We should be able to create the learning machine just with this type
    # of call
    #trainer = config.init_obj('machine', torchseg.machine)
    # Launch training
    trainer.train()


if __name__ == '__main__':
    main()
| 4,439 | 34.238095 | 95 | py |
segodsidb | segodsidb-main/src/compute_pixel_stats.py | """
@brief Script that computes iteratively the mean and unbiased sample
standard deviation.
@author Luis Carlos Garcia Peraza Herrera (luiscarlos.gph@gmail.com).
@date 15 Dec 2021.
"""
import argparse
import numpy as np
import os
import copy
import tqdm
#import random
#import pandas as pd
#import seaborn as sns
#import matplotlib
#import matplotlib.pyplot as plt
#import sklearn.manifold
#import multiprocessing
#import colour
# My imports
import torchseg.data_loader
import torchseg.utils
# Get dictionary from class index to class string, e.g. 0 -> u'Attached gingiva'
# NOTE(review): not referenced anywhere in this script's visible code.
idx2class = torchseg.data_loader.OdsiDbDataLoader.OdsiDbDataset.classnames
def help(short_option):
    """Return the help text for the given short command line option.

    @param[in] short_option  Option flag, e.g. '-i'.
    @returns the help string registered for the flag.
    @raises KeyError when the flag is unknown.
    """
    if short_option == '-i':
        return 'Path to the root of the ODSI-DB dataset (required: True)'
    if short_option == '-m':
        return 'Path to the output mean (required: True)'
    if short_option == '-s':
        return 'Path to the output std (required: True)'
    raise KeyError(short_option)
def parse_cmdline_params():
    """Parse the command line options of this script.

    @returns the argparse namespace with the parsed options.
    """
    cli = argparse.ArgumentParser(description='PyTorch segmenter.')
    cli.add_argument('-i', '--input', required=True, type=str, help=help('-i'))
    cli.add_argument('-m', '--mean', required=True, type=str, help=help('-m'))
    cli.add_argument('-s', '--std', required=True, type=str, help=help('-s'))
    return cli.parse_args()
def validate_cmdline_params(args):
    """Sanity-check the parsed command line options.

    @param[in] args  Namespace with `input`, `mean` and `std` attributes.
    @returns the same namespace when all the checks pass.
    @raises RuntimeError when the input dir is missing or an output exists.
    """
    if not os.path.isdir(args.input):
        raise RuntimeError('[ERROR] Input directory does not exist.')
    # Refuse to overwrite previously computed statistics
    for out_path, what in ((args.mean, 'Mean'), (args.std, 'Std')):
        if os.path.exists(out_path):
            raise RuntimeError('[ERROR] %s file already exists.' % what)
    return args
def read_files(path: str):
    """
    @brief Collect the annotated images of an ODSI-DB folder.
    @param[in] path Path to the folder with the ODSI-DB data.
    @returns a tuple (imgs, segs) containing two lists of paths to images and
             segmentation labels.
    """
    # Get list of TIFF files
    tiffs = [f for f in torchseg.utils.listdir(path) if '.tif' in f]
    # Get list of segmentation files
    segs = [os.path.join(path, f) for f in tiffs if f.endswith('_masks.tif')]
    # Get list of image files (we ignore images without segmentation)
    imgs = [f.replace('_masks.tif', '.tif') for f in segs]
    # FIX: `imgs` entries are already full paths (they derive from `segs`,
    # which were joined with `path` above); joining them with `path` again
    # double-prefixed relative inputs, so check the paths directly.
    for im in imgs:
        assert(os.path.isfile(im))
    return imgs, segs
def main():
    """Compute the per-band mean and unbiased sample std of the dataset.

    Uses Welford's online algorithm so the whole dataset never needs to be
    held in memory at once.  The resulting vectors are written as
    Python-style lists to the output files given on the command line.
    """
    # Read command line parameters
    args = parse_cmdline_params()
    validate_cmdline_params(args)
    # Get list of image and segmentation files
    image_paths, label_paths = read_files(args.input)
    # Get number of spectral channels from the first image
    im_hyper, _, _, _ = torchseg.data_loader.read_stiff(image_paths[0],
                                                        silent=True,
                                                        rgb_only=False)
    nchan = im_hyper.shape[2]
    # Welford accumulators: running mean, sum of squared deviations, count
    mean = np.zeros((nchan,))
    m2 = np.zeros((nchan,))
    n = 0
    for im_path, label_path in tqdm.tqdm(zip(image_paths, label_paths),
                                         total=len(image_paths)):
        # Load image (h, w, c)
        # FIX: the original re-read image_paths[0] on every iteration, so the
        # statistics were computed over the first image only
        im_hyper, _, _, _ = torchseg.data_loader.read_stiff(im_path,
                                                            silent=True,
                                                            rgb_only=False)
        # Update the iterative stats pixel by pixel
        h, w, c = im_hyper.shape
        for i in range(h):
            for j in range(w):
                n += 1
                x_n = im_hyper[i, j]
                # Welford update: m2 accumulates sum((x - mean)^2)
                old_mean = mean.copy()
                mean += (x_n - mean) / n
                m2 += (x_n - old_mean) * (x_n - mean)
    # Compute unbiased sample std (Bessel's correction: divide by n - 1)
    std = np.sqrt(m2 / (n - 1))
    # Save mean and std to file (with-blocks guarantee the files are closed)
    with open(args.mean, 'w') as meanfile, open(args.std, 'w') as stdfile:
        meanfile.write('[')
        stdfile.write('[')
        for i in range(nchan):
            meanfile.write("{:.6f}".format(mean[i]) + ', ')
            stdfile.write("{:.6f}".format(std[i]) + ', ')
        meanfile.write("]\n")
        stdfile.write("]\n")


if __name__ == '__main__':
    main()
| 4,349 | 28.794521 | 80 | py |
segodsidb | segodsidb-main/src/data_loader/data_loader.py | """
@brief Collection of data loaders for different datasets.
@author Luis Carlos Garcia Peraza Herrera (luiscarlos.gph@gmail.com).
@date 2 Jun 2021.
"""
import os
import torch
import torchvision
import PIL
import numpy as np
import random
import colour
import scipy
import matplotlib.pyplot as plt
# My imports
import torchseg.base
import torchseg.utils
import torchseg.data_loader as dl
import monai.data.dataset
class MnistDataLoader(torchseg.base.BaseDataLoader):
    """
    @class MnistDataLoader serves batches of the MNIST dataset.
    """
    def __init__(self, data_dir, batch_size, shuffle=True,
                 validation_split=0.0, num_workers=1, training=True):
        self.data_dir = data_dir
        # Normalisation transform using the (0.1307, 0.3081) statistics
        normalise = torchvision.transforms.Compose([
            torchvision.transforms.ToTensor(),
            torchvision.transforms.Normalize((0.1307,), (0.3081,))
        ])
        # Dataset handler (downloads MNIST into data_dir on first use)
        self.dataset = torchvision.datasets.MNIST(
            self.data_dir, train=training, download=True, transform=normalise)
        # Delegate batching/splitting to the base data loader
        super().__init__(self.dataset, batch_size, shuffle, validation_split,
                         num_workers)
class OdsiDbDataLoader(torchseg.base.BaseDataLoader):
# Input image modes
modes = [
'rgbpixel',
'rgbpixel_test',
'rgbimage',
'spixel_51',
'simage_51',
'spixel_170',
'spixel_170_test',
'simage_170',
'spixel_204',
'simage_204',
'boiko',
]
# The array of wavelengths depends on the input mode
mode2wl = {
'spixel_51' : np.linspace(450, 950, 51),
'simage_51' : np.linspace(450, 950, 51),
'spixel_170': np.linspace(450, 950, 170),
'spixel_170_test': np.linspace(450, 950, 170),
'simage_170': np.linspace(450, 950, 170),
'spixel_204': np.linspace(400, 1000, 204),
'simage_204': np.linspace(400, 1000, 204),
'boiko' : np.array([450., 500., 600.]),
}
# RGB normalisation stats
rgb_mean = np.array([0.403129887605, 0.288007343691, 0.299380141092],
dtype=np.float32)
rgb_std = np.array([0.159463123785, 0.130188316525, 0.12346950031],
dtype=np.float32)
# Nuance EX normalisation stats
nuance_ex_mean = np.array([
0.0776138608947, 0.0783226294712, 0.0915784099028,
0.0851693853982, 0.0831637362668, 0.0847127943303,
0.085340519524, 0.0845109800243, 0.0790671446523,
0.0751274680049, 0.0761701337076, 0.0781188917647,
0.0782783973927, 0.0825329193882, 0.0988390369608,
0.120948266959, 0.140439376036, 0.154276410607,
0.164172320211, 0.171390068447, 0.178038853241,
0.184233131521, 0.189796964942, 0.195031949356,
0.198907463087, 0.198520651813, 0.200358926766,
0.203468102035, 0.201678233612, 0.198429438372,
0.193854994342, 0.191312533015, 0.189530537391,
0.18868570976, 0.18854169948, 0.189165124177,
0.189642954468, 0.189296877222, 0.180562774436,
0.177428725348, 0.174671366762, 0.172125912418,
0.16933901838, 0.167608797022, 0.165615862068,
0.163236092449, 0.16056347767, 0.156600176469,
0.143126733362, 0.138918268583, 0.135092694289,
], dtype=np.float32)
nuance_ex_std = np.array([
0.0038290728339, 0.00386745890456, 0.00461893938556,
0.00427318759442, 0.00421193344028, 0.00433005583596,
0.00441629367651, 0.00442865858953, 0.00416620537279,
0.00409641547929, 0.00419634961461, 0.0043631548856,
0.00440318450805, 0.00471044520012, 0.00585655629628,
0.00776347677282, 0.00967594428827, 0.0112555765706,
0.0125962817541, 0.0135586755183, 0.0144026812212,
0.015031395142, 0.0156616627971, 0.0162450380476,
0.016777610301, 0.0171646905704, 0.0174596376809,
0.0175789718117, 0.0174627609924, 0.0171899509512,
0.0168640925817, 0.0167477550425, 0.0166329796535,
0.016384616472, 0.0162204907189, 0.0160845513226,
0.015832708941, 0.0154426560894, 0.0146519935725,
0.0140359361608, 0.0134760286473, 0.0129535803584,
0.0124506272206, 0.0117099903208, 0.0112119812513,
0.0107739222483, 0.0103392189441, 0.0100008831543,
0.00827122500705, 0.00774512242113, 0.00726368979869,
], dtype=np.float32)
# Specim IQ normalisation stats
specim_iq_mean = np.array([
0.112085002548, 0.102140085151, 0.0943113060797, 0.0885467882906,
0.0846466315166, 0.0820774967744, 0.0802850078561, 0.0791292167016,
0.0784498196042, 0.0780782467903, 0.0780644673204, 0.0784523791739,
0.0792390083204, 0.0804284923453, 0.0818753314213, 0.0834831640881,
0.0851511772687, 0.0867686173785, 0.0883751009176, 0.0898204818059,
0.0912809000747, 0.0926440870137, 0.0939971489817, 0.0952519009781,
0.0965328084729, 0.097690027486, 0.0987274062461, 0.0996016817623,
0.100458407892, 0.101202582001, 0.10195598233, 0.102806150098,
0.103694965839, 0.104664556992, 0.105653205393, 0.10659280337,
0.10746400651, 0.108253455173, 0.108935761923, 0.109321392809,
0.109375497886, 0.108865325572, 0.107869014925, 0.106254806345,
0.104402478702, 0.102317535445, 0.100404782722, 0.0988052551902,
0.0977005111836, 0.0969098524837, 0.0966750172086, 0.0968155206467,
0.0974678382789, 0.0984965585346, 0.0997345260659, 0.100758822558,
0.101404000532, 0.101469627798, 0.101135541391, 0.100144589499,
0.0986686515618, 0.0974109084362, 0.0973515855222, 0.0992310783409,
0.103559105299, 0.110339683424, 0.119200190181, 0.129200049368,
0.139490848038, 0.149170142667, 0.158144175815, 0.166132853615,
0.173451190929, 0.179981349582, 0.185961931089, 0.191155738785,
0.195689772047, 0.199596934812, 0.203163066338, 0.206382320236,
0.209400294235, 0.212104318164, 0.214609173942, 0.216990035595,
0.219227218176, 0.221412241784, 0.223472401843, 0.225275373864,
0.227072332864, 0.228744232706, 0.230303433887, 0.231838015095,
0.233366165061, 0.234887439646, 0.236506400265, 0.237957146659,
0.239588794172, 0.241068114, 0.242681232832, 0.244230795064,
0.245745089833, 0.24692634115, 0.248064798697, 0.248622792136,
0.249324235054, 0.249977211781, 0.250498342546, 0.250987490527,
0.251156369557, 0.251248702127, 0.25096838481, 0.250822536009,
0.250102103495, 0.249356839651, 0.248629243344, 0.247524143344,
0.246625750813, 0.245947435307, 0.245181069521, 0.243986731324,
0.242938204373, 0.242353213664, 0.241708338683, 0.241195778181,
0.241245919936, 0.241264032642, 0.241610304121, 0.242049243169,
0.2421130799, 0.242264614189, 0.242367022354, 0.2409313546,
0.241159969908, 0.242161924135, 0.242228185646, 0.242473373156,
0.242266678267, 0.24223316472, 0.241815605688, 0.241536003903,
0.240810299738, 0.239586301619, 0.238636256726, 0.238204898501,
0.237551727056, 0.236373644054, 0.235011936757, 0.233764670484,
0.232366132656, 0.231146941133, 0.23080009416, 0.230533240929,
0.230076653688, 0.229484626509, 0.228599976517, 0.227914845773,
0.227556968449, 0.22725171334, 0.226303809768, 0.225347473023,
0.224614910223, 0.224014662255, 0.22339760286, 0.222414620401,
0.221382271403, 0.220377157411, 0.219338854373, 0.218130296434,
0.217091878005, 0.215903149325, 0.214644602196, 0.213494551926,
0.212338519749, 0.211226395102, 0.210369281188, 0.209304051737,
0.207966701355, 0.206544497363, 0.204824802113, 0.203001666349,
0.201089720809, 0.199036227562, 0.196907502338, 0.194647061506,
0.191861582388, 0.188844590578, 0.18527816507, 0.181248328695,
0.1777737335, 0.175078837825, 0.1733158301, 0.172441615107,
0.171608116792, 0.171192572651, 0.171049377519, 0.170862948778,
0.171274492165, 0.171573746293, 0.172177039069, 0.172957085572,
0.173301498106, 0.174158032792, 0.174963397296, 0.161150677433
], dtype=np.float32)
specim_iq_std = np.array([
0.0051204327844, 0.0049010894168, 0.00490408951459, 0.00501242131618,
0.00515869582467, 0.00531481044734, 0.00547126338076, 0.00563346798113,
0.00579447352576, 0.00596555732144, 0.00615566882416, 0.00635973070577,
0.00657340461617, 0.0067967285048, 0.0070286671205, 0.00727708514704,
0.00752424001689, 0.00775700912999, 0.00798220085672, 0.00818689563432,
0.00838465923951, 0.00856988323327, 0.0087392056371, 0.00889675897692,
0.00905206000264, 0.0091959914293, 0.009312072202, 0.00940073464096,
0.00947625651694, 0.00953227890046, 0.00957837186835, 0.00964017941934,
0.00969605544023, 0.00977571641123, 0.00985408362882, 0.00993917538033,
0.0100143769771, 0.0100859121458, 0.0101409295895, 0.0101669455923,
0.0101443092583, 0.0100463008317, 0.00989182845609, 0.00966438387347,
0.00942136906458, 0.00917258006915, 0.008953890893, 0.00878593637129,
0.00867668098681, 0.00860270653642, 0.00859433373635, 0.0086204846356,
0.00869749103483, 0.00882064339864, 0.00896339789551, 0.00909145584191,
0.00918067503764, 0.00920361403469, 0.00919296874221, 0.00911463416519,
0.00897490062522, 0.00886443804711, 0.00888037267064, 0.00910847772243,
0.00962114957633, 0.0104625277379, 0.0116591370095, 0.0131564475032,
0.014852059166, 0.0165929757477, 0.0183182801335, 0.019935233596,
0.0214820271417, 0.0229137210796, 0.0242616624892, 0.0254825335416,
0.0265475514249, 0.0274784619776, 0.0283352254374, 0.0291056673323,
0.0298353614964, 0.0304790896885, 0.0310573625231, 0.0316169604888,
0.0321468398482, 0.0326668677501, 0.0331730392939, 0.0335956874147,
0.0340282795814, 0.0344635042052, 0.0348561658458, 0.0352599262886,
0.0356911594505, 0.0360880062706, 0.0365148940448, 0.0369118063317,
0.0373465100767, 0.0377660923652, 0.0382188255657, 0.0386452354932,
0.0390436577487, 0.0393508233, 0.0396008200152, 0.0396906081602,
0.0398572928164, 0.0399856573532, 0.0400535027338, 0.0401183618839,
0.0400770409866, 0.0400355447514, 0.0398820349639, 0.0397914604514,
0.0395229589731, 0.0392744169267, 0.0390286206185, 0.0386696315616,
0.0383528940588, 0.0381384245943, 0.0378284388325, 0.0374353366368,
0.0370882825179, 0.0368806895126, 0.0366293825145, 0.0364590994265,
0.036409711205, 0.0363757080885, 0.0364176906163, 0.0364910524371,
0.0364356384961, 0.0364550964887, 0.0363572534936, 0.035713651417,
0.0357296075661, 0.0360257484172, 0.036304159429, 0.0367355386762,
0.0366726984998, 0.0366845383865, 0.0365487921643, 0.0364630193957,
0.0362154434343, 0.0358276964578, 0.0355990390691, 0.0355736606419,
0.0354255001813, 0.0351446246617, 0.0347914317484, 0.0344802785938,
0.0341160734367, 0.0337945665366, 0.0336832205541, 0.0336308778566,
0.0334985680856, 0.0333494362052, 0.0331237910006, 0.0329646205593,
0.0328705891889, 0.0328222972035, 0.0326090837709, 0.0324180583771,
0.0322387706372, 0.0320901724742, 0.0319185152311, 0.0316350319596,
0.0313471149088, 0.031120219116, 0.0308533330801, 0.0305213851429,
0.0302389431622, 0.029960570957, 0.0296949319738, 0.0294617564081,
0.0291807764587, 0.0288812632713, 0.0286547736342, 0.0283600323959,
0.0279933843565, 0.0276050068161, 0.027095059129, 0.0265664812623,
0.026083041933, 0.0256118408366, 0.025073052516, 0.0244892219447,
0.0238096923167, 0.0230908555588, 0.0222759083261, 0.0213622231765,
0.0205653866989, 0.0199707523122, 0.0194896643608, 0.0192172825747,
0.0189468026241, 0.0187808803217, 0.0186485068087, 0.0184880364592,
0.0184822248112, 0.0184771573814, 0.0184898120673, 0.0185603547897,
0.018496862837, 0.0185234983434, 0.0185140609897, 0.0130384043098,
], dtype=np.float32)
# Normalisation stats for interpolated bands 450-950nm (51 bands)
hyper_51_mean = np.array([
0.0810067996349, 0.0829831647364, 0.093483960472, 0.0899734763065,
0.0894423085798, 0.0915193721587, 0.092661139279, 0.0915338960809,
0.0857899568086, 0.0817884475053, 0.0828441900691, 0.0852258059802,
0.0848364019569, 0.0874677529617, 0.105330525865, 0.130889929167,
0.152258716436, 0.167234953479, 0.177657939938, 0.185337380804,
0.192140167946, 0.198199267684, 0.203636422124, 0.208865833329,
0.213144227103, 0.213884648171, 0.215777113836, 0.218022347793,
0.216236726977, 0.213079549878, 0.208936060479, 0.206582950682,
0.205533508108, 0.205109621906, 0.204955697994, 0.205414174429,
0.205367417871, 0.204263258639, 0.197107520177, 0.193805749636,
0.191465798416, 0.189074327189, 0.186375384941, 0.184456663586,
0.182047172551, 0.179237997628, 0.176214905006, 0.172386557792,
0.161391944603, 0.156415102283, 0.150869385165
], dtype=np.float32)
hyper_51_std = np.array([
0.00514014020925, 0.00539040833008, 0.00603190822433, 0.0059271066889,
0.00597948945687, 0.00616100200841, 0.00629645035981, 0.00619439587162,
0.00575553630156, 0.00557603115215, 0.00569343612698, 0.00594936225045,
0.00592971065021, 0.00609018586403, 0.00776464603105, 0.0109345712314,
0.0139807234261, 0.0163833898851, 0.0181979949318, 0.0195246053556,
0.0206441248362, 0.0215073191503, 0.0223579382643, 0.0231928340066,
0.0240259381558, 0.0246012694501, 0.0249232918579, 0.0248939753191,
0.0246073320615, 0.0241213238954, 0.0235886267345, 0.0233070117303,
0.023279264433, 0.0231155168478, 0.022900319036, 0.0229853245983,
0.022653123846, 0.0221113475025, 0.0214060499578, 0.0206702027159,
0.0202024043012, 0.0196976652234, 0.0191932768321, 0.0184724650055,
0.017832398352, 0.0172010450473, 0.0166074947096, 0.0160897372069,
0.0146111361932, 0.0136754420132, 0.0125149234869,
], dtype=np.float32)
# Normalisation stats for interpolated bands 450-950nm (170 bands)
hyper_170_mean = np.array([
0.0810067996349, 0.0816043995643, 0.0821970031859, 0.0827673146001,
0.0849229440499, 0.0880378175387, 0.0911460114559, 0.0932456460856,
0.0922341744158, 0.09118836566, 0.0901252695013, 0.0898196994614,
0.0896553436568, 0.089513093682, 0.0897392633201, 0.0903613267805,
0.090979011488, 0.0915592812428, 0.0919451792569, 0.0923012143522,
0.0925915664477, 0.0925672795887, 0.0923141765019, 0.0918983693193,
0.0909842773978, 0.0893221808685, 0.0875870100037, 0.0858583116221,
0.084507488729, 0.0833029689943, 0.0822162716762, 0.0818705060695,
0.0820900963864, 0.0824551648085, 0.0829875784395, 0.0837494090013,
0.0844853113882, 0.0851180245572, 0.0852873398232, 0.0852510529314,
0.085032962395, 0.0850213767293, 0.0854903869798, 0.0862520932472,
0.0877033510983, 0.0921965406489, 0.0974439886213, 0.103359354549,
0.110504447195, 0.1181816836, 0.125735154289, 0.132916819701,
0.139431940908, 0.14572431233, 0.151778762252, 0.156579367882,
0.161062623956, 0.16533550223, 0.16897751279, 0.172123531667,
0.175163861539, 0.178042253332, 0.180372587267, 0.182632682035,
0.184851135499, 0.186934765385, 0.188969781225, 0.19097210744,
0.192868746172, 0.194688788255, 0.196478620273, 0.198231629835,
0.199843773024, 0.201451354722, 0.203056473628, 0.204639118831,
0.206169527005, 0.207723786193, 0.209189575556, 0.210462771206,
0.211735450466, 0.212993707719, 0.21341177993, 0.213682033696,
0.213833884439, 0.214179921524, 0.214760809264, 0.215310997878,
0.21587149132, 0.216600461745, 0.217276433301, 0.217878384209,
0.217710349168, 0.217203954087, 0.216615029805, 0.215917558555,
0.214973406228, 0.214003477609, 0.213096996496, 0.21194623461,
0.210718301228, 0.209438320294, 0.208491630456, 0.207786470717,
0.207085517874, 0.206505790068, 0.206151632208, 0.205824085203,
0.205570909186, 0.205465349108, 0.205318419385, 0.205186642554,
0.204999346857, 0.204648895535, 0.204734935304, 0.204967520688,
0.205123997043, 0.205306585329, 0.205378787542, 0.205464869726,
0.205440862705, 0.205448678606, 0.205270700196, 0.204834264548,
0.204479281473, 0.204276377615, 0.202394212481, 0.20025954336,
0.1980644597, 0.196538453003, 0.195484550546, 0.194468031395,
0.193685258656, 0.1930344551, 0.192345946229, 0.191614290102,
0.190845453654, 0.190099431202, 0.189436042822, 0.188795821318,
0.188020086576, 0.187164688065, 0.186352484731, 0.18579908009,
0.185262986887, 0.184676669537, 0.18398799608, 0.183275393733,
0.182564743891, 0.181809907514, 0.180977080903, 0.180169855655,
0.179324102047, 0.178414858592, 0.177525557928, 0.176636254971,
0.175626738953, 0.174556558391, 0.17342668134, 0.171943895798,
0.168758784331, 0.165489093699, 0.162187915121, 0.160305287515,
0.158842194262, 0.157354797792, 0.15586327933, 0.154299476375,
0.152647749572, 0.150869385165,
], dtype=np.float32)
# Per-band standard deviation for the 170-band input modes; paired with
# `hyper_170_mean` above for channel-wise standardisation of images.
hyper_170_std = np.array([
    0.00514014020925, 0.00518764780204, 0.00525823383173,
    0.00535012794449, 0.00547291214664, 0.00563897203488,
    0.00584719693034, 0.00601840401274, 0.00597312393174,
    0.00594230808148, 0.00592796769322, 0.00592149651265,
    0.00593073969924, 0.00595863235588, 0.00599366047689,
    0.00603598420629, 0.00609498985838, 0.00616283944647,
    0.00619021429061, 0.00623129889715, 0.00628079540453,
    0.00628164484446, 0.0062554232367, 0.00622091327839,
    0.00614385296943, 0.00600149880839, 0.00586976669371,
    0.00575943407638, 0.00566313714922, 0.00560383617056,
    0.00557784065547, 0.00557070429369, 0.00558360532025,
    0.00563066298374, 0.00570444278682, 0.00577308272315,
    0.00585341427136, 0.00593415400159, 0.0059523230271,
    0.00595304568405, 0.00594175737341, 0.0059186263843,
    0.00591387748421, 0.00596412250667, 0.00610436154186,
    0.00643399196802, 0.00690699378334, 0.00753532691265,
    0.0083193032524, 0.0092326761507, 0.0102199661071,
    0.0112077588638, 0.0121033252134, 0.0130072069277,
    0.0139072203158, 0.0146611876077, 0.0153828289821,
    0.0160744121867, 0.0166766603911, 0.0172164795582,
    0.0177488468134, 0.0182632136293, 0.0186566346647,
    0.0190425397251, 0.0194357015034, 0.0197617010843,
    0.0200872805015, 0.0204344186764, 0.0207311245281,
    0.0209694099278, 0.0212335403014, 0.0215115949242,
    0.0217373517193, 0.0219897650935, 0.0222568826317,
    0.0224981370852, 0.0227274337641, 0.0229864699088,
    0.0232474138528, 0.0234793138343, 0.0237303123809,
    0.0239932552179, 0.0241723466339, 0.0243493688858,
    0.0245137766694, 0.024635517016, 0.0247200187774,
    0.0248183733464, 0.0249209704549, 0.0248972215491,
    0.0248901565673, 0.0248923538104, 0.0248399712793,
    0.024752651806, 0.0246569935811, 0.0245488673966,
    0.0243823754977, 0.0242277833625, 0.024122808071,
    0.023967193481, 0.0237996737411, 0.0236418743447,
    0.0235121271635, 0.0234115922515, 0.0233334145963,
    0.0232919621708, 0.0232425509873, 0.0232313866122,
    0.0232695414247, 0.0232177091961, 0.0231520231536,
    0.0231285687793, 0.0230275623595, 0.0228080246666,
    0.0228104850908, 0.0229009641254, 0.0229397383441,
    0.0230192843686, 0.0229851797824, 0.0229261270692,
    0.0228131414284, 0.0227337707128, 0.0225904626606,
    0.0223573308616, 0.0221935579431, 0.0221160037899,
    0.0219219949626, 0.0217030911434, 0.0214877019102,
    0.021263340512, 0.021007989729, 0.0207843640743,
    0.0206411914581, 0.020503339569, 0.0203641347982,
    0.0202278523375, 0.0200458075605, 0.0198785898809,
    0.0197536648597, 0.0196417853907, 0.0194847954329,
    0.0193236536251, 0.0191825470482, 0.0189384395597,
    0.0187318605473, 0.0185380521754, 0.0183197827216,
    0.0181161554718, 0.0179453503841, 0.0177620880403,
    0.0175395335466, 0.0173615623233, 0.0172140745547,
    0.0170036939038, 0.0168258604467, 0.0166715328373,
    0.0165087393902, 0.0163600570587, 0.0162155713658,
    0.0160108820457, 0.0154940480883, 0.0150403349807,
    0.0146808652842, 0.0143833065811, 0.0141067654461,
    0.0138384200987, 0.0135452274736, 0.0131943819911,
    0.0128539926089, 0.0125149234869,
], dtype=np.float32)
# Normalisation stats for interpolated bands 400-1000nm (204 bands)
# Per-band mean, paired with `hyper_204_std` below for channel-wise
# standardisation.  dtype is pinned to float32 for consistency with the
# 51/170-band statistics; without it the array defaults to float64 and the
# in-place normalisation silently promotes every image to double precision.
hyper_204_mean = np.array([
    0.0853443115378, 0.082848078348, 0.0810082485533, 0.0797739291089,
    0.0789765010856, 0.0784253976231, 0.0780743386258, 0.0778704430139,
    0.0777675948717, 0.0777807791937, 0.0779223368812, 0.0781914794129,
    0.0785791016654, 0.0790429332303, 0.0795490719257, 0.0800657360188,
    0.0805705773184, 0.0810564855142, 0.0816537555639, 0.0822434301913,
    0.0828129241917, 0.0851697518554, 0.0882823012806, 0.0913843392534,
    0.0931698296219, 0.0921555102572, 0.0911103278881, 0.0900456833142,
    0.089806860604, 0.0896448332975, 0.0895035055374, 0.0897823078737,
    0.0904041013803, 0.0910202057368, 0.0915858176773, 0.0919696644263,
    0.0923231893207, 0.0926073570099, 0.0925576733003, 0.0922939783025,
    0.0918693242863, 0.090887923841, 0.0892229863784, 0.0874854547235,
    0.0857666488026, 0.0844349513903, 0.0832400678965, 0.0821597277192,
    0.0818779649125, 0.0821034052056, 0.0824762599571, 0.0830232482897,
    0.0837875118399, 0.0845195548363, 0.0851460471168, 0.0852897622035,
    0.0852479044979, 0.0850210136633, 0.0850393938706, 0.0855107406393,
    0.086287057134, 0.0878600273907, 0.0923785274538, 0.0976499201371,
    0.103583522281, 0.110778056089, 0.118450928734, 0.12599042822,
    0.133139807533, 0.139638323898, 0.145918215417, 0.151959042544,
    0.156715107509, 0.161186999087, 0.165450053057, 0.169061771918,
    0.172201978179, 0.175236667448, 0.178097874942, 0.180423778624,
    0.18268025368, 0.184895774742, 0.186974093397, 0.18900679109,
    0.191006481496, 0.192898356772, 0.194716583326, 0.196504052264,
    0.198253142208, 0.199863596941, 0.201469554295, 0.203073066396,
    0.204653778571, 0.206182218116, 0.207735383038, 0.209197545352,
    0.210469742978, 0.211741056628, 0.212998041639, 0.213412478345,
    0.213682428528, 0.213833922756, 0.214179626296, 0.214759964092,
    0.215309688396, 0.21586877854, 0.216597401741, 0.217272827733,
    0.217874829296, 0.217713402978, 0.217208848111, 0.216620579664,
    0.215926719795, 0.214984752083, 0.214015036535, 0.213108572771,
    0.211962954426, 0.210738131305, 0.209458724713, 0.208503667422,
    0.20779950603, 0.20709852571, 0.206512866728, 0.206159266439,
    0.205829899335, 0.2055763301, 0.205469072134, 0.2053216321,
    0.205190373083, 0.205011808434, 0.204647795705, 0.204727073534,
    0.204963079918, 0.205117748587, 0.205304463145, 0.205374891607,
    0.205465791763, 0.205440357818, 0.205452973033, 0.205285706192,
    0.20485040917, 0.204493097663, 0.204284354293, 0.202475507375,
    0.200349367799, 0.198158744982, 0.196583004897, 0.195532072024,
    0.194514200173, 0.193716582944, 0.193065301919, 0.192380201088,
    0.191651233395, 0.190885340272, 0.190137014281, 0.189469187682,
    0.188831440083, 0.18806680562, 0.187212345013, 0.186388536669,
    0.185829852653, 0.185294547243, 0.18471523209, 0.184031349326,
    0.18331896063, 0.182609573035, 0.181863755887, 0.181028442121,
    0.180224994041, 0.179381357649, 0.178474862566, 0.177586557723,
    0.176697226046, 0.175701928521, 0.174635658965, 0.17350939096,
    0.172175970531, 0.168995748617, 0.165735941283, 0.162440184803,
    0.160415869749, 0.158957310826, 0.157473121414, 0.155979883857,
    0.154430096451, 0.152785336609, 0.151022345404, 0.149900274821,
    0.148793261038, 0.147873480083, 0.147190194871, 0.146765078222,
    0.146512095475, 0.146301063977, 0.146197658023, 0.146153203837,
    0.14612417137, 0.146241604267, 0.146334423564, 0.146512443726,
    0.146740819307, 0.146845406329, 0.147084946832, 0.147322538854,
], dtype=np.float32)
# Per-band standard deviation for the 204-band (400-1000 nm) input modes;
# paired with `hyper_204_mean` above.  dtype pinned to float32 for
# consistency with the 51/170-band statistics and to avoid promoting
# normalised images to float64 during the in-place division.
hyper_204_std = np.array([
    0.00429063019463, 0.00421556938322, 0.00421326782134, 0.00424388475654,
    0.00428636463332, 0.00433271456692, 0.00438247193098, 0.00443299962565,
    0.00448713852853, 0.00454708491045, 0.00461157550422, 0.00467946718575,
    0.00475101854674, 0.00482716376698, 0.00490837051352, 0.00498946941246,
    0.00506774484486, 0.00514309477465, 0.0051925135872, 0.00526480835903,
    0.00535829764719, 0.00548445997478, 0.00565378259084, 0.00586489236965,
    0.00601440273555, 0.005970148491, 0.00594068114053, 0.00592745749255,
    0.00592144445538, 0.00593224420545, 0.00596114517579, 0.00599609616221,
    0.00603945896088, 0.00609961389108, 0.00616417685938, 0.00619258440357,
    0.00623438905956, 0.00628420787443, 0.00628055551672, 0.00625363016914,
    0.00621884501468, 0.0061352346863, 0.00599351875816, 0.00586262239631,
    0.00575361016223, 0.00565861485312, 0.00560151576211, 0.00557717177346,
    0.00557049875569, 0.00558494534593, 0.00563381330001, 0.00570727512085,
    0.0057769231621, 0.00585751955194, 0.00593806224132, 0.00595258611539,
    0.00595319388812, 0.00594124665851, 0.00591775837127, 0.0059141902236,
    0.00596699872254, 0.00611386079954, 0.00644855789032, 0.00692696261297,
    0.00756087973826, 0.00835009031748, 0.00926676960146, 0.0102550364563,
    0.0112383325076, 0.0121326939975, 0.0130359966992, 0.0139347779985,
    0.0146831125972, 0.0154032868349, 0.0160932585878, 0.0166910923292,
    0.0172302476572, 0.0177618612655, 0.0182727020506, 0.0186654340517,
    0.0190508131667, 0.0194438153118, 0.0197677705176, 0.0200934680362,
    0.0204406290093, 0.0207347602012, 0.0209732759897, 0.0212375094924,
    0.0215144401639, 0.0217402755271, 0.0219927754858, 0.0222597296813,
    0.0225002660856, 0.0227294251226, 0.022988508771, 0.0232487835734,
    0.0234806482899, 0.0237314659303, 0.0239941872785, 0.0241727708999,
    0.0243496449606, 0.0245138515016, 0.0246354792911, 0.0247198722799,
    0.024818114158, 0.02492103543, 0.0248973159425, 0.0248901019265,
    0.0248923472587, 0.0248404151896, 0.0247535516486, 0.0246577808842,
    0.0245505008988, 0.0243843574091, 0.0242294347995, 0.0241238050661,
    0.0239695315257, 0.0238023597521, 0.023644131753, 0.023513948889,
    0.0234134082593, 0.0233344132812, 0.0232932802508, 0.0232432945068,
    0.0232309333753, 0.0232681973267, 0.0232199005178, 0.0231528377813,
    0.0231294812682, 0.0230352229922, 0.0228088477442, 0.0228078705025,
    0.0229006847232, 0.0229372223784, 0.023020913354, 0.0229853138712,
    0.0229304161225, 0.0228162353109, 0.0227383430924, 0.0225990666575,
    0.0223655419096, 0.022199526403, 0.0221186160642, 0.0219295642769,
    0.0217117120569, 0.021496332392, 0.0212742212559, 0.0210190235232,
    0.0207939541841, 0.0206486668801, 0.0205093053681, 0.0203708455962,
    0.020234317687, 0.0200555277685, 0.0198863727191, 0.0197592893651,
    0.0196486847768, 0.0194943242135, 0.0193319664984, 0.0191953265613,
    0.0189512298341, 0.0187432517636, 0.0185500554975, 0.0183334317457,
    0.0181273213302, 0.0179558892652, 0.0177777718174, 0.0175522171077,
    0.0173725481567, 0.0172229668295, 0.0170170874327, 0.0168379915155,
    0.016681685115, 0.0165205523857, 0.0163709393398, 0.0162251579454,
    0.0160519388455, 0.0155285059359, 0.0150706924364, 0.0147043076919,
    0.0144054270517, 0.0141271505124, 0.0138587851936, 0.0135721412487,
    0.0132223164765, 0.0128809380303, 0.0125428954494, 0.0122095514875,
    0.0118863380612, 0.011623718658, 0.0114240502952, 0.0112818544874,
    0.0111888996149, 0.0111072365138, 0.0110557638921, 0.0110133352409,
    0.0109718697148, 0.0109764311879, 0.0109805840783, 0.010994444414,
    0.0110266496781, 0.0110136046495, 0.0110310144291, 0.0110412626426,
], dtype=np.float32)
# Channel-wise normalisation statistics, keyed by input mode.  The 51-band
# stats serve the native 450-950nm modes and the 170/204-band stats the
# interpolated modes; 'boiko' has no precomputed statistics (None).
mode2mean = dict(
    spixel_51=hyper_51_mean,
    simage_51=hyper_51_mean,
    spixel_170=hyper_170_mean,
    spixel_170_test=hyper_170_mean,
    simage_170=hyper_170_mean,
    spixel_204=hyper_204_mean,
    simage_204=hyper_204_mean,
    boiko=None,
)
mode2std = dict(
    spixel_51=hyper_51_std,
    simage_51=hyper_51_std,
    spixel_170=hyper_170_std,
    spixel_170_test=hyper_170_std,
    simage_170=hyper_170_std,
    spixel_204=hyper_204_std,
    simage_204=hyper_204_std,
    boiko=None,
)
class LoadImage(torch.nn.Module):
    """
    @class LoadImage is a transform that loads a TIF image and label.
    This is not supported by MONAI.
    """
    #def __init__(self, keys, mode='rgb', training=True):
    def __init__(self, keys, mode='rgbimage'):
        """
        @param[in] keys Keys of the data dictionary this transform works
                        on (stored on the instance; not used directly in
                        this class).
        @param[in] mode 'rgbpixel': Load an RGB pixel at a time.
                        'spixel': Load a pixel-wide (1x1x51)
                                  multispectral vector.
                        'rgbimage': Load the RGB version of the input
                                    image.
                        'simage': Load the multispectral version of
                                  the input image.
        """
        # The mode must be one declared on the enclosing data loader
        assert(mode in OdsiDbDataLoader.modes)
        self.keys = keys
        self.mode = mode
        #self._training = training
        # NOTE(review): attributes are assigned before
        # torch.nn.Module.__init__ runs; fine for plain attributes, but
        # confirm no parameters/submodules are ever set before this call.
        super().__init__()
@staticmethod
def get_cmfs(cmf_name: str):
    """
    @brief Get colour matching funtions for different standards.
    @param[in] cmf_name One of [cie_2_1931, cie_10_1964, cie_2_2012,
                        cie_10_2012].  The CIE colour matching functions
                        numerically describe the chromatic response of
                        the standard observer: three spectral
                        sensitivity curves yielding the tristimulus
                        values X, Y and Z.
    @returns the colour-science standard observer dataset for the
             requested CMF.
    @raises AttributeError if cmf_name is not one of the known keys.
    """
    cmf_full_name = {
        'cie_2_1931': 'CIE 1931 2 Degree Standard Observer',
        'cie_10_1964': 'CIE 1964 10 Degree Standard Observer',
        'cie_2_2012': 'CIE 2012 2 Degree Standard Observer',
        'cie_10_2012': 'CIE 2012 10 Degree Standard Observer',
    }
    # Guard clause: unknown identifier -> informative error
    full_name = cmf_full_name.get(cmf_name)
    if full_name is None:
        err_msg = '[ERROR] Wrong CMF name. The available options are: '
        err_msg += ', '.join(cmf_full_name.keys())
        raise AttributeError(err_msg)
    return colour.colorimetry.MSDS_CMFS_STANDARD_OBSERVER[full_name]
@staticmethod
def gamma_correction(srgb_chan):
"""
@brief Perform gamma correction on an sRGB image (i.e. the range of
the data in the channel is [0., 1.]).
@param[in] u One of the three sRGB channels.
"""
u = srgb_chan[srgb_chan <= 0.0031308]
u_r = srgb_chan[srgb_chan > 0.0031308]
corrected = np.empty_like(srgb_chan)
corrected[srgb_chan <= 0.0031308] = 12.92 * u
corrected[srgb_chan > 0.0031308] = 1.055 * np.power(u_r, 0.416) \
- 0.055
return corrected
@staticmethod
def get_single_wl_im(raw_im_hyper, raw_wl, wl):
"""
@brief Given an image and the corresponding wavelengths this function
returns the image corresponding to a specific channel.
@param[in] raw_im_hyper Hyperspectral time,
shape (h, w, nbands).
@param[in] raw_wl Wavelengths, shape (bands,).
@param[in] wl Specific wavelegth of the image you want to
extract.
@returns an array of dimenstion (h, w).
"""
idx = (np.abs(raw_wl - wl)).argmin()
return raw_im_hyper[:, :, idx].copy()
@staticmethod
def filter_bands(raw_im_hyper, raw_wl, min_wl, max_wl):
"""
@brief Given a hyperspectral image this function removes those
bands outside the [min_wl, max_wl] range.
@param[in] raw_im_hyper Hyperspectral time,
shape (h, w, nbands).
@param[in] raw_wl Wavelengths, shape (bands,).
@param[in] min_wl Minimum wavelength.
@param[in] max_wl Maximum wavelength.
@returns a tuple (new_im_hyper, new_wl) where new_im_hyper is the new
hyperspectral image and new_wl is the new vector of wavelengths
of the channels contained in the new_im_hyper.
"""
# Create new array of wavelengths
new_wl = list(filter(lambda x: (x >= min_wl and x <= max_wl), raw_wl))
# Create new hyperspectral image
new_im_hyper = np.empty((raw_im_hyper.shape[0], raw_im_hyper.shape[1],
len(new_wl)),
dtype=raw_im_hyper.dtype)
for chan_idx, wl in enumerate(new_wl):
new_im_hyper[:, :, chan_idx] = \
OdsiDbDataLoader.LoadImage.get_single_wl_im(raw_im_hyper, raw_wl, wl)
return new_im_hyper, np.array(new_wl)
@staticmethod
def get_additive_correction(cmf_name, wl):
    """
    @brief Build interpolators that model the portion of the colour
           matching functions lying below the smallest captured
           wavelength, shifted (mirrored) into the captured range so it
           can be added back as a colour-compensation term.
    @param[in] cmf_name CMF identifier accepted by get_cmfs.
    @param[in] wl Wavelengths captured by the sensor, shape (bands,).
    @returns a tuple (f_xbar_corr, f_ybar_corr, f_zbar_corr) of PCHIP
             interpolators; they evaluate to NaN outside their support
             because extrapolate=False.
    TODO: although not necessary for ODSI-DB because the data goes until 900nm
    at least, we should compensate for the data missing the right side of
    the spectrum too
    """
    # Get sampled colour matching functions
    cmfs = OdsiDbDataLoader.LoadImage.get_cmfs(cmf_name)
    xbar_y, ybar_y, zbar_y = colour.utilities.tsplit(cmfs.values)
    # Get x-coordinates of the missing wavelengths, then shift them just
    # above min(wl) so the correction lives inside the captured range
    x_corr = [x for x in cmfs.wavelengths if x < min(wl)]
    epsilon = min(wl) - x_corr[-1]
    offset = max(x_corr) - min(x_corr) + epsilon
    x_corr = [x + offset for x in x_corr]
    # Get additive correction for xbar (tail values reversed so the
    # correction decays away from min(wl))
    xbar_y_corr = [y for x, y in zip(cmfs.wavelengths, xbar_y) if x < min(wl)]
    xbar_y_corr.reverse()
    # Get additive correction for ybar
    ybar_y_corr = [y for x, y in zip(cmfs.wavelengths, ybar_y) if x < min(wl)]
    ybar_y_corr.reverse()
    # Get additive correction for zbar
    zbar_y_corr = [y for x, y in zip(cmfs.wavelengths, zbar_y) if x < min(wl)]
    zbar_y_corr.reverse()
    # Get interpolated functions for the additive correction
    f_xbar_corr = scipy.interpolate.PchipInterpolator(x_corr, xbar_y_corr,
                                                      extrapolate=False)
    f_ybar_corr = scipy.interpolate.PchipInterpolator(x_corr, ybar_y_corr,
                                                      extrapolate=False)
    f_zbar_corr = scipy.interpolate.PchipInterpolator(x_corr, zbar_y_corr,
                                                      extrapolate=False)
    return f_xbar_corr, f_ybar_corr, f_zbar_corr
@staticmethod
def get_corrected_cmf(cmf_name, wl):
    """
    @brief Get colour matching function modified to colour-compensate
           for missing wavelengths in the input images.
    @param[in] cmf_name CMF identifier accepted by get_cmfs (e.g.
                        'cie_2_1931').
    @param[in] wl Wavelengths captured by the sensor, shape (bands,).
    @returns a tuple (f_xbar, f_ybar, f_zbar) of PCHIP interpolators
             defined over wl, each the sum of the sampled CMF and the
             additive correction; NaN outside wl (extrapolate=False).
    """
    # Get colour matching function (wavelength -> xyz_bar)
    cmfs = OdsiDbDataLoader.LoadImage.get_cmfs(cmf_name)
    xbar_y, ybar_y, zbar_y = colour.utilities.tsplit(cmfs.values)
    f_xbar = scipy.interpolate.PchipInterpolator(cmfs.wavelengths,
                                                 xbar_y,
                                                 extrapolate=False)
    f_ybar = scipy.interpolate.PchipInterpolator(cmfs.wavelengths,
                                                 ybar_y,
                                                 extrapolate=False)
    f_zbar = scipy.interpolate.PchipInterpolator(cmfs.wavelengths,
                                                 zbar_y,
                                                 extrapolate=False)
    # Get additive correction for missing wavelengths (e.g. Nuance EX)
    f_xbar_corr, f_ybar_corr, f_zbar_corr = \
        OdsiDbDataLoader.LoadImage.get_additive_correction(cmf_name, wl)
    # Get corrected colour matching function; nan_to_num zeroes the
    # regions where either interpolator has no support
    xbar_y = np.nan_to_num(f_xbar(wl)) + np.nan_to_num(f_xbar_corr(wl))
    ybar_y = np.nan_to_num(f_ybar(wl)) + np.nan_to_num(f_ybar_corr(wl))
    zbar_y = np.nan_to_num(f_zbar(wl)) + np.nan_to_num(f_zbar_corr(wl))
    f_xbar = scipy.interpolate.PchipInterpolator(wl, xbar_y, extrapolate=False)
    f_ybar = scipy.interpolate.PchipInterpolator(wl, ybar_y, extrapolate=False)
    f_zbar = scipy.interpolate.PchipInterpolator(wl, zbar_y, extrapolate=False)
    return f_xbar, f_ybar, f_zbar
@staticmethod
def hyper2rgb(raw_im_hyper: np.ndarray, raw_wl: np.ndarray,
              cmf_name: str = 'cie_2_1931', illuminant_name: str = 'D65',
              min_wl: float = 300., max_wl: float = 830.):
    """
    @brief Convert hyperspectral image into RGB for debugging purposes.
    @details The code in this function follows the paper:
             "Creating RGB images from hyperspectral images using a color
             matching function" Magnusson et al. 2020.
             More info here: https://help.commonvisionblox.com/Spectral/
             html_english_color-conversion.html
    @param[in] raw_im_hyper Hyperspectral band image read from
                            ODSI-DB. The intensities are in the
                            range [0.0, 1.0].
                            Shape (h, w, nbands).
    @param[in] raw_wl Wavelengths, shape (bands,).
                      In the ODSI-DB, the smallest
                      wavelength is 397.32nm, and the
                      largest is 1003.58nm.
    @param[in] cmf_name One of [cie_2_1931, cie_10_1964,
                        cie_2_2012, cie_10_2021].
    @param[in] illuminant_name See colour.SDS_ILLUMINANTS for more
                               information.
    @param[in] min_wl/max_wl Band range kept for the conversion.
    @return RGB image in range [0, 255], shape (H, W, 3), dype=np.uint8.
    """
    # Remove bands outside the 300 nm to 830 nm range, they do not
    # provide any information and this is the range where the D65 illuminant
    # is defined
    im_hyper, wl = OdsiDbDataLoader.LoadImage.filter_bands(raw_im_hyper,
                                                           raw_wl, min_wl,
                                                           max_wl)
    h, w, nbands = im_hyper.shape
    # Get colour matching functions
    f_xbar, f_ybar, f_zbar = \
        OdsiDbDataLoader.LoadImage.get_corrected_cmf(cmf_name, wl)
    # Get illuminant function (wavelength -> relative power)
    il = colour.SDS_ILLUMINANTS[illuminant_name]
    f_illum = scipy.interpolate.PchipInterpolator(il.wavelengths,
                                                  il.values,
                                                  extrapolate=True)
    # Convert hyperspectral image to XYZ (normalisation scales the
    # values so that y = 1 is the white colour)
    one_over_n = 1. / np.trapz(f_ybar(wl) * f_illum(wl), wl)
    x = one_over_n * np.trapz(f_xbar(wl) * im_hyper * f_illum(wl), wl,
                              axis=2)
    y = one_over_n * np.trapz(f_ybar(wl) * im_hyper * f_illum(wl), wl,
                              axis=2)
    z = one_over_n * np.trapz(f_zbar(wl) * im_hyper * f_illum(wl), wl,
                              axis=2)
    # Convert XYZ to RGB (in the range [0, 1]) using the standard
    # sRGB (D65) XYZ -> linear RGB matrix
    red = 3.2406255 * x - 1.5372080 * y - 0.4986286 * z;
    green = -0.9689307 * x + 1.8757561 * y + 0.0415175 * z;
    blue = 0.0557101 * x - 0.2040211 * y + 1.0569959 * z;
    # Gamma correction (without gamma correction, images look overly
    # dark)
    red = OdsiDbDataLoader.LoadImage.gamma_correction(red)
    green = OdsiDbDataLoader.LoadImage.gamma_correction(green)
    blue = OdsiDbDataLoader.LoadImage.gamma_correction(blue)
    # Reshape images back to 2D
    im_rgb = np.dstack((red, green, blue))
    # Convert sRGB [0., 1.] to RGB in uint8 [0, 255]
    im_rgb = np.clip((255. * im_rgb).round(), 0, 255).astype(np.uint8)
    return im_rgb
@staticmethod
def interp_spectra(im_hyper, wl, new_wl, eps=1., mode='linear'):
"""
@brief Given a hyperspectral image sampled at certain wavelengths,
produce a new hyperspectral image sampled at different wavelengths.
@param[in] im_hyper Hyperspectral image of shape (h, w, c).
@param[in] wl Wavelengths of each of the channels of 'im_hyper',
shape (c,).
@param[in] new_wl Wavelengths of each of the channels of the wanted
interpolated image to be returned, shape (c,).
@param[in] mode Interpolation mode, 'nearest' or 'linear' available.
@returns a new hyperspectral image of shape (h, w, c).
"""
# Make sure that the new wavelengths are not outside the provided ones
assert(min(wl) - eps < min(new_wl))
assert(max(wl) + eps > max(new_wl))
# Compute the distance between wavelengths
dist = scipy.spatial.distance.cdist(wl.reshape((wl.shape[0], 1)),
new_wl.reshape((new_wl.shape[0], 1)),
metric='euclidean')
# Compute the nearest wavelength for each band
nearest = np.argmin(dist, axis=0)
# Synthesize new hyperspectral image
h, w, _ = im_hyper.shape
im_hyper_new = np.empty((h, w, new_wl.shape[0]), dtype=im_hyper.dtype)
if mode == 'nearest':
# Loop over the new bands
for i in range(new_wl.shape[0]):
im_hyper_new[:, :, i] = im_hyper[:, :, nearest[i]].copy()
elif mode == 'linear':
# Loop over the new bands
for i in range(new_wl.shape[0]):
# Find the bands before and after
if wl[nearest[i]] > new_wl[i]:
j = nearest[i] - 1
k = nearest[i]
else:
j = nearest[i]
k = nearest[i] + 1
# Clip values at the extremes
if j < 0:
j = 0
if k == wl.shape[0]:
k = wl.shape[0] - 1
# Compute the weights of the bands before and after
if j == k:
w1 = 0.5
w2 = 0.5
else:
d1 = new_wl[i] - wl[j]
d2 = wl[k] - new_wl[i]
norm = d1 + d2
w1 = d2 / norm
w2 = d1 / norm
# Add interpolated channel to the image
im_hyper_new[:, :, i] = w1 * im_hyper[:, :, j] + w2 * im_hyper[:, :, k]
return im_hyper_new
@staticmethod
def read_image(path, mode, new_wl=np.linspace(400, 1000, 204)):
    """
    @brief Load an ODSI-DB spectral TIFF and convert it to the
           representation requested by 'mode'.
    @param[in] path Path to the '*.tif' spectral image.
    @param[in] mode One of OdsiDbDataLoader.modes.
    @param[in] new_wl Default target wavelengths; evaluated once at
                      function-definition time and overwritten in every
                      reachable branch below, so the default is unused.
    @returns an image with shape (c, h, w) containing values [0, 1].
    """
    if mode not in OdsiDbDataLoader.modes:
        raise ValueError('[ERROR] The input mode ' + mode \
            + ' is unknown.')
    # Read raw TIFF file
    im_hyper, wl, _, metadata = torchseg.data_loader.read_stiff(path,
        silent=True, rgb_only=False)
    # Select the type of input wanted by the user
    if mode in ['rgbpixel', 'rgbpixel_test', 'rgbimage']:
        # Generate RGB reconstruction
        im_rgb = OdsiDbDataLoader.LoadImage.hyper2rgb(im_hyper, wl)
        # Convert RGB to float and normalise it to the range [0, 1]
        im = im_rgb.astype(np.float32) / 255.
        # Center and scale the image
        im -= OdsiDbDataLoader.rgb_mean
        im /= OdsiDbDataLoader.rgb_std
        # Convert the image to (C, H, W)
        im = im.transpose((2, 0, 1)).astype(np.float32)
    elif mode in ['spixel_51', 'simage_51',
                  'spixel_170', 'spixel_170_test', 'simage_170', 'boiko']:
        # In the ODSI-DB dataset there are images with 51 bands
        # (450-950nm, both inclusive) and images with 204 bands
        # (400-1000nm)
        #
        # The boiko mode comes from "Deep Learning for Dental
        # Image Analysis" by Boiko et al. 2019
        # Get the array of interpolated wavelengths according to the
        # input mode selected by the user
        new_wl = OdsiDbDataLoader.mode2wl[mode]
        # Interpolate hyperspectral image to the requested range
        # NOTE(review): 'dl.' looks like a self-referential module
        # import — confirm it resolves at runtime.
        interp_func = dl.OdsiDbDataLoader.LoadImage.interp_spectra
        im = interp_func(im_hyper, wl, new_wl)
        # Center and scale the image
        # NOTE(review): mode2mean['boiko'] is None, so this in-place
        # subtraction would raise for mode='boiko' — confirm intended.
        im -= OdsiDbDataLoader.mode2mean[mode]
        im /= OdsiDbDataLoader.mode2std[mode]
        # Convert the image to (C, H, W)
        im = im.transpose((2, 0, 1)).astype(np.float32)
    elif mode in ['spixel_204', 'simage_204']:
        # Get the array of interpolated wavelengths according to the
        # input mode selected by the user
        new_wl = OdsiDbDataLoader.mode2wl[mode]
        # Get the function that will interpolate the hyperspectral
        # images into a fixed set of wavelengths
        interp_func = dl.OdsiDbDataLoader.LoadImage.interp_spectra
        # If the image comes from the Nuance EX, let's amend it
        if im_hyper.shape[2] == 51:
            # Get an array of the wavelengths in [450, 950]
            inside_indices = np.array([idx for idx, x in enumerate(new_wl.tolist()) \
                if x > 450. and x < 950.])
            inside_wl = new_wl[inside_indices]
            # Interpolate hyperspectral image to the requested range
            inside_im = interp_func(im_hyper, wl, inside_wl)
            # Extrapolate to 204 bands: pad below/above the captured
            # range with the first/last interpolated band
            nbands = 204
            im = np.empty((im_hyper.shape[0], im_hyper.shape[1], nbands),
                          dtype=inside_im.dtype)
            j = 0
            for i in range(nbands):
                if i < np.min(inside_indices):
                    im[:, :, i] = inside_im[:, :, 0]
                elif i > np.max(inside_indices):
                    im[:, :, i] = inside_im[:, :, -1]
                else:
                    im[:, :, i] = inside_im[:, :, j]
                    j += 1
            # Center and scale the image
            im -= OdsiDbDataLoader.mode2mean[mode]
            im /= OdsiDbDataLoader.mode2std[mode]
            # Convert the image to (C, H, W)
            im = im.transpose((2, 0, 1)).astype(np.float32)
        else:
            # Interpolate hyperspectral image to the requested range
            im = interp_func(im_hyper, wl, new_wl)
            # Center and scale the image
            im -= OdsiDbDataLoader.mode2mean[mode]
            im /= OdsiDbDataLoader.mode2std[mode]
            # Convert the image to (C, H, W)
            im = im.transpose((2, 0, 1)).astype(np.float32)
    else:
        raise ValueError('[ERROR] ODSI-DB mode unknown.')
    return im
@staticmethod
def read_label(path):
    """
    @brief Load a multi-mask TIFF annotation file into a per-class array.
    @param[in] path Path to the '*_masks.tif' annotation file.
    @returns a float32 array of shape (n_classes, h, w) with one binary
             mask per known class; pixels without any annotated class
             are left all-zero.
    @raises ValueError if the file contains an unknown class name.
    """
    masks = torchseg.data_loader.read_mtiff(path)
    # All masks share the spatial size of the source image
    spatial_shape = masks[next(iter(masks))].shape
    # Quick index <-> class-name conversion tables
    idx2class = OdsiDbDataLoader.OdsiDbDataset.classnames
    class2idx = {name: idx for idx, name in idx2class.items()}
    label = np.zeros((len(idx2class), *spatial_shape), dtype=np.float32)
    # Populate the binary array for each annotated class
    for name, mask in masks.items():
        if name not in class2idx:
            raise ValueError('[ERROR] Unknown <' + name + '> label.')
        label[class2idx[name]] = mask.astype(np.float32)
    return label
def forward(self, data):
    """
    @brief Load one sample (image + segmentation masks) from disk.
    @param[in] data Dictionary with the 'image' and 'label' file paths
                    to read.
    @returns a dictionary with the loaded 'image' (c, h, w), the
             per-class 'label' masks, the input 'mode' and the image
             'path'.
    """
    # Open TIF image
    im = OdsiDbDataLoader.LoadImage.read_image(data['image'], self.mode)
    # Open ground truth segmentation
    label = OdsiDbDataLoader.LoadImage.read_label(data['label'])
    # Make default output
    dic = {
        'image': im,
        'label': label,
        'mode' : self.mode,
        'path' : data['image'],
    }
    '''
    # If the user wants pixel-wise multispectral predictions, i.e. what
    # we call 'spixel' mode, we change the output to contain just a
    # random pixel from the image
    #if self.mode == 'spixel' and self.training:
    if self.mode == 'spixel':
        # Find all the pixels properly labelled
        ann_idx = np.array((np.sum(label, axis=0) == 1).nonzero())
        # Take only labelled pixels, shape: (npixels, bands)
        pixels = im[:, ann_idx[0], ann_idx[1]].transpose((1, 0))
        pixel_labels = label[:, ann_idx[0], ann_idx[1]].transpose((1, 0))
        # Reshape (npixels, bands, 1, 1)
        pixels = np.expand_dims(pixels, axis=(2, 3))
        pixel_labels = np.expand_dims(pixel_labels, axis=(2, 3))
        dic = {'image': pixels, 'label': pixel_labels, 'mode': self.mode}
        # Choose a random pixel
        #pix = np.random.randint(0, ann_idx.shape[1])
        #rand_row = ann_idx[0, pix]
        #rand_col = ann_idx[1, pix]
        # Get annotated hyperspectral pixel
        #pixel = im[:, rand_row, rand_col].reshape(
        #    (im.shape[0], 1, 1))
        #pixel_label = label[:, rand_row, rand_col].reshape(
        #    (label.shape[0], 1, 1))
        #dic = {'image': pixel, 'label': pixel_label, 'mode': self.mode}
        # TODO: Make many images of 1x1 pixel instead of picking a
        #       pixel randomly
        #dic = {'image': [], 'label': []}
        #for i in range(im.shape[1]):
        #    for j in range(im.shape[2]):
        #        pixel = im[:, i, j].reshape((im.shape[0], 1, 1))
        #        pixel_label = label[:, i, j].reshape((label.shape[0], 1, 1))
        #        dic['image'].append(pixel)
        #        dic['label'].append(pixel_label)
    '''
    return dic
#@property
#def training(self):
#    return self._training
#@training.setter
#def training(self, training):
#    self._training = training
class OdsiDbDataset(monai.data.dataset.PersistentDataset):
    """
    @class OdsiDbDataset exposes the ODSI-DB image/mask pairs as a MONAI
    PersistentDataset so that transformed samples can be cached.
    """
    # Class indices and names for the ODSI-DB dataset
    classnames = {
        0 : u'Attached gingiva',
        1 : u'Attrition/Erosion',
        2 : u'Blood vessel',
        3 : u'Calculus',
        4 : u'Dentine caries',
        5 : u'Enamel',
        6 : u'Fibroma',
        7 : u'Fluorosis',
        8 : u'Gingivitis',
        9 : u'Hair',
        10: u'Hard palate',
        11: u'Inflammation',
        12: u'Initial caries',
        13: u'Leukoplakia',
        14: u'Lip',
        15: u'Makeup',
        16: u'Malignant lesion',
        17: u'Marginal gingiva',
        18: u'Metal',
        19: u'Microfracture',
        20: u'Mole',
        21: u'Oral mucosa',
        22: u'Out of focus area',
        23: u'Pigmentation',
        24: u'Plaque',
        25: u'Plastic',
        26: u'Prosthetics',
        27: u'Root',
        28: u'Shadow/Noise',
        29: u'Skin',
        30: u'Soft palate',
        31: u'Specular reflection',
        32: u'Stain',
        33: u'Tongue',
        34: u'Ulcer',
    }
    # Reverse lookup: class name -> class index
    classnames_reverse = {v:k for k, v in classnames.items()}
    #def __init__(self, data_dir, mode='rgb', training='True'):
    def __init__(self, data_dir, mode='rgbimage'):
        """
        @param[in] data_dir Directory with the '*.tif' images and their
                            '*_masks.tif' annotations.
        @param[in] mode Input type, see LoadImage constructor for more
                        details.
        """
        # Store parameters
        assert(mode in OdsiDbDataLoader.modes)
        self.data_dir = data_dir
        self.mode = mode
        # Get list of TIFF files
        tiffs = [f for f in torchseg.utils.listdir(self.data_dir) \
            if '.tif' in f]
        # Get list of segmentation files
        segs = [f for f in tiffs if f.endswith('_masks.tif')]
        # Get list of image files (we ignore images without segmentation)
        imgs = [f.replace('_masks.tif', '.tif') for f in segs]
        for im in imgs:
            assert(os.path.isfile(os.path.join(data_dir, im)))
        # Build list of dictionaries, where each dictionary
        # corresponds to an input image
        data = [{
            'image': os.path.join(data_dir, im),
            'label': os.path.join(data_dir, seg)} \
            for im, seg in zip(imgs, segs)]
        # Create data augmentation transform
        self.image_loader = OdsiDbDataLoader.LoadImage(
            keys=['image', 'label'], mode=self.mode)
        #keys=['image', 'label'], mode=self.mode, training=training)
        trsfm = torchvision.transforms.Compose([
            self.image_loader,
            monai.transforms.ToTensord(keys=['image', 'label']),
        ])
        # Call PersistentDataset constructor
        # NOTE(review): recent MONAI versions require a cache_dir
        # argument for PersistentDataset — confirm the pinned version
        # accepts this two-argument call.
        super().__init__(data, trsfm)
    #@property
    #def training(self):
    #    return self.image_loader.training
    #@training.setter
    #def training(self, training):
    #    self.image_loader.training = training
"""
@class OdsiDbLoader loads data from the ODSI-DB dataset.
@details Link to the dataset: https://cs.uef.fi/pub/color/spectra/ODSI-DB
"""
def __init__(self, data_dir, batch_size, mode='rgbimage', shuffle=True,
validation_split=0.0, num_workers=1, ignore_labels=[]):
# validation_split=0.0, num_workers=1, training=True):
# Store parameters
self.data_dir = data_dir
self.mode = mode
self.ignore_labels = ignore_labels
# Create dataset handler
self.dataset = OdsiDbDataLoader.OdsiDbDataset(self.data_dir,
mode=self.mode)
# mode=self.mode, training=training)
# Call the constructor of torchseg.base.BaseDataLoader
super().__init__(self.dataset, batch_size, shuffle, validation_split,
num_workers, collate_fn=self.collate_fn)
# num_workers, collate_fn=OdsiDbDataLoader.collate_fn)
def _split_sampler(self, split):
    """
    @brief Split the list of inputs between training and validation.
    @details Side effects: reseeds the global numpy/random RNGs for
             reproducibility, disables self.shuffle (mutually exclusive
             with samplers) and sets self.n_samples to the size of the
             training split.
    @param[in] split Ratio between 0.0 and 1.0 that will correspond to
                     validation, or an absolute number of validation
                     samples (converted to a ratio).
    @returns a tuple (train_sampler, valid_sampler) of
             SubsetRandomSampler objects, or (None, None) when split is
             0.0.
    """
    if split == 0.0:
        return None, None
    if isinstance(split, int):
        assert(split > 0 and split < self.n_samples)
        split = float(split) / self.n_samples
    # Fix seeds for reproducibility
    np.random.seed(0)
    random.seed(0)
    # Make a list of the indices of all the images
    remaining = np.arange(self.n_samples).tolist()
    # Get list of labels to check the classes that are present in each image
    labels = [os.path.join(self.data_dir, im_dic['label']) \
        for im_dic in self.dataset.data]
    # Make a dictionary from each class to the indices of the images that
    # contain pixels of such class
    n_classes = len(OdsiDbDataLoader.OdsiDbDataset.classnames)
    class2img = {c: [] for c in range(n_classes)}
    for im_idx in remaining:
        label_path = labels[im_idx]
        label = OdsiDbDataLoader.LoadImage.read_label(label_path).sum(axis=(1,
            2)).astype(np.int64)
        for c in label.nonzero()[0].tolist():
            class2img[c].append(im_idx)
    # Put an image of each class in the training split, unless we are in
    # testing mode, in which case self.validation split should be equal to 1.0
    train_idx = []
    if self.validation_split < 1.0:
        for c in class2img:
            random.shuffle(class2img[c])
            im_idx = class2img[c][0]
            if im_idx in remaining:
                train_idx.append(im_idx)
                remaining.remove(im_idx)
    # Split the rest of the images according to the given probability
    valid_idx = []
    while remaining:
        im_idx = remaining.pop()
        # Bernoulli draw: probability (1 - split) of joining training
        if np.random.binomial(1, 1 - split):
            train_idx.append(im_idx)
        else:
            valid_idx.append(im_idx)
    train_idx = np.asarray(train_idx)
    valid_idx = np.asarray(valid_idx)
    # Create data samplers for each split
    train_sampler = torch.utils.data.sampler.SubsetRandomSampler(train_idx)
    valid_sampler = torch.utils.data.sampler.SubsetRandomSampler(valid_idx)
    # Turn off shuffle option which is mutually exclusive with sampler
    self.shuffle = False
    self.n_samples = len(train_idx)
    return train_sampler, valid_sampler
    #@staticmethod
    def collate_fn(self, batch, max_h=256, max_w=256):
        """
        @brief This function hijacks the batch and crops all the elements of the
               batch randomly so that they are the same size. The minimum width
               and height of the images in the batch is used as crop size.
        @details Each batch element is a dict with at least the keys 'image'
                 and 'label' (both tensors of shape (C, H, W)) and optionally
                 'mode'. The crop is re-drawn until it contains at least one
                 annotated pixel (a pixel whose label channels sum to 1).
        @param[in] batch  List of dictionaries. Each dictionary represents an
                          input image.
        @param[in] max_h  Maximum height for each image in the batch. If an image
                          has a larger height it will be cropped. Only applied
                          for the 'rgbpixel' and 'spixel_170' modes.
        @param[in] max_w  Maximum width for each image in the batch. If an image
                          is larger it will be cropped. Only applied for the
                          'rgbpixel' and 'spixel_170' modes.
        @returns the default-collated batch of same-sized crops.
        """
        # Old per-pixel batching strategy, kept for reference but disabled:
        # If we are in pixel mode we crop a region of 10 * bs x 10 * bs pixels,
        # where bs is the batch size
        #if 'mode' in batch[0] and batch[0]['mode'] == 'spixel':
        # Reshape the batch, one pixel per batch element
        #bands = batch[0]['image'].shape[0]
        #classes = batch[0]['label'].shape[0]
        #pixels_per_im = min_w * min_h
        #new_batch = []
        #for item in batch:
        #    im = item['image'].reshape((bands, -1)).permute(1,
        #        0).reshape((pixels_per_im, bands, 1, 1)).unbind()
        #    label = item['label'].reshape((classes, -1)).permute(1,
        #        0).reshape((pixels_per_im, classes, 1, 1)).unbind()
        #    new_batch += [{'image': pix, 'label': pix_label} \
        #        for pix, pix_label in zip(im, label)]
        #batch = new_batch
        # NOTE(review): the triple-quoted block below is a string expression,
        # i.e. dead code kept around as documentation of the 'spixel' path.
        '''
        if 'mode' in batch[0] and batch[0]['mode'] == 'spixel':
            # Find minimum number of pixels
            min_pixels = np.inf
            for item in batch:
                if item['image'].shape[0] < min_pixels:
                    min_pixels = item['image'].shape[0]
            # Reduce the number of pixels for performance reasons
            #max_npixels = 128
            #min_pixels = min(min_pixels, max_npixels)
            # Crop number of pixels so that all the elements in the batch have
            # the same
            new_batch = []
            for item in batch:
                npixels = item['image'].shape[0]
                diff = npixels - min_pixels
                if diff > 0:
                    offset = np.random.randint(0, diff)
                    item['image'] = item['image'][offset:offset + min_pixels, :]
                    item['label'] = item['label'][offset:offset + min_pixels, :]
                # Reshape the batch, we want a batch of pixels
                pixels = item['image'].unbind()
                labels = item['label'].unbind()
                new_batch += [{'image': pix, 'label': pixl} \
                    for pix, pixl in zip(pixels, labels)]
            # Use batch of pixels
            batch = new_batch
        else:
        '''
        # Find minimum dimensions across the batch; these become the crop size
        min_h = np.inf
        min_w = np.inf
        for item in batch:
            if item['image'].shape[1] < min_h:
                min_h = item['image'].shape[1]
            if item['image'].shape[2] < min_w:
                min_w = item['image'].shape[2]
        # Crop image for the 'rgbpixel' and 'spixel' cases due to memory and
        # speed restrictions
        #if self.training:
        mode = batch[0].get('mode')
        if mode in ['rgbpixel', 'spixel_170']:
            min_h = min(min_h, max_h)
            min_w = min(min_w, max_w)
        # Crop image and labels. Crops are retried until one contains at
        # least one annotated pixel (label channels summing to exactly 1).
        # NOTE(review): if an image has no annotated pixels at all, this
        # while-loop never terminates — TODO confirm all inputs have labels.
        for item in batch:
            good_crop = False
            im = None
            label = None
            while not good_crop:
                # Try a crop
                im, label = OdsiDbDataLoader.random_crop(item['image'], item['label'], min_h, min_w)
                #item['image'], item['label'] = OdsiDbDataLoader.random_crop(item['image'], item['label'], min_h, min_w)
                # Does it have annotated pixels?
                ann_idx = torch.sum(label, dim=0) == 1
                num_valid_pixels = ann_idx.sum()
                if num_valid_pixels > 0:
                    good_crop = True
            item['image'] = im
            item['label'] = label
        return torch.utils.data.dataloader.default_collate(batch)
@staticmethod
def random_crop(image, label, crop_h, crop_w):
"""
@param[in] image Torch tensor, shape (C, H, W).
@param[in] label Torch tensor, shape (C, H, W).
@param[in] crop_h Crop height.
@param[in] crop_w Crop width.
"""
# Get image dimensions
h = image.shape[1]
w = image.shape[2]
assert(h >= crop_h)
assert(w >= crop_w)
# Compute random crop location
if crop_h < h:
off_row = np.random.randint(0, h - crop_h, size=1)[0]
else:
off_row = 0
if crop_w < w:
off_col = np.random.randint(0, w - crop_w, size=1)[0]
else:
off_col = 0
# Crop image and label
new_image = image[:, off_row:off_row + crop_h, off_col:off_col + crop_w]
new_label = label[:, off_row:off_row + crop_h, off_col:off_col + crop_w]
return new_image, new_label
| 68,696 | 49.773836 | 120 | py |
segodsidb | segodsidb-main/src/config/parser.py | """
@brief Module that contains the ConfigParser class. It is a lightweight module
to read the configuration file and the command line parameters and
combine them into a single place.
@details The code in this module was inspired by:
https://github.com/victoresque/pytorch-template
@author Luis Carlos Garcia Peraza Herrera (luiscarlos.gph@gmail.com).
@date 1 Jun 2021.
"""
import os
import logging
import pathlib
import functools
import operator
import datetime
import json
# My imports
import torchseg.logger
import torchseg.utils
class ConfigParser:
    """
    @class ConfigParser merges the JSON configuration file with command-line
           modifications, creates the per-run checkpoint and log directories,
           and configures the logging module.
    """
    def __init__(self, config, resume=None, modification=None, run_id=None):
        """
        @brief Store the (possibly modified) configuration and prepare the
               output directories for this run.
        @param[in] config        Dict containing configurations, hyperparameters
                                 for training. Contents of `config.json` file
                                 for example.
        @param[in] resume        String, path to the checkpoint being loaded.
        @param[in] modification  Dict keychain:value, specifying position values
                                 to be replaced from config dict. Keychains are
                                 ';'-separated paths into the nested config.
        @param[in] run_id        Unique Identifier for training processes. Used
                                 to save checkpoints and training log. Timestamp
                                 is being used as default.
        """
        # Load config file and apply modification
        self._config = _update_config(config, modification)
        self.resume = resume
        # Set save_dir where trained model and log will be saved.
        save_dir = pathlib.Path(self.config['machine']['args']['save_dir'])
        exper_name = self.config['name']
        # If no ID is specified, timestamp is used as default run-id
        if run_id is None:
            run_id = datetime.datetime.now().strftime(r'%d%m_%H%M%S')
        self._save_dir = save_dir / 'models' / exper_name / run_id
        self._log_dir = save_dir / 'log' / exper_name / run_id
        # Make directory for saving checkpoints and log. Only an explicitly
        # empty run_id may reuse an existing directory.
        exist_ok = run_id == ''
        self.save_dir.mkdir(parents=True, exist_ok=exist_ok)
        self.log_dir.mkdir(parents=True, exist_ok=exist_ok)
        # Save updated config file to the checkpoint dir so the run can be
        # reproduced or resumed later
        torchseg.utils.write_json(self.config, self.save_dir / 'config.json')
        # Configure logging module
        torchseg.logger.LoggerSetup(self.log_dir, self.config['logconf'])
        # Map of CLI verbosity levels (0-2) to standard logging levels
        self.log_levels = {
            0: logging.WARNING,
            1: logging.INFO,
            2: logging.DEBUG
        }
    @classmethod
    def from_args(cls, args, options=''):
        """
        @brief Initialize this class from some CLI arguments. Used in training
               and testing.
        @param[in] args     An argparse parser (or an already-parsed tuple).
        @param[in] options  Iterable of custom option descriptors, each with
                            .flags, .type and .target attributes.
        @returns a ConfigParser instance built from the parsed arguments.
        """
        # Register the caller-supplied custom options on the parser
        for opt in options:
            args.add_argument(*opt.flags, default=None, type=opt.type)
        if not isinstance(args, tuple):
            args = args.parse_args()
        # Select CUDA device (-d option)
        if args.device is not None:
            os.environ["CUDA_VISIBLE_DEVICES"] = args.device
        # Setup environment for resuming train/test (-r option): the config
        # is taken from the folder that contains the checkpoint
        if args.resume is not None:
            resume = pathlib.Path(args.resume)
            cfg_fname = resume.parent / 'config.json'
        else:
            msg_no_cfg = "Configuration file need to be specified. \
                Add '-c config.json', for example."
            assert args.conf is not None, msg_no_cfg
            resume = None
            cfg_fname = pathlib.Path(args.conf)
        # Setup config (-c option)
        config = torchseg.utils.read_json(cfg_fname)
        if args.conf and resume:
            # Update new config for fine-tuning
            config.update(torchseg.utils.read_json(args.conf))
        # Setup log config (-l option)
        if args.logconf:
            config['logconf'] = args.logconf
        else:
            config['logconf'] = ''
        # Parse custom CLI options into dictionary
        modification = {
            opt.target : getattr(args, _get_opt_name(opt.flags)) \
            for opt in options
        }
        return cls(config, resume, modification)
    def init_obj(self, name, module, *args, **kwargs):
        """
        @brief Finds a function handle with the name given as 'type' in config,
               and returns the instance initialized with corresponding arguments
               given.
        @details `object = config.init_obj('name', module, a, b=1)` is equivalent
                 to `object = module.name(a, b=1)`
        """
        module_name = self[name]['type']
        module_args = dict(self[name]['args'])
        assert(all([k not in module_args for k in kwargs]))
        # 'Overwriting kwargs given in config file is not allowed'
        module_args.update(kwargs)
        return getattr(module, module_name)(*args, **module_args)
    def init_ftn(self, name, module, *args, **kwargs):
        """
        @brief Finds a function handle with the name given as 'type' in config,
               and returns the function with given arguments fixed with
               functools.partial.
        @details `function = config.init_ftn('name', module, a, b=1)`
                 is equivalent to
                 `function = lambda *args, **kwargs:
                     module.name(a, *args, b=1, **kwargs)`.
        """
        module_name = self[name]['type']
        module_args = dict(self[name]['args'])
        assert(all([k not in module_args for k in kwargs]))
        # 'Overwriting kwargs given in config file is not allowed'
        module_args.update(kwargs)
        return functools.partial(getattr(module, module_name), *args,
            **module_args)
    def __getitem__(self, name):
        """@brief Access items like ordinary dict."""
        return self.config[name]
    def __str__(self):
        """@brief Pretty-printed JSON representation of the configuration."""
        return json.dumps(self.config, sort_keys=True, indent=4)
    def get_logger(self, name, verbosity=2):
        """
        @brief Get a named logger set to the requested verbosity level.
        @param[in] name       Logger name.
        @param[in] verbosity  One of the keys of self.log_levels (0-2).
        """
        msg_verbosity = 'verbosity option {} is invalid. \
            Valid options are {}.'.format(verbosity, self.log_levels.keys())
        assert verbosity in self.log_levels, msg_verbosity
        logger = logging.getLogger(name)
        logger.setLevel(self.log_levels[verbosity])
        return logger
    # Setting read-only attributes
    @property
    def config(self):
        return self._config
    @property
    def save_dir(self):
        return self._save_dir
    @property
    def log_dir(self):
        return self._log_dir
# Helper functions to update config dict with custom CLI options
def _update_config(config, modification):
if modification is None:
return config
for k, v in modification.items():
if v is not None:
_set_by_path(config, k, v)
return config
def _get_opt_name(flags):
for flg in flags:
if flg.startswith('--'):
return flg.replace('--', '').replace('-', '_')
return flags[0].replace('--', '').replace('-', '_')
def _set_by_path(tree, keys, value):
"""@brief Set a value in a nested object in tree by sequence of keys."""
keys = keys.split(';')
_get_by_path(tree, keys[:-1])[keys[-1]] = value
def _get_by_path(tree, keys):
"""@brief Access a nested object in tree by sequence of keys."""
return functools.reduce(operator.getitem, keys, tree)
| 7,569 | 35.570048 | 82 | py |
segodsidb | segodsidb-main/src/logger/logger.py | """
@brief Module to setup and maintain the logging abilities of the training and
validation scripts.
@author Luis Carlos Garcia Peraza Herrera (luiscarlos.gph@gmail.com).
@date 1 Jun 2021.
"""
import logging
import logging.config
import pathlib
import torchseg.utils
class LoggerSetup():
    """
    @class LoggerSetup configures the standard logging module from a JSON
           dictConfig file, redirecting file handlers into the run's save
           directory. Falls back to basicConfig when the file is missing.
    """
    def __init__(self, save_dir, log_config, default_level=logging.INFO):
        """
        @brief Setup logging configuration.
        @param[in] save_dir       Directory (pathlib.Path) for log files.
        @param[in] log_config     Path to the JSON logging configuration.
        @param[in] default_level  Level used when the config file is absent.
        """
        cfg_path = pathlib.Path(log_config)
        if not cfg_path.is_file():
            # No configuration file: warn and use a sensible default
            print("Warning: logging configuration file is not found \
                in {}.".format(cfg_path))
            logging.basicConfig(level=default_level)
            return
        config = torchseg.utils.read_json(cfg_path)
        # Redirect every file handler into the run's save directory
        for _, handler in config['handlers'].items():
            if 'filename' in handler:
                handler['filename'] = str(save_dir / handler['filename'])
        logging.config.dictConfig(config)
| 1,055 | 33.064516 | 79 | py |
segodsidb | segodsidb-main/src/visualization/visualization.py | """
@brief Module with classes for displaying, drawing and plotting information.
@author Luis Carlos Garcia Peraza Herrera (luiscarlos.gph@gmail.com).
@date 2 Jun 2021.
"""
import numpy as np
import importlib
import datetime
class TensorboardWriter():
    """
    @class TensorboardWriter wraps a SummaryWriter (torch.utils.tensorboard or
           tensorboardX) so that every datapoint is automatically tagged with
           the current step and train/valid mode. When disabled, or when no
           tensorboard backend is installed, every add_* call is a no-op.
    """
    def __init__(self, log_dir, logger, enabled):
        """
        @param[in] log_dir  Directory where tensorboard event files are saved.
        @param[in] logger   Logger used to report a missing backend.
        @param[in] enabled  If False, the writer degrades to a no-op.
        """
        self.writer = None
        self.selected_module = ""
        # Debug traces (kept from the original implementation)
        print('TensorboardWriter.log_dir:', log_dir)
        print('TensorboardWriter.logger:', logger)
        print('TensorboardWriter.enabled:', enabled)
        if enabled:
            log_dir = str(log_dir)
            # Retrieve vizualization writer: try the native PyTorch backend
            # first, then tensorboardX
            succeeded = False
            for module in ["torch.utils.tensorboard", "tensorboardX"]:
                try:
                    self.writer = importlib.import_module(module).SummaryWriter(log_dir)
                    succeeded = True
                    print('TensorboardWriter:', module)
                    break
                except ImportError:
                    succeeded = False
                # NOTE: only reached on ImportError (break skips it), so this
                # records the last module that failed to import
                self.selected_module = module
            if not succeeded:
                message = "Warning: visualization (Tensorboard) is " \
                    "configured to use, but currently not installed on " \
                    "this machine. Please install TensorboardX with " \
                    "pip install tensorboardx, upgrade PyTorch to " \
                    "version >= 1.1 to use torch.utils.tensorboard or " \
                    "turn off the option in the 'config.json' file."
                logger.warning(message)
        self.step = 0
        self.mode = ''
        # SummaryWriter methods intercepted by __getattr__ and wrapped so
        # they carry the current step and mode tag
        self.tb_writer_ftns = {
            'add_scalar', 'add_scalars', 'add_image', 'add_images', 'add_audio',
            'add_text', 'add_histogram', 'add_pr_curve', 'add_embedding'
        }
        # Methods whose tag must NOT be suffixed with the mode
        self.tag_mode_exceptions = {'add_histogram', 'add_embedding'}
        self.timer = datetime.datetime.now()
    def set_step(self, step, mode='train'):
        """
        @brief Set the global step and mode; also logs steps/sec since the
               previous call (except at step 0, which resets the timer).
        """
        self.mode = mode
        self.step = step
        if step == 0:
            self.timer = datetime.datetime.now()
        else:
            duration = datetime.datetime.now() - self.timer
            self.add_scalar('steps_per_sec', 1 / duration.total_seconds())
            self.timer = datetime.datetime.now()
    def __getattr__(self, name):
        """
        If visualization is configured to use:
            return add_data() methods of tensorboard with additional
            information (step, tag) added.
        Otherwise:
            return a blank function handle that does nothing
        """
        if name in self.tb_writer_ftns:
            add_data = getattr(self.writer, name, None)
            def wrapper(tag, data, *args, **kwargs):
                if add_data is not None:
                    # add mode(train/valid) tag
                    if name not in self.tag_mode_exceptions:
                        tag = '{}/{}'.format(tag, self.mode)
                    add_data(tag, data, self.step, *args, **kwargs)
            return wrapper
        else:
            # default action for returning methods defined in this class,
            # set_step() for instance.
            # BUGFIX: the original called object.__getattr__(name); 'object'
            # has no __getattr__ attribute, so the lookup itself raised
            # AttributeError. Use __getattribute__ bound to self so a real
            # default lookup is attempted before reporting the error.
            try:
                attr = object.__getattribute__(self, name)
            except AttributeError:
                raise AttributeError("type object '{}' has no attribute '{}'".format(self.selected_module, name))
            return attr
| 3,424 | 35.827957 | 113 | py |
segodsidb | segodsidb-main/src/base/base_model.py | """
@brief Base class for deep learning models.
@author Luis Carlos Garcia Peraza Herrera (luiscarlos.gph@gmail.com).
@date 1 Jun 2021.
"""
import torch
import numpy as np
import abc
class BaseModel(torch.nn.Module):
    """
    @brief Base class for all models.
    """
    def from_file():
        # TODO: load a serialized model from disk, e.g.:
        #   model = TheModelClass(*args, **kwargs)
        #   model.load_state_dict(torch.load(PATH))
        #   model.eval()
        pass
    @abc.abstractmethod
    def forward(self, *inputs):
        """
        @brief Forward pass implementation.
        @returns The model output.
        """
        raise NotImplementedError
    def __str__(self):
        """
        @brief Model description followed by the trainable parameter count.
        """
        trainable = (p for p in self.parameters() if p.requires_grad)
        n_params = sum(np.prod(p.size()) for p in trainable)
        return super().__str__() + '\nTrainable parameters: {}'.format(n_params)
| 977 | 24.736842 | 79 | py |
segodsidb | segodsidb-main/src/base/base_data_loader.py | """
@brief Module with the definition of the base data loader.
@author Luis Carlos Garcia Peraza Herrera (luiscarlos.gph@gmail.com).
@date 1 Jun 2021.
"""
import numpy as np
import torch
class BaseDataLoader(torch.utils.data.DataLoader):
    """
    @brief Base class for all data loaders. Optionally splits the dataset into
           a training sampler (driving this loader) and a validation sampler
           exposed through split_validation().
    """
    def __init__(self, dataset, batch_size, shuffle, validation_split,
                 num_workers, collate_fn=torch.utils.data.dataloader.default_collate):
        """
        @param[in] dataset           Dataset to iterate over.
        @param[in] batch_size        Training and validation batch size.
        @param[in] shuffle           Shuffle flag; forced to False whenever a
                                     validation split is requested, because
                                     samplers and shuffling are mutually
                                     exclusive in torch's DataLoader.
        @param[in] validation_split  Fraction (float) or absolute number (int)
                                     of samples reserved for validation;
                                     0 disables the split.
        @param[in] num_workers       Number of data-loading subprocesses.
        @param[in] collate_fn        Function that merges a list of samples to
                                     form a mini-batch of tensor[s].
        """
        self.shuffle = shuffle
        self.validation_split = validation_split
        self.batch_idx = 0
        self.n_samples = len(dataset)
        # NOTE: _split_sampler() may flip self.shuffle to False and shrink
        # self.n_samples, so it must run before init_kwargs is captured
        self.sampler, self.valid_sampler = \
            self._split_sampler(self.validation_split)
        self.init_kwargs = {
            'dataset': dataset,
            'batch_size': batch_size,
            'shuffle': self.shuffle,
            'collate_fn': collate_fn,
            'num_workers': num_workers
        }
        super().__init__(sampler=self.sampler, **self.init_kwargs)
    def _split_sampler(self, split):
        """
        @brief Build (train_sampler, valid_sampler); (None, None) if split==0.
        """
        if split == 0.0:
            return None, None
        # Deterministic shuffle of all dataset indices
        all_idx = np.arange(self.n_samples)
        np.random.seed(0)
        np.random.shuffle(all_idx)
        # Interpret the split either as an absolute count or as a fraction
        if isinstance(split, int):
            assert(0 < split < self.n_samples)
            len_valid = split
        else:
            len_valid = int(self.n_samples * split)
        valid_idx = all_idx[:len_valid]
        train_idx = all_idx[len_valid:]
        # Turn off shuffle option which is mutually exclusive with sampler
        self.shuffle = False
        self.n_samples = len(train_idx)
        return (torch.utils.data.sampler.SubsetRandomSampler(train_idx),
                torch.utils.data.sampler.SubsetRandomSampler(valid_idx))
    def split_validation(self):
        """
        @brief DataLoader over the validation subset, or None if no split.
        """
        if self.valid_sampler is None:
            return None
        return torch.utils.data.DataLoader(sampler=self.valid_sampler,
                                           **self.init_kwargs)
| 3,185 | 32.893617 | 82 | py |
segodsidb | segodsidb-main/src/base/base_machine.py | """
@brief Module with base learning machine. This class is meant to control the
training and validation processes for a particular problem or task.
@author Luis Carlos Garcia Peraza Herrera (luiscarlos.gph@gmail.com).
@date 2 Jun 2021.
"""
import torch
import abc
import numpy as np
# My imports
import torchseg.visualization
class BaseMachine:
    """
    @class BaseMachine is an interface class, parent of all the convolutional
           learning machines. It drives the full training loop, metric
           monitoring, early stopping, and checkpoint save/resume; subclasses
           implement the per-epoch logic in _train_epoch().
    """
    def __init__(self, model, criterion, metric_ftns, optimizer, config):
        """
        @param[in] model        Model to be trained.
        @param[in] criterion    Loss function.
        @param[in] metric_ftns  Iterable of metric functions to evaluate.
        @param[in] optimizer    Optimizer instance.
        @param[in] config       ConfigParser-like object; its
                                config['machine']['args'] section supplies
                                epochs, save_period, monitor, early_stop,
                                verbosity and tensorboard settings.
        """
        self.config = config
        self.logger = config.get_logger('machine', config['machine']['args']['verbosity'])
        self.model = model
        self.criterion = criterion
        self.metric_ftns = metric_ftns
        self.optimizer = optimizer
        cfg_machine = config['machine']['args']
        self.epochs = cfg_machine['epochs']
        self.save_period = cfg_machine['save_period']
        self.monitor = cfg_machine.get('monitor', 'off')
        # Configuration to monitor model performance and save best.
        # 'monitor' is either 'off' or a string like 'min val_loss'.
        if self.monitor == 'off':
            self.mnt_mode = 'off'
            self.mnt_best = 0
        else:
            self.mnt_mode, self.mnt_metric = self.monitor.split()
            assert self.mnt_mode in ['min', 'max']
            self.mnt_best = np.inf if self.mnt_mode == 'min' else -np.inf
            # NOTE(review): early_stop is only defined when monitoring is on;
            # train() only reads it inside the monitored branch, so this is
            # consistent, but keep it in mind when refactoring.
            self.early_stop = cfg_machine.get('early_stop', np.inf)
            if self.early_stop <= 0:
                self.early_stop = np.inf
        self.start_epoch = 1
        self.checkpoint_dir = config.save_dir
        # Setup visualization writer instance
        self.writer = torchseg.visualization.TensorboardWriter(config.log_dir,
            self.logger, cfg_machine['tensorboard'])
        if config.resume is not None:
            self._resume_checkpoint(config.resume)
    @abc.abstractmethod
    def _train_epoch(self, epoch):
        """
        @brief Training logic for an epoch.
        @param[in] epoch Current epoch number.
        """
        raise NotImplementedError
    def train(self):
        """
        @brief Full training logic: run _train_epoch() for every epoch, log
               the results, track the monitored metric, early-stop when it
               stops improving, and save periodic/best checkpoints.
        """
        not_improved_count = 0
        for epoch in range(self.start_epoch, self.epochs + 1):
            result = self._train_epoch(epoch)
            # Save logged informations into log dict
            log = {'epoch': epoch}
            log.update(result)
            # Print logged informations to the screen
            for key, value in log.items():
                self.logger.info('    {:15s}: {}'.format(str(key), value))
            # Evaluate model performance according to configured metric,
            # save best checkpoint as model_best
            best = False
            if self.mnt_mode != 'off':
                try:
                    # Check whether model performance improved or not,
                    # according to specified metric(mnt_metric)
                    improved = (self.mnt_mode == 'min' \
                        and log[self.mnt_metric] <= self.mnt_best) \
                        or (self.mnt_mode == 'max' \
                        and log[self.mnt_metric] >= self.mnt_best)
                except KeyError:
                    # Monitored metric missing from the epoch log: disable
                    # monitoring for the rest of the run
                    self.logger.warning("""Warning: Metric '{}' is not found.
                                           Model performance monitoring is
                                           disabled.""".format(self.mnt_metric))
                    self.mnt_mode = 'off'
                    improved = False
                if improved:
                    self.mnt_best = log[self.mnt_metric]
                    not_improved_count = 0
                    best = True
                else:
                    not_improved_count += 1
                if not_improved_count > self.early_stop:
                    self.logger.info('Validation performance did not improve ' \
                        + 'for ' + str(self.early_stop) \
                        + ' epochs. Training stops.')
                    break
            # NOTE(review): a 'best' epoch is only persisted when it falls on
            # a save_period boundary — confirm this is intentional
            if epoch % self.save_period == 0:
                self._save_checkpoint(epoch, save_best=best)
    def _save_checkpoint(self, epoch, save_best=False):
        """
        @brief Method to save checkpoints.
        @param[in] epoch      Current epoch number.
        @param[in] save_best  If True, also save a copy of the checkpoint as
                              'model_best.pth'.
        """
        arch = type(self.model).__name__
        state = {
            'arch': arch,
            'epoch': epoch,
            'state_dict': self.model.state_dict(),
            'optimizer': self.optimizer.state_dict(),
            'monitor_best': self.mnt_best,
            'config': self.config
        }
        filename = str(self.checkpoint_dir / 'checkpoint-epoch{}.pth'.format(
            epoch))
        torch.save(state, filename)
        self.logger.info("Saving checkpoint: {} ...".format(filename))
        if save_best:
            best_path = str(self.checkpoint_dir / 'model_best.pth')
            torch.save(state, best_path)
            self.logger.info("Saving current best: model_best.pth ...")
    def _resume_checkpoint(self, resume_path):
        """
        @brief Resume from saved checkpoints.
        @param[in] resume_path Checkpoint path to be resumed.
        """
        resume_path = str(resume_path)
        self.logger.info("Loading checkpoint: {} ...".format(resume_path))
        checkpoint = torch.load(resume_path)
        # Continue counting epochs from where the checkpoint left off
        self.start_epoch = checkpoint['epoch'] + 1
        self.mnt_best = checkpoint['monitor_best']
        # Load architecture params from checkpoint
        #if checkpoint['config']['arch'] != self.config['arch']:
        #    self.logger.warning('Warning: Architecture configuration given in \
        #        config file is different from that of \
        #        checkpoint. This may yield an exception while \
        #        state_dict is being loaded.')
        self.model.load_state_dict(checkpoint['state_dict'])
        # Load optimizer state from checkpoint only when optimizer type is not
        # changed
        if checkpoint['config']['optimizer']['type'] != \
            self.config['optimizer']['type']:
            self.logger.warning("""Warning: Optimizer type given in config file is
                different from that of checkpoint.
                Optimizer parameters not being resumed.""")
        else:
            self.optimizer.load_state_dict(checkpoint['optimizer'])
        self.logger.info('Checkpoint loaded. Resuming training from epoch ' \
            + str(self.start_epoch))
| 6,879 | 37.651685 | 90 | py |
segodsidb | segodsidb-main/src/utils/utils.py | """
@brief Collection of functions for general purpose use.
@author Luis Carlos Garcia Peraza Herrera (luiscarlos.gph@gmail.com).
@date 1 Jun 2021.
"""
import os
import re
import pathlib
import json
import collections
import torch
import pandas as pd
## ----- Functions ----- ##
def read_json(fname):
    """
    @brief Read a JSON file and return its contents as an OrderedDict
           (key order preserved as found in the file).
    """
    path = pathlib.Path(fname)
    with path.open('rt') as handle:
        return json.load(handle, object_hook=collections.OrderedDict)
def write_json(content, fname):
    """
    @brief Dump a dictionary into a JSON file with a 4-space indent, keeping
           the dictionary's key order (sort_keys=False).
    """
    path = pathlib.Path(fname)
    with path.open('wt') as handle:
        json.dump(content, handle, indent=4, sort_keys=False)
def setup_gpu_devices(n_gpu_use):
    """
    @brief If available, setup GPU devices. Otherwise, CPU will be used.
    @param[in] n_gpu_use Number of GPUs you want to use.
    @returns a tuple (torch.device, list) where the list has the IDs of the GPUs
             that will be used.
    """
    n_gpu = torch.cuda.device_count()
    # Requested GPUs on a machine that has none: warn and fall back to CPU
    if n_gpu_use > 0 and n_gpu == 0:
        print("""Warning: There\'s no GPU available on this machine,
            training will be performed on CPU.""")
        n_gpu_use = 0
    # Requested more GPUs than available: warn and clamp to what exists
    if n_gpu_use > n_gpu:
        print(f"""Warning: The number of GPU\'s configured to use is {n_gpu_use},
            but only {n_gpu} are available on this machine.""")
        n_gpu_use = n_gpu
    gpu_ids = list(range(n_gpu_use))
    device = torch.device('cuda:0') if n_gpu_use > 0 else torch.device('cpu')
    return device, gpu_ids
def natsort(l):
    """
    @brief Natural sort of a list, i.e. using alphabetic order.
    @param[in] l List to sort.
    @returns A new list sorted taking into account embedded numbers and not
             just their ASCII codes (case-insensitive).
    """
    def _tokenize(key):
        # Split into digit/non-digit runs; digits compare numerically,
        # text compares case-insensitively
        return [int(tok) if tok.isdigit() else tok.lower()
                for tok in re.split('([0-9]+)', str(key))]
    return sorted(l, key=_tokenize)
def listdir(path, hidden=False):
    """
    @brief Lists a directory in natural (alphanumeric) order, skipping hidden
           entries unless requested.
    @param[in] path    String containing the path (relative or absolute) to
                       the folder that you want to list.
    @param[in] hidden  Set to True if you want to list the hidden files too.
                       By default, it is set to False.
    @returns A naturally-sorted list of files and folders inside the given
             path.
    """
    entries = os.listdir(path)
    if not hidden:
        entries = [e for e in entries if not e.startswith('.')]
    return natsort(entries)
## ----- Classes ----- ##
class MetricTracker:
    """
    @class MetricTracker accumulates per-metric totals, sample counts and
           running averages, optionally forwarding every update to a
           Tensorboard writer.
    """
    def __init__(self, *keys, writer=None):
        self.writer = writer
        self._data = pd.DataFrame(index=keys,
                                  columns=['total', 'counts', 'average'])
        self.reset()
    def reset(self):
        """@brief Zero out all accumulated statistics."""
        for col in self._data.columns:
            self._data[col].values[:] = 0
    def update(self, key, value, n=1):
        """
        @brief Accumulate `value` (a batch-average over `n` samples) into
               metric `key` and refresh its running average.
        """
        if self.writer is not None:
            self.writer.add_scalar(key, value)
        self._data.loc[key, 'total'] += value * n
        self._data.loc[key, 'counts'] += n
        self._data.loc[key, 'average'] = \
            self._data.loc[key, 'total'] / self._data.loc[key, 'counts']
    def avg(self, key):
        """@brief Running average of metric `key`."""
        return self._data.loc[key, 'average']
    def result(self):
        """@brief Dict mapping every metric to its running average."""
        return self._data['average'].to_dict()
| 3,464 | 29.9375 | 82 | py |
segodsidb | segodsidb-main/src/model/loss.py | """
@brief Module that contains loss functions.
@author Luis Carlos Garcia Peraza Herrera (luiscarlos.gph@gmail.com).
@date 1 Jun 2021.
"""
import numpy as np
import sys
import torch
import monai.losses
def nll_loss(pred, gt):
    """
    @brief Negative log-likelihood loss (also called multi-class cross entropy).
    @param[in] pred (N,C) tensor of log-probabilities, C = number of classes.
    @param[in] gt   (N) tensor where each value is 0 <= targets[i] <= C - 1.
    @returns a scalar with the mean loss.
    """
    loss_fn = torch.nn.functional.nll_loss
    return loss_fn(pred, gt)
def focal_loss(pred, gt):
    """
    @brief Focal loss via MONAI (currently disabled: known to be broken).
    @param[in] pred Tensor of probability predictions, shape (B, C, H, W).
    @param[in] gt   Tensor of ground truth, shape (B, C, H, W).
    @raises NotImplementedError always, until the implementation is fixed.
    """
    # BUGFIX: the original executed 'raise NotImplemented()'. NotImplemented
    # is a sentinel value, not an exception class, so calling it raised a
    # confusing "TypeError: 'NotImplementedType' object is not callable"
    # instead of signalling that the function is unavailable.
    raise NotImplementedError('focal_loss is not working yet')
    # FIXME: unreachable until the implementation is validated
    fl = monai.losses.FocalLoss()
    return fl(pred, gt)
def odsi_db_pw_ce_logprob_loss(raw_pred, raw_gt):
    """
    @brief Pixel-wise cross entropy loss over annotated pixels only.
    @details Only pixels whose ground-truth channels sum to exactly 1 (i.e.
             properly one-hot annotated) contribute to each image's loss.
             Images whose loss evaluates to nan (no annotated pixels) are
             excluded from the final batch mean. Requires CUDA: the class
             weights are allocated with torch.cuda.FloatTensor.
    @param[in] raw_pred  Tensor of predicted log-probabilities,
                         shape (B, C, H, W).
    @param[in] raw_gt    Tensor of ground truth probabilities,
                         shape (B, C, H, W).
    @returns a scalar with the mean loss over the valid images of the batch.
    """
    # Class weights according to the number of pixels per class in ODSI-DB,
    # a higher number indicates less represented classes (35 classes)
    class_weights = torch.cuda.FloatTensor([
        0.9682632484506389, 0.9983340617620862, 0.9999394663490677,
        0.9995276328275359, 0.9998907852100988, 0.9188844950588891,
        0.9999902109476404, 0.9997049747997107, 0.9973278363209698,
        0.97715387049886, 0.9607777928694627, 0.9986612604245154,
        0.9996366990483456, 0.9999236850100192, 0.9352800022239143,
        0.9999932978832073, 0.9999784739894149, 0.986721340312427,
        0.9967532371059031, 0.9997563631976796, 0.99995392707397,
        0.8692997588756661, 0.860435994817646, 0.9992877927908862,
        0.9998345270474653, 0.9957902617780788, 0.9875275587410823,
        0.9997695198161124, 0.9879129304566572, 0.6864583697190043,
        0.9769124622329116, 0.9681097270561285, 0.9996792888545647,
        0.9326207970711948, 0.9999083493782449])
    # Flatten predictions and labels to (B, C, H*W)
    bs = raw_pred.shape[0]
    chan = raw_pred.shape[1]
    pred_log = torch.reshape(raw_pred, (bs, chan, -1))
    gt = torch.reshape(raw_gt, (bs, chan, -1))
    #npixels = pred_log.shape[2]
    # Loop over the batch (each image might have different number of annotated
    # pixels)
    ce_loss = torch.empty((bs))
    for i in range(bs):
        # Filter out those pixels without labels (one-hot channels sum to 1)
        ann_idx = torch.sum(gt[i, ...], dim=0) == 1
        num_valid_pixels = ann_idx.float().sum()
        # Convert prediction into nll_loss() preferred input shape (N, C)
        log_yhat = pred_log[i, :, ann_idx].permute(1, 0)
        # Convert vector of probabilities into target vector
        y = torch.argmax(gt[i, :, ann_idx], dim=0)
        # Compute pixel-wise multi-class (exclusive) cross-entropy loss
        ce_loss[i] = torch.nn.functional.nll_loss(log_yhat, y,
                                                  weight=class_weights)
    # NOTE: The cross entropy for some of the images might be nan, this happens
    # when an image does not have properly annotated pixels
    # Reduce the loss for the batch using the mean over non-nan images
    is_nan = torch.isnan(ce_loss)
    ce_loss[is_nan] = 0
    loss = ce_loss.sum() / (~is_nan).float().sum()
    return loss
def ResNet_18_CAM_DS_ce_loss(tuple_pred, raw_gt):
    """
    @brief Computes the cross entropy loss for all the images in the batch
           (taking into account the predictions at all resolution levels).
    @details This loss estimates how good is the network at estimating
             which classes are present in a given image.
             We treat this problem as many binary classification problems,
             each class versus all the others. Requires CUDA: the ground-truth
             presence vectors are allocated with torch.cuda.FloatTensor.
    @param[in] tuple_pred  Tuple of six elements, the first one is a tensor
                           containing the predicted class presence
                           log-probabilities, shape (R, B, K, 1, 1).
                           * B is the batch size.
                           * R will be equivalent to the number of resolution
                             levels + 1 (the global prediction).
                           * K is the number of classes.
                           The rest of the elements of the tuple are the
                           pseudo-segmentation predictions at different
                           resolutions (unused here).
    @param[in] raw_gt      Tensor of ground truth probabilities,
                           shape (B, C, H, W).
    @returns a scalar with the mean loss over the non-nan images of the batch.
    """
    # Get the classification predictions, we do not use the
    # pseudo-segmentations to calculate the loss
    raw_pred = tuple_pred[0]
    # Get dimensions
    bs = raw_pred.shape[1]
    classes = raw_gt.shape[1]
    rl = raw_pred.shape[0] # Resolution levels + 1 (the global loss)
    # Loop over the images of the batch
    ce_loss = torch.cuda.FloatTensor(bs).fill_(0)
    for i in range(bs):
        # Build ground truth vector of class-presence probabilities from the
        # segmentation ground truth that ODSI-DB provides: a class is present
        # if any of its pixels is labelled 1
        gt = torch.cuda.FloatTensor(classes).fill_(0)
        for j in range(classes):
            gt[j] = 1 if 1 in raw_gt[i, j] else 0
        # Compute the losses at all resolution levels + global
        for j in range(rl):
            # Build vector of class-presence log-probability predictions
            pred = torch.flatten(raw_pred[j, i])
            # Per-class binary cross-entropy from log-probabilities:
            # -(y*log(p) + (1-y)*log(1-p)), with pred = log(p)
            ce_loss_per_class = - (gt * pred + (1 - gt) * torch.log(1 - torch.exp(pred)))
            # Mean over classes
            ce_loss[i] += ce_loss_per_class.mean()
    # Reduce the loss for the batch using the mean over non-nan images
    is_nan = torch.isnan(ce_loss)
    ce_loss[is_nan] = 0
    loss = ce_loss.sum() / (~is_nan).float().sum()
    return loss
if __name__ == '__main__':
    # Guard against accidental direct execution: this module only defines
    # loss functions and has no script entry point.
    # BUGFIX: removed the duplicated words in the original message
    # ("is not is not a script").
    raise RuntimeError('The loss.py module is not a script.')
| 6,327 | 37.120482 | 89 | py |
segodsidb | segodsidb-main/src/model/model.py | """
@brief Collection of deep learning models.
@author Luis C. Garcia Peraza Herrera (luiscarlos.gph@gmail.com).
@date 4 Jun 2021.
"""
import torch
import torch.nn.functional as F
import torch.utils.model_zoo
import numpy as np
import monai.networks
import torchvision.models
import collections
import math
import unet
# My imports
import torchseg.base
# PyTorch pretrained model URLs: official torchvision checkpoints
# (COCO-pretrained DeepLabV3 variants), downloadable via
# torch.utils.model_zoo / torch.hub
model_urls = {
    "deeplabv3_resnet50_coco":
        "https://download.pytorch.org/models/deeplabv3_resnet50_coco-cd0a2569.pth",
    "deeplabv3_resnet101_coco":
        "https://download.pytorch.org/models/deeplabv3_resnet101_coco-586e9e4e.pth",
    "deeplabv3_mobilenet_v3_large_coco":
        "https://download.pytorch.org/models/deeplabv3_mobilenet_v3_large-fc3c493d.pth",
}
class MnistModel(torchseg.base.BaseModel):
    """
    @brief Small LeNet-style CNN for MNIST digit classification: two conv
           stages followed by two fully-connected layers, returning per-class
           log-probabilities.
    """
    def __init__(self, num_classes=10):
        """
        @param[in] num_classes Number of output classes (10 by default).
        """
        super().__init__()
        self.conv1 = torch.nn.Conv2d(1, 10, kernel_size=5)
        self.conv2 = torch.nn.Conv2d(10, 20, kernel_size=5)
        self.conv2_drop = torch.nn.Dropout2d()
        self.fc1 = torch.nn.Linear(320, 50)
        self.fc2 = torch.nn.Linear(50, num_classes)
    def forward(self, x):
        """
        @brief Map a batch of (B, 1, 28, 28) images to per-class
               log-probabilities of shape (B, num_classes).
        """
        # Two conv + max-pool + ReLU stages (ReLU and max-pool commute)
        feat = F.relu(F.max_pool2d(self.conv1(x), 2))
        feat = F.relu(F.max_pool2d(self.conv2_drop(self.conv2(feat)), 2))
        # Flatten to (B, 320) and classify with two fully-connected layers
        flat = feat.view(-1, 320)
        hidden = F.relu(self.fc1(flat))
        hidden = F.dropout(hidden, training=self.training)
        logits = self.fc2(hidden)
        return F.log_softmax(logits, dim=1)
class SimpleUnet(torchseg.base.BaseModel):
    """
    @brief Cut-down implementation of a small U-Net. This network is here for
           learning and debugging purposes.
    @details Three encoder levels (32, 64, 128 channels) and a symmetric
             decoder with skip connections; returns per-pixel
             log-probabilities.
    """
    def __init__(self, in_channels=3, out_channels=35):
        """
        @param[in] in_channels  Number of input channels, e.g. set it to
                                three for RGB images.
        @param[in] out_channels Number of classes.
        """
        super().__init__()
        # Encoder (each block halves the spatial resolution)
        self.conv1 = self.down_block(in_channels, 32, 7, 3)
        self.conv2 = self.down_block(32, 64, 3, 1)
        self.conv3 = self.down_block(64, 128, 3, 1)
        # Decoder (skip connections double the input channel count)
        self.upconv3 = self.up_block(128, 64, 3, 1)
        self.upconv2 = self.up_block(64 * 2, 32, 3, 1)
        self.upconv1 = self.up_block(32 * 2, out_channels, 3, 1)
    def down_block(self, in_channels, out_channels, kernel_size, padding):
        """
        @brief Two conv+BN+ReLU stages followed by a stride-2 max-pooling.
        """
        layers = [
            torch.nn.Conv2d(in_channels, out_channels, kernel_size, stride=1,
                            padding=padding),
            torch.nn.BatchNorm2d(out_channels),
            torch.nn.ReLU(),
            torch.nn.Conv2d(out_channels, out_channels, kernel_size, stride=1,
                            padding=padding),
            torch.nn.BatchNorm2d(out_channels),
            torch.nn.ReLU(),
            torch.nn.MaxPool2d(kernel_size=3, stride=2, padding=1),
        ]
        return torch.nn.Sequential(*layers)
    def up_block(self, in_channels, out_channels, kernel_size, padding):
        """
        @brief Two conv+BN+ReLU stages followed by a stride-2 up-convolution.
        """
        layers = [
            torch.nn.Conv2d(in_channels, out_channels, kernel_size, stride=1,
                            padding=padding),
            torch.nn.BatchNorm2d(out_channels),
            torch.nn.ReLU(),
            torch.nn.Conv2d(out_channels, out_channels, kernel_size, stride=1,
                            padding=padding),
            torch.nn.BatchNorm2d(out_channels),
            torch.nn.ReLU(),
            torch.nn.ConvTranspose2d(out_channels, out_channels, kernel_size=3,
                                     stride=2, padding=1, output_padding=1),
        ]
        return torch.nn.Sequential(*layers)
    def crop(self, x, ref):
        """
        @brief Adjust the spatial size of x to match ref using F.pad
               (negative pad values crop rows/columns).
        """
        if x.shape == ref.shape:
            return x
        diff_h = ref.shape[2] - x.shape[2]
        top = int(np.floor(diff_h / 2))
        bottom = diff_h - top
        diff_w = ref.shape[3] - x.shape[3]
        left = int(np.floor(diff_w / 2))
        right = diff_w - left
        return F.pad(x, (left, right, top, bottom))
    def forward(self, x):
        """
        @brief (B, in_channels, H, W) -> (B, out_channels, H, W)
               log-probabilities.
        """
        # Encoder
        down1 = self.conv1(x)
        down2 = self.conv2(down1)
        down3 = self.conv3(down2)
        # Decoder with skip connections, cropping to align feature maps
        up3 = self.crop(self.upconv3(down3), down2)
        up2 = self.crop(self.upconv2(torch.cat([up3, down2], 1)), down1)
        up1 = self.crop(self.upconv1(torch.cat([up2, down1], 1)), x)
        return F.log_softmax(up1, dim=1)
class Unet(torchseg.base.BaseModel):
    """
    @class Wraps the 'pip install unet' U-Net implementation.
    @details The forward pass resizes the input to a width of 512 (keeping
             the aspect ratio), pads height and width up to powers of two
             before inference, and undoes both operations on the output.
    """
    def __init__(self, in_channels: int = 3, out_channels: int = 35) -> None:
        """
        @param[in] in_channels   Number of channels of the input image.
        @param[in] out_channels  Number of output classes.
        """
        super().__init__()
        # Get segmentation model
        self.backbone = unet.UNet(in_channels=in_channels,
                                  out_classes=out_channels,
                                  dimensions=2,
                                  num_encoding_blocks=5,
                                  out_channels_first_layer=64,
                                  normalization='batch',
                                  padding=1,
                                  preactivation=True,
                                  residual=False)
    # FIX: the return annotation used to say dict[str, torch.Tensor], but a
    # tensor of log-probabilities is what this method actually returns.
    def forward(self, x: torch.Tensor) -> torch.Tensor:
        """
        @brief Forward pass.
        @param[in] x  Input batch, shape (B, in_channels, H, W).
        @returns per-pixel log-probabilities, shape (B, out_channels, H, W).
        """
        # Get input shape
        bs, channels, old_height, old_width = x.shape
        # Compute form factor
        ff = float(old_width) / old_height
        # Resize width to 512, preserving the aspect ratio
        width = 512
        height = int(round(width / ff))
        x = torchvision.transforms.functional.resize(x, [height, width])
        # Find how much we have to pad width and height so that both become
        # powers of two (width is already 512, so its padding is zero)
        height_padding = pow(2, math.ceil(np.log2(height))) - height
        width_padding = pow(2, math.ceil(np.log2(width))) - width
        # Add padding to the image
        x = F.pad(x, (0, width_padding, 0, height_padding), 'constant', 0)
        # Perform forward pass with unet.UNet
        x = self.backbone(x)
        # Remove padding (F.pad with negative amounts crops) to undo the
        # padding step above
        x = F.pad(x, (0, -width_padding, 0, -height_padding), 'constant', 0)
        # Resize back to original size
        x = torchvision.transforms.functional.resize(x, [old_height, old_width])
        # Return log-probabilities
        return F.log_softmax(x, dim=1)
class Unet_obsolete(torchseg.base.BaseModel):
    """
    @brief Classical U-Net implementation.
    @details Five encoder levels with 64, 128, 256, 512, and 1024 filters,
             mirrored by five decoder levels with skip connections. Unlike
             SimpleUnet, here the ReLU precedes the batch normalisation.
    """
    def __init__(self, in_channels=3, out_channels=35):
        """
        @param[in] in_channels  Number of input channels, e.g. set it to
                                three for RGB images.
        @param[in] out_channels Number of classes.
        """
        super().__init__()
        # Way down
        self.conv1 = self.down_block(in_channels, 64, 7, 3)
        self.conv2 = self.down_block(64, 128, 3, 1)
        self.conv3 = self.down_block(128, 256, 3, 1)
        self.conv4 = self.down_block(256, 512, 3, 1)
        self.conv5 = self.down_block(512, 1024, 3, 1)
        # Way up: the "* 2" input widths account for the skip-connection
        # concatenations performed in forward()
        self.upconv5 = self.up_block(1024, 512, 3, 1)
        self.upconv4 = self.up_block(512 * 2, 256, 3, 1)
        self.upconv3 = self.up_block(256 * 2, 128, 3, 1)
        self.upconv2 = self.up_block(128 * 2, 64, 3, 1)
        self.upconv1 = self.up_block(64 * 2, out_channels, 3, 1)
    def down_block(self, in_channels, out_channels, kernel_size, padding):
        """
        @brief Encoder stage: two conv + ReLU + BN blocks followed by a
               stride-2 max-pooling that halves the spatial resolution.
        @returns a torch.nn.Sequential with the stage layers.
        """
        layer = torch.nn.Sequential(
            # First convolution
            torch.nn.Conv2d(in_channels, out_channels, kernel_size, stride=1,
                            padding=padding),
            torch.nn.ReLU(),
            torch.nn.BatchNorm2d(out_channels),
            # Second convolution
            torch.nn.Conv2d(out_channels, out_channels, kernel_size, stride=1,
                            padding=padding),
            torch.nn.ReLU(),
            torch.nn.BatchNorm2d(out_channels),
            # Max-pooling
            torch.nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        )
        return layer
    def up_block(self, in_channels, out_channels, kernel_size, padding):
        """
        @brief Decoder stage: two conv + ReLU + BN blocks followed by a
               stride-2 transposed convolution that doubles the resolution.
        @returns a torch.nn.Sequential with the stage layers.
        """
        layer = torch.nn.Sequential(
            # First convolution
            torch.nn.Conv2d(in_channels, out_channels, kernel_size, stride=1,
                            padding=padding),
            torch.nn.ReLU(),
            torch.nn.BatchNorm2d(out_channels),
            # Second convolution
            torch.nn.Conv2d(out_channels, out_channels, kernel_size, stride=1,
                            padding=padding),
            torch.nn.ReLU(),
            torch.nn.BatchNorm2d(out_channels),
            # Up-convolution
            torch.nn.ConvTranspose2d(out_channels, out_channels, kernel_size=3,
                                     stride=2, padding=1, output_padding=1)
        )
        return layer
    def crop(self, x, ref):
        """
        @brief Pad (or crop, via negative padding) x so that its spatial size
               matches that of ref.
        @param[in] x   Tensor to resize, shape (B, C, H, W).
        @param[in] ref Reference tensor whose height/width must be matched.
        @returns x with ref's spatial dimensions.
        """
        if x.shape != ref.shape:
            crop_h = ref.shape[2] - x.shape[2]
            crop_up = int(np.floor(crop_h / 2))
            crop_down = crop_h - crop_up
            crop_w = ref.shape[3] - x.shape[3]
            crop_left = int(np.floor(crop_w / 2))
            crop_right = crop_w - crop_left
            # F.pad accepts negative amounts, which crop instead of pad
            x = F.pad(x, (crop_left, crop_right, crop_up, crop_down))
        return x
    def forward(self, x):
        """
        @brief Forward pass.
        @param[in] x  Input batch, shape (B, in_channels, H, W).
        @returns per-pixel log-probabilities, shape (B, out_channels, H, W).
        """
        conv1 = self.conv1(x)
        conv2 = self.conv2(conv1)
        conv3 = self.conv3(conv2)
        conv4 = self.conv4(conv3)
        conv5 = self.conv5(conv4)
        upconv5 = self.upconv5(conv5)
        upconv5 = self.crop(upconv5, conv4)
        upconv4 = self.upconv4(torch.cat([upconv5, conv4], 1))
        upconv4 = self.crop(upconv4, conv3)
        upconv3 = self.upconv3(torch.cat([upconv4, conv3], 1))
        upconv3 = self.crop(upconv3, conv2)
        upconv2 = self.upconv2(torch.cat([upconv3, conv2], 1))
        upconv2 = self.crop(upconv2, conv1)
        upconv1 = self.upconv1(torch.cat([upconv2, conv1], 1))
        upconv1 = self.crop(upconv1, x)
        return F.log_softmax(upconv1, dim=1)
class VectorUnet(torchseg.base.BaseModel):
    """
    @brief Version of the SimpleUnet but without downsampling/upsampling.
    @details All convolutions are 1x1 and the spatial resolution never
             changes, so the network processes each pixel's channel vector
             independently. Channel widths are 64, 128, 256, 512, 1024 on
             the "way down" and mirrored on the "way up", with the usual
             U-Net-style concatenations between matching levels.
    """
    def __init__(self, in_channels=3, out_channels=35):
        """
        @param[in] in_channels  Number of input channels, e.g. set it to
                                three for RGB images.
        @param[in] out_channels Number of classes.
        """
        super().__init__()
        # Fake way down (no spatial downsampling, only widening channels)
        self.conv1 = self.conv_block(in_channels, 64, 1, 0)
        self.conv2 = self.conv_block(64, 128, 1, 0)
        self.conv3 = self.conv_block(128, 256, 1, 0)
        self.conv4 = self.conv_block(256, 512, 1, 0)
        self.conv5 = self.conv_block(512, 1024, 1, 0)
        # Fake way up: the "* 2" widths account for the concatenations in
        # forward()
        self.upconv5 = self.conv_block(1024, 512, 1, 0)
        self.upconv4 = self.conv_block(512 * 2, 256, 1, 0)
        self.upconv3 = self.conv_block(256 * 2, 128, 1, 0)
        self.upconv2 = self.conv_block(128 * 2, 64, 1, 0)
        self.upconv1 = self.conv_block(64 * 2, out_channels, 1, 0, scoring=True)
    def conv_block(self, in_channels, out_channels, kernel_size, padding,
                   scoring=False):
        """
        @brief Build a BN + conv + ReLU (x2) block.
        @param[in] scoring  When True (final block only), append an extra
                            BN + conv pair that produces the class scores;
                            no trailing ReLU so the scores stay unbounded
                            before the softmax.
        @returns a torch.nn.Sequential with the block layers.
        """
        # Create list of operations
        modules = [
            # First convolution
            torch.nn.BatchNorm2d(in_channels),
            torch.nn.Conv2d(in_channels, out_channels, kernel_size, stride=1,
                            padding=padding),
            torch.nn.ReLU(),
            # Second convolution
            torch.nn.BatchNorm2d(out_channels),
            torch.nn.Conv2d(out_channels, out_channels, kernel_size, stride=1,
                            padding=padding),
            torch.nn.ReLU(),
        ]
        # For the scoring (last) block, add an extra BN + conv without ReLU
        if scoring:
            modules.append(torch.nn.BatchNorm2d(out_channels))
            modules.append(torch.nn.Conv2d(out_channels, out_channels, kernel_size, stride=1, padding=padding))
            # modules.append(torch.nn.ReLU())
        layer = torch.nn.Sequential(*modules)
        return layer
    def forward(self, x):
        """
        @brief Forward pass.
        @param[in] x  Input batch, shape (B, in_channels, H, W).
        @returns per-pixel log-probabilities, shape (B, out_channels, H, W).
        """
        conv1 = self.conv1(x)
        conv2 = self.conv2(conv1)
        conv3 = self.conv3(conv2)
        conv4 = self.conv4(conv3)
        conv5 = self.conv5(conv4)
        upconv5 = self.upconv5(conv5)
        upconv4 = self.upconv4(torch.cat([upconv5, conv4], 1))
        upconv3 = self.upconv3(torch.cat([upconv4, conv3], 1))
        upconv2 = self.upconv2(torch.cat([upconv3, conv2], 1))
        upconv1 = self.upconv1(torch.cat([upconv2, conv1], 1))
        return F.log_softmax(upconv1, dim=1)
class DeepLabV3(torchseg.base.BaseModel):
    """
    @brief DeepLabV3-style segmentation network: a frozen ResNet-101 encoder
           followed by an ASPP decoder head; outputs are bilinearly
           upsampled to the input resolution.
    """
    class ASPPConv(torch.nn.Sequential):
        # 3x3 atrous (dilated) convolution branch of the ASPP module
        def __init__(self, in_channels, out_channels, dilation):
            modules = [
                torch.nn.Conv2d(in_channels, out_channels, 3, padding=dilation,
                                dilation=dilation, bias=False),
                # FIXME: commented out because it does not support bs=1
                #torch.nn.BatchNorm2d(out_channels),
                torch.nn.ReLU()
            ]
            super(DeepLabV3.ASPPConv, self).__init__(*modules)
    class ASPPPooling(torch.nn.Sequential):
        # Image-level (global average pooling) branch of the ASPP module
        def __init__(self, in_channels, out_channels):
            modules = [
                torch.nn.AdaptiveAvgPool2d(1),
                torch.nn.Conv2d(in_channels, out_channels, 1, bias=False),
                # FIXME: commented out because it does not support bs=1
                #torch.nn.BatchNorm2d(out_channels),
                torch.nn.ReLU()
            ]
            super(DeepLabV3.ASPPPooling, self).__init__(*modules)
        def forward(self, x):
            # Pool to 1x1, convolve, and upsample back to the input size
            size = x.shape[-2:]
            for mod in self:
                x = mod(x)
            return F.interpolate(x, size=size, mode='bilinear', align_corners=False)
    class ASPP(torch.nn.Module):
        # Atrous Spatial Pyramid Pooling: parallel branches (1x1 conv,
        # several dilated 3x3 convs, global pooling) fused by a projection
        def __init__(self, in_channels, atrous_rates, out_channels=256):
            super(DeepLabV3.ASPP, self).__init__()
            modules = []
            # 1x1 convolution branch
            modules.append(torch.nn.Sequential(
                torch.nn.Conv2d(in_channels, out_channels, 1, bias=False),
                # FIXME: commented out because it does not support bs=1
                #torch.nn.BatchNorm2d(out_channels),
                torch.nn.ReLU()))
            rates = tuple(atrous_rates)
            for rate in rates:
                modules.append(DeepLabV3.ASPPConv(in_channels, out_channels, rate))
            modules.append(DeepLabV3.ASPPPooling(in_channels, out_channels))
            self.convs = torch.nn.ModuleList(modules)
            # Projection that fuses the concatenated branch outputs
            self.project = torch.nn.Sequential(
                torch.nn.Conv2d(len(self.convs) * out_channels, out_channels, 1, bias=False),
                # FIXME: commented out because it does not support bs=1
                #torch.nn.BatchNorm2d(out_channels),
                torch.nn.ReLU(),
                torch.nn.Dropout(0.5))
        def forward(self, x):
            # Run every branch on the same input and fuse the results
            res = []
            for conv in self.convs:
                res.append(conv(x))
            res = torch.cat(res, dim=1)
            return self.project(res)
    class DeepLabHead(torch.nn.Sequential):
        # Decoder head: ASPP, a 3x3 conv, and the final 1x1 classifier
        def __init__(self, in_channels, num_classes):
            super(DeepLabV3.DeepLabHead, self).__init__(
                DeepLabV3.ASPP(in_channels, [12, 24, 36]),
                torch.nn.Conv2d(256, 256, 3, padding=1, bias=False),
                #torch.nn.BatchNorm2d(256),
                torch.nn.ReLU(),
                torch.nn.Conv2d(256, num_classes, 1)
            )
    def __init__(self, in_channels: int = 3, out_channels: int = 35,
                 pretrained: bool = False) -> None:
        """
        @param[in] in_channels   Number of channels of the input image.
                                 NOTE(review): currently unused; the ResNet
                                 backbone is built with its default stem.
        @param[in] out_channels  Number of output classes.
        @param[in] pretrained    NOTE(review): currently unused; the backbone
                                 is always created with pretrained=True.
        """
        super().__init__()
        # Get ResNet-101 encoder
        self.backbone = torchvision.models.resnet.resnet101(
            pretrained=True, replace_stride_with_dilation=[False, True, True])
        # Freeze ResNet-101 encoder layers
        for param in self.backbone.parameters():
            param.requires_grad = False
        # Get segmentation model
        return_layers = {'layer4': 'out'}
        self.backbone = torchvision.models._utils.IntermediateLayerGetter(self.backbone,
            return_layers=return_layers)
        self.decoder = DeepLabV3.DeepLabHead(2048, out_channels)
        # TODO: Load pretrained weights from a DeepLabV3 trained on MS-COCO (train2017)
    def forward(self, x: torch.Tensor) -> torch.Tensor:
        """
        @brief Forward pass.
        @param[in] x  Input batch, shape (B, 3, H, W).
        @returns per-pixel log-probabilities, shape (B, out_channels, H, W).
                 (The return annotation used to say dict; the method returns
                 a tensor, as the final log_softmax shows.)
        """
        # Encoder: get latent space vector
        input_shape = x.shape[-2:]
        latent_tensor_dict = self.backbone(x)
        # Decoder: get per-class output scores
        raw_decoder_output = self.decoder(latent_tensor_dict['out'])
        # Final touch: interpolate segmentation maps to the size of the
        # original image
        interp_decoder_output = F.interpolate(raw_decoder_output,
                                              size=input_shape, mode='bilinear',
                                              align_corners=False)
        return F.log_softmax(interp_decoder_output, dim=1)
class ResNet_18_CAM_DS(torchseg.base.BaseModel):
    """
    @brief Classification network that produces segmentation predictions as
           an intermediary step.
    @details Implementation of the architecture presented in
             "Intrapapillary capillary loop classification in magnification
             endoscopy: open dataset and baseline methodology" by
             Luis C. Garcia Peraza Herrera et al., 2020.
    """
    def __init__(self, in_channels: int = 3, out_channels: int = 35,
                 pretrained: bool = False) -> None:
        """
        @param[in] in_channels  Number of input channels, e.g. set it to
                                three for RGB images.
        @param[in] out_channels Number of classes.
        @param[in] pretrained   Whether to load ImageNet weights into the
                                ResNet-18 backbone.
        """
        super().__init__()
        # Store parameters
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.pretrained = pretrained
        # Get ResNet-18 encoder and modify the number of input channels
        self.resnet = torchvision.models.resnet.resnet18(pretrained=self.pretrained)
        self.resnet.conv1 = torch.nn.Conv2d(self.in_channels, 64,
                                            kernel_size=7, stride=2, padding=3,
                                            bias=False)
        # Create "segmentation" layers: one 1x1 conv per resolution level
        # that maps backbone features to per-class feature maps
        self.seg1 = torch.nn.Conv2d(64, self.out_channels, kernel_size=1)
        self.seg2 = torch.nn.Conv2d(64, self.out_channels, kernel_size=1)
        self.seg3 = torch.nn.Conv2d(128, self.out_channels, kernel_size=1)
        self.seg4 = torch.nn.Conv2d(256, self.out_channels, kernel_size=1)
        self.seg5 = torch.nn.Conv2d(512, self.out_channels, kernel_size=1)
        # Create GAP layers (global average pooling down to 1x1 per class)
        self.gap1 = torch.nn.AdaptiveAvgPool2d(output_size=(1, 1))
        self.gap2 = torch.nn.AdaptiveAvgPool2d(output_size=(1, 1))
        self.gap3 = torch.nn.AdaptiveAvgPool2d(output_size=(1, 1))
        self.gap4 = torch.nn.AdaptiveAvgPool2d(output_size=(1, 1))
        self.gap5 = torch.nn.AdaptiveAvgPool2d(output_size=(1, 1))
        # Add the extra stuff to make ResNet-18-CAM-DS: taps that expose the
        # backbone's intermediate feature maps at each resolution level
        ilg = torchvision.models._utils.IntermediateLayerGetter
        self.conv1_tensor = ilg(self.resnet, return_layers={'bn1': 'out'})
        self.conv2_tensor = ilg(self.resnet, return_layers={'layer1': 'out'})
        self.conv3_tensor = ilg(self.resnet, return_layers={'layer2': 'out'})
        self.conv4_tensor = ilg(self.resnet, return_layers={'layer3': 'out'})
        self.conv5_tensor = ilg(self.resnet, return_layers={'layer4': 'out'})
        # Create layer to convert scores into fake probabilities
        self.log_sigmoid = torch.nn.LogSigmoid()
    def forward(self, x: torch.Tensor):
        """
        @brief Forward pass.
        @param[in] x  Input batch, shape (B, in_channels, H, W).
        @returns a tuple: the first element is a stacked tensor of
                 class-presence log-probability predictions from the five
                 side branches plus the global branch (index 5 is the global
                 prediction, computed as the sum of the side scores); the
                 remaining five elements are the raw "segmentation" feature
                 maps at each resolution level.
        """
        # Get tensors at every resolution level
        conv1_output = self.conv1_tensor(x)['out']
        conv2_output = self.conv2_tensor(x)['out']
        conv3_output = self.conv3_tensor(x)['out']
        conv4_output = self.conv4_tensor(x)['out']
        conv5_output = self.conv5_tensor(x)['out']
        # Produce a feature map per class at every resolution level
        seg1_output = self.seg1(conv1_output)
        seg2_output = self.seg2(conv2_output)
        seg3_output = self.seg3(conv3_output)
        seg4_output = self.seg4(conv4_output)
        seg5_output = self.seg5(conv5_output)
        # Compute class scores (with GAP) at every resolution level
        # (and globally too)
        score1_output = self.gap1(seg1_output)
        score2_output = self.gap2(seg2_output)
        score3_output = self.gap3(seg3_output)
        score4_output = self.gap4(seg4_output)
        score5_output = self.gap5(seg5_output)
        score6_output = score1_output + score2_output + score3_output \
            + score4_output + score5_output
        # Compute sigmoid function to estimate the side "probabilities"
        logprob1_output = self.log_sigmoid(score1_output)
        logprob2_output = self.log_sigmoid(score2_output)
        logprob3_output = self.log_sigmoid(score3_output)
        logprob4_output = self.log_sigmoid(score4_output)
        logprob5_output = self.log_sigmoid(score5_output)
        # Compute the global "probabilities"
        logprob6_output = self.log_sigmoid(score6_output)
        # Stack the class-presence log-probability predictions at every
        # resolution level
        class_presence = torch.stack([logprob1_output, logprob2_output,
                                      logprob3_output, logprob4_output,
                                      logprob5_output, logprob6_output])
        return class_presence, seg1_output, seg2_output, seg3_output, seg4_output, seg5_output
# Guard against accidental execution: this module only defines models.
if __name__ == '__main__':
    # FIX: the error message used to read "is not is not a script"
    raise RuntimeError('The model.py module is not a script.')
| 23,154 | 38.446337 | 111 | py |
segodsidb | segodsidb-main/src/model/metric.py | """
@brief Module of evaluation measures or metrics.
@author Luis Carlos Garcia Peraza Herrera (luiscarlos.gph@gmail.com).
@date 2 Jun 2021.
"""
import torch
import numpy as np
import monai.metrics
import torchvision
# My imports
import torchseg.data_loader as dl
def accuracy(pred, gt):
    """
    @brief Top-1 classification accuracy.
    @param[in] pred Tensor of per-class scores, shape (B, K).
    @param[in] gt   Tensor of integer class labels, shape (B,).
    @returns the fraction of samples whose arg-max prediction matches the
             label.
    """
    with torch.no_grad():
        labels = torch.argmax(pred, dim=1)
        assert labels.shape[0] == len(gt)
        hits = torch.sum(labels == gt).item()
    return hits / len(gt)
def top_k_acc(pred, gt, k=3):
    """
    @brief Top-k accuracy: a sample counts as correct when its true label
           is among the k highest-scored classes.
    @param[in] pred Tensor of per-class scores, shape (B, K).
    @param[in] gt   Tensor of integer class labels, shape (B,).
    @param[in] k    Number of top predictions to consider.
    @returns the fraction of samples whose label is in the top-k set.
    """
    with torch.no_grad():
        topk_idx = torch.topk(pred, k, dim=1)[1]
        assert topk_idx.shape[0] == len(gt)
        hits = sum(torch.sum(topk_idx[:, col] == gt).item()
                   for col in range(k))
    return hits / len(gt)
def iou(pred, gt, k=1):
    """
    @brief Soft intersection over union for binary segmentation.
    @param[in] pred Predicted tensor (unused, not implemented yet).
    @param[in] gt   Ground-truth tensor (unused, not implemented yet).
    @param[in] k    Index of the positive class (starting from zero).
    @raises NotImplementedError always, the metric is not implemented yet.
    """
    # FIX: this used to be 'raise NotImplemented()', which fails with a
    # TypeError because NotImplemented is a sentinel value, not an
    # exception type; the unreachable 'return 0' was removed too.
    raise NotImplementedError('iou() is not implemented yet.')
def mean_iou(pred, gt, eps=1e-6, thresh=0.5):
    """
    @brief Mean (over classes) intersection over union per image, averaged
           over all the images of the batch.
    @param[in] pred   Tensor of predicted log-probabilities, (B, C, H, W).
    @param[in] gt     Tensor of one-hot ground truth, (B, C, H, W).
    @param[in] eps    Smoothing term so that empty classes score 1 rather
                      than 0/0.
    @param[in] thresh Probability threshold used to binarise predictions.
    @returns a float with the batch-averaged mean IoU.
    """
    with torch.no_grad():
        bs, chan = pred.shape[0], pred.shape[1]
        # Probabilities and ground truth flattened to (B, C, H*W)
        prob = torch.exp(pred).reshape(bs, chan, -1)
        target = gt.reshape(bs, chan, -1)
        # A pixel is predicted as class c when the probability of c is both
        # above the threshold and the arg-max over the classes
        winner = torch.nn.functional.one_hot(torch.argmax(prob, dim=1),
                                             num_classes=chan)
        hard_pred = (prob > thresh).float() * winner.transpose(1, 2).float()
        # Per-class intersection and union
        inter = torch.sum(hard_pred * target, dim=2)
        union = torch.sum(hard_pred, dim=2) + torch.sum(target, dim=2) - inter
        # Mean over classes, then over the images of the batch
        per_image_miou = torch.mean((inter + eps) / (union + eps), dim=1)
        return torch.mean(per_image_miou).item()
def odsi_db_mean_iou(pred, gt, eps=1e-6, thresh=0.5):
    """
    @brief Mean (over classes) intersection over union for ODSI-DB, where
           only annotated pixels (exactly one ground-truth class set) are
           taken into account. The per-image mIoU values are averaged over
           the batch.
    @param[in] pred   Tensor of predicted log-probabilities, (B, C, H, W).
    @param[in] gt     Tensor of ground truth, (B, C, H, W).
    @param[in] eps    Smoothing term so that empty classes score 1 rather
                      than 0/0.
    @param[in] thresh Probability threshold used to binarise predictions.
    @returns a float with the batch-averaged mean IoU.
    """
    with torch.no_grad():
        bs, chan = pred.shape[0], pred.shape[1]
        # Probabilities and ground truth flattened to (B, C, H*W)
        prob = torch.exp(pred).reshape(bs, chan, -1)
        target = gt.reshape(bs, chan, -1)
        # A pixel is predicted as class c when the probability of c is both
        # above the threshold and the arg-max over the classes
        winner = torch.nn.functional.one_hot(torch.argmax(prob, dim=1),
                                             num_classes=chan)
        hard_pred = (prob > thresh).float() * winner.transpose(1, 2).float()
        # Each image can have a different number of annotated pixels, so
        # the IoU has to be computed image by image
        per_image_miou = torch.empty((bs))
        for i in range(bs):
            # A pixel counts as annotated iff exactly one class is set
            ann = torch.sum(target[i], dim=0) == 1
            y_true = target[i][:, ann]
            y_pred = hard_pred[i][:, ann]
            inter = torch.sum(y_pred * y_true, dim=1)
            union = torch.sum(y_pred, dim=1) + torch.sum(y_true, dim=1) \
                - inter
            per_image_miou[i] = torch.mean((inter + eps) / (union + eps),
                                           dim=0)
        # Mean over the images of the batch
        return torch.mean(per_image_miou).item()
def odsi_db_monai_metric(pred, gt, metric_name, ignore_labels=[], thresh=0.5):
    """
    @brief Computes a MONAI confusion-matrix metric averaged over classes.
    @details Only the labelled pixels of the batch are counted, i.e. pixels
             whose ground truth assigns them to exactly one class.
    @param[in] pred           Tensor of predicted log-probabilities,
                              shape (B, C, H, W).
    @param[in] gt             Tensor of ground truth, shape (B, C, H, W).
    @param[in] metric_name    MONAI metric name, e.g. 'accuracy'; the special
                              value 'confusion matrix' returns the raw
                              accumulated confusion matrix instead.
    @param[in] ignore_labels  List of classes (class names) that should be
                              ignored when averaging the metric over classes.
    @param[in] thresh         Threshold used to binarise predictions.
    @returns a scalar with the metric for the whole batch, or a (1, C, 4)
             tensor when metric_name == 'confusion matrix'.
    """
    retval = None
    with torch.no_grad():
        # Flatten predictions and labels, and convert log-probabilities into
        # probabilities
        bs = pred.shape[0]
        chan = pred.shape[1]
        height = pred.shape[2]
        width = pred.shape[3]
        flat_pred = torch.reshape(torch.exp(pred), (bs, chan, -1))
        flat_gt = torch.reshape(gt, (bs, chan, -1))
        # Binarise prediction
        flat_bin_pred = (flat_pred > thresh).float()
        # Shapes of bin_pred and flat_gt now are (B, C, H*W)
        assert(flat_bin_pred.shape == (bs, chan, height * width))
        assert(flat_gt.shape == (bs, chan, height * width))
        # NOTE: Every image in the batch can have a different number of
        #       annotated pixels
        # FIX: allocate the confusion matrix on the same device as the
        #      predictions instead of hard-coding .cuda(), so the metric
        #      also works on CPU-only runs
        conf_mat = torch.zeros((1, chan, 4), device=pred.device)
        for i in range(bs):
            # Get tensor of booleans of valid annotated pixels (i.e. they
            # have been assigned to only one class)
            valid_idx = torch.sum(flat_gt[i, ...], dim=0) == 1
            # TODO: Add a case for when valid_idx is empty, i.e. what
            #       happens if the image does not have annotated pixels?
            # Get prediction and ground truth tensors, the shapes of the
            # tensors are (1, nclasses, npixels)
            y_true = flat_gt[i, :, valid_idx][None, :, :]
            y_pred = flat_bin_pred[i, :, valid_idx][None, :, :]
            # Accumulate the confusion matrix over the images in the batch
            conf_mat += monai.metrics.get_confusion_matrix(y_pred, y_true)
        if metric_name == 'confusion matrix':
            retval = conf_mat
        else:
            # Compute the metric with MONAI
            met = monai.metrics.compute_confusion_matrix_metric(metric_name,
                                                                conf_mat)[0]
            # We might want to ignore some classes for whatever reason; the
            # entries of the ignored classes are set to NaN so that they are
            # discarded by the averaging below
            idx2class = dl.OdsiDbDataLoader.OdsiDbDataset.classnames
            class2idx = {y: x for x, y in idx2class.items()}
            if ignore_labels:
                ignore_labels = [class2idx[x] for x in ignore_labels]
                for idx in ignore_labels:
                    met[idx] = float('nan')
            # Mean metric over classes (ignoring classes whose metric is nan)
            is_nan = torch.isnan(met)
            met[is_nan] = 0
            mean_over_classes = (met.sum() / (~is_nan).float().sum()).item()
            retval = mean_over_classes
    return retval
def odsi_db_accuracy(pred, gt, ignore_labels=[]):
    """Per-class accuracy averaged over classes (see odsi_db_monai_metric)."""
    metric_value = odsi_db_monai_metric(pred, gt, 'accuracy',
                                        ignore_labels=ignore_labels)
    return metric_value
def odsi_db_balanced_accuracy(pred, gt, ignore_labels=[]):
    """Balanced accuracy, i.e. the mean of sensitivity and specificity.

    NOTE: built from its two components rather than via the MONAI
    'balanced accuracy' metric name.
    """
    sensitivity = odsi_db_monai_metric(pred, gt, 'sensitivity',
                                       ignore_labels=ignore_labels)
    specificity = odsi_db_monai_metric(pred, gt, 'specificity',
                                       ignore_labels=ignore_labels)
    return .5 * sensitivity + .5 * specificity
def odsi_db_sensitivity(pred, gt, ignore_labels=[]):
    """Per-class sensitivity (recall) averaged over classes."""
    metric_value = odsi_db_monai_metric(pred, gt, 'sensitivity',
                                        ignore_labels=ignore_labels)
    return metric_value
def odsi_db_specificity(pred, gt, ignore_labels=[]):
    """Per-class specificity averaged over classes."""
    metric_value = odsi_db_monai_metric(pred, gt, 'specificity',
                                        ignore_labels=ignore_labels)
    return metric_value
def odsi_db_precision(pred, gt, ignore_labels=[]):
    """Per-class precision averaged over classes."""
    metric_value = odsi_db_monai_metric(pred, gt, 'precision',
                                        ignore_labels=ignore_labels)
    return metric_value
def odsi_db_f1_score(pred, gt, ignore_labels=[]):
    """Per-class F1 score averaged over classes."""
    metric_value = odsi_db_monai_metric(pred, gt, 'f1 score',
                                        ignore_labels=ignore_labels)
    return metric_value
def odsi_db_conf_mat(pred, gt, ignore_labels=[]):
    """Accumulated per-class confusion matrix of the batch, shape (1, C, 4)."""
    metric_value = odsi_db_monai_metric(pred, gt, 'confusion matrix',
                                        ignore_labels=ignore_labels)
    return metric_value
def odsi_db_ResNet_18_CAM_DS_accuracy(tuple_pred, raw_gt, ignore_labels=[],
                                      thresh=0.5, pred_index=5):
    """
    @brief Class-presence accuracy metric for images.
    @details For each image, the ground truth is a binary vector with a one
             for every class that has at least one annotated pixel. The
             prediction vector is obtained by thresholding the class-presence
             probabilities of the chosen resolution level. With the confusion
             matrix of these two vectors:
             * TP: class predicted present and actually present.
             * TN: class predicted absent and actually absent.
             * FP: class predicted present but actually absent.
             * FN: class predicted absent but actually present.
             the accuracy of an image is (TP + TN) / (TP + TN + FP + FN),
             and the batch result is the mean over images.
    @param[in] tuple_pred     Tuple whose first element is a tensor of
                              class-presence log-probabilities with shape
                              (R, B, K, 1, 1), where R is the number of
                              resolution levels + 1 (the global prediction),
                              B is the batch size and K the number of
                              classes. The remaining elements (the
                              pseudo-segmentations) are not used here.
    @param[in] raw_gt         Tensor of ground truth, shape (B, K, H, W).
    @param[in] ignore_labels  Unused; kept for interface compatibility with
                              the other ODSI-DB metrics.
    @param[in] thresh         Threshold that binarises the soft predictions.
    @param[in] pred_index     Resolution level to evaluate; the default 5 is
                              the global prediction (sum of side scores).
    @returns the average accuracy over the images in the batch.
    """
    with torch.no_grad():
        # Only the classification head output is needed for this metric
        raw_pred = tuple_pred[0]
        bs = raw_pred.shape[1]
        classes = raw_gt.shape[1]
        # FIX: allocate working tensors on the same device as the inputs
        # instead of hard-coding torch.cuda.FloatTensor, so the metric also
        # works on CPU-only runs; the unused 'rl' variable was removed.
        acc = torch.empty(bs, device=raw_pred.device)
        for i in range(bs):
            # Build ground truth vector of class presence from the
            # segmentation ground truth that ODSI-DB provides
            gt = torch.zeros(classes, device=raw_pred.device)
            for j in range(classes):
                gt[j] = 1 if 1 in raw_gt[i, j] else 0
            # Binarise the class-presence prediction of the requested level
            soft_pred = torch.exp(torch.flatten(raw_pred[pred_index, i]))
            pred = (soft_pred > thresh).float()
            # Compute confusion matrix
            tp = torch.sum(torch.logical_and(pred == 1, gt == 1))
            tn = torch.sum(torch.logical_and(pred == 0, gt == 0))
            fp = torch.sum(torch.logical_and(pred == 1, gt == 0))
            fn = torch.sum(torch.logical_and(pred == 0, gt == 1))
            # Compute "accuracy" of class-presence detection for this
            # batch image
            acc[i] = (tp + tn) / (tp + tn + fp + fn)
        # Mean accuracy over the images of the batch
        return acc.mean().item()
def convert_tuple_pred_to_segmap(tuple_pred, raw_gt, pred_index):
    """
    @brief Turn the "segmentation" feature maps of a given resolution level
           into per-pixel log-probability maps at the ground-truth size.
    @param[in] tuple_pred  Model output tuple; elements 1..5 hold the
                           pseudo-segmentation feature maps.
    @param[in] raw_gt      Ground-truth tensor whose spatial size the maps
                           are resized to, shape (B, C, H, W).
    @param[in] pred_index  Resolution level to take the feature maps from.
    @returns log-"probability" maps of shape (B, C, H, W).
    """
    # Pick the feature maps of the requested resolution level
    feat = tuple_pred[1 + pred_index]
    target_h, target_w = raw_gt.shape[2], raw_gt.shape[3]
    # Upsample the feature maps to the ground-truth resolution
    resize = torchvision.transforms.Resize(
        (target_h, target_w),
        interpolation=torchvision.transforms.InterpolationMode.BICUBIC)
    feat = resize(feat)
    # Sigmoid (in log space) converts feature maps into "probability" maps
    return torch.nn.functional.logsigmoid(feat)
def odsi_db_ResNet_18_CAM_DS_pw_accuracy(tuple_pred, raw_gt, ignore_labels=[],
                                         pred_index=0):
    """
    @brief Pixel-wise accuracy of the class-presence classification model,
           measured on the pseudo-segmentation of a given resolution level.
    @details The segmentation is obtained as explained in Garcia-Peraza
             Herrera et al. 2020, "Intrapapillary capillary loop
             classification in magnification endoscopy: open dataset and
             baseline methodology".
    @param[in] tuple_pred     See odsi_db_ResNet_18_CAM_DS_accuracy().
    @param[in] raw_gt         See odsi_db_ResNet_18_CAM_DS_accuracy().
    @param[in] ignore_labels  Class names to leave out of the class average.
    @param[in] pred_index     Resolution level of the segmentation, starting
                              from zero at the highest resolution.
    """
    with torch.no_grad():
        logprob_map = convert_tuple_pred_to_segmap(tuple_pred, raw_gt,
                                                   pred_index)
        return odsi_db_accuracy(logprob_map, raw_gt, ignore_labels)
def odsi_db_ResNet_18_CAM_DS_pw_balanced_accuracy(tuple_pred, raw_gt,
                                                  ignore_labels=[],
                                                  pred_index=0):
    """
    @brief Pixel-wise balanced accuracy of the class-presence classification
           model, measured on the pseudo-segmentation of a given resolution
           level.
    @details The segmentation is obtained as explained in Garcia-Peraza
             Herrera et al. 2020, "Intrapapillary capillary loop
             classification in magnification endoscopy: open dataset and
             baseline methodology".
    """
    with torch.no_grad():
        logprob_map = convert_tuple_pred_to_segmap(tuple_pred, raw_gt,
                                                   pred_index)
        return odsi_db_balanced_accuracy(logprob_map, raw_gt, ignore_labels)
# Guard against accidental execution: this module only defines metrics.
if __name__ == '__main__':
    # FIX: the error message used to read "is not is not a script"
    raise RuntimeError('The metric.py module is not a script.')
| 17,446 | 37.944196 | 82 | py |
segodsidb | segodsidb-main/src/machine/machine.py | """
@brief Module that contains the learning machine for each problem/task.
@author Luis Carlos Garcia Peraza Herrera (luiscarlos.gph@gmail.com).
@date 2 Jun 2021.
"""
import numpy as np
import torch
import torchvision.utils
# My imports
import torchseg.base
import torchseg.utils
class GenericMachine(torchseg.base.BaseMachine):
"""
@class Generic learning machine.
@param[in] model TODO
@param[in] criterion TODO
@param[in] metric_ftns TODO
@param[in] optimizer TODO
@param[in] config TODO
@param[in] device TODO
@param[in] data_loader TODO
@param[in] valid_data_loader TODO
@param[in] lr_scheduler TODO
@param[in] len_epoch TODO
@param[in] acc_steps Number of batches used to accumulate gradients.
Increasing this value is (almost) equivalent to increasing
the batch size, but without using more GPU memory. Obviously,
this will slow down the training.
"""
    def __init__(self, model, criterion, metric_ftns, optimizer, config, device,
                 data_loader, valid_data_loader=None, lr_scheduler=None,
                 len_epoch=None, acc_steps=1):
        """
        @brief Set up the training machinery.
        @details If len_epoch is None the machine trains epoch-wise over the
                 whole data loader; otherwise the data loader is wrapped by
                 torchseg.utils.inf_loop() and each epoch runs len_epoch
                 iterations.
        """
        super().__init__(model, criterion, metric_ftns, optimizer, config)
        self.config = config
        self.device = device
        self.data_loader = data_loader
        if len_epoch is None:
            # Epoch-based training
            self.len_epoch = len(self.data_loader)
        else:
            # Iteration-based training
            self.data_loader = torchseg.utils.inf_loop(data_loader)
            self.len_epoch = len_epoch
        self.acc_steps = acc_steps
        self.valid_data_loader = valid_data_loader
        # Validation runs only when a validation loader was provided
        self.do_validation = self.valid_data_loader is not None
        self.lr_scheduler = lr_scheduler
        # Heuristic: log roughly once every sqrt(batch_size) batches
        self.log_step = int(np.sqrt(data_loader.batch_size))
        # One tracker per phase, covering the loss and every metric function
        self.train_metrics = torchseg.utils.MetricTracker('loss',
            *[m.__name__ for m in self.metric_ftns], writer=self.writer)
        self.valid_metrics = torchseg.utils.MetricTracker('loss',
            *[m.__name__ for m in self.metric_ftns], writer=self.writer)
    def _train_epoch(self, epoch):
        """
        @brief Training logic for an epoch.
        @details Supports gradient accumulation: the loss of every batch is
                 divided by self.acc_steps and the optimizer only steps once
                 every acc_steps batches, emulating a larger batch size
                 without extra GPU memory.
        @param[in] epoch Integer, current training epoch.
        @returns a log that contains average loss and metric in this epoch.
        """
        self.model.train()
        self.train_metrics.reset()
        # NOTE(review): when len_epoch was given, self.data_loader is the
        # object returned by torchseg.utils.inf_loop(); confirm that it
        # accepts attribute assignment.
        self.data_loader.training = True
        # Data structures for accumulated gradients
        acc_batches = 0
        acc_loss = 0
        self.optimizer.zero_grad()
        # For all the batches of this epoch
        for batch_idx, raw_data in enumerate(self.data_loader):
            # Machinery for accumulated gradients, which can be used by users when they want to
            # use a larger batch size than what fits in GPU
            acc_batches += 1
            # Load batch (and expected output) into the GPU, len(data) here must be equal
            # to your batch size
            data = raw_data['image']
            target = raw_data['label']
            data, target = data.to(self.device), target.to(self.device)
            #import cv2
            #foo = raw_data['image'][0].numpy().transpose((1, 2, 0)).astype(
            #    np.uint8)[...,::-1]
            #print('Foo:', foo.shape)
            #cv2.imshow('image', foo)
            #fool = raw_data['label'][0, 0, :, :].numpy() * 255
            #cv2.imshow('label', fool)
            #cv2.waitKey(0)
            # Perform inference
            output = self.model(data)
            # Compute loss, divided by acc_steps so the accumulated gradient
            # matches the one of a single big batch
            loss = self.criterion(output, target) / self.acc_steps
            # Update weights with backpropagation
            loss.backward()
            acc_loss += loss.item()
            # Machinery for accumulated gradients: step and reset once every
            # acc_steps batches
            if acc_batches == self.acc_steps:
                self.logger.debug('Train Epoch: {} {} LR: {} Accumulated loss: {:.6f}'.format(
                    epoch,
                    self._progress(batch_idx),
                    [ group['lr'] for group in self.optimizer.param_groups ],
                    acc_loss))
                self.optimizer.step()
                acc_batches = 0
                acc_loss = 0
                self.optimizer.zero_grad()
            self.writer.set_step((epoch - 1) * self.len_epoch + batch_idx)
            # Metrics are tracked with the undivided (per-batch) loss
            self.train_metrics.update('loss', loss.item() * self.acc_steps)
            for met in self.metric_ftns:
                self.train_metrics.update(met.__name__, met(output, target))
            if batch_idx % self.log_step == 0:
                self.logger.debug('Train Epoch: {} {} LR: {} Loss: {:.6f}'.format(
                    epoch,
                    self._progress(batch_idx),
                    [ group['lr'] for group in self.optimizer.param_groups ],
                    loss.item() * self.acc_steps))
                # FIXME: Add image only if it has three channels
                #self.writer.add_image('input',
                #    torchvision.utils.make_grid(data.cpu(), nrow=8,
                #                                normalize=True))
            if batch_idx == self.len_epoch:
                break
        log = self.train_metrics.result()
        if self.do_validation:
            val_log = self._valid_epoch(epoch)
            log.update(**{'val_' + k : v for k, v in val_log.items()})
        if self.lr_scheduler is not None:
            # NOTE(review): if do_validation is False and the scheduler is
            # ReduceLROnPlateau, val_log is undefined here — confirm that
            # this combination cannot occur.
            if type(self.lr_scheduler) == \
                    torch.optim.lr_scheduler.ReduceLROnPlateau:
                #self.lr_scheduler.step(self.train_metrics.avg(self.mnt_metric))
                self.lr_scheduler.step(val_log['loss'])
            else:
                self.lr_scheduler.step()
        return log
def _valid_epoch(self, epoch):
"""
@brief Validate after training an epoch.
@param[in] epoch Integer, current training epoch.
@returns A log that with the validation information.
"""
self.model.eval()
self.valid_metrics.reset()
self.data_loader.training = False
with torch.no_grad():
#for batch_idx, (data, target) in enumerate(self.valid_data_loader):
for batch_idx, raw_data in enumerate(self.data_loader):
data = raw_data['image']
target = raw_data['label']
data, target = data.to(self.device), target.to(self.device)
output = self.model(data)
loss = self.criterion(output, target)
print('Validation epoch:', epoch - 1)
print('len(self.valid_data_loader):', len(self.valid_data_loader))
print('batch_idx:', batch_idx)
self.writer.set_step((epoch - 1) \
* len(self.valid_data_loader) + batch_idx, 'valid')
self.valid_metrics.update('loss', loss.item())
for met in self.metric_ftns:
self.valid_metrics.update(met.__name__, met(output, target))
# FIXME: Add image only if it has three channels
#self.writer.add_image('input',
# torchvision.utils.make_grid(data.cpu(), nrow=8,
# normalize=True))
# Add histogram of model parameters to the tensorboard
# FIXME: disabled as it is not currently used
#for name, p in self.model.named_parameters():
# self.writer.add_histogram(name, p, bins='auto')
return self.valid_metrics.result()
def _progress(self, batch_idx):
"""
@brief TODO
@param[in] batch_idx TODO
"""
base = '[{}/{} ({:.0f}%)]'
if hasattr(self.data_loader, 'n_samples'):
current = batch_idx * self.data_loader.batch_size
total = self.data_loader.n_samples
else:
current = batch_idx
total = self.len_epoch
return base.format(current, total, 100.0 * current / total)
| 8,241 | 38.435407 | 96 | py |
segodsidb | segodsidb-main/test/test_hyper2rgb.py | """
@brief Unit tests for the reconstruction of RGB images from hyperspectral data.
@author Luis Carlos Garcia Peraza Herrera (luiscarlos.gph@gmail.com).
@date 9 Feb 2022.
"""
import unittest
import numpy as np
import colour
import scipy
# My imports
import torchseg.data_loader
class TestReconstructionMethods(unittest.TestCase):
    """Tests for the hyperspectral -> RGB reconstruction code path."""
    def test_reconstruction_of_nuance_d65(self):
        # NOTE(review): this test has no assertions -- it only prints the
        # result, so it can never fail on a wrong reconstruction. Consider
        # asserting on im_rgb's shape and value range.
        # Get function for the D65 illuminant
        il = colour.SDS_ILLUMINANTS['D65']
        f_illum = scipy.interpolate.PchipInterpolator(il.wavelengths, il.values, extrapolate=True)
        # Generate a hyperspectral image of 51 bands containing the D65 illuminant
        # NOTE(review): the comment above says 51 bands, but nbands below is
        # 950 - 450 = 500 -- confirm which is intended.
        min_wl = 450
        max_wl = 950
        nbands = max_wl - min_wl
        bands = np.linspace(min_wl, max_wl, nbands)
        h = 1
        w = 1
        im_hyper = np.empty((h, w, nbands))
        im_hyper[:, :, :] = f_illum(bands)
        # Convert illuminant image to RGB
        image_loader = torchseg.data_loader.OdsiDbDataLoader.LoadImage
        im_rgb = image_loader.hyper2rgb(im_hyper, bands)
        print(im_rgb)
        print(im_rgb.shape)
if __name__ == '__main__':
    unittest.main()
| 1,182 | 27.166667 | 98 | py |
segodsidb | segodsidb-main/test/test_metric.py | """
@brief Unit tests for the reconstruction of RGB images from hyperspectral data.
@author Luis Carlos Garcia Peraza Herrera (luiscarlos.gph@gmail.com).
@date 9 Feb 2022.
"""
import unittest
import numpy as np
import colour
import scipy
# My imports
import torchseg.data_loader
class TestReconstructionMethods(unittest.TestCase):
    """Tests for the hyperspectral -> RGB reconstruction code path.

    NOTE(review): this file (test_metric.py) is a byte-identical copy of
    test_hyper2rgb.py -- it tests reconstruction, not metrics. It was likely
    copied as a template and never filled in with metric tests.
    """
    def test_reconstruction_of_nuance_d65(self):
        # NOTE(review): no assertions -- the test only prints and cannot fail
        # on a wrong reconstruction.
        # Get function for the D65 illuminant
        il = colour.SDS_ILLUMINANTS['D65']
        f_illum = scipy.interpolate.PchipInterpolator(il.wavelengths, il.values, extrapolate=True)
        # Generate a hyperspectral image of 51 bands containing the D65 illuminant
        # NOTE(review): nbands is actually 500 (950 - 450), not 51.
        min_wl = 450
        max_wl = 950
        nbands = max_wl - min_wl
        bands = np.linspace(min_wl, max_wl, nbands)
        h = 1
        w = 1
        im_hyper = np.empty((h, w, nbands))
        im_hyper[:, :, :] = f_illum(bands)
        # Convert illuminant image to RGB
        image_loader = torchseg.data_loader.OdsiDbDataLoader.LoadImage
        im_rgb = image_loader.hyper2rgb(im_hyper, bands)
        print(im_rgb)
        print(im_rgb.shape)
if __name__ == '__main__':
    unittest.main()
| 1,182 | 27.166667 | 98 | py |
DiffProxy | DiffProxy-main/generate.py | import os
import os.path as pth
import json
import numpy as np
import torch
from sbs_generators import generator_lookup_table
from differentiable_generator import StyleGANCond
from utils import read_image, write_image
sat_dir = 'C:/Program Files/Allegorithmic/Substance Automation Toolkit'
def compare2real():
    """Render samples with the real Substance generator and save side-by-side
    comparisons against the differentiable-proxy reconstructions.

    For each sampled parameter vector this writes three images into out_dir:
    the real render (from the sampler), the proxy render (fake_*), and the
    horizontal concatenation of both (comp_*).
    """
    generator_name = 'arc_pavement'
    out_dir = pth.join('./pretrained/synthesis', generator_name)
    graph_filename = f'./data/sbs/{generator_name}16.sbs'
    model_path = './pretrained/arc_pavement_20000_nogan.pkl'
    json_input = None  # set to a dataset.json path to reuse fixed parameters
    image_res = 256
    n_samples = 32
    # exist_ok avoids the race between an exists() check and makedirs().
    os.makedirs(out_dir, exist_ok=True)
    sampler = generator_lookup_table[generator_name](graph_filename, 'generator', sat_dir, image_res)
    if json_input is None:
        # Sample fresh parameters and render them with the real generator.
        sampler.sample(out_dir, n_samples=n_samples, vis_every=1)
        json_file = pth.join(out_dir, 'dataset.json')
        with open(json_file) as f:
            sampled_params = json.load(f)['labels']
    else:
        # Re-render a fixed set of parameters.
        sampler.sample_with_json(out_dir, json_input)
        with open(json_input) as f:
            sampled_params = json.load(f)['labels']
    init = {'method': 'avg'}
    G = StyleGANCond(generator_name, model_path, init, model_type='norm')
    for params in sampled_params:
        image_name = pth.basename(params[0])
        real_np = read_image(pth.join(out_dir, image_name))
        parameters = params[1]
        p = torch.as_tensor(parameters, dtype=torch.float64, device=G.device).unsqueeze(0)
        G.set_params(p)
        fake = G().detach().squeeze().cpu().numpy()
        # np.concatenate copies its inputs, so no defensive copy of real_np
        # is needed.
        comp = np.concatenate((real_np, fake), axis=1)
        write_image(pth.join(out_dir, f'fake_{image_name}'), fake)
        write_image(pth.join(out_dir, f'comp_{image_name}'), comp)


if __name__ == "__main__":
    compare2real()
DiffProxy | DiffProxy-main/sbs_generators.py | import os
import os.path as pth
import numbers
import shutil
import glob
import random
import torch
import numpy as np
from abc import ABC, abstractmethod
import xml.etree.ElementTree as ET
import json
import subprocess
from collections import OrderedDict
from utils import Timer, read_image, write_image
class SimpleSBSGraph:
    """Minimal parser/editor for a Substance Designer (.sbs) XML graph.

    The graph is expected to contain only generator nodes (compInstance) and
    output nodes (compOutputBridge). Parsing records, for every generator
    node, the XML elements of the parameters listed in `params` so they can
    be overwritten in place before re-saving the file.
    """
    def __init__(self, sbs_file_name, params):
        # params: mapping of parameter name -> sampler (provides default_val
        # and is_discrete for parameters missing from the XML).
        self.sbs_file_name = sbs_file_name
        self.params = params
        self.xml_tree, self.n_nodes, self.node_params_dict = self.parse()
    @staticmethod
    def set_param(param_val_xml, val, is_int):
        """Write `val` (a list/tuple of numbers) into a paramValue element,
        creating the typed constant child element if it does not exist."""
        if isinstance(val, (list, tuple)):
            if not all(isinstance(x, numbers.Number) for x in val):
                raise RuntimeError('Unknown parameter type.')
            param_val_str = ' '.join(str(x) for x in val)
        else:
            raise RuntimeError(f'Unknown parameter type: {type(val)}')
        # The SBS format names 1-component ints 'constantValueInt32', not
        # 'constantValueInt1'.
        param_tag = f'constantValueInt{len(val)}' if is_int else f'constantValueFloat{len(val)}'
        if len(val) == 1 and is_int:
            param_tag = 'constantValueInt32'
        param_val_xml_ = param_val_xml.find(param_tag)
        if param_val_xml_ is None:
            ET.SubElement(param_val_xml, param_tag).set('v', param_val_str)
        else:
            param_val_xml_.set('v', param_val_str)
    def parse(self):
        """Parse the .sbs file.

        Returns (xml_tree, n_generator_nodes, node_params_dict) where
        node_params_dict maps generator node uid -> {'params': {name: XML
        paramValue element}, 'name': graph output name feeding from it}.
        """
        # Parse XML file
        doc_xml = ET.parse(self.sbs_file_name)
        graph_xml = doc_xml.getroot().find('content/graph')
        # find graph outputs
        graph_outputs_by_uid = {}
        for output_ in graph_xml.iter('graphoutput'):
            output_name = output_.find('identifier').get('v')
            output_uid = output_.find('uid').get('v')
            graph_outputs_by_uid[output_uid] = output_name
        n_nodes = 0
        node_params_dict = OrderedDict()
        output_name_list = []
        # check generator nodes
        for node_xml in graph_xml.iter('compNode'):
            node_uid = int(node_xml.find('uid').get('v'))
            node_imp = node_xml.find('compImplementation')[0]
            if node_imp.tag == 'compInstance':
                node_params = {}
                for param_xml in node_imp.iter('parameter'):
                    param_name = param_xml.find('name').get('v')
                    if param_name in self.params:
                        node_params[param_name] = param_xml.find('paramValue')
                # add unregistered params to nodes: parameters at their
                # default value are omitted from the XML, so create elements
                # for them with the sampler defaults to make them editable.
                unregistered_param_names = set(self.params) - set(node_params)
                print(f"In Node {n_nodes}, found registered params:{set(node_params)}")
                print(f"In Node {n_nodes}, found unregistered params: {unregistered_param_names}")
                params = node_imp.find('parameters')
                for param_name in unregistered_param_names:
                    param_xml = ET.SubElement(params, 'parameter')
                    ET.SubElement(param_xml, 'name').set('v', param_name)
                    param_val_xml = ET.SubElement(param_xml, 'paramValue')
                    self.set_param(param_val_xml,
                                   val=self.params[param_name].default_val,
                                   is_int=self.params[param_name].is_discrete)
                    node_params[param_name] = param_val_xml
                n_nodes += 1
                node_params_dict[node_uid] = {'params': node_params, 'name': None}
            elif node_imp.tag == 'compOutputBridge':
                pass
            else:
                raise NotImplementedError(f'This simple sbs parse cannot recognize this types of node: {node_imp.tag}')
        # Second pass: link each output node back to the generator feeding it.
        for node_xml in graph_xml.iter('compNode'):
            node_imp = node_xml.find('compImplementation')[0]
            if node_imp.tag == 'compInstance':
                pass
            elif node_imp.tag == 'compOutputBridge':
                output_uid = node_imp.find('output').get('v')
                output_name_list.append(graph_outputs_by_uid[output_uid])
                connections = node_xml.findall('connections/connection')
                if len(connections) != 1:
                    raise RuntimeError('A output node is not connected.')
                gen_uid = int(connections[0].find('connRef').get('v'))
                if gen_uid not in node_params_dict:
                    raise RuntimeError('Cannot find input generator node for this output node.')
                node_params_dict[gen_uid]['name'] = graph_outputs_by_uid[output_uid]
            else:
                raise NotImplementedError(f'This simple sbs parse cannot recognize this types of node: {node_imp.tag}')
        return doc_xml, n_nodes, node_params_dict
class Sampler(ABC):
    """Abstract interface for parameter samplers."""
    @abstractmethod
    def sample(self):
        # Return a list with one freshly drawn value per component.
        pass
    @abstractmethod
    def size(self):
        # Return the number of components this sampler produces.
        pass
class RandomSampler(Sampler):
    """Uniform per-component sampler.

    Discrete samplers draw integers with random.randint (bounds inclusive),
    continuous ones draw floats with random.uniform. min_val/max_val are
    per-component sequences of equal length.
    """
    def __init__(self, min_val, max_val, default_val=None, is_discrete=False):
        self.min_val = min_val
        self.max_val = max_val
        # Fall back to the lower bound when no explicit default is given.
        self.default_val = self.min_val if default_val is None else default_val
        self.is_discrete = is_discrete
        self.func = random.randint if is_discrete else random.uniform
    def sample(self):
        """Draw one value per component and return them as a list."""
        return [self.func(lo, hi) for lo, hi in zip(self.min_val, self.max_val)]
    def size(self):
        """Number of components."""
        return len(self.min_val)
class GaussianRandomSampler(Sampler):
    """Per-component Gaussian sampler, clipped to [min_val, max_val].

    Discrete samplers round the clipped draw to the nearest integer. All of
    min_val/max_val/mean_val/std_val are per-component sequences of equal
    length.
    """
    def __init__(self, min_val, max_val, mean_val, std_val, default_val=None, is_discrete=False):
        self.min_val = min_val
        self.max_val = max_val
        self.mean_val = mean_val
        self.std_val = std_val
        # Fall back to the mean when no explicit default is given.
        self.default_val = default_val if default_val is not None else mean_val
        self.is_discrete = is_discrete
    def get_sample_np(self):
        """Draw a clipped Gaussian sample as a NumPy array."""
        val = np.random.normal(self.mean_val, self.std_val)
        val = np.clip(val, self.min_val, self.max_val)
        return val
    def sample(self):
        """Draw one value per component and return them as a list."""
        val = self.get_sample_np()
        if self.is_discrete:
            # np.int was removed in NumPy 1.24; the builtin int is the
            # documented replacement (platform-dependent C long, as before).
            val = np.rint(val).astype(int)
        return val.tolist()
    def size(self):
        return len(self.mean_val)
class ParameterNormalizer:
    """Maps parameter tensors to [0, 1] given per-component min/max bounds."""
    def __init__(self, min_, max_):
        # Clone so later mutation of the caller's tensors cannot change us.
        self.min_ = min_.clone()
        self.max_ = max_.clone()
        self.range = self.max_ - self.min_
    def normalize(self, x):
        # Components with min == max divide by zero (NaN/Inf); nan_to_num
        # maps those to finite values.
        return torch.nan_to_num((x - self.min_) / self.range)
    def denormalize(self, x):
        # Inverse map from [0, 1] back to the original parameter range.
        return x * self.range + self.min_
    def __str__(self):
        return 'Parameter Normalizer'
class ParameterStandarizer:
    """Standardizes parameter tensors given per-component mean/std."""
    def __init__(self, mean, std):
        # Clone so later mutation of the caller's tensors cannot change us.
        self.mean = mean.clone()
        self.std = std.clone()
    def normalize(self, x):
        # Components with std == 0 divide by zero; nan_to_num keeps the
        # result finite.
        return torch.nan_to_num((x - self.mean) / self.std)
    def denormalize(self, x):
        # Inverse map from standardized space back to parameter space.
        return x * self.std + self.mean
    def __str__(self):
        return 'Parameter Standarizer'
class ParameterRegularizer:
    """Clamps parameter tensors to per-component [min, max] bounds and
    validates that a tensor already lies within them."""
    def __init__(self, min_, max_):
        # Clone so later mutation of the caller's tensors cannot change us.
        self.min_ = min_.clone()
        self.max_ = max_.clone()
    def regularize(self, x):
        """Return x clamped to the bounds (out-of-place)."""
        return torch.clamp(x, self.min_, self.max_)
    def regularize_(self, x):
        """Clamp x to the bounds in place."""
        x.clamp_(self.min_, self.max_)
    def check_valid(self, x):
        """Raise RuntimeError if any entry of x lies outside the bounds.

        Fix over the previous version: the old code only inspected batch row
        0 when printing offenders, so an out-of-range entry in another row
        raised without ever being reported. This reports every offending
        (row, column) entry, in any row, before raising.
        """
        lo = self.min_.expand_as(x)
        hi = self.max_.expand_as(x)
        below = x < lo
        if below.any():
            for row, col in torch.nonzero(below, as_tuple=False).tolist():
                print(f'For {col}th params: {x[row, col]} < {lo[row, col]}')
            raise RuntimeError('Invalid parameters')
        above = x > hi
        if above.any():
            for row, col in torch.nonzero(above, as_tuple=False).tolist():
                print(f'For {col}th params: {x[row, col]} > {hi[row, col]}')
            raise RuntimeError('Invalid parameters')
def get_normalizer(generator_name, normalization_type, batch_size, device):
    """Build a parameter transform for the named generator.

    normalization_type 'norm' -> min/max ParameterNormalizer,
    'std' -> mean/std ParameterStandarizer, anything else -> None.
    """
    if normalization_type not in ('norm', 'std'):
        return None
    params = generator_lookup_table[generator_name].get_params()
    if normalization_type == 'norm':
        return SBSGenerators.get_normalizer(params, batch_size, device)
    return SBSGenerators.get_standarizer(params, batch_size, device)
class SBSGenerators:
    """Base class for Substance generator samplers.

    Subclasses define the parameter space via get_params(); this class
    samples parameter vectors, writes them into the .sbs graph, renders the
    images with the Substance Automation Toolkit (sbscooker + sbsrender) and
    records the (image, parameters) pairs in dataset.json files.
    """
    @staticmethod
    def get_params():
        # Overridden by subclasses: return OrderedDict(name -> Sampler).
        pass
    @staticmethod
    def get_normalizer(params, batch_size, device):
        """Build a [0, 1] normalizer over the concatenated parameter vector."""
        min_, max_ = [], []
        for param_name, param_sampler in params.items():
            min_val = param_sampler.min_val
            max_val = param_sampler.max_val
            min_.extend(min_val)
            max_.extend(max_val)
        min_tensor = torch.as_tensor(min_, dtype=torch.float64, device=device)
        max_tensor = torch.as_tensor(max_, dtype=torch.float64, device=device)
        min_tensor = min_tensor.expand((batch_size, -1))
        max_tensor = max_tensor.expand((batch_size, -1))
        return ParameterNormalizer(min_tensor, max_tensor)
    @staticmethod
    def get_standarizer(params, batch_size, device):
        """Build a mean/std standarizer; samplers must expose mean_val/std_val
        (i.e. GaussianRandomSampler)."""
        mean, std = [], []
        for param_name, param_sampler in params.items():
            mean_v = param_sampler.mean_val
            std_v = param_sampler.std_val
            mean.extend(mean_v)
            std.extend(std_v)
        mean_tensor = torch.as_tensor(mean, dtype=torch.float64, device=device)
        std_tensor = torch.as_tensor(std, dtype=torch.float64, device=device)
        mean_tensor = mean_tensor.expand((batch_size, -1))
        std_tensor = std_tensor.expand((batch_size, -1))
        return ParameterStandarizer(mean_tensor, std_tensor)
    @staticmethod
    def get_regularizer(params, batch_size, device):
        """Build a clamp-to-bounds regularizer over the parameter vector."""
        min_, max_ = [], []
        for param_name, param_sampler in params.items():
            min_val = param_sampler.min_val
            max_val = param_sampler.max_val
            min_.extend(min_val)
            max_.extend(max_val)
        min_tensor = torch.as_tensor(min_, dtype=torch.float64, device=device)
        max_tensor = torch.as_tensor(max_, dtype=torch.float64, device=device)
        min_tensor = min_tensor.expand((batch_size, -1))
        max_tensor = max_tensor.expand((batch_size, -1))
        return ParameterRegularizer(min_tensor, max_tensor)
    def __init__(self, graph_filename, graph_name, sat_dir, image_res):
        # NOTE(review): attribute name is misspelled ('grah_filename'); kept
        # as-is in case external code reads it, but callers use the
        # graph_filename argument below anyway -- consider renaming.
        self.grah_filename = graph_filename
        self.graph_name = graph_name
        self.sat_dir = sat_dir
        self.image_res = image_res
        # load parameters
        self.params = self.get_params()
        # load graph
        self.graph = SimpleSBSGraph(graph_filename, self.params)
    @staticmethod
    def save_params(all_params, all_image_names, output_dir, i_batch):
        """Write one batch of (image name, parameter vector) pairs to
        dataset{i_batch}.json in StyleGAN conditional-label format."""
        assert len(all_params) == len(all_image_names)
        data = dict()
        data['labels'] = []
        for params, image_name in zip(all_params, all_image_names):
            data['labels'].append([image_name, params])
        with open(pth.join(output_dir, f'dataset{i_batch}.json'), 'w') as outfile:
            json.dump(data, outfile, indent=4)
    @staticmethod
    def combine_params(input_path, n_batch, move_to_folder=None):
        """Merge dataset0..dataset{n_batch-1}.json into a single dataset.json,
        optionally moving the per-batch files into move_to_folder."""
        assert (n_batch >= 1)
        if move_to_folder is not None:
            os.makedirs(move_to_folder, exist_ok=True)
        data_path = pth.join(input_path, 'dataset0.json')
        with open(data_path) as f:
            data = json.load(f)
        if move_to_folder is not None:
            shutil.move(data_path, pth.join(move_to_folder, f'dataset0.json'))
        for i in range(1, n_batch):
            data_path = pth.join(input_path, f'dataset{i}.json')
            with open(data_path) as f:
                data_i = json.load(f)
            data['labels'].extend(data_i['labels'])
            if move_to_folder is not None:
                shutil.move(data_path, pth.join(move_to_folder, f'dataset{i}.json'))
        output_path = pth.join(input_path, 'dataset.json')
        with open(output_path, 'w') as outfile:
            json.dump(data, outfile, indent=4)
    def sample(self, output_dir, n_samples, vis_every):
        """Sample ~n_samples random parameter sets, render them and save the
        images plus a combined dataset.json into output_dir.

        Each render of the graph produces n_nodes images at once, so
        n_samples is rounded down to a multiple of n_nodes.
        """
        n_samples = n_samples // self.graph.n_nodes * self.graph.n_nodes
        n_batch = n_samples // self.graph.n_nodes
        timer = Timer()
        timer.begin("Begin Sampling")
        for i in range(n_batch):
            params_list = []
            # sample parameters
            for nodes in self.graph.node_params_dict.values():
                params = []
                for param_name, param_sampler in self.params.items():
                    val = param_sampler.sample()
                    self.graph.set_param(nodes['params'][param_name], val, param_sampler.is_discrete)
                    params.extend(val)
                params_list.append(params)
            # save sbs
            output_graph_filename = pth.join(output_dir, f'tmp{i}.sbs')
            self.save_graph(output_graph_filename)
            image_names_list = self.save_sample(output_graph_filename, output_dir, i)
            self.save_params(params_list, image_names_list, output_dir, i)
            if i % vis_every == 0 or i == n_batch - 1:
                timer.end(f'Generated {(i+1)*self.graph.n_nodes}/{n_samples} samples')
                timer.begin()
        # combine parameter json into one file
        self.combine_params(output_dir, n_batch, move_to_folder=pth.join(output_dir, 'params'))
        # move generated sbs and sbsar files to an sbs folder
        sbs_files = glob.glob(pth.join(output_dir, '*.sbs')) + glob.glob(pth.join(output_dir, '*.sbsar'))
        sbs_out_dir = pth.join(output_dir, 'sbs')
        if pth.exists(sbs_out_dir):
            shutil.rmtree(sbs_out_dir)
        os.makedirs(sbs_out_dir)
        for sbs_file in sbs_files:
            shutil.move(sbs_file, sbs_out_dir)
    def sample_with_json(self, output_dir, json_file):
        """Re-render a fixed parameter list (a dataset.json path or an
        already-loaded 'labels' list) into output_dir, reusing the original
        image file names."""
        if isinstance(json_file, str):
            with open(json_file) as f:
                params = json.load(f)['labels']
        else:
            params = json_file
        n_samples = len(params)
        n_batch = n_samples // self.graph.n_nodes
        timer = Timer()
        timer.begin("Begin Sampling")
        for i in range(n_batch):
            image_names = []
            for k, node in enumerate(self.graph.node_params_dict.values()):
                idx = i * self.graph.n_nodes + k
                # set parameters: slice the flat vector back into the
                # per-parameter chunks, rounding discrete entries.
                s = 0
                for param_name, param_sampler in self.params.items():
                    r = param_sampler.size()
                    val = [int(np.rint(x)) if param_sampler.is_discrete else x for x in params[idx][1][s:s + r]]
                    self.graph.set_param(node['params'][param_name], val, param_sampler.is_discrete)
                    s += r
                # record image name
                image_name = pth.join(output_dir, pth.basename(params[idx][0]))
                image_names.append(image_name)
            # save sbs
            output_graph_filename = pth.join(output_dir, f'tmp{i}.sbs')
            self.save_graph(output_graph_filename)
            self.save_sample(output_graph_filename, output_dir, i, image_names)
        # move generated sbs and sbsar files to an sbs folder
        sbs_files = glob.glob(pth.join(output_dir, '*.sbs')) + glob.glob(pth.join(output_dir, '*.sbsar'))
        sbs_out_dir = pth.join(output_dir, 'sbs')
        if pth.exists(sbs_out_dir):
            shutil.rmtree(sbs_out_dir)
        os.makedirs(sbs_out_dir)
        for sbs_file in sbs_files:
            shutil.move(sbs_file, sbs_out_dir)
    # save sbs graph back to an sbs file
    def save_graph(self, output_graph_filename):
        self.graph.xml_tree.write(output_graph_filename)
    # cook and output images
    def save_sample(self, input_graph_filename, output_dir, i_batch, image_names=None):
        """Cook the .sbs with sbscooker, render it with sbsrender and move
        the per-output PNGs to their final names. Returns the list of image
        names (relative to the dataset root) for dataset.json."""
        tmp_output_dir = pth.join(output_dir, 'tmp')
        os.makedirs(tmp_output_dir, exist_ok=True)
        command_cooker = (
            f'"{os.path.join(self.sat_dir, "sbscooker")}" '
            f'--inputs "{input_graph_filename}" '
            f'--alias "sbs://{os.path.join(self.sat_dir, "resources", "packages")}" '
            f'--output-path {{inputPath}}')
        completed_process = subprocess.run(command_cooker, shell=True, capture_output=True, text=True)
        if completed_process.returncode != 0:
            raise RuntimeError(f'Error while running sbs cooker:\n{completed_process.stderr}')
        # import pdb; pdb.set_trace()
        cooked_input_graph_filename = pth.splitext(input_graph_filename)[0] + '.sbsar'
        image_format = 'png'
        command_render = (
            f'"{os.path.join(self.sat_dir, "sbsrender")}" render '
            f'--inputs "{cooked_input_graph_filename}" '
            f'--input-graph "{self.graph_name}" '
            f'--output-format "{image_format}" '
            f'--output-path "{tmp_output_dir}" '
            f'--output-name "{{outputNodeName}}"')
        completed_process = subprocess.run(command_render, shell=True, capture_output=True, text=True)
        if completed_process.returncode != 0:
            raise RuntimeError(f'Error while running sbs render:\n{completed_process.stderr}')
        image_list = [pth.join(tmp_output_dir, f'{node["name"]}.png') for node in self.graph.node_params_dict.values()]
        assert len(image_list) == self.graph.n_nodes
        image_names_list = []
        dir_name = pth.basename(output_dir)
        for i, image_filename in enumerate(image_list):
            if image_names is None:
                image_name = pth.join(output_dir, '{:08d}.png'.format(i_batch*self.graph.n_nodes + i))
            else:
                image_name = image_names[i]
            shutil.move(image_filename, image_name)
            # convert to 8bit
            self.convert(image_name)
            image_name = f'{dir_name}/{pth.basename(image_name)}'
            image_names_list.append(image_name)
        os.rmdir(tmp_output_dir)
        return image_names_list
    @staticmethod
    def convert(image_file):
        # Round-trip through read/write to normalize the bit depth.
        im = read_image(image_file)
        write_image(image_file, im)
class ArcPavement(SBSGenerators):
    """Sampler for the 'arc_pavement' Substance generator."""
    @staticmethod
    def get_params():
        """Return the ordered parameter space: name -> RandomSampler."""
        # (name, min, max, default, discrete) -- order matters: it defines
        # the layout of the flat parameter vector.
        spec = [
            ('pattern_amount', (4,), (32,), (12,), True),
            ('arcs_amount', (4,), (20,), (14,), True),
            ('pattern_scale', (0.9,), (1.0,), (1.0,), False),
            ('pattern_width', (0.7,), (0.9,), (0.8,), False),
            ('pattern_height', (0.8,), (1.0,), (0.9,), False),
            ('pattern_width_random', (0.0,), (0.2,), (0.0,), False),
            ('pattern_height_random', (0.0,), (0.2,), (0.0,), False),
            ('global_pattern_width_random', (0.0,), (0.2,), (0.0,), False),
            ('pattern_height_decrease', (0.0,), (0.5,), (0.25,), False),
            ('color_random', (0.0,), (1.0,), (0.0,), False),
        ]
        return OrderedDict(
            (name, RandomSampler(lo, hi, default, discrete))
            for name, lo, hi, default, discrete in spec)


# Maps generator names to their sampler classes.
generator_lookup_table = {'arc_pavement': ArcPavement,
                          }
| 19,510 | 37.107422 | 119 | py |
DiffProxy | DiffProxy-main/vgg.py | import os
import torch
import torch.nn as nn
import torch.nn.functional as F
vgg_path = 'pretrained/vgg_conv.pt'
# vgg definition that conveniently let's you grab the outputs from any layer
# vgg definition that conveniently let's you grab the outputs from any layer
class VGG(nn.Module):
    """VGG-19 convolutional trunk whose activations can be read at any layer.

    Layer keys: 'rXY' = ReLU after conv X_Y, 'pX' = pooling after stage X.
    Attribute names (conv1_1, ...) must stay unchanged so the pretrained
    state dict in vgg_conv.pt still loads.
    """
    def __init__(self, pool='max'):
        """
        @param[in] pool  'max' or 'avg' pooling between stages; anything else
                         raises KeyError (previously the pools were silently
                         not created, failing later in forward()).
        """
        super(VGG, self).__init__()
        # vgg modules
        self.conv1_1 = nn.Conv2d(3, 64, kernel_size=3, padding=1)
        self.conv1_2 = nn.Conv2d(64, 64, kernel_size=3, padding=1)
        self.conv2_1 = nn.Conv2d(64, 128, kernel_size=3, padding=1)
        self.conv2_2 = nn.Conv2d(128, 128, kernel_size=3, padding=1)
        self.conv3_1 = nn.Conv2d(128, 256, kernel_size=3, padding=1)
        self.conv3_2 = nn.Conv2d(256, 256, kernel_size=3, padding=1)
        self.conv3_3 = nn.Conv2d(256, 256, kernel_size=3, padding=1)
        self.conv3_4 = nn.Conv2d(256, 256, kernel_size=3, padding=1)
        self.conv4_1 = nn.Conv2d(256, 512, kernel_size=3, padding=1)
        self.conv4_2 = nn.Conv2d(512, 512, kernel_size=3, padding=1)
        self.conv4_3 = nn.Conv2d(512, 512, kernel_size=3, padding=1)
        self.conv4_4 = nn.Conv2d(512, 512, kernel_size=3, padding=1)
        self.conv5_1 = nn.Conv2d(512, 512, kernel_size=3, padding=1)
        self.conv5_2 = nn.Conv2d(512, 512, kernel_size=3, padding=1)
        self.conv5_3 = nn.Conv2d(512, 512, kernel_size=3, padding=1)
        self.conv5_4 = nn.Conv2d(512, 512, kernel_size=3, padding=1)
        pool_cls = {'max': nn.MaxPool2d, 'avg': nn.AvgPool2d}[pool]
        self.pool1 = pool_cls(kernel_size=2, stride=2)
        self.pool2 = pool_cls(kernel_size=2, stride=2)
        self.pool3 = pool_cls(kernel_size=2, stride=2)
        self.pool4 = pool_cls(kernel_size=2, stride=2)
        self.pool5 = pool_cls(kernel_size=2, stride=2)
        self.normalizer = VGG19Normalizer()
    def forward(self, x, out_keys):
        """Run the trunk and return the activations named in out_keys,
        in the same order."""
        out = {}
        out['r11'] = F.relu(self.conv1_1(x))
        out['r12'] = F.relu(self.conv1_2(out['r11']))
        out['p1'] = self.pool1(out['r12'])
        out['r21'] = F.relu(self.conv2_1(out['p1']))
        out['r22'] = F.relu(self.conv2_2(out['r21']))
        out['p2'] = self.pool2(out['r22'])
        out['r31'] = F.relu(self.conv3_1(out['p2']))
        out['r32'] = F.relu(self.conv3_2(out['r31']))
        out['r33'] = F.relu(self.conv3_3(out['r32']))
        out['r34'] = F.relu(self.conv3_4(out['r33']))
        out['p3'] = self.pool3(out['r34'])
        out['r41'] = F.relu(self.conv4_1(out['p3']))
        out['r42'] = F.relu(self.conv4_2(out['r41']))
        out['r43'] = F.relu(self.conv4_3(out['r42']))
        out['r44'] = F.relu(self.conv4_4(out['r43']))
        out['p4'] = self.pool4(out['r44'])
        out['r51'] = F.relu(self.conv5_1(out['p4']))
        out['r52'] = F.relu(self.conv5_2(out['r51']))
        out['r53'] = F.relu(self.conv5_3(out['r52']))
        out['r54'] = F.relu(self.conv5_4(out['r53']))
        out['p5'] = self.pool5(out['r54'])
        return [out[key] for key in out_keys]
    def extract_features(self, im, feature_layers, style_layers, detach_features=False, detach_styles=False):
        """Return (feature activations, Gram matrices) for an image in
        (-1, 1) RGB (or single-channel, replicated to 3 channels).

        @param[in] im              Input image batch, values in (-1, 1).
        @param[in] feature_layers  Layer keys whose raw activations to return.
        @param[in] style_layers    Layer keys whose Gram matrices to return.
        @param[in] detach_features / detach_styles  Detach the corresponding
                                   outputs from the autograd graph.
        """
        if im.shape[1] == 1:
            im = torch.cat((im, im, im), dim=1)
        x = self.normalizer.normalize(im)
        # Reuse forward() instead of duplicating the whole layer chain
        # (the previous copy had to be kept in sync by hand).
        acts = self.forward(x, list(feature_layers) + list(style_layers))
        feats = acts[:len(feature_layers)]
        style_acts = acts[len(feature_layers):]
        extracted_features = [f.detach() for f in feats] if detach_features else feats
        extracted_styles = [gram(a).detach() if detach_styles else gram(a) for a in style_acts]
        return extracted_features, extracted_styles
# get network
def get_vgg19():
    """Load the pretrained VGG-19 trunk, freeze it, and move it to CUDA when
    available.

    @returns a VGG instance with requires_grad disabled on all parameters.
    """
    vgg = VGG()
    # map_location makes a GPU-saved checkpoint loadable on CPU-only hosts;
    # the model is moved to CUDA afterwards when available.
    vgg.load_state_dict(torch.load(vgg_path, map_location='cpu'))
    for param in vgg.parameters():
        param.requires_grad = False
    if torch.cuda.is_available():
        vgg.cuda()
    return vgg
def gram(x):
    """Batched Gram matrix of feature maps.

    @param[in] x  Tensor of shape (batch, channels, height, width).
    @returns (batch, channels, channels) Gram matrices, averaged over the
             spatial positions (divided by height * width).
    """
    batch, channels, height, width = x.shape
    feats = x.view(batch, channels, height * width)
    g = torch.bmm(feats, feats.transpose(1, 2))
    return g / (height * width)
class WeightedLoss(nn.Module):
    """Weighted sum of per-element losses over two parallel lists of tensors.

    Used to combine multi-layer VGG feature/style losses, each layer with its
    own weight. CUDA-only: the criterion and the accumulator live on the GPU.
    """
    def __init__(self, weights, metric='l2'):
        """
        @param[in] weights  Per-element weights, same length as the inputs.
        @param[in] metric   'l2' (MSE) or 'l1'; anything else raises.
        """
        super(WeightedLoss, self).__init__()
        self.weights = weights
        if metric == 'l2':
            self.criterion = nn.MSELoss().cuda()
        elif metric == 'l1':
            self.criterion = nn.L1Loss().cuda()
        else:
            raise NotImplementedError('Unknown metric {}'.format(metric))
    def forward(self, x, y):
        """Return sum_i weights[i] * criterion(x[i], y[i])."""
        total = torch.tensor(0.0, requires_grad=True).cuda()
        for weight, pred, target in zip(self.weights, x, y):
            total = total + weight * self.criterion(pred, target)
        return total
class VGG19Normalizer(nn.Module):
    """Converts images between (-1, 1) RGB and the 0-255, BGR,
    mean-subtracted format expected by the Caffe-trained VGG-19 weights.
    CUDA-only: the mean buffer is created on the GPU."""
    def __init__(self):
        super(VGG19Normalizer, self).__init__()
        # ImageNet channel means, BGR order, broadcastable over (N, C, H, W).
        imagenet_mean = [0.40760392, 0.45795686, 0.48501961]
        self.mean = torch.as_tensor(imagenet_mean)[None, :, None, None].cuda()
    def normalize(self, image):
        # (-1, 1) -> (0, 1), RGB -> BGR, subtract mean, scale to 0-255.
        bgr = (image * 0.5 + 0.5)[:, [2, 1, 0], :, :]
        return (bgr - self.mean) * 255.0
    def denormalize(self, image):
        # Exact inverse of normalize().
        bgr = image / 255.0 + self.mean
        rgb = bgr[:, [2, 1, 0], :, :]
        return rgb * 2.0 - 1.0
| 6,808 | 40.266667 | 109 | py |
DiffProxy | DiffProxy-main/loss.py | import torch.nn as nn
import vgg
from training.loss import *
class ProxyLoss:
    """Reconstruction loss (L1 + VGG feature + VGG style) for training the
    differentiable proxy generator without a GAN term.

    The D and regularization arguments are accepted only for interface
    parity with the GAN variant of the loss; they are unused here.
    """
    def __init__(self, device, G, D, augment_pipe=None, r1_gamma=10, style_mixing_prob=0, pl_weight=0, pl_batch_shrink=2, pl_decay=0.01, pl_no_weight_grad=False, blur_init_sigma=0, blur_fade_kimg=0):
        super().__init__()
        self.device = device
        self.G = G
        # l1 loss
        self.l1_criterion = nn.L1Loss()
        # VGG loss: per-layer weighted L1 on features and Gram matrices.
        self.vgg19 = vgg.get_vgg19()
        self.style_layers = ['r11', 'r21', 'r31', 'r41', 'r51']
        self.style_weights = [1 / n ** 2 for n in [64, 128, 256, 512, 512]]
        self.feature_layers = ['r22', 'r32', 'r42']
        self.feature_weights = [1e-3, 1e-3, 1e-3]
        self.criterion_feature = vgg.WeightedLoss(self.feature_weights, metric='l1')
        self.criterion_style = vgg.WeightedLoss(self.style_weights, metric='l1')
        self.feat_w = 10.0
        self.style_w = 1.0
        print(f"Loss Config: l1_w = 1, feat_w = {self.feat_w}, style_w = {self.style_w}")
    def run_G(self, z, c, update_emas=False):
        # Map (z, parameter condition c) to an image through the generator.
        ws = self.G.mapping(z, c, update_emas=update_emas)
        img = self.G.synthesis(ws, update_emas=update_emas)
        return img, ws
    def accumulate_gradients(self, real_img, real_c, gen_z):
        """Compute the reconstruction loss for one batch and backpropagate.

        @param[in] real_img  Ground-truth renders from the real generator.
        @param[in] real_c    Parameter vectors conditioning the proxy.
        @param[in] gen_z     Latent codes for the proxy generator.
        """
        with torch.autograd.profiler.record_function('Gmain_forward'):
            gen_img, _gen_ws = self.run_G(gen_z, real_c)
            # l1 loss
            loss_l1 = self.l1_criterion(gen_img, real_img)
            training_stats.report('Loss/L1/loss', loss_l1)
            # VGG loss: targets are detached so gradients only flow through
            # the generated image.
            real_feat, real_style = self.vgg19.extract_features(real_img, self.feature_layers, self.style_layers,
                                                                detach_features=True, detach_styles=True)
            recon_feat, recon_style = self.vgg19.extract_features(gen_img, self.feature_layers, self.style_layers)
            feature_loss = self.criterion_feature(real_feat, recon_feat) * self.feat_w
            style_loss = self.criterion_style(real_style, recon_style) * self.style_w
            training_stats.report('Loss/Feat/loss', feature_loss)
            training_stats.report('Loss/Style/loss', style_loss)
            loss = loss_l1 + feature_loss + style_loss
            training_stats.report('Loss/G/loss', loss)
        with torch.autograd.profiler.record_function('Gmain_backward'):
            loss.backward()
class ProxyGANLoss:
    """Generator/discriminator loss for training a differentiable proxy with GAN supervision.

    The generator objective combines an L1 pixel loss, VGG19 feature and style
    (perceptual) losses, and a non-saturating GAN loss; the discriminator uses
    the standard softplus logistic loss plus an R1 gradient penalty.
    Path-length regularization is intentionally disabled (pl_weight forced to 0).
    """
    def __init__(self, device, G, D, augment_pipe=None, r1_gamma=10, style_mixing_prob=0, pl_weight=0, pl_batch_shrink=2, pl_decay=0.01, pl_no_weight_grad=False, blur_init_sigma=0, blur_fade_kimg=0):
        # NOTE(review): augment_pipe, style_mixing_prob and the pl_* arguments
        # are accepted for signature compatibility but not used here.
        super().__init__()
        self.device = device
        self.G = G
        self.D = D
        self.r1_gamma = r1_gamma
        self.pl_weight = 0 # do not include path length regularization
        self.blur_init_sigma = blur_init_sigma
        self.blur_fade_kimg = blur_fade_kimg
        # l1 loss
        self.l1_criterion = nn.L1Loss()
        # VGG loss
        self.vgg19 = vgg.get_vgg19()
        self.style_layers = ['r11', 'r21', 'r31', 'r41', 'r51']
        # Per-layer style weights 1/C^2 for the listed channel counts C.
        self.style_weights = [1 / n ** 2 for n in [64, 128, 256, 512, 512]]
        self.feature_layers = ['r22', 'r32', 'r42']
        self.feature_weights = [1e-3, 1e-3, 1e-3]
        self.criterion_feature = vgg.WeightedLoss(self.feature_weights, metric='l1')
        self.criterion_style = vgg.WeightedLoss(self.style_weights, metric='l1')
        # Relative weights of the generator-loss terms.
        self.l1_w = 1.0
        self.feat_w = 1.0
        self.style_w = 1.0
        self.gan_w = 0.1
        print(f"Loss Config: l1_w = {self.l1_w}, feat_w = {self.feat_w}, style_w = {self.style_w}, gan_w = {self.gan_w}")
    def run_G(self, z, c, update_emas=False):
        """Run the generator: map (z, c) to styles, then synthesize an image."""
        ws = self.G.mapping(z, c, update_emas=update_emas)
        img = self.G.synthesis(ws, update_emas=update_emas)
        return img, ws
    def run_D(self, img, c, blur_sigma=0, update_emas=False):
        """Run the discriminator, optionally blurring the input first (warm-up trick)."""
        blur_size = np.floor(blur_sigma * 3)
        if blur_size > 0:
            with torch.autograd.profiler.record_function('blur'):
                # Separable Gaussian-like kernel built from 2^(-x^2/sigma^2), normalized to sum 1.
                f = torch.arange(-blur_size, blur_size + 1, device=img.device).div(blur_sigma).square().neg().exp2()
                img = upfirdn2d.filter2d(img, f / f.sum())
        logits = self.D(img, c, update_emas=update_emas)
        return logits
    def accumulate_gradients(self, phase, real_img, real_c, gen_z, gain, cur_nimg):
        """Compute the losses for one training phase and backprop them.

        `phase` selects which terms run; gradients are scaled by `gain` and
        accumulated into G/D. `cur_nimg` drives the blur fade-out schedule.
        """
        assert phase in ['Gmain', 'Greg', 'Gboth', 'Dmain', 'Dreg', 'Dboth']
        # Collapse regularization-only phases when the corresponding weight is zero.
        if self.pl_weight == 0:
            phase = {'Greg': 'none', 'Gboth': 'Gmain'}.get(phase, phase)
        if self.r1_gamma == 0:
            phase = {'Dreg': 'none', 'Dboth': 'Dmain'}.get(phase, phase)
        # Blur strength decays linearly from blur_init_sigma to 0 over blur_fade_kimg kimg.
        blur_sigma = max(1 - cur_nimg / (self.blur_fade_kimg * 1e3), 0) * self.blur_init_sigma if self.blur_fade_kimg > 0 else 0
        # Gmain: Maximize logits for generated images.
        if phase in ['Gmain', 'Gboth']:
            with torch.autograd.profiler.record_function('Gmain_forward'):
                gen_img, _gen_ws = self.run_G(gen_z, real_c)
                gen_logits = self.run_D(gen_img, real_c, blur_sigma=blur_sigma)
                training_stats.report('Loss/scores/fake', gen_logits)
                training_stats.report('Loss/signs/fake', gen_logits.sign())
                # l1 loss
                loss_l1 = self.l1_criterion(gen_img, real_img) * self.l1_w
                training_stats.report('Loss/L1/loss', loss_l1)
                # VGG loss
                # Real-image features/styles are detached: they act as fixed targets.
                real_feat, real_style = self.vgg19.extract_features(real_img, self.feature_layers, self.style_layers,
                                                                    detach_features=True, detach_styles=True)
                recon_feat, recon_style = self.vgg19.extract_features(gen_img, self.feature_layers, self.style_layers)
                feature_loss = self.criterion_feature(real_feat, recon_feat) * self.feat_w
                style_loss = self.criterion_style(real_style, recon_style) * self.style_w
                training_stats.report('Loss/Feat/loss', feature_loss)
                training_stats.report('Loss/Style/loss', style_loss)
                # GAN loss
                loss_G = torch.nn.functional.softplus(-gen_logits).mean()*self.gan_w # -log(sigmoid(gen_logits))
                training_stats.report('Loss/GAN/loss', loss_G)
                loss_Gmain = loss_G + loss_l1 + feature_loss + style_loss
                training_stats.report('Loss/G/loss', loss_Gmain)
            with torch.autograd.profiler.record_function('Gmain_backward'):
                # All components above are scalars, so no .mean() is needed here.
                loss_Gmain.mul(gain).backward()
                # loss_Gmain.mean().mul(gain).backward()
        # Dmain: Minimize logits for generated images.
        loss_Dgen = 0
        if phase in ['Dmain', 'Dboth']:
            with torch.autograd.profiler.record_function('Dgen_forward'):
                gen_img, _gen_ws = self.run_G(gen_z, real_c, update_emas=True)
                gen_logits = self.run_D(gen_img, real_c, blur_sigma=blur_sigma, update_emas=True)
                training_stats.report('Loss/scores/fake', gen_logits)
                training_stats.report('Loss/signs/fake', gen_logits.sign())
                loss_Dgen = torch.nn.functional.softplus(gen_logits) # -log(1 - sigmoid(gen_logits))
            with torch.autograd.profiler.record_function('Dgen_backward'):
                loss_Dgen.mean().mul(gain).backward()
        # Dmain: Maximize logits for real images.
        # Dr1: Apply R1 regularization.
        if phase in ['Dmain', 'Dreg', 'Dboth']:
            name = 'Dreal' if phase == 'Dmain' else 'Dr1' if phase == 'Dreg' else 'Dreal_Dr1'
            with torch.autograd.profiler.record_function(name + '_forward'):
                # Real images need grad only when the R1 penalty is computed.
                real_img_tmp = real_img.detach().requires_grad_(phase in ['Dreg', 'Dboth'])
                real_logits = self.run_D(real_img_tmp, real_c, blur_sigma=blur_sigma)
                training_stats.report('Loss/scores/real', real_logits)
                training_stats.report('Loss/signs/real', real_logits.sign())
                loss_Dreal = 0
                if phase in ['Dmain', 'Dboth']:
                    loss_Dreal = torch.nn.functional.softplus(-real_logits) # -log(sigmoid(real_logits))
                    training_stats.report('Loss/D/loss', loss_Dgen + loss_Dreal)
                loss_Dr1 = 0
                if phase in ['Dreg', 'Dboth']:
                    with torch.autograd.profiler.record_function('r1_grads'), conv2d_gradfix.no_weight_gradients():
                        r1_grads = torch.autograd.grad(outputs=[real_logits.sum()], inputs=[real_img_tmp], create_graph=True, only_inputs=True)[0]
                    r1_penalty = r1_grads.square().sum([1,2,3])
                    loss_Dr1 = r1_penalty * (self.r1_gamma / 2)
                    training_stats.report('Loss/r1_penalty', r1_penalty)
                    training_stats.report('Loss/D/reg', loss_Dr1)
            with torch.autograd.profiler.record_function(name + '_backward'):
                (loss_Dreal + loss_Dr1).mean().mul(gain).backward()
| 9,171 | 50.52809 | 199 | py |
DiffProxy | DiffProxy-main/differentiable_generator.py | import sys
import os.path as pth
import torch.nn as nn
import torch
import pickle
import cv2
from utils import read_image
from sbs_generators import generator_lookup_table, SBSGenerators
sys.path.append('stylegan')
sat_dir = 'C:/Program Files/Allegorithmic/Substance Automation Toolkit'
sbs_lib = {'arc_pavement': './data/sbs/arc_pavement.sbs'}
def get_real_generator(generator_name):
    """Instantiate the real (Substance-backed) generator for *generator_name*.

    Looks up the .sbs graph path in `sbs_lib` and constructs the matching
    generator class at resolution 256; raises RuntimeError for unknown names.
    """
    graph_filename = sbs_lib.get(generator_name)
    if graph_filename is None:
        raise RuntimeError(f'Cannot find {generator_name}\'s real signature')
    return generator_lookup_table[generator_name](graph_filename, 'generator', sat_dir, 256)
class DifferentiableGenerator(nn.Module):
    """Abstract base class for differentiable procedural generators.

    Subclasses render an image from an (optionally optimizable) parameter
    vector. The default hooks below are no-ops so that parameter-free
    generators only need to implement ``forward``.
    """

    def output(self, *arg):
        """Render the generator output; extra positional arguments are ignored."""
        return self.forward()

    def sample_params(self):
        """Draw a random parameter vector (none by default)."""
        return None

    def get_params(self):
        """Return the current parameter vector (none by default)."""
        return None

    def set_params(self, params):
        """Install a parameter vector (no-op by default)."""
        pass

    def regularize(self):
        """Project parameters back onto their valid range (no-op by default)."""
        pass

    def get_optimizable(self):
        """Return the tensors an optimizer should update; subclasses must override."""
        raise NotImplementedError
class StyleGANCond(DifferentiableGenerator):
    """Conditional StyleGAN proxy: parameters condition the mapping network.

    The optimizable quantity is a normalized parameter vector (`self.params`);
    `forward` feeds it as the conditioning input of a pretrained StyleGAN and
    `output` renders the same parameters through the real Substance generator.
    Requires a CUDA device.
    """
    @staticmethod
    def load_pretrained_model(model_path):
        """Load a pickled StyleGAN checkpoint, preferring the EMA generator."""
        with open(model_path, 'rb') as f:
            dat = pickle.load(f)
            if 'G_ema' in dat:
                G = dat['G_ema']
            else:
                G = dat['G']
        G = G.eval().requires_grad_(False).cuda() # torch.nn.Module
        return G
    def get_normalizer(self):
        """Build a min/max normalizer from the generator's parameter spec."""
        params = generator_lookup_table[self.generator_name].get_params()
        normalizer = SBSGenerators.get_normalizer(params, batch_size=1, device=self.device)
        return normalizer
    def get_standarizer(self):
        """Build a mean/std standardizer from the generator's parameter spec."""
        params = generator_lookup_table[self.generator_name].get_params()
        normalizer = SBSGenerators.get_standarizer(params, batch_size=1, device=self.device)
        return normalizer
    def get_reguluarizer(self):
        # NOTE(review): method name misspells "regularizer"; kept as-is since
        # it is referenced by this name in __init__.
        params = generator_lookup_table[self.generator_name].get_params()
        regularizer = SBSGenerators.get_regularizer(params, batch_size=1, device=self.device)
        return regularizer
    def init_avg(self):
        """Initialize parameters at the center of their range (or the mean)."""
        assert self.normalization is not None
        if self.normalization == 'norm':
            self.set_params((self.normalizer.min_ + self.normalizer.max_)*0.5)
        else:
            self.set_params(self.normalizer.mean)
    def init_rand(self):
        """Initialize parameters with a random sample."""
        params = self.sample_params()
        self.set_params(params)
    def sample_params(self):
        """Sample unnormalized parameters: uniform in [min, max] or Gaussian(mean, std)."""
        if self.normalization == 'norm':
            params = torch.rand_like(self.normalizer.min_) * (self.normalizer.max_ - self.normalizer.min_) + self.normalizer.min_
        else:
            params = torch.randn_like(self.normalizer.mean) * self.normalizer.std + self.normalizer.mean
        self.regularizer.regularize_(params)
        return params
    def init_direct(self, init_params):
        """Initialize from an explicit parameter list (one batch row)."""
        params = torch.as_tensor(init_params, device=self.device, dtype=torch.float64).unsqueeze(0)
        assert params.ndim == 2
        self.set_params(params)
    def __init__(self, generator_name, model_path, init, model_type='none'):
        # NOTE(review): the default model_type='none' would fail the assert
        # below; callers must pass 'norm' or 'std' explicitly.
        super(StyleGANCond, self).__init__()
        self.model_path = model_path
        self.generator_name = generator_name
        self.init = init
        self.G = self.load_pretrained_model(model_path)
        self.img_resolution = self.G.img_resolution
        self.real_G = None  # real Substance generator, created lazily in output()
        self.params = None
        self.optimizable = None
        self.device = torch.device('cuda')
        self.model_type = model_type
        assert model_type in ['norm', 'std']
        self.normalization = model_type
        if self.normalization == 'norm':
            self.normalizer = self.get_normalizer()
        else:
            self.normalizer = self.get_standarizer()
        self.regularizer = self.get_reguluarizer()
        init_method = init['method']
        if init_method == 'avg':
            self.init_avg()
        elif init_method == 'rand':
            self.init_rand()
        elif init_method == 'direct':
            self.init_direct(init['init_params'])
        else:
            raise RuntimeError("Unknown initialization method")
    def get_optimizable(self):
        """Return the optimizable tensors set up by set_params()."""
        return self.optimizable
    def forward(self):
        """Render via the StyleGAN proxy, conditioned on the current parameters."""
        ws = self.G.mapping(None, self.params)
        im = self.G.synthesis(ws)
        im = (im + 1.0) / 2.0 # (-1, 1) to (0, 1)
        return im.clamp(0, 1)
    def regularize(self):
        """Clamp parameters to their valid (unnormalized) range in place."""
        params = self.get_params()
        params = self.regularizer.regularize(params)
        params = self.normalizer.normalize(params)
        self.params.data = params.data
    def get_params(self):
        """Return the current parameters in unnormalized (generator) space."""
        return self.normalizer.denormalize(self.params)
    def set_params(self, params, validate=True):
        """Store *params* (unnormalized) as the new optimizable parameter tensor."""
        if validate:
            # params are unnormalized
            self.regularizer.check_valid(params)
        self.params = self.normalizer.normalize(params).detach().clone()
        self.params.requires_grad = True
        self.optimizable = [self.params]
    # get real generator map
    def output(self, tmp_dir, tmp_image_filename):
        """Render the current parameters through the REAL generator (via disk)."""
        if self.real_G is None:
            self.real_G = get_real_generator(self.generator_name)
        params = self.get_params()[0].detach().cpu().tolist()
        json_file = [(tmp_image_filename, params)]
        self.real_G.sample_with_json(tmp_dir, json_file)
        # reload image from disk
        image_filename = pth.join(tmp_dir, tmp_image_filename)
        image_np = read_image(image_filename)
        image_np = cv2.resize(image_np, (self.G.img_resolution, self.G.img_resolution))
        assert image_np.ndim == 2
        image = torch.as_tensor(image_np, dtype=torch.float32, device=self.device).unsqueeze(0).unsqueeze(0)
        return image
DiffProxy | DiffProxy-main/stylegan/legacy.py | # Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
"""Converting legacy network pickle into the new format."""
import click
import pickle
import re
import copy
import numpy as np
import torch
import dnnlib
from torch_utils import misc
#----------------------------------------------------------------------------
def load_network_pkl(f, force_fp16=False):
    """Load a network pickle, converting legacy TensorFlow pickles on the fly.

    Legacy TF pickles (a 3-tuple of network stubs G, D, Gs) are converted to
    PyTorch modules. Returns a dict with keys 'G', 'D', 'G_ema',
    'training_set_kwargs' and 'augment_pipe'. With force_fp16 the networks
    are rebuilt with num_fp16_res=4 and conv_clamp=256.
    """
    data = _LegacyUnpickler(f).load()
    # Legacy TensorFlow pickle => convert.
    if isinstance(data, tuple) and len(data) == 3 and all(isinstance(net, _TFNetworkStub) for net in data):
        tf_G, tf_D, tf_Gs = data
        G = convert_tf_generator(tf_G)
        D = convert_tf_discriminator(tf_D)
        G_ema = convert_tf_generator(tf_Gs)
        data = dict(G=G, D=D, G_ema=G_ema)
    # Add missing fields.
    if 'training_set_kwargs' not in data:
        data['training_set_kwargs'] = None
    if 'augment_pipe' not in data:
        data['augment_pipe'] = None
    # Validate contents.
    assert isinstance(data['G'], torch.nn.Module)
    assert isinstance(data['D'], torch.nn.Module)
    assert isinstance(data['G_ema'], torch.nn.Module)
    assert isinstance(data['training_set_kwargs'], (dict, type(None)))
    assert isinstance(data['augment_pipe'], (torch.nn.Module, type(None)))
    # Force FP16.
    if force_fp16:
        for key in ['G', 'D', 'G_ema']:
            old = data[key]
            kwargs = copy.deepcopy(old.init_kwargs)
            # Mutate the nested synthesis kwargs when present, else the top level.
            fp16_kwargs = kwargs.get('synthesis_kwargs', kwargs)
            fp16_kwargs.num_fp16_res = 4
            fp16_kwargs.conv_clamp = 256
            # Rebuild the module only if the kwargs actually changed.
            if kwargs != old.init_kwargs:
                new = type(old)(**kwargs).eval().requires_grad_(False)
                misc.copy_params_and_buffers(old, new, require_all=True)
                data[key] = new
    return data
#----------------------------------------------------------------------------
class _TFNetworkStub(dnnlib.EasyDict):
    """Stand-in for `dnnlib.tflib.network.Network` when unpickling legacy TF pickles."""
    pass
class _LegacyUnpickler(pickle.Unpickler):
def find_class(self, module, name):
if module == 'dnnlib.tflib.network' and name == 'Network':
return _TFNetworkStub
return super().find_class(module, name)
#----------------------------------------------------------------------------
def _collect_tf_params(tf_net):
# pylint: disable=protected-access
tf_params = dict()
def recurse(prefix, tf_net):
for name, value in tf_net.variables:
tf_params[prefix + name] = value
for name, comp in tf_net.components.items():
recurse(prefix + name + '/', comp)
recurse('', tf_net)
return tf_params
#----------------------------------------------------------------------------
def _populate_module_params(module, *patterns):
    """Copy values into *module*'s parameters/buffers by regex-matching names.

    *patterns* is a flat sequence of (regex, value_fn) pairs. For every named
    parameter/buffer the first fully-matching regex wins; its value_fn is
    called with the regex capture groups and the result copied into the
    tensor. A value_fn of None means "leave the tensor untouched". Any
    unmatched name, or a copy failure, prints the offending tensor and
    re-raises.
    """
    for name, tensor in misc.named_params_and_buffers(module):
        found = False
        value = None
        for pattern, value_fn in zip(patterns[0::2], patterns[1::2]):
            match = re.fullmatch(pattern, name)
            if match:
                found = True
                if value_fn is not None:
                    value = value_fn(*match.groups())
                break
        try:
            assert found
            if value is not None:
                tensor.copy_(torch.from_numpy(np.array(value)))
        except:
            # Report which tensor failed (name + shape) before re-raising.
            print(name, list(tensor.shape))
            raise
#----------------------------------------------------------------------------
def convert_tf_generator(tf_G):
    """Convert a legacy TensorFlow generator into a PyTorch Generator.

    Translates the TF network stub's static kwargs into constructor kwargs for
    `training.networks_stylegan2.Generator`, then copies every variable across
    via `_populate_module_params`.

    Raises:
        ValueError: if the pickle version is too old or an unknown TF kwarg
            is encountered.
    """
    if tf_G.version < 4:
        raise ValueError('TensorFlow pickle version too low')

    # Collect kwargs, remembering which TF names were consumed so leftovers
    # can be reported as unknown.
    tf_kwargs = tf_G.static_kwargs
    known_kwargs = set()
    def kwarg(tf_name, default=None, none=None):
        known_kwargs.add(tf_name)
        val = tf_kwargs.get(tf_name, default)
        return val if val is not None else none

    # Convert kwargs.
    from training import networks_stylegan2
    network_class = networks_stylegan2.Generator
    kwargs = dnnlib.EasyDict(
        z_dim               = kwarg('latent_size',          512),
        c_dim               = kwarg('label_size',           0),
        w_dim               = kwarg('dlatent_size',         512),
        img_resolution      = kwarg('resolution',           1024),
        img_channels        = kwarg('num_channels',         3),
        channel_base        = kwarg('fmap_base',            16384) * 2,
        channel_max         = kwarg('fmap_max',             512),
        num_fp16_res        = kwarg('num_fp16_res',         0),
        conv_clamp          = kwarg('conv_clamp',           None),
        architecture        = kwarg('architecture',         'skip'),
        resample_filter     = kwarg('resample_kernel',      [1,3,3,1]),
        use_noise           = kwarg('use_noise',            True),
        activation          = kwarg('nonlinearity',         'lrelu'),
        mapping_kwargs = dnnlib.EasyDict(
            num_layers      = kwarg('mapping_layers',       8),
            embed_features  = kwarg('label_fmaps',          None),
            layer_features  = kwarg('mapping_fmaps',        None),
            activation      = kwarg('mapping_nonlinearity', 'lrelu'),
            lr_multiplier   = kwarg('mapping_lrmul',        0.01),
            w_avg_beta      = kwarg('w_avg_beta',           0.995, none=1),
        ),
    )

    # Check for unknown kwargs. These TF options have no PyTorch counterpart;
    # calling kwarg() just marks them as known/ignored.
    kwarg('truncation_psi')
    kwarg('truncation_cutoff')
    kwarg('style_mixing_prob')
    kwarg('structure')
    kwarg('conditioning')
    kwarg('fused_modconv')
    unknown_kwargs = list(set(tf_kwargs.keys()) - known_kwargs)
    if len(unknown_kwargs) > 0:
        raise ValueError('Unknown TensorFlow kwarg', unknown_kwargs[0])

    # Collect params. Very old progressive-growing pickles store per-lod
    # ToRGB layers; remap them to per-resolution names and force the
    # non-skip architecture for them.
    tf_params = _collect_tf_params(tf_G)
    for name, value in list(tf_params.items()):
        match = re.fullmatch(r'ToRGB_lod(\d+)/(.*)', name)
        if match:
            r = kwargs.img_resolution // (2 ** int(match.group(1)))
            tf_params[f'{r}x{r}/ToRGB/{match.group(2)}'] = value
            # Bug fix: `kwargs` is a flat EasyDict here (no 'synthesis' key),
            # so the previous `kwargs.synthesis.kwargs.architecture` access
            # raised AttributeError. Set the top-level key directly, mirroring
            # convert_tf_discriminator().
            kwargs.architecture = 'orig'
    #for name, value in tf_params.items(): print(f'{name:<50s}{list(value.shape)}')

    # Convert params.
    G = network_class(**kwargs).eval().requires_grad_(False)
    # pylint: disable=unnecessary-lambda
    # pylint: disable=f-string-without-interpolation
    _populate_module_params(G,
        r'mapping\.w_avg',                                  lambda:     tf_params[f'dlatent_avg'],
        r'mapping\.embed\.weight',                          lambda:     tf_params[f'mapping/LabelEmbed/weight'].transpose(),
        r'mapping\.embed\.bias',                            lambda:     tf_params[f'mapping/LabelEmbed/bias'],
        r'mapping\.fc(\d+)\.weight',                        lambda i:   tf_params[f'mapping/Dense{i}/weight'].transpose(),
        r'mapping\.fc(\d+)\.bias',                          lambda i:   tf_params[f'mapping/Dense{i}/bias'],
        r'synthesis\.b4\.const',                            lambda:     tf_params[f'synthesis/4x4/Const/const'][0],
        r'synthesis\.b4\.conv1\.weight',                    lambda:     tf_params[f'synthesis/4x4/Conv/weight'].transpose(3, 2, 0, 1),
        r'synthesis\.b4\.conv1\.bias',                      lambda:     tf_params[f'synthesis/4x4/Conv/bias'],
        r'synthesis\.b4\.conv1\.noise_const',               lambda:     tf_params[f'synthesis/noise0'][0, 0],
        r'synthesis\.b4\.conv1\.noise_strength',            lambda:     tf_params[f'synthesis/4x4/Conv/noise_strength'],
        r'synthesis\.b4\.conv1\.affine\.weight',            lambda:     tf_params[f'synthesis/4x4/Conv/mod_weight'].transpose(),
        r'synthesis\.b4\.conv1\.affine\.bias',              lambda:     tf_params[f'synthesis/4x4/Conv/mod_bias'] + 1,
        r'synthesis\.b(\d+)\.conv0\.weight',                lambda r:   tf_params[f'synthesis/{r}x{r}/Conv0_up/weight'][::-1, ::-1].transpose(3, 2, 0, 1),
        r'synthesis\.b(\d+)\.conv0\.bias',                  lambda r:   tf_params[f'synthesis/{r}x{r}/Conv0_up/bias'],
        r'synthesis\.b(\d+)\.conv0\.noise_const',           lambda r:   tf_params[f'synthesis/noise{int(np.log2(int(r)))*2-5}'][0, 0],
        r'synthesis\.b(\d+)\.conv0\.noise_strength',        lambda r:   tf_params[f'synthesis/{r}x{r}/Conv0_up/noise_strength'],
        r'synthesis\.b(\d+)\.conv0\.affine\.weight',        lambda r:   tf_params[f'synthesis/{r}x{r}/Conv0_up/mod_weight'].transpose(),
        r'synthesis\.b(\d+)\.conv0\.affine\.bias',          lambda r:   tf_params[f'synthesis/{r}x{r}/Conv0_up/mod_bias'] + 1,
        r'synthesis\.b(\d+)\.conv1\.weight',                lambda r:   tf_params[f'synthesis/{r}x{r}/Conv1/weight'].transpose(3, 2, 0, 1),
        r'synthesis\.b(\d+)\.conv1\.bias',                  lambda r:   tf_params[f'synthesis/{r}x{r}/Conv1/bias'],
        r'synthesis\.b(\d+)\.conv1\.noise_const',           lambda r:   tf_params[f'synthesis/noise{int(np.log2(int(r)))*2-4}'][0, 0],
        r'synthesis\.b(\d+)\.conv1\.noise_strength',        lambda r:   tf_params[f'synthesis/{r}x{r}/Conv1/noise_strength'],
        r'synthesis\.b(\d+)\.conv1\.affine\.weight',        lambda r:   tf_params[f'synthesis/{r}x{r}/Conv1/mod_weight'].transpose(),
        r'synthesis\.b(\d+)\.conv1\.affine\.bias',          lambda r:   tf_params[f'synthesis/{r}x{r}/Conv1/mod_bias'] + 1,
        r'synthesis\.b(\d+)\.torgb\.weight',                lambda r:   tf_params[f'synthesis/{r}x{r}/ToRGB/weight'].transpose(3, 2, 0, 1),
        r'synthesis\.b(\d+)\.torgb\.bias',                  lambda r:   tf_params[f'synthesis/{r}x{r}/ToRGB/bias'],
        r'synthesis\.b(\d+)\.torgb\.affine\.weight',        lambda r:   tf_params[f'synthesis/{r}x{r}/ToRGB/mod_weight'].transpose(),
        r'synthesis\.b(\d+)\.torgb\.affine\.bias',          lambda r:   tf_params[f'synthesis/{r}x{r}/ToRGB/mod_bias'] + 1,
        r'synthesis\.b(\d+)\.skip\.weight',                 lambda r:   tf_params[f'synthesis/{r}x{r}/Skip/weight'][::-1, ::-1].transpose(3, 2, 0, 1),
        r'.*\.resample_filter',                             None,
        r'.*\.act_filter',                                  None,
    )
    return G
#----------------------------------------------------------------------------
def convert_tf_discriminator(tf_D):
    """Convert a legacy TensorFlow discriminator into a PyTorch Discriminator.

    Mirrors convert_tf_generator(): translates static kwargs, remaps very old
    per-lod FromRGB layers, builds `training.networks_stylegan2.Discriminator`
    and copies all variables across.

    Raises:
        ValueError: if the pickle version is too old or an unknown TF kwarg
            is encountered.
    """
    if tf_D.version < 4:
        raise ValueError('TensorFlow pickle version too low')
    # Collect kwargs.
    tf_kwargs = tf_D.static_kwargs
    known_kwargs = set()
    def kwarg(tf_name, default=None):
        known_kwargs.add(tf_name)
        return tf_kwargs.get(tf_name, default)
    # Convert kwargs.
    kwargs = dnnlib.EasyDict(
        c_dim                   = kwarg('label_size',           0),
        img_resolution          = kwarg('resolution',           1024),
        img_channels            = kwarg('num_channels',         3),
        architecture            = kwarg('architecture',         'resnet'),
        channel_base            = kwarg('fmap_base',            16384) * 2,
        channel_max             = kwarg('fmap_max',             512),
        num_fp16_res            = kwarg('num_fp16_res',         0),
        conv_clamp              = kwarg('conv_clamp',           None),
        cmap_dim                = kwarg('mapping_fmaps',        None),
        block_kwargs = dnnlib.EasyDict(
            activation          = kwarg('nonlinearity',         'lrelu'),
            resample_filter     = kwarg('resample_kernel',      [1,3,3,1]),
            freeze_layers       = kwarg('freeze_layers',        0),
        ),
        mapping_kwargs = dnnlib.EasyDict(
            num_layers          = kwarg('mapping_layers',       0),
            embed_features      = kwarg('mapping_fmaps',        None),
            layer_features      = kwarg('mapping_fmaps',        None),
            activation          = kwarg('nonlinearity',         'lrelu'),
            lr_multiplier       = kwarg('mapping_lrmul',        0.1),
        ),
        epilogue_kwargs = dnnlib.EasyDict(
            mbstd_group_size    = kwarg('mbstd_group_size',     None),
            mbstd_num_channels  = kwarg('mbstd_num_features',   1),
            activation          = kwarg('nonlinearity',         'lrelu'),
        ),
    )
    # Check for unknown kwargs.
    kwarg('structure')
    kwarg('conditioning')
    unknown_kwargs = list(set(tf_kwargs.keys()) - known_kwargs)
    if len(unknown_kwargs) > 0:
        raise ValueError('Unknown TensorFlow kwarg', unknown_kwargs[0])
    # Collect params. Old progressive-growing pickles store per-lod FromRGB
    # layers; remap them to per-resolution names and force the 'orig' architecture.
    tf_params = _collect_tf_params(tf_D)
    for name, value in list(tf_params.items()):
        match = re.fullmatch(r'FromRGB_lod(\d+)/(.*)', name)
        if match:
            r = kwargs.img_resolution // (2 ** int(match.group(1)))
            tf_params[f'{r}x{r}/FromRGB/{match.group(2)}'] = value
            kwargs.architecture = 'orig'
    #for name, value in tf_params.items(): print(f'{name:<50s}{list(value.shape)}')
    # Convert params.
    from training import networks_stylegan2
    D = networks_stylegan2.Discriminator(**kwargs).eval().requires_grad_(False)
    # pylint: disable=unnecessary-lambda
    # pylint: disable=f-string-without-interpolation
    _populate_module_params(D,
        r'b(\d+)\.fromrgb\.weight',     lambda r:       tf_params[f'{r}x{r}/FromRGB/weight'].transpose(3, 2, 0, 1),
        r'b(\d+)\.fromrgb\.bias',       lambda r:       tf_params[f'{r}x{r}/FromRGB/bias'],
        r'b(\d+)\.conv(\d+)\.weight',   lambda r, i:    tf_params[f'{r}x{r}/Conv{i}{["","_down"][int(i)]}/weight'].transpose(3, 2, 0, 1),
        r'b(\d+)\.conv(\d+)\.bias',     lambda r, i:    tf_params[f'{r}x{r}/Conv{i}{["","_down"][int(i)]}/bias'],
        r'b(\d+)\.skip\.weight',        lambda r:       tf_params[f'{r}x{r}/Skip/weight'].transpose(3, 2, 0, 1),
        r'mapping\.embed\.weight',      lambda:         tf_params[f'LabelEmbed/weight'].transpose(),
        r'mapping\.embed\.bias',        lambda:         tf_params[f'LabelEmbed/bias'],
        r'mapping\.fc(\d+)\.weight',    lambda i:       tf_params[f'Mapping{i}/weight'].transpose(),
        r'mapping\.fc(\d+)\.bias',      lambda i:       tf_params[f'Mapping{i}/bias'],
        r'b4\.conv\.weight',            lambda:         tf_params[f'4x4/Conv/weight'].transpose(3, 2, 0, 1),
        r'b4\.conv\.bias',              lambda:         tf_params[f'4x4/Conv/bias'],
        r'b4\.fc\.weight',              lambda:         tf_params[f'4x4/Dense0/weight'].transpose(),
        r'b4\.fc\.bias',                lambda:         tf_params[f'4x4/Dense0/bias'],
        r'b4\.out\.weight',             lambda:         tf_params[f'Output/weight'].transpose(),
        r'b4\.out\.bias',               lambda:         tf_params[f'Output/bias'],
        r'.*\.resample_filter',         None,
    )
    return D
#----------------------------------------------------------------------------
# CLI wrapper: options are parsed by click and passed in as keyword arguments.
# NOTE: the docstring below doubles as the click help text, so it is left untouched.
@click.command()
@click.option('--source', help='Input pickle', required=True, metavar='PATH')
@click.option('--dest', help='Output pickle', required=True, metavar='PATH')
@click.option('--force-fp16', help='Force the networks to use FP16', type=bool, default=False, metavar='BOOL', show_default=True)
def convert_network_pickle(source, dest, force_fp16):
    """Convert legacy network pickle into the native PyTorch format.
    The tool is able to load the main network configurations exported using the TensorFlow version of StyleGAN2 or StyleGAN2-ADA.
    It does not support e.g. StyleGAN2-ADA comparison methods, StyleGAN2 configs A-D, or StyleGAN1 networks.
    Example:
    \b
    python legacy.py \\
        --source=https://nvlabs-fi-cdn.nvidia.com/stylegan2/networks/stylegan2-cat-config-f.pkl \\
        --dest=stylegan2-cat-config-f.pkl
    """
    print(f'Loading "{source}"...')
    # open_url handles both local paths and URLs.
    with dnnlib.util.open_url(source) as f:
        data = load_network_pkl(f, force_fp16=force_fp16)
    print(f'Saving "{dest}"...')
    with open(dest, 'wb') as f:
        pickle.dump(data, f)
    print('Done.')
#----------------------------------------------------------------------------
# Script entry point: hand control to the click command.
if __name__ == "__main__":
    convert_network_pickle() # pylint: disable=no-value-for-parameter
#----------------------------------------------------------------------------
| 16,561 | 50.117284 | 154 | py |
DiffProxy | DiffProxy-main/stylegan/train.py | # Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
"""Train a GAN using the techniques described in the paper
"Alias-Free Generative Adversarial Networks"."""
import os
import click
import re
import json
import tempfile
import torch
import dnnlib
from training import training_loop_gan, training_loop_nogan
from metrics import metric_main
from torch_utils import training_stats
from torch_utils import custom_ops
#----------------------------------------------------------------------------
def subprocess_fn(rank, c, temp_dir, no_gan):
    """Per-process entry point: set up logging/distributed state, run training.

    Args:
        rank:     index of this process in [0, c.num_gpus).
        c:        full training config (EasyDict) assembled by main().
        temp_dir: scratch directory shared by all ranks (hosts the
                  torch.distributed init file).
        no_gan:   if True, run the reconstruction-only training loop instead
                  of the GAN loop.
    """
    dnnlib.util.Logger(file_name=os.path.join(c.run_dir, 'log.txt'), file_mode='a', should_flush=True)
    # Init torch.distributed.
    if c.num_gpus > 1:
        init_file = os.path.abspath(os.path.join(temp_dir, '.torch_distributed_init'))
        if os.name == 'nt':
            # Windows: gloo backend and a file:/// URL (nccl is unavailable).
            init_method = 'file:///' + init_file.replace('\\', '/')
            torch.distributed.init_process_group(backend='gloo', init_method=init_method, rank=rank, world_size=c.num_gpus)
        else:
            init_method = f'file://{init_file}'
            torch.distributed.init_process_group(backend='nccl', init_method=init_method, rank=rank, world_size=c.num_gpus)
    # Init torch_utils.
    sync_device = torch.device('cuda', rank) if c.num_gpus > 1 else None
    training_stats.init_multiprocessing(rank=rank, sync_device=sync_device)
    if rank != 0:
        # Quiet custom-op build output on non-zero ranks.
        custom_ops.verbosity = 'none'
    # Execute training loop.
    if no_gan:
        training_loop_nogan.training_loop(rank=rank, **c)
    else:
        training_loop_gan.training_loop(rank=rank, **c)
#----------------------------------------------------------------------------
def launch_training(c, desc, outdir, dry_run, no_gan):
    """Allocate a fresh run directory, record the config, and start training.

    `c` is the fully-populated training config; `desc` becomes part of the run
    directory name. With `dry_run`, the options are printed and nothing is
    created. `no_gan` selects the reconstruction-only training loop.
    """
    dnnlib.util.Logger(should_flush=True)

    # Allocate the next run id: directories are named '<id:05d>-<desc>' and
    # ids continue from the highest one already present under `outdir`.
    existing_dirs = []
    if os.path.isdir(outdir):
        existing_dirs = [entry for entry in os.listdir(outdir) if os.path.isdir(os.path.join(outdir, entry))]
    id_matches = (re.match(r'^\d+', entry) for entry in existing_dirs)
    used_ids = [int(m.group()) for m in id_matches if m is not None]
    run_id = max(used_ids, default=-1) + 1
    c.run_dir = os.path.join(outdir, f'{run_id:05d}-{desc}')
    assert not os.path.exists(c.run_dir)

    # Echo the resolved options before touching the filesystem.
    print()
    print('Training options:')
    print(json.dumps(c, indent=2))
    print()
    print(f'Output directory:    {c.run_dir}')
    print(f'Number of GPUs:      {c.num_gpus}')
    print(f'Batch size:          {c.batch_size} images')
    print(f'Training duration:   {c.total_kimg} kimg')
    print(f'Dataset path:        {c.training_set_kwargs.path}')
    print(f'Dataset size:        {c.training_set_kwargs.max_size} images')
    print(f'Dataset resolution:  {c.training_set_kwargs.resolution}')
    print(f'Dataset labels:      {c.training_set_kwargs.use_labels}')
    print(f'Dataset x-flips:     {c.training_set_kwargs.xflip}')
    print(f'Training by GAN loss: {not no_gan}')
    print()

    if dry_run:
        print('Dry run; exiting.')
        return

    # Persist the options next to the run outputs for reproducibility.
    print('Creating output directory...')
    os.makedirs(c.run_dir)
    with open(os.path.join(c.run_dir, 'training_options.json'), 'wt') as f:
        json.dump(c, f, indent=2)

    # One process per GPU; a temp dir hosts the torch.distributed init file.
    print('Launching processes...')
    torch.multiprocessing.set_start_method('spawn')
    with tempfile.TemporaryDirectory() as temp_dir:
        if c.num_gpus == 1:
            subprocess_fn(rank=0, c=c, temp_dir=temp_dir, no_gan=no_gan)
        else:
            torch.multiprocessing.spawn(fn=subprocess_fn, args=(c, temp_dir, no_gan), nprocs=c.num_gpus)
#----------------------------------------------------------------------------
def init_dataset_kwargs(data):
    """Build dataset constructor kwargs from --data, probing the dataset once.

    Returns (dataset_kwargs, dataset_name); I/O failures become a
    user-facing click error.
    """
    try:
        probe_kwargs = dnnlib.EasyDict(class_name='training.dataset.ImageFolderDataset', path=data, use_labels=True, max_size=None, xflip=False)
        probe = dnnlib.util.construct_class_by_name(**probe_kwargs)  # subclass of training.dataset.Dataset
        # Pin the probed properties explicitly in the returned config.
        probe_kwargs.resolution = probe.resolution
        probe_kwargs.use_labels = probe.has_labels
        probe_kwargs.max_size = len(probe)
        return probe_kwargs, probe.name
    except IOError as err:
        raise click.ClickException(f'--data: {err}')
#----------------------------------------------------------------------------
def parse_comma_separated_list(s):
    """Normalize a CLI option value into a list of strings.

    Lists pass through unchanged; None, '' and any casing of 'none' mean
    "no entries"; any other string is split on commas.
    """
    if isinstance(s, list):
        return s
    if s is None:
        return []
    if s == '' or s.lower() == 'none':
        return []
    return s.split(',')
#----------------------------------------------------------------------------
@click.command()
# Required.
@click.option('--outdir', help='Where to save the results', metavar='DIR', required=True)
@click.option('--cfg', help='Base configuration', type=click.Choice(['stylegan3-t', 'stylegan3-r', 'stylegan2']), required=True)
@click.option('--data', help='Training data', metavar='[ZIP|DIR]', type=str, required=True)
@click.option('--gpus', help='Number of GPUs to use', metavar='INT', type=click.IntRange(min=1), required=True)
@click.option('--batch', help='Total batch size', metavar='INT', type=click.IntRange(min=1), required=True)
@click.option('--gamma', help='R1 regularization weight', metavar='FLOAT', type=click.FloatRange(min=0), required=True)
# Optional features.
@click.option('--cond', help='Train conditional model', metavar='BOOL', type=bool, default=False, show_default=True)
@click.option('--mirror', help='Enable dataset x-flips', metavar='BOOL', type=bool, default=False, show_default=True)
@click.option('--aug', help='Augmentation mode', type=click.Choice(['noaug', 'ada', 'fixed']), default='ada', show_default=True)
@click.option('--resume', help='Resume from given network pickle', metavar='[PATH|URL]', type=str)
@click.option('--freezed', help='Freeze first layers of D', metavar='INT', type=click.IntRange(min=0), default=0, show_default=True)
# Misc hyperparameters.
@click.option('--p', help='Probability for --aug=fixed', metavar='FLOAT', type=click.FloatRange(min=0, max=1), default=0.2, show_default=True)
@click.option('--target', help='Target value for --aug=ada', metavar='FLOAT', type=click.FloatRange(min=0, max=1), default=0.6, show_default=True)
@click.option('--batch-gpu', help='Limit batch size per GPU', metavar='INT', type=click.IntRange(min=1))
@click.option('--cbase', help='Capacity multiplier', metavar='INT', type=click.IntRange(min=1), default=32768, show_default=True)
@click.option('--cmax', help='Max. feature maps', metavar='INT', type=click.IntRange(min=1), default=512, show_default=True)
@click.option('--glr', help='G learning rate [default: varies]', metavar='FLOAT', type=click.FloatRange(min=0))
@click.option('--dlr', help='D learning rate', metavar='FLOAT', type=click.FloatRange(min=0), default=0.002, show_default=True)
@click.option('--map-depth', help='Mapping network depth [default: varies]', metavar='INT', type=click.IntRange(min=1))
@click.option('--mbstd-group', help='Minibatch std group size', metavar='INT', type=click.IntRange(min=1), default=4, show_default=True)
# Misc settings.
@click.option('--desc', help='String to include in result dir name', metavar='STR', type=str)
@click.option('--metrics', help='Quality metrics', metavar='[NAME|A,B,C|none]', type=parse_comma_separated_list, default='fid50k_full', show_default=True)
@click.option('--kimg', help='Total training duration', metavar='KIMG', type=click.IntRange(min=1), default=25000, show_default=True)
@click.option('--tick', help='How often to print progress', metavar='KIMG', type=click.IntRange(min=1), default=4, show_default=True)
@click.option('--snap', help='How often to save snapshots', metavar='TICKS', type=click.IntRange(min=1), default=50, show_default=True)
@click.option('--seed', help='Random seed', metavar='INT', type=click.IntRange(min=0), default=0, show_default=True)
@click.option('--fp32', help='Disable mixed-precision', metavar='BOOL', type=bool, default=False, show_default=True)
@click.option('--nobench', help='Disable cuDNN benchmarking', metavar='BOOL', type=bool, default=False, show_default=True)
@click.option('--workers', help='DataLoader worker processes', metavar='INT', type=click.IntRange(min=1), default=3, show_default=True)
@click.option('-n','--dry-run', help='Print training options and exit', is_flag=True)
# additional
@click.option('--generator_name', help='Specify the name of generator', metavar='STR', type=str, required=True)
@click.option('--norm_type', help='Normalization performed on parameters', metavar='STR', type=str, required=True)
@click.option('--no_gan', help='If not use GAN loss', metavar='BOOL', type=bool, required=True)
@click.option('--cond_d', help='If use Conditional Discriminator', metavar='BOOL', type=bool, required=True)
def main(**kwargs):
    """Train a GAN using the techniques described in the paper
    "Alias-Free Generative Adversarial Networks".
    Examples:
    \b
    # Train StyleGAN3-T for AFHQv2 using 8 GPUs.
    python train.py --outdir=~/training-runs --cfg=stylegan3-t --data=~/datasets/afhqv2-512x512.zip \\
        --gpus=8 --batch=32 --gamma=8.2 --mirror=1
    \b
    # Fine-tune StyleGAN3-R for MetFaces-U using 1 GPU, starting from the pre-trained FFHQ-U pickle.
    python train.py --outdir=~/training-runs --cfg=stylegan3-r --data=~/datasets/metfacesu-1024x1024.zip \\
        --gpus=8 --batch=32 --gamma=6.6 --mirror=1 --kimg=5000 --snap=5 \\
        --resume=https://api.ngc.nvidia.com/v2/models/nvidia/research/stylegan3/versions/1/files/stylegan3-r-ffhqu-1024x1024.pkl
    \b
    # Train StyleGAN2 for FFHQ at 1024x1024 resolution using 8 GPUs.
    python train.py --outdir=~/training-runs --cfg=stylegan2 --data=~/datasets/ffhq-1024x1024.zip \\
        --gpus=8 --batch=32 --gamma=10 --mirror=1 --aug=noaug
    """
    # size of z
    # NOTE(review): hard-coded to 0 here (the upstream default, 512, is kept in the
    # trailing comment below) — confirm the chosen generator supports z_dim=0.
    z_dim = 0 # 512
    print(f"z_dim = {z_dim}")
    # Initialize config.
    opts = dnnlib.EasyDict(kwargs) # Command line arguments.
    c = dnnlib.EasyDict() # Main config dict.
    c.G_kwargs = dnnlib.EasyDict(class_name=None, z_dim=z_dim, w_dim=512, mapping_kwargs=dnnlib.EasyDict())
    c.D_kwargs = dnnlib.EasyDict(class_name='training.networks_stylegan2.Discriminator', block_kwargs=dnnlib.EasyDict(), mapping_kwargs=dnnlib.EasyDict(), epilogue_kwargs=dnnlib.EasyDict())
    c.G_opt_kwargs = dnnlib.EasyDict(class_name='torch.optim.Adam', betas=[0,0.99], eps=1e-8)
    c.D_opt_kwargs = dnnlib.EasyDict(class_name='torch.optim.Adam', betas=[0,0.99], eps=1e-8)
    # Loss selection: plain proxy loss when --no_gan, otherwise proxy + GAN loss.
    c.loss_kwargs = dnnlib.EasyDict(class_name='loss.ProxyLoss') if opts.no_gan else dnnlib.EasyDict(class_name='loss.ProxyGANLoss')
    c.data_loader_kwargs = dnnlib.EasyDict(pin_memory=True, prefetch_factor=2)
    # customized configs
    c.generator_name = opts.generator_name
    c.norm_type = opts.norm_type
    # Conditional-discriminator flag is only meaningful when GAN loss is active.
    if not opts.no_gan:
        c.cond_D = opts.cond_d
    # Training set.
    c.training_set_kwargs, dataset_name = init_dataset_kwargs(data=opts.data)
    if opts.cond and not c.training_set_kwargs.use_labels:
        raise click.ClickException('--cond=True requires labels specified in dataset.json')
    c.training_set_kwargs.use_labels = opts.cond
    c.training_set_kwargs.xflip = opts.mirror
    # Hyperparameters & settings.
    c.num_gpus = opts.gpus
    c.batch_size = opts.batch
    # If --batch-gpu is not given, split the total batch evenly across GPUs.
    c.batch_gpu = opts.batch_gpu or opts.batch // opts.gpus
    c.G_kwargs.channel_base = c.D_kwargs.channel_base = opts.cbase
    c.G_kwargs.channel_max = c.D_kwargs.channel_max = opts.cmax
    # Mapping depth default depends on config: 8 for StyleGAN2, 2 otherwise.
    c.G_kwargs.mapping_kwargs.num_layers = (8 if opts.cfg == 'stylegan2' else 2) if opts.map_depth is None else opts.map_depth
    c.D_kwargs.block_kwargs.freeze_layers = opts.freezed
    c.D_kwargs.epilogue_kwargs.mbstd_group_size = opts.mbstd_group
    c.loss_kwargs.r1_gamma = opts.gamma
    c.G_opt_kwargs.lr = (0.002 if opts.cfg == 'stylegan2' else 0.0025) if opts.glr is None else opts.glr
    c.D_opt_kwargs.lr = opts.dlr
    c.metrics = opts.metrics
    c.total_kimg = opts.kimg
    c.kimg_per_tick = opts.tick
    c.image_snapshot_ticks = c.network_snapshot_ticks = opts.snap
    c.random_seed = c.training_set_kwargs.random_seed = opts.seed
    c.data_loader_kwargs.num_workers = opts.workers
    # Sanity checks.
    if c.batch_size % c.num_gpus != 0:
        raise click.ClickException('--batch must be a multiple of --gpus')
    if c.batch_size % (c.num_gpus * c.batch_gpu) != 0:
        raise click.ClickException('--batch must be a multiple of --gpus times --batch-gpu')
    if c.batch_gpu < c.D_kwargs.epilogue_kwargs.mbstd_group_size:
        raise click.ClickException('--batch-gpu cannot be smaller than --mbstd')
    if any(not metric_main.is_valid_metric(metric) for metric in c.metrics):
        raise click.ClickException('\n'.join(['--metrics can only contain the following values:'] + metric_main.list_valid_metrics()))
    # Base configuration.
    c.ema_kimg = c.batch_size * 10 / 32
    if opts.cfg == 'stylegan2':
        c.G_kwargs.class_name = 'training.networks_stylegan2.Generator'
        c.loss_kwargs.style_mixing_prob = 0.9 # Enable style mixing regularization.
        c.loss_kwargs.pl_weight = 2 # Enable path length regularization.
        c.G_reg_interval = 4 # Enable lazy regularization for G.
        c.G_kwargs.fused_modconv_default = 'inference_only' # Speed up training by using regular convolutions instead of grouped convolutions.
        c.loss_kwargs.pl_no_weight_grad = True # Speed up path length regularization by skipping gradient computation wrt. conv2d weights.
    else:
        c.G_kwargs.class_name = 'training.networks_stylegan3.Generator'
        c.G_kwargs.magnitude_ema_beta = 0.5 ** (c.batch_size / (20 * 1e3))
        if opts.cfg == 'stylegan3-r':
            c.G_kwargs.conv_kernel = 1 # Use 1x1 convolutions.
            c.G_kwargs.channel_base *= 2 # Double the number of feature maps.
            c.G_kwargs.channel_max *= 2
            c.G_kwargs.use_radial_filters = True # Use radially symmetric downsampling filters.
            c.loss_kwargs.blur_init_sigma = 10 # Blur the images seen by the discriminator.
            c.loss_kwargs.blur_fade_kimg = c.batch_size * 200 / 32 # Fade out the blur during the first N kimg.
    # Augmentation.
    if opts.aug != 'noaug':
        c.augment_kwargs = dnnlib.EasyDict(class_name='training.augment.AugmentPipe', xflip=1, rotate90=1, xint=1, scale=1, rotate=1, aniso=1, xfrac=1, brightness=1, contrast=1, lumaflip=1, hue=1, saturation=1)
        if opts.aug == 'ada':
            c.ada_target = opts.target
        if opts.aug == 'fixed':
            c.augment_p = opts.p
    # Resume.
    if opts.resume is not None:
        c.resume_pkl = opts.resume
        c.ada_kimg = 100 # Make ADA react faster at the beginning.
        c.ema_rampup = None # Disable EMA rampup.
        c.loss_kwargs.blur_init_sigma = 0 # Disable blur rampup.
    # Performance-related toggles.
    if opts.fp32:
        c.G_kwargs.num_fp16_res = c.D_kwargs.num_fp16_res = 0
        c.G_kwargs.conv_clamp = c.D_kwargs.conv_clamp = None
    if opts.nobench:
        c.cudnn_benchmark = False
    # Description string.
    if opts.no_gan:
        desc = f'{opts.cfg:s}-{dataset_name:s}-gpus{c.num_gpus:d}-batch{c.batch_size:d}-no_gan'
    else:
        desc = f'{opts.cfg:s}-{dataset_name:s}-gpus{c.num_gpus:d}-batch{c.batch_size:d}-gamma{c.loss_kwargs.r1_gamma:g}'
    if opts.desc is not None:
        desc += f'-{opts.desc}'
    # Launch.
    launch_training(c=c, desc=desc, outdir=opts.outdir, dry_run=opts.dry_run, no_gan=opts.no_gan)
#----------------------------------------------------------------------------
# Script entry point: click fills in **kwargs from the command line.
if __name__ == "__main__":
    main() # pylint: disable=no-value-for-parameter
#----------------------------------------------------------------------------
| 16,998 | 53.483974 | 210 | py |
DiffProxy | DiffProxy-main/stylegan/training/loss.py | # Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
"""Loss functions."""
import numpy as np
import torch
from torch_utils import training_stats
from torch_utils.ops import conv2d_gradfix
from torch_utils.ops import upfirdn2d
#----------------------------------------------------------------------------
class Loss:
    """Abstract base class for training losses.

    Subclasses implement accumulate_gradients(), which evaluates the loss
    for a single training phase and backpropagates the (gain-scaled)
    result into the network parameters.
    """

    def accumulate_gradients(self, phase, real_img, real_c, gen_z, gen_c, gain, cur_nimg):
        """Compute the loss for `phase` and accumulate gradients. Must be overridden."""
        raise NotImplementedError  # subclasses provide the concrete loss
#----------------------------------------------------------------------------
class StyleGAN2Loss(Loss):
    """StyleGAN2 non-saturating logistic GAN loss.

    Supports optional ADA augmentation (augment_pipe), style mixing
    regularization, path length regularization for G, R1 regularization
    for D, and a fading input blur for D (StyleGAN3-R recipe).
    """

    def __init__(self, device, G, D, augment_pipe=None, r1_gamma=10, style_mixing_prob=0, pl_weight=0, pl_batch_shrink=2, pl_decay=0.01, pl_no_weight_grad=False, blur_init_sigma=0, blur_fade_kimg=0):
        super().__init__()
        self.device = device                        # Device for loss-side buffers (pl_mean).
        self.G = G                                  # Generator network.
        self.D = D                                  # Discriminator network.
        self.augment_pipe = augment_pipe            # Optional ADA augmentation applied before D.
        self.r1_gamma = r1_gamma                    # R1 regularization weight.
        self.style_mixing_prob = style_mixing_prob  # Probability of style mixing in run_G().
        self.pl_weight = pl_weight                  # Path length regularization weight.
        self.pl_batch_shrink = pl_batch_shrink      # Batch shrink factor for the PL pass.
        self.pl_decay = pl_decay                    # EMA decay for the running PL mean.
        self.pl_no_weight_grad = pl_no_weight_grad  # Skip conv2d weight grads during the PL pass.
        self.pl_mean = torch.zeros([], device=device)  # Running EMA of observed path lengths.
        self.blur_init_sigma = blur_init_sigma      # Initial sigma of the D input blur.
        self.blur_fade_kimg = blur_fade_kimg        # Duration (kimg) over which the blur fades out.

    def run_G(self, z, c, update_emas=False):
        """Map (z, c) to intermediate latents ws and synthesize an image.

        With probability self.style_mixing_prob, latents past a random
        layer cutoff are replaced by a second mapping pass (style mixing).
        Returns (img, ws).
        """
        ws = self.G.mapping(z, c, update_emas=update_emas)
        if self.style_mixing_prob > 0:
            with torch.autograd.profiler.record_function('style_mixing'):
                # Random cutoff in [1, num_ws); disabled (set to num_ws) with prob 1 - style_mixing_prob.
                cutoff = torch.empty([], dtype=torch.int64, device=ws.device).random_(1, ws.shape[1])
                cutoff = torch.where(torch.rand([], device=ws.device) < self.style_mixing_prob, cutoff, torch.full_like(cutoff, ws.shape[1]))
                ws[:, cutoff:] = self.G.mapping(torch.randn_like(z), c, update_emas=False)[:, cutoff:]
        img = self.G.synthesis(ws, update_emas=update_emas)
        return img, ws

    def run_D(self, img, c, blur_sigma=0, update_emas=False):
        """Optionally blur and augment img, then return discriminator logits."""
        blur_size = np.floor(blur_sigma * 3)
        if blur_size > 0:
            with torch.autograd.profiler.record_function('blur'):
                # Separable Gaussian-like kernel: exp2(-(x / sigma)^2), normalized to sum 1.
                f = torch.arange(-blur_size, blur_size + 1, device=img.device).div(blur_sigma).square().neg().exp2()
                img = upfirdn2d.filter2d(img, f / f.sum())
        if self.augment_pipe is not None:
            img = self.augment_pipe(img)
        logits = self.D(img, c, update_emas=update_emas)
        return logits

    def accumulate_gradients(self, phase, real_img, real_c, gen_z, gen_c, gain, cur_nimg):
        """Run one loss phase and accumulate gradients via backward().

        phase selects which terms are evaluated: Gmain/Greg/Gboth for the
        generator, Dmain/Dreg/Dboth for the discriminator. `gain` scales
        the loss (lazy-regularization bookkeeping); `cur_nimg` drives the
        blur fade schedule.
        """
        assert phase in ['Gmain', 'Greg', 'Gboth', 'Dmain', 'Dreg', 'Dboth']
        # Skip disabled regularization phases entirely.
        if self.pl_weight == 0:
            phase = {'Greg': 'none', 'Gboth': 'Gmain'}.get(phase, phase)
        if self.r1_gamma == 0:
            phase = {'Dreg': 'none', 'Dboth': 'Dmain'}.get(phase, phase)
        # Linearly fade blur_sigma from blur_init_sigma to 0 over blur_fade_kimg.
        blur_sigma = max(1 - cur_nimg / (self.blur_fade_kimg * 1e3), 0) * self.blur_init_sigma if self.blur_fade_kimg > 0 else 0
        # Gmain: Maximize logits for generated images.
        if phase in ['Gmain', 'Gboth']:
            with torch.autograd.profiler.record_function('Gmain_forward'):
                gen_img, _gen_ws = self.run_G(gen_z, gen_c)
                gen_logits = self.run_D(gen_img, gen_c, blur_sigma=blur_sigma)
                training_stats.report('Loss/scores/fake', gen_logits)
                training_stats.report('Loss/signs/fake', gen_logits.sign())
                loss_Gmain = torch.nn.functional.softplus(-gen_logits) # -log(sigmoid(gen_logits))
                training_stats.report('Loss/G/loss', loss_Gmain)
            with torch.autograd.profiler.record_function('Gmain_backward'):
                loss_Gmain.mean().mul(gain).backward()
        # Gpl: Apply path length regularization.
        if phase in ['Greg', 'Gboth']:
            with torch.autograd.profiler.record_function('Gpl_forward'):
                # PL pass runs on a shrunken batch to save memory/compute.
                batch_size = gen_z.shape[0] // self.pl_batch_shrink
                gen_img, gen_ws = self.run_G(gen_z[:batch_size], gen_c[:batch_size])
                pl_noise = torch.randn_like(gen_img) / np.sqrt(gen_img.shape[2] * gen_img.shape[3])
                with torch.autograd.profiler.record_function('pl_grads'), conv2d_gradfix.no_weight_gradients(self.pl_no_weight_grad):
                    pl_grads = torch.autograd.grad(outputs=[(gen_img * pl_noise).sum()], inputs=[gen_ws], create_graph=True, only_inputs=True)[0]
                pl_lengths = pl_grads.square().sum(2).mean(1).sqrt()
                # Update the running mean of path lengths (EMA) and penalize deviation from it.
                pl_mean = self.pl_mean.lerp(pl_lengths.mean(), self.pl_decay)
                self.pl_mean.copy_(pl_mean.detach())
                pl_penalty = (pl_lengths - pl_mean).square()
                training_stats.report('Loss/pl_penalty', pl_penalty)
                loss_Gpl = pl_penalty * self.pl_weight
                training_stats.report('Loss/G/reg', loss_Gpl)
            with torch.autograd.profiler.record_function('Gpl_backward'):
                loss_Gpl.mean().mul(gain).backward()
        # Dmain: Minimize logits for generated images.
        loss_Dgen = 0
        if phase in ['Dmain', 'Dboth']:
            with torch.autograd.profiler.record_function('Dgen_forward'):
                gen_img, _gen_ws = self.run_G(gen_z, gen_c, update_emas=True)
                gen_logits = self.run_D(gen_img, gen_c, blur_sigma=blur_sigma, update_emas=True)
                training_stats.report('Loss/scores/fake', gen_logits)
                training_stats.report('Loss/signs/fake', gen_logits.sign())
                loss_Dgen = torch.nn.functional.softplus(gen_logits) # -log(1 - sigmoid(gen_logits))
            with torch.autograd.profiler.record_function('Dgen_backward'):
                loss_Dgen.mean().mul(gain).backward()
        # Dmain: Maximize logits for real images.
        # Dr1: Apply R1 regularization.
        if phase in ['Dmain', 'Dreg', 'Dboth']:
            name = 'Dreal' if phase == 'Dmain' else 'Dr1' if phase == 'Dreg' else 'Dreal_Dr1'
            with torch.autograd.profiler.record_function(name + '_forward'):
                # requires_grad only when the R1 gradient penalty is computed this phase.
                real_img_tmp = real_img.detach().requires_grad_(phase in ['Dreg', 'Dboth'])
                real_logits = self.run_D(real_img_tmp, real_c, blur_sigma=blur_sigma)
                training_stats.report('Loss/scores/real', real_logits)
                training_stats.report('Loss/signs/real', real_logits.sign())
                loss_Dreal = 0
                if phase in ['Dmain', 'Dboth']:
                    loss_Dreal = torch.nn.functional.softplus(-real_logits) # -log(sigmoid(real_logits))
                    training_stats.report('Loss/D/loss', loss_Dgen + loss_Dreal)
                loss_Dr1 = 0
                if phase in ['Dreg', 'Dboth']:
                    with torch.autograd.profiler.record_function('r1_grads'), conv2d_gradfix.no_weight_gradients():
                        r1_grads = torch.autograd.grad(outputs=[real_logits.sum()], inputs=[real_img_tmp], create_graph=True, only_inputs=True)[0]
                    r1_penalty = r1_grads.square().sum([1,2,3])
                    loss_Dr1 = r1_penalty * (self.r1_gamma / 2)
                    training_stats.report('Loss/r1_penalty', r1_penalty)
                    training_stats.report('Loss/D/reg', loss_Dr1)
            with torch.autograd.profiler.record_function(name + '_backward'):
                (loss_Dreal + loss_Dr1).mean().mul(gain).backward()
#---------------------------------------------------------------------------- | 7,998 | 56.135714 | 199 | py |
DiffProxy | DiffProxy-main/stylegan/training/augment.py | # Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
"""Augmentation pipeline from the paper
"Training Generative Adversarial Networks with Limited Data".
Matches the original implementation by Karras et al. at
https://github.com/NVlabs/stylegan2-ada/blob/main/training/augment.py"""
import numpy as np
import scipy.signal
import torch
from torch_utils import persistence
from torch_utils import misc
from torch_utils.ops import upfirdn2d
from torch_utils.ops import grid_sample_gradfix
from torch_utils.ops import conv2d_gradfix
#----------------------------------------------------------------------------
# Coefficients of various wavelet decomposition low-pass filters.
# Low-pass decomposition filter taps, keyed by wavelet name. Used to build
# the orthogonal lowpass filter (Hz_geom) for geometric augmentations.
wavelets = {
    # Haar (identical to db1).
    'haar': [0.7071067811865476, 0.7071067811865476],
    # Daubechies family (db1..db8).
    'db1': [0.7071067811865476, 0.7071067811865476],
    'db2': [-0.12940952255092145, 0.22414386804185735, 0.836516303737469, 0.48296291314469025],
    'db3': [0.035226291882100656, -0.08544127388224149, -0.13501102001039084, 0.4598775021193313, 0.8068915093133388, 0.3326705529509569],
    'db4': [-0.010597401784997278, 0.032883011666982945, 0.030841381835986965, -0.18703481171888114, -0.02798376941698385, 0.6308807679295904, 0.7148465705525415, 0.23037781330885523],
    'db5': [0.003335725285001549, -0.012580751999015526, -0.006241490213011705, 0.07757149384006515, -0.03224486958502952, -0.24229488706619015, 0.13842814590110342, 0.7243085284385744, 0.6038292697974729, 0.160102397974125],
    'db6': [-0.00107730108499558, 0.004777257511010651, 0.0005538422009938016, -0.031582039318031156, 0.02752286553001629, 0.09750160558707936, -0.12976686756709563, -0.22626469396516913, 0.3152503517092432, 0.7511339080215775, 0.4946238903983854, 0.11154074335008017],
    'db7': [0.0003537138000010399, -0.0018016407039998328, 0.00042957797300470274, 0.012550998556013784, -0.01657454163101562, -0.03802993693503463, 0.0806126091510659, 0.07130921926705004, -0.22403618499416572, -0.14390600392910627, 0.4697822874053586, 0.7291320908465551, 0.39653931948230575, 0.07785205408506236],
    'db8': [-0.00011747678400228192, 0.0006754494059985568, -0.0003917403729959771, -0.00487035299301066, 0.008746094047015655, 0.013981027917015516, -0.04408825393106472, -0.01736930100202211, 0.128747426620186, 0.00047248457399797254, -0.2840155429624281, -0.015829105256023893, 0.5853546836548691, 0.6756307362980128, 0.3128715909144659, 0.05441584224308161],
    # Symlet family (sym2..sym8); sym6 is the default for Hz_geom.
    'sym2': [-0.12940952255092145, 0.22414386804185735, 0.836516303737469, 0.48296291314469025],
    'sym3': [0.035226291882100656, -0.08544127388224149, -0.13501102001039084, 0.4598775021193313, 0.8068915093133388, 0.3326705529509569],
    'sym4': [-0.07576571478927333, -0.02963552764599851, 0.49761866763201545, 0.8037387518059161, 0.29785779560527736, -0.09921954357684722, -0.012603967262037833, 0.0322231006040427],
    'sym5': [0.027333068345077982, 0.029519490925774643, -0.039134249302383094, 0.1993975339773936, 0.7234076904024206, 0.6339789634582119, 0.01660210576452232, -0.17532808990845047, -0.021101834024758855, 0.019538882735286728],
    'sym6': [0.015404109327027373, 0.0034907120842174702, -0.11799011114819057, -0.048311742585633, 0.4910559419267466, 0.787641141030194, 0.3379294217276218, -0.07263752278646252, -0.021060292512300564, 0.04472490177066578, 0.0017677118642428036, -0.007800708325034148],
    'sym7': [0.002681814568257878, -0.0010473848886829163, -0.01263630340325193, 0.03051551316596357, 0.0678926935013727, -0.049552834937127255, 0.017441255086855827, 0.5361019170917628, 0.767764317003164, 0.2886296317515146, -0.14004724044296152, -0.10780823770381774, 0.004010244871533663, 0.010268176708511255],
    'sym8': [-0.0033824159510061256, -0.0005421323317911481, 0.03169508781149298, 0.007607487324917605, -0.1432942383508097, -0.061273359067658524, 0.4813596512583722, 0.7771857517005235, 0.3644418948353314, -0.05194583810770904, -0.027219029917056003, 0.049137179673607506, 0.003808752013890615, -0.01495225833704823, -0.0003029205147213668, 0.0018899503327594609],
}
#----------------------------------------------------------------------------
# Helpers for constructing transformation matrices.
def matrix(*rows, device=None):
    """Build a (possibly batched) transformation matrix from `rows`.

    Each row is a sequence of scalars and/or tensors. If no entry is a
    tensor, a constant matrix is returned; otherwise scalars are broadcast
    to the shape/device of the first tensor entry and the result has shape
    ref_shape + (num_rows, num_cols).
    """
    width = len(rows[0])
    assert all(len(row) == width for row in rows)
    flat = [entry for row in rows for entry in row]
    tensors = [entry for entry in flat if isinstance(entry, torch.Tensor)]
    if not tensors:
        # Purely numeric input: emit a constant matrix on the requested device.
        return misc.constant(np.asarray(rows), device=device)
    lead = tensors[0]
    assert device is None or device == lead.device
    flat = [
        entry if isinstance(entry, torch.Tensor)
        else misc.constant(entry, shape=lead.shape, device=lead.device)
        for entry in flat
    ]
    return torch.stack(flat, dim=-1).reshape(lead.shape + (len(rows), -1))
def translate2d(tx, ty, **kwargs):
    """Homogeneous 2D translation matrix by (tx, ty)."""
    rows = (
        [1, 0, tx],
        [0, 1, ty],
        [0, 0, 1],
    )
    return matrix(*rows, **kwargs)
def translate3d(tx, ty, tz, **kwargs):
    """Homogeneous 3D translation matrix by (tx, ty, tz)."""
    rows = (
        [1, 0, 0, tx],
        [0, 1, 0, ty],
        [0, 0, 1, tz],
        [0, 0, 0, 1],
    )
    return matrix(*rows, **kwargs)
def scale2d(sx, sy, **kwargs):
    """Homogeneous 2D scaling matrix with factors (sx, sy)."""
    rows = (
        [sx, 0, 0],
        [0, sy, 0],
        [0, 0, 1],
    )
    return matrix(*rows, **kwargs)
def scale3d(sx, sy, sz, **kwargs):
    """Homogeneous 3D scaling matrix with factors (sx, sy, sz)."""
    rows = (
        [sx, 0, 0, 0],
        [0, sy, 0, 0],
        [0, 0, sz, 0],
        [0, 0, 0, 1],
    )
    return matrix(*rows, **kwargs)
def rotate2d(theta, **kwargs):
    """Homogeneous 2D rotation matrix for angle `theta` (radians)."""
    c = torch.cos(theta)
    s = torch.sin(theta)
    return matrix(
        [c, torch.sin(-theta), 0],
        [s, c, 0],
        [0, 0, 1],
        **kwargs)
def rotate3d(v, theta, **kwargs):
    """Homogeneous 3D rotation about axis `v` by angle `theta` (Rodrigues form)."""
    vx = v[..., 0]
    vy = v[..., 1]
    vz = v[..., 2]
    s = torch.sin(theta)
    c = torch.cos(theta)
    cc = 1 - c
    return matrix(
        [vx*vx*cc+c, vx*vy*cc-vz*s, vx*vz*cc+vy*s, 0],
        [vy*vx*cc+vz*s, vy*vy*cc+c, vy*vz*cc-vx*s, 0],
        [vz*vx*cc-vy*s, vz*vy*cc+vx*s, vz*vz*cc+c, 0],
        [0, 0, 0, 1],
        **kwargs)
def translate2d_inv(tx, ty, **kwargs):
    """Inverse of translate2d: translate by (-tx, -ty)."""
    return translate2d(-tx, -ty, **kwargs)
def scale2d_inv(sx, sy, **kwargs):
    """Inverse of scale2d: scale by the reciprocal factors."""
    return scale2d(1 / sx, 1 / sy, **kwargs)
def rotate2d_inv(theta, **kwargs):
    """Inverse of rotate2d: rotate by -theta."""
    return rotate2d(-theta, **kwargs)
#----------------------------------------------------------------------------
# Versatile image augmentation pipeline from the paper
# "Training Generative Adversarial Networks with Limited Data".
#
# All augmentations are disabled by default; individual augmentations can
# be enabled by setting their probability multipliers to 1.
@persistence.persistent_class
class AugmentPipe(torch.nn.Module):
    def __init__(self,
        xflip=0, rotate90=0, xint=0, xint_max=0.125,
        scale=0, rotate=0, aniso=0, xfrac=0, scale_std=0.2, rotate_max=1, aniso_std=0.2, xfrac_std=0.125,
        brightness=0, contrast=0, lumaflip=0, hue=0, saturation=0, brightness_std=0.2, contrast_std=0.5, hue_max=1, saturation_std=1,
        imgfilter=0, imgfilter_bands=[1,1,1,1], imgfilter_std=1,
        noise=0, cutout=0, noise_std=0.1, cutout_size=0.5,
    ):
        """Configure augmentation probabilities/strengths and precompute filters.

        All probability multipliers default to 0 (disabled); the effective
        probability of each augmentation during forward() is
        (multiplier * self.p), where self.p is the overall ADA strength.
        NOTE(review): the mutable default `imgfilter_bands=[1,1,1,1]` is safe
        here because it is copied via list() below, but a tuple default would
        be the cleaner idiom.
        """
        super().__init__()
        self.register_buffer('p', torch.ones([])) # Overall multiplier for augmentation probability.
        # Pixel blitting.
        self.xflip = float(xflip) # Probability multiplier for x-flip.
        self.rotate90 = float(rotate90) # Probability multiplier for 90 degree rotations.
        self.xint = float(xint) # Probability multiplier for integer translation.
        self.xint_max = float(xint_max) # Range of integer translation, relative to image dimensions.
        # General geometric transformations.
        self.scale = float(scale) # Probability multiplier for isotropic scaling.
        self.rotate = float(rotate) # Probability multiplier for arbitrary rotation.
        self.aniso = float(aniso) # Probability multiplier for anisotropic scaling.
        self.xfrac = float(xfrac) # Probability multiplier for fractional translation.
        self.scale_std = float(scale_std) # Log2 standard deviation of isotropic scaling.
        self.rotate_max = float(rotate_max) # Range of arbitrary rotation, 1 = full circle.
        self.aniso_std = float(aniso_std) # Log2 standard deviation of anisotropic scaling.
        self.xfrac_std = float(xfrac_std) # Standard deviation of frational translation, relative to image dimensions.
        # Color transformations.
        self.brightness = float(brightness) # Probability multiplier for brightness.
        self.contrast = float(contrast) # Probability multiplier for contrast.
        self.lumaflip = float(lumaflip) # Probability multiplier for luma flip.
        self.hue = float(hue) # Probability multiplier for hue rotation.
        self.saturation = float(saturation) # Probability multiplier for saturation.
        self.brightness_std = float(brightness_std) # Standard deviation of brightness.
        self.contrast_std = float(contrast_std) # Log2 standard deviation of contrast.
        self.hue_max = float(hue_max) # Range of hue rotation, 1 = full circle.
        self.saturation_std = float(saturation_std) # Log2 standard deviation of saturation.
        # Image-space filtering.
        self.imgfilter = float(imgfilter) # Probability multiplier for image-space filtering.
        self.imgfilter_bands = list(imgfilter_bands) # Probability multipliers for individual frequency bands.
        self.imgfilter_std = float(imgfilter_std) # Log2 standard deviation of image-space filter amplification.
        # Image-space corruptions.
        self.noise = float(noise) # Probability multiplier for additive RGB noise.
        self.cutout = float(cutout) # Probability multiplier for cutout.
        self.noise_std = float(noise_std) # Standard deviation of additive RGB noise.
        self.cutout_size = float(cutout_size) # Size of the cutout rectangle, relative to image dimensions.
        # Setup orthogonal lowpass filter for geometric augmentations.
        self.register_buffer('Hz_geom', upfirdn2d.setup_filter(wavelets['sym6']))
        # Construct filter bank for image-space filtering.
        Hz_lo = np.asarray(wavelets['sym2']) # H(z)
        Hz_hi = Hz_lo * ((-1) ** np.arange(Hz_lo.size)) # H(-z)
        Hz_lo2 = np.convolve(Hz_lo, Hz_lo[::-1]) / 2 # H(z) * H(z^-1) / 2
        Hz_hi2 = np.convolve(Hz_hi, Hz_hi[::-1]) / 2 # H(-z) * H(-z^-1) / 2
        Hz_fbank = np.eye(4, 1) # Bandpass(H(z), b_i)
        # Each iteration: zero-stuff the existing taps (upsample by 2), convolve
        # with the lowpass autocorrelation, then add the highpass response
        # centered on row i to carve out frequency band i.
        for i in range(1, Hz_fbank.shape[0]):
            Hz_fbank = np.dstack([Hz_fbank, np.zeros_like(Hz_fbank)]).reshape(Hz_fbank.shape[0], -1)[:, :-1]
            Hz_fbank = scipy.signal.convolve(Hz_fbank, [Hz_lo2])
            Hz_fbank[i, (Hz_fbank.shape[1] - Hz_hi2.size) // 2 : (Hz_fbank.shape[1] + Hz_hi2.size) // 2] += Hz_hi2
        self.register_buffer('Hz_fbank', torch.as_tensor(Hz_fbank, dtype=torch.float32))
def forward(self, images, debug_percentile=None):
assert isinstance(images, torch.Tensor) and images.ndim == 4
batch_size, num_channels, height, width = images.shape
device = images.device
if debug_percentile is not None:
debug_percentile = torch.as_tensor(debug_percentile, dtype=torch.float32, device=device)
# -------------------------------------
# Select parameters for pixel blitting.
# -------------------------------------
# Initialize inverse homogeneous 2D transform: G_inv @ pixel_out ==> pixel_in
I_3 = torch.eye(3, device=device)
G_inv = I_3
# Apply x-flip with probability (xflip * strength).
if self.xflip > 0:
i = torch.floor(torch.rand([batch_size], device=device) * 2)
i = torch.where(torch.rand([batch_size], device=device) < self.xflip * self.p, i, torch.zeros_like(i))
if debug_percentile is not None:
i = torch.full_like(i, torch.floor(debug_percentile * 2))
G_inv = G_inv @ scale2d_inv(1 - 2 * i, 1)
# Apply 90 degree rotations with probability (rotate90 * strength).
if self.rotate90 > 0:
i = torch.floor(torch.rand([batch_size], device=device) * 4)
i = torch.where(torch.rand([batch_size], device=device) < self.rotate90 * self.p, i, torch.zeros_like(i))
if debug_percentile is not None:
i = torch.full_like(i, torch.floor(debug_percentile * 4))
G_inv = G_inv @ rotate2d_inv(-np.pi / 2 * i)
# Apply integer translation with probability (xint * strength).
if self.xint > 0:
t = (torch.rand([batch_size, 2], device=device) * 2 - 1) * self.xint_max
t = torch.where(torch.rand([batch_size, 1], device=device) < self.xint * self.p, t, torch.zeros_like(t))
if debug_percentile is not None:
t = torch.full_like(t, (debug_percentile * 2 - 1) * self.xint_max)
G_inv = G_inv @ translate2d_inv(torch.round(t[:,0] * width), torch.round(t[:,1] * height))
# --------------------------------------------------------
# Select parameters for general geometric transformations.
# --------------------------------------------------------
# Apply isotropic scaling with probability (scale * strength).
if self.scale > 0:
s = torch.exp2(torch.randn([batch_size], device=device) * self.scale_std)
s = torch.where(torch.rand([batch_size], device=device) < self.scale * self.p, s, torch.ones_like(s))
if debug_percentile is not None:
s = torch.full_like(s, torch.exp2(torch.erfinv(debug_percentile * 2 - 1) * self.scale_std))
G_inv = G_inv @ scale2d_inv(s, s)
# Apply pre-rotation with probability p_rot.
p_rot = 1 - torch.sqrt((1 - self.rotate * self.p).clamp(0, 1)) # P(pre OR post) = p
if self.rotate > 0:
theta = (torch.rand([batch_size], device=device) * 2 - 1) * np.pi * self.rotate_max
theta = torch.where(torch.rand([batch_size], device=device) < p_rot, theta, torch.zeros_like(theta))
if debug_percentile is not None:
theta = torch.full_like(theta, (debug_percentile * 2 - 1) * np.pi * self.rotate_max)
G_inv = G_inv @ rotate2d_inv(-theta) # Before anisotropic scaling.
# Apply anisotropic scaling with probability (aniso * strength).
if self.aniso > 0:
s = torch.exp2(torch.randn([batch_size], device=device) * self.aniso_std)
s = torch.where(torch.rand([batch_size], device=device) < self.aniso * self.p, s, torch.ones_like(s))
if debug_percentile is not None:
s = torch.full_like(s, torch.exp2(torch.erfinv(debug_percentile * 2 - 1) * self.aniso_std))
G_inv = G_inv @ scale2d_inv(s, 1 / s)
# Apply post-rotation with probability p_rot.
if self.rotate > 0:
theta = (torch.rand([batch_size], device=device) * 2 - 1) * np.pi * self.rotate_max
theta = torch.where(torch.rand([batch_size], device=device) < p_rot, theta, torch.zeros_like(theta))
if debug_percentile is not None:
theta = torch.zeros_like(theta)
G_inv = G_inv @ rotate2d_inv(-theta) # After anisotropic scaling.
# Apply fractional translation with probability (xfrac * strength).
if self.xfrac > 0:
t = torch.randn([batch_size, 2], device=device) * self.xfrac_std
t = torch.where(torch.rand([batch_size, 1], device=device) < self.xfrac * self.p, t, torch.zeros_like(t))
if debug_percentile is not None:
t = torch.full_like(t, torch.erfinv(debug_percentile * 2 - 1) * self.xfrac_std)
G_inv = G_inv @ translate2d_inv(t[:,0] * width, t[:,1] * height)
# ----------------------------------
# Execute geometric transformations.
# ----------------------------------
# Execute if the transform is not identity.
if G_inv is not I_3:
# Calculate padding.
cx = (width - 1) / 2
cy = (height - 1) / 2
cp = matrix([-cx, -cy, 1], [cx, -cy, 1], [cx, cy, 1], [-cx, cy, 1], device=device) # [idx, xyz]
cp = G_inv @ cp.t() # [batch, xyz, idx]
Hz_pad = self.Hz_geom.shape[0] // 4
margin = cp[:, :2, :].permute(1, 0, 2).flatten(1) # [xy, batch * idx]
margin = torch.cat([-margin, margin]).max(dim=1).values # [x0, y0, x1, y1]
margin = margin + misc.constant([Hz_pad * 2 - cx, Hz_pad * 2 - cy] * 2, device=device)
margin = margin.max(misc.constant([0, 0] * 2, device=device))
margin = margin.min(misc.constant([width-1, height-1] * 2, device=device))
mx0, my0, mx1, my1 = margin.ceil().to(torch.int32)
# Pad image and adjust origin.
images = torch.nn.functional.pad(input=images, pad=[mx0,mx1,my0,my1], mode='reflect')
G_inv = translate2d((mx0 - mx1) / 2, (my0 - my1) / 2) @ G_inv
# Upsample.
images = upfirdn2d.upsample2d(x=images, f=self.Hz_geom, up=2)
G_inv = scale2d(2, 2, device=device) @ G_inv @ scale2d_inv(2, 2, device=device)
G_inv = translate2d(-0.5, -0.5, device=device) @ G_inv @ translate2d_inv(-0.5, -0.5, device=device)
# Execute transformation.
shape = [batch_size, num_channels, (height + Hz_pad * 2) * 2, (width + Hz_pad * 2) * 2]
G_inv = scale2d(2 / images.shape[3], 2 / images.shape[2], device=device) @ G_inv @ scale2d_inv(2 / shape[3], 2 / shape[2], device=device)
grid = torch.nn.functional.affine_grid(theta=G_inv[:,:2,:], size=shape, align_corners=False)
images = grid_sample_gradfix.grid_sample(images, grid)
# Downsample and crop.
images = upfirdn2d.downsample2d(x=images, f=self.Hz_geom, down=2, padding=-Hz_pad*2, flip_filter=True)
# --------------------------------------------
# Select parameters for color transformations.
# --------------------------------------------
# Initialize homogeneous 3D transformation matrix: C @ color_in ==> color_out
I_4 = torch.eye(4, device=device)
C = I_4
# Apply brightness with probability (brightness * strength).
if self.brightness > 0:
b = torch.randn([batch_size], device=device) * self.brightness_std
b = torch.where(torch.rand([batch_size], device=device) < self.brightness * self.p, b, torch.zeros_like(b))
if debug_percentile is not None:
b = torch.full_like(b, torch.erfinv(debug_percentile * 2 - 1) * self.brightness_std)
C = translate3d(b, b, b) @ C
# Apply contrast with probability (contrast * strength).
if self.contrast > 0:
c = torch.exp2(torch.randn([batch_size], device=device) * self.contrast_std)
c = torch.where(torch.rand([batch_size], device=device) < self.contrast * self.p, c, torch.ones_like(c))
if debug_percentile is not None:
c = torch.full_like(c, torch.exp2(torch.erfinv(debug_percentile * 2 - 1) * self.contrast_std))
C = scale3d(c, c, c) @ C
# Apply luma flip with probability (lumaflip * strength).
v = misc.constant(np.asarray([1, 1, 1, 0]) / np.sqrt(3), device=device) # Luma axis.
if self.lumaflip > 0:
i = torch.floor(torch.rand([batch_size, 1, 1], device=device) * 2)
i = torch.where(torch.rand([batch_size, 1, 1], device=device) < self.lumaflip * self.p, i, torch.zeros_like(i))
if debug_percentile is not None:
i = torch.full_like(i, torch.floor(debug_percentile * 2))
C = (I_4 - 2 * v.ger(v) * i) @ C # Householder reflection.
# Apply hue rotation with probability (hue * strength).
if self.hue > 0 and num_channels > 1:
theta = (torch.rand([batch_size], device=device) * 2 - 1) * np.pi * self.hue_max
theta = torch.where(torch.rand([batch_size], device=device) < self.hue * self.p, theta, torch.zeros_like(theta))
if debug_percentile is not None:
theta = torch.full_like(theta, (debug_percentile * 2 - 1) * np.pi * self.hue_max)
C = rotate3d(v, theta) @ C # Rotate around v.
# Apply saturation with probability (saturation * strength).
if self.saturation > 0 and num_channels > 1:
s = torch.exp2(torch.randn([batch_size, 1, 1], device=device) * self.saturation_std)
s = torch.where(torch.rand([batch_size, 1, 1], device=device) < self.saturation * self.p, s, torch.ones_like(s))
if debug_percentile is not None:
s = torch.full_like(s, torch.exp2(torch.erfinv(debug_percentile * 2 - 1) * self.saturation_std))
C = (v.ger(v) + (I_4 - v.ger(v)) * s) @ C
# ------------------------------
# Execute color transformations.
# ------------------------------
# Execute if the transform is not identity.
if C is not I_4:
images = images.reshape([batch_size, num_channels, height * width])
if num_channels == 3:
images = C[:, :3, :3] @ images + C[:, :3, 3:]
elif num_channels == 1:
C = C[:, :3, :].mean(dim=1, keepdims=True)
images = images * C[:, :, :3].sum(dim=2, keepdims=True) + C[:, :, 3:]
else:
raise ValueError('Image must be RGB (3 channels) or L (1 channel)')
images = images.reshape([batch_size, num_channels, height, width])
# ----------------------
# Image-space filtering.
# ----------------------
if self.imgfilter > 0:
num_bands = self.Hz_fbank.shape[0]
assert len(self.imgfilter_bands) == num_bands
expected_power = misc.constant(np.array([10, 1, 1, 1]) / 13, device=device) # Expected power spectrum (1/f).
# Apply amplification for each band with probability (imgfilter * strength * band_strength).
g = torch.ones([batch_size, num_bands], device=device) # Global gain vector (identity).
for i, band_strength in enumerate(self.imgfilter_bands):
t_i = torch.exp2(torch.randn([batch_size], device=device) * self.imgfilter_std)
t_i = torch.where(torch.rand([batch_size], device=device) < self.imgfilter * self.p * band_strength, t_i, torch.ones_like(t_i))
if debug_percentile is not None:
t_i = torch.full_like(t_i, torch.exp2(torch.erfinv(debug_percentile * 2 - 1) * self.imgfilter_std)) if band_strength > 0 else torch.ones_like(t_i)
t = torch.ones([batch_size, num_bands], device=device) # Temporary gain vector.
t[:, i] = t_i # Replace i'th element.
t = t / (expected_power * t.square()).sum(dim=-1, keepdims=True).sqrt() # Normalize power.
g = g * t # Accumulate into global gain.
# Construct combined amplification filter.
Hz_prime = g @ self.Hz_fbank # [batch, tap]
Hz_prime = Hz_prime.unsqueeze(1).repeat([1, num_channels, 1]) # [batch, channels, tap]
Hz_prime = Hz_prime.reshape([batch_size * num_channels, 1, -1]) # [batch * channels, 1, tap]
# Apply filter.
p = self.Hz_fbank.shape[1] // 2
images = images.reshape([1, batch_size * num_channels, height, width])
images = torch.nn.functional.pad(input=images, pad=[p,p,p,p], mode='reflect')
images = conv2d_gradfix.conv2d(input=images, weight=Hz_prime.unsqueeze(2), groups=batch_size*num_channels)
images = conv2d_gradfix.conv2d(input=images, weight=Hz_prime.unsqueeze(3), groups=batch_size*num_channels)
images = images.reshape([batch_size, num_channels, height, width])
# ------------------------
# Image-space corruptions.
# ------------------------
# Apply additive RGB noise with probability (noise * strength).
if self.noise > 0:
sigma = torch.randn([batch_size, 1, 1, 1], device=device).abs() * self.noise_std
sigma = torch.where(torch.rand([batch_size, 1, 1, 1], device=device) < self.noise * self.p, sigma, torch.zeros_like(sigma))
if debug_percentile is not None:
sigma = torch.full_like(sigma, torch.erfinv(debug_percentile) * self.noise_std)
images = images + torch.randn([batch_size, num_channels, height, width], device=device) * sigma
# Apply cutout with probability (cutout * strength).
if self.cutout > 0:
size = torch.full([batch_size, 2, 1, 1, 1], self.cutout_size, device=device)
size = torch.where(torch.rand([batch_size, 1, 1, 1, 1], device=device) < self.cutout * self.p, size, torch.zeros_like(size))
center = torch.rand([batch_size, 2, 1, 1, 1], device=device)
if debug_percentile is not None:
size = torch.full_like(size, self.cutout_size)
center = torch.full_like(center, debug_percentile)
coord_x = torch.arange(width, device=device).reshape([1, 1, 1, -1])
coord_y = torch.arange(height, device=device).reshape([1, 1, -1, 1])
mask_x = (((coord_x + 0.5) / width - center[:, 0]).abs() >= size[:, 0] / 2)
mask_y = (((coord_y + 0.5) / height - center[:, 1]).abs() >= size[:, 1] / 2)
mask = torch.logical_or(mask_x, mask_y).to(torch.float32)
images = images * mask
return images
#----------------------------------------------------------------------------
| 26,617 | 59.910755 | 366 | py |
DiffProxy | DiffProxy-main/stylegan/training/dataset.py | # Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
"""Streaming images and labels from datasets created with dataset_tool.py."""
import os
import numpy as np
import zipfile
import PIL.Image
import json
import torch
import dnnlib
try:
import pyspng
except ImportError:
pyspng = None
#----------------------------------------------------------------------------
class Dataset(torch.utils.data.Dataset):
    """Abstract base class for image datasets.

    Handles optional size limiting (``max_size``), optional x-flip
    augmentation (which doubles the index space), and lazy loading of
    conditioning labels. Subclasses must implement ``_load_raw_image`` and
    ``_load_raw_labels``, and may override ``close``.
    """
    def __init__(self,
        name,                   # Name of the dataset.
        raw_shape,              # Shape of the raw image data (NCHW).
        max_size    = None,     # Artificially limit the size of the dataset. None = no limit. Applied before xflip.
        use_labels  = False,    # Enable conditioning labels? False = label dimension is zero.
        xflip       = False,    # Artificially double the size of the dataset via x-flips. Applied after max_size.
        random_seed = 0,        # Random seed to use when applying max_size.
    ):
        self._name = name
        self._raw_shape = list(raw_shape)
        self._use_labels = use_labels
        self._raw_labels = None     # Lazily populated by _get_raw_labels().
        self._label_shape = None    # Lazily populated by the label_shape property.
        # Apply max_size.
        self._raw_idx = np.arange(self._raw_shape[0], dtype=np.int64)
        if (max_size is not None) and (self._raw_idx.size > max_size):
            # Shuffle before truncating so the retained subset is random,
            # then re-sort to keep raw indices in ascending order.
            np.random.RandomState(random_seed).shuffle(self._raw_idx)
            self._raw_idx = np.sort(self._raw_idx[:max_size])
        # Apply xflip.
        self._xflip = np.zeros(self._raw_idx.size, dtype=np.uint8)
        if xflip:
            # Duplicate every index; the second copy is flagged for x-flipping.
            self._raw_idx = np.tile(self._raw_idx, 2)
            self._xflip = np.concatenate([self._xflip, np.ones_like(self._xflip)])
    def _get_raw_labels(self):
        """Load (on first access) and validate the raw label array."""
        if self._raw_labels is None:
            self._raw_labels = self._load_raw_labels() if self._use_labels else None
            if self._raw_labels is None:
                # Labels disabled or unavailable => zero-dimensional labels.
                self._raw_labels = np.zeros([self._raw_shape[0], 0], dtype=np.float32)
            assert isinstance(self._raw_labels, np.ndarray)
            assert self._raw_labels.shape[0] == self._raw_shape[0]
            assert self._raw_labels.dtype in [np.float32, np.int64]
            if self._raw_labels.dtype == np.int64:
                # int64 labels are class indices; must be 1-D and non-negative.
                assert self._raw_labels.ndim == 1
                assert np.all(self._raw_labels >= 0)
        return self._raw_labels
    def close(self): # to be overridden by subclass
        pass
    def _load_raw_image(self, raw_idx): # to be overridden by subclass
        raise NotImplementedError
    def _load_raw_labels(self): # to be overridden by subclass
        raise NotImplementedError
    def __getstate__(self):
        # Drop the cached labels when pickling (e.g. for DataLoader workers);
        # they are re-loaded lazily after unpickling.
        return dict(self.__dict__, _raw_labels=None)
    def __del__(self):
        # Best-effort cleanup; interpreter shutdown may make close() fail.
        try:
            self.close()
        except:
            pass
    def __len__(self):
        return self._raw_idx.size
    def __getitem__(self, idx):
        """Return ``(image, label)`` where image is a uint8 CHW copy."""
        image = self._load_raw_image(self._raw_idx[idx])
        assert isinstance(image, np.ndarray)
        assert list(image.shape) == self.image_shape
        assert image.dtype == np.uint8
        if self._xflip[idx]:
            assert image.ndim == 3 # CHW
            image = image[:, :, ::-1]   # Horizontal flip: reverse the width axis.
        return image.copy(), self.get_label(idx)
    def get_label(self, idx):
        """Return the label for `idx`, one-hot encoding class indices."""
        label = self._get_raw_labels()[self._raw_idx[idx]]
        if label.dtype == np.int64:
            # Convert a scalar class index into a float32 one-hot vector.
            onehot = np.zeros(self.label_shape, dtype=np.float32)
            onehot[label] = 1
            label = onehot
        return label.copy()
    def get_details(self, idx):
        """Return debug info: raw index, flip flag, and untransformed label."""
        d = dnnlib.EasyDict()
        d.raw_idx = int(self._raw_idx[idx])
        d.xflip = (int(self._xflip[idx]) != 0)
        d.raw_label = self._get_raw_labels()[d.raw_idx].copy()
        return d
    @property
    def name(self):
        return self._name
    @property
    def image_shape(self):
        # [C, H, W].
        return list(self._raw_shape[1:])
    @property
    def num_channels(self):
        assert len(self.image_shape) == 3 # CHW
        return self.image_shape[0]
    @property
    def resolution(self):
        # Images are assumed square; returns H (== W).
        assert len(self.image_shape) == 3 # CHW
        assert self.image_shape[1] == self.image_shape[2]
        return self.image_shape[1]
    @property
    def label_shape(self):
        if self._label_shape is None:
            raw_labels = self._get_raw_labels()
            if raw_labels.dtype == np.int64:
                # Class-index labels => shape of the one-hot encoding.
                self._label_shape = [int(np.max(raw_labels)) + 1]
            else:
                self._label_shape = raw_labels.shape[1:]
        return list(self._label_shape)
    @property
    def label_dim(self):
        assert len(self.label_shape) == 1
        return self.label_shape[0]
    @property
    def has_labels(self):
        return any(x != 0 for x in self.label_shape)
    @property
    def has_onehot_labels(self):
        return self._get_raw_labels().dtype == np.int64
#----------------------------------------------------------------------------
class ImageFolderDataset(Dataset):
    """Dataset backed by a directory tree of images or a zip archive.

    Labels, when present, are read from a 'dataset.json' member containing a
    ``{'labels': [[fname, label], ...]}`` mapping keyed by forward-slash paths.
    """
    def __init__(self,
        path,                   # Path to directory or zip.
        resolution      = None, # Ensure specific resolution, None = highest available.
        **super_kwargs,         # Additional arguments for the Dataset base class.
    ):
        self._path = path
        self._zipfile = None    # Opened lazily by _get_zipfile().
        if os.path.isdir(self._path):
            self._type = 'dir'
            self._all_fnames = {os.path.relpath(os.path.join(root, fname), start=self._path) for root, _dirs, files in os.walk(self._path) for fname in files}
        elif self._file_ext(self._path) == '.zip':
            self._type = 'zip'
            self._all_fnames = set(self._get_zipfile().namelist())
        else:
            raise IOError('Path must point to a directory or zip')
        PIL.Image.init()    # Populate PIL.Image.EXTENSION with supported file types.
        self._image_fnames = sorted(fname for fname in self._all_fnames if self._file_ext(fname) in PIL.Image.EXTENSION)
        if len(self._image_fnames) == 0:
            raise IOError('No image files found in the specified path')
        name = os.path.splitext(os.path.basename(self._path))[0]
        # Probe the first image to determine the dataset-wide image shape.
        raw_shape = [len(self._image_fnames)] + list(self._load_raw_image(0).shape)
        if resolution is not None and (raw_shape[2] != resolution or raw_shape[3] != resolution):
            raise IOError('Image files do not match the specified resolution')
        super().__init__(name=name, raw_shape=raw_shape, **super_kwargs)
    @staticmethod
    def _file_ext(fname):
        # Lower-cased file extension, including the leading dot.
        return os.path.splitext(fname)[1].lower()
    def _get_zipfile(self):
        """Return the backing ZipFile, opening it on first use."""
        assert self._type == 'zip'
        if self._zipfile is None:
            self._zipfile = zipfile.ZipFile(self._path)
        return self._zipfile
    def _open_file(self, fname):
        """Open a member file for binary reading, regardless of backing store."""
        if self._type == 'dir':
            return open(os.path.join(self._path, fname), 'rb')
        if self._type == 'zip':
            return self._get_zipfile().open(fname, 'r')
        return None
    def close(self):
        try:
            if self._zipfile is not None:
                self._zipfile.close()
        finally:
            self._zipfile = None
    def __getstate__(self):
        # Zip handles cannot be pickled; they are re-opened lazily after unpickling.
        return dict(super().__getstate__(), _zipfile=None)
    def _load_raw_image(self, raw_idx):
        """Decode one image and return it as a uint8-style CHW array."""
        fname = self._image_fnames[raw_idx]
        with self._open_file(fname) as f:
            if pyspng is not None and self._file_ext(fname) == '.png':
                # Use pyspng for faster PNG decoding when available.
                image = pyspng.load(f.read())
            else:
                image = np.array(PIL.Image.open(f))
        if image.ndim == 2:
            image = image[:, :, np.newaxis] # HW => HWC
        image = image.transpose(2, 0, 1) # HWC => CHW
        return image
    def _load_raw_labels(self):
        """Read labels from 'dataset.json', or return None if absent."""
        fname = 'dataset.json'
        if fname not in self._all_fnames:
            return None
        with self._open_file(fname) as f:
            labels = json.load(f)['labels']
        if labels is None:
            return None
        labels = dict(labels)
        # Order labels to match the sorted image file names; the JSON keys use
        # forward slashes, so normalize Windows path separators.
        labels = [labels[fname.replace('\\', '/')] for fname in self._image_fnames]
        labels = np.array(labels)
        # 1-D => class indices (int64); 2-D => float label vectors.
        labels = labels.astype({1: np.int64, 2: np.float32}[labels.ndim])
        return labels
#----------------------------------------------------------------------------
| 8,642 | 35.16318 | 158 | py |
DiffProxy | DiffProxy-main/stylegan/training/training_loop_gan.py | # Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
"""Main training loop."""
import os
import time
import copy
import json
import pickle
import psutil
import PIL.Image
import numpy as np
import torch
import dnnlib
from torch_utils import misc
from torch_utils import training_stats
from torch_utils.ops import conv2d_gradfix
from torch_utils.ops import grid_sample_gradfix
import legacy
from metrics import metric_main
from sbs_generators import get_normalizer
#----------------------------------------------------------------------------
def setup_snapshot_image_grid(training_set, random_seed=0):
    """Pick a random grid of training samples for snapshot images.

    Returns ``((grid_w, grid_h), images, labels)`` where images/labels are
    numpy arrays stacked along a new leading axis of size grid_w * grid_h.
    """
    rng = np.random.RandomState(random_seed)
    # Grid dimensions: roughly fill an 8K canvas, clamped to sane bounds.
    grid_w = np.clip(7680 // training_set.image_shape[2], 7, 32)
    grid_h = np.clip(4320 // training_set.image_shape[1], 4, 32)
    # Random subset of sample indices, wrapping around if the dataset is
    # smaller than the grid.
    order = list(range(len(training_set)))
    rng.shuffle(order)
    picked = [order[i % len(order)] for i in range(grid_w * grid_h)]
    samples = [training_set[i] for i in picked]
    images = np.stack([img for img, _lbl in samples])
    labels = np.stack([lbl for _img, lbl in samples])
    return (grid_w, grid_h), images, labels
#----------------------------------------------------------------------------
def save_image_grid(img, fname, drange, grid_size):
    """Tile a batch of images (N, C, H, W) into one mosaic and save it.

    `drange` is the (low, high) value range of `img`; it is remapped to
    [0, 255]. `grid_size` is (grid_w, grid_h) with grid_w * grid_h == N.
    """
    lo, hi = drange
    # Remap pixel values from [lo, hi] to [0, 255] and quantize to uint8.
    arr = np.asarray(img, dtype=np.float32)
    arr = (arr - lo) * (255 / (hi - lo))
    arr = np.rint(arr).clip(0, 255).astype(np.uint8)
    gw, gh = grid_size
    _num, channels, height, width = arr.shape
    # [N, C, H, W] -> [gh, gw, C, H, W] -> [gh, H, gw, W, C] -> [gh*H, gw*W, C].
    arr = arr.reshape([gh, gw, channels, height, width])
    arr = arr.transpose(0, 3, 1, 4, 2)
    arr = arr.reshape([gh * height, gw * width, channels])
    assert channels in [1, 3]
    if channels == 1:
        PIL.Image.fromarray(arr[:, :, 0], 'L').save(fname)
    if channels == 3:
        PIL.Image.fromarray(arr, 'RGB').save(fname)
#----------------------------------------------------------------------------
def training_loop(
    run_dir                 = '.',      # Output directory.
    training_set_kwargs     = {},       # Options for training set.
    data_loader_kwargs      = {},       # Options for torch.utils.data.DataLoader.
    G_kwargs                = {},       # Options for generator network.
    D_kwargs                = {},       # Options for discriminator network.
    G_opt_kwargs            = {},       # Options for generator optimizer.
    D_opt_kwargs            = {},       # Options for discriminator optimizer.
    augment_kwargs          = None,     # Options for augmentation pipeline. None = disable.
    loss_kwargs             = {},       # Options for loss function.
    metrics                 = [],       # Metrics to evaluate during training.
    random_seed             = 0,        # Global random seed.
    num_gpus                = 1,        # Number of GPUs participating in the training.
    rank                    = 0,        # Rank of the current process in [0, num_gpus[.
    batch_size              = 4,        # Total batch size for one training iteration. Can be larger than batch_gpu * num_gpus.
    batch_gpu               = 4,        # Number of samples processed at a time by one GPU.
    ema_kimg                = 10,       # Half-life of the exponential moving average (EMA) of generator weights.
    ema_rampup              = 0.05,     # EMA ramp-up coefficient. None = no rampup.
    G_reg_interval          = None,     # How often to perform regularization for G? None = disable lazy regularization.
    D_reg_interval          = 16,       # How often to perform regularization for D? None = disable lazy regularization.
    augment_p               = 0,        # Initial value of augmentation probability.
    ada_target              = None,     # ADA target value. None = fixed p.
    ada_interval            = 4,        # How often to perform ADA adjustment?
    ada_kimg                = 500,      # ADA adjustment speed, measured in how many kimg it takes for p to increase/decrease by one unit.
    total_kimg              = 25000,    # Total length of the training, measured in thousands of real images.
    kimg_per_tick           = 4,        # Progress snapshot interval.
    image_snapshot_ticks    = 50,       # How often to save image snapshots? None = disable.
    network_snapshot_ticks  = 50,       # How often to save network snapshots? None = disable.
    resume_pkl              = None,     # Network pickle to resume training from.
    resume_kimg             = 0,        # First kimg to report when resuming training.
    cudnn_benchmark         = True,     # Enable torch.backends.cudnn.benchmark?
    abort_fn                = None,     # Callback function for determining whether to abort training. Must return consistent results across ranks.
    progress_fn             = None,     # Callback function for updating training progress. Called for all ranks.
    generator_name          = None,     # Generator name used to get parameter normalizer
    norm_type               = 'none',   # Normalization types
    cond_D                  = True,     # Conditional on Discriminator
):
    """Main GAN training loop (one process per GPU, identified by `rank`).

    Constructs the training set, G/D networks and their optimizers, then
    alternates Gmain/Greg/Dmain/Dreg phases while maintaining an EMA copy of
    the generator. Periodically ("ticks") it prints stats, saves image and
    network snapshots, evaluates metrics, and writes jsonl/tensorboard logs.

    NOTE(review): `augment_pipe` is hard-wired to None below, so the
    `augment_kwargs`, `augment_p`, `ada_target`, `ada_interval` and
    `ada_kimg` arguments are accepted but unused in this variant.
    Conditioning labels are optionally normalized via `get_normalizer`
    (controlled by `generator_name` / `norm_type`) before being fed to the
    networks.
    """
    # Initialize.
    start_time = time.time()
    device = torch.device('cuda', rank)
    np.random.seed(random_seed * num_gpus + rank)
    torch.manual_seed(random_seed * num_gpus + rank)
    torch.backends.cudnn.benchmark = cudnn_benchmark    # Improves training speed.
    torch.backends.cuda.matmul.allow_tf32 = False       # Improves numerical accuracy.
    torch.backends.cudnn.allow_tf32 = False             # Improves numerical accuracy.
    conv2d_gradfix.enabled = True                       # Improves training speed.
    grid_sample_gradfix.enabled = True                  # Avoids errors with the augmentation pipe.
    # Load training set.
    if rank == 0:
        print('Loading training set...')
    training_set = dnnlib.util.construct_class_by_name(**training_set_kwargs) # subclass of training.dataset.Dataset
    training_set_sampler = misc.InfiniteSampler(dataset=training_set, rank=rank, num_replicas=num_gpus, seed=random_seed)
    training_set_iterator = iter(torch.utils.data.DataLoader(dataset=training_set, sampler=training_set_sampler, batch_size=batch_size//num_gpus, **data_loader_kwargs))
    if rank == 0:
        print()
        print('Num images: ', len(training_set))
        print('Image shape:', training_set.image_shape)
        print('Label shape:', training_set.label_shape)
        print('Batch Size:', batch_size)
        print('# of GPU: ', num_gpus)
        print('Batch per GPU:', batch_gpu)
        print()
    # Augmentation pipeline is disabled in this variant (see docstring).
    augment_pipe = None
    # Construct networks.
    if rank == 0:
        print('Constructing networks...')
    common_kwargs = dict(c_dim=training_set.label_dim, img_resolution=training_set.resolution, img_channels=training_set.num_channels)
    G = dnnlib.util.construct_class_by_name(**G_kwargs, **common_kwargs).train().requires_grad_(False).to(device) # subclass of torch.nn.Module
    d_common_kwargs = common_kwargs.copy()
    if not cond_D:
        # Unconditional discriminator: zero out its label dimension.
        d_common_kwargs['c_dim'] = 0
    D = dnnlib.util.construct_class_by_name(**D_kwargs, **d_common_kwargs).train().requires_grad_(False).to(device) # subclass of torch.nn.Module
    G_ema = copy.deepcopy(G).eval()
    if rank == 0:
        print("D.c_dim = ", D.c_dim)
    # Resume from existing pickle.
    if (resume_pkl is not None) and (rank == 0):
        print(f'Resuming from "{resume_pkl}"')
        with dnnlib.util.open_url(resume_pkl) as f:
            resume_data = legacy.load_network_pkl(f)
        for name, module in [('G', G), ('D', D), ('G_ema', G_ema)]:
            misc.copy_params_and_buffers(resume_data[name], module, require_all=False)
    # Print network summary tables.
    if rank == 0:
        z = torch.empty([batch_gpu, G.z_dim], device=device)
        c = torch.empty([batch_gpu, G.c_dim], device=device)
        img = misc.print_module_summary(G, [z, c])
        misc.print_module_summary(D, [img, c])
    # Distribute across GPUs.
    if rank == 0:
        print(f'Distributing across {num_gpus} GPUs...')
    for module in [G, D, G_ema, augment_pipe]:
        if module is not None and num_gpus > 1:
            # Broadcast rank-0 weights so all replicas start identical.
            for param in misc.params_and_buffers(module):
                torch.distributed.broadcast(param, src=0)
    # Setup training phases.
    if rank == 0:
        print('Setting up training phases...')
    loss = dnnlib.util.construct_class_by_name(device=device, G=G, D=D, augment_pipe=augment_pipe, **loss_kwargs) # subclass of training.loss.Loss
    phases = []
    for name, module, opt_kwargs, reg_interval in [('G', G, G_opt_kwargs, G_reg_interval), ('D', D, D_opt_kwargs, D_reg_interval)]:
        if reg_interval is None:
            opt = dnnlib.util.construct_class_by_name(params=module.parameters(), **opt_kwargs) # subclass of torch.optim.Optimizer
            phases += [dnnlib.EasyDict(name=name+'both', module=module, opt=opt, interval=1)]
        else: # Lazy regularization.
            # Rescale learning rate / betas so that running the regularizer
            # only every `reg_interval` steps keeps the effective schedule.
            mb_ratio = reg_interval / (reg_interval + 1)
            opt_kwargs = dnnlib.EasyDict(opt_kwargs)
            opt_kwargs.lr = opt_kwargs.lr * mb_ratio
            opt_kwargs.betas = [beta ** mb_ratio for beta in opt_kwargs.betas]
            opt = dnnlib.util.construct_class_by_name(module.parameters(), **opt_kwargs) # subclass of torch.optim.Optimizer
            phases += [dnnlib.EasyDict(name=name+'main', module=module, opt=opt, interval=1)]
            phases += [dnnlib.EasyDict(name=name+'reg', module=module, opt=opt, interval=reg_interval)]
    for phase in phases:
        phase.start_event = None
        phase.end_event = None
        if rank == 0:
            # CUDA events for per-phase timing (rank 0 only).
            phase.start_event = torch.cuda.Event(enable_timing=True)
            phase.end_event = torch.cuda.Event(enable_timing=True)
    # initialize normalizer
    assert norm_type in ['none', 'norm', 'std']
    normalizer = get_normalizer(generator_name, norm_type, batch_size, device)
    if rank == 0:
        print(f'Normalization type: {norm_type}')
        print(f'Normalizer: {normalizer}')
    # Export sample images.
    grid_size = None
    grid_z = None
    grid_c = None
    if rank == 0:
        print('Exporting sample images...')
        grid_size, images, labels = setup_snapshot_image_grid(training_set=training_set)
        save_image_grid(images, os.path.join(run_dir, 'reals.png'), drange=[0,255], grid_size=grid_size)
        grid_z = torch.randn([labels.shape[0], G.z_dim], device=device).split(batch_gpu)
        # grid_c = torch.from_numpy(labels).to(device).split(batch_gpu)
        grid_c = torch.from_numpy(labels).to(device)
        if normalizer is not None:
            # Re-create the normalizer sized for the snapshot grid batch.
            normalizer_grid = get_normalizer(generator_name, norm_type, grid_c.shape[0], device)
            grid_c = normalizer_grid.normalize(grid_c)
        grid_c = grid_c.split(batch_gpu)
        images = torch.cat([G_ema(z=z, c=c, noise_mode='const').cpu() for z, c in zip(grid_z, grid_c)]).numpy()
        save_image_grid(images, os.path.join(run_dir, 'fakes_init.png'), drange=[-1,1], grid_size=grid_size)
    # Initialize logs.
    if rank == 0:
        print('Initializing logs...')
    stats_collector = training_stats.Collector(regex='.*')
    stats_metrics = dict()
    stats_jsonl = None
    stats_tfevents = None
    if rank == 0:
        stats_jsonl = open(os.path.join(run_dir, 'stats.jsonl'), 'wt')
        try:
            import torch.utils.tensorboard as tensorboard
            stats_tfevents = tensorboard.SummaryWriter(run_dir)
        except ImportError as err:
            print('Skipping tfevents export:', err)
    # Train.
    if rank == 0:
        print(f'Training for {total_kimg} kimg...')
        print()
    cur_nimg = resume_kimg * 1000
    cur_tick = 0
    tick_start_nimg = cur_nimg
    tick_start_time = time.time()
    maintenance_time = tick_start_time - start_time
    batch_idx = 0
    if progress_fn is not None:
        progress_fn(0, total_kimg)
    while True:
        # Fetch training data.
        with torch.autograd.profiler.record_function('data_fetch'):
            phase_real_img, phase_real_c = next(training_set_iterator)
            # Map uint8 pixels [0, 255] to float [-1, 1].
            phase_real_img = (phase_real_img.to(device).to(torch.float32) / 127.5 - 1).split(batch_gpu)
            # phase_real_c = phase_real_c.to(device).split(batch_gpu)
            phase_real_c = phase_real_c.to(device)
            if normalizer is not None:
                phase_real_c = normalizer.normalize(phase_real_c)
            phase_real_c = phase_real_c.split(batch_gpu)
            all_gen_z = torch.randn([len(phases) * batch_size, G.z_dim], device=device)
            all_gen_z = [phase_gen_z.split(batch_gpu) for phase_gen_z in all_gen_z.split(batch_size)]
        # Execute training phases.
        for phase, phase_gen_z in zip(phases, all_gen_z):
            # (Gmain 1); (Greg 4); (Dmain, 1); (Dreg, 16);
            if batch_idx % phase.interval != 0:
                continue
            if phase.start_event is not None:
                phase.start_event.record(torch.cuda.current_stream(device))
            # Accumulate gradients.
            phase.opt.zero_grad(set_to_none=True)
            phase.module.requires_grad_(True)
            for real_img, real_c, gen_z in zip(phase_real_img, phase_real_c, phase_gen_z):
                loss.accumulate_gradients(phase=phase.name, real_img=real_img, real_c=real_c, gen_z=gen_z,
                                          gain=phase.interval, cur_nimg=cur_nimg)
            phase.module.requires_grad_(False)
            # Update weights.
            with torch.autograd.profiler.record_function(phase.name + '_opt'):
                params = [param for param in phase.module.parameters() if param.grad is not None]
                if len(params) > 0:
                    # Flatten all grads into one buffer so multi-GPU
                    # averaging needs a single all_reduce.
                    flat = torch.cat([param.grad.flatten() for param in params])
                    if num_gpus > 1:
                        torch.distributed.all_reduce(flat)
                        flat /= num_gpus
                    # Sanitize NaN/Inf gradients before the optimizer step.
                    misc.nan_to_num(flat, nan=0, posinf=1e5, neginf=-1e5, out=flat)
                    grads = flat.split([param.numel() for param in params])
                    for param, grad in zip(params, grads):
                        param.grad = grad.reshape(param.shape)
                phase.opt.step()
            # Phase done.
            if phase.end_event is not None:
                phase.end_event.record(torch.cuda.current_stream(device))
        # Update G_ema.
        with torch.autograd.profiler.record_function('Gema'):
            ema_nimg = ema_kimg * 1000
            if ema_rampup is not None:
                ema_nimg = min(ema_nimg, cur_nimg * ema_rampup)
            ema_beta = 0.5 ** (batch_size / max(ema_nimg, 1e-8))
            for p_ema, p in zip(G_ema.parameters(), G.parameters()):
                p_ema.copy_(p.lerp(p_ema, ema_beta))
            for b_ema, b in zip(G_ema.buffers(), G.buffers()):
                b_ema.copy_(b)
        # Update state.
        cur_nimg += batch_size
        batch_idx += 1
        # Perform maintenance tasks once per tick.
        done = (cur_nimg >= total_kimg * 1000)
        if (not done) and (cur_tick != 0) and (cur_nimg < tick_start_nimg + kimg_per_tick * 1000):
            continue
        # Print status line, accumulating the same information in training_stats.
        tick_end_time = time.time()
        fields = []
        fields += [f"tick {training_stats.report0('Progress/tick', cur_tick):<5d}"]
        fields += [f"kimg {training_stats.report0('Progress/kimg', cur_nimg / 1e3):<8.1f}"]
        fields += [f"time {dnnlib.util.format_time(training_stats.report0('Timing/total_sec', tick_end_time - start_time)):<12s}"]
        fields += [f"sec/tick {training_stats.report0('Timing/sec_per_tick', tick_end_time - tick_start_time):<7.1f}"]
        fields += [f"sec/kimg {training_stats.report0('Timing/sec_per_kimg', (tick_end_time - tick_start_time) / (cur_nimg - tick_start_nimg) * 1e3):<7.2f}"]
        fields += [f"maintenance {training_stats.report0('Timing/maintenance_sec', maintenance_time):<6.1f}"]
        fields += [f"cpumem {training_stats.report0('Resources/cpu_mem_gb', psutil.Process(os.getpid()).memory_info().rss / 2**30):<6.2f}"]
        fields += [f"gpumem {training_stats.report0('Resources/peak_gpu_mem_gb', torch.cuda.max_memory_allocated(device) / 2**30):<6.2f}"]
        fields += [f"reserved {training_stats.report0('Resources/peak_gpu_mem_reserved_gb', torch.cuda.max_memory_reserved(device) / 2**30):<6.2f}"]
        torch.cuda.reset_peak_memory_stats()
        fields += [f"augment {training_stats.report0('Progress/augment', float(augment_pipe.p.cpu()) if augment_pipe is not None else 0):.3f}"]
        training_stats.report0('Timing/total_hours', (tick_end_time - start_time) / (60 * 60))
        training_stats.report0('Timing/total_days', (tick_end_time - start_time) / (24 * 60 * 60))
        if rank == 0:
            print(' '.join(fields))
        # Check for abort.
        if (not done) and (abort_fn is not None) and abort_fn():
            done = True
            if rank == 0:
                print()
                print('Aborting...')
        # Save image snapshot.
        if (rank == 0) and (image_snapshot_ticks is not None) and (done or cur_tick % image_snapshot_ticks == 0):
            images = torch.cat([G_ema(z=z, c=c, noise_mode='const').cpu() for z, c in zip(grid_z, grid_c)]).numpy()
            save_image_grid(images, os.path.join(run_dir, f'fakes{cur_nimg//1000:06d}.png'), drange=[-1,1], grid_size=grid_size)
        # Save network snapshot.
        snapshot_pkl = None
        snapshot_data = None
        if (network_snapshot_ticks is not None) and (done or cur_tick % network_snapshot_ticks == 0):
            snapshot_data = dict(G=G, D=D, G_ema=G_ema, augment_pipe=augment_pipe, training_set_kwargs=dict(training_set_kwargs))
            for key, value in snapshot_data.items():
                if isinstance(value, torch.nn.Module):
                    # Snapshot a frozen CPU copy; also sanity-check that all
                    # replicas agree (broadcast keeps them in sync).
                    value = copy.deepcopy(value).eval().requires_grad_(False)
                    if num_gpus > 1:
                        misc.check_ddp_consistency(value, ignore_regex=r'.*\.[^.]+_(avg|ema)')
                        for param in misc.params_and_buffers(value):
                            torch.distributed.broadcast(param, src=0)
                    snapshot_data[key] = value.cpu()
                del value # conserve memory
            snapshot_pkl = os.path.join(run_dir, f'network-snapshot-{cur_nimg//1000:06d}.pkl')
            if rank == 0:
                with open(snapshot_pkl, 'wb') as f:
                    pickle.dump(snapshot_data, f)
        # Evaluate metrics.
        if (snapshot_data is not None) and (len(metrics) > 0):
            if rank == 0:
                print('Evaluating metrics...')
            for metric in metrics:
                result_dict = metric_main.calc_metric(metric=metric, G=snapshot_data['G_ema'],
                                                      dataset_kwargs=training_set_kwargs, num_gpus=num_gpus, rank=rank, device=device)
                if rank == 0:
                    metric_main.report_metric(result_dict, run_dir=run_dir, snapshot_pkl=snapshot_pkl)
                stats_metrics.update(result_dict.results)
        del snapshot_data # conserve memory
        # Collect statistics.
        for phase in phases:
            value = []
            if (phase.start_event is not None) and (phase.end_event is not None):
                phase.end_event.synchronize()
                value = phase.start_event.elapsed_time(phase.end_event)
            training_stats.report0('Timing/' + phase.name, value)
        stats_collector.update()
        stats_dict = stats_collector.as_dict()
        # Update logs.
        timestamp = time.time()
        if stats_jsonl is not None:
            fields = dict(stats_dict, timestamp=timestamp)
            stats_jsonl.write(json.dumps(fields) + '\n')
            stats_jsonl.flush()
        if stats_tfevents is not None:
            global_step = int(cur_nimg / 1e3)
            walltime = timestamp - start_time
            for name, value in stats_dict.items():
                stats_tfevents.add_scalar(name, value.mean, global_step=global_step, walltime=walltime)
            for name, value in stats_metrics.items():
                stats_tfevents.add_scalar(f'Metrics/{name}', value, global_step=global_step, walltime=walltime)
            stats_tfevents.flush()
        if progress_fn is not None:
            progress_fn(cur_nimg // 1000, total_kimg)
        # Update state.
        cur_tick += 1
        tick_start_nimg = cur_nimg
        tick_start_time = time.time()
        maintenance_time = tick_start_time - tick_end_time
        if done:
            break
    # Done.
    if rank == 0:
        print()
        print('Exiting...')
#----------------------------------------------------------------------------
| 21,150 | 49.239905 | 168 | py |
DiffProxy | DiffProxy-main/stylegan/training/training_loop_nogan.py | # Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
"""Main training loop."""
import os
import time
import copy
import json
import pickle
import psutil
import PIL.Image
import numpy as np
import torch
import dnnlib
from torch_utils import misc
from torch_utils import training_stats
from torch_utils.ops import conv2d_gradfix
from torch_utils.ops import grid_sample_gradfix
import legacy
from metrics import metric_main
from sbs_generators import get_normalizer
#----------------------------------------------------------------------------
def setup_snapshot_image_grid(training_set, random_seed=0):
    """Select a random subset of training samples arranged as a snapshot grid."""
    rnd = np.random.RandomState(random_seed)
    # Grid dimensions: fill roughly an 8K canvas, clamped to sane bounds.
    gw = np.clip(7680 // training_set.image_shape[2], 7, 32)
    gh = np.clip(4320 // training_set.image_shape[1], 4, 32)
    # Shuffle all indices and take the first gw*gh, repeating if the
    # dataset has fewer samples than the grid needs.
    shuffled = list(range(len(training_set)))
    rnd.shuffle(shuffled)
    total = gw * gh
    chosen = [shuffled[i % len(shuffled)] for i in range(total)]
    pairs = [training_set[i] for i in chosen]
    images, labels = zip(*pairs)
    return (gw, gh), np.stack(images), np.stack(labels)
#----------------------------------------------------------------------------
def save_image_grid(img, fname, drange, grid_size):
    """Assemble a batch of CHW images into one mosaic and write it to `fname`.

    Pixel values in the (low, high) range `drange` are remapped to [0, 255];
    `grid_size` is (grid_w, grid_h) and must cover the whole batch.
    """
    lo, hi = drange
    scaled = (np.asarray(img, dtype=np.float32) - lo) * (255 / (hi - lo))
    scaled = np.rint(scaled).clip(0, 255).astype(np.uint8)
    gw, gh = grid_size
    _n, c, h, w = scaled.shape
    # [N, C, H, W] -> [gh, gw, C, H, W] -> [gh, H, gw, W, C] -> [gh*H, gw*W, C].
    mosaic = scaled.reshape([gh, gw, c, h, w]).transpose(0, 3, 1, 4, 2).reshape([gh * h, gw * w, c])
    assert c in [1, 3]
    if c == 1:
        PIL.Image.fromarray(mosaic[:, :, 0], 'L').save(fname)
    if c == 3:
        PIL.Image.fromarray(mosaic, 'RGB').save(fname)
# ----------------------------------------------------------------------------
def training_loop(
    run_dir                 = '.',      # Output directory.
    training_set_kwargs     = {},       # Options for training set.
    data_loader_kwargs      = {},       # Options for torch.utils.data.DataLoader.
    G_kwargs                = {},       # Options for generator network.
    D_kwargs                = {},       # Options for discriminator network.
    G_opt_kwargs            = {},       # Options for generator optimizer.
    D_opt_kwargs            = {},       # Options for discriminator optimizer.
    augment_kwargs          = None,     # Options for augmentation pipeline. None = disable.
    loss_kwargs             = {},       # Options for loss function.
    metrics                 = [],       # Metrics to evaluate during training.
    random_seed             = 0,        # Global random seed.
    num_gpus                = 1,        # Number of GPUs participating in the training.
    rank                    = 0,        # Rank of the current process in [0, num_gpus[.
    batch_size              = 4,        # Total batch size for one training iteration. Can be larger than batch_gpu * num_gpus.
    batch_gpu               = 4,        # Number of samples processed at a time by one GPU.
    ema_kimg                = 10,       # Half-life of the exponential moving average (EMA) of generator weights.
    ema_rampup              = 0.05,     # EMA ramp-up coefficient. None = no rampup.
    G_reg_interval          = None,     # How often to perform regularization for G? None = disable lazy regularization.
    D_reg_interval          = 16,       # How often to perform regularization for D? None = disable lazy regularization.
    augment_p               = 0,        # Initial value of augmentation probability.
    ada_target              = None,     # ADA target value. None = fixed p.
    ada_interval            = 4,        # How often to perform ADA adjustment?
    ada_kimg                = 500,      # ADA adjustment speed, measured in how many kimg it takes for p to increase/decrease by one unit.
    total_kimg              = 25000,    # Total length of the training, measured in thousands of real images.
    kimg_per_tick           = 4,        # Progress snapshot interval.
    image_snapshot_ticks    = 50,       # How often to save image snapshots? None = disable.
    network_snapshot_ticks  = 50,       # How often to save network snapshots? None = disable.
    resume_pkl              = None,     # Network pickle to resume training from.
    resume_kimg             = 0,        # First kimg to report when resuming training.
    cudnn_benchmark         = True,     # Enable torch.backends.cudnn.benchmark?
    abort_fn                = None,     # Callback function for determining whether to abort training. Must return consistent results across ranks.
    progress_fn             = None,     # Callback function for updating training progress. Called for all ranks.
    generator_name          = None,     # Generator name used to get parameter normalizer.
    norm_type               = 'none',   # Normalization type: 'none', 'norm', or 'std'.
):
    """Generator-only training loop.

    Constructs the dataset and generator G, optionally resumes from a pickle,
    then repeatedly accumulates gradients and steps the optimizer until
    `total_kimg` thousand images have been processed. Once per tick it prints
    progress, saves image/network snapshots, evaluates metrics, and updates
    the JSONL/TensorBoard logs.

    NOTE(review): the discriminator/EMA/ADA-related parameters (D_kwargs,
    D_opt_kwargs, augment_kwargs, ema_kimg, ema_rampup, G_reg_interval,
    D_reg_interval, augment_p, ada_target, ada_interval, ada_kimg) are not
    referenced by this loop; they are accepted only so existing launch
    configs keep working.
    """
    # Initialize.
    start_time = time.time()
    device = torch.device('cuda', rank)
    # Give every process a distinct but reproducible random stream.
    np.random.seed(random_seed * num_gpus + rank)
    torch.manual_seed(random_seed * num_gpus + rank)
    torch.backends.cudnn.benchmark = cudnn_benchmark    # Improves training speed.
    torch.backends.cuda.matmul.allow_tf32 = False       # Improves numerical accuracy.
    torch.backends.cudnn.allow_tf32 = False             # Improves numerical accuracy.
    conv2d_gradfix.enabled = True                       # Improves training speed.
    grid_sample_gradfix.enabled = True                  # Avoids errors with the augmentation pipe.

    # Load training set.
    if rank == 0:
        print('Loading training set...')
    training_set = dnnlib.util.construct_class_by_name(**training_set_kwargs) # subclass of training.dataset.Dataset
    training_set_sampler = misc.InfiniteSampler(dataset=training_set, rank=rank, num_replicas=num_gpus, seed=random_seed)
    training_set_iterator = iter(torch.utils.data.DataLoader(dataset=training_set, sampler=training_set_sampler, batch_size=batch_size//num_gpus, **data_loader_kwargs))
    if rank == 0:
        print()
        print('Num images: ', len(training_set))
        print('Image shape:', training_set.image_shape)
        print('Label shape:', training_set.label_shape)
        print('Batch Size:', batch_size)
        print('# of GPU: ', num_gpus)
        print('Batch per GPU:', batch_gpu)
        print()
    # ADA augmentation is disabled in this generator-only loop.
    augment_pipe = None

    # Construct networks.
    if rank == 0:
        print('Constructing networks...')
    common_kwargs = dict(c_dim=training_set.label_dim, img_resolution=training_set.resolution, img_channels=training_set.num_channels)
    G = dnnlib.util.construct_class_by_name(**G_kwargs, **common_kwargs).train().requires_grad_(False).to(device) # subclass of torch.nn.Module

    # Resume from existing pickle.
    if (resume_pkl is not None) and (rank == 0):
        print(f'Resuming from "{resume_pkl}"')
        with dnnlib.util.open_url(resume_pkl) as f:
            resume_data = pickle.load(f)
        for name, module in [('G', G)]:
            misc.copy_params_and_buffers(resume_data[name], module, require_all=False)

    # Print network summary tables.
    if rank == 0:
        z = torch.empty([batch_gpu, G.z_dim], device=device)
        c = torch.empty([batch_gpu, G.c_dim], device=device)
        misc.print_module_summary(G, [z, c])

    # Setup training phases.
    if rank == 0:
        print('Setting up training phases...')

    # Distribute across GPUs: broadcast rank-0 weights so all replicas start identical.
    if rank == 0:
        print(f'Distributing across {num_gpus} GPUs...')
    for module in [G]:
        if module is not None and num_gpus > 1:
            for param in misc.params_and_buffers(module):
                torch.distributed.broadcast(param, src=0)
    loss = dnnlib.util.construct_class_by_name(device=device, G=G, D=None, augment_pipe=augment_pipe, **loss_kwargs) # subclass of training.loss.Loss
    phases = []
    opt = dnnlib.util.construct_class_by_name(params=G.parameters(), **G_opt_kwargs) # subclass of torch.optim.Optimizer
    phases += [dnnlib.EasyDict(name='G', module=G, opt=opt)]
    for phase in phases:
        phase.start_event = None
        phase.end_event = None
        if rank == 0:
            # CUDA events are used to time each phase on rank 0 only.
            phase.start_event = torch.cuda.Event(enable_timing=True)
            phase.end_event = torch.cuda.Event(enable_timing=True)

    # Initialize normalizer for the generator's conditioning parameters.
    assert norm_type in ['none', 'norm', 'std']
    normalizer = get_normalizer(generator_name, norm_type, batch_size, device)
    if rank == 0:
        print(f'Normalization type: {norm_type}')
        print(f'Normalizer: {normalizer}')

    # Export sample images.
    grid_size = None
    grid_z = None
    grid_c = None
    if rank == 0:
        print('Exporting sample images...')
        grid_size, images, labels = setup_snapshot_image_grid(training_set=training_set)
        save_image_grid(images, os.path.join(run_dir, 'reals.png'), drange=[0,255], grid_size=grid_size)
        grid_z = torch.randn([labels.shape[0], G.z_dim], device=device).split(batch_gpu)
        grid_c = torch.from_numpy(labels).to(device)
        if normalizer is not None:
            # The snapshot grid may have a different batch size than training,
            # so build a matching normalizer for it.
            normalizer_grid = get_normalizer(generator_name, norm_type, grid_c.shape[0], device)
            grid_c = normalizer_grid.normalize(grid_c)
        grid_c = grid_c.split(batch_gpu)
        images = torch.cat([G(z=z, c=c, noise_mode='const').cpu() for z, c in zip(grid_z, grid_c)]).numpy()
        save_image_grid(images, os.path.join(run_dir, 'fakes_init.png'), drange=[-1,1], grid_size=grid_size)

    # Initialize logs.
    if rank == 0:
        print('Initializing logs...')
    stats_collector = training_stats.Collector(regex='.*')
    stats_metrics = dict()
    stats_jsonl = None
    stats_tfevents = None
    if rank == 0:
        stats_jsonl = open(os.path.join(run_dir, 'stats.jsonl'), 'wt')
        try:
            import torch.utils.tensorboard as tensorboard
            stats_tfevents = tensorboard.SummaryWriter(run_dir)
        except ImportError as err:
            # TensorBoard is optional; training proceeds with JSONL logging only.
            print('Skipping tfevents export:', err)

    # Train.
    if rank == 0:
        print(f'Training for {total_kimg} kimg...')
        print()
    cur_nimg = resume_kimg * 1000
    cur_tick = 0
    tick_start_nimg = cur_nimg
    tick_start_time = time.time()
    maintenance_time = tick_start_time - start_time
    batch_idx = 0
    if progress_fn is not None:
        progress_fn(0, total_kimg)
    while True:

        # Fetch training data.
        with torch.autograd.profiler.record_function('data_fetch'):
            phase_real_img, phase_real_c = next(training_set_iterator)
            # Map uint8 pixels [0, 255] to float [-1, 1] and shard across batch_gpu chunks.
            phase_real_img = (phase_real_img.to(device).to(torch.float32) / 127.5 - 1).split(batch_gpu)
            phase_real_c = phase_real_c.to(device)
            if normalizer is not None:
                phase_real_c = normalizer.normalize(phase_real_c)
            phase_real_c = phase_real_c.split(batch_gpu)
            all_gen_z = torch.randn([len(phases) * batch_size, G.z_dim], device=device)
            all_gen_z = [phase_gen_z.split(batch_gpu) for phase_gen_z in all_gen_z.split(batch_size)]

        # Execute training phases.
        for phase, phase_gen_z in zip(phases, all_gen_z):
            if phase.start_event is not None:
                phase.start_event.record(torch.cuda.current_stream(device))

            # Accumulate gradients over the batch_gpu-sized sub-batches.
            phase.opt.zero_grad(set_to_none=True)
            phase.module.requires_grad_(True)
            for real_img, real_c, gen_z in zip(phase_real_img, phase_real_c, phase_gen_z):
                loss.accumulate_gradients(real_img=real_img, real_c=real_c, gen_z=gen_z)
            phase.module.requires_grad_(False)

            # Update weights: average gradients across GPUs and sanitize NaN/inf.
            with torch.autograd.profiler.record_function(phase.name + '_opt'):
                params = [param for param in phase.module.parameters() if param.grad is not None]
                if len(params) > 0:
                    flat = torch.cat([param.grad.flatten() for param in params])
                    if num_gpus > 1:
                        torch.distributed.all_reduce(flat)
                        flat /= num_gpus
                    misc.nan_to_num(flat, nan=0, posinf=1e5, neginf=-1e5, out=flat)
                    grads = flat.split([param.numel() for param in params])
                    for param, grad in zip(params, grads):
                        param.grad = grad.reshape(param.shape)
                phase.opt.step()

            # Phase done.
            if phase.end_event is not None:
                phase.end_event.record(torch.cuda.current_stream(device))

        # Update state.
        cur_nimg += batch_size
        batch_idx += 1

        # Perform maintenance tasks once per tick.
        done = (cur_nimg >= total_kimg * 1000)
        if (not done) and (cur_tick != 0) and (cur_nimg < tick_start_nimg + kimg_per_tick * 1000):
            continue

        # Print status line, accumulating the same information in training_stats.
        tick_end_time = time.time()
        fields = []
        fields += [f"tick {training_stats.report0('Progress/tick', cur_tick):<5d}"]
        fields += [f"kimg {training_stats.report0('Progress/kimg', cur_nimg / 1e3):<8.1f}"]
        fields += [f"time {dnnlib.util.format_time(training_stats.report0('Timing/total_sec', tick_end_time - start_time)):<12s}"]
        fields += [f"sec/tick {training_stats.report0('Timing/sec_per_tick', tick_end_time - tick_start_time):<7.1f}"]
        fields += [f"sec/kimg {training_stats.report0('Timing/sec_per_kimg', (tick_end_time - tick_start_time) / (cur_nimg - tick_start_nimg) * 1e3):<7.2f}"]
        fields += [f"maintenance {training_stats.report0('Timing/maintenance_sec', maintenance_time):<6.1f}"]
        fields += [f"cpumem {training_stats.report0('Resources/cpu_mem_gb', psutil.Process(os.getpid()).memory_info().rss / 2**30):<6.2f}"]
        fields += [f"gpumem {training_stats.report0('Resources/peak_gpu_mem_gb', torch.cuda.max_memory_allocated(device) / 2**30):<6.2f}"]
        fields += [f"reserved {training_stats.report0('Resources/peak_gpu_mem_reserved_gb', torch.cuda.max_memory_reserved(device) / 2**30):<6.2f}"]
        torch.cuda.reset_peak_memory_stats()
        fields += [f"augment {training_stats.report0('Progress/augment', float(augment_pipe.p.cpu()) if augment_pipe is not None else 0):.3f}"]
        training_stats.report0('Timing/total_hours', (tick_end_time - start_time) / (60 * 60))
        training_stats.report0('Timing/total_days', (tick_end_time - start_time) / (24 * 60 * 60))
        if rank == 0:
            print(' '.join(fields))

        # Check for abort.
        if (not done) and (abort_fn is not None) and abort_fn():
            done = True
            if rank == 0:
                print()
                print('Aborting...')

        # Save image snapshot.
        if (rank == 0) and (image_snapshot_ticks is not None) and (done or cur_tick % image_snapshot_ticks == 0):
            images = torch.cat([G(z=z, c=c, noise_mode='const').cpu() for z, c in zip(grid_z, grid_c)]).numpy()
            save_image_grid(images, os.path.join(run_dir, f'fakes{cur_nimg//1000:06d}.png'), drange=[-1,1], grid_size=grid_size)

        # Save network snapshot.
        snapshot_pkl = None
        snapshot_data = None
        if (network_snapshot_ticks is not None) and (done or cur_tick % network_snapshot_ticks == 0):
            snapshot_data = dict(G=G, training_set_kwargs=dict(training_set_kwargs))
            for key, value in snapshot_data.items():
                if isinstance(value, torch.nn.Module):
                    value = copy.deepcopy(value).eval().requires_grad_(False)
                    if num_gpus > 1:
                        misc.check_ddp_consistency(value, ignore_regex=r'.*\.[^.]+_(avg|ema)')
                        for param in misc.params_and_buffers(value):
                            torch.distributed.broadcast(param, src=0)
                    snapshot_data[key] = value.cpu()
                del value # conserve memory
            snapshot_pkl = os.path.join(run_dir, f'network-snapshot-{cur_nimg//1000:06d}.pkl')
            if rank == 0:
                with open(snapshot_pkl, 'wb') as f:
                    pickle.dump(snapshot_data, f)

        # Evaluate metrics.
        if (snapshot_data is not None) and (len(metrics) > 0):
            if rank == 0:
                print('Evaluating metrics...')
            for metric in metrics:
                # BUGFIX: this loop stores the generator under key 'G' (no EMA
                # copy is maintained), so index 'G' rather than 'G_ema', which
                # would raise KeyError.
                result_dict = metric_main.calc_metric(metric=metric, G=snapshot_data['G'],
                    dataset_kwargs=training_set_kwargs, num_gpus=num_gpus, rank=rank, device=device)
                if rank == 0:
                    metric_main.report_metric(result_dict, run_dir=run_dir, snapshot_pkl=snapshot_pkl)
                stats_metrics.update(result_dict.results)
            del snapshot_data # conserve memory

        # Collect statistics.
        for phase in phases:
            value = []
            if (phase.start_event is not None) and (phase.end_event is not None):
                phase.end_event.synchronize()
                value = phase.start_event.elapsed_time(phase.end_event)
            training_stats.report0('Timing/' + phase.name, value)
        stats_collector.update()
        stats_dict = stats_collector.as_dict()

        # Update logs.
        timestamp = time.time()
        if stats_jsonl is not None:
            fields = dict(stats_dict, timestamp=timestamp)
            stats_jsonl.write(json.dumps(fields) + '\n')
            stats_jsonl.flush()
        if stats_tfevents is not None:
            global_step = int(cur_nimg / 1e3)
            walltime = timestamp - start_time
            for name, value in stats_dict.items():
                stats_tfevents.add_scalar(name, value.mean, global_step=global_step, walltime=walltime)
            for name, value in stats_metrics.items():
                stats_tfevents.add_scalar(f'Metrics/{name}', value, global_step=global_step, walltime=walltime)
            stats_tfevents.flush()
        if progress_fn is not None:
            progress_fn(cur_nimg // 1000, total_kimg)

        # Update state.
        cur_tick += 1
        tick_start_nimg = cur_nimg
        tick_start_time = time.time()
        maintenance_time = tick_start_time - tick_end_time
        if done:
            break

    # Done.
    if rank == 0:
        print()
        print('Exiting...')
#----------------------------------------------------------------------------
# Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
"""Network architectures from the paper
"Analyzing and Improving the Image Quality of StyleGAN".
Matches the original implementation of configs E-F by Karras et al. at
https://github.com/NVlabs/stylegan2/blob/master/training/networks_stylegan2.py"""
import numpy as np
import torch
from torch_utils import misc
from torch_utils import persistence
from torch_utils.ops import conv2d_resample
from torch_utils.ops import upfirdn2d
from torch_utils.ops import bias_act
from torch_utils.ops import fma
#----------------------------------------------------------------------------
@misc.profiled_function
def normalize_2nd_moment(x, dim=1, eps=1e-8):
    """Rescale x so that its mean squared value along `dim` is ~1 (eps avoids division by zero)."""
    second_moment = x.square().mean(dim=dim, keepdim=True)
    return x * (second_moment + eps).rsqrt()
#----------------------------------------------------------------------------
@misc.profiled_function
def modulated_conv2d(
    x,                          # Input tensor of shape [batch_size, in_channels, in_height, in_width].
    weight,                     # Weight tensor of shape [out_channels, in_channels, kernel_height, kernel_width].
    styles,                     # Modulation coefficients of shape [batch_size, in_channels].
    noise           = None,     # Optional noise tensor to add to the output activations.
    up              = 1,        # Integer upsampling factor.
    down            = 1,        # Integer downsampling factor.
    padding         = 0,        # Padding with respect to the upsampled image.
    resample_filter = None,     # Low-pass filter to apply when resampling activations. Must be prepared beforehand by calling upfirdn2d.setup_filter().
    demodulate      = True,     # Apply weight demodulation?
    flip_weight     = True,     # False = convolution, True = correlation (matches torch.nn.functional.conv2d).
    fused_modconv   = True,     # Perform modulation, convolution, and demodulation as a single fused operation?
):
    """Modulated conv2d with optional weight demodulation (StyleGAN2).

    Two equivalent execution paths:
      * fused_modconv=True: build per-sample weights and run a single grouped
        convolution (one group per batch sample).
      * fused_modconv=False: scale the activations before the shared-weight
        convolution and rescale (demodulate) after, never materializing
        per-sample weights.
    """
    batch_size = x.shape[0]
    out_channels, in_channels, kh, kw = weight.shape
    misc.assert_shape(weight, [out_channels, in_channels, kh, kw]) # [OIkk]
    misc.assert_shape(x, [batch_size, in_channels, None, None]) # [NIHW]
    misc.assert_shape(styles, [batch_size, in_channels]) # [NI]
    # Pre-normalize inputs to avoid FP16 overflow.
    # Dividing by the inf-norm keeps intermediate products within FP16 range;
    # demodulation later cancels the scaling, so the output is unchanged.
    if x.dtype == torch.float16 and demodulate:
        weight = weight * (1 / np.sqrt(in_channels * kh * kw) / weight.norm(float('inf'), dim=[1,2,3], keepdim=True)) # max_Ikk
        styles = styles / styles.norm(float('inf'), dim=1, keepdim=True) # max_I
    # Calculate per-sample weights and demodulation coefficients.
    w = None
    dcoefs = None
    if demodulate or fused_modconv:
        w = weight.unsqueeze(0) # [NOIkk]
        w = w * styles.reshape(batch_size, 1, -1, 1, 1) # [NOIkk]
    if demodulate:
        # Normalize each output feature map to unit expected magnitude
        # (1e-8 guards against division by zero).
        dcoefs = (w.square().sum(dim=[2,3,4]) + 1e-8).rsqrt() # [NO]
    if demodulate and fused_modconv:
        w = w * dcoefs.reshape(batch_size, -1, 1, 1, 1) # [NOIkk]
    # Execute by scaling the activations before and after the convolution.
    if not fused_modconv:
        x = x * styles.to(x.dtype).reshape(batch_size, -1, 1, 1)
        x = conv2d_resample.conv2d_resample(x=x, w=weight.to(x.dtype), f=resample_filter, up=up, down=down, padding=padding, flip_weight=flip_weight)
        if demodulate and noise is not None:
            # Fused multiply-add: demodulate and add noise in one kernel.
            x = fma.fma(x, dcoefs.to(x.dtype).reshape(batch_size, -1, 1, 1), noise.to(x.dtype))
        elif demodulate:
            x = x * dcoefs.to(x.dtype).reshape(batch_size, -1, 1, 1)
        elif noise is not None:
            x = x.add_(noise.to(x.dtype))
        return x
    # Execute as one fused op using grouped convolution.
    with misc.suppress_tracer_warnings(): # this value will be treated as a constant
        batch_size = int(batch_size)
    misc.assert_shape(x, [batch_size, in_channels, None, None])
    # Fold the batch into the channel dimension and use one conv group per sample.
    x = x.reshape(1, -1, *x.shape[2:])
    w = w.reshape(-1, in_channels, kh, kw)
    x = conv2d_resample.conv2d_resample(x=x, w=w.to(x.dtype), f=resample_filter, up=up, down=down, padding=padding, groups=batch_size, flip_weight=flip_weight)
    x = x.reshape(batch_size, -1, *x.shape[2:])
    if noise is not None:
        x = x.add_(noise)
    return x
#----------------------------------------------------------------------------
@persistence.persistent_class
class FullyConnectedLayer(torch.nn.Module):
    """Fully connected layer with equalized learning rate and optional fused bias+activation."""
    def __init__(self,
        in_features,                # Number of input features.
        out_features,               # Number of output features.
        bias            = True,     # Apply additive bias before the activation function?
        activation      = 'linear', # Activation function: 'relu', 'lrelu', etc.
        lr_multiplier   = 1,        # Learning rate multiplier.
        bias_init       = 0,        # Initial value for the additive bias.
    ):
        super().__init__()
        self.in_features = in_features
        self.out_features = out_features
        self.activation = activation
        # Equalized learning rate: parameters are stored divided by lr_multiplier
        # and rescaled at runtime via weight_gain / bias_gain.
        self.weight = torch.nn.Parameter(torch.randn([out_features, in_features]) / lr_multiplier)
        self.bias = torch.nn.Parameter(torch.full([out_features], np.float32(bias_init))) if bias else None
        self.weight_gain = lr_multiplier / np.sqrt(in_features)
        self.bias_gain = lr_multiplier

    def forward(self, x):
        weight = self.weight.to(x.dtype) * self.weight_gain
        bias = None
        if self.bias is not None:
            bias = self.bias.to(x.dtype)
            if self.bias_gain != 1:
                bias = bias * self.bias_gain
        # Linear activation with bias uses the fused matmul+add fast path.
        if self.activation == 'linear' and bias is not None:
            return torch.addmm(bias.unsqueeze(0), x, weight.t())
        out = x.matmul(weight.t())
        return bias_act.bias_act(out, bias, act=self.activation)

    def extra_repr(self):
        return f'in_features={self.in_features:d}, out_features={self.out_features:d}, activation={self.activation:s}'
#----------------------------------------------------------------------------
@persistence.persistent_class
class Conv2dLayer(torch.nn.Module):
    """Non-modulated conv layer with equalized learning rate and optional up/downsampling."""
    def __init__(self,
        in_channels,                    # Number of input channels.
        out_channels,                   # Number of output channels.
        kernel_size,                    # Width and height of the convolution kernel.
        bias            = True,         # Apply additive bias before the activation function?
        activation      = 'linear',     # Activation function: 'relu', 'lrelu', etc.
        up              = 1,            # Integer upsampling factor.
        down            = 1,            # Integer downsampling factor.
        resample_filter = [1,3,3,1],    # Low-pass filter to apply when resampling activations.
        conv_clamp      = None,         # Clamp the output to +-X, None = disable clamping.
        channels_last   = False,        # Expect the input to have memory_format=channels_last?
        trainable       = True,         # Update the weights of this layer during training?
    ):
        super().__init__()
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.activation = activation
        self.up = up
        self.down = down
        self.conv_clamp = conv_clamp
        self.register_buffer('resample_filter', upfirdn2d.setup_filter(resample_filter))
        self.padding = kernel_size // 2
        # Equalized learning rate: weights are unit-variance at init and rescaled at runtime.
        self.weight_gain = 1 / np.sqrt(in_channels * (kernel_size ** 2))
        self.act_gain = bias_act.activation_funcs[activation].def_gain

        if channels_last:
            memory_format = torch.channels_last
        else:
            memory_format = torch.contiguous_format
        init_weight = torch.randn([out_channels, in_channels, kernel_size, kernel_size]).to(memory_format=memory_format)
        init_bias = torch.zeros([out_channels]) if bias else None
        if trainable:
            self.weight = torch.nn.Parameter(init_weight)
            self.bias = torch.nn.Parameter(init_bias) if init_bias is not None else None
        else:
            # Frozen layer: keep tensors as buffers so they follow the module's
            # device/dtype but receive no gradient updates.
            self.register_buffer('weight', init_weight)
            if init_bias is None:
                self.bias = None
            else:
                self.register_buffer('bias', init_bias)

    def forward(self, x, gain=1):
        weight = self.weight * self.weight_gain
        bias = self.bias.to(x.dtype) if self.bias is not None else None
        flip_weight = (self.up == 1) # correlation is slightly faster than true convolution
        out = conv2d_resample.conv2d_resample(x=x, w=weight.to(x.dtype), f=self.resample_filter, up=self.up, down=self.down, padding=self.padding, flip_weight=flip_weight)
        clamp = self.conv_clamp * gain if self.conv_clamp is not None else None
        return bias_act.bias_act(out, bias, act=self.activation, gain=self.act_gain * gain, clamp=clamp)

    def extra_repr(self):
        return ' '.join([
            f'in_channels={self.in_channels:d}, out_channels={self.out_channels:d}, activation={self.activation:s},',
            f'up={self.up}, down={self.down}'])
#----------------------------------------------------------------------------
@persistence.persistent_class
class MappingNetwork(torch.nn.Module):
    """Mapping network: embeds (z, c) and maps them through an MLP to latent(s) w.

    Optionally broadcasts w to `num_ws` copies, tracks a moving average of w,
    and supports truncation towards that average.
    """
    def __init__(self,
        z_dim,                      # Input latent (Z) dimensionality, 0 = no latent.
        c_dim,                      # Conditioning label (C) dimensionality, 0 = no label.
        w_dim,                      # Intermediate latent (W) dimensionality.
        num_ws,                     # Number of intermediate latents to output, None = do not broadcast.
        num_layers      = 8,        # Number of mapping layers.
        embed_features  = None,     # Label embedding dimensionality, None = same as w_dim.
        layer_features  = None,     # Number of intermediate features in the mapping layers, None = same as w_dim.
        activation      = 'lrelu',  # Activation function: 'relu', 'lrelu', etc.
        lr_multiplier   = 0.01,     # Learning rate multiplier for the mapping layers.
        w_avg_beta      = 0.998,    # Decay for tracking the moving average of W during training, None = do not track.
    ):
        super().__init__()
        self.z_dim = z_dim
        self.c_dim = c_dim
        self.w_dim = w_dim
        self.num_ws = num_ws
        self.num_layers = num_layers
        self.w_avg_beta = w_avg_beta

        if embed_features is None:
            embed_features = w_dim
        if c_dim == 0:
            embed_features = 0
        if layer_features is None:
            layer_features = w_dim
        features_list = [z_dim + embed_features] + [layer_features] * (num_layers - 1) + [w_dim]

        if c_dim > 0:
            self.embed = FullyConnectedLayer(c_dim, embed_features)
        for idx in range(num_layers):
            in_features = features_list[idx]
            out_features = features_list[idx + 1]
            layer = FullyConnectedLayer(in_features, out_features, activation=activation, lr_multiplier=lr_multiplier)
            setattr(self, f'fc{idx}', layer)

        if num_ws is not None and w_avg_beta is not None:
            self.register_buffer('w_avg', torch.zeros([w_dim]))

    def forward(self, z, c, truncation_psi=1, truncation_cutoff=None, update_emas=False):
        """Map (z, c) to w. Returns [N, w_dim] or [N, num_ws, w_dim] when broadcasting."""
        # Embed, normalize, and concat inputs.
        x = None
        with torch.autograd.profiler.record_function('input'):
            if self.z_dim > 0:
                misc.assert_shape(z, [None, self.z_dim])
                x = normalize_2nd_moment(z.to(torch.float32))
            if self.c_dim > 0:
                misc.assert_shape(c, [None, self.c_dim])
                y = normalize_2nd_moment(self.embed(c.to(torch.float32)))
                x = torch.cat([x, y], dim=1) if x is not None else y

        # Main layers.
        for idx in range(self.num_layers):
            layer = getattr(self, f'fc{idx}')
            x = layer(x)

        # Update moving average of W.
        if update_emas and self.w_avg_beta is not None:
            with torch.autograd.profiler.record_function('update_w_avg'):
                self.w_avg.copy_(x.detach().mean(dim=0).lerp(self.w_avg, self.w_avg_beta))

        # Broadcast one w per synthesis-layer input.
        if self.num_ws is not None:
            with torch.autograd.profiler.record_function('broadcast'):
                x = x.unsqueeze(1).repeat([1, self.num_ws, 1])

        # Apply truncation: interpolate towards the tracked average w.
        if truncation_psi != 1:
            with torch.autograd.profiler.record_function('truncate'):
                assert self.w_avg_beta is not None
                if self.num_ws is None or truncation_cutoff is None:
                    x = self.w_avg.lerp(x, truncation_psi)
                else:
                    x[:, :truncation_cutoff] = self.w_avg.lerp(x[:, :truncation_cutoff], truncation_psi)
        return x

    def extra_repr(self):
        # BUGFIX: num_ws may legitimately be None ("do not broadcast"), and the
        # ':d' format spec raises TypeError for None. Plain formatting prints
        # identical output for ints and 'None' otherwise.
        return f'z_dim={self.z_dim:d}, c_dim={self.c_dim:d}, w_dim={self.w_dim:d}, num_ws={self.num_ws}'
#----------------------------------------------------------------------------
@persistence.persistent_class
class SynthesisLayer(torch.nn.Module):
    """Single modulated convolution layer of the synthesis network (optional upsampling and noise)."""
    def __init__(self,
        in_channels,                    # Number of input channels.
        out_channels,                   # Number of output channels.
        w_dim,                          # Intermediate latent (W) dimensionality.
        resolution,                     # Resolution of this layer.
        kernel_size     = 3,            # Convolution kernel size.
        up              = 1,            # Integer upsampling factor.
        use_noise       = False,        # Enable noise input?
        activation      = 'lrelu',      # Activation function: 'relu', 'lrelu', etc.
        resample_filter = [1,3,3,1],    # Low-pass filter to apply when resampling activations.
        conv_clamp      = None,         # Clamp the output of convolution layers to +-X, None = disable clamping.
        channels_last   = False,        # Use channels_last format for the weights?
    ):
        super().__init__()
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.w_dim = w_dim
        self.resolution = resolution
        self.up = up
        self.use_noise = use_noise
        self.activation = activation
        self.conv_clamp = conv_clamp
        self.register_buffer('resample_filter', upfirdn2d.setup_filter(resample_filter))
        self.padding = kernel_size // 2
        self.act_gain = bias_act.activation_funcs[activation].def_gain
        # Per-sample modulation styles come from an affine projection of w.
        self.affine = FullyConnectedLayer(w_dim, in_channels, bias_init=1)

        if channels_last:
            memory_format = torch.channels_last
        else:
            memory_format = torch.contiguous_format
        self.weight = torch.nn.Parameter(torch.randn([out_channels, in_channels, kernel_size, kernel_size]).to(memory_format=memory_format))
        if use_noise:
            self.register_buffer('noise_const', torch.randn([resolution, resolution]))
            self.noise_strength = torch.nn.Parameter(torch.zeros([]))
        self.bias = torch.nn.Parameter(torch.zeros([out_channels]))

    def forward(self, x, w, noise_mode='random', fused_modconv=True, gain=1):
        assert noise_mode in ['random', 'const', 'none']
        input_res = self.resolution // self.up
        misc.assert_shape(x, [None, self.in_channels, input_res, input_res])
        styles = self.affine(w)

        noise = None
        if self.use_noise:
            if noise_mode == 'random':
                noise = torch.randn([x.shape[0], 1, self.resolution, self.resolution], device=x.device) * self.noise_strength
            elif noise_mode == 'const':
                noise = self.noise_const * self.noise_strength

        # Correlation (flip_weight=True) is slightly faster and used when not upsampling.
        out = modulated_conv2d(x=x, weight=self.weight, styles=styles, noise=noise, up=self.up,
            padding=self.padding, resample_filter=self.resample_filter, flip_weight=(self.up == 1), fused_modconv=fused_modconv)

        clamp = self.conv_clamp * gain if self.conv_clamp is not None else None
        return bias_act.bias_act(out, self.bias.to(out.dtype), act=self.activation, gain=self.act_gain * gain, clamp=clamp)

    def extra_repr(self):
        return ' '.join([
            f'in_channels={self.in_channels:d}, out_channels={self.out_channels:d}, w_dim={self.w_dim:d},',
            f'resolution={self.resolution:d}, up={self.up}, activation={self.activation:s}'])
#----------------------------------------------------------------------------
@persistence.persistent_class
class ToRGBLayer(torch.nn.Module):
    """Modulated convolution (no demodulation) mapping feature maps to output color channels."""
    def __init__(self, in_channels, out_channels, w_dim, kernel_size=1, conv_clamp=None, channels_last=False):
        super().__init__()
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.w_dim = w_dim
        self.conv_clamp = conv_clamp
        self.affine = FullyConnectedLayer(w_dim, in_channels, bias_init=1)
        if channels_last:
            memory_format = torch.channels_last
        else:
            memory_format = torch.contiguous_format
        self.weight = torch.nn.Parameter(torch.randn([out_channels, in_channels, kernel_size, kernel_size]).to(memory_format=memory_format))
        self.bias = torch.nn.Parameter(torch.zeros([out_channels]))
        self.weight_gain = 1 / np.sqrt(in_channels * (kernel_size ** 2))

    def forward(self, x, w, fused_modconv=True):
        # Fold the equalized-lr gain into the styles instead of the weights.
        styles = self.affine(w) * self.weight_gain
        out = modulated_conv2d(x=x, weight=self.weight, styles=styles, demodulate=False, fused_modconv=fused_modconv)
        return bias_act.bias_act(out, self.bias.to(out.dtype), clamp=self.conv_clamp)

    def extra_repr(self):
        return f'in_channels={self.in_channels:d}, out_channels={self.out_channels:d}, w_dim={self.w_dim:d}'
#----------------------------------------------------------------------------
@persistence.persistent_class
class SynthesisBlock(torch.nn.Module):
def __init__(self,
in_channels, # Number of input channels, 0 = first block.
out_channels, # Number of output channels.
w_dim, # Intermediate latent (W) dimensionality.
resolution, # Resolution of this block.
img_channels, # Number of output color channels.
is_last, # Is this the last block?
architecture = 'skip', # Architecture: 'orig', 'skip', 'resnet'.
resample_filter = [1,3,3,1], # Low-pass filter to apply when resampling activations.
conv_clamp = 256, # Clamp the output of convolution layers to +-X, None = disable clamping.
use_fp16 = False, # Use FP16 for this block?
fp16_channels_last = False, # Use channels-last memory format with FP16?
fused_modconv_default = True, # Default value of fused_modconv. 'inference_only' = True for inference, False for training.
**layer_kwargs, # Arguments for SynthesisLayer.
):
assert architecture in ['orig', 'skip', 'resnet']
super().__init__()
self.in_channels = in_channels
self.w_dim = w_dim
self.resolution = resolution
self.img_channels = img_channels
self.is_last = is_last
self.architecture = architecture
self.use_fp16 = use_fp16
self.channels_last = (use_fp16 and fp16_channels_last)
self.fused_modconv_default = fused_modconv_default
self.register_buffer('resample_filter', upfirdn2d.setup_filter(resample_filter))
self.num_conv = 0
self.num_torgb = 0
if in_channels == 0:
self.const = torch.nn.Parameter(torch.randn([out_channels, resolution, resolution]))
if in_channels != 0:
self.conv0 = SynthesisLayer(in_channels, out_channels, w_dim=w_dim, resolution=resolution, up=2,
resample_filter=resample_filter, conv_clamp=conv_clamp, channels_last=self.channels_last, **layer_kwargs)
self.num_conv += 1
self.conv1 = SynthesisLayer(out_channels, out_channels, w_dim=w_dim, resolution=resolution,
conv_clamp=conv_clamp, channels_last=self.channels_last, **layer_kwargs)
self.num_conv += 1
if is_last or architecture == 'skip':
self.torgb = ToRGBLayer(out_channels, img_channels, w_dim=w_dim,
conv_clamp=conv_clamp, channels_last=self.channels_last)
self.num_torgb += 1
if in_channels != 0 and architecture == 'resnet':
self.skip = Conv2dLayer(in_channels, out_channels, kernel_size=1, bias=False, up=2,
resample_filter=resample_filter, channels_last=self.channels_last)
    def forward(self, x, img, ws, force_fp32=False, fused_modconv=None, update_emas=False, **layer_kwargs):
        """Run one synthesis block.

        Args:
            x:    Feature maps from the previous block, or unused for the first
                  (in_channels == 0) block, which starts from the learned constant.
            img:  RGB image accumulated so far ('skip' architecture), or None.
            ws:   Per-layer latents for this block,
                  shape [batch, num_conv + num_torgb, w_dim].
            force_fp32:    Run in FP32 even if the block is configured for FP16.
            fused_modconv: Override for the fused modulated-conv path;
                           None = use self.fused_modconv_default.
            update_emas:   Accepted for API symmetry with other modules; unused.

        Returns:
            (x, img): feature maps in the block's compute dtype, and the RGB
            accumulator (always FP32; may be None if not produced yet).
        """
        _ = update_emas # unused
        misc.assert_shape(ws, [None, self.num_conv + self.num_torgb, self.w_dim])
        w_iter = iter(ws.unbind(dim=1))  # one w per conv/torgb layer, consumed in order
        if ws.device.type != 'cuda':
            force_fp32 = True  # FP16 path relies on CUDA kernels
        dtype = torch.float16 if self.use_fp16 and not force_fp32 else torch.float32
        memory_format = torch.channels_last if self.channels_last and not force_fp32 else torch.contiguous_format
        if fused_modconv is None:
            fused_modconv = self.fused_modconv_default
        if fused_modconv == 'inference_only':
            fused_modconv = (not self.training)

        # Input.
        if self.in_channels == 0:
            # First block: broadcast the learned constant over the batch.
            x = self.const.to(dtype=dtype, memory_format=memory_format)
            x = x.unsqueeze(0).repeat([ws.shape[0], 1, 1, 1])
        else:
            misc.assert_shape(x, [None, self.in_channels, self.resolution // 2, self.resolution // 2])
            x = x.to(dtype=dtype, memory_format=memory_format)

        # Main layers.
        if self.in_channels == 0:
            x = self.conv1(x, next(w_iter), fused_modconv=fused_modconv, **layer_kwargs)
        elif self.architecture == 'resnet':
            y = self.skip(x, gain=np.sqrt(0.5))
            x = self.conv0(x, next(w_iter), fused_modconv=fused_modconv, **layer_kwargs)
            # gain=sqrt(0.5) on both branches keeps the variance of the sum stable.
            x = self.conv1(x, next(w_iter), fused_modconv=fused_modconv, gain=np.sqrt(0.5), **layer_kwargs)
            x = y.add_(x)
        else:
            x = self.conv0(x, next(w_iter), fused_modconv=fused_modconv, **layer_kwargs)
            x = self.conv1(x, next(w_iter), fused_modconv=fused_modconv, **layer_kwargs)

        # ToRGB.
        if img is not None:
            misc.assert_shape(img, [None, self.img_channels, self.resolution // 2, self.resolution // 2])
            img = upfirdn2d.upsample2d(img, self.resample_filter)  # grow skip image to this resolution
        if self.is_last or self.architecture == 'skip':
            y = self.torgb(x, next(w_iter), fused_modconv=fused_modconv)
            y = y.to(dtype=torch.float32, memory_format=torch.contiguous_format)
            img = img.add_(y) if img is not None else y

        assert x.dtype == dtype
        assert img is None or img.dtype == torch.float32
        return x, img
def extra_repr(self):
return f'resolution={self.resolution:d}, architecture={self.architecture:s}'
#----------------------------------------------------------------------------
@persistence.persistent_class
class SynthesisNetwork(torch.nn.Module):
    """StyleGAN2 synthesis network: a stack of SynthesisBlocks, one per
    resolution octave from 4x4 up to img_resolution, driven by per-layer
    intermediate latents (ws)."""

    def __init__(self,
        w_dim,                      # Intermediate latent (W) dimensionality.
        img_resolution,             # Output image resolution.
        img_channels,               # Number of color channels.
        channel_base    = 32768,    # Overall multiplier for the number of channels.
        channel_max     = 512,      # Maximum number of channels in any layer.
        num_fp16_res    = 4,        # Use FP16 for the N highest resolutions.
        **block_kwargs,             # Arguments for SynthesisBlock.
    ):
        # One block per octave requires a power-of-two resolution >= 4.
        assert img_resolution >= 4 and img_resolution & (img_resolution - 1) == 0
        super().__init__()
        self.w_dim = w_dim
        self.img_resolution = img_resolution
        self.img_resolution_log2 = int(np.log2(img_resolution))
        self.img_channels = img_channels
        self.num_fp16_res = num_fp16_res
        self.block_resolutions = [2 ** i for i in range(2, self.img_resolution_log2 + 1)]  # 4, 8, ..., img_resolution
        # Channel count halves as resolution doubles, capped at channel_max.
        channels_dict = {res: min(channel_base // res, channel_max) for res in self.block_resolutions}
        # Resolutions at or above this threshold run in FP16 (the num_fp16_res highest).
        fp16_resolution = max(2 ** (self.img_resolution_log2 + 1 - num_fp16_res), 8)

        self.num_ws = 0
        for res in self.block_resolutions:
            in_channels = channels_dict[res // 2] if res > 4 else 0  # 0 = first block (learned constant)
            out_channels = channels_dict[res]
            use_fp16 = (res >= fp16_resolution)
            is_last = (res == self.img_resolution)
            block = SynthesisBlock(in_channels, out_channels, w_dim=w_dim, resolution=res,
                img_channels=img_channels, is_last=is_last, use_fp16=use_fp16, **block_kwargs)
            self.num_ws += block.num_conv
            if is_last:
                self.num_ws += block.num_torgb  # only the final block's torgb consumes an extra w
            setattr(self, f'b{res}', block)

    def forward(self, ws, **block_kwargs):
        """Synthesize images from latents ws of shape [batch, num_ws, w_dim]."""
        block_ws = []
        with torch.autograd.profiler.record_function('split_ws'):
            misc.assert_shape(ws, [None, self.num_ws, self.w_dim])
            ws = ws.to(torch.float32)
            w_idx = 0
            for res in self.block_resolutions:
                block = getattr(self, f'b{res}')
                # Each block receives num_conv + num_torgb latents, but w_idx only
                # advances by num_conv: a block's torgb layer shares the w that the
                # next block's first conv will use (intentional overlap).
                block_ws.append(ws.narrow(1, w_idx, block.num_conv + block.num_torgb))
                w_idx += block.num_conv

        x = img = None
        for res, cur_ws in zip(self.block_resolutions, block_ws):
            block = getattr(self, f'b{res}')
            x, img = block(x, img, cur_ws, **block_kwargs)
        return img

    def extra_repr(self):
        return ' '.join([
            f'w_dim={self.w_dim:d}, num_ws={self.num_ws:d},',
            f'img_resolution={self.img_resolution:d}, img_channels={self.img_channels:d},',
            f'num_fp16_res={self.num_fp16_res:d}'])
#----------------------------------------------------------------------------
@persistence.persistent_class
class Generator(torch.nn.Module):
    """StyleGAN2 generator: MappingNetwork (z, c -> ws) feeding a
    SynthesisNetwork (ws -> image)."""

    def __init__(self,
        z_dim,                      # Input latent (Z) dimensionality.
        c_dim,                      # Conditioning label (C) dimensionality.
        w_dim,                      # Intermediate latent (W) dimensionality.
        img_resolution,             # Output resolution.
        img_channels,               # Number of output color channels.
        mapping_kwargs  = None,     # Arguments for MappingNetwork (dict); None = no extra arguments.
        **synthesis_kwargs,         # Arguments for SynthesisNetwork.
    ):
        super().__init__()
        self.z_dim = z_dim
        self.c_dim = c_dim
        self.w_dim = w_dim
        self.img_resolution = img_resolution
        self.img_channels = img_channels
        # Fix: the original used a mutable default argument (mapping_kwargs={}),
        # which is shared across calls. None sentinel + local copy is equivalent
        # for all existing callers and avoids the pitfall.
        mapping_kwargs = dict(mapping_kwargs) if mapping_kwargs is not None else {}
        self.synthesis = SynthesisNetwork(w_dim=w_dim, img_resolution=img_resolution, img_channels=img_channels, **synthesis_kwargs)
        # The mapping network must emit exactly as many ws as the synthesis stack consumes.
        self.num_ws = self.synthesis.num_ws
        self.mapping = MappingNetwork(z_dim=z_dim, c_dim=c_dim, w_dim=w_dim, num_ws=self.num_ws, **mapping_kwargs)

    def forward(self, z, c, truncation_psi=1, truncation_cutoff=None, update_emas=False, **synthesis_kwargs):
        """Generate images from latent z and label c.

        Args:
            z: Latent codes, shape [batch, z_dim].
            c: Conditioning labels, shape [batch, c_dim].
            truncation_psi:    Truncation strength (1 = disabled), forwarded to mapping.
            truncation_cutoff: Number of leading ws to truncate; None = all.
            update_emas:       Update moving averages (training-time bookkeeping).

        Returns:
            Synthesized images, shape [batch, img_channels, img_resolution, img_resolution].
        """
        ws = self.mapping(z, c, truncation_psi=truncation_psi, truncation_cutoff=truncation_cutoff, update_emas=update_emas)
        img = self.synthesis(ws, update_emas=update_emas, **synthesis_kwargs)
        return img
#----------------------------------------------------------------------------
@persistence.persistent_class
class DiscriminatorBlock(torch.nn.Module):
    """StyleGAN2 discriminator block: optional FromRGB input, two convolutions,
    and a 2x downsample, wired as 'orig', 'skip', or 'resnet'."""

    def __init__(self,
        in_channels,                        # Number of input channels, 0 = first block.
        tmp_channels,                       # Number of intermediate channels.
        out_channels,                       # Number of output channels.
        resolution,                         # Resolution of this block.
        img_channels,                       # Number of input color channels.
        first_layer_idx,                    # Index of the first layer.
        architecture        = 'resnet',     # Architecture: 'orig', 'skip', 'resnet'.
        activation          = 'lrelu',      # Activation function: 'relu', 'lrelu', etc.
        resample_filter     = [1,3,3,1],    # Low-pass filter to apply when resampling activations.
        conv_clamp          = None,         # Clamp the output of convolution layers to +-X, None = disable clamping.
        use_fp16            = False,        # Use FP16 for this block?
        fp16_channels_last  = False,        # Use channels-last memory format with FP16?
        freeze_layers       = 0,            # Freeze-D: Number of layers to freeze.
    ):
        assert in_channels in [0, tmp_channels]
        assert architecture in ['orig', 'skip', 'resnet']
        super().__init__()
        self.in_channels = in_channels
        self.resolution = resolution
        self.img_channels = img_channels
        self.first_layer_idx = first_layer_idx
        self.architecture = architecture
        self.use_fp16 = use_fp16
        self.channels_last = (use_fp16 and fp16_channels_last)
        self.register_buffer('resample_filter', upfirdn2d.setup_filter(resample_filter))
        self.num_layers = 0

        # Yields a trainable flag per constructed layer, depending on whether the
        # layer's global index has passed the Freeze-D threshold. Counting
        # self.num_layers happens as a side effect of consuming the iterator.
        def trainable_gen():
            while True:
                layer_idx = self.first_layer_idx + self.num_layers
                trainable = (layer_idx >= freeze_layers)
                self.num_layers += 1
                yield trainable
        trainable_iter = trainable_gen()

        if in_channels == 0 or architecture == 'skip':
            self.fromrgb = Conv2dLayer(img_channels, tmp_channels, kernel_size=1, activation=activation,
                trainable=next(trainable_iter), conv_clamp=conv_clamp, channels_last=self.channels_last)
        self.conv0 = Conv2dLayer(tmp_channels, tmp_channels, kernel_size=3, activation=activation,
            trainable=next(trainable_iter), conv_clamp=conv_clamp, channels_last=self.channels_last)
        self.conv1 = Conv2dLayer(tmp_channels, out_channels, kernel_size=3, activation=activation, down=2,
            trainable=next(trainable_iter), resample_filter=resample_filter, conv_clamp=conv_clamp, channels_last=self.channels_last)
        if architecture == 'resnet':
            self.skip = Conv2dLayer(tmp_channels, out_channels, kernel_size=1, bias=False, down=2,
                trainable=next(trainable_iter), resample_filter=resample_filter, channels_last=self.channels_last)

    def forward(self, x, img, force_fp32=False):
        """Process one resolution level.

        Args:
            x:   Feature maps from the previous (higher-resolution) block, or
                 None for the first block.
            img: RGB input at this resolution ('skip' architecture), or None.
            force_fp32: Run in FP32 even if the block is configured for FP16.

        Returns:
            (x, img): downsampled feature maps, and the downsampled RGB image
            ('skip' only; None otherwise).
        """
        if (x if x is not None else img).device.type != 'cuda':
            force_fp32 = True  # FP16 path relies on CUDA kernels
        dtype = torch.float16 if self.use_fp16 and not force_fp32 else torch.float32
        memory_format = torch.channels_last if self.channels_last and not force_fp32 else torch.contiguous_format

        # Input.
        if x is not None:
            misc.assert_shape(x, [None, self.in_channels, self.resolution, self.resolution])
            x = x.to(dtype=dtype, memory_format=memory_format)

        # FromRGB.
        if self.in_channels == 0 or self.architecture == 'skip':
            misc.assert_shape(img, [None, self.img_channels, self.resolution, self.resolution])
            img = img.to(dtype=dtype, memory_format=memory_format)
            y = self.fromrgb(img)
            x = x + y if x is not None else y
            # 'skip' keeps feeding a downsampled image to subsequent blocks.
            img = upfirdn2d.downsample2d(img, self.resample_filter) if self.architecture == 'skip' else None

        # Main layers.
        if self.architecture == 'resnet':
            y = self.skip(x, gain=np.sqrt(0.5))
            x = self.conv0(x)
            # gain=sqrt(0.5) on both branches keeps the variance of the sum stable.
            x = self.conv1(x, gain=np.sqrt(0.5))
            x = y.add_(x)
        else:
            x = self.conv0(x)
            x = self.conv1(x)

        assert x.dtype == dtype
        return x, img

    def extra_repr(self):
        return f'resolution={self.resolution:d}, architecture={self.architecture:s}'
#----------------------------------------------------------------------------
@persistence.persistent_class
class MinibatchStdLayer(torch.nn.Module):
    """Appends minibatch standard-deviation statistics as extra feature channels.

    The batch is split into groups of at most `group_size` samples; per-group
    feature stddevs (averaged over channels and pixels) are broadcast back and
    concatenated to the input along the channel dimension.
    """

    def __init__(self, group_size, num_channels=1):
        super().__init__()
        self.group_size = group_size
        self.num_channels = num_channels

    def forward(self, x):
        batch, channels, height, width = x.shape
        with misc.suppress_tracer_warnings():  # as_tensor results are registered as constants
            if self.group_size is not None:
                group = torch.min(torch.as_tensor(self.group_size), torch.as_tensor(batch))
            else:
                group = batch
        stat_channels = self.num_channels
        feats_per_stat = channels // stat_channels

        # [G, n, F, c, H, W]: batch split into n groups of size G, channels into F sets of size c.
        stats = x.reshape(group, -1, stat_channels, feats_per_stat, height, width)
        stats = stats - stats.mean(dim=0)       # center within each group
        stats = stats.square().mean(dim=0)      # per-group variance
        stats = (stats + 1e-8).sqrt()           # per-group stddev (epsilon for numerical safety)
        stats = stats.mean(dim=[2, 3, 4])       # [n, F]: average over channels and pixels
        stats = stats.reshape(-1, stat_channels, 1, 1)
        stats = stats.repeat(group, 1, height, width)  # broadcast back over group members and pixels
        return torch.cat([x, stats], dim=1)     # append as new channels

    def extra_repr(self):
        return f'group_size={self.group_size}, num_channels={self.num_channels:d}'
#----------------------------------------------------------------------------
@persistence.persistent_class
class DiscriminatorEpilogue(torch.nn.Module):
    """Final 4x4 stage of the StyleGAN2 discriminator: minibatch-stddev layer,
    a conv, a fully-connected head, and (for conditional models) a projection
    onto the mapped conditioning label."""

    def __init__(self,
        in_channels,                    # Number of input channels.
        cmap_dim,                       # Dimensionality of mapped conditioning label, 0 = no label.
        resolution,                     # Resolution of this block.
        img_channels,                   # Number of input color channels.
        architecture        = 'resnet', # Architecture: 'orig', 'skip', 'resnet'.
        mbstd_group_size    = 4,        # Group size for the minibatch standard deviation layer, None = entire minibatch.
        mbstd_num_channels  = 1,        # Number of features for the minibatch standard deviation layer, 0 = disable.
        activation          = 'lrelu',  # Activation function: 'relu', 'lrelu', etc.
        conv_clamp          = None,     # Clamp the output of convolution layers to +-X, None = disable clamping.
    ):
        assert architecture in ['orig', 'skip', 'resnet']
        super().__init__()
        self.in_channels = in_channels
        self.cmap_dim = cmap_dim
        self.resolution = resolution
        self.img_channels = img_channels
        self.architecture = architecture

        if architecture == 'skip':
            self.fromrgb = Conv2dLayer(img_channels, in_channels, kernel_size=1, activation=activation)
        self.mbstd = MinibatchStdLayer(group_size=mbstd_group_size, num_channels=mbstd_num_channels) if mbstd_num_channels > 0 else None
        # Conv input grows by the stddev channels appended by MinibatchStdLayer.
        self.conv = Conv2dLayer(in_channels + mbstd_num_channels, in_channels, kernel_size=3, activation=activation, conv_clamp=conv_clamp)
        self.fc = FullyConnectedLayer(in_channels * (resolution ** 2), in_channels, activation=activation)
        # Unconditional: scalar logit. Conditional: cmap_dim features to project.
        self.out = FullyConnectedLayer(in_channels, 1 if cmap_dim == 0 else cmap_dim)

    def forward(self, x, img, cmap, force_fp32=False):
        """Produce the final discriminator score.

        Args:
            x:    Feature maps at self.resolution (4x4 in the standard setup).
            img:  RGB image at the same resolution ('skip' architecture only).
            cmap: Mapped conditioning label, shape [batch, cmap_dim], or ignored
                  when cmap_dim == 0.
            force_fp32: Unused; the epilogue always runs in FP32.

        Returns:
            Logits of shape [batch, 1] (conditional models project onto cmap).
        """
        misc.assert_shape(x, [None, self.in_channels, self.resolution, self.resolution]) # [NCHW]
        _ = force_fp32 # unused
        dtype = torch.float32
        memory_format = torch.contiguous_format

        # FromRGB.
        x = x.to(dtype=dtype, memory_format=memory_format)
        if self.architecture == 'skip':
            misc.assert_shape(img, [None, self.img_channels, self.resolution, self.resolution])
            img = img.to(dtype=dtype, memory_format=memory_format)
            x = x + self.fromrgb(img)

        # Main layers.
        if self.mbstd is not None:
            x = self.mbstd(x)
        x = self.conv(x)
        x = self.fc(x.flatten(1))
        x = self.out(x)

        # Conditioning: projection-discriminator style dot product with the
        # mapped label, scaled by 1/sqrt(cmap_dim).
        if self.cmap_dim > 0:
            misc.assert_shape(cmap, [None, self.cmap_dim])
            x = (x * cmap).sum(dim=1, keepdim=True) * (1 / np.sqrt(self.cmap_dim))

        assert x.dtype == dtype
        return x

    def extra_repr(self):
        return f'resolution={self.resolution:d}, architecture={self.architecture:s}'
#----------------------------------------------------------------------------
@persistence.persistent_class
class Discriminator(torch.nn.Module):
    """StyleGAN2 discriminator: a chain of DiscriminatorBlocks from
    img_resolution down to 8x8, followed by a DiscriminatorEpilogue at 4x4.
    Conditional models embed the label through a MappingNetwork and project it
    in the epilogue."""

    def __init__(self,
        c_dim,                          # Conditioning label (C) dimensionality.
        img_resolution,                 # Input resolution.
        img_channels,                   # Number of input color channels.
        architecture        = 'resnet', # Architecture: 'orig', 'skip', 'resnet'.
        channel_base        = 32768,    # Overall multiplier for the number of channels.
        channel_max         = 512,      # Maximum number of channels in any layer.
        num_fp16_res        = 4,        # Use FP16 for the N highest resolutions.
        conv_clamp          = 256,      # Clamp the output of convolution layers to +-X, None = disable clamping.
        cmap_dim            = None,     # Dimensionality of mapped conditioning label, None = default.
        block_kwargs        = {},       # Arguments for DiscriminatorBlock.
        mapping_kwargs      = {},       # Arguments for MappingNetwork.
        epilogue_kwargs     = {},       # Arguments for DiscriminatorEpilogue.
    ):
        super().__init__()
        self.c_dim = c_dim
        self.img_resolution = img_resolution
        self.img_resolution_log2 = int(np.log2(img_resolution))
        self.img_channels = img_channels
        # Descending resolutions: img_resolution, ..., 8 (the 4x4 stage is the epilogue).
        self.block_resolutions = [2 ** i for i in range(self.img_resolution_log2, 2, -1)]
        # Channel count doubles as resolution halves, capped at channel_max.
        channels_dict = {res: min(channel_base // res, channel_max) for res in self.block_resolutions + [4]}
        # Resolutions at or above this threshold run in FP16 (the num_fp16_res highest).
        fp16_resolution = max(2 ** (self.img_resolution_log2 + 1 - num_fp16_res), 8)

        if cmap_dim is None:
            cmap_dim = channels_dict[4]
        if c_dim == 0:
            cmap_dim = 0  # unconditional: no label projection

        common_kwargs = dict(img_channels=img_channels, architecture=architecture, conv_clamp=conv_clamp)
        cur_layer_idx = 0  # running global layer index, used by Freeze-D inside the blocks
        for res in self.block_resolutions:
            in_channels = channels_dict[res] if res < img_resolution else 0  # 0 = first block takes RGB only
            tmp_channels = channels_dict[res]
            out_channels = channels_dict[res // 2]
            use_fp16 = (res >= fp16_resolution)
            block = DiscriminatorBlock(in_channels, tmp_channels, out_channels, resolution=res,
                first_layer_idx=cur_layer_idx, use_fp16=use_fp16, **block_kwargs, **common_kwargs)
            setattr(self, f'b{res}', block)
            cur_layer_idx += block.num_layers
        if c_dim > 0:
            # Label embedding only (z_dim=0); no w broadcasting or averaging.
            self.mapping = MappingNetwork(z_dim=0, c_dim=c_dim, w_dim=cmap_dim, num_ws=None, w_avg_beta=None, **mapping_kwargs)
        self.b4 = DiscriminatorEpilogue(channels_dict[4], cmap_dim=cmap_dim, resolution=4, **epilogue_kwargs, **common_kwargs)

    def forward(self, img, c, update_emas=False, **block_kwargs):
        """Score images; returns logits of shape [batch, 1].

        Args:
            img: Input images, shape [batch, img_channels, img_resolution, img_resolution].
            c:   Conditioning labels, shape [batch, c_dim] (ignored when c_dim == 0).
            update_emas: Accepted for API symmetry with the generator; unused.
        """
        _ = update_emas # unused
        x = None
        for res in self.block_resolutions:
            block = getattr(self, f'b{res}')
            x, img = block(x, img, **block_kwargs)

        cmap = None
        if self.c_dim > 0:
            cmap = self.mapping(None, c)
        x = self.b4(x, img, cmap)
        return x

    def extra_repr(self):
        return f'c_dim={self.c_dim:d}, img_resolution={self.img_resolution:d}, img_channels={self.img_channels:d}'
#----------------------------------------------------------------------------
| 40,302 | 49.695597 | 164 | py |
DiffProxy | DiffProxy-main/stylegan/training/networks_stylegan3.py | # Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
"""Generator architecture from the paper
"Alias-Free Generative Adversarial Networks"."""
import numpy as np
import scipy.signal
import scipy.optimize
import torch
from torch_utils import misc
from torch_utils import persistence
from torch_utils.ops import conv2d_gradfix
from torch_utils.ops import filtered_lrelu
from torch_utils.ops import bias_act
#----------------------------------------------------------------------------
@misc.profiled_function
def modulated_conv2d(
    x,                  # Input tensor: [batch_size, in_channels, in_height, in_width]
    w,                  # Weight tensor: [out_channels, in_channels, kernel_height, kernel_width]
    s,                  # Style tensor: [batch_size, in_channels]
    demodulate  = True, # Apply weight demodulation?
    padding     = 0,    # Padding: int or [padH, padW]
    input_gain  = None, # Optional scale factors for the input channels: [], [in_channels], or [batch_size, in_channels]
):
    """Modulated conv2d (StyleGAN3 variant).

    Scales the shared weights per-sample by the style `s`, optionally
    demodulates each output filter to unit norm, and executes the whole batch
    as a single grouped convolution. Returns a tensor of shape
    [batch_size, out_channels, out_height, out_width].
    """
    with misc.suppress_tracer_warnings(): # this value will be treated as a constant
        batch_size = int(x.shape[0])
    out_channels, in_channels, kh, kw = w.shape
    misc.assert_shape(w, [out_channels, in_channels, kh, kw]) # [OIkk]
    misc.assert_shape(x, [batch_size, in_channels, None, None]) # [NIHW]
    misc.assert_shape(s, [batch_size, in_channels]) # [NI]

    # Pre-normalize inputs: decouples the magnitude of the modulated weights
    # from the raw scales of w and s (only meaningful when demodulating).
    if demodulate:
        w = w * w.square().mean([1,2,3], keepdim=True).rsqrt()
        s = s * s.square().mean().rsqrt()

    # Modulate weights: scale every input channel by its per-sample style.
    w = w.unsqueeze(0) # [NOIkk]
    w = w * s.unsqueeze(1).unsqueeze(3).unsqueeze(4) # [NOIkk]

    # Demodulate weights: normalize each output filter to unit L2 norm
    # (epsilon guards the rsqrt against zero filters).
    if demodulate:
        dcoefs = (w.square().sum(dim=[2,3,4]) + 1e-8).rsqrt() # [NO]
        w = w * dcoefs.unsqueeze(2).unsqueeze(3).unsqueeze(4) # [NOIkk]

    # Apply input scaling (e.g. magnitude-EMA compensation from SynthesisLayer).
    if input_gain is not None:
        input_gain = input_gain.expand(batch_size, in_channels) # [NI]
        w = w * input_gain.unsqueeze(1).unsqueeze(3).unsqueeze(4) # [NOIkk]

    # Execute as one fused op using grouped convolution: the batch is folded
    # into the channel dimension with groups=batch_size, so each sample is
    # convolved with its own modulated weights in a single kernel launch.
    x = x.reshape(1, -1, *x.shape[2:])
    w = w.reshape(-1, in_channels, kh, kw)
    x = conv2d_gradfix.conv2d(input=x, weight=w.to(x.dtype), padding=padding, groups=batch_size)
    x = x.reshape(batch_size, -1, *x.shape[2:])
    return x
#----------------------------------------------------------------------------
@persistence.persistent_class
class FullyConnectedLayer(torch.nn.Module):
    """Fully-connected layer with equalized learning rate and an optional
    fused bias + activation (via bias_act)."""

    def __init__(self,
        in_features,                # Number of input features.
        out_features,               # Number of output features.
        activation      = 'linear', # Activation function: 'relu', 'lrelu', etc.
        bias            = True,     # Apply additive bias before the activation function?
        lr_multiplier   = 1,        # Learning rate multiplier.
        weight_init     = 1,        # Initial standard deviation of the weight tensor.
        bias_init       = 0,        # Initial value of the additive bias.
    ):
        super().__init__()
        self.in_features = in_features
        self.out_features = out_features
        self.activation = activation
        # Parameters are stored scaled down by lr_multiplier and scaled back up
        # at runtime, which effectively raises their learning rate.
        self.weight = torch.nn.Parameter(torch.randn([out_features, in_features]) * (weight_init / lr_multiplier))
        bias_init = np.broadcast_to(np.asarray(bias_init, dtype=np.float32), [out_features])
        self.bias = torch.nn.Parameter(torch.from_numpy(bias_init / lr_multiplier)) if bias else None
        self.weight_gain = lr_multiplier / np.sqrt(in_features)  # equalized-lr scale
        self.bias_gain = lr_multiplier

    def forward(self, x):
        weight = self.weight.to(x.dtype) * self.weight_gain
        bias = self.bias
        if bias is not None:
            bias = bias.to(x.dtype)
            if self.bias_gain != 1:
                bias = bias * self.bias_gain
        if self.activation == 'linear' and bias is not None:
            # Fused multiply-add fast path for the plain linear case.
            return torch.addmm(bias.unsqueeze(0), x, weight.t())
        out = x.matmul(weight.t())
        return bias_act.bias_act(out, bias, act=self.activation)

    def extra_repr(self):
        return f'in_features={self.in_features:d}, out_features={self.out_features:d}, activation={self.activation:s}'
#----------------------------------------------------------------------------
@persistence.persistent_class
class MappingNetwork(torch.nn.Module):
    """StyleGAN3 mapping network: maps (z, c) to num_ws copies of the
    intermediate latent w, with optional truncation toward a tracked mean."""

    def __init__(self,
        z_dim,                      # Input latent (Z) dimensionality.
        c_dim,                      # Conditioning label (C) dimensionality, 0 = no labels.
        w_dim,                      # Intermediate latent (W) dimensionality.
        num_ws,                     # Number of intermediate latents to output.
        num_layers      = 2,        # Number of mapping layers.
        lr_multiplier   = 0.01,     # Learning rate multiplier for the mapping layers.
        w_avg_beta      = 0.998,    # Decay for tracking the moving average of W during training.
    ):
        super().__init__()
        self.z_dim = z_dim
        self.c_dim = c_dim
        self.w_dim = w_dim
        self.num_ws = num_ws
        self.num_layers = num_layers
        self.w_avg_beta = w_avg_beta

        # Construct layers. Input width is z_dim plus the label embedding (if any).
        self.embed = FullyConnectedLayer(self.c_dim, self.w_dim) if self.c_dim > 0 else None
        features = [self.z_dim + (self.w_dim if self.c_dim > 0 else 0)] + [self.w_dim] * self.num_layers
        for idx, in_features, out_features in zip(range(num_layers), features[:-1], features[1:]):
            layer = FullyConnectedLayer(in_features, out_features, activation='lrelu', lr_multiplier=lr_multiplier)
            setattr(self, f'fc{idx}', layer)
        self.register_buffer('w_avg', torch.zeros([w_dim]))  # running mean of w, used for truncation

    def forward(self, z, c, truncation_psi=1, truncation_cutoff=None, update_emas=False):
        """Return latents of shape [batch, num_ws, w_dim].

        truncation_psi=1 disables truncation; otherwise the first
        truncation_cutoff latents are lerped toward w_avg.
        """
        misc.assert_shape(z, [None, self.z_dim])
        if truncation_cutoff is None:
            truncation_cutoff = self.num_ws

        # Embed, normalize, and concatenate inputs.
        x = z.to(torch.float32)
        x = x * (x.square().mean(1, keepdim=True) + 1e-8).rsqrt()  # normalize to unit RMS
        if self.c_dim > 0:
            misc.assert_shape(c, [None, self.c_dim])
            y = self.embed(c.to(torch.float32))
            y = y * (y.square().mean(1, keepdim=True) + 1e-8).rsqrt()
            x = torch.cat([x, y], dim=1) if x is not None else y

        # Execute layers.
        for idx in range(self.num_layers):
            x = getattr(self, f'fc{idx}')(x)

        # Update moving average of W (training-time only, gated by the caller).
        if update_emas:
            self.w_avg.copy_(x.detach().mean(dim=0).lerp(self.w_avg, self.w_avg_beta))

        # Broadcast and apply truncation.
        x = x.unsqueeze(1).repeat([1, self.num_ws, 1])
        if truncation_psi != 1:
            x[:, :truncation_cutoff] = self.w_avg.lerp(x[:, :truncation_cutoff], truncation_psi)
        return x

    def extra_repr(self):
        return f'z_dim={self.z_dim:d}, c_dim={self.c_dim:d}, w_dim={self.w_dim:d}, num_ws={self.num_ws:d}'
#----------------------------------------------------------------------------
@persistence.persistent_class
class SynthesisInput(torch.nn.Module):
    """Input layer of the StyleGAN3 synthesis network.

    Produces a bank of 2D sinusoids (Fourier features) with randomly drawn
    frequencies and phases, transformed per sample by a rotation+translation
    predicted from w, then mixed through a trainable linear map.
    """

    def __init__(self,
        w_dim,          # Intermediate latent (W) dimensionality.
        channels,       # Number of output channels.
        size,           # Output spatial size: int or [width, height].
        sampling_rate,  # Output sampling rate.
        bandwidth,      # Output bandwidth.
    ):
        super().__init__()
        self.w_dim = w_dim
        self.channels = channels
        self.size = np.broadcast_to(np.asarray(size), [2])
        self.sampling_rate = sampling_rate
        self.bandwidth = bandwidth

        # Draw random frequencies from uniform 2D disc: Gaussian directions,
        # radii reshaped so the points are uniform over the disc, then scaled
        # to the requested bandwidth.
        freqs = torch.randn([self.channels, 2])
        radii = freqs.square().sum(dim=1, keepdim=True).sqrt()
        freqs /= radii * radii.square().exp().pow(0.25)
        freqs *= bandwidth
        phases = torch.rand([self.channels]) - 0.5

        # Setup parameters and buffers.
        self.weight = torch.nn.Parameter(torch.randn([self.channels, self.channels]))
        self.affine = FullyConnectedLayer(w_dim, 4, weight_init=0, bias_init=[1,0,0,0])  # initialized to the identity transform
        self.register_buffer('transform', torch.eye(3, 3)) # User-specified inverse transform wrt. resulting image.
        self.register_buffer('freqs', freqs)
        self.register_buffer('phases', phases)

    def forward(self, w):
        """Return Fourier-feature maps of shape [batch, channels, size[1], size[0]]."""
        # Introduce batch dimension.
        transforms = self.transform.unsqueeze(0) # [batch, row, col]
        freqs = self.freqs.unsqueeze(0) # [batch, channel, xy]
        phases = self.phases.unsqueeze(0) # [batch, channel]

        # Apply learned transformation.
        t = self.affine(w) # t = (r_c, r_s, t_x, t_y)
        t = t / t[:, :2].norm(dim=1, keepdim=True) # t' = (r'_c, r'_s, t'_x, t'_y); normalize (r_c, r_s) to a unit rotation
        m_r = torch.eye(3, device=w.device).unsqueeze(0).repeat([w.shape[0], 1, 1]) # Inverse rotation wrt. resulting image.
        m_r[:, 0, 0] = t[:, 0]  # r'_c
        m_r[:, 0, 1] = -t[:, 1] # r'_s
        m_r[:, 1, 0] = t[:, 1]  # r'_s
        m_r[:, 1, 1] = t[:, 0]  # r'_c
        m_t = torch.eye(3, device=w.device).unsqueeze(0).repeat([w.shape[0], 1, 1]) # Inverse translation wrt. resulting image.
        m_t[:, 0, 2] = -t[:, 2] # t'_x
        m_t[:, 1, 2] = -t[:, 3] # t'_y
        transforms = m_r @ m_t @ transforms # First rotate resulting image, then translate, and finally apply user-specified transform.

        # Transform frequencies: translation shifts phases, rotation mixes the
        # xy components of each frequency vector.
        phases = phases + (freqs @ transforms[:, :2, 2:]).squeeze(2)
        freqs = freqs @ transforms[:, :2, :2]

        # Dampen out-of-band frequencies that may occur due to the user-specified
        # transform: amplitude ramps linearly from 1 at `bandwidth` down to 0 at
        # the Nyquist rate (sampling_rate / 2).
        amplitudes = (1 - (freqs.norm(dim=2) - self.bandwidth) / (self.sampling_rate / 2 - self.bandwidth)).clamp(0, 1)

        # Construct sampling grid.
        theta = torch.eye(2, 3, device=w.device)
        theta[0, 0] = 0.5 * self.size[0] / self.sampling_rate
        theta[1, 1] = 0.5 * self.size[1] / self.sampling_rate
        grids = torch.nn.functional.affine_grid(theta.unsqueeze(0), [1, 1, self.size[1], self.size[0]], align_corners=False)

        # Compute Fourier features.
        x = (grids.unsqueeze(3) @ freqs.permute(0, 2, 1).unsqueeze(1).unsqueeze(2)).squeeze(3) # [batch, height, width, channel]
        x = x + phases.unsqueeze(1).unsqueeze(2)
        x = torch.sin(x * (np.pi * 2))
        x = x * amplitudes.unsqueeze(1).unsqueeze(2)

        # Apply trainable mapping (scaled for unit-variance output at init).
        weight = self.weight / np.sqrt(self.channels)
        x = x @ weight.t()

        # Ensure correct shape.
        x = x.permute(0, 3, 1, 2) # [batch, channel, height, width]
        misc.assert_shape(x, [w.shape[0], self.channels, int(self.size[1]), int(self.size[0])])
        return x

    def extra_repr(self):
        return '\n'.join([
            f'w_dim={self.w_dim:d}, channels={self.channels:d}, size={list(self.size)},',
            f'sampling_rate={self.sampling_rate:g}, bandwidth={self.bandwidth:g}'])
#----------------------------------------------------------------------------
@persistence.persistent_class
class SynthesisLayer(torch.nn.Module):
    """Alias-free (StyleGAN3) synthesis layer.

    Applies a modulated conv2d, then a bias + leaky-ReLU evaluated at a higher
    temporary sampling rate bracketed by Kaiser low-pass up/down filters, so
    the nonlinearity does not introduce aliasing into the output band.
    """

    def __init__(self,
        w_dim,                          # Intermediate latent (W) dimensionality.
        is_torgb,                       # Is this the final ToRGB layer?
        is_critically_sampled,          # Does this layer use critical sampling?
        use_fp16,                       # Does this layer use FP16?

        # Input & output specifications.
        in_channels,                    # Number of input channels.
        out_channels,                   # Number of output channels.
        in_size,                        # Input spatial size: int or [width, height].
        out_size,                       # Output spatial size: int or [width, height].
        in_sampling_rate,               # Input sampling rate (s).
        out_sampling_rate,              # Output sampling rate (s).
        in_cutoff,                      # Input cutoff frequency (f_c).
        out_cutoff,                     # Output cutoff frequency (f_c).
        in_half_width,                  # Input transition band half-width (f_h).
        out_half_width,                 # Output Transition band half-width (f_h).

        # Hyperparameters.
        conv_kernel         = 3,        # Convolution kernel size. Ignored for final the ToRGB layer.
        filter_size         = 6,        # Low-pass filter size relative to the lower resolution when up/downsampling.
        lrelu_upsampling    = 2,        # Relative sampling rate for leaky ReLU. Ignored for final the ToRGB layer.
        use_radial_filters  = False,    # Use radially symmetric downsampling filter? Ignored for critically sampled layers.
        conv_clamp          = 256,      # Clamp the output to [-X, +X], None = disable clamping.
        magnitude_ema_beta  = 0.999,    # Decay rate for the moving average of input magnitudes.
    ):
        super().__init__()
        self.w_dim = w_dim
        self.is_torgb = is_torgb
        self.is_critically_sampled = is_critically_sampled
        self.use_fp16 = use_fp16
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.in_size = np.broadcast_to(np.asarray(in_size), [2])
        self.out_size = np.broadcast_to(np.asarray(out_size), [2])
        self.in_sampling_rate = in_sampling_rate
        self.out_sampling_rate = out_sampling_rate
        # Temporary rate at which the nonlinearity is evaluated; ToRGB skips
        # the extra lrelu upsampling.
        self.tmp_sampling_rate = max(in_sampling_rate, out_sampling_rate) * (1 if is_torgb else lrelu_upsampling)
        self.in_cutoff = in_cutoff
        self.out_cutoff = out_cutoff
        self.in_half_width = in_half_width
        self.out_half_width = out_half_width
        self.conv_kernel = 1 if is_torgb else conv_kernel
        self.conv_clamp = conv_clamp
        self.magnitude_ema_beta = magnitude_ema_beta

        # Setup parameters and buffers.
        self.affine = FullyConnectedLayer(self.w_dim, self.in_channels, bias_init=1)
        self.weight = torch.nn.Parameter(torch.randn([self.out_channels, self.in_channels, self.conv_kernel, self.conv_kernel]))
        self.bias = torch.nn.Parameter(torch.zeros([self.out_channels]))
        self.register_buffer('magnitude_ema', torch.ones([]))  # EMA of mean input magnitude, compensated at runtime

        # Design upsampling filter.
        self.up_factor = int(np.rint(self.tmp_sampling_rate / self.in_sampling_rate))
        assert self.in_sampling_rate * self.up_factor == self.tmp_sampling_rate  # factors must be exact integers
        self.up_taps = filter_size * self.up_factor if self.up_factor > 1 and not self.is_torgb else 1
        self.register_buffer('up_filter', self.design_lowpass_filter(
            numtaps=self.up_taps, cutoff=self.in_cutoff, width=self.in_half_width*2, fs=self.tmp_sampling_rate))

        # Design downsampling filter.
        self.down_factor = int(np.rint(self.tmp_sampling_rate / self.out_sampling_rate))
        assert self.out_sampling_rate * self.down_factor == self.tmp_sampling_rate
        self.down_taps = filter_size * self.down_factor if self.down_factor > 1 and not self.is_torgb else 1
        self.down_radial = use_radial_filters and not self.is_critically_sampled
        self.register_buffer('down_filter', self.design_lowpass_filter(
            numtaps=self.down_taps, cutoff=self.out_cutoff, width=self.out_half_width*2, fs=self.tmp_sampling_rate, radial=self.down_radial))

        # Compute padding so that the filtered_lrelu pipeline yields exactly out_size.
        pad_total = (self.out_size - 1) * self.down_factor + 1 # Desired output size before downsampling.
        pad_total -= (self.in_size + self.conv_kernel - 1) * self.up_factor # Input size after upsampling.
        pad_total += self.up_taps + self.down_taps - 2 # Size reduction caused by the filters.
        pad_lo = (pad_total + self.up_factor) // 2 # Shift sample locations according to the symmetric interpretation (Appendix C.3).
        pad_hi = pad_total - pad_lo
        self.padding = [int(pad_lo[0]), int(pad_hi[0]), int(pad_lo[1]), int(pad_hi[1])]

    def forward(self, x, w, noise_mode='random', force_fp32=False, update_emas=False):
        """Run the layer.

        Args:
            x: Input feature maps, shape [batch, in_channels, in_size[1], in_size[0]].
            w: Per-sample latent, shape [batch, w_dim].
            noise_mode:  Accepted for API compatibility; unused (StyleGAN3 has no per-layer noise).
            force_fp32:  Run in FP32 even if the layer is configured for FP16.
            update_emas: Update the input-magnitude EMA (training-time bookkeeping).

        Returns:
            Output feature maps, shape [batch, out_channels, out_size[1], out_size[0]].
        """
        assert noise_mode in ['random', 'const', 'none'] # unused
        misc.assert_shape(x, [None, self.in_channels, int(self.in_size[1]), int(self.in_size[0])])
        misc.assert_shape(w, [x.shape[0], self.w_dim])

        # Track input magnitude; the EMA is then used to normalize the input
        # scale inside modulated_conv2d via input_gain.
        if update_emas:
            with torch.autograd.profiler.record_function('update_magnitude_ema'):
                magnitude_cur = x.detach().to(torch.float32).square().mean()
                self.magnitude_ema.copy_(magnitude_cur.lerp(self.magnitude_ema, self.magnitude_ema_beta))
        input_gain = self.magnitude_ema.rsqrt()

        # Execute affine layer.
        styles = self.affine(w)
        if self.is_torgb:
            # ToRGB uses no demodulation; fold the equivalent scale into the styles.
            weight_gain = 1 / np.sqrt(self.in_channels * (self.conv_kernel ** 2))
            styles = styles * weight_gain

        # Execute modulated conv2d.
        dtype = torch.float16 if (self.use_fp16 and not force_fp32 and x.device.type == 'cuda') else torch.float32
        x = modulated_conv2d(x=x.to(dtype), w=self.weight, s=styles,
            padding=self.conv_kernel-1, demodulate=(not self.is_torgb), input_gain=input_gain)

        # Execute bias, filtered leaky ReLU, and clamping as one fused op.
        gain = 1 if self.is_torgb else np.sqrt(2)   # lrelu variance compensation
        slope = 1 if self.is_torgb else 0.2         # slope 1 = identity (no nonlinearity) for ToRGB
        x = filtered_lrelu.filtered_lrelu(x=x, fu=self.up_filter, fd=self.down_filter, b=self.bias.to(x.dtype),
            up=self.up_factor, down=self.down_factor, padding=self.padding, gain=gain, slope=slope, clamp=self.conv_clamp)

        # Ensure correct shape and dtype.
        misc.assert_shape(x, [None, self.out_channels, int(self.out_size[1]), int(self.out_size[0])])
        assert x.dtype == dtype
        return x

    @staticmethod
    def design_lowpass_filter(numtaps, cutoff, width, fs, radial=False):
        """Design a low-pass filter with `numtaps` taps, cutoff `cutoff` and
        transition width `width` at sampling rate `fs`. Returns a 1D separable
        Kaiser filter, a 2D radially symmetric jinc filter (radial=True), or
        None for the identity (numtaps == 1)."""
        assert numtaps >= 1

        # Identity filter.
        if numtaps == 1:
            return None

        # Separable Kaiser low-pass filter.
        if not radial:
            f = scipy.signal.firwin(numtaps=numtaps, cutoff=cutoff, width=width, fs=fs)
            return torch.as_tensor(f, dtype=torch.float32)

        # Radially symmetric jinc-based filter, windowed by a 2D Kaiser window.
        # NOTE(review): `scipy.special` is not imported at the top of this file;
        # this relies on `import scipy.signal` pulling it in as a side effect —
        # consider adding an explicit `import scipy.special`. Also, for odd
        # numtaps the center tap evaluates j1(0)/0 — presumably numtaps is
        # always even here (filter_size * factor); verify against callers.
        x = (np.arange(numtaps) - (numtaps - 1) / 2) / fs
        r = np.hypot(*np.meshgrid(x, x))
        f = scipy.special.j1(2 * cutoff * (np.pi * r)) / (np.pi * r)
        beta = scipy.signal.kaiser_beta(scipy.signal.kaiser_atten(numtaps, width / (fs / 2)))
        w = np.kaiser(numtaps, beta)
        f *= np.outer(w, w)
        f /= np.sum(f)  # normalize to unit DC gain
        return torch.as_tensor(f, dtype=torch.float32)

    def extra_repr(self):
        return '\n'.join([
            f'w_dim={self.w_dim:d}, is_torgb={self.is_torgb},',
            f'is_critically_sampled={self.is_critically_sampled}, use_fp16={self.use_fp16},',
            f'in_sampling_rate={self.in_sampling_rate:g}, out_sampling_rate={self.out_sampling_rate:g},',
            f'in_cutoff={self.in_cutoff:g}, out_cutoff={self.out_cutoff:g},',
            f'in_half_width={self.in_half_width:g}, out_half_width={self.out_half_width:g},',
            f'in_size={list(self.in_size)}, out_size={list(self.out_size)},',
            f'in_channels={self.in_channels:d}, out_channels={self.out_channels:d}'])
#----------------------------------------------------------------------------
@persistence.persistent_class
class SynthesisNetwork(torch.nn.Module):
    """StyleGAN3 synthesis network.

    Maps a batch of intermediate latents `ws` to an image. The network
    consists of a Fourier-feature input layer followed by `num_layers + 1`
    SynthesisLayers (the last one being ToRGB), whose cutoff frequencies,
    stopbands, sampling rates, and channel counts follow a geometric
    progression computed in __init__.
    """
    def __init__(self,
        w_dim,                          # Intermediate latent (W) dimensionality.
        img_resolution,                 # Output image resolution.
        img_channels,                   # Number of color channels.
        channel_base        = 32768,    # Overall multiplier for the number of channels.
        channel_max         = 512,      # Maximum number of channels in any layer.
        num_layers          = 14,       # Total number of layers, excluding Fourier features and ToRGB.
        num_critical        = 2,        # Number of critically sampled layers at the end.
        first_cutoff        = 2,        # Cutoff frequency of the first layer (f_{c,0}).
        first_stopband      = 2**2.1,   # Minimum stopband of the first layer (f_{t,0}).
        last_stopband_rel   = 2**0.3,   # Minimum stopband of the last layer, expressed relative to the cutoff.
        margin_size         = 10,       # Number of additional pixels outside the image.
        output_scale        = 0.25,     # Scale factor for the output image.
        num_fp16_res        = 4,        # Use FP16 for the N highest resolutions.
        **layer_kwargs,                 # Arguments for SynthesisLayer.
    ):
        super().__init__()
        self.w_dim = w_dim
        # One w for the input layer plus one per SynthesisLayer (num_layers + ToRGB).
        self.num_ws = num_layers + 2
        self.img_resolution = img_resolution
        self.img_channels = img_channels
        self.num_layers = num_layers
        self.num_critical = num_critical
        self.margin_size = margin_size
        self.output_scale = output_scale
        self.num_fp16_res = num_fp16_res

        # Geometric progression of layer cutoffs and min. stopbands.
        last_cutoff = self.img_resolution / 2 # f_{c,N}
        last_stopband = last_cutoff * last_stopband_rel # f_{t,N}
        # Interpolation exponent per layer, clamped to 1 so that the final
        # `num_critical` layers all use the last (critically sampled) values.
        exponents = np.minimum(np.arange(self.num_layers + 1) / (self.num_layers - self.num_critical), 1)
        cutoffs = first_cutoff * (last_cutoff / first_cutoff) ** exponents # f_c[i]
        stopbands = first_stopband * (last_stopband / first_stopband) ** exponents # f_t[i]

        # Compute remaining layer parameters.
        # Sampling rate: smallest power of two that accommodates twice the stopband,
        # capped at the output resolution.
        sampling_rates = np.exp2(np.ceil(np.log2(np.minimum(stopbands * 2, self.img_resolution)))) # s[i]
        # Transition-band half-width: f_h = max(f_t, s/2) - f_c.
        half_widths = np.maximum(stopbands, sampling_rates / 2) - cutoffs # f_h[i]
        # Spatial extent of each feature map, including margin pixels.
        sizes = sampling_rates + self.margin_size * 2
        # The last two (critically sampled) layers use the exact image size, no margin.
        sizes[-2:] = self.img_resolution
        # Channel count inversely proportional to cutoff frequency, capped at channel_max.
        channels = np.rint(np.minimum((channel_base / 2) / cutoffs, channel_max))
        # Final layer is ToRGB and must emit img_channels.
        channels[-1] = self.img_channels

        # Construct layers.
        self.input = SynthesisInput(
            w_dim=self.w_dim, channels=int(channels[0]), size=int(sizes[0]),
            sampling_rate=sampling_rates[0], bandwidth=cutoffs[0])
        self.layer_names = []
        for idx in range(self.num_layers + 1):
            prev = max(idx - 1, 0)
            is_torgb = (idx == self.num_layers)
            is_critically_sampled = (idx >= self.num_layers - self.num_critical)
            # FP16 for layers whose sampling rate is within num_fp16_res doublings
            # of the output resolution (i.e. the highest-resolution layers).
            use_fp16 = (sampling_rates[idx] * (2 ** self.num_fp16_res) > self.img_resolution)
            layer = SynthesisLayer(
                w_dim=self.w_dim, is_torgb=is_torgb, is_critically_sampled=is_critically_sampled, use_fp16=use_fp16,
                in_channels=int(channels[prev]), out_channels= int(channels[idx]),
                in_size=int(sizes[prev]), out_size=int(sizes[idx]),
                in_sampling_rate=int(sampling_rates[prev]), out_sampling_rate=int(sampling_rates[idx]),
                in_cutoff=cutoffs[prev], out_cutoff=cutoffs[idx],
                in_half_width=half_widths[prev], out_half_width=half_widths[idx],
                **layer_kwargs)
            # Register under a descriptive name, e.g. 'L3_52_512'.
            name = f'L{idx}_{layer.out_size[0]}_{layer.out_channels}'
            setattr(self, name, layer)
            self.layer_names.append(name)

    def forward(self, ws, **layer_kwargs):
        """Synthesize an image from per-layer intermediate latents.

        Args:
            ws: Tensor of shape [batch, num_ws, w_dim]; ws[:, 0] drives the
                input layer, the rest drive the synthesis layers in order.
        """
        misc.assert_shape(ws, [None, self.num_ws, self.w_dim])
        ws = ws.to(torch.float32).unbind(dim=1)

        # Execute layers.
        x = self.input(ws[0])
        for name, w in zip(self.layer_names, ws[1:]):
            x = getattr(self, name)(x, w, **layer_kwargs)
        if self.output_scale != 1:
            x = x * self.output_scale

        # Ensure correct shape and dtype.
        misc.assert_shape(x, [None, self.img_channels, self.img_resolution, self.img_resolution])
        x = x.to(torch.float32)
        return x

    def extra_repr(self):
        """Summarize the network configuration for `repr()` output."""
        return '\n'.join([
            f'w_dim={self.w_dim:d}, num_ws={self.num_ws:d},',
            f'img_resolution={self.img_resolution:d}, img_channels={self.img_channels:d},',
            f'num_layers={self.num_layers:d}, num_critical={self.num_critical:d},',
            f'margin_size={self.margin_size:d}, num_fp16_res={self.num_fp16_res:d}'])
#----------------------------------------------------------------------------
@persistence.persistent_class
class Generator(torch.nn.Module):
    """Complete StyleGAN3 generator: a MappingNetwork (Z, C -> W) followed
    by a SynthesisNetwork (W -> image)."""
    def __init__(self,
        z_dim,                      # Input latent (Z) dimensionality.
        c_dim,                      # Conditioning label (C) dimensionality.
        w_dim,                      # Intermediate latent (W) dimensionality.
        img_resolution,             # Output resolution.
        img_channels,               # Number of output color channels.
        mapping_kwargs      = {},   # Arguments for MappingNetwork.
        **synthesis_kwargs,         # Arguments for SynthesisNetwork.
    ):
        super().__init__()
        # Record the basic configuration.
        self.z_dim = z_dim
        self.c_dim = c_dim
        self.w_dim = w_dim
        self.img_resolution = img_resolution
        self.img_channels = img_channels
        # Build the synthesis network first: it determines how many
        # intermediate latents the mapping network must produce.
        self.synthesis = SynthesisNetwork(w_dim=w_dim, img_resolution=img_resolution, img_channels=img_channels, **synthesis_kwargs)
        self.num_ws = self.synthesis.num_ws
        self.mapping = MappingNetwork(z_dim=z_dim, c_dim=c_dim, w_dim=w_dim, num_ws=self.num_ws, **mapping_kwargs)

    def forward(self, z, c, truncation_psi=1, truncation_cutoff=None, update_emas=False, **synthesis_kwargs):
        """Map latents (z, c) to W space and synthesize an image."""
        latents = self.mapping(z, c, truncation_psi=truncation_psi, truncation_cutoff=truncation_cutoff, update_emas=update_emas)
        image = self.synthesis(latents, update_emas=update_emas, **synthesis_kwargs)
        return image
#----------------------------------------------------------------------------
| 26,208 | 49.792636 | 141 | py |
DiffProxy | DiffProxy-main/stylegan/torch_utils/custom_ops.py | # Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
import glob
import hashlib
import importlib
import os
import re
import shutil
import uuid
import torch
import torch.utils.cpp_extension
from torch.utils.file_baton import FileBaton
#----------------------------------------------------------------------------
# Global options.
verbosity = 'brief' # Verbosity level: 'none', 'brief', 'full'
#----------------------------------------------------------------------------
# Internal helper funcs.
def _find_compiler_bindir():
    """Locate an MSVC host-x64 compiler bin directory on Windows.

    Tries a list of known Visual Studio install layouts and returns the
    last (i.e. highest-versioned) match, or None if nothing is found.
    """
    candidate_patterns = [
        'C:/Program Files (x86)/Microsoft Visual Studio/*/Professional/VC/Tools/MSVC/*/bin/Hostx64/x64',
        'C:/Program Files (x86)/Microsoft Visual Studio/*/BuildTools/VC/Tools/MSVC/*/bin/Hostx64/x64',
        'C:/Program Files (x86)/Microsoft Visual Studio/*/Community/VC/Tools/MSVC/*/bin/Hostx64/x64',
        'C:/Program Files (x86)/Microsoft Visual Studio */vc/bin',
    ]
    for candidate in candidate_patterns:
        hits = sorted(glob.glob(candidate))
        if hits:
            return hits[-1]  # Lexicographically last == newest version.
    return None
#----------------------------------------------------------------------------
def _get_mangled_gpu_name():
    """Return the current CUDA device name, lowercased, with every character
    outside [a-z0-9_-] replaced by '-' (safe for use in directory names)."""
    raw = torch.cuda.get_device_name().lower()
    mangled = []
    for ch in raw:
        mangled.append(ch if re.match('[a-z0-9_-]+', ch) else '-')
    return ''.join(mangled)
#----------------------------------------------------------------------------
# Main entry point for compiling and loading C++/CUDA plugins.
_cached_plugins = dict()
def get_plugin(module_name, sources, headers=None, source_dir=None, **build_kwargs):
    """Compile and load a custom C++/CUDA extension module, with caching.

    Args:
        module_name:    Name under which the compiled module is imported.
        sources:        Source files (*.cpp / *.cu) to compile.
        headers:        Header files the sources depend on; used for change
                        detection only. Defaults to [].
        source_dir:     Optional directory prepended to each entry of
                        `sources` and `headers`.
        **build_kwargs: Forwarded to `torch.utils.cpp_extension.load()`.

    Returns:
        The imported plugin module. Repeated calls with the same
        `module_name` return the cached module without rebuilding.
    """
    assert verbosity in ['none', 'brief', 'full']
    if headers is None:
        headers = []
    if source_dir is not None:
        sources = [os.path.join(source_dir, fname) for fname in sources]
        headers = [os.path.join(source_dir, fname) for fname in headers]

    # Already cached?
    if module_name in _cached_plugins:
        return _cached_plugins[module_name]

    # Print status.
    if verbosity == 'full':
        print(f'Setting up PyTorch plugin "{module_name}"...')
    elif verbosity == 'brief':
        print(f'Setting up PyTorch plugin "{module_name}"... ', end='', flush=True)
    verbose_build = (verbosity == 'full')

    # Compile and load.
    try: # pylint: disable=too-many-nested-blocks
        # Make sure we can find the necessary compiler binaries.
        # On Windows, if cl.exe is not already on PATH, locate an MSVC install.
        if os.name == 'nt' and os.system("where cl.exe >nul 2>nul") != 0:
            compiler_bindir = _find_compiler_bindir()
            if compiler_bindir is None:
                raise RuntimeError(f'Could not find MSVC/GCC/CLANG installation on this computer. Check _find_compiler_bindir() in "{__file__}".')
            os.environ['PATH'] += ';' + compiler_bindir

        # Some containers set TORCH_CUDA_ARCH_LIST to a list that can either
        # break the build or unnecessarily restrict what's available to nvcc.
        # Unset it to let nvcc decide based on what's available on the
        # machine.
        os.environ['TORCH_CUDA_ARCH_LIST'] = ''

        # Incremental build md5sum trickery. Copies all the input source files
        # into a cached build directory under a combined md5 digest of the input
        # source files. Copying is done only if the combined digest has changed.
        # This keeps input file timestamps and filenames the same as in previous
        # extension builds, allowing for fast incremental rebuilds.
        #
        # This optimization is done only in case all the source files reside in
        # a single directory (just for simplicity) and if the TORCH_EXTENSIONS_DIR
        # environment variable is set (we take this as a signal that the user
        # actually cares about this.)
        #
        # EDIT: We now do it regardless of TORCH_EXTENSIONS_DIR, in order to work
        # around the *.cu dependency bug in ninja config.
        #
        all_source_files = sorted(sources + headers)
        all_source_dirs = set(os.path.dirname(fname) for fname in all_source_files)
        if len(all_source_dirs) == 1: # and ('TORCH_EXTENSIONS_DIR' in os.environ):

            # Compute combined hash digest for all source files.
            hash_md5 = hashlib.md5()
            for src in all_source_files:
                with open(src, 'rb') as f:
                    hash_md5.update(f.read())

            # Select cached build directory name.
            source_digest = hash_md5.hexdigest()
            build_top_dir = torch.utils.cpp_extension._get_build_directory(module_name, verbose=verbose_build) # pylint: disable=protected-access
            cached_build_dir = os.path.join(build_top_dir, f'{source_digest}-{_get_mangled_gpu_name()}')

            if not os.path.isdir(cached_build_dir):
                # Stage the sources in a temp dir and promote it atomically so
                # concurrent processes cannot observe a half-copied directory.
                tmpdir = f'{build_top_dir}/srctmp-{uuid.uuid4().hex}'
                os.makedirs(tmpdir)
                for src in all_source_files:
                    shutil.copyfile(src, os.path.join(tmpdir, os.path.basename(src)))
                try:
                    os.replace(tmpdir, cached_build_dir) # atomic
                except OSError:
                    # source directory already exists, delete tmpdir and its contents.
                    shutil.rmtree(tmpdir)
                    if not os.path.isdir(cached_build_dir): raise

            # Compile.
            cached_sources = [os.path.join(cached_build_dir, os.path.basename(fname)) for fname in sources]
            torch.utils.cpp_extension.load(name=module_name, build_directory=cached_build_dir,
                verbose=verbose_build, sources=cached_sources, **build_kwargs)
        else:
            torch.utils.cpp_extension.load(name=module_name, verbose=verbose_build, sources=sources, **build_kwargs)

        # Load.
        module = importlib.import_module(module_name)

    except:
        if verbosity == 'brief':
            print('Failed!')
        raise

    # Print status and add to cache dict.
    if verbosity == 'full':
        print(f'Done setting up PyTorch plugin "{module_name}".')
    elif verbosity == 'brief':
        print('Done.')
    _cached_plugins[module_name] = module
    return module
#----------------------------------------------------------------------------
| 6,666 | 41.196203 | 146 | py |
DiffProxy | DiffProxy-main/stylegan/torch_utils/training_stats.py | # Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
"""Facilities for reporting and collecting training statistics across
multiple processes and devices. The interface is designed to minimize
synchronization overhead as well as the amount of boilerplate in user
code."""
import re
import numpy as np
import torch
import dnnlib
from . import misc
#----------------------------------------------------------------------------
_num_moments = 3 # [num_scalars, sum_of_scalars, sum_of_squares]
_reduce_dtype = torch.float32 # Data type to use for initial per-tensor reduction.
_counter_dtype = torch.float64 # Data type to use for the internal counters.
_rank = 0 # Rank of the current process.
_sync_device = None # Device to use for multiprocess communication. None = single-process.
_sync_called = False # Has _sync() been called yet?
_counters = dict() # Running counters on each device, updated by report(): name => device => torch.Tensor
_cumulative = dict() # Cumulative counters on the CPU, updated by _sync(): name => torch.Tensor
#----------------------------------------------------------------------------
def init_multiprocessing(rank, sync_device):
    r"""Prepare `torch_utils.training_stats` for collecting statistics
    across multiple processes.

    Must be called after `torch.distributed.init_process_group()` and
    before `Collector.update()`. The call is unnecessary for
    single-process runs.

    Args:
        rank:        Rank of the current process.
        sync_device: PyTorch device to use for inter-process
                     communication, typically `torch.device('cuda', rank)`,
                     or None to disable multi-process collection.
    """
    global _rank, _sync_device
    # Reconfiguring after the first sync would corrupt the counters.
    assert not _sync_called
    _rank = rank
    _sync_device = sync_device
#----------------------------------------------------------------------------
@misc.profiled_function
def report(name, value):
    r"""Accumulate the given set of scalars into the global counters under
    `name`, making them visible to all interested `Collector` instances
    across device and process boundaries.

    This function is expected to be extremely cheap and can be safely
    called from anywhere in the training loop, loss function, or inside a
    `torch.nn.Module`.

    Warning: The current implementation expects the set of unique names to
    be consistent across processes. Make sure `report()` is called at
    least once for each unique name by each process, and in the same
    order; a process with nothing to contribute should call
    `report(name, [])`.

    Args:
        name:   Arbitrary string specifying the name of the statistic.
                Averages are accumulated separately for each unique name.
        value:  Arbitrary set of scalars: list, tuple, NumPy array,
                PyTorch tensor, or Python scalar.

    Returns:
        The same `value` that was passed in.
    """
    per_device = _counters.setdefault(name, dict())

    vals = torch.as_tensor(value)
    if vals.numel() == 0:
        return value  # Nothing to accumulate.

    vals = vals.detach().flatten().to(_reduce_dtype)
    count = torch.ones_like(vals).sum()
    total = vals.sum()
    total_sq = vals.square().sum()
    moments = torch.stack([count, total, total_sq])
    assert moments.ndim == 1 and moments.shape[0] == _num_moments
    moments = moments.to(_counter_dtype)

    device = moments.device
    if device not in per_device:
        per_device[device] = torch.zeros_like(moments)
    per_device[device].add_(moments)
    return value
#----------------------------------------------------------------------------
def report0(name, value):
    r"""Like `report()`, but only rank 0 contributes its scalars; every
    other process broadcasts an empty set under the same name so the
    per-name call ordering stays consistent across processes.
    """
    if _rank == 0:
        report(name, value)
    else:
        report(name, [])
    return value
#----------------------------------------------------------------------------
class Collector:
    r"""Collects the scalars broadcasted by `report()` and `report0()` and
    computes their long-term averages (mean and standard deviation) over
    user-defined periods of time.

    The averages are first collected into internal counters that are not
    directly visible to the user. They are then copied to the user-visible
    state as a result of calling `update()` and can then be queried using
    `mean()`, `std()`, `as_dict()`, etc. Calling `update()` also resets the
    internal counters for the next round, so that the user-visible state
    effectively reflects averages collected between the last two calls to
    `update()`.

    Args:
        regex:          Regular expression defining which statistics to
                        collect. The default is to collect everything.
        keep_previous:  Whether to retain the previous averages if no
                        scalars were collected on a given round
                        (default: True).
    """
    def __init__(self, regex='.*', keep_previous=True):
        self._regex = re.compile(regex)
        self._keep_previous = keep_previous
        self._cumulative = dict()  # name => cumulative moments seen at the last update().
        self._moments = dict()     # name => moments accumulated between the last two updates.
        # Prime the cumulative state so the first real update() sees clean deltas.
        self.update()
        self._moments.clear()

    def names(self):
        r"""Returns the names of all statistics broadcasted so far that
        match the regular expression specified at construction time.
        """
        return [name for name in _counters if self._regex.fullmatch(name)]

    def update(self):
        r"""Copies current values of the internal counters to the
        user-visible state and resets them for the next round.

        If `keep_previous=True` was specified at construction time, the
        operation is skipped for statistics that have received no scalars
        since the last update, retaining their previous averages.

        This method performs a number of GPU-to-CPU transfers and one
        `torch.distributed.all_reduce()`. It is intended to be called
        periodically in the main training loop, typically once every
        N training steps.
        """
        if not self._keep_previous:
            self._moments.clear()
        for name, cumulative in _sync(self.names()):
            if name not in self._cumulative:
                self._cumulative[name] = torch.zeros([_num_moments], dtype=_counter_dtype)
            delta = cumulative - self._cumulative[name]
            self._cumulative[name].copy_(cumulative)
            # delta[0] is the scalar count; only expose stats that received data.
            if float(delta[0]) != 0:
                self._moments[name] = delta

    def _get_delta(self, name):
        r"""Returns the raw moments that were accumulated for the given
        statistic between the last two calls to `update()`, or zero if
        no scalars were collected.
        """
        assert self._regex.fullmatch(name)
        if name not in self._moments:
            self._moments[name] = torch.zeros([_num_moments], dtype=_counter_dtype)
        return self._moments[name]

    def num(self, name):
        r"""Returns the number of scalars that were accumulated for the given
        statistic between the last two calls to `update()`, or zero if
        no scalars were collected.
        """
        delta = self._get_delta(name)
        return int(delta[0])

    def mean(self, name):
        r"""Returns the mean of the scalars that were accumulated for the
        given statistic between the last two calls to `update()`, or NaN if
        no scalars were collected.
        """
        delta = self._get_delta(name)
        if int(delta[0]) == 0:
            return float('nan')
        return float(delta[1] / delta[0])

    def std(self, name):
        r"""Returns the standard deviation of the scalars that were
        accumulated for the given statistic between the last two calls to
        `update()`, or NaN if no scalars were collected.
        """
        delta = self._get_delta(name)
        if int(delta[0]) == 0 or not np.isfinite(float(delta[1])):
            return float('nan')
        if int(delta[0]) == 1:
            return float(0)
        # Var[x] = E[x^2] - E[x]^2, clamped at 0 to guard against rounding error.
        mean = float(delta[1] / delta[0])
        raw_var = float(delta[2] / delta[0])
        return np.sqrt(max(raw_var - np.square(mean), 0))

    def as_dict(self):
        r"""Returns the averages accumulated between the last two calls to
        `update()` as an `dnnlib.EasyDict`. The contents are as follows:

            dnnlib.EasyDict(
                NAME = dnnlib.EasyDict(num=FLOAT, mean=FLOAT, std=FLOAT),
                ...
            )
        """
        stats = dnnlib.EasyDict()
        for name in self.names():
            stats[name] = dnnlib.EasyDict(num=self.num(name), mean=self.mean(name), std=self.std(name))
        return stats

    def __getitem__(self, name):
        r"""Convenience getter.
        `collector[name]` is a synonym for `collector.mean(name)`.
        """
        return self.mean(name)
#----------------------------------------------------------------------------
def _sync(names):
    r"""Synchronize the global cumulative counters across devices and
    processes. Called internally by `Collector.update()`.

    Returns a list of `(name, cumulative_moments)` pairs, one per entry
    of `names`, where the moments tensor lives on the CPU.
    """
    if len(names) == 0:
        return []
    global _sync_called
    _sync_called = True

    # Collect deltas within current rank, draining (zeroing) each
    # per-device counter as it is read.
    deltas = []
    device = _sync_device if _sync_device is not None else torch.device('cpu')
    for name in names:
        delta = torch.zeros([_num_moments], dtype=_counter_dtype, device=device)
        for counter in _counters[name].values():
            delta.add_(counter.to(device))
            counter.copy_(torch.zeros_like(counter))
        deltas.append(delta)
    deltas = torch.stack(deltas)

    # Sum deltas across ranks (no-op in single-process mode).
    if _sync_device is not None:
        torch.distributed.all_reduce(deltas)

    # Update cumulative values.
    deltas = deltas.cpu()
    for idx, name in enumerate(names):
        if name not in _cumulative:
            _cumulative[name] = torch.zeros([_num_moments], dtype=_counter_dtype)
        _cumulative[name].add_(deltas[idx])

    # Return name-value pairs.
    return [(name, _cumulative[name]) for name in names]
#----------------------------------------------------------------------------
| 10,720 | 38.855019 | 118 | py |
DiffProxy | DiffProxy-main/stylegan/torch_utils/persistence.py | # Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
"""Facilities for pickling Python code alongside other data.
The pickled code is automatically imported into a separate Python module
during unpickling. This way, any previously exported pickles will remain
usable even if the original code is no longer available, or if the current
version of the code is not consistent with what was originally pickled."""
import sys
import pickle
import io
import inspect
import copy
import uuid
import types
import dnnlib
#----------------------------------------------------------------------------
_version = 6 # internal version number
_decorators = set() # {decorator_class, ...}
_import_hooks = [] # [hook_function, ...]
_module_to_src_dict = dict() # {module: src, ...}
_src_to_module_dict = dict() # {src: module, ...}
#----------------------------------------------------------------------------
def persistent_class(orig_class):
    r"""Class decorator that extends a given class to save its source code
    when pickled.

    Example:

        from torch_utils import persistence

        @persistence.persistent_class
        class MyNetwork(torch.nn.Module):
            def __init__(self, num_inputs, num_outputs):
                super().__init__()
                self.fc = MyLayer(num_inputs, num_outputs)
                ...

        @persistence.persistent_class
        class MyLayer(torch.nn.Module):
            ...

    When pickled, any instance of `MyNetwork` and `MyLayer` will save its
    source code alongside other internal state (e.g., parameters, buffers,
    and submodules). This way, any previously exported pickle will remain
    usable even if the class definitions have been modified or are no
    longer available.

    The decorator saves the source code of the entire Python module
    containing the decorated class. It does *not* save the source code of
    any imported modules. Thus, the imported modules must be available
    during unpickling, also including `torch_utils.persistence` itself.

    It is ok to call functions defined in the same module from the
    decorated class. However, if the decorated class depends on other
    classes defined in the same module, they must be decorated as well.
    This is illustrated in the above example in the case of `MyLayer`.

    It is also possible to employ the decorator just-in-time before
    calling the constructor. For example:

        cls = MyLayer
        if want_to_make_it_persistent:
            cls = persistence.persistent_class(cls)
        layer = cls(num_inputs, num_outputs)

    As an additional feature, the decorator also keeps track of the
    arguments that were used to construct each instance of the decorated
    class. The arguments can be queried via `obj.init_args` and
    `obj.init_kwargs`, and they are automatically pickled alongside other
    object state. A typical use case is to first unpickle a previous
    instance of a persistent class, and then upgrade it to use the latest
    version of the source code:

        with open('old_pickle.pkl', 'rb') as f:
            old_net = pickle.load(f)
        new_net = MyNetwork(*old_obj.init_args, **old_obj.init_kwargs)
        misc.copy_params_and_buffers(old_net, new_net, require_all=True)
    """
    assert isinstance(orig_class, type)
    if is_persistent(orig_class):
        return orig_class  # Decorating twice is a no-op.

    assert orig_class.__module__ in sys.modules
    orig_module = sys.modules[orig_class.__module__]
    # Capture the module source now, while it is still importable.
    orig_module_src = _module_to_src(orig_module)

    class Decorator(orig_class):
        # Snapshot of the defining module's source and the class name,
        # pickled alongside the object state.
        _orig_module_src = orig_module_src
        _orig_class_name = orig_class.__name__

        def __init__(self, *args, **kwargs):
            super().__init__(*args, **kwargs)
            # Remember the constructor arguments for later re-instantiation.
            self._init_args = copy.deepcopy(args)
            self._init_kwargs = copy.deepcopy(kwargs)
            assert orig_class.__name__ in orig_module.__dict__
            # Fail fast at construction time if the object cannot be pickled.
            _check_pickleable(self.__reduce__())

        @property
        def init_args(self):
            # Positional constructor arguments (deep copy, caller-safe).
            return copy.deepcopy(self._init_args)

        @property
        def init_kwargs(self):
            # Keyword constructor arguments (deep copy, caller-safe).
            return dnnlib.EasyDict(copy.deepcopy(self._init_kwargs))

        def __reduce__(self):
            # Redirect unpickling through _reconstruct_persistent_obj(),
            # embedding the module source in the pickled metadata.
            fields = list(super().__reduce__())
            fields += [None] * max(3 - len(fields), 0)
            if fields[0] is not _reconstruct_persistent_obj:
                meta = dict(type='class', version=_version, module_src=self._orig_module_src, class_name=self._orig_class_name, state=fields[2])
                fields[0] = _reconstruct_persistent_obj # reconstruct func
                fields[1] = (meta,) # reconstruct args
                fields[2] = None # state dict
            return tuple(fields)

    Decorator.__name__ = orig_class.__name__
    _decorators.add(Decorator)
    return Decorator
#----------------------------------------------------------------------------
def is_persistent(obj):
    r"""Test whether the given object or class is persistent, i.e.,
    whether it will save its source code when pickled.
    """
    found = False
    try:
        found = obj in _decorators
    except TypeError:
        found = False  # Unhashable objects cannot be decorator classes.
    if found:
        return True
    return type(obj) in _decorators # pylint: disable=unidiomatic-typecheck
#----------------------------------------------------------------------------
def import_hook(hook):
    r"""Register an import hook that is invoked whenever a persistent
    object is being unpickled. A typical use case is to patch the pickled
    source code to avoid errors and inconsistencies when the API of some
    imported module has changed.

    The hook signature is `hook(meta) -> modified meta`, where `meta` is
    an instance of `dnnlib.EasyDict` with the following fields:

        type:       Type of the persistent object, e.g. `'class'`.
        version:    Internal version number of `torch_utils.persistence`.
        module_src  Original source code of the Python module.
        class_name: Class name in the original Python module.
        state:      Internal state of the object.

    Example:

        @persistence.import_hook
        def wreck_my_network(meta):
            if meta.class_name == 'MyNetwork':
                print('MyNetwork is being imported. I will wreck it!')
                meta.module_src = meta.module_src.replace("True", "False")
            return meta
    """
    assert callable(hook)
    _import_hooks.append(hook)
#----------------------------------------------------------------------------
def _reconstruct_persistent_obj(meta):
    r"""Hook that is called internally by the `pickle` module to unpickle
    a persistent object.

    `meta` is the metadata dict embedded by `Decorator.__reduce__()`:
    it carries the original module source, class name, and object state.
    """
    meta = dnnlib.EasyDict(meta)
    meta.state = dnnlib.EasyDict(meta.state)
    # Let registered import hooks patch the metadata before use.
    for hook in _import_hooks:
        meta = hook(meta)
        assert meta is not None

    assert meta.version == _version
    # Re-create the defining module from its pickled source code.
    module = _src_to_module(meta.module_src)

    assert meta.type == 'class'
    orig_class = module.__dict__[meta.class_name]
    decorator_class = persistent_class(orig_class)
    # Bypass __init__; the state is restored below instead.
    obj = decorator_class.__new__(decorator_class)

    setstate = getattr(obj, '__setstate__', None)
    if callable(setstate):
        setstate(meta.state) # pylint: disable=not-callable
    else:
        obj.__dict__.update(meta.state)
    return obj
#----------------------------------------------------------------------------
def _module_to_src(module):
    r"""Return the source code of the given Python module, caching both
    directions of the module <-> source mapping.
    """
    cached = _module_to_src_dict.get(module, None)
    if cached is not None:
        return cached
    src = inspect.getsource(module)
    _module_to_src_dict[module] = src
    _src_to_module_dict[src] = module
    return src
def _src_to_module(src):
    r"""Get or create a Python module object for the given source code,
    caching both directions of the module <-> source mapping.
    """
    existing = _src_to_module_dict.get(src, None)
    if existing is not None:
        return existing
    # Mint a unique module under a random name and execute the source in it.
    mod_name = "_imported_module_" + uuid.uuid4().hex
    mod = types.ModuleType(mod_name)
    sys.modules[mod_name] = mod
    _module_to_src_dict[mod] = src
    _src_to_module_dict[src] = mod
    exec(src, mod.__dict__) # pylint: disable=exec-used
    return mod
#----------------------------------------------------------------------------
def _check_pickleable(obj):
    r"""Check that the given object is pickleable, raising an exception if
    it is not. Known-pickleable leaves (primitives, arrays/tensors, and
    persistent objects) are pruned to None first, which makes this
    considerably cheaper than actually pickling the object.
    """
    def strip(node):
        if isinstance(node, (list, tuple, set)):
            return [strip(item) for item in node]
        if isinstance(node, dict):
            return [[strip(k), strip(v)] for k, v in node.items()]
        if isinstance(node, (str, int, float, bool, bytes, bytearray)):
            return None # Python primitive types are pickleable.
        if f'{type(node).__module__}.{type(node).__name__}' in ['numpy.ndarray', 'torch.Tensor', 'torch.nn.parameter.Parameter']:
            return None # NumPy arrays and PyTorch tensors are pickleable.
        if is_persistent(node):
            return None # Persistent objects are pickleable, by virtue of the constructor check.
        return node
    with io.BytesIO() as f:
        pickle.dump(strip(obj), f)
#----------------------------------------------------------------------------
| 9,752 | 37.702381 | 144 | py |
DiffProxy | DiffProxy-main/stylegan/torch_utils/misc.py | # Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
import re
import contextlib
import numpy as np
import torch
import warnings
import dnnlib
#----------------------------------------------------------------------------
# Cached construction of constant tensors. Avoids CPU=>GPU copy when the
# same constant is used multiple times.
_constant_cache = dict()

def constant(value, shape=None, dtype=None, device=None, memory_format=None):
    """Return a constant tensor for `value`, cached by (value, shape, dtype,
    device, memory_format) so the same constant used repeatedly avoids a
    fresh CPU=>GPU copy each time."""
    value = np.asarray(value)
    if shape is not None:
        shape = tuple(shape)
    dtype = torch.get_default_dtype() if dtype is None else dtype
    device = torch.device('cpu') if device is None else device
    memory_format = torch.contiguous_format if memory_format is None else memory_format

    cache_key = (value.shape, value.dtype, value.tobytes(), shape, dtype, device, memory_format)
    cached = _constant_cache.get(cache_key, None)
    if cached is not None:
        return cached

    result = torch.as_tensor(value.copy(), dtype=dtype, device=device)
    if shape is not None:
        # Broadcast against an empty tensor to expand to the requested shape.
        result, _ = torch.broadcast_tensors(result, torch.empty(shape))
    result = result.contiguous(memory_format=memory_format)
    _constant_cache[cache_key] = result
    return result
#----------------------------------------------------------------------------
# Replace NaN/Inf with specified numerical values.
try:
    nan_to_num = torch.nan_to_num # 1.8.0a0
except AttributeError:
    # Fallback for PyTorch < 1.8: emulate nan_to_num by mapping NaN to 0 via
    # nansum over a singleton dimension, then clamping to replace +/-inf.
    def nan_to_num(input, nan=0.0, posinf=None, neginf=None, *, out=None): # pylint: disable=redefined-builtin
        assert isinstance(input, torch.Tensor)
        if posinf is None:
            posinf = torch.finfo(input.dtype).max  # Default: largest finite value of the dtype.
        if neginf is None:
            neginf = torch.finfo(input.dtype).min
        assert nan == 0  # The nansum trick can only map NaN to zero.
        return torch.clamp(input.unsqueeze(0).nansum(0), min=neginf, max=posinf, out=out)
#----------------------------------------------------------------------------
# Symbolic assert.
try:
    symbolic_assert = torch._assert # 1.8.0a0 # pylint: disable=protected-access
except AttributeError:
    # Older PyTorch (1.7.x) exposes the same functionality as torch.Assert.
    symbolic_assert = torch.Assert # 1.7.0
#----------------------------------------------------------------------------
# Context manager to temporarily suppress known warnings in torch.jit.trace().
# Note: Cannot use catch_warnings because of https://bugs.python.org/issue29672
@contextlib.contextmanager
def suppress_tracer_warnings():
    """Context manager that temporarily suppresses torch.jit.TracerWarning.

    A raw filter tuple is inserted at the front of `warnings.filters`
    instead of using `warnings.catch_warnings`, which is unusable here
    (see https://bugs.python.org/issue29672). The filter is removed again
    in a `finally` clause, so it no longer leaks when the managed body
    raises an exception (the original removed it only on normal exit).
    """
    flt = ('ignore', None, torch.jit.TracerWarning, None, 0)
    warnings.filters.insert(0, flt)
    try:
        yield
    finally:
        warnings.filters.remove(flt)
#----------------------------------------------------------------------------
# Assert that the shape of a tensor matches the given list of integers.
# None indicates that the size of a dimension is allowed to vary.
# Performs symbolic assertion when used in torch.jit.trace().
def assert_shape(tensor, ref_shape):
    """Assert that `tensor` matches `ref_shape`.

    `ref_shape` entries may be ints, `None` (any size is accepted for that
    dimension), or tensors (checked symbolically so it also works under
    torch.jit.trace()).
    """
    if tensor.ndim != len(ref_shape):
        raise AssertionError(f'Wrong number of dimensions: got {tensor.ndim}, expected {len(ref_shape)}')
    for idx, (size, ref_size) in enumerate(zip(tensor.shape, ref_shape)):
        if ref_size is None:
            continue  # Wildcard dimension.
        if isinstance(ref_size, torch.Tensor):
            with suppress_tracer_warnings(): # as_tensor results are registered as constants
                symbolic_assert(torch.equal(torch.as_tensor(size), ref_size), f'Wrong size for dimension {idx}')
        elif isinstance(size, torch.Tensor):
            with suppress_tracer_warnings(): # as_tensor results are registered as constants
                symbolic_assert(torch.equal(size, torch.as_tensor(ref_size)), f'Wrong size for dimension {idx}: expected {ref_size}')
        elif size != ref_size:
            raise AssertionError(f'Wrong size for dimension {idx}: got {size}, expected {ref_size}')
#----------------------------------------------------------------------------
# Function decorator that calls torch.autograd.profiler.record_function().
def profiled_function(fn):
    """Decorator that wraps `fn` in torch.autograd.profiler.record_function(),
    so calls show up as a named range in profiler traces."""
    def wrapper(*args, **kwargs):
        with torch.autograd.profiler.record_function(fn.__name__):
            return fn(*args, **kwargs)
    # Preserve the original name (only __name__, matching the original code).
    wrapper.__name__ = fn.__name__
    return wrapper
#----------------------------------------------------------------------------
# Sampler for torch.utils.data.DataLoader that loops over the dataset
# indefinitely, shuffling items as it goes.
class InfiniteSampler(torch.utils.data.Sampler):
    """Sampler that loops over a dataset forever, optionally shuffling items.

    Shuffling is done incrementally: on each step, the current item may be
    swapped with another item within a sliding window (`window_size` fraction
    of the dataset), so the order keeps changing over time. With multiple
    replicas, each rank yields every `num_replicas`-th index.
    """

    def __init__(self, dataset, rank=0, num_replicas=1, shuffle=True, seed=0, window_size=0.5):
        assert len(dataset) > 0
        assert num_replicas > 0
        assert 0 <= rank < num_replicas
        assert 0 <= window_size <= 1
        super().__init__(dataset)
        self.dataset = dataset
        self.rank = rank
        self.num_replicas = num_replicas
        self.shuffle = shuffle
        self.seed = seed
        self.window_size = window_size

    def __iter__(self):
        order = np.arange(len(self.dataset))
        rnd = None
        window = 0
        if self.shuffle:
            rnd = np.random.RandomState(self.seed)
            rnd.shuffle(order)
            window = int(np.rint(order.size * self.window_size))
        step = 0
        while True:
            pos = step % order.size
            if step % self.num_replicas == self.rank:
                yield order[pos]
            if window >= 2:
                # Swap the current item with a random earlier one inside the window.
                swap = (pos - rnd.randint(window)) % order.size
                order[pos], order[swap] = order[swap], order[pos]
            step += 1
#----------------------------------------------------------------------------
# Utilities for operating with torch.nn.Module parameters and buffers.
def params_and_buffers(module):
    """Return all parameters followed by all buffers of `module` as one list."""
    assert isinstance(module, torch.nn.Module)
    return [*module.parameters(), *module.buffers()]
def named_params_and_buffers(module):
    """Return (name, tensor) pairs for all parameters followed by all buffers."""
    assert isinstance(module, torch.nn.Module)
    return [*module.named_parameters(), *module.named_buffers()]
def copy_params_and_buffers(src_module, dst_module, require_all=False):
    """Copy parameters and buffers from `src_module` into `dst_module` by name.

    Destination tensors without a same-named source are left untouched, unless
    `require_all=True`, in which case a missing source is an error. Each copied
    tensor keeps its original `requires_grad` setting.
    """
    assert isinstance(src_module, torch.nn.Module)
    assert isinstance(dst_module, torch.nn.Module)
    missing_ok = not require_all
    source = dict(named_params_and_buffers(src_module))
    for name, dst_tensor in named_params_and_buffers(dst_module):
        if name not in source:
            assert missing_ok
            continue
        dst_tensor.copy_(source[name].detach()).requires_grad_(dst_tensor.requires_grad)
#----------------------------------------------------------------------------
# Context manager for easily enabling/disabling DistributedDataParallel
# synchronization.
@contextlib.contextmanager
def ddp_sync(module, sync):
    """Context manager to enable/disable DistributedDataParallel gradient sync.

    For non-DDP modules, or when `sync` is True, this is a no-op wrapper;
    otherwise the body runs under `module.no_sync()`.
    """
    assert isinstance(module, torch.nn.Module)
    if not sync and isinstance(module, torch.nn.parallel.DistributedDataParallel):
        with module.no_sync():
            yield
    else:
        yield
#----------------------------------------------------------------------------
# Check DistributedDataParallel consistency across processes.
def check_ddp_consistency(module, ignore_regex=None):
    """Assert that `module`'s params/buffers are identical on every process.

    Rank 0's value is broadcast and compared elementwise on each rank; names
    fully matching `ignore_regex` are skipped. Requires an initialized
    torch.distributed process group.
    """
    assert isinstance(module, torch.nn.Module)
    for name, tensor in named_params_and_buffers(module):
        fullname = f'{type(module).__name__}.{name}'
        if ignore_regex is not None and re.fullmatch(ignore_regex, fullname):
            continue
        local = tensor.detach()
        if local.is_floating_point():
            # Map NaN/Inf to finite values so equality comparison is meaningful.
            local = nan_to_num(local)
        reference = local.clone()
        torch.distributed.broadcast(tensor=reference, src=0)
        assert (local == reference).all(), fullname
#----------------------------------------------------------------------------
# Print summary table of module hierarchy.
def print_module_summary(module, inputs, max_nesting=3, skip_redundant=True):
    """Run `module(*inputs)` once and print a per-submodule summary table
    (parameter counts, buffer counts, output shapes and dtypes). Returns the
    module's outputs so the forward pass is not wasted.

    Args:
        module:         Module to summarize (must not be a ScriptModule).
        inputs:         Tuple/list of positional args passed to `module`.
        max_nesting:    Maximum submodule depth included in the table.
        skip_redundant: Omit rows that contribute no new params/buffers/outputs.
    """
    assert isinstance(module, torch.nn.Module)
    assert not isinstance(module, torch.jit.ScriptModule)
    assert isinstance(inputs, (tuple, list))
    # Register hooks.
    entries = []
    nesting = [0] # Current nesting depth; a list so the hook closures can mutate it.
    def pre_hook(_mod, _inputs):
        nesting[0] += 1
    def post_hook(mod, _inputs, outputs):
        nesting[0] -= 1
        if nesting[0] <= max_nesting:
            # Record only tensor outputs; non-tensors are dropped from the table.
            outputs = list(outputs) if isinstance(outputs, (tuple, list)) else [outputs]
            outputs = [t for t in outputs if isinstance(t, torch.Tensor)]
            entries.append(dnnlib.EasyDict(mod=mod, outputs=outputs))
    hooks = [mod.register_forward_pre_hook(pre_hook) for mod in module.modules()]
    hooks += [mod.register_forward_hook(post_hook) for mod in module.modules()]
    # Run module.
    outputs = module(*inputs)
    for hook in hooks:
        hook.remove()
    # Identify unique outputs, parameters, and buffers.
    # Each tensor is attributed to the first entry that owns it, by identity.
    tensors_seen = set()
    for e in entries:
        e.unique_params = [t for t in e.mod.parameters() if id(t) not in tensors_seen]
        e.unique_buffers = [t for t in e.mod.buffers() if id(t) not in tensors_seen]
        e.unique_outputs = [t for t in e.outputs if id(t) not in tensors_seen]
        tensors_seen |= {id(t) for t in e.unique_params + e.unique_buffers + e.unique_outputs}
    # Filter out redundant entries.
    if skip_redundant:
        entries = [e for e in entries if len(e.unique_params) or len(e.unique_buffers) or len(e.unique_outputs)]
    # Construct table.
    rows = [[type(module).__name__, 'Parameters', 'Buffers', 'Output shape', 'Datatype']]
    rows += [['---'] * len(rows[0])]
    param_total = 0
    buffer_total = 0
    submodule_names = {mod: name for name, mod in module.named_modules()}
    for e in entries:
        name = '<top-level>' if e.mod is module else submodule_names[e.mod]
        param_size = sum(t.numel() for t in e.unique_params)
        buffer_size = sum(t.numel() for t in e.unique_buffers)
        output_shapes = [str(list(t.shape)) for t in e.outputs]
        output_dtypes = [str(t.dtype).split('.')[-1] for t in e.outputs]
        rows += [[
            name + (':0' if len(e.outputs) >= 2 else ''),
            str(param_size) if param_size else '-',
            str(buffer_size) if buffer_size else '-',
            (output_shapes + ['-'])[0],
            (output_dtypes + ['-'])[0],
        ]]
        # Additional outputs of the same module get their own ':idx' rows.
        for idx in range(1, len(e.outputs)):
            rows += [[name + f':{idx}', '-', '-', output_shapes[idx], output_dtypes[idx]]]
        param_total += param_size
        buffer_total += buffer_size
    rows += [['---'] * len(rows[0])]
    rows += [['Total', str(param_total), str(buffer_total), '-', '-']]
    # Print table.
    widths = [max(len(cell) for cell in column) for column in zip(*rows)]
    print()
    for row in rows:
        print(' '.join(cell + ' ' * (width - len(cell)) for cell, width in zip(row, widths)))
    print()
    return outputs
#----------------------------------------------------------------------------
| 11,106 | 40.599251 | 133 | py |
DiffProxy | DiffProxy-main/stylegan/torch_utils/ops/bias_act.py | # Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
"""Custom PyTorch ops for efficient bias and activation."""
import os
import numpy as np
import torch
import dnnlib
from .. import custom_ops
from .. import misc
#----------------------------------------------------------------------------
# Table of supported activations. Each entry records:
#   func         -- eager PyTorch implementation used by the reference path.
#   def_alpha    -- default shape parameter (e.g. leaky-ReLU negative slope).
#   def_gain     -- default output scaling applied after the activation.
#   cuda_idx     -- opcode understood by the compiled bias_act CUDA plugin.
#   ref          -- which tensor the backward pass reads ('x' input / 'y' output).
#   has_2nd_grad -- whether a dedicated second-order gradient kernel exists.
activation_funcs = {
    'linear': dnnlib.EasyDict(func=lambda x, **_: x, def_alpha=0, def_gain=1, cuda_idx=1, ref='', has_2nd_grad=False),
    'relu': dnnlib.EasyDict(func=lambda x, **_: torch.nn.functional.relu(x), def_alpha=0, def_gain=np.sqrt(2), cuda_idx=2, ref='y', has_2nd_grad=False),
    'lrelu': dnnlib.EasyDict(func=lambda x, alpha, **_: torch.nn.functional.leaky_relu(x, alpha), def_alpha=0.2, def_gain=np.sqrt(2), cuda_idx=3, ref='y', has_2nd_grad=False),
    'tanh': dnnlib.EasyDict(func=lambda x, **_: torch.tanh(x), def_alpha=0, def_gain=1, cuda_idx=4, ref='y', has_2nd_grad=True),
    'sigmoid': dnnlib.EasyDict(func=lambda x, **_: torch.sigmoid(x), def_alpha=0, def_gain=1, cuda_idx=5, ref='y', has_2nd_grad=True),
    'elu': dnnlib.EasyDict(func=lambda x, **_: torch.nn.functional.elu(x), def_alpha=0, def_gain=1, cuda_idx=6, ref='y', has_2nd_grad=True),
    'selu': dnnlib.EasyDict(func=lambda x, **_: torch.nn.functional.selu(x), def_alpha=0, def_gain=1, cuda_idx=7, ref='y', has_2nd_grad=True),
    'softplus': dnnlib.EasyDict(func=lambda x, **_: torch.nn.functional.softplus(x), def_alpha=0, def_gain=1, cuda_idx=8, ref='y', has_2nd_grad=True),
    'swish': dnnlib.EasyDict(func=lambda x, **_: torch.sigmoid(x) * x, def_alpha=0, def_gain=np.sqrt(2), cuda_idx=9, ref='x', has_2nd_grad=True),
}
#----------------------------------------------------------------------------
_plugin = None # Lazily-compiled CUDA extension module; populated by _init().
_null_tensor = torch.empty([0]) # Sentinel passed to the plugin in place of "no tensor".
def _init():
    """Compile/load the bias_act CUDA plugin on first use. Always returns True."""
    global _plugin
    if _plugin is None:
        _plugin = custom_ops.get_plugin(
            module_name='bias_act_plugin',
            sources=['bias_act.cpp', 'bias_act.cu'],
            headers=['bias_act.h'],
            source_dir=os.path.dirname(__file__),
            extra_cuda_cflags=['--use_fast_math'],
        )
    return True
#----------------------------------------------------------------------------
def bias_act(x, b=None, dim=1, act='linear', alpha=None, gain=None, clamp=None, impl='cuda'):
    r"""Fused bias and activation function.

    Adds bias `b` to `x` along dimension `dim`, applies the activation `act`,
    scales the result by `gain`, and optionally clamps it to `[-clamp, +clamp]`.
    Each step is optional. Supports first and second order gradients, but not
    third order.

    Args:
        x:     Input activation tensor of any shape.
        b:     Optional 1D bias tensor of the same dtype as `x`; its length
               must match `x.shape[dim]`.
        dim:   Dimension of `x` that `b` is applied along (ignored if `b` is None).
        act:   Activation name; see `activation_funcs` for the supported set.
        alpha: Activation shape parameter, or None for the per-activation default.
        gain:  Output scale factor, or None for the per-activation default.
        clamp: Clamp outputs to `[-clamp, +clamp]`, or None to disable.
        impl:  'cuda' to use the compiled plugin on CUDA tensors, 'ref' to
               force the pure-PyTorch reference path.

    Returns:
        Tensor of the same shape and datatype as `x`.
    """
    assert isinstance(x, torch.Tensor)
    assert impl in ['ref', 'cuda']
    use_plugin = impl == 'cuda' and x.device.type == 'cuda' and _init()
    if use_plugin:
        return _bias_act_cuda(dim=dim, act=act, alpha=alpha, gain=gain, clamp=clamp).apply(x, b)
    return _bias_act_ref(x=x, b=b, dim=dim, act=act, alpha=alpha, gain=gain, clamp=clamp)
#----------------------------------------------------------------------------
@misc.profiled_function
def _bias_act_ref(x, b=None, dim=1, act='linear', alpha=None, gain=None, clamp=None):
    """Reference implementation of `bias_act()` using standard PyTorch ops."""
    assert isinstance(x, torch.Tensor)
    assert clamp is None or clamp >= 0
    spec = activation_funcs[act]
    # Resolve defaults; clamp < 0 means "disabled".
    alpha = float(alpha if alpha is not None else spec.def_alpha)
    gain = float(gain if gain is not None else spec.def_gain)
    clamp = float(clamp if clamp is not None else -1)

    # Add bias, broadcasting it along `dim`.
    if b is not None:
        assert isinstance(b, torch.Tensor) and b.ndim == 1
        assert 0 <= dim < x.ndim
        assert b.shape[0] == x.shape[dim]
        bshape = [-1 if axis == dim else 1 for axis in range(x.ndim)]
        x = x + b.reshape(bshape)

    # Activation, gain, clamp.
    x = spec.func(x, alpha=alpha)
    if gain != 1:
        x = x * gain
    if clamp >= 0:
        x = x.clamp(-clamp, clamp) # pylint: disable=invalid-unary-operand-type
    return x
#----------------------------------------------------------------------------
_bias_act_cuda_cache = dict() # Maps (dim, act, alpha, gain, clamp) -> cached autograd.Function class.
def _bias_act_cuda(dim=1, act='linear', alpha=None, gain=None, clamp=None):
    """Fast CUDA implementation of `bias_act()` using custom ops.

    Returns an autograd.Function class specialized for the given static
    parameters; classes are cached per parameter combination.
    """
    # Parse arguments.
    assert clamp is None or clamp >= 0
    spec = activation_funcs[act]
    alpha = float(alpha if alpha is not None else spec.def_alpha)
    gain = float(gain if gain is not None else spec.def_gain)
    clamp = float(clamp if clamp is not None else -1) # clamp < 0 means "disabled".
    # Lookup from cache.
    key = (dim, act, alpha, gain, clamp)
    if key in _bias_act_cuda_cache:
        return _bias_act_cuda_cache[key]
    # Forward op.
    class BiasActCuda(torch.autograd.Function):
        @staticmethod
        def forward(ctx, x, b): # pylint: disable=arguments-differ
            # Preserve channels-last layout when the input uses it.
            ctx.memory_format = torch.channels_last if x.ndim > 2 and x.stride(1) == 1 else torch.contiguous_format
            x = x.contiguous(memory_format=ctx.memory_format)
            b = b.contiguous() if b is not None else _null_tensor
            y = x
            # Skip the kernel entirely when the whole op is an identity.
            if act != 'linear' or gain != 1 or clamp >= 0 or b is not _null_tensor:
                y = _plugin.bias_act(x, b, _null_tensor, _null_tensor, _null_tensor, 0, dim, spec.cuda_idx, alpha, gain, clamp)
            # Save only the tensors the backward pass will actually read.
            ctx.save_for_backward(
                x if 'x' in spec.ref or spec.has_2nd_grad else _null_tensor,
                b if 'x' in spec.ref or spec.has_2nd_grad else _null_tensor,
                y if 'y' in spec.ref else _null_tensor)
            return y
        @staticmethod
        def backward(ctx, dy): # pylint: disable=arguments-differ
            dy = dy.contiguous(memory_format=ctx.memory_format)
            x, b, y = ctx.saved_tensors
            dx = None
            db = None
            if ctx.needs_input_grad[0] or ctx.needs_input_grad[1]:
                dx = dy
                if act != 'linear' or gain != 1 or clamp >= 0:
                    dx = BiasActCudaGrad.apply(dy, x, b, y)
            if ctx.needs_input_grad[1]:
                # Bias gradient: reduce over every dimension except `dim`.
                db = dx.sum([i for i in range(dx.ndim) if i != dim])
            return dx, db
    # Backward op.
    class BiasActCudaGrad(torch.autograd.Function):
        @staticmethod
        def forward(ctx, dy, x, b, y): # pylint: disable=arguments-differ
            ctx.memory_format = torch.channels_last if dy.ndim > 2 and dy.stride(1) == 1 else torch.contiguous_format
            # Plugin grad mode 1: first-order gradient of the activation.
            dx = _plugin.bias_act(dy, b, x, y, _null_tensor, 1, dim, spec.cuda_idx, alpha, gain, clamp)
            ctx.save_for_backward(
                dy if spec.has_2nd_grad else _null_tensor,
                x, b, y)
            return dx
        @staticmethod
        def backward(ctx, d_dx): # pylint: disable=arguments-differ
            d_dx = d_dx.contiguous(memory_format=ctx.memory_format)
            dy, x, b, y = ctx.saved_tensors
            d_dy = None
            d_x = None
            d_b = None
            d_y = None
            if ctx.needs_input_grad[0]:
                d_dy = BiasActCudaGrad.apply(d_dx, x, b, y)
            if spec.has_2nd_grad and (ctx.needs_input_grad[1] or ctx.needs_input_grad[2]):
                # Plugin grad mode 2: second-order gradient of the activation.
                d_x = _plugin.bias_act(d_dx, b, x, y, dy, 2, dim, spec.cuda_idx, alpha, gain, clamp)
            if spec.has_2nd_grad and ctx.needs_input_grad[2]:
                d_b = d_x.sum([i for i in range(d_x.ndim) if i != dim])
            return d_dy, d_x, d_b, d_y
    # Add to cache.
    _bias_act_cuda_cache[key] = BiasActCuda
    return BiasActCuda
#----------------------------------------------------------------------------
| 9,813 | 45.733333 | 185 | py |
DiffProxy | DiffProxy-main/stylegan/torch_utils/ops/grid_sample_gradfix.py | # Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
"""Custom replacement for `torch.nn.functional.grid_sample` that
supports arbitrarily high order gradients between the input and output.
Only works on 2D images and assumes
`mode='bilinear'`, `padding_mode='zeros'`, `align_corners=False`."""
import torch
# pylint: disable=redefined-builtin
# pylint: disable=arguments-differ
# pylint: disable=protected-access
#----------------------------------------------------------------------------
enabled = False # Enable the custom op by setting this to true.
#----------------------------------------------------------------------------
def grid_sample(input, grid):
    """Drop-in replacement for torch.nn.functional.grid_sample (bilinear,
    zero padding, align_corners=False) that supports arbitrarily high order
    gradients when the custom op is enabled."""
    if not _should_use_custom_op():
        return torch.nn.functional.grid_sample(input=input, grid=grid, mode='bilinear', padding_mode='zeros', align_corners=False)
    return _GridSample2dForward.apply(input, grid)
#----------------------------------------------------------------------------
def _should_use_custom_op():
    # Gate on the module-level `enabled` flag; no other conditions apply here.
    return enabled
#----------------------------------------------------------------------------
class _GridSample2dForward(torch.autograd.Function):
    # Forward = plain bilinear grid_sample; backward is routed through
    # _GridSample2dBackward so that second-order gradients are expressible.
    @staticmethod
    def forward(ctx, input, grid):
        # Only 2D images are supported: [N, C, H, W] input, [N, H', W', 2] grid.
        assert input.ndim == 4
        assert grid.ndim == 4
        output = torch.nn.functional.grid_sample(input=input, grid=grid, mode='bilinear', padding_mode='zeros', align_corners=False)
        ctx.save_for_backward(input, grid)
        return output
    @staticmethod
    def backward(ctx, grad_output):
        input, grid = ctx.saved_tensors
        grad_input, grad_grid = _GridSample2dBackward.apply(grad_output, input, grid)
        return grad_input, grad_grid
#----------------------------------------------------------------------------
class _GridSample2dBackward(torch.autograd.Function):
    @staticmethod
    def forward(ctx, grad_output, input, grid):
        # NOTE(review): relies on the private aten::grid_sampler_2d_backward op;
        # its signature/return convention changed in later PyTorch releases
        # (e.g. 1.11 added an output_mask argument) -- confirm against the
        # installed torch version.
        op = torch._C._jit_get_operation('aten::grid_sampler_2d_backward')
        grad_input, grad_grid = op(grad_output, input, grid, 0, 0, False)
        ctx.save_for_backward(grid)
        return grad_input, grad_grid
    @staticmethod
    def backward(ctx, grad2_grad_input, grad2_grad_grid):
        _ = grad2_grad_grid # unused
        grid, = ctx.saved_tensors
        grad2_grad_output = None
        grad2_input = None
        grad2_grid = None
        if ctx.needs_input_grad[0]:
            # Second-order gradient w.r.t. grad_output is just a forward grid_sample.
            grad2_grad_output = _GridSample2dForward.apply(grad2_grad_input, grid)
        # Gradients w.r.t. the grid are not implemented at this order.
        assert not ctx.needs_input_grad[2]
        return grad2_grad_output, grad2_input, grad2_grid
#----------------------------------------------------------------------------
| 3,020 | 37.730769 | 132 | py |
DiffProxy | DiffProxy-main/stylegan/torch_utils/ops/conv2d_gradfix.py | # Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
"""Custom replacement for `torch.nn.functional.conv2d` that supports
arbitrarily high order gradients with zero performance penalty."""
import contextlib
import torch
# pylint: disable=redefined-builtin
# pylint: disable=arguments-differ
# pylint: disable=protected-access
#----------------------------------------------------------------------------
enabled = False # Enable the custom op by setting this to true.
weight_gradients_disabled = False # Forcefully disable computation of gradients with respect to the weights.
@contextlib.contextmanager
def no_weight_gradients(disable=True):
    """Context manager that temporarily sets `weight_gradients_disabled`,
    skipping weight-gradient computation inside the custom conv2d ops.

    Args:
        disable: If False, the flag is left untouched (no-op context).
    """
    global weight_gradients_disabled
    old = weight_gradients_disabled
    if disable:
        weight_gradients_disabled = True
    try:
        yield
    finally:
        # Restore the previous value even if the body raises; the previous
        # implementation leaked the flag on exceptions.
        weight_gradients_disabled = old
#----------------------------------------------------------------------------
def conv2d(input, weight, bias=None, stride=1, padding=0, dilation=1, groups=1):
    """Drop-in replacement for torch.nn.functional.conv2d with support for
    arbitrarily high order gradients when the custom op is enabled."""
    if not _should_use_custom_op(input):
        return torch.nn.functional.conv2d(input=input, weight=weight, bias=bias, stride=stride, padding=padding, dilation=dilation, groups=groups)
    op = _conv2d_gradfix(transpose=False, weight_shape=weight.shape, stride=stride, padding=padding, output_padding=0, dilation=dilation, groups=groups)
    return op.apply(input, weight, bias)
def conv_transpose2d(input, weight, bias=None, stride=1, padding=0, output_padding=0, groups=1, dilation=1):
    """Drop-in replacement for torch.nn.functional.conv_transpose2d with support
    for arbitrarily high order gradients when the custom op is enabled."""
    if not _should_use_custom_op(input):
        return torch.nn.functional.conv_transpose2d(input=input, weight=weight, bias=bias, stride=stride, padding=padding, output_padding=output_padding, groups=groups, dilation=dilation)
    op = _conv2d_gradfix(transpose=True, weight_shape=weight.shape, stride=stride, padding=padding, output_padding=output_padding, groups=groups, dilation=dilation)
    return op.apply(input, weight, bias)
#----------------------------------------------------------------------------
def _should_use_custom_op(input):
    """Use the custom op only when enabled, cuDNN is available, and the
    input lives on a CUDA device."""
    assert isinstance(input, torch.Tensor)
    if not enabled or not torch.backends.cudnn.enabled:
        return False
    return input.device.type == 'cuda'
def _tuple_of_ints(xs, ndim):
xs = tuple(xs) if isinstance(xs, (tuple, list)) else (xs,) * ndim
assert len(xs) == ndim
assert all(isinstance(x, int) for x in xs)
return xs
#----------------------------------------------------------------------------
_conv2d_gradfix_cache = dict() # Maps static op parameters -> cached autograd.Function class.
_null_tensor = torch.empty([0]) # Sentinel saved in place of tensors the backward pass does not need.
def _conv2d_gradfix(transpose, weight_shape, stride, padding, output_padding, dilation, groups):
    """Construct (and cache) a custom autograd.Function implementing
    conv2d / conv_transpose2d with the given static parameters while
    supporting arbitrarily high order gradients.
    """
    # Parse arguments.
    ndim = 2
    weight_shape = tuple(weight_shape)
    stride = _tuple_of_ints(stride, ndim)
    padding = _tuple_of_ints(padding, ndim)
    output_padding = _tuple_of_ints(output_padding, ndim)
    dilation = _tuple_of_ints(dilation, ndim)
    # Lookup from cache.
    key = (transpose, weight_shape, stride, padding, output_padding, dilation, groups)
    if key in _conv2d_gradfix_cache:
        return _conv2d_gradfix_cache[key]
    # Validate arguments.
    assert groups >= 1
    assert len(weight_shape) == ndim + 2
    assert all(stride[i] >= 1 for i in range(ndim))
    assert all(padding[i] >= 0 for i in range(ndim))
    assert all(dilation[i] >= 0 for i in range(ndim))
    if not transpose:
        assert all(output_padding[i] == 0 for i in range(ndim))
    else: # transpose
        assert all(0 <= output_padding[i] < max(stride[i], dilation[i]) for i in range(ndim))
    # Helpers.
    common_kwargs = dict(stride=stride, padding=padding, dilation=dilation, groups=groups)
    def calc_output_padding(input_shape, output_shape):
        # Output padding that makes the opposite-direction convolution map
        # `output_shape` back to exactly `input_shape`.
        if transpose:
            return [0, 0]
        return [
            input_shape[i + 2]
            - (output_shape[i + 2] - 1) * stride[i]
            - (1 - 2 * padding[i])
            - dilation[i] * (weight_shape[i + 2] - 1)
            for i in range(ndim)
        ]
    # Forward & backward.
    class Conv2d(torch.autograd.Function):
        @staticmethod
        def forward(ctx, input, weight, bias):
            assert weight.shape == weight_shape
            # Save only the tensors the backward pass will actually read.
            ctx.save_for_backward(
                input if weight.requires_grad else _null_tensor,
                weight if input.requires_grad else _null_tensor,
            )
            ctx.input_shape = input.shape
            # Simple 1x1 convolution => cuBLAS (only on Volta, not on Ampere).
            if weight_shape[2:] == stride == dilation == (1, 1) and padding == (0, 0) and torch.cuda.get_device_capability(input.device) < (8, 0):
                a = weight.reshape(groups, weight_shape[0] // groups, weight_shape[1])
                b = input.reshape(input.shape[0], groups, input.shape[1] // groups, -1)
                c = (a.transpose(1, 2) if transpose else a) @ b.permute(1, 2, 0, 3).flatten(2)
                c = c.reshape(-1, input.shape[0], *input.shape[2:]).transpose(0, 1)
                c = c if bias is None else c + bias.unsqueeze(0).unsqueeze(2).unsqueeze(3)
                return c.contiguous(memory_format=(torch.channels_last if input.stride(1) == 1 else torch.contiguous_format))
            # General case => cuDNN.
            if transpose:
                return torch.nn.functional.conv_transpose2d(input=input, weight=weight, bias=bias, output_padding=output_padding, **common_kwargs)
            return torch.nn.functional.conv2d(input=input, weight=weight, bias=bias, **common_kwargs)
        @staticmethod
        def backward(ctx, grad_output):
            input, weight = ctx.saved_tensors
            input_shape = ctx.input_shape
            grad_input = None
            grad_weight = None
            grad_bias = None
            if ctx.needs_input_grad[0]:
                # Input gradient = opposite-direction convolution with the same weights.
                p = calc_output_padding(input_shape=input_shape, output_shape=grad_output.shape)
                op = _conv2d_gradfix(transpose=(not transpose), weight_shape=weight_shape, output_padding=p, **common_kwargs)
                grad_input = op.apply(grad_output, weight, None)
                assert grad_input.shape == input_shape
            if ctx.needs_input_grad[1] and not weight_gradients_disabled:
                grad_weight = Conv2dGradWeight.apply(grad_output, input)
                assert grad_weight.shape == weight_shape
            if ctx.needs_input_grad[2]:
                grad_bias = grad_output.sum([0, 2, 3])
            return grad_input, grad_weight, grad_bias
    # Gradient with respect to the weights.
    class Conv2dGradWeight(torch.autograd.Function):
        @staticmethod
        def forward(ctx, grad_output, input):
            ctx.save_for_backward(
                grad_output if input.requires_grad else _null_tensor,
                input if grad_output.requires_grad else _null_tensor,
            )
            ctx.grad_output_shape = grad_output.shape
            ctx.input_shape = input.shape
            # Simple 1x1 convolution => cuBLAS (on both Volta and Ampere).
            if weight_shape[2:] == stride == dilation == (1, 1) and padding == (0, 0):
                a = grad_output.reshape(grad_output.shape[0], groups, grad_output.shape[1] // groups, -1).permute(1, 2, 0, 3).flatten(2)
                b = input.reshape(input.shape[0], groups, input.shape[1] // groups, -1).permute(1, 2, 0, 3).flatten(2)
                c = (b @ a.transpose(1, 2) if transpose else a @ b.transpose(1, 2)).reshape(weight_shape)
                return c.contiguous(memory_format=(torch.channels_last if input.stride(1) == 1 else torch.contiguous_format))
            # General case => cuDNN.
            # NOTE(review): these private aten ops were removed/renamed in later
            # PyTorch releases (>= 1.11) -- confirm against the installed version.
            name = 'aten::cudnn_convolution_transpose_backward_weight' if transpose else 'aten::cudnn_convolution_backward_weight'
            flags = [torch.backends.cudnn.benchmark, torch.backends.cudnn.deterministic, torch.backends.cudnn.allow_tf32]
            return torch._C._jit_get_operation(name)(weight_shape, grad_output, input, padding, stride, dilation, groups, *flags)
        @staticmethod
        def backward(ctx, grad2_grad_weight):
            grad_output, input = ctx.saved_tensors
            grad_output_shape = ctx.grad_output_shape
            input_shape = ctx.input_shape
            grad2_grad_output = None
            grad2_input = None
            if ctx.needs_input_grad[0]:
                grad2_grad_output = Conv2d.apply(input, grad2_grad_weight, None)
                assert grad2_grad_output.shape == grad_output_shape
            if ctx.needs_input_grad[1]:
                p = calc_output_padding(input_shape=input_shape, output_shape=grad_output_shape)
                op = _conv2d_gradfix(transpose=(not transpose), weight_shape=weight_shape, output_padding=p, **common_kwargs)
                grad2_input = op.apply(grad_output, grad2_grad_weight, None)
                assert grad2_input.shape == input_shape
            return grad2_grad_output, grad2_input
    _conv2d_gradfix_cache[key] = Conv2d
    return Conv2d
#----------------------------------------------------------------------------
| 9,465 | 46.567839 | 197 | py |
DiffProxy | DiffProxy-main/stylegan/torch_utils/ops/upfirdn2d.py | # Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
"""Custom PyTorch ops for efficient resampling of 2D images."""
import os
import numpy as np
import torch
from .. import custom_ops
from .. import misc
from . import conv2d_gradfix
#----------------------------------------------------------------------------
_plugin = None # Lazily-compiled CUDA extension module; populated by _init().
def _init():
    """Compile/load the upfirdn2d CUDA plugin on first use. Always returns True."""
    global _plugin
    if _plugin is None:
        _plugin = custom_ops.get_plugin(
            module_name='upfirdn2d_plugin',
            sources=['upfirdn2d.cpp', 'upfirdn2d.cu'],
            headers=['upfirdn2d.h'],
            source_dir=os.path.dirname(__file__),
            extra_cuda_cflags=['--use_fast_math'],
        )
    return True
def _parse_scaling(scaling):
if isinstance(scaling, int):
scaling = [scaling, scaling]
assert isinstance(scaling, (list, tuple))
assert all(isinstance(x, int) for x in scaling)
sx, sy = scaling
assert sx >= 1 and sy >= 1
return sx, sy
def _parse_padding(padding):
if isinstance(padding, int):
padding = [padding, padding]
assert isinstance(padding, (list, tuple))
assert all(isinstance(x, int) for x in padding)
if len(padding) == 2:
padx, pady = padding
padding = [padx, padx, pady, pady]
padx0, padx1, pady0, pady1 = padding
return padx0, padx1, pady0, pady1
def _get_filter_size(f):
if f is None:
return 1, 1
assert isinstance(f, torch.Tensor) and f.ndim in [1, 2]
fw = f.shape[-1]
fh = f.shape[0]
with misc.suppress_tracer_warnings():
fw = int(fw)
fh = int(fh)
misc.assert_shape(f, [fh, fw][:f.ndim])
assert fw >= 1 and fh >= 1
return fw, fh
#----------------------------------------------------------------------------
def setup_filter(f, device=torch.device('cpu'), normalize=True, flip_filter=False, gain=1, separable=None):
r"""Convenience function to setup 2D FIR filter for `upfirdn2d()`.
Args:
f: Torch tensor, numpy array, or python list of the shape
`[filter_height, filter_width]` (non-separable),
`[filter_taps]` (separable),
`[]` (impulse), or
`None` (identity).
device: Result device (default: cpu).
normalize: Normalize the filter so that it retains the magnitude
for constant input signal (DC)? (default: True).
flip_filter: Flip the filter? (default: False).
gain: Overall scaling factor for signal magnitude (default: 1).
separable: Return a separable filter? (default: select automatically).
Returns:
Float32 tensor of the shape
`[filter_height, filter_width]` (non-separable) or
`[filter_taps]` (separable).
"""
# Validate.
if f is None:
f = 1
f = torch.as_tensor(f, dtype=torch.float32)
assert f.ndim in [0, 1, 2]
assert f.numel() > 0
if f.ndim == 0:
f = f[np.newaxis]
# Separable?
if separable is None:
separable = (f.ndim == 1 and f.numel() >= 8)
if f.ndim == 1 and not separable:
f = f.ger(f)
assert f.ndim == (1 if separable else 2)
# Apply normalize, flip, gain, and device.
if normalize:
f /= f.sum()
if flip_filter:
f = f.flip(list(range(f.ndim)))
f = f * (gain ** (f.ndim / 2))
f = f.to(device=device)
return f
#----------------------------------------------------------------------------
def upfirdn2d(x, f, up=1, down=1, padding=0, flip_filter=False, gain=1, impl='cuda'):
    r"""Pad, upsample, FIR-filter, and downsample a batch of 2D images.

    Per channel, the op (1) upsamples by inserting `up - 1` zeros after each
    pixel, (2) pads by `padding` zeros on each side (negative values crop),
    (3) convolves with the 2D FIR filter `f`, keeping only output pixels whose
    footprint lies inside the input, and (4) downsamples by keeping every Nth
    pixel (`down`). This closely mirrors scipy.signal.upfirdn, fused for
    efficiency, and supports gradients of arbitrary order.

    Args:
        x:           Float16/32/64 input of shape
                     `[batch_size, num_channels, in_height, in_width]`.
        f:           Float32 FIR filter of shape `[filter_height, filter_width]`
                     (non-separable), `[filter_taps]` (separable), or
                     `None` (identity).
        up:          Integer upsampling factor, int or `[x, y]` (default: 1).
        down:        Integer downsampling factor, int or `[x, y]` (default: 1).
        padding:     Padding w.r.t. the upsampled image: int, `[x, y]`, or
                     `[x_before, x_after, y_before, y_after]` (default: 0).
        flip_filter: False = convolution, True = correlation (default: False).
        gain:        Overall scaling factor for signal magnitude (default: 1).
        impl:        Implementation, `'ref'` or `'cuda'` (default: `'cuda'`).

    Returns:
        Tensor of shape `[batch_size, num_channels, out_height, out_width]`.
    """
    assert isinstance(x, torch.Tensor)
    assert impl in ['ref', 'cuda']
    use_plugin = impl == 'cuda' and x.device.type == 'cuda' and _init()
    if use_plugin:
        return _upfirdn2d_cuda(up=up, down=down, padding=padding, flip_filter=flip_filter, gain=gain).apply(x, f)
    return _upfirdn2d_ref(x, f, up=up, down=down, padding=padding, flip_filter=flip_filter, gain=gain)
#----------------------------------------------------------------------------
@misc.profiled_function
def _upfirdn2d_ref(x, f, up=1, down=1, padding=0, flip_filter=False, gain=1):
    """Slow reference implementation of `upfirdn2d()` using standard PyTorch ops.
    """
    # Validate arguments.
    assert isinstance(x, torch.Tensor) and x.ndim == 4
    if f is None:
        f = torch.ones([1, 1], dtype=torch.float32, device=x.device)
    assert isinstance(f, torch.Tensor) and f.ndim in [1, 2]
    assert f.dtype == torch.float32 and not f.requires_grad
    batch_size, num_channels, in_height, in_width = x.shape
    upx, upy = _parse_scaling(up)
    downx, downy = _parse_scaling(down)
    padx0, padx1, pady0, pady1 = _parse_padding(padding)
    # Check that upsampled buffer is not smaller than the filter.
    upW = in_width * upx + padx0 + padx1
    upH = in_height * upy + pady0 + pady1
    assert upW >= f.shape[-1] and upH >= f.shape[0]
    # Upsample by inserting zeros after each pixel, via reshape + pad.
    x = x.reshape([batch_size, num_channels, in_height, 1, in_width, 1])
    x = torch.nn.functional.pad(x, [0, upx - 1, 0, 0, 0, upy - 1])
    x = x.reshape([batch_size, num_channels, in_height * upy, in_width * upx])
    # Pad or crop. Positive padding pads with zeros; negative padding crops.
    x = torch.nn.functional.pad(x, [max(padx0, 0), max(padx1, 0), max(pady0, 0), max(pady1, 0)])
    x = x[:, :, max(-pady0, 0) : x.shape[2] - max(-pady1, 0), max(-padx0, 0) : x.shape[3] - max(-padx1, 0)]
    # Setup filter: apply gain, match dtype, and flip for true convolution.
    f = f * (gain ** (f.ndim / 2))
    f = f.to(x.dtype)
    if not flip_filter:
        f = f.flip(list(range(f.ndim)))
    # Convolve with the filter using a grouped (depthwise) convolution.
    # A 1D filter is applied separably as two passes (rows then columns).
    f = f[np.newaxis, np.newaxis].repeat([num_channels, 1] + [1] * f.ndim)
    if f.ndim == 4:
        x = conv2d_gradfix.conv2d(input=x, weight=f, groups=num_channels)
    else:
        x = conv2d_gradfix.conv2d(input=x, weight=f.unsqueeze(2), groups=num_channels)
        x = conv2d_gradfix.conv2d(input=x, weight=f.unsqueeze(3), groups=num_channels)
    # Downsample by throwing away pixels.
    x = x[:, :, ::downy, ::downx]
    return x
#----------------------------------------------------------------------------
# Cache of generated Upfirdn2dCuda classes, keyed by the op parameters, so
# identical configurations share a single autograd.Function subclass.
_upfirdn2d_cuda_cache = dict()

def _upfirdn2d_cuda(up=1, down=1, padding=0, flip_filter=False, gain=1):
    """Fast CUDA implementation of `upfirdn2d()` using custom ops.

    Returns a `torch.autograd.Function` subclass specialized for the given
    parameters; call its `.apply(x, f)` to execute the op.
    """
    # Parse arguments.
    upx, upy = _parse_scaling(up)
    downx, downy = _parse_scaling(down)
    padx0, padx1, pady0, pady1 = _parse_padding(padding)

    # Lookup from cache.
    key = (upx, upy, downx, downy, padx0, padx1, pady0, pady1, flip_filter, gain)
    if key in _upfirdn2d_cuda_cache:
        return _upfirdn2d_cuda_cache[key]

    # Forward op.
    class Upfirdn2dCuda(torch.autograd.Function):
        @staticmethod
        def forward(ctx, x, f): # pylint: disable=arguments-differ
            assert isinstance(x, torch.Tensor) and x.ndim == 4
            if f is None:
                f = torch.ones([1, 1], dtype=torch.float32, device=x.device)
            if f.ndim == 1 and f.shape[0] == 1:
                f = f.square().unsqueeze(0) # Convert separable-1 into full-1x1.
            assert isinstance(f, torch.Tensor) and f.ndim in [1, 2]
            y = x
            if f.ndim == 2:
                y = _plugin.upfirdn2d(y, f, upx, upy, downx, downy, padx0, padx1, pady0, pady1, flip_filter, gain)
            else:
                # Separable filter: two 1D plugin passes (horizontal, then
                # vertical); the full gain is applied only in the second pass.
                y = _plugin.upfirdn2d(y, f.unsqueeze(0), upx, 1, downx, 1, padx0, padx1, 0, 0, flip_filter, 1.0)
                y = _plugin.upfirdn2d(y, f.unsqueeze(1), 1, upy, 1, downy, 0, 0, pady0, pady1, flip_filter, gain)
            ctx.save_for_backward(f)
            ctx.x_shape = x.shape
            return y

        @staticmethod
        def backward(ctx, dy): # pylint: disable=arguments-differ
            f, = ctx.saved_tensors
            _, _, ih, iw = ctx.x_shape
            _, _, oh, ow = dy.shape
            fw, fh = _get_filter_size(f)
            # Padding for the gradient op, chosen so its output matches the
            # spatial extent of the forward input.
            p = [
                fw - padx0 - 1,
                iw * upx - ow * downx + padx0 - upx + 1,
                fh - pady0 - 1,
                ih * upy - oh * downy + pady0 - upy + 1,
            ]
            dx = None
            df = None
            if ctx.needs_input_grad[0]:
                # Gradient of upfirdn2d is upfirdn2d with up/down swapped and
                # the filter orientation flipped.
                dx = _upfirdn2d_cuda(up=down, down=up, padding=p, flip_filter=(not flip_filter), gain=gain).apply(dy, f)
            assert not ctx.needs_input_grad[1]
            return dx, df

    # Add to cache.
    _upfirdn2d_cuda_cache[key] = Upfirdn2dCuda
    return Upfirdn2dCuda
#----------------------------------------------------------------------------
def filter2d(x, f, padding=0, flip_filter=False, gain=1, impl='cuda'):
    r"""Filter a batch of 2D images with the given 2D FIR filter.

    The result is padded so that its spatial shape matches the input; any
    user-specified `padding` is applied on top of that, with negative values
    indicating cropping. Pixels outside the image are treated as zero.

    Args:
        x:           Float32/float64/float16 input tensor of shape
                     `[batch_size, num_channels, in_height, in_width]`.
        f:           Float32 FIR filter of shape `[filter_height, filter_width]`
                     (non-separable), `[filter_taps]` (separable), or
                     `None` (identity).
        padding:     Padding with respect to the output; a single number,
                     `[x, y]`, or `[x_before, x_after, y_before, y_after]`
                     (default: 0).
        flip_filter: False = convolution, True = correlation (default: False).
        gain:        Overall scaling factor for signal magnitude (default: 1).
        impl:        Implementation to use, `'ref'` or `'cuda'` (default: `'cuda'`).

    Returns:
        Tensor of shape `[batch_size, num_channels, out_height, out_width]`.
    """
    px0, px1, py0, py1 = _parse_padding(padding)
    fw, fh = _get_filter_size(f)
    # Grow the requested padding by half the filter footprint on each side so
    # the filtered output keeps the input's spatial extent.
    total_padding = [
        px0 + fw // 2,
        px1 + (fw - 1) // 2,
        py0 + fh // 2,
        py1 + (fh - 1) // 2,
    ]
    return upfirdn2d(x, f, padding=total_padding, flip_filter=flip_filter, gain=gain, impl=impl)
#----------------------------------------------------------------------------
def upsample2d(x, f, up=2, padding=0, flip_filter=False, gain=1, impl='cuda'):
    r"""Upsample a batch of 2D images with the given 2D FIR filter.

    The result is padded so that its spatial shape is a multiple of the
    input's; any user-specified `padding` is applied on top of that, with
    negative values indicating cropping. Pixels outside the image are treated
    as zero.

    Args:
        x:           Float32/float64/float16 input tensor of shape
                     `[batch_size, num_channels, in_height, in_width]`.
        f:           Float32 FIR filter of shape `[filter_height, filter_width]`
                     (non-separable), `[filter_taps]` (separable), or
                     `None` (identity).
        up:          Integer upsampling factor; a single int or `[x, y]`
                     (default: 2).
        padding:     Padding with respect to the output; a single number,
                     `[x, y]`, or `[x_before, x_after, y_before, y_after]`
                     (default: 0).
        flip_filter: False = convolution, True = correlation (default: False).
        gain:        Overall scaling factor for signal magnitude (default: 1).
        impl:        Implementation to use, `'ref'` or `'cuda'` (default: `'cuda'`).

    Returns:
        Tensor of shape `[batch_size, num_channels, out_height, out_width]`.
    """
    ux, uy = _parse_scaling(up)
    px0, px1, py0, py1 = _parse_padding(padding)
    fw, fh = _get_filter_size(f)
    # Center the filter footprint on the upsampled grid.
    pad = [
        px0 + (fw + ux - 1) // 2,
        px1 + (fw - ux) // 2,
        py0 + (fh + uy - 1) // 2,
        py1 + (fh - uy) // 2,
    ]
    # Gain is scaled by the upsampling factor to compensate for the inserted zeros.
    return upfirdn2d(x, f, up=up, padding=pad, flip_filter=flip_filter, gain=gain * ux * uy, impl=impl)
#----------------------------------------------------------------------------
def downsample2d(x, f, down=2, padding=0, flip_filter=False, gain=1, impl='cuda'):
    r"""Downsample a batch of 2D images with the given 2D FIR filter.

    The result is padded so that its spatial shape is a fraction of the
    input's; any user-specified `padding` is applied on top of that, with
    negative values indicating cropping. Pixels outside the image are treated
    as zero.

    Args:
        x:           Float32/float64/float16 input tensor of shape
                     `[batch_size, num_channels, in_height, in_width]`.
        f:           Float32 FIR filter of shape `[filter_height, filter_width]`
                     (non-separable), `[filter_taps]` (separable), or
                     `None` (identity).
        down:        Integer downsampling factor; a single int or `[x, y]`
                     (default: 2).
        padding:     Padding with respect to the input; a single number,
                     `[x, y]`, or `[x_before, x_after, y_before, y_after]`
                     (default: 0).
        flip_filter: False = convolution, True = correlation (default: False).
        gain:        Overall scaling factor for signal magnitude (default: 1).
        impl:        Implementation to use, `'ref'` or `'cuda'` (default: `'cuda'`).

    Returns:
        Tensor of shape `[batch_size, num_channels, out_height, out_width]`.
    """
    dx, dy = _parse_scaling(down)
    px0, px1, py0, py1 = _parse_padding(padding)
    fw, fh = _get_filter_size(f)
    # Center the filter footprint on the retained pixels.
    pad = [
        px0 + (fw - dx + 1) // 2,
        px1 + (fw - dx) // 2,
        py0 + (fh - dy + 1) // 2,
        py1 + (fh - dy) // 2,
    ]
    return upfirdn2d(x, f, down=down, padding=pad, flip_filter=flip_filter, gain=gain, impl=impl)
#----------------------------------------------------------------------------
| 16,392 | 41.033333 | 120 | py |
DiffProxy | DiffProxy-main/stylegan/torch_utils/ops/filtered_lrelu.py | # Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
import os
import numpy as np
import torch
import warnings
from .. import custom_ops
from .. import misc
from . import upfirdn2d
from . import bias_act
#----------------------------------------------------------------------------
_plugin = None

def _init():
    """Compile/load the filtered_lrelu CUDA plugin on first use; always returns True."""
    global _plugin
    if _plugin is not None:
        return True
    _plugin = custom_ops.get_plugin(
        module_name='filtered_lrelu_plugin',
        sources=['filtered_lrelu.cpp', 'filtered_lrelu_wr.cu', 'filtered_lrelu_rd.cu', 'filtered_lrelu_ns.cu'],
        headers=['filtered_lrelu.h', 'filtered_lrelu.cu'],
        source_dir=os.path.dirname(__file__),
        extra_cuda_cflags=['--use_fast_math'],
    )
    return True
def _get_filter_size(f):
if f is None:
return 1, 1
assert isinstance(f, torch.Tensor)
assert 1 <= f.ndim <= 2
return f.shape[-1], f.shape[0] # width, height
def _parse_padding(padding):
if isinstance(padding, int):
padding = [padding, padding]
assert isinstance(padding, (list, tuple))
assert all(isinstance(x, (int, np.integer)) for x in padding)
padding = [int(x) for x in padding]
if len(padding) == 2:
px, py = padding
padding = [px, px, py, py]
px0, px1, py0, py1 = padding
return px0, px1, py0, py1
#----------------------------------------------------------------------------
def filtered_lrelu(x, fu=None, fd=None, b=None, up=1, down=1, padding=0, gain=np.sqrt(2), slope=0.2, clamp=None, flip_filter=False, impl='cuda'):
    r"""Filtered leaky ReLU for a batch of 2D images.

    For each channel the op performs, in order: (1) add the per-channel bias
    `b`; (2) upsample by inserting `up - 1` zeros after each pixel; (3) apply
    `padding` with respect to the upsampled image (negative values crop);
    (4) convolve with the upsampling FIR filter `fu`, shrinking so every
    output pixel's footprint lies within the input; (5) multiply by `gain`;
    (6) apply leaky ReLU; (7) clamp to `[-clamp, +clamp]` if `clamp` is given;
    (8) convolve with the downsampling FIR filter `fd`, again shrinking; and
    (9) keep every `down`-th pixel.

    The fused op is considerably more efficient than composing standard
    PyTorch ops and supports gradients of arbitrary order.

    Args:
        x:           Float32/float16/float64 input tensor of shape
                     `[batch_size, num_channels, in_height, in_width]`.
        fu:          Float32 upsampling FIR filter: 2D (non-separable),
                     1D (separable), or `None` (identity).
        fd:          Float32 downsampling FIR filter, same conventions as `fu`.
        b:           Bias vector, or `None` to disable. Must be a 1D tensor of
                     the same dtype as `x` whose length matches `x`'s channels.
        up:          Integer upsampling factor (default: 1).
        down:        Integer downsampling factor (default: 1).
        padding:     Padding w.r.t. the upsampled image; a single number,
                     `[x, y]`, or `[x_before, x_after, y_before, y_after]`
                     (default: 0).
        gain:        Overall scaling factor for signal magnitude (default: sqrt(2)).
        slope:       Slope on the negative side of leaky ReLU (default: 0.2).
        clamp:       Maximum magnitude for the output, or `None` (default: None).
        flip_filter: False = convolution, True = correlation (default: False).
        impl:        Implementation to use, `'ref'` or `'cuda'` (default: `'cuda'`).

    Returns:
        Tensor of shape `[batch_size, num_channels, out_height, out_width]`.
    """
    assert isinstance(x, torch.Tensor)
    assert impl in ['ref', 'cuda']
    # Use the fused CUDA kernel only when requested, running on a CUDA device,
    # and the plugin compiles successfully.
    use_cuda = impl == 'cuda' and x.device.type == 'cuda' and _init()
    if use_cuda:
        return _filtered_lrelu_cuda(up=up, down=down, padding=padding, gain=gain, slope=slope, clamp=clamp, flip_filter=flip_filter).apply(x, fu, fd, b, None, 0, 0)
    return _filtered_lrelu_ref(x, fu=fu, fd=fd, b=b, up=up, down=down, padding=padding, gain=gain, slope=slope, clamp=clamp, flip_filter=flip_filter)
#----------------------------------------------------------------------------
@misc.profiled_function
def _filtered_lrelu_ref(x, fu=None, fd=None, b=None, up=1, down=1, padding=0, gain=np.sqrt(2), slope=0.2, clamp=None, flip_filter=False):
    """Slow and memory-inefficient reference implementation of `filtered_lrelu()` using
    existing `upfirdn2d()` and `bias_act()` ops.
    """
    # Validate arguments.
    assert isinstance(x, torch.Tensor) and x.ndim == 4
    fu_w, fu_h = _get_filter_size(fu)
    fd_w, fd_h = _get_filter_size(fd)
    if b is not None:
        assert isinstance(b, torch.Tensor) and b.dtype == x.dtype
        misc.assert_shape(b, [x.shape[1]])
    assert isinstance(up, int) and up >= 1
    assert isinstance(down, int) and down >= 1
    px0, px1, py0, py1 = _parse_padding(padding)
    assert gain == float(gain) and gain > 0
    assert slope == float(slope) and slope >= 0
    assert clamp is None or (clamp == float(clamp) and clamp >= 0)

    # Calculate output size: upsampled-and-padded extent, minus what each of
    # the two "valid" convolutions trims, rounded up by the downsampling stride.
    batch_size, channels, in_h, in_w = x.shape
    in_dtype = x.dtype
    out_w = (in_w * up + (px0 + px1) - (fu_w - 1) - (fd_w - 1) + (down - 1)) // down
    out_h = (in_h * up + (py0 + py1) - (fu_h - 1) - (fd_h - 1) + (down - 1)) // down

    # Compute using existing ops.
    x = bias_act.bias_act(x=x, b=b) # Apply bias.
    x = upfirdn2d.upfirdn2d(x=x, f=fu, up=up, padding=[px0, px1, py0, py1], gain=up**2, flip_filter=flip_filter) # Upsample.
    x = bias_act.bias_act(x=x, act='lrelu', alpha=slope, gain=gain, clamp=clamp) # Bias, leaky ReLU, clamp.
    x = upfirdn2d.upfirdn2d(x=x, f=fd, down=down, flip_filter=flip_filter) # Downsample.

    # Check output shape & dtype.
    misc.assert_shape(x, [batch_size, channels, out_h, out_w])
    assert x.dtype == in_dtype
    return x
#----------------------------------------------------------------------------
# Cache of generated FilteredLReluCuda classes, keyed by the op parameters, so
# identical configurations share a single autograd.Function subclass.
_filtered_lrelu_cuda_cache = dict()

def _filtered_lrelu_cuda(up=1, down=1, padding=0, gain=np.sqrt(2), slope=0.2, clamp=None, flip_filter=False):
    """Fast CUDA implementation of `filtered_lrelu()` using custom ops.

    Returns a `torch.autograd.Function` subclass specialized for the given
    parameters; call its `.apply(x, fu, fd, b, si, sx, sy)` to execute the op.
    """
    assert isinstance(up, int) and up >= 1
    assert isinstance(down, int) and down >= 1
    px0, px1, py0, py1 = _parse_padding(padding)
    assert gain == float(gain) and gain > 0
    gain = float(gain)
    assert slope == float(slope) and slope >= 0
    slope = float(slope)
    assert clamp is None or (clamp == float(clamp) and clamp >= 0)
    clamp = float(clamp if clamp is not None else 'inf') # None -> +inf, i.e. no clamping.

    # Lookup from cache.
    key = (up, down, px0, px1, py0, py1, gain, slope, clamp, flip_filter)
    if key in _filtered_lrelu_cuda_cache:
        return _filtered_lrelu_cuda_cache[key]

    # Forward op.
    class FilteredLReluCuda(torch.autograd.Function):
        @staticmethod
        def forward(ctx, x, fu, fd, b, si, sx, sy): # pylint: disable=arguments-differ
            assert isinstance(x, torch.Tensor) and x.ndim == 4

            # Replace empty up/downsample kernels with full 1x1 kernels (faster than separable).
            if fu is None:
                fu = torch.ones([1, 1], dtype=torch.float32, device=x.device)
            if fd is None:
                fd = torch.ones([1, 1], dtype=torch.float32, device=x.device)
            assert 1 <= fu.ndim <= 2
            assert 1 <= fd.ndim <= 2

            # Replace separable 1x1 kernels with full 1x1 kernels when scale factor is 1.
            if up == 1 and fu.ndim == 1 and fu.shape[0] == 1:
                fu = fu.square()[None]
            if down == 1 and fd.ndim == 1 and fd.shape[0] == 1:
                fd = fd.square()[None]

            # Missing sign input tensor.
            if si is None:
                si = torch.empty([0])

            # Missing bias tensor.
            if b is None:
                b = torch.zeros([x.shape[1]], dtype=x.dtype, device=x.device)

            # Construct internal sign tensor only if gradients are needed.
            write_signs = (si.numel() == 0) and (x.requires_grad or b.requires_grad)

            # Warn if input storage strides are not in decreasing order due to e.g. channels-last layout.
            strides = [x.stride(i) for i in range(x.ndim) if x.size(i) > 1]
            if any(a < b for a, b in zip(strides[:-1], strides[1:])):
                warnings.warn("low-performance memory layout detected in filtered_lrelu input", RuntimeWarning)

            # Call C++/Cuda plugin if datatype is supported.
            if x.dtype in [torch.float16, torch.float32]:
                if torch.cuda.current_stream(x.device) != torch.cuda.default_stream(x.device):
                    warnings.warn("filtered_lrelu called with non-default cuda stream but concurrent execution is not supported", RuntimeWarning)
                y, so, return_code = _plugin.filtered_lrelu(x, fu, fd, b, si, up, down, px0, px1, py0, py1, sx, sy, gain, slope, clamp, flip_filter, write_signs)
            else:
                return_code = -1

            # No Cuda kernel found? Fall back to generic implementation. Still more memory efficient than the reference implementation because
            # only the bit-packed sign tensor is retained for gradient computation.
            if return_code < 0:
                warnings.warn("filtered_lrelu called with parameters that have no optimized CUDA kernel, using generic fallback", RuntimeWarning)
                y = x.add(b.unsqueeze(-1).unsqueeze(-1)) # Add bias.
                y = upfirdn2d.upfirdn2d(x=y, f=fu, up=up, padding=[px0, px1, py0, py1], gain=up**2, flip_filter=flip_filter) # Upsample.
                so = _plugin.filtered_lrelu_act_(y, si, sx, sy, gain, slope, clamp, write_signs) # Activation function and sign handling. Modifies y in-place.
                y = upfirdn2d.upfirdn2d(x=y, f=fd, down=down, flip_filter=flip_filter) # Downsample.

            # Prepare for gradient computation. Save whichever sign tensor is
            # non-empty: the one passed in (si) or the one just written (so).
            ctx.save_for_backward(fu, fd, (si if si.numel() else so))
            ctx.x_shape = x.shape
            ctx.y_shape = y.shape
            ctx.s_ofs = sx, sy
            return y

        @staticmethod
        def backward(ctx, dy): # pylint: disable=arguments-differ
            fu, fd, si = ctx.saved_tensors
            _, _, xh, xw = ctx.x_shape
            _, _, yh, yw = ctx.y_shape
            sx, sy = ctx.s_ofs
            dx = None # 0
            dfu = None; assert not ctx.needs_input_grad[1]
            dfd = None; assert not ctx.needs_input_grad[2]
            db = None # 3
            dsi = None; assert not ctx.needs_input_grad[4]
            dsx = None; assert not ctx.needs_input_grad[5]
            dsy = None; assert not ctx.needs_input_grad[6]

            if ctx.needs_input_grad[0] or ctx.needs_input_grad[3]:
                # The gradient is computed with the same fused op: up/down are
                # swapped, the filters exchange roles, and padding/gain/sign
                # offsets are adjusted so the output matches the forward input.
                pp = [
                    (fu.shape[-1] - 1) + (fd.shape[-1] - 1) - px0,
                    xw * up - yw * down + px0 - (up - 1),
                    (fu.shape[0] - 1) + (fd.shape[0] - 1) - py0,
                    xh * up - yh * down + py0 - (up - 1),
                ]
                gg = gain * (up ** 2) / (down ** 2)
                ff = (not flip_filter)
                sx = sx - (fu.shape[-1] - 1) + px0
                sy = sy - (fu.shape[0] - 1) + py0
                dx = _filtered_lrelu_cuda(up=down, down=up, padding=pp, gain=gg, slope=slope, clamp=None, flip_filter=ff).apply(dy, fd, fu, None, si, sx, sy)

            if ctx.needs_input_grad[3]:
                # Bias gradient: reduce over batch and spatial dimensions.
                db = dx.sum([0, 2, 3])

            return dx, dfu, dfd, db, dsi, dsx, dsy

    # Add to cache.
    _filtered_lrelu_cuda_cache[key] = FilteredLReluCuda
    return FilteredLReluCuda
#----------------------------------------------------------------------------
| 12,884 | 45.854545 | 164 | py |
DiffProxy | DiffProxy-main/stylegan/torch_utils/ops/conv2d_resample.py | # Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
"""2D convolution with optional up/downsampling."""
import torch
from .. import misc
from . import conv2d_gradfix
from . import upfirdn2d
from .upfirdn2d import _parse_padding
from .upfirdn2d import _get_filter_size
#----------------------------------------------------------------------------
def _get_weight_shape(w):
    """Return `w.shape` as a plain list of ints."""
    with misc.suppress_tracer_warnings(): # this value will be treated as a constant
        dims = [int(d) for d in w.shape]
    misc.assert_shape(w, dims)
    return dims
#----------------------------------------------------------------------------
def _conv2d_wrapper(x, w, stride=1, padding=0, groups=1, transpose=False, flip_weight=True):
    """Dispatch to `conv2d_gradfix.conv2d()` / `conv_transpose2d()` with optional weight flip."""
    _out_channels, _in_channels_per_group, kh, kw = _get_weight_shape(w)

    # conv2d() actually performs correlation (flip_weight=True), so a true
    # convolution (flip_weight=False) requires flipping the spatial dims of
    # the weight. A 1x1 kernel is symmetric, so its flip can be skipped.
    if not flip_weight and (kw > 1 or kh > 1):
        w = w.flip([2, 3])

    conv_fn = conv2d_gradfix.conv_transpose2d if transpose else conv2d_gradfix.conv2d
    return conv_fn(x, w, stride=stride, padding=padding, groups=groups)
#----------------------------------------------------------------------------
@misc.profiled_function
def conv2d_resample(x, w, f=None, up=1, down=1, padding=0, groups=1, flip_weight=True, flip_filter=False):
    r"""2D convolution with optional up/downsampling.

    Padding is performed only once at the beginning, not between the operations.

    Args:
        x:           Input tensor of shape
                     `[batch_size, in_channels, in_height, in_width]`.
        w:           Weight tensor of shape
                     `[out_channels, in_channels//groups, kernel_height, kernel_width]`.
        f:           Low-pass filter for up/downsampling. Must be prepared beforehand by
                     calling upfirdn2d.setup_filter(). None = identity (default).
        up:          Integer upsampling factor (default: 1).
        down:        Integer downsampling factor (default: 1).
        padding:     Padding with respect to the upsampled image. Can be a single number
                     or a list/tuple `[x, y]` or `[x_before, x_after, y_before, y_after]`
                     (default: 0).
        groups:      Split input channels into N groups (default: 1).
        flip_weight: False = convolution, True = correlation (default: True).
        flip_filter: False = convolution, True = correlation (default: False).

    Returns:
        Tensor of the shape `[batch_size, num_channels, out_height, out_width]`.
    """
    # Validate arguments.
    assert isinstance(x, torch.Tensor) and (x.ndim == 4)
    assert isinstance(w, torch.Tensor) and (w.ndim == 4) and (w.dtype == x.dtype)
    assert f is None or (isinstance(f, torch.Tensor) and f.ndim in [1, 2] and f.dtype == torch.float32)
    assert isinstance(up, int) and (up >= 1)
    assert isinstance(down, int) and (down >= 1)
    assert isinstance(groups, int) and (groups >= 1)
    out_channels, in_channels_per_group, kh, kw = _get_weight_shape(w)
    fw, fh = _get_filter_size(f)
    px0, px1, py0, py1 = _parse_padding(padding)

    # Adjust padding to account for up/downsampling (centers the resampling
    # filter's footprint).
    if up > 1:
        px0 += (fw + up - 1) // 2
        px1 += (fw - up) // 2
        py0 += (fh + up - 1) // 2
        py1 += (fh - up) // 2
    if down > 1:
        px0 += (fw - down + 1) // 2
        px1 += (fw - down) // 2
        py0 += (fh - down + 1) // 2
        py1 += (fh - down) // 2

    # Fast path: 1x1 convolution with downsampling only => downsample first, then convolve.
    if kw == 1 and kh == 1 and (down > 1 and up == 1):
        x = upfirdn2d.upfirdn2d(x=x, f=f, down=down, padding=[px0,px1,py0,py1], flip_filter=flip_filter)
        x = _conv2d_wrapper(x=x, w=w, groups=groups, flip_weight=flip_weight)
        return x

    # Fast path: 1x1 convolution with upsampling only => convolve first, then upsample.
    if kw == 1 and kh == 1 and (up > 1 and down == 1):
        x = _conv2d_wrapper(x=x, w=w, groups=groups, flip_weight=flip_weight)
        x = upfirdn2d.upfirdn2d(x=x, f=f, up=up, padding=[px0,px1,py0,py1], gain=up**2, flip_filter=flip_filter)
        return x

    # Fast path: downsampling only => use strided convolution.
    if down > 1 and up == 1:
        x = upfirdn2d.upfirdn2d(x=x, f=f, padding=[px0,px1,py0,py1], flip_filter=flip_filter)
        x = _conv2d_wrapper(x=x, w=w, stride=down, groups=groups, flip_weight=flip_weight)
        return x

    # Fast path: upsampling with optional downsampling => use transpose strided convolution.
    if up > 1:
        # Rearrange the weight from [out, in//groups, kh, kw] into the layout
        # expected by conv_transpose2d().
        if groups == 1:
            w = w.transpose(0, 1)
        else:
            w = w.reshape(groups, out_channels // groups, in_channels_per_group, kh, kw)
            w = w.transpose(1, 2)
            w = w.reshape(groups * in_channels_per_group, out_channels // groups, kh, kw)
        # Shift padding to account for the kernel footprint of the transposed
        # convolution, then split it: the shared non-negative part (pxt/pyt)
        # is handled by conv_transpose2d() itself, the remainder by upfirdn2d().
        px0 -= kw - 1
        px1 -= kw - up
        py0 -= kh - 1
        py1 -= kh - up
        pxt = max(min(-px0, -px1), 0)
        pyt = max(min(-py0, -py1), 0)
        x = _conv2d_wrapper(x=x, w=w, stride=up, padding=[pyt,pxt], groups=groups, transpose=True, flip_weight=(not flip_weight))
        x = upfirdn2d.upfirdn2d(x=x, f=f, padding=[px0+pxt,px1+pxt,py0+pyt,py1+pyt], gain=up**2, flip_filter=flip_filter)
        if down > 1:
            x = upfirdn2d.upfirdn2d(x=x, f=f, down=down, flip_filter=flip_filter)
        return x

    # Fast path: no up/downsampling, padding supported by the underlying implementation => use plain conv2d.
    if up == 1 and down == 1:
        if px0 == px1 and py0 == py1 and px0 >= 0 and py0 >= 0:
            return _conv2d_wrapper(x=x, w=w, padding=[py0,px0], groups=groups, flip_weight=flip_weight)

    # Fallback: Generic reference implementation.
    x = upfirdn2d.upfirdn2d(x=x, f=(f if up > 1 else None), up=up, padding=[px0,px1,py0,py1], gain=up**2, flip_filter=flip_filter)
    x = _conv2d_wrapper(x=x, w=w, groups=groups, flip_weight=flip_weight)
    if down > 1:
        x = upfirdn2d.upfirdn2d(x=x, f=f, down=down, flip_filter=flip_filter)
    return x
#----------------------------------------------------------------------------
| 6,765 | 45.986111 | 130 | py |
DiffProxy | DiffProxy-main/stylegan/torch_utils/ops/fma.py | # Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
"""Fused multiply-add, with slightly faster gradients than `torch.addcmul()`."""
import torch
#----------------------------------------------------------------------------
def fma(a, b, c): # => a * b + c
    """Return `a * b + c` as a fused op with slightly faster gradients than `torch.addcmul()`."""
    return _FusedMultiplyAdd.apply(a, b, c)
#----------------------------------------------------------------------------
class _FusedMultiplyAdd(torch.autograd.Function): # a * b + c
@staticmethod
def forward(ctx, a, b, c): # pylint: disable=arguments-differ
out = torch.addcmul(c, a, b)
ctx.save_for_backward(a, b)
ctx.c_shape = c.shape
return out
@staticmethod
def backward(ctx, dout): # pylint: disable=arguments-differ
a, b = ctx.saved_tensors
c_shape = ctx.c_shape
da = None
db = None
dc = None
if ctx.needs_input_grad[0]:
da = _unbroadcast(dout * b, a.shape)
if ctx.needs_input_grad[1]:
db = _unbroadcast(dout * a, b.shape)
if ctx.needs_input_grad[2]:
dc = _unbroadcast(dout, c_shape)
return da, db, dc
#----------------------------------------------------------------------------
def _unbroadcast(x, shape):
extra_dims = x.ndim - len(shape)
assert extra_dims >= 0
dim = [i for i in range(x.ndim) if x.shape[i] > 1 and (i < extra_dims or shape[i - extra_dims] == 1)]
if len(dim):
x = x.sum(dim=dim, keepdim=True)
if extra_dims:
x = x.reshape(-1, *x.shape[extra_dims+1:])
assert x.shape == shape
return x
#----------------------------------------------------------------------------
| 2,047 | 32.57377 | 105 | py |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.