repo stringlengths 1 99 | file stringlengths 13 215 | code stringlengths 12 59.2M | file_length int64 12 59.2M | avg_line_length float64 3.82 1.48M | max_line_length int64 12 2.51M | extension_type stringclasses 1
value |
|---|---|---|---|---|---|---|
RandStainNA | RandStainNA-master/classification/timm/models/layers/global_context.py | """ Global Context Attention Block
Paper: `GCNet: Non-local Networks Meet Squeeze-Excitation Networks and Beyond`
- https://arxiv.org/abs/1904.11492
Official code consulted as reference: https://github.com/xvjiarui/GCNet
Hacked together by / Copyright 2021 Ross Wightman
"""
from torch import nn as nn
import torch.nn.functional as F
from .create_act import create_act_layer, get_act_layer
from .helpers import make_divisible
from .mlp import ConvMlp
from .norm import LayerNorm2d
class GlobalContext(nn.Module):
def __init__(self, channels, use_attn=True, fuse_add=False, fuse_scale=True, init_last_zero=False,
rd_ratio=1./8, rd_channels=None, rd_divisor=1, act_layer=nn.ReLU, gate_layer='sigmoid'):
super(GlobalContext, self).__init__()
act_layer = get_act_layer(act_layer)
self.conv_attn = nn.Conv2d(channels, 1, kernel_size=1, bias=True) if use_attn else None
if rd_channels is None:
rd_channels = make_divisible(channels * rd_ratio, rd_divisor, round_limit=0.)
if fuse_add:
self.mlp_add = ConvMlp(channels, rd_channels, act_layer=act_layer, norm_layer=LayerNorm2d)
else:
self.mlp_add = None
if fuse_scale:
self.mlp_scale = ConvMlp(channels, rd_channels, act_layer=act_layer, norm_layer=LayerNorm2d)
else:
self.mlp_scale = None
self.gate = create_act_layer(gate_layer)
self.init_last_zero = init_last_zero
self.reset_parameters()
def reset_parameters(self):
if self.conv_attn is not None:
nn.init.kaiming_normal_(self.conv_attn.weight, mode='fan_in', nonlinearity='relu')
if self.mlp_add is not None:
nn.init.zeros_(self.mlp_add.fc2.weight)
def forward(self, x):
B, C, H, W = x.shape
if self.conv_attn is not None:
attn = self.conv_attn(x).reshape(B, 1, H * W) # (B, 1, H * W)
attn = F.softmax(attn, dim=-1).unsqueeze(3) # (B, 1, H * W, 1)
context = x.reshape(B, C, H * W).unsqueeze(1) @ attn
context = context.view(B, C, 1, 1)
else:
context = x.mean(dim=(2, 3), keepdim=True)
if self.mlp_scale is not None:
mlp_x = self.mlp_scale(context)
x = x * self.gate(mlp_x)
if self.mlp_add is not None:
mlp_x = self.mlp_add(context)
x = x + mlp_x
return x
| 2,445 | 34.970588 | 105 | py |
RandStainNA | RandStainNA-master/classification/timm/models/layers/activations.py | """ Activations
A collection of activations fn and modules with a common interface so that they can
easily be swapped. All have an `inplace` arg even if not used.
Hacked together by / Copyright 2020 Ross Wightman
"""
import torch
from torch import nn as nn
from torch.nn import functional as F
def swish(x, inplace: bool = False):
"""Swish - Described in: https://arxiv.org/abs/1710.05941
"""
return x.mul_(x.sigmoid()) if inplace else x.mul(x.sigmoid())
class Swish(nn.Module):
def __init__(self, inplace: bool = False):
super(Swish, self).__init__()
self.inplace = inplace
def forward(self, x):
return swish(x, self.inplace)
def mish(x, inplace: bool = False):
"""Mish: A Self Regularized Non-Monotonic Neural Activation Function - https://arxiv.org/abs/1908.08681
NOTE: I don't have a working inplace variant
"""
return x.mul(F.softplus(x).tanh())
class Mish(nn.Module):
"""Mish: A Self Regularized Non-Monotonic Neural Activation Function - https://arxiv.org/abs/1908.08681
"""
def __init__(self, inplace: bool = False):
super(Mish, self).__init__()
def forward(self, x):
return mish(x)
def sigmoid(x, inplace: bool = False):
return x.sigmoid_() if inplace else x.sigmoid()
# PyTorch has this, but not with a consistent inplace argmument interface
class Sigmoid(nn.Module):
def __init__(self, inplace: bool = False):
super(Sigmoid, self).__init__()
self.inplace = inplace
def forward(self, x):
return x.sigmoid_() if self.inplace else x.sigmoid()
def tanh(x, inplace: bool = False):
return x.tanh_() if inplace else x.tanh()
# PyTorch has this, but not with a consistent inplace argmument interface
class Tanh(nn.Module):
def __init__(self, inplace: bool = False):
super(Tanh, self).__init__()
self.inplace = inplace
def forward(self, x):
return x.tanh_() if self.inplace else x.tanh()
def hard_swish(x, inplace: bool = False):
inner = F.relu6(x + 3.).div_(6.)
return x.mul_(inner) if inplace else x.mul(inner)
class HardSwish(nn.Module):
def __init__(self, inplace: bool = False):
super(HardSwish, self).__init__()
self.inplace = inplace
def forward(self, x):
return hard_swish(x, self.inplace)
def hard_sigmoid(x, inplace: bool = False):
if inplace:
return x.add_(3.).clamp_(0., 6.).div_(6.)
else:
return F.relu6(x + 3.) / 6.
class HardSigmoid(nn.Module):
def __init__(self, inplace: bool = False):
super(HardSigmoid, self).__init__()
self.inplace = inplace
def forward(self, x):
return hard_sigmoid(x, self.inplace)
def hard_mish(x, inplace: bool = False):
""" Hard Mish
Experimental, based on notes by Mish author Diganta Misra at
https://github.com/digantamisra98/H-Mish/blob/0da20d4bc58e696b6803f2523c58d3c8a82782d0/README.md
"""
if inplace:
return x.mul_(0.5 * (x + 2).clamp(min=0, max=2))
else:
return 0.5 * x * (x + 2).clamp(min=0, max=2)
class HardMish(nn.Module):
def __init__(self, inplace: bool = False):
super(HardMish, self).__init__()
self.inplace = inplace
def forward(self, x):
return hard_mish(x, self.inplace)
class PReLU(nn.PReLU):
"""Applies PReLU (w/ dummy inplace arg)
"""
def __init__(self, num_parameters: int = 1, init: float = 0.25, inplace: bool = False) -> None:
super(PReLU, self).__init__(num_parameters=num_parameters, init=init)
def forward(self, input: torch.Tensor) -> torch.Tensor:
return F.prelu(input, self.weight)
def gelu(x: torch.Tensor, inplace: bool = False) -> torch.Tensor:
return F.gelu(x)
class GELU(nn.Module):
"""Applies the Gaussian Error Linear Units function (w/ dummy inplace arg)
"""
def __init__(self, inplace: bool = False):
super(GELU, self).__init__()
def forward(self, input: torch.Tensor) -> torch.Tensor:
return F.gelu(input)
| 4,040 | 26.678082 | 107 | py |
RandStainNA | RandStainNA-master/classification/timm/models/layers/eca.py | """
ECA module from ECAnet
paper: ECA-Net: Efficient Channel Attention for Deep Convolutional Neural Networks
https://arxiv.org/abs/1910.03151
Original ECA model borrowed from https://github.com/BangguWu/ECANet
Modified circular ECA implementation and adaption for use in timm package
by Chris Ha https://github.com/VRandme
Original License:
MIT License
Copyright (c) 2019 BangguWu, Qilong Wang
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import math
from torch import nn
import torch.nn.functional as F
from .create_act import create_act_layer
from .helpers import make_divisible
class EcaModule(nn.Module):
"""Constructs an ECA module.
Args:
channels: Number of channels of the input feature map for use in adaptive kernel sizes
for actual calculations according to channel.
gamma, beta: when channel is given parameters of mapping function
refer to original paper https://arxiv.org/pdf/1910.03151.pdf
(default=None. if channel size not given, use k_size given for kernel size.)
kernel_size: Adaptive selection of kernel size (default=3)
gamm: used in kernel_size calc, see above
beta: used in kernel_size calc, see above
act_layer: optional non-linearity after conv, enables conv bias, this is an experiment
gate_layer: gating non-linearity to use
"""
def __init__(
self, channels=None, kernel_size=3, gamma=2, beta=1, act_layer=None, gate_layer='sigmoid',
rd_ratio=1/8, rd_channels=None, rd_divisor=8, use_mlp=False):
super(EcaModule, self).__init__()
if channels is not None:
t = int(abs(math.log(channels, 2) + beta) / gamma)
kernel_size = max(t if t % 2 else t + 1, 3)
assert kernel_size % 2 == 1
padding = (kernel_size - 1) // 2
if use_mlp:
# NOTE 'mlp' mode is a timm experiment, not in paper
assert channels is not None
if rd_channels is None:
rd_channels = make_divisible(channels * rd_ratio, divisor=rd_divisor)
act_layer = act_layer or nn.ReLU
self.conv = nn.Conv1d(1, rd_channels, kernel_size=1, padding=0, bias=True)
self.act = create_act_layer(act_layer)
self.conv2 = nn.Conv1d(rd_channels, 1, kernel_size=kernel_size, padding=padding, bias=True)
else:
self.conv = nn.Conv1d(1, 1, kernel_size=kernel_size, padding=padding, bias=False)
self.act = None
self.conv2 = None
self.gate = create_act_layer(gate_layer)
def forward(self, x):
y = x.mean((2, 3)).view(x.shape[0], 1, -1) # view for 1d conv
y = self.conv(y)
if self.conv2 is not None:
y = self.act(y)
y = self.conv2(y)
y = self.gate(y).view(x.shape[0], -1, 1, 1)
return x * y.expand_as(x)
EfficientChannelAttn = EcaModule # alias
class CecaModule(nn.Module):
"""Constructs a circular ECA module.
ECA module where the conv uses circular padding rather than zero padding.
Unlike the spatial dimension, the channels do not have inherent ordering nor
locality. Although this module in essence, applies such an assumption, it is unnecessary
to limit the channels on either "edge" from being circularly adapted to each other.
This will fundamentally increase connectivity and possibly increase performance metrics
(accuracy, robustness), without significantly impacting resource metrics
(parameter size, throughput,latency, etc)
Args:
channels: Number of channels of the input feature map for use in adaptive kernel sizes
for actual calculations according to channel.
gamma, beta: when channel is given parameters of mapping function
refer to original paper https://arxiv.org/pdf/1910.03151.pdf
(default=None. if channel size not given, use k_size given for kernel size.)
kernel_size: Adaptive selection of kernel size (default=3)
gamm: used in kernel_size calc, see above
beta: used in kernel_size calc, see above
act_layer: optional non-linearity after conv, enables conv bias, this is an experiment
gate_layer: gating non-linearity to use
"""
def __init__(self, channels=None, kernel_size=3, gamma=2, beta=1, act_layer=None, gate_layer='sigmoid'):
super(CecaModule, self).__init__()
if channels is not None:
t = int(abs(math.log(channels, 2) + beta) / gamma)
kernel_size = max(t if t % 2 else t + 1, 3)
has_act = act_layer is not None
assert kernel_size % 2 == 1
# PyTorch circular padding mode is buggy as of pytorch 1.4
# see https://github.com/pytorch/pytorch/pull/17240
# implement manual circular padding
self.padding = (kernel_size - 1) // 2
self.conv = nn.Conv1d(1, 1, kernel_size=kernel_size, padding=0, bias=has_act)
self.gate = create_act_layer(gate_layer)
def forward(self, x):
y = x.mean((2, 3)).view(x.shape[0], 1, -1)
# Manually implement circular padding, F.pad does not seemed to be bugged
y = F.pad(y, (self.padding, self.padding), mode='circular')
y = self.conv(y)
y = self.gate(y).view(x.shape[0], -1, 1, 1)
return x * y.expand_as(x)
CircularEfficientChannelAttn = CecaModule
| 6,386 | 42.746575 | 108 | py |
RandStainNA | RandStainNA-master/classification/timm/models/layers/space_to_depth.py | import torch
import torch.nn as nn
class SpaceToDepth(nn.Module):
def __init__(self, block_size=4):
super().__init__()
assert block_size == 4
self.bs = block_size
def forward(self, x):
N, C, H, W = x.size()
x = x.view(N, C, H // self.bs, self.bs, W // self.bs, self.bs) # (N, C, H//bs, bs, W//bs, bs)
x = x.permute(0, 3, 5, 1, 2, 4).contiguous() # (N, bs, bs, C, H//bs, W//bs)
x = x.view(N, C * (self.bs ** 2), H // self.bs, W // self.bs) # (N, C*bs^2, H//bs, W//bs)
return x
@torch.jit.script
class SpaceToDepthJit(object):
def __call__(self, x: torch.Tensor):
# assuming hard-coded that block_size==4 for acceleration
N, C, H, W = x.size()
x = x.view(N, C, H // 4, 4, W // 4, 4) # (N, C, H//bs, bs, W//bs, bs)
x = x.permute(0, 3, 5, 1, 2, 4).contiguous() # (N, bs, bs, C, H//bs, W//bs)
x = x.view(N, C * 16, H // 4, W // 4) # (N, C*bs^2, H//bs, W//bs)
return x
class SpaceToDepthModule(nn.Module):
def __init__(self, no_jit=False):
super().__init__()
if not no_jit:
self.op = SpaceToDepthJit()
else:
self.op = SpaceToDepth()
def forward(self, x):
return self.op(x)
class DepthToSpace(nn.Module):
def __init__(self, block_size):
super().__init__()
self.bs = block_size
def forward(self, x):
N, C, H, W = x.size()
x = x.view(N, self.bs, self.bs, C // (self.bs ** 2), H, W) # (N, bs, bs, C//bs^2, H, W)
x = x.permute(0, 3, 4, 1, 5, 2).contiguous() # (N, C//bs^2, H, bs, W, bs)
x = x.view(N, C // (self.bs ** 2), H * self.bs, W * self.bs) # (N, C//bs^2, H * bs, W * bs)
return x
| 1,750 | 31.425926 | 102 | py |
RandStainNA | RandStainNA-master/classification/timm/models/layers/create_attn.py | """ Attention Factory
Hacked together by / Copyright 2021 Ross Wightman
"""
import torch
from functools import partial
from .bottleneck_attn import BottleneckAttn
from .cbam import CbamModule, LightCbamModule
from .eca import EcaModule, CecaModule
from .gather_excite import GatherExcite
from .global_context import GlobalContext
from .halo_attn import HaloAttn
from .lambda_layer import LambdaLayer
from .non_local_attn import NonLocalAttn, BatNonLocalAttn
from .selective_kernel import SelectiveKernel
from .split_attn import SplitAttn
from .squeeze_excite import SEModule, EffectiveSEModule
def get_attn(attn_type):
if isinstance(attn_type, torch.nn.Module):
return attn_type
module_cls = None
if attn_type is not None:
if isinstance(attn_type, str):
attn_type = attn_type.lower()
# Lightweight attention modules (channel and/or coarse spatial).
# Typically added to existing network architecture blocks in addition to existing convolutions.
if attn_type == 'se':
module_cls = SEModule
elif attn_type == 'ese':
module_cls = EffectiveSEModule
elif attn_type == 'eca':
module_cls = EcaModule
elif attn_type == 'ecam':
module_cls = partial(EcaModule, use_mlp=True)
elif attn_type == 'ceca':
module_cls = CecaModule
elif attn_type == 'ge':
module_cls = GatherExcite
elif attn_type == 'gc':
module_cls = GlobalContext
elif attn_type == 'gca':
module_cls = partial(GlobalContext, fuse_add=True, fuse_scale=False)
elif attn_type == 'cbam':
module_cls = CbamModule
elif attn_type == 'lcbam':
module_cls = LightCbamModule
# Attention / attention-like modules w/ significant params
# Typically replace some of the existing workhorse convs in a network architecture.
# All of these accept a stride argument and can spatially downsample the input.
elif attn_type == 'sk':
module_cls = SelectiveKernel
elif attn_type == 'splat':
module_cls = SplitAttn
# Self-attention / attention-like modules w/ significant compute and/or params
# Typically replace some of the existing workhorse convs in a network architecture.
# All of these accept a stride argument and can spatially downsample the input.
elif attn_type == 'lambda':
return LambdaLayer
elif attn_type == 'bottleneck':
return BottleneckAttn
elif attn_type == 'halo':
return HaloAttn
elif attn_type == 'nl':
module_cls = NonLocalAttn
elif attn_type == 'bat':
module_cls = BatNonLocalAttn
# Woops!
else:
assert False, "Invalid attn module (%s)" % attn_type
elif isinstance(attn_type, bool):
if attn_type:
module_cls = SEModule
else:
module_cls = attn_type
return module_cls
def create_attn(attn_type, channels, **kwargs):
module_cls = get_attn(attn_type)
if module_cls is not None:
# NOTE: it's expected the first (positional) argument of all attention layers is the # input channels
return module_cls(channels, **kwargs)
return None
| 3,526 | 38.188889 | 109 | py |
RandStainNA | RandStainNA-master/classification/timm/models/layers/median_pool.py | """ Median Pool
Hacked together by / Copyright 2020 Ross Wightman
"""
import torch.nn as nn
import torch.nn.functional as F
from .helpers import to_2tuple, to_4tuple
class MedianPool2d(nn.Module):
""" Median pool (usable as median filter when stride=1) module.
Args:
kernel_size: size of pooling kernel, int or 2-tuple
stride: pool stride, int or 2-tuple
padding: pool padding, int or 4-tuple (l, r, t, b) as in pytorch F.pad
same: override padding and enforce same padding, boolean
"""
def __init__(self, kernel_size=3, stride=1, padding=0, same=False):
super(MedianPool2d, self).__init__()
self.k = to_2tuple(kernel_size)
self.stride = to_2tuple(stride)
self.padding = to_4tuple(padding) # convert to l, r, t, b
self.same = same
def _padding(self, x):
if self.same:
ih, iw = x.size()[2:]
if ih % self.stride[0] == 0:
ph = max(self.k[0] - self.stride[0], 0)
else:
ph = max(self.k[0] - (ih % self.stride[0]), 0)
if iw % self.stride[1] == 0:
pw = max(self.k[1] - self.stride[1], 0)
else:
pw = max(self.k[1] - (iw % self.stride[1]), 0)
pl = pw // 2
pr = pw - pl
pt = ph // 2
pb = ph - pt
padding = (pl, pr, pt, pb)
else:
padding = self.padding
return padding
def forward(self, x):
x = F.pad(x, self._padding(x), mode='reflect')
x = x.unfold(2, self.k[0], self.stride[0]).unfold(3, self.k[1], self.stride[1])
x = x.contiguous().view(x.size()[:4] + (-1,)).median(dim=-1)[0]
return x
| 1,737 | 33.76 | 87 | py |
RandStainNA | RandStainNA-master/classification/timm/models/layers/test_time_pool.py | """ Test Time Pooling (Average-Max Pool)
Hacked together by / Copyright 2020 Ross Wightman
"""
import logging
from torch import nn
import torch.nn.functional as F
from .adaptive_avgmax_pool import adaptive_avgmax_pool2d
_logger = logging.getLogger(__name__)
class TestTimePoolHead(nn.Module):
def __init__(self, base, original_pool=7):
super(TestTimePoolHead, self).__init__()
self.base = base
self.original_pool = original_pool
base_fc = self.base.get_classifier()
if isinstance(base_fc, nn.Conv2d):
self.fc = base_fc
else:
self.fc = nn.Conv2d(
self.base.num_features, self.base.num_classes, kernel_size=1, bias=True)
self.fc.weight.data.copy_(base_fc.weight.data.view(self.fc.weight.size()))
self.fc.bias.data.copy_(base_fc.bias.data.view(self.fc.bias.size()))
self.base.reset_classifier(0) # delete original fc layer
def forward(self, x):
x = self.base.forward_features(x)
x = F.avg_pool2d(x, kernel_size=self.original_pool, stride=1)
x = self.fc(x)
x = adaptive_avgmax_pool2d(x, 1)
return x.view(x.size(0), -1)
def apply_test_time_pool(model, config, use_test_size=True):
test_time_pool = False
if not hasattr(model, 'default_cfg') or not model.default_cfg:
return model, False
if use_test_size and 'test_input_size' in model.default_cfg:
df_input_size = model.default_cfg['test_input_size']
else:
df_input_size = model.default_cfg['input_size']
if config['input_size'][-1] > df_input_size[-1] and config['input_size'][-2] > df_input_size[-2]:
_logger.info('Target input size %s > pretrained default %s, using test time pooling' %
(str(config['input_size'][-2:]), str(df_input_size[-2:])))
model = TestTimePoolHead(model, original_pool=model.default_cfg['pool_size'])
test_time_pool = True
return model, test_time_pool
| 1,995 | 36.660377 | 101 | py |
RandStainNA | RandStainNA-master/classification/timm/models/layers/selective_kernel.py | """ Selective Kernel Convolution/Attention
Paper: Selective Kernel Networks (https://arxiv.org/abs/1903.06586)
Hacked together by / Copyright 2020 Ross Wightman
"""
import torch
from torch import nn as nn
from .conv_bn_act import ConvBnAct
from .helpers import make_divisible
from .trace_utils import _assert
def _kernel_valid(k):
if isinstance(k, (list, tuple)):
for ki in k:
return _kernel_valid(ki)
assert k >= 3 and k % 2
class SelectiveKernelAttn(nn.Module):
def __init__(self, channels, num_paths=2, attn_channels=32,
act_layer=nn.ReLU, norm_layer=nn.BatchNorm2d):
""" Selective Kernel Attention Module
Selective Kernel attention mechanism factored out into its own module.
"""
super(SelectiveKernelAttn, self).__init__()
self.num_paths = num_paths
self.fc_reduce = nn.Conv2d(channels, attn_channels, kernel_size=1, bias=False)
self.bn = norm_layer(attn_channels)
self.act = act_layer(inplace=True)
self.fc_select = nn.Conv2d(attn_channels, channels * num_paths, kernel_size=1, bias=False)
def forward(self, x):
_assert(x.shape[1] == self.num_paths, '')
x = x.sum(1).mean((2, 3), keepdim=True)
x = self.fc_reduce(x)
x = self.bn(x)
x = self.act(x)
x = self.fc_select(x)
B, C, H, W = x.shape
x = x.view(B, self.num_paths, C // self.num_paths, H, W)
x = torch.softmax(x, dim=1)
return x
class SelectiveKernel(nn.Module):
def __init__(self, in_channels, out_channels=None, kernel_size=None, stride=1, dilation=1, groups=1,
rd_ratio=1./16, rd_channels=None, rd_divisor=8, keep_3x3=True, split_input=True,
drop_block=None, act_layer=nn.ReLU, norm_layer=nn.BatchNorm2d, aa_layer=None):
""" Selective Kernel Convolution Module
As described in Selective Kernel Networks (https://arxiv.org/abs/1903.06586) with some modifications.
Largest change is the input split, which divides the input channels across each convolution path, this can
be viewed as a grouping of sorts, but the output channel counts expand to the module level value. This keeps
the parameter count from ballooning when the convolutions themselves don't have groups, but still provides
a noteworthy increase in performance over similar param count models without this attention layer. -Ross W
Args:
in_channels (int): module input (feature) channel count
out_channels (int): module output (feature) channel count
kernel_size (int, list): kernel size for each convolution branch
stride (int): stride for convolutions
dilation (int): dilation for module as a whole, impacts dilation of each branch
groups (int): number of groups for each branch
rd_ratio (int, float): reduction factor for attention features
keep_3x3 (bool): keep all branch convolution kernels as 3x3, changing larger kernels for dilations
split_input (bool): split input channels evenly across each convolution branch, keeps param count lower,
can be viewed as grouping by path, output expands to module out_channels count
drop_block (nn.Module): drop block module
act_layer (nn.Module): activation layer to use
norm_layer (nn.Module): batchnorm/norm layer to use
"""
super(SelectiveKernel, self).__init__()
out_channels = out_channels or in_channels
kernel_size = kernel_size or [3, 5] # default to one 3x3 and one 5x5 branch. 5x5 -> 3x3 + dilation
_kernel_valid(kernel_size)
if not isinstance(kernel_size, list):
kernel_size = [kernel_size] * 2
if keep_3x3:
dilation = [dilation * (k - 1) // 2 for k in kernel_size]
kernel_size = [3] * len(kernel_size)
else:
dilation = [dilation] * len(kernel_size)
self.num_paths = len(kernel_size)
self.in_channels = in_channels
self.out_channels = out_channels
self.split_input = split_input
if self.split_input:
assert in_channels % self.num_paths == 0
in_channels = in_channels // self.num_paths
groups = min(out_channels, groups)
conv_kwargs = dict(
stride=stride, groups=groups, drop_block=drop_block, act_layer=act_layer, norm_layer=norm_layer,
aa_layer=aa_layer)
self.paths = nn.ModuleList([
ConvBnAct(in_channels, out_channels, kernel_size=k, dilation=d, **conv_kwargs)
for k, d in zip(kernel_size, dilation)])
attn_channels = rd_channels or make_divisible(out_channels * rd_ratio, divisor=rd_divisor)
self.attn = SelectiveKernelAttn(out_channels, self.num_paths, attn_channels)
self.drop_block = drop_block
def forward(self, x):
if self.split_input:
x_split = torch.split(x, self.in_channels // self.num_paths, 1)
x_paths = [op(x_split[i]) for i, op in enumerate(self.paths)]
else:
x_paths = [op(x) for op in self.paths]
x = torch.stack(x_paths, dim=1)
x_attn = self.attn(x)
x = x * x_attn
x = torch.sum(x, dim=1)
return x
| 5,349 | 43.214876 | 116 | py |
RandStainNA | RandStainNA-master/classification/timm/models/layers/norm_act.py | """ Normalization + Activation Layers
"""
import torch
from torch import nn as nn
from torch.nn import functional as F
from .create_act import get_act_layer
class BatchNormAct2d(nn.BatchNorm2d):
"""BatchNorm + Activation
This module performs BatchNorm + Activation in a manner that will remain backwards
compatible with weights trained with separate bn, act. This is why we inherit from BN
instead of composing it as a .bn member.
"""
def __init__(self, num_features, eps=1e-5, momentum=0.1, affine=True, track_running_stats=True,
apply_act=True, act_layer=nn.ReLU, inplace=True, drop_block=None):
super(BatchNormAct2d, self).__init__(
num_features, eps=eps, momentum=momentum, affine=affine, track_running_stats=track_running_stats)
if isinstance(act_layer, str):
act_layer = get_act_layer(act_layer)
if act_layer is not None and apply_act:
act_args = dict(inplace=True) if inplace else {}
self.act = act_layer(**act_args)
else:
self.act = nn.Identity()
def _forward_jit(self, x):
""" A cut & paste of the contents of the PyTorch BatchNorm2d forward function
"""
# exponential_average_factor is self.momentum set to
# (when it is available) only so that if gets updated
# in ONNX graph when this node is exported to ONNX.
if self.momentum is None:
exponential_average_factor = 0.0
else:
exponential_average_factor = self.momentum
if self.training and self.track_running_stats:
# TODO: if statement only here to tell the jit to skip emitting this when it is None
if self.num_batches_tracked is not None:
self.num_batches_tracked += 1
if self.momentum is None: # use cumulative moving average
exponential_average_factor = 1.0 / float(self.num_batches_tracked)
else: # use exponential moving average
exponential_average_factor = self.momentum
x = F.batch_norm(
x, self.running_mean, self.running_var, self.weight, self.bias,
self.training or not self.track_running_stats,
exponential_average_factor, self.eps)
return x
@torch.jit.ignore
def _forward_python(self, x):
return super(BatchNormAct2d, self).forward(x)
def forward(self, x):
# FIXME cannot call parent forward() and maintain jit.script compatibility?
if torch.jit.is_scripting():
x = self._forward_jit(x)
else:
x = self._forward_python(x)
x = self.act(x)
return x
class GroupNormAct(nn.GroupNorm):
# NOTE num_channel and num_groups order flipped for easier layer swaps / binding of fixed args
def __init__(self, num_channels, num_groups=32, eps=1e-5, affine=True,
apply_act=True, act_layer=nn.ReLU, inplace=True, drop_block=None):
super(GroupNormAct, self).__init__(num_groups, num_channels, eps=eps, affine=affine)
if isinstance(act_layer, str):
act_layer = get_act_layer(act_layer)
if act_layer is not None and apply_act:
act_args = dict(inplace=True) if inplace else {}
self.act = act_layer(**act_args)
else:
self.act = nn.Identity()
def forward(self, x):
x = F.group_norm(x, self.num_groups, self.weight, self.bias, self.eps)
x = self.act(x)
return x
| 3,545 | 40.232558 | 109 | py |
RandStainNA | RandStainNA-master/classification/timm/models/layers/create_conv2d.py | """ Create Conv2d Factory Method
Hacked together by / Copyright 2020 Ross Wightman
"""
from .mixed_conv2d import MixedConv2d
from .cond_conv2d import CondConv2d
from .conv2d_same import create_conv2d_pad
def create_conv2d(in_channels, out_channels, kernel_size, **kwargs):
""" Select a 2d convolution implementation based on arguments
Creates and returns one of torch.nn.Conv2d, Conv2dSame, MixedConv2d, or CondConv2d.
Used extensively by EfficientNet, MobileNetv3 and related networks.
"""
if isinstance(kernel_size, list):
assert 'num_experts' not in kwargs # MixNet + CondConv combo not supported currently
assert 'groups' not in kwargs # MixedConv groups are defined by kernel list
# We're going to use only lists for defining the MixedConv2d kernel groups,
# ints, tuples, other iterables will continue to pass to normal conv and specify h, w.
m = MixedConv2d(in_channels, out_channels, kernel_size, **kwargs)
else:
depthwise = kwargs.pop('depthwise', False)
# for DW out_channels must be multiple of in_channels as must have out_channels % groups == 0
groups = in_channels if depthwise else kwargs.pop('groups', 1)
if 'num_experts' in kwargs and kwargs['num_experts'] > 0:
m = CondConv2d(in_channels, out_channels, kernel_size, groups=groups, **kwargs)
else:
m = create_conv2d_pad(in_channels, out_channels, kernel_size, groups=groups, **kwargs)
return m
| 1,500 | 45.90625 | 101 | py |
RandStainNA | RandStainNA-master/classification/timm/models/layers/create_norm_act.py | """ NormAct (Normalizaiton + Activation Layer) Factory
Create norm + act combo modules that attempt to be backwards compatible with separate norm + act
isntances in models. Where these are used it will be possible to swap separate BN + act layers with
combined modules like IABN or EvoNorms.
Hacked together by / Copyright 2020 Ross Wightman
"""
import types
import functools
import torch
import torch.nn as nn
from .evo_norm import EvoNormBatch2d, EvoNormSample2d
from .norm_act import BatchNormAct2d, GroupNormAct
from .inplace_abn import InplaceAbn
_NORM_ACT_TYPES = {BatchNormAct2d, GroupNormAct, EvoNormBatch2d, EvoNormSample2d, InplaceAbn}
_NORM_ACT_REQUIRES_ARG = {BatchNormAct2d, GroupNormAct, InplaceAbn} # requires act_layer arg to define act type
def get_norm_act_layer(layer_class):
layer_class = layer_class.replace('_', '').lower()
if layer_class.startswith("batchnorm"):
layer = BatchNormAct2d
elif layer_class.startswith("groupnorm"):
layer = GroupNormAct
elif layer_class == "evonormbatch":
layer = EvoNormBatch2d
elif layer_class == "evonormsample":
layer = EvoNormSample2d
elif layer_class == "iabn" or layer_class == "inplaceabn":
layer = InplaceAbn
else:
assert False, "Invalid norm_act layer (%s)" % layer_class
return layer
def create_norm_act(layer_type, num_features, apply_act=True, jit=False, **kwargs):
layer_parts = layer_type.split('-') # e.g. batchnorm-leaky_relu
assert len(layer_parts) in (1, 2)
layer = get_norm_act_layer(layer_parts[0])
#activation_class = layer_parts[1].lower() if len(layer_parts) > 1 else '' # FIXME support string act selection?
layer_instance = layer(num_features, apply_act=apply_act, **kwargs)
if jit:
layer_instance = torch.jit.script(layer_instance)
return layer_instance
def convert_norm_act(norm_layer, act_layer):
assert isinstance(norm_layer, (type, str, types.FunctionType, functools.partial))
assert act_layer is None or isinstance(act_layer, (type, str, types.FunctionType, functools.partial))
norm_act_kwargs = {}
# unbind partial fn, so args can be rebound later
if isinstance(norm_layer, functools.partial):
norm_act_kwargs.update(norm_layer.keywords)
norm_layer = norm_layer.func
if isinstance(norm_layer, str):
norm_act_layer = get_norm_act_layer(norm_layer)
elif norm_layer in _NORM_ACT_TYPES:
norm_act_layer = norm_layer
elif isinstance(norm_layer, types.FunctionType):
# if function type, must be a lambda/fn that creates a norm_act layer
norm_act_layer = norm_layer
else:
type_name = norm_layer.__name__.lower()
if type_name.startswith('batchnorm'):
norm_act_layer = BatchNormAct2d
elif type_name.startswith('groupnorm'):
norm_act_layer = GroupNormAct
else:
assert False, f"No equivalent norm_act layer for {type_name}"
if norm_act_layer in _NORM_ACT_REQUIRES_ARG:
# pass `act_layer` through for backwards compat where `act_layer=None` implies no activation.
# In the future, may force use of `apply_act` with `act_layer` arg bound to relevant NormAct types
norm_act_kwargs.setdefault('act_layer', act_layer)
if norm_act_kwargs:
norm_act_layer = functools.partial(norm_act_layer, **norm_act_kwargs) # bind/rebind args
return norm_act_layer
| 3,450 | 40.083333 | 118 | py |
RandStainNA | RandStainNA-master/classification/timm/models/layers/padding.py | """ Padding Helpers
Hacked together by / Copyright 2020 Ross Wightman
"""
import math
from typing import List, Tuple
import torch.nn.functional as F
# Calculate symmetric padding for a convolution
def get_padding(kernel_size: int, stride: int = 1, dilation: int = 1, **_) -> int:
padding = ((stride - 1) + dilation * (kernel_size - 1)) // 2
return padding
# Calculate asymmetric TensorFlow-like 'SAME' padding for a convolution
def get_same_padding(x: int, k: int, s: int, d: int):
return max((math.ceil(x / s) - 1) * s + (k - 1) * d + 1 - x, 0)
# Can SAME padding for given args be done statically?
def is_static_pad(kernel_size: int, stride: int = 1, dilation: int = 1, **_):
return stride == 1 and (dilation * (kernel_size - 1)) % 2 == 0
# Dynamically pad input x with 'SAME' padding for conv with specified args
def pad_same(x, k: List[int], s: List[int], d: List[int] = (1, 1), value: float = 0):
ih, iw = x.size()[-2:]
pad_h, pad_w = get_same_padding(ih, k[0], s[0], d[0]), get_same_padding(iw, k[1], s[1], d[1])
if pad_h > 0 or pad_w > 0:
x = F.pad(x, [pad_w // 2, pad_w - pad_w // 2, pad_h // 2, pad_h - pad_h // 2], value=value)
return x
def get_padding_value(padding, kernel_size, **kwargs) -> Tuple[Tuple, bool]:
dynamic = False
if isinstance(padding, str):
# for any string padding, the padding will be calculated for you, one of three ways
padding = padding.lower()
if padding == 'same':
# TF compatible 'SAME' padding, has a performance and GPU memory allocation impact
if is_static_pad(kernel_size, **kwargs):
# static case, no extra overhead
padding = get_padding(kernel_size, **kwargs)
else:
# dynamic 'SAME' padding, has runtime/GPU memory overhead
padding = 0
dynamic = True
elif padding == 'valid':
# 'VALID' padding, same as padding=0
padding = 0
else:
# Default to PyTorch style 'same'-ish symmetric padding
padding = get_padding(kernel_size, **kwargs)
return padding, dynamic
| 2,167 | 37.035088 | 99 | py |
RandStainNA | RandStainNA-master/classification/timm/models/layers/drop.py | """ DropBlock, DropPath
PyTorch implementations of DropBlock and DropPath (Stochastic Depth) regularization layers.
Papers:
DropBlock: A regularization method for convolutional networks (https://arxiv.org/abs/1810.12890)
Deep Networks with Stochastic Depth (https://arxiv.org/abs/1603.09382)
Code:
DropBlock impl inspired by two Tensorflow impl that I liked:
- https://github.com/tensorflow/tpu/blob/master/models/official/resnet/resnet_model.py#L74
- https://github.com/clovaai/assembled-cnn/blob/master/nets/blocks.py
Hacked together by / Copyright 2020 Ross Wightman
"""
import torch
import torch.nn as nn
import torch.nn.functional as F
def drop_block_2d(
x, drop_prob: float = 0.1, block_size: int = 7, gamma_scale: float = 1.0,
with_noise: bool = False, inplace: bool = False, batchwise: bool = False):
""" DropBlock. See https://arxiv.org/pdf/1810.12890.pdf
DropBlock with an experimental gaussian noise option. This layer has been tested on a few training
runs with success, but needs further validation and possibly optimization for lower runtime impact.
"""
B, C, H, W = x.shape
total_size = W * H
clipped_block_size = min(block_size, min(W, H))
# seed_drop_rate, the gamma parameter
gamma = gamma_scale * drop_prob * total_size / clipped_block_size ** 2 / (
(W - block_size + 1) * (H - block_size + 1))
# Forces the block to be inside the feature map.
w_i, h_i = torch.meshgrid(torch.arange(W).to(x.device), torch.arange(H).to(x.device))
valid_block = ((w_i >= clipped_block_size // 2) & (w_i < W - (clipped_block_size - 1) // 2)) & \
((h_i >= clipped_block_size // 2) & (h_i < H - (clipped_block_size - 1) // 2))
valid_block = torch.reshape(valid_block, (1, 1, H, W)).to(dtype=x.dtype)
if batchwise:
# one mask for whole batch, quite a bit faster
uniform_noise = torch.rand((1, C, H, W), dtype=x.dtype, device=x.device)
else:
uniform_noise = torch.rand_like(x)
block_mask = ((2 - gamma - valid_block + uniform_noise) >= 1).to(dtype=x.dtype)
block_mask = -F.max_pool2d(
-block_mask,
kernel_size=clipped_block_size, # block_size,
stride=1,
padding=clipped_block_size // 2)
if with_noise:
normal_noise = torch.randn((1, C, H, W), dtype=x.dtype, device=x.device) if batchwise else torch.randn_like(x)
if inplace:
x.mul_(block_mask).add_(normal_noise * (1 - block_mask))
else:
x = x * block_mask + normal_noise * (1 - block_mask)
else:
normalize_scale = (block_mask.numel() / block_mask.to(dtype=torch.float32).sum().add(1e-7)).to(x.dtype)
if inplace:
x.mul_(block_mask * normalize_scale)
else:
x = x * block_mask * normalize_scale
return x
def drop_block_fast_2d(
x: torch.Tensor, drop_prob: float = 0.1, block_size: int = 7,
gamma_scale: float = 1.0, with_noise: bool = False, inplace: bool = False):
""" DropBlock. See https://arxiv.org/pdf/1810.12890.pdf
DropBlock with an experimental gaussian noise option. Simplied from above without concern for valid
block mask at edges.
"""
B, C, H, W = x.shape
total_size = W * H
clipped_block_size = min(block_size, min(W, H))
gamma = gamma_scale * drop_prob * total_size / clipped_block_size ** 2 / (
(W - block_size + 1) * (H - block_size + 1))
block_mask = torch.empty_like(x).bernoulli_(gamma)
block_mask = F.max_pool2d(
block_mask.to(x.dtype), kernel_size=clipped_block_size, stride=1, padding=clipped_block_size // 2)
if with_noise:
normal_noise = torch.empty_like(x).normal_()
if inplace:
x.mul_(1. - block_mask).add_(normal_noise * block_mask)
else:
x = x * (1. - block_mask) + normal_noise * block_mask
else:
block_mask = 1 - block_mask
normalize_scale = (block_mask.numel() / block_mask.to(dtype=torch.float32).sum().add(1e-6)).to(dtype=x.dtype)
if inplace:
x.mul_(block_mask * normalize_scale)
else:
x = x * block_mask * normalize_scale
return x
class DropBlock2d(nn.Module):
""" DropBlock. See https://arxiv.org/pdf/1810.12890.pdf
"""
def __init__(self,
drop_prob=0.1,
block_size=7,
gamma_scale=1.0,
with_noise=False,
inplace=False,
batchwise=False,
fast=True):
super(DropBlock2d, self).__init__()
self.drop_prob = drop_prob
self.gamma_scale = gamma_scale
self.block_size = block_size
self.with_noise = with_noise
self.inplace = inplace
self.batchwise = batchwise
self.fast = fast # FIXME finish comparisons of fast vs not
def forward(self, x):
if not self.training or not self.drop_prob:
return x
if self.fast:
return drop_block_fast_2d(
x, self.drop_prob, self.block_size, self.gamma_scale, self.with_noise, self.inplace)
else:
return drop_block_2d(
x, self.drop_prob, self.block_size, self.gamma_scale, self.with_noise, self.inplace, self.batchwise)
def drop_path(x, drop_prob: float = 0., training: bool = False, scale_by_keep: bool = True):
"""Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks).
This is the same as the DropConnect impl I created for EfficientNet, etc networks, however,
the original name is misleading as 'Drop Connect' is a different form of dropout in a separate paper...
See discussion: https://github.com/tensorflow/tpu/issues/494#issuecomment-532968956 ... I've opted for
changing the layer and argument names to 'drop path' rather than mix DropConnect as a layer name and use
'survival rate' as the argument.
"""
if drop_prob == 0. or not training:
return x
keep_prob = 1 - drop_prob
shape = (x.shape[0],) + (1,) * (x.ndim - 1) # work with diff dim tensors, not just 2D ConvNets
random_tensor = x.new_empty(shape).bernoulli_(keep_prob)
if keep_prob > 0.0 and scale_by_keep:
random_tensor.div_(keep_prob)
return x * random_tensor
class DropPath(nn.Module):
"""Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks).
"""
def __init__(self, drop_prob=None, scale_by_keep=True):
super(DropPath, self).__init__()
self.drop_prob = drop_prob
self.scale_by_keep = scale_by_keep
def forward(self, x):
return drop_path(x, self.drop_prob, self.training, self.scale_by_keep)
| 6,732 | 39.806061 | 118 | py |
RandStainNA | RandStainNA-master/classification/timm/models/layers/inplace_abn.py | import torch
from torch import nn as nn
try:
from inplace_abn.functions import inplace_abn, inplace_abn_sync
has_iabn = True
except ImportError:
has_iabn = False
def inplace_abn(x, weight, bias, running_mean, running_var,
training=True, momentum=0.1, eps=1e-05, activation="leaky_relu", activation_param=0.01):
raise ImportError(
"Please install InplaceABN:'pip install git+https://github.com/mapillary/inplace_abn.git@v1.0.12'")
def inplace_abn_sync(**kwargs):
inplace_abn(**kwargs)
class InplaceAbn(nn.Module):
"""Activated Batch Normalization
This gathers a BatchNorm and an activation function in a single module
Parameters
----------
num_features : int
Number of feature channels in the input and output.
eps : float
Small constant to prevent numerical issues.
momentum : float
Momentum factor applied to compute running statistics.
affine : bool
If `True` apply learned scale and shift transformation after normalization.
act_layer : str or nn.Module type
Name or type of the activation functions, one of: `leaky_relu`, `elu`
act_param : float
Negative slope for the `leaky_relu` activation.
"""
def __init__(self, num_features, eps=1e-5, momentum=0.1, affine=True, apply_act=True,
act_layer="leaky_relu", act_param=0.01, drop_block=None):
super(InplaceAbn, self).__init__()
self.num_features = num_features
self.affine = affine
self.eps = eps
self.momentum = momentum
if apply_act:
if isinstance(act_layer, str):
assert act_layer in ('leaky_relu', 'elu', 'identity', '')
self.act_name = act_layer if act_layer else 'identity'
else:
# convert act layer passed as type to string
if act_layer == nn.ELU:
self.act_name = 'elu'
elif act_layer == nn.LeakyReLU:
self.act_name = 'leaky_relu'
elif act_layer == nn.Identity:
self.act_name = 'identity'
else:
assert False, f'Invalid act layer {act_layer.__name__} for IABN'
else:
self.act_name = 'identity'
self.act_param = act_param
if self.affine:
self.weight = nn.Parameter(torch.ones(num_features))
self.bias = nn.Parameter(torch.zeros(num_features))
else:
self.register_parameter('weight', None)
self.register_parameter('bias', None)
self.register_buffer('running_mean', torch.zeros(num_features))
self.register_buffer('running_var', torch.ones(num_features))
self.reset_parameters()
def reset_parameters(self):
nn.init.constant_(self.running_mean, 0)
nn.init.constant_(self.running_var, 1)
if self.affine:
nn.init.constant_(self.weight, 1)
nn.init.constant_(self.bias, 0)
def forward(self, x):
output = inplace_abn(
x, self.weight, self.bias, self.running_mean, self.running_var,
self.training, self.momentum, self.eps, self.act_name, self.act_param)
if isinstance(output, tuple):
output = output[0]
return output
| 3,353 | 37.113636 | 111 | py |
RandStainNA | RandStainNA-master/classification/timm/optim/madgrad.py | """ PyTorch MADGRAD optimizer
MADGRAD: https://arxiv.org/abs/2101.11075
Code from: https://github.com/facebookresearch/madgrad
"""
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import math
from typing import TYPE_CHECKING, Any, Callable, Optional
import torch
import torch.optim
if TYPE_CHECKING:
from torch.optim.optimizer import _params_t
else:
_params_t = Any
class MADGRAD(torch.optim.Optimizer):
"""
MADGRAD_: A Momentumized, Adaptive, Dual Averaged Gradient Method for Stochastic
Optimization.
.. _MADGRAD: https://arxiv.org/abs/2101.11075
MADGRAD is a general purpose optimizer that can be used in place of SGD or
Adam may converge faster and generalize better. Currently GPU-only.
Typically, the same learning rate schedule that is used for SGD or Adam may
be used. The overall learning rate is not comparable to either method and
should be determined by a hyper-parameter sweep.
MADGRAD requires less weight decay than other methods, often as little as
zero. Momentum values used for SGD or Adam's beta1 should work here also.
On sparse problems both weight_decay and momentum should be set to 0.
Arguments:
params (iterable):
Iterable of parameters to optimize or dicts defining parameter groups.
lr (float):
Learning rate (default: 1e-2).
momentum (float):
Momentum value in the range [0,1) (default: 0.9).
weight_decay (float):
Weight decay, i.e. a L2 penalty (default: 0).
eps (float):
Term added to the denominator outside of the root operation to improve numerical stability. (default: 1e-6).
"""
def __init__(
self,
params: _params_t,
lr: float = 1e-2,
momentum: float = 0.9,
weight_decay: float = 0,
eps: float = 1e-6,
decoupled_decay: bool = False,
):
if momentum < 0 or momentum >= 1:
raise ValueError(f"Momentum {momentum} must be in the range [0,1]")
if lr <= 0:
raise ValueError(f"Learning rate {lr} must be positive")
if weight_decay < 0:
raise ValueError(f"Weight decay {weight_decay} must be non-negative")
if eps < 0:
raise ValueError(f"Eps must be non-negative")
defaults = dict(
lr=lr, eps=eps, momentum=momentum, weight_decay=weight_decay, decoupled_decay=decoupled_decay)
super().__init__(params, defaults)
@property
def supports_memory_efficient_fp16(self) -> bool:
return False
@property
def supports_flat_params(self) -> bool:
return True
@torch.no_grad()
def step(self, closure: Optional[Callable[[], float]] = None) -> Optional[float]:
"""Performs a single optimization step.
Arguments:
closure (callable, optional): A closure that reevaluates the model and returns the loss.
"""
loss = None
if closure is not None:
with torch.enable_grad():
loss = closure()
for group in self.param_groups:
eps = group['eps']
lr = group['lr'] + eps
weight_decay = group['weight_decay']
momentum = group['momentum']
ck = 1 - momentum
for p in group["params"]:
if p.grad is None:
continue
grad = p.grad
if momentum != 0.0 and grad.is_sparse:
raise RuntimeError("momentum != 0 is not compatible with sparse gradients")
state = self.state[p]
if len(state) == 0:
state['step'] = 0
state['grad_sum_sq'] = torch.zeros_like(p)
state['s'] = torch.zeros_like(p)
if momentum != 0:
state['x0'] = torch.clone(p).detach()
state['step'] += 1
grad_sum_sq = state['grad_sum_sq']
s = state['s']
lamb = lr * math.sqrt(state['step'])
# Apply weight decay
if weight_decay != 0:
if group['decoupled_decay']:
p.mul_(1.0 - group['lr'] * weight_decay)
else:
if grad.is_sparse:
raise RuntimeError("weight_decay option is not compatible with sparse gradients")
grad.add_(p, alpha=weight_decay)
if grad.is_sparse:
grad = grad.coalesce()
grad_val = grad._values()
p_masked = p.sparse_mask(grad)
grad_sum_sq_masked = grad_sum_sq.sparse_mask(grad)
s_masked = s.sparse_mask(grad)
# Compute x_0 from other known quantities
rms_masked_vals = grad_sum_sq_masked._values().pow(1 / 3).add_(eps)
x0_masked_vals = p_masked._values().addcdiv(s_masked._values(), rms_masked_vals, value=1)
# Dense + sparse op
grad_sq = grad * grad
grad_sum_sq.add_(grad_sq, alpha=lamb)
grad_sum_sq_masked.add_(grad_sq, alpha=lamb)
rms_masked_vals = grad_sum_sq_masked._values().pow_(1 / 3).add_(eps)
s.add_(grad, alpha=lamb)
s_masked._values().add_(grad_val, alpha=lamb)
# update masked copy of p
p_kp1_masked_vals = x0_masked_vals.addcdiv(s_masked._values(), rms_masked_vals, value=-1)
# Copy updated masked p to dense p using an add operation
p_masked._values().add_(p_kp1_masked_vals, alpha=-1)
p.add_(p_masked, alpha=-1)
else:
if momentum == 0:
# Compute x_0 from other known quantities
rms = grad_sum_sq.pow(1 / 3).add_(eps)
x0 = p.addcdiv(s, rms, value=1)
else:
x0 = state['x0']
# Accumulate second moments
grad_sum_sq.addcmul_(grad, grad, value=lamb)
rms = grad_sum_sq.pow(1 / 3).add_(eps)
# Update s
s.add_(grad, alpha=lamb)
# Step
if momentum == 0:
p.copy_(x0.addcdiv(s, rms, value=-1))
else:
z = x0.addcdiv(s, rms, value=-1)
# p is a moving average of z
p.mul_(1 - ck).add_(z, alpha=ck)
return loss
| 6,893 | 36.264865 | 120 | py |
RandStainNA | RandStainNA-master/classification/timm/optim/adahessian.py | """ AdaHessian Optimizer
Lifted from https://github.com/davda54/ada-hessian/blob/master/ada_hessian.py
Originally licensed MIT, Copyright 2020, David Samuel
"""
import torch
class Adahessian(torch.optim.Optimizer):
"""
Implements the AdaHessian algorithm from "ADAHESSIAN: An Adaptive Second OrderOptimizer for Machine Learning"
Arguments:
params (iterable): iterable of parameters to optimize or dicts defining parameter groups
lr (float, optional): learning rate (default: 0.1)
betas ((float, float), optional): coefficients used for computing running averages of gradient and the
squared hessian trace (default: (0.9, 0.999))
eps (float, optional): term added to the denominator to improve numerical stability (default: 1e-8)
weight_decay (float, optional): weight decay (L2 penalty) (default: 0.0)
hessian_power (float, optional): exponent of the hessian trace (default: 1.0)
update_each (int, optional): compute the hessian trace approximation only after *this* number of steps
(to save time) (default: 1)
n_samples (int, optional): how many times to sample `z` for the approximation of the hessian trace (default: 1)
"""
def __init__(self, params, lr=0.1, betas=(0.9, 0.999), eps=1e-8, weight_decay=0.0,
hessian_power=1.0, update_each=1, n_samples=1, avg_conv_kernel=False):
if not 0.0 <= lr:
raise ValueError(f"Invalid learning rate: {lr}")
if not 0.0 <= eps:
raise ValueError(f"Invalid epsilon value: {eps}")
if not 0.0 <= betas[0] < 1.0:
raise ValueError(f"Invalid beta parameter at index 0: {betas[0]}")
if not 0.0 <= betas[1] < 1.0:
raise ValueError(f"Invalid beta parameter at index 1: {betas[1]}")
if not 0.0 <= hessian_power <= 1.0:
raise ValueError(f"Invalid Hessian power value: {hessian_power}")
self.n_samples = n_samples
self.update_each = update_each
self.avg_conv_kernel = avg_conv_kernel
# use a separate generator that deterministically generates the same `z`s across all GPUs in case of distributed training
self.seed = 2147483647
self.generator = torch.Generator().manual_seed(self.seed)
defaults = dict(lr=lr, betas=betas, eps=eps, weight_decay=weight_decay, hessian_power=hessian_power)
super(Adahessian, self).__init__(params, defaults)
for p in self.get_params():
p.hess = 0.0
self.state[p]["hessian step"] = 0
@property
def is_second_order(self):
return True
def get_params(self):
"""
Gets all parameters in all param_groups with gradients
"""
return (p for group in self.param_groups for p in group['params'] if p.requires_grad)
def zero_hessian(self):
"""
Zeros out the accumalated hessian traces.
"""
for p in self.get_params():
if not isinstance(p.hess, float) and self.state[p]["hessian step"] % self.update_each == 0:
p.hess.zero_()
@torch.no_grad()
def set_hessian(self):
"""
Computes the Hutchinson approximation of the hessian trace and accumulates it for each trainable parameter.
"""
params = []
for p in filter(lambda p: p.grad is not None, self.get_params()):
if self.state[p]["hessian step"] % self.update_each == 0: # compute the trace only each `update_each` step
params.append(p)
self.state[p]["hessian step"] += 1
if len(params) == 0:
return
if self.generator.device != params[0].device: # hackish way of casting the generator to the right device
self.generator = torch.Generator(params[0].device).manual_seed(self.seed)
grads = [p.grad for p in params]
for i in range(self.n_samples):
# Rademacher distribution {-1.0, 1.0}
zs = [torch.randint(0, 2, p.size(), generator=self.generator, device=p.device) * 2.0 - 1.0 for p in params]
h_zs = torch.autograd.grad(
grads, params, grad_outputs=zs, only_inputs=True, retain_graph=i < self.n_samples - 1)
for h_z, z, p in zip(h_zs, zs, params):
p.hess += h_z * z / self.n_samples # approximate the expected values of z*(H@z)
@torch.no_grad()
def step(self, closure=None):
"""
Performs a single optimization step.
Arguments:
closure (callable, optional) -- a closure that reevaluates the model and returns the loss (default: None)
"""
loss = None
if closure is not None:
loss = closure()
self.zero_hessian()
self.set_hessian()
for group in self.param_groups:
for p in group['params']:
if p.grad is None or p.hess is None:
continue
if self.avg_conv_kernel and p.dim() == 4:
p.hess = torch.abs(p.hess).mean(dim=[2, 3], keepdim=True).expand_as(p.hess).clone()
# Perform correct stepweight decay as in AdamW
p.mul_(1 - group['lr'] * group['weight_decay'])
state = self.state[p]
# State initialization
if len(state) == 1:
state['step'] = 0
# Exponential moving average of gradient values
state['exp_avg'] = torch.zeros_like(p)
# Exponential moving average of Hessian diagonal square values
state['exp_hessian_diag_sq'] = torch.zeros_like(p)
exp_avg, exp_hessian_diag_sq = state['exp_avg'], state['exp_hessian_diag_sq']
beta1, beta2 = group['betas']
state['step'] += 1
# Decay the first and second moment running average coefficient
exp_avg.mul_(beta1).add_(p.grad, alpha=1 - beta1)
exp_hessian_diag_sq.mul_(beta2).addcmul_(p.hess, p.hess, value=1 - beta2)
bias_correction1 = 1 - beta1 ** state['step']
bias_correction2 = 1 - beta2 ** state['step']
k = group['hessian_power']
denom = (exp_hessian_diag_sq / bias_correction2).pow_(k / 2).add_(group['eps'])
# make update
step_size = group['lr'] / bias_correction1
p.addcdiv_(exp_avg, denom, value=-step_size)
return loss
| 6,535 | 40.630573 | 129 | py |
RandStainNA | RandStainNA-master/classification/timm/optim/radam.py | """RAdam Optimizer.
Implementation lifted from: https://github.com/LiyuanLucasLiu/RAdam
Paper: `On the Variance of the Adaptive Learning Rate and Beyond` - https://arxiv.org/abs/1908.03265
"""
import math
import torch
from torch.optim.optimizer import Optimizer
class RAdam(Optimizer):
def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8, weight_decay=0):
defaults = dict(
lr=lr, betas=betas, eps=eps, weight_decay=weight_decay,
buffer=[[None, None, None] for _ in range(10)])
super(RAdam, self).__init__(params, defaults)
def __setstate__(self, state):
super(RAdam, self).__setstate__(state)
@torch.no_grad()
def step(self, closure=None):
loss = None
if closure is not None:
with torch.enable_grad():
loss = closure()
for group in self.param_groups:
for p in group['params']:
if p.grad is None:
continue
grad = p.grad.float()
if grad.is_sparse:
raise RuntimeError('RAdam does not support sparse gradients')
p_fp32 = p.float()
state = self.state[p]
if len(state) == 0:
state['step'] = 0
state['exp_avg'] = torch.zeros_like(p_fp32)
state['exp_avg_sq'] = torch.zeros_like(p_fp32)
else:
state['exp_avg'] = state['exp_avg'].type_as(p_fp32)
state['exp_avg_sq'] = state['exp_avg_sq'].type_as(p_fp32)
exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq']
beta1, beta2 = group['betas']
exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=1 - beta2)
exp_avg.mul_(beta1).add_(grad, alpha=1 - beta1)
state['step'] += 1
buffered = group['buffer'][int(state['step'] % 10)]
if state['step'] == buffered[0]:
num_sma, step_size = buffered[1], buffered[2]
else:
buffered[0] = state['step']
beta2_t = beta2 ** state['step']
num_sma_max = 2 / (1 - beta2) - 1
num_sma = num_sma_max - 2 * state['step'] * beta2_t / (1 - beta2_t)
buffered[1] = num_sma
# more conservative since it's an approximated value
if num_sma >= 5:
step_size = group['lr'] * math.sqrt(
(1 - beta2_t) *
(num_sma - 4) / (num_sma_max - 4) *
(num_sma - 2) / num_sma *
num_sma_max / (num_sma_max - 2)) / (1 - beta1 ** state['step'])
else:
step_size = group['lr'] / (1 - beta1 ** state['step'])
buffered[2] = step_size
if group['weight_decay'] != 0:
p_fp32.add_(p_fp32, alpha=-group['weight_decay'] * group['lr'])
# more conservative since it's an approximated value
if num_sma >= 5:
denom = exp_avg_sq.sqrt().add_(group['eps'])
p_fp32.addcdiv_(exp_avg, denom, value=-step_size)
else:
p_fp32.add_(exp_avg, alpha=-step_size)
p.copy_(p_fp32)
return loss
| 3,468 | 37.544444 | 100 | py |
RandStainNA | RandStainNA-master/classification/timm/optim/nvnovograd.py | """ Nvidia NovoGrad Optimizer.
Original impl by Nvidia from Jasper example:
- https://github.com/NVIDIA/DeepLearningExamples/blob/master/PyTorch/SpeechRecognition/Jasper
Paper: `Stochastic Gradient Methods with Layer-wise Adaptive Moments for Training of Deep Networks`
- https://arxiv.org/abs/1905.11286
"""
import torch
from torch.optim.optimizer import Optimizer
import math
class NvNovoGrad(Optimizer):
"""
Implements Novograd algorithm.
Args:
params (iterable): iterable of parameters to optimize or dicts defining
parameter groups
lr (float, optional): learning rate (default: 1e-3)
betas (Tuple[float, float], optional): coefficients used for computing
running averages of gradient and its square (default: (0.95, 0.98))
eps (float, optional): term added to the denominator to improve
numerical stability (default: 1e-8)
weight_decay (float, optional): weight decay (L2 penalty) (default: 0)
grad_averaging: gradient averaging
amsgrad (boolean, optional): whether to use the AMSGrad variant of this
algorithm from the paper `On the Convergence of Adam and Beyond`_
(default: False)
"""
def __init__(self, params, lr=1e-3, betas=(0.95, 0.98), eps=1e-8,
weight_decay=0, grad_averaging=False, amsgrad=False):
if not 0.0 <= lr:
raise ValueError("Invalid learning rate: {}".format(lr))
if not 0.0 <= eps:
raise ValueError("Invalid epsilon value: {}".format(eps))
if not 0.0 <= betas[0] < 1.0:
raise ValueError("Invalid beta parameter at index 0: {}".format(betas[0]))
if not 0.0 <= betas[1] < 1.0:
raise ValueError("Invalid beta parameter at index 1: {}".format(betas[1]))
defaults = dict(lr=lr, betas=betas, eps=eps,
weight_decay=weight_decay,
grad_averaging=grad_averaging,
amsgrad=amsgrad)
super(NvNovoGrad, self).__init__(params, defaults)
def __setstate__(self, state):
super(NvNovoGrad, self).__setstate__(state)
for group in self.param_groups:
group.setdefault('amsgrad', False)
@torch.no_grad()
def step(self, closure=None):
"""Performs a single optimization step.
Arguments:
closure (callable, optional): A closure that reevaluates the model
and returns the loss.
"""
loss = None
if closure is not None:
with torch.enable_grad():
loss = closure()
for group in self.param_groups:
for p in group['params']:
if p.grad is None:
continue
grad = p.grad
if grad.is_sparse:
raise RuntimeError('Sparse gradients are not supported.')
amsgrad = group['amsgrad']
state = self.state[p]
# State initialization
if len(state) == 0:
state['step'] = 0
# Exponential moving average of gradient values
state['exp_avg'] = torch.zeros_like(p)
# Exponential moving average of squared gradient values
state['exp_avg_sq'] = torch.zeros([]).to(state['exp_avg'].device)
if amsgrad:
# Maintains max of all exp. moving avg. of sq. grad. values
state['max_exp_avg_sq'] = torch.zeros([]).to(state['exp_avg'].device)
exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq']
if amsgrad:
max_exp_avg_sq = state['max_exp_avg_sq']
beta1, beta2 = group['betas']
state['step'] += 1
norm = torch.sum(torch.pow(grad, 2))
if exp_avg_sq == 0:
exp_avg_sq.copy_(norm)
else:
exp_avg_sq.mul_(beta2).add_(norm, alpha=1 - beta2)
if amsgrad:
# Maintains the maximum of all 2nd moment running avg. till now
torch.max(max_exp_avg_sq, exp_avg_sq, out=max_exp_avg_sq)
# Use the max. for normalizing running avg. of gradient
denom = max_exp_avg_sq.sqrt().add_(group['eps'])
else:
denom = exp_avg_sq.sqrt().add_(group['eps'])
grad.div_(denom)
if group['weight_decay'] != 0:
grad.add_(p, alpha=group['weight_decay'])
if group['grad_averaging']:
grad.mul_(1 - beta1)
exp_avg.mul_(beta1).add_(grad)
p.add_(exp_avg, alpha=-group['lr'])
return loss
| 4,856 | 39.140496 | 99 | py |
RandStainNA | RandStainNA-master/classification/timm/optim/adabelief.py | import math
import torch
from torch.optim.optimizer import Optimizer
class AdaBelief(Optimizer):
r"""Implements AdaBelief algorithm. Modified from Adam in PyTorch
Arguments:
params (iterable): iterable of parameters to optimize or dicts defining
parameter groups
lr (float, optional): learning rate (default: 1e-3)
betas (Tuple[float, float], optional): coefficients used for computing
running averages of gradient and its square (default: (0.9, 0.999))
eps (float, optional): term added to the denominator to improve
numerical stability (default: 1e-16)
weight_decay (float, optional): weight decay (L2 penalty) (default: 0)
amsgrad (boolean, optional): whether to use the AMSGrad variant of this
algorithm from the paper `On the Convergence of Adam and Beyond`_
(default: False)
decoupled_decay (boolean, optional): (default: True) If set as True, then
the optimizer uses decoupled weight decay as in AdamW
fixed_decay (boolean, optional): (default: False) This is used when weight_decouple
is set as True.
When fixed_decay == True, the weight decay is performed as
$W_{new} = W_{old} - W_{old} \times decay$.
When fixed_decay == False, the weight decay is performed as
$W_{new} = W_{old} - W_{old} \times decay \times lr$. Note that in this case, the
weight decay ratio decreases with learning rate (lr).
rectify (boolean, optional): (default: True) If set as True, then perform the rectified
update similar to RAdam
degenerated_to_sgd (boolean, optional) (default:True) If set as True, then perform SGD update
when variance of gradient is high
reference: AdaBelief Optimizer, adapting stepsizes by the belief in observed gradients, NeurIPS 2020
For a complete table of recommended hyperparameters, see https://github.com/juntang-zhuang/Adabelief-Optimizer'
For example train/args for EfficientNet see these gists
- link to train_scipt: https://gist.github.com/juntang-zhuang/0a501dd51c02278d952cf159bc233037
- link to args.yaml: https://gist.github.com/juntang-zhuang/517ce3c27022b908bb93f78e4f786dc3
"""
def __init__(
self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-16, weight_decay=0, amsgrad=False,
decoupled_decay=True, fixed_decay=False, rectify=True, degenerated_to_sgd=True):
if not 0.0 <= lr:
raise ValueError("Invalid learning rate: {}".format(lr))
if not 0.0 <= eps:
raise ValueError("Invalid epsilon value: {}".format(eps))
if not 0.0 <= betas[0] < 1.0:
raise ValueError("Invalid beta parameter at index 0: {}".format(betas[0]))
if not 0.0 <= betas[1] < 1.0:
raise ValueError("Invalid beta parameter at index 1: {}".format(betas[1]))
if isinstance(params, (list, tuple)) and len(params) > 0 and isinstance(params[0], dict):
for param in params:
if 'betas' in param and (param['betas'][0] != betas[0] or param['betas'][1] != betas[1]):
param['buffer'] = [[None, None, None] for _ in range(10)]
defaults = dict(
lr=lr, betas=betas, eps=eps, weight_decay=weight_decay, amsgrad=amsgrad,
degenerated_to_sgd=degenerated_to_sgd, decoupled_decay=decoupled_decay, rectify=rectify,
fixed_decay=fixed_decay, buffer=[[None, None, None] for _ in range(10)])
super(AdaBelief, self).__init__(params, defaults)
def __setstate__(self, state):
super(AdaBelief, self).__setstate__(state)
for group in self.param_groups:
group.setdefault('amsgrad', False)
@torch.no_grad()
def reset(self):
for group in self.param_groups:
for p in group['params']:
state = self.state[p]
amsgrad = group['amsgrad']
# State initialization
state['step'] = 0
# Exponential moving average of gradient values
state['exp_avg'] = torch.zeros_like(p)
# Exponential moving average of squared gradient values
state['exp_avg_var'] = torch.zeros_like(p)
if amsgrad:
# Maintains max of all exp. moving avg. of sq. grad. values
state['max_exp_avg_var'] = torch.zeros_like(p)
@torch.no_grad()
def step(self, closure=None):
"""Performs a single optimization step.
Arguments:
closure (callable, optional): A closure that reevaluates the model
and returns the loss.
"""
loss = None
if closure is not None:
with torch.enable_grad():
loss = closure()
for group in self.param_groups:
for p in group['params']:
if p.grad is None:
continue
grad = p.grad
if grad.dtype in {torch.float16, torch.bfloat16}:
grad = grad.float()
if grad.is_sparse:
raise RuntimeError(
'AdaBelief does not support sparse gradients, please consider SparseAdam instead')
p_fp32 = p
if p.dtype in {torch.float16, torch.bfloat16}:
p_fp32 = p_fp32.float()
amsgrad = group['amsgrad']
beta1, beta2 = group['betas']
state = self.state[p]
# State initialization
if len(state) == 0:
state['step'] = 0
# Exponential moving average of gradient values
state['exp_avg'] = torch.zeros_like(p_fp32)
# Exponential moving average of squared gradient values
state['exp_avg_var'] = torch.zeros_like(p_fp32)
if amsgrad:
# Maintains max of all exp. moving avg. of sq. grad. values
state['max_exp_avg_var'] = torch.zeros_like(p_fp32)
# perform weight decay, check if decoupled weight decay
if group['decoupled_decay']:
if not group['fixed_decay']:
p_fp32.mul_(1.0 - group['lr'] * group['weight_decay'])
else:
p_fp32.mul_(1.0 - group['weight_decay'])
else:
if group['weight_decay'] != 0:
grad.add_(p_fp32, alpha=group['weight_decay'])
# get current state variable
exp_avg, exp_avg_var = state['exp_avg'], state['exp_avg_var']
state['step'] += 1
bias_correction1 = 1 - beta1 ** state['step']
bias_correction2 = 1 - beta2 ** state['step']
# Update first and second moment running average
exp_avg.mul_(beta1).add_(grad, alpha=1 - beta1)
grad_residual = grad - exp_avg
exp_avg_var.mul_(beta2).addcmul_(grad_residual, grad_residual, value=1 - beta2)
if amsgrad:
max_exp_avg_var = state['max_exp_avg_var']
# Maintains the maximum of all 2nd moment running avg. till now
torch.max(max_exp_avg_var, exp_avg_var.add_(group['eps']), out=max_exp_avg_var)
# Use the max. for normalizing running avg. of gradient
denom = (max_exp_avg_var.sqrt() / math.sqrt(bias_correction2)).add_(group['eps'])
else:
denom = (exp_avg_var.add_(group['eps']).sqrt() / math.sqrt(bias_correction2)).add_(group['eps'])
# update
if not group['rectify']:
# Default update
step_size = group['lr'] / bias_correction1
p_fp32.addcdiv_(exp_avg, denom, value=-step_size)
else:
# Rectified update, forked from RAdam
buffered = group['buffer'][int(state['step'] % 10)]
if state['step'] == buffered[0]:
num_sma, step_size = buffered[1], buffered[2]
else:
buffered[0] = state['step']
beta2_t = beta2 ** state['step']
num_sma_max = 2 / (1 - beta2) - 1
num_sma = num_sma_max - 2 * state['step'] * beta2_t / (1 - beta2_t)
buffered[1] = num_sma
# more conservative since it's an approximated value
if num_sma >= 5:
step_size = math.sqrt(
(1 - beta2_t) *
(num_sma - 4) / (num_sma_max - 4) *
(num_sma - 2) / num_sma *
num_sma_max / (num_sma_max - 2)) / (1 - beta1 ** state['step'])
elif group['degenerated_to_sgd']:
step_size = 1.0 / (1 - beta1 ** state['step'])
else:
step_size = -1
buffered[2] = step_size
if num_sma >= 5:
denom = exp_avg_var.sqrt().add_(group['eps'])
p_fp32.addcdiv_(exp_avg, denom, value=-step_size * group['lr'])
elif step_size > 0:
p_fp32.add_(exp_avg, alpha=-step_size * group['lr'])
if p.dtype in {torch.float16, torch.bfloat16}:
p.copy_(p_fp32)
return loss
| 9,827 | 47.653465 | 116 | py |
RandStainNA | RandStainNA-master/classification/timm/optim/adamp.py | """
AdamP Optimizer Implementation copied from https://github.com/clovaai/AdamP/blob/master/adamp/adamp.py
Paper: `Slowing Down the Weight Norm Increase in Momentum-based Optimizers` - https://arxiv.org/abs/2006.08217
Code: https://github.com/clovaai/AdamP
Copyright (c) 2020-present NAVER Corp.
MIT license
"""
import torch
import torch.nn.functional as F
from torch.optim.optimizer import Optimizer
import math
def _channel_view(x) -> torch.Tensor:
return x.reshape(x.size(0), -1)
def _layer_view(x) -> torch.Tensor:
return x.reshape(1, -1)
def projection(p, grad, perturb, delta: float, wd_ratio: float, eps: float):
wd = 1.
expand_size = (-1,) + (1,) * (len(p.shape) - 1)
for view_func in [_channel_view, _layer_view]:
param_view = view_func(p)
grad_view = view_func(grad)
cosine_sim = F.cosine_similarity(grad_view, param_view, dim=1, eps=eps).abs_()
# FIXME this is a problem for PyTorch XLA
if cosine_sim.max() < delta / math.sqrt(param_view.size(1)):
p_n = p / param_view.norm(p=2, dim=1).add_(eps).reshape(expand_size)
perturb -= p_n * view_func(p_n * perturb).sum(dim=1).reshape(expand_size)
wd = wd_ratio
return perturb, wd
return perturb, wd
class AdamP(Optimizer):
def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8,
weight_decay=0, delta=0.1, wd_ratio=0.1, nesterov=False):
defaults = dict(
lr=lr, betas=betas, eps=eps, weight_decay=weight_decay,
delta=delta, wd_ratio=wd_ratio, nesterov=nesterov)
super(AdamP, self).__init__(params, defaults)
@torch.no_grad()
def step(self, closure=None):
loss = None
if closure is not None:
with torch.enable_grad():
loss = closure()
for group in self.param_groups:
for p in group['params']:
if p.grad is None:
continue
grad = p.grad
beta1, beta2 = group['betas']
nesterov = group['nesterov']
state = self.state[p]
# State initialization
if len(state) == 0:
state['step'] = 0
state['exp_avg'] = torch.zeros_like(p)
state['exp_avg_sq'] = torch.zeros_like(p)
# Adam
exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq']
state['step'] += 1
bias_correction1 = 1 - beta1 ** state['step']
bias_correction2 = 1 - beta2 ** state['step']
exp_avg.mul_(beta1).add_(grad, alpha=1 - beta1)
exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=1 - beta2)
denom = (exp_avg_sq.sqrt() / math.sqrt(bias_correction2)).add_(group['eps'])
step_size = group['lr'] / bias_correction1
if nesterov:
perturb = (beta1 * exp_avg + (1 - beta1) * grad) / denom
else:
perturb = exp_avg / denom
# Projection
wd_ratio = 1.
if len(p.shape) > 1:
perturb, wd_ratio = projection(p, grad, perturb, group['delta'], group['wd_ratio'], group['eps'])
# Weight decay
if group['weight_decay'] > 0:
p.mul_(1. - group['lr'] * group['weight_decay'] * wd_ratio)
# Step
p.add_(perturb, alpha=-step_size)
return loss
| 3,574 | 32.726415 | 117 | py |
RandStainNA | RandStainNA-master/classification/timm/optim/nadam.py | import math
import torch
from torch.optim.optimizer import Optimizer
class Nadam(Optimizer):
"""Implements Nadam algorithm (a variant of Adam based on Nesterov momentum).
It has been proposed in `Incorporating Nesterov Momentum into Adam`__.
Arguments:
params (iterable): iterable of parameters to optimize or dicts defining
parameter groups
lr (float, optional): learning rate (default: 2e-3)
betas (Tuple[float, float], optional): coefficients used for computing
running averages of gradient and its square
eps (float, optional): term added to the denominator to improve
numerical stability (default: 1e-8)
weight_decay (float, optional): weight decay (L2 penalty) (default: 0)
schedule_decay (float, optional): momentum schedule decay (default: 4e-3)
__ http://cs229.stanford.edu/proj2015/054_report.pdf
__ http://www.cs.toronto.edu/~fritz/absps/momentum.pdf
Originally taken from: https://github.com/pytorch/pytorch/pull/1408
NOTE: Has potential issues but does work well on some problems.
"""
def __init__(self, params, lr=2e-3, betas=(0.9, 0.999), eps=1e-8,
weight_decay=0, schedule_decay=4e-3):
if not 0.0 <= lr:
raise ValueError("Invalid learning rate: {}".format(lr))
defaults = dict(
lr=lr, betas=betas, eps=eps, weight_decay=weight_decay, schedule_decay=schedule_decay)
super(Nadam, self).__init__(params, defaults)
@torch.no_grad()
def step(self, closure=None):
"""Performs a single optimization step.
Arguments:
closure (callable, optional): A closure that reevaluates the model
and returns the loss.
"""
loss = None
if closure is not None:
with torch.enable_grad():
loss = closure()
for group in self.param_groups:
for p in group['params']:
if p.grad is None:
continue
grad = p.grad
state = self.state[p]
# State initialization
if len(state) == 0:
state['step'] = 0
state['m_schedule'] = 1.
state['exp_avg'] = torch.zeros_like(p)
state['exp_avg_sq'] = torch.zeros_like(p)
# Warming momentum schedule
m_schedule = state['m_schedule']
schedule_decay = group['schedule_decay']
exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq']
beta1, beta2 = group['betas']
eps = group['eps']
state['step'] += 1
t = state['step']
bias_correction2 = 1 - beta2 ** t
if group['weight_decay'] != 0:
grad = grad.add(p, alpha=group['weight_decay'])
momentum_cache_t = beta1 * (1. - 0.5 * (0.96 ** (t * schedule_decay)))
momentum_cache_t_1 = beta1 * (1. - 0.5 * (0.96 ** ((t + 1) * schedule_decay)))
m_schedule_new = m_schedule * momentum_cache_t
m_schedule_next = m_schedule * momentum_cache_t * momentum_cache_t_1
state['m_schedule'] = m_schedule_new
# Decay the first and second moment running average coefficient
exp_avg.mul_(beta1).add_(grad, alpha=1. - beta1)
exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=1. - beta2)
denom = (exp_avg_sq.sqrt() / math.sqrt(bias_correction2)).add_(eps)
p.addcdiv_(grad, denom, value=-group['lr'] * (1. - momentum_cache_t) / (1. - m_schedule_new))
p.addcdiv_(exp_avg, denom, value=-group['lr'] * momentum_cache_t_1 / (1. - m_schedule_next))
return loss
| 3,871 | 40.634409 | 109 | py |
RandStainNA | RandStainNA-master/classification/timm/optim/adamw.py | """ AdamW Optimizer
Impl copied from PyTorch master
NOTE: Builtin optim.AdamW is used by the factory, this impl only serves as a Python based reference, will be removed
someday
"""
import math
import torch
from torch.optim.optimizer import Optimizer
class AdamW(Optimizer):
r"""Implements AdamW algorithm.
The original Adam algorithm was proposed in `Adam: A Method for Stochastic Optimization`_.
The AdamW variant was proposed in `Decoupled Weight Decay Regularization`_.
Arguments:
params (iterable): iterable of parameters to optimize or dicts defining
parameter groups
lr (float, optional): learning rate (default: 1e-3)
betas (Tuple[float, float], optional): coefficients used for computing
running averages of gradient and its square (default: (0.9, 0.999))
eps (float, optional): term added to the denominator to improve
numerical stability (default: 1e-8)
weight_decay (float, optional): weight decay coefficient (default: 1e-2)
amsgrad (boolean, optional): whether to use the AMSGrad variant of this
algorithm from the paper `On the Convergence of Adam and Beyond`_
(default: False)
.. _Adam\: A Method for Stochastic Optimization:
https://arxiv.org/abs/1412.6980
.. _Decoupled Weight Decay Regularization:
https://arxiv.org/abs/1711.05101
.. _On the Convergence of Adam and Beyond:
https://openreview.net/forum?id=ryQu7f-RZ
"""
def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8,
weight_decay=1e-2, amsgrad=False):
if not 0.0 <= lr:
raise ValueError("Invalid learning rate: {}".format(lr))
if not 0.0 <= eps:
raise ValueError("Invalid epsilon value: {}".format(eps))
if not 0.0 <= betas[0] < 1.0:
raise ValueError("Invalid beta parameter at index 0: {}".format(betas[0]))
if not 0.0 <= betas[1] < 1.0:
raise ValueError("Invalid beta parameter at index 1: {}".format(betas[1]))
defaults = dict(lr=lr, betas=betas, eps=eps,
weight_decay=weight_decay, amsgrad=amsgrad)
super(AdamW, self).__init__(params, defaults)
def __setstate__(self, state):
super(AdamW, self).__setstate__(state)
for group in self.param_groups:
group.setdefault('amsgrad', False)
@torch.no_grad()
def step(self, closure=None):
"""Performs a single optimization step.
Arguments:
closure (callable, optional): A closure that reevaluates the model
and returns the loss.
"""
loss = None
if closure is not None:
with torch.enable_grad():
loss = closure()
for group in self.param_groups:
for p in group['params']:
if p.grad is None:
continue
# Perform stepweight decay
p.data.mul_(1 - group['lr'] * group['weight_decay'])
# Perform optimization step
grad = p.grad
if grad.is_sparse:
raise RuntimeError('Adam does not support sparse gradients, please consider SparseAdam instead')
amsgrad = group['amsgrad']
state = self.state[p]
# State initialization
if len(state) == 0:
state['step'] = 0
# Exponential moving average of gradient values
state['exp_avg'] = torch.zeros_like(p)
# Exponential moving average of squared gradient values
state['exp_avg_sq'] = torch.zeros_like(p)
if amsgrad:
# Maintains max of all exp. moving avg. of sq. grad. values
state['max_exp_avg_sq'] = torch.zeros_like(p)
exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq']
if amsgrad:
max_exp_avg_sq = state['max_exp_avg_sq']
beta1, beta2 = group['betas']
state['step'] += 1
bias_correction1 = 1 - beta1 ** state['step']
bias_correction2 = 1 - beta2 ** state['step']
# Decay the first and second moment running average coefficient
exp_avg.mul_(beta1).add_(grad, alpha=1 - beta1)
exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=1 - beta2)
if amsgrad:
# Maintains the maximum of all 2nd moment running avg. till now
torch.max(max_exp_avg_sq, exp_avg_sq, out=max_exp_avg_sq)
# Use the max. for normalizing running avg. of gradient
denom = (max_exp_avg_sq.sqrt() / math.sqrt(bias_correction2)).add_(group['eps'])
else:
denom = (exp_avg_sq.sqrt() / math.sqrt(bias_correction2)).add_(group['eps'])
step_size = group['lr'] / bias_correction1
p.addcdiv_(exp_avg, denom, value=-step_size)
return loss
| 5,147 | 40.853659 | 116 | py |
RandStainNA | RandStainNA-master/classification/timm/optim/adafactor.py | """ Adafactor Optimizer
Lifted from https://github.com/pytorch/fairseq/blob/master/fairseq/optim/adafactor.py
Original header/copyright below.
"""
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch
import math
class Adafactor(torch.optim.Optimizer):
"""Implements Adafactor algorithm.
This implementation is based on: `Adafactor: Adaptive Learning Rates with Sublinear Memory Cost`
(see https://arxiv.org/abs/1804.04235)
Note that this optimizer internally adjusts the learning rate depending on the
*scale_parameter*, *relative_step* and *warmup_init* options.
To use a manual (external) learning rate schedule you should set `scale_parameter=False` and
`relative_step=False`.
Arguments:
params (iterable): iterable of parameters to optimize or dicts defining parameter groups
lr (float, optional): external learning rate (default: None)
eps (tuple[float, float]): regularization constants for square gradient
and parameter scale respectively (default: (1e-30, 1e-3))
clip_threshold (float): threshold of root mean square of final gradient update (default: 1.0)
decay_rate (float): coefficient used to compute running averages of square gradient (default: -0.8)
beta1 (float): coefficient used for computing running averages of gradient (default: None)
weight_decay (float, optional): weight decay (L2 penalty) (default: 0)
scale_parameter (bool): if True, learning rate is scaled by root mean square of parameter (default: True)
warmup_init (bool): time-dependent learning rate computation depends on
whether warm-up initialization is being used (default: False)
"""
def __init__(self, params, lr=None, eps=1e-30, eps_scale=1e-3, clip_threshold=1.0,
decay_rate=-0.8, betas=None, weight_decay=0.0, scale_parameter=True, warmup_init=False):
relative_step = not lr
if warmup_init and not relative_step:
raise ValueError('warmup_init requires relative_step=True')
beta1 = None if betas is None else betas[0] # make it compat with standard betas arg
defaults = dict(lr=lr, eps=eps, eps_scale=eps_scale, clip_threshold=clip_threshold, decay_rate=decay_rate,
beta1=beta1, weight_decay=weight_decay, scale_parameter=scale_parameter,
relative_step=relative_step, warmup_init=warmup_init)
super(Adafactor, self).__init__(params, defaults)
@staticmethod
def _get_lr(param_group, param_state):
if param_group['relative_step']:
min_step = 1e-6 * param_state['step'] if param_group['warmup_init'] else 1e-2
lr_t = min(min_step, 1.0 / math.sqrt(param_state['step']))
param_scale = 1.0
if param_group['scale_parameter']:
param_scale = max(param_group['eps_scale'], param_state['RMS'])
param_group['lr'] = lr_t * param_scale
return param_group['lr']
@staticmethod
def _get_options(param_group, param_shape):
factored = len(param_shape) >= 2
use_first_moment = param_group['beta1'] is not None
return factored, use_first_moment
@staticmethod
def _rms(tensor):
return tensor.norm(2) / (tensor.numel() ** 0.5)
def _approx_sq_grad(self, exp_avg_sq_row, exp_avg_sq_col):
r_factor = (exp_avg_sq_row / exp_avg_sq_row.mean(dim=-1, keepdim=True)).rsqrt_().unsqueeze(-1)
c_factor = exp_avg_sq_col.unsqueeze(-2).rsqrt()
return torch.mul(r_factor, c_factor)
@torch.no_grad()
def step(self, closure=None):
"""Performs a single optimization step.
Arguments:
closure (callable, optional): A closure that reevaluates the model and returns the loss.
"""
loss = None
if closure is not None:
with torch.enable_grad():
loss = closure()
for group in self.param_groups:
for p in group['params']:
if p.grad is None:
continue
grad = p.grad
if grad.dtype in {torch.float16, torch.bfloat16}:
grad = grad.float()
if grad.is_sparse:
raise RuntimeError('Adafactor does not support sparse gradients.')
state = self.state[p]
factored, use_first_moment = self._get_options(group, grad.shape)
# State Initialization
if len(state) == 0:
state['step'] = 0
if use_first_moment:
# Exponential moving average of gradient values
state['exp_avg'] = torch.zeros_like(grad)
if factored:
state['exp_avg_sq_row'] = torch.zeros(grad.shape[:-1]).to(grad)
state['exp_avg_sq_col'] = torch.zeros(grad.shape[:-2] + grad.shape[-1:]).to(grad)
else:
state['exp_avg_sq'] = torch.zeros_like(grad)
state['RMS'] = 0
else:
if use_first_moment:
state['exp_avg'] = state['exp_avg'].to(grad)
if factored:
state['exp_avg_sq_row'] = state['exp_avg_sq_row'].to(grad)
state['exp_avg_sq_col'] = state['exp_avg_sq_col'].to(grad)
else:
state['exp_avg_sq'] = state['exp_avg_sq'].to(grad)
p_fp32 = p
if p.dtype in {torch.float16, torch.bfloat16}:
p_fp32 = p_fp32.float()
state['step'] += 1
state['RMS'] = self._rms(p_fp32)
lr_t = self._get_lr(group, state)
beta2t = 1.0 - math.pow(state['step'], group['decay_rate'])
update = grad ** 2 + group['eps']
if factored:
exp_avg_sq_row = state['exp_avg_sq_row']
exp_avg_sq_col = state['exp_avg_sq_col']
exp_avg_sq_row.mul_(beta2t).add_(update.mean(dim=-1), alpha=1.0 - beta2t)
exp_avg_sq_col.mul_(beta2t).add_(update.mean(dim=-2), alpha=1.0 - beta2t)
# Approximation of exponential moving average of square of gradient
update = self._approx_sq_grad(exp_avg_sq_row, exp_avg_sq_col)
update.mul_(grad)
else:
exp_avg_sq = state['exp_avg_sq']
exp_avg_sq.mul_(beta2t).add_(update, alpha=1.0 - beta2t)
update = exp_avg_sq.rsqrt().mul_(grad)
update.div_((self._rms(update) / group['clip_threshold']).clamp_(min=1.0))
update.mul_(lr_t)
if use_first_moment:
exp_avg = state['exp_avg']
exp_avg.mul_(group['beta1']).add_(update, alpha=1 - group['beta1'])
update = exp_avg
if group['weight_decay'] != 0:
p_fp32.add_(p_fp32, alpha=-group['weight_decay'] * lr_t)
p_fp32.add_(-update)
if p.dtype in {torch.float16, torch.bfloat16}:
p.copy_(p_fp32)
return loss
| 7,459 | 43.404762 | 114 | py |
RandStainNA | RandStainNA-master/classification/timm/optim/rmsprop_tf.py | """ RMSProp modified to behave like Tensorflow impl
Originally cut & paste from PyTorch RMSProp
https://github.com/pytorch/pytorch/blob/063946d2b3f3f1e953a2a3b54e0b34f1393de295/torch/optim/rmsprop.py
Licensed under BSD-Clause 3 (ish), https://github.com/pytorch/pytorch/blob/master/LICENSE
Modifications Copyright 2021 Ross Wightman
"""
import torch
from torch.optim import Optimizer
class RMSpropTF(Optimizer):
"""Implements RMSprop algorithm (TensorFlow style epsilon)
NOTE: This is a direct cut-and-paste of PyTorch RMSprop with eps applied before sqrt
and a few other modifications to closer match Tensorflow for matching hyper-params.
Noteworthy changes include:
1. Epsilon applied inside square-root
2. square_avg initialized to ones
3. LR scaling of update accumulated in momentum buffer
Proposed by G. Hinton in his
`course <http://www.cs.toronto.edu/~tijmen/csc321/slides/lecture_slides_lec6.pdf>`_.
The centered version first appears in `Generating Sequences
With Recurrent Neural Networks <https://arxiv.org/pdf/1308.0850v5.pdf>`_.
Arguments:
params (iterable): iterable of parameters to optimize or dicts defining
parameter groups
lr (float, optional): learning rate (default: 1e-2)
momentum (float, optional): momentum factor (default: 0)
alpha (float, optional): smoothing (decay) constant (default: 0.9)
eps (float, optional): term added to the denominator to improve
numerical stability (default: 1e-10)
centered (bool, optional) : if ``True``, compute the centered RMSProp,
the gradient is normalized by an estimation of its variance
weight_decay (float, optional): weight decay (L2 penalty) (default: 0)
decoupled_decay (bool, optional): decoupled weight decay as per https://arxiv.org/abs/1711.05101
lr_in_momentum (bool, optional): learning rate scaling is included in the momentum buffer
update as per defaults in Tensorflow
"""
def __init__(self, params, lr=1e-2, alpha=0.9, eps=1e-10, weight_decay=0, momentum=0., centered=False,
decoupled_decay=False, lr_in_momentum=True):
if not 0.0 <= lr:
raise ValueError("Invalid learning rate: {}".format(lr))
if not 0.0 <= eps:
raise ValueError("Invalid epsilon value: {}".format(eps))
if not 0.0 <= momentum:
raise ValueError("Invalid momentum value: {}".format(momentum))
if not 0.0 <= weight_decay:
raise ValueError("Invalid weight_decay value: {}".format(weight_decay))
if not 0.0 <= alpha:
raise ValueError("Invalid alpha value: {}".format(alpha))
defaults = dict(
lr=lr, momentum=momentum, alpha=alpha, eps=eps, centered=centered, weight_decay=weight_decay,
decoupled_decay=decoupled_decay, lr_in_momentum=lr_in_momentum)
super(RMSpropTF, self).__init__(params, defaults)
def __setstate__(self, state):
super(RMSpropTF, self).__setstate__(state)
for group in self.param_groups:
group.setdefault('momentum', 0)
group.setdefault('centered', False)
@torch.no_grad()
def step(self, closure=None):
"""Performs a single optimization step.
Arguments:
closure (callable, optional): A closure that reevaluates the model
and returns the loss.
"""
loss = None
if closure is not None:
with torch.enable_grad():
loss = closure()
for group in self.param_groups:
for p in group['params']:
if p.grad is None:
continue
grad = p.grad
if grad.is_sparse:
raise RuntimeError('RMSprop does not support sparse gradients')
state = self.state[p]
# State initialization
if len(state) == 0:
state['step'] = 0
state['square_avg'] = torch.ones_like(p) # PyTorch inits to zero
if group['momentum'] > 0:
state['momentum_buffer'] = torch.zeros_like(p)
if group['centered']:
state['grad_avg'] = torch.zeros_like(p)
square_avg = state['square_avg']
one_minus_alpha = 1. - group['alpha']
state['step'] += 1
if group['weight_decay'] != 0:
if group['decoupled_decay']:
p.mul_(1. - group['lr'] * group['weight_decay'])
else:
grad = grad.add(p, alpha=group['weight_decay'])
# Tensorflow order of ops for updating squared avg
square_avg.add_(grad.pow(2) - square_avg, alpha=one_minus_alpha)
# square_avg.mul_(alpha).addcmul_(grad, grad, value=1 - alpha) # PyTorch original
if group['centered']:
grad_avg = state['grad_avg']
grad_avg.add_(grad - grad_avg, alpha=one_minus_alpha)
avg = square_avg.addcmul(grad_avg, grad_avg, value=-1).add(group['eps']).sqrt_() # eps in sqrt
# grad_avg.mul_(alpha).add_(grad, alpha=1 - alpha) # PyTorch original
else:
avg = square_avg.add(group['eps']).sqrt_() # eps moved in sqrt
if group['momentum'] > 0:
buf = state['momentum_buffer']
# Tensorflow accumulates the LR scaling in the momentum buffer
if group['lr_in_momentum']:
buf.mul_(group['momentum']).addcdiv_(grad, avg, value=group['lr'])
p.add_(-buf)
else:
# PyTorch scales the param update by LR
buf.mul_(group['momentum']).addcdiv_(grad, avg)
p.add_(buf, alpha=-group['lr'])
else:
p.addcdiv_(grad, avg, value=-group['lr'])
return loss
| 6,143 | 42.885714 | 115 | py |
RandStainNA | RandStainNA-master/classification/timm/optim/sgdp.py | """
SGDP Optimizer Implementation copied from https://github.com/clovaai/AdamP/blob/master/adamp/sgdp.py
Paper: `Slowing Down the Weight Norm Increase in Momentum-based Optimizers` - https://arxiv.org/abs/2006.08217
Code: https://github.com/clovaai/AdamP
Copyright (c) 2020-present NAVER Corp.
MIT license
"""
import torch
import torch.nn.functional as F
from torch.optim.optimizer import Optimizer, required
import math
from .adamp import projection
class SGDP(Optimizer):
def __init__(self, params, lr=required, momentum=0, dampening=0,
weight_decay=0, nesterov=False, eps=1e-8, delta=0.1, wd_ratio=0.1):
defaults = dict(
lr=lr, momentum=momentum, dampening=dampening, weight_decay=weight_decay,
nesterov=nesterov, eps=eps, delta=delta, wd_ratio=wd_ratio)
super(SGDP, self).__init__(params, defaults)
@torch.no_grad()
def step(self, closure=None):
loss = None
if closure is not None:
with torch.enable_grad():
loss = closure()
for group in self.param_groups:
weight_decay = group['weight_decay']
momentum = group['momentum']
dampening = group['dampening']
nesterov = group['nesterov']
for p in group['params']:
if p.grad is None:
continue
grad = p.grad
state = self.state[p]
# State initialization
if len(state) == 0:
state['momentum'] = torch.zeros_like(p)
# SGD
buf = state['momentum']
buf.mul_(momentum).add_(grad, alpha=1. - dampening)
if nesterov:
d_p = grad + momentum * buf
else:
d_p = buf
# Projection
wd_ratio = 1.
if len(p.shape) > 1:
d_p, wd_ratio = projection(p, grad, d_p, group['delta'], group['wd_ratio'], group['eps'])
# Weight decay
if weight_decay != 0:
p.mul_(1. - group['lr'] * group['weight_decay'] * wd_ratio / (1-momentum))
# Step
p.add_(d_p, alpha=-group['lr'])
return loss
| 2,296 | 31.352113 | 110 | py |
RandStainNA | RandStainNA-master/classification/timm/optim/lars.py | """ PyTorch LARS / LARC Optimizer
An implementation of LARS (SGD) + LARC in PyTorch
Based on:
* PyTorch SGD: https://github.com/pytorch/pytorch/blob/1.7/torch/optim/sgd.py#L100
* NVIDIA APEX LARC: https://github.com/NVIDIA/apex/blob/master/apex/parallel/LARC.py
Additional cleanup and modifications to properly support PyTorch XLA.
Copyright 2021 Ross Wightman
"""
import torch
from torch.optim.optimizer import Optimizer
class Lars(Optimizer):
""" LARS for PyTorch
Paper: `Large batch training of Convolutional Networks` - https://arxiv.org/pdf/1708.03888.pdf
Args:
params (iterable): iterable of parameters to optimize or dicts defining parameter groups.
lr (float, optional): learning rate (default: 1.0).
momentum (float, optional): momentum factor (default: 0)
weight_decay (float, optional): weight decay (L2 penalty) (default: 0)
dampening (float, optional): dampening for momentum (default: 0)
nesterov (bool, optional): enables Nesterov momentum (default: False)
trust_coeff (float): trust coefficient for computing adaptive lr / trust_ratio (default: 0.001)
eps (float): eps for division denominator (default: 1e-8)
trust_clip (bool): enable LARC trust ratio clipping (default: False)
always_adapt (bool): always apply LARS LR adapt, otherwise only when group weight_decay != 0 (default: False)
"""
def __init__(
self,
params,
lr=1.0,
momentum=0,
dampening=0,
weight_decay=0,
nesterov=False,
trust_coeff=0.001,
eps=1e-8,
trust_clip=False,
always_adapt=False,
):
if lr < 0.0:
raise ValueError(f"Invalid learning rate: {lr}")
if momentum < 0.0:
raise ValueError(f"Invalid momentum value: {momentum}")
if weight_decay < 0.0:
raise ValueError(f"Invalid weight_decay value: {weight_decay}")
if nesterov and (momentum <= 0 or dampening != 0):
raise ValueError("Nesterov momentum requires a momentum and zero dampening")
defaults = dict(
lr=lr,
momentum=momentum,
dampening=dampening,
weight_decay=weight_decay,
nesterov=nesterov,
trust_coeff=trust_coeff,
eps=eps,
trust_clip=trust_clip,
always_adapt=always_adapt,
)
super().__init__(params, defaults)
def __setstate__(self, state):
super().__setstate__(state)
for group in self.param_groups:
group.setdefault("nesterov", False)
@torch.no_grad()
def step(self, closure=None):
"""Performs a single optimization step.
Args:
closure (callable, optional): A closure that reevaluates the model and returns the loss.
"""
loss = None
if closure is not None:
with torch.enable_grad():
loss = closure()
device = self.param_groups[0]['params'][0].device
one_tensor = torch.tensor(1.0, device=device) # because torch.where doesn't handle scalars correctly
for group in self.param_groups:
weight_decay = group['weight_decay']
momentum = group['momentum']
dampening = group['dampening']
nesterov = group['nesterov']
trust_coeff = group['trust_coeff']
eps = group['eps']
for p in group['params']:
if p.grad is None:
continue
grad = p.grad
# apply LARS LR adaptation, LARC clipping, weight decay
# ref: https://github.com/NVIDIA/apex/blob/master/apex/parallel/LARC.py
if weight_decay != 0 or group['always_adapt']:
w_norm = p.norm(2.0)
g_norm = grad.norm(2.0)
trust_ratio = trust_coeff * w_norm / (g_norm + w_norm * weight_decay + eps)
# FIXME nested where required since logical and/or not working in PT XLA
trust_ratio = torch.where(
w_norm > 0,
torch.where(g_norm > 0, trust_ratio, one_tensor),
one_tensor,
)
if group['trust_clip']:
trust_ratio = torch.minimum(trust_ratio / group['lr'], one_tensor)
grad.add(p, alpha=weight_decay)
grad.mul_(trust_ratio)
# apply SGD update https://github.com/pytorch/pytorch/blob/1.7/torch/optim/sgd.py#L100
if momentum != 0:
param_state = self.state[p]
if 'momentum_buffer' not in param_state:
buf = param_state['momentum_buffer'] = torch.clone(grad).detach()
else:
buf = param_state['momentum_buffer']
buf.mul_(momentum).add_(grad, alpha=1. - dampening)
if nesterov:
grad = grad.add(buf, alpha=momentum)
else:
grad = buf
p.add_(grad, alpha=-group['lr'])
return loss | 5,255 | 37.933333 | 117 | py |
RandStainNA | RandStainNA-master/classification/timm/optim/lookahead.py | """ Lookahead Optimizer Wrapper.
Implementation modified from: https://github.com/alphadl/lookahead.pytorch
Paper: `Lookahead Optimizer: k steps forward, 1 step back` - https://arxiv.org/abs/1907.08610
Hacked together by / Copyright 2020 Ross Wightman
"""
import torch
from torch.optim.optimizer import Optimizer
from collections import defaultdict
class Lookahead(Optimizer):
def __init__(self, base_optimizer, alpha=0.5, k=6):
# NOTE super().__init__() not called on purpose
if not 0.0 <= alpha <= 1.0:
raise ValueError(f'Invalid slow update rate: {alpha}')
if not 1 <= k:
raise ValueError(f'Invalid lookahead steps: {k}')
defaults = dict(lookahead_alpha=alpha, lookahead_k=k, lookahead_step=0)
self._base_optimizer = base_optimizer
self.param_groups = base_optimizer.param_groups
self.defaults = base_optimizer.defaults
self.defaults.update(defaults)
self.state = defaultdict(dict)
# manually add our defaults to the param groups
for name, default in defaults.items():
for group in self._base_optimizer.param_groups:
group.setdefault(name, default)
@torch.no_grad()
def update_slow(self, group):
for fast_p in group["params"]:
if fast_p.grad is None:
continue
param_state = self._base_optimizer.state[fast_p]
if 'lookahead_slow_buff' not in param_state:
param_state['lookahead_slow_buff'] = torch.empty_like(fast_p)
param_state['lookahead_slow_buff'].copy_(fast_p)
slow = param_state['lookahead_slow_buff']
slow.add_(fast_p - slow, alpha=group['lookahead_alpha'])
fast_p.copy_(slow)
def sync_lookahead(self):
for group in self._base_optimizer.param_groups:
self.update_slow(group)
@torch.no_grad()
def step(self, closure=None):
loss = self._base_optimizer.step(closure)
for group in self._base_optimizer.param_groups:
group['lookahead_step'] += 1
if group['lookahead_step'] % group['lookahead_k'] == 0:
self.update_slow(group)
return loss
def state_dict(self):
return self._base_optimizer.state_dict()
def load_state_dict(self, state_dict):
self._base_optimizer.load_state_dict(state_dict)
self.param_groups = self._base_optimizer.param_groups
| 2,463 | 38.741935 | 93 | py |
RandStainNA | RandStainNA-master/classification/timm/optim/optim_factory.py | """ Optimizer Factory w/ Custom Weight Decay
Hacked together by / Copyright 2021 Ross Wightman
"""
from typing import Optional
import torch
import torch.nn as nn
import torch.optim as optim
from .adabelief import AdaBelief
from .adafactor import Adafactor
from .adahessian import Adahessian
from .adamp import AdamP
from .lamb import Lamb
from .lars import Lars
from .lookahead import Lookahead
from .madgrad import MADGRAD
from .nadam import Nadam
from .nvnovograd import NvNovoGrad
from .radam import RAdam
from .rmsprop_tf import RMSpropTF
from .sgdp import SGDP
try:
from apex.optimizers import FusedNovoGrad, FusedAdam, FusedLAMB, FusedSGD
has_apex = True
except ImportError:
has_apex = False
def add_weight_decay(model, weight_decay=1e-5, skip_list=()):
decay = []
no_decay = []
for name, param in model.named_parameters():
if not param.requires_grad:
continue # frozen weights
if len(param.shape) == 1 or name.endswith(".bias") or name in skip_list:
no_decay.append(param)
else:
decay.append(param)
return [
{'params': no_decay, 'weight_decay': 0.},
{'params': decay, 'weight_decay': weight_decay}]
def optimizer_kwargs(cfg):
""" cfg/argparse to kwargs helper
Convert optimizer args in argparse args or cfg like object to keyword args for updated create fn.
"""
kwargs = dict(
opt=cfg.opt,
lr=cfg.lr,
weight_decay=cfg.weight_decay,
momentum=cfg.momentum)
if getattr(cfg, 'opt_eps', None) is not None:
kwargs['eps'] = cfg.opt_eps
if getattr(cfg, 'opt_betas', None) is not None:
kwargs['betas'] = cfg.opt_betas
if getattr(cfg, 'opt_args', None) is not None:
kwargs.update(cfg.opt_args)
return kwargs
def create_optimizer(args, model, filter_bias_and_bn=True):
""" Legacy optimizer factory for backwards compatibility.
NOTE: Use create_optimizer_v2 for new code.
"""
return create_optimizer_v2(
model,
**optimizer_kwargs(cfg=args),
filter_bias_and_bn=filter_bias_and_bn,
)
def create_optimizer_v2(
model_or_params,
opt: str = 'sgd',
lr: Optional[float] = None,
weight_decay: float = 0.,
momentum: float = 0.9,
filter_bias_and_bn: bool = True,
**kwargs):
""" Create an optimizer.
TODO currently the model is passed in and all parameters are selected for optimization.
For more general use an interface that allows selection of parameters to optimize and lr groups, one of:
* a filter fn interface that further breaks params into groups in a weight_decay compatible fashion
* expose the parameters interface and leave it up to caller
Args:
model_or_params (nn.Module): model containing parameters to optimize
opt: name of optimizer to create
lr: initial learning rate
weight_decay: weight decay to apply in optimizer
momentum: momentum for momentum based optimizers (others may use betas via kwargs)
filter_bias_and_bn: filter out bias, bn and other 1d params from weight decay
**kwargs: extra optimizer specific kwargs to pass through
Returns:
Optimizer
"""
if isinstance(model_or_params, nn.Module):
# a model was passed in, extract parameters and add weight decays to appropriate layers
if weight_decay and filter_bias_and_bn:
skip = {}
if hasattr(model_or_params, 'no_weight_decay'):
skip = model_or_params.no_weight_decay()
parameters = add_weight_decay(model_or_params, weight_decay, skip)
weight_decay = 0.
else:
parameters = model_or_params.parameters()
else:
# iterable of parameters or param groups passed in
parameters = model_or_params
opt_lower = opt.lower()
opt_split = opt_lower.split('_')
opt_lower = opt_split[-1]
if 'fused' in opt_lower:
assert has_apex and torch.cuda.is_available(), 'APEX and CUDA required for fused optimizers'
opt_args = dict(weight_decay=weight_decay, **kwargs)
if lr is not None:
opt_args.setdefault('lr', lr)
# basic SGD & related
if opt_lower == 'sgd' or opt_lower == 'nesterov':
# NOTE 'sgd' refers to SGD + nesterov momentum for legacy / backwards compat reasons
opt_args.pop('eps', None)
optimizer = optim.SGD(parameters, momentum=momentum, nesterov=True, **opt_args)
elif opt_lower == 'momentum':
opt_args.pop('eps', None)
optimizer = optim.SGD(parameters, momentum=momentum, nesterov=False, **opt_args)
elif opt_lower == 'sgdp':
optimizer = SGDP(parameters, momentum=momentum, nesterov=True, **opt_args)
# adaptive
elif opt_lower == 'adam':
optimizer = optim.Adam(parameters, **opt_args)
elif opt_lower == 'adamw':
optimizer = optim.AdamW(parameters, **opt_args)
elif opt_lower == 'adamp':
optimizer = AdamP(parameters, wd_ratio=0.01, nesterov=True, **opt_args)
elif opt_lower == 'nadam':
try:
# NOTE PyTorch >= 1.10 should have native NAdam
optimizer = optim.Nadam(parameters, **opt_args)
except AttributeError:
optimizer = Nadam(parameters, **opt_args)
elif opt_lower == 'radam':
optimizer = RAdam(parameters, **opt_args)
elif opt_lower == 'adamax':
optimizer = optim.Adamax(parameters, **opt_args)
elif opt_lower == 'adabelief':
optimizer = AdaBelief(parameters, rectify=False, **opt_args)
elif opt_lower == 'radabelief':
optimizer = AdaBelief(parameters, rectify=True, **opt_args)
elif opt_lower == 'adadelta':
optimizer = optim.Adadelta(parameters, **opt_args)
elif opt_lower == 'adagrad':
opt_args.setdefault('eps', 1e-8)
optimizer = optim.Adagrad(parameters, **opt_args)
elif opt_lower == 'adafactor':
optimizer = Adafactor(parameters, **opt_args)
elif opt_lower == 'lamb':
optimizer = Lamb(parameters, **opt_args)
elif opt_lower == 'lambc':
optimizer = Lamb(parameters, trust_clip=True, **opt_args)
elif opt_lower == 'larc':
optimizer = Lars(parameters, momentum=momentum, trust_clip=True, **opt_args)
elif opt_lower == 'lars':
optimizer = Lars(parameters, momentum=momentum, **opt_args)
elif opt_lower == 'nlarc':
optimizer = Lars(parameters, momentum=momentum, trust_clip=True, nesterov=True, **opt_args)
elif opt_lower == 'nlars':
optimizer = Lars(parameters, momentum=momentum, nesterov=True, **opt_args)
elif opt_lower == 'madgrad':
optimizer = MADGRAD(parameters, momentum=momentum, **opt_args)
elif opt_lower == 'madgradw':
optimizer = MADGRAD(parameters, momentum=momentum, decoupled_decay=True, **opt_args)
elif opt_lower == 'novograd' or opt_lower == 'nvnovograd':
optimizer = NvNovoGrad(parameters, **opt_args)
elif opt_lower == 'rmsprop':
optimizer = optim.RMSprop(parameters, alpha=0.9, momentum=momentum, **opt_args)
elif opt_lower == 'rmsproptf':
optimizer = RMSpropTF(parameters, alpha=0.9, momentum=momentum, **opt_args)
# second order
elif opt_lower == 'adahessian':
optimizer = Adahessian(parameters, **opt_args)
# NVIDIA fused optimizers, require APEX to be installed
elif opt_lower == 'fusedsgd':
opt_args.pop('eps', None)
optimizer = FusedSGD(parameters, momentum=momentum, nesterov=True, **opt_args)
elif opt_lower == 'fusedmomentum':
opt_args.pop('eps', None)
optimizer = FusedSGD(parameters, momentum=momentum, nesterov=False, **opt_args)
elif opt_lower == 'fusedadam':
optimizer = FusedAdam(parameters, adam_w_mode=False, **opt_args)
elif opt_lower == 'fusedadamw':
optimizer = FusedAdam(parameters, adam_w_mode=True, **opt_args)
elif opt_lower == 'fusedlamb':
optimizer = FusedLAMB(parameters, **opt_args)
elif opt_lower == 'fusednovograd':
opt_args.setdefault('betas', (0.95, 0.98))
optimizer = FusedNovoGrad(parameters, **opt_args)
else:
assert False and "Invalid optimizer"
raise ValueError
if len(opt_split) > 1:
if opt_split[0] == 'lookahead':
optimizer = Lookahead(optimizer)
return optimizer
| 8,415 | 37.605505 | 108 | py |
RandStainNA | RandStainNA-master/classification/timm/optim/lamb.py | """ PyTorch Lamb optimizer w/ behaviour similar to NVIDIA FusedLamb
This optimizer code was adapted from the following (starting with latest)
* https://github.com/HabanaAI/Model-References/blob/2b435114fe8e31f159b1d3063b8280ae37af7423/PyTorch/nlp/bert/pretraining/lamb.py
* https://github.com/NVIDIA/DeepLearningExamples/blob/master/PyTorch/LanguageModeling/Transformer-XL/pytorch/lamb.py
* https://github.com/cybertronai/pytorch-lamb
Use FusedLamb if you can (GPU). The reason for including this variant of Lamb is to have a version that is
similar in behaviour to APEX FusedLamb if you aren't using NVIDIA GPUs or cannot install/use APEX.
In addition to some cleanup, this Lamb impl has been modified to support PyTorch XLA and has been tested on TPU.
Original copyrights for above sources are below.
Modifications Copyright 2021 Ross Wightman
"""
# Copyright (c) 2021, Habana Labs Ltd. All rights reserved.
# Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# MIT License
#
# Copyright (c) 2019 cybertronai
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import math
import torch
from torch.optim import Optimizer
class Lamb(Optimizer):
"""Implements a pure pytorch variant of FuseLAMB (NvLamb variant) optimizer from apex.optimizers.FusedLAMB
reference: https://github.com/NVIDIA/DeepLearningExamples/blob/master/PyTorch/LanguageModeling/Transformer-XL/pytorch/lamb.py
LAMB was proposed in `Large Batch Optimization for Deep Learning: Training BERT in 76 minutes`_.
Arguments:
params (iterable): iterable of parameters to optimize or dicts defining parameter groups.
lr (float, optional): learning rate. (default: 1e-3)
betas (Tuple[float, float], optional): coefficients used for computing
running averages of gradient and its norm. (default: (0.9, 0.999))
eps (float, optional): term added to the denominator to improve
numerical stability. (default: 1e-8)
weight_decay (float, optional): weight decay (L2 penalty) (default: 0)
grad_averaging (bool, optional): whether apply (1-beta2) to grad when
calculating running averages of gradient. (default: True)
max_grad_norm (float, optional): value used to clip global grad norm (default: 1.0)
trust_clip (bool): enable LAMBC trust ratio clipping (default: False)
always_adapt (boolean, optional): Apply adaptive learning rate to 0.0
weight decay parameter (default: False)
.. _Large Batch Optimization for Deep Learning - Training BERT in 76 minutes:
https://arxiv.org/abs/1904.00962
.. _On the Convergence of Adam and Beyond:
https://openreview.net/forum?id=ryQu7f-RZ
"""
def __init__(
self, params, lr=1e-3, bias_correction=True, betas=(0.9, 0.999), eps=1e-6,
weight_decay=0.01, grad_averaging=True, max_grad_norm=1.0, trust_clip=False, always_adapt=False):
defaults = dict(
lr=lr, bias_correction=bias_correction, betas=betas, eps=eps, weight_decay=weight_decay,
grad_averaging=grad_averaging, max_grad_norm=max_grad_norm,
trust_clip=trust_clip, always_adapt=always_adapt)
super().__init__(params, defaults)
@torch.no_grad()
def step(self, closure=None):
"""Performs a single optimization step.
Arguments:
closure (callable, optional): A closure that reevaluates the model
and returns the loss.
"""
loss = None
if closure is not None:
with torch.enable_grad():
loss = closure()
device = self.param_groups[0]['params'][0].device
one_tensor = torch.tensor(1.0, device=device) # because torch.where doesn't handle scalars correctly
global_grad_norm = torch.zeros(1, device=device)
for group in self.param_groups:
for p in group['params']:
if p.grad is None:
continue
grad = p.grad
if grad.is_sparse:
raise RuntimeError('Lamb does not support sparse gradients, consider SparseAdam instad.')
global_grad_norm.add_(grad.pow(2).sum())
global_grad_norm = torch.sqrt(global_grad_norm)
# FIXME it'd be nice to remove explicit tensor conversion of scalars when torch.where promotes
# scalar types properly https://github.com/pytorch/pytorch/issues/9190
max_grad_norm = torch.tensor(self.defaults['max_grad_norm'], device=device)
clip_global_grad_norm = torch.where(
global_grad_norm > max_grad_norm,
global_grad_norm / max_grad_norm,
one_tensor)
for group in self.param_groups:
bias_correction = 1 if group['bias_correction'] else 0
beta1, beta2 = group['betas']
grad_averaging = 1 if group['grad_averaging'] else 0
beta3 = 1 - beta1 if grad_averaging else 1.0
# assume same step across group now to simplify things
# per parameter step can be easily support by making it tensor, or pass list into kernel
if 'step' in group:
group['step'] += 1
else:
group['step'] = 1
if bias_correction:
bias_correction1 = 1 - beta1 ** group['step']
bias_correction2 = 1 - beta2 ** group['step']
else:
bias_correction1, bias_correction2 = 1.0, 1.0
for p in group['params']:
if p.grad is None:
continue
grad = p.grad.div_(clip_global_grad_norm)
state = self.state[p]
# State initialization
if len(state) == 0:
# Exponential moving average of gradient valuesa
state['exp_avg'] = torch.zeros_like(p)
# Exponential moving average of squared gradient values
state['exp_avg_sq'] = torch.zeros_like(p)
exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq']
# Decay the first and second moment running average coefficient
exp_avg.mul_(beta1).add_(grad, alpha=beta3) # m_t
exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=1 - beta2) # v_t
denom = (exp_avg_sq.sqrt() / math.sqrt(bias_correction2)).add_(group['eps'])
update = (exp_avg / bias_correction1).div_(denom)
weight_decay = group['weight_decay']
if weight_decay != 0:
update.add_(p, alpha=weight_decay)
if weight_decay != 0 or group['always_adapt']:
# Layer-wise LR adaptation. By default, skip adaptation on parameters that are
# excluded from weight decay, unless always_adapt == True, then always enabled.
w_norm = p.norm(2.0)
g_norm = update.norm(2.0)
# FIXME nested where required since logical and/or not working in PT XLA
trust_ratio = torch.where(
w_norm > 0,
torch.where(g_norm > 0, w_norm / g_norm, one_tensor),
one_tensor,
)
if group['trust_clip']:
# LAMBC trust clipping, upper bound fixed at one
trust_ratio = torch.minimum(trust_ratio, one_tensor)
update.mul_(trust_ratio)
p.add_(update, alpha=-group['lr'])
return loss
| 9,184 | 46.590674 | 129 | py |
RandStainNA | RandStainNA-master/classification/timm/loss/asymmetric_loss.py | import torch
import torch.nn as nn
class AsymmetricLossMultiLabel(nn.Module):
def __init__(self, gamma_neg=4, gamma_pos=1, clip=0.05, eps=1e-8, disable_torch_grad_focal_loss=False):
super(AsymmetricLossMultiLabel, self).__init__()
self.gamma_neg = gamma_neg
self.gamma_pos = gamma_pos
self.clip = clip
self.disable_torch_grad_focal_loss = disable_torch_grad_focal_loss
self.eps = eps
def forward(self, x, y):
""""
Parameters
----------
x: input logits
y: targets (multi-label binarized vector)
"""
# Calculating Probabilities
x_sigmoid = torch.sigmoid(x)
xs_pos = x_sigmoid
xs_neg = 1 - x_sigmoid
# Asymmetric Clipping
if self.clip is not None and self.clip > 0:
xs_neg = (xs_neg + self.clip).clamp(max=1)
# Basic CE calculation
los_pos = y * torch.log(xs_pos.clamp(min=self.eps))
los_neg = (1 - y) * torch.log(xs_neg.clamp(min=self.eps))
loss = los_pos + los_neg
# Asymmetric Focusing
if self.gamma_neg > 0 or self.gamma_pos > 0:
if self.disable_torch_grad_focal_loss:
torch._C.set_grad_enabled(False)
pt0 = xs_pos * y
pt1 = xs_neg * (1 - y) # pt = p if t > 0 else 1-p
pt = pt0 + pt1
one_sided_gamma = self.gamma_pos * y + self.gamma_neg * (1 - y)
one_sided_w = torch.pow(1 - pt, one_sided_gamma)
if self.disable_torch_grad_focal_loss:
torch._C.set_grad_enabled(True)
loss *= one_sided_w
return -loss.sum()
class AsymmetricLossSingleLabel(nn.Module):
def __init__(self, gamma_pos=1, gamma_neg=4, eps: float = 0.1, reduction='mean'):
super(AsymmetricLossSingleLabel, self).__init__()
self.eps = eps
self.logsoftmax = nn.LogSoftmax(dim=-1)
self.targets_classes = [] # prevent gpu repeated memory allocation
self.gamma_pos = gamma_pos
self.gamma_neg = gamma_neg
self.reduction = reduction
def forward(self, inputs, target, reduction=None):
""""
Parameters
----------
x: input logits
y: targets (1-hot vector)
"""
num_classes = inputs.size()[-1]
log_preds = self.logsoftmax(inputs)
self.targets_classes = torch.zeros_like(inputs).scatter_(1, target.long().unsqueeze(1), 1)
# ASL weights
targets = self.targets_classes
anti_targets = 1 - targets
xs_pos = torch.exp(log_preds)
xs_neg = 1 - xs_pos
xs_pos = xs_pos * targets
xs_neg = xs_neg * anti_targets
asymmetric_w = torch.pow(1 - xs_pos - xs_neg,
self.gamma_pos * targets + self.gamma_neg * anti_targets)
log_preds = log_preds * asymmetric_w
if self.eps > 0: # label smoothing
self.targets_classes.mul_(1 - self.eps).add_(self.eps / num_classes)
# loss calculation
loss = - self.targets_classes.mul(log_preds)
loss = loss.sum(dim=-1)
if self.reduction == 'mean':
loss = loss.mean()
return loss
| 3,225 | 31.918367 | 107 | py |
RandStainNA | RandStainNA-master/classification/timm/loss/cross_entropy.py | """ Cross Entropy w/ smoothing or soft targets
Hacked together by / Copyright 2021 Ross Wightman
"""
import torch
import torch.nn as nn
import torch.nn.functional as F
class LabelSmoothingCrossEntropy(nn.Module):
""" NLL loss with label smoothing.
"""
def __init__(self, smoothing=0.1):
super(LabelSmoothingCrossEntropy, self).__init__()
assert smoothing < 1.0
self.smoothing = smoothing
self.confidence = 1. - smoothing
def forward(self, x: torch.Tensor, target: torch.Tensor) -> torch.Tensor:
logprobs = F.log_softmax(x, dim=-1)
nll_loss = -logprobs.gather(dim=-1, index=target.unsqueeze(1))
nll_loss = nll_loss.squeeze(1)
smooth_loss = -logprobs.mean(dim=-1)
loss = self.confidence * nll_loss + self.smoothing * smooth_loss
return loss.mean()
class SoftTargetCrossEntropy(nn.Module):
def __init__(self):
super(SoftTargetCrossEntropy, self).__init__()
def forward(self, x: torch.Tensor, target: torch.Tensor) -> torch.Tensor:
loss = torch.sum(-target * F.log_softmax(x, dim=-1), dim=-1)
return loss.mean()
| 1,145 | 29.972973 | 77 | py |
RandStainNA | RandStainNA-master/classification/timm/loss/jsd.py | import torch
import torch.nn as nn
import torch.nn.functional as F
from .cross_entropy import LabelSmoothingCrossEntropy
class JsdCrossEntropy(nn.Module):
""" Jensen-Shannon Divergence + Cross-Entropy Loss
Based on impl here: https://github.com/google-research/augmix/blob/master/imagenet.py
From paper: 'AugMix: A Simple Data Processing Method to Improve Robustness and Uncertainty -
https://arxiv.org/abs/1912.02781
Hacked together by / Copyright 2020 Ross Wightman
"""
def __init__(self, num_splits=3, alpha=12, smoothing=0.1):
super().__init__()
self.num_splits = num_splits
self.alpha = alpha
if smoothing is not None and smoothing > 0:
self.cross_entropy_loss = LabelSmoothingCrossEntropy(smoothing)
else:
self.cross_entropy_loss = torch.nn.CrossEntropyLoss()
def __call__(self, output, target):
split_size = output.shape[0] // self.num_splits
assert split_size * self.num_splits == output.shape[0]
logits_split = torch.split(output, split_size)
# Cross-entropy is only computed on clean images
loss = self.cross_entropy_loss(logits_split[0], target[:split_size])
probs = [F.softmax(logits, dim=1) for logits in logits_split]
# Clamp mixture distribution to avoid exploding KL divergence
logp_mixture = torch.clamp(torch.stack(probs).mean(axis=0), 1e-7, 1).log()
loss += self.alpha * sum([F.kl_div(
logp_mixture, p_split, reduction='batchmean') for p_split in probs]) / len(probs)
return loss
| 1,595 | 38.9 | 96 | py |
RandStainNA | RandStainNA-master/classification/timm/loss/binary_cross_entropy.py | """ Binary Cross Entropy w/ a few extras
Hacked together by / Copyright 2021 Ross Wightman
"""
from typing import Optional
import torch
import torch.nn as nn
import torch.nn.functional as F
class BinaryCrossEntropy(nn.Module):
""" BCE with optional one-hot from dense targets, label smoothing, thresholding
NOTE for experiments comparing CE to BCE /w label smoothing, may remove
"""
def __init__(
self, smoothing=0.1, target_threshold: Optional[float] = None, weight: Optional[torch.Tensor] = None,
reduction: str = 'mean', pos_weight: Optional[torch.Tensor] = None):
super(BinaryCrossEntropy, self).__init__()
assert 0. <= smoothing < 1.0
self.smoothing = smoothing
self.target_threshold = target_threshold
self.reduction = reduction
self.register_buffer('weight', weight)
self.register_buffer('pos_weight', pos_weight)
def forward(self, x: torch.Tensor, target: torch.Tensor) -> torch.Tensor:
assert x.shape[0] == target.shape[0]
if target.shape != x.shape:
# NOTE currently assume smoothing or other label softening is applied upstream if targets are already sparse
num_classes = x.shape[-1]
# FIXME should off/on be different for smoothing w/ BCE? Other impl out there differ
off_value = self.smoothing / num_classes
on_value = 1. - self.smoothing + off_value
target = target.long().view(-1, 1)
target = torch.full(
(target.size()[0], num_classes),
off_value,
device=x.device, dtype=x.dtype).scatter_(1, target, on_value)
if self.target_threshold is not None:
# Make target 0, or 1 if threshold set
target = target.gt(self.target_threshold).to(dtype=target.dtype)
return F.binary_cross_entropy_with_logits(
x, target,
self.weight,
pos_weight=self.pos_weight,
reduction=self.reduction)
| 2,030 | 41.3125 | 120 | py |
RandStainNA | RandStainNA-master/classification/timm/utils/jit.py | """ JIT scripting/tracing utils
Hacked together by / Copyright 2020 Ross Wightman
"""
import os
import torch
def set_jit_legacy():
""" Set JIT executor to legacy w/ support for op fusion
This is hopefully a temporary need in 1.5/1.5.1/1.6 to restore performance due to changes
in the JIT exectutor. These API are not supported so could change.
"""
#
assert hasattr(torch._C, '_jit_set_profiling_executor'), "Old JIT behavior doesn't exist!"
torch._C._jit_set_profiling_executor(False)
torch._C._jit_set_profiling_mode(False)
torch._C._jit_override_can_fuse_on_gpu(True)
#torch._C._jit_set_texpr_fuser_enabled(True)
def set_jit_fuser(fuser):
if fuser == "te":
# default fuser should be == 'te'
torch._C._jit_set_profiling_executor(True)
torch._C._jit_set_profiling_mode(True)
torch._C._jit_override_can_fuse_on_cpu(False)
torch._C._jit_override_can_fuse_on_gpu(True)
torch._C._jit_set_texpr_fuser_enabled(True)
elif fuser == "old" or fuser == "legacy":
torch._C._jit_set_profiling_executor(False)
torch._C._jit_set_profiling_mode(False)
torch._C._jit_override_can_fuse_on_gpu(True)
torch._C._jit_set_texpr_fuser_enabled(False)
elif fuser == "nvfuser" or fuser == "nvf":
os.environ['PYTORCH_CUDA_FUSER_DISABLE_FALLBACK'] = '1'
os.environ['PYTORCH_CUDA_FUSER_DISABLE_FMA'] = '1'
os.environ['PYTORCH_CUDA_FUSER_JIT_OPT_LEVEL'] = '0'
torch._C._jit_set_texpr_fuser_enabled(False)
torch._C._jit_set_profiling_executor(True)
torch._C._jit_set_profiling_mode(True)
torch._C._jit_can_fuse_on_cpu()
torch._C._jit_can_fuse_on_gpu()
torch._C._jit_override_can_fuse_on_cpu(False)
torch._C._jit_override_can_fuse_on_gpu(False)
torch._C._jit_set_nvfuser_guard_mode(True)
torch._C._jit_set_nvfuser_enabled(True)
else:
assert False, f"Invalid jit fuser ({fuser})"
| 1,992 | 38.078431 | 94 | py |
RandStainNA | RandStainNA-master/classification/timm/utils/clip_grad.py | import torch
from timm.utils.agc import adaptive_clip_grad
def dispatch_clip_grad(parameters, value: float, mode: str = 'norm', norm_type: float = 2.0):
""" Dispatch to gradient clipping method
Args:
parameters (Iterable): model parameters to clip
value (float): clipping value/factor/norm, mode dependant
mode (str): clipping mode, one of 'norm', 'value', 'agc'
norm_type (float): p-norm, default 2.0
"""
if mode == 'norm':
torch.nn.utils.clip_grad_norm_(parameters, value, norm_type=norm_type)
elif mode == 'value':
torch.nn.utils.clip_grad_value_(parameters, value)
elif mode == 'agc':
adaptive_clip_grad(parameters, value, norm_type=norm_type)
else:
assert False, f"Unknown clip mode ({mode})."
| 796 | 32.208333 | 93 | py |
RandStainNA | RandStainNA-master/classification/timm/utils/cuda.py | """ CUDA / AMP utils
Hacked together by / Copyright 2020 Ross Wightman
"""
import torch
try:
from apex import amp
has_apex = True
except ImportError:
amp = None
has_apex = False
from .clip_grad import dispatch_clip_grad
class ApexScaler:
state_dict_key = "amp"
def __call__(self, loss, optimizer, clip_grad=None, clip_mode='norm', parameters=None, create_graph=False):
with amp.scale_loss(loss, optimizer) as scaled_loss:
scaled_loss.backward(create_graph=create_graph)
if clip_grad is not None:
dispatch_clip_grad(amp.master_params(optimizer), clip_grad, mode=clip_mode)
optimizer.step()
def state_dict(self):
if 'state_dict' in amp.__dict__:
return amp.state_dict()
def load_state_dict(self, state_dict):
if 'load_state_dict' in amp.__dict__:
amp.load_state_dict(state_dict)
class NativeScaler:
state_dict_key = "amp_scaler"
def __init__(self):
self._scaler = torch.cuda.amp.GradScaler()
def __call__(self, loss, optimizer, clip_grad=None, clip_mode='norm', parameters=None, create_graph=False):
self._scaler.scale(loss).backward(create_graph=create_graph)
if clip_grad is not None:
assert parameters is not None
self._scaler.unscale_(optimizer) # unscale the gradients of optimizer's assigned params in-place
dispatch_clip_grad(parameters, clip_grad, mode=clip_mode)
self._scaler.step(optimizer)
self._scaler.update()
def state_dict(self):
return self._scaler.state_dict()
def load_state_dict(self, state_dict):
self._scaler.load_state_dict(state_dict)
| 1,703 | 29.428571 | 111 | py |
RandStainNA | RandStainNA-master/classification/timm/utils/model.py | """ Model / state_dict utils
Hacked together by / Copyright 2020 Ross Wightman
"""
import fnmatch
import torch
from torchvision.ops.misc import FrozenBatchNorm2d
from .model_ema import ModelEma
def unwrap_model(model):
if isinstance(model, ModelEma):
return unwrap_model(model.ema)
else:
return model.module if hasattr(model, 'module') else model
def get_state_dict(model, unwrap_fn=unwrap_model):
return unwrap_fn(model).state_dict()
def avg_sq_ch_mean(model, input, output):
""" calculate average channel square mean of output activations
"""
return torch.mean(output.mean(axis=[0, 2, 3]) ** 2).item()
def avg_ch_var(model, input, output):
""" calculate average channel variance of output activations
"""
return torch.mean(output.var(axis=[0, 2, 3])).item()
def avg_ch_var_residual(model, input, output):
""" calculate average channel variance of output activations
"""
return torch.mean(output.var(axis=[0, 2, 3])).item()
class ActivationStatsHook:
"""Iterates through each of `model`'s modules and matches modules using unix pattern
matching based on `hook_fn_locs` and registers `hook_fn` to the module if there is
a match.
Arguments:
model (nn.Module): model from which we will extract the activation stats
hook_fn_locs (List[str]): List of `hook_fn` locations based on Unix type string
matching with the name of model's modules.
hook_fns (List[Callable]): List of hook functions to be registered at every
module in `layer_names`.
Inspiration from https://docs.fast.ai/callback.hook.html.
Refer to https://gist.github.com/amaarora/6e56942fcb46e67ba203f3009b30d950 for an example
on how to plot Signal Propogation Plots using `ActivationStatsHook`.
"""
def __init__(self, model, hook_fn_locs, hook_fns):
self.model = model
self.hook_fn_locs = hook_fn_locs
self.hook_fns = hook_fns
if len(hook_fn_locs) != len(hook_fns):
raise ValueError("Please provide `hook_fns` for each `hook_fn_locs`, \
their lengths are different.")
self.stats = dict((hook_fn.__name__, []) for hook_fn in hook_fns)
for hook_fn_loc, hook_fn in zip(hook_fn_locs, hook_fns):
self.register_hook(hook_fn_loc, hook_fn)
def _create_hook(self, hook_fn):
def append_activation_stats(module, input, output):
out = hook_fn(module, input, output)
self.stats[hook_fn.__name__].append(out)
return append_activation_stats
def register_hook(self, hook_fn_loc, hook_fn):
for name, module in self.model.named_modules():
if not fnmatch.fnmatch(name, hook_fn_loc):
continue
module.register_forward_hook(self._create_hook(hook_fn))
def extract_spp_stats(
model,
hook_fn_locs,
hook_fns,
input_shape=[8, 3, 224, 224]):
"""Extract average square channel mean and variance of activations during
forward pass to plot Signal Propogation Plots (SPP).
Paper: https://arxiv.org/abs/2101.08692
Example Usage: https://gist.github.com/amaarora/6e56942fcb46e67ba203f3009b30d950
"""
x = torch.normal(0., 1., input_shape)
hook = ActivationStatsHook(model, hook_fn_locs=hook_fn_locs, hook_fns=hook_fns)
_ = model(x)
return hook.stats
def freeze_batch_norm_2d(module):
"""
Converts all `BatchNorm2d` and `SyncBatchNorm` layers of provided module into `FrozenBatchNorm2d`. If `module` is
itself an instance of either `BatchNorm2d` or `SyncBatchNorm`, it is converted into `FrozenBatchNorm2d` and
returned. Otherwise, the module is walked recursively and submodules are converted in place.
Args:
module (torch.nn.Module): Any PyTorch module.
Returns:
torch.nn.Module: Resulting module
Inspired by https://github.com/pytorch/pytorch/blob/a5895f85be0f10212791145bfedc0261d364f103/torch/nn/modules/batchnorm.py#L762
"""
res = module
if isinstance(module, (torch.nn.modules.batchnorm.BatchNorm2d, torch.nn.modules.batchnorm.SyncBatchNorm)):
res = FrozenBatchNorm2d(module.num_features)
res.num_features = module.num_features
res.affine = module.affine
if module.affine:
res.weight.data = module.weight.data.clone().detach()
res.bias.data = module.bias.data.clone().detach()
res.running_mean.data = module.running_mean.data
res.running_var.data = module.running_var.data
res.eps = module.eps
else:
for name, child in module.named_children():
new_child = freeze_batch_norm_2d(child)
if new_child is not child:
res.add_module(name, new_child)
return res
def unfreeze_batch_norm_2d(module):
"""
Converts all `FrozenBatchNorm2d` layers of provided module into `BatchNorm2d`. If `module` is itself and instance
of `FrozenBatchNorm2d`, it is converted into `BatchNorm2d` and returned. Otherwise, the module is walked
recursively and submodules are converted in place.
Args:
module (torch.nn.Module): Any PyTorch module.
Returns:
torch.nn.Module: Resulting module
Inspired by https://github.com/pytorch/pytorch/blob/a5895f85be0f10212791145bfedc0261d364f103/torch/nn/modules/batchnorm.py#L762
"""
res = module
if isinstance(module, FrozenBatchNorm2d):
res = torch.nn.BatchNorm2d(module.num_features)
if module.affine:
res.weight.data = module.weight.data.clone().detach()
res.bias.data = module.bias.data.clone().detach()
res.running_mean.data = module.running_mean.data
res.running_var.data = module.running_var.data
res.eps = module.eps
else:
for name, child in module.named_children():
new_child = unfreeze_batch_norm_2d(child)
if new_child is not child:
res.add_module(name, new_child)
return res
def _freeze_unfreeze(root_module, submodules=[], include_bn_running_stats=True, mode='freeze'):
"""
Freeze or unfreeze parameters of the specified modules and those of all their hierarchical descendants. This is
done in place.
Args:
root_module (nn.Module, optional): Root module relative to which the `submodules` are referenced.
submodules (list[str]): List of modules for which the parameters will be (un)frozen. They are to be provided as
named modules relative to the root module (accessible via `root_module.named_modules()`). An empty list
means that the whole root module will be (un)frozen. Defaults to []
include_bn_running_stats (bool): Whether to also (un)freeze the running statistics of batch norm 2d layers.
Defaults to `True`.
mode (bool): Whether to freeze ("freeze") or unfreeze ("unfreeze"). Defaults to `"freeze"`.
"""
assert mode in ["freeze", "unfreeze"], '`mode` must be one of "freeze" or "unfreeze"'
if isinstance(root_module, (torch.nn.modules.batchnorm.BatchNorm2d, torch.nn.modules.batchnorm.SyncBatchNorm)):
# Raise assertion here because we can't convert it in place
raise AssertionError(
"You have provided a batch norm layer as the `root module`. Please use "
"`timm.utils.model.freeze_batch_norm_2d` or `timm.utils.model.unfreeze_batch_norm_2d` instead.")
if isinstance(submodules, str):
submodules = [submodules]
named_modules = submodules
submodules = [root_module.get_submodule(m) for m in submodules]
if not len(submodules):
named_modules, submodules = list(zip(*root_module.named_children()))
for n, m in zip(named_modules, submodules):
# (Un)freeze parameters
for p in m.parameters():
p.requires_grad = False if mode == 'freeze' else True
if include_bn_running_stats:
# Helper to add submodule specified as a named_module
def _add_submodule(module, name, submodule):
split = name.rsplit('.', 1)
if len(split) > 1:
module.get_submodule(split[0]).add_module(split[1], submodule)
else:
module.add_module(name, submodule)
# Freeze batch norm
if mode == 'freeze':
res = freeze_batch_norm_2d(m)
# It's possible that `m` is a type of BatchNorm in itself, in which case `unfreeze_batch_norm_2d` won't
# convert it in place, but will return the converted result. In this case `res` holds the converted
# result and we may try to re-assign the named module
if isinstance(m, (torch.nn.modules.batchnorm.BatchNorm2d, torch.nn.modules.batchnorm.SyncBatchNorm)):
_add_submodule(root_module, n, res)
# Unfreeze batch norm
else:
res = unfreeze_batch_norm_2d(m)
# Ditto. See note above in mode == 'freeze' branch
if isinstance(m, FrozenBatchNorm2d):
_add_submodule(root_module, n, res)
def freeze(root_module, submodules=[], include_bn_running_stats=True):
"""
Freeze parameters of the specified modules and those of all their hierarchical descendants. This is done in place.
Args:
root_module (nn.Module): Root module relative to which `submodules` are referenced.
submodules (list[str]): List of modules for which the parameters will be frozen. They are to be provided as
named modules relative to the root module (accessible via `root_module.named_modules()`). An empty list
means that the whole root module will be frozen. Defaults to `[]`.
include_bn_running_stats (bool): Whether to also freeze the running statistics of `BatchNorm2d` and
`SyncBatchNorm` layers. These will be converted to `FrozenBatchNorm2d` in place. Hint: During fine tuning,
it's good practice to freeze batch norm stats. And note that these are different to the affine parameters
which are just normal PyTorch parameters. Defaults to `True`.
Hint: If you want to freeze batch norm ONLY, use `timm.utils.model.freeze_batch_norm_2d`.
Examples::
>>> model = timm.create_model('resnet18')
>>> # Freeze up to and including layer2
>>> submodules = [n for n, _ in model.named_children()]
>>> print(submodules)
['conv1', 'bn1', 'act1', 'maxpool', 'layer1', 'layer2', 'layer3', 'layer4', 'global_pool', 'fc']
>>> freeze(model, submodules[:submodules.index('layer2') + 1])
>>> # Check for yourself that it works as expected
>>> print(model.layer2[0].conv1.weight.requires_grad)
False
>>> print(model.layer3[0].conv1.weight.requires_grad)
True
>>> # Unfreeze
>>> unfreeze(model)
"""
_freeze_unfreeze(root_module, submodules, include_bn_running_stats=include_bn_running_stats, mode="freeze")
def unfreeze(root_module, submodules=[], include_bn_running_stats=True):
"""
Unfreeze parameters of the specified modules and those of all their hierarchical descendants. This is done in place.
Args:
root_module (nn.Module): Root module relative to which `submodules` are referenced.
submodules (list[str]): List of submodules for which the parameters will be (un)frozen. They are to be provided
as named modules relative to the root module (accessible via `root_module.named_modules()`). An empty
list means that the whole root module will be unfrozen. Defaults to `[]`.
include_bn_running_stats (bool): Whether to also unfreeze the running statistics of `FrozenBatchNorm2d` layers.
These will be converted to `BatchNorm2d` in place. Defaults to `True`.
See example in docstring for `freeze`.
"""
_freeze_unfreeze(root_module, submodules, include_bn_running_stats=include_bn_running_stats, mode="unfreeze")
| 12,085 | 43.109489 | 131 | py |
RandStainNA | RandStainNA-master/classification/timm/utils/random.py | import random
import numpy as np
import torch
def random_seed(seed=42, rank=0):
torch.manual_seed(seed + rank)
np.random.seed(seed + rank)
random.seed(seed + rank)
| 178 | 16.9 | 34 | py |
RandStainNA | RandStainNA-master/classification/timm/utils/distributed.py | """ Distributed training/validation utils
Hacked together by / Copyright 2020 Ross Wightman
"""
import torch
from torch import distributed as dist
from .model import unwrap_model
def reduce_tensor(tensor, n):
rt = tensor.clone()
dist.all_reduce(rt, op=dist.ReduceOp.SUM)
rt /= n
return rt
def distribute_bn(model, world_size, reduce=False):
# ensure every node has the same running bn stats
for bn_name, bn_buf in unwrap_model(model).named_buffers(recurse=True):
if ('running_mean' in bn_name) or ('running_var' in bn_name):
if reduce:
# average bn stats across whole group
torch.distributed.all_reduce(bn_buf, op=dist.ReduceOp.SUM)
bn_buf /= float(world_size)
else:
# broadcast bn stats from rank 0 to whole group
torch.distributed.broadcast(bn_buf, 0)
| 896 | 29.931034 | 75 | py |
RandStainNA | RandStainNA-master/classification/timm/utils/checkpoint_saver.py | """ Checkpoint Saver
Track top-n training checkpoints and maintain recovery checkpoints on specified intervals.
Hacked together by / Copyright 2020 Ross Wightman
"""
import glob
import operator
import os
import logging
import json #12.24加入
import torch
from .model import unwrap_model, get_state_dict
_logger = logging.getLogger(__name__)
class CheckpointSaver:
def __init__(
self,
model,
optimizer,
args=None,
model_ema=None,
amp_scaler=None,
checkpoint_prefix='checkpoint',
recovery_prefix='recovery',
checkpoint_dir='',
recovery_dir='',
decreasing=False,
max_history=10,
unwrap_fn=unwrap_model):
# objects to save state_dicts of
self.model = model
self.optimizer = optimizer
self.args = args
self.model_ema = model_ema
self.amp_scaler = amp_scaler
# state
self.checkpoint_files = [] # (filename, metric) tuples in order of decreasing betterness
self.best_epoch = None
self.best_metric = None
self.curr_recovery_file = ''
self.last_recovery_file = ''
# 12.20增加,记录最好的best epoch对应的test和单独最好的test
self.best_metric_val_test = None
self.best_epoch_test = None
self.best_metric_test = None
#2.5增加
self.best_f1_test = None #记录best时的f1
self.best_auc_test = None #记录best时的auc
self.best_f1 = None #记录val时的f1
self.best_auc = None #记录val时的auc
# config
self.checkpoint_dir = checkpoint_dir
self.recovery_dir = recovery_dir
self.save_prefix = checkpoint_prefix
self.recovery_prefix = recovery_prefix
self.extension = '.pth.tar'
self.decreasing = decreasing # a lower metric is better if True
self.cmp = operator.lt if decreasing else operator.gt # True if lhs better than rhs
self.max_history = max_history
self.unwrap_fn = unwrap_fn
assert self.max_history >= 1
# 12.24 增加一个num_epochs参数,如果相同了,就保存最好10轮的epoch的验证集和测试集结果,并取平均\
# 2.5 增加了f1和auc
def save_checkpoint(self, num_epochs, output_dir, epoch, metric=None, metric_test=None, metric_f1=None, metric_auc=None):
assert epoch >= 0
tmp_save_path = os.path.join(self.checkpoint_dir, 'tmp' + self.extension)
last_save_path = os.path.join(self.checkpoint_dir, 'last' + self.extension)
# self._save(tmp_save_path, epoch, metric) # 1.20不保存任何参数信息
# if os.path.exists(last_save_path):
# os.unlink(last_save_path) # required for Windows support.
# os.rename(tmp_save_path, last_save_path)
worst_file = self.checkpoint_files[-1] if self.checkpoint_files else None
if (len(self.checkpoint_files) < self.max_history
or metric is None or self.cmp(metric_test, worst_file[2])): #metric, worst_file[1], 12.20 后面记录test的max #1.22metric->metric_test
if len(self.checkpoint_files) >= self.max_history:
self._cleanup_checkpoints(1)
filename = '-'.join([self.save_prefix, str(epoch)]) + self.extension
save_path = os.path.join(self.checkpoint_dir, filename)
# os.link(last_save_path, save_path) #12.20修改,不保存pt了
# 12.20,增加最优10个的test的metric
# self.checkpoint_files.append((save_path, metric))
self.checkpoint_files.append((epoch, metric, metric_test, metric_f1, metric_auc)) #12.24直接改为epoch
self.checkpoint_files = sorted(
self.checkpoint_files, key=lambda x: x[2], #1,12.20原来按照val高低来排,现在按照test的
reverse=not self.decreasing) # sort in descending order if a lower metric is not better
checkpoints_str = "Current checkpoints:\n"
for c in self.checkpoint_files:
checkpoints_str += ' {}\n'.format(c)
_logger.info(checkpoints_str)
# 12.24增加最好n轮的平均
# 每次更新都替换掉之前的json文件,这样随时都可以看得到
metric_best10_dict = {}
best10_mean = 0
best10_f1 = 0
best10_auc = 0
for i in range(len(self.checkpoint_files)):
metric_best10_dict['epoch:{}'.format(self.checkpoint_files[i][0])] = {
'acc: ': self.checkpoint_files[i][2],
'f1: ': self.checkpoint_files[i][3],
'auc: ': self.checkpoint_files[i][4]
} #0是epoch,2是best, 3是f1,4是auc
best10_mean += self.checkpoint_files[i][2]
best10_f1 += self.checkpoint_files[i][3]
best10_auc += self.checkpoint_files[i][4]
metric_best10_dict['best10_mean'] = best10_mean / len(self.checkpoint_files)
metric_best10_dict['best10_f1_mean'] = best10_f1 / len(self.checkpoint_files)
metric_best10_dict['best10_auc_mean'] = best10_auc / len(self.checkpoint_files)
submit = os.path.join(output_dir, 'best_10.json')
# print(submit)
with open(submit, 'w') as f: #以写入的方式打开文件,存在则覆盖,不存在则创建
json.dump(metric_best10_dict, f, indent=2)
if metric is not None and (self.best_metric is None or self.cmp(metric, self.best_metric)):
self.best_epoch = epoch
self.best_metric = metric
self.best_metric_val_test = metric_test #验证集最好时的测试集准确率
self.best_f1 = metric_f1
self.best_auc = metric_auc
if metric_test is not None and (self.best_metric_test is None or self.cmp(metric_test, self.best_metric_test)):
self.best_epoch_test = epoch
self.best_metric_test = metric_test #测试集最好准确率
self.best_f1_test = metric_f1
self.best_auc_test = metric_auc
#12.20 不保存模型
# if metric is not None and (self.best_metric is None or self.cmp(metric, self.best_metric)):
# self.best_epoch = epoch
# self.best_metric = metric
# best_save_path = os.path.join(self.checkpoint_dir, 'model_best' + self.extension)
# if os.path.exists(best_save_path):
# os.unlink(best_save_path)
# os.link(last_save_path, best_save_path)
# return (None, None) if self.best_metric is None else (self.best_metric, self.best_epoch)
#12.20返回多个值
return (None, None, None, None, None, None, None, None, None) if self.best_metric is None else (self.best_metric, self.best_metric_val_test, self.best_epoch, self.best_metric_test, self.best_epoch_test, self.best_f1, self.best_auc, self.best_f1_test, self.best_auc_test)
def _save(self, save_path, epoch, metric=None):
save_state = {
'epoch': epoch,
'arch': type(self.model).__name__.lower(),
'state_dict': get_state_dict(self.model, self.unwrap_fn),
'optimizer': self.optimizer.state_dict(),
'version': 2, # version < 2 increments epoch before save
}
if self.args is not None:
save_state['arch'] = self.args.model
save_state['args'] = self.args
if self.amp_scaler is not None:
save_state[self.amp_scaler.state_dict_key] = self.amp_scaler.state_dict()
if self.model_ema is not None:
save_state['state_dict_ema'] = get_state_dict(self.model_ema, self.unwrap_fn)
if metric is not None:
save_state['metric'] = metric
torch.save(save_state, save_path) #1.20 tmp.pth也不需要保存
def _cleanup_checkpoints(self, trim=0):
trim = min(len(self.checkpoint_files), trim)
delete_index = self.max_history - trim
if delete_index < 0 or len(self.checkpoint_files) <= delete_index:
return
to_delete = self.checkpoint_files[delete_index:]
for d in to_delete:
try:
_logger.debug("Cleaning checkpoint: {}".format(d))
os.remove(d[0])
except Exception as e:
_logger.error("Exception '{}' while deleting checkpoint".format(e))
self.checkpoint_files = self.checkpoint_files[:delete_index]
def save_recovery(self, epoch, batch_idx=0):
assert epoch >= 0
filename = '-'.join([self.recovery_prefix, str(epoch), str(batch_idx)]) + self.extension
save_path = os.path.join(self.recovery_dir, filename)
self._save(save_path, epoch)
if os.path.exists(self.last_recovery_file):
try:
_logger.debug("Cleaning recovery: {}".format(self.last_recovery_file))
os.remove(self.last_recovery_file)
except Exception as e:
_logger.error("Exception '{}' while removing {}".format(e, self.last_recovery_file))
self.last_recovery_file = self.curr_recovery_file
self.curr_recovery_file = save_path
def find_recovery(self):
recovery_path = os.path.join(self.recovery_dir, self.recovery_prefix)
files = glob.glob(recovery_path + '*' + self.extension)
files = sorted(files)
return files[0] if len(files) else ''
| 9,243 | 44.313725 | 278 | py |
RandStainNA | RandStainNA-master/classification/timm/utils/agc.py | """ Adaptive Gradient Clipping
An impl of AGC, as per (https://arxiv.org/abs/2102.06171):
@article{brock2021high,
author={Andrew Brock and Soham De and Samuel L. Smith and Karen Simonyan},
title={High-Performance Large-Scale Image Recognition Without Normalization},
journal={arXiv preprint arXiv:},
year={2021}
}
Code references:
* Official JAX impl (paper authors): https://github.com/deepmind/deepmind-research/tree/master/nfnets
* Phil Wang's PyTorch gist: https://gist.github.com/lucidrains/0d6560077edac419ab5d3aa29e674d5c
Hacked together by / Copyright 2021 Ross Wightman
"""
import torch
def unitwise_norm(x, norm_type=2.0):
if x.ndim <= 1:
return x.norm(norm_type)
else:
# works for nn.ConvNd and nn,Linear where output dim is first in the kernel/weight tensor
# might need special cases for other weights (possibly MHA) where this may not be true
return x.norm(norm_type, dim=tuple(range(1, x.ndim)), keepdim=True)
def adaptive_clip_grad(parameters, clip_factor=0.01, eps=1e-3, norm_type=2.0):
if isinstance(parameters, torch.Tensor):
parameters = [parameters]
for p in parameters:
if p.grad is None:
continue
p_data = p.detach()
g_data = p.grad.detach()
max_norm = unitwise_norm(p_data, norm_type=norm_type).clamp_(min=eps).mul_(clip_factor)
grad_norm = unitwise_norm(g_data, norm_type=norm_type)
clipped_grad = g_data * (max_norm / grad_norm.clamp(min=1e-6))
new_grads = torch.where(grad_norm < max_norm, g_data, clipped_grad)
p.grad.detach().copy_(new_grads)
| 1,624 | 36.790698 | 103 | py |
RandStainNA | RandStainNA-master/classification/timm/utils/model_ema.py | """ Exponential Moving Average (EMA) of model updates
Hacked together by / Copyright 2020 Ross Wightman
"""
import logging
from collections import OrderedDict
from copy import deepcopy
import torch
import torch.nn as nn
_logger = logging.getLogger(__name__)
class ModelEma:
""" Model Exponential Moving Average (DEPRECATED)
Keep a moving average of everything in the model state_dict (parameters and buffers).
This version is deprecated, it does not work with scripted models. Will be removed eventually.
This is intended to allow functionality like
https://www.tensorflow.org/api_docs/python/tf/train/ExponentialMovingAverage
A smoothed version of the weights is necessary for some training schemes to perform well.
E.g. Google's hyper-params for training MNASNet, MobileNet-V3, EfficientNet, etc that use
RMSprop with a short 2.4-3 epoch decay period and slow LR decay rate of .96-.99 requires EMA
smoothing of weights to match results. Pay attention to the decay constant you are using
relative to your update count per epoch.
To keep EMA from using GPU resources, set device='cpu'. This will save a bit of memory but
disable validation of the EMA weights. Validation will have to be done manually in a separate
process, or after the training stops converging.
This class is sensitive where it is initialized in the sequence of model init,
GPU assignment and distributed training wrappers.
"""
def __init__(self, model, decay=0.9999, device='', resume=''):
# make a copy of the model for accumulating moving average of weights
self.ema = deepcopy(model)
self.ema.eval()
self.decay = decay
self.device = device # perform ema on different device from model if set
if device:
self.ema.to(device=device)
self.ema_has_module = hasattr(self.ema, 'module')
if resume:
self._load_checkpoint(resume)
for p in self.ema.parameters():
p.requires_grad_(False)
def _load_checkpoint(self, checkpoint_path):
checkpoint = torch.load(checkpoint_path, map_location='cpu')
assert isinstance(checkpoint, dict)
if 'state_dict_ema' in checkpoint:
new_state_dict = OrderedDict()
for k, v in checkpoint['state_dict_ema'].items():
# ema model may have been wrapped by DataParallel, and need module prefix
if self.ema_has_module:
name = 'module.' + k if not k.startswith('module') else k
else:
name = k
new_state_dict[name] = v
self.ema.load_state_dict(new_state_dict)
_logger.info("Loaded state_dict_ema")
else:
_logger.warning("Failed to find state_dict_ema, starting from loaded model weights")
def update(self, model):
# correct a mismatch in state dict keys
needs_module = hasattr(model, 'module') and not self.ema_has_module
with torch.no_grad():
msd = model.state_dict()
for k, ema_v in self.ema.state_dict().items():
if needs_module:
k = 'module.' + k
model_v = msd[k].detach()
if self.device:
model_v = model_v.to(device=self.device)
ema_v.copy_(ema_v * self.decay + (1. - self.decay) * model_v)
class ModelEmaV2(nn.Module):
""" Model Exponential Moving Average V2
Keep a moving average of everything in the model state_dict (parameters and buffers).
V2 of this module is simpler, it does not match params/buffers based on name but simply
iterates in order. It works with torchscript (JIT of full model).
This is intended to allow functionality like
https://www.tensorflow.org/api_docs/python/tf/train/ExponentialMovingAverage
A smoothed version of the weights is necessary for some training schemes to perform well.
E.g. Google's hyper-params for training MNASNet, MobileNet-V3, EfficientNet, etc that use
RMSprop with a short 2.4-3 epoch decay period and slow LR decay rate of .96-.99 requires EMA
smoothing of weights to match results. Pay attention to the decay constant you are using
relative to your update count per epoch.
To keep EMA from using GPU resources, set device='cpu'. This will save a bit of memory but
disable validation of the EMA weights. Validation will have to be done manually in a separate
process, or after the training stops converging.
This class is sensitive where it is initialized in the sequence of model init,
GPU assignment and distributed training wrappers.
"""
def __init__(self, model, decay=0.9999, device=None):
super(ModelEmaV2, self).__init__()
# make a copy of the model for accumulating moving average of weights
self.module = deepcopy(model)
self.module.eval()
self.decay = decay
self.device = device # perform ema on different device from model if set
if self.device is not None:
self.module.to(device=device)
def _update(self, model, update_fn):
with torch.no_grad():
for ema_v, model_v in zip(self.module.state_dict().values(), model.state_dict().values()):
if self.device is not None:
model_v = model_v.to(device=self.device)
ema_v.copy_(update_fn(ema_v, model_v))
def update(self, model):
self._update(model, update_fn=lambda e, m: self.decay * e + (1. - self.decay) * m)
def set(self, model):
self._update(model, update_fn=lambda e, m: m)
| 5,670 | 43.653543 | 102 | py |
RandStainNA | RandStainNA-master/classification/timm/data/dataset_factory.py | """ Dataset Factory
Hacked together by / Copyright 2021, Ross Wightman
"""
import os
from torchvision.datasets import CIFAR100, CIFAR10, MNIST, QMNIST, KMNIST, FashionMNIST, ImageNet, ImageFolder
try:
from torchvision.datasets import Places365
has_places365 = True
except ImportError:
has_places365 = False
try:
from torchvision.datasets import INaturalist
has_inaturalist = True
except ImportError:
has_inaturalist = False
from .dataset import IterableImageDataset, ImageDataset
_TORCH_BASIC_DS = dict(
cifar10=CIFAR10,
cifar100=CIFAR100,
mnist=MNIST,
qmist=QMNIST,
kmnist=KMNIST,
fashion_mnist=FashionMNIST,
)
_TRAIN_SYNONYM = {'train', 'training'}
_EVAL_SYNONYM = {'val', 'valid', 'validation', 'eval', 'evaluation'}
_TEST_SYNONYM = {'test'} #12.20修改
def _search_split(root, split):
# look for sub-folder with name of split in root and use that if it exists
split_name = split.split('[')[0]
try_root = os.path.join(root, split_name)
if os.path.exists(try_root):
return try_root
def _try(syn):
for s in syn:
try_root = os.path.join(root, s)
if os.path.exists(try_root):
return try_root
return root
if split_name in _TRAIN_SYNONYM:
root = _try(_TRAIN_SYNONYM)
elif split_name in _EVAL_SYNONYM:
root = _try(_EVAL_SYNONYM)
# 12.20增加测试集分支
elif split_name in _TEST_SYNONYM:
root = _try(_TEST_SYNONYM)
return root
def create_dataset(
name,
root,
split='validation',
search_split=True,
class_map=None,
load_bytes=False,
is_training=False,
download=False,
batch_size=None,
repeats=0,
**kwargs
):
""" Dataset factory method
In parenthesis after each arg are the type of dataset supported for each arg, one of:
* folder - default, timm folder (or tar) based ImageDataset
* torch - torchvision based datasets
* TFDS - Tensorflow-datasets wrapper in IterabeDataset interface via IterableImageDataset
* all - any of the above
Args:
name: dataset name, empty is okay for folder based datasets
root: root folder of dataset (all)
split: dataset split (all)
search_split: search for split specific child fold from root so one can specify
`imagenet/` instead of `/imagenet/val`, etc on cmd line / config. (folder, torch/folder)
class_map: specify class -> index mapping via text file or dict (folder)
load_bytes: load data, return images as undecoded bytes (folder)
download: download dataset if not present and supported (TFDS, torch)
is_training: create dataset in train mode, this is different from the split.
For Iterable / TDFS it enables shuffle, ignored for other datasets. (TFDS)
batch_size: batch size hint for (TFDS)
repeats: dataset repeats per iteration i.e. epoch (TFDS)
**kwargs: other args to pass to dataset
Returns:
Dataset object
"""
name = name.lower()
if name.startswith('torch/'):
name = name.split('/', 2)[-1]
torch_kwargs = dict(root=root, download=download, **kwargs)
if name in _TORCH_BASIC_DS:
ds_class = _TORCH_BASIC_DS[name]
use_train = split in _TRAIN_SYNONYM
ds = ds_class(train=use_train, **torch_kwargs)
elif name == 'inaturalist' or name == 'inat':
assert has_inaturalist, 'Please update to PyTorch 1.10, torchvision 0.11+ for Inaturalist'
target_type = 'full'
split_split = split.split('/')
if len(split_split) > 1:
target_type = split_split[0].split('_')
if len(target_type) == 1:
target_type = target_type[0]
split = split_split[-1]
if split in _TRAIN_SYNONYM:
split = '2021_train'
elif split in _EVAL_SYNONYM:
split = '2021_valid'
ds = INaturalist(version=split, target_type=target_type, **torch_kwargs)
elif name == 'places365':
assert has_places365, 'Please update to a newer PyTorch and torchvision for Places365 dataset.'
if split in _TRAIN_SYNONYM:
split = 'train-standard'
elif split in _EVAL_SYNONYM:
split = 'val'
ds = Places365(split=split, **torch_kwargs)
elif name == 'imagenet':
if split in _EVAL_SYNONYM:
split = 'val'
ds = ImageNet(split=split, **torch_kwargs)
elif name == 'image_folder' or name == 'folder':
# in case torchvision ImageFolder is preferred over timm ImageDataset for some reason
if search_split and os.path.isdir(root):
# look for split specific sub-folder in root
root = _search_split(root, split)
ds = ImageFolder(root, **kwargs)
print('ImageFoder dataset') #1.20添加
else:
assert False, f"Unknown torchvision dataset {name}"
elif name.startswith('tfds/'):
ds = IterableImageDataset(
root, parser=name, split=split, is_training=is_training,
download=download, batch_size=batch_size, repeats=repeats, **kwargs)
else:
# FIXME support more advance split cfg for ImageFolder/Tar datasets in the future
if search_split and os.path.isdir(root):
# look for split specific sub-folder in root
root = _search_split(root, split)
ds = ImageDataset(root, parser=name, class_map=class_map, load_bytes=load_bytes, **kwargs)
return ds
| 5,706 | 37.560811 | 110 | py |
RandStainNA | RandStainNA-master/classification/timm/data/dataset.py | """ Quick n Simple Image Folder, Tarfile based DataSet
Hacked together by / Copyright 2019, Ross Wightman
"""
import torch.utils.data as data
import os
import torch
import logging
from PIL import Image
from .parsers import create_parser
_logger = logging.getLogger(__name__)
_ERROR_RETRY = 50
class ImageDataset(data.Dataset):
def __init__(
self,
root,
parser=None,
class_map=None,
load_bytes=False,
transform=None,
target_transform=None,
):
if parser is None or isinstance(parser, str):
parser = create_parser(parser or '', root=root, class_map=class_map)
self.parser = parser
self.load_bytes = load_bytes
self.transform = transform
self.target_transform = target_transform
self._consecutive_errors = 0
def __getitem__(self, index):
img, target = self.parser[index]
try:
img = img.read() if self.load_bytes else Image.open(img).convert('RGB')
except Exception as e:
_logger.warning(f'Skipped sample (index {index}, file {self.parser.filename(index)}). {str(e)}')
self._consecutive_errors += 1
if self._consecutive_errors < _ERROR_RETRY:
return self.__getitem__((index + 1) % len(self.parser))
else:
raise e
self._consecutive_errors = 0
if self.transform is not None:
img = self.transform(img)
if target is None:
target = -1
elif self.target_transform is not None:
target = self.target_transform(target)
return img, target
def __len__(self):
return len(self.parser)
def filename(self, index, basename=False, absolute=False):
return self.parser.filename(index, basename, absolute)
def filenames(self, basename=False, absolute=False):
return self.parser.filenames(basename, absolute)
class IterableImageDataset(data.IterableDataset):
def __init__(
self,
root,
parser=None,
split='train',
is_training=False,
batch_size=None,
repeats=0,
download=False,
transform=None,
target_transform=None,
):
assert parser is not None
if isinstance(parser, str):
self.parser = create_parser(
parser, root=root, split=split, is_training=is_training,
batch_size=batch_size, repeats=repeats, download=download)
else:
self.parser = parser
self.transform = transform
self.target_transform = target_transform
self._consecutive_errors = 0
def __iter__(self):
for img, target in self.parser:
if self.transform is not None:
img = self.transform(img)
if self.target_transform is not None:
target = self.target_transform(target)
yield img, target
def __len__(self):
if hasattr(self.parser, '__len__'):
return len(self.parser)
else:
return 0
def filename(self, index, basename=False, absolute=False):
assert False, 'Filename lookup by index not supported, use filenames().'
def filenames(self, basename=False, absolute=False):
return self.parser.filenames(basename, absolute)
class AugMixDataset(torch.utils.data.Dataset):
"""Dataset wrapper to perform AugMix or other clean/augmentation mixes"""
def __init__(self, dataset, num_splits=2):
self.augmentation = None
self.normalize = None
self.dataset = dataset
if self.dataset.transform is not None:
self._set_transforms(self.dataset.transform)
self.num_splits = num_splits
def _set_transforms(self, x):
assert isinstance(x, (list, tuple)) and len(x) == 3, 'Expecting a tuple/list of 3 transforms'
self.dataset.transform = x[0]
self.augmentation = x[1]
self.normalize = x[2]
@property
def transform(self):
return self.dataset.transform
@transform.setter
def transform(self, x):
self._set_transforms(x)
def _normalize(self, x):
return x if self.normalize is None else self.normalize(x)
def __getitem__(self, i):
x, y = self.dataset[i] # all splits share the same dataset base transform
x_list = [self._normalize(x)] # first split only normalizes (this is the 'clean' split)
# run the full augmentation on the remaining splits
for _ in range(self.num_splits - 1):
x_list.append(self._normalize(self.augmentation(x)))
return tuple(x_list), y
def __len__(self):
return len(self.dataset)
| 4,805 | 30.411765 | 108 | py |
RandStainNA | RandStainNA-master/classification/timm/data/mixup.py | """ Mixup and Cutmix
Papers:
mixup: Beyond Empirical Risk Minimization (https://arxiv.org/abs/1710.09412)
CutMix: Regularization Strategy to Train Strong Classifiers with Localizable Features (https://arxiv.org/abs/1905.04899)
Code Reference:
CutMix: https://github.com/clovaai/CutMix-PyTorch
Hacked together by / Copyright 2019, Ross Wightman
"""
import numpy as np
import torch
def one_hot(x, num_classes, on_value=1., off_value=0., device='cuda'):
x = x.long().view(-1, 1)
return torch.full((x.size()[0], num_classes), off_value, device=device).scatter_(1, x, on_value)
def mixup_target(target, num_classes, lam=1., smoothing=0.0, device='cuda'):
off_value = smoothing / num_classes
on_value = 1. - smoothing + off_value
y1 = one_hot(target, num_classes, on_value=on_value, off_value=off_value, device=device)
y2 = one_hot(target.flip(0), num_classes, on_value=on_value, off_value=off_value, device=device)
return y1 * lam + y2 * (1. - lam)
def rand_bbox(img_shape, lam, margin=0., count=None):
""" Standard CutMix bounding-box
Generates a random square bbox based on lambda value. This impl includes
support for enforcing a border margin as percent of bbox dimensions.
Args:
img_shape (tuple): Image shape as tuple
lam (float): Cutmix lambda value
margin (float): Percentage of bbox dimension to enforce as margin (reduce amount of box outside image)
count (int): Number of bbox to generate
"""
ratio = np.sqrt(1 - lam)
img_h, img_w = img_shape[-2:]
cut_h, cut_w = int(img_h * ratio), int(img_w * ratio)
margin_y, margin_x = int(margin * cut_h), int(margin * cut_w)
cy = np.random.randint(0 + margin_y, img_h - margin_y, size=count)
cx = np.random.randint(0 + margin_x, img_w - margin_x, size=count)
yl = np.clip(cy - cut_h // 2, 0, img_h)
yh = np.clip(cy + cut_h // 2, 0, img_h)
xl = np.clip(cx - cut_w // 2, 0, img_w)
xh = np.clip(cx + cut_w // 2, 0, img_w)
return yl, yh, xl, xh
def rand_bbox_minmax(img_shape, minmax, count=None):
""" Min-Max CutMix bounding-box
Inspired by Darknet cutmix impl, generates a random rectangular bbox
based on min/max percent values applied to each dimension of the input image.
Typical defaults for minmax are usually in the .2-.3 for min and .8-.9 range for max.
Args:
img_shape (tuple): Image shape as tuple
minmax (tuple or list): Min and max bbox ratios (as percent of image size)
count (int): Number of bbox to generate
"""
assert len(minmax) == 2
img_h, img_w = img_shape[-2:]
cut_h = np.random.randint(int(img_h * minmax[0]), int(img_h * minmax[1]), size=count)
cut_w = np.random.randint(int(img_w * minmax[0]), int(img_w * minmax[1]), size=count)
yl = np.random.randint(0, img_h - cut_h, size=count)
xl = np.random.randint(0, img_w - cut_w, size=count)
yu = yl + cut_h
xu = xl + cut_w
return yl, yu, xl, xu
def cutmix_bbox_and_lam(img_shape, lam, ratio_minmax=None, correct_lam=True, count=None):
""" Generate bbox and apply lambda correction.
"""
if ratio_minmax is not None:
yl, yu, xl, xu = rand_bbox_minmax(img_shape, ratio_minmax, count=count)
else:
yl, yu, xl, xu = rand_bbox(img_shape, lam, count=count)
if correct_lam or ratio_minmax is not None:
bbox_area = (yu - yl) * (xu - xl)
lam = 1. - bbox_area / float(img_shape[-2] * img_shape[-1])
return (yl, yu, xl, xu), lam
class Mixup:
""" Mixup/Cutmix that applies different params to each element or whole batch
Args:
mixup_alpha (float): mixup alpha value, mixup is active if > 0.
cutmix_alpha (float): cutmix alpha value, cutmix is active if > 0.
cutmix_minmax (List[float]): cutmix min/max image ratio, cutmix is active and uses this vs alpha if not None.
prob (float): probability of applying mixup or cutmix per batch or element
switch_prob (float): probability of switching to cutmix instead of mixup when both are active
mode (str): how to apply mixup/cutmix params (per 'batch', 'pair' (pair of elements), 'elem' (element)
correct_lam (bool): apply lambda correction when cutmix bbox clipped by image borders
label_smoothing (float): apply label smoothing to the mixed target tensor
num_classes (int): number of classes for target
"""
def __init__(self, mixup_alpha=1., cutmix_alpha=0., cutmix_minmax=None, prob=1.0, switch_prob=0.5,
mode='batch', correct_lam=True, label_smoothing=0.1, num_classes=1000):
self.mixup_alpha = mixup_alpha
self.cutmix_alpha = cutmix_alpha
self.cutmix_minmax = cutmix_minmax
if self.cutmix_minmax is not None:
assert len(self.cutmix_minmax) == 2
# force cutmix alpha == 1.0 when minmax active to keep logic simple & safe
self.cutmix_alpha = 1.0
self.mix_prob = prob
self.switch_prob = switch_prob
self.label_smoothing = label_smoothing
self.num_classes = num_classes
self.mode = mode
self.correct_lam = correct_lam # correct lambda based on clipped area for cutmix
self.mixup_enabled = True # set to false to disable mixing (intended tp be set by train loop)
def _params_per_elem(self, batch_size):
lam = np.ones(batch_size, dtype=np.float32)
use_cutmix = np.zeros(batch_size, dtype=np.bool)
if self.mixup_enabled:
if self.mixup_alpha > 0. and self.cutmix_alpha > 0.:
use_cutmix = np.random.rand(batch_size) < self.switch_prob
lam_mix = np.where(
use_cutmix,
np.random.beta(self.cutmix_alpha, self.cutmix_alpha, size=batch_size),
np.random.beta(self.mixup_alpha, self.mixup_alpha, size=batch_size))
elif self.mixup_alpha > 0.:
lam_mix = np.random.beta(self.mixup_alpha, self.mixup_alpha, size=batch_size)
elif self.cutmix_alpha > 0.:
use_cutmix = np.ones(batch_size, dtype=np.bool)
lam_mix = np.random.beta(self.cutmix_alpha, self.cutmix_alpha, size=batch_size)
else:
assert False, "One of mixup_alpha > 0., cutmix_alpha > 0., cutmix_minmax not None should be true."
lam = np.where(np.random.rand(batch_size) < self.mix_prob, lam_mix.astype(np.float32), lam)
return lam, use_cutmix
def _params_per_batch(self):
lam = 1.
use_cutmix = False
if self.mixup_enabled and np.random.rand() < self.mix_prob:
if self.mixup_alpha > 0. and self.cutmix_alpha > 0.:
use_cutmix = np.random.rand() < self.switch_prob
lam_mix = np.random.beta(self.cutmix_alpha, self.cutmix_alpha) if use_cutmix else \
np.random.beta(self.mixup_alpha, self.mixup_alpha)
elif self.mixup_alpha > 0.:
lam_mix = np.random.beta(self.mixup_alpha, self.mixup_alpha)
elif self.cutmix_alpha > 0.:
use_cutmix = True
lam_mix = np.random.beta(self.cutmix_alpha, self.cutmix_alpha)
else:
assert False, "One of mixup_alpha > 0., cutmix_alpha > 0., cutmix_minmax not None should be true."
lam = float(lam_mix)
return lam, use_cutmix
def _mix_elem(self, x):
batch_size = len(x)
lam_batch, use_cutmix = self._params_per_elem(batch_size)
x_orig = x.clone() # need to keep an unmodified original for mixing source
for i in range(batch_size):
j = batch_size - i - 1
lam = lam_batch[i]
if lam != 1.:
if use_cutmix[i]:
(yl, yh, xl, xh), lam = cutmix_bbox_and_lam(
x[i].shape, lam, ratio_minmax=self.cutmix_minmax, correct_lam=self.correct_lam)
x[i][:, yl:yh, xl:xh] = x_orig[j][:, yl:yh, xl:xh]
lam_batch[i] = lam
else:
x[i] = x[i] * lam + x_orig[j] * (1 - lam)
return torch.tensor(lam_batch, device=x.device, dtype=x.dtype).unsqueeze(1)
def _mix_pair(self, x):
batch_size = len(x)
lam_batch, use_cutmix = self._params_per_elem(batch_size // 2)
x_orig = x.clone() # need to keep an unmodified original for mixing source
for i in range(batch_size // 2):
j = batch_size - i - 1
lam = lam_batch[i]
if lam != 1.:
if use_cutmix[i]:
(yl, yh, xl, xh), lam = cutmix_bbox_and_lam(
x[i].shape, lam, ratio_minmax=self.cutmix_minmax, correct_lam=self.correct_lam)
x[i][:, yl:yh, xl:xh] = x_orig[j][:, yl:yh, xl:xh]
x[j][:, yl:yh, xl:xh] = x_orig[i][:, yl:yh, xl:xh]
lam_batch[i] = lam
else:
x[i] = x[i] * lam + x_orig[j] * (1 - lam)
x[j] = x[j] * lam + x_orig[i] * (1 - lam)
lam_batch = np.concatenate((lam_batch, lam_batch[::-1]))
return torch.tensor(lam_batch, device=x.device, dtype=x.dtype).unsqueeze(1)
def _mix_batch(self, x):
lam, use_cutmix = self._params_per_batch()
if lam == 1.:
return 1.
if use_cutmix:
(yl, yh, xl, xh), lam = cutmix_bbox_and_lam(
x.shape, lam, ratio_minmax=self.cutmix_minmax, correct_lam=self.correct_lam)
x[:, :, yl:yh, xl:xh] = x.flip(0)[:, :, yl:yh, xl:xh]
else:
x_flipped = x.flip(0).mul_(1. - lam)
x.mul_(lam).add_(x_flipped)
return lam
def __call__(self, x, target):
assert len(x) % 2 == 0, 'Batch size should be even when using this'
if self.mode == 'elem':
lam = self._mix_elem(x)
elif self.mode == 'pair':
lam = self._mix_pair(x)
else:
lam = self._mix_batch(x)
target = mixup_target(target, self.num_classes, lam, self.label_smoothing, x.device)
return x, target
class FastCollateMixup(Mixup):
""" Fast Collate w/ Mixup/Cutmix that applies different params to each element or whole batch
A Mixup impl that's performed while collating the batches.
"""
def _mix_elem_collate(self, output, batch, half=False):
batch_size = len(batch)
num_elem = batch_size // 2 if half else batch_size
assert len(output) == num_elem
lam_batch, use_cutmix = self._params_per_elem(num_elem)
for i in range(num_elem):
j = batch_size - i - 1
lam = lam_batch[i]
mixed = batch[i][0]
if lam != 1.:
if use_cutmix[i]:
if not half:
mixed = mixed.copy()
(yl, yh, xl, xh), lam = cutmix_bbox_and_lam(
output.shape, lam, ratio_minmax=self.cutmix_minmax, correct_lam=self.correct_lam)
mixed[:, yl:yh, xl:xh] = batch[j][0][:, yl:yh, xl:xh]
lam_batch[i] = lam
else:
mixed = mixed.astype(np.float32) * lam + batch[j][0].astype(np.float32) * (1 - lam)
np.rint(mixed, out=mixed)
output[i] += torch.from_numpy(mixed.astype(np.uint8))
if half:
lam_batch = np.concatenate((lam_batch, np.ones(num_elem)))
return torch.tensor(lam_batch).unsqueeze(1)
def _mix_pair_collate(self, output, batch):
batch_size = len(batch)
lam_batch, use_cutmix = self._params_per_elem(batch_size // 2)
for i in range(batch_size // 2):
j = batch_size - i - 1
lam = lam_batch[i]
mixed_i = batch[i][0]
mixed_j = batch[j][0]
assert 0 <= lam <= 1.0
if lam < 1.:
if use_cutmix[i]:
(yl, yh, xl, xh), lam = cutmix_bbox_and_lam(
output.shape, lam, ratio_minmax=self.cutmix_minmax, correct_lam=self.correct_lam)
patch_i = mixed_i[:, yl:yh, xl:xh].copy()
mixed_i[:, yl:yh, xl:xh] = mixed_j[:, yl:yh, xl:xh]
mixed_j[:, yl:yh, xl:xh] = patch_i
lam_batch[i] = lam
else:
mixed_temp = mixed_i.astype(np.float32) * lam + mixed_j.astype(np.float32) * (1 - lam)
mixed_j = mixed_j.astype(np.float32) * lam + mixed_i.astype(np.float32) * (1 - lam)
mixed_i = mixed_temp
np.rint(mixed_j, out=mixed_j)
np.rint(mixed_i, out=mixed_i)
output[i] += torch.from_numpy(mixed_i.astype(np.uint8))
output[j] += torch.from_numpy(mixed_j.astype(np.uint8))
lam_batch = np.concatenate((lam_batch, lam_batch[::-1]))
return torch.tensor(lam_batch).unsqueeze(1)
def _mix_batch_collate(self, output, batch):
batch_size = len(batch)
lam, use_cutmix = self._params_per_batch()
if use_cutmix:
(yl, yh, xl, xh), lam = cutmix_bbox_and_lam(
output.shape, lam, ratio_minmax=self.cutmix_minmax, correct_lam=self.correct_lam)
for i in range(batch_size):
j = batch_size - i - 1
mixed = batch[i][0]
if lam != 1.:
if use_cutmix:
mixed = mixed.copy() # don't want to modify the original while iterating
mixed[:, yl:yh, xl:xh] = batch[j][0][:, yl:yh, xl:xh]
else:
mixed = mixed.astype(np.float32) * lam + batch[j][0].astype(np.float32) * (1 - lam)
np.rint(mixed, out=mixed)
output[i] += torch.from_numpy(mixed.astype(np.uint8))
return lam
def __call__(self, batch, _=None):
batch_size = len(batch)
assert batch_size % 2 == 0, 'Batch size should be even when using this'
half = 'half' in self.mode
if half:
batch_size //= 2
output = torch.zeros((batch_size, *batch[0][0].shape), dtype=torch.uint8)
if self.mode == 'elem' or self.mode == 'half':
lam = self._mix_elem_collate(output, batch, half=half)
elif self.mode == 'pair':
lam = self._mix_pair_collate(output, batch)
else:
lam = self._mix_batch_collate(output, batch)
target = torch.tensor([b[1] for b in batch], dtype=torch.int64)
target = mixup_target(target, self.num_classes, lam, self.label_smoothing, device='cpu')
target = target[:batch_size]
return output, target
| 14,722 | 45.444795 | 120 | py |
RandStainNA | RandStainNA-master/classification/timm/data/transforms_factory.py | """ Transforms Factory
Factory methods for building image transforms for use with TIMM (PyTorch Image Models)
Hacked together by / Copyright 2019, Ross Wightman
"""
import math
import torch
from torchvision import transforms
from torchvision.transforms import RandomErasing as RandomErasing_torch #1.23添加
from torchvision.transforms import RandomAffine
from timm.data.constants import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, DEFAULT_CROP_PCT
from timm.data.auto_augment import rand_augment_transform, augment_and_mix_transform, auto_augment_transform
from timm.data.transforms import str_to_interp_mode, str_to_pil_interp, RandomResizedCropAndInterpolation, ToNumpy
from timm.data.random_erasing import RandomErasing
from timm.data.transforms import color_norm_jitter, hed_norm_jitter, HEDJitter, LABJitter, LABJitter_hsv, HSVJitter, RandomGaussBlur, RandomGaussianNoise#, Normalizer_transform #12.30修改 #1.21添加LABJitter, #2.6添加LABJitter_hsv #2.13加入HSVJitter,对cj封装
def transforms_noaug_train(
img_size=224,
interpolation='bilinear',
use_prefetcher=False,
mean=IMAGENET_DEFAULT_MEAN,
std=IMAGENET_DEFAULT_STD,
):
if interpolation == 'random':
# random interpolation not supported with no-aug
interpolation = 'bilinear'
tfl = [
transforms.Resize(img_size, interpolation=str_to_interp_mode(interpolation)),
transforms.CenterCrop(img_size)
]
if use_prefetcher:
# prefetcher and collate will handle tensor conversion and norm
tfl += [ToNumpy()]
else:
tfl += [
transforms.ToTensor(),
transforms.Normalize(
mean=torch.tensor(mean),
std=torch.tensor(std))
]
return transforms.Compose(tfl)
def transforms_imagenet_train(
img_size=224, #注意,这边其实没有用到,因为并没有resize,输入图片是多大这边就是多大,torchvision的model可以兼容不同分辨率 1.30
scale=None,
ratio=None,
hflip=0.5,
vflip=0.,
morphology=None, #12.26添加,是否增加形态学处理
color_jitter=0.4,
norm_jitter=None, #12.20加入,norm&jitter数据增强方法
hed_jitter=None, #12.26添加,一个theta参数
lab_jitter=None, #1.21添加,一个theta参数
random_jitter=None, #1.30添加,jitter上是否需要randomaug
cj_p=1.0, #2.13添加,jitter的概率
auto_augment=None,
interpolation='random',
use_prefetcher=False,
mean=IMAGENET_DEFAULT_MEAN,
std=IMAGENET_DEFAULT_STD,
re_prob=0.,
re_mode='const',
re_count=1,
re_num_splits=0,
separate=False,
logger=None
):
"""
If separate==True, the transforms are returned as a tuple of 3 separate transforms
for use in a mixing dataset that passes
* all data through the first (primary) transform, called the 'clean' data
* a portion of the data through the secondary transform
* normalizes and converts the branches above with the third, final transform
"""
scale = tuple(scale or (0.08, 1.0)) # default imagenet scale range
ratio = tuple(ratio or (3./4., 4./3.)) # default imagenet ratio range
# 12.20,修改为pytorch自带的transforms
# primary_tfl = [transforms.RandomResizedCrop(img_size)]
primary_tfl = [] #12.24修改,发现可能是RandomResizedCrop会损害baseline性能(先剔除,后面再说)
# [RandomResizedCropAndInterpolation(img_size, scale=scale, ratio=ratio, interpolation=interpolation)]
if hflip > 0.:
primary_tfl += [transforms.RandomHorizontalFlip(p=hflip)]
if vflip > 0.:
primary_tfl += [transforms.RandomVerticalFlip(p=vflip)]
special_tfl = [] #1.10修改,增加一个special_tfl,因为这个部分可能需要混合
# 12.20修改,增加norm&jitter数据增强方法
# 如果norm_jitter有值
# 12.20修改,增加HED方法
if norm_jitter is not None:
# LAB / HED
# 根据config文件的空间来决定是哪种
# 12.26好像有问题,之前不知道顺序,很可怕
if norm_jitter['methods'] == 'Reinhard':
# 1.10修改,nj方法的lab和hsv进行统一
# 1.30修改,lab,hsv和hed方法统一,根据color_space确定
if norm_jitter['color_space'] == 'LAB' or norm_jitter['color_space'] == 'HSV' or norm_jitter['color_space'] == 'HED':
color_space = norm_jitter['color_space'] #获取颜色空间名称
#1.30修改,avg和std已经自带分布,在transform里面修改即可
mean_dataset = [norm_jitter[color_space[0]]['avg'],norm_jitter[color_space[1]]['avg'],norm_jitter[color_space[2]]['avg']]
std_dataset = [norm_jitter[color_space[0]]['std'],norm_jitter[color_space[1]]['std'],norm_jitter[color_space[2]]['std']]
std_hyper = norm_jitter['std_hyper']
distribution = norm_jitter['distribution'] #1.30添加,手工指定分布
p = norm_jitter['p'] #2.9添加,采用增强的概率,默认是1
special_tfl += [color_norm_jitter(mean=mean_dataset,std=std_dataset,std_hyper=std_hyper,probability=p,color_space=color_space, distribution=distribution)]
# elif norm_jitter['color_space'] == 'HED':
# mean_dataset = [norm_jitter['H']['avg'],norm_jitter['E']['avg'],norm_jitter['D']['avg']]
# std_dataset = [norm_jitter['H']['std'],norm_jitter['E']['std'],norm_jitter['D']['std']]
# std_hyper = norm_jitter['std_hyper']
# special_tfl += [hed_norm_jitter(mean=mean_dataset,std=std_dataset,std_hyper=std_hyper,probability=1)]
elif norm_jitter['color_space'] == 'Random': #1.10增加,混合多种方法,等概率随机进行选取
distribution = norm_jitter['distribution'] #1.30添加,手工指定分布
if 'L' in list(norm_jitter.keys()): #2.8修改,测试HED,lab,hsv三者的排列组合
mean_dataset = [norm_jitter['L']['avg'],norm_jitter['A']['avg'],norm_jitter['B']['avg']]
std_dataset = [norm_jitter['L']['std'],norm_jitter['A']['std'],norm_jitter['B']['std']]
std_hyper = norm_jitter['std_hyper']
p = norm_jitter['p'] #2.9添加,采用增强的概率,默认是1
special_tfl += [color_norm_jitter(mean=mean_dataset,std=std_dataset,std_hyper=std_hyper,probability=p,color_space='LAB',distribution=distribution)]
if 'E' in list(norm_jitter.keys()): #2.8修改,测试HED,lab,hsv三者的排列组合
mean_dataset = [norm_jitter['H']['avg'],norm_jitter['E']['avg'],norm_jitter['D']['avg']]
std_dataset = [norm_jitter['H']['std'],norm_jitter['E']['std'],norm_jitter['D']['std']]
std_hyper = norm_jitter['std_hyper']
p = norm_jitter['p'] #2.9添加,采用增强的概率,默认是1
# special_tfl += [hed_norm_jitter(mean=mean_dataset,std=std_dataset,std_hyper=std_hyper,probability=1)]
# 1.30修改,nj方法统一lab和hed,所以统一用一个即可
special_tfl += [color_norm_jitter(mean=mean_dataset,std=std_dataset,std_hyper=std_hyper,probability=p,color_space='HED',distribution=distribution)]
# 2.6修改,增加hsv来random
if 'h' in list(norm_jitter.keys()): #2.8修改,测试HED,lab,hsv三者的排列组合
mean_dataset = [norm_jitter['h']['avg'],norm_jitter['S']['avg'],norm_jitter['V']['avg']]
std_dataset = [norm_jitter['h']['std'],norm_jitter['S']['std'],norm_jitter['V']['std']]
std_hyper = norm_jitter['std_hyper']
p = norm_jitter['p'] #2.9添加,采用增强的概率,默认是1
special_tfl += [color_norm_jitter(mean=mean_dataset,std=std_dataset,std_hyper=std_hyper,probability=p,color_space='HSV',distribution=distribution)]
if logger is not None:
logger.info('norm_jitter:', norm_jitter)
else:
print('norm_jitter:', norm_jitter)
secondary_tfl = []
if auto_augment:
assert isinstance(auto_augment, str)
if isinstance(img_size, (tuple, list)):
img_size_min = min(img_size)
else:
img_size_min = img_size
aa_params = dict(
translate_const=int(img_size_min * 0.45),
img_mean=tuple([min(255, round(255 * x)) for x in mean]),
)
if interpolation and interpolation != 'random':
aa_params['interpolation'] = str_to_pil_interp(interpolation)
if auto_augment.startswith('rand'):
secondary_tfl += [rand_augment_transform(auto_augment, aa_params)]
elif auto_augment.startswith('augmix'):
aa_params['translate_pct'] = 0.3
secondary_tfl += [augment_and_mix_transform(auto_augment, aa_params)]
else:
secondary_tfl += [auto_augment_transform(auto_augment, aa_params)]
# 12.26 顺序很重要
# 12.26修改,增加HED_jitter方法
if hed_jitter > 0.001:
secondary_tfl += [HEDJitter(hed_jitter, p=cj_p)]
print('hed_jitter:', hed_jitter)
# 1.21修改,增加LAB_jitter方法
# 2.6修改,增加LAB_jitter_hsv方法,lab_jitter改为list,如果是1个,则是hed方法,3个是hsv方法
if lab_jitter is not None:
if len(lab_jitter) == 1:
secondary_tfl += [LABJitter(lab_jitter[0], p=cj_p)] #2.13只有一个时,就只能取第一个
elif len(lab_jitter) == 3 :
l_factor = lab_jitter[0]
a_factor = lab_jitter[1]
b_factor = lab_jitter[2]
secondary_tfl += [LABJitter_hsv(l_factor,a_factor,b_factor, p=cj_p)]
print('lab_jitter:', lab_jitter)
# 12.20修改,修改cj的引入规则,固定亮度,对比度,饱和度=0.4,cj参数可变
# 12.20修改,没有nj时才启用cj
# 12.25修改,这样HSV过于强势,所以改成Quantify论文中的配置,BC+HSV_light_strong
if color_jitter is not None : #默认是None
brightness = color_jitter[0]
contrast = color_jitter[1]
saturation = color_jitter[2]
hue = color_jitter[3]
if brightness > 0.001 or contrast > 0.001 or saturation > 0.001 or hue > 0.001:
# 2.13修改,对cj方法封装,增加概率p
# secondary_tfl += [transforms.ColorJitter(brightness=brightness, contrast=contrast, saturation=saturation, hue=hue)]
secondary_tfl += [HSVJitter(brightness=brightness, contrast=contrast, saturation=saturation, hue=hue, p=cj_p)]
if logger is not None:
logger.info('color_jitter:',secondary_tfl)
else:
print('color_jitter:',secondary_tfl)
final_tfl = []
# 12.26添加
if morphology:
final_tfl += [
RandomAffine(degrees=0,scale=(0.8,1.2)), #和quantify匹配 #不加弹性变化了
RandomGaussBlur(radius=[0,0.1]), #GaussBlur的标准差变化范围[0,0.1],固定死了
RandomGaussianNoise(mean=0.0, variance=0.1, amplitude=1.0), #GaussNoise均值方差和幅度,固定mean,std[0,0.1]
]
if use_prefetcher:
# prefetcher and collate will handle tensor conversion and norm
final_tfl += [ToNumpy()]
else:
final_tfl += [
transforms.ToTensor(),
transforms.Normalize(
mean=torch.tensor(mean),
std=torch.tensor(std))
]
# 12.26修改,re_mode增加功能,如果是torch str,则用torchvision的
if re_prob > 0. and re_mode == 'torch':
final_tfl.append(
RandomErasing_torch(p=re_prob, value='random')) #随机填值
print('RandomErasing_torch')
elif re_prob > 0.:
final_tfl.append(
RandomErasing(re_prob, mode=re_mode, max_count=re_count, num_splits=re_num_splits, device='cpu'))
print('RandomErasing_timm')
#########12.20 debug#######
# 12.20
#表明现在创建的是什么的transform
if logger is not None:
logger.info('train_transform:\n')
else:
print('train_transform:\n')
##########################
if norm_jitter is not None: #1.14修改,判定是否有用nj
if norm_jitter['color_space'] == 'Random': #1.10修改,增加随机性,随机选择一组执行,各组之间差异在于随机增强的空间不同
transforms_list = []
for i in range(len(special_tfl)):
transforms_list.append(transforms.Compose(primary_tfl + [special_tfl[i]] + secondary_tfl + final_tfl))
transforms_ = transforms.RandomChoice(transforms_list)
else:
transforms_ = transforms.Compose(primary_tfl + special_tfl + secondary_tfl + final_tfl)
elif random_jitter == True : # 1.30添加,HEDJitter和HSVJitter进行randomaug,注意此时如果
transforms_list = []
for i in range(len(secondary_tfl)):
transforms_list.append(transforms.Compose(primary_tfl + special_tfl + [secondary_tfl[i]] + final_tfl))
transforms_ = transforms.RandomChoice(transforms_list)
else:
transforms_ = transforms.Compose(primary_tfl + special_tfl + secondary_tfl + final_tfl)
if separate: #1.10修改,增加一个randomchoice
return transforms.Compose(primary_tfl), transforms.Compose(special_tfl), transforms.Compose(secondary_tfl), transforms.Compose(final_tfl)
else:
return transforms_ #transforms.Compose(primary_tfl + special_tfl + secondary_tfl + final_tfl)
def transforms_imagenet_eval(
img_size=224,
crop_pct=None,
interpolation='bilinear',
use_prefetcher=False,
mean=IMAGENET_DEFAULT_MEAN,
std=IMAGENET_DEFAULT_STD,
logger=None):
crop_pct = crop_pct or DEFAULT_CROP_PCT
# 12.20 测试的时候不需要centor crop
# if isinstance(img_size, (tuple, list)):
# assert len(img_size) == 2
# if img_size[-1] == img_size[-2]:
# # fall-back to older behaviour so Resize scales to shortest edge if target is square
# scale_size = int(math.floor(img_size[0] / crop_pct))
# else:
# scale_size = tuple([int(x / crop_pct) for x in img_size])
# else:
# scale_size = int(math.floor(img_size / crop_pct))
# tfl = [
# transforms.Resize(scale_size, interpolation=str_to_interp_mode(interpolation)),
# transforms.CenterCrop(img_size),
# ]
tfl = []
if use_prefetcher:
# prefetcher and collate will handle tensor conversion and norm
tfl += [ToNumpy()]
else:
tfl += [
transforms.ToTensor(),
transforms.Normalize(
mean=torch.tensor(mean),
std=torch.tensor(std))
]
########### 12.16 debug #######3
# 12.20
if logger is not None:
logger.info('\nval_transform:\n')
else :
print('\nval_transform:\n')
#######################
return transforms.Compose(tfl)
def create_transform(
input_size,
is_training=False,
use_prefetcher=False,
no_aug=False,
scale=None,
ratio=None,
hflip=0.5,
vflip=0.,
morphology=None, #12.26添加,是否增加形态学处理
color_jitter=0.4,
norm_jitter=None, #12.20加入 nj方法
hed_jitter=None, #12.26添加,一个theta参数
lab_jitter=None, #1.21添加,一个theta参数
random_jitter=None, #1.30添加,jitter是否需要randomaug
cj_p=1.0, #2.13加入,jitter概率
auto_augment=None,
interpolation='bilinear',
mean=IMAGENET_DEFAULT_MEAN,
std=IMAGENET_DEFAULT_STD,
re_prob=0.,
re_mode='const',
re_count=1,
re_num_splits=0,
crop_pct=None,
tf_preprocessing=False,
separate=False,
logger=None):
if isinstance(input_size, (tuple, list)):
img_size = input_size[-2:]
else:
img_size = input_size
if tf_preprocessing and use_prefetcher:
assert not separate, "Separate transforms not supported for TF preprocessing"
from timm.data.tf_preprocessing import TfPreprocessTransform
transform = TfPreprocessTransform(
is_training=is_training, size=img_size, interpolation=interpolation)
else:
if is_training and no_aug:
assert not separate, "Cannot perform split augmentation with no_aug"
transform = transforms_noaug_train(
img_size,
interpolation=interpolation,
use_prefetcher=use_prefetcher,
mean=mean,
std=std)
elif is_training:
transform = transforms_imagenet_train(
img_size,
scale=scale,
ratio=ratio,
hflip=hflip,
vflip=vflip,
morphology=morphology, #12.26添加,是否增加形态学处理
color_jitter=color_jitter,
norm_jitter=norm_jitter, #12.20加入nj方法,norm_jitter是一个包含所有信息的字典
hed_jitter=hed_jitter, #12.26添加,一个theta参数
lab_jitter=lab_jitter, #1.21添加,一个theta参数
random_jitter=random_jitter, #1.30添加,jitter上是否需要randomaug
cj_p=cj_p, #2.13加入,jitter概率
auto_augment=auto_augment,
interpolation=interpolation,
use_prefetcher=use_prefetcher,
mean=mean,
std=std,
re_prob=re_prob,
re_mode=re_mode,
re_count=re_count,
re_num_splits=re_num_splits,
separate=separate,
logger=None)
else:
assert not separate, "Separate transforms not supported for validation preprocessing"
transform = transforms_imagenet_eval(
img_size,
interpolation=interpolation,
use_prefetcher=use_prefetcher,
mean=mean,
std=std,
crop_pct=crop_pct,
logger=None)
return transform
| 17,307 | 42.27 | 246 | py |
RandStainNA | RandStainNA-master/classification/timm/data/distributed_sampler.py | import math
import torch
from torch.utils.data import Sampler
import torch.distributed as dist
class OrderedDistributedSampler(Sampler):
"""Sampler that restricts data loading to a subset of the dataset.
It is especially useful in conjunction with
:class:`torch.nn.parallel.DistributedDataParallel`. In such case, each
process can pass a DistributedSampler instance as a DataLoader sampler,
and load a subset of the original dataset that is exclusive to it.
.. note::
Dataset is assumed to be of constant size.
Arguments:
dataset: Dataset used for sampling.
num_replicas (optional): Number of processes participating in
distributed training.
rank (optional): Rank of the current process within num_replicas.
"""
def __init__(self, dataset, num_replicas=None, rank=None):
if num_replicas is None:
if not dist.is_available():
raise RuntimeError("Requires distributed package to be available")
num_replicas = dist.get_world_size()
if rank is None:
if not dist.is_available():
raise RuntimeError("Requires distributed package to be available")
rank = dist.get_rank()
self.dataset = dataset
self.num_replicas = num_replicas
self.rank = rank
self.num_samples = int(math.ceil(len(self.dataset) * 1.0 / self.num_replicas))
self.total_size = self.num_samples * self.num_replicas
def __iter__(self):
indices = list(range(len(self.dataset)))
# add extra samples to make it evenly divisible
indices += indices[:(self.total_size - len(indices))]
assert len(indices) == self.total_size
# subsample
indices = indices[self.rank:self.total_size:self.num_replicas]
assert len(indices) == self.num_samples
return iter(indices)
def __len__(self):
return self.num_samples
class RepeatAugSampler(Sampler):
"""Sampler that restricts data loading to a subset of the dataset for distributed,
with repeated augmentation.
It ensures that different each augmented version of a sample will be visible to a
different process (GPU). Heavily based on torch.utils.data.DistributedSampler
This sampler was taken from https://github.com/facebookresearch/deit/blob/0c4b8f60/samplers.py
Used in
Copyright (c) 2015-present, Facebook, Inc.
"""
def __init__(
self,
dataset,
num_replicas=None,
rank=None,
shuffle=True,
num_repeats=3,
selected_round=256,
selected_ratio=0,
):
if num_replicas is None:
if not dist.is_available():
raise RuntimeError("Requires distributed package to be available")
num_replicas = dist.get_world_size()
if rank is None:
if not dist.is_available():
raise RuntimeError("Requires distributed package to be available")
rank = dist.get_rank()
self.dataset = dataset
self.num_replicas = num_replicas
self.rank = rank
self.shuffle = shuffle
self.num_repeats = num_repeats
self.epoch = 0
self.num_samples = int(math.ceil(len(self.dataset) * num_repeats / self.num_replicas))
self.total_size = self.num_samples * self.num_replicas
# Determine the number of samples to select per epoch for each rank.
# num_selected logic defaults to be the same as original RASampler impl, but this one can be tweaked
# via selected_ratio and selected_round args.
selected_ratio = selected_ratio or num_replicas # ratio to reduce selected samples by, num_replicas if 0
if selected_round:
self.num_selected_samples = int(math.floor(
len(self.dataset) // selected_round * selected_round / selected_ratio))
else:
self.num_selected_samples = int(math.ceil(len(self.dataset) / selected_ratio))
def __iter__(self):
# deterministically shuffle based on epoch
g = torch.Generator()
g.manual_seed(self.epoch)
if self.shuffle:
indices = torch.randperm(len(self.dataset), generator=g)
else:
indices = torch.arange(start=0, end=len(self.dataset))
# produce repeats e.g. [0, 0, 0, 1, 1, 1, 2, 2, 2....]
indices = torch.repeat_interleave(indices, repeats=self.num_repeats, dim=0).tolist()
# add extra samples to make it evenly divisible
padding_size = self.total_size - len(indices)
if padding_size > 0:
indices += indices[:padding_size]
assert len(indices) == self.total_size
# subsample per rank
indices = indices[self.rank:self.total_size:self.num_replicas]
assert len(indices) == self.num_samples
# return up to num selected samples
return iter(indices[:self.num_selected_samples])
def __len__(self):
return self.num_selected_samples
def set_epoch(self, epoch):
self.epoch = epoch
| 5,125 | 38.430769 | 113 | py |
RandStainNA | RandStainNA-master/classification/timm/data/random_erasing.py | """ Random Erasing (Cutout)
Originally inspired by impl at https://github.com/zhunzhong07/Random-Erasing, Apache 2.0
Copyright Zhun Zhong & Liang Zheng
Hacked together by / Copyright 2019, Ross Wightman
"""
import random
import math
import torch
def _get_pixels(per_pixel, rand_color, patch_size, dtype=torch.float32, device='cuda'):
# NOTE I've seen CUDA illegal memory access errors being caused by the normal_()
# paths, flip the order so normal is run on CPU if this becomes a problem
# Issue has been fixed in master https://github.com/pytorch/pytorch/issues/19508
if per_pixel:
return torch.empty(patch_size, dtype=dtype, device=device).normal_()
elif rand_color:
return torch.empty((patch_size[0], 1, 1), dtype=dtype, device=device).normal_()
else:
return torch.zeros((patch_size[0], 1, 1), dtype=dtype, device=device)
class RandomErasing:
""" Randomly selects a rectangle region in an image and erases its pixels.
'Random Erasing Data Augmentation' by Zhong et al.
See https://arxiv.org/pdf/1708.04896.pdf
This variant of RandomErasing is intended to be applied to either a batch
or single image tensor after it has been normalized by dataset mean and std.
Args:
probability: Probability that the Random Erasing operation will be performed.
min_area: Minimum percentage of erased area wrt input image area.
max_area: Maximum percentage of erased area wrt input image area.
min_aspect: Minimum aspect ratio of erased area.
mode: pixel color mode, one of 'const', 'rand', or 'pixel'
'const' - erase block is constant color of 0 for all channels
'rand' - erase block is same per-channel random (normal) color
'pixel' - erase block is per-pixel random (normal) color
max_count: maximum number of erasing blocks per image, area per box is scaled by count.
per-image count is randomly chosen between 1 and this value.
"""
def __init__(
self,
probability=0.5, min_area=0.02, max_area=1/3, min_aspect=0.3, max_aspect=None,
mode='const', min_count=1, max_count=None, num_splits=0, device='cuda'):
self.probability = probability
self.min_area = min_area
self.max_area = max_area
max_aspect = max_aspect or 1 / min_aspect
self.log_aspect_ratio = (math.log(min_aspect), math.log(max_aspect))
self.min_count = min_count
self.max_count = max_count or min_count
self.num_splits = num_splits
self.mode = mode.lower()
self.rand_color = False
self.per_pixel = False
if self.mode == 'rand':
self.rand_color = True # per block random normal
elif self.mode == 'pixel':
self.per_pixel = True # per pixel random normal
else:
assert not self.mode or self.mode == 'const'
self.device = device
def _erase(self, img, chan, img_h, img_w, dtype):
if random.random() > self.probability:
return
area = img_h * img_w
count = self.min_count if self.min_count == self.max_count else \
random.randint(self.min_count, self.max_count)
for _ in range(count):
for attempt in range(10):
target_area = random.uniform(self.min_area, self.max_area) * area / count
aspect_ratio = math.exp(random.uniform(*self.log_aspect_ratio))
h = int(round(math.sqrt(target_area * aspect_ratio)))
w = int(round(math.sqrt(target_area / aspect_ratio)))
if w < img_w and h < img_h:
top = random.randint(0, img_h - h)
left = random.randint(0, img_w - w)
img[:, top:top + h, left:left + w] = _get_pixels(
self.per_pixel, self.rand_color, (chan, h, w),
dtype=dtype, device=self.device)
break
def __call__(self, input):
if len(input.size()) == 3:
self._erase(input, *input.size(), input.dtype)
else:
batch_size, chan, img_h, img_w = input.size()
# skip first slice of batch if num_splits is set (for clean portion of samples)
batch_start = batch_size // self.num_splits if self.num_splits > 1 else 0
for i in range(batch_start, batch_size):
self._erase(input[i], chan, img_h, img_w, input.dtype)
return input
def __repr__(self):
# NOTE simplified state for repr
fs = self.__class__.__name__ + f'(p={self.probability}, mode={self.mode}'
fs += f', count=({self.min_count}, {self.max_count}))'
return fs
| 4,767 | 44.846154 | 95 | py |
RandStainNA | RandStainNA-master/classification/timm/data/loader.py | """ Loader Factory, Fast Collate, CUDA Prefetcher
Prefetcher and Fast Collate inspired by NVIDIA APEX example at
https://github.com/NVIDIA/apex/commit/d5e2bb4bdeedd27b1dfaf5bb2b24d6c000dee9be#diff-cf86c282ff7fba81fad27a559379d5bf
Hacked together by / Copyright 2019, Ross Wightman
"""
import random
from functools import partial
from typing import Callable
import torch.utils.data
from torch.utils.data.sampler import SubsetRandomSampler #2.17添加
import numpy as np
import copy #2.17添加
from .transforms_factory import create_transform
from .constants import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
from .distributed_sampler import OrderedDistributedSampler, RepeatAugSampler
from .random_erasing import RandomErasing
from .mixup import FastCollateMixup
def fast_collate(batch):
""" A fast collation function optimized for uint8 images (np array or torch) and int64 targets (labels)"""
assert isinstance(batch[0], tuple)
batch_size = len(batch)
if isinstance(batch[0][0], tuple):
# This branch 'deinterleaves' and flattens tuples of input tensors into one tensor ordered by position
# such that all tuple of position n will end up in a torch.split(tensor, batch_size) in nth position
inner_tuple_size = len(batch[0][0])
flattened_batch_size = batch_size * inner_tuple_size
targets = torch.zeros(flattened_batch_size, dtype=torch.int64)
tensor = torch.zeros((flattened_batch_size, *batch[0][0][0].shape), dtype=torch.uint8)
for i in range(batch_size):
assert len(batch[i][0]) == inner_tuple_size # all input tensor tuples must be same length
for j in range(inner_tuple_size):
targets[i + j * batch_size] = batch[i][1]
tensor[i + j * batch_size] += torch.from_numpy(batch[i][0][j])
return tensor, targets
elif isinstance(batch[0][0], np.ndarray):
targets = torch.tensor([b[1] for b in batch], dtype=torch.int64)
assert len(targets) == batch_size
tensor = torch.zeros((batch_size, *batch[0][0].shape), dtype=torch.uint8)
for i in range(batch_size):
tensor[i] += torch.from_numpy(batch[i][0])
return tensor, targets
elif isinstance(batch[0][0], torch.Tensor):
targets = torch.tensor([b[1] for b in batch], dtype=torch.int64)
assert len(targets) == batch_size
tensor = torch.zeros((batch_size, *batch[0][0].shape), dtype=torch.uint8)
for i in range(batch_size):
tensor[i].copy_(batch[i][0])
return tensor, targets
else:
assert False
class PrefetchLoader:
def __init__(self,
loader,
mean=IMAGENET_DEFAULT_MEAN,
std=IMAGENET_DEFAULT_STD,
fp16=False,
re_prob=0.,
re_mode='const',
re_count=1,
re_num_splits=0):
self.loader = loader
self.mean = torch.tensor([x * 255 for x in mean]).cuda().view(1, 3, 1, 1)
self.std = torch.tensor([x * 255 for x in std]).cuda().view(1, 3, 1, 1)
self.fp16 = fp16
if fp16:
self.mean = self.mean.half()
self.std = self.std.half()
if re_prob > 0.:
self.random_erasing = RandomErasing(
probability=re_prob, mode=re_mode, max_count=re_count, num_splits=re_num_splits)
else:
self.random_erasing = None
def __iter__(self):
stream = torch.cuda.Stream()
first = True
for next_input, next_target in self.loader:
with torch.cuda.stream(stream):
next_input = next_input.cuda(non_blocking=True)
next_target = next_target.cuda(non_blocking=True)
if self.fp16:
next_input = next_input.half().sub_(self.mean).div_(self.std)
else:
next_input = next_input.float().sub_(self.mean).div_(self.std)
if self.random_erasing is not None:
next_input = self.random_erasing(next_input)
if not first:
yield input, target
else:
first = False
torch.cuda.current_stream().wait_stream(stream)
input = next_input
target = next_target
yield input, target
def __len__(self):
return len(self.loader)
@property
def sampler(self):
return self.loader.sampler
@property
def dataset(self):
return self.loader.dataset
@property
def mixup_enabled(self):
if isinstance(self.loader.collate_fn, FastCollateMixup):
return self.loader.collate_fn.mixup_enabled
else:
return False
@mixup_enabled.setter
def mixup_enabled(self, x):
if isinstance(self.loader.collate_fn, FastCollateMixup):
self.loader.collate_fn.mixup_enabled = x
def _worker_init(worker_id, worker_seeding='all'):
worker_info = torch.utils.data.get_worker_info()
assert worker_info.id == worker_id
if isinstance(worker_seeding, Callable):
seed = worker_seeding(worker_info)
random.seed(seed)
torch.manual_seed(seed)
np.random.seed(seed % (2 ** 32 - 1))
else:
assert worker_seeding in ('all', 'part')
# random / torch seed already called in dataloader iter class w/ worker_info.seed
# to reproduce some old results (same seed + hparam combo), partial seeding is required (skip numpy re-seed)
if worker_seeding == 'all':
np.random.seed(worker_info.seed % (2 ** 32 - 1))
def create_loader(
dataset,
input_size,
batch_size,
is_training=False,
use_prefetcher=True,
no_aug=False,
re_prob=0.,
re_mode='const',
re_count=1,
re_split=False,
scale=None,
ratio=None,
hflip=0.5,
vflip=0.,
# color_jitter=0.4,
morphology=None, #12.26添加,是否增加形态学处理
color_jitter=[0,0,0,0],
norm_jitter=None, #12.20修改,lab通道统计值+stdhyper,都是字典形式
hed_jitter=None, #12.26添加,一个theta参数
lab_jitter=None, #1.21添加,一个theta参数
random_jitter=None, #1.30添加,jitter是否需要randomaug
cj_p=1.0, #2.13添加,jitter的概率
nj_dynamic=False, #2.17添加,dynamic是否开启
auto_augment=None,
num_aug_repeats=0,
num_aug_splits=0,
interpolation='bilinear',
mean=IMAGENET_DEFAULT_MEAN,
std=IMAGENET_DEFAULT_STD,
num_workers=1,
distributed=False,
crop_pct=None,
collate_fn=None,
pin_memory=False,
fp16=False,
tf_preprocessing=False,
use_multi_epochs_loader=False,
persistent_workers=True,
worker_seeding='all',
logger=None
):
re_num_splits = 0
if re_split:
# apply RE to second half of batch if no aug split otherwise line up with aug split
re_num_splits = num_aug_splits or 2
dataset.transform = create_transform(
input_size,
is_training=is_training,
use_prefetcher=use_prefetcher,
no_aug=no_aug,
scale=scale,
ratio=ratio,
hflip=hflip,
vflip=vflip,
morphology=morphology, #12.26添加,是否增加形态学处理
color_jitter=color_jitter,
norm_jitter=norm_jitter, #12.20修改,增加nj方法
hed_jitter=hed_jitter, #12.26添加,一个theta参数
lab_jitter=lab_jitter, #1.21添加,一个theta参数
random_jitter=random_jitter, #1.30添加,jitter是否需要randomaug
cj_p=cj_p, #2.13添加,color jitter的概率
auto_augment=auto_augment,
interpolation=interpolation,
mean=mean,
std=std,
crop_pct=crop_pct,
tf_preprocessing=tf_preprocessing,
re_prob=re_prob,
re_mode=re_mode,
re_count=re_count,
re_num_splits=re_num_splits,
separate=num_aug_splits > 0,
logger=None
)
if nj_dynamic: #2.17添加
dataset_copy = copy.deepcopy(dataset)
dataset_copy.transform = create_transform(
input_size,
is_training=is_training,
use_prefetcher=use_prefetcher,
no_aug=no_aug,
scale=scale,
ratio=ratio,
hflip=hflip,
vflip=vflip,
morphology=morphology, #12.26添加,是否增加形态学处理
color_jitter=color_jitter,
norm_jitter=None, #norm_jitter, #12.20修改,增加nj方法 #2.17,如果是dynamic,则不需要nj
hed_jitter=hed_jitter, #12.26添加,一个theta参数
lab_jitter=lab_jitter, #1.21添加,一个theta参数
random_jitter=random_jitter, #1.30添加,jitter是否需要randomaug
cj_p=cj_p, #2.13添加,color jitter的概率
auto_augment=auto_augment,
interpolation=interpolation,
mean=mean,
std=std,
crop_pct=crop_pct,
tf_preprocessing=tf_preprocessing,
re_prob=re_prob,
re_mode=re_mode,
re_count=re_count,
re_num_splits=re_num_splits,
separate=num_aug_splits > 0,
logger=None
)
#1.21添加,输出对应的transform
if logger is not None:
logger.info('final dataset.transform:',dataset.transform)
else:
print('final dataset.transform:',dataset.transform)
sampler = None
if distributed and not isinstance(dataset, torch.utils.data.IterableDataset):
if is_training:
if num_aug_repeats:
sampler = RepeatAugSampler(dataset, num_repeats=num_aug_repeats)
else:
sampler = torch.utils.data.distributed.DistributedSampler(dataset)
else:
# This will add extra duplicate entries to result in equal num
# of samples per-process, will slightly alter validation results
sampler = OrderedDistributedSampler(dataset)
elif nj_dynamic : #2.17添加,修改sampler for concat dataset
if is_training:
train_idx = list(range(len(dataset)))
sampler = SubsetRandomSampler(train_idx)
else:
assert num_aug_repeats == 0, "RepeatAugment not currently supported in non-distributed or IterableDataset use"
if collate_fn is None:
collate_fn = fast_collate if use_prefetcher else torch.utils.data.dataloader.default_collate
loader_class = torch.utils.data.DataLoader
if use_multi_epochs_loader:
loader_class = MultiEpochsDataLoader
loader_args = dict(
batch_size=batch_size,
shuffle=not isinstance(dataset, torch.utils.data.IterableDataset) and sampler is None and is_training,
num_workers=num_workers,
sampler=sampler,
collate_fn=collate_fn,
pin_memory=pin_memory,
drop_last=is_training,
worker_init_fn=partial(_worker_init, worker_seeding=worker_seeding),
persistent_workers=persistent_workers
)
# 1.20添加logger信息
if logger is not None:
logger.info('loader_args:\n',loader_args)
else:
print('loader_args:\n',loader_args)
try:
# 2.17添加,concat dataset
if nj_dynamic:
dataset = ConcatDataset(dataset,dataset_copy)
loader = loader_class(dataset, **loader_args)
except TypeError as e:
loader_args.pop('persistent_workers') # only in Pytorch 1.7+
loader = loader_class(dataset, **loader_args)
if use_prefetcher:
prefetch_re_prob = re_prob if is_training and not no_aug else 0.
loader = PrefetchLoader(
loader,
mean=mean,
std=std,
fp16=fp16,
re_prob=prefetch_re_prob,
re_mode=re_mode,
re_count=re_count,
re_num_splits=re_num_splits
)
return loader
class MultiEpochsDataLoader(torch.utils.data.DataLoader):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self._DataLoader__initialized = False
self.batch_sampler = _RepeatSampler(self.batch_sampler)
self._DataLoader__initialized = True
self.iterator = super().__iter__()
def __len__(self):
return len(self.batch_sampler.sampler)
def __iter__(self):
for i in range(len(self)):
yield next(self.iterator)
class _RepeatSampler(object):
""" Sampler that repeats forever.
Args:
sampler (Sampler)
"""
def __init__(self, sampler):
self.sampler = sampler
def __iter__(self):
while True:
yield from iter(self.sampler)
# 2.17添加
class ConcatDataset(torch.utils.data.Dataset):
def __init__(self, *datasets):
self.datasets = datasets
def __getitem__(self, i):
return tuple(d[i] for d in self.datasets)
def __len__(self):
return min(len(d) for d in self.datasets) | 12,817 | 33.926431 | 118 | py |
RandStainNA | RandStainNA-master/classification/timm/data/transforms.py | import torch
import torchvision.transforms.functional as F
from torchvision import transforms #2.13加入
try:
from torchvision.transforms.functional import InterpolationMode
has_interpolation_mode = True
except ImportError:
has_interpolation_mode = False
from PIL import Image, ImageFilter #1.20加入
import warnings
import math
import random
import numpy as np
import cv2 #12.20加入
from skimage import color #12.26加入
import os #12.20加入
# 2.27引入,t分布
def single_t_rvs(loc, scale):
'''generate random variables of multivariate t distribution
Parameters
----------
m : array_like
mean of random variable, length determines dimension of random variable
S : array_like
square array of covariance matrix
df : int or float
degrees of freedom #自由度
n : int
number of observations, return random array will be (n, len(m))
Returns
-------
rvs : ndarray, (n, len(m))
each row is an independent draw of a multivariate t distributed
random variable
'''
loc = np.array(loc)
df = 2000 #给一个差不多的值即可
x = (np.random.chisquare(df, 1)/df)[0] # 卡方分布,所以t分布是自己实现的
# https://baike.baidu.com/item/t%E5%88%86%E5%B8%83/299142
# z = np.random.multivariate_normal(np.zeros(d),S,(n,)) #目前是多元正态,搞一元即可
z = np.random.normal(loc=0, scale=scale)
return loc + z/np.sqrt(x) # same output format as random.multivariate_normal
# m = 140
# S = 40
# x = single_t_rvs(loc, scale) ,和np.randon.distribution统一
#12.17 norm&jitter引入方法
class color_norm_jitter(object):
'''
参数:
1.lab的三个channel的mean和std(这个一般是在外面算完传入进来的,在里面算分布)
2.Reinhard_cn方法
3.概率p
'''
def __init__(self, mean, std, std_hyper=0, probability=0, color_space=None, distribution=None):
self.mean = mean # [l,a,b] 是l_mean的正态分布的均值和方差,是一个字典
self.std = std # [l,a,b]
self.std_adjust = std_hyper #=0时按照统计规则
self.p = probability # 一半概率选一个
self.color_space = color_space
self.distribution = distribution #1.30添加,手工指定分布
def getavgstd(self, image):
avg = []
std = []
image_avg_l = np.mean(image[:, :, 0])
image_std_l = np.std(image[:, :, 0])
image_avg_a = np.mean(image[:, :, 1])
image_std_a = np.std(image[:, :, 1])
image_avg_b = np.mean(image[:, :, 2])
image_std_b = np.std(image[:, :, 2])
avg.append(image_avg_l)
avg.append(image_avg_a)
avg.append(image_avg_b)
std.append(image_std_l)
std.append(image_std_a)
std.append(image_std_b)
return (avg, std)
def quick_loop(self, image1, image_avg, image_std, temp_avg, temp_std):
if self.color_space != 'HED': #LAB和HSV
image_std = np.clip(np.array(image_std), 0.001, 255)
image1 = (image1 - np.array(image_avg)) * (np.array(temp_std) / np.array(image_std)) + np.array(temp_avg)
image1 = np.clip(image1, 0, 255).astype(np.uint8)
else: #HED
image_std = np.clip(np.array(image_std), 0.0001, 255) #经常容易除0,保护一下
image1 = (image1 - np.array(image_avg)) * (np.array(temp_std) / np.array(image_std)) + np.array(temp_avg)
return image1
def __call__(self, img):
# 这边应该考虑单张图就好了
if np.random.rand(1) < self.p:
image = cv2.cvtColor(np.array(img), cv2.COLOR_RGB2BGR) # 注意颜色空间转换
if self.color_space == 'LAB':
image = cv2.cvtColor(image, cv2.COLOR_BGR2LAB) # 注意颜色空间转换
elif self.color_space == 'HSV':
image = cv2.cvtColor(image, cv2.COLOR_BGR2HSV) # 注意颜色空间转换
elif self.color_space == 'HED': #1.30将HED空间扰动也加入
img = cv2.cvtColor(image, cv2.COLOR_BGR2RGB) #hed的变化是在rgb上变化
image = color.rgb2hed(img) #rgb, [0,1]
image_avg, image_std = self.getavgstd(image)
l_mean, a_mean, b_mean = self.mean[0], self.mean[1], self.mean[2]
l_std, a_std, b_std = self.std[0], self.std[1], self.std[2]
std_adjust = self.std_adjust
#1.30修改,l_mean已经有'mean','std','distribution'3个参数
if self.distribution != None: #1.30添加,如果有手工指定分布,则全部按照分布的来,否则按照统计的来
if self.distribution == 'uniform':
np_distribution = np.random.uniform #均匀分布时,按照3Σ原则来确定采样范围
template_avg_l = np_distribution(low=l_mean['mean']-3*l_mean['std'], high=l_mean['mean']+3*l_mean['std'])
template_std_l = np_distribution(low=l_std['mean']-3*l_std['std'], high=l_std['mean']+3*l_std['std'])
template_avg_a = np_distribution(low=a_mean['mean']-3*a_mean['std'], high=a_mean['mean']+3*a_mean['std'])
template_std_a = np_distribution(low=a_std['mean']-3*a_std['std'], high=a_std['mean']+3*a_std['std'])
template_avg_b = np_distribution(low=b_mean['mean']-3*b_mean['std'], high=b_mean['mean']+3*b_mean['std'])
template_std_b = np_distribution(low=b_std['mean']-3*b_std['std'], high=b_std['mean']+3*b_std['std'])
else: #不是均匀分布时,考虑的是均值和方差
if self.distribution == 'normal':
np_distribution = np.random.normal
elif self.distribution == 'laplace':
np_distribution = np.random.laplace
# 2.05添加,1+std调整为全部的
template_avg_l = np_distribution(loc=l_mean['mean'], scale=l_mean['std']*(1+std_adjust))
template_std_l = np_distribution(loc=l_std['mean'], scale=l_std['std']*(1+std_adjust))
template_avg_a = np_distribution(loc=a_mean['mean'], scale=a_mean['std']*(1+std_adjust))
template_std_a = np_distribution(loc=a_std['mean'], scale=a_std['std']*(1+std_adjust))
template_avg_b = np_distribution(loc=b_mean['mean'], scale=b_mean['std']*(1+std_adjust))
template_std_b = np_distribution(loc=b_std['mean'], scale=b_std['std']*(1+std_adjust))
else: #如果没有指定分布,则需要根据nj参数来确定各分布
np_d_true_list = [l_mean['distribution'], l_std['distribution'], a_mean['distribution'], a_std['distribution'], b_mean['distribution'], b_std['distribution']]
# print(np_d_true_list)
np_d_sample_list = []
for np_d_true in np_d_true_list:
if np_d_true == 'norm':
np_d_sample_list.append(np.random.normal)
elif np_d_true == 'laplace':
np_d_sample_list.append(np.random.laplace)
elif self.distribution == 't':
np_distribution = single_t_rvs #2.27添加,t分布
# print(np_d_sample_list)
# 2.5修改,1+std改为全部
template_avg_l = np_d_sample_list[0](loc=l_mean['mean'], scale=l_mean['std']*(1+std_adjust))
template_std_l = np_d_sample_list[1](loc=l_std['mean'], scale=l_std['std']*(1+std_adjust))
template_avg_a = np_d_sample_list[2](loc=a_mean['mean'], scale=a_mean['std']*(1+std_adjust))
template_std_a = np_d_sample_list[3](loc=a_std['mean'], scale=a_std['std']*(1+std_adjust))
template_avg_b = np_d_sample_list[4](loc=b_mean['mean'], scale=b_mean['std']*(1+std_adjust))
template_std_b = np_d_sample_list[5](loc=b_std['mean'], scale=b_std['std']*(1+std_adjust))
template_avg = [float(template_avg_l), float(template_avg_a), float(template_avg_b)]
template_std = [float(template_std_l), float(template_std_a), float(template_std_b)]
image = self.quick_loop(image, image_avg, image_std, template_avg, template_std)
if self.color_space == 'LAB':
image = cv2.cvtColor(image, cv2.COLOR_LAB2BGR)
return Image.fromarray(cv2.cvtColor(image,cv2.COLOR_BGR2RGB))
elif self.color_space == 'HSV':
image = cv2.cvtColor(image, cv2.COLOR_HSV2BGR)
return Image.fromarray(cv2.cvtColor(image,cv2.COLOR_BGR2RGB)) #这个算是调整好了
elif self.color_space == 'HED':
nimg = color.hed2rgb(image)
imin = nimg.min()
imax = nimg.max()
rsimg = (255 * (nimg - imin) / (imax - imin)).astype('uint8') # rescale to [0,255]
return Image.fromarray(rsimg)
else:
return img
# 1.21引入,print内容添加
def __repr__(self):
format_string = self.__class__.__name__ + "("
format_string += f"methods=Reinhard"
format_string += f", colorspace={self.color_space}"
format_string += f", mean={self.mean}"
format_string += f", std={self.std}"
format_string += f", std_adjust={self.std_adjust}"
format_string += f", distribution={self.distribution}" #1.30添加,print期望分布
format_string += f", p={self.p})"
return format_string
# 12.26 引入hed_nrom_jitter
class hed_norm_jitter(object):
"""Randomly perturbe the HED color space value an RGB image.
First, it disentangled the hematoxylin and eosin color channels by color deconvolution method using a fixed matrix.
Second, it perturbed the hematoxylin, eosin and DAB stains independently.
Third, it transformed the resulting stains into regular RGB color space.
Args:
theta (float): How much to jitter HED color space,
alpha is chosen from a uniform distribution [1-theta, 1+theta]
betti is chosen from a uniform distribution [-theta, theta]
the jitter formula is **s' = \alpha * s + \betti**
"""
def __init__(self, mean, std, std_hyper=0, probability=0): # mean是正态分布的均值,std是正态分布的方差?标准差?
self.mean = mean # [l,a,b] 是l_mean的正态分布的均值和方差,是一个字典
self.std = std # [l,a,b]
self.std_adjust = std_hyper #=0时按照统计规则
self.p = probability # 一半概率选一个
def getavgstd(self, image):
avg = []
std = []
image_avg_l = np.mean(image[:, :, 0])
image_std_l = np.std(image[:, :, 0])
image_avg_a = np.mean(image[:, :, 1])
image_std_a = np.std(image[:, :, 1])
image_avg_b = np.mean(image[:, :, 2])
image_std_b = np.std(image[:, :, 2])
avg.append(image_avg_l)
avg.append(image_avg_a)
avg.append(image_avg_b)
std.append(image_std_l)
std.append(image_std_a)
std.append(image_std_b)
return (avg, std)
def quick_loop(self, image1, image_avg, image_std, temp_avg, temp_std):
image1 = (image1 - np.array(image_avg)) * (np.array(temp_std) / np.array(image_std)) + np.array(temp_avg)
# image1 = np.clip(image1, 0, 255).astype(np.uint8) #hed的范围很小
return image1
def __call__(self, img):
img = np.array(img) #rgb,1.10发现bug,这边是直接rgb转的hed,但是统计的是bgr2hed,所以对应不上,结果不够理想
img_hed = color.rgb2hed(img) #rgb, [0,1]
image_avg, image_std = self.getavgstd(img_hed)
h_mean, e_mean, d_mean = self.mean[0], self.mean[1], self.mean[2]
h_std, e_std, d_std = self.std[0], self.std[1], self.std[2]
std_adjust = self.std_adjust
template_avg_h, template_std_h = np.random.normal(loc=h_mean['mean'], scale=h_mean['std']*(1+std_adjust),
size=1), np.random.laplace(loc=h_std['mean'],
scale=h_std['std'], size=1)
template_avg_e, template_std_e = np.random.laplace(loc=e_mean['mean'], scale=e_mean['std']*(1+std_adjust),
size=1), np.random.laplace(loc=e_std['mean'],
scale=e_std['std'], size=1)
template_avg_d, template_std_d = np.random.laplace(loc=d_mean['mean'], scale=d_mean['std']*(1+std_adjust),
size=1), np.random.laplace(loc=d_std['mean'],
scale=d_std['std'], size=1)
template_avg = [float(template_avg_h), float(template_avg_e), float(template_avg_d)]
template_std = [float(template_std_h), float(template_std_e), float(template_std_d)]
image = self.quick_loop(img_hed, image_avg, image_std, template_avg, template_std) #返回也是[0,1]
nimg = color.hed2rgb(image)
imin = nimg.min()
imax = nimg.max()
rsimg = (255 * (nimg - imin) / (imax - imin)).astype('uint8') # rescale to [0,255]
return Image.fromarray(rsimg)
# 1.21引入,print内容添加
def __repr__(self):
format_string = self.__class__.__name__ + "("
format_string += f"methods=Reinhard"
format_string += f", colorspace=HED"
format_string += f", mean={self.mean}"
format_string += f", std={self.std}"
format_string += f", std_adjust={self.std_adjust}"
format_string += f", p={self.p})"
return format_string
# 2.13加入hsvjitter
# 对transform.colorJitter封装
# 其实可以random.apply
class HSVJitter(object):
def __init__(self, brightness=0.0, contrast=0.0, saturation=0.0, hue=0.0, p=1.0):
self.brightness=brightness
self.contrast=contrast
self.saturation=saturation
self.hue=hue
self.p=p
self.colorJitter=transforms.ColorJitter(brightness=brightness, contrast=contrast, saturation=saturation, hue=hue)
def __call__(self, img):
if np.random.rand(1) < self.p: #2.13加入概率
img_process = self.colorJitter(img)
return img_process
else:
return img
def __repr__(self):
format_string = "("
format_string += self.colorJitter.__repr__()
format_string += f", p={self.p})"
return format_string
#12.25引入HEDJitter方法
class HEDJitter(object):
"""Randomly perturbe the HED color space value an RGB image.
First, it disentangled the hematoxylin and eosin color channels by color deconvolution method using a fixed matrix.
Second, it perturbed the hematoxylin, eosin and DAB stains independently.
Third, it transformed the resulting stains into regular RGB color space.
Args:
theta (float): How much to jitter HED color space,
alpha is chosen from a uniform distribution [1-theta, 1+theta]
betti is chosen from a uniform distribution [-theta, theta]
the jitter formula is **s' = \alpha * s + \betti**
"""
def __init__(self, theta=0., p=1.0): # HED_light: theta=0.05; HED_strong: theta=0.2
# assert isinstance(theta, numbers.Number), "theta should be a single number."
self.theta = theta
# 12.26这边的随机采样不应该是这样的,应该是每次都随机
self.alpha = 0 # np.random.uniform(1-theta, 1+theta, (1, 3))
self.betti = 0 # np.random.uniform(-theta, theta, (1, 3))
self.p = p #2.13加入
@staticmethod
def adjust_HED(img, alpha, betti):
img = np.array(img)
s = np.reshape(color.rgb2hed(img), (-1, 3))
ns = alpha * s + betti # perturbations on HED color space
nimg = color.hed2rgb(np.reshape(ns, img.shape))
imin = nimg.min()
imax = nimg.max()
rsimg = (255 * (nimg - imin) / (imax - imin)).astype('uint8') # rescale to [0,255]
# transfer to PIL image
return Image.fromarray(rsimg)
def __call__(self, img):
# 每张图片都重新来弄,既可以记录,又可以更新
if np.random.rand(1) < self.p: #2.13加入概率
self.alpha = np.random.uniform(1-self.theta, 1+self.theta, (1, 3))
self.betti = np.random.uniform(-self.theta, self.theta, (1, 3))
return self.adjust_HED(img, self.alpha, self.betti)
else:
return img
def __repr__(self):
format_string = self.__class__.__name__ + '('
format_string += 'theta={0}'.format(self.theta)
format_string += ', alpha={0}'.format(self.alpha)
format_string += ', betti={0}'.format(self.betti)
format_string += ', p={0})'.format(self.p)
return format_string
#12.25引入LABJitter方法
class LABJitter(object):
"""Randomly perturbe the LAB color space value an RGB image.
First, it disentangled the hematoxylin and eosin color channels by color deconvolution method using a fixed matrix.
Second, it perturbed the hematoxylin, eosin and DAB stains independently.
Third, it transformed the resulting stains into regular RGB color space.
Args:
theta (float): How much to jitter HED color space,
alpha is chosen from a uniform distribution [1-theta, 1+theta]
betti is chosen from a uniform distribution [-theta, theta]
the jitter formula is **s' = \alpha * s + \betti**
"""
def __init__(self, theta=0., p=1.0): # LAB_light: theta=0.05; LAB_strong: theta=0.2
# assert isinstance(theta, numbers.Number), "theta should be a single number."
self.theta = theta
# 12.26这边的随机采样不应该是这样的,应该是每次都随机
self.alpha = 0 # np.random.uniform(1-theta, 1+theta, (1, 3))
self.betti = 0 # np.random.uniform(-theta, theta, (1, 3))
self.p = p #2.13加入概率
@staticmethod
def adjust_LAB(img, alpha, betti):
img = np.array(img)
s = np.reshape(color.rgb2lab(img), (-1, 3)) #1.21修改,rgb2hed改为rgb2lab
ns = alpha * s + betti # perturbations on LAB color space
nimg = color.lab2rgb(np.reshape(ns, img.shape)) #1.21修改,hed2rgb改为lab2rgb
imin = nimg.min()
imax = nimg.max()
rsimg = (255 * (nimg - imin) / (imax - imin)).astype('uint8') # rescale to [0,255]
# transfer to PIL image
return Image.fromarray(rsimg)
def __call__(self, img):
# 每张图片都重新来弄,既可以记录,又可以更新
if np.random.rand(1) < self.p: #2.13加入概率
self.alpha = np.random.uniform(1-self.theta, 1+self.theta, (1, 3))
self.betti = np.random.uniform(-self.theta, self.theta, (1, 3))
return self.adjust_LAB(img, self.alpha, self.betti)
else:
return img
def __repr__(self):
format_string = self.__class__.__name__ + '('
format_string += 'theta={0}'.format(self.theta)
format_string += ', alpha={0}'.format(self.alpha)
format_string += ', betti={0}'.format(self.betti)
format_string += ', p={0})'.format(self.p)
return format_string
# 2.6 加入labjitter-hsv策略
# 借鉴pytorch调整hue来修改各通道参数
class LABJitter_hsv(object):
def __init__(self, l_factor, a_factor, b_factor, p=1.0):
self.l_factor = l_factor
self.a_factor = a_factor
self.b_factor = b_factor
self.p = p #2.13加入概率
def adjust_channel(self, channel, factor) -> Image.Image:
if not (0.5 <= factor <= 1.5):
raise ValueError(f"factor ({factor}) is not in [-0.5, 0.5].")
# h, s, v = img.convert("HSV").split()
channel = np.array(channel, dtype=np.uint8) #确保整型
# uint8 addition take cares of rotation across boundaries
with np.errstate(over="ignore"):
channel += np.uint8(factor * 255)
channel = np.array(channel, dtype=np.uint8) #将超出范围的限制在0-255
return channel
def __call__(self, img):
# 每张图片都重新来弄,既可以记录,又可以更新
if np.random.rand(1) < self.p: #2.13加入概率
l_factor = np.random.uniform(1-self.l_factor, 1+self.l_factor)
a_factor = np.random.uniform(1-self.a_factor, 1+self.a_factor)
b_factor = np.random.uniform(1-self.b_factor, 1+self.b_factor)
img_bgr = cv2.cvtColor(np.array(img), cv2.COLOR_RGB2BGR)
img_lab = cv2.cvtColor(img_bgr, cv2.COLOR_BGR2LAB)
np_l, np_a, np_b = cv2.split(img_lab)
np_l = self.adjust_channel(np_l, l_factor)
np_a = self.adjust_channel(np_a, a_factor)
np_b = self.adjust_channel(np_b, b_factor)
LAB = cv2.merge([np_l, np_a, np_b])
image = cv2.cvtColor(LAB, cv2.COLOR_LAB2BGR)
return Image.fromarray(cv2.cvtColor(image,cv2.COLOR_BGR2RGB))
else:
return img
def __repr__(self):
format_string = self.__class__.__name__ + '('
format_string += 'l_factor={0}'.format(self.l_factor)
format_string += ', a_factor={0}'.format(self.a_factor)
format_string += ', b_factor={0}'.format(self.b_factor)
format_string += ', p={0})'.format(self.p)
return format_string
#12.25引入GaussBlur
class RandomGaussBlur(object):
"""Random GaussBlurring on image by radius parameter.
Args:
radius (list, tuple): radius range for selecting from; you'd better set it < 2
"""
def __init__(self, radius=None):
self.radius = radius
def __call__(self, img):
radius = random.uniform(self.radius[0], self.radius[1]) #随机模糊
return img.filter(ImageFilter.GaussianBlur(radius=radius)) #只需要高斯核的标准差
def __repr__(self):
return self.__class__.__name__ + '(Gaussian Blur radius=[{0},{1}])'.format(self.radius[0],self.radius[1])
# 12.25 引入高斯噪声
class RandomGaussianNoise(object):
def __init__(self, mean=0.0, variance=1.0, amplitude=1.0):
self.mean = mean
self.variance = variance
self.amplitude = amplitude
def __call__(self, img):
img = np.array(img)
h, w, c = img.shape
N = self.amplitude * np.random.normal(loc=self.mean, scale=np.random.uniform(0, self.variance), size=(h, w, 1)) #弄成随机方差
N = np.repeat(N, c, axis=2)
img = N + img
img[img > 255] = 255 # 避免有值超过255而反转
img = Image.fromarray(img.astype('uint8')).convert('RGB')
return img
# 1.21引入,print内容添加
def __repr__(self):
format_string = self.__class__.__name__ + "("
format_string += f"mean={self.mean}"
format_string += f", variance=uniform[0,{self.variance}]"
format_string += f", amplitude={self.amplitude})"
return format_string
class ToNumpy:
def __call__(self, pil_img):
np_img = np.array(pil_img, dtype=np.uint8)
if np_img.ndim < 3:
np_img = np.expand_dims(np_img, axis=-1)
np_img = np.rollaxis(np_img, 2) # HWC to CHW
return np_img
class ToTensor:
def __init__(self, dtype=torch.float32):
self.dtype = dtype
def __call__(self, pil_img):
np_img = np.array(pil_img, dtype=np.uint8)
if np_img.ndim < 3:
np_img = np.expand_dims(np_img, axis=-1)
np_img = np.rollaxis(np_img, 2) # HWC to CHW
return torch.from_numpy(np_img).to(dtype=self.dtype)
_pil_interpolation_to_str = {
Image.NEAREST: 'nearest',
Image.BILINEAR: 'bilinear',
Image.BICUBIC: 'bicubic',
Image.BOX: 'box',
Image.HAMMING: 'hamming',
Image.LANCZOS: 'lanczos',
}
_str_to_pil_interpolation = {b: a for a, b in _pil_interpolation_to_str.items()}
if has_interpolation_mode:
_torch_interpolation_to_str = {
InterpolationMode.NEAREST: 'nearest',
InterpolationMode.BILINEAR: 'bilinear',
InterpolationMode.BICUBIC: 'bicubic',
InterpolationMode.BOX: 'box',
InterpolationMode.HAMMING: 'hamming',
InterpolationMode.LANCZOS: 'lanczos',
}
_str_to_torch_interpolation = {b: a for a, b in _torch_interpolation_to_str.items()}
else:
_pil_interpolation_to_torch = {}
_torch_interpolation_to_str = {}
def str_to_pil_interp(mode_str):
return _str_to_pil_interpolation[mode_str]
def str_to_interp_mode(mode_str):
if has_interpolation_mode:
return _str_to_torch_interpolation[mode_str]
else:
return _str_to_pil_interpolation[mode_str]
def interp_mode_to_str(mode):
if has_interpolation_mode:
return _torch_interpolation_to_str[mode]
else:
return _pil_interpolation_to_str[mode]
_RANDOM_INTERPOLATION = (str_to_interp_mode('bilinear'), str_to_interp_mode('bicubic'))
class RandomResizedCropAndInterpolation:
"""Crop the given PIL Image to random size and aspect ratio with random interpolation.
A crop of random size (default: of 0.08 to 1.0) of the original size and a random
aspect ratio (default: of 3/4 to 4/3) of the original aspect ratio is made. This crop
is finally resized to given size.
This is popularly used to train the Inception networks.
Args:
size: expected output size of each edge
scale: range of size of the origin size cropped
ratio: range of aspect ratio of the origin aspect ratio cropped
interpolation: Default: PIL.Image.BILINEAR
"""
def __init__(self, size, scale=(0.08, 1.0), ratio=(3. / 4., 4. / 3.),
interpolation='bilinear'):
if isinstance(size, (list, tuple)):
self.size = tuple(size)
else:
self.size = (size, size)
if (scale[0] > scale[1]) or (ratio[0] > ratio[1]):
warnings.warn("range should be of kind (min, max)")
if interpolation == 'random':
self.interpolation = _RANDOM_INTERPOLATION
else:
self.interpolation = str_to_interp_mode(interpolation)
self.scale = scale
self.ratio = ratio
@staticmethod
def get_params(img, scale, ratio):
"""Get parameters for ``crop`` for a random sized crop.
Args:
img (PIL Image): Image to be cropped.
scale (tuple): range of size of the origin size cropped
ratio (tuple): range of aspect ratio of the origin aspect ratio cropped
Returns:
tuple: params (i, j, h, w) to be passed to ``crop`` for a random
sized crop.
"""
area = img.size[0] * img.size[1]
for attempt in range(10):
target_area = random.uniform(*scale) * area
log_ratio = (math.log(ratio[0]), math.log(ratio[1]))
aspect_ratio = math.exp(random.uniform(*log_ratio))
w = int(round(math.sqrt(target_area * aspect_ratio)))
h = int(round(math.sqrt(target_area / aspect_ratio)))
if w <= img.size[0] and h <= img.size[1]:
i = random.randint(0, img.size[1] - h)
j = random.randint(0, img.size[0] - w)
return i, j, h, w
# Fallback to central crop
in_ratio = img.size[0] / img.size[1]
if in_ratio < min(ratio):
w = img.size[0]
h = int(round(w / min(ratio)))
elif in_ratio > max(ratio):
h = img.size[1]
w = int(round(h * max(ratio)))
else: # whole image
w = img.size[0]
h = img.size[1]
i = (img.size[1] - h) // 2
j = (img.size[0] - w) // 2
return i, j, h, w
def __call__(self, img):
"""
Args:
img (PIL Image): Image to be cropped and resized.
Returns:
PIL Image: Randomly cropped and resized image.
"""
i, j, h, w = self.get_params(img, self.scale, self.ratio)
if isinstance(self.interpolation, (tuple, list)):
interpolation = random.choice(self.interpolation)
else:
interpolation = self.interpolation
return F.resized_crop(img, i, j, h, w, self.size, interpolation)
def __repr__(self):
if isinstance(self.interpolation, (tuple, list)):
interpolate_str = ' '.join([interp_mode_to_str(x) for x in self.interpolation])
else:
interpolate_str = interp_mode_to_str(self.interpolation)
format_string = self.__class__.__name__ + '(size={0}'.format(self.size)
format_string += ', scale={0}'.format(tuple(round(s, 4) for s in self.scale))
format_string += ', ratio={0}'.format(tuple(round(r, 4) for r in self.ratio))
format_string += ', interpolation={0})'.format(interpolate_str)
return format_string
| 28,278 | 40.103198 | 174 | py |
RandStainNA | RandStainNA-master/classification/timm/data/parsers/parser_tfds.py | """ Dataset parser interface that wraps TFDS datasets
Wraps many (most?) TFDS image-classification datasets
from https://github.com/tensorflow/datasets
https://www.tensorflow.org/datasets/catalog/overview#image_classification
Hacked together by / Copyright 2020 Ross Wightman
"""
import math
import torch
import torch.distributed as dist
from PIL import Image
try:
import tensorflow as tf
tf.config.set_visible_devices([], 'GPU') # Hands off my GPU! (or pip install tensorflow-cpu)
import tensorflow_datasets as tfds
try:
tfds.even_splits('', 1, drop_remainder=False) # non-buggy even_splits has drop_remainder arg
has_buggy_even_splits = False
except TypeError:
print("Warning: This version of tfds doesn't have the latest even_splits impl. "
"Please update or use tfds-nightly for better fine-grained split behaviour.")
has_buggy_even_splits = True
except ImportError as e:
print(e)
print("Please install tensorflow_datasets package `pip install tensorflow-datasets`.")
exit(1)
from .parser import Parser
MAX_TP_SIZE = 8 # maximum TF threadpool size, only doing jpeg decodes and queuing activities
SHUFFLE_SIZE = 8192 # examples to shuffle in DS queue
PREFETCH_SIZE = 2048 # examples to prefetch
def even_split_indices(split, n, num_examples):
partitions = [round(i * num_examples / n) for i in range(n + 1)]
return [f"{split}[{partitions[i]}:{partitions[i + 1]}]" for i in range(n)]
def get_class_labels(info):
if 'label' not in info.features:
return {}
class_label = info.features['label']
class_to_idx = {n: class_label.str2int(n) for n in class_label.names}
return class_to_idx
class ParserTfds(Parser):
""" Wrap Tensorflow Datasets for use in PyTorch
There several things to be aware of:
* To prevent excessive examples being dropped per epoch w/ distributed training or multiplicity of
dataloader workers, the train iterator wraps to avoid returning partial batches that trigger drop_last
https://github.com/pytorch/pytorch/issues/33413
* With PyTorch IterableDatasets, each worker in each replica operates in isolation, the final batch
from each worker could be a different size. For training this is worked around by option above, for
validation extra examples are inserted iff distributed mode is enabled so that the batches being reduced
across replicas are of same size. This will slightly alter the results, distributed validation will not be
100% correct. This is similar to common handling in DistributedSampler for normal Datasets but a bit worse
since there are up to N * J extra examples with IterableDatasets.
* The sharding (splitting of dataset into TFRecord) files imposes limitations on the number of
replicas and dataloader workers you can use. For really small datasets that only contain a few shards
you may have to train non-distributed w/ 1-2 dataloader workers. This is likely not a huge concern as the
benefit of distributed training or fast dataloading should be much less for small datasets.
* This wrapper is currently configured to return individual, decompressed image examples from the TFDS
dataset. The augmentation (transforms) and batching is still done in PyTorch. It would be possible
to specify TF augmentation fn and return augmented batches w/ some modifications to other downstream
components.
"""
def __init__(
self,
root,
name,
split='train',
is_training=False,
batch_size=None,
download=False,
repeats=0,
seed=42,
input_name='image',
input_image='RGB',
target_name='label',
target_image='',
prefetch_size=None,
shuffle_size=None,
max_threadpool_size=None
):
""" Tensorflow-datasets Wrapper
Args:
root: root data dir (ie your TFDS_DATA_DIR. not dataset specific sub-dir)
name: tfds dataset name (eg `imagenet2012`)
split: tfds dataset split (can use all TFDS split strings eg `train[:10%]`)
is_training: training mode, shuffle enabled, dataset len rounded by batch_size
batch_size: batch_size to use to unsure total examples % batch_size == 0 in training across all dis nodes
download: download and build TFDS dataset if set, otherwise must use tfds CLI
repeats: iterate through (repeat) the dataset this many times per iteration (once if 0 or 1)
seed: common seed for shard shuffle across all distributed/worker instances
input_name: name of Feature to return as data (input)
input_image: image mode if input is an image (currently PIL mode string)
target_name: name of Feature to return as target (label)
target_image: image mode if target is an image (currently PIL mode string)
prefetch_size: override default tf.data prefetch buffer size
shuffle_size: override default tf.data shuffle buffer size
max_threadpool_size: override default threadpool size for tf.data
"""
super().__init__()
self.root = root
self.split = split
self.is_training = is_training
if self.is_training:
assert batch_size is not None, \
"Must specify batch_size in training mode for reasonable behaviour w/ TFDS wrapper"
self.batch_size = batch_size
self.repeats = repeats
self.common_seed = seed # a seed that's fixed across all worker / distributed instances
# performance settings
self.prefetch_size = prefetch_size or PREFETCH_SIZE
self.shuffle_size = shuffle_size or SHUFFLE_SIZE
self.max_threadpool_size = max_threadpool_size or MAX_TP_SIZE
# TFDS builder and split information
self.input_name = input_name # FIXME support tuples / lists of inputs and targets and full range of Feature
self.input_image = input_image
self.target_name = target_name
self.target_image = target_image
self.builder = tfds.builder(name, data_dir=root)
# NOTE: the tfds command line app can be used download & prepare datasets if you don't enable download flag
if download:
self.builder.download_and_prepare()
self.class_to_idx = get_class_labels(self.builder.info) if self.target_name == 'label' else {}
self.split_info = self.builder.info.splits[split]
self.num_examples = self.split_info.num_examples
# Distributed world state
self.dist_rank = 0
self.dist_num_replicas = 1
if dist.is_available() and dist.is_initialized() and dist.get_world_size() > 1:
self.dist_rank = dist.get_rank()
self.dist_num_replicas = dist.get_world_size()
# Attributes that are updated in _lazy_init, including the tf.data pipeline itself
self.global_num_workers = 1
self.worker_info = None
self.worker_seed = 0 # seed unique to each work instance
self.subsplit = None # set when data is distributed across workers using sub-splits
self.ds = None # initialized lazily on each dataloader worker process
def _lazy_init(self):
""" Lazily initialize the dataset.
This is necessary to init the Tensorflow dataset pipeline in the (dataloader) process that
will be using the dataset instance. The __init__ method is called on the main process,
this will be called in a dataloader worker process.
NOTE: There will be problems if you try to re-use this dataset across different loader/worker
instances once it has been initialized. Do not call any dataset methods that can call _lazy_init
before it is passed to dataloader.
"""
worker_info = torch.utils.data.get_worker_info()
# setup input context to split dataset across distributed processes
num_workers = 1
global_worker_id = 0
if worker_info is not None:
self.worker_info = worker_info
self.worker_seed = worker_info.seed
num_workers = worker_info.num_workers
self.global_num_workers = self.dist_num_replicas * num_workers
global_worker_id = self.dist_rank * num_workers + worker_info.id
""" Data sharding
InputContext will assign subset of underlying TFRecord files to each 'pipeline' if used.
My understanding is that using split, the underling TFRecord files will shuffle (shuffle_files=True)
between the splits each iteration, but that understanding could be wrong.
I am currently using a mix of InputContext shard assignment and fine-grained sub-splits for distributing
the data across workers. For training InputContext is used to assign shards to nodes unless num_shards
in dataset < total number of workers. Otherwise sub-split API is used for datasets without enough shards or
for validation where we can't drop examples and need to avoid minimize uneven splits to avoid padding.
"""
should_subsplit = self.global_num_workers > 1 and (
self.split_info.num_shards < self.global_num_workers or not self.is_training)
if should_subsplit:
# split the dataset w/o using sharding for more even examples / worker, can result in less optimal
# read patterns for distributed training (overlap across shards) so better to use InputContext there
if has_buggy_even_splits:
# my even_split workaround doesn't work on subsplits, upgrade tfds!
if not isinstance(self.split_info, tfds.core.splits.SubSplitInfo):
subsplits = even_split_indices(self.split, self.global_num_workers, self.num_examples)
self.subsplit = subsplits[global_worker_id]
else:
subsplits = tfds.even_splits(self.split, self.global_num_workers)
self.subsplit = subsplits[global_worker_id]
input_context = None
if self.global_num_workers > 1 and self.subsplit is None:
# set input context to divide shards among distributed replicas
input_context = tf.distribute.InputContext(
num_input_pipelines=self.global_num_workers,
input_pipeline_id=global_worker_id,
num_replicas_in_sync=self.dist_num_replicas # FIXME does this arg have any impact?
)
read_config = tfds.ReadConfig(
shuffle_seed=self.common_seed,
shuffle_reshuffle_each_iteration=True,
input_context=input_context)
ds = self.builder.as_dataset(
split=self.subsplit or self.split, shuffle_files=self.is_training, read_config=read_config)
# avoid overloading threading w/ combo of TF ds threads + PyTorch workers
options = tf.data.Options()
thread_member = 'threading' if hasattr(options, 'threading') else 'experimental_threading'
getattr(options, thread_member).private_threadpool_size = max(1, self.max_threadpool_size // num_workers)
getattr(options, thread_member).max_intra_op_parallelism = 1
ds = ds.with_options(options)
if self.is_training or self.repeats > 1:
# to prevent excessive drop_last batch behaviour w/ IterableDatasets
# see warnings at https://pytorch.org/docs/stable/data.html#multi-process-data-loading
ds = ds.repeat() # allow wrap around and break iteration manually
if self.is_training:
ds = ds.shuffle(min(self.num_examples, self.shuffle_size) // self.global_num_workers, seed=self.worker_seed)
ds = ds.prefetch(min(self.num_examples // self.global_num_workers, self.prefetch_size))
self.ds = tfds.as_numpy(ds)
def __iter__(self):
if self.ds is None:
self._lazy_init()
# Compute a rounded up sample count that is used to:
# 1. make batches even cross workers & replicas in distributed validation.
# This adds extra examples and will slightly alter validation results.
# 2. determine loop ending condition in training w/ repeat enabled so that only full batch_size
# batches are produced (underlying tfds iter wraps around)
target_example_count = math.ceil(max(1, self.repeats) * self.num_examples / self.global_num_workers)
if self.is_training:
# round up to nearest batch_size per worker-replica
target_example_count = math.ceil(target_example_count / self.batch_size) * self.batch_size
# Iterate until exhausted or sample count hits target when training (ds.repeat enabled)
example_count = 0
for example in self.ds:
input_data = example[self.input_name]
if self.input_image:
input_data = Image.fromarray(input_data, mode=self.input_image)
target_data = example[self.target_name]
if self.target_image:
target_data = Image.fromarray(target_data, mode=self.target_image)
yield input_data, target_data
example_count += 1
if self.is_training and example_count >= target_example_count:
# Need to break out of loop when repeat() is enabled for training w/ oversampling
# this results in extra examples per epoch but seems more desirable than dropping
# up to N*J batches per epoch (where N = num distributed processes, and J = num worker processes)
break
# Pad across distributed nodes (make counts equal by adding examples)
if not self.is_training and self.dist_num_replicas > 1 and self.subsplit is not None and \
0 < example_count < target_example_count:
# Validation batch padding only done for distributed training where results are reduced across nodes.
# For single process case, it won't matter if workers return different batch sizes.
# If using input_context or % based splits, sample count can vary significantly across workers and this
# approach should not be used (hence disabled if self.subsplit isn't set).
while example_count < target_example_count:
yield input_data, target_data # yield prev sample again
example_count += 1
def __len__(self):
# this is just an estimate and does not factor in extra examples added to pad batches based on
# complete worker & replica info (not available until init in dataloader).
return math.ceil(max(1, self.repeats) * self.num_examples / self.dist_num_replicas)
def _filename(self, index, basename=False, absolute=False):
assert False, "Not supported" # no random access to examples
def filenames(self, basename=False, absolute=False):
""" Return all filenames in dataset, overrides base"""
if self.ds is None:
self._lazy_init()
names = []
for sample in self.ds:
if len(names) > self.num_examples:
break # safety for ds.repeat() case
if 'file_name' in sample:
name = sample['file_name']
elif 'filename' in sample:
name = sample['filename']
elif 'id' in sample:
name = sample['id']
else:
assert False, "No supported name field present"
names.append(name)
return names
| 15,819 | 52.087248 | 120 | py |
RandStainNA | RandStainNA-master/classification/tests/test_layers.py | import pytest
import torch
import torch.nn as nn
import platform
import os
from timm.models.layers import create_act_layer, get_act_layer, set_layer_config
class MLP(nn.Module):
def __init__(self, act_layer="relu", inplace=True):
super(MLP, self).__init__()
self.fc1 = nn.Linear(1000, 100)
self.act = create_act_layer(act_layer, inplace=inplace)
self.fc2 = nn.Linear(100, 10)
def forward(self, x):
x = self.fc1(x)
x = self.act(x)
x = self.fc2(x)
return x
def _run_act_layer_grad(act_type, inplace=True):
x = torch.rand(10, 1000) * 10
m = MLP(act_layer=act_type, inplace=inplace)
def _run(x, act_layer=''):
if act_layer:
# replace act layer if set
m.act = create_act_layer(act_layer, inplace=inplace)
out = m(x)
l = (out - 0).pow(2).sum()
return l
out_me = _run(x)
with set_layer_config(scriptable=True):
out_jit = _run(x, act_type)
assert torch.isclose(out_jit, out_me)
with set_layer_config(no_jit=True):
out_basic = _run(x, act_type)
assert torch.isclose(out_basic, out_jit)
def test_swish_grad():
for _ in range(100):
_run_act_layer_grad('swish')
def test_mish_grad():
for _ in range(100):
_run_act_layer_grad('mish')
def test_hard_sigmoid_grad():
for _ in range(100):
_run_act_layer_grad('hard_sigmoid', inplace=None)
def test_hard_swish_grad():
for _ in range(100):
_run_act_layer_grad('hard_swish')
def test_hard_mish_grad():
for _ in range(100):
_run_act_layer_grad('hard_mish')
| 1,644 | 21.847222 | 80 | py |
RandStainNA | RandStainNA-master/classification/tests/test_models.py | import pytest
import torch
import platform
import os
import fnmatch
try:
from torchvision.models.feature_extraction import create_feature_extractor, get_graph_node_names, NodePathTracer
has_fx_feature_extraction = True
except ImportError:
has_fx_feature_extraction = False
import timm
from timm import list_models, create_model, set_scriptable, has_model_default_key, is_model_default_key, \
get_model_default_value
from timm.models.fx_features import _leaf_modules, _autowrap_functions
if hasattr(torch._C, '_jit_set_profiling_executor'):
# legacy executor is too slow to compile large models for unit tests
# no need for the fusion performance here
torch._C._jit_set_profiling_executor(True)
torch._C._jit_set_profiling_mode(False)
# transformer models don't support many of the spatial / feature based model functionalities
NON_STD_FILTERS = [
'vit_*', 'tnt_*', 'pit_*', 'swin_*', 'coat_*', 'cait_*', '*mixer_*', 'gmlp_*', 'resmlp_*', 'twins_*',
'convit_*', 'levit*', 'visformer*', 'deit*', 'jx_nest_*', 'nest_*', 'xcit_*', 'crossvit_*', 'beit_*']
NUM_NON_STD = len(NON_STD_FILTERS)
# exclude models that cause specific test failures
if 'GITHUB_ACTIONS' in os.environ:
# GitHub Linux runner is slower and hits memory limits sooner than MacOS, exclude bigger models
EXCLUDE_FILTERS = [
'*efficientnet_l2*', '*resnext101_32x48d', '*in21k', '*152x4_bitm', '*101x3_bitm', '*50x3_bitm',
'*nfnet_f3*', '*nfnet_f4*', '*nfnet_f5*', '*nfnet_f6*', '*nfnet_f7*', '*efficientnetv2_xl*',
'*resnetrs350*', '*resnetrs420*', 'xcit_large_24_p8*', 'vit_huge*', 'vit_gi*']
NON_STD_EXCLUDE_FILTERS = ['vit_huge*', 'vit_gi*']
else:
EXCLUDE_FILTERS = []
NON_STD_EXCLUDE_FILTERS = ['vit_gi*']
TARGET_FWD_SIZE = MAX_FWD_SIZE = 384
TARGET_BWD_SIZE = 128
MAX_BWD_SIZE = 320
MAX_FWD_OUT_SIZE = 448
TARGET_JIT_SIZE = 128
MAX_JIT_SIZE = 320
TARGET_FFEAT_SIZE = 96
MAX_FFEAT_SIZE = 256
TARGET_FWD_FX_SIZE = 128
MAX_FWD_FX_SIZE = 224
TARGET_BWD_FX_SIZE = 128
MAX_BWD_FX_SIZE = 224
def _get_input_size(model=None, model_name='', target=None):
if model is None:
assert model_name, "One of model or model_name must be provided"
input_size = get_model_default_value(model_name, 'input_size')
fixed_input_size = get_model_default_value(model_name, 'fixed_input_size')
min_input_size = get_model_default_value(model_name, 'min_input_size')
else:
default_cfg = model.default_cfg
input_size = default_cfg['input_size']
fixed_input_size = default_cfg.get('fixed_input_size', None)
min_input_size = default_cfg.get('min_input_size', None)
assert input_size is not None
if fixed_input_size:
return input_size
if min_input_size:
if target and max(input_size) > target:
input_size = min_input_size
else:
if target and max(input_size) > target:
input_size = tuple([min(x, target) for x in input_size])
return input_size
@pytest.mark.timeout(120)
@pytest.mark.parametrize('model_name', list_models(exclude_filters=EXCLUDE_FILTERS))
@pytest.mark.parametrize('batch_size', [1])
def test_model_forward(model_name, batch_size):
"""Run a single forward pass with each model"""
model = create_model(model_name, pretrained=False)
model.eval()
input_size = _get_input_size(model=model, target=TARGET_FWD_SIZE)
if max(input_size) > MAX_FWD_SIZE:
pytest.skip("Fixed input size model > limit.")
inputs = torch.randn((batch_size, *input_size))
outputs = model(inputs)
assert outputs.shape[0] == batch_size
assert not torch.isnan(outputs).any(), 'Output included NaNs'
@pytest.mark.timeout(120)
@pytest.mark.parametrize('model_name', list_models(exclude_filters=EXCLUDE_FILTERS, name_matches_cfg=True))
@pytest.mark.parametrize('batch_size', [2])
def test_model_backward(model_name, batch_size):
"""Run a single forward pass with each model"""
input_size = _get_input_size(model_name=model_name, target=TARGET_BWD_SIZE)
if max(input_size) > MAX_BWD_SIZE:
pytest.skip("Fixed input size model > limit.")
model = create_model(model_name, pretrained=False, num_classes=42)
num_params = sum([x.numel() for x in model.parameters()])
model.train()
inputs = torch.randn((batch_size, *input_size))
outputs = model(inputs)
if isinstance(outputs, tuple):
outputs = torch.cat(outputs)
outputs.mean().backward()
for n, x in model.named_parameters():
assert x.grad is not None, f'No gradient for {n}'
num_grad = sum([x.grad.numel() for x in model.parameters() if x.grad is not None])
assert outputs.shape[-1] == 42
assert num_params == num_grad, 'Some parameters are missing gradients'
assert not torch.isnan(outputs).any(), 'Output included NaNs'
@pytest.mark.timeout(300)
@pytest.mark.parametrize('model_name', list_models(exclude_filters=NON_STD_FILTERS))
@pytest.mark.parametrize('batch_size', [1])
def test_model_default_cfgs(model_name, batch_size):
"""Run a single forward pass with each model"""
model = create_model(model_name, pretrained=False)
model.eval()
state_dict = model.state_dict()
cfg = model.default_cfg
pool_size = cfg['pool_size']
input_size = model.default_cfg['input_size']
if all([x <= MAX_FWD_OUT_SIZE for x in input_size]) and \
not any([fnmatch.fnmatch(model_name, x) for x in EXCLUDE_FILTERS]):
# output sizes only checked if default res <= 448 * 448 to keep resource down
input_size = tuple([min(x, MAX_FWD_OUT_SIZE) for x in input_size])
input_tensor = torch.randn((batch_size, *input_size))
# test forward_features (always unpooled)
outputs = model.forward_features(input_tensor)
assert outputs.shape[-1] == pool_size[-1] and outputs.shape[-2] == pool_size[-2]
# test forward after deleting the classifier, output should be poooled, size(-1) == model.num_features
model.reset_classifier(0)
outputs = model.forward(input_tensor)
assert len(outputs.shape) == 2
assert outputs.shape[-1] == model.num_features
# test model forward without pooling and classifier
model.reset_classifier(0, '') # reset classifier and set global pooling to pass-through
outputs = model.forward(input_tensor)
assert len(outputs.shape) == 4
if not isinstance(model, timm.models.MobileNetV3) and not isinstance(model, timm.models.GhostNet):
# FIXME mobilenetv3/ghostnet forward_features vs removed pooling differ
assert outputs.shape[-1] == pool_size[-1] and outputs.shape[-2] == pool_size[-2]
if 'pruned' not in model_name: # FIXME better pruned model handling
# test classifier + global pool deletion via __init__
model = create_model(model_name, pretrained=False, num_classes=0, global_pool='').eval()
outputs = model.forward(input_tensor)
assert len(outputs.shape) == 4
if not isinstance(model, timm.models.MobileNetV3) and not isinstance(model, timm.models.GhostNet):
# FIXME mobilenetv3/ghostnet forward_features vs removed pooling differ
assert outputs.shape[-1] == pool_size[-1] and outputs.shape[-2] == pool_size[-2]
# check classifier name matches default_cfg
if cfg.get('num_classes', None):
classifier = cfg['classifier']
if not isinstance(classifier, (tuple, list)):
classifier = classifier,
for c in classifier:
assert c + ".weight" in state_dict.keys(), f'{c} not in model params'
# check first conv(s) names match default_cfg
first_conv = cfg['first_conv']
if isinstance(first_conv, str):
first_conv = (first_conv,)
assert isinstance(first_conv, (tuple, list))
for fc in first_conv:
assert fc + ".weight" in state_dict.keys(), f'{fc} not in model params'
@pytest.mark.timeout(300)
@pytest.mark.parametrize('model_name', list_models(filter=NON_STD_FILTERS, exclude_filters=NON_STD_EXCLUDE_FILTERS))
@pytest.mark.parametrize('batch_size', [1])
def test_model_default_cfgs_non_std(model_name, batch_size):
"""Run a single forward pass with each model"""
model = create_model(model_name, pretrained=False)
model.eval()
state_dict = model.state_dict()
cfg = model.default_cfg
input_size = _get_input_size(model=model)
if max(input_size) > 320: # FIXME const
pytest.skip("Fixed input size model > limit.")
input_tensor = torch.randn((batch_size, *input_size))
outputs = model.forward_features(input_tensor)
if isinstance(outputs, (tuple, list)):
outputs = outputs[0]
assert outputs.shape[1] == model.num_features
# test forward after deleting the classifier, output should be poooled, size(-1) == model.num_features
model.reset_classifier(0)
outputs = model.forward(input_tensor)
if isinstance(outputs, (tuple, list)):
outputs = outputs[0]
assert len(outputs.shape) == 2
assert outputs.shape[1] == model.num_features
model = create_model(model_name, pretrained=False, num_classes=0).eval()
outputs = model.forward(input_tensor)
if isinstance(outputs, (tuple, list)):
outputs = outputs[0]
assert len(outputs.shape) == 2
assert outputs.shape[1] == model.num_features
# check classifier name matches default_cfg
if cfg.get('num_classes', None):
classifier = cfg['classifier']
if not isinstance(classifier, (tuple, list)):
classifier = classifier,
for c in classifier:
assert c + ".weight" in state_dict.keys(), f'{c} not in model params'
# check first conv(s) names match default_cfg
first_conv = cfg['first_conv']
if isinstance(first_conv, str):
first_conv = (first_conv,)
assert isinstance(first_conv, (tuple, list))
for fc in first_conv:
assert fc + ".weight" in state_dict.keys(), f'{fc} not in model params'
if 'GITHUB_ACTIONS' not in os.environ:
@pytest.mark.timeout(120)
@pytest.mark.parametrize('model_name', list_models(pretrained=True))
@pytest.mark.parametrize('batch_size', [1])
def test_model_load_pretrained(model_name, batch_size):
"""Create that pretrained weights load, verify support for in_chans != 3 while doing so."""
in_chans = 3 if 'pruned' in model_name else 1 # pruning not currently supported with in_chans change
create_model(model_name, pretrained=True, in_chans=in_chans, num_classes=5)
create_model(model_name, pretrained=True, in_chans=in_chans, num_classes=0)
@pytest.mark.timeout(120)
@pytest.mark.parametrize('model_name', list_models(pretrained=True, exclude_filters=NON_STD_FILTERS))
@pytest.mark.parametrize('batch_size', [1])
def test_model_features_pretrained(model_name, batch_size):
"""Create that pretrained weights load when features_only==True."""
create_model(model_name, pretrained=True, features_only=True)
EXCLUDE_JIT_FILTERS = [
'*iabn*', 'tresnet*', # models using inplace abn unlikely to ever be scriptable
'dla*', 'hrnet*', 'ghostnet*', # hopefully fix at some point
'vit_large_*', 'vit_huge_*', 'vit_gi*',
]
@pytest.mark.timeout(120)
@pytest.mark.parametrize(
'model_name', list_models(exclude_filters=EXCLUDE_FILTERS + EXCLUDE_JIT_FILTERS, name_matches_cfg=True))
@pytest.mark.parametrize('batch_size', [1])
def test_model_forward_torchscript(model_name, batch_size):
"""Run a single forward pass with each model"""
input_size = _get_input_size(model_name=model_name, target=TARGET_JIT_SIZE)
if max(input_size) > MAX_JIT_SIZE:
pytest.skip("Fixed input size model > limit.")
with set_scriptable(True):
model = create_model(model_name, pretrained=False)
model.eval()
model = torch.jit.script(model)
outputs = model(torch.randn((batch_size, *input_size)))
assert outputs.shape[0] == batch_size
assert not torch.isnan(outputs).any(), 'Output included NaNs'
EXCLUDE_FEAT_FILTERS = [
'*pruned*', # hopefully fix at some point
] + NON_STD_FILTERS
if 'GITHUB_ACTIONS' in os.environ: # and 'Linux' in platform.system():
# GitHub Linux runner is slower and hits memory limits sooner than MacOS, exclude bigger models
EXCLUDE_FEAT_FILTERS += ['*resnext101_32x32d', '*resnext101_32x16d']
@pytest.mark.timeout(120)
@pytest.mark.parametrize('model_name', list_models(exclude_filters=EXCLUDE_FILTERS + EXCLUDE_FEAT_FILTERS))
@pytest.mark.parametrize('batch_size', [1])
def test_model_forward_features(model_name, batch_size):
"""Run a single forward pass with each model in feature extraction mode"""
model = create_model(model_name, pretrained=False, features_only=True)
model.eval()
expected_channels = model.feature_info.channels()
assert len(expected_channels) >= 4 # all models here should have at least 4 feature levels by default, some 5 or 6
input_size = _get_input_size(model=model, target=TARGET_FFEAT_SIZE)
if max(input_size) > MAX_FFEAT_SIZE:
pytest.skip("Fixed input size model > limit.")
outputs = model(torch.randn((batch_size, *input_size)))
assert len(expected_channels) == len(outputs)
for e, o in zip(expected_channels, outputs):
assert e == o.shape[1]
assert o.shape[0] == batch_size
assert not torch.isnan(o).any()
def _create_fx_model(model, train=False):
# This block of code does a bit of juggling to handle any case where there are multiple outputs in train mode
# So we trace once and look at the graph, and get the indices of the nodes that lead into the original fx output
# node. Then we use those indices to select from train_nodes returned by torchvision get_graph_node_names
train_nodes, eval_nodes = get_graph_node_names(
model, tracer_kwargs={'leaf_modules': list(_leaf_modules), 'autowrap_functions': list(_autowrap_functions)})
eval_return_nodes = [eval_nodes[-1]]
train_return_nodes = [train_nodes[-1]]
if train:
tracer = NodePathTracer(leaf_modules=list(_leaf_modules), autowrap_functions=list(_autowrap_functions))
graph = tracer.trace(model)
graph_nodes = list(reversed(graph.nodes))
output_node_names = [n.name for n in graph_nodes[0]._input_nodes.keys()]
graph_node_names = [n.name for n in graph_nodes]
output_node_indices = [-graph_node_names.index(node_name) for node_name in output_node_names]
train_return_nodes = [train_nodes[ix] for ix in output_node_indices]
fx_model = create_feature_extractor(
model, train_return_nodes=train_return_nodes, eval_return_nodes=eval_return_nodes,
tracer_kwargs={'leaf_modules': list(_leaf_modules), 'autowrap_functions': list(_autowrap_functions)})
return fx_model
EXCLUDE_FX_FILTERS = ['vit_gi*']
# not enough memory to run fx on more models than other tests
if 'GITHUB_ACTIONS' in os.environ:
EXCLUDE_FX_FILTERS += [
'beit_large*',
'mixer_l*',
'*nfnet_f2*',
'*resnext101_32x32d',
'resnetv2_152x2*',
'resmlp_big*',
'resnetrs270',
'swin_large*',
'vgg*',
'vit_large*',
'vit_base_patch8*',
'xcit_large*',
'*evob', '*evos', # until norm_norm_norm branch is merged
]
@pytest.mark.timeout(120)
@pytest.mark.parametrize('model_name', list_models(exclude_filters=EXCLUDE_FILTERS + EXCLUDE_FX_FILTERS))
@pytest.mark.parametrize('batch_size', [1])
def test_model_forward_fx(model_name, batch_size):
"""
Symbolically trace each model and run single forward pass through the resulting GraphModule
Also check that the output of a forward pass through the GraphModule is the same as that from the original Module
"""
if not has_fx_feature_extraction:
pytest.skip("Can't test FX. Torch >= 1.10 and Torchvision >= 0.11 are required.")
model = create_model(model_name, pretrained=False)
model.eval()
input_size = _get_input_size(model=model, target=TARGET_FWD_FX_SIZE)
if max(input_size) > MAX_FWD_FX_SIZE:
pytest.skip("Fixed input size model > limit.")
with torch.no_grad():
inputs = torch.randn((batch_size, *input_size))
outputs = model(inputs)
if isinstance(outputs, tuple):
outputs = torch.cat(outputs)
model = _create_fx_model(model)
fx_outputs = tuple(model(inputs).values())
if isinstance(fx_outputs, tuple):
fx_outputs = torch.cat(fx_outputs)
assert torch.all(fx_outputs == outputs)
assert outputs.shape[0] == batch_size
assert not torch.isnan(outputs).any(), 'Output included NaNs'
if 'GITHUB_ACTIONS' not in os.environ:
# FIXME this test is causing GitHub actions to run out of RAM and abruptly kill the test process
@pytest.mark.timeout(120)
@pytest.mark.parametrize('model_name', list_models(
exclude_filters=EXCLUDE_FILTERS + EXCLUDE_FX_FILTERS, name_matches_cfg=True))
@pytest.mark.parametrize('batch_size', [2])
def test_model_backward_fx(model_name, batch_size):
"""Symbolically trace each model and run single backward pass through the resulting GraphModule"""
if not has_fx_feature_extraction:
pytest.skip("Can't test FX. Torch >= 1.10 and Torchvision >= 0.11 are required.")
input_size = _get_input_size(model_name=model_name, target=TARGET_BWD_FX_SIZE)
if max(input_size) > MAX_BWD_FX_SIZE:
pytest.skip("Fixed input size model > limit.")
model = create_model(model_name, pretrained=False, num_classes=42)
model.train()
num_params = sum([x.numel() for x in model.parameters()])
if 'GITHUB_ACTIONS' in os.environ and num_params > 100e6:
pytest.skip("Skipping FX backward test on model with more than 100M params.")
model = _create_fx_model(model, train=True)
outputs = tuple(model(torch.randn((batch_size, *input_size))).values())
if isinstance(outputs, tuple):
outputs = torch.cat(outputs)
outputs.mean().backward()
for n, x in model.named_parameters():
assert x.grad is not None, f'No gradient for {n}'
num_grad = sum([x.grad.numel() for x in model.parameters() if x.grad is not None])
assert outputs.shape[-1] == 42
assert num_params == num_grad, 'Some parameters are missing gradients'
assert not torch.isnan(outputs).any(), 'Output included NaNs'
# reason: model is scripted after fx tracing, but beit has torch.jit.is_scripting() control flow
EXCLUDE_FX_JIT_FILTERS = [
'deit_*_distilled_patch16_224',
'levit*',
'pit_*_distilled_224',
] + EXCLUDE_FX_FILTERS
@pytest.mark.timeout(120)
@pytest.mark.parametrize(
'model_name', list_models(
exclude_filters=EXCLUDE_FILTERS + EXCLUDE_JIT_FILTERS + EXCLUDE_FX_JIT_FILTERS, name_matches_cfg=True))
@pytest.mark.parametrize('batch_size', [1])
def test_model_forward_fx_torchscript(model_name, batch_size):
"""Symbolically trace each model, script it, and run single forward pass"""
if not has_fx_feature_extraction:
pytest.skip("Can't test FX. Torch >= 1.10 and Torchvision >= 0.11 are required.")
input_size = _get_input_size(model_name=model_name, target=TARGET_JIT_SIZE)
if max(input_size) > MAX_JIT_SIZE:
pytest.skip("Fixed input size model > limit.")
with set_scriptable(True):
model = create_model(model_name, pretrained=False)
model.eval()
model = torch.jit.script(_create_fx_model(model))
with torch.no_grad():
outputs = tuple(model(torch.randn((batch_size, *input_size))).values())
if isinstance(outputs, tuple):
outputs = torch.cat(outputs)
assert outputs.shape[0] == batch_size
assert not torch.isnan(outputs).any(), 'Output included NaNs'
| 20,064 | 42.243534 | 119 | py |
RandStainNA | RandStainNA-master/classification/tests/test_utils.py | from torch.nn.modules.batchnorm import BatchNorm2d
from torchvision.ops.misc import FrozenBatchNorm2d
import timm
from timm.utils.model import freeze, unfreeze
def test_freeze_unfreeze():
model = timm.create_model('resnet18')
# Freeze all
freeze(model)
# Check top level module
assert model.fc.weight.requires_grad == False
# Check submodule
assert model.layer1[0].conv1.weight.requires_grad == False
# Check BN
assert isinstance(model.layer1[0].bn1, FrozenBatchNorm2d)
# Unfreeze all
unfreeze(model)
# Check top level module
assert model.fc.weight.requires_grad == True
# Check submodule
assert model.layer1[0].conv1.weight.requires_grad == True
# Check BN
assert isinstance(model.layer1[0].bn1, BatchNorm2d)
# Freeze some
freeze(model, ['layer1', 'layer2.0'])
# Check frozen
assert model.layer1[0].conv1.weight.requires_grad == False
assert isinstance(model.layer1[0].bn1, FrozenBatchNorm2d)
assert model.layer2[0].conv1.weight.requires_grad == False
# Check not frozen
assert model.layer3[0].conv1.weight.requires_grad == True
assert isinstance(model.layer3[0].bn1, BatchNorm2d)
assert model.layer2[1].conv1.weight.requires_grad == True
# Unfreeze some
unfreeze(model, ['layer1', 'layer2.0'])
# Check not frozen
assert model.layer1[0].conv1.weight.requires_grad == True
assert isinstance(model.layer1[0].bn1, BatchNorm2d)
assert model.layer2[0].conv1.weight.requires_grad == True
# Freeze/unfreeze BN
# From root
freeze(model, ['layer1.0.bn1'])
assert isinstance(model.layer1[0].bn1, FrozenBatchNorm2d)
unfreeze(model, ['layer1.0.bn1'])
assert isinstance(model.layer1[0].bn1, BatchNorm2d)
# From direct parent
freeze(model.layer1[0], ['bn1'])
assert isinstance(model.layer1[0].bn1, FrozenBatchNorm2d)
unfreeze(model.layer1[0], ['bn1'])
assert isinstance(model.layer1[0].bn1, BatchNorm2d) | 1,978 | 33.719298 | 65 | py |
RandStainNA | RandStainNA-master/classification/tests/test_optim.py | """ Optimzier Tests
These tests were adapted from PyTorch' optimizer tests.
"""
import math
import pytest
import functools
from copy import deepcopy
import torch
from torch.testing._internal.common_utils import TestCase
from torch.autograd import Variable
from timm.scheduler import PlateauLRScheduler
from timm.optim import create_optimizer_v2
# HACK relying on internal PyTorch test functionality for comparisons that I don't want to write
torch_tc = TestCase()
def _test_basic_cases_template(weight, bias, input, constructor, scheduler_constructors):
weight = Variable(weight, requires_grad=True)
bias = Variable(bias, requires_grad=True)
input = Variable(input)
optimizer = constructor(weight, bias)
schedulers = []
for scheduler_constructor in scheduler_constructors:
schedulers.append(scheduler_constructor(optimizer))
# to check if the optimizer can be printed as a string
optimizer.__repr__()
def fn():
optimizer.zero_grad()
y = weight.mv(input)
if y.is_cuda and bias.is_cuda and y.get_device() != bias.get_device():
y = y.cuda(bias.get_device())
loss = (y + bias).pow(2).sum()
loss.backward()
return loss
initial_value = fn().item()
for _i in range(200):
for scheduler in schedulers:
if isinstance(scheduler, PlateauLRScheduler):
val_loss = fn()
scheduler.step(val_loss)
else:
scheduler.step()
optimizer.step(fn)
assert fn().item() < initial_value
def _test_state_dict(weight, bias, input, constructor):
weight = Variable(weight, requires_grad=True)
bias = Variable(bias, requires_grad=True)
input = Variable(input)
def fn_base(optimizer, weight, bias):
optimizer.zero_grad()
i = input_cuda if weight.is_cuda else input
loss = (weight.mv(i) + bias).pow(2).sum()
loss.backward()
return loss
optimizer = constructor(weight, bias)
fn = functools.partial(fn_base, optimizer, weight, bias)
# Prime the optimizer
for _i in range(20):
optimizer.step(fn)
# Clone the weights and construct new optimizer for them
weight_c = Variable(weight.data.clone(), requires_grad=True)
bias_c = Variable(bias.data.clone(), requires_grad=True)
optimizer_c = constructor(weight_c, bias_c)
fn_c = functools.partial(fn_base, optimizer_c, weight_c, bias_c)
# Load state dict
state_dict = deepcopy(optimizer.state_dict())
state_dict_c = deepcopy(optimizer.state_dict())
optimizer_c.load_state_dict(state_dict_c)
# Run both optimizations in parallel
for _i in range(20):
optimizer.step(fn)
optimizer_c.step(fn_c)
#assert torch.equal(weight, weight_c)
#assert torch.equal(bias, bias_c)
torch_tc.assertEqual(weight, weight_c)
torch_tc.assertEqual(bias, bias_c)
# Make sure state dict wasn't modified
torch_tc.assertEqual(state_dict, state_dict_c)
# Make sure state dict is deterministic with equal but not identical parameters
torch_tc.assertEqual(optimizer.state_dict(), optimizer_c.state_dict())
# Make sure repeated parameters have identical representation in state dict
optimizer_c.param_groups.extend(optimizer_c.param_groups)
torch_tc.assertEqual(optimizer.state_dict()['param_groups'][-1], optimizer_c.state_dict()['param_groups'][-1])
# Check that state dict can be loaded even when we cast parameters
# to a different type and move to a different device.
if not torch.cuda.is_available():
return
input_cuda = Variable(input.data.float().cuda())
weight_cuda = Variable(weight.data.float().cuda(), requires_grad=True)
bias_cuda = Variable(bias.data.float().cuda(), requires_grad=True)
optimizer_cuda = constructor(weight_cuda, bias_cuda)
fn_cuda = functools.partial(fn_base, optimizer_cuda, weight_cuda, bias_cuda)
state_dict = deepcopy(optimizer.state_dict())
state_dict_c = deepcopy(optimizer.state_dict())
optimizer_cuda.load_state_dict(state_dict_c)
# Make sure state dict wasn't modified
torch_tc.assertEqual(state_dict, state_dict_c)
for _i in range(20):
optimizer.step(fn)
optimizer_cuda.step(fn_cuda)
torch_tc.assertEqual(weight, weight_cuda)
torch_tc.assertEqual(bias, bias_cuda)
# validate deepcopy() copies all public attributes
def getPublicAttr(obj):
return set(k for k in obj.__dict__ if not k.startswith('_'))
assert getPublicAttr(optimizer) == getPublicAttr(deepcopy(optimizer))
def _test_basic_cases(constructor, scheduler_constructors=None):
if scheduler_constructors is None:
scheduler_constructors = []
_test_state_dict(
torch.randn(10, 5),
torch.randn(10),
torch.randn(5),
constructor
)
_test_basic_cases_template(
torch.randn(10, 5),
torch.randn(10),
torch.randn(5),
constructor,
scheduler_constructors
)
# non-contiguous parameters
_test_basic_cases_template(
torch.randn(10, 5, 2)[..., 0],
torch.randn(10, 2)[..., 0],
torch.randn(5),
constructor,
scheduler_constructors
)
# CUDA
if not torch.cuda.is_available():
return
_test_basic_cases_template(
torch.randn(10, 5).cuda(),
torch.randn(10).cuda(),
torch.randn(5).cuda(),
constructor,
scheduler_constructors
)
def _test_model(optimizer, params, device=torch.device('cpu')):
weight = torch.tensor(
[[-0.2109, -0.4976], [-0.1413, -0.3420], [-0.2524, 0.6976]],
device=device, requires_grad=True)
bias = torch.tensor([-0.1085, -0.2979, 0.6892], device=device, requires_grad=True)
weight2 = torch.tensor([[-0.0508, -0.3941, -0.2843]], device=device, requires_grad=True)
bias2 = torch.tensor([-0.0711], device=device, requires_grad=True)
input = torch.tensor([0.1, 0.2, 0.3, 0.4, 0.5, 0.6], device=device).reshape(3, 2)
model = torch.nn.Sequential(torch.nn.Linear(2, 3),
torch.nn.Sigmoid(),
torch.nn.Linear(3, 1),
torch.nn.Sigmoid())
model.to(device)
pretrained_dict = model.state_dict()
pretrained_dict['0.weight'] = weight
pretrained_dict['0.bias'] = bias
pretrained_dict['2.weight'] = weight2
pretrained_dict['2.bias'] = bias2
model.load_state_dict(pretrained_dict)
optimizer = create_optimizer_v2(model, opt=optimizer, **params)
prev_loss = float('inf')
for i in range(20):
optimizer.zero_grad()
output = model(input)
loss = output.sum()
loss.backward()
loss = loss.item()
assert loss < prev_loss
prev_loss = loss
optimizer.step()
def rosenbrock(tensor):
x, y = tensor
return (1 - x) ** 2 + 100 * (y - x ** 2) ** 2
def drosenbrock(tensor):
x, y = tensor
return torch.tensor((-400 * x * (y - x ** 2) - 2 * (1 - x), 200 * (y - x ** 2)))
def _test_rosenbrock(constructor, scheduler_constructors=None):
if scheduler_constructors is None:
scheduler_constructors = []
params_t = torch.tensor([1.5, 1.5])
params = Variable(params_t, requires_grad=True)
optimizer = constructor([params])
schedulers = []
for scheduler_constructor in scheduler_constructors:
schedulers.append(scheduler_constructor(optimizer))
solution = torch.tensor([1, 1])
initial_dist = params.data.dist(solution)
def eval(params, w):
# Depending on w, provide only the x or y gradient
optimizer.zero_grad()
loss = rosenbrock(params)
loss.backward()
grad = drosenbrock(params.data)
# NB: We torture test the optimizer by returning an
# uncoalesced sparse tensor
if w:
i = torch.LongTensor([[0, 0]])
x = grad[0]
v = torch.tensor([x / 4., x - x / 4.])
else:
i = torch.LongTensor([[1, 1]])
y = grad[1]
v = torch.tensor([y - y / 4., y / 4.])
x = torch.sparse.DoubleTensor(i, v, torch.Size([2])).to(dtype=v.dtype)
with torch.no_grad():
params.grad = x.to_dense()
return loss
for i in range(2000):
# Do cyclic coordinate descent
w = i % 2
optimizer.step(functools.partial(eval, params, w))
for scheduler in schedulers:
if isinstance(scheduler, PlateauLRScheduler):
scheduler.step(rosenbrock(params))
else:
scheduler.step()
torch_tc.assertLessEqual(params.data.dist(solution), initial_dist)
def _build_params_dict(weight, bias, **kwargs):
return [{'params': [weight]}, dict(params=[bias], **kwargs)]
def _build_params_dict_single(weight, bias, **kwargs):
return [dict(params=bias, **kwargs)]
#@pytest.mark.parametrize('optimizer', ['sgd', 'momentum'])
# FIXME momentum variant frequently fails in GitHub runner, but never local after many attempts
@pytest.mark.parametrize('optimizer', ['sgd'])
def test_sgd(optimizer):
_test_basic_cases(
lambda weight, bias: create_optimizer_v2([weight, bias], optimizer, lr=1e-3)
)
_test_basic_cases(
lambda weight, bias: create_optimizer_v2(
_build_params_dict(weight, bias, lr=1e-2),
optimizer,
lr=1e-3)
)
_test_basic_cases(
lambda weight, bias: create_optimizer_v2(
_build_params_dict_single(weight, bias, lr=1e-2),
optimizer,
lr=1e-3)
)
_test_basic_cases(
lambda weight, bias: create_optimizer_v2(
_build_params_dict_single(weight, bias, lr=1e-2), optimizer)
)
# _test_basic_cases(
# lambda weight, bias: create_optimizer_v2([weight, bias], optimizer, lr=1e-3),
# [lambda opt: StepLR(opt, gamma=0.9, step_size=10)]
# )
# _test_basic_cases(
# lambda weight, bias: create_optimizer_v2([weight, bias], optimizer, lr=1e-3),
# [lambda opt: WarmUpLR(opt, warmup_factor=0.4, warmup_iters=4, warmup_method="linear")]
# )
# _test_basic_cases(
# lambda weight, bias: optimizer([weight, bias], lr=1e-3),
# [lambda opt: WarmUpLR(opt, warmup_factor=0.4, warmup_iters=4, warmup_method="constant")]
# )
# _test_basic_cases(
# lambda weight, bias: optimizer([weight, bias], lr=1e-3),
# [lambda opt: StepLR(opt, gamma=0.9, step_size=10),
# lambda opt: WarmUpLR(opt, warmup_factor=0.4, warmup_iters=4)]
# )
# _test_basic_cases(
# lambda weight, bias: optimizer([weight, bias], lr=1e-3),
# [lambda opt: StepLR(opt, gamma=0.9, step_size=10),
# lambda opt: ReduceLROnPlateau(opt)]
# )
# _test_basic_cases(
# lambda weight, bias: optimizer([weight, bias], lr=1e-3),
# [lambda opt: StepLR(opt, gamma=0.99, step_size=10),
# lambda opt: ExponentialLR(opt, gamma=0.99),
# lambda opt: ReduceLROnPlateau(opt)]
# )
_test_basic_cases(
lambda weight, bias: create_optimizer_v2([weight, bias], optimizer, lr=3e-3, momentum=1)
)
_test_basic_cases(
lambda weight, bias: create_optimizer_v2([weight, bias], optimizer, lr=3e-3, momentum=1, weight_decay=.1)
)
_test_rosenbrock(
lambda params: create_optimizer_v2(params, optimizer, lr=1e-3)
)
_test_model(optimizer, dict(lr=1e-3))
@pytest.mark.parametrize('optimizer', ['adamw', 'adam', 'nadam', 'adamax'])
def test_adam(optimizer):
_test_basic_cases(
lambda weight, bias: create_optimizer_v2([weight, bias], optimizer, lr=1e-3)
)
_test_basic_cases(
lambda weight, bias: create_optimizer_v2(
_build_params_dict(weight, bias, lr=3e-3),
optimizer,
lr=1e-3)
)
_test_basic_cases(
lambda weight, bias: create_optimizer_v2(
_build_params_dict_single(weight, bias, lr=3e-3),
optimizer,
lr=1e-3)
)
_test_rosenbrock(
lambda params: create_optimizer_v2(params, optimizer, lr=5e-2)
)
_test_model(optimizer, dict(lr=5e-2))
@pytest.mark.parametrize('optimizer', ['adabelief'])
def test_adabelief(optimizer):
_test_basic_cases(
lambda weight, bias: create_optimizer_v2([weight, bias], optimizer, lr=1e-3)
)
_test_basic_cases(
lambda weight, bias: create_optimizer_v2(
_build_params_dict(weight, bias, lr=3e-3),
optimizer,
lr=1e-3)
)
_test_basic_cases(
lambda weight, bias: create_optimizer_v2(
_build_params_dict_single(weight, bias, lr=3e-3),
optimizer,
lr=1e-3)
)
_test_basic_cases(
lambda weight, bias: create_optimizer_v2(
_build_params_dict_single(weight, bias, lr=3e-3), optimizer)
)
_test_basic_cases(
lambda weight, bias: create_optimizer_v2([weight, bias], optimizer, lr=1e-3, weight_decay=1)
)
_test_rosenbrock(
lambda params: create_optimizer_v2(params, optimizer, lr=5e-2)
)
_test_model(optimizer, dict(lr=5e-2))
@pytest.mark.parametrize('optimizer', ['radam', 'radabelief'])
def test_rectified(optimizer):
_test_basic_cases(
lambda weight, bias: create_optimizer_v2([weight, bias], optimizer, lr=1e-3)
)
_test_basic_cases(
lambda weight, bias: create_optimizer_v2(
_build_params_dict(weight, bias, lr=3e-3),
optimizer,
lr=1e-3)
)
_test_basic_cases(
lambda weight, bias: create_optimizer_v2(
_build_params_dict_single(weight, bias, lr=3e-3),
optimizer,
lr=1e-3)
)
_test_rosenbrock(
lambda params: create_optimizer_v2(params, optimizer, lr=1e-3)
)
_test_model(optimizer, dict(lr=1e-3))
@pytest.mark.parametrize('optimizer', ['adadelta', 'adagrad'])
def test_adaother(optimizer):
_test_basic_cases(
lambda weight, bias: create_optimizer_v2([weight, bias], optimizer, lr=1e-3)
)
_test_basic_cases(
lambda weight, bias: create_optimizer_v2(
_build_params_dict(weight, bias, lr=3e-3),
optimizer,
lr=1e-3)
)
_test_basic_cases(
lambda weight, bias: create_optimizer_v2(
_build_params_dict_single(weight, bias, lr=3e-3),
optimizer,
lr=1e-3)
)
_test_basic_cases(
lambda weight, bias: create_optimizer_v2(
_build_params_dict_single(weight, bias, lr=3e-3), optimizer)
)
_test_basic_cases(
lambda weight, bias: create_optimizer_v2([weight, bias], optimizer, lr=1e-3, weight_decay=1)
)
_test_rosenbrock(
lambda params: create_optimizer_v2(params, optimizer, lr=1e-1)
)
_test_model(optimizer, dict(lr=5e-2))
@pytest.mark.parametrize('optimizer', ['adafactor'])
def test_adafactor(optimizer):
_test_basic_cases(
lambda weight, bias: create_optimizer_v2([weight, bias], optimizer, lr=1e-3)
)
_test_basic_cases(
lambda weight, bias: create_optimizer_v2(
_build_params_dict(weight, bias, lr=3e-3),
optimizer,
lr=1e-3)
)
_test_basic_cases(
lambda weight, bias: create_optimizer_v2(
_build_params_dict_single(weight, bias, lr=3e-3),
optimizer,
lr=1e-3)
)
_test_basic_cases(
lambda weight, bias: create_optimizer_v2(_build_params_dict_single(weight, bias), optimizer)
)
_test_basic_cases(
lambda weight, bias: create_optimizer_v2([weight, bias], optimizer, lr=1e-3, weight_decay=1)
)
_test_rosenbrock(
lambda params: create_optimizer_v2(params, optimizer, lr=5e-2)
)
_test_model(optimizer, dict(lr=5e-2))
@pytest.mark.parametrize('optimizer', ['lamb', 'lambc'])
def test_lamb(optimizer):
_test_basic_cases(
lambda weight, bias: create_optimizer_v2([weight, bias], optimizer, lr=1e-3)
)
_test_basic_cases(
lambda weight, bias: create_optimizer_v2(
_build_params_dict(weight, bias, lr=1e-3),
optimizer,
lr=1e-3)
)
_test_basic_cases(
lambda weight, bias: create_optimizer_v2(
_build_params_dict_single(weight, bias, lr=1e-3),
optimizer,
lr=1e-3)
)
_test_basic_cases(
lambda weight, bias: create_optimizer_v2(
_build_params_dict_single(weight, bias, lr=1e-3), optimizer)
)
_test_rosenbrock(
lambda params: create_optimizer_v2(params, optimizer, lr=1e-3)
)
_test_model(optimizer, dict(lr=1e-3))
@pytest.mark.parametrize('optimizer', ['lars', 'larc', 'nlars', 'nlarc'])
def test_lars(optimizer):
_test_basic_cases(
lambda weight, bias: create_optimizer_v2([weight, bias], optimizer, lr=1e-3)
)
_test_basic_cases(
lambda weight, bias: create_optimizer_v2(
_build_params_dict(weight, bias, lr=1e-3),
optimizer,
lr=1e-3)
)
_test_basic_cases(
lambda weight, bias: create_optimizer_v2(
_build_params_dict_single(weight, bias, lr=1e-3),
optimizer,
lr=1e-3)
)
_test_basic_cases(
lambda weight, bias: create_optimizer_v2(
_build_params_dict_single(weight, bias, lr=1e-3), optimizer)
)
_test_rosenbrock(
lambda params: create_optimizer_v2(params, optimizer, lr=1e-3)
)
_test_model(optimizer, dict(lr=1e-3))
@pytest.mark.parametrize('optimizer', ['madgrad', 'madgradw'])
def test_madgrad(optimizer):
_test_basic_cases(
lambda weight, bias: create_optimizer_v2([weight, bias], optimizer, lr=1e-3)
)
_test_basic_cases(
lambda weight, bias: create_optimizer_v2(
_build_params_dict(weight, bias, lr=3e-3),
optimizer,
lr=1e-3)
)
_test_basic_cases(
lambda weight, bias: create_optimizer_v2(
_build_params_dict_single(weight, bias, lr=3e-3),
optimizer,
lr=1e-3)
)
_test_basic_cases(
lambda weight, bias: create_optimizer_v2(
_build_params_dict_single(weight, bias, lr=3e-3), optimizer)
)
_test_rosenbrock(
lambda params: create_optimizer_v2(params, optimizer, lr=1e-2)
)
_test_model(optimizer, dict(lr=1e-2))
@pytest.mark.parametrize('optimizer', ['novograd'])
def test_novograd(optimizer):
_test_basic_cases(
lambda weight, bias: create_optimizer_v2([weight, bias], optimizer, lr=1e-3)
)
_test_basic_cases(
lambda weight, bias: create_optimizer_v2(
_build_params_dict(weight, bias, lr=3e-3),
optimizer,
lr=1e-3)
)
_test_basic_cases(
lambda weight, bias: create_optimizer_v2(
_build_params_dict_single(weight, bias, lr=3e-3),
optimizer,
lr=1e-3)
)
_test_basic_cases(
lambda weight, bias: create_optimizer_v2(
_build_params_dict_single(weight, bias, lr=3e-3), optimizer)
)
_test_rosenbrock(
lambda params: create_optimizer_v2(params, optimizer, lr=1e-3)
)
_test_model(optimizer, dict(lr=1e-3))
@pytest.mark.parametrize('optimizer', ['rmsprop', 'rmsproptf'])
def test_rmsprop(optimizer):
_test_basic_cases(
lambda weight, bias: create_optimizer_v2([weight, bias], optimizer, lr=1e-3)
)
_test_basic_cases(
lambda weight, bias: create_optimizer_v2(
_build_params_dict(weight, bias, lr=3e-3),
optimizer,
lr=1e-3)
)
_test_basic_cases(
lambda weight, bias: create_optimizer_v2(
_build_params_dict_single(weight, bias, lr=3e-3),
optimizer,
lr=1e-3)
)
_test_basic_cases(
lambda weight, bias: create_optimizer_v2(
_build_params_dict_single(weight, bias, lr=3e-3), optimizer)
)
_test_rosenbrock(
lambda params: create_optimizer_v2(params, optimizer, lr=1e-2)
)
_test_model(optimizer, dict(lr=1e-2))
@pytest.mark.parametrize('optimizer', ['adamp'])
def test_adamp(optimizer):
_test_basic_cases(
lambda weight, bias: create_optimizer_v2([weight, bias], optimizer, lr=1e-3)
)
_test_basic_cases(
lambda weight, bias: create_optimizer_v2(
_build_params_dict(weight, bias, lr=3e-3),
optimizer,
lr=1e-3)
)
_test_basic_cases(
lambda weight, bias: create_optimizer_v2(
_build_params_dict_single(weight, bias, lr=3e-3),
optimizer,
lr=1e-3)
)
_test_basic_cases(
lambda weight, bias: create_optimizer_v2(
_build_params_dict_single(weight, bias, lr=3e-3), optimizer)
)
_test_rosenbrock(
lambda params: create_optimizer_v2(params, optimizer, lr=5e-2)
)
_test_model(optimizer, dict(lr=5e-2))
@pytest.mark.parametrize('optimizer', ['sgdp'])
def test_sgdp(optimizer):
_test_basic_cases(
lambda weight, bias: create_optimizer_v2([weight, bias], optimizer, lr=1e-3)
)
_test_basic_cases(
lambda weight, bias: create_optimizer_v2(
_build_params_dict(weight, bias, lr=3e-3),
optimizer,
lr=1e-3)
)
_test_basic_cases(
lambda weight, bias: create_optimizer_v2(
_build_params_dict_single(weight, bias, lr=3e-3),
optimizer,
lr=1e-3)
)
_test_basic_cases(
lambda weight, bias: create_optimizer_v2(
_build_params_dict_single(weight, bias, lr=3e-3), optimizer)
)
_test_rosenbrock(
lambda params: create_optimizer_v2(params, optimizer, lr=1e-3)
)
_test_model(optimizer, dict(lr=1e-3))
@pytest.mark.parametrize('optimizer', ['lookahead_sgd', 'lookahead_momentum'])
def test_lookahead_sgd(optimizer):
_test_basic_cases(
lambda weight, bias: create_optimizer_v2([weight, bias], optimizer, lr=1e-3)
)
_test_basic_cases(
lambda weight, bias: create_optimizer_v2(
_build_params_dict(weight, bias, lr=3e-3),
optimizer,
lr=1e-3)
)
_test_basic_cases(
lambda weight, bias: create_optimizer_v2(
_build_params_dict_single(weight, bias, lr=3e-3),
optimizer,
lr=1e-3)
)
_test_basic_cases(
lambda weight, bias: create_optimizer_v2(
_build_params_dict_single(weight, bias, lr=3e-3), optimizer)
)
_test_rosenbrock(
lambda params: create_optimizer_v2(params, optimizer, lr=1e-3)
)
@pytest.mark.parametrize('optimizer', ['lookahead_adamw', 'lookahead_adam'])
def test_lookahead_adam(optimizer):
_test_basic_cases(
lambda weight, bias: create_optimizer_v2([weight, bias], optimizer, lr=1e-3)
)
_test_basic_cases(
lambda weight, bias: create_optimizer_v2(
_build_params_dict(weight, bias, lr=3e-3),
optimizer,
lr=1e-3)
)
_test_basic_cases(
lambda weight, bias: create_optimizer_v2(
_build_params_dict_single(weight, bias, lr=3e-3),
optimizer,
lr=1e-3)
)
_test_basic_cases(
lambda weight, bias: create_optimizer_v2(
_build_params_dict_single(weight, bias, lr=3e-3), optimizer)
)
_test_rosenbrock(
lambda params: create_optimizer_v2(params, optimizer, lr=5e-2)
)
@pytest.mark.parametrize('optimizer', ['lookahead_radam'])
def test_lookahead_radam(optimizer):
_test_basic_cases(
lambda weight, bias: create_optimizer_v2([weight, bias], optimizer, lr=1e-3)
)
_test_basic_cases(
lambda weight, bias: create_optimizer_v2(
_build_params_dict(weight, bias, lr=3e-3),
optimizer,
lr=1e-3)
)
_test_basic_cases(
lambda weight, bias: create_optimizer_v2(
_build_params_dict_single(weight, bias, lr=3e-3),
optimizer,
lr=1e-3)
)
_test_basic_cases(
lambda weight, bias: create_optimizer_v2(
_build_params_dict_single(weight, bias, lr=3e-3), optimizer)
)
_test_rosenbrock(
lambda params: create_optimizer_v2(params, optimizer, lr=1e-4)
)
| 24,464 | 32.331063 | 114 | py |
RandStainNA | RandStainNA-master/segmentation/CA25.py | from email.mime import base
from CA25net import *
import scipy.io
import numpy as np
import time
import torch
# from shutil import copyfile
# device = 'cuda:0' if torch.cuda.is_available() else 'cpu'
device = 'cpu'
print(device)
base_dir = '/root/autodl-tmp/pycharm_project_CA2.5'
train_path = '/root/autodl-tmp/MoNuSeg2018/standard/train'
test_path = '/root/autodl-tmp/MoNuSeg2018/standard/test'
epoch = 50
batch_size = 1
deline_lr_epoch = 3
show_epoch = 1
weight_of_mask = 0.65
lr = 1e-4
# lr = 5e-5
gamma = 0.95
w_dir = '{}/weights'.format(base_dir)
if not os.path.exists(w_dir):
os.makedirs(w_dir)
r_dir = '{}/results'.format(base_dir)
if not os.path.exists(r_dir):
os.makedirs(r_dir)
print(base_dir)
# copyfile('CA25.py','{}/CA25.py'.format(base_dir))
f = open('{}/log.txt'.format(base_dir), 'w')
training_data = XuDataset(train_path)
train_loader = DataLoader(training_data, batch_size=batch_size, shuffle=True)
test_data = XuDataset(test_path)
test_loader = DataLoader(test_data, batch_size=batch_size, shuffle=False)
model = Cia().to(device)
# checkpoint = torch.load('weights/ep61_loss0.7928918600082397.ptf')
# checkpoint = torch.load('abweights/CA25_initial.ptf')
# model.load_state_dict(checkpoint['model_state_dict'])
optimizer = torch.optim.Adam(model.parameters(), lr=lr)
# optimizer = torch.optim.SGD(model.parameters(), lr=lr, momentum=0.9)
# Mask acc : 0.9201083183288574 -- Boundary acc : 0.5252535939216614
#torch.save({'model_state_dict': model.state_dict()}, 'abweights/CA25_initial.ptf')
print('Begin training.')
start = time.time()
loss_f, macc_f, bacc_f, cacc_f = [], [], [], []
for ep in range(epoch):
w = weight_of_mask
#0.75 - np.exp(2*ep/epoch)/(2*np.exp(2))
# if ep < 70:
# w = 0.8
# else:
# w = 0.2
for bt, data in enumerate(train_loader):
model.train()
img, label, bound = data
img = img.cuda()
label = label.cuda()
bound = bound.cuda()
mout, bout = model(img)
#print(bout.shape)
#loss = w*my_loss(mout, label) + (1-w)*cia_loss(bout, bound, ep/(2*epoch)+1/4)
# loss = w*my_loss(mout, label) + (1-w)*cia_loss(bout, bound, 0.3) #w是控制黏连边权重
loss = w*my_loss(mout, label) + (1-w)*cia_loss(bout, bound, 0) #w是控制黏连边权重,置零0后CA2.0
optimizer.zero_grad()
loss.backward()
optimizer.step()
if ep % deline_lr_epoch == 0:
lr = lr * gamma
optimizer = torch.optim.Adam(model.parameters(), lr=lr)
# optimizer = torch.optim.SGD(model.parameters(), lr=lr, momentum=0.9)
if ep % show_epoch == 0:
acc_all, bacc_all, cacc_all, loss_all = [], [], [], []
with torch.no_grad():
for verify in test_loader:
img, label, bound = verify
img = img.cuda()
label = label.cuda()
bound = bound.cuda()
model.eval()
mout, bout = model(img)
# loss = w*my_loss(mout, label) + (1-w)*cia_loss(bout, bound, 0.5)
loss = w*my_loss(mout, label) + (1-w)*cia_loss(bout, bound, 0) #w是控制黏连边权重,置零0后CA2.0
loss_all.append(loss.cpu().numpy())
acc = dice_acc(mout[0][0], label)
acc_all.append(acc)
acc = dice_acc(bout[0][0], bound>0)
bacc_all.append(acc)
acc = dice_acc(bout[0][1], bound>1)
cacc_all.append(acc)
acc_all = np.array(acc_all)
loss_all = np.array(loss_all)
bacc_all = np.array(bacc_all)
cacc_all = np.array(cacc_all)
message = 'epoch num : {} -- Loss: {} -- Mask acc : {} -- Boundary acc : {} -- Clustered edge acc : {} \n'.format(ep + 1, loss_all.mean(), acc_all.mean(), bacc_all.mean(), cacc_all.mean())
print(message)
f.write(message)
f.flush()
loss_f.append(loss_all.mean())
macc_f.append(acc_all.mean())
bacc_f.append(bacc_all.mean())
cacc_f.append(cacc_all.mean())
# if ep > 9 and ep % 10 == 0 :
#lr = lr * 0.6
if ep % (epoch-1) == 0:
torch.save({'model_state_dict': model.state_dict()}, '{}/ep{}_loss{}.ptf'.format(w_dir,ep+1,bacc_all.mean()))
macc_f = np.array(macc_f)
loss_f = np.array(loss_f)
bacc_f = np.array(bacc_f)
cacc_f = np.array(cacc_f)
mdic = {"macc":macc_f, "loss":loss_f,"bacc":bacc_f, "cacc":cacc_f}
scipy.io.savemat("{}/cl_train.mat".format(r_dir), mdic)
torch.save({'model_state_dict': model.state_dict()}, '{}/CA25_n.ptf'.format(w_dir))
end = time.time()
print('Total training time is {}h'.format((end-start)/3600))
print('Finished Training')
f.write('Total training time is {}h\n'.format((end-start)/3600))
f.close()
# %%
# loss_all,acc_all = [],[]
# with torch.no_grad():
# for verify in train_loader:
# img, label = verify
# img = img.cuda()
# label = label.cuda()
# model.eval()
# out = model(img)
# loss = my_loss(out, label)
# acc = my_acc(out, label)
# acc_all.append(acc)
# loss_all.append(loss.cpu().numpy())
# acc_all = np.array(acc_all)
# loss_all = np.array(loss_all)
# print('Loss : {} -- Acc : {} -- Max Acc : {} -- Min Acc : {}'.format(loss_all.mean(), acc_all.mean(), acc_all.max(), acc_all.min()))
# mdic = {"loss_mean":loss_noCL, "loss_max":loss_max,"loss_std":loss_std, "acc_test":acc_all}
# scipy.io.savemat("result/noCL_results.mat", mdic)
# torch.save({
# 'epoch': ep,
# 'model_state_dict': model.state_dict(),
# 'optimizer_state_dict': optimizer.state_dict(),
# 'loss': loss,
# }, 'Unet_noCL.ptf') | 5,784 | 31.683616 | 201 | py |
RandStainNA | RandStainNA-master/segmentation/CIA.py | from CIAnet import *
import scipy.io
import numpy as np
import time
import os
from PIL import Image
from torchvision import transforms
from torch.utils.data import DataLoader,TensorDataset
from tqdm import tqdm
import yaml
import random
# from Dataset import ciaData
from transform import color_norm_jitter, HEDJitter, LABJitter, LABJitter_hsv, Dynamic_P_class, ConcatDataset #2.26添加
import argparse
import logging #2.25添加
import copy #2.26添加
import json #2.25添加
_logger = logging.getLogger('train')
config_parser = parser = argparse.ArgumentParser(description='Training Config', add_help=False)
parser.add_argument('-c', '--config', default='', type=str, metavar='FILE',
help='YAML config file specifying default arguments')
parser = argparse.ArgumentParser(description='PyTorch ImageNet Training')
# Dataset parameters
parser.add_argument('--dataset', metavar='DIR',
help='path to dataset')
parser.add_argument('--output', type=str, metavar='DIR',
help='path to output dir')
# Model parameters
parser.add_argument('--model', default='resnet50', type=str, metavar='MODEL',
help='Name of model to train (default: "resnet50"')
# Learning rate schedule parameters
parser.add_argument('--sched', default='cosine', type=str, metavar='SCHEDULER',
help='LR scheduler (default: "step"')
parser.add_argument('--lr', type=float, default=0.05, metavar='LR',
help='learning rate (default: 0.05)')
parser.add_argument('--epochs', type=int, default=50, metavar='N',
help='number of epochs to train (default: 50)')
parser.add_argument('-b', '--batch-size', type=int, default=2, metavar='N',
help='input batch size for training (default: 2)')
############## cj
# 2.9加入,cj的p控制
parser.add_argument('--color-jitter', nargs='+', type=float, default=None,
help='Color jitter factor Brigtness-Contrast-S-Hue(default: [0, 0, 0, 0])')
# 12.26加入,控制HEDJitter
parser.add_argument('--hed-jitter', type=float, default=None,
help='HED-jitter factory(default: 0)')
parser.add_argument('--lab-jitter', nargs='+', type=float, default=None,
help='LAB-jitter factory(default: None)')
parser.add_argument('--cj-p', type=float, default=1.0, metavar='PCT',
help='color jitter possibility (default: 1, range: 0-1)')
############# 12.20加入,norm&jitter参数配置 ##########
parser.add_argument('--nj-config', type=str, default=None, metavar='PCT',
help='norm&jitter yaml config path (default: '')')
parser.add_argument('--nj-stdhyper', type=float, default=0.0, metavar='PCT',
help='norm&jitter std hyper (default: 0)')
parser.add_argument('--nj-distribution', type=str, default=None, metavar='PCT',
help='norm&jitter distribution (default: '')')
parser.add_argument('--nj-p', type=float, default=1.0, metavar='PCT', #2.9加入,nj的p控制
help='norm&jitter possibility (default: 1, range: 0-1)')
############# 2.9加入,norm&jitter强度控制 ##########
parser.add_argument('--nj-dynamic',action='store_true', default=False,
help='Enable norm-jitter dynamic-p (default: False)')
parser.add_argument('--dynamic-factor', type=float, default=1.0,
help='norm-jitter dynamic-p factor(default: 1)')
parser.add_argument('--nj-dynamicStd',action='store_true', default=False,
help='Enable norm-jitter dynamic-std (default: False)')
# Misc
parser.add_argument('--seed', type=int, default=42, metavar='S',
help='random seed (default: 42)')
def _parse_args():
# Do we have a config file to parse?
args_config, remaining = config_parser.parse_known_args()
if args_config.config:
with open(args_config.config, 'r') as f:
cfg = yaml.safe_load(f)
parser.set_defaults(**cfg)
# The main arg parser parses the rest of the args, the usual
# defaults will have been overridden if config file specified.
args = parser.parse_args(remaining)
# Cache the args as a text string to save them in the output dir later
args_text = yaml.safe_dump(args.__dict__, default_flow_style=False)
return args, args_text
#12.20增加,获取yaml文件的参数
def get_yaml_data(yaml_file):
# 打开yaml文件
# _logger.info(yaml_file)
file = open(yaml_file, 'r', encoding="utf-8")
file_data = file.read()
file.close()
# 将字符串转化为字典或列表
# print("***转化yaml数据为字典或列表***")
data = yaml.load(file_data, Loader=yaml.FullLoader)
return data
if __name__ == '__main__':
args, args_text = _parse_args()
# 12.16修改,加入各个随机种子
# random_seed(args.seed)
np.random.seed(args.seed)
torch.manual_seed(args.seed)
if torch.cuda.is_available(): torch.cuda.manual_seed(args.seed)
device = 'cuda:0' if torch.cuda.is_available() else 'cpu'
print(device)
# epoch, batch = 50, 2
epoch, batch = args.epochs, args.batch_size
train_path = os.path.join(args.dataset,'train') #'/root/autodl-tmp/MoNuSeg2018/standard/train'
val_path = os.path.join(args.dataset,'val')
test_path = os.path.join(args.dataset,'test') #'/root/autodl-tmp/MoNuSeg2018/standard/test'
base_dir = '/root/autodl-tmp/pycharm_project_CA2.5'
w_dir = '{}/weights'.format(base_dir)
o_dir = '{}/outputs/{}'.format(base_dir, args.output)
if not os.path.exists(w_dir):
os.makedirs(w_dir)
if not os.path.exists(o_dir):
os.makedirs(o_dir)
if not os.path.exists(os.path.join(o_dir,'test')):
os.makedirs(os.path.join(o_dir,'test'))
if not os.path.exists(os.path.join(o_dir,'val')):
os.makedirs(os.path.join(o_dir,'val'))
'''
x = torch.from_numpy(x).float()
x = x / 255 # normalization
x = x.unsqueeze(1)
y = torch.from_numpy(y).to(torch.long)
z = torch.from_numpy(z).to(torch.long)
'''
#1.20添加,将输出保存在一个log里
_logger.setLevel(level = logging.INFO)
handler = logging.FileHandler("{}/output_info.log".format(o_dir))
handler.setLevel(logging.INFO)
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
handler.setFormatter(formatter)
_logger.addHandler(handler)
# train_size = 250
transform_list = []
if args.nj_config is not None:
nj_config = get_yaml_data(args.nj_config)
nj_stdhyper = args.nj_stdhyper
nj_distribution = args.nj_distribution #1.30添加,手工指定6个采样的分布
nj_p = args.nj_p #2.9添加,调整nj的概率
nj_config['std_hyper'] = nj_stdhyper
nj_config['distribution'] = nj_distribution
nj_config['p'] = nj_p
norm_jitter = nj_config
# LAB / HED
# 根据config文件的空间来决定是哪种
# 12.26好像有问题,之前不知道顺序,很可怕
if norm_jitter['methods'] == 'Reinhard':
# 1.10修改,nj方法的lab和hsv进行统一
# 1.30修改,lab,hsv和hed方法统一,根据color_space确定
if norm_jitter['color_space'] == 'LAB' or norm_jitter['color_space'] == 'HSV' or norm_jitter['color_space'] == 'HED':
color_space = norm_jitter['color_space'] #获取颜色空间名称
#1.30修改,avg和std已经自带分布,在transform里面修改即可
mean_dataset = [norm_jitter[color_space[0]]['avg'],norm_jitter[color_space[1]]['avg'],norm_jitter[color_space[2]]['avg']]
std_dataset = [norm_jitter[color_space[0]]['std'],norm_jitter[color_space[1]]['std'],norm_jitter[color_space[2]]['std']]
std_hyper = norm_jitter['std_hyper']
distribution = norm_jitter['distribution'] #1.30添加,手工指定分布
p = norm_jitter['p'] #2.9添加,采用增强的概率,默认是1
transform_list += [color_norm_jitter(mean=mean_dataset,std=std_dataset,std_hyper=std_hyper,probability=p,color_space=color_space, distribution=distribution)]
elif norm_jitter['color_space'] == 'Random': #1.10增加,混合多种方法,等概率随机进行选取
distribution = norm_jitter['distribution'] #1.30添加,手工指定分布
if 'L' in list(norm_jitter.keys()): #2.8修改,测试HED,lab,hsv三者的排列组合
mean_dataset = [norm_jitter['L']['avg'],norm_jitter['A']['avg'],norm_jitter['B']['avg']]
std_dataset = [norm_jitter['L']['std'],norm_jitter['A']['std'],norm_jitter['B']['std']]
std_hyper = norm_jitter['std_hyper']
p = norm_jitter['p'] #2.9添加,采用增强的概率,默认是1
transform_list += [color_norm_jitter(mean=mean_dataset,std=std_dataset,std_hyper=std_hyper,probability=p,color_space='LAB',distribution=distribution)]
if 'E' in list(norm_jitter.keys()): #2.8修改,测试HED,lab,hsv三者的排列组合
mean_dataset = [norm_jitter['H']['avg'],norm_jitter['E']['avg'],norm_jitter['D']['avg']]
std_dataset = [norm_jitter['H']['std'],norm_jitter['E']['std'],norm_jitter['D']['std']]
std_hyper = norm_jitter['std_hyper']
p = norm_jitter['p'] #2.9添加,采用增强的概率,默认是1
# special_tfl += [hed_norm_jitter(mean=mean_dataset,std=std_dataset,std_hyper=std_hyper,probability=1)]
# 1.30修改,nj方法统一lab和hed,所以统一用一个即可
transform_list += [color_norm_jitter(mean=mean_dataset,std=std_dataset,std_hyper=std_hyper,probability=p,color_space='HED',distribution=distribution)]
# 2.6修改,增加hsv来random
if 'h' in list(norm_jitter.keys()): #2.8修改,测试HED,lab,hsv三者的排列组合
mean_dataset = [norm_jitter['h']['avg'],norm_jitter['S']['avg'],norm_jitter['V']['avg']]
std_dataset = [norm_jitter['h']['std'],norm_jitter['S']['std'],norm_jitter['V']['std']]
std_hyper = norm_jitter['std_hyper']
p = norm_jitter['p'] #2.9添加,采用增强的概率,默认是1
transform_list += [color_norm_jitter(mean=mean_dataset,std=std_dataset,std_hyper=std_hyper,probability=p,color_space='HSV',distribution=distribution)]
###### baseline ###########
if args.color_jitter is not None:
brightness = args.color_jitter[0]
contrast = args.color_jitter[1]
saturation = args.color_jitter[2]
hue = args.color_jitter[3]
transform_list+=[transforms.RandomApply([transforms.ColorJitter(brightness, contrast, saturation, hue)],p=args.cj_p)]
if args.hed_jitter is not None:
transform_list+=[transforms.RandomApply([HEDJitter(args.hed_jitter)],p=args.cj_p)]
if args.lab_jitter is not None:
if len(args.lab_jitter) == 1:
transform_list+=[transforms.RandomApply([LABJitter(args.lab_jitter[0])],p=args.cj_p)]
else:
l_factor = args.lab_jitter[0]
a_factor = args.lab_jitter[1]
b_factor = args.lab_jitter[2]
transform_list+=[transforms.RandomApply([LABJitter_hsv(l_factor,a_factor,b_factor)],p=args.cj_p)]
transform_list += [transforms.ToTensor()]
mean_ = (0.485, 0.456, 0.406)
std_ = (0.229, 0.224, 0.225)
transform_list += [transforms.Normalize(
mean=torch.tensor(mean_),
std=torch.tensor(std_))
]
transform_train = transforms.Compose(transform_list)
###### test #######
transform_test = transforms.Compose([
# transforms.Resize((512, 512)),
transforms.ToTensor(),
transforms.Normalize(
mean=torch.tensor(mean_),
std=torch.tensor(std_)
)
])
print('train_transform:\n',transform_train)
print('test_transform:\n',transform_test)
train_dataset = ciaData(train_path,transform=transform_train)
if args.nj_dynamic: #2.26加入dynamic
_logger.info('nj_dynamic!!')
train_dataset_copy = copy.deepcopy(train_dataset)
train_dataset_copy.transform=transform_test
train_dataset = ConcatDataset(train_dataset, train_dataset_copy)
if not os.path.exists(val_path):
val_dataset = ciaData(train_path,transform=transform_test)
else:
val_dataset = ciaData(val_path,transform=transform_test)
test_dataset = ciaData(test_path,transform=transform_test)
train_loader = DataLoader(train_dataset, batch_size=batch, shuffle=True, num_workers=15, pin_memory=True)
val_loader = DataLoader(val_dataset, batch_size=1, shuffle=False, num_workers=15, pin_memory=True)
# verify_loader = DataLoader(deal_dataset, batch_size=batch, shuffle=False)
#deal_dataset = TensorDataset(x[100:110], y[100:110], z[100:110])
test_loader = DataLoader(test_dataset, batch_size=1, shuffle=False, num_workers=15, pin_memory=True)
model = CIAnet(growthRate=6, nDenseBlocks=[6,12,24,16], reduction=0.5, bottleneck=True).to(device)
#checkpoint = torch.load('weights/CIA1.ptf')
#model.load_state_dict(checkpoint['model_state_dict'])
lr = args.lr #1e-5 #1e-5
optimizer = torch.optim.Adam(model.parameters(), lr=lr)
lr_scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=1, gamma=0.95, last_epoch=-1)
# lr_scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer,milestones=[20],gamma = 0.1)
# Mask acc : 0.9201083183288574 -- Boundary acc : 0.5252535939216614
# torch.save({'model_state_dict': model.state_dict()}, 'weights/CIA_initial.ptf')
print('Begin training.')
_logger.info('Begin training.')
start = time.time()
# loss_f, macc_f, bacc_f = [], [], []
best_dict = {}
best_dict['Dice'] = 0
best_dict['Iou'] = 0
Dynamic_P = Dynamic_P_class(epochs=args.epochs, batch_per_epoch=int(len(train_dataset)/args.batch_size)+1, dynamic_factor=args.dynamic_factor) #2.9新加,传递函数为引用类型即可
for ep in range(epoch):
ep += 1
save_idx = 0 # 每个epoch在test时保存一张图片
# w = 0.75 - np.exp(2*ep/epoch)/(2*np.exp(2))
# 2.22发现bug,ep较小的时候,w是负值
# if ep < 35:
# w = 0.8
# else:
# w = 0.2
w = 0.8 #0.5
train_bar = tqdm(train_loader)
for batch in train_bar:
model.train()
if args.nj_dynamic == False: #2.26加入dynamic
img, label, bound = batch
else:
batch_1, batch_2 = batch
dynamic_p = Dynamic_P.step() #2.9新加全局函数,用step方法维护概率p,每个batch调整一次
if np.random.rand(1) < dynamic_p: #选择有nj的
img, label, bound = batch_1
else: # 选择没有nj的
img, label, bound = batch_2
img = img.to(device)
label = label.to(device)
bound = bound.to(device)
mout, bout = model(img)
# print(mout.shape, bout.shape)
#loss = w*my_loss(mout, label) + (1-w)*cia_loss(bout, bound, ep/(2*epoch)+1/4)
loss = w*my_loss(mout, label) + (1-w)*cia_loss(bout, bound)
# print(my_loss(mout, label), cia_loss(bout, bound, 0.5))
optimizer.zero_grad()
loss.backward()
optimizer.step()
train_bar.set_description('Train Epoch: [{}/{}], lr: {:.8f}, Loss: {:.4f}'.format(ep, epoch, optimizer.param_groups[0]['lr'], loss))
lr_scheduler.step() #每个epoch更新一次lr
if ep % 1 == 0:
# lr = lr * 0.99
acc_all, iou_all, bacc_all, loss_all = [], [], [], []
with torch.no_grad():
for verify in tqdm(val_loader): #2.22修改,直接进行测试
img, label, bound = verify
if save_idx == 0 :
img_save = label.cpu().clone()
img_save = img_save.squeeze(0) # 压缩一维
img_save = transforms.ToPILImage()(img_save) # 自动转换为0-255
img_save.save(os.path.join(o_dir,'val/label_val.png'))
img = img.to(device)
label = label.to(device)
bound = bound.to(device)
model.eval()
mout, bout = model(img)
if save_idx == 0 :
img_save = mout[0][0].cpu().clone()
img_save = img_save.squeeze(0) # 压缩一维
img_save = transforms.ToPILImage()(img_save) # 自动转换为0-255
img_save.save(os.path.join(o_dir,'val/ep:{}-val.png'.format(ep)))
save_idx += 1
loss = w*my_loss(mout, label) + (1-w)*cia_loss(bout, bound, 0.5)
loss_all.append(loss.cpu().numpy())
acc = dice_acc(mout[0][0], label) #没问题,这边acc计算默认b=1了
acc_all.append(acc)
acc = my_acc(mout[0][0], label[0][0])
if math.isnan(float(acc)):
pass
else:
iou_all.append(acc)
acc = dice_acc(bout[0][0], bound>0.1)
bacc_all.append(acc)
acc_all = np.array(acc_all)
iou_all = np.array(iou_all)
loss_all = np.array(loss_all)
bacc_all = np.array(bacc_all)
_logger.info('epoch num val: {} -- Loss: {} -- Dice : {} -- Iou : {} -- Boundary acc : {}'.format(ep , round(float(loss_all.mean()), 4), round(float(acc_all.mean()),4), round(float(iou_all.mean()),4),round(bacc_all.mean(),4)))
print('epoch num val: {} -- Loss: {} -- Dice : {} -- Iou : {} -- Boundary acc : {}'.format(ep , round(float(loss_all.mean()), 4), round(float(acc_all.mean()),4), round(float(iou_all.mean()),4),round(bacc_all.mean(),4)))
# if ep > 49 and ep % 20 == 0:
# lr = lr * 0.95
# torch.save({'model_state_dict': model.state_dict()}, 'weights/ep{}_loss{}.ptf'.format(ep+1,bacc_all.mean()))
# if ep % 2 == 0:
# torch.save({'model_state_dict': model.state_dict()}, '{}/ep{}_loss{}.ptf'.format(w_dir,ep,round(bacc_all.mean(), 3)))
acc_all, iou_all, bacc_all, loss_all = [], [], [], []
with torch.no_grad():
for verify in tqdm(test_loader):
img, label, bound = verify
if save_idx == 1 :
img_save = label[0][0].cpu().clone()
img_save = img_save.squeeze(0) # 压缩一维
img_save = transforms.ToPILImage()(img_save) # 自动转换为0-255
img_save.save(os.path.join(o_dir,'test/label_test.png'))
img = img.to(device)
label = label.to(device)
bound = bound.to(device)
model.eval()
mout, bout = model(img)
if save_idx == 1 :
img_save = mout[0][0].cpu().clone()
img_save = img_save.squeeze(0) # 压缩一维
img_save = transforms.ToPILImage()(img_save) # 自动转换为0-255
img_save.save(os.path.join(o_dir,'test/ep:{}-test.png'.format(ep)))
save_idx += 1
loss = w*my_loss(mout, label) + (1-w)*cia_loss(bout, bound, 0.5)
loss_all.append(loss.cpu().numpy())
acc = dice_acc(mout[0][0], label)
acc_all.append(acc)
acc = my_acc(mout[0][0], label[0][0])
if math.isnan(float(acc)):
pass
else:
iou_all.append(acc)
acc = dice_acc(bout[0][0], bound>0.1)
bacc_all.append(acc)
acc_all = np.array(acc_all)
iou_all = np.array(iou_all)
loss_all = np.array(loss_all)
bacc_all = np.array(bacc_all)
_logger.info('epoch num test: {} -- Loss: {} -- Dice : {} -- Iou : {} -- Boundary acc : {}\n'.format(ep , round(float(loss_all.mean()), 4), round(float(acc_all.mean()),4), round(float(iou_all.mean()),4),round(bacc_all.mean(),4)))
print('epoch num test: {} -- Loss: {} -- Dice : {} -- Iou : {} -- Boundary acc : {}\n'.format(ep , round(float(loss_all.mean()), 4), round(float(acc_all.mean()),4), round(float(iou_all.mean()),4),round(bacc_all.mean(),4)))
if best_dict['Dice'] < float(acc_all.mean()):
best_dict['Dice'] = round(float(acc_all.mean()),4)
best_dict['Iou'] = round(float(iou_all.mean()),4)
best_dict['epoch'] = ep
json_str = json.dumps(best_dict)
json_path = '{}/best.json'.format(o_dir)
with open(json_path, 'w') as json_file:
json_file.write(json_str)
#2.9每个epoch训练后,监视一下p的值
if args.nj_dynamic != False:
print('dynamic_p',dynamic_p)
# macc_f = np.array(macc_f)
# loss_f = np.array(loss_f)
# bacc_f = np.array(bacc_f)
# cacc_f = np.array(cacc_f)
# mdic = {"macc":macc_f, "loss":loss_f,"bacc":bacc_f, "cacc":cacc_f}
# scipy.io.savemat("results/cl_train.mat", mdic)
torch.save({'model_state_dict': model.state_dict()}, 'weights/CIA.ptf')
end = time.time()
print('Total training time is {}h'.format((end-start)/3600))
print('Finished Training')
# %%
# loss_all,acc_all = [],[]
# with torch.no_grad():
# for verify in train_loader:
# img, label = verify
# img = img.cuda()
# label = label.cuda()
# model.eval()
# out = model(img)
# loss = my_loss(out, label)
# acc = my_acc(out, label)
# acc_all.append(acc)
# loss_all.append(loss.cpu().numpy())
# acc_all = np.array(acc_all)
# loss_all = np.array(loss_all)
# print('Loss : {} -- Acc : {} -- Max Acc : {} -- Min Acc : {}'.format(loss_all.mean(), acc_all.mean(), acc_all.max(), acc_all.min()))
# mdic = {"loss_mean":loss_noCL, "loss_max":loss_max,"loss_std":loss_std, "acc_test":acc_all}
# scipy.io.savemat("result/noCL_results.mat", mdic)
# torch.save({
# 'epoch': ep,
# 'model_state_dict': model.state_dict(),
# 'optimizer_state_dict': optimizer.state_dict(),
# 'loss': loss,
# }, 'Unet_noCL.ptf') | 22,675 | 46.143451 | 246 | py |
RandStainNA | RandStainNA-master/segmentation/transform.py | import torch
import torchvision.transforms.functional as F
from torchvision import transforms #2.13加入
try:
from torchvision.transforms.functional import InterpolationMode
has_interpolation_mode = True
except ImportError:
has_interpolation_mode = False
from PIL import Image, ImageFilter #1.20加入
import warnings
import math
import random
import numpy as np
import cv2 #12.20加入
from skimage import color #12.26加入
import os #12.20加入
#12.17 norm&jitter引入方法
class color_norm_jitter(object):
'''
参数:
1.lab的三个channel的mean和std(这个一般是在外面算完传入进来的,在里面算分布)
2.Reinhard_cn方法
3.概率p
'''
def __init__(self, mean, std, std_hyper=0, probability=0, color_space=None, distribution=None):
self.mean = mean # [l,a,b] 是l_mean的正态分布的均值和方差,是一个字典
self.std = std # [l,a,b]
self.std_adjust = std_hyper #=0时按照统计规则
self.p = probability # 一半概率选一个
self.color_space = color_space
self.distribution = distribution #1.30添加,手工指定分布
def getavgstd(self, image):
avg = []
std = []
image_avg_l = np.mean(image[:, :, 0])
image_std_l = np.std(image[:, :, 0])
image_avg_a = np.mean(image[:, :, 1])
image_std_a = np.std(image[:, :, 1])
image_avg_b = np.mean(image[:, :, 2])
image_std_b = np.std(image[:, :, 2])
avg.append(image_avg_l)
avg.append(image_avg_a)
avg.append(image_avg_b)
std.append(image_std_l)
std.append(image_std_a)
std.append(image_std_b)
return (avg, std)
def quick_loop(self, image1, image_avg, image_std, temp_avg, temp_std):
if self.color_space != 'HED': #LAB和HSV
image_std = np.clip(np.array(image_std), 0.001, 255)
image1 = (image1 - np.array(image_avg)) * (np.array(temp_std) / np.array(image_std)) + np.array(temp_avg)
image1 = np.clip(image1, 0, 255).astype(np.uint8)
else: #HED
image_std = np.clip(np.array(image_std), 0.0001, 255) #经常容易除0,保护一下
image1 = (image1 - np.array(image_avg)) * (np.array(temp_std) / np.array(image_std)) + np.array(temp_avg)
return image1
def __call__(self, img):
# 这边应该考虑单张图就好了
if np.random.rand(1) < self.p:
image = cv2.cvtColor(np.array(img), cv2.COLOR_RGB2BGR) # 注意颜色空间转换
if self.color_space == 'LAB':
image = cv2.cvtColor(image, cv2.COLOR_BGR2LAB) # 注意颜色空间转换
elif self.color_space == 'HSV':
image = cv2.cvtColor(image, cv2.COLOR_BGR2HSV) # 注意颜色空间转换
elif self.color_space == 'HED': #1.30将HED空间扰动也加入
img = cv2.cvtColor(image, cv2.COLOR_BGR2RGB) #hed的变化是在rgb上变化
image = color.rgb2hed(img) #rgb, [0,1]
image_avg, image_std = self.getavgstd(image)
l_mean, a_mean, b_mean = self.mean[0], self.mean[1], self.mean[2]
l_std, a_std, b_std = self.std[0], self.std[1], self.std[2]
std_adjust = self.std_adjust
#1.30修改,l_mean已经有'mean','std','distribution'3个参数
if self.distribution != None: #1.30添加,如果有手工指定分布,则全部按照分布的来,否则按照统计的来
if self.distribution == 'uniform':
np_distribution = np.random.uniform #均匀分布时,按照3Σ原则来确定采样范围
template_avg_l = np_distribution(low=l_mean['mean']-3*l_mean['std'], high=l_mean['mean']+3*l_mean['std'])
template_std_l = np_distribution(low=l_std['mean']-3*l_std['std'], high=l_std['mean']+3*l_std['std'])
template_avg_a = np_distribution(low=a_mean['mean']-3*a_mean['std'], high=a_mean['mean']+3*a_mean['std'])
template_std_a = np_distribution(low=a_std['mean']-3*a_std['std'], high=a_std['mean']+3*a_std['std'])
template_avg_b = np_distribution(low=b_mean['mean']-3*b_mean['std'], high=b_mean['mean']+3*b_mean['std'])
template_std_b = np_distribution(low=b_std['mean']-3*b_std['std'], high=b_std['mean']+3*b_std['std'])
else: #不是均匀分布时,考虑的是均值和方差
if self.distribution == 'normal':
np_distribution = np.random.normal
elif self.distribution == 'laplace':
np_distribution = np.random.laplace
# 2.05添加,1+std调整为全部的
template_avg_l = np_distribution(loc=l_mean['mean'], scale=l_mean['std']*(1+std_adjust))
template_std_l = np_distribution(loc=l_std['mean'], scale=l_std['std']*(1+std_adjust))
template_avg_a = np_distribution(loc=a_mean['mean'], scale=a_mean['std']*(1+std_adjust))
template_std_a = np_distribution(loc=a_std['mean'], scale=a_std['std']*(1+std_adjust))
template_avg_b = np_distribution(loc=b_mean['mean'], scale=b_mean['std']*(1+std_adjust))
template_std_b = np_distribution(loc=b_std['mean'], scale=b_std['std']*(1+std_adjust))
else: #如果没有指定分布,则需要根据nj参数来确定各分布
np_d_true_list = [l_mean['distribution'], l_std['distribution'], a_mean['distribution'], a_std['distribution'], b_mean['distribution'], b_std['distribution']]
# print(np_d_true_list)
np_d_sample_list = []
for np_d_true in np_d_true_list:
if np_d_true == 'norm':
np_d_sample_list.append(np.random.normal)
elif np_d_true == 'laplace':
np_d_sample_list.append(np.random.laplace)
# print(np_d_sample_list)
# 2.5修改,1+std改为全部
template_avg_l = np_d_sample_list[0](loc=l_mean['mean'], scale=l_mean['std']*(1+std_adjust))
template_std_l = np_d_sample_list[1](loc=l_std['mean'], scale=l_std['std']*(1+std_adjust))
template_avg_a = np_d_sample_list[2](loc=a_mean['mean'], scale=a_mean['std']*(1+std_adjust))
template_std_a = np_d_sample_list[3](loc=a_std['mean'], scale=a_std['std']*(1+std_adjust))
template_avg_b = np_d_sample_list[4](loc=b_mean['mean'], scale=b_mean['std']*(1+std_adjust))
template_std_b = np_d_sample_list[5](loc=b_std['mean'], scale=b_std['std']*(1+std_adjust))
template_avg = [float(template_avg_l), float(template_avg_a), float(template_avg_b)]
template_std = [float(template_std_l), float(template_std_a), float(template_std_b)]
image = self.quick_loop(image, image_avg, image_std, template_avg, template_std)
if self.color_space == 'LAB':
image = cv2.cvtColor(image, cv2.COLOR_LAB2BGR)
return Image.fromarray(cv2.cvtColor(image,cv2.COLOR_BGR2RGB))
elif self.color_space == 'HSV':
image = cv2.cvtColor(image, cv2.COLOR_HSV2BGR)
return Image.fromarray(cv2.cvtColor(image,cv2.COLOR_BGR2RGB)) #这个算是调整好了
elif self.color_space == 'HED':
nimg = color.hed2rgb(image)
imin = nimg.min()
imax = nimg.max()
rsimg = (255 * (nimg - imin) / (imax - imin)).astype('uint8') # rescale to [0,255]
return Image.fromarray(rsimg)
else:
return img
# 1.21引入,print内容添加
def __repr__(self):
format_string = self.__class__.__name__ + "("
format_string += f"methods=Reinhard"
format_string += f", colorspace={self.color_space}"
format_string += f", mean={self.mean}"
format_string += f", std={self.std}"
format_string += f", std_adjust={self.std_adjust}"
format_string += f", distribution={self.distribution}" #1.30添加,print期望分布
format_string += f", p={self.p})"
return format_string
#12.25引入HEDJitter方法
class HEDJitter(object):
"""Randomly perturbe the HED color space value an RGB image.
First, it disentangled the hematoxylin and eosin color channels by color deconvolution method using a fixed matrix.
Second, it perturbed the hematoxylin, eosin and DAB stains independently.
Third, it transformed the resulting stains into regular RGB color space.
Args:
theta (float): How much to jitter HED color space,
alpha is chosen from a uniform distribution [1-theta, 1+theta]
betti is chosen from a uniform distribution [-theta, theta]
the jitter formula is **s' = \alpha * s + \betti**
"""
def __init__(self, theta=0., p=1.0): # HED_light: theta=0.05; HED_strong: theta=0.2
# assert isinstance(theta, numbers.Number), "theta should be a single number."
self.theta = theta
# 12.26这边的随机采样不应该是这样的,应该是每次都随机
self.alpha = 0 # np.random.uniform(1-theta, 1+theta, (1, 3))
self.betti = 0 # np.random.uniform(-theta, theta, (1, 3))
self.p = p #2.13加入
@staticmethod
def adjust_HED(img, alpha, betti):
img = np.array(img)
s = np.reshape(color.rgb2hed(img), (-1, 3))
ns = alpha * s + betti # perturbations on HED color space
nimg = color.hed2rgb(np.reshape(ns, img.shape))
imin = nimg.min()
imax = nimg.max()
rsimg = (255 * (nimg - imin) / (imax - imin)).astype('uint8') # rescale to [0,255]
# transfer to PIL image
return Image.fromarray(rsimg)
def __call__(self, img):
# 每张图片都重新来弄,既可以记录,又可以更新
if np.random.rand(1) < self.p: #2.13加入概率
self.alpha = np.random.uniform(1-self.theta, 1+self.theta, (1, 3))
self.betti = np.random.uniform(-self.theta, self.theta, (1, 3))
return self.adjust_HED(img, self.alpha, self.betti)
else:
return img
def __repr__(self):
format_string = self.__class__.__name__ + '('
format_string += 'theta={0}'.format(self.theta)
format_string += ', alpha={0}'.format(self.alpha)
format_string += ', betti={0}'.format(self.betti)
format_string += ', p={0})'.format(self.p)
return format_string
#12.25引入LABJitter方法
class LABJitter(object):
"""Randomly perturbe the LAB color space value an RGB image.
First, it disentangled the hematoxylin and eosin color channels by color deconvolution method using a fixed matrix.
Second, it perturbed the hematoxylin, eosin and DAB stains independently.
Third, it transformed the resulting stains into regular RGB color space.
Args:
theta (float): How much to jitter HED color space,
alpha is chosen from a uniform distribution [1-theta, 1+theta]
betti is chosen from a uniform distribution [-theta, theta]
the jitter formula is **s' = \alpha * s + \betti**
"""
def __init__(self, theta=0., p=1.0): # LAB_light: theta=0.05; LAB_strong: theta=0.2
# assert isinstance(theta, numbers.Number), "theta should be a single number."
self.theta = theta
# 12.26这边的随机采样不应该是这样的,应该是每次都随机
self.alpha = 0 # np.random.uniform(1-theta, 1+theta, (1, 3))
self.betti = 0 # np.random.uniform(-theta, theta, (1, 3))
self.p = p #2.13加入概率
@staticmethod
def adjust_LAB(img, alpha, betti):
img = np.array(img)
s = np.reshape(color.rgb2lab(img), (-1, 3)) #1.21修改,rgb2hed改为rgb2lab
ns = alpha * s + betti # perturbations on LAB color space
nimg = color.lab2rgb(np.reshape(ns, img.shape)) #1.21修改,hed2rgb改为lab2rgb
imin = nimg.min()
imax = nimg.max()
rsimg = (255 * (nimg - imin) / (imax - imin)).astype('uint8') # rescale to [0,255]
# transfer to PIL image
return Image.fromarray(rsimg)
def __call__(self, img):
# 每张图片都重新来弄,既可以记录,又可以更新
if np.random.rand(1) < self.p: #2.13加入概率
self.alpha = np.random.uniform(1-self.theta, 1+self.theta, (1, 3))
self.betti = np.random.uniform(-self.theta, self.theta, (1, 3))
return self.adjust_LAB(img, self.alpha, self.betti)
else:
return img
def __repr__(self):
format_string = self.__class__.__name__ + '('
format_string += 'theta={0}'.format(self.theta)
format_string += ', alpha={0}'.format(self.alpha)
format_string += ', betti={0}'.format(self.betti)
format_string += ', p={0})'.format(self.p)
return format_string
# 2.6 加入labjitter-hsv策略
# 借鉴pytorch调整hue来修改各通道参数
class LABJitter_hsv(object):
def __init__(self, l_factor, a_factor, b_factor, p=1.0):
self.l_factor = l_factor
self.a_factor = a_factor
self.b_factor = b_factor
self.p = p #2.13加入概率
def adjust_channel(self, channel, factor) -> Image.Image:
if not (0.5 <= factor <= 1.5):
raise ValueError(f"factor ({factor}) is not in [-0.5, 0.5].")
# h, s, v = img.convert("HSV").split()
channel = np.array(channel, dtype=np.uint8) #确保整型
# uint8 addition take cares of rotation across boundaries
with np.errstate(over="ignore"):
channel += np.uint8(factor * 255)
channel = np.array(channel, dtype=np.uint8) #将超出范围的限制在0-255
return channel
def __call__(self, img):
# 每张图片都重新来弄,既可以记录,又可以更新
if np.random.rand(1) < self.p: #2.13加入概率
l_factor = np.random.uniform(1-self.l_factor, 1+self.l_factor)
a_factor = np.random.uniform(1-self.a_factor, 1+self.a_factor)
b_factor = np.random.uniform(1-self.b_factor, 1+self.b_factor)
img_bgr = cv2.cvtColor(np.array(img), cv2.COLOR_RGB2BGR)
img_lab = cv2.cvtColor(img_bgr, cv2.COLOR_BGR2LAB)
np_l, np_a, np_b = cv2.split(img_lab)
np_l = self.adjust_channel(np_l, l_factor)
np_a = self.adjust_channel(np_a, a_factor)
np_b = self.adjust_channel(np_b, b_factor)
LAB = cv2.merge([np_l, np_a, np_b])
image = cv2.cvtColor(LAB, cv2.COLOR_LAB2BGR)
return Image.fromarray(cv2.cvtColor(image,cv2.COLOR_BGR2RGB))
else:
return img
def __repr__(self):
format_string = self.__class__.__name__ + '('
format_string += 'l_factor={0}'.format(self.l_factor)
format_string += ', a_factor={0}'.format(self.a_factor)
format_string += ', b_factor={0}'.format(self.b_factor)
format_string += ', p={0})'.format(self.p)
return format_string
# 2.9 动态调整p的类
class Dynamic_P_class(object):
def __init__(self, epochs=0, batch_per_epoch=0, dynamic_factor=1.0, function='sin_pi'):
self.batches = epochs * batch_per_epoch #总batches数
# 通过正弦*某值来调控到第几个epoch时是全力,*5时,3个epoch训练完后达到1,最后3个epoch恢复
if function=='sin_pi': #先增后减型
self.p_list = [math.sin(math.pi*idx / self.batches) *dynamic_factor for idx in range(self.batches)] #构造时就得到所有p的取值,sin的0-π
elif function=='sin_pi_2': #递增型
self.p_list = [math.sin(math.pi*idx / (2*self.batches)) *dynamic_factor for idx in range(self.batches)]
self.i = -1
def step(self): #每个batch递进都会取得下一个
self.i += 1 #初始是0
return self.p_list[self.i]
# 2.17添加
class ConcatDataset(torch.utils.data.Dataset):
def __init__(self, *datasets):
self.datasets = datasets
def __getitem__(self, i):
return tuple(d[i] for d in self.datasets)
def __len__(self):
return min(len(d) for d in self.datasets) | 15,678 | 44.184438 | 174 | py |
RandStainNA | RandStainNA-master/segmentation/CIA_tta.py | from CIAnet import *
import scipy.io
import numpy as np
import time
import os
from PIL import Image
from torchvision import transforms
from torch.utils.data import DataLoader,TensorDataset
from tqdm import tqdm
import yaml
import random
# from Dataset import ciaData
from transform import color_norm_jitter, HEDJitter, LABJitter, LABJitter_hsv, Dynamic_P_class, ConcatDataset #2.26添加
import argparse
import logging #2.25添加
import copy #2.26添加
import json #2.25添加
_logger = logging.getLogger('train')
config_parser = parser = argparse.ArgumentParser(description='Training Config', add_help=False)
parser.add_argument('-c', '--config', default='', type=str, metavar='FILE',
help='YAML config file specifying default arguments')
parser = argparse.ArgumentParser(description='PyTorch ImageNet Training')
# Dataset parameters
parser.add_argument('--dataset', metavar='DIR',
help='path to dataset')
parser.add_argument('--output', type=str, metavar='DIR',
help='path to output dir')
# Model parameters
parser.add_argument('--model', default='resnet50', type=str, metavar='MODEL',
help='Name of model to train (default: "resnet50"')
# Learning rate schedule parameters
parser.add_argument('--sched', default='cosine', type=str, metavar='SCHEDULER',
help='LR scheduler (default: "step"')
parser.add_argument('--lr', type=float, default=0.05, metavar='LR',
help='learning rate (default: 0.05)')
parser.add_argument('--epochs', type=int, default=50, metavar='N',
help='number of epochs to train (default: 50)')
parser.add_argument('-b', '--batch-size', type=int, default=2, metavar='N',
help='input batch size for training (default: 2)')
############## cj
# 2.9加入,cj的p控制
parser.add_argument('--color-jitter', nargs='+', type=float, default=None,
help='Color jitter factor Brigtness-Contrast-S-Hue(default: [0, 0, 0, 0])')
# 12.26加入,控制HEDJitter
parser.add_argument('--hed-jitter', type=float, default=None,
help='HED-jitter factory(default: 0)')
parser.add_argument('--lab-jitter', nargs='+', type=float, default=None,
help='LAB-jitter factory(default: None)')
parser.add_argument('--cj-p', type=float, default=1.0, metavar='PCT',
help='color jitter possibility (default: 1, range: 0-1)')
############# 12.20加入,norm&jitter参数配置 ##########
parser.add_argument('--nj-config', type=str, default=None, metavar='PCT',
help='norm&jitter yaml config path (default: '')')
parser.add_argument('--nj-stdhyper', type=float, default=0.0, metavar='PCT',
help='norm&jitter std hyper (default: 0)')
parser.add_argument('--nj-distribution', type=str, default=None, metavar='PCT',
help='norm&jitter distribution (default: '')')
parser.add_argument('--nj-p', type=float, default=1.0, metavar='PCT', #2.9加入,nj的p控制
help='norm&jitter possibility (default: 1, range: 0-1)')
############# 2.9加入,norm&jitter强度控制 ##########
parser.add_argument('--nj-dynamic',action='store_true', default=False,
help='Enable norm-jitter dynamic-p (default: False)')
parser.add_argument('--dynamic-factor', type=float, default=1.0,
help='norm-jitter dynamic-p factor(default: 1)')
############# 2.9加入,nj-TTA ##########
parser.add_argument('--nj-TTA', type=int, default=0,
help='Enable norm-jitter Test Time Augmentation (default: 0)')
# Misc
parser.add_argument('--seed', type=int, default=42, metavar='S',
help='random seed (default: 42)')
def _parse_args():
# Do we have a config file to parse?
args_config, remaining = config_parser.parse_known_args()
if args_config.config:
with open(args_config.config, 'r') as f:
cfg = yaml.safe_load(f)
parser.set_defaults(**cfg)
# The main arg parser parses the rest of the args, the usual
# defaults will have been overridden if config file specified.
args = parser.parse_args(remaining)
# Cache the args as a text string to save them in the output dir later
args_text = yaml.safe_dump(args.__dict__, default_flow_style=False)
return args, args_text
#12.20增加,获取yaml文件的参数
def get_yaml_data(yaml_file):
# 打开yaml文件
# _logger.info(yaml_file)
file = open(yaml_file, 'r', encoding="utf-8")
file_data = file.read()
file.close()
# 将字符串转化为字典或列表
# print("***转化yaml数据为字典或列表***")
data = yaml.load(file_data, Loader=yaml.FullLoader)
return data
if __name__ == '__main__':
args, args_text = _parse_args()
# 12.16修改,加入各个随机种子
# random_seed(args.seed)
np.random.seed(args.seed)
torch.manual_seed(args.seed)
if torch.cuda.is_available(): torch.cuda.manual_seed(args.seed)
device = 'cuda:0' if torch.cuda.is_available() else 'cpu'
print(device)
# epoch, batch = 50, 2
epoch, batch = args.epochs, args.batch_size
train_path = os.path.join(args.dataset,'train') #'/root/autodl-tmp/MoNuSeg2018/standard/train'
val_path = os.path.join(args.dataset,'val')
test_path = os.path.join(args.dataset,'test') #'/root/autodl-tmp/MoNuSeg2018/standard/test'
base_dir = '/root/autodl-tmp/pycharm_project_CA2.5'
w_dir = '{}/weights'.format(base_dir)
o_dir = '{}/outputs/{}'.format(base_dir, args.output)
if not os.path.exists(w_dir):
os.makedirs(w_dir)
if not os.path.exists(o_dir):
os.makedirs(o_dir)
if not os.path.exists(os.path.join(o_dir,'test')):
os.makedirs(os.path.join(o_dir,'test'))
if not os.path.exists(os.path.join(o_dir,'val')):
os.makedirs(os.path.join(o_dir,'val'))
'''
x = torch.from_numpy(x).float()
x = x / 255 # normalization
x = x.unsqueeze(1)
y = torch.from_numpy(y).to(torch.long)
z = torch.from_numpy(z).to(torch.long)
'''
#1.20添加,将输出保存在一个log里
_logger.setLevel(level = logging.INFO)
handler = logging.FileHandler("{}/output_info.log".format(o_dir))
handler.setLevel(logging.INFO)
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
handler.setFormatter(formatter)
_logger.addHandler(handler)
# train_size = 250
transform_list = []
if args.nj_config is not None:
nj_config = get_yaml_data(args.nj_config)
nj_stdhyper = args.nj_stdhyper
nj_distribution = args.nj_distribution #1.30添加,手工指定6个采样的分布
nj_p = args.nj_p #2.9添加,调整nj的概率
nj_config['std_hyper'] = nj_stdhyper
nj_config['distribution'] = nj_distribution
nj_config['p'] = nj_p
norm_jitter = nj_config
# LAB / HED
# 根据config文件的空间来决定是哪种
# 12.26好像有问题,之前不知道顺序,很可怕
if norm_jitter['methods'] == 'Reinhard':
# 1.10修改,nj方法的lab和hsv进行统一
# 1.30修改,lab,hsv和hed方法统一,根据color_space确定
if norm_jitter['color_space'] == 'LAB' or norm_jitter['color_space'] == 'HSV' or norm_jitter['color_space'] == 'HED':
color_space = norm_jitter['color_space'] #获取颜色空间名称
#1.30修改,avg和std已经自带分布,在transform里面修改即可
mean_dataset = [norm_jitter[color_space[0]]['avg'],norm_jitter[color_space[1]]['avg'],norm_jitter[color_space[2]]['avg']]
std_dataset = [norm_jitter[color_space[0]]['std'],norm_jitter[color_space[1]]['std'],norm_jitter[color_space[2]]['std']]
std_hyper = norm_jitter['std_hyper']
distribution = norm_jitter['distribution'] #1.30添加,手工指定分布
p = norm_jitter['p'] #2.9添加,采用增强的概率,默认是1
transform_list += [color_norm_jitter(mean=mean_dataset,std=std_dataset,std_hyper=std_hyper,probability=p,color_space=color_space, distribution=distribution)]
elif norm_jitter['color_space'] == 'Random': #1.10增加,混合多种方法,等概率随机进行选取
distribution = norm_jitter['distribution'] #1.30添加,手工指定分布
if 'L' in list(norm_jitter.keys()): #2.8修改,测试HED,lab,hsv三者的排列组合
mean_dataset = [norm_jitter['L']['avg'],norm_jitter['A']['avg'],norm_jitter['B']['avg']]
std_dataset = [norm_jitter['L']['std'],norm_jitter['A']['std'],norm_jitter['B']['std']]
std_hyper = norm_jitter['std_hyper']
p = norm_jitter['p'] #2.9添加,采用增强的概率,默认是1
transform_list += [color_norm_jitter(mean=mean_dataset,std=std_dataset,std_hyper=std_hyper,probability=p,color_space='LAB',distribution=distribution)]
if 'E' in list(norm_jitter.keys()): #2.8修改,测试HED,lab,hsv三者的排列组合
mean_dataset = [norm_jitter['H']['avg'],norm_jitter['E']['avg'],norm_jitter['D']['avg']]
std_dataset = [norm_jitter['H']['std'],norm_jitter['E']['std'],norm_jitter['D']['std']]
std_hyper = norm_jitter['std_hyper']
p = norm_jitter['p'] #2.9添加,采用增强的概率,默认是1
# special_tfl += [hed_norm_jitter(mean=mean_dataset,std=std_dataset,std_hyper=std_hyper,probability=1)]
# 1.30修改,nj方法统一lab和hed,所以统一用一个即可
transform_list += [color_norm_jitter(mean=mean_dataset,std=std_dataset,std_hyper=std_hyper,probability=p,color_space='HED',distribution=distribution)]
# 2.6修改,增加hsv来random
if 'h' in list(norm_jitter.keys()): #2.8修改,测试HED,lab,hsv三者的排列组合
mean_dataset = [norm_jitter['h']['avg'],norm_jitter['S']['avg'],norm_jitter['V']['avg']]
std_dataset = [norm_jitter['h']['std'],norm_jitter['S']['std'],norm_jitter['V']['std']]
std_hyper = norm_jitter['std_hyper']
p = norm_jitter['p'] #2.9添加,采用增强的概率,默认是1
transform_list += [color_norm_jitter(mean=mean_dataset,std=std_dataset,std_hyper=std_hyper,probability=p,color_space='HSV',distribution=distribution)]
###### baseline ###########
if args.color_jitter is not None:
brightness = args.color_jitter[0]
contrast = args.color_jitter[1]
saturation = args.color_jitter[2]
hue = args.color_jitter[3]
transform_list+=[transforms.RandomApply([transforms.ColorJitter(brightness, contrast, saturation, hue)],p=args.cj_p)]
if args.hed_jitter is not None:
transform_list+=[transforms.RandomApply([HEDJitter(args.hed_jitter)],p=args.cj_p)]
if args.lab_jitter is not None:
if len(args.lab_jitter) == 1:
transform_list+=[transforms.RandomApply([LABJitter(args.lab_jitter[0])],p=args.cj_p)]
else:
l_factor = args.lab_jitter[0]
a_factor = args.lab_jitter[1]
b_factor = args.lab_jitter[2]
transform_list+=[transforms.RandomApply([LABJitter_hsv(l_factor,a_factor,b_factor)],p=args.cj_p)]
transform_list += [transforms.ToTensor()]
mean_ = (0.485, 0.456, 0.406)
std_ = (0.229, 0.224, 0.225)
transform_list += [transforms.Normalize(
mean=torch.tensor(mean_),
std=torch.tensor(std_))
]
transform_train = transforms.Compose(transform_list)
###### test #######
transform_test = transforms.Compose([
# transforms.Resize((512, 512)),
transforms.ToTensor(),
transforms.Normalize(
mean=torch.tensor(mean_),
std=torch.tensor(std_)
)
])
print('train_transform:\n',transform_train)
print('test_transform:\n',transform_test)
train_dataset = ciaData(train_path,transform=transform_train)
if args.nj_dynamic: #2.26加入dynamic
_logger.info('nj_dynamic!!')
train_dataset_copy = copy.deepcopy(train_dataset)
train_dataset_copy.transform=transform_test
train_dataset = ConcatDataset(train_dataset, train_dataset_copy)
if not os.path.exists(val_path):
val_dataset = ciaData(train_path,transform=transform_test)
else:
val_dataset = ciaData(val_path,transform=transform_test)
test_dataset = ciaData(test_path,transform=transform_test)
if args.nj_TTA > 0 : #2.26加入测试时增强
_logger.info('nj_TTA {}!!'.format(args.nj_TTA))
test_dataset_list = []
for idx in range(args.nj_TTA+1): #多一个test_transform
if idx == 0:
test_dataset_copy = copy.deepcopy(test_dataset)
test_dataset_list.append(test_dataset_copy)
else:
test_dataset_copy = copy.deepcopy(test_dataset)
test_dataset_copy.transform=transform_train
test_dataset_list.append(test_dataset_copy)
# test_dataset = ConcatDataset(test_dataset_copy, test_dataset, test_dataset_copy) #只能用这种方法传入,不能一个完整list
test_dataset_tuple = tuple(test_dataset_list)
test_dataset = ConcatDataset(*test_dataset_tuple) #这样就可以取得类似的效果
train_loader = DataLoader(train_dataset, batch_size=batch, shuffle=True, num_workers=15, pin_memory=True)
val_loader = DataLoader(val_dataset, batch_size=1, shuffle=False, num_workers=15, pin_memory=True)
test_loader = DataLoader(test_dataset, batch_size=1, shuffle=False, num_workers=15, pin_memory=True)
model = CIAnet(growthRate=6, nDenseBlocks=[6,12,24,16], reduction=0.5, bottleneck=True).to(device)
#checkpoint = torch.load('weights/CIA1.ptf')
#model.load_state_dict(checkpoint['model_state_dict'])
lr = args.lr #1e-5 #1e-5
optimizer = torch.optim.Adam(model.parameters(), lr=lr)
lr_scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=1, gamma=0.95, last_epoch=-1) #0.95
# lr_scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer,milestones=[20],gamma = 0.1)
# Mask acc : 0.9201083183288574 -- Boundary acc : 0.5252535939216614
# torch.save({'model_state_dict': model.state_dict()}, 'weights/CIA_initial.ptf')
print('Begin training.')
_logger.info('Begin training.')
start = time.time()
# loss_f, macc_f, bacc_f = [], [], []
best_dict = {}
best_dict['Dice'] = 0
best_dict['Iou'] = 0
Dynamic_P = Dynamic_P_class(epochs=args.epochs, batch_per_epoch=int(len(train_dataset)/args.batch_size)+1, dynamic_factor=args.dynamic_factor) #2.9新加,传递函数为引用类型即可
for ep in range(epoch):
ep += 1
save_idx = 0 # 每个epoch在test时保存一张图片
# w = 0.75 - np.exp(2*ep/epoch)/(2*np.exp(2))
# 2.22发现bug,ep较小的时候,w是负值
# if ep < 35:
# w = 0.8
# else:
# w = 0.2
w = 0.8 #0.5
train_bar = tqdm(train_loader)
for batch in train_bar:
model.train()
if args.nj_dynamic == False: #2.26加入dynamic
img, label, bound = batch
else:
batch_1, batch_2 = batch
dynamic_p = Dynamic_P.step() #2.9新加全局函数,用step方法维护概率p,每个batch调整一次
if np.random.rand(1) < dynamic_p: #选择有nj的
img, label, bound = batch_1
else: # 选择没有nj的
img, label, bound = batch_2
img = img.to(device)
label = label.to(device)
bound = bound.to(device)
mout, bout = model(img)
# print(mout.shape, bout.shape)
#loss = w*my_loss(mout, label) + (1-w)*cia_loss(bout, bound, ep/(2*epoch)+1/4)
loss = w*my_loss(mout, label) + (1-w)*cia_loss(bout, bound)
# print(my_loss(mout, label), cia_loss(bout, bound, 0.5))
optimizer.zero_grad()
loss.backward()
optimizer.step()
train_bar.set_description('Train Epoch: [{}/{}], lr: {:.8f}, Loss: {:.4f}'.format(ep, epoch, optimizer.param_groups[0]['lr'], loss))
lr_scheduler.step() #每个epoch更新一次lr
if ep % 1 == 0:
# lr = lr * 0.99
acc_all, iou_all, bacc_all, loss_all = [], [], [], []
with torch.no_grad():
for verify in tqdm(val_loader): #2.22修改,直接进行测试
img, label, bound = verify
if save_idx == 0 :
img_save = label.cpu().clone()
img_save = img_save.squeeze(0) # 压缩一维
img_save = transforms.ToPILImage()(img_save) # 自动转换为0-255
img_save.save(os.path.join(o_dir,'val/label_val.png'))
img = img.to(device)
label = label.to(device)
bound = bound.to(device)
model.eval()
mout, bout = model(img)
if save_idx == 0 :
img_save = mout[0][0].cpu().clone()
img_save = img_save.squeeze(0) # 压缩一维
img_save = transforms.ToPILImage()(img_save) # 自动转换为0-255
img_save.save(os.path.join(o_dir,'val/ep:{}-val.png'.format(ep)))
save_idx += 1
loss = w*my_loss(mout, label) + (1-w)*cia_loss(bout, bound, 0.5)
loss_all.append(loss.cpu().numpy())
acc = dice_acc(mout[0][0], label) #没问题,这边acc计算默认b=1了
acc_all.append(acc)
acc = my_acc(mout[0][0], label[0][0])
if math.isnan(float(acc)):
pass
else:
iou_all.append(acc)
acc = dice_acc(bout[0][0], bound>0.1)
bacc_all.append(acc)
acc_all = np.array(acc_all)
iou_all = np.array(iou_all)
loss_all = np.array(loss_all)
bacc_all = np.array(bacc_all)
_logger.info('epoch num val: {} -- Loss: {} -- Dice : {} -- Iou : {} -- Boundary acc : {}'.format(ep , round(float(loss_all.mean()), 4), round(float(acc_all.mean()),4), round(float(iou_all.mean()),4),round(bacc_all.mean(),4)))
print('epoch num val: {} -- Loss: {} -- Dice : {} -- Iou : {} -- Boundary acc : {}'.format(ep , round(float(loss_all.mean()), 4), round(float(acc_all.mean()),4), round(float(iou_all.mean()),4),round(bacc_all.mean(),4)))
# if ep > 49 and ep % 20 == 0:
# lr = lr * 0.95
# torch.save({'model_state_dict': model.state_dict()}, 'weights/ep{}_loss{}.ptf'.format(ep+1,bacc_all.mean()))
# if ep % 2 == 0:
# torch.save({'model_state_dict': model.state_dict()}, '{}/ep{}_loss{}.ptf'.format(w_dir,ep,round(bacc_all.mean(), 3)))
acc_all, iou_all, bacc_all, loss_all = [], [], [], []
with torch.no_grad():
for verify in tqdm(test_loader):
if args.nj_TTA > 0:
img, label, bound = verify[0]
img_list = [img.to(device)] #原图也搞上
for idx in range(args.nj_TTA):
img_list += [verify[idx+1][0].to(device)] #取第idx次增强的img
else:
img, label, bound = verify
img = img.to(device)
label = label.to(device)
bound = bound.to(device)
if save_idx == 1 :
img_save = label[0][0].cpu().clone()
img_save = img_save.squeeze(0) # 压缩一维
img_save = transforms.ToPILImage()(img_save) # 自动转换为0-255
img_save.save(os.path.join(o_dir,'test/label_test.png'))
model.eval()
if args.nj_TTA > 0:
mout_mean = 0
bout_mean = 0
for idx in range(args.nj_TTA+1):
mout, bout = model(img_list[idx])
mout_mean = (mout + mout_mean * idx)/(idx+1)
bout_mean = (bout + bout_mean * idx)/(idx+1)
a = torch.ones_like(mout)
b = torch.zeros_like(mout)
thresh = 0.8
mout = torch.where(mout_mean>=thresh, a, b)
bout = torch.where(bout_mean>=thresh, a, b)
else:
mout, bout = model(img)
if save_idx == 1 :
img_save = mout[0][0].cpu().clone()
img_save = img_save.squeeze(0) # 压缩一维
img_save = transforms.ToPILImage()(img_save) # 自动转换为0-255
img_save.save(os.path.join(o_dir,'test/ep:{}-test.png'.format(ep)))
save_idx += 1
loss = w*my_loss(mout, label) + (1-w)*cia_loss(bout, bound, 0.5)
loss_all.append(loss.cpu().numpy())
acc = dice_acc(mout[0][0], label)
acc_all.append(acc)
acc = my_acc(mout[0][0], label[0][0])
if math.isnan(float(acc)):
pass
else:
iou_all.append(acc)
acc = dice_acc(bout[0][0], bound>0.1)
bacc_all.append(acc)
acc_all = np.array(acc_all)
iou_all = np.array(iou_all)
loss_all = np.array(loss_all)
bacc_all = np.array(bacc_all)
_logger.info('epoch num test: {} -- Loss: {} -- Dice : {} -- Iou : {} -- Boundary acc : {}\n'.format(ep , round(float(loss_all.mean()), 4), round(float(acc_all.mean()),4), round(float(iou_all.mean()),4),round(bacc_all.mean(),4)))
print('epoch num test: {} -- Loss: {} -- Dice : {} -- Iou : {} -- Boundary acc : {}\n'.format(ep , round(float(loss_all.mean()), 4), round(float(acc_all.mean()),4), round(float(iou_all.mean()),4),round(bacc_all.mean(),4)))
if best_dict['Dice'] < float(acc_all.mean()):
best_dict['Dice'] = round(float(acc_all.mean()),4)
best_dict['Iou'] = round(float(iou_all.mean()),4)
best_dict['epoch'] = ep
json_str = json.dumps(best_dict)
json_path = '{}/best.json'.format(o_dir)
with open(json_path, 'w') as json_file:
json_file.write(json_str)
#2.9每个epoch训练后,监视一下p的值
if args.nj_dynamic != False:
print('dynamic_p',dynamic_p)
# macc_f = np.array(macc_f)
# loss_f = np.array(loss_f)
# bacc_f = np.array(bacc_f)
# cacc_f = np.array(cacc_f)
# mdic = {"macc":macc_f, "loss":loss_f,"bacc":bacc_f, "cacc":cacc_f}
# scipy.io.savemat("results/cl_train.mat", mdic)
torch.save({'model_state_dict': model.state_dict()}, 'weights/CIA.ptf')
end = time.time()
print('Total training time is {}h'.format((end-start)/3600))
print('Finished Training')
# %%
# loss_all,acc_all = [],[]
# with torch.no_grad():
# for verify in train_loader:
# img, label = verify
# img = img.cuda()
# label = label.cuda()
# model.eval()
# out = model(img)
# loss = my_loss(out, label)
# acc = my_acc(out, label)
# acc_all.append(acc)
# loss_all.append(loss.cpu().numpy())
# acc_all = np.array(acc_all)
# loss_all = np.array(loss_all)
# print('Loss : {} -- Acc : {} -- Max Acc : {} -- Min Acc : {}'.format(loss_all.mean(), acc_all.mean(), acc_all.max(), acc_all.min()))
# mdic = {"loss_mean":loss_noCL, "loss_max":loss_max,"loss_std":loss_std, "acc_test":acc_all}
# scipy.io.savemat("result/noCL_results.mat", mdic)
# torch.save({
# 'epoch': ep,
# 'model_state_dict': model.state_dict(),
# 'optimizer_state_dict': optimizer.state_dict(),
# 'loss': loss,
# }, 'Unet_noCL.ptf') | 24,432 | 46.350775 | 246 | py |
RandStainNA | RandStainNA-master/segmentation/CIAnet.py | import torch
import torch.nn as nn
import numpy as np
import torch.nn.functional as F
import math
from torchvision.io import read_image
from torch.utils.data import Dataset,DataLoader,TensorDataset
from os import listdir
import os
from PIL import Image
from torchvision import transforms
device = 'cuda:0' if torch.cuda.is_available() else 'cpu'
class Bottleneck(nn.Module):
def __init__(self, nChannels, growthRate):
super(Bottleneck, self).__init__()
interChannels = 4*growthRate
self.bn1 = nn.BatchNorm2d(nChannels)
self.conv1 = nn.Conv2d(nChannels, interChannels, kernel_size=1,
bias=False)
self.bn2 = nn.BatchNorm2d(interChannels)
self.conv2 = nn.Conv2d(interChannels, growthRate, kernel_size=3,
padding=1, bias=False)
def forward(self, x):
out = self.conv1(F.relu(self.bn1(x)))
out = self.conv2(F.relu(self.bn2(out)))
out = torch.cat((x, out), 1)
return out
class SingleLayer(nn.Module):
def __init__(self, nChannels, growthRate):
super(SingleLayer, self).__init__()
self.bn1 = nn.BatchNorm2d(nChannels)
self.conv1 = nn.Conv2d(nChannels, growthRate, kernel_size=3,
padding=1, bias=False)
def forward(self, x):
out = self.conv1(F.relu(self.bn1(x)))
out = torch.cat((x, out), 1)
return out
class Transition(nn.Module):
def __init__(self, nChannels, nOutChannels):
super(Transition, self).__init__()
self.bn1 = nn.BatchNorm2d(nChannels)
self.conv1 = nn.Conv2d(nChannels, nOutChannels, kernel_size=1, bias=False)
def forward(self, x):
out = self.conv1(F.relu(self.bn1(x)))
out = F.avg_pool2d(out, 2)
return out
class info_agg(nn.Module):
def __init__(self, in_size, out_size=256):
# information aggregation module
# 1 is mask, 2 is contour
super(info_agg,self).__init__()
self.conv01 = nn.Conv2d(in_size, out_size, kernel_size=3, padding=1, bias=False)
self.conv02 = nn.Conv2d(in_size, out_size, kernel_size=3, padding=1, bias=False)
self.convshare = nn.Conv2d(2*out_size, out_size, kernel_size=3, padding=1, bias=False)
self.conv11 = nn.Conv2d(out_size, out_size, kernel_size=3, padding=1, bias=False)
self.conv12 = nn.Conv2d(out_size, out_size, kernel_size=3, padding=1, bias=False)
# initialise the blocks
# for m in self.children():
# if m.__class__.__name__.find('unetConv2') != -1: continue
# init_weights(m, init_type='kaiming')
def forward(self, input1, input2):
input1 = self.conv01(input1)
input2 = self.conv02(input2)
fshare = self.convshare(torch.cat([input1, input2], 1))
return self.conv11(fshare), self.conv12(fshare)
class Lateral_Connection(nn.Module):
def __init__(self, left_size, down_size, is_deconv=False, n_concat=2):
super(Lateral_Connection, self).__init__()
self.conv = nn.Conv2d(left_size, 256, kernel_size=3, padding=1, bias=False)
if is_deconv:
self.up = nn.ConvTranspose2d(down_size, down_size//2, kernel_size=4, stride=2, padding=1)
else:
self.up = nn.UpsamplingBilinear2d(scale_factor=2)
# initialise the blocks
# for m in self.children():
# if m.__class__.__name__.find('unetConv2') != -1: continue
# init_weights(m, init_type='kaiming')
def forward(self, down_input, left_input):
outputs0 = self.up(down_input)
outputs0 = torch.cat([outputs0, self.conv(left_input)], 1)
return outputs0
class CIAnet(nn.Module):
def __init__(self, growthRate, nDenseBlocks, reduction, bottleneck):
super(CIAnet, self).__init__()
nChannels = 2*growthRate
self.conv1 = nn.Conv2d(3, nChannels, kernel_size=3, padding=1, bias=False)
self.dense1 = self._make_dense(nChannels, growthRate, nDenseBlocks[0], bottleneck)
nChannels1 = nChannels + nDenseBlocks[0]*growthRate
nOutChannels = int(math.floor(nChannels1*reduction))
self.trans1 = Transition(nChannels1, nOutChannels)
nChannels = nOutChannels
self.dense2 = self._make_dense(nChannels, growthRate, nDenseBlocks[1], bottleneck)
nChannels2 = nChannels + nDenseBlocks[1]*growthRate
nOutChannels = int(math.floor(nChannels2*reduction))
self.trans2 = Transition(nChannels2, nOutChannels)
nChannels = nOutChannels
self.dense3 = self._make_dense(nChannels, growthRate, nDenseBlocks[2], bottleneck)
nChannels3 = nChannels + nDenseBlocks[2]*growthRate
nOutChannels = int(math.floor(nChannels3*reduction))
self.trans3 = Transition(nChannels3, nOutChannels)
nChannels = nOutChannels
self.dense4 = self._make_dense(nChannels, growthRate, nDenseBlocks[3], bottleneck)
nChannels4 = nChannels + nDenseBlocks[3]*growthRate
# decoder
# 0 denotes semantic mask, 1 denotes boundary mask
self.latconnect04 = Lateral_Connection(nChannels3,nChannels4)
self.latconnect14 = Lateral_Connection(nChannels3,nChannels4)
self.iam4 = info_agg(nChannels4+256)
self.latconnect03 = Lateral_Connection(nChannels2,256)
self.latconnect13 = Lateral_Connection(nChannels2,256)
self.iam3 = info_agg(512)
self.latconnect02 = Lateral_Connection(nChannels1,256)
self.latconnect12 = Lateral_Connection(nChannels1,256)
self.iam2 = info_agg(512)
self.final_0 = nn.Conv2d(256, 1, 1)
self.final_1 = nn.Conv2d(256, 1, 1)
for m in self.modules():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2. / n))
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
# elif isinstance(m, nn.Linear):
# m.bias.data.zero_()
def _make_dense(self, nChannels, growthRate, nDenseBlocks, bottleneck):
layers = []
for i in range(int(nDenseBlocks)):
if bottleneck:
layers.append(Bottleneck(nChannels, growthRate))
else:
layers.append(SingleLayer(nChannels, growthRate))
nChannels += growthRate
return nn.Sequential(*layers)
def forward(self, x):
# encoder
out = self.conv1(x)
x0 = self.dense1(out)
out = self.trans1(x0)
x1 = self.dense2(out)
out = self.trans2(x1)
x2 = self.dense3(out)
out = self.trans3(x2)
x3 = self.dense4(out)
# decoder
# 0 denotes mask, 1 denotes boundary
y02 = self.latconnect04(x3,x2)
y12 = self.latconnect14(x3,x2)
y02,y12 = self.iam4(y02,y12)
y01 = self.latconnect03(y02,x1)
y11 = self.latconnect13(y12,x1)
y01,y11 = self.iam3(y01,y11)
y00 = self.latconnect02(y01,x0)
y10 = self.latconnect12(y11,x0)
y00,y10 = self.iam2(y00,y10)
# final layer
y00 = self.final_0(y00)
y10 = self.final_1(y10)
return (torch.sigmoid(y00), torch.sigmoid(y10))
# loss function
# CIA loss
def _cia_loss(pred, target):
b = pred.shape[0]
IoU = 0.0
for i in range(0,b):
#compute the IoU of the foreground
classes = target[i] > 0
Iand1 = -torch.sum(classes*torch.log(pred[i][0]+1e-6)/(torch.sum(classes)+1) + ~classes*torch.log(1-pred[i][0]+1e-6)/(torch.sum(~classes)+1))
# print('class{}: {}'.format(j,Iand1))
IoU = IoU + Iand1
return IoU/b
def _st_loss(pred, target, thresh):
# Smooth Truncated Loss
b = pred.shape[0]
ST = 0.0
for i in range(0,b):
#compute the IoU of the foreground
w = target[i] > 0
pt = w*pred[i][0]
certain = pt > thresh
Iand1 = -(torch.sum( certain*torch.log(pt+1e-6) + ~certain*(np.log(thresh) - (1-(pt/thresh)**2)/2) ))
ST = ST + Iand1/512/512
return ST/b
class CIA(torch.nn.Module):
def __init__(self, size_average = True):
super(CIA, self).__init__()
self.size_average = size_average
def forward(self, pred, target, thresh, lw):
# print(_cia_loss(pred, target), _st_loss(pred, target, thresh))
return _cia_loss(pred, target) + lw * _st_loss(pred, target, thresh)
def cia_loss(pred, label, thr=0.2, lamb=0.5):
Cia_loss = CIA(size_average=True)
cia_out = Cia_loss(pred, label, thr, lamb)
return cia_out
# IOU loss
def _iou(pred, target, size_average = True):
b = pred.shape[0]
IoU = 0.0
for i in range(0,b):
#compute the IoU of the foreground
w = target[i] == 0
Iand1 = torch.sum(target[i]*pred[i])
Ior1 = torch.sum(target[i]) + torch.sum(pred[i])-Iand1
IoU1 = Iand1/Ior1
#IoU loss is (1-IoU1)
IoU = IoU + (1-IoU1)
return IoU/b
class IOU(torch.nn.Module):
def __init__(self, size_average = True):
super(IOU, self).__init__()
self.size_average = size_average
def forward(self, pred, target):
return _iou(pred, target, self.size_average)
def my_loss(pred,label):
iou_loss = IOU(size_average=True)
iou_out = iou_loss(pred, label)
# print("iou_loss:", iou_out.data.cpu().numpy())
return iou_out
# accuracy
def my_acc(pred,target):
# IOU
temp = pred > 0.5
temp = temp.long()
b = pred.shape[0]
IoU = 0.0
for i in range(0,b):
#compute the IoU of the foreground
Iand1 = torch.sum(target[i]*temp[i])
Ior1 = torch.sum(target[i]) + torch.sum(temp[i])-Iand1
IoU1 = Iand1.float()/Ior1.float()
IoU = IoU + IoU1
IoU = IoU/b
return IoU.detach().cpu().numpy()
def bd_acc(pred,target):
# boundary accuracy
b = pred.shape[0]
IoU = 0.0
for i in range(0,b):
temp = pred[i] > 0.5
label = torch.zeros(2,512,512)
label[0] = target[i] > 0
label[1] = target[i] > 1
# temp = temp.long()
# compute the IoU of the foreground\
label = label.cuda()
Iand1 = torch.sum(label*temp)
Ior1 = torch.sum(label) + torch.sum(temp)-Iand1
IoU1 = Iand1.float()/Ior1.float()
IoU = IoU + IoU1
IoU = IoU/b
return IoU.detach().cpu().numpy()
def dice_acc(pred,target):
# dice coefficient
temp = pred > 0.5
temp = temp.long()
IoU = 0.0
Iand1 = 2*torch.sum(target[0]*temp)
Ior1 = torch.sum(target[0]) + torch.sum(temp)
if Ior1 == 0:
IoU = 0
return IoU
else:
IoU1 = Iand1.float()/Ior1.float()
IoU = IoU + IoU1
return IoU.detach().cpu().numpy()
def pixel_acc(pred,target):
# pixel accuracy
temp = pred > 0.5
temp = temp.long()
b = pred.shape[0]
precision,recall,F1 = 0.0,0.0,0.0
for i in range(0,b):
TP = torch.sum(target[i]*temp[i])
FP = torch.sum((1-target[i])*temp[i])
FN = torch.sum(target[i]*(1-temp[i]))
precision = precision + TP.float() / (TP.float()+FP.float())
recall = recall + TP.float() / (FN.float()+TP.float())
F1 = F1 + 2*(precision*recall)/(recall+precision)
return precision.detach().cpu().numpy(), recall.detach().cpu().numpy(), F1.detach().cpu().numpy()
#2.22添加数据集
class XuDataset(Dataset):
def __init__(self, img_dir):
self.img_dir = img_dir
self.data_names = listdir(os.path.join(self.img_dir, 'data'))
self.label_names = listdir(os.path.join(self.img_dir, 'label'))
self.bound_names = listdir(os.path.join(self.img_dir, 'bound'))
def __len__(self):
return len(listdir(os.path.join(self.img_dir, 'data')))
def __getitem__(self, idx):
img_name = self.data_names[idx]
img_path = os.path.join(self.img_dir, 'data', img_name)
# print(img_name)
# image = read_image(img_path)
# image = image.float()
# image = image / image.max()
trans = transforms.Compose([transforms.ToTensor()])
image = Image.open(img_path).convert('RGB')
image = trans(image)
# print(image.shape)
# print(torch.max(image))
mask_name = self.label_names[idx]
mask_path = os.path.join(self.img_dir, 'label', mask_name)
# print(mask_name)
# label = read_image(mask_path)
# label = label.float()
# label = label / 255.0
label = Image.open(mask_path).convert('1')
label = trans(label)
# print(label.shape)
# print(torch.max(label))
bound_name = self.bound_names[idx]
bound_path = os.path.join(self.img_dir, 'bound', bound_name)
# print(bound_name)
# bound = read_image(bound_path)
# bound = bound.float()
# bound = bound / 255.0 # 学习label的处理方式先试一下
bound = Image.open(bound_path).convert('1')
bound = trans(bound)
# print(bound.shape)
# print(torch.max(bound))
# return image[0].unsqueeze(0), label[0].unsqueeze(0), bound[0].unsqueeze(0)
# return image.unsqueeze(0), label.unsqueeze(0), bound.unsqueeze(0)
return image, label, bound
class ciaData(Dataset): #继承Dataset
def __init__(self, root_dir, transform=None): #__init__是初始化该类的一些基础参数
self.root_dir = root_dir #文件目录
self.transform = transform #变换
self.images = os.listdir(os.path.join(self.root_dir, 'data'))#目录里的所有文件
def __len__(self):#返回整个数据集的大小
return len(self.images)
def __getitem__(self,index):#根据索引index返回dataset[index]
image_index = self.images[index]#根据索引index获取该图片
img_path = os.path.join(self.root_dir, 'data', image_index)#获取索引为index的图片的路径名
lab_path = os.path.join(self.root_dir, 'label', image_index)
bou_path = os.path.join(self.root_dir, 'bound', image_index)
img = Image.open(img_path).convert('RGB')# 读取该图片
label = Image.open(lab_path).convert('1')
bound = Image.open(bou_path).convert('1')
# if (self.transform == None):
# T=transforms.Compose([
# transforms.Resize((512, 512)),
# transforms.ToTensor()
# ])
# else:
T1 = transforms.Compose([
# transforms.Resize((512, 512)),
transforms.ToTensor()
])
T = self.transform
# 如果没有设置,就有个T的默认设置,否则就用给定的
img = T(img)#对样本进行变换
label = T1(label)
bound = T1(bound)
return img,label,bound #返回该样本 | 14,754 | 33.717647 | 149 | py |
RandStainNA | RandStainNA-master/segmentation/CA25net.py | import numpy as np
import os
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.checkpoint as cp
from torchvision import models
from torch.utils.data import Dataset,DataLoader,TensorDataset
from torch.autograd import Variable
from os import listdir
from torchvision.io import read_image
device = 'cuda:0' if torch.cuda.is_available() else 'cpu'
class unetConv2(nn.Module):
def __init__(self, in_size, out_size, is_batchnorm, n=2, ks=3, stride=1, padding=1):
super(unetConv2, self).__init__()
self.n = n
self.ks = ks
self.stride = stride
self.padding = padding
s = stride
p = padding
if is_batchnorm:
for i in range(1, n + 1):
conv = nn.Sequential(nn.Conv2d(in_size, out_size, ks, s, p),
nn.BatchNorm2d(out_size),
nn.ReLU(inplace=True), )
setattr(self, 'conv%d' % i, conv)
in_size = out_size
else:
for i in range(1, n + 1):
conv = nn.Sequential(nn.Conv2d(in_size, out_size, ks, s, p),
nn.ReLU(inplace=True), )
setattr(self, 'conv%d' % i, conv)
in_size = out_size
# initialise the blocks
for m in self.children():
init_weights(m, init_type='kaiming')
def forward(self, inputs):
x = inputs
for i in range(1, self.n + 1):
conv = getattr(self, 'conv%d' % i)
x = conv(x)
return x
class unetUp_origin(nn.Module):
def __init__(self, in_size, out_size, is_deconv=False, n_concat=2):
super(unetUp_origin, self).__init__()
# self.conv = unetConv2(out_size*2, out_size, False)
if is_deconv:
self.conv = unetConv2(in_size + (n_concat - 2) * out_size, out_size, False)
self.up = nn.ConvTranspose2d(in_size, out_size, kernel_size=4, stride=2, padding=1)
else:
self.conv = unetConv2(in_size + (n_concat - 2) * out_size, out_size, False)
self.up = nn.UpsamplingBilinear2d(scale_factor=2)
# initialise the blocks
for m in self.children():
if m.__class__.__name__.find('unetConv2') != -1: continue
init_weights(m, init_type='kaiming')
def forward(self, inputs0, *input):
outputs0 = self.up(inputs0)
for i in range(len(input)):
outputs0 = torch.cat([outputs0, input[i]], 1)
return self.conv(outputs0)
class info_agg(nn.Module):
def __init__(self, in_size, out_size=256):
# information aggregation module
# 1 is mask, 2 is contour
super(info_agg,self).__init__()
self.convshare = unetConv2(in_size*3, out_size, True)
self.conv11 = unetConv2(out_size, out_size, False)
self.conv12 = unetConv2(out_size, out_size, False)
self.conv13 = unetConv2(out_size, out_size, False)
# initialise the blocks
for m in self.children():
if m.__class__.__name__.find('unetConv2') != -1: continue
init_weights(m, init_type='kaiming')
def forward(self, input1, input2, input3):
fshare = self.convshare(torch.cat([input1, input2, input3], 1))
return self.conv11(fshare), self.conv12(fshare), self.conv13(fshare)
from torch.nn import init
def weights_init_normal(m):
classname = m.__class__.__name__
#print(classname)
if classname.find('Conv') != -1:
init.normal_(m.weight.data, 0.0, 0.02)
elif classname.find('Linear') != -1:
init.normal_(m.weight.data, 0.0, 0.02)
elif classname.find('BatchNorm') != -1:
init.normal_(m.weight.data, 1.0, 0.02)
init.constant_(m.bias.data, 0.0)
def weights_init_kaiming(m):
classname = m.__class__.__name__
#print(classname)
if classname.find('Conv') != -1:
init.kaiming_normal_(m.weight.data, a=0, mode='fan_in')
elif classname.find('Linear') != -1:
init.kaiming_normal_(m.weight.data, a=0, mode='fan_in')
elif classname.find('BatchNorm') != -1:
init.normal_(m.weight.data, 1.0, 0.02)
init.constant_(m.bias.data, 0.0)
def init_weights(net, init_type='normal'):
#print('initialization method [%s]' % init_type)
if init_type == 'normal':
net.apply(weights_init_normal)
elif init_type == 'kaiming':
net.apply(weights_init_kaiming)
else:
raise NotImplementedError('initialization method [%s] is not implemented' % init_type)
class Cia(nn.Module):
def __init__(self, in_channels=1, n_classes=1, feature_scale=4, is_deconv=True, is_batchnorm=True,is_ds=False):
super(Cia, self).__init__()
self.is_deconv = is_deconv
self.in_channels = in_channels
self.is_batchnorm = is_batchnorm
self.is_ds = is_ds
self.feature_scale = feature_scale
filters = [32, 64, 128, 256, 512]
info_channel = 256
#filters = [64, 128, 256, 512, 1024]
# filters = [int(x / self.feature_scale) for x in filters]
# downsampling
self.conv00 = unetConv2(self.in_channels, filters[0], self.is_batchnorm)
self.maxpool0 = nn.MaxPool2d(kernel_size=2)
self.conv10 = unetConv2(filters[0], filters[1], self.is_batchnorm)
self.maxpool1 = nn.MaxPool2d(kernel_size=2)
self.conv20 = unetConv2(filters[1], filters[2], self.is_batchnorm)
self.maxpool2 = nn.MaxPool2d(kernel_size=2)
self.conv30 = unetConv2(filters[2], filters[3], self.is_batchnorm)
self.maxpool3 = nn.MaxPool2d(kernel_size=2)
self.conv40 = unetConv2(filters[3], filters[4], self.is_batchnorm)
# upsampling
# mask path
self.up_concat01 = unetUp_origin(filters[1], filters[0], self.is_deconv)
self.up_concat11 = unetUp_origin(filters[2], filters[1], self.is_deconv)
self.up_concat21 = unetUp_origin(filters[3], filters[2], self.is_deconv)
self.up_concat31 = unetUp_origin(filters[4], filters[3], self.is_deconv)
self.up_concat02 = unetUp_origin(filters[1], filters[0], self.is_deconv, 3)
self.up_concat12 = unetUp_origin(filters[2], filters[1], self.is_deconv, 3)
self.up_concat22 = unetUp_origin(filters[3], filters[2], self.is_deconv, 3)
self.up_concat03 = unetUp_origin(filters[1], filters[0], self.is_deconv, 4)
self.up_concat13 = unetUp_origin(filters[2], filters[1], self.is_deconv, 4)
self.up_concat04 = unetUp_origin(filters[1], filters[0], self.is_deconv, 5)
# boundary path
self.upb_concat01 = unetUp_origin(filters[1], filters[0], self.is_deconv)
self.upb_concat11 = unetUp_origin(filters[2], filters[1], self.is_deconv)
self.upb_concat21 = unetUp_origin(filters[3], filters[2], self.is_deconv)
self.upb_concat31 = unetUp_origin(filters[4], filters[3], self.is_deconv)
self.upb_concat02 = unetUp_origin(filters[1], filters[0], self.is_deconv, 3)
self.upb_concat12 = unetUp_origin(filters[2], filters[1], self.is_deconv, 3)
self.upb_concat22 = unetUp_origin(filters[3], filters[2], self.is_deconv, 3)
self.upb_concat03 = unetUp_origin(filters[1], filters[0], self.is_deconv, 4)
self.upb_concat13 = unetUp_origin(filters[2], filters[1], self.is_deconv, 4)
self.upb_concat04 = unetUp_origin(filters[1], filters[0], self.is_deconv, 5)
# three class path
self.upt_concat31 = unetUp_origin(filters[4], filters[3], self.is_deconv)
self.upt_concat22 = unetUp_origin(filters[3], filters[2], self.is_deconv, 3)
self.upt_concat13 = unetUp_origin(filters[2], filters[1], self.is_deconv, 4)
self.upt_concat04 = unetUp_origin(filters[1], filters[0], self.is_deconv, 5)
# final conv (without any concat)
self.final_4 = nn.Conv2d(filters[0], n_classes, 1)
self.finalb_1 = nn.Conv2d(filters[0], n_classes, 1)
self.finalb_2 = nn.Conv2d(filters[0], n_classes, 1)
self.finalb_3 = nn.Conv2d(filters[0], n_classes, 1)
self.finalb_4 = nn.Conv2d(filters[0], n_classes, 1)
self.finalt_4 = nn.Conv2d(filters[0], n_classes, 1)
# initialise weights
for m in self.modules():
if isinstance(m, nn.Conv2d):
init_weights(m)
elif isinstance(m, nn.BatchNorm2d):
init_weights(m)
def forward(self, inputs):
# column : 0
X_00 = self.conv00(inputs)
maxpool0 = self.maxpool0(X_00)
X_10 = self.conv10(maxpool0)
maxpool1 = self.maxpool1(X_10)
X_20 = self.conv20(maxpool1)
maxpool2 = self.maxpool2(X_20)
X_30 = self.conv30(maxpool2)
maxpool3 = self.maxpool3(X_30)
X_40 = self.conv40(maxpool3)
# Semantic segmentation path
# column : 1
X_01 = self.up_concat01(X_10, X_00)
X_11 = self.up_concat11(X_20, X_10)
X_21 = self.up_concat21(X_30, X_20)
X_31 = self.up_concat31(X_40, X_30)
Y_01 = self.upb_concat01(X_10, X_00)
Y_11 = self.upb_concat11(X_20, X_10)
Y_21 = self.upb_concat21(X_30, X_20)
Y_31 = self.upb_concat31(X_40, X_30)
Z_31 = self.upt_concat31(X_40, X_30)
# column : 2
X_02 = self.up_concat02(X_11, X_00, X_01)
X_12 = self.up_concat12(X_21, X_10, X_11)
X_22 = self.up_concat22(X_31, X_20, X_21)
Y_02 = self.upb_concat02(Y_11, X_00, Y_01)
Y_12 = self.upb_concat12(Y_21, X_10, Y_11)
Y_22 = self.upb_concat22(Y_31, X_20, Y_21)
Z_22 = self.upt_concat22(Z_31, X_20, Y_21)
# column : 3
X_03 = self.up_concat03(X_12, X_00, X_01, X_02)
X_13 = self.up_concat13(X_22, X_10, X_11, X_12)
Y_03 = self.upb_concat03(Y_12, X_00, Y_01, Y_02)
Y_13 = self.upb_concat13(Y_22, X_10, Y_11, Y_12)
Z_13 = self.upt_concat13(Z_22, X_10, Y_11, Y_12)
# column : 4
X_04 = self.up_concat04(X_13, X_00, X_01, X_02, X_03)
Y_04 = self.upb_concat04(Y_13, X_00, Y_01, Y_02, Y_03)
Z_04 = self.upt_concat04(Z_13, X_00, Y_01, Y_02, Y_03)
# final layer
final_m = self.final_4(X_04)
final_b1 = self.finalb_1(Y_01)
final_b2 = self.finalb_2(Y_02)
final_b3 = self.finalb_3(Y_03)
final_b4 = self.finalb_4(Y_04)
final_t = self.finalt_4(Z_04)
if self.is_ds:
return torch.sigmoid(final_m), torch.sigmoid(torch.cat([final_b4,final_t],1))
else:
return torch.sigmoid(final_m), torch.sigmoid(torch.cat([(0.5*final_b1+0.75*final_b2+1.25*final_b3+1.5*final_b4)/4,final_t],1))
# loss function
# boundary loss
def _bd_loss(pred, target):
b = pred.shape[0]
IoU = 0.0
for i in range(0,b):
#compute the IoU of the foreground
Iand1 = -(torch.sum(target[i]*torch.log(pred[i]+1e-6) + (1-target[i])*torch.log(1-pred[i]+1e-6)))
IoU = IoU + Iand1/512/512
return IoU/b
class BD(torch.nn.Module):
def __init__(self, size_average = True):
super(BD, self).__init__()
self.size_average = size_average
def forward(self, pred, target):
return _bd_loss(pred, target)
def bd_loss(pred,label):
loss = BD(size_average=True)
bd_out = loss(pred, label)
return bd_out
# CIA loss
def _cia_loss(pred, target, w):
b = pred.shape[0]
IoU = 0.0
for i in range(0,b):
#compute the IoU of the foreground
classes = target[i] > 0
Iand1 = -torch.sum(classes*torch.log(pred[i][0]+1e-6)/(torch.sum(classes)+1) + ~classes*torch.log(1-pred[i][0]+1e-6)/(torch.sum(~classes)+1))
# print('class{}: {}'.format(j,Iand1))
IoU = IoU + (1-w)*Iand1
classes = target[i] > 1
Iand1 = -torch.sum(classes*torch.log(pred[i][1]+1e-6)/(torch.sum(classes)+1) + ~classes*torch.log(1-pred[i][1]+1e-6)/(torch.sum(~classes)+1))
# print('class2: {}'.format(Iand1))
IoU = IoU + w*Iand1
return IoU/b
def _st_loss(pred, target, thresh):
# Smooth Truncated Loss
b = pred.shape[0]
ST = 0.0
for i in range(0,b):
#compute the IoU of the foreground
w = target[i] > 1
pt = w * pred[i][1]
w = target[i] > 0
pt = pt + w*pred[i][0]
certain = pt > thresh
Iand1 = -(torch.sum( certain*torch.log(pt+1e-6) + ~certain*(np.log(thresh) - (1-(pt/thresh)**2)/2) ))
ST = ST + Iand1/512/512
return ST/b
class CIA(torch.nn.Module):
def __init__(self, size_average = True):
super(CIA, self).__init__()
self.size_average = size_average
def forward(self, pred, target, w, thresh, lw):
# print(_cia_loss(pred, target), _st_loss(pred, target, thresh))
return _cia_loss(pred, target, w) + lw * _st_loss(pred, target, thresh)
def cia_loss(pred, label, w, thr=0.2, lamb=0.5):
Cia_loss = CIA(size_average=True)
cia_out = Cia_loss(pred, label, w, thr, lamb)
return cia_out
# IOU loss
def _iou(pred, target, size_average = True):
b = pred.shape[0]
IoU = 0.0
for i in range(0,b):
#compute the IoU of the foreground
w = target[i] == 0
Iand1 = torch.sum(target[i]*pred[i])
Ior1 = torch.sum(target[i]) + torch.sum(pred[i])-Iand1
IoU1 = Iand1/Ior1
#IoU loss is (1-IoU1)
IoU = IoU + (1-IoU1)
return IoU/b
class IOU(torch.nn.Module):
def __init__(self, size_average = True):
super(IOU, self).__init__()
self.size_average = size_average
def forward(self, pred, target):
return _iou(pred, target, self.size_average)
def my_loss(pred,label):
iou_loss = IOU(size_average=True)
iou_out = iou_loss(pred, label)
# print("iou_loss:", iou_out.data.cpu().numpy())
return iou_out
# accuracy
def dice_acc(pred,target):
# dice coefficient
temp = pred > 0.5
temp = temp.long()
IoU = 0.0
Iand1 = 2*torch.sum(target[0]*temp)
Ior1 = torch.sum(target[0]) + torch.sum(temp)
if Ior1 == 0:
IoU = 0
return IoU
else:
IoU1 = Iand1.float()/Ior1.float()
IoU = IoU + IoU1
return IoU.detach().cpu().numpy()
# Dataset
# class XuDataset(Dataset):
# def __init__(self, img_dir):
# self.img_dir = img_dir
#
# def __len__(self):
# return len(listdir(os.path.join(self.img_dir, 'data')))
#
# def __getitem__(self, idx):
# img_path = os.path.join(self.img_dir, 'data', 'i'+str(idx+1)+'.png')
# image = read_image(img_path)
# image = image.float()
# image = image / image.max()
#
# mask_path = os.path.join(self.img_dir, 'label', 'm'+str(idx+1)+'.png')
# label = read_image(mask_path)
#
# bound_path = os.path.join(self.img_dir, 'bound', 'b'+str(idx+1)+'.png')
# bound = read_image(bound_path)
# return image[0].unsqueeze(0), label[0].unsqueeze(0), bound[0].unsqueeze(0)
class XuDataset(Dataset):
def __init__(self, img_dir):
self.img_dir = img_dir
self.data_names = listdir(os.path.join(self.img_dir, 'data'))
self.label_names = listdir(os.path.join(self.img_dir, 'label'))
self.bound_names = listdir(os.path.join(self.img_dir, 'bound'))
def __len__(self):
return len(listdir(os.path.join(self.img_dir, 'data')))
def __getitem__(self, idx):
img_name = self.data_names[idx]
img_path = os.path.join(self.img_dir, 'data', img_name)
image = read_image(img_path)
image = image.float()
image = image / image.max()
mask_name = self.label_names[idx]
mask_path = os.path.join(self.img_dir, 'label', mask_name)
label = read_image(mask_path)
label = label.float()
label = label / 255.0
bound_name = self.bound_names[idx]
bound_path = os.path.join(self.img_dir, 'bound', bound_name)
bound = read_image(bound_path)
return image[0].unsqueeze(0), label[0].unsqueeze(0), bound[0].unsqueeze(0)
class TestDataset(Dataset):
def __init__(self, img_dir):
self.img_dir = img_dir
self.data_names = listdir(os.path.join(self.img_dir))
def __len__(self):
return len(self.data_names)
def __getitem__(self, idx):
img_name = self.data_names[idx]
img_path = os.path.join(self.img_dir, img_name)
image = read_image(img_path)
image = image.float()
image = image / image.max()
return image[0].unsqueeze(0),img_name
| 16,634 | 34.697425 | 149 | py |
RandStainNA | RandStainNA-master/segmentation/colornorm/Reinhard_quick.py | import cv2
import numpy as np
import time
import copy
from skimage import color, io
from PIL import Image
# 到时候可以尝试numpy优化
def quick_loop(image, image_avg, image_std, temp_avg, temp_std, isHed=False):
# for k in range(3):
# image_new[:,:,k] = (image[:,:,k] - image_avg[k]) * (temp_std[k] / (image_std[k]) )+ temp_avg[k]
# print(type(image),image.shape)
# print(image_avg)
image = (image - np.array(image_avg))*(np.array(temp_std)/np.array(image_std))+np.array(temp_avg)
if isHed : #1.10添加,针对hed进行特殊操作
pass
else:
image = np.clip(image, 0, 255).astype(np.uint8) #这边的问题
return image
# @numba.jit(nopython=True)
# 原始版本,3重for循环
def for_loop(image, height, width, channel, image_avg, image_std, temp_avg, temp_std):
for i in range(0, height):
for j in range(0, width):
for k in range(0, channel):
t = image[i, j, k]
if abs(image_std[k]) < 0.0001:
image_std[k] = 0.0001 # 下面有255保护
t = (t - image_avg[k]) * (temp_std[k] / image_std[k]) + temp_avg[k]
t = 0 if t < 0 else t
t = 255 if t > 255 else t
image[i, j, k] = t
# cv2.imwrite('test1.png', image)
return image.astype(np.uint8)
# @torch.no_grad()
def getavgstd(image):
avg = []
std = []
image_avg_l = np.mean(image[:, :, 0])
image_std_l = np.std(image[:, :, 0])
image_avg_a = np.mean(image[:, :, 1])
image_std_a = np.std(image[:, :, 1])
image_avg_b = np.mean(image[:, :, 2])
image_std_b = np.std(image[:, :, 2])
avg.append(image_avg_l)
avg.append(image_avg_a)
avg.append(image_avg_b)
std.append(image_std_l)
std.append(image_std_a)
std.append(image_std_b)
return (avg, std)
# 初次实验模板数值
# target_avg: [171.03409996811226, 151.29910714285714, 109.92771444515306]
# target_std: [37.22305651345217, 9.072264487990362, 8.478056840434128]
def reinhard_cn(image_path, temp_path, save_path, isDebug=False, color_space=None):
isHed = False
image = cv2.imread(image_path)
if isDebug:
cv2.imwrite('source.png',image)
template = cv2.imread(temp_path) ### template images
if isDebug:
cv2.imwrite('template.png',template)
if color_space == 'LAB':
image = cv2.cvtColor(image, cv2.COLOR_BGR2LAB)
template = cv2.cvtColor(template, cv2.COLOR_BGR2LAB)
# cv2.imwrite('lab.png',image)
elif color_space == 'HED':
isHed = True
image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB) #color.rgb2hed需要rgb的ndarray作为输入
template = cv2.cvtColor(template,cv2.COLOR_BGR2RGB)
# image = np.array(Image.open(image_path))
# template = np.array(Image.open(temp_path))
image = color.rgb2hed(image) #归一化
template = color.rgb2hed(template) #归一化,所以下边要注意
elif color_space == 'HSV':
image = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
template = cv2.cvtColor(template, cv2.COLOR_BGR2HSV)
elif color_space == 'GRAY': #如果是灰色,下面都不用处理了
image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
cv2.imwrite(save_path,image)
return
image_avg, image_std = getavgstd(image)
template_avg, template_std = getavgstd(template)
# template_avg, template_std = [171.03409996811226, 151.29910714285714, 109.92771444515306], [37.22305651345217, 9.072264487990362, 8.478056840434128]
if isDebug: #正常
print("isDebug!!!")
print('source_avg: ', image_avg)
print('source_std: ', image_std)
print('target_avg: ', template_avg)
print('target_std: ', template_std)
# 注意,python函数传矩阵和list一样也会是内存空间相同,所以后面可能需要注意一下
# quick_loop快速颜色归一化操作
image = quick_loop(image, image_avg, image_std, template_avg, template_std, isHed=isHed)
# origin版颜色归一化操作
# height, width, channel = image.shape
# image_origin = for_loop(image, height, width, channel, image_avg, image_std, template_avg, template_std)
if color_space == 'LAB':
image = cv2.cvtColor(image, cv2.COLOR_LAB2BGR)
cv2.imwrite(save_path,image)
elif color_space == 'HED':
image = color.hed2rgb(image) # 转成0-1了,所以需要恢复一下
imin = image.min()
imax = image.max()
image = (255 * (image - imin) / (imax - imin)).astype('uint8')
image = Image.fromarray(image)
image.save(save_path)
elif color_space == 'HSV':
image = cv2.cvtColor(image, cv2.COLOR_HSV2BGR)
cv2.imwrite(save_path,image)
if isDebug:
cv2.imwrite('results.png', image)
# 人工指定模板的归一化
def reinhard_cn_temp(image_path, temp_path, save_path, isDebug=False):
image = cv2.imread(image_path)
if isDebug:
cv2.imwrite('source.png',image)
image = cv2.cvtColor(image, cv2.COLOR_BGR2LAB)
cv2.imwrite('lab.png',image)
# template = cv2.imread(temp_path) ### template images
# if isDebug:
# cv2.imwrite('template.png',template)
# template = cv2.cvtColor(template, cv2.COLOR_BGR2LAB)
image_avg, image_std = getavgstd(image)
# template_avg, template_std = getavgstd(template)
template_avg, template_std = [159.685, 150.534, 116.994], [36.815, 8.078, 6.072] #random 3000的结果
if isDebug:
print("isDebug!!!")
print('source_avg: ', image_avg)
print('source_std: ', image_std)
print('target_avg: ', template_avg)
print('target_std: ', template_std)
# 注意,python函数传矩阵和list一样也会是内存空间相同,所以后面可能需要注意一下
# quick_loop快速颜色归一化操作
image = quick_loop(image, image_avg, image_std, template_avg, template_std)
# origin版颜色归一化操作
# height, width, channel = image.shape
# image_origin = for_loop(image, height, width, channel, image_avg, image_std, template_avg, template_std)
image = cv2.cvtColor(image, cv2.COLOR_LAB2BGR)
cv2.imwrite(save_path,image)
if isDebug:
cv2.imwrite('results.png', image)
if __name__ == '__main__':
# demo
image_path = r'/mnt/pycharm_project_colorNorm/output/colorNorm_effect/TUM-AIQIMVKD_source.png'
# image_path = r'/mnt/nine_class/train_use_2w/TUM/TUM-ADEMNHMK.png'
# image_path = './demo/other/TUM-TCGA-CVATFAAT.png'
# temp_path = r'/mnt/pycharm_project_colorNorm/output/colorNorm_effect/TUM-CEQTLTKV_target_1.png'
# temp_path = r'/mnt/pycharm_project_colorNorm/output/colorNorm_effect/TUM-CNPQPHGS_target2.png'
temp_path = './demo/other/TUM-AIQIMVKD_template.png'
# save_path = r'/mnt/pycharm_project_colorNorm/output/colorNorm_effect/source_target2_effect.png'
save_path = './save/other/norm_TUM-TCGA-CVATFAAT.png'
t1 = time.time()
reinhard_cn(image_path, temp_path, save_path, isDebug=True)
t2 = time.time()
print(t2-t1)
print('Color Norm finished!!!!') | 6,783 | 37.11236 | 154 | py |
Carbon-Trading-Verfication | Carbon-Trading-Verfication-master/scotland_carbon/src/grid_search.py | import numpy as np
import pandas as pd
import joblib
import itertools
from sklearn.metrics import mean_squared_error, mean_absolute_error, r2_score
from sklearn.ensemble import GradientBoostingRegressor
from sklearn.ensemble import RandomForestRegressor
from sklearn.model_selection import cross_validate, ShuffleSplit
from tqdm import tqdm
from sklearn.metrics import make_scorer
import xgboost
from utils import load_csv_to_pd, FEATURES_DICT
# Hyperparameter space we search through
brt_n_estimators = [x for x in range(100, 3000, 100)]
brt_lrs = [0.1, 0.05, 0.005]
brt_max_depths = [1, 2, 3, 5, 10]
xgb_max_depths = [1,3,6,8,10]
xgb_min_child_weight = [2,5,10]
xgb_subsamples = [0.4,0.6,0.8]
xgb_etas = [0.01, 0.02, 0.1]
rf_n_estimators_space = [x for x in range(100, 3000, 100)]
rf_max_features = [1/2,1/3,1/6,1,2,3,4,5,6,7,8,9,10]
# Location of data source
csv_file_path = r"Carbon-Trading-Verification\scotland_carbon\data\S1AIW_S2AL2A_DEM_IDX_SOCS_SG_L_INVEN_AGB_300m_processed.csv"
# Methods that create the corresponding machine learning model upon providing parameters
def create_BRT_model(n_estimators, lr, max_depth):
return GradientBoostingRegressor(
n_estimators=n_estimators,
learning_rate=lr,
max_depth=max_depth,
random_state=0,
loss='ls'
)
def create_XGB_model(min_child_weight, max_depth, subsample, eta):
return xgboost.XGBRegressor(
min_child_weight=min_child_weight,
n_estimators=451,
max_depth=max_depth,
eta=eta,
subsample=subsample
)
def create_RF_model(n_estimators, max_features):
return RandomForestRegressor(
n_estimators=n_estimators,
max_features=max_features
)
# Perform grid search
def grid_search(feature, pred, ml_model, isLog):
# Load data to pandas data frame
data_df = load_csv_to_pd(csv_file_path)
# Obtain the list of features corresponding to the specified model
features_list = FEATURES_DICT[feature]
# Get ground truth data name
ground_truth_col = 'AGB' if pred == 'agb' else 'OC'
# Create training data
X = data_df[features_list].values.astype(np.float32)
# Create ground truth data
y = np.log(data_df[ground_truth_col].values.astype(np.float32)) if isLog else data_df[ground_truth_col].values.astype(np.float32)
# Specify scoring metrics
scoring = {
'mean_squared_error': make_scorer(mean_squared_error),
'mean_absolute_error': make_scorer(mean_absolute_error),
'r2_score': make_scorer(r2_score)
}
# Cross validation splits
cv_splits = 5
cv = ShuffleSplit(n_splits=cv_splits, test_size=0.2)
# Cross validation results
results_df = pd.DataFrame(columns=['idx', 'lr', 'n_estimators', 'max_depth', 'min_child_weight', 'max_feature', 'fit_time', 'score_time', 'test_root_mean_squared_error', 'test_mean_absolute_error', 'test_r2_score'])
idx = 0
# Grid search for BRT models
if ml_model == 'brt':
# We iterate through every combination of hyperparameters
for lr, n_estimators, max_depth in tqdm(list(itertools.product(brt_lrs, brt_n_estimators, brt_max_depths))):
# Create BRT model with the corresponding hyperparameter
brt = create_BRT_model(n_estimators,lr,max_depth)
# Get validation scores through cross validation
scores = cross_validate(brt, X, y, scoring=scoring, cv=cv, n_jobs=-1)
# result_dict to store results for this iteration
result_dict = {}
# Store the hyperparamters used in this iteration
result_dict['idx'] = idx
result_dict['lr'] = lr
result_dict['n_estimators'] = n_estimators
result_dict['max_depth'] = max_depth
# Store the time used in this iteration
result_dict['fit_time'] = sum(scores['fit_time']) / cv_splits
result_dict['score_time'] = sum(scores['score_time']) / cv_splits
# Store the evaluation metrics for this set of hyperparameter
result_dict['test_root_mean_squared_error'] = np.sqrt(sum(scores['test_mean_squared_error']) / cv_splits)
result_dict['test_mean_absolute_error'] = sum(scores['test_mean_absolute_error']) / cv_splits
result_dict['test_r2_score'] = sum(scores['test_r2_score']) / cv_splits
# Append results for this iteration to all results
results_df = results_df.append(result_dict, ignore_index=True)
print('IDX: {:d} | lr {:.4f} | n_estimators {:.4f} | max_depth {:.4f} | RMSE {:.4f} | MAE {:.4f} | R2 {:.4f}'.format(idx, lr, n_estimators, max_depth, result_dict['test_root_mean_squared_error'], result_dict['test_mean_absolute_error'], result_dict['test_r2_score']))
idx += 1
# Grid search for XGBoost models
elif ml_model == 'xgb' :
# We iterate through every combination of hyperparameters
for mcw, md, ssp, eta in tqdm(list(itertools.product(xgb_min_child_weight, xgb_max_depths, xgb_subsamples, xgb_etas))):
# Create XGB model with the corresponding hyperparameter
model = create_XGB_model(mcw,md,ssp,eta)
# Get validation scores through cross validation
scores = cross_validate(model, X, y, scoring=scoring, cv=cv, n_jobs=-1)
# result_dict to store results for this iteration
result_dict = {}
# Store the hyperparamters used in this iteration
result_dict['min_child_weight'] = mcw
result_dict['eta'] = eta
result_dict['subsample'] = ssp
result_dict['max_depth'] = md
# Store the time used in this iteration
result_dict['fit_time'] = sum(scores['fit_time']) / cv_splits
result_dict['score_time'] = sum(scores['score_time']) / cv_splits
# Store the evaluation metrics for this set of hyperparameter
result_dict['test_root_mean_squared_error'] = np.sqrt(sum(scores['test_mean_squared_error']) / cv_splits)
result_dict['test_mean_absolute_error'] = sum(scores['test_mean_absolute_error']) / cv_splits
result_dict['test_r2_score'] = sum(scores['test_r2_score']) / cv_splits
# Append results for this iteration to all results
results_df = results_df.append(result_dict, ignore_index=True)
print('IDX: {:d} | lr {:.4f} | n_estimators {:.4f} | max_depth {:.4f} | RMSE {:.4f} | MAE {:.4f} | R2 {:.4f}'.format(mcw, eta, ssp, md, result_dict['test_root_mean_squared_error'], result_dict['test_mean_absolute_error'], result_dict['test_r2_score']))
idx += 1
# Grid search for RF models
elif ml_model == 'rf':
for n_estimators, max_fea in tqdm(list(itertools.product(rf_n_estimators_space, rf_max_features))):
# Create BRT model with the corresponding hyperparameter
rf = create_RF_model(n_estimators,max_fea)
# Get validation scores through cross validation
scores = cross_validate(rf, X, y, scoring=scoring, cv=cv, n_jobs=-1)
# result_dict to store results for this iteration
result_dict = {}
# Store the hyperparamters used in this iteration
result_dict['idx'] = idx
result_dict['n_estimators'] = n_estimators
result_dict['max_feature'] = max_fea
# Store the time used in this iteration
result_dict['fit_time'] = sum(scores['fit_time']) / cv_splits
result_dict['score_time'] = sum(scores['score_time']) / cv_splits
# Store the evaluation metrics for this set of hyperparameter
result_dict['test_root_mean_squared_error'] = np.sqrt(sum(scores['test_mean_squared_error']) / cv_splits)
result_dict['test_mean_absolute_error'] = sum(scores['test_mean_absolute_error']) / cv_splits
result_dict['test_r2_score'] = sum(scores['test_r2_score']) / cv_splits
# Append results for this iteration to all results
results_df = results_df.append(result_dict, ignore_index=True)
print('IDX: {:d} | n_estimators {:.4f} | max_feature {:.4f} | RMSE {:.4f} | MAE {:.4f} | R2 {:.4f}'.format(idx, n_estimators, max_fea, result_dict['test_root_mean_squared_error'], result_dict['test_mean_absolute_error'], result_dict['test_r2_score']))
idx += 1
# Save result to csv
results_df.to_csv(feature + '_' + pred + '_' + ml_model + '_gridsearch.csv', index=False)
grid_search('MODEL_A', 'soc', 'xgb', True)
grid_search('MODEL_D', 'agb', 'rf', False) | 8,751 | 47.353591 | 279 | py |
Carbon-Trading-Verfication | Carbon-Trading-Verfication-master/scotland_carbon/src/train.py | import numpy as np
import pandas as pd
import joblib
from sklearn.metrics import mean_squared_error, mean_absolute_error, r2_score, mean_absolute_percentage_error
from sklearn.ensemble import GradientBoostingRegressor
from sklearn.ensemble import RandomForestRegressor
import xgboost
from utils import load_csv_to_pd, FEATURES_DICT
# Specify input data location
csv_file_path = r"Carbon-Trading-Verification\scotland_carbon\data\S1AIW_S2AL2A_DEM_IDX_SOCS_SG_L_INVEN_AGB_300m_processed.csv"
# Dictionary storing mapping from machine learning name to machine learning model
MODEL_DICT = {
'brt': GradientBoostingRegressor(
n_estimators=1000,
learning_rate=0.01,
max_depth=5,
random_state=0,
loss='ls'),
'xgb': xgboost.XGBRegressor(
min_child_weight=2,
n_estimators=451,
max_depth=6,
eta=0.01,
subsample=0.6),
'rf': RandomForestRegressor(
n_estimators=300,
max_features=7
)
}
# Map target variable names to column names in input csv file
PRED_DICT = {
'agb': 'AGB',
'soc': 'OC',
'socd': 'SG_15_30'
}
def train(feature, pred, model, log, model_path):
# Get the ground truth label for our target variable
ground_truth = PRED_DICT[pred]
# load data
data_df = load_csv_to_pd(csv_file_path)
# Create train test data set
mask = np.random.rand(len(data_df)) < 0.8
train_df = data_df[mask]
test_df = data_df[~mask]
# Get training data from feature list
X_train = train_df[FEATURES_DICT[feature]].values.astype(np.float32)
# Get training ground truth data, log for SOC predictions, no log for AGB predictions
Y_train = np.log(train_df[ground_truth].values) if log else train_df[ground_truth].values.astype(np.float32)
# Get test data from feature list
X_test = test_df[FEATURES_DICT[feature]].values.astype(np.float32)
# Get testing ground truth data
Y_test = np.log(test_df[ground_truth].values) if log else test_df[ground_truth].values.astype(np.float32)
# Get machine learning model and train on xtrain and ytrain data
m = MODEL_DICT[model].fit(X_train,Y_train)
# Get Evaluation metrics - RMSE, MAE, R2
test_rmse = np.sqrt(mean_squared_error(Y_test, m.predict(X_test)))
test_mae = mean_absolute_error(Y_test, m.predict(X_test))
test_r2 = r2_score(Y_test, m.predict(X_test))
# Format evluation metrics into result string and print out
result = 'TEST RESULTS: | RMSE {:.4f} | MAE {:.4f} | R2 {:.4f}'.format(test_rmse, test_mae, test_r2)
print(result)
# Construct output model file name and output result file name
out_name = model_path + '/' + pred + '_' + model + '_' + feature + '_' + '.joblib.pkl'
out_result = model_path + '/' + pred + '_' + model + '_' + feature + '_' + 'result.txt'
# Store model
joblib.dump(m, out_name, compress=3)
# Store result
with open(out_result, "w") as text_file:
text_file.write(result)
# Train parameters:
# (feature model, target variable (soc,agb,socd) , machine learning technique (brt, rf, xgb), log output or not, output path)
train('MODEL_A', 'soc', 'rf', True, '.')
| 3,173 | 37.240964 | 127 | py |
Carbon-Trading-Verfication | Carbon-Trading-Verfication-master/archive/Estimating-AGB/tuning.py |
def xgb_maxdepth_childweight(params, dtrain, dtest):
params['eval_metric'] = "mae"
gridsearch_params = [
(max_depth, min_child_weight)
for max_depth in range(2,12,2)
for min_child_weight in range(1,6)
]
# Define initial best params and MAE
min_mae = float("Inf")
best_params = None
for max_depth, min_child_weight in gridsearch_params:
print("CV with max_depth={}, min_child_weight={}".format(
max_depth,
min_child_weight))
# Update our parameters
params['max_depth'] = max_depth
params['min_child_weight'] = min_child_weight
# Run CV
cv_results = xgb.cv(
params,
dtrain,
num_boost_round=451,
seed=42,
nfold=5,
metrics={'mae'},
early_stopping_rounds=10
)
# Update best MAE
mean_mae = cv_results['test-mae-mean'].min()
boost_rounds = cv_results['test-mae-mean'].argmin()
print("\tMAE {} for {} rounds".format(mean_mae, boost_rounds))
if mean_mae < min_mae:
min_mae = mean_mae
best_params = (max_depth,min_child_weight)
print("Best params: {}, {}, MAE: {}".format(best_params[0], best_params[1], min_mae))
return best_params[0], best_params[1], min_mae
# printed: Best params: 6, 2, MAE: 98.3690782
def eta_sub_tuning(params, dtrain, dtest):
gridsearch_params = [
(subsample, eta)
for subsample in [i/10. for i in range(6,11)]
for eta in [0.01, 0.05, 0.1, 0.2, 0.3]
]
# Define initial best params and MAE
min_mae = float("Inf")
best_params = None
for subsample, eta in gridsearch_params:
print("CV with subsample={}, eta={}".format(
subsample,
eta))
# Update our parameters
params['subsample'] = subsample
params['eta'] = eta
# Run CV
cv_results = xgb.cv(
params,
dtrain,
num_boost_round=451,
seed=42,
nfold=5,
metrics={'mae'},
early_stopping_rounds=10
)
# Update best MAE
mean_mae = cv_results['test-mae-mean'].min()
boost_rounds = cv_results['test-mae-mean'].argmin()
print("\tMAE {} for {} rounds".format(mean_mae, boost_rounds))
if mean_mae < min_mae:
min_mae = mean_mae
best_params = (subsample,eta)
print("Best params: {}, {}, MAE: {}".format(best_params[0], best_params[1], min_mae))
return best_params[0], best_params[1], min_mae
#Best params: 1.0, 0.6, MAE: 98.3690782
def gamma_tuning(params, dtrain, dtest):
# Define initial best params and MAE
min_mae = float("Inf")
best_params = None
for gamma in [i/10. for i in range(0,5)]:
print("CV with gamma={}".format(gamma))
# Update our parameters
params['gamma'] = gamma
# Run CV
cv_results = xgb.cv(
params,
dtrain,
num_boost_round=451,
seed=42,
nfold=5,
metrics={'mae'},
early_stopping_rounds=10
)
# Update best MAE
mean_mae = cv_results['test-mae-mean'].min()
boost_rounds = cv_results['test-mae-mean'].argmin()
print("\tMAE {} for {} rounds".format(mean_mae, boost_rounds))
if mean_mae < min_mae:
min_mae = mean_mae
best_params = gamma
print("Best params: {}, MAE: {}".format(best_params, min_mae))
return best_params, min_mae
#Best params: 1.0, 0.6, MAE: 98.3690782
def hyperparameter_tuning():
region = 'quick'
tag = '300'
settings = [False, False, '', 'onehot', 'hypertuning']
data, labels = import_region_grid(region, tag)
labels = labels.values.ravel()
x_train, x_test, y_train, y_test = data_transformation(data, labels, settings)
dtrain = xgb.DMatrix(x_train, label=y_train)
dtest = xgb.DMatrix(x_test, label=y_test)
params = {
# Parameters that we are going to tune.
'max_depth':6,
'min_child_weight': 2,
'eta':.01,
'subsample': 1,
'colsample_bytree': 0.6,
'gamma':0.2,
# Other parameters
'objective':'reg:squarederror',
}
eta_vals = [0.01, 0.05, 0.1, 0.2, 0.3]
params['eval_metric'] = "mae"
gridsearch_params = [
(max_depth, min_child_weight, subsample, colsample, gamma, eta)
for max_depth in range(2,12,2)
for min_child_weight in range(1,6)
for subsample in [i/10. for i in range(6,11)]
for colsample in [i/10. for i in range(6,11)]
for gamma in [i/10. for i in range(0,5)]
for eta in eta_vals
]
scores = ['mse', 'r2']
# Define initial best params and MAE
min_mae = float("Inf")
best_params = None
for max_depth, min_child_weight, subsample, colsample, gamma, eta in gridsearch_params:
print("CV with max_depth={}, min_child_weight={}, subsample={}, colsample={}, gamma={}, eta={}".format(
max_depth,
min_child_weight,
subsample,
colsample,
gamma,
eta))
# Update our parameters
params['max_depth'] = max_depth
params['min_child_weight'] = min_child_weight
params['subsample'] = subsample
params['colsample'] = colsample
params['gamma'] = gamma
params['eta'] = eta
start = time.process_time()
# Run CV
cv_results = xgb.cv(
params,
dtrain,
num_boost_round=451,
seed=42,
nfold=5,
metrics={'mae'},
early_stopping_rounds=10
)
# Update best MAE
mean_mae = cv_results['test-mae-mean'].min()
boost_rounds = cv_results['test-mae-mean'].argmin()
print("\tMAE {} for {} time taken {}\n".format(mean_mae, boost_rounds, time.process_time() - start))
if mean_mae < min_mae:
min_mae = mean_mae
best_params = params
print("Best params: max_depth={}, min_child_weight={}, subsample={}, colsample={}, gamma={}, eta={}, MAE: {}".format(best_params['max_depth'], best_params['min_child_weight'], best_params['subsample'], best_params['colsample'], best_params['gamma'], best_params['eta'], min_mae))
#Best params: 0.01 (eta), MAE: 98.04678799999999
#Best params: 0 (gamma), MAE: 98.3484922
def hyperparameter_tuning_rf(data, labels):
settings = [False, False, '', 'onehot', 'hypertuning']
x_train, x_test, y_train, y_test = data_transformation(data, labels, settings)
base_model = RandomForestRegressor(n_estimators = 10, random_state = 42)
base_model.fit(x_train, y_train)
base_accuracy = evaluate(base_model, x_test, y_test)
n_estimators=[int(i) for i in range(100, 3100, 100)]
max_features = [1/6, 1/3, 1/2]
param_grid = {
# Parameters that we are going to tune.
'n_estimators':n_estimators,
'max_features': max_features,
}
# Create a based model
rf = RandomForestRegressor()
# Instantiate the grid search model
grid_search = GridSearchCV(estimator = rf, param_grid = param_grid,
cv = 10, n_jobs = -1, verbose = 2)
grid_search.fit(x_train, y_train)
print(grid_search.best_params_)
best_grid = grid_search.best_estimator_
grid_accuracy = evaluate(best_grid, x_test, y_test)
print(f'Best grid {best_grid}')
print('Improvement of {:0.2f}%.'.format( 100 * (grid_accuracy - base_accuracy) / base_accuracy))
#Best params: 0.01 (eta), MAE: 98.04678799999999
#Best params: 0 (gamma), MAE: 98.3484922
def tune_xgboost(x_train, x_test, y_train, y_test):
params = {
# Parameters that we are going to tune.
'max_depth':10,
'min_child_weight': 3,
'gamma': 0.2,
'eta':.05,
'subsample': 0.8,
# Other parameters
'objective':'reg:squarederror',
}
dtrain = xgb.DMatrix(x_train, label=y_train)
dtest = xgb.DMatrix(x_test, label=y_test)
start = time.process_time()
max_depth,min_child_weight, mae = xgb_maxdepth_childweight(params, dtrain, dtest)
params['max_depth'] = max_depth
params['min_child_weight'] = min_child_weight
subsample, eta, mae = eta_sub_tuning(params, dtrain, dtest)
params['subsample'] = subsample
params['eta'] = eta
gamma, mae = gamma_tuning(params, dtrain, dtest)
params['gamma'] = gamma
print('Taken {} time'.format(time.process_time()-start))
return params, mae
def tune_all_xgboost():
print('Tuning xgboost for feature eng')
region = 'quick'
tag = '300'
settings = [True, True, 'pda', 'onehot', 'hypertuning']
data, labels = import_region_grid(region, tag)
labels = labels.values.ravel()
x_train, x_test, y_train, y_test = data_transformation(data, labels, settings)
params = {
# Parameters that we are going to tune.
'max_depth':8,
'min_child_weight': 4,
'gamma': 0.2, #gamma:0
'eta':.05, #eta: 0.01
'subsample': 0.8, #subsample: 0.6
# Other parameters
'objective':'reg:squarederror',
}
dtrain = xgb.DMatrix(x_train, label=y_train)
dtest = xgb.DMatrix(x_test, label=y_test)
subsample, eta, mae = eta_sub_tuning(params, dtrain, dtest)
params['subsample'] = subsample
params['eta'] = eta
gamma, mae = gamma_tuning(params, dtrain, dtest)
params['gamma'] = gamma
print("Best params COMBINATION: max_depth={}, min_child_weight={}, subsample={}, gamma={}, eta={}, MAE: {}".format(
params['max_depth'], params['min_child_weight'], params['subsample'], gamma, params['eta'], mae))
data, labels = import_sentinel_grid(region, tag)
labels = labels.values.ravel()
x_train, x_test, y_train, y_test = data_transformation(data, labels, settings)
params, mae = tune_xgboost(x_train, x_test, y_train, y_test)
print("Best params SENTINEL 1A: max_depth={}, min_child_weight={}, subsample={}, gamma={}, eta={}, MAE: {}".format(
params['max_depth'], params['min_child_weight'], params['subsample'], params['gamma'], params['eta'], mae))
data, labels = import_landsat_grid(region, tag)
labels = labels.values.ravel()
x_train, x_test, y_train, y_test = data_transformation(data, labels, settings)
params, mae = tune_xgboost(x_train, x_test, y_train, y_test)
print("Best params LANDSAT8: max_depth={}, min_child_weight={}, subsample={}, gamma={}, eta={}, MAE: {}".format(
params['max_depth'], params['min_child_weight'], params['subsample'], params['gamma'], params['eta'], mae))
| 10,956 | 35.523333 | 283 | py |
Carbon-Trading-Verfication | Carbon-Trading-Verfication-master/archive/Estimating-AGB/main.py | #import sklearn
import warnings
warnings.filterwarnings(action='ignore', category=DeprecationWarning)
from sklearn.model_selection import cross_val_score, cross_validate, train_test_split, RepeatedKFold, GridSearchCV
from sklearn.ensemble import RandomForestRegressor
from sklearn.datasets import make_classification
from sklearn import svm
from sklearn.decomposition import PCA
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis as PDA
from sklearn import metrics
from sklearn.linear_model import LinearRegression as LR
import xgboost as xgb
import time
import math
import os
from scipy.stats.mstats import winsorize
# Import tools needed for visualization
from sklearn.tree import export_graphviz
import pydot
import matplotlib.pyplot as plt
from os import path as op
import pickle
import pandas as pd
import geopandas as gpd
import shapely as shp
import tempfile
import rasterio
from rasterio.features import rasterize
from rasterstats.io import bounds_window
from rasterio.io import MemoryFile
import rasterstats
from rasterio.plot import show
import numpy as np
import pickle
from import_data import import_grid
from feature_engineering import log_transform, dim_reduction
from plots import draw_map, print_tree, plot_results, plot_scatter, var_importance, histogram, plot_topVar
from model_params import get_params, get_inven_abv
def evaluate(model, x_test, y_test):
# Predict on the test data
predictions = model.predict(x_test)
# Calculate the absolute errors
errors = abs(predictions - y_test)
mse = metrics.mean_squared_error(y_test, predictions)
print('RMSE:', round(math.sqrt(mse), 2), 'Mg C/ha.')
r2 = metrics.r2_score(y_test, predictions)
print('R2:', round(math.sqrt(r2), 2))
nonzero_y_test = np.where(y_test == 0, 1, y_test)
mape = np.mean(100 * (errors / nonzero_y_test))
print('Accuracy:', round(model.score(x_test, nonzero_y_test)*100, 2), '%.')
return predictions
def data_transformation(data, labels, settings):
if settings['cap'] != 0:
cap = 1 - settings['cap']
#Capping the outlier rows with Percentiles
upper_lim = np.quantile(labels, cap)
print(f'Upper limit {upper_lim}')
labels[labels > upper_lim] = upper_lim
if settings['log'] == True:
# Take log
data = log_transform(data)
#data = standardization(data)
x_train, x_test, y_train, y_test = train_test_split(data, labels, test_size=0.25, random_state=42)
# Dimensionality reduction
# Set number of used features
n_components = 7
x_train, x_test = dim_reduction(x_train, x_test, y_train, settings, n_components)
return x_train, x_test, y_train, y_test
def get_model(settings):
params = get_params(settings)
model_name = settings['model']
if model_name == 'random_forest':
model = RandomForestRegressor(n_estimators=params['n_estimators'],
max_features=params['max_features'])
elif model_name == 'xgboost':
model = xgb.XGBRegressor(min_child_weight=params['min_child_weight'],
n_estimators=451,
max_depth=params['max_depth'],
eta=params['eta'],
subsample=params['subsample'])
else:
model = LR()
return model
def train_model(data, labels, settings):
data = np.nan_to_num(data)
x_train, x_test, y_train, y_test = data_transformation(data, labels, settings)
model = get_model(settings)
# Convert data to numpy
if not isinstance(data,(list,pd.core.series.Series,np.ndarray)):
data = data.to_numpy()
if not isinstance(x_train,(list,pd.core.series.Series,np.ndarray)):
x_train = x_train.to_numpy()
if not isinstance(x_test,(list,pd.core.series.Series,np.ndarray)):
x_test = x_test.to_numpy()
# Define model evaluation method
cv = RepeatedKFold(n_splits=10, n_repeats=3, random_state=1)
# Evaluate model
scores = cross_val_score(model, data, labels, scoring='neg_mean_absolute_error', cv=cv, n_jobs=-1)
# Force scores to be positive
scores = np.absolute(scores)
mae = scores.mean()
std = scores.std()
data_sum = np.sum(scores)
print('Mean MAE: %.3f (%.3f)' % (scores.mean(), scores.std()))
# Train the model on training data
model.fit(x_train, y_train)
# Save model
filename = settings['filename']
model_save = filename + '.sav'
pickle.dump(model, open(model_save, 'wb'))
# Load model
#model = pickle.load(open(model_save, 'rb'))
predictions = evaluate(model, x_test, y_test)
# Plots predictions, labels and errors --> currently too many data points to visualise clearly
# plot_results(predictions, errors, y_test, filename)
# Scatter plots
plot_scatter(predictions, y_test, settings)
return model
def run(settings):
settings['filename'] = settings['filename']+'_'+settings['dataset']
data, labels = import_grid(settings)
# Convert labels to numpy array
labels = labels.values.ravel()
# Use log carbon
if settings['log_label']:
labels = np.log(labels+1+labels.min())
avg_carbon = labels.mean()
print('Average Carbon:', round(np.mean(avg_carbon), 2), 'Mg C/ha.')
print("CheckPoint 1")
file_dir = 'output/'+ settings['region']+'_region/' + settings['model'] +'/' + settings['filename']
print("CheckPoint 2")
settings['filename'] = file_dir
print("Entering Model")
print("Output_file directory: ", file_dir)
model = train_model(data, labels, settings)
print("Leaving Model")
feature_list = list(data.columns)
# Plot the top features used by model
var_importance(model, settings, feature_list)
top_var = 15
if settings['model'] != 'linear':
plot_topVar(model, settings, top_var)
if settings['dim'] == '':
# Calculate raster
prediction_raster(model, settings)
# Produce Carbon Map
draw_map(settings)
def prediction_raster(model, settings):
model_name = settings['model']
filename = settings['filename']
region = settings['region']
dataset = settings['dataset']
input = 'data/raster/combined.tif'
output = filename+'.tif'
# Load satellite raster files
sentinel_raster = rasterio.open('data/' + region + '_region/raster/' + region + '_sentinel.tif')
profile = sentinel_raster.profile
landsat_2 = rasterio.open('data/' + region + '_region/raster/' + region + '_landsat2.tif')
landsat_3 = rasterio.open('data/' + region + '_region/raster/' + region + '_landsat3.tif')
landsat_4 = rasterio.open('data/' + region + '_region/raster/' + region + '_landsat4.tif')
landsat_5 = rasterio.open('data/' + region + '_region/raster/' + region + '_landsat5.tif')
landsat_6 = rasterio.open('data/' + region + '_region/raster/' + region + '_landsat6.tif')
landsat_7 = rasterio.open('data/' + region + '_region/raster/' + region + '_landsat7.tif')
# Load inventory raster files
inventory = []
inven = get_inven_abv(settings)
for species in inven:
inventory.append(rasterio.open('data/' + region + '_region/raster/' + region + '_' + species + '.tif'))
# Create empty rasters for vegetation indices
ndvi = np.empty(landsat_4.shape, dtype = rasterio.float32)
# ndvi = (band5 - band4) / (band5 + band4)
savi = np.empty(landsat_4.shape, dtype = rasterio.float32)
# savi = (band5 - band4) / (band5 + band4 + 0.5)
evi = np.empty(landsat_4.shape, dtype = rasterio.float32)
# evi = 2.5 * ((band5 - band4) / (band5 + 6 * band4 - 7.5 * band2 +1))
arvi = np.empty(landsat_4.shape, dtype = rasterio.float32)
# arvi = (band5 - 2*band4 - band2) / (band5 + 2*band4 - band2)
if dataset == 'combined':
bands = 8+len(inventory)
elif dataset == 'landsat':
bands = 6+len(inventory)
else:
bands = 2+len(inventory)
# Update raster file with number of bands used (depends on data input)
profile.update({'count': bands, 'dtype': rasterio.float32})
# Keep track of the band placement
i = 1
# Open model input file to merge feature rasters
with rasterio.open(input, 'w+', **profile) as inp:
if dataset != 'landsat':
inp.write(sentinel_raster.read(1),i)
inp.write(sentinel_raster.read(2),i + 1)
i += 2
if dataset != 'sentinel':
inp.write(landsat_2.read(1), i)
inp.write(landsat_3.read(1), i+1)
inp.write(landsat_4.read(1), i+2)
inp.write(landsat_5.read(1), i+3)
inp.write(landsat_6.read(1), i+4)
inp.write(landsat_7.read(1), i+5)
i += 6
for species in inventory:
inp.write(species.read(1), i)
i += 1
profile.update({'count': 1})
with rasterio.open(output, 'w', **profile) as dst:
# Perform prediction on each small image patch to minimize required memory
window_size = 6
for i in range((inp.shape[0] // window_size) + 1):
for j in range((inp.shape[1] // window_size) + 1):
# define the pixels to read (and write) with rasterio windows reading
window = rasterio.windows.Window(
j * window_size,
i * window_size,
# don't read past the image bounds
min(window_size, inp.shape[1] - j * window_size),
min(window_size, inp.shape[0] - i * window_size))
# read the image into the proper format
data = inp.read(window=window)
img_shift = np.moveaxis(data, 0, 2)
img_flatten = img_shift.reshape(-1, img_shift.shape[-1])
img_vegs = img_flatten
# Add vegetation indices if there are Landsat bands
if dataset != 'sentinel':
band2 = data[3].astype(float)
band2 = np.nan_to_num(band2.reshape(band2.shape[0]*band2.shape[1],1))
band4 = data[5].astype(float)
band4 = np.nan_to_num(band4.reshape(band4.shape[0]*band4.shape[1],1))
band5 = data[6].astype(float)
band5 = np.nan_to_num(band5.reshape(band5.shape[0]*band5.shape[1],1))
ndvi = np.where((band5 + band4) == 0., 0., (band5 - band4) / (band5 + band4))
savi = np.where((band5 + band4 + 0.5) == 0., 0.,(band5 - band4) / (band5 + band4 + 0.5))
evi = np.where((band5 + 6 * band4 - 7.5 * band2 + 1) == 0., 0., 2.5 * ((band5 - band4) / (band5 + 6 * band4 - 7.5 * band2 + 1)))
arvi = np.where((band5 + 2*band4 - band2) == 0., 0., (band5 - 2*band4 - band2) / (band5 + 2*band4 - band2))
img_vegs = np.concatenate([img_flatten, ndvi, savi, evi, arvi], axis=1)
# Remove no data values, store the indices for later use
m = np.ma.masked_invalid(img_vegs)
pred_input = img_vegs.reshape(-1, img_vegs.shape[-1])
# Skip empty inputs
if not len(pred_input):
continue
pred_out = model.predict(pred_input)
# Revert predictions if log labels were used
if settings['log_label']:
pred_out = np.exp(pred_out)
# Add the prediction back to the valid pixels (using only the first band of the mask to decide on validity)
# Makes the assumption that all bands have identical no-data value arrangements
output = np.zeros(img_flatten.shape[0])
output[~m.mask[:, 0]] = pred_out.flatten()
# Resize to the original image dimensions
output = output.reshape(*img_shift.shape[:-1])
# Create the final mask
mask = (~m.mask[:, 0]).reshape(*img_shift.shape[:-1])
# Write to the final files
dst.write(output.astype(rasterio.float32), 1, window=window)
dst.write_mask(mask, window=window)
inp.close()
def main():
# settings = {
# 'dataset': 'combined',
# 'region': 'quick',
# 'model': 'random_forest',
# 'inven': True,
# 'filename': 'tuned_noFEng',
# 'cap': 0,
# 'log': False,
# 'dim': '',
# 'log_label': False,
# }
settings = {
'dataset': 'combined',
'region': 'quick',
'model': 'linear',
'inven': True,
'filename': 'tuned_noFEng',
'cap': 0,
'log': False,
'dim': '',
'log_label': False,
}
run(settings)
if __name__ == "__main__":
main()
| 13,057 | 35.887006 | 152 | py |
Carbon-Trading-Verfication | Carbon-Trading-Verfication-master/archive/Estimating-AGB/model_params.py | EXPANDED_SPECIES_TRAIN = ['Category_Non woodland', 'Category_Woodland',
'Species_Assumed woodland', 'Species_Bare area', 'Species_Broadleaved',
'Species_Conifer', 'Species_Failed', 'Species_Felled',
'Species_Grassland', 'Species_Ground prep', 'Species_Low density',
'Species_Mixed mainly broadleaved', 'Species_Mixed mainly conifer',
'Species_Open water', 'Species_Other vegetation', 'Species_Quarry',
'Species_Road', 'Species_Shrub', 'Species_Urban', 'Species_Windblow',
'Species_Young trees']
EXPANDED_SPECIES_QUICK = ['Category_Non woodland',
'Category_Woodland', 'Species_Assumed woodland', 'Species_Broadleaved',
'Species_Conifer', 'Species_Felled', 'Species_Grassland', 'Species_Other vegetation',
'Species_Young trees']
COMBINED_COLUMNS = ['Sentinel VH', 'Sentinel VV', 'Landsat 2', 'Landsat 3',
'Landsat 4', 'Landsat 5', 'Landsat 6', 'Landsat 7', 'Category',
'Species', 'NDVI', 'SAVI', 'EVI', 'ARVI']
LANDSAT_COLUMNS= ['Landsat 2', 'Landsat 3', 'Landsat 4', 'Landsat 5',
'Landsat 6', 'Landsat 7', 'Category', 'Species', 'NDVI', 'SAVI', 'EVI', 'ARVI']
SENTINEL_COLUMNS = ['Sentinel VH', 'Sentinel VV']
sentinel_rf = ['Sentinel VH', 'Sentinel VV', 'Category Woodland',
'Species Conifer', 'Species Felled', 'Species Assumed Woodland', 'Species Broadleaved',
'Species Young Trees', 'Category Non Woodland', 'Species Agriculture', 'Species Bare Area',
'Species Failed', 'Species Grassland', 'Species Ground Prep', 'Species Low Density']
sentinel_xgb = ['Category Woodland', 'Species Felled', 'Species Conifer', 'Category Non Woodland',
'Species Assumed Woodland', 'Species Windblow', 'Species Broadleaved', 'Sentinel VH', 'Sentinel VV',
'Species Grassland', 'Species Ground Prep', 'Species Mixed Mainly Broadleaved', 'Species Bare Area',
'Species Failed', 'Species Low Density']
INVEN = ['nonwood', 'wood', 'as', 'br', 'co', 'fe', 'gr', 'ot', 'yo']
INVEN_TRAIN = ['nonwood', 'wood', 'ag', 'as', 'ba', 'br', 'co', 'fa', 'fe', 'gr',
'gro', 'lo', 'mib', 'mic', 'op', 'ot', 'ro', 'sh', 'wib', 'yo']
xgb_params_combined = {
'max_depth': 8,
'min_child_weight': 4,
'subsample': 0.6,
'eta': 0.01,
'gamma': 0.0,}
xgb_params_sentinel = {
'max_depth': 6,
'min_child_weight': 5,
'subsample': 0.8,
'eta': 0.05,
'gamma': 0.0,}
xgb_params_landsat = {
'max_depth': 6,
'min_child_weight': 2,
'subsample': 0.6,
'eta': 0.01,
'gamma': 0.4,}
rf_params_combined = {
'n_estimators': 1400,
'max_features': 1/6,
}
rf_params_sentinel = {
'n_estimators': 1400,
'max_features': 1/6,
}
rf_params_landsat = {
'n_estimators': 2400,
'max_features': 1/6,
}
def get_columns(settings):
dataset = settings['dataset']
inven = settings['inven']
column_names = []
if dataset != 'landsat':
column_names.extend(['Sentinel VH', 'Sentinel VV'])
if dataset != 'sentinel':
column_names.extend(['Landsat 2', 'Landsat 3', 'Landsat 4', 'Landsat 5', 'Landsat 6', 'Landsat 7'])
if inven:
column_names.extend(['Category', 'Species'])
if dataset != 'sentinel':
column_names.extend(['NDVI', 'SAVI', 'EVI', 'ARVI'])
return column_names
def get_extended_columns(settings):
column_names = get_columns(settings)
dataset = settings['dataset']
if dataset == 'sentinel':
column_names = column_names[:-2]
column_names.extend(EXPANDED_SPECIES_TRAIN)
else:
veg_indices = column_names[-4:]
column_names = column_names[:(len(column_names)-6)]
column_names.extend(EXPANDED_SPECIES_QUICK)
column_names.extend(veg_indices)
return column_names
def get_params(settings):
dataset = settings['dataset']
model = settings['model']
if model == 'xgboost':
if dataset == 'combined':
return xgb_params_combined
elif dataset == 'landsat':
return xgb_params_landsat
else:
return xgb_params_sentinel
else:
if dataset == 'combined':
return rf_params_combined
elif dataset == 'landsat':
return rf_params_landsat
else:
return rf_params_sentinel
def get_inven_abv(settings):
if settings['inven']:
if settings['dataset'] == 'sentinel' or settings['dim'] != '':
return INVEN_TRAIN
else:
return INVEN
return []
| 4,459 | 34.967742 | 107 | py |
Carbon-Trading-Verfication | Carbon-Trading-Verfication-master/archive/Estimating-SOC/src/brt/eval.py | import numpy as np
import pandas as pd
import joblib
import rasterio
from tqdm import tqdm
import matplotlib.pyplot as plt
from sklearn.metrics import mean_squared_error, mean_absolute_error, r2_score
from sklearn.ensemble import GradientBoostingRegressor
print("Start")
model_path = "models/brtmodel_SoilGrids_nonlog_120621.joblib.pkl"
# model_path = "models/brtmodel.joblib.pkl"
print("Loading model", model_path, "...")
brt = joblib.load(model_path)
print("Loading Raster...")
image = rasterio.open(fr'C:\Users\kothi\Documents\individual_project\qgisdata\S1AIW_S2AL2A_NDVI_SATVI_EVI_DEM.tif')
num_bands = image.count
img_width = image.width
img_height = image.height
num_pixels = img_height * img_width
all_data = []
print("Image shape:", image.shape)
print("Converting Raster to Array...")
for i in tqdm(range(num_bands)):
data = image.read(i+1)
data = pd.DataFrame(data).fillna(0).to_numpy()
all_data.append(data)
all_data = np.dstack(all_data)
all_data_shape = all_data.shape
print("Raster array shape:", all_data_shape)
print(np.any(np.isnan(all_data))) # False
print(np.all(np.isfinite(all_data))) # True
print("Calculating SOC...")
result_data = []
non_zero = 0
for t in tqdm(all_data):
z = brt.predict(t)
result_data.append(z)
non_zero += np.count_nonzero(z)
z[z!=z] = 0
# print(torch.min(z), torch.max(z))
print("non_zero:", non_zero)
# result_data = np.exp(np.stack(result_data))
result_data = np.stack(result_data)
print("max val: ", np.max(result_data))
plt.hist(result_data.flatten(), bins=np.linspace(0, 500, 100), histtype=u'step', density=True)
plt.savefig('brt_inference_histogram.png')
plt.show()
plt.imshow(result_data, cmap='viridis_r')
plt.colorbar()
plt.savefig('brt_map.png')
plt.show()
with rasterio.open(
'out/brt_map.tif',
'w',
driver='GTiff',
height=result_data.shape[0],
width=result_data.shape[1],
count=1,
dtype=result_data.dtype,
crs='+proj=latlong',
transform=image.transform,
) as dst:
dst.write(result_data, 1) | 2,028 | 26.794521 | 115 | py |
Carbon-Trading-Verfication | Carbon-Trading-Verfication-master/archive/Estimating-SOC/src/bag/eval.py | import numpy as np
import pandas as pd
import joblib
import rasterio
from tqdm import tqdm
import matplotlib.pyplot as plt
from sklearn.metrics import mean_squared_error, mean_absolute_error, r2_score
from sklearn.ensemble import GradientBoostingRegressor
print("Start")
model_path = "models/bagmodel.joblib.pkl"
print("Loading model", model_path, "...")
bag = joblib.load(model_path)
print("Loading Raster...")
image = rasterio.open(fr'C:\Users\kothi\Documents\individual_project\qgisdata\S1AIW_S2AL2A_NDVI_SATVI_EVI_DEM.tif')
num_bands = image.count
img_width = image.width
img_height = image.height
num_pixels = img_height * img_width
all_data = []
print("Image shape:", image.shape)
print("Converting Raster to Array...")
for i in tqdm(range(num_bands)):
data = image.read(i+1)
data = pd.DataFrame(data).fillna(0).to_numpy()
all_data.append(data)
all_data = np.dstack(all_data)
all_data_shape = all_data.shape
print("Raster array shape:", all_data_shape)
print(np.any(np.isnan(all_data))) # False
print(np.all(np.isfinite(all_data))) # True
print("Calculating SOC...")
result_data = []
non_zero = 0
for t in tqdm(all_data):
z = bag.predict(t)
result_data.append(z)
non_zero += np.count_nonzero(z)
z[z!=z] = 0
# print(torch.min(z), torch.max(z))
print("non_zero:", non_zero)
result_data = np.exp(np.stack(result_data))
print("max val: ", np.max(result_data))
plt.hist(result_data.flatten(), bins=np.linspace(0, 500, 100), histtype=u'step', density=True)
plt.savefig('bag_inference_histogram.png')
plt.show()
plt.imshow(result_data, cmap='viridis_r')
plt.colorbar()
plt.savefig('bag_map.png')
plt.show()
with rasterio.open(
'out/bag_map.tif',
'w',
driver='GTiff',
height=result_data.shape[0],
width=result_data.shape[1],
count=1,
dtype=result_data.dtype,
crs='+proj=latlong',
transform=image.transform,
) as dst:
dst.write(result_data, 1) | 1,922 | 26.084507 | 115 | py |
Carbon-Trading-Verfication | Carbon-Trading-Verfication-master/archive/Estimating-SOC/src/nns/gridsearch.py | import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
from torch.utils.data import DataLoader
from torch.utils.data import TensorDataset
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from tqdm import tqdm
import itertools
from models import SimpleNet
import eval
layers_space = [7, 9, 10]
neurons_space = [10, 20, 50]
dropout_space = [0.2, 0.5]
lr_space = [0.01, 0.005, 0.001, 0.0005]
epochs_space = [50, 100, 150]
# layers_space = [7]
# neurons_space = [20]
# dropout_space = [0.2]
# lr_space = [0.05]
# epochs_space = [20]
USE_GPU = True
if USE_GPU and torch.cuda.is_available():
device = torch.device('cuda')
else:
device = torch.device('cpu')
print(device)
def load_csv_to_pd(csv_file_path):
df = pd.read_csv(csv_file_path, sep=r'\s*,\s*', engine='python')
df.drop_duplicates(subset=None, inplace=True)
return df
csv_file_path = r"C:\Users\kothi\Documents\individual_project\individual_project\data\S1AIW_S2AL2A_NDVI_EVI_SATVI_DEM_LUCASTIN_roi_points_0.04.csv"
data_df = load_csv_to_pd(csv_file_path)
features_list = [
'VH_1','VV_1','VH_2','VV_2','VH_3','VV_3','VH_4','VV_4','VH_5','VV_5',
'BAND_11','BAND_12','BAND_2','BAND_3','BAND_4','BAND_5','BAND_6','BAND_7','BAND_8','BAND_8A','NDVI','EVI','SATVI',
'DEM_ELEV','DEM_CS','DEM_LSF','DEM_SLOPE','DEM_TWI'
]
idx = 0
results_df = pd.DataFrame(columns=['idx', 'lr', 'epochs', 'dropout', 'layers', 'neurons', 'test_rmse', 'test_mae', 'test_r2'])
for lr, epochs, dropout, layers, neurons in tqdm(list(itertools.product(lr_space, epochs_space, dropout_space, layers_space, neurons_space))):
msk = np.random.rand(len(data_df)) < 0.5
data_df_1 = data_df[msk]
data_df_2 = data_df[~msk]
rmses = []
maes = []
r2s = []
for i in range(2):
train_df = data_df_1
test_df = data_df_2
if i == 1:
train_df = data_df_2
test_df = data_df_1
train_labels_tensor = torch.tensor(np.log(train_df['OC'].values.astype(np.float32)))
train_data_tensor = torch.tensor(train_df[features_list].values.astype(np.float32))
train_tensor = TensorDataset(train_data_tensor, train_labels_tensor)
train_loader = DataLoader(dataset=train_tensor, batch_size=32, shuffle=True)
test_labels_tensor = torch.tensor(np.log(test_df['OC'].values.astype(np.float32)))
test_data_tensor = torch.tensor(test_df[features_list].values.astype(np.float32))
test_tensor = TensorDataset(test_data_tensor, test_labels_tensor)
test_loader = DataLoader(dataset=test_tensor, batch_size = 1)
model = SimpleNet(len(features_list), layers, neurons, dropout)
model = model.to(device=device)
optimizer = optim.Adam(model.parameters(), lr=lr)
for e in range(epochs):
total_t = 0
for t, (x, y) in enumerate(train_loader):
model.train()
x = x.to(device=device)
y = y.to(device=device)
z = model(x)
z_array = z.detach().cpu().numpy()
loss = F.mse_loss(z, y)
total_t += 1
optimizer.zero_grad()
loss.backward()
optimizer.step()
rmse = float(eval.check_rmse(model, test_loader, device))
mae = float(eval.check_mae(model, test_loader, device))
r2 = float(eval.check_r2(model, test_loader, device))
rmses.append(rmse)
maes.append(mae)
r2s.append(r2)
total_rmse = sum(rmses) / len(rmses)
total_mae = sum(maes) / len(maes)
total_r2 = sum(r2s) / len(r2s)
result_dict = {}
result_dict['idx'] = idx
result_dict['lr'] = lr
result_dict['epochs'] = epochs
result_dict['dropout'] = dropout
result_dict['layers'] = layers
result_dict['neurons'] = neurons
result_dict['test_rmse'] = total_rmse
result_dict['test_mae'] = total_mae
result_dict['test_r2'] = total_r2
results_df = results_df.append(result_dict, ignore_index=True)
print('IDX: {:d} | lr {:.4f} | epochs {:d} | dropout {:.4f} | layers {:d} | neurons {:d} | RMSE {:.4f} | MAE {:.4f} | R2 {:.4f}'.format(idx, lr, epochs, dropout, layers, neurons, total_rmse, total_mae, total_r2))
idx += 1
results_df.to_csv('out/nn_gridsearch.csv', index=False)
| 4,373 | 33.714286 | 216 | py |
Carbon-Trading-Verfication | Carbon-Trading-Verfication-master/archive/Estimating-SOC/src/nns/eval.py | import torch
from torch import tensor
import torch.nn.functional as F
import torch.utils.checkpoint as checkpoint
from torch.utils.data import DataLoader
from torch.utils.data import TensorDataset
import rasterio
import matplotlib.pyplot as plt
import numpy as np
from tqdm import tqdm
from sklearn.metrics import r2_score, mean_absolute_error, mean_squared_error
import pandas as pd
def check_rmse(model, test_loader, device):
num_samples = 0
sum_rmse = 0
model.eval()
with torch.no_grad():
ys = []
zs = []
for x, y in test_loader:
num_samples += 1
x = x.to(device=device)
y = y.to(device=device)
z = model(x)
ys.append(y.detach().cpu().item())
zs.append(z.detach().cpu().item())
return np.sqrt(mean_squared_error(ys, zs))
def check_rmspe(model, test_loader, device):
num_samples = 0
sum_rmse = 0
model.eval()
with torch.no_grad():
for x, y in test_loader:
num_samples += 1
x = x.to(device=device)
y = y.to(device=device)
z = model(x)
sum_rmse += (((z - y) / y)**2)
return torch.sqrt(sum_rmse / num_samples)
def check_mae(model, test_loader, device):
num_samples = 0
sum_mae = 0
model.eval()
with torch.no_grad():
for x, y in test_loader:
num_samples += 1
x = x.to(device=device)
y = y.to(device=device)
z = model(x)
sum_mae += torch.abs(z - y)
return sum_mae / num_samples
def check_mape(model, test_loader, device):
num_samples = 0
sum_mae = 0
model.eval()
with torch.no_grad():
for x, y in test_loader:
num_samples += 1
x = x.to(device=device)
y = y.to(device=device)
z = model(x)
sum_mae += torch.abs((z - y) / y)
return sum_mae / num_samples
def check_r2(model, test_loader, device):
model.eval()
with torch.no_grad():
ys = []
zs = []
for x, y in test_loader:
x = x.to(device=device)
y = y.to(device=device)
z = model(x)
ys.append(y.detach().cpu().item())
zs.append(z.detach().cpu().item())
return r2_score(ys, zs)
if __name__ == "__main__":
USE_GPU = True
print("Start")
if USE_GPU and torch.cuda.is_available():
device = torch.device('cuda')
else:
device = torch.device('cpu')
print("Using:", device)
print("Loading Model...")
model_save_path = r'C:\Users\kothi\Documents\individual_project\individual_project\models\nnmodel.pt'
model = torch.load(model_save_path)
model.eval()
print("Loading Raster...")
image = rasterio.open(fr'C:\Users\kothi\Documents\individual_project\qgisdata\S1AIW_S2AL2A_NDVI_SATVI_EVI_DEM.tif')
# image = rasterio.open(fr'C:\Users\kothi\Documents\individual_project\qgisdata\S2A1C_DEM.tif')
# num_bands = 10
num_bands = image.count
img_width = image.width
img_height = image.height
num_pixels = img_height * img_width
all_data = []
print("Image shape:", image.shape)
print("Converting Raster to Array...")
for i in tqdm(range(num_bands)):
data = image.read(i+1)
data = pd.DataFrame(data).replace([np.inf, -np.inf], np.nan).fillna(0).to_numpy()
all_data.append(data)
all_data = np.dstack(all_data)
all_data_shape = all_data.shape
print("Raster array shape:", all_data_shape)
print(np.any(np.isnan(all_data))) # False
print(np.all(np.isfinite(all_data))) # True
print("Calculating SOC...")
result_data = []
non_zero = 0
with torch.no_grad():
for t in tqdm(all_data):
t = torch.Tensor(t).to(device=device)
z = model(t).cpu()
result_data.append(z)
non_zero += torch.count_nonzero(z)
z[z!=z] = 0
print("non_zero:", non_zero)
result_data = np.exp(torch.stack(result_data).detach().numpy())
print("max val: ", np.max(result_data))
plt.hist(result_data.flatten(), bins=np.linspace(0, 500, 100), histtype=u'step', density=True)
plt.savefig('nn_inference_histogram.png')
plt.show()
plt.imshow(result_data, cmap='viridis_r')
plt.colorbar()
plt.savefig('nn_map.png')
plt.show()
with rasterio.open(
'out/nn_map.tif',
'w',
driver='GTiff',
height=result_data.shape[0],
width=result_data.shape[1],
count=1,
dtype=result_data.dtype,
crs='+proj=latlong',
transform=image.transform,
) as dst:
dst.write(result_data, 1) | 4,701 | 29.335484 | 119 | py |
Carbon-Trading-Verfication | Carbon-Trading-Verfication-master/archive/Estimating-SOC/src/nns/models.py | import torch
import torch.nn as nn
class SimpleNet(nn.Module):
'''
Network Architecture from the Emadi 2020 paper on Norther Iran Soils
Estimated values:
Dropout = 0.2-0.8
Learning rate = 0.001-0.05
Epochs = 100
'''
def __init__(self, input_neurons, layers=5, neurons=50, dropout=0.5):
super(SimpleNet, self).__init__()
dropout_value = dropout
num_neurons = neurons
layers_list = []
assert(layers >= 2)
layers_list.append(nn.Linear(input_neurons, num_neurons))
layers_list.append(nn.Dropout(dropout_value))
layers_list.append(nn.ReLU())
for i in range(layers - 2):
layers_list.append(nn.Linear(num_neurons, num_neurons))
if i % 2 == 0:
layers_list.append(nn.Dropout(dropout_value))
else:
layers_list.append(nn.BatchNorm1d(num_neurons))
layers_list.append(nn.ReLU())
layers_list.append(nn.Linear(num_neurons, 1))
layers_list.append(nn.ReLU())
self.layers = nn.Sequential(*layers_list)
def forward(self, x):
z = self.layers(x)
z = z.view(x.shape[0])
return z | 1,213 | 30.947368 | 73 | py |
Carbon-Trading-Verfication | Carbon-Trading-Verfication-master/archive/Estimating-SOC/src/nns/train.py | import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
from torch.utils.data import DataLoader
from torch.utils.data import TensorDataset
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from tqdm import tqdm
from models import SimpleNet
import eval
USE_GPU = True
if USE_GPU and torch.cuda.is_available():
device = torch.device('cuda')
else:
device = torch.device('cpu')
print(device)
def load_csv_to_pd(csv_file_path):
df = pd.read_csv(csv_file_path, sep=r'\s*,\s*', engine='python')
df.drop_duplicates(subset=None, inplace=True)
return df
csv_file_path = r"C:\Users\kothi\Documents\individual_project\individual_project\data\S1AIW_S2AL2A_NDVI_EVI_SATVI_DEM_LUCASTIN_roi_points_0.02.csv"
lucas_csv_file_path = r"C:\Users\kothi\Documents\individual_project\individual_project\data\S1AIW_S2AL2A_NDVI_EVI_SATVI_DEM_LUCASTIN_LUCAS2009_zhou2020_points.csv"
# csv_file_path = r"C:\Users\kothi\Documents\individual_project\individual_project\data\S2A1C_DEM_LUCASTIN_roi_points.csv"
lr = 0.01
epochs = 20
data_df = load_csv_to_pd(csv_file_path)
lucas_data_df = load_csv_to_pd(lucas_csv_file_path)
msk = np.random.rand(len(data_df)) < 0.8
train_df = data_df[msk]
test_df = data_df[~msk]
print("Length of training set: ", len(train_df))
print("Length of test set: ", len(test_df))
features_list = [
'VH_1','VV_1','VH_2','VV_2','VH_3','VV_3','VH_4','VV_4','VH_5','VV_5',
'BAND_11','BAND_12','BAND_2','BAND_3','BAND_4','BAND_5','BAND_6','BAND_7','BAND_8','BAND_8A','NDVI','EVI','SATVI',
'DEM_ELEV','DEM_CS','DEM_LSF','DEM_SLOPE','DEM_TWI'
]
# features_list = [
# 'BAND_11','BAND_12','BAND_2','BAND_3','BAND_4','BAND_5','BAND_6','BAND_7','BAND_8','BAND_8A','NDVI','EVI','SATVI'
# ]
train_labels_tensor = torch.tensor(np.log(train_df['OC'].values.astype(np.float32)))
train_data_tensor = torch.tensor(train_df[features_list].values.astype(np.float32))
train_tensor = TensorDataset(train_data_tensor, train_labels_tensor)
train_loader = DataLoader(dataset=train_tensor, batch_size=128, shuffle=True)
test_labels_tensor = torch.tensor(np.log(test_df['OC'].values.astype(np.float32)))
test_data_tensor = torch.tensor(test_df[features_list].values.astype(np.float32))
test_tensor = TensorDataset(test_data_tensor, test_labels_tensor)
test_loader = DataLoader(dataset=test_tensor, batch_size = 1)
model = SimpleNet(len(features_list), layers=5, neurons=30, dropout=0.2)
model = model.to(device=device)
optimizer = optim.Adam(model.parameters(), lr=lr)
plt.ion()
fig, ax = plt.subplots(2, 3, figsize=(10, 5))
train_losses = []
test_rmses = []
test_maes = []
test_r2s = []
for e in tqdm(range(epochs)):
total_loss = 0
total_t = 0
zs = []
for t, (x, y) in enumerate(train_loader):
model.train()
x = x.to(device=device)
y = y.to(device=device)
z = model(x)
z_array = z.detach().cpu().numpy()
zs.extend(z_array)
loss = F.mse_loss(z, y)
total_t += 1
total_loss += loss
optimizer.zero_grad()
loss.backward()
optimizer.step()
rmse = float(eval.check_rmse(model, test_loader, device))
mae = float(eval.check_mae(model, test_loader, device))
r2 = float(eval.check_r2(model, test_loader, device))
total_loss = total_loss.cpu().item() / total_t
train_losses.append(total_loss)
test_rmses.append(rmse)
test_maes.append(mae)
test_r2s.append(r2)
ax[0,0].plot(train_losses, c='black')
ax[0,0].set_title('Train loss')
ax[1,0].plot(test_rmses, c='black')
ax[1,0].set_title('Test RMSE')
ax[0,1].plot(test_maes, c='black')
ax[0,1].set_title('Test MAE')
ax[1,1].plot(test_r2s, c='black')
ax[1,1].set_title('Test R^2')
ax[0,2].clear()
ax[1,2].clear()
ax[0,2].hist(train_df['OC'].values.astype(np.float32), bins=np.linspace(0, 500, 100), histtype=u'step', density=True)
ax[1,2].hist(np.exp(zs), bins=np.linspace(0, 500, 100), histtype=u'step', density=True)
plt.pause(0.05)
print('Epoch {:d} | Loss {:.4f} | RMSE {:.4f} | MAE {:.4f} | R2 {:.4f}'.format(e, total_loss, rmse, mae, r2))
lucas_labels_tensor = torch.tensor(np.log(lucas_data_df['OC'].values.astype(np.float32)))
lucas_data_tensor = torch.tensor(lucas_data_df[features_list].values.astype(np.float32))
lucas_tensor = TensorDataset(lucas_data_tensor, lucas_labels_tensor)
lucas_loader = DataLoader(dataset=lucas_tensor, batch_size=1)
lucas_rmse = float(eval.check_rmse(model, lucas_loader, device))
lucas_mae = float(eval.check_mae(model, lucas_loader, device))
lucas_r2 = float(eval.check_r2(model, lucas_loader, device))
print('LUCAS2009 ZHOU2020 RESULTS: | RMSE {:.4f} | MAE {:.4f} | R2 {:.4f}'.format(lucas_rmse, lucas_mae, lucas_r2))
fig.savefig('nn_training_graph.png')
plt.close(fig)
plt.show()
plt.ioff()
model_save_path = 'models/nnmodel.pt'
torch.save(model, model_save_path) | 4,964 | 33.72028 | 163 | py |
Carbon-Trading-Verfication | Carbon-Trading-Verfication-master/archive/Estimating-SOC/src/plsr/eval.py | import numpy as np
import pandas as pd
import joblib
import rasterio
from tqdm import tqdm
import matplotlib.pyplot as plt
from sklearn.metrics import mean_squared_error, mean_absolute_error, r2_score
from sklearn.ensemble import GradientBoostingRegressor
print("Start")
model_path = "models/plsrmodel.joblib.pkl"
print("Loading model", model_path, "...")
plsr = joblib.load(model_path)
print("Loading Raster...")
image = rasterio.open(fr'C:\Users\kothi\Documents\individual_project\qgisdata\S1AIW_S2AL2A_NDVI_SATVI_EVI_DEM.tif')
num_bands = image.count
img_width = image.width
img_height = image.height
num_pixels = img_height * img_width
all_data = []
print("Image shape:", image.shape)
print("Converting Raster to Array...")
for i in tqdm(range(num_bands)):
data = image.read(i+1)
data = pd.DataFrame(data).fillna(0).to_numpy()
all_data.append(data)
all_data = np.dstack(all_data)
all_data_shape = all_data.shape
print("Raster array shape:", all_data_shape)
print(np.any(np.isnan(all_data))) # False
print(np.all(np.isfinite(all_data))) # True
print("Calculating SOC...")
result_data = []
non_zero = 0
for t in tqdm(all_data):
z = plsr.predict(t)
result_data.append(z)
non_zero += np.count_nonzero(z)
z[z!=z] = 0
# print(torch.min(z), torch.max(z))
print("non_zero:", non_zero)
result_data = np.exp(np.stack(result_data))
print("max val: ", np.max(result_data))
plt.hist(result_data.flatten(), bins=np.linspace(0, 500, 100), histtype=u'step', density=True)
plt.savefig('plsr_inference_histogram.png')
plt.show()
plt.imshow(result_data, cmap='viridis_r')
plt.colorbar()
plt.savefig('plsr_map.png')
plt.show()
with rasterio.open(
'out/plsr_map.tif',
'w',
driver='GTiff',
height=result_data.shape[0],
width=result_data.shape[1],
count=1,
dtype=result_data.dtype,
crs='+proj=latlong',
transform=image.transform,
) as dst:
dst.write(result_data, 1) | 1,928 | 26.169014 | 115 | py |
Carbon-Trading-Verfication | Carbon-Trading-Verfication-master/archive/Estimating-SOC/src/ensemble/eval_stack.py | import numpy as np
import pandas as pd
import joblib
import rasterio
from tqdm import tqdm
import matplotlib.pyplot as plt
from sklearn.metrics import mean_squared_error, mean_absolute_error, r2_score
from sklearn.ensemble import GradientBoostingRegressor
print("Start")
model_path = "models/stackmodel.joblib.pkl"
print("Loading model", model_path, "...")
stack = joblib.load(model_path)
print("Loading Raster...")
image = rasterio.open(fr'C:\Users\kothi\Documents\individual_project\qgisdata\S1AIW_S2AL2A_NDVI_SATVI_EVI_DEM.tif')
num_bands = image.count
img_width = image.width
img_height = image.height
num_pixels = img_height * img_width
all_data = []
print("Image shape:", image.shape)
print("Converting Raster to Array...")
for i in tqdm(range(num_bands)):
data = image.read(i+1)
data = pd.DataFrame(data).fillna(0).to_numpy()
all_data.append(data)
all_data = np.dstack(all_data)
all_data_shape = all_data.shape
print("Raster array shape:", all_data_shape)
print(np.any(np.isnan(all_data))) # False
print(np.all(np.isfinite(all_data))) # True
print("Calculating SOC...")
result_data = []
non_zero = 0
for t in tqdm(all_data):
z = stack.predict(t)
result_data.append(z)
non_zero += np.count_nonzero(z)
z[z!=z] = 0
# print(torch.min(z), torch.max(z))
print("non_zero:", non_zero)
result_data = np.exp(np.stack(result_data))
print("max val: ", np.max(result_data))
plt.hist(result_data.flatten(), bins=np.linspace(0, 500, 100), histtype=u'step', density=True)
plt.savefig('stack_inference_histogram.png')
plt.show()
plt.imshow(result_data, cmap='viridis_r')
plt.colorbar()
plt.savefig('stack_map.png')
plt.show()
with rasterio.open(
'out/stack_map.tif',
'w',
driver='GTiff',
height=result_data.shape[0],
width=result_data.shape[1],
count=1,
dtype=result_data.dtype,
crs='+proj=latlong',
transform=image.transform,
) as dst:
dst.write(result_data, 1) | 1,934 | 26.253521 | 115 | py |
Carbon-Trading-Verfication | Carbon-Trading-Verfication-master/archive/Estimating-SOC/src/ensemble/eval_vot.py | import numpy as np
import pandas as pd
import joblib
import rasterio
from tqdm import tqdm
import matplotlib.pyplot as plt
from sklearn.metrics import mean_squared_error, mean_absolute_error, r2_score
from sklearn.ensemble import GradientBoostingRegressor
print("Start")
model_path = "models/votmodel.joblib.pkl"
print("Loading model", model_path, "...")
vot = joblib.load(model_path)
print("Loading Raster...")
image = rasterio.open(fr'C:\Users\kothi\Documents\individual_project\qgisdata\S1AIW_S2AL2A_NDVI_SATVI_EVI_DEM.tif')
num_bands = image.count
img_width = image.width
img_height = image.height
num_pixels = img_height * img_width
all_data = []
print("Image shape:", image.shape)
print("Converting Raster to Array...")
for i in tqdm(range(num_bands)):
data = image.read(i+1)
data = pd.DataFrame(data).fillna(0).to_numpy()
all_data.append(data)
all_data = np.dstack(all_data)
all_data_shape = all_data.shape
print("Raster array shape:", all_data_shape)
print(np.any(np.isnan(all_data))) # False
print(np.all(np.isfinite(all_data))) # True
print("Calculating SOC...")
result_data = []
non_zero = 0
for t in tqdm(all_data):
z = vot.predict(t)
result_data.append(z)
non_zero += np.count_nonzero(z)
z[z!=z] = 0
# print(torch.min(z), torch.max(z))
print("non_zero:", non_zero)
result_data = np.exp(np.stack(result_data))
print("max val: ", np.max(result_data))
plt.hist(result_data.flatten(), bins=np.linspace(0, 500, 100), histtype=u'step', density=True)
plt.savefig('vot_inference_histogram.png')
plt.show()
plt.imshow(result_data, cmap='viridis_r')
plt.colorbar()
plt.savefig('vot_map.png')
plt.show()
with rasterio.open(
'out/vot_map.tif',
'w',
driver='GTiff',
height=result_data.shape[0],
width=result_data.shape[1],
count=1,
dtype=result_data.dtype,
crs='+proj=latlong',
transform=image.transform,
) as dst:
dst.write(result_data, 1) | 1,922 | 26.084507 | 115 | py |
Carbon-Trading-Verfication | Carbon-Trading-Verfication-master/archive/Estimating-SOC/src/rfs/eval.py | import numpy as np
import pandas as pd
import joblib
import rasterio
from tqdm import tqdm
import matplotlib.pyplot as plt
from sklearn.metrics import mean_squared_error, mean_absolute_error, r2_score
from sklearn.ensemble import GradientBoostingRegressor
print("Start")
model_path = "models/rfmodel.joblib.pkl"
print("Loading model", model_path, "...")
rf = joblib.load(model_path)
print("Loading Raster...")
image = rasterio.open(fr'C:\Users\kothi\Documents\individual_project\qgisdata\S1AIW_S2AL2A_NDVI_SATVI_EVI_DEM.tif')
num_bands = image.count
img_width = image.width
img_height = image.height
num_pixels = img_height * img_width
all_data = []
print("Image shape:", image.shape)
print("Converting Raster to Array...")
for i in tqdm(range(num_bands)):
data = image.read(i+1)
data = pd.DataFrame(data).fillna(0).to_numpy()
all_data.append(data)
all_data = np.dstack(all_data)
all_data_shape = all_data.shape
print("Raster array shape:", all_data_shape)
print(np.any(np.isnan(all_data))) # False
print(np.all(np.isfinite(all_data))) # True
print("Calculating SOC...")
result_data = []
non_zero = 0
for t in tqdm(all_data):
z = rf.predict(t)
result_data.append(z)
non_zero += np.count_nonzero(z)
z[z!=z] = 0
# print(torch.min(z), torch.max(z))
print("non_zero:", non_zero)
result_data = np.exp(np.stack(result_data))
print("max val: ", np.max(result_data))
plt.hist(result_data.flatten(), bins=np.linspace(0, 500, 100), histtype=u'step', density=True)
plt.savefig('rf_inference_histogram.png')
plt.show()
plt.imshow(result_data, cmap='viridis_r')
plt.colorbar()
plt.savefig('rf_map.png')
plt.show()
with rasterio.open(
'out/rf_map.tif',
'w',
driver='GTiff',
height=result_data.shape[0],
width=result_data.shape[1],
count=1,
dtype=result_data.dtype,
crs='+proj=latlong',
transform=image.transform,
) as dst:
dst.write(result_data, 1) | 1,916 | 26 | 115 | py |
Carbon-Trading-Verfication | Carbon-Trading-Verfication-master/archive/Estimating-SOC/src/mlp/eval.py | import numpy as np
import pandas as pd
import joblib
import rasterio
from tqdm import tqdm
import matplotlib.pyplot as plt
from sklearn.metrics import mean_squared_error, mean_absolute_error, r2_score
from sklearn.ensemble import GradientBoostingRegressor
print("Start")
model_path = "../../models/mlpmodel.joblib.pkl"
print("Loading model", model_path, "...")
mlp = joblib.load(model_path)
print("Loading Raster...")
image = rasterio.open(fr'D:\Carbon Project\qgis\qgisdata\S1AIW_S2AL2A_NDVI_SATVI_EVI_DEM.tif')
num_bands = image.count
img_width = image.width
img_height = image.height
num_pixels = img_height * img_width
all_data = []
print("Image shape:", image.shape)
print("Converting Raster to Array...")
for i in tqdm(range(num_bands)):
data = image.read(i+1)
data = pd.DataFrame(data).fillna(0).to_numpy()
all_data.append(data)
all_data = np.dstack(all_data)
all_data_shape = all_data.shape
print("Raster array shape:", all_data_shape)
print(np.any(np.isnan(all_data))) # False
print(np.all(np.isfinite(all_data))) # True
print("Calculating SOC...")
result_data = []
non_zero = 0
for t in tqdm(all_data):
z = mlp.predict(t)
result_data.append(z)
non_zero += np.count_nonzero(z)
z[z!=z] = 0
# print(torch.min(z), torch.max(z))
print("non_zero:", non_zero)
result_data = np.exp(np.stack(result_data))
print("max val: ", np.max(result_data))
plt.hist(result_data.flatten(), bins=np.linspace(0, 500, 100), histtype=u'step', density=True)
plt.savefig('mlp_inference_histogram.png')
plt.show()
plt.imshow(result_data, cmap='viridis_r')
plt.colorbar()
plt.savefig('mlp_map.png')
plt.show()
with rasterio.open(
'out/mlp_map.tif',
'w',
driver='GTiff',
height=result_data.shape[0],
width=result_data.shape[1],
count=1,
dtype=result_data.dtype,
crs='+proj=latlong',
transform=image.transform,
) as dst:
dst.write(result_data, 1) | 1,907 | 25.873239 | 94 | py |
video-classification | video-classification-master/ResNetCRNN/functions.py | import os
import numpy as np
from PIL import Image
from torch.utils import data
import torch
import torch.nn as nn
import torch.nn.functional as F
import torchvision.models as models
import torchvision.transforms as transforms
from tqdm import tqdm
## ------------------- label conversion tools ------------------ ##
def labels2cat(label_encoder, list):
return label_encoder.transform(list)
def labels2onehot(OneHotEncoder, label_encoder, list):
return OneHotEncoder.transform(label_encoder.transform(list).reshape(-1, 1)).toarray()
def onehot2labels(label_encoder, y_onehot):
return label_encoder.inverse_transform(np.where(y_onehot == 1)[1]).tolist()
def cat2labels(label_encoder, y_cat):
return label_encoder.inverse_transform(y_cat).tolist()
## ---------------------- Dataloaders ---------------------- ##
# for 3DCNN
class Dataset_3DCNN(data.Dataset):
"Characterizes a dataset for PyTorch"
def __init__(self, data_path, folders, labels, frames, transform=None):
"Initialization"
self.data_path = data_path
self.labels = labels
self.folders = folders
self.transform = transform
self.frames = frames
def __len__(self):
"Denotes the total number of samples"
return len(self.folders)
def read_images(self, path, selected_folder, use_transform):
X = []
for i in self.frames:
image = Image.open(os.path.join(path, selected_folder, 'frame{:06d}.jpg'.format(i))).convert('L')
if use_transform is not None:
image = use_transform(image)
X.append(image.squeeze_(0))
X = torch.stack(X, dim=0)
return X
def __getitem__(self, index):
"Generates one sample of data"
# Select sample
folder = self.folders[index]
# Load data
X = self.read_images(self.data_path, folder, self.transform).unsqueeze_(0) # (input) spatial images
y = torch.LongTensor([self.labels[index]]) # (labels) LongTensor are for int64 instead of FloatTensor
# print(X.shape)
return X, y
# for CRNN
class Dataset_CRNN(data.Dataset):
"Characterizes a dataset for PyTorch"
def __init__(self, data_path, folders, labels, frames, transform=None):
"Initialization"
self.data_path = data_path
self.labels = labels
self.folders = folders
self.transform = transform
self.frames = frames
def __len__(self):
"Denotes the total number of samples"
return len(self.folders)
def read_images(self, path, selected_folder, use_transform):
X = []
for i in self.frames:
image = Image.open(os.path.join(path, selected_folder, 'frame{:06d}.jpg'.format(i)))
if use_transform is not None:
image = use_transform(image)
X.append(image)
X = torch.stack(X, dim=0)
return X
def __getitem__(self, index):
"Generates one sample of data"
# Select sample
folder = self.folders[index]
# Load data
X = self.read_images(self.data_path, folder, self.transform) # (input) spatial images
y = torch.LongTensor([self.labels[index]]) # (labels) LongTensor are for int64 instead of FloatTensor
# print(X.shape)
return X, y
## ---------------------- end of Dataloaders ---------------------- ##
## -------------------- (reload) model prediction ---------------------- ##
def Conv3d_final_prediction(model, device, loader):
model.eval()
all_y_pred = []
with torch.no_grad():
for batch_idx, (X, y) in enumerate(tqdm(loader)):
# distribute data to device
X = X.to(device)
output = model(X)
y_pred = output.max(1, keepdim=True)[1] # location of max log-probability as prediction
all_y_pred.extend(y_pred.cpu().data.squeeze().numpy().tolist())
return all_y_pred
def CRNN_final_prediction(model, device, loader):
cnn_encoder, rnn_decoder = model
cnn_encoder.eval()
rnn_decoder.eval()
all_y_pred = []
with torch.no_grad():
for batch_idx, (X, y) in enumerate(tqdm(loader)):
# distribute data to device
X = X.to(device)
output = rnn_decoder(cnn_encoder(X))
y_pred = output.max(1, keepdim=True)[1] # location of max log-probability as prediction
all_y_pred.extend(y_pred.cpu().data.squeeze().numpy().tolist())
return all_y_pred
## -------------------- end of model prediction ---------------------- ##
## ------------------------ 3D CNN module ---------------------- ##
def conv3D_output_size(img_size, padding, kernel_size, stride):
# compute output shape of conv3D
outshape = (np.floor((img_size[0] + 2 * padding[0] - (kernel_size[0] - 1) - 1) / stride[0] + 1).astype(int),
np.floor((img_size[1] + 2 * padding[1] - (kernel_size[1] - 1) - 1) / stride[1] + 1).astype(int),
np.floor((img_size[2] + 2 * padding[2] - (kernel_size[2] - 1) - 1) / stride[2] + 1).astype(int))
return outshape
class CNN3D(nn.Module):
def __init__(self, t_dim=120, img_x=90, img_y=120, drop_p=0.2, fc_hidden1=256, fc_hidden2=128, num_classes=50):
super(CNN3D, self).__init__()
# set video dimension
self.t_dim = t_dim
self.img_x = img_x
self.img_y = img_y
# fully connected layer hidden nodes
self.fc_hidden1, self.fc_hidden2 = fc_hidden1, fc_hidden2
self.drop_p = drop_p
self.num_classes = num_classes
self.ch1, self.ch2 = 32, 48
self.k1, self.k2 = (5, 5, 5), (3, 3, 3) # 3d kernel size
self.s1, self.s2 = (2, 2, 2), (2, 2, 2) # 3d strides
self.pd1, self.pd2 = (0, 0, 0), (0, 0, 0) # 3d padding
# compute conv1 & conv2 output shape
self.conv1_outshape = conv3D_output_size((self.t_dim, self.img_x, self.img_y), self.pd1, self.k1, self.s1)
self.conv2_outshape = conv3D_output_size(self.conv1_outshape, self.pd2, self.k2, self.s2)
self.conv1 = nn.Conv3d(in_channels=1, out_channels=self.ch1, kernel_size=self.k1, stride=self.s1,
padding=self.pd1)
self.bn1 = nn.BatchNorm3d(self.ch1)
self.conv2 = nn.Conv3d(in_channels=self.ch1, out_channels=self.ch2, kernel_size=self.k2, stride=self.s2,
padding=self.pd2)
self.bn2 = nn.BatchNorm3d(self.ch2)
self.relu = nn.ReLU(inplace=True)
self.drop = nn.Dropout3d(self.drop_p)
self.pool = nn.MaxPool3d(2)
self.fc1 = nn.Linear(self.ch2 * self.conv2_outshape[0] * self.conv2_outshape[1] * self.conv2_outshape[2],
self.fc_hidden1) # fully connected hidden layer
self.fc2 = nn.Linear(self.fc_hidden1, self.fc_hidden2)
self.fc3 = nn.Linear(self.fc_hidden2, self.num_classes) # fully connected layer, output = multi-classes
def forward(self, x_3d):
# Conv 1
x = self.conv1(x_3d)
x = self.bn1(x)
x = self.relu(x)
x = self.drop(x)
# Conv 2
x = self.conv2(x)
x = self.bn2(x)
x = self.relu(x)
x = self.drop(x)
# FC 1 and 2
x = x.view(x.size(0), -1)
x = F.relu(self.fc1(x))
x = F.relu(self.fc2(x))
x = F.dropout(x, p=self.drop_p, training=self.training)
x = self.fc3(x)
return x
## --------------------- end of 3D CNN module ---------------- ##
## ------------------------ CRNN module ---------------------- ##
def conv2D_output_size(img_size, padding, kernel_size, stride):
# compute output shape of conv2D
outshape = (np.floor((img_size[0] + 2 * padding[0] - (kernel_size[0] - 1) - 1) / stride[0] + 1).astype(int),
np.floor((img_size[1] + 2 * padding[1] - (kernel_size[1] - 1) - 1) / stride[1] + 1).astype(int))
return outshape
# 2D CNN encoder train from scratch (no transfer learning)
class EncoderCNN(nn.Module):
def __init__(self, img_x=90, img_y=120, fc_hidden1=512, fc_hidden2=512, drop_p=0.3, CNN_embed_dim=300):
super(EncoderCNN, self).__init__()
self.img_x = img_x
self.img_y = img_y
self.CNN_embed_dim = CNN_embed_dim
# CNN architechtures
self.ch1, self.ch2, self.ch3, self.ch4 = 32, 64, 128, 256
self.k1, self.k2, self.k3, self.k4 = (5, 5), (3, 3), (3, 3), (3, 3) # 2d kernal size
self.s1, self.s2, self.s3, self.s4 = (2, 2), (2, 2), (2, 2), (2, 2) # 2d strides
self.pd1, self.pd2, self.pd3, self.pd4 = (0, 0), (0, 0), (0, 0), (0, 0) # 2d padding
# conv2D output shapes
self.conv1_outshape = conv2D_output_size((self.img_x, self.img_y), self.pd1, self.k1, self.s1) # Conv1 output shape
self.conv2_outshape = conv2D_output_size(self.conv1_outshape, self.pd2, self.k2, self.s2)
self.conv3_outshape = conv2D_output_size(self.conv2_outshape, self.pd3, self.k3, self.s3)
self.conv4_outshape = conv2D_output_size(self.conv3_outshape, self.pd4, self.k4, self.s4)
# fully connected layer hidden nodes
self.fc_hidden1, self.fc_hidden2 = fc_hidden1, fc_hidden2
self.drop_p = drop_p
self.conv1 = nn.Sequential(
nn.Conv2d(in_channels=3, out_channels=self.ch1, kernel_size=self.k1, stride=self.s1, padding=self.pd1),
nn.BatchNorm2d(self.ch1, momentum=0.01),
nn.ReLU(inplace=True),
# nn.MaxPool2d(kernel_size=2),
)
self.conv2 = nn.Sequential(
nn.Conv2d(in_channels=self.ch1, out_channels=self.ch2, kernel_size=self.k2, stride=self.s2, padding=self.pd2),
nn.BatchNorm2d(self.ch2, momentum=0.01),
nn.ReLU(inplace=True),
# nn.MaxPool2d(kernel_size=2),
)
self.conv3 = nn.Sequential(
nn.Conv2d(in_channels=self.ch2, out_channels=self.ch3, kernel_size=self.k3, stride=self.s3, padding=self.pd3),
nn.BatchNorm2d(self.ch3, momentum=0.01),
nn.ReLU(inplace=True),
# nn.MaxPool2d(kernel_size=2),
)
self.conv4 = nn.Sequential(
nn.Conv2d(in_channels=self.ch3, out_channels=self.ch4, kernel_size=self.k4, stride=self.s4, padding=self.pd4),
nn.BatchNorm2d(self.ch4, momentum=0.01),
nn.ReLU(inplace=True),
# nn.MaxPool2d(kernel_size=2),
)
self.drop = nn.Dropout2d(self.drop_p)
self.pool = nn.MaxPool2d(2)
self.fc1 = nn.Linear(self.ch4 * self.conv4_outshape[0] * self.conv4_outshape[1], self.fc_hidden1) # fully connected layer, output k classes
self.fc2 = nn.Linear(self.fc_hidden1, self.fc_hidden2)
self.fc3 = nn.Linear(self.fc_hidden2, self.CNN_embed_dim) # output = CNN embedding latent variables
def forward(self, x_3d):
cnn_embed_seq = []
for t in range(x_3d.size(1)):
# CNNs
x = self.conv1(x_3d[:, t, :, :, :])
x = self.conv2(x)
x = self.conv3(x)
x = self.conv4(x)
x = x.view(x.size(0), -1) # flatten the output of conv
# FC layers
x = F.relu(self.fc1(x))
# x = F.dropout(x, p=self.drop_p, training=self.training)
x = F.relu(self.fc2(x))
x = F.dropout(x, p=self.drop_p, training=self.training)
x = self.fc3(x)
cnn_embed_seq.append(x)
# swap time and sample dim such that (sample dim, time dim, CNN latent dim)
cnn_embed_seq = torch.stack(cnn_embed_seq, dim=0).transpose_(0, 1)
# cnn_embed_seq: shape=(batch, time_step, input_size)
return cnn_embed_seq
# 2D CNN encoder using ResNet-152 pretrained
class ResCNNEncoder(nn.Module):
def __init__(self, fc_hidden1=512, fc_hidden2=512, drop_p=0.3, CNN_embed_dim=300):
"""Load the pretrained ResNet-152 and replace top fc layer."""
super(ResCNNEncoder, self).__init__()
self.fc_hidden1, self.fc_hidden2 = fc_hidden1, fc_hidden2
self.drop_p = drop_p
resnet = models.resnet152(pretrained=True)
modules = list(resnet.children())[:-1] # delete the last fc layer.
self.resnet = nn.Sequential(*modules)
self.fc1 = nn.Linear(resnet.fc.in_features, fc_hidden1)
self.bn1 = nn.BatchNorm1d(fc_hidden1, momentum=0.01)
self.fc2 = nn.Linear(fc_hidden1, fc_hidden2)
self.bn2 = nn.BatchNorm1d(fc_hidden2, momentum=0.01)
self.fc3 = nn.Linear(fc_hidden2, CNN_embed_dim)
def forward(self, x_3d):
cnn_embed_seq = []
for t in range(x_3d.size(1)):
# ResNet CNN
with torch.no_grad():
x = self.resnet(x_3d[:, t, :, :, :]) # ResNet
x = x.view(x.size(0), -1) # flatten output of conv
# FC layers
x = self.bn1(self.fc1(x))
x = F.relu(x)
x = self.bn2(self.fc2(x))
x = F.relu(x)
x = F.dropout(x, p=self.drop_p, training=self.training)
x = self.fc3(x)
cnn_embed_seq.append(x)
# swap time and sample dim such that (sample dim, time dim, CNN latent dim)
cnn_embed_seq = torch.stack(cnn_embed_seq, dim=0).transpose_(0, 1)
# cnn_embed_seq: shape=(batch, time_step, input_size)
return cnn_embed_seq
class DecoderRNN(nn.Module):
def __init__(self, CNN_embed_dim=300, h_RNN_layers=3, h_RNN=256, h_FC_dim=128, drop_p=0.3, num_classes=50):
super(DecoderRNN, self).__init__()
self.RNN_input_size = CNN_embed_dim
self.h_RNN_layers = h_RNN_layers # RNN hidden layers
self.h_RNN = h_RNN # RNN hidden nodes
self.h_FC_dim = h_FC_dim
self.drop_p = drop_p
self.num_classes = num_classes
self.LSTM = nn.LSTM(
input_size=self.RNN_input_size,
hidden_size=self.h_RNN,
num_layers=h_RNN_layers,
batch_first=True, # input & output will has batch size as 1s dimension. e.g. (batch, time_step, input_size)
)
self.fc1 = nn.Linear(self.h_RNN, self.h_FC_dim)
self.fc2 = nn.Linear(self.h_FC_dim, self.num_classes)
def forward(self, x_RNN):
self.LSTM.flatten_parameters()
RNN_out, (h_n, h_c) = self.LSTM(x_RNN, None)
""" h_n shape (n_layers, batch, hidden_size), h_c shape (n_layers, batch, hidden_size) """
""" None represents zero initial hidden state. RNN_out has shape=(batch, time_step, output_size) """
# FC layers
x = self.fc1(RNN_out[:, -1, :]) # choose RNN_out at the last time step
x = F.relu(x)
x = F.dropout(x, p=self.drop_p, training=self.training)
x = self.fc2(x)
return x
## ---------------------- end of CRNN module ---------------------- ##
| 15,041 | 37.768041 | 149 | py |
video-classification | video-classification-master/ResNetCRNN/ResNetCRNN_check_prediction.py | import os
import numpy as np
import torch
import torchvision.transforms as transforms
import torch.utils.data as data
import matplotlib.pyplot as plt
from functions import *
from sklearn.preprocessing import OneHotEncoder, LabelEncoder
from sklearn.metrics import accuracy_score
import pandas as pd
import pickle
# set path
data_path = "./jpegs_256/" # define UCF-101 RGB data path
action_name_path = "./UCF101actions.pkl"
save_model_path = "./ResNetCRNN_ckpt/"
# use same encoder CNN saved!
CNN_fc_hidden1, CNN_fc_hidden2 = 1024, 768
CNN_embed_dim = 512 # latent dim extracted by 2D CNN
res_size = 224 # ResNet image size
dropout_p = 0.0 # dropout probability
# use same decoder RNN saved!
RNN_hidden_layers = 3
RNN_hidden_nodes = 512
RNN_FC_dim = 256
# training parameters
k = 101 # number of target category
batch_size = 40
# Select which frame to begin & end in videos
begin_frame, end_frame, skip_frame = 1, 29, 1
with open(action_name_path, 'rb') as f:
action_names = pickle.load(f) # load UCF101 actions names
# convert labels -> category
le = LabelEncoder()
le.fit(action_names)
# show how many classes there are
list(le.classes_)
# convert category -> 1-hot
action_category = le.transform(action_names).reshape(-1, 1)
enc = OneHotEncoder()
enc.fit(action_category)
# # example
# y = ['HorseRace', 'YoYo', 'WalkingWithDog']
# y_onehot = labels2onehot(enc, le, y)
# y2 = onehot2labels(le, y_onehot)
actions = []
fnames = os.listdir(data_path)
all_names = []
for f in fnames:
loc1 = f.find('v_')
loc2 = f.find('_g')
actions.append(f[(loc1 + 2): loc2])
all_names.append(f)
# list all data files
all_X_list = all_names # all video file names
all_y_list = labels2cat(le, actions) # all video labels
# data loading parameters
use_cuda = torch.cuda.is_available() # check if GPU exists
device = torch.device("cuda" if use_cuda else "cpu") # use CPU or GPU
params = {'batch_size': batch_size, 'shuffle': True, 'num_workers': 4, 'pin_memory': True} if use_cuda else {}
transform = transforms.Compose([transforms.Resize([res_size, res_size]),
transforms.ToTensor(),
transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])])
selected_frames = np.arange(begin_frame, end_frame, skip_frame).tolist()
# reset data loader
all_data_params = {'batch_size': batch_size, 'shuffle': False, 'num_workers': 4, 'pin_memory': True} if use_cuda else {}
all_data_loader = data.DataLoader(Dataset_CRNN(data_path, all_X_list, all_y_list, selected_frames, transform=transform), **all_data_params)
# reload CRNN model
cnn_encoder = ResCNNEncoder(fc_hidden1=CNN_fc_hidden1, fc_hidden2=CNN_fc_hidden2, drop_p=dropout_p, CNN_embed_dim=CNN_embed_dim).to(device)
rnn_decoder = DecoderRNN(CNN_embed_dim=CNN_embed_dim, h_RNN_layers=RNN_hidden_layers, h_RNN=RNN_hidden_nodes,
h_FC_dim=RNN_FC_dim, drop_p=dropout_p, num_classes=k).to(device)
cnn_encoder.load_state_dict(torch.load(os.path.join(save_model_path, 'cnn_encoder_epoch41.pth')))
rnn_decoder.load_state_dict(torch.load(os.path.join(save_model_path, 'rnn_decoder_epoch41.pth')))
print('CRNN model reloaded!')
# make all video predictions by reloaded model
print('Predicting all {} videos:'.format(len(all_data_loader.dataset)))
all_y_pred = CRNN_final_prediction([cnn_encoder, rnn_decoder], device, all_data_loader)
# write in pandas dataframe
df = pd.DataFrame(data={'filename': fnames, 'y': cat2labels(le, all_y_list), 'y_pred': cat2labels(le, all_y_pred)})
df.to_pickle("./UCF101_videos_prediction.pkl") # save pandas dataframe
# pd.read_pickle("./all_videos_prediction.pkl")
print('video prediction finished!')
| 3,785 | 32.504425 | 139 | py |
video-classification | video-classification-master/ResNetCRNN/UCF101_ResNetCRNN.py | import os
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torchvision.models as models
import torchvision.transforms as transforms
import torch.utils.data as data
import torchvision
from torch.autograd import Variable
import matplotlib.pyplot as plt
from functions import *
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import OneHotEncoder, LabelEncoder
from sklearn.metrics import accuracy_score
import pickle
# set path
data_path = "./jpegs_256/" # define UCF-101 RGB data path
action_name_path = './UCF101actions.pkl'
save_model_path = "./ResNetCRNN_ckpt/"
# EncoderCNN architecture
CNN_fc_hidden1, CNN_fc_hidden2 = 1024, 768
CNN_embed_dim = 512 # latent dim extracted by 2D CNN
res_size = 224 # ResNet image size
dropout_p = 0.0 # dropout probability
# DecoderRNN architecture
RNN_hidden_layers = 3
RNN_hidden_nodes = 512
RNN_FC_dim = 256
# training parameters
k = 101 # number of target category
epochs = 120 # training epochs
batch_size = 40
learning_rate = 1e-3
log_interval = 10 # interval for displaying training info
# Select which frame to begin & end in videos
begin_frame, end_frame, skip_frame = 1, 29, 1
def train(log_interval, model, device, train_loader, optimizer, epoch):
# set model as training mode
cnn_encoder, rnn_decoder = model
cnn_encoder.train()
rnn_decoder.train()
losses = []
scores = []
N_count = 0 # counting total trained sample in one epoch
for batch_idx, (X, y) in enumerate(train_loader):
# distribute data to device
X, y = X.to(device), y.to(device).view(-1, )
N_count += X.size(0)
optimizer.zero_grad()
output = rnn_decoder(cnn_encoder(X)) # output has dim = (batch, number of classes)
loss = F.cross_entropy(output, y)
losses.append(loss.item())
# to compute accuracy
y_pred = torch.max(output, 1)[1] # y_pred != output
step_score = accuracy_score(y.cpu().data.squeeze().numpy(), y_pred.cpu().data.squeeze().numpy())
scores.append(step_score) # computed on CPU
loss.backward()
optimizer.step()
# show information
if (batch_idx + 1) % log_interval == 0:
print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}, Accu: {:.2f}%'.format(
epoch + 1, N_count, len(train_loader.dataset), 100. * (batch_idx + 1) / len(train_loader), loss.item(), 100 * step_score))
return losses, scores
def validation(model, device, optimizer, test_loader):
# set model as testing mode
cnn_encoder, rnn_decoder = model
cnn_encoder.eval()
rnn_decoder.eval()
test_loss = 0
all_y = []
all_y_pred = []
with torch.no_grad():
for X, y in test_loader:
# distribute data to device
X, y = X.to(device), y.to(device).view(-1, )
output = rnn_decoder(cnn_encoder(X))
loss = F.cross_entropy(output, y, reduction='sum')
test_loss += loss.item() # sum up batch loss
y_pred = output.max(1, keepdim=True)[1] # (y_pred != output) get the index of the max log-probability
# collect all y and y_pred in all batches
all_y.extend(y)
all_y_pred.extend(y_pred)
test_loss /= len(test_loader.dataset)
# compute accuracy
all_y = torch.stack(all_y, dim=0)
all_y_pred = torch.stack(all_y_pred, dim=0)
test_score = accuracy_score(all_y.cpu().data.squeeze().numpy(), all_y_pred.cpu().data.squeeze().numpy())
# show information
print('\nTest set ({:d} samples): Average loss: {:.4f}, Accuracy: {:.2f}%\n'.format(len(all_y), test_loss, 100* test_score))
# save Pytorch models of best record
torch.save(cnn_encoder.state_dict(), os.path.join(save_model_path, 'cnn_encoder_epoch{}.pth'.format(epoch + 1))) # save spatial_encoder
torch.save(rnn_decoder.state_dict(), os.path.join(save_model_path, 'rnn_decoder_epoch{}.pth'.format(epoch + 1))) # save motion_encoder
torch.save(optimizer.state_dict(), os.path.join(save_model_path, 'optimizer_epoch{}.pth'.format(epoch + 1))) # save optimizer
print("Epoch {} model saved!".format(epoch + 1))
return test_loss, test_score
# Detect devices
use_cuda = torch.cuda.is_available() # check if GPU exists
device = torch.device("cuda" if use_cuda else "cpu") # use CPU or GPU
# Data loading parameters
params = {'batch_size': batch_size, 'shuffle': True, 'num_workers': 4, 'pin_memory': True} if use_cuda else {}
# load UCF101 actions names
with open(action_name_path, 'rb') as f:
action_names = pickle.load(f)
# convert labels -> category
le = LabelEncoder()
le.fit(action_names)
# show how many classes there are
list(le.classes_)
# convert category -> 1-hot
action_category = le.transform(action_names).reshape(-1, 1)
enc = OneHotEncoder()
enc.fit(action_category)
# # example
# y = ['HorseRace', 'YoYo', 'WalkingWithDog']
# y_onehot = labels2onehot(enc, le, y)
# y2 = onehot2labels(le, y_onehot)
actions = []
fnames = os.listdir(data_path)
all_names = []
for f in fnames:
loc1 = f.find('v_')
loc2 = f.find('_g')
actions.append(f[(loc1 + 2): loc2])
all_names.append(f)
# list all data files
all_X_list = all_names # all video file names
all_y_list = labels2cat(le, actions) # all video labels
# train, test split
train_list, test_list, train_label, test_label = train_test_split(all_X_list, all_y_list, test_size=0.25, random_state=42)
transform = transforms.Compose([transforms.Resize([res_size, res_size]),
transforms.ToTensor(),
transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])])
selected_frames = np.arange(begin_frame, end_frame, skip_frame).tolist()
train_set, valid_set = Dataset_CRNN(data_path, train_list, train_label, selected_frames, transform=transform), \
Dataset_CRNN(data_path, test_list, test_label, selected_frames, transform=transform)
train_loader = data.DataLoader(train_set, **params)
valid_loader = data.DataLoader(valid_set, **params)
# Create model
cnn_encoder = ResCNNEncoder(fc_hidden1=CNN_fc_hidden1, fc_hidden2=CNN_fc_hidden2, drop_p=dropout_p, CNN_embed_dim=CNN_embed_dim).to(device)
rnn_decoder = DecoderRNN(CNN_embed_dim=CNN_embed_dim, h_RNN_layers=RNN_hidden_layers, h_RNN=RNN_hidden_nodes,
h_FC_dim=RNN_FC_dim, drop_p=dropout_p, num_classes=k).to(device)
# Parallelize model to multiple GPUs
if torch.cuda.device_count() > 1:
print("Using", torch.cuda.device_count(), "GPUs!")
cnn_encoder = nn.DataParallel(cnn_encoder)
rnn_decoder = nn.DataParallel(rnn_decoder)
# Combine all EncoderCNN + DecoderRNN parameters
crnn_params = list(cnn_encoder.module.fc1.parameters()) + list(cnn_encoder.module.bn1.parameters()) + \
list(cnn_encoder.module.fc2.parameters()) + list(cnn_encoder.module.bn2.parameters()) + \
list(cnn_encoder.module.fc3.parameters()) + list(rnn_decoder.parameters())
elif torch.cuda.device_count() == 1:
print("Using", torch.cuda.device_count(), "GPU!")
# Combine all EncoderCNN + DecoderRNN parameters
crnn_params = list(cnn_encoder.fc1.parameters()) + list(cnn_encoder.bn1.parameters()) + \
list(cnn_encoder.fc2.parameters()) + list(cnn_encoder.bn2.parameters()) + \
list(cnn_encoder.fc3.parameters()) + list(rnn_decoder.parameters())
optimizer = torch.optim.Adam(crnn_params, lr=learning_rate)
# record training process
epoch_train_losses = []
epoch_train_scores = []
epoch_test_losses = []
epoch_test_scores = []
# start training
for epoch in range(epochs):
# train, test model
train_losses, train_scores = train(log_interval, [cnn_encoder, rnn_decoder], device, train_loader, optimizer, epoch)
epoch_test_loss, epoch_test_score = validation([cnn_encoder, rnn_decoder], device, optimizer, valid_loader)
# save results
epoch_train_losses.append(train_losses)
epoch_train_scores.append(train_scores)
epoch_test_losses.append(epoch_test_loss)
epoch_test_scores.append(epoch_test_score)
# save all train test results
A = np.array(epoch_train_losses)
B = np.array(epoch_train_scores)
C = np.array(epoch_test_losses)
D = np.array(epoch_test_scores)
np.save('./CRNN_epoch_training_losses.npy', A)
np.save('./CRNN_epoch_training_scores.npy', B)
np.save('./CRNN_epoch_test_loss.npy', C)
np.save('./CRNN_epoch_test_score.npy', D)
# plot
fig = plt.figure(figsize=(10, 4))
plt.subplot(121)
plt.plot(np.arange(1, epochs + 1), A[:, -1]) # train loss (on epoch end)
plt.plot(np.arange(1, epochs + 1), C) # test loss (on epoch end)
plt.title("model loss")
plt.xlabel('epochs')
plt.ylabel('loss')
plt.legend(['train', 'test'], loc="upper left")
# 2nd figure
plt.subplot(122)
plt.plot(np.arange(1, epochs + 1), B[:, -1]) # train accuracy (on epoch end)
plt.plot(np.arange(1, epochs + 1), D) # test accuracy (on epoch end)
plt.title("training scores")
plt.xlabel('epochs')
plt.ylabel('accuracy')
plt.legend(['train', 'test'], loc="upper left")
title = "./fig_UCF101_ResNetCRNN.png"
plt.savefig(title, dpi=600)
# plt.close(fig)
plt.show()
| 9,379 | 34.938697 | 140 | py |
video-classification | video-classification-master/Conv3D/UCF101_3DCNN.py | import os
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torchvision.models as models
import torchvision.transforms as transforms
import torch.utils.data as data
import torchvision
from torch.autograd import Variable
import matplotlib.pyplot as plt
from functions import *
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import OneHotEncoder, LabelEncoder
from sklearn.metrics import accuracy_score
import pickle
# set path
data_path = "./jpegs_256/" # define UCF-101 spatial data path
action_name_path = "./UCF101actions.pkl" # load preprocessed action names
save_model_path = "./Conv3D_ckpt/" # save Pytorch models
# 3D CNN parameters
fc_hidden1, fc_hidden2 = 256, 256
dropout = 0.0 # dropout probability
# training parameters
k = 101 # number of target category
epochs = 15
batch_size = 30
learning_rate = 1e-4
log_interval = 10
img_x, img_y = 256, 342 # resize video 2d frame size
# Select which frame to begin & end in videos
begin_frame, end_frame, skip_frame = 1, 29, 1
def train(log_interval, model, device, train_loader, optimizer, epoch):
# set model as training mode
model.train()
losses = []
scores = []
N_count = 0 # counting total trained sample in one epoch
for batch_idx, (X, y) in enumerate(train_loader):
# distribute data to device
X, y = X.to(device), y.to(device).view(-1, )
N_count += X.size(0)
optimizer.zero_grad()
output = model(X) # output size = (batch, number of classes)
loss = F.cross_entropy(output, y)
losses.append(loss.item())
# to compute accuracy
y_pred = torch.max(output, 1)[1] # y_pred != output
step_score = accuracy_score(y.cpu().data.squeeze().numpy(), y_pred.cpu().data.squeeze().numpy())
scores.append(step_score) # computed on CPU
loss.backward()
optimizer.step()
# show information
if (batch_idx + 1) % log_interval == 0:
print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}, Accu: {:.2f}%'.format(
epoch + 1, N_count, len(train_loader.dataset), 100. * (batch_idx + 1) / len(train_loader), loss.item(), 100 * step_score))
return losses, scores
def validation(model, device, optimizer, test_loader):
# set model as testing mode
model.eval()
test_loss = 0
all_y = []
all_y_pred = []
with torch.no_grad():
for X, y in test_loader:
# distribute data to device
X, y = X.to(device), y.to(device).view(-1, )
output = model(X)
loss = F.cross_entropy(output, y, reduction='sum')
test_loss += loss.item() # sum up batch loss
y_pred = output.max(1, keepdim=True)[1] # (y_pred != output) get the index of the max log-probability
# collect all y and y_pred in all batches
all_y.extend(y)
all_y_pred.extend(y_pred)
test_loss /= len(test_loader.dataset)
# to compute accuracy
all_y = torch.stack(all_y, dim=0)
all_y_pred = torch.stack(all_y_pred, dim=0)
test_score = accuracy_score(all_y.cpu().data.squeeze().numpy(), all_y_pred.cpu().data.squeeze().numpy())
# show information
print('\nTest set ({:d} samples): Average loss: {:.4f}, Accuracy: {:.2f}%\n'.format(len(all_y), test_loss, 100* test_score))
# save Pytorch models of best record
torch.save(model.state_dict(), os.path.join(save_model_path, '3dcnn_epoch{}.pth'.format(epoch + 1))) # save spatial_encoder
torch.save(optimizer.state_dict(), os.path.join(save_model_path, '3dcnn_optimizer_epoch{}.pth'.format(epoch + 1))) # save optimizer
print("Epoch {} model saved!".format(epoch + 1))
return test_loss, test_score
# Detect devices
use_cuda = torch.cuda.is_available() # check if GPU exists
device = torch.device("cuda" if use_cuda else "cpu") # use CPU or GPU
# load UCF101 actions names
params = {'batch_size': batch_size, 'shuffle': True, 'num_workers': 4, 'pin_memory': True} if use_cuda else {}
# load UCF101 actions names
with open(action_name_path, 'rb') as f:
action_names = pickle.load(f) # load UCF101 actions names
# convert labels -> category
le = LabelEncoder()
le.fit(action_names)
# show how many classes there are
list(le.classes_)
# convert category -> 1-hot
action_category = le.transform(action_names).reshape(-1, 1)
enc = OneHotEncoder()
enc.fit(action_category)
# # example
# y = ['HorseRace', 'YoYo', 'WalkingWithDog']
# y_onehot = labels2onehot(enc, le, y)
# y2 = onehot2labels(le, y_onehot)
actions = []
fnames = os.listdir(data_path)
all_names = []
for f in fnames:
loc1 = f.find('v_')
loc2 = f.find('_g')
actions.append(f[(loc1 + 2): loc2])
all_names.append(f)
# list all data files
all_X_list = all_names # all video file names
all_y_list = labels2cat(le, actions) # all video labels
# train, test split
train_list, test_list, train_label, test_label = train_test_split(all_X_list, all_y_list, test_size=0.25, random_state=42)
# image transformation
transform = transforms.Compose([transforms.Resize([img_x, img_y]),
transforms.ToTensor(),
transforms.Normalize(mean=[0.5], std=[0.5])])
selected_frames = np.arange(begin_frame, end_frame, skip_frame).tolist()
train_set, valid_set = Dataset_3DCNN(data_path, train_list, train_label, selected_frames, transform=transform), \
Dataset_3DCNN(data_path, test_list, test_label, selected_frames, transform=transform)
train_loader = data.DataLoader(train_set, **params)
valid_loader = data.DataLoader(valid_set, **params)
# create model
cnn3d = CNN3D(t_dim=len(selected_frames), img_x=img_x, img_y=img_y,
drop_p=dropout, fc_hidden1=fc_hidden1, fc_hidden2=fc_hidden2, num_classes=k).to(device)
# Parallelize model to multiple GPUs
if torch.cuda.device_count() > 1:
print("Using", torch.cuda.device_count(), "GPUs!")
cnn3d = nn.DataParallel(cnn3d)
optimizer = torch.optim.Adam(cnn3d.parameters(), lr=learning_rate) # optimize all cnn parameters
# record training process
epoch_train_losses = []
epoch_train_scores = []
epoch_test_losses = []
epoch_test_scores = []
# start training
for epoch in range(epochs):
# train, test model
train_losses, train_scores = train(log_interval, cnn3d, device, train_loader, optimizer, epoch)
epoch_test_loss, epoch_test_score = validation(cnn3d, device, optimizer, valid_loader)
# save results
epoch_train_losses.append(train_losses)
epoch_train_scores.append(train_scores)
epoch_test_losses.append(epoch_test_loss)
epoch_test_scores.append(epoch_test_score)
# save all train test results
A = np.array(epoch_train_losses)
B = np.array(epoch_train_scores)
C = np.array(epoch_test_losses)
D = np.array(epoch_test_scores)
np.save('./3DCNN_epoch_training_losses.npy', A)
np.save('./3DCNN_epoch_training_scores.npy', B)
np.save('./3DCNN_epoch_test_loss.npy', C)
np.save('./3DCNN_epoch_test_score.npy', D)
# plot
fig = plt.figure(figsize=(10, 4))
plt.subplot(121)
plt.plot(np.arange(1, epochs + 1), A[:, -1]) # train loss (on epoch end)
plt.plot(np.arange(1, epochs + 1), C) # test loss (on epoch end)
plt.title("model loss")
plt.xlabel('epochs')
plt.ylabel('loss')
plt.legend(['train', 'test'], loc="upper left")
# 2nd figure
plt.subplot(122)
plt.plot(np.arange(1, epochs + 1), B[:, -1]) # train accuracy (on epoch end)
plt.plot(np.arange(1, epochs + 1), D) # test accuracy (on epoch end)
# plt.plot(histories.losses_val)
plt.title("training scores")
plt.xlabel('epochs')
plt.ylabel('accuracy')
plt.legend(['train', 'test'], loc="upper left")
title = "./fig_UCF101_3DCNN.png"
plt.savefig(title, dpi=600)
# plt.close(fig)
plt.show()
| 7,929 | 32.744681 | 140 | py |
video-classification | video-classification-master/Conv3D/functions.py | import os
import numpy as np
from PIL import Image
from torch.utils import data
import torch
import torch.nn as nn
import torch.nn.functional as F
import torchvision.models as models
import torchvision.transforms as transforms
from tqdm import tqdm
## ------------------- label conversion tools ------------------ ##
def labels2cat(label_encoder, list):
return label_encoder.transform(list)
def labels2onehot(OneHotEncoder, label_encoder, list):
return OneHotEncoder.transform(label_encoder.transform(list).reshape(-1, 1)).toarray()
def onehot2labels(label_encoder, y_onehot):
return label_encoder.inverse_transform(np.where(y_onehot == 1)[1]).tolist()
def cat2labels(label_encoder, y_cat):
return label_encoder.inverse_transform(y_cat).tolist()
## ---------------------- Dataloaders ---------------------- ##
# for 3DCNN
class Dataset_3DCNN(data.Dataset):
"Characterizes a dataset for PyTorch"
def __init__(self, data_path, folders, labels, frames, transform=None):
"Initialization"
self.data_path = data_path
self.labels = labels
self.folders = folders
self.transform = transform
self.frames = frames
def __len__(self):
"Denotes the total number of samples"
return len(self.folders)
def read_images(self, path, selected_folder, use_transform):
X = []
for i in self.frames:
image = Image.open(os.path.join(path, selected_folder, 'frame{:06d}.jpg'.format(i))).convert('L')
if use_transform is not None:
image = use_transform(image)
X.append(image.squeeze_(0))
X = torch.stack(X, dim=0)
return X
def __getitem__(self, index):
"Generates one sample of data"
# Select sample
folder = self.folders[index]
# Load data
X = self.read_images(self.data_path, folder, self.transform).unsqueeze_(0) # (input) spatial images
y = torch.LongTensor([self.labels[index]]) # (labels) LongTensor are for int64 instead of FloatTensor
# print(X.shape)
return X, y
# for CRNN
class Dataset_CRNN(data.Dataset):
"Characterizes a dataset for PyTorch"
def __init__(self, data_path, folders, labels, frames, transform=None):
"Initialization"
self.data_path = data_path
self.labels = labels
self.folders = folders
self.transform = transform
self.frames = frames
def __len__(self):
"Denotes the total number of samples"
return len(self.folders)
def read_images(self, path, selected_folder, use_transform):
X = []
for i in self.frames:
image = Image.open(os.path.join(path, selected_folder, 'frame{:06d}.jpg'.format(i)))
if use_transform is not None:
image = use_transform(image)
X.append(image)
X = torch.stack(X, dim=0)
return X
def __getitem__(self, index):
"Generates one sample of data"
# Select sample
folder = self.folders[index]
# Load data
X = self.read_images(self.data_path, folder, self.transform) # (input) spatial images
y = torch.LongTensor([self.labels[index]]) # (labels) LongTensor are for int64 instead of FloatTensor
# print(X.shape)
return X, y
## ---------------------- end of Dataloaders ---------------------- ##
## -------------------- (reload) model prediction ---------------------- ##
def Conv3d_final_prediction(model, device, loader):
model.eval()
all_y_pred = []
with torch.no_grad():
for batch_idx, (X, y) in enumerate(tqdm(loader)):
# distribute data to device
X = X.to(device)
output = model(X)
y_pred = output.max(1, keepdim=True)[1] # location of max log-probability as prediction
all_y_pred.extend(y_pred.cpu().data.squeeze().numpy().tolist())
return all_y_pred
def CRNN_final_prediction(model, device, loader):
cnn_encoder, rnn_decoder = model
cnn_encoder.eval()
rnn_decoder.eval()
all_y_pred = []
with torch.no_grad():
for batch_idx, (X, y) in enumerate(tqdm(loader)):
# distribute data to device
X = X.to(device)
output = rnn_decoder(cnn_encoder(X))
y_pred = output.max(1, keepdim=True)[1] # location of max log-probability as prediction
all_y_pred.extend(y_pred.cpu().data.squeeze().numpy().tolist())
return all_y_pred
## -------------------- end of model prediction ---------------------- ##
## ------------------------ 3D CNN module ---------------------- ##
def conv3D_output_size(img_size, padding, kernel_size, stride):
# compute output shape of conv3D
outshape = (np.floor((img_size[0] + 2 * padding[0] - (kernel_size[0] - 1) - 1) / stride[0] + 1).astype(int),
np.floor((img_size[1] + 2 * padding[1] - (kernel_size[1] - 1) - 1) / stride[1] + 1).astype(int),
np.floor((img_size[2] + 2 * padding[2] - (kernel_size[2] - 1) - 1) / stride[2] + 1).astype(int))
return outshape
class CNN3D(nn.Module):
def __init__(self, t_dim=120, img_x=90, img_y=120, drop_p=0.2, fc_hidden1=256, fc_hidden2=128, num_classes=50):
super(CNN3D, self).__init__()
# set video dimension
self.t_dim = t_dim
self.img_x = img_x
self.img_y = img_y
# fully connected layer hidden nodes
self.fc_hidden1, self.fc_hidden2 = fc_hidden1, fc_hidden2
self.drop_p = drop_p
self.num_classes = num_classes
self.ch1, self.ch2 = 32, 48
self.k1, self.k2 = (5, 5, 5), (3, 3, 3) # 3d kernel size
self.s1, self.s2 = (2, 2, 2), (2, 2, 2) # 3d strides
self.pd1, self.pd2 = (0, 0, 0), (0, 0, 0) # 3d padding
# compute conv1 & conv2 output shape
self.conv1_outshape = conv3D_output_size((self.t_dim, self.img_x, self.img_y), self.pd1, self.k1, self.s1)
self.conv2_outshape = conv3D_output_size(self.conv1_outshape, self.pd2, self.k2, self.s2)
self.conv1 = nn.Conv3d(in_channels=1, out_channels=self.ch1, kernel_size=self.k1, stride=self.s1,
padding=self.pd1)
self.bn1 = nn.BatchNorm3d(self.ch1)
self.conv2 = nn.Conv3d(in_channels=self.ch1, out_channels=self.ch2, kernel_size=self.k2, stride=self.s2,
padding=self.pd2)
self.bn2 = nn.BatchNorm3d(self.ch2)
self.relu = nn.ReLU(inplace=True)
self.drop = nn.Dropout3d(self.drop_p)
self.pool = nn.MaxPool3d(2)
self.fc1 = nn.Linear(self.ch2 * self.conv2_outshape[0] * self.conv2_outshape[1] * self.conv2_outshape[2],
self.fc_hidden1) # fully connected hidden layer
self.fc2 = nn.Linear(self.fc_hidden1, self.fc_hidden2)
self.fc3 = nn.Linear(self.fc_hidden2, self.num_classes) # fully connected layer, output = multi-classes
def forward(self, x_3d):
# Conv 1
x = self.conv1(x_3d)
x = self.bn1(x)
x = self.relu(x)
x = self.drop(x)
# Conv 2
x = self.conv2(x)
x = self.bn2(x)
x = self.relu(x)
x = self.drop(x)
# FC 1 and 2
x = x.view(x.size(0), -1)
x = F.relu(self.fc1(x))
x = F.relu(self.fc2(x))
x = F.dropout(x, p=self.drop_p, training=self.training)
x = self.fc3(x)
return x
## --------------------- end of 3D CNN module ---------------- ##
## ------------------------ CRNN module ---------------------- ##
def conv2D_output_size(img_size, padding, kernel_size, stride):
# compute output shape of conv2D
outshape = (np.floor((img_size[0] + 2 * padding[0] - (kernel_size[0] - 1) - 1) / stride[0] + 1).astype(int),
np.floor((img_size[1] + 2 * padding[1] - (kernel_size[1] - 1) - 1) / stride[1] + 1).astype(int))
return outshape
# 2D CNN encoder train from scratch (no transfer learning)
class EncoderCNN(nn.Module):
def __init__(self, img_x=90, img_y=120, fc_hidden1=512, fc_hidden2=512, drop_p=0.3, CNN_embed_dim=300):
super(EncoderCNN, self).__init__()
self.img_x = img_x
self.img_y = img_y
self.CNN_embed_dim = CNN_embed_dim
# CNN architechtures
self.ch1, self.ch2, self.ch3, self.ch4 = 32, 64, 128, 256
self.k1, self.k2, self.k3, self.k4 = (5, 5), (3, 3), (3, 3), (3, 3) # 2d kernal size
self.s1, self.s2, self.s3, self.s4 = (2, 2), (2, 2), (2, 2), (2, 2) # 2d strides
self.pd1, self.pd2, self.pd3, self.pd4 = (0, 0), (0, 0), (0, 0), (0, 0) # 2d padding
# conv2D output shapes
self.conv1_outshape = conv2D_output_size((self.img_x, self.img_y), self.pd1, self.k1, self.s1) # Conv1 output shape
self.conv2_outshape = conv2D_output_size(self.conv1_outshape, self.pd2, self.k2, self.s2)
self.conv3_outshape = conv2D_output_size(self.conv2_outshape, self.pd3, self.k3, self.s3)
self.conv4_outshape = conv2D_output_size(self.conv3_outshape, self.pd4, self.k4, self.s4)
# fully connected layer hidden nodes
self.fc_hidden1, self.fc_hidden2 = fc_hidden1, fc_hidden2
self.drop_p = drop_p
self.conv1 = nn.Sequential(
nn.Conv2d(in_channels=3, out_channels=self.ch1, kernel_size=self.k1, stride=self.s1, padding=self.pd1),
nn.BatchNorm2d(self.ch1, momentum=0.01),
nn.ReLU(inplace=True),
# nn.MaxPool2d(kernel_size=2),
)
self.conv2 = nn.Sequential(
nn.Conv2d(in_channels=self.ch1, out_channels=self.ch2, kernel_size=self.k2, stride=self.s2, padding=self.pd2),
nn.BatchNorm2d(self.ch2, momentum=0.01),
nn.ReLU(inplace=True),
# nn.MaxPool2d(kernel_size=2),
)
self.conv3 = nn.Sequential(
nn.Conv2d(in_channels=self.ch2, out_channels=self.ch3, kernel_size=self.k3, stride=self.s3, padding=self.pd3),
nn.BatchNorm2d(self.ch3, momentum=0.01),
nn.ReLU(inplace=True),
# nn.MaxPool2d(kernel_size=2),
)
self.conv4 = nn.Sequential(
nn.Conv2d(in_channels=self.ch3, out_channels=self.ch4, kernel_size=self.k4, stride=self.s4, padding=self.pd4),
nn.BatchNorm2d(self.ch4, momentum=0.01),
nn.ReLU(inplace=True),
# nn.MaxPool2d(kernel_size=2),
)
self.drop = nn.Dropout2d(self.drop_p)
self.pool = nn.MaxPool2d(2)
self.fc1 = nn.Linear(self.ch4 * self.conv4_outshape[0] * self.conv4_outshape[1], self.fc_hidden1) # fully connected layer, output k classes
self.fc2 = nn.Linear(self.fc_hidden1, self.fc_hidden2)
self.fc3 = nn.Linear(self.fc_hidden2, self.CNN_embed_dim) # output = CNN embedding latent variables
def forward(self, x_3d):
cnn_embed_seq = []
for t in range(x_3d.size(1)):
# CNNs
x = self.conv1(x_3d[:, t, :, :, :])
x = self.conv2(x)
x = self.conv3(x)
x = self.conv4(x)
x = x.view(x.size(0), -1) # flatten the output of conv
# FC layers
x = F.relu(self.fc1(x))
# x = F.dropout(x, p=self.drop_p, training=self.training)
x = F.relu(self.fc2(x))
x = F.dropout(x, p=self.drop_p, training=self.training)
x = self.fc3(x)
cnn_embed_seq.append(x)
# swap time and sample dim such that (sample dim, time dim, CNN latent dim)
cnn_embed_seq = torch.stack(cnn_embed_seq, dim=0).transpose_(0, 1)
# cnn_embed_seq: shape=(batch, time_step, input_size)
return cnn_embed_seq
# 2D CNN encoder using ResNet-152 pretrained
class ResCNNEncoder(nn.Module):
def __init__(self, fc_hidden1=512, fc_hidden2=512, drop_p=0.3, CNN_embed_dim=300):
"""Load the pretrained ResNet-152 and replace top fc layer."""
super(ResCNNEncoder, self).__init__()
self.fc_hidden1, self.fc_hidden2 = fc_hidden1, fc_hidden2
self.drop_p = drop_p
resnet = models.resnet152(pretrained=True)
modules = list(resnet.children())[:-1] # delete the last fc layer.
self.resnet = nn.Sequential(*modules)
self.fc1 = nn.Linear(resnet.fc.in_features, fc_hidden1)
self.bn1 = nn.BatchNorm1d(fc_hidden1, momentum=0.01)
self.fc2 = nn.Linear(fc_hidden1, fc_hidden2)
self.bn2 = nn.BatchNorm1d(fc_hidden2, momentum=0.01)
self.fc3 = nn.Linear(fc_hidden2, CNN_embed_dim)
def forward(self, x_3d):
cnn_embed_seq = []
for t in range(x_3d.size(1)):
# ResNet CNN
with torch.no_grad():
x = self.resnet(x_3d[:, t, :, :, :]) # ResNet
x = x.view(x.size(0), -1) # flatten output of conv
# FC layers
x = self.bn1(self.fc1(x))
x = F.relu(x)
x = self.bn2(self.fc2(x))
x = F.relu(x)
x = F.dropout(x, p=self.drop_p, training=self.training)
x = self.fc3(x)
cnn_embed_seq.append(x)
# swap time and sample dim such that (sample dim, time dim, CNN latent dim)
cnn_embed_seq = torch.stack(cnn_embed_seq, dim=0).transpose_(0, 1)
# cnn_embed_seq: shape=(batch, time_step, input_size)
return cnn_embed_seq
class DecoderRNN(nn.Module):
def __init__(self, CNN_embed_dim=300, h_RNN_layers=3, h_RNN=256, h_FC_dim=128, drop_p=0.3, num_classes=50):
super(DecoderRNN, self).__init__()
self.RNN_input_size = CNN_embed_dim
self.h_RNN_layers = h_RNN_layers # RNN hidden layers
self.h_RNN = h_RNN # RNN hidden nodes
self.h_FC_dim = h_FC_dim
self.drop_p = drop_p
self.num_classes = num_classes
self.LSTM = nn.LSTM(
input_size=self.RNN_input_size,
hidden_size=self.h_RNN,
num_layers=h_RNN_layers,
batch_first=True, # input & output will has batch size as 1s dimension. e.g. (batch, time_step, input_size)
)
self.fc1 = nn.Linear(self.h_RNN, self.h_FC_dim)
self.fc2 = nn.Linear(self.h_FC_dim, self.num_classes)
def forward(self, x_RNN):
self.LSTM.flatten_parameters()
RNN_out, (h_n, h_c) = self.LSTM(x_RNN, None)
""" h_n shape (n_layers, batch, hidden_size), h_c shape (n_layers, batch, hidden_size) """
""" None represents zero initial hidden state. RNN_out has shape=(batch, time_step, output_size) """
# FC layers
x = self.fc1(RNN_out[:, -1, :]) # choose RNN_out at the last time step
x = F.relu(x)
x = F.dropout(x, p=self.drop_p, training=self.training)
x = self.fc2(x)
return x
## ---------------------- end of CRNN module ---------------------- ##
| 15,041 | 37.768041 | 149 | py |
video-classification | video-classification-master/Conv3D/Conv3D_check_prediction.py | import os
import numpy as np
import torch
import torchvision.transforms as transforms
import torch.utils.data as data
import matplotlib.pyplot as plt
from functions import *
from sklearn.preprocessing import OneHotEncoder, LabelEncoder
from sklearn.metrics import accuracy_score
import pandas as pd
import pickle
# set path
data_path = "./jpegs_256/" # define UCF-101 RGB data path
action_name_path = './UCF101actions.pkl'
save_model_path = "./Conv3D_ckpt/"
# 3D CNN parameters
fc_hidden1, fc_hidden2 = 256, 256
dropout = 0.0 # dropout probability
# training parameters
k = 101 # number of target category
batch_size = 30
img_x, img_y = 256, 342 # resize video 2d frame size
# Select which frame to begin & end in videos
begin_frame, end_frame, skip_frame = 1, 29, 1
with open(action_name_path, 'rb') as f:
action_names = pickle.load(f) # load UCF101 actions names
# convert labels -> category
le = LabelEncoder()
le.fit(action_names)
# show how many classes there are
list(le.classes_)
# convert category -> 1-hot
action_category = le.transform(action_names).reshape(-1, 1)
enc = OneHotEncoder()
enc.fit(action_category)
# # example
# y = ['HorseRace', 'YoYo', 'WalkingWithDog']
# y_onehot = labels2onehot(enc, le, y)
# y2 = onehot2labels(le, y_onehot)
actions = []
fnames = os.listdir(data_path)
all_names = []
for f in fnames:
loc1 = f.find('v_')
loc2 = f.find('_g')
actions.append(f[(loc1 + 2): loc2])
all_names.append(f)
# list all data files
all_X_list = all_names # all video file names
all_y_list = labels2cat(le, actions) # all video labels
# data loading parameters
use_cuda = torch.cuda.is_available() # check if GPU exists
device = torch.device("cuda" if use_cuda else "cpu") # use CPU or GPU
params = {'batch_size': batch_size, 'shuffle': True, 'num_workers': 4, 'pin_memory': True} if use_cuda else {}
# image transformation
transform = transforms.Compose([transforms.Resize([img_x, img_y]),
transforms.ToTensor(),
transforms.Normalize(mean=[0.5], std=[0.5])])
selected_frames = np.arange(begin_frame, end_frame, skip_frame).tolist()
# reset data loader
all_data_params = {'batch_size': batch_size, 'shuffle': False, 'num_workers': 4, 'pin_memory': True} if use_cuda else {}
all_data_loader = data.DataLoader(Dataset_3DCNN(data_path, all_X_list, all_y_list, selected_frames, transform=transform), **all_data_params)
# reload CRNN model
cnn3d = CNN3D(t_dim=len(selected_frames), img_x=img_x, img_y=img_y,
drop_p=dropout, fc_hidden1=fc_hidden1, fc_hidden2=fc_hidden2, num_classes=k).to(device)
# Parallelize model to multiple GPUs
if torch.cuda.device_count() > 1:
print("Using", torch.cuda.device_count(), "GPUs!")
cnn3d = nn.DataParallel(cnn3d)
cnn3d.load_state_dict(torch.load(os.path.join(save_model_path, '3dcnn_epoch6.pth')))
print('CNN3D model reloaded!')
# make all video predictions by reloaded model
print('Predicting all {} videos:'.format(len(all_data_loader.dataset)))
all_y_pred = Conv3d_final_prediction(cnn3d, device, all_data_loader)
# write in pandas dataframe
df = pd.DataFrame(data={'filename': fnames, 'y': cat2labels(le, all_y_list), 'y_pred': cat2labels(le, all_y_pred)})
df.to_pickle("./UCF101_videos_prediction.pkl") # save pandas dataframe
# pd.read_pickle("./all_videos_prediction.pkl")
print('video prediction finished!')
| 3,461 | 30.189189 | 140 | py |
video-classification | video-classification-master/ResNetCRNN_varylength/UCF101_ResNetCRNN_varlen.py | import os
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torchvision.models as models
import torchvision.transforms as transforms
from torch.optim.lr_scheduler import ReduceLROnPlateau
import torch.utils.data as data
import torchvision
from torch.autograd import Variable
import matplotlib.pyplot as plt
from functions import *
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import OneHotEncoder, LabelEncoder
from sklearn.metrics import accuracy_score
import pickle
# set visible CUDA device
# os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
# os.environ["CUDA_VISIBLE_DEVICES"] = "0, 1"
# set path
data_path = "/mnt/data/Datasets/UCF101/ucf101_jpegs_256/jpegs_256/" # define UCF-101 spatial data path
action_name_path = './UCF101actions.pkl'
frame_slice_file = './UCF101_frame_count.pkl'
save_model_path = "./model_ckpt"
# EncoderCNN architecture
CNN_fc_hidden1, CNN_fc_hidden2 = 1024, 768
CNN_embed_dim = 512 # latent dim extracted by 2D CNN
res_size = 224 # ResNet image size
dropout_p = 0.0 # dropout probability
# DecoderRNN architecture
RNN_hidden_layers = 3
RNN_hidden_nodes = 512
RNN_FC_dim = 256
# training parameters
k = 101 # number of target category
epochs = 150 # training epochs
batch_size = 120
learning_rate = 1e-3
lr_patience = 15
log_interval = 10 # interval for displaying training info
# Select frames to begin & end in videos
select_frame = {'begin': 1, 'end': 100, 'skip': 2}
def check_mkdir(dir_name):
if not os.path.exists(dir_name):
os.mkdir(dir_name)
def train(log_interval, model, device, train_loader, optimizer, epoch):
# set model as training mode
cnn_encoder, rnn_decoder = model
cnn_encoder.train()
rnn_decoder.train()
epoch_loss, all_y, all_y_pred = 0, [], []
N_count = 0 # counting total trained sample in one epoch
for batch_idx, (X, X_lengths, y) in enumerate(train_loader):
# distribute data to device
X, X_lengths, y = X.to(device), X_lengths.to(device).view(-1, ), y.to(device).view(-1, )
N_count += X.size(0)
optimizer.zero_grad()
output = rnn_decoder(cnn_encoder(X), X_lengths) # output has dim = (batch, number of classes)
loss = F.cross_entropy(output, y) # mini-batch loss
epoch_loss += F.cross_entropy(output, y, reduction='sum').item() # sum up mini-batch loss
y_pred = torch.max(output, 1)[1] # y_pred != output
# collect all y and y_pred in all mini-batches
all_y.extend(y)
all_y_pred.extend(y_pred)
# to compute accuracy
step_score = accuracy_score(y.cpu().data.squeeze().numpy(), y_pred.cpu().data.squeeze().numpy())
loss.backward()
optimizer.step()
# show information
if (batch_idx + 1) % log_interval == 0:
print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}, Accu: {:.2f}%'.format(
epoch + 1, N_count, len(train_loader.dataset), 100. * (batch_idx + 1) / len(train_loader), loss.item(), 100 * step_score))
epoch_loss /= len(train_loader)
# compute accuracy
all_y = torch.stack(all_y, dim=0)
all_y_pred = torch.stack(all_y_pred, dim=0)
epoch_score = accuracy_score(all_y.cpu().data.squeeze().numpy(), all_y_pred.cpu().data.squeeze().numpy())
return epoch_loss, epoch_score
def validation(model, device, optimizer, test_loader):
# set model as testing mode
cnn_encoder, rnn_decoder = model
cnn_encoder.eval()
rnn_decoder.eval()
test_loss = 0
all_y, all_y_pred = [], []
with torch.no_grad():
for X, X_lengths, y in test_loader:
# distribute data to device
X, X_lengths, y = X.to(device), X_lengths.to(device).view(-1, ), y.to(device).view(-1, )
output = rnn_decoder(cnn_encoder(X), X_lengths)
loss = F.cross_entropy(output, y, reduction='sum')
test_loss += loss.item() # sum up minibatch loss
y_pred = output.max(1, keepdim=True)[1] # (y_pred != output) get the index of the max log-probability
# collect all y and y_pred in all batches
all_y.extend(y)
all_y_pred.extend(y_pred)
test_loss /= len(test_loader.dataset)
# compute accuracy
all_y = torch.stack(all_y, dim=0)
all_y_pred = torch.stack(all_y_pred, dim=0)
test_score = accuracy_score(all_y.cpu().data.squeeze().numpy(), all_y_pred.cpu().data.squeeze().numpy())
# show information
print('\nTest set ({:d} samples): Average loss: {:.4f}, Accuracy: {:.2f}%\n'.format(len(all_y), test_loss, 100* test_score))
# save Pytorch models of best record
check_mkdir(save_model_path)
torch.save(cnn_encoder.state_dict(), os.path.join(save_model_path, 'cnn_encoder_epoch{}.pth'.format(epoch + 1))) # save spatial_encoder
torch.save(rnn_decoder.state_dict(), os.path.join(save_model_path, 'rnn_decoder_epoch{}.pth'.format(epoch + 1))) # save motion_encoder
torch.save(optimizer.state_dict(), os.path.join(save_model_path, 'optimizer_epoch{}.pth'.format(epoch + 1))) # save optimizer
print("Epoch {} model saved!".format(epoch + 1))
return test_loss, test_score
# Detect devices
use_cuda = torch.cuda.is_available() # check if GPU exists
device = torch.device("cuda" if use_cuda else "cpu") # use CPU or GPU
# Data loading parameters
params = {'batch_size': batch_size, 'shuffle': True, 'num_workers': 8, 'pin_memory': True} if use_cuda else {}
# load UCF101 actions names
with open(action_name_path, 'rb') as f:
action_names = pickle.load(f)
# load UCF101 video length
with open(frame_slice_file, 'rb') as f:
slice_count = pickle.load(f)
# convert labels -> category
le = LabelEncoder()
le.fit(action_names)
# show how many classes there are
list(le.classes_)
# convert category -> 1-hot
action_category = le.transform(action_names).reshape(-1, 1)
enc = OneHotEncoder()
enc.fit(action_category)
# # example
# y = ['HorseRace', 'YoYo', 'WalkingWithDog']
# y_onehot = labels2onehot(enc, le, y)
# y2 = onehot2labels(le, y_onehot)
actions = []
fnames = os.listdir(data_path)
all_names = []
all_length = [] # each video length
for f in fnames:
loc1 = f.find('v_')
loc2 = f.find('_g')
actions.append(f[(loc1 + 2): loc2])
all_names.append(os.path.join(data_path, f))
all_length.append(slice_count[f])
# list all data files
all_X_list = list(zip(all_names, all_length)) # video (names, length)
all_y_list = labels2cat(le, actions) # video labels
# all_X_list = all_X_list[:200] # use only a few samples for testing
# all_y_list = all_y_list[:200]
# train, test split
train_list, test_list, train_label, test_label = train_test_split(all_X_list, all_y_list, test_size=0.25, random_state=42)
transform = transforms.Compose([transforms.Resize([res_size, res_size]),
transforms.ToTensor(),
transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])])
train_set, valid_set = Dataset_CRNN_varlen(data_path, train_list, train_label, select_frame, transform=transform), \
Dataset_CRNN_varlen(data_path, test_list, test_label, select_frame, transform=transform)
train_loader = data.DataLoader(train_set, **params)
valid_loader = data.DataLoader(valid_set, **params)
# Create model
cnn_encoder = ResCNNEncoder(fc_hidden1=CNN_fc_hidden1, fc_hidden2=CNN_fc_hidden2, drop_p=dropout_p, CNN_embed_dim=CNN_embed_dim).to(device)
rnn_decoder = DecoderRNN_varlen(CNN_embed_dim=CNN_embed_dim, h_RNN_layers=RNN_hidden_layers, h_RNN=RNN_hidden_nodes,
h_FC_dim=RNN_FC_dim, drop_p=dropout_p, num_classes=k).to(device)
# Combine all EncoderCNN + DecoderRNN parameters
print("Using", torch.cuda.device_count(), "GPU!")
if torch.cuda.device_count() > 1:
# Parallelize model to multiple GPUs
cnn_encoder = nn.DataParallel(cnn_encoder)
rnn_decoder = nn.DataParallel(rnn_decoder)
crnn_params = list(cnn_encoder.module.fc1.parameters()) + list(cnn_encoder.module.bn1.parameters()) + \
list(cnn_encoder.module.fc2.parameters()) + list(cnn_encoder.module.bn2.parameters()) + \
list(cnn_encoder.module.fc3.parameters()) + list(rnn_decoder.parameters())
elif torch.cuda.device_count() == 1:
crnn_params = list(cnn_encoder.fc1.parameters()) + list(cnn_encoder.bn1.parameters()) + \
list(cnn_encoder.fc2.parameters()) + list(cnn_encoder.bn2.parameters()) + \
list(cnn_encoder.fc3.parameters()) + list(rnn_decoder.parameters())
optimizer = torch.optim.Adam(crnn_params, lr=learning_rate)
scheduler = ReduceLROnPlateau(optimizer, 'min', patience=lr_patience, min_lr=1e-10, verbose=True)
# record training process
epoch_train_losses = []
epoch_train_scores = []
epoch_test_losses = []
epoch_test_scores = []
# start training
for epoch in range(epochs):
# train, test model
epoch_train_loss, epoch_train_score = train(log_interval, [cnn_encoder, rnn_decoder], device, train_loader, optimizer, epoch)
epoch_test_loss, epoch_test_score = validation([cnn_encoder, rnn_decoder], device, optimizer, valid_loader)
scheduler.step(epoch_test_loss)
# save results
epoch_train_losses.append(epoch_train_loss)
epoch_train_scores.append(epoch_train_score)
epoch_test_losses.append(epoch_test_loss)
epoch_test_scores.append(epoch_test_score)
# save all train test results
A = np.array(epoch_train_losses)
B = np.array(epoch_train_scores)
C = np.array(epoch_test_losses)
D = np.array(epoch_test_scores)
np.save('./CRNN_varlen_epoch_training_loss.npy', A)
np.save('./CRNN_varlen_epoch_training_score.npy', B)
np.save('./CRNN_varlen_epoch_test_loss.npy', C)
np.save('./CRNN_varlen_epoch_test_score.npy', D)
# plot
fig = plt.figure(figsize=(10, 4))
plt.subplot(121)
plt.plot(np.arange(1, epochs + 1), A) # train loss (on epoch end)
plt.plot(np.arange(1, epochs + 1), C) # test loss (on epoch end)
plt.title("model loss")
plt.xlabel('epochs')
plt.ylabel('loss')
plt.legend(['train', 'test'], loc="upper left")
# 2nd figure (accuracy)
plt.subplot(122)
plt.plot(np.arange(1, epochs + 1), B) # train accuracy (on epoch end)
plt.plot(np.arange(1, epochs + 1), D) # test accuracy (on epoch end)
plt.title("training scores")
plt.xlabel('epochs')
plt.ylabel('accuracy')
plt.legend(['train', 'test'], loc="upper left")
title = "./fig_UCF101_ResNetCRNN.png"
plt.savefig(title, dpi=600)
# plt.close(fig)
plt.show()
| 10,635 | 36.059233 | 140 | py |
video-classification | video-classification-master/ResNetCRNN_varylength/functions.py | import os
import numpy as np
from PIL import Image
from torch.utils import data
import torch
import torch.nn as nn
import torch.nn.functional as F
import torchvision.models as models
import torchvision.transforms as transforms
from tqdm import tqdm
## ------------------- label conversion tools ------------------ ##
def labels2cat(label_encoder, list):
return label_encoder.transform(list)
def labels2onehot(OneHotEncoder, label_encoder, list):
return OneHotEncoder.transform(label_encoder.transform(list).reshape(-1, 1)).toarray()
def onehot2labels(label_encoder, y_onehot):
return label_encoder.inverse_transform(np.where(y_onehot == 1)[1]).tolist()
def cat2labels(label_encoder, y_cat):
return label_encoder.inverse_transform(y_cat).tolist()
## ---------------------- Dataloaders ---------------------- ##
# for CRNN- varying lengths (frames)
class Dataset_CRNN_varlen(data.Dataset):
"Characterizes a dataset for PyTorch"
def __init__(self, data_path, lists, labels, set_frame, transform=None):
"Initialization"
self.data_path = data_path
self.labels = labels
self.folders, self.video_len = list(zip(*lists))
self.set_frame = set_frame
self.transform = transform
def __len__(self):
"Denotes the total number of samples"
return len(self.folders)
def __getitem__(self, index):
"Generates one sample of data"
# select sample
selected_folder = self.folders[index]
video_len = self.video_len[index]
select = np.arange(self.set_frame['begin'], self.set_frame['end'] + 1, self.set_frame['skip'])
img_size = self.transform.__dict__['transforms'][0].__dict__['size'] # get image resize from Transformation
channels = len(self.transform.__dict__['transforms'][2].__dict__['mean']) # get number of channels from Transformation
selected_frames = np.intersect1d(np.arange(1, video_len + 1), select) if self.set_frame['begin'] < video_len else []
# Load video frames
X_padded = torch.zeros((len(select), channels, img_size[0], img_size[1])) # input size: (frames, channels, image size x, image size y)
for i, f in enumerate(selected_frames):
frame = Image.open(os.path.join(self.data_path, selected_folder, 'frame{:06d}.jpg'.format(f)))
frame = self.transform(frame) if self.transform is not None else frame # impose transformation if exists
X_padded[i, :, :, :] = frame
y = torch.LongTensor([self.labels[index]]) # (labels) LongTensor are for int64 instead of FloatTensor
video_len = torch.LongTensor([video_len])
return X_padded, video_len, y
## ---------------------- end of Dataloaders ---------------------- ##
## -------------------- (reload) model prediction ---------------------- ##
def CRNN_final_prediction(model, device, loader):
cnn_encoder, rnn_decoder = model
cnn_encoder.eval()
rnn_decoder.eval()
all_y_pred = []
with torch.no_grad():
for batch_idx, (X, y) in enumerate(tqdm(loader)):
# distribute data to device
X = X.to(device)
output = rnn_decoder(cnn_encoder(X))
y_pred = output.max(1, keepdim=True)[1] # location of max log-probability as prediction
all_y_pred.extend(y_pred.cpu().data.squeeze().numpy().tolist())
return all_y_pred
## -------------------- end of model prediction ---------------------- ##
## ------------------------ CRNN module ---------------------- ##
def conv2D_output_size(img_size, padding, kernel_size, stride):
# compute output shape of conv2D
outshape = (np.floor((img_size[0] + 2 * padding[0] - (kernel_size[0] - 1) - 1) / stride[0] + 1).astype(int),
np.floor((img_size[1] + 2 * padding[1] - (kernel_size[1] - 1) - 1) / stride[1] + 1).astype(int))
return outshape
# 2D CNN encoder train from scratch (no transfer learning)
class EncoderCNN(nn.Module):
def __init__(self, img_x=90, img_y=120, fc_hidden1=512, fc_hidden2=512, drop_p=0.3, CNN_embed_dim=300):
super(EncoderCNN, self).__init__()
self.img_x = img_x
self.img_y = img_y
self.CNN_embed_dim = CNN_embed_dim
# CNN architechtures
self.ch1, self.ch2, self.ch3, self.ch4 = 32, 64, 128, 256
self.k1, self.k2, self.k3, self.k4 = (5, 5), (3, 3), (3, 3), (3, 3) # 2d kernal size
self.s1, self.s2, self.s3, self.s4 = (2, 2), (2, 2), (2, 2), (2, 2) # 2d strides
self.pd1, self.pd2, self.pd3, self.pd4 = (0, 0), (0, 0), (0, 0), (0, 0) # 2d padding
# conv2D output shapes
self.conv1_outshape = conv2D_output_size((self.img_x, self.img_y), self.pd1, self.k1, self.s1) # Conv1 output shape
self.conv2_outshape = conv2D_output_size(self.conv1_outshape, self.pd2, self.k2, self.s2)
self.conv3_outshape = conv2D_output_size(self.conv2_outshape, self.pd3, self.k3, self.s3)
self.conv4_outshape = conv2D_output_size(self.conv3_outshape, self.pd4, self.k4, self.s4)
# fully connected layer hidden nodes
self.fc_hidden1, self.fc_hidden2 = fc_hidden1, fc_hidden2
self.drop_p = drop_p
self.conv1 = nn.Sequential(
nn.Conv2d(in_channels=3, out_channels=self.ch1, kernel_size=self.k1, stride=self.s1, padding=self.pd1),
nn.BatchNorm2d(self.ch1, momentum=0.01),
nn.ReLU(inplace=True),
# nn.MaxPool2d(kernel_size=2),
)
self.conv2 = nn.Sequential(
nn.Conv2d(in_channels=self.ch1, out_channels=self.ch2, kernel_size=self.k2, stride=self.s2, padding=self.pd2),
nn.BatchNorm2d(self.ch2, momentum=0.01),
nn.ReLU(inplace=True),
# nn.MaxPool2d(kernel_size=2),
)
self.conv3 = nn.Sequential(
nn.Conv2d(in_channels=self.ch2, out_channels=self.ch3, kernel_size=self.k3, stride=self.s3, padding=self.pd3),
nn.BatchNorm2d(self.ch3, momentum=0.01),
nn.ReLU(inplace=True),
# nn.MaxPool2d(kernel_size=2),
)
self.conv4 = nn.Sequential(
nn.Conv2d(in_channels=self.ch3, out_channels=self.ch4, kernel_size=self.k4, stride=self.s4, padding=self.pd4),
nn.BatchNorm2d(self.ch4, momentum=0.01),
nn.ReLU(inplace=True),
# nn.MaxPool2d(kernel_size=2),
)
self.drop = nn.Dropout2d(self.drop_p)
self.pool = nn.MaxPool2d(2)
self.fc1 = nn.Linear(self.ch4 * self.conv4_outshape[0] * self.conv4_outshape[1], self.fc_hidden1) # fully connected layer, output k classes
self.fc2 = nn.Linear(self.fc_hidden1, self.fc_hidden2)
self.fc3 = nn.Linear(self.fc_hidden2, self.CNN_embed_dim) # output = CNN embedding latent variables
def forward(self, x_3d):
cnn_embed_seq = []
for t in range(x_3d.size(1)):
# CNNs
x = self.conv1(x_3d[:, t, :, :, :])
x = self.conv2(x)
x = self.conv3(x)
x = self.conv4(x)
x = x.view(x.size(0), -1) # flatten the output of conv
# FC layers
x = F.relu(self.fc1(x))
# x = F.dropout(x, p=self.drop_p, training=self.training)
x = F.relu(self.fc2(x))
x = F.dropout(x, p=self.drop_p, training=self.training)
x = self.fc3(x)
cnn_embed_seq.append(x)
# swap time and sample dim such that (sample dim, time dim, CNN latent dim)
cnn_embed_seq = torch.stack(cnn_embed_seq, dim=0).transpose_(0, 1)
# cnn_embed_seq: shape=(batch, time_step, input_size)
return cnn_embed_seq
# 2D CNN encoder using ResNet-152 pretrained
class ResCNNEncoder(nn.Module):
def __init__(self, fc_hidden1=512, fc_hidden2=512, drop_p=0.3, CNN_embed_dim=300):
"""Load the pretrained ResNet-152 and replace top fc layer."""
super(ResCNNEncoder, self).__init__()
self.fc_hidden1, self.fc_hidden2 = fc_hidden1, fc_hidden2
self.drop_p = drop_p
resnet = models.resnet152(pretrained=True)
modules = list(resnet.children())[:-1] # delete the last fc layer.
self.resnet = nn.Sequential(*modules)
self.fc1 = nn.Linear(resnet.fc.in_features, fc_hidden1)
self.bn1 = nn.BatchNorm1d(fc_hidden1, momentum=0.01)
self.fc2 = nn.Linear(fc_hidden1, fc_hidden2)
self.bn2 = nn.BatchNorm1d(fc_hidden2, momentum=0.01)
self.fc3 = nn.Linear(fc_hidden2, CNN_embed_dim)
def forward(self, x_3d):
cnn_embed_seq = []
for t in range(x_3d.size(1)):
# ResNet CNN
with torch.no_grad():
x = self.resnet(x_3d[:, t, :, :, :]) # ResNet
x = x.view(x.size(0), -1) # flatten output of conv
# FC layers
x = self.bn1(self.fc1(x))
x = F.relu(x)
x = self.bn2(self.fc2(x))
x = F.relu(x)
x = F.dropout(x, p=self.drop_p, training=self.training)
x = self.fc3(x)
cnn_embed_seq.append(x)
# swap time and sample dim such that (sample dim, time dim, CNN latent dim)
cnn_embed_seq = torch.stack(cnn_embed_seq, dim=0).transpose_(0, 1)
# cnn_embed_seq: shape=(batch, time_step, input_size)
return cnn_embed_seq
class DecoderRNN_varlen(nn.Module):
def __init__(self, CNN_embed_dim=300, h_RNN_layers=3, h_RNN=256, h_FC_dim=128, drop_p=0.3, num_classes=50):
super(DecoderRNN_varlen, self).__init__()
self.RNN_input_size = CNN_embed_dim
self.h_RNN_layers = h_RNN_layers # RNN hidden layers
self.h_RNN = h_RNN # RNN hidden nodes
self.h_FC_dim = h_FC_dim
self.drop_p = drop_p
self.num_classes = num_classes
self.LSTM = nn.LSTM(
input_size=self.RNN_input_size,
hidden_size=self.h_RNN,
num_layers=h_RNN_layers,
batch_first=True, # input & output will has batch size as 1s dimension. e.g. (batch, time_step, input_size)
)
self.fc1 = nn.Linear(self.h_RNN, self.h_FC_dim)
self.fc2 = nn.Linear(self.h_FC_dim, self.num_classes)
def forward(self, x_RNN, x_lengths):
N, T, n = x_RNN.size()
# print('x_RNN.size:', x_RNN.size(), 'x_lengths:', x_lengths)
for i in range(N):
if x_lengths[i] < T:
x_RNN[i, x_lengths[i]:, :] = torch.zeros(T - x_lengths[i], n, dtype=torch.float, device=x_RNN.device)
x_lengths[x_lengths > T] = T
lengths_ordered, perm_idx = x_lengths.sort(0, descending=True)
# use input of descending length
packed_x_RNN = torch.nn.utils.rnn.pack_padded_sequence(x_RNN[perm_idx], lengths_ordered, batch_first=True)
self.LSTM.flatten_parameters()
packed_RNN_out, (h_n_sorted, h_c_sorted) = self.LSTM(packed_x_RNN, None)
""" h_n shape (n_layers, batch, hidden_size), h_c shape (n_layers, batch, hidden_size) """
""" None represents zero initial hidden state. RNN_out has shape=(batch, time_step, output_size) """
RNN_out, _ = torch.nn.utils.rnn.pad_packed_sequence(packed_RNN_out, batch_first=True)
RNN_out = RNN_out.contiguous()
# RNN_out = RNN_out.view(-1, RNN_out.size(2))
# reverse back to original sequence order
_, unperm_idx = perm_idx.sort(0)
RNN_out = RNN_out[unperm_idx]
# FC layers
x = self.fc1(RNN_out[:, -1, :]) # choose RNN_out at the last time step
x = F.relu(x)
x = F.dropout(x, p=self.drop_p, training=self.training)
x = self.fc2(x)
return x
## ---------------------- end of CRNN module ---------------------- ##
| 11,857 | 40.607018 | 149 | py |
video-classification | video-classification-master/ResNetCRNN_varylength/ResNetCRNN_check_prediction.py | import os
import numpy as np
import torch
import torchvision.transforms as transforms
import torch.utils.data as data
import matplotlib.pyplot as plt
from functions import *
from sklearn.preprocessing import OneHotEncoder, LabelEncoder
from sklearn.metrics import accuracy_score
import pandas as pd
import pickle
# set path
action_name_path = './UCF101actions.pkl'
save_model_path = "./ResNetCRNN_ckpt/"
# use same encoder CNN saved!
CNN_fc_hidden1, CNN_fc_hidden2 = 1024, 768
CNN_embed_dim = 512 # latent dim extracted by 2D CNN
res_size = 224 # ResNet image size
dropout_p = 0.0 # dropout probability
# use same decoder RNN saved!
RNN_hidden_layers = 3
RNN_hidden_nodes = 512
RNN_FC_dim = 256
# training parameters
k = 101 # number of target category
batch_size = 40
# Select which frame to begin & end in videos
begin_frame, end_frame, skip_frame = 1, 29, 1
with open(action_name_path, 'rb') as f:
action_names = pickle.load(f) # load UCF101 actions names
# convert labels -> category
le = LabelEncoder()
le.fit(action_names)
# show how many classes there are
list(le.classes_)
# convert category -> 1-hot
action_category = le.transform(action_names).reshape(-1, 1)
enc = OneHotEncoder()
enc.fit(action_category)
# # example
# y = ['HorseRace', 'YoYo', 'WalkingWithDog']
# y_onehot = labels2onehot(enc, le, y)
# y2 = onehot2labels(le, y_onehot)
actions = []
fnames = os.listdir(data_path)
all_names = []
for f in fnames:
loc1 = f.find('v_')
loc2 = f.find('_g')
actions.append(f[(loc1 + 2): loc2])
all_names.append(os.path.join(data_path, f))
# list all data files
all_X_list = all_names # all video file names
all_y_list = labels2cat(le, actions) # all video labels
# data loading parameters
use_cuda = torch.cuda.is_available() # check if GPU exists
device = torch.device("cuda" if use_cuda else "cpu") # use CPU or GPU
params = {'batch_size': batch_size, 'shuffle': True, 'num_workers': 4, 'pin_memory': True} if use_cuda else {}
transform = transforms.Compose([transforms.Resize([res_size, res_size]),
transforms.ToTensor(),
transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])])
selected_frames = np.arange(begin_frame, end_frame, skip_frame).tolist()
# reset data loader
all_data_params = {'batch_size': batch_size, 'shuffle': False, 'num_workers': 4, 'pin_memory': True} if use_cuda else {}
all_data_loader = data.DataLoader(Dataset_CRNN(all_X_list, all_y_list, selected_frames, transform=transform), **all_data_params)
# reload CRNN model
cnn_encoder = ResCNNEncoder(fc_hidden1=CNN_fc_hidden1, fc_hidden2=CNN_fc_hidden2, drop_p=dropout_p, CNN_embed_dim=CNN_embed_dim).to(device)
rnn_decoder = DecoderRNN(CNN_embed_dim=CNN_embed_dim, h_RNN_layers=RNN_hidden_layers, h_RNN=RNN_hidden_nodes,
h_FC_dim=RNN_FC_dim, drop_p=dropout_p, num_classes=k).to(device)
cnn_encoder.load_state_dict(torch.load(os.path.join(save_model_path, 'cnn_encoder_epoch41.pth')))
rnn_decoder.load_state_dict(torch.load(os.path.join(save_model_path, 'rnn_decoder_epoch41.pth')))
print('CRNN model reloaded!')
# make all video predictions by reloaded model
print('Predicting all {} videos:'.format(len(all_data_loader.dataset)))
all_y_pred = CRNN_final_prediction([cnn_encoder, rnn_decoder], device, all_data_loader)
# write in pandas dataframe
df = pd.DataFrame(data={'filename': fnames, 'y': cat2labels(le, all_y_list), 'y_pred': cat2labels(le, all_y_pred)})
df.to_pickle("./UCF101_videos_prediction.pkl") # save pandas dataframe
# pd.read_pickle("./all_videos_prediction.pkl")
print('video prediction finished!')
| 3,726 | 32.276786 | 139 | py |
video-classification | video-classification-master/CRNN/UCF101_CRNN.py | import os
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torchvision.models as models
import torchvision.transforms as transforms
import torch.utils.data as data
import torchvision
from torch.autograd import Variable
import matplotlib.pyplot as plt
from functions import *
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import OneHotEncoder, LabelEncoder
from sklearn.metrics import accuracy_score
import pickle
# set path
data_path = "./jpegs_256/" # define UCF-101 RGB data path
action_name_path = './UCF101actions.pkl'
save_model_path = "./CRNN_ckpt/"
# EncoderCNN architecture
CNN_fc_hidden1, CNN_fc_hidden2 = 1024, 768
CNN_embed_dim = 512 # latent dim extracted by 2D CNN
img_x, img_y = 256, 342 # resize video 2d frame size
dropout_p = 0.0 # dropout probability
# DecoderRNN architecture
RNN_hidden_layers = 3
RNN_hidden_nodes = 512
RNN_FC_dim = 256
# training parameters
k = 101 # number of target category
epochs = 120 # training epochs
batch_size = 30
learning_rate = 1e-4
log_interval = 10 # interval for displaying training info
# Select which frame to begin & end in videos
begin_frame, end_frame, skip_frame = 1, 29, 1
def train(log_interval, model, device, train_loader, optimizer, epoch):
# set model as training mode
cnn_encoder, rnn_decoder = model
cnn_encoder.train()
rnn_decoder.train()
losses = []
scores = []
N_count = 0 # counting total trained sample in one epoch
for batch_idx, (X, y) in enumerate(train_loader):
# distribute data to device
X, y = X.to(device), y.to(device).view(-1, )
N_count += X.size(0)
optimizer.zero_grad()
output = rnn_decoder(cnn_encoder(X)) # output has dim = (batch, number of classes)
loss = F.cross_entropy(output, y)
losses.append(loss.item())
# to compute accuracy
y_pred = torch.max(output, 1)[1] # y_pred != output
step_score = accuracy_score(y.cpu().data.squeeze().numpy(), y_pred.cpu().data.squeeze().numpy())
scores.append(step_score) # computed on CPU
loss.backward()
optimizer.step()
# show information
if (batch_idx + 1) % log_interval == 0:
print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}, Accu: {:.2f}%'.format(
epoch + 1, N_count, len(train_loader.dataset), 100. * (batch_idx + 1) / len(train_loader), loss.item(), 100 * step_score))
return losses, scores
def validation(model, device, optimizer, test_loader):
# set model as testing mode
cnn_encoder, rnn_decoder = model
cnn_encoder.eval()
rnn_decoder.eval()
test_loss = 0
all_y = []
all_y_pred = []
with torch.no_grad():
for X, y in test_loader:
# distribute data to device
X, y = X.to(device), y.to(device).view(-1, )
output = rnn_decoder(cnn_encoder(X))
loss = F.cross_entropy(output, y, reduction='sum')
test_loss += loss.item() # sum up batch loss
y_pred = output.max(1, keepdim=True)[1] # (y_pred != output) get the index of the max log-probability
# collect all y and y_pred in all batches
all_y.extend(y)
all_y_pred.extend(y_pred)
test_loss /= len(test_loader.dataset)
# compute accuracy
all_y = torch.stack(all_y, dim=0)
all_y_pred = torch.stack(all_y_pred, dim=0)
test_score = accuracy_score(all_y.cpu().data.squeeze().numpy(), all_y_pred.cpu().data.squeeze().numpy())
# show information
print('\nTest set ({:d} samples): Average loss: {:.4f}, Accuracy: {:.2f}%\n'.format(len(all_y), test_loss, 100* test_score))
# save Pytorch models of best record
torch.save(cnn_encoder.state_dict(), os.path.join(save_model_path, 'cnn_encoder_epoch{}.pth'.format(epoch + 1))) # save spatial_encoder
torch.save(rnn_decoder.state_dict(), os.path.join(save_model_path, 'rnn_decoder_epoch{}.pth'.format(epoch + 1))) # save motion_encoder
torch.save(optimizer.state_dict(), os.path.join(save_model_path, 'optimizer_epoch{}.pth'.format(epoch + 1))) # save optimizer
print("Epoch {} model saved!".format(epoch + 1))
return test_loss, test_score
# Detect devices
use_cuda = torch.cuda.is_available() # check if GPU exists
device = torch.device("cuda" if use_cuda else "cpu") # use CPU or GPU
# Data loading parameters
params = {'batch_size': batch_size, 'shuffle': True, 'num_workers': 4, 'pin_memory': True} if use_cuda else {}
# load UCF101 actions names
with open(action_name_path, 'rb') as f:
action_names = pickle.load(f)
# convert labels -> category
le = LabelEncoder()
le.fit(action_names)
# show how many classes there are
list(le.classes_)
# convert category -> 1-hot
action_category = le.transform(action_names).reshape(-1, 1)
enc = OneHotEncoder()
enc.fit(action_category)
# # example
# y = ['HorseRace', 'YoYo', 'WalkingWithDog']
# y_onehot = labels2onehot(enc, le, y)
# y2 = onehot2labels(le, y_onehot)
actions = []
fnames = os.listdir(data_path)
all_names = []
for f in fnames:
loc1 = f.find('v_')
loc2 = f.find('_g')
actions.append(f[(loc1 + 2): loc2])
all_names.append(f)
# list all data files
all_X_list = all_names # all video file names
all_y_list = labels2cat(le, actions) # all video labels
# train, test split
train_list, test_list, train_label, test_label = train_test_split(all_X_list, all_y_list, test_size=0.25, random_state=42)
transform = transforms.Compose([transforms.Resize([img_x, img_y]),
transforms.ToTensor(),
transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])])
selected_frames = np.arange(begin_frame, end_frame, skip_frame).tolist()
train_set, valid_set = Dataset_CRNN(data_path, train_list, train_label, selected_frames, transform=transform), \
Dataset_CRNN(data_path, test_list, test_label, selected_frames, transform=transform)
train_loader = data.DataLoader(train_set, **params)
valid_loader = data.DataLoader(valid_set, **params)
# Create model
cnn_encoder = EncoderCNN(img_x=img_x, img_y=img_y, fc_hidden1=CNN_fc_hidden1, fc_hidden2=CNN_fc_hidden2,
drop_p=dropout_p, CNN_embed_dim=CNN_embed_dim).to(device)
rnn_decoder = DecoderRNN(CNN_embed_dim=CNN_embed_dim, h_RNN_layers=RNN_hidden_layers, h_RNN=RNN_hidden_nodes,
h_FC_dim=RNN_FC_dim, drop_p=dropout_p, num_classes=k).to(device)
# Parallelize model to multiple GPUs
if torch.cuda.device_count() > 1:
print("Using", torch.cuda.device_count(), "GPUs!")
cnn_encoder = nn.DataParallel(cnn_encoder)
rnn_decoder = nn.DataParallel(rnn_decoder)
crnn_params = list(cnn_encoder.parameters()) + list(rnn_decoder.parameters())
optimizer = torch.optim.Adam(crnn_params, lr=learning_rate)
# record training process
epoch_train_losses = []
epoch_train_scores = []
epoch_test_losses = []
epoch_test_scores = []
# start training
for epoch in range(epochs):
# train, test model
train_losses, train_scores = train(log_interval, [cnn_encoder, rnn_decoder], device, train_loader, optimizer, epoch)
epoch_test_loss, epoch_test_score = validation([cnn_encoder, rnn_decoder], device, optimizer, valid_loader)
# save results
epoch_train_losses.append(train_losses)
epoch_train_scores.append(train_scores)
epoch_test_losses.append(epoch_test_loss)
epoch_test_scores.append(epoch_test_score)
# save all train test results
A = np.array(epoch_train_losses)
B = np.array(epoch_train_scores)
C = np.array(epoch_test_losses)
D = np.array(epoch_test_scores)
np.save('./CRNN_epoch_training_losses.npy', A)
np.save('./CRNN_epoch_training_scores.npy', B)
np.save('./CRNN_epoch_test_loss.npy', C)
np.save('./CRNN_epoch_test_score.npy', D)
# plot
fig = plt.figure(figsize=(10, 4))
plt.subplot(121)
plt.plot(np.arange(1, epochs + 1), A[:, -1]) # train loss (on epoch end)
plt.plot(np.arange(1, epochs + 1), C) # test loss (on epoch end)
plt.title("model loss")
plt.xlabel('epochs')
plt.ylabel('loss')
plt.legend(['train', 'test'], loc="upper left")
# 2nd figure
plt.subplot(122)
plt.plot(np.arange(1, epochs + 1), B[:, -1]) # train accuracy (on epoch end)
plt.plot(np.arange(1, epochs + 1), D) # test accuracy (on epoch end)
plt.title("training scores")
plt.xlabel('epochs')
plt.ylabel('accuracy')
plt.legend(['train', 'test'], loc="upper left")
title = "./fig_UCF101_CRNN.png"
plt.savefig(title, dpi=600)
# plt.close(fig)
plt.show()
| 8,722 | 33.892 | 140 | py |
video-classification | video-classification-master/CRNN/functions.py | import os
import numpy as np
from PIL import Image
from torch.utils import data
import torch
import torch.nn as nn
import torch.nn.functional as F
import torchvision.models as models
import torchvision.transforms as transforms
from tqdm import tqdm
## ------------------- label conversion tools ------------------ ##
def labels2cat(label_encoder, list):
return label_encoder.transform(list)
def labels2onehot(OneHotEncoder, label_encoder, list):
return OneHotEncoder.transform(label_encoder.transform(list).reshape(-1, 1)).toarray()
def onehot2labels(label_encoder, y_onehot):
return label_encoder.inverse_transform(np.where(y_onehot == 1)[1]).tolist()
def cat2labels(label_encoder, y_cat):
return label_encoder.inverse_transform(y_cat).tolist()
## ---------------------- Dataloaders ---------------------- ##
# for 3DCNN
class Dataset_3DCNN(data.Dataset):
"Characterizes a dataset for PyTorch"
def __init__(self, data_path, folders, labels, frames, transform=None):
"Initialization"
self.data_path = data_path
self.labels = labels
self.folders = folders
self.transform = transform
self.frames = frames
def __len__(self):
"Denotes the total number of samples"
return len(self.folders)
def read_images(self, path, selected_folder, use_transform):
X = []
for i in self.frames:
image = Image.open(os.path.join(path, selected_folder, 'frame{:06d}.jpg'.format(i))).convert('L')
if use_transform is not None:
image = use_transform(image)
X.append(image.squeeze_(0))
X = torch.stack(X, dim=0)
return X
def __getitem__(self, index):
"Generates one sample of data"
# Select sample
folder = self.folders[index]
# Load data
X = self.read_images(self.data_path, folder, self.transform).unsqueeze_(0) # (input) spatial images
y = torch.LongTensor([self.labels[index]]) # (labels) LongTensor are for int64 instead of FloatTensor
# print(X.shape)
return X, y
# for CRNN
class Dataset_CRNN(data.Dataset):
"Characterizes a dataset for PyTorch"
def __init__(self, data_path, folders, labels, frames, transform=None):
"Initialization"
self.data_path = data_path
self.labels = labels
self.folders = folders
self.transform = transform
self.frames = frames
def __len__(self):
"Denotes the total number of samples"
return len(self.folders)
def read_images(self, path, selected_folder, use_transform):
X = []
for i in self.frames:
image = Image.open(os.path.join(path, selected_folder, 'frame{:06d}.jpg'.format(i)))
if use_transform is not None:
image = use_transform(image)
X.append(image)
X = torch.stack(X, dim=0)
return X
def __getitem__(self, index):
"Generates one sample of data"
# Select sample
folder = self.folders[index]
# Load data
X = self.read_images(self.data_path, folder, self.transform) # (input) spatial images
y = torch.LongTensor([self.labels[index]]) # (labels) LongTensor are for int64 instead of FloatTensor
# print(X.shape)
return X, y
## ---------------------- end of Dataloaders ---------------------- ##
## -------------------- (reload) model prediction ---------------------- ##
def Conv3d_final_prediction(model, device, loader):
model.eval()
all_y_pred = []
with torch.no_grad():
for batch_idx, (X, y) in enumerate(tqdm(loader)):
# distribute data to device
X = X.to(device)
output = model(X)
y_pred = output.max(1, keepdim=True)[1] # location of max log-probability as prediction
all_y_pred.extend(y_pred.cpu().data.squeeze().numpy().tolist())
return all_y_pred
def CRNN_final_prediction(model, device, loader):
cnn_encoder, rnn_decoder = model
cnn_encoder.eval()
rnn_decoder.eval()
all_y_pred = []
with torch.no_grad():
for batch_idx, (X, y) in enumerate(tqdm(loader)):
# distribute data to device
X = X.to(device)
output = rnn_decoder(cnn_encoder(X))
y_pred = output.max(1, keepdim=True)[1] # location of max log-probability as prediction
all_y_pred.extend(y_pred.cpu().data.squeeze().numpy().tolist())
return all_y_pred
## -------------------- end of model prediction ---------------------- ##
## ------------------------ 3D CNN module ---------------------- ##
def conv3D_output_size(img_size, padding, kernel_size, stride):
# compute output shape of conv3D
outshape = (np.floor((img_size[0] + 2 * padding[0] - (kernel_size[0] - 1) - 1) / stride[0] + 1).astype(int),
np.floor((img_size[1] + 2 * padding[1] - (kernel_size[1] - 1) - 1) / stride[1] + 1).astype(int),
np.floor((img_size[2] + 2 * padding[2] - (kernel_size[2] - 1) - 1) / stride[2] + 1).astype(int))
return outshape
class CNN3D(nn.Module):
def __init__(self, t_dim=120, img_x=90, img_y=120, drop_p=0.2, fc_hidden1=256, fc_hidden2=128, num_classes=50):
super(CNN3D, self).__init__()
# set video dimension
self.t_dim = t_dim
self.img_x = img_x
self.img_y = img_y
# fully connected layer hidden nodes
self.fc_hidden1, self.fc_hidden2 = fc_hidden1, fc_hidden2
self.drop_p = drop_p
self.num_classes = num_classes
self.ch1, self.ch2 = 32, 48
self.k1, self.k2 = (5, 5, 5), (3, 3, 3) # 3d kernel size
self.s1, self.s2 = (2, 2, 2), (2, 2, 2) # 3d strides
self.pd1, self.pd2 = (0, 0, 0), (0, 0, 0) # 3d padding
# compute conv1 & conv2 output shape
self.conv1_outshape = conv3D_output_size((self.t_dim, self.img_x, self.img_y), self.pd1, self.k1, self.s1)
self.conv2_outshape = conv3D_output_size(self.conv1_outshape, self.pd2, self.k2, self.s2)
self.conv1 = nn.Conv3d(in_channels=1, out_channels=self.ch1, kernel_size=self.k1, stride=self.s1,
padding=self.pd1)
self.bn1 = nn.BatchNorm3d(self.ch1)
self.conv2 = nn.Conv3d(in_channels=self.ch1, out_channels=self.ch2, kernel_size=self.k2, stride=self.s2,
padding=self.pd2)
self.bn2 = nn.BatchNorm3d(self.ch2)
self.relu = nn.ReLU(inplace=True)
self.drop = nn.Dropout3d(self.drop_p)
self.pool = nn.MaxPool3d(2)
self.fc1 = nn.Linear(self.ch2 * self.conv2_outshape[0] * self.conv2_outshape[1] * self.conv2_outshape[2],
self.fc_hidden1) # fully connected hidden layer
self.fc2 = nn.Linear(self.fc_hidden1, self.fc_hidden2)
self.fc3 = nn.Linear(self.fc_hidden2, self.num_classes) # fully connected layer, output = multi-classes
def forward(self, x_3d):
# Conv 1
x = self.conv1(x_3d)
x = self.bn1(x)
x = self.relu(x)
x = self.drop(x)
# Conv 2
x = self.conv2(x)
x = self.bn2(x)
x = self.relu(x)
x = self.drop(x)
# FC 1 and 2
x = x.view(x.size(0), -1)
x = F.relu(self.fc1(x))
x = F.relu(self.fc2(x))
x = F.dropout(x, p=self.drop_p, training=self.training)
x = self.fc3(x)
return x
## --------------------- end of 3D CNN module ---------------- ##
## ------------------------ CRNN module ---------------------- ##
def conv2D_output_size(img_size, padding, kernel_size, stride):
# compute output shape of conv2D
outshape = (np.floor((img_size[0] + 2 * padding[0] - (kernel_size[0] - 1) - 1) / stride[0] + 1).astype(int),
np.floor((img_size[1] + 2 * padding[1] - (kernel_size[1] - 1) - 1) / stride[1] + 1).astype(int))
return outshape
# 2D CNN encoder train from scratch (no transfer learning)
class EncoderCNN(nn.Module):
def __init__(self, img_x=90, img_y=120, fc_hidden1=512, fc_hidden2=512, drop_p=0.3, CNN_embed_dim=300):
super(EncoderCNN, self).__init__()
self.img_x = img_x
self.img_y = img_y
self.CNN_embed_dim = CNN_embed_dim
# CNN architechtures
self.ch1, self.ch2, self.ch3, self.ch4 = 32, 64, 128, 256
self.k1, self.k2, self.k3, self.k4 = (5, 5), (3, 3), (3, 3), (3, 3) # 2d kernal size
self.s1, self.s2, self.s3, self.s4 = (2, 2), (2, 2), (2, 2), (2, 2) # 2d strides
self.pd1, self.pd2, self.pd3, self.pd4 = (0, 0), (0, 0), (0, 0), (0, 0) # 2d padding
# conv2D output shapes
self.conv1_outshape = conv2D_output_size((self.img_x, self.img_y), self.pd1, self.k1, self.s1) # Conv1 output shape
self.conv2_outshape = conv2D_output_size(self.conv1_outshape, self.pd2, self.k2, self.s2)
self.conv3_outshape = conv2D_output_size(self.conv2_outshape, self.pd3, self.k3, self.s3)
self.conv4_outshape = conv2D_output_size(self.conv3_outshape, self.pd4, self.k4, self.s4)
# fully connected layer hidden nodes
self.fc_hidden1, self.fc_hidden2 = fc_hidden1, fc_hidden2
self.drop_p = drop_p
self.conv1 = nn.Sequential(
nn.Conv2d(in_channels=3, out_channels=self.ch1, kernel_size=self.k1, stride=self.s1, padding=self.pd1),
nn.BatchNorm2d(self.ch1, momentum=0.01),
nn.ReLU(inplace=True),
# nn.MaxPool2d(kernel_size=2),
)
self.conv2 = nn.Sequential(
nn.Conv2d(in_channels=self.ch1, out_channels=self.ch2, kernel_size=self.k2, stride=self.s2, padding=self.pd2),
nn.BatchNorm2d(self.ch2, momentum=0.01),
nn.ReLU(inplace=True),
# nn.MaxPool2d(kernel_size=2),
)
self.conv3 = nn.Sequential(
nn.Conv2d(in_channels=self.ch2, out_channels=self.ch3, kernel_size=self.k3, stride=self.s3, padding=self.pd3),
nn.BatchNorm2d(self.ch3, momentum=0.01),
nn.ReLU(inplace=True),
# nn.MaxPool2d(kernel_size=2),
)
self.conv4 = nn.Sequential(
nn.Conv2d(in_channels=self.ch3, out_channels=self.ch4, kernel_size=self.k4, stride=self.s4, padding=self.pd4),
nn.BatchNorm2d(self.ch4, momentum=0.01),
nn.ReLU(inplace=True),
# nn.MaxPool2d(kernel_size=2),
)
self.drop = nn.Dropout2d(self.drop_p)
self.pool = nn.MaxPool2d(2)
self.fc1 = nn.Linear(self.ch4 * self.conv4_outshape[0] * self.conv4_outshape[1], self.fc_hidden1) # fully connected layer, output k classes
self.fc2 = nn.Linear(self.fc_hidden1, self.fc_hidden2)
self.fc3 = nn.Linear(self.fc_hidden2, self.CNN_embed_dim) # output = CNN embedding latent variables
def forward(self, x_3d):
cnn_embed_seq = []
for t in range(x_3d.size(1)):
# CNNs
x = self.conv1(x_3d[:, t, :, :, :])
x = self.conv2(x)
x = self.conv3(x)
x = self.conv4(x)
x = x.view(x.size(0), -1) # flatten the output of conv
# FC layers
x = F.relu(self.fc1(x))
# x = F.dropout(x, p=self.drop_p, training=self.training)
x = F.relu(self.fc2(x))
x = F.dropout(x, p=self.drop_p, training=self.training)
x = self.fc3(x)
cnn_embed_seq.append(x)
# swap time and sample dim such that (sample dim, time dim, CNN latent dim)
cnn_embed_seq = torch.stack(cnn_embed_seq, dim=0).transpose_(0, 1)
# cnn_embed_seq: shape=(batch, time_step, input_size)
return cnn_embed_seq
# 2D CNN encoder using ResNet-152 pretrained
class ResCNNEncoder(nn.Module):
def __init__(self, fc_hidden1=512, fc_hidden2=512, drop_p=0.3, CNN_embed_dim=300):
"""Load the pretrained ResNet-152 and replace top fc layer."""
super(ResCNNEncoder, self).__init__()
self.fc_hidden1, self.fc_hidden2 = fc_hidden1, fc_hidden2
self.drop_p = drop_p
resnet = models.resnet152(pretrained=True)
modules = list(resnet.children())[:-1] # delete the last fc layer.
self.resnet = nn.Sequential(*modules)
self.fc1 = nn.Linear(resnet.fc.in_features, fc_hidden1)
self.bn1 = nn.BatchNorm1d(fc_hidden1, momentum=0.01)
self.fc2 = nn.Linear(fc_hidden1, fc_hidden2)
self.bn2 = nn.BatchNorm1d(fc_hidden2, momentum=0.01)
self.fc3 = nn.Linear(fc_hidden2, CNN_embed_dim)
def forward(self, x_3d):
cnn_embed_seq = []
for t in range(x_3d.size(1)):
# ResNet CNN
with torch.no_grad():
x = self.resnet(x_3d[:, t, :, :, :]) # ResNet
x = x.view(x.size(0), -1) # flatten output of conv
# FC layers
x = self.bn1(self.fc1(x))
x = F.relu(x)
x = self.bn2(self.fc2(x))
x = F.relu(x)
x = F.dropout(x, p=self.drop_p, training=self.training)
x = self.fc3(x)
cnn_embed_seq.append(x)
# swap time and sample dim such that (sample dim, time dim, CNN latent dim)
cnn_embed_seq = torch.stack(cnn_embed_seq, dim=0).transpose_(0, 1)
# cnn_embed_seq: shape=(batch, time_step, input_size)
return cnn_embed_seq
class DecoderRNN(nn.Module):
def __init__(self, CNN_embed_dim=300, h_RNN_layers=3, h_RNN=256, h_FC_dim=128, drop_p=0.3, num_classes=50):
super(DecoderRNN, self).__init__()
self.RNN_input_size = CNN_embed_dim
self.h_RNN_layers = h_RNN_layers # RNN hidden layers
self.h_RNN = h_RNN # RNN hidden nodes
self.h_FC_dim = h_FC_dim
self.drop_p = drop_p
self.num_classes = num_classes
self.LSTM = nn.LSTM(
input_size=self.RNN_input_size,
hidden_size=self.h_RNN,
num_layers=h_RNN_layers,
batch_first=True, # input & output will has batch size as 1s dimension. e.g. (batch, time_step, input_size)
)
self.fc1 = nn.Linear(self.h_RNN, self.h_FC_dim)
self.fc2 = nn.Linear(self.h_FC_dim, self.num_classes)
def forward(self, x_RNN):
self.LSTM.flatten_parameters()
RNN_out, (h_n, h_c) = self.LSTM(x_RNN, None)
""" h_n shape (n_layers, batch, hidden_size), h_c shape (n_layers, batch, hidden_size) """
""" None represents zero initial hidden state. RNN_out has shape=(batch, time_step, output_size) """
# FC layers
x = self.fc1(RNN_out[:, -1, :]) # choose RNN_out at the last time step
x = F.relu(x)
x = F.dropout(x, p=self.drop_p, training=self.training)
x = self.fc2(x)
return x
## ---------------------- end of CRNN module ---------------------- ##
| 15,041 | 37.768041 | 149 | py |
video-classification | video-classification-master/CRNN/CRNN_check_prediction.py | import os
import numpy as np
import torch
import torchvision.transforms as transforms
import torch.utils.data as data
import matplotlib.pyplot as plt
from functions import *
from sklearn.preprocessing import OneHotEncoder, LabelEncoder
from sklearn.metrics import accuracy_score
import pandas as pd
import pickle
# set path
data_path = "./jpegs_256/" # define UCF-101 RGB data path
action_name_path = "./UCF101actions.pkl"
save_model_path = "./CRNN_ckpt/"
# use same encoder CNN saved!
CNN_fc_hidden1, CNN_fc_hidden2 = 1024, 768
CNN_embed_dim = 512 # latent dim extracted by 2D CNN
img_x, img_y = 256, 342 # resize video 2d frame size
dropout_p = 0.0 # dropout probability
# use same decoder RNN saved!
RNN_hidden_layers = 3
RNN_hidden_nodes = 512
RNN_FC_dim = 256
# training parameters
k = 101 # number of target category
batch_size = 40
# Select which frame to begin & end in videos
begin_frame, end_frame, skip_frame = 1, 29, 1
with open(action_name_path, 'rb') as f:
action_names = pickle.load(f) # load UCF101 actions names
# convert labels -> category
le = LabelEncoder()
le.fit(action_names)
# show how many classes there are
list(le.classes_)
# convert category -> 1-hot
action_category = le.transform(action_names).reshape(-1, 1)
enc = OneHotEncoder()
enc.fit(action_category)
# # example
# y = ['HorseRace', 'YoYo', 'WalkingWithDog']
# y_onehot = labels2onehot(enc, le, y)
# y2 = onehot2labels(le, y_onehot)
actions = []
fnames = os.listdir(data_path)
all_names = []
for f in fnames:
loc1 = f.find('v_')
loc2 = f.find('_g')
actions.append(f[(loc1 + 2): loc2])
all_names.append(f)
# list all data files
all_X_list = all_names # all video file names
all_y_list = labels2cat(le, actions) # all video labels
# data loading parameters
use_cuda = torch.cuda.is_available() # check if GPU exists
device = torch.device("cuda" if use_cuda else "cpu") # use CPU or GPU
params = {'batch_size': batch_size, 'shuffle': True, 'num_workers': 4, 'pin_memory': True} if use_cuda else {}
transform = transforms.Compose([transforms.Resize([img_x, img_y]),
transforms.ToTensor(),
transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])])
selected_frames = np.arange(begin_frame, end_frame, skip_frame).tolist()
# reset data loader
all_data_params = {'batch_size': batch_size, 'shuffle': False, 'num_workers': 4, 'pin_memory': True} if use_cuda else {}
all_data_loader = data.DataLoader(Dataset_CRNN(data_path, all_X_list, all_y_list, selected_frames, transform=transform), **all_data_params)
# reload CRNN model
cnn_encoder = EncoderCNN(img_x=img_x, img_y=img_y, fc_hidden1=CNN_fc_hidden1, fc_hidden2=CNN_fc_hidden2,
drop_p=dropout_p, CNN_embed_dim=CNN_embed_dim).to(device)
rnn_decoder = DecoderRNN(CNN_embed_dim=CNN_embed_dim, h_RNN_layers=RNN_hidden_layers, h_RNN=RNN_hidden_nodes,
h_FC_dim=RNN_FC_dim, drop_p=dropout_p, num_classes=k).to(device)
cnn_encoder.load_state_dict(torch.load(os.path.join(save_model_path, 'cnn_encoder_epoch41.pth')))
rnn_decoder.load_state_dict(torch.load(os.path.join(save_model_path, 'rnn_decoder_epoch41.pth')))
print('CRNN model reloaded!')
# make all video predictions by reloaded model
print('Predicting all {} videos:'.format(len(all_data_loader.dataset)))
all_y_pred = CRNN_final_prediction([cnn_encoder, rnn_decoder], device, all_data_loader)
# write in pandas dataframe
df = pd.DataFrame(data={'filename': fnames, 'y': cat2labels(le, all_y_list), 'y_pred': cat2labels(le, all_y_pred)})
df.to_pickle("./UCF101_videos_prediction.pkl") # save pandas dataframe
# pd.read_pickle("./all_videos_prediction.pkl")
print('video prediction finished!')
| 3,820 | 32.814159 | 139 | py |
MuHDi | MuHDi-master/muhdi/dataset/base_dataset.py | from pathlib import Path
import random
import numpy as np
from PIL import Image
from torch.utils import data
class BaseDataset(data.Dataset):
def __init__(self, root, list_path, set_,
max_iters, image_size, labels_size, mean):
self.root = Path(root)
self.set = set_
self.list_path = list_path.format(self.set)
self.image_size = image_size
if labels_size is None:
self.labels_size = self.image_size
else:
self.labels_size = labels_size
self.mean = mean
with open(self.list_path) as f:
self.img_ids = [i_id.strip() for i_id in f]
if max_iters is not None:
self.img_ids = self.img_ids * int(np.ceil(float(max_iters) / len(self.img_ids)))
self.files = []
for name in self.img_ids:
img_file, label_file = self.get_metadata(name)
self.files.append((img_file, label_file, name))
def get_metadata(self, name):
raise NotImplementedError
def __len__(self):
return len(self.files)
def preprocess(self, image):
image = image[:, :, ::-1] # change to BGR
image -= self.mean
return image.transpose((2, 0, 1))
def get_image(self, file):
return _load_img(file, self.image_size, Image.BICUBIC, rgb=True)
def get_labels(self, file):
return _load_img(file, self.labels_size, Image.NEAREST, rgb=False)
def _load_img(file, size, interpolation, rgb):
img = Image.open(file)
if rgb:
img = img.convert('RGB')
img = img.resize(size, interpolation)
return np.asarray(img, np.float32)
| 1,648 | 30.113208 | 92 | py |
MuHDi | MuHDi-master/muhdi/dataset/mapillary.py | import json
import warnings
from pathlib import Path
import numpy as np
from PIL import Image
from skimage import color
from torch.utils import data
from muhdi.utils import project_root
from muhdi.utils.serialization import json_load
# from valeodata import download
DEFAULT_INFO_PATH = project_root / 'muhdi/dataset/mapillary_list/info.json'
class MapillaryDataSet(data.Dataset):
classes_ids = {'flat': 0,
'construction': 1,
'object': 2,
'nature': 3,
'sky': 4,
'human': 5,
'vehicle': 6,
'other': 255}
classes_mappings_mapillary_to_cityscapes = {'bird': 'other',
'ground animal': 'other',
'curb': 'construction',
'fence': 'construction',
'guard rail': 'construction',
'barrier': 'construction',
'wall': 'construction',
'bike lane': 'flat',
'crosswalk - plain': 'flat',
'curb cut': 'flat',
'parking': 'flat',
'pedestrian area': 'flat',
'rail track': 'flat',
'road': 'flat',
'service lane': 'flat',
'sidewalk': 'flat',
'bridge': 'construction',
'building': 'construction',
'tunnel': 'construction',
'person': 'human',
'bicyclist': 'human',
'motorcyclist': 'human',
'other rider': 'human',
'lane marking - crosswalk': 'flat',
'lane marking - general': 'flat',
'mountain': 'other',
'sand': 'other',
'sky': 'sky',
'snow': 'other',
'terrain': 'flat',
'vegetation': 'nature',
'water': 'other',
'banner': 'other',
'bench': 'other',
'bike rack': 'other',
'billboard': 'other',
'catch basin': 'other',
'cctv camera': 'other',
'fire hydrant': 'other',
'junction box': 'other',
'mailbox': 'other',
'manhole': 'other',
'phone booth': 'other',
'pothole': 'object',
'street light': 'object',
'pole': 'object',
'traffic sign frame': 'object',
'utility pole': 'object',
'traffic light': 'object',
'traffic sign (back)': 'object',
'traffic sign (front)': 'object',
'trash can': 'other',
'bicycle': 'vehicle',
'boat': 'vehicle',
'bus': 'vehicle',
'car': 'vehicle',
'caravan': 'vehicle',
'motorcycle': 'vehicle',
'on rails': 'vehicle',
'other vehicle': 'vehicle',
'trailer': 'vehicle',
'truck': 'vehicle',
'wheeled slow': 'vehicle',
'car mount': 'other',
'ego vehicle': 'other',
'unlabeled': 'other'}
def __init__(self, root, set='train', max_iters=None, crop_size=(321, 321), mean=(128, 128, 128),
class_mappings=classes_mappings_mapillary_to_cityscapes, model_classes=classes_ids,
load_instances=False, scale_label=True, labels_size=None, info_path=DEFAULT_INFO_PATH):
self.path = Path(root) / set
self.crop_size = crop_size
if labels_size is None:
self.labels_size = crop_size
else:
self.labels_size = labels_size
self.load_instances = load_instances
self.mean = mean
self.scale_label = scale_label
self.info = json_load(info_path)
sorted_paths = map(lambda x: sorted((self.path / x).iterdir()),
('images', 'labels'))
self.data_paths = list(zip(*sorted_paths))
if max_iters is not None:
self.data_paths = self.data_paths * int(np.ceil(float(max_iters) / len(self.data_paths)))
self.labels = json.loads((Path(root) / 'config.json').read_text())['labels']
self.vector_mappings = None
if class_mappings is not None:
dataset_classes = [label['readable'] for label in self.labels]
self.vector_mappings = array_from_class_mappings(dataset_classes,
class_mappings,
model_classes)
def __len__(self):
return len(self.data_paths)
def __getitem__(self, index):
# image_path, labels_path, instances_path = self.data_paths[index]
image_path, labels_path = self.data_paths[index]
image_array = Image.open(image_path) # 3D #double-check if this is RGB
image_array = resize_with_pad(self.crop_size, image_array, Image.BICUBIC)
size = image_array.shape
image_array = (image_array[:, :, ::-1] - self.mean).transpose((2, 0, 1))
labels_array = Image.open(labels_path) # 2D
if self.scale_label:
labels_array = resize_with_pad(self.labels_size, labels_array,
Image.NEAREST,
fill_value=len(self.labels) - 1)
else:
labels_array = pad_with_fixed_AS(self.labels_size[0]/self.labels_size[1], labels_array,
fill_value=len(self.labels) - 1)
if self.vector_mappings is not None:
labels_array = label_mapping_mapilliary(labels_array, self.vector_mappings)
return image_array, labels_array, np.array(size), str(image_path)
def label_mapping_mapilliary(input, mapping):
output = np.copy(input)
for ind,val in enumerate(mapping):
output[input == ind] = val
return np.array(output, dtype=np.int64)
def array_from_class_mappings(dataset_classes, class_mappings, model_classes):
"""
:param dataset_classes: list or dict. Mapping between indexes and name of classes.
If using a list, it's equivalent
to {x: i for i, x in enumerate(dataset_classes)}
:param class_mappings: Dictionary mapping names of the dataset to
names of classes of the model.
:param model_classes: list or dict. Same as dataset_classes,
but for the model classes.
:return: A numpy array representing the mapping to be done.
"""
# Assert all classes are different.
assert len(model_classes) == len(set(model_classes))
# to generate the template to fill the dictionary for class_mappings
# uncomment this code.
"""
for x in dataset_classes:
print((' ' * 20) + f'\'{name}\': \'\',')
"""
# Not case sensitive to make it easier to write.
if isinstance(dataset_classes, list):
dataset_classes = {x: i for i, x in enumerate(dataset_classes)}
dataset_classes = {k.lower(): v for k, v in dataset_classes.items()}
class_mappings = {k.lower(): v.lower() for k, v in class_mappings.items()}
if isinstance(model_classes, list):
model_classes = {x: i for i, x in enumerate(model_classes)}
model_classes = {k.lower(): v for k, v in model_classes.items()}
result = np.zeros((max(dataset_classes.values()) + 1,), dtype=np.uint8)
for dataset_class_name, i in dataset_classes.items():
result[i] = model_classes[class_mappings[dataset_class_name]]
return result
def resize_with_pad(target_size, image, resize_type, fill_value=0):
if target_size is None:
return np.array(image)
# find which size to fit to the target size
target_ratio = target_size[0] / target_size[1]
image_ratio = image.size[0] / image.size[1]
if image_ratio > target_ratio:
resize_ratio = target_size[0] / image.size[0]
new_image_shape = (target_size[0], int(image.size[1] * resize_ratio))
else:
resize_ratio = target_size[1] / image.size[1]
new_image_shape = (int(image.size[0] * resize_ratio), target_size[1])
image_resized = image.resize(new_image_shape, resize_type)
image_resized = np.array(image_resized)
if image_resized.ndim == 2:
image_resized = image_resized[:, :, None]
tmp = target_size[::-1] + [image_resized.shape[2],]
result = np.ones(tmp, image_resized.dtype) * fill_value
assert image_resized.shape[0] <= result.shape[0]
assert image_resized.shape[1] <= result.shape[1]
placeholder = result[:image_resized.shape[0], :image_resized.shape[1]]
placeholder[:] = image_resized
return result
def pad_with_fixed_AS(target_ratio, image, fill_value=0):
dimW = float(image.size[0])
dimH = float(image.size[1])
image_ratio = dimW/dimH
if target_ratio > image_ratio:
dimW = target_ratio*dimH
elif target_ratio < image_ratio:
dimH = dimW/target_ratio
else:
return np.array(image)
image = np.array(image)
result = np.ones((int(dimH), int(dimW)), image.dtype) * fill_value
placeholder = result[:image.shape[0], :image.shape[1]]
placeholder[:] = image
return result
| 11,481 | 48.27897 | 104 | py |
MuHDi | MuHDi-master/muhdi/scripts/test.py | import argparse
import os
import os.path as osp
import pprint
import warnings
from torch.utils import data
from muhdi.model.deeplabv2 import get_deeplab_v2
from muhdi.dataset.cityscapes import CityscapesDataSet
from muhdi.dataset.gta5 import GTA5DataSet
from muhdi.dataset.mapillary import MapillaryDataSet
from muhdi.dataset.idd import IDDDataSet
from muhdi.dataset.acdc import ACDCDataSet
from muhdi.domain_adaptation.config import cfg, cfg_from_file
from muhdi.domain_adaptation.eval_UDA import evaluate_domain_adaptation
#import valeodata
warnings.filterwarnings("ignore", message="numpy.dtype size changed")
warnings.filterwarnings("ignore")
def get_arguments():
"""
Parse input arguments
"""
parser = argparse.ArgumentParser(description="Code for evaluation")
parser.add_argument('--cfg', type=str, default=None,
help='optional config file', )
parser.add_argument("--exp-suffix", type=str, default=None,
help="optional experiment suffix")
return parser.parse_args()
def main(config_file, exp_suffix):
# LOAD ARGS
assert config_file is not None, 'Missing cfg file'
cfg_from_file(config_file)
# auto-generate exp name if not specified
if cfg.EXP_NAME == '':
cfg.EXP_NAME = f'{cfg.SOURCE}2{cfg.TARGET}_{cfg.TRAIN.MODEL}_{cfg.TRAIN.DA_METHOD}'
if exp_suffix:
cfg.EXP_NAME += f'_{exp_suffix}'
# auto-generate snapshot path if not specified
if cfg.TEST.SNAPSHOT_DIR[0] == '':
cfg.TEST.SNAPSHOT_DIR[0] = osp.join(cfg.EXP_ROOT_SNAPSHOT, cfg.EXP_NAME)
os.makedirs(cfg.TEST.SNAPSHOT_DIR[0], exist_ok=True)
print('Using config:')
pprint.pprint(cfg)
# load models
models = []
n_models = len(cfg.TEST.MODEL)
if cfg.TEST.MODE == 'best':
assert n_models == 1, 'Not yet supported'
for i in range(n_models):
if cfg.TEST.MODEL[i] == 'DeepLabv2':
model = get_deeplab_v2(num_classes=cfg.NUM_CLASSES,
multi_level=cfg.TEST.MULTI_LEVEL[i])
else:
raise NotImplementedError(f"Not yet supported {cfg.TEST.MODEL[i]}")
models.append(model)
if os.environ.get('ADVENT_DRY_RUN', '0') == '1':
return
# dataloaders
test_loader_list = []
for i in range(len(cfg.TARGETS)):
target = cfg.TARGETS[i]
if target == 'Mapillary':
#valeodata.download('mapillary')
test_dataset = MapillaryDataSet(root=cfg.DATA_DIRECTORY_TARGET[i],
set=cfg.TEST.SET_TARGET[i],
crop_size=cfg.TEST.INPUT_SIZE_TARGET,
mean=cfg.TEST.IMG_MEAN,
labels_size=cfg.TEST.OUTPUT_SIZE_TARGET,
scale_label=True)
elif target == 'IDD':
test_dataset = IDDDataSet(root=cfg.DATA_DIRECTORY_TARGET[i],
list_path=cfg.DATA_LIST_TARGET[i],
set=cfg.TEST.SET_TARGET[i],
info_path=cfg.TEST.INFO_TARGET[i],
crop_size=cfg.TEST.INPUT_SIZE_TARGET,
mean=cfg.TEST.IMG_MEAN,
labels_size=cfg.TEST.OUTPUT_SIZE_TARGET,
num_classes=cfg.NUM_CLASSES)
elif target == 'GTA5':
test_dataset = GTA5DataSet(root=cfg.DATA_DIRECTORY_TARGET[i],
list_path=cfg.DATA_LIST_TARGET[i],
set=cfg.TEST.SET_TARGET[i],
crop_size=cfg.TEST.INPUT_SIZE_TARGET,
mean=cfg.TEST.IMG_MEAN,
num_classes=cfg.NUM_CLASSES)
elif target == 'ACDC':
test_dataset = ACDCDataSet(root=cfg.DATA_DIRECTORY_TARGET[i],
list_path=cfg.DATA_LIST_TARGET[i],
set=cfg.TEST.SET_TARGET[i],
info_path=cfg.TEST.INFO_TARGET[i],
crop_size=cfg.TEST.INPUT_SIZE_TARGET,
mean=cfg.TEST.IMG_MEAN,
labels_size=cfg.TEST.OUTPUT_SIZE_TARGET,
num_classes=cfg.NUM_CLASSES)
else:
#valeodata.download('cityscapes/leftImg8bit/')
#valeodata.download('cityscapes/gtFine/')
test_dataset = CityscapesDataSet(root=cfg.DATA_DIRECTORY_TARGET[i],
list_path=cfg.DATA_LIST_TARGET[i],
set=cfg.TEST.SET_TARGET[i],
info_path=cfg.TEST.INFO_TARGET[i],
crop_size=cfg.TEST.INPUT_SIZE_TARGET,
mean=cfg.TEST.IMG_MEAN,
labels_size=cfg.TEST.OUTPUT_SIZE_TARGET,
num_classes=cfg.NUM_CLASSES)
test_loader = data.DataLoader(test_dataset,
batch_size=cfg.TEST.BATCH_SIZE_TARGET,
num_workers=cfg.NUM_WORKERS,
shuffle=False,
pin_memory=True)
test_loader_list.append(test_loader)
# eval
evaluate_domain_adaptation(models, test_loader_list, cfg)
if __name__ == '__main__':
args = get_arguments()
print('Called with args:')
print(args)
main(args.cfg, args.exp_suffix)
| 5,988 | 44.371212 | 91 | py |
MuHDi | MuHDi-master/muhdi/scripts/train.py | import argparse
import os
import os.path as osp
import pprint
import random
import warnings
import numpy as np
import yaml
import torch
from torch.utils import data
from muhdi.model.deeplabv2 import get_deeplab_v2, get_deeplab_v2_attention, get_deeplab_v2_muhdi
from muhdi.dataset.gta5 import GTA5DataSet
from muhdi.dataset.synthia import SYNDataSet
from muhdi.dataset.cityscapes import CityscapesDataSet
from muhdi.dataset.idd import IDDDataSet
from muhdi.dataset.acdc import ACDCDataSet
from muhdi.dataset.mapillary import MapillaryDataSet
from muhdi.domain_adaptation.config import cfg, cfg_from_file
from muhdi.domain_adaptation.train_UDA import train_advent, train_advent_muhdi
#import valeodata
warnings.filterwarnings("ignore", message="numpy.dtype size changed")
warnings.filterwarnings("ignore")
def get_arguments():
"""
Parse input arguments
"""
parser = argparse.ArgumentParser(description="Code for domain adaptation (DA) training")
parser.add_argument('--cfg', type=str, default=None,
help='optional config file', )
parser.add_argument("--random-train", action="store_true",
help="not fixing random seed.")
parser.add_argument("--tensorboard", action="store_true",
help="visualize training loss with tensorboardX.")
parser.add_argument("--viz_every_iter", type=int, default=None,
help="visualize results.")
parser.add_argument("--exp-suffix", type=str, default=None,
help="optional experiment suffix")
return parser.parse_args()
def main():
# LOAD ARGS
args = get_arguments()
print('Called with args:')
print(args)
assert args.cfg is not None, 'Missing cfg file'
cfg_from_file(args.cfg)
# auto-generate exp name if not specified
if cfg.EXP_NAME == '':
cfg.EXP_NAME = f'{cfg.SOURCE}2{cfg.TARGET}_{cfg.TRAIN.MODEL}_{cfg.TRAIN.DA_METHOD}'
if args.exp_suffix:
cfg.EXP_NAME += f'_{args.exp_suffix}'
# auto-generate snapshot path if not specified
if cfg.TRAIN.SNAPSHOT_DIR == '':
cfg.TRAIN.SNAPSHOT_DIR = osp.join(cfg.EXP_ROOT_SNAPSHOT, cfg.EXP_NAME)
os.makedirs(cfg.TRAIN.SNAPSHOT_DIR, exist_ok=True)
# tensorboard
if args.tensorboard:
if cfg.TRAIN.TENSORBOARD_LOGDIR == '':
cfg.TRAIN.TENSORBOARD_LOGDIR = osp.join(cfg.EXP_ROOT_LOGS, 'tensorboard', cfg.EXP_NAME)
os.makedirs(cfg.TRAIN.TENSORBOARD_LOGDIR, exist_ok=True)
if args.viz_every_iter is not None:
cfg.TRAIN.TENSORBOARD_VIZRATE = args.viz_every_iter
else:
cfg.TRAIN.TENSORBOARD_LOGDIR = ''
print('Using config:')
pprint.pprint(cfg)
# INIT
_init_fn = None
if not args.random_train:
torch.manual_seed(cfg.TRAIN.RANDOM_SEED)
torch.cuda.manual_seed(cfg.TRAIN.RANDOM_SEED)
np.random.seed(cfg.TRAIN.RANDOM_SEED)
random.seed(cfg.TRAIN.RANDOM_SEED)
def _init_fn(worker_id):
np.random.seed(cfg.TRAIN.RANDOM_SEED + worker_id)
if os.environ.get('ADVENT_DRY_RUN', '0') == '1':
return
# LOAD SEGMENTATION NET
assert osp.exists(cfg.TRAIN.RESTORE_FROM), f'Missing init model {cfg.TRAIN.RESTORE_FROM}'
if cfg.TRAIN.MODEL == 'DeepLabv2':
model = get_deeplab_v2(num_classes=cfg.NUM_CLASSES, multi_level=cfg.TRAIN.MULTI_LEVEL)
# old_model = get_deeplab_v2(num_classes=cfg.NUM_CLASSES, multi_level=cfg.TRAIN.MULTI_LEVEL)
saved_state_dict = torch.load(cfg.TRAIN.RESTORE_FROM)
if 'DeepLab_resnet_pretrained_imagenet' in cfg.TRAIN.RESTORE_FROM:
new_params = model.state_dict().copy()
for i in saved_state_dict:
i_parts = i.split('.')
if not i_parts[1] == 'layer5':
new_params['.'.join(i_parts[1:])] = saved_state_dict[i]
model.load_state_dict(new_params)
# new_params = old_model.state_dict().copy()
# for i in saved_state_dict:
# i_parts = i.split('.')
# if not i_parts[1] == 'layer5':
# new_params['.'.join(i_parts[1:])] = saved_state_dict[i]
# old_model.load_state_dict(new_params)
else:
model.load_state_dict(saved_state_dict)
# old_model.load_state_dict(saved_state_dict)
elif cfg.TRAIN.MODEL == 'DeepLabv2Attention':
model = get_deeplab_v2_attention(num_classes=cfg.NUM_CLASSES, multi_level=cfg.TRAIN.MULTI_LEVEL)
old_model = get_deeplab_v2_attention(num_classes=cfg.NUM_CLASSES, multi_level=cfg.TRAIN.MULTI_LEVEL)
saved_state_dict = torch.load(cfg.TRAIN.RESTORE_FROM)
if 'DeepLab_resnet_pretrained_imagenet' in cfg.TRAIN.RESTORE_FROM:
new_params = model.state_dict().copy()
for i in saved_state_dict:
i_parts = i.split('.')
if not i_parts[1] == 'layer5':
new_params['.'.join(i_parts[1:])] = saved_state_dict[i]
model.load_state_dict(new_params)
new_params = old_model.state_dict().copy()
for i in saved_state_dict:
i_parts = i.split('.')
if not i_parts[1] == 'layer5':
new_params['.'.join(i_parts[1:])] = saved_state_dict[i]
old_model.load_state_dict(new_params)
else:
model.load_state_dict(saved_state_dict)
old_model.load_state_dict(torch.load(cfg.TRAIN.RESTORE_FROM))
elif cfg.TRAIN.MODEL == 'DeepLabv2MuHDi':
model = get_deeplab_v2_muhdi(num_classes=cfg.NUM_CLASSES, multi_level=cfg.TRAIN.MULTI_LEVEL)
old_model = get_deeplab_v2_attention(num_classes=cfg.NUM_CLASSES, multi_level=cfg.TRAIN.MULTI_LEVEL)
saved_state_dict = torch.load(cfg.TRAIN.RESTORE_FROM)
if 'DeepLab_resnet_pretrained_imagenet' in cfg.TRAIN.RESTORE_FROM:
new_params = model.state_dict().copy()
for i in saved_state_dict:
i_parts = i.split('.')
if not i_parts[1] == 'layer5' and not i_parts[1] == 'layer7':
new_params['.'.join(i_parts[1:])] = saved_state_dict[i]
if i_parts[1] == 'layer8':
i_parts[1] = 'layer6'
new_params['.'.join(i_parts[1:])] = saved_state_dict[i]
model.load_state_dict(new_params)
new_params = old_model.state_dict().copy()
for i in saved_state_dict:
i_parts = i.split('.')
if not i_parts[1] == 'layer5' and not i_parts[1] == 'layer7' and not i_parts[1] == 'layer8':
new_params['.'.join(i_parts[1:])] = saved_state_dict[i]
old_model.load_state_dict(new_params)
else:
new_params = model.state_dict().copy()
for i in saved_state_dict:
i_parts = i.split('.')
new_params[i] = saved_state_dict[i]
if i_parts[0] == 'layer7':
i_parts[0] = 'layer5'
new_params['.'.join(i_parts[:])] = saved_state_dict[i]
elif i_parts[0] == 'layer8':
i_parts[0] = 'layer6'
new_params['.'.join(i_parts[:])] = saved_state_dict[i]
model.load_state_dict(new_params)
new_params = old_model.state_dict().copy()
for i in saved_state_dict:
i_parts = i.split('.')
if not i_parts[0] == 'layer7' and not i_parts[0] == 'layer8':
new_params[i] = saved_state_dict[i]
old_model.load_state_dict(new_params)
else:
raise NotImplementedError(f"Not yet supported {cfg.TRAIN.MODEL}")
print('Model loaded')
# DATALOADERS
if cfg.SOURCE == 'SYNTHIA':
source_dataset = SYNDataSet(root=cfg.DATA_DIRECTORY_SOURCE,
list_path=cfg.DATA_LIST_SOURCE,
set=cfg.TRAIN.SET_SOURCE,
max_iters=cfg.TRAIN.MAX_ITERS * cfg.TRAIN.BATCH_SIZE_SOURCE,
crop_size=cfg.TRAIN.INPUT_SIZE_SOURCE,
mean=cfg.TRAIN.IMG_MEAN,
num_classes=cfg.NUM_CLASSES)
elif cfg.SOURCE == 'Cityscapes':
source_dataset = CityscapesDataSet(root=cfg.DATA_DIRECTORY_SOURCE,
list_path=cfg.DATA_LIST_SOURCE,
set=cfg.TRAIN.SET_SOURCE,
max_iters=cfg.TRAIN.MAX_ITERS * cfg.TRAIN.BATCH_SIZE_SOURCE,
crop_size=cfg.TRAIN.INPUT_SIZE_SOURCE,
mean=cfg.TRAIN.IMG_MEAN,
num_classes=cfg.NUM_CLASSES)
else:
source_dataset = GTA5DataSet(root=cfg.DATA_DIRECTORY_SOURCE,
list_path=cfg.DATA_LIST_SOURCE,
set=cfg.TRAIN.SET_SOURCE,
max_iters=cfg.TRAIN.MAX_ITERS * cfg.TRAIN.BATCH_SIZE_SOURCE,
crop_size=cfg.TRAIN.INPUT_SIZE_SOURCE,
mean=cfg.TRAIN.IMG_MEAN,
num_classes=cfg.NUM_CLASSES
)
source_loader = data.DataLoader(source_dataset,
batch_size=cfg.TRAIN.BATCH_SIZE_SOURCE,
num_workers=cfg.NUM_WORKERS,
shuffle=True,
pin_memory=True,
worker_init_fn=_init_fn)
assert len(cfg.TARGETS) == 1, 'expected to train on a single target domain'
target = cfg.TARGETS[0]
if target == 'Mapillary':
target_dataset = MapillaryDataSet(root=cfg.DATA_DIRECTORY_TARGET[0],
set=cfg.TRAIN.SET_TARGET[0],
max_iters=cfg.TRAIN.MAX_ITERS * cfg.TRAIN.BATCH_SIZE_TARGET,
crop_size=cfg.TRAIN.INPUT_SIZE_TARGET,
mean=cfg.TRAIN.IMG_MEAN)
elif target == 'IDD':
target_dataset = IDDDataSet(root=cfg.DATA_DIRECTORY_TARGET[0],
list_path=cfg.DATA_LIST_TARGET[0],
set=cfg.TRAIN.SET_TARGET[0],
info_path=cfg.TRAIN.INFO_TARGET[0],
max_iters=cfg.TRAIN.MAX_ITERS * cfg.TRAIN.BATCH_SIZE_TARGET,
crop_size=cfg.TRAIN.INPUT_SIZE_TARGET,
mean=cfg.TRAIN.IMG_MEAN,
num_classes=cfg.NUM_CLASSES)
elif target == 'ACDC':
target_dataset = ACDCDataSet(root=cfg.DATA_DIRECTORY_TARGET[0],
list_path=cfg.DATA_LIST_TARGET[0],
set=cfg.TRAIN.SET_TARGET[0],
info_path=cfg.TRAIN.INFO_TARGET[0],
max_iters=cfg.TRAIN.MAX_ITERS * cfg.TRAIN.BATCH_SIZE_TARGET,
crop_size=cfg.TRAIN.INPUT_SIZE_TARGET,
mean=cfg.TRAIN.IMG_MEAN,
num_classes=cfg.NUM_CLASSES)
else:
target_dataset = CityscapesDataSet(root=cfg.DATA_DIRECTORY_TARGET[0],
list_path=cfg.DATA_LIST_TARGET[0],
set=cfg.TRAIN.SET_TARGET[0],
info_path=cfg.TRAIN.INFO_TARGET[0],
max_iters=cfg.TRAIN.MAX_ITERS * cfg.TRAIN.BATCH_SIZE_TARGET,
crop_size=cfg.TRAIN.INPUT_SIZE_TARGET,
mean=cfg.TRAIN.IMG_MEAN,
num_classes=cfg.NUM_CLASSES)
target_loader = data.DataLoader(target_dataset,
batch_size=cfg.TRAIN.BATCH_SIZE_TARGET,
num_workers=cfg.NUM_WORKERS,
shuffle=True,
pin_memory=True,
worker_init_fn=_init_fn)
with open(osp.join(cfg.TRAIN.SNAPSHOT_DIR, 'train_cfg.yml'), 'w') as yaml_file:
yaml.dump(cfg, yaml_file, default_flow_style=False)
# UDA TRAINING
if cfg.TRAIN.MODEL == "DeepLabv2MuHDi":
train_advent_muhdi(model, old_model, source_loader, target_loader, cfg)
else:
train_advent(model, source_loader, target_loader, cfg)
if __name__ == '__main__':
main()
| 12,633 | 46.318352 | 108 | py |
MuHDi | MuHDi-master/muhdi/domain_adaptation/eval_UDA.py | import os.path as osp
import time
import re
import numpy as np
import torch
from torch import nn
from tqdm import tqdm
from muhdi.utils.func import per_class_iu, fast_hist
from muhdi.utils.serialization import pickle_dump, pickle_load
def evaluate_domain_adaptation(models, test_loader_list, cfg,
verbose=True):
device = cfg.GPU_ID
interp = nn.Upsample(size=(cfg.TEST.OUTPUT_SIZE_TARGET[1], cfg.TEST.OUTPUT_SIZE_TARGET[0]),
mode='bilinear', align_corners=True)
# eval
if cfg.TEST.MODE == 'single':
eval_single(cfg, models,
device, test_loader_list, interp,
verbose)
elif cfg.TEST.MODE == 'best':
eval_best(cfg, models,
device, test_loader_list, interp,
verbose)
else:
raise NotImplementedError(f"Not yet supported test mode {cfg.TEST.MODE}")
def eval_single(cfg, models,
device, test_loader_list, interp,
verbose):
## TODO: update for multi target
assert len(cfg.TEST.RESTORE_FROM) == len(models), 'Number of models are not matched'
for checkpoint, model in zip(cfg.TEST.RESTORE_FROM, models):
load_checkpoint_for_evaluation(model, checkpoint, device)
# eval
print("Evaluating model ", cfg.TEST.RESTORE_FROM[0])
num_targets = len(cfg.TARGETS)
computed_miou_list = []
for i_target in range(num_targets):
if cfg.TARGETS[i_target] == "ACDC":
set = re.split('_',cfg.TEST.SET_TARGET[i_target])
cfg.TARGETS[i_target] = "ACDC_" + set[0]
test_loader = test_loader_list[i_target]
hist = np.zeros((cfg.NUM_CLASSES, cfg.NUM_CLASSES))
test_iter = iter(test_loader)
for index in tqdm(range(len(test_loader))):
image, label, _, name = next(test_iter)
with torch.no_grad():
output = None
for model, model_weight in zip(models, cfg.TEST.MODEL_WEIGHT):
if cfg.TEST.MODEL[0] == 'DeepLabv2':
_, pred_main = model(image.cuda(device))
else:
raise NotImplementedError(f"Not yet supported {cfg.TEST.MODEL[0]}")
interp = nn.Upsample(size=(cfg.TEST.OUTPUT_SIZE_TARGET[1], cfg.TEST.OUTPUT_SIZE_TARGET[0]),
mode='bilinear', align_corners=True)
output_ = interp(pred_main).cpu().data[0].numpy()
if output is None:
output = model_weight * output_
else:
output += model_weight * output_
assert output is not None, 'Output is None'
output = output.transpose(1, 2, 0)
output = np.argmax(output, axis=2)
label = label.numpy()[0]
hist += fast_hist(label.flatten(), output.flatten(), cfg.NUM_CLASSES)
inters_over_union_classes = per_class_iu(hist)
computed_miou = round(np.nanmean(inters_over_union_classes) * 100, 2)
computed_miou_list.append(computed_miou)
print('\tTarget:', cfg.TARGETS[i_target])
print(f'mIoU = \t{computed_miou}')
if verbose:
name_classes = np.array(test_loader.dataset.info['label'], dtype=np.str)
display_stats(cfg, name_classes, inters_over_union_classes)
print('\tMulti-target:', cfg.TARGETS)
print('\mIoU:', round(np.nanmean(computed_miou_list), 2))
def eval_best(cfg, models,
device, test_loader_list, interp,
verbose):
assert len(models) == 1, 'Not yet supported multi models in this mode'
assert osp.exists(cfg.TEST.SNAPSHOT_DIR[0]), 'SNAPSHOT_DIR is not found'
start_iter = cfg.TEST.SNAPSHOT_STEP
step = cfg.TEST.SNAPSHOT_STEP
max_iter = cfg.TEST.SNAPSHOT_MAXITER
all_res_list = []
cache_path_list = []
num_targets = len(cfg.TARGETS)
for target in cfg.TARGETS:
cache_path = osp.join(osp.join(cfg.TEST.SNAPSHOT_DIR[0], target), cfg.TEST.ALL_RES)
cache_path_list.append(cache_path)
if osp.exists(cache_path):
all_res_list.append(pickle_load(cache_path))
else:
all_res_list.append({})
cur_best_miou = -1
cur_best_model = ''
cur_best_miou_list = []
cur_best_model_list = []
for i in range(num_targets):
cur_best_miou_list.append(-1)
cur_best_model_list.append('')
for i_iter in range(start_iter, max_iter+1, step): #
print(f'Loading model_{i_iter}.pth')
restore_from = osp.join(cfg.TEST.SNAPSHOT_DIR[0], f'model_{i_iter}.pth')
if not osp.exists(restore_from):
# continue
if cfg.TEST.WAIT_MODEL:
print('Waiting for model..!')
while not osp.exists(restore_from):
time.sleep(5)
print("Evaluating model", restore_from)
load_checkpoint_for_evaluation(models[0], restore_from, device)
computed_miou_list = []
for i_target in range(num_targets):
if cfg.TARGETS[i_target] == "ACDC":
set = re.split('_',cfg.TEST.SET_TARGET[i_target])
cfg.TARGETS[i_target] = "ACDC_" + set[0]
print("On target", cfg.TARGETS[i_target])
all_res = all_res_list[i_target]
cache_path = cache_path_list[i_target]
test_loader = test_loader_list[i_target]
if i_iter not in all_res.keys():
# eval
hist = np.zeros((cfg.NUM_CLASSES, cfg.NUM_CLASSES))
test_iter = iter(test_loader)
for index in tqdm(range(len(test_loader))):
image, label, _, name = next(test_iter)
with torch.no_grad():
output = None
if cfg.TEST.MODEL[0] == 'DeepLabv2':
_, pred_main = models[0](image.cuda(device))
else:
raise NotImplementedError(f"Not yet supported {cfg.TEST.MODEL[0]}")
interp = nn.Upsample(size=(cfg.TEST.OUTPUT_SIZE_TARGET[1], cfg.TEST.OUTPUT_SIZE_TARGET[0]),
mode='bilinear', align_corners=True)
output = interp(pred_main).cpu().data[0].numpy()
output = output.transpose(1, 2, 0)
output = np.argmax(output, axis=2)
label = label.numpy()[0]
hist += fast_hist(label.flatten(), output.flatten(), cfg.NUM_CLASSES)
if verbose and index > 0 and index % 500 == 0:
print('{:d} / {:d}: {:0.2f}'.format(
index, len(test_loader), 100 * np.nanmean(per_class_iu(hist))))
inters_over_union_classes = per_class_iu(hist)
all_res[i_iter] = inters_over_union_classes
pickle_dump(all_res, cache_path)
else:
inters_over_union_classes = all_res[i_iter]
computed_miou = round(np.nanmean(inters_over_union_classes) * 100, 2)
computed_miou_list.append(computed_miou)
if cur_best_miou_list[i_target] < computed_miou:
cur_best_miou_list[i_target] = computed_miou
cur_best_model_list[i_target] = restore_from
print('\tTarget:', cfg.TARGETS[i_target])
print('\tCurrent mIoU:', computed_miou)
print('\tCurrent best model:', cur_best_model_list[i_target])
print('\tCurrent best mIoU:', cur_best_miou_list[i_target])
if verbose:
name_classes = np.array(test_loader.dataset.info['label'], dtype=np.str)
display_stats(cfg, name_classes, inters_over_union_classes)
computed_miou = round(np.nanmean(computed_miou_list), 2)
if cur_best_miou < computed_miou:
cur_best_miou = computed_miou
cur_best_model = restore_from
print('\tMulti-target:', cfg.TARGETS)
print('\tCurrent mIoU:', computed_miou)
print('\tCurrent best model:', cur_best_model)
print('\tCurrent best mIoU:', cur_best_miou)
def load_checkpoint_for_evaluation(model, checkpoint, device):
saved_state_dict = torch.load(checkpoint)
model.load_state_dict(saved_state_dict, strict=False)
model.eval()
model.cuda(device)
def display_stats(cfg, name_classes, inters_over_union_classes):
for ind_class in range(cfg.NUM_CLASSES):
print(name_classes[ind_class]
+ '\t' + str(round(inters_over_union_classes[ind_class] * 100, 2)))
| 8,729 | 44.947368 | 115 | py |
MuHDi | MuHDi-master/muhdi/domain_adaptation/train_UDA.py | import os
import math
import sys
from pathlib import Path
import os.path as osp
import numpy as np
import torch
import torch.backends.cudnn as cudnn
import torch.nn.functional as F
import torch.optim as optim
from torch import nn
from tqdm import tqdm
from muhdi.model.discriminator import get_fc_discriminator, restore_discriminator
from muhdi.utils.func import adjust_learning_rate, adjust_learning_rate_discriminator
from muhdi.utils.func import loss_calc, bce_loss
from muhdi.utils.loss import kl_divergence, mse_loss
from muhdi.utils.func import prob_2_entropy
def train_advent(model, source_loader, target_loader, cfg):
''' UDA training with advent
'''
# Create the model and start the training.
input_size_source = cfg.TRAIN.INPUT_SIZE_SOURCE
input_size_target = cfg.TRAIN.INPUT_SIZE_TARGET
device = cfg.GPU_ID
num_classes = cfg.NUM_CLASSES
# SEGMNETATION NETWORK
model.train()
model.to(device)
cudnn.benchmark = True
cudnn.enabled = True
# DISCRIMINATOR NETWORK
# seg maps, i.e. output, level
d_main = get_fc_discriminator(num_classes=num_classes)
if cfg.TRAIN.RESTORE_D_MAIN is not '':
d_main = restore_discriminator(d_main, cfg.TRAIN.RESTORE_D_MAIN)
d_main.train()
d_main.to(device)
d_aux = get_fc_discriminator(num_classes=num_classes)
if cfg.TRAIN.RESTORE_D_AUX is not '':
d_aux = restore_discriminator(d_aux, cfg.TRAIN.RESTORE_D_AUX)
d_aux.train()
d_aux.to(device)
# OPTIMIZERS
# segnet's optimizer
optimizer = optim.SGD(model.optim_parameters(cfg.TRAIN.LEARNING_RATE),
lr=cfg.TRAIN.LEARNING_RATE,
momentum=cfg.TRAIN.MOMENTUM,
weight_decay=cfg.TRAIN.WEIGHT_DECAY)
# discriminators' optimizers
optimizer_d_main = optim.Adam(d_main.parameters(), lr=cfg.TRAIN.LEARNING_RATE_D,
betas=(0.9, 0.99))
optimizer_d_aux = optim.Adam(d_aux.parameters(), lr=cfg.TRAIN.LEARNING_RATE_D,
betas=(0.9, 0.99))
# interpolate output segmaps
interp = nn.Upsample(size=(input_size_source[1], input_size_source[0]), mode='bilinear',
align_corners=True)
interp_target = nn.Upsample(size=(input_size_target[1], input_size_target[0]), mode='bilinear',
align_corners=True)
# labels for adversarial training
source_label = 0
target_label = 1
source_loader_iter = enumerate(source_loader)
target_loader_iter = enumerate(target_loader)
for i_iter in tqdm(range(cfg.TRAIN.EARLY_STOP+1)):
# reset optimizers
optimizer.zero_grad()
optimizer_d_aux.zero_grad()
optimizer_d_main.zero_grad()
# adapt LR if needed
adjust_learning_rate(optimizer, i_iter, cfg)
adjust_learning_rate_discriminator(optimizer_d_aux, i_iter, cfg)
adjust_learning_rate_discriminator(optimizer_d_main, i_iter, cfg)
# UDA Training
# only train segnet. Don't accumulate grads in disciminators
for param in d_aux.parameters():
param.requires_grad = False
for param in d_main.parameters():
param.requires_grad = False
# train on source
_, batch = source_loader_iter.__next__()
images_source, labels, _, _ = batch
pred_src_aux, pred_src_main = model(images_source.cuda(device))
if cfg.TRAIN.MULTI_LEVEL:
pred_src_aux = interp(pred_src_aux)
loss_seg_src_aux = loss_calc(pred_src_aux, labels, device)
else:
loss_seg_src_aux = 0
pred_src_main = interp(pred_src_main)
loss_seg_src_main = loss_calc(pred_src_main, labels, device)
loss = (cfg.TRAIN.LAMBDA_SEG_MAIN * loss_seg_src_main
+ cfg.TRAIN.LAMBDA_SEG_AUX * loss_seg_src_aux)
loss.backward()
# adversarial training to fool the discriminators
# train on target if pseudo-labels
_, batch = target_loader_iter.__next__()
images, _, _, _ = batch
pred_trg_aux, pred_trg_main = model(images.float().cuda(device))
if cfg.TRAIN.MULTI_LEVEL:
pred_trg_aux = interp_target(pred_trg_aux)
d_out_aux = d_aux(prob_2_entropy(F.softmax(pred_trg_aux)))
loss_adv_trg_aux = bce_loss(d_out_aux, source_label)
else:
loss_adv_trg_aux = 0
pred_trg_main = interp_target(pred_trg_main)
d_out_main = d_main(prob_2_entropy(F.softmax(pred_trg_main)))
loss_adv_trg_main = bce_loss(d_out_main, source_label)
loss = (cfg.TRAIN.LAMBDA_ADV_MAIN * loss_adv_trg_main
+ cfg.TRAIN.LAMBDA_ADV_AUX * loss_adv_trg_aux)
loss.backward()
# Train discriminator networks
# enable training mode on discriminator networks
for param in d_aux.parameters():
param.requires_grad = True
for param in d_main.parameters():
param.requires_grad = True
# train with source
pred_src_main = pred_src_main.detach()
d_out_main = d_main(prob_2_entropy(F.softmax(pred_src_main)))
loss_d_main = bce_loss(d_out_main, source_label)
loss_d_main = loss_d_main / 2
loss_d_main.backward()
# train with target
if cfg.TRAIN.MULTI_LEVEL:
pred_trg_aux = pred_trg_aux.detach()
d_out_aux = d_aux(prob_2_entropy(F.softmax(pred_trg_aux)))
loss_d_aux = bce_loss(d_out_aux, target_label)
loss_d_aux = loss_d_aux / 2
loss_d_aux.backward()
else:
loss_d_aux = 0
pred_trg_main = pred_trg_main.detach()
d_out_main = d_main(prob_2_entropy(F.softmax(pred_trg_main)))
loss_d_main = bce_loss(d_out_main, target_label)
loss_d_main = loss_d_main / 2
loss_d_main.backward()
optimizer.step()
if cfg.TRAIN.MULTI_LEVEL:
optimizer_d_aux.step()
optimizer_d_main.step()
current_losses = {'loss_seg_src_aux': loss_seg_src_aux,
'loss_seg_src_main': loss_seg_src_main,
'loss_adv_trg_aux': loss_adv_trg_aux,
'loss_adv_trg_main': loss_adv_trg_main,
'loss_d_aux': loss_d_aux,
'loss_d_main': loss_d_main}
print_losses(current_losses, i_iter)
if i_iter % cfg.TRAIN.SAVE_PRED_EVERY == 0 and i_iter != 0:
print('taking snapshot ...')
print('exp =', cfg.TRAIN.SNAPSHOT_DIR)
snapshot_dir = Path(cfg.TRAIN.SNAPSHOT_DIR)
torch.save(model.state_dict(), snapshot_dir / f'model_{i_iter}.pth')
torch.save(d_aux.state_dict(), snapshot_dir / f'model_{i_iter}_D_aux.pth')
torch.save(d_main.state_dict(), snapshot_dir / f'model_{i_iter}_D_main.pth')
if i_iter >= cfg.TRAIN.EARLY_STOP:
break
sys.stdout.flush()
def train_advent_muhdi(model, old_model, source_loader, target_loader, cfg):
''' UDA training with advent and attention
'''
# Create the model and start the training.
input_size_source = cfg.TRAIN.INPUT_SIZE_SOURCE
input_size_target = cfg.TRAIN.INPUT_SIZE_TARGET
device = cfg.GPU_ID
num_classes = cfg.NUM_CLASSES
if cfg.TRAIN.TEACHER_LOSS == "MSE":
teacher_loss = mse_loss
elif cfg.TRAIN.TEACHER_LOSS == "KL":
teacher_loss = kl_divergence
else:
raise NotImplementedError(f"Not yet supported loss {cfg.TRAIN.TEACHER_LOSS}")
# SEGMNETATION NETWORK
model.train()
model.to(device)
old_model.eval()
old_model.to(device)
cudnn.benchmark = True
cudnn.enabled = True
# DISCRIMINATOR NETWORK
# seg maps, i.e. output, level
d_main = get_fc_discriminator(num_classes=num_classes)
if cfg.TRAIN.RESTORE_D_MAIN is not '':
d_main = restore_discriminator(d_main, cfg.TRAIN.RESTORE_D_MAIN)
d_main.train()
d_main.to(device)
d_aux = get_fc_discriminator(num_classes=num_classes)
if cfg.TRAIN.RESTORE_D_AUX is not '':
d_aux = restore_discriminator(d_aux, cfg.TRAIN.RESTORE_D_AUX)
d_aux.train()
d_aux.to(device)
# OPTIMIZERS
# segnet's optimizer
optimizer = optim.SGD(model.optim_parameters(cfg.TRAIN.LEARNING_RATE),
lr=cfg.TRAIN.LEARNING_RATE,
momentum=cfg.TRAIN.MOMENTUM,
weight_decay=cfg.TRAIN.WEIGHT_DECAY)
# discriminators' optimizers
optimizer_d_main = optim.Adam(d_main.parameters(), lr=cfg.TRAIN.LEARNING_RATE_D,
betas=(0.9, 0.99))
optimizer_d_aux = optim.Adam(d_aux.parameters(), lr=cfg.TRAIN.LEARNING_RATE_D,
betas=(0.9, 0.99))
# interpolate output segmaps
interp = nn.Upsample(size=(input_size_source[1], input_size_source[0]), mode='bilinear',
align_corners=True)
interp_target = nn.Upsample(size=(input_size_target[1], input_size_target[0]), mode='bilinear',
align_corners=True)
# labels for adversarial training
source_label = 0
target_label = 1
source_loader_iter = enumerate(source_loader)
target_loader_iter = enumerate(target_loader)
for i_iter in tqdm(range(cfg.TRAIN.EARLY_STOP+1)):
# reset optimizers
optimizer.zero_grad()
optimizer_d_aux.zero_grad()
optimizer_d_main.zero_grad()
# adapt LR if needed
adjust_learning_rate(optimizer, i_iter, cfg)
adjust_learning_rate_discriminator(optimizer_d_aux, i_iter, cfg)
adjust_learning_rate_discriminator(optimizer_d_main, i_iter, cfg)
# UDA Training
# only train segnet. Don't accumulate grads in disciminators
for param in d_aux.parameters():
param.requires_grad = False
for param in d_main.parameters():
param.requires_grad = False
# train on source
_, batch = source_loader_iter.__next__()
images_source, labels, _, _ = batch
images_source_cuda = images_source.cuda(device)
pred_src_aux_agn, pred_src_main_agn, pred_src_aux_spe, pred_src_main_spe, attentions_new = model(images_source_cuda)
pred_src_aux_old, pred_src_main_old, attentions_old = old_model(images_source_cuda)
# source distillation
pred_prob_src_aux_agn = F.softmax(pred_src_aux_agn)
pred_prob_src_main_agn = F.softmax(pred_src_main_agn)
feat_distill_loss = 0
if cfg.TRAIN.DISTILL.FEAT:
feat_distill_loss += features_distillation(attentions_old, attentions_new)
kt_distill_loss = 0
if cfg.TRAIN.DISTILL.KT_LOGITS:
kt_distill_loss += kl_divergence(pred_src_main_agn, pred_src_main_old)
if cfg.TRAIN.MULTI_LEVEL:
kt_distill_loss += cfg.TRAIN.LAMBDA_SEG_AUX/cfg.TRAIN.LAMBDA_SEG_MAIN * kl_divergence(pred_src_aux_agn, pred_src_aux_old)
if cfg.TRAIN.MULTI_LEVEL:
pred_src_aux_spe = interp(pred_src_aux_spe)
loss_seg_src_aux_spe = loss_calc(pred_src_aux_spe, labels, device)
else:
loss_seg_src_aux_spe = 0
pred_src_main_spe = interp(pred_src_main_spe)
loss_seg_src_main_spe = loss_calc(pred_src_main_spe, labels, device)
loss = (cfg.TRAIN.LAMBDA_SEG_MAIN * loss_seg_src_main_spe
+ cfg.TRAIN.LAMBDA_SEG_AUX * loss_seg_src_aux_spe
+ cfg.TRAIN.DISTILL.FEAT_LAMBDA * feat_distill_loss
+ cfg.TRAIN.DISTILL.KT_LAMBDA * kt_distill_loss)
loss.backward()
# adversarial training to fool the discriminators
_, batch = target_loader_iter.__next__()
images, _, _, _ = batch
pred_trg_aux_agn, pred_trg_main_agn, pred_trg_aux_spe, pred_trg_main_spe, _ = model(images.float().cuda(device))
if cfg.TRAIN.MULTI_LEVEL:
pred_trg_aux_spe = interp_target(pred_trg_aux_spe)
d_out_aux = d_aux(prob_2_entropy(F.softmax(pred_trg_aux_spe)))
loss_adv_trg_aux = bce_loss(d_out_aux, source_label)
else:
loss_adv_trg_aux = 0
pred_trg_main_spe = interp_target(pred_trg_main_spe)
d_out_main = d_main(prob_2_entropy(F.softmax(pred_trg_main_spe)))
loss_adv_trg_main = bce_loss(d_out_main, source_label)
loss_div_trg = 0
loss_div_aux = 0
pred_trg_main_spe = pred_trg_main_spe.detach()
pred_trg_main_agn = interp_target(pred_trg_main_agn)
loss_div_trg = teacher_loss(pred_trg_main_agn, pred_trg_main_spe)
loss_kt_trg = loss_div_trg
if cfg.TRAIN.MULTI_LEVEL:
pred_trg_aux_agn = interp_target(pred_trg_aux_agn)
pred_trg_aux_spe = pred_trg_aux_spe.detach()
loss_div_aux = teacher_loss(pred_trg_aux_agn, pred_trg_aux_spe)
loss_kt_trg += cfg.TRAIN.LAMBDA_SEG_AUX * loss_div_aux
loss = (cfg.TRAIN.LAMBDA_ADV_MAIN * loss_adv_trg_main
+ cfg.TRAIN.LAMBDA_ADV_AUX * loss_adv_trg_aux
+ cfg.TRAIN.LAMBDA_KT_TARGET * loss_kt_trg)
loss.backward()
# Train discriminator networks
# enable training mode on discriminator networks
for param in d_aux.parameters():
param.requires_grad = True
for param in d_main.parameters():
param.requires_grad = True
# train with source
if cfg.TRAIN.MULTI_LEVEL:
pred_src_aux_spe = pred_src_aux_spe.detach()
d_out_aux = d_aux(prob_2_entropy(F.softmax(pred_src_aux_spe)))
loss_d_aux = bce_loss(d_out_aux, source_label)
loss_d_aux = loss_d_aux / 2
loss_d_aux.backward()
pred_src_main_spe = pred_src_main_spe.detach()
d_out_main = d_main(prob_2_entropy(F.softmax(pred_src_main_spe)))
loss_d_main = bce_loss(d_out_main, source_label)
loss_d_main = loss_d_main / 2
loss_d_main.backward()
# train with target
if cfg.TRAIN.MULTI_LEVEL:
pred_trg_aux_spe = pred_trg_aux_spe.detach()
d_out_aux = d_aux(prob_2_entropy(F.softmax(pred_trg_aux_spe)))
loss_d_aux = bce_loss(d_out_aux, target_label)
loss_d_aux = loss_d_aux / 2
loss_d_aux.backward()
else:
loss_d_aux = 0
pred_trg_main_spe = pred_trg_main_spe.detach()
d_out_main = d_main(prob_2_entropy(F.softmax(pred_trg_main_spe)))
loss_d_main = bce_loss(d_out_main, target_label)
loss_d_main = loss_d_main / 2
loss_d_main.backward()
optimizer.step()
if cfg.TRAIN.MULTI_LEVEL:
optimizer_d_aux.step()
optimizer_d_main.step()
current_losses = {'loss_seg_src_aux_spe': loss_seg_src_aux_spe,
'loss_seg_src_main_spe': loss_seg_src_main_spe,
'loss_adv_trg_aux': loss_adv_trg_aux,
'loss_adv_trg_main': loss_adv_trg_main,
'loss_d_aux': loss_d_aux,
'loss_d_main': loss_d_main,
'feat_distill_loss': feat_distill_loss,
'kt_distill_loss': kt_distill_loss,
'loss_kt_trg': loss_kt_trg}
print_losses(current_losses, i_iter)
if i_iter % cfg.TRAIN.SAVE_PRED_EVERY == 0 and i_iter != 0:
print('taking snapshot ...')
print('exp =', cfg.TRAIN.SNAPSHOT_DIR)
snapshot_dir = Path(cfg.TRAIN.SNAPSHOT_DIR)
torch.save(model.state_dict(), snapshot_dir / f'model_{i_iter}.pth')
torch.save(d_aux.state_dict(), snapshot_dir / f'model_{i_iter}_D_aux.pth')
torch.save(d_main.state_dict(), snapshot_dir / f'model_{i_iter}_D_main.pth')
if i_iter >= cfg.TRAIN.EARLY_STOP:
break
sys.stdout.flush()
def print_losses(current_losses, i_iter):
list_strings = []
for loss_name, loss_value in current_losses.items():
list_strings.append(f'{loss_name} = {to_numpy(loss_value):.3f} ')
full_string = ' '.join(list_strings)
tqdm.write(f'iter = {i_iter} {full_string}')
def to_numpy(tensor):
if isinstance(tensor, (int, float)):
return tensor
else:
return tensor.data.cpu().numpy()
def features_distillation(
list_attentions_a,
list_attentions_b,
mask=None,
collapse_channels="local",
normalize=True,
labels=None,
mask_threshold=0.0,
pod_apply="all",
pod_deeplab_mask=False,
pod_deeplab_mask_factor=None,
interpolate_last=False,
pod_factor=1.,
pod_output_factor=0.05,
n_output_layers=0,
prepro="pow",
deeplabmask_upscale=True,
spp_scales=[1, 2, 4],
pod_options=None,
outputs_old=None,
):
"""A mega-function comprising several features-based distillation.
:param list_attentions_a: A list of attention maps, each of shape (b, n, w, h).
:param list_attentions_b: A list of attention maps, each of shape (b, n, w, h).
:param collapse_channels: How to pool the channels.
:param memory_flags: Integer flags denoting exemplars.
:param only_old: Only apply loss to exemplars.
:return: A float scalar loss.
"""
device = list_attentions_a[0].device
assert len(list_attentions_a) == len(list_attentions_b)
if pod_deeplab_mask_factor is None:
pod_deeplab_mask_factor = pod_factor
#if collapse_channels in ("spatial_tuple", "spp", "spp_noNorm", "spatial_noNorm"):
normalize = False
upscale_mask_topk = 1
mask_position = "all" # Others choices "all" "backbone"
use_adaptative_factor = False
mix_new_old = None
pod_output_factor = pod_factor * pod_output_factor
loss = torch.tensor(0.).to(list_attentions_a[0].device)
for i, (a, b) in enumerate(zip(list_attentions_a, list_attentions_b)):
adaptative_pod_factor = 1.0
difference_function = "frobenius"
pool = True
use_adaptative_factor = False
normalize_per_scale = False
if i >= len(list_attentions_a) - n_output_layers:
pod_factor = pod_output_factor
# shape of (b, n, w, h)
assert a.shape == b.shape, (a.shape, b.shape)
if not pod_deeplab_mask and use_adaptative_factor:
adaptative_pod_factor = (labels == 0).float().mean()
if prepro == "pow":
a = torch.pow(a, 2)
b = torch.pow(b, 2)
elif prepro == "none":
pass
elif prepro == "abs":
a = torch.abs(a, 2)
b = torch.abs(b, 2)
elif prepro == "relu":
a = torch.clamp(a, min=0.)
b = torch.clamp(b, min=0.)
if collapse_channels == "spatial":
a_h = a.sum(dim=3).view(a.shape[0], -1)
b_h = b.sum(dim=3).view(b.shape[0], -1)
a_w = a.sum(dim=2).view(a.shape[0], -1)
b_w = b.sum(dim=2).view(b.shape[0], -1)
a = torch.cat([a_h, a_w], dim=-1)
b = torch.cat([b_h, b_w], dim=-1)
elif collapse_channels == "local":
if pod_deeplab_mask and (
(i == len(list_attentions_a) - 1 and mask_position == "last") or
mask_position == "all"
):
if pod_deeplab_mask_factor == 0.:
continue
pod_factor = pod_deeplab_mask_factor
a = F.interpolate(
torch.topk(a, k=upscale_mask_topk, dim=1)[0],
size=labels.shape[-2:],
mode="bilinear",
align_corners=False
)
b = F.interpolate(
torch.topk(b, k=upscale_mask_topk, dim=1)[0],
size=labels.shape[-2:],
mode="bilinear",
align_corners=False
)
if use_adaptative_factor:
adaptative_pod_factor = mask.float().mean(dim=(1, 2))
a = _local_pod(
a, mask, spp_scales, normalize=False, normalize_per_scale=normalize_per_scale
)
b = _local_pod(
b, mask, spp_scales, normalize=False, normalize_per_scale=normalize_per_scale
)
else:
mask = None
a = _local_pod(
a, mask, spp_scales, normalize=False, normalize_per_scale=normalize_per_scale
)
b = _local_pod(
b, mask, spp_scales, normalize=False, normalize_per_scale=normalize_per_scale
)
else:
raise ValueError("Unknown method to collapse: {}".format(collapse_channels))
if i == len(list_attentions_a) - 1 and pod_options is not None:
if "difference_function" in pod_options:
difference_function = pod_options["difference_function"]
elif pod_options is not None:
if "difference_function_all" in pod_options:
difference_function = pod_options["difference_function_all"]
if normalize:
a = F.normalize(a, dim=1, p=2)
b = F.normalize(b, dim=1, p=2)
if difference_function == "frobenius":
if isinstance(a, list):
layer_loss = torch.tensor(
[torch.frobenius_norm(aa - bb, dim=-1) for aa, bb in zip(a, b)]
).to(device)
else:
layer_loss = torch.frobenius_norm(a - b, dim=-1)
elif difference_function == "frobenius_mix":
layer_loss_old = torch.frobenius_norm(a[0] - b[0], dim=-1)
layer_loss_new = torch.frobenius_norm(a[1] - b[1], dim=-1)
layer_loss = mix_new_old * layer_loss_old + (1 - mix_new_old) * layer_loss_new
elif difference_function == "l1":
if isinstance(a, list):
layer_loss = torch.tensor(
[torch.norm(aa - bb, p=1, dim=-1) for aa, bb in zip(a, b)]
).to(device)
else:
layer_loss = torch.norm(a - b, p=1, dim=-1)
elif difference_function == "kl":
d1, d2, d3 = a.shape
a = (a.view(d1 * d2, d3) + 1e-8).log()
b = b.view(d1 * d2, d3) + 1e-8
layer_loss = F.kl_div(a, b, reduction="none").view(d1, d2, d3).sum(dim=(1, 2))
elif difference_function == "bce":
d1, d2, d3 = a.shape
layer_loss = bce(a.view(d1 * d2, d3), b.view(d1 * d2, d3)).view(d1, d2,
d3).mean(dim=(1, 2))
else:
raise NotImplementedError(f"Unknown difference_function={difference_function}")
assert torch.isfinite(layer_loss).all(), layer_loss
assert (layer_loss >= 0.).all(), layer_loss
layer_loss = torch.mean(adaptative_pod_factor * layer_loss)
if pod_factor <= 0.:
continue
layer_loss = pod_factor * layer_loss
loss += layer_loss
return loss / len(list_attentions_a)
def _local_pod(x, mask, spp_scales=[1, 2, 4], square_crop=False, normalize=False, normalize_per_scale=False, per_level=False, median=False, dist=False):
b = x.shape[0]
h, w = x.shape[-2:]
emb = []
if mask is not None:
mask = mask[:, None].repeat(1, c, 1, 1)
x[mask] = 0.
min_side = min(h, w)
for scale_index, scale in enumerate(spp_scales):
nb_regions = scale**2
if square_crop:
scale_h = h // min_side * scale
scale_w = w // min_side * scale
kh = kw = min_side // scale
else:
kh, kw = h // scale, w // scale
scale_h = scale_w = scale
if per_level:
emb_per_level = []
for i in range(scale_h):
for j in range(scale_w):
tensor = x[..., i * kh:(i + 1) * kh, j * kw:(j + 1) * kw]
if any(shp == 0 for shp in tensor.shape):
print(f"Empty tensor {tensor.shape}, with (i={i}, j={j}) and scale_h={scale_h}, scale_w={scale_w}")
continue
if median:
horizontal_pool = tensor.median(dim=3, keepdims=True)[0]
vertical_pool = tensor.median(dim=2, keepdims=True)[0]
else:
horizontal_pool = tensor.mean(dim=3, keepdims=True)
vertical_pool = tensor.mean(dim=2, keepdims=True)
if dist:
# Compute the distance distribution compared to the mean/median
horizontal_pool = (tensor - horizontal_pool).mean(dim=3).view(b, -1)
vertical_pool = (tensor - vertical_pool).mean(dim=2).view(b, -1)
else:
horizontal_pool = horizontal_pool.view(b, -1)
vertical_pool = vertical_pool.view(b, -1)
if normalize_per_scale is True:
horizontal_pool = horizontal_pool / nb_regions
vertical_pool = vertical_pool / nb_regions
elif normalize_per_scale == "spm":
if scale_index == 0:
factor = 2 ** (len(spp_scales) - 1)
else:
factor = 2 ** (len(spp_scales) - scale_index)
horizontal_pool = horizontal_pool / factor
vertical_pool = vertical_pool / factor
if normalize:
horizontal_pool = F.normalize(horizontal_pool, dim=1, p=2)
vertical_pool = F.normalize(vertical_pool, dim=1, p=2)
if not per_level:
emb.append(horizontal_pool)
emb.append(vertical_pool)
else:
emb_per_level.append(torch.cat([horizontal_pool, vertical_pool], dim=1))
if per_level:
emb.append(torch.stack(emb_per_level, dim=1))
if not per_level:
return torch.cat(emb, dim=1)
return emb
| 26,171 | 39.451314 | 152 | py |
MuHDi | MuHDi-master/muhdi/utils/loss.py | import numpy as np
import torch
import torch.nn.functional as F
from torch.autograd import Variable
def cross_entropy_2d(predict, target):
"""
Args:
predict:(n, c, h, w)
target:(n, h, w)
"""
assert not target.requires_grad
assert predict.dim() == 4
assert target.dim() == 3
assert predict.size(0) == target.size(0), f"{predict.size(0)} vs {target.size(0)}"
assert predict.size(2) == target.size(1), f"{predict.size(2)} vs {target.size(1)}"
assert predict.size(3) == target.size(2), f"{predict.size(3)} vs {target.size(3)}"
n, c, h, w = predict.size()
target_mask = (target >= 0) * (target < 200)
target = target[target_mask]
if not target.data.dim():
return Variable(torch.zeros(1))
predict = predict.transpose(1, 2).transpose(2, 3).contiguous()
predict = predict[target_mask.view(n, h, w, 1).repeat(1, 1, 1, c)].view(-1, c)
loss = F.cross_entropy(predict, target, size_average=True)
return loss
def kl_divergence(predict_0, predict_1):
"""
Args:
predict_0:(n, c, h, w)
predict_1:(n, c, h, w)
"""
assert predict_0.dim() == 4
assert predict_1.dim() == 4
assert predict_0.size(0) == predict_1.size(0), f"{predict_0.size(0)} vs {predict_1.size(0)}"
assert predict_0.size(1) == predict_1.size(1), f"{predict_0.size(1)} vs {predict_1.size(1)}"
assert predict_0.size(2) == predict_1.size(2), f"{predict_0.size(2)} vs {predict_1.size(2)}"
assert predict_0.size(3) == predict_1.size(3), f"{predict_0.size(3)} vs {predict_1.size(3)}"
n, c, h, w = predict_0.size()
predict_0 = predict_0.transpose(1, 2).transpose(2, 3).contiguous().view(-1, c)
predict_1 = predict_1.transpose(1, 2).transpose(2, 3).contiguous().view(-1, c)
softmax_predict_0 = F.softmax(predict_0)
softmax_predict_1 = F.softmax(predict_1)
log_softmax_predict_0 = F.log_softmax(predict_0)
loss = F.kl_div(log_softmax_predict_0,softmax_predict_1,size_average=True)
return loss
def mse_loss(predict_0, predict_1):
"""
Args:
predict_0:(n, c, h, w)
predict_1:(n, c, h, w)
"""
assert predict_0.dim() == 4
assert predict_1.dim() == 4
assert predict_0.size(0) == predict_1.size(0), f"{predict_0.size(0)} vs {predict_1.size(0)}"
assert predict_0.size(1) == predict_1.size(1), f"{predict_0.size(1)} vs {predict_1.size(1)}"
assert predict_0.size(2) == predict_1.size(2), f"{predict_0.size(2)} vs {predict_1.size(2)}"
assert predict_0.size(3) == predict_1.size(3), f"{predict_0.size(3)} vs {predict_1.size(3)}"
n, c, h, w = predict_0.size()
predict_0 = predict_0.transpose(1, 2).transpose(2, 3).contiguous().view(-1, c)
predict_1 = predict_1.transpose(1, 2).transpose(2, 3).contiguous().view(-1, c)
softmax_predict_0 = F.softmax(predict_0)
softmax_predict_1 = F.softmax(predict_1)
loss = F.mse_loss(softmax_predict_0,softmax_predict_1,size_average=True)
return loss
| 2,970 | 42.057971 | 96 | py |
MuHDi | MuHDi-master/muhdi/utils/func.py | import numpy as np
import torch
import torch.nn as nn
from muhdi.utils.loss import cross_entropy_2d
def bce_loss(y_pred, y_label):
y_truth_tensor = torch.FloatTensor(y_pred.size())
y_truth_tensor.fill_(y_label)
y_truth_tensor = y_truth_tensor.to(y_pred.get_device())
return nn.BCEWithLogitsLoss()(y_pred, y_truth_tensor)
def loss_calc(pred, label, device):
"""
This function returns cross entropy loss for semantic segmentation
"""
# out shape batch_size x channels x h x w -> batch_size x channels x h x w
# label shape h x w x 1 x batch_size -> batch_size x 1 x h x w
label = label.long().to(device)
return cross_entropy_2d(pred, label)
def lr_poly(base_lr, iter, max_iter, power):
""" Poly_LR scheduler
"""
return base_lr * ((1 - float(iter) / max_iter) ** power)
def _adjust_learning_rate(optimizer, i_iter, cfg, learning_rate):
lr = lr_poly(learning_rate, i_iter, cfg.TRAIN.MAX_ITERS, cfg.TRAIN.POWER)
optimizer.param_groups[0]['lr'] = lr
if len(optimizer.param_groups) > 1:
optimizer.param_groups[1]['lr'] = lr * 10
def adjust_learning_rate(optimizer, i_iter, cfg):
""" adject learning rate for main segnet
"""
_adjust_learning_rate(optimizer, i_iter, cfg, cfg.TRAIN.LEARNING_RATE)
def adjust_learning_rate_discriminator(optimizer, i_iter, cfg):
_adjust_learning_rate(optimizer, i_iter, cfg, cfg.TRAIN.LEARNING_RATE_D)
def prob_2_entropy(prob):
""" convert probabilistic prediction maps to weighted self-information maps
"""
n, c, h, w = prob.size()
return -torch.mul(prob, torch.log2(prob + 1e-30)) / np.log2(c)
def fast_hist(a, b, n):
k = (a >= 0) & (a < n)
return np.bincount(n * a[k].astype(int) + b[k], minlength=n ** 2).reshape(n, n)
def per_class_iu(hist):
return np.diag(hist) / (hist.sum(1) + hist.sum(0) - np.diag(hist))
| 1,877 | 29.290323 | 83 | py |
MuHDi | MuHDi-master/muhdi/model/discriminator.py | import torch
from torch import nn
def get_fc_discriminator(num_classes, ndf=64):
return nn.Sequential(
nn.Conv2d(num_classes, ndf, kernel_size=4, stride=2, padding=1),
nn.LeakyReLU(negative_slope=0.2, inplace=True),
nn.Conv2d(ndf, ndf * 2, kernel_size=4, stride=2, padding=1),
nn.LeakyReLU(negative_slope=0.2, inplace=True),
nn.Conv2d(ndf * 2, ndf * 4, kernel_size=4, stride=2, padding=1),
nn.LeakyReLU(negative_slope=0.2, inplace=True),
nn.Conv2d(ndf * 4, ndf * 8, kernel_size=4, stride=2, padding=1),
nn.LeakyReLU(negative_slope=0.2, inplace=True),
nn.Conv2d(ndf * 8, 1, kernel_size=4, stride=2, padding=1),
)
def restore_discriminator(model, checkpoint):
saved_state_dict = torch.load(checkpoint)
new_params = model.state_dict().copy()
for i in saved_state_dict:
i_parts = i.split('.')
if not i_parts[0] == 'conv5':
new_params[i] = saved_state_dict[i]
model.load_state_dict(new_params)
return model
def get_fc_tc_discriminator(num_classes, ndf=64):
return nn.Sequential(
nn.Conv2d(num_classes, ndf, kernel_size=4, stride=2, padding=1),
nn.LeakyReLU(negative_slope=0.2, inplace=True),
nn.Conv2d(ndf, ndf * 2, kernel_size=4, stride=2, padding=1),
nn.LeakyReLU(negative_slope=0.2, inplace=True),
nn.Conv2d(ndf * 2, ndf * 4, kernel_size=4, stride=2, padding=1),
nn.LeakyReLU(negative_slope=0.2, inplace=True),
nn.Conv2d(ndf * 4, ndf * 8, kernel_size=4, stride=2, padding=1),
nn.LeakyReLU(negative_slope=0.2, inplace=True),
nn.Conv2d(ndf * 8, 3, kernel_size=4, stride=2, padding=1),
)
| 1,695 | 41.4 | 72 | py |
MuHDi | MuHDi-master/muhdi/model/deeplabv2.py | import torch
import torch.nn as nn
from torch.nn.modules.module import Module
from torch.nn.parameter import Parameter
import torch.nn.functional as F
import torch.nn.init as init
affine_par = True
class Bottleneck(nn.Module):
expansion = 4
def __init__(self, inplanes, planes, stride=1, dilation=1, downsample=None):
super(Bottleneck, self).__init__()
self.BatchNorm = nn.BatchNorm2d
# change
self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, stride=stride, bias=False)
self.bn1 = self.BatchNorm(planes, affine=affine_par)
for i in self.bn1.parameters():
i.requires_grad = False
padding = dilation
# change
self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=1,
padding=padding, bias=False, dilation=dilation)
self.bn2 = self.BatchNorm(planes, affine=affine_par)
for i in self.bn2.parameters():
i.requires_grad = False
self.conv3 = nn.Conv2d(planes, planes * 4, kernel_size=1, bias=False)
self.bn3 = self.BatchNorm(planes * 4, affine=affine_par)
for i in self.bn3.parameters():
i.requires_grad = False
self.relu = nn.ReLU(inplace=True)
self.downsample = downsample
self.stride = stride
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.relu(out)
out = self.conv3(out)
out = self.bn3(out)
if self.downsample is not None:
residual = self.downsample(x)
out += residual
out = self.relu(out)
return out
class ClassifierModule(nn.Module):
def __init__(self, inplanes, dilation_series, padding_series, num_classes):
super(ClassifierModule, self).__init__()
self.conv2d_list = nn.ModuleList()
for dilation, padding in zip(dilation_series, padding_series):
self.conv2d_list.append(
nn.Conv2d(inplanes, num_classes, kernel_size=3, stride=1, padding=padding,
dilation=dilation, bias=True))
for m in self.conv2d_list:
m.weight.data.normal_(0, 0.01)
def forward(self, x):
out = self.conv2d_list[0](x)
for i in range(len(self.conv2d_list) - 1):
out += self.conv2d_list[i + 1](x)
return out
class ResNetMulti(nn.Module):
def __init__(self, block, layers, num_classes, multi_level):
self.BatchNorm = nn.BatchNorm2d
self.multi_level = multi_level
self.inplanes = 64
super(ResNetMulti, self).__init__()
self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3,
bias=False)
self.bn1 = self.BatchNorm(64, affine=affine_par)
for i in self.bn1.parameters():
i.requires_grad = False
self.relu = nn.ReLU(inplace=True)
self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1, ceil_mode=True) # change
self.layer1 = self._make_layer(block, 64, layers[0])
self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
self.layer3 = self._make_layer(block, 256, layers[2], stride=1, dilation=2)
self.layer4 = self._make_layer(block, 512, layers[3], stride=1, dilation=4)
if self.multi_level:
self.layer5 = ClassifierModule(1024, [6, 12, 18, 24], [6, 12, 18, 24], num_classes)
self.layer6 = ClassifierModule(2048, [6, 12, 18, 24], [6, 12, 18, 24], num_classes)
for m in self.modules():
if isinstance(m, nn.Conv2d):
m.weight.data.normal_(0, 0.01)
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
def _make_layer(self, block, planes, blocks, stride=1, dilation=1):
downsample = None
if (stride != 1
or self.inplanes != planes * block.expansion
or dilation == 2
or dilation == 4):
downsample = nn.Sequential(
nn.Conv2d(self.inplanes, planes * block.expansion,
kernel_size=1, stride=stride, bias=False),
self.BatchNorm(planes * block.expansion, affine=affine_par))
for i in downsample._modules['1'].parameters():
i.requires_grad = False
layers = []
layers.append(
block(self.inplanes, planes, stride, dilation=dilation, downsample=downsample))
self.inplanes = planes * block.expansion
for i in range(1, blocks):
layers.append(block(self.inplanes, planes, dilation=dilation))
return nn.Sequential(*layers)
def forward(self, x):
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.maxpool(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
if self.multi_level:
x1 = self.layer5(x) # produce segmap 1
else:
x1 = None
x2 = self.layer4(x)
x2 = self.layer6(x2) # produce segmap 2
return x1, x2
def get_1x_lr_params_no_scale(self):
"""
This generator returns all the parameters of the net except for
the last classification layer. Note that for each batchnorm layer,
requires_grad is set to False in deeplab_resnet.py, therefore this function does not return
any batchnorm parameter
"""
b = []
b.append(self.conv1)
b.append(self.bn1)
b.append(self.layer1)
b.append(self.layer2)
b.append(self.layer3)
b.append(self.layer4)
for i in range(len(b)):
for j in b[i].modules():
jj = 0
for k in j.parameters():
jj += 1
if k.requires_grad:
yield k
def get_10x_lr_params(self):
"""
This generator returns all the parameters for the last layer of the net,
which does the classification of pixel into classes
"""
b = []
if self.multi_level:
b.append(self.layer5.parameters())
b.append(self.layer6.parameters())
for j in range(len(b)):
for i in b[j]:
yield i
def optim_parameters(self, lr):
return [{'params': self.get_1x_lr_params_no_scale(), 'lr': lr},
{'params': self.get_10x_lr_params(), 'lr': 10 * lr}]
class BottleneckAttention(nn.Module):
expansion = 4
def __init__(self, inplanes, planes, stride=1, dilation=1, downsample=None, last=False):
super(BottleneckAttention, self).__init__()
self.BatchNorm = nn.BatchNorm2d
# change
self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, stride=stride, bias=False)
self.bn1 = self.BatchNorm(planes, affine=affine_par)
for i in self.bn1.parameters():
i.requires_grad = False
padding = dilation
# change
self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=1,
padding=padding, bias=False, dilation=dilation)
self.bn2 = self.BatchNorm(planes, affine=affine_par)
for i in self.bn2.parameters():
i.requires_grad = False
self.conv3 = nn.Conv2d(planes, planes * 4, kernel_size=1, bias=False)
self.bn3 = self.BatchNorm(planes * 4, affine=affine_par)
for i in self.bn3.parameters():
i.requires_grad = False
self.relu = nn.ReLU(inplace=True)
self.downsample = downsample
self.stride = stride
self.last = last
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.relu(out)
out = self.conv3(out)
out = self.bn3(out)
if self.downsample is not None:
residual = self.downsample(x)
out += residual
if self.last:
att = out
out = self.relu(out)
return out, att
else:
out = self.relu(out)
return out
class ResNetMultiAttention(nn.Module):
def __init__(self, block, layers, num_classes, multi_level):
self.BatchNorm = nn.BatchNorm2d
self.multi_level = multi_level
self.inplanes = 64
super(ResNetMultiAttention, self).__init__()
self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3,
bias=False)
self.bn1 = self.BatchNorm(64, affine=affine_par)
for i in self.bn1.parameters():
i.requires_grad = False
self.relu = nn.ReLU(inplace=True)
self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1, ceil_mode=True) # change
self.layer1 = self._make_layer(block, 64, layers[0])
self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
self.layer3 = self._make_layer(block, 256, layers[2], stride=1, dilation=2)
self.layer4 = self._make_layer(block, 512, layers[3], stride=1, dilation=4)
if self.multi_level:
self.layer5 = ClassifierModule(1024, [6, 12, 18, 24], [6, 12, 18, 24], num_classes)
self.layer6 = ClassifierModule(2048, [6, 12, 18, 24], [6, 12, 18, 24], num_classes)
for m in self.modules():
if isinstance(m, nn.Conv2d):
m.weight.data.normal_(0, 0.01)
elif isinstance(m, self.BatchNorm):
m.weight.data.fill_(1)
m.bias.data.zero_()
def _make_layer(self, block, planes, blocks, stride=1, dilation=1):
downsample = None
if (stride != 1
or self.inplanes != planes * block.expansion
or dilation == 2
or dilation == 4):
downsample = nn.Sequential(
nn.Conv2d(self.inplanes, planes * block.expansion,
kernel_size=1, stride=stride, bias=False),
self.BatchNorm(planes * block.expansion, affine=affine_par))
for i in downsample._modules['1'].parameters():
i.requires_grad = False
layers = []
layers.append(
block(self.inplanes, planes, stride, dilation=dilation, downsample=downsample))
self.inplanes = planes * block.expansion
for i in range(1, blocks):
layers.append(block(self.inplanes, planes, dilation=dilation, last=(i==blocks-1)))
return nn.Sequential(*layers)
def forward(self, x):
attentions = []
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.maxpool(x)
x, att = self.layer1(x)
attentions.append(att)
x,att = self.layer2(x)
attentions.append(att)
x,att = self.layer3(x)
attentions.append(att)
if self.multi_level:
x1 = self.layer5(x) # produce segmap 1
else:
x1 = None
x2, att = self.layer4(x)
attentions.append(att)
x2 = self.layer6(x2) # produce segmap 2
return x1, x2, attentions
def get_1x_lr_params_no_scale(self):
"""
This generator returns all the parameters of the net except for
the last classification layer. Note that for each batchnorm layer,
requires_grad is set to False in deeplab_resnet.py, therefore this function does not return
any batchnorm parameter
"""
b = []
b.append(self.conv1)
b.append(self.bn1)
b.append(self.layer1)
b.append(self.layer2)
b.append(self.layer3)
b.append(self.layer4)
for i in range(len(b)):
for j in b[i].modules():
jj = 0
for k in j.parameters():
jj += 1
if k.requires_grad:
yield k
def get_10x_lr_params(self):
"""
This generator returns all the parameters for the last layer of the net,
which does the classification of pixel into classes
"""
b = []
if self.multi_level:
b.append(self.layer5.parameters())
b.append(self.layer6.parameters())
for j in range(len(b)):
for i in b[j]:
yield i
def optim_parameters(self, lr):
return [{'params': self.get_1x_lr_params_no_scale(), 'lr': lr},
{'params': self.get_10x_lr_params(), 'lr': 10 * lr}]
class ResNetMultiMuHDi(nn.Module):
def __init__(self, block, layers, num_classes, multi_level):
self.BatchNorm = nn.BatchNorm2d
self.multi_level = multi_level
self.inplanes = 64
super(ResNetMultiMuHDi, self).__init__()
self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3,
bias=False)
self.bn1 = self.BatchNorm(64, affine=affine_par)
for i in self.bn1.parameters():
i.requires_grad = False
self.relu = nn.ReLU(inplace=True)
self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1, ceil_mode=True) # change
self.layer1 = self._make_layer(block, 64, layers[0])
self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
self.layer3 = self._make_layer(block, 256, layers[2], stride=1, dilation=2)
self.layer4 = self._make_layer(block, 512, layers[3], stride=1, dilation=4)
if self.multi_level:
self.layer5 = ClassifierModule(1024, [6, 12, 18, 24], [6, 12, 18, 24], num_classes)
self.layer7 = ClassifierModule(1024, [6, 12, 18, 24], [6, 12, 18, 24], num_classes)
self.layer6 = ClassifierModule(2048, [6, 12, 18, 24], [6, 12, 18, 24], num_classes)
self.layer8 = ClassifierModule(2048, [6, 12, 18, 24], [6, 12, 18, 24], num_classes)
for m in self.modules():
if isinstance(m, nn.Conv2d):
m.weight.data.normal_(0, 0.01)
elif isinstance(m, self.BatchNorm):
m.weight.data.fill_(1)
m.bias.data.zero_()
def _make_layer(self, block, planes, blocks, stride=1, dilation=1):
downsample = None
if (stride != 1
or self.inplanes != planes * block.expansion
or dilation == 2
or dilation == 4):
downsample = nn.Sequential(
nn.Conv2d(self.inplanes, planes * block.expansion,
kernel_size=1, stride=stride, bias=False),
self.BatchNorm(planes * block.expansion, affine=affine_par))
for i in downsample._modules['1'].parameters():
i.requires_grad = False
layers = []
layers.append(
block(self.inplanes, planes, stride, dilation=dilation, downsample=downsample))
self.inplanes = planes * block.expansion
for i in range(1, blocks):
layers.append(block(self.inplanes, planes, dilation=dilation, last=(i==blocks-1)))
return nn.Sequential(*layers)
def forward(self, x):
attentions = []
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.maxpool(x)
x, att = self.layer1(x)
attentions.append(att)
x,att = self.layer2(x)
attentions.append(att)
x,att = self.layer3(x)
attentions.append(att)
if self.multi_level:
x1_0 = self.layer5(x) # produce segmap 1
x1_1 = self.layer7(x)
else:
x1_0 = None
x1_1 = None
x2, att = self.layer4(x)
attentions.append(att)
x2_0 = self.layer6(x2) # produce segmap 2
x2_1 = self.layer8(x2)
return x1_0, x2_0, x1_1, x2_1, attentions
def get_1x_lr_params_no_scale(self):
"""
This generator returns all the parameters of the net except for
the last classification layer. Note that for each batchnorm layer,
requires_grad is set to False in deeplab_resnet.py, therefore this function does not return
any batchnorm parameter
"""
b = []
b.append(self.conv1)
b.append(self.bn1)
b.append(self.layer1)
b.append(self.layer2)
b.append(self.layer3)
b.append(self.layer4)
for i in range(len(b)):
for j in b[i].modules():
jj = 0
for k in j.parameters():
jj += 1
if k.requires_grad:
yield k
def get_10x_lr_params(self):
"""
This generator returns all the parameters for the last layer of the net,
which does the classification of pixel into classes
"""
b = []
if self.multi_level:
b.append(self.layer5.parameters())
b.append(self.layer7.parameters())
b.append(self.layer6.parameters())
b.append(self.layer8.parameters())
for j in range(len(b)):
for i in b[j]:
yield i
def optim_parameters(self, lr):
return [{'params': self.get_1x_lr_params_no_scale(), 'lr': lr},
{'params': self.get_10x_lr_params(), 'lr': 10 * lr}]
def get_deeplab_v2_muhdi(num_classes=19, multi_level=True):
model = ResNetMultiMuHDi(BottleneckAttention, [3, 4, 23, 3], num_classes, multi_level)
return model
def get_deeplab_v2_attention(num_classes=19, multi_level=True):
model = ResNetMultiAttention(BottleneckAttention, [3, 4, 23, 3], num_classes, multi_level)
return model
def get_deeplab_v2(num_classes=19, multi_level=True):
model = ResNetMulti(Bottleneck, [3, 4, 23, 3], num_classes, multi_level)
return model
| 17,891 | 36.120332 | 99 | py |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.