repo stringlengths 1 99 | file stringlengths 13 215 | code stringlengths 12 59.2M | file_length int64 12 59.2M | avg_line_length float64 3.82 1.48M | max_line_length int64 12 2.51M | extension_type stringclasses 1
value |
|---|---|---|---|---|---|---|
PseCo | PseCo-master/thirdparty/mmdetection/mmdet/models/roi_heads/mask_heads/fcn_mask_head.py | # Copyright (c) OpenMMLab. All rights reserved.
from warnings import warn
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from mmcv.cnn import ConvModule, build_conv_layer, build_upsample_layer
from mmcv.ops.carafe import CARAFEPack
from mmcv.runner import BaseModule, ModuleList, auto_fp16, force_fp32
from torch.nn.modules.utils import _pair
from mmdet.core import mask_target
from mmdet.models.builder import HEADS, build_loss
BYTES_PER_FLOAT = 4  # size of one float32 element, used for memory estimation
# TODO: This memory limit may be too much or too little. It would be better to
# determine it based on available resources.
GPU_MEM_LIMIT = 1024**3  # 1 GB memory limit
@HEADS.register_module()
class FCNMaskHead(BaseModule):
    """Fully convolutional mask head (as used in e.g. Mask R-CNN).

    Stacks ``num_convs`` conv layers, an optional upsample layer
    ('deconv', 'nearest', 'bilinear' or 'carafe') and a 1x1 conv predictor
    that outputs one mask logit map per class (a single map when
    ``class_agnostic`` is True).
    """

    def __init__(self,
                 num_convs=4,
                 roi_feat_size=14,
                 in_channels=256,
                 conv_kernel_size=3,
                 conv_out_channels=256,
                 num_classes=80,
                 class_agnostic=False,
                 upsample_cfg=dict(type='deconv', scale_factor=2),
                 conv_cfg=None,
                 norm_cfg=None,
                 predictor_cfg=dict(type='Conv'),
                 loss_mask=dict(
                     type='CrossEntropyLoss', use_mask=True, loss_weight=1.0),
                 init_cfg=None):
        assert init_cfg is None, 'To prevent abnormal initialization ' \
            'behavior, init_cfg is not allowed to be set'
        super(FCNMaskHead, self).__init__(init_cfg)
        self.upsample_cfg = upsample_cfg.copy()
        if self.upsample_cfg['type'] not in [
                None, 'deconv', 'nearest', 'bilinear', 'carafe'
        ]:
            raise ValueError(
                f'Invalid upsample method {self.upsample_cfg["type"]}, '
                'accepted methods are "deconv", "nearest", "bilinear", '
                '"carafe"')
        self.num_convs = num_convs
        # WARN: roi_feat_size is reserved and not used
        self.roi_feat_size = _pair(roi_feat_size)
        self.in_channels = in_channels
        self.conv_kernel_size = conv_kernel_size
        self.conv_out_channels = conv_out_channels
        self.upsample_method = self.upsample_cfg.get('type')
        self.scale_factor = self.upsample_cfg.pop('scale_factor', None)
        self.num_classes = num_classes
        self.class_agnostic = class_agnostic
        self.conv_cfg = conv_cfg
        self.norm_cfg = norm_cfg
        self.predictor_cfg = predictor_cfg
        self.fp16_enabled = False
        self.loss_mask = build_loss(loss_mask)

        # Conv tower: first layer maps in_channels, later layers keep
        # conv_out_channels.
        self.convs = ModuleList()
        for i in range(self.num_convs):
            in_channels = (
                self.in_channels if i == 0 else self.conv_out_channels)
            padding = (self.conv_kernel_size - 1) // 2
            self.convs.append(
                ConvModule(
                    in_channels,
                    self.conv_out_channels,
                    self.conv_kernel_size,
                    padding=padding,
                    conv_cfg=conv_cfg,
                    norm_cfg=norm_cfg))
        upsample_in_channels = (
            self.conv_out_channels if self.num_convs > 0 else in_channels)
        upsample_cfg_ = self.upsample_cfg.copy()
        if self.upsample_method is None:
            self.upsample = None
        elif self.upsample_method == 'deconv':
            # For deconv the scale_factor doubles as kernel_size and stride.
            upsample_cfg_.update(
                in_channels=upsample_in_channels,
                out_channels=self.conv_out_channels,
                kernel_size=self.scale_factor,
                stride=self.scale_factor)
            self.upsample = build_upsample_layer(upsample_cfg_)
        elif self.upsample_method == 'carafe':
            upsample_cfg_.update(
                channels=upsample_in_channels, scale_factor=self.scale_factor)
            self.upsample = build_upsample_layer(upsample_cfg_)
        else:
            # suppress warnings
            align_corners = (None
                             if self.upsample_method == 'nearest' else False)
            upsample_cfg_.update(
                scale_factor=self.scale_factor,
                mode=self.upsample_method,
                align_corners=align_corners)
            self.upsample = build_upsample_layer(upsample_cfg_)

        out_channels = 1 if self.class_agnostic else self.num_classes
        logits_in_channel = (
            self.conv_out_channels
            if self.upsample_method == 'deconv' else upsample_in_channels)
        self.conv_logits = build_conv_layer(self.predictor_cfg,
                                            logits_in_channel, out_channels, 1)
        self.relu = nn.ReLU(inplace=True)
        self.debug_imgs = None

    def init_weights(self):
        """Initialize the upsample layer and predictor weights.

        CARAFE layers run their own ``init_weights``; all other layers use
        Kaiming init with zeroed bias.
        """
        super(FCNMaskHead, self).init_weights()
        for m in [self.upsample, self.conv_logits]:
            if m is None:
                continue
            elif isinstance(m, CARAFEPack):
                m.init_weights()
            else:
                nn.init.kaiming_normal_(
                    m.weight, mode='fan_out', nonlinearity='relu')
                nn.init.constant_(m.bias, 0)

    @auto_fp16()
    def forward(self, x):
        """Run RoI features through convs, upsample and the 1x1 predictor.

        Args:
            x (Tensor): RoI features, shape (num_rois, in_channels, H, W).

        Returns:
            Tensor: Mask logits of shape (num_rois, num_classes, H', W');
            the channel dim is 1 when ``class_agnostic``.
        """
        for conv in self.convs:
            x = conv(x)
        if self.upsample is not None:
            x = self.upsample(x)
            if self.upsample_method == 'deconv':
                x = self.relu(x)
        mask_pred = self.conv_logits(x)
        return mask_pred

    def get_targets(self, sampling_results, gt_masks, rcnn_train_cfg):
        """Compute mask targets for the positive proposals of each image.

        Args:
            sampling_results (list): Per-image sampling results providing
                ``pos_bboxes`` and ``pos_assigned_gt_inds``.
            gt_masks (list): Ground-truth masks of each image.
            rcnn_train_cfg (dict): Training config of the RCNN head.

        Returns:
            Tensor: Mask targets produced by :func:`mask_target`.
        """
        pos_proposals = [res.pos_bboxes for res in sampling_results]
        pos_assigned_gt_inds = [
            res.pos_assigned_gt_inds for res in sampling_results
        ]
        mask_targets = mask_target(pos_proposals, pos_assigned_gt_inds,
                                   gt_masks, rcnn_train_cfg)
        return mask_targets

    @force_fp32(apply_to=('mask_pred', ))
    def loss(self, mask_pred, mask_targets, labels):
        """Compute the mask loss.

        Args:
            mask_pred (Tensor): Predicted mask logits,
                shape (num_pos, num_classes, H, W).
            mask_targets (Tensor): Mask targets, shape (num_pos, H, W).
            labels (Tensor): Class labels of the positive boxes,
                shape (num_pos,).

        Returns:
            dict[str, Tensor]: A dict with a single key ``loss_mask``.

        Example:
            >>> from mmdet.models.roi_heads.mask_heads.fcn_mask_head import *  # NOQA
            >>> N = 7  # N = number of extracted ROIs
            >>> C, H, W = 11, 32, 32
            >>> # Create example instance of FCN Mask Head.
            >>> # There are lots of variations depending on the configuration
            >>> self = FCNMaskHead(num_classes=C, num_convs=1)
            >>> inputs = torch.rand(N, self.in_channels, H, W)
            >>> mask_pred = self.forward(inputs)
            >>> sf = self.scale_factor
            >>> labels = torch.randint(0, C, size=(N,))
            >>> # With the default properties the mask targets should indicate
            >>> # a (potentially soft) single-class label
            >>> mask_targets = torch.rand(N, H * sf, W * sf)
            >>> loss = self.loss(mask_pred, mask_targets, labels)
            >>> print('loss = {!r}'.format(loss))
        """
        loss = dict()
        if mask_pred.size(0) == 0:
            # No positive samples: keep the graph connected with a zero loss.
            loss_mask = mask_pred.sum()
        else:
            if self.class_agnostic:
                loss_mask = self.loss_mask(mask_pred, mask_targets,
                                           torch.zeros_like(labels))
            else:
                loss_mask = self.loss_mask(mask_pred, mask_targets, labels)
        loss['loss_mask'] = loss_mask
        return loss

    def get_seg_masks(self, mask_pred, det_bboxes, det_labels, rcnn_test_cfg,
                      ori_shape, scale_factor, rescale):
        """Get segmentation masks from mask_pred and bboxes.

        Args:
            mask_pred (Tensor or ndarray): shape (n, #class, h, w).
                For single-scale testing, mask_pred is the direct output of
                model, whose type is Tensor, while for multi-scale testing,
                it will be converted to numpy array outside of this method.
            det_bboxes (Tensor): shape (n, 4/5)
            det_labels (Tensor): shape (n, )
            rcnn_test_cfg (dict): rcnn testing config
            ori_shape (Tuple): original image height and width, shape (2,)
            scale_factor(ndarray | Tensor): If ``rescale is True``, box
                coordinates are divided by this scale factor to fit
                ``ori_shape``.
            rescale (bool): If True, the resulting masks will be rescaled to
                ``ori_shape``.

        Returns:
            list[list]: encoded masks. The c-th item in the outer list
                corresponds to the c-th class. Given the c-th outer list, the
                i-th item in that inner list is the mask for the i-th box with
                class label c.

        Example:
            >>> import mmcv
            >>> from mmdet.models.roi_heads.mask_heads.fcn_mask_head import *  # NOQA
            >>> N = 7  # N = number of extracted ROIs
            >>> C, H, W = 11, 32, 32
            >>> # Create example instance of FCN Mask Head.
            >>> self = FCNMaskHead(num_classes=C, num_convs=0)
            >>> inputs = torch.rand(N, self.in_channels, H, W)
            >>> mask_pred = self.forward(inputs)
            >>> # Each input is associated with some bounding box
            >>> det_bboxes = torch.Tensor([[1, 1, 42, 42 ]] * N)
            >>> det_labels = torch.randint(0, C, size=(N,))
            >>> rcnn_test_cfg = mmcv.Config({'mask_thr_binary': 0, })
            >>> ori_shape = (H * 4, W * 4)
            >>> scale_factor = torch.FloatTensor((1, 1))
            >>> rescale = False
            >>> # Encoded masks are a list for each category.
            >>> encoded_masks = self.get_seg_masks(
            >>>     mask_pred, det_bboxes, det_labels, rcnn_test_cfg, ori_shape,
            >>>     scale_factor, rescale
            >>> )
            >>> assert len(encoded_masks) == C
            >>> assert sum(list(map(len, encoded_masks))) == N
        """
        if isinstance(mask_pred, torch.Tensor):
            mask_pred = mask_pred.sigmoid()
        else:
            # In AugTest, has been activated before
            mask_pred = det_bboxes.new_tensor(mask_pred)

        device = mask_pred.device
        cls_segms = [[] for _ in range(self.num_classes)
                     ]  # BG is not included in num_classes
        bboxes = det_bboxes[:, :4]
        labels = det_labels

        # In most cases, scale_factor should have been
        # converted to Tensor when rescale the bbox
        if not isinstance(scale_factor, torch.Tensor):
            if isinstance(scale_factor, float):
                scale_factor = np.array([scale_factor] * 4)
                warn('Scale_factor should be a Tensor or ndarray '
                     'with shape (4,), float would be deprecated. ')
            assert isinstance(scale_factor, np.ndarray)
            scale_factor = torch.Tensor(scale_factor)

        if rescale:
            img_h, img_w = ori_shape[:2]
            bboxes = bboxes / scale_factor
        else:
            w_scale, h_scale = scale_factor[0], scale_factor[1]
            img_h = np.round(ori_shape[0] * h_scale.item()).astype(np.int32)
            img_w = np.round(ori_shape[1] * w_scale.item()).astype(np.int32)

        N = len(mask_pred)
        # The actual implementation split the input into chunks,
        # and paste them chunk by chunk.
        if device.type == 'cpu':
            # CPU is most efficient when they are pasted one by one with
            # skip_empty=True, so that it performs minimal number of
            # operations.
            num_chunks = N
        else:
            # GPU benefits from parallelism for larger chunks,
            # but may have memory issue
            # the types of img_w and img_h are np.int32,
            # when the image resolution is large,
            # the calculation of num_chunks will overflow.
            # so we need to change the types of img_w and img_h to int.
            # See https://github.com/open-mmlab/mmdetection/pull/5191
            num_chunks = int(
                np.ceil(N * int(img_h) * int(img_w) * BYTES_PER_FLOAT /
                        GPU_MEM_LIMIT))
            assert (num_chunks <=
                    N), 'Default GPU_MEM_LIMIT is too small; try increasing it'
        chunks = torch.chunk(torch.arange(N, device=device), num_chunks)

        threshold = rcnn_test_cfg.mask_thr_binary
        # bool masks when thresholding; uint8 (0-255) soft masks otherwise.
        im_mask = torch.zeros(
            N,
            img_h,
            img_w,
            device=device,
            dtype=torch.bool if threshold >= 0 else torch.uint8)

        if not self.class_agnostic:
            # Keep only the mask channel of each box's predicted class.
            mask_pred = mask_pred[range(N), labels][:, None]

        for inds in chunks:
            masks_chunk, spatial_inds = _do_paste_mask(
                mask_pred[inds],
                bboxes[inds],
                img_h,
                img_w,
                skip_empty=device.type == 'cpu')

            if threshold >= 0:
                masks_chunk = (masks_chunk >= threshold).to(dtype=torch.bool)
            else:
                # for visualization and debugging
                masks_chunk = (masks_chunk * 255).to(dtype=torch.uint8)

            im_mask[(inds, ) + spatial_inds] = masks_chunk

        for i in range(N):
            cls_segms[labels[i]].append(im_mask[i].detach().cpu().numpy())
        return cls_segms

    def onnx_export(self, mask_pred, det_bboxes, det_labels, rcnn_test_cfg,
                    ori_shape, **kwargs):
        """Get segmentation masks from mask_pred and bboxes.

        Args:
            mask_pred (Tensor): shape (n, #class, h, w).
            det_bboxes (Tensor): shape (n, 4/5)
            det_labels (Tensor): shape (n, )
            rcnn_test_cfg (dict): rcnn testing config
            ori_shape (Tuple): original image height and width, shape (2,)

        Returns:
            Tensor: a mask of shape (N, img_h, img_w).
        """
        mask_pred = mask_pred.sigmoid()
        bboxes = det_bboxes[:, :4]
        labels = det_labels
        # No need to consider rescale and scale_factor while exporting to ONNX
        img_h, img_w = ori_shape[:2]
        threshold = rcnn_test_cfg.mask_thr_binary
        if not self.class_agnostic:
            box_inds = torch.arange(mask_pred.shape[0])
            mask_pred = mask_pred[box_inds, labels][:, None]
        masks, _ = _do_paste_mask(
            mask_pred, bboxes, img_h, img_w, skip_empty=False)
        if threshold >= 0:
            # should convert to float to avoid problems in TRT
            masks = (masks >= threshold).to(dtype=torch.float)
        return masks
def _do_paste_mask(masks, boxes, img_h, img_w, skip_empty=True):
"""Paste instance masks according to boxes.
This implementation is modified from
https://github.com/facebookresearch/detectron2/
Args:
masks (Tensor): N, 1, H, W
boxes (Tensor): N, 4
img_h (int): Height of the image to be pasted.
img_w (int): Width of the image to be pasted.
skip_empty (bool): Only paste masks within the region that
tightly bound all boxes, and returns the results this region only.
An important optimization for CPU.
Returns:
tuple: (Tensor, tuple). The first item is mask tensor, the second one
is the slice object.
If skip_empty == False, the whole image will be pasted. It will
return a mask of shape (N, img_h, img_w) and an empty tuple.
If skip_empty == True, only area around the mask will be pasted.
A mask of shape (N, h', w') and its start and end coordinates
in the original image will be returned.
"""
# On GPU, paste all masks together (up to chunk size)
# by using the entire image to sample the masks
# Compared to pasting them one by one,
# this has more operations but is faster on COCO-scale dataset.
device = masks.device
if skip_empty:
x0_int, y0_int = torch.clamp(
boxes.min(dim=0).values.floor()[:2] - 1,
min=0).to(dtype=torch.int32)
x1_int = torch.clamp(
boxes[:, 2].max().ceil() + 1, max=img_w).to(dtype=torch.int32)
y1_int = torch.clamp(
boxes[:, 3].max().ceil() + 1, max=img_h).to(dtype=torch.int32)
else:
x0_int, y0_int = 0, 0
x1_int, y1_int = img_w, img_h
x0, y0, x1, y1 = torch.split(boxes, 1, dim=1) # each is Nx1
N = masks.shape[0]
img_y = torch.arange(y0_int, y1_int, device=device).to(torch.float32) + 0.5
img_x = torch.arange(x0_int, x1_int, device=device).to(torch.float32) + 0.5
img_y = (img_y - y0) / (y1 - y0) * 2 - 1
img_x = (img_x - x0) / (x1 - x0) * 2 - 1
# img_x, img_y have shapes (N, w), (N, h)
# IsInf op is not supported with ONNX<=1.7.0
if not torch.onnx.is_in_onnx_export():
if torch.isinf(img_x).any():
inds = torch.where(torch.isinf(img_x))
img_x[inds] = 0
if torch.isinf(img_y).any():
inds = torch.where(torch.isinf(img_y))
img_y[inds] = 0
gx = img_x[:, None, :].expand(N, img_y.size(1), img_x.size(1))
gy = img_y[:, :, None].expand(N, img_y.size(1), img_x.size(1))
grid = torch.stack([gx, gy], dim=3)
img_masks = F.grid_sample(
masks.to(dtype=torch.float32), grid, align_corners=False)
if skip_empty:
return img_masks[:, 0], (slice(y0_int, y1_int), slice(x0_int, x1_int))
else:
return img_masks[:, 0], ()
| 17,394 | 41.118644 | 85 | py |
PseCo | PseCo-master/thirdparty/mmdetection/mmdet/models/roi_heads/mask_heads/fused_semantic_head.py | # Copyright (c) OpenMMLab. All rights reserved.
import warnings
import torch.nn as nn
import torch.nn.functional as F
from mmcv.cnn import ConvModule
from mmcv.runner import BaseModule, auto_fp16, force_fp32
from mmdet.models.builder import HEADS, build_loss
@HEADS.register_module()
class FusedSemanticHead(BaseModule):
    r"""Multi-level fused semantic segmentation head.

    .. code-block:: none

        in_1 -> 1x1 conv ---
                            |
        in_2 -> 1x1 conv -- |
                           ||
        in_3 -> 1x1 conv - ||
                          ||| /-> 1x1 conv (mask prediction)
        in_4 -> 1x1 conv -----> 3x3 convs (*4)
                            | \-> 1x1 conv (feature)
        in_5 -> 1x1 conv ---
    """  # noqa: W605

    def __init__(self,
                 num_ins,
                 fusion_level,
                 num_convs=4,
                 in_channels=256,
                 conv_out_channels=256,
                 num_classes=183,
                 conv_cfg=None,
                 norm_cfg=None,
                 ignore_label=None,
                 loss_weight=None,
                 loss_seg=dict(
                     type='CrossEntropyLoss',
                     ignore_index=255,
                     loss_weight=0.2),
                 init_cfg=dict(
                     type='Kaiming', override=dict(name='conv_logits'))):
        super(FusedSemanticHead, self).__init__(init_cfg)
        self.num_ins = num_ins
        self.fusion_level = fusion_level
        self.num_convs = num_convs
        self.in_channels = in_channels
        self.conv_out_channels = conv_out_channels
        self.num_classes = num_classes
        self.conv_cfg = conv_cfg
        self.norm_cfg = norm_cfg
        self.fp16_enabled = False

        # One 1x1 lateral conv per input level; their outputs are summed at
        # the resolution of the ``fusion_level`` input in ``forward``.
        self.lateral_convs = nn.ModuleList()
        for i in range(self.num_ins):
            self.lateral_convs.append(
                ConvModule(
                    self.in_channels,
                    self.in_channels,
                    1,
                    conv_cfg=self.conv_cfg,
                    norm_cfg=self.norm_cfg,
                    inplace=False))

        self.convs = nn.ModuleList()
        for i in range(self.num_convs):
            in_channels = self.in_channels if i == 0 else conv_out_channels
            self.convs.append(
                ConvModule(
                    in_channels,
                    conv_out_channels,
                    3,
                    padding=1,
                    conv_cfg=self.conv_cfg,
                    norm_cfg=self.norm_cfg))
        self.conv_embedding = ConvModule(
            conv_out_channels,
            conv_out_channels,
            1,
            conv_cfg=self.conv_cfg,
            norm_cfg=self.norm_cfg)
        self.conv_logits = nn.Conv2d(conv_out_channels, self.num_classes, 1)

        # Backward compatibility: fold the deprecated ``ignore_label`` /
        # ``loss_weight`` arguments into the ``loss_seg`` config.
        if ignore_label:
            loss_seg['ignore_index'] = ignore_label
        if loss_weight:
            loss_seg['loss_weight'] = loss_weight
        if ignore_label or loss_weight:
            # BUGFIX: the message previously misspelled the config key as
            # ``ingore_index``, pointing users at a key that does not exist.
            warnings.warn('``ignore_label`` and ``loss_weight`` would be '
                          'deprecated soon. Please set ``ignore_index`` and '
                          '``loss_weight`` in ``loss_seg`` instead.')
        self.criterion = build_loss(loss_seg)

    @auto_fp16()
    def forward(self, feats):
        """Fuse multi-level features and predict semantic segmentation.

        Args:
            feats (list[Tensor]): Multi-level features, each of shape
                (N, in_channels, H_i, W_i).

        Returns:
            tuple[Tensor, Tensor]: Semantic segmentation logits of shape
            (N, num_classes, H, W) and the fused semantic embedding of
            shape (N, conv_out_channels, H, W), where (H, W) is the
            spatial size of the ``fusion_level`` input.
        """
        x = self.lateral_convs[self.fusion_level](feats[self.fusion_level])
        fused_size = tuple(x.shape[-2:])
        for i, feat in enumerate(feats):
            if i != self.fusion_level:
                # Resize every other level to the fusion resolution before
                # adding its lateral projection.
                feat = F.interpolate(
                    feat, size=fused_size, mode='bilinear', align_corners=True)
                x += self.lateral_convs[i](feat)
        for i in range(self.num_convs):
            x = self.convs[i](x)

        mask_pred = self.conv_logits(x)
        x = self.conv_embedding(x)
        return mask_pred, x

    @force_fp32(apply_to=('mask_pred', ))
    def loss(self, mask_pred, labels):
        """Compute the semantic segmentation loss.

        Args:
            mask_pred (Tensor): Segmentation logits,
                shape (N, num_classes, H, W).
            labels (Tensor): Segmentation target; the channel dim of size 1
                is squeezed before the criterion is applied.

        Returns:
            Tensor: Semantic segmentation loss.
        """
        labels = labels.squeeze(1).long()
        loss_semantic_seg = self.criterion(mask_pred, labels)
        return loss_semantic_seg
| 4,150 | 34.177966 | 79 | py |
PseCo | PseCo-master/thirdparty/mmdetection/mmdet/models/roi_heads/mask_heads/mask_point_head.py | # Copyright (c) OpenMMLab. All rights reserved.
# Modified from https://github.com/facebookresearch/detectron2/tree/master/projects/PointRend/point_head/point_head.py # noqa
import torch
import torch.nn as nn
from mmcv.cnn import ConvModule
from mmcv.ops import point_sample, rel_roi_point_to_rel_img_point
from mmcv.runner import BaseModule
from mmdet.models.builder import HEADS, build_loss
@HEADS.register_module()
class MaskPointHead(BaseModule):
    """A mask point head use in PointRend.

    ``MaskPointHead`` use shared multi-layer perceptron (equivalent to
    nn.Conv1d) to predict the logit of input points. The fine-grained feature
    and coarse feature will be concatenate together for predication.

    Args:
        num_fcs (int): Number of fc layers in the head. Default: 3.
        in_channels (int): Number of input channels. Default: 256.
        fc_channels (int): Number of fc channels. Default: 256.
        num_classes (int): Number of classes for logits. Default: 80.
        class_agnostic (bool): Whether use class agnostic classification.
            If so, the output channels of logits will be 1. Default: False.
        coarse_pred_each_layer (bool): Whether concatenate coarse feature with
            the output of each fc layer. Default: True.
        conv_cfg (dict | None): Dictionary to construct and config conv layer.
            Default: dict(type='Conv1d'))
        norm_cfg (dict | None): Dictionary to construct and config norm layer.
            Default: None.
        loss_point (dict): Dictionary to construct and config loss layer of
            point head. Default: dict(type='CrossEntropyLoss', use_mask=True,
            loss_weight=1.0).
        init_cfg (dict or list[dict], optional): Initialization config dict.
    """

    def __init__(self,
                 num_classes,
                 num_fcs=3,
                 in_channels=256,
                 fc_channels=256,
                 class_agnostic=False,
                 coarse_pred_each_layer=True,
                 conv_cfg=dict(type='Conv1d'),
                 norm_cfg=None,
                 act_cfg=dict(type='ReLU'),
                 loss_point=dict(
                     type='CrossEntropyLoss', use_mask=True, loss_weight=1.0),
                 init_cfg=dict(
                     type='Normal', std=0.001,
                     override=dict(name='fc_logits'))):
        super().__init__(init_cfg)
        self.num_fcs = num_fcs
        self.in_channels = in_channels
        self.fc_channels = fc_channels
        self.num_classes = num_classes
        self.class_agnostic = class_agnostic
        self.coarse_pred_each_layer = coarse_pred_each_layer
        self.conv_cfg = conv_cfg
        self.norm_cfg = norm_cfg
        self.loss_point = build_loss(loss_point)

        # The first fc consumes the fine-grained features concatenated with
        # the coarse class logits (num_classes extra channels); subsequent
        # fcs optionally re-concatenate the coarse logits.
        fc_in_channels = in_channels + num_classes
        self.fcs = nn.ModuleList()
        for _ in range(num_fcs):
            fc = ConvModule(
                fc_in_channels,
                fc_channels,
                kernel_size=1,
                stride=1,
                padding=0,
                conv_cfg=conv_cfg,
                norm_cfg=norm_cfg,
                act_cfg=act_cfg)
            self.fcs.append(fc)
            fc_in_channels = fc_channels
            fc_in_channels += num_classes if self.coarse_pred_each_layer else 0

        out_channels = 1 if self.class_agnostic else self.num_classes
        self.fc_logits = nn.Conv1d(
            fc_in_channels, out_channels, kernel_size=1, stride=1, padding=0)

    def forward(self, fine_grained_feats, coarse_feats):
        """Classify each point base on fine grained and coarse feats.

        Args:
            fine_grained_feats (Tensor): Fine grained feature sampled from FPN,
                shape (num_rois, in_channels, num_points).
            coarse_feats (Tensor): Coarse feature sampled from CoarseMaskHead,
                shape (num_rois, num_classes, num_points).

        Returns:
            Tensor: Point classification results,
                shape (num_rois, num_class, num_points).
        """
        x = torch.cat([fine_grained_feats, coarse_feats], dim=1)
        for fc in self.fcs:
            x = fc(x)
            if self.coarse_pred_each_layer:
                x = torch.cat((x, coarse_feats), dim=1)
        return self.fc_logits(x)

    def get_targets(self, rois, rel_roi_points, sampling_results, gt_masks,
                    cfg):
        """Get training targets of MaskPointHead for all images.

        Args:
            rois (Tensor): Region of Interest, shape (num_rois, 5).
            rel_roi_points: Points coordinates relative to RoI, shape
                (num_rois, num_points, 2).
            sampling_results (:obj:`SamplingResult`): Sampling result after
                sampling and assignment.
            gt_masks (Tensor) : Ground truth segmentation masks of
                corresponding boxes, shape (num_rois, height, width).
            cfg (dict): Training cfg.

        Returns:
            Tensor: Point target, shape (num_rois, num_points).
        """
        num_imgs = len(sampling_results)
        rois_list = []
        rel_roi_points_list = []
        # rois[:, 0] holds the batch index; split rois/points per image.
        for batch_ind in range(num_imgs):
            inds = (rois[:, 0] == batch_ind)
            rois_list.append(rois[inds])
            rel_roi_points_list.append(rel_roi_points[inds])
        pos_assigned_gt_inds_list = [
            res.pos_assigned_gt_inds for res in sampling_results
        ]
        cfg_list = [cfg for _ in range(num_imgs)]

        point_targets = map(self._get_target_single, rois_list,
                            rel_roi_points_list, pos_assigned_gt_inds_list,
                            gt_masks, cfg_list)
        point_targets = list(point_targets)

        if len(point_targets) > 0:
            point_targets = torch.cat(point_targets)

        return point_targets

    def _get_target_single(self, rois, rel_roi_points, pos_assigned_gt_inds,
                           gt_masks, cfg):
        """Get training target of MaskPointHead for each image."""
        num_pos = rois.size(0)
        num_points = cfg.num_points
        if num_pos > 0:
            gt_masks_th = (
                gt_masks.to_tensor(rois.dtype, rois.device).index_select(
                    0, pos_assigned_gt_inds))
            gt_masks_th = gt_masks_th.unsqueeze(1)
            rel_img_points = rel_roi_point_to_rel_img_point(
                rois, rel_roi_points, gt_masks_th)
            point_targets = point_sample(gt_masks_th,
                                         rel_img_points).squeeze(1)
        else:
            point_targets = rois.new_zeros((0, num_points))
        return point_targets

    def loss(self, point_pred, point_targets, labels):
        """Calculate loss for MaskPointHead.

        Args:
            point_pred (Tensor): Point predication result, shape
                (num_rois, num_classes, num_points).
            point_targets (Tensor): Point targets, shape (num_roi, num_points).
            labels (Tensor): Class label of corresponding boxes,
                shape (num_rois, )

        Returns:
            dict[str, Tensor]: a dictionary of point loss components
        """
        loss = dict()
        if self.class_agnostic:
            loss_point = self.loss_point(point_pred, point_targets,
                                         torch.zeros_like(labels))
        else:
            loss_point = self.loss_point(point_pred, point_targets, labels)
        loss['loss_point'] = loss_point
        return loss

    def _get_uncertainty(self, mask_pred, labels):
        """Estimate uncertainty based on pred logits.

        We estimate uncertainty as L1 distance between 0.0 and the logits
        prediction in 'mask_pred' for the foreground class in `classes`.

        Args:
            mask_pred (Tensor): mask predication logits, shape (num_rois,
                num_classes, mask_height, mask_width).
            labels (list[Tensor]): Either predicted or ground truth label for
                each predicted mask, of length num_rois.

        Returns:
            scores (Tensor): Uncertainty scores with the most uncertain
                locations having the highest uncertainty score,
                shape (num_rois, 1, mask_height, mask_width)
        """
        if mask_pred.shape[1] == 1:
            # class-agnostic prediction: the single channel is the foreground
            gt_class_logits = mask_pred.clone()
        else:
            inds = torch.arange(mask_pred.shape[0], device=mask_pred.device)
            gt_class_logits = mask_pred[inds, labels].unsqueeze(1)
        return -torch.abs(gt_class_logits)

    def get_roi_rel_points_train(self, mask_pred, labels, cfg):
        """Get ``num_points`` most uncertain points with random points during
        train.

        Sample points in [0, 1] x [0, 1] coordinate space based on their
        uncertainty. The uncertainties are calculated for each point using
        '_get_uncertainty()' function that takes point's logit prediction as
        input.

        Args:
            mask_pred (Tensor): A tensor of shape (num_rois, num_classes,
                mask_height, mask_width) for class-specific or class-agnostic
                prediction.
            labels (list): The ground truth class for each instance.
            cfg (dict): Training config of point head.

        Returns:
            point_coords (Tensor): A tensor of shape (num_rois, num_points, 2)
                that contains the coordinates sampled points.
        """
        num_points = cfg.num_points
        oversample_ratio = cfg.oversample_ratio
        importance_sample_ratio = cfg.importance_sample_ratio
        assert oversample_ratio >= 1
        assert 0 <= importance_sample_ratio <= 1
        batch_size = mask_pred.shape[0]
        num_sampled = int(num_points * oversample_ratio)
        point_coords = torch.rand(
            batch_size, num_sampled, 2, device=mask_pred.device)
        point_logits = point_sample(mask_pred, point_coords)
        # It is crucial to calculate uncertainty based on the sampled
        # prediction value for the points. Calculating uncertainties of the
        # coarse predictions first and sampling them for points leads to
        # incorrect results. To illustrate this: assume uncertainty func(
        # logits)=-abs(logits), a sampled point between two coarse
        # predictions with -1 and 1 logits has 0 logits, and therefore 0
        # uncertainty value. However, if we calculate uncertainties for the
        # coarse predictions first, both will have -1 uncertainty,
        # and sampled point will get -1 uncertainty.
        point_uncertainties = self._get_uncertainty(point_logits, labels)
        num_uncertain_points = int(importance_sample_ratio * num_points)
        num_random_points = num_points - num_uncertain_points
        idx = torch.topk(
            point_uncertainties[:, 0, :], k=num_uncertain_points, dim=1)[1]
        # Offset per-row top-k indices into the flattened coords tensor.
        shift = num_sampled * torch.arange(
            batch_size, dtype=torch.long, device=mask_pred.device)
        idx += shift[:, None]
        point_coords = point_coords.view(-1, 2)[idx.view(-1), :].view(
            batch_size, num_uncertain_points, 2)
        if num_random_points > 0:
            rand_roi_coords = torch.rand(
                batch_size, num_random_points, 2, device=mask_pred.device)
            point_coords = torch.cat((point_coords, rand_roi_coords), dim=1)
        return point_coords

    def get_roi_rel_points_test(self, mask_pred, pred_label, cfg):
        """Get ``num_points`` most uncertain points during test.

        Args:
            mask_pred (Tensor): A tensor of shape (num_rois, num_classes,
                mask_height, mask_width) for class-specific or class-agnostic
                prediction.
            pred_label (list): The predication class for each instance.
            cfg (dict): Testing config of point head.

        Returns:
            point_indices (Tensor): A tensor of shape (num_rois, num_points)
                that contains indices from [0, mask_height x mask_width) of the
                most uncertain points.
            point_coords (Tensor): A tensor of shape (num_rois, num_points, 2)
                that contains [0, 1] x [0, 1] normalized coordinates of the
                most uncertain points from the [mask_height, mask_width] grid .
        """
        num_points = cfg.subdivision_num_points
        uncertainty_map = self._get_uncertainty(mask_pred, pred_label)
        num_rois, _, mask_height, mask_width = uncertainty_map.shape

        # During ONNX exporting, the type of each elements of 'shape' is
        # `Tensor(float)`, while it is `float` during PyTorch inference.
        if isinstance(mask_height, torch.Tensor):
            h_step = 1.0 / mask_height.float()
            w_step = 1.0 / mask_width.float()
        else:
            h_step = 1.0 / mask_height
            w_step = 1.0 / mask_width
        # cast to int to avoid dynamic K for TopK op in ONNX
        mask_size = int(mask_height * mask_width)
        uncertainty_map = uncertainty_map.view(num_rois, mask_size)
        num_points = min(mask_size, num_points)
        point_indices = uncertainty_map.topk(num_points, dim=1)[1]
        # Convert flat grid indices to the (x, y) centers of the grid cells.
        xs = w_step / 2.0 + (point_indices % mask_width).float() * w_step
        ys = h_step / 2.0 + (point_indices // mask_width).float() * h_step
        point_coords = torch.stack([xs, ys], dim=2)
        return point_indices, point_coords
| 13,455 | 42.830619 | 126 | py |
PseCo | PseCo-master/thirdparty/mmdetection/mmdet/models/losses/ghm_loss.py | # Copyright (c) OpenMMLab. All rights reserved.
import torch
import torch.nn as nn
import torch.nn.functional as F
from ..builder import LOSSES
from .utils import weight_reduce_loss
def _expand_onehot_labels(labels, label_weights, label_channels):
bin_labels = labels.new_full((labels.size(0), label_channels), 0)
inds = torch.nonzero(
(labels >= 0) & (labels < label_channels), as_tuple=False).squeeze()
if inds.numel() > 0:
bin_labels[inds, labels[inds]] = 1
bin_label_weights = label_weights.view(-1, 1).expand(
label_weights.size(0), label_channels)
return bin_labels, bin_label_weights
# TODO: code refactoring to make it consistent with other losses
@LOSSES.register_module()
class GHMC(nn.Module):
    """GHM Classification Loss.

    Details of the theorem can be viewed in the paper
    `Gradient Harmonized Single-stage Detector
    <https://arxiv.org/abs/1811.05181>`_.

    Args:
        bins (int): Number of the unit regions for distribution calculation.
        momentum (float): The parameter for moving average.
        use_sigmoid (bool): Can only be true for BCE based loss now.
        loss_weight (float): The weight of the total GHM-C loss.
        reduction (str): Options are "none", "mean" and "sum".
            Defaults to "mean"
    """

    def __init__(self,
                 bins=10,
                 momentum=0,
                 use_sigmoid=True,
                 loss_weight=1.0,
                 reduction='mean'):
        super(GHMC, self).__init__()
        self.bins = bins
        self.momentum = momentum
        # Equal-width bin edges over the gradient-norm range [0, 1].
        edges = torch.arange(bins + 1).float() / bins
        self.register_buffer('edges', edges)
        # Nudge the last edge above 1 so g == 1 falls inside the last bin
        # (bin membership uses g < edges[i + 1]).
        self.edges[-1] += 1e-6
        if momentum > 0:
            # Running (EMA) population of each bin.
            acc_sum = torch.zeros(bins)
            self.register_buffer('acc_sum', acc_sum)
        self.use_sigmoid = use_sigmoid
        if not self.use_sigmoid:
            raise NotImplementedError
        self.loss_weight = loss_weight
        self.reduction = reduction

    def forward(self,
                pred,
                target,
                label_weight,
                reduction_override=None,
                **kwargs):
        """Calculate the GHM-C loss.

        Args:
            pred (float tensor of size [batch_num, class_num]):
                The direct prediction of classification fc layer.
            target (float tensor of size [batch_num, class_num]):
                Binary class target for each sample.
            label_weight (float tensor of size [batch_num, class_num]):
                the value is 1 if the sample is valid and 0 if ignored.
            reduction_override (str, optional): The reduction method used to
                override the original reduction method of the loss.
                Defaults to None.
        Returns:
            The gradient harmonized loss.
        """
        assert reduction_override in (None, 'none', 'mean', 'sum')
        reduction = (
            reduction_override if reduction_override else self.reduction)
        # the target should be binary class label
        if pred.dim() != target.dim():
            target, label_weight = _expand_onehot_labels(
                target, label_weight, pred.size(-1))
        target, label_weight = target.float(), label_weight.float()
        edges = self.edges
        mmt = self.momentum
        weights = torch.zeros_like(pred)

        # gradient length
        g = torch.abs(pred.sigmoid().detach() - target)

        valid = label_weight > 0
        tot = max(valid.float().sum().item(), 1.0)
        n = 0  # n valid bins
        for i in range(self.bins):
            inds = (g >= edges[i]) & (g < edges[i + 1]) & valid
            num_in_bin = inds.sum().item()
            if num_in_bin > 0:
                if mmt > 0:
                    # EMA-smoothed bin population; up-weights sparse bins.
                    self.acc_sum[i] = mmt * self.acc_sum[i] \
                        + (1 - mmt) * num_in_bin
                    weights[inds] = tot / self.acc_sum[i]
                else:
                    weights[inds] = tot / num_in_bin
                n += 1
        if n > 0:
            # Normalize by the number of populated bins.
            weights = weights / n

        loss = F.binary_cross_entropy_with_logits(
            pred, target, reduction='none')
        loss = weight_reduce_loss(
            loss, weights, reduction=reduction, avg_factor=tot)
        return loss * self.loss_weight
# TODO: code refactoring to make it consistent with other losses
@LOSSES.register_module()
class GHMR(nn.Module):
    """GHM Regression Loss.

    Details of the theorem can be viewed in the paper
    `Gradient Harmonized Single-stage Detector
    <https://arxiv.org/abs/1811.05181>`_.

    Args:
        mu (float): The parameter for the Authentic Smooth L1 loss.
        bins (int): Number of the unit regions for distribution calculation.
        momentum (float): The parameter for moving average.
        loss_weight (float): The weight of the total GHM-R loss.
        reduction (str): Options are "none", "mean" and "sum".
            Defaults to "mean"
    """

    def __init__(self,
                 mu=0.02,
                 bins=10,
                 momentum=0,
                 loss_weight=1.0,
                 reduction='mean'):
        super(GHMR, self).__init__()
        self.mu = mu
        self.bins = bins
        # Equal-width bin edges over the gradient-norm range [0, 1].
        edges = torch.arange(bins + 1).float() / bins
        self.register_buffer('edges', edges)
        # Open up the last bin so any gradient norm lands in some bin.
        self.edges[-1] = 1e3
        self.momentum = momentum
        if momentum > 0:
            # Running (EMA) population of each bin.
            acc_sum = torch.zeros(bins)
            self.register_buffer('acc_sum', acc_sum)
        self.loss_weight = loss_weight
        self.reduction = reduction

    # TODO: support reduction parameter
    def forward(self,
                pred,
                target,
                label_weight,
                avg_factor=None,
                reduction_override=None):
        """Calculate the GHM-R loss.

        Args:
            pred (float tensor of size [batch_num, 4 (* class_num)]):
                The prediction of box regression layer. Channel number can be 4
                or 4 * class_num depending on whether it is class-agnostic.
            target (float tensor of size [batch_num, 4 (* class_num)]):
                The target regression values with the same size of pred.
            label_weight (float tensor of size [batch_num, 4 (* class_num)]):
                The weight of each sample, 0 if ignored.
            reduction_override (str, optional): The reduction method used to
                override the original reduction method of the loss.
                Defaults to None.
        Returns:
            The gradient harmonized loss.
        """
        assert reduction_override in (None, 'none', 'mean', 'sum')
        reduction = (
            reduction_override if reduction_override else self.reduction)
        mu = self.mu
        edges = self.edges
        mmt = self.momentum

        # ASL1 loss: smooth-L1-like loss sqrt(d^2 + mu^2) - mu.
        diff = pred - target
        loss = torch.sqrt(diff * diff + mu * mu) - mu

        # gradient length (derivative of the ASL1 loss, in [0, 1))
        g = torch.abs(diff / torch.sqrt(mu * mu + diff * diff)).detach()
        weights = torch.zeros_like(g)

        valid = label_weight > 0
        tot = max(label_weight.float().sum().item(), 1.0)
        n = 0  # n: valid bins
        for i in range(self.bins):
            inds = (g >= edges[i]) & (g < edges[i + 1]) & valid
            num_in_bin = inds.sum().item()
            if num_in_bin > 0:
                n += 1
                if mmt > 0:
                    # EMA-smoothed bin population; up-weights sparse bins.
                    self.acc_sum[i] = mmt * self.acc_sum[i] \
                        + (1 - mmt) * num_in_bin
                    weights[inds] = tot / self.acc_sum[i]
                else:
                    weights[inds] = tot / num_in_bin
        if n > 0:
            # Normalize by the number of populated bins.
            weights /= n

        loss = weight_reduce_loss(
            loss, weights, reduction=reduction, avg_factor=tot)
        return loss * self.loss_weight
| 7,923 | 36.028037 | 79 | py |
PseCo | PseCo-master/thirdparty/mmdetection/mmdet/models/losses/mse_loss.py | # Copyright (c) OpenMMLab. All rights reserved.
import torch.nn as nn
import torch.nn.functional as F
from ..builder import LOSSES
from .utils import weighted_loss
@weighted_loss
def mse_loss(pred, target):
    """Wrapper of mse loss.

    Args:
        pred (torch.Tensor): The prediction.
        target (torch.Tensor): The learning target of the prediction.

    Returns:
        torch.Tensor: Elementwise squared error (no reduction; the
            ``weighted_loss`` decorator handles weighting and reduction).
    """
    return F.mse_loss(pred, target, reduction='none')
@LOSSES.register_module()
class MSELoss(nn.Module):
    """Mean squared error loss module wrapping :func:`mse_loss`.

    Args:
        reduction (str, optional): How the loss is reduced to a scalar.
            One of "none", "mean" and "sum". Defaults to "mean".
        loss_weight (float, optional): Scale applied to the final loss.
            Defaults to 1.0.
    """

    def __init__(self, reduction='mean', loss_weight=1.0):
        super().__init__()
        self.reduction = reduction
        self.loss_weight = loss_weight

    def forward(self,
                pred,
                target,
                weight=None,
                avg_factor=None,
                reduction_override=None):
        """Compute the (optionally weighted) MSE loss.

        Args:
            pred (torch.Tensor): The prediction.
            target (torch.Tensor): The learning target of the prediction.
            weight (torch.Tensor, optional): Per-element loss weight.
                Defaults to None.
            avg_factor (int, optional): Normalizer used when averaging the
                loss. Defaults to None.
            reduction_override (str, optional): Overrides the module's
                reduction for this call. Defaults to None.

        Returns:
            torch.Tensor: The calculated loss.
        """
        assert reduction_override in (None, 'none', 'mean', 'sum')
        if reduction_override:
            reduction = reduction_override
        else:
            reduction = self.reduction
        raw_loss = mse_loss(
            pred, target, weight, reduction=reduction, avg_factor=avg_factor)
        return self.loss_weight * raw_loss
| 1,905 | 31.862069 | 78 | py |
PseCo | PseCo-master/thirdparty/mmdetection/mmdet/models/losses/pisa_loss.py | # Copyright (c) OpenMMLab. All rights reserved.
import mmcv
import torch
from mmdet.core import bbox_overlaps
@mmcv.jit(derivate=True, coderize=True)
def isr_p(cls_score,
          bbox_pred,
          bbox_targets,
          rois,
          sampling_results,
          loss_cls,
          bbox_coder,
          k=2,
          bias=0,
          num_class=80):
    """Importance-based Sample Reweighting (ISR_P), positive part.
    Args:
        cls_score (Tensor): Predicted classification scores.
        bbox_pred (Tensor): Predicted bbox deltas.
        bbox_targets (tuple[Tensor]): A tuple of bbox targets, which are
            labels, label_weights, bbox_targets, bbox_weights, respectively.
        rois (Tensor): Anchors (single_stage) in shape (n, 4) or RoIs
            (two_stage) in shape (n, 5).
        sampling_results (obj): Sampling results.
        loss_cls (func): Classification loss func of the head.
        bbox_coder (obj): BBox coder of the head.
        k (float): Power of the non-linear mapping.
        bias (float): Shift of the non-linear mapping.
        num_class (int): Number of classes, default: 80.
    Return:
        tuple([Tensor]): labels, imp_based_label_weights, bbox_targets,
            bbox_target_weights
    """
    labels, label_weights, bbox_targets, bbox_weights = bbox_targets
    # Positives are the samples whose label lies in [0, num_class);
    # num_class itself is the background category.
    pos_label_inds = ((labels >= 0) &
                      (labels < num_class)).nonzero().reshape(-1)
    pos_labels = labels[pos_label_inds]
    # if no positive samples, return the original targets
    num_pos = float(pos_label_inds.size(0))
    if num_pos == 0:
        return labels, label_weights, bbox_targets, bbox_weights
    # merge pos_assigned_gt_inds of per image to a single tensor
    # (offset each image's GT indices so they stay globally unique)
    gts = list()
    last_max_gt = 0
    for i in range(len(sampling_results)):
        gt_i = sampling_results[i].pos_assigned_gt_inds
        gts.append(gt_i + last_max_gt)
        if len(gt_i) != 0:
            last_max_gt = gt_i.max() + 1
    gts = torch.cat(gts)
    assert len(gts) == num_pos
    cls_score = cls_score.detach()
    bbox_pred = bbox_pred.detach()
    # For single stage detectors, rois here indicate anchors, in shape (N, 4)
    # For two stage detectors, rois are in shape (N, 5)
    if rois.size(-1) == 5:
        pos_rois = rois[pos_label_inds][:, 1:]
    else:
        pos_rois = rois[pos_label_inds]
    if bbox_pred.size(-1) > 4:
        bbox_pred = bbox_pred.view(bbox_pred.size(0), -1, 4)
        pos_delta_pred = bbox_pred[pos_label_inds, pos_labels].view(-1, 4)
    else:
        pos_delta_pred = bbox_pred[pos_label_inds].view(-1, 4)
    # compute iou of the predicted bbox and the corresponding GT
    pos_delta_target = bbox_targets[pos_label_inds].view(-1, 4)
    pos_bbox_pred = bbox_coder.decode(pos_rois, pos_delta_pred)
    target_bbox_pred = bbox_coder.decode(pos_rois, pos_delta_target)
    ious = bbox_overlaps(pos_bbox_pred, target_bbox_pred, is_aligned=True)
    pos_imp_weights = label_weights[pos_label_inds]
    # Two steps to compute IoU-HLR. Samples are first sorted by IoU locally,
    # then sorted again within the same-rank group
    max_l_num = pos_labels.bincount().max()
    for label in pos_labels.unique():
        l_inds = (pos_labels == label).nonzero().view(-1)
        l_gts = gts[l_inds]
        for t in l_gts.unique():
            t_inds = l_inds[l_gts == t]
            t_ious = ious[t_inds]
            _, t_iou_rank_idx = t_ious.sort(descending=True)
            _, t_iou_rank = t_iou_rank_idx.sort()
            # Boost the IoU by the local (per-GT) rank so that the global
            # sort below groups samples of the same local rank together.
            ious[t_inds] += max_l_num - t_iou_rank.float()
        l_ious = ious[l_inds]
        _, l_iou_rank_idx = l_ious.sort(descending=True)
        _, l_iou_rank = l_iou_rank_idx.sort()  # IoU-HLR
        # linearly map HLR to label weights
        pos_imp_weights[l_inds] *= (max_l_num - l_iou_rank.float()) / max_l_num
    pos_imp_weights = (bias + pos_imp_weights * (1 - bias)).pow(k)
    # normalize to make the new weighted loss value equal to the original loss
    pos_loss_cls = loss_cls(
        cls_score[pos_label_inds], pos_labels, reduction_override='none')
    if pos_loss_cls.dim() > 1:
        ori_pos_loss_cls = pos_loss_cls * label_weights[pos_label_inds][:,
                                                                        None]
        new_pos_loss_cls = pos_loss_cls * pos_imp_weights[:, None]
    else:
        ori_pos_loss_cls = pos_loss_cls * label_weights[pos_label_inds]
        new_pos_loss_cls = pos_loss_cls * pos_imp_weights
    pos_loss_cls_ratio = ori_pos_loss_cls.sum() / new_pos_loss_cls.sum()
    pos_imp_weights = pos_imp_weights * pos_loss_cls_ratio
    label_weights[pos_label_inds] = pos_imp_weights
    bbox_targets = labels, label_weights, bbox_targets, bbox_weights
    return bbox_targets
@mmcv.jit(derivate=True, coderize=True)
def carl_loss(cls_score,
              labels,
              bbox_pred,
              bbox_targets,
              loss_bbox,
              k=1,
              bias=0.2,
              avg_factor=None,
              sigmoid=False,
              num_class=80):
    """Classification-Aware Regression Loss (CARL).
    Args:
        cls_score (Tensor): Predicted classification scores.
        labels (Tensor): Targets of classification.
        bbox_pred (Tensor): Predicted bbox deltas.
        bbox_targets (Tensor): Target of bbox regression.
        loss_bbox (func): Regression loss func of the head.
        k (float): Power of the non-linear mapping.
        bias (float): Shift of the non-linear mapping.
        avg_factor (int): Average factor used in regression loss.
        sigmoid (bool): Activation of the classification score.
        num_class (int): Number of classes, default: 80.
    Return:
        dict: CARL loss dict.
    """
    pos_label_inds = ((labels >= 0) &
                      (labels < num_class)).nonzero().reshape(-1)
    if pos_label_inds.numel() == 0:
        # No positives: return a zero loss that still carries gradient.
        return dict(loss_carl=cls_score.sum()[None] * 0.)
    pos_labels = labels[pos_label_inds]
    # multiply pos_cls_score with the corresponding bbox weight
    # and remain gradient
    if sigmoid:
        pos_cls_score = cls_score.sigmoid()[pos_label_inds, pos_labels]
    else:
        pos_cls_score = cls_score.softmax(-1)[pos_label_inds, pos_labels]
    carl_loss_weights = (bias + (1 - bias) * pos_cls_score).pow(k)
    # normalize carl_loss_weight to make its sum equal to num positive
    num_pos = float(pos_cls_score.size(0))
    weight_ratio = num_pos / carl_loss_weights.sum()
    carl_loss_weights *= weight_ratio
    if avg_factor is None:
        avg_factor = bbox_targets.size(0)
    # if is class agnostic, bbox pred is in shape (N, 4)
    # otherwise, bbox pred is in shape (N, #classes, 4)
    if bbox_pred.size(-1) > 4:
        bbox_pred = bbox_pred.view(bbox_pred.size(0), -1, 4)
        pos_bbox_preds = bbox_pred[pos_label_inds, pos_labels]
    else:
        pos_bbox_preds = bbox_pred[pos_label_inds]
    ori_loss_reg = loss_bbox(
        pos_bbox_preds,
        bbox_targets[pos_label_inds],
        reduction_override='none') / avg_factor
    loss_carl = (ori_loss_reg * carl_loss_weights[:, None]).sum()
    return dict(loss_carl=loss_carl[None])
| 7,216 | 38.010811 | 79 | py |
PseCo | PseCo-master/thirdparty/mmdetection/mmdet/models/losses/balanced_l1_loss.py | # Copyright (c) OpenMMLab. All rights reserved.
import mmcv
import numpy as np
import torch
import torch.nn as nn
from ..builder import LOSSES
from .utils import weighted_loss
@mmcv.jit(derivate=True, coderize=True)
@weighted_loss
def balanced_l1_loss(pred,
                     target,
                     beta=1.0,
                     alpha=0.5,
                     gamma=1.5,
                     reduction='mean'):
    """Calculate balanced L1 loss.
    Please see the `Libra R-CNN <https://arxiv.org/pdf/1904.02701.pdf>`_
    Args:
        pred (torch.Tensor): The prediction with shape (N, 4).
        target (torch.Tensor): The learning target of the prediction with
            shape (N, 4).
        beta (float): The loss is a piecewise function of prediction and target
            and ``beta`` serves as a threshold for the difference between the
            prediction and target. Defaults to 1.0.
        alpha (float): The denominator ``alpha`` in the balanced L1 loss.
            Defaults to 0.5.
        gamma (float): The ``gamma`` in the balanced L1 loss.
            Defaults to 1.5.
        reduction (str, optional): The method that reduces the loss to a
            scalar. Options are "none", "mean" and "sum".
    Returns:
        torch.Tensor: The calculated loss
    """
    assert beta > 0
    # Empty target: return a zero that still participates in autograd.
    if target.numel() == 0:
        return pred.sum() * 0
    assert pred.size() == target.size()
    diff = torch.abs(pred - target)
    # Constant chosen so the two branches of the piecewise loss join
    # continuously (with matching gradient) at diff == beta.
    b = np.e**(gamma / alpha) - 1
    loss = torch.where(
        diff < beta, alpha / b *
        (b * diff + 1) * torch.log(b * diff / beta + 1) - alpha * diff,
        gamma * diff + gamma / b - alpha * beta)
    return loss
@LOSSES.register_module()
class BalancedL1Loss(nn.Module):
    """Balanced L1 Loss.

    arXiv: https://arxiv.org/pdf/1904.02701.pdf (CVPR 2019)

    Args:
        alpha (float): The denominator ``alpha`` in the balanced L1 loss.
            Defaults to 0.5.
        gamma (float): The ``gamma`` in the balanced L1 loss. Defaults to 1.5.
        beta (float, optional): Threshold between the two pieces of the
            piecewise loss. Defaults to 1.0.
        reduction (str, optional): How the loss is reduced to a scalar.
            One of "none", "mean" and "sum".
        loss_weight (float, optional): Scale applied to the final loss.
            Defaults to 1.0.
    """

    def __init__(self,
                 alpha=0.5,
                 gamma=1.5,
                 beta=1.0,
                 reduction='mean',
                 loss_weight=1.0):
        super().__init__()
        self.alpha = alpha
        self.gamma = gamma
        self.beta = beta
        self.reduction = reduction
        self.loss_weight = loss_weight

    def forward(self,
                pred,
                target,
                weight=None,
                avg_factor=None,
                reduction_override=None,
                **kwargs):
        """Compute the weighted balanced-L1 loss.

        Args:
            pred (torch.Tensor): The prediction with shape (N, 4).
            target (torch.Tensor): The learning target of the prediction with
                shape (N, 4).
            weight (torch.Tensor, optional): Sample-wise loss weight with
                shape (N, ).
            avg_factor (int, optional): Normalizer used when averaging the
                loss. Defaults to None.
            reduction_override (str, optional): Overrides the module's
                reduction for this call; one of "none", "mean" and "sum".

        Returns:
            torch.Tensor: The calculated loss.
        """
        assert reduction_override in (None, 'none', 'mean', 'sum')
        if reduction_override:
            reduction = reduction_override
        else:
            reduction = self.reduction
        raw_loss = balanced_l1_loss(
            pred,
            target,
            weight,
            alpha=self.alpha,
            gamma=self.gamma,
            beta=self.beta,
            reduction=reduction,
            avg_factor=avg_factor,
            **kwargs)
        return self.loss_weight * raw_loss
| 4,252 | 33.024 | 79 | py |
PseCo | PseCo-master/thirdparty/mmdetection/mmdet/models/losses/iou_loss.py | # Copyright (c) OpenMMLab. All rights reserved.
import math
import warnings
import mmcv
import torch
import torch.nn as nn
from mmdet.core import bbox_overlaps
from ..builder import LOSSES
from .utils import weighted_loss
@mmcv.jit(derivate=True, coderize=True)
@weighted_loss
def iou_loss(pred, target, linear=False, mode='log', eps=1e-6):
    """IoU loss.
    Computing the IoU loss between a set of predicted bboxes and target bboxes.
    The loss is calculated as negative log of IoU.
    Args:
        pred (torch.Tensor): Predicted bboxes of format (x1, y1, x2, y2),
            shape (n, 4).
        target (torch.Tensor): Corresponding gt bboxes, shape (n, 4).
        linear (bool, optional): If True, use linear scale of loss instead of
            log scale. Default: False. Deprecated in favor of ``mode``.
        mode (str): Loss scaling mode, including "linear", "square", and "log".
            Default: 'log'
        eps (float): Eps to avoid log(0).
    Return:
        torch.Tensor: Loss tensor.
    """
    assert mode in ['linear', 'square', 'log']
    if linear:
        mode = 'linear'
        warnings.warn('DeprecationWarning: Setting "linear=True" in '
                      'iou_loss is deprecated, please use "mode=`linear`" '
                      'instead.')
    # Clamp keeps the IoU strictly positive so the log branch is finite.
    ious = bbox_overlaps(pred, target, is_aligned=True).clamp(min=eps)
    if mode == 'linear':
        loss = 1 - ious
    elif mode == 'square':
        loss = 1 - ious**2
    elif mode == 'log':
        loss = -ious.log()
    else:
        # Unreachable given the assert above; kept as a defensive guard.
        raise NotImplementedError
    return loss
@mmcv.jit(derivate=True, coderize=True)
@weighted_loss
def bounded_iou_loss(pred, target, beta=0.2, eps=1e-3):
    """BIoULoss.
    This is an implementation of paper
    `Improving Object Localization with Fitness NMS and Bounded IoU Loss.
    <https://arxiv.org/abs/1711.00164>`_.
    Args:
        pred (torch.Tensor): Predicted bboxes of format (x1, y1, x2, y2).
        target (torch.Tensor): Target bboxes of the same format.
        beta (float): beta parameter in smoothl1.
        eps (float): eps to avoid NaN.
    """
    # Decompose predictions into center coordinates and width/height.
    pred_ctrx = (pred[:, 0] + pred[:, 2]) * 0.5
    pred_ctry = (pred[:, 1] + pred[:, 3]) * 0.5
    pred_w = pred[:, 2] - pred[:, 0]
    pred_h = pred[:, 3] - pred[:, 1]
    # Targets never require gradient here.
    with torch.no_grad():
        target_ctrx = (target[:, 0] + target[:, 2]) * 0.5
        target_ctry = (target[:, 1] + target[:, 3]) * 0.5
        target_w = target[:, 2] - target[:, 0]
        target_h = target[:, 3] - target[:, 1]
    dx = target_ctrx - pred_ctrx
    dy = target_ctry - pred_ctry
    # Per-coordinate bounded-IoU terms: each is 1 minus the maximum IoU
    # attainable while varying only that coordinate (see paper, Sec. 4).
    loss_dx = 1 - torch.max(
        (target_w - 2 * dx.abs()) /
        (target_w + 2 * dx.abs() + eps), torch.zeros_like(dx))
    loss_dy = 1 - torch.max(
        (target_h - 2 * dy.abs()) /
        (target_h + 2 * dy.abs() + eps), torch.zeros_like(dy))
    loss_dw = 1 - torch.min(target_w / (pred_w + eps), pred_w /
                            (target_w + eps))
    loss_dh = 1 - torch.min(target_h / (pred_h + eps), pred_h /
                            (target_h + eps))
    # view(..., -1) does not work for empty tensor
    loss_comb = torch.stack([loss_dx, loss_dy, loss_dw, loss_dh],
                            dim=-1).flatten(1)
    # Smooth-L1 style transition controlled by beta.
    loss = torch.where(loss_comb < beta, 0.5 * loss_comb * loss_comb / beta,
                       loss_comb - 0.5 * beta)
    return loss
@mmcv.jit(derivate=True, coderize=True)
@weighted_loss
def giou_loss(pred, target, eps=1e-7):
    r"""Generalized IoU loss.

    From `Generalized Intersection over Union: A Metric and A Loss for
    Bounding Box Regression <https://arxiv.org/abs/1902.09630>`_.

    Args:
        pred (torch.Tensor): Predicted bboxes of format (x1, y1, x2, y2),
            shape (n, 4).
        target (torch.Tensor): Corresponding gt bboxes, shape (n, 4).
        eps (float): Eps to avoid log(0).

    Return:
        Tensor: Loss tensor.
    """
    # GIoU is in [-1, 1]; the loss is its complement so a perfect match is 0.
    overlap_metric = bbox_overlaps(
        pred, target, mode='giou', is_aligned=True, eps=eps)
    return 1 - overlap_metric
@mmcv.jit(derivate=True, coderize=True)
@weighted_loss
def diou_loss(pred, target, eps=1e-7):
    r"""`Implementation of Distance-IoU Loss: Faster and Better
    Learning for Bounding Box Regression, https://arxiv.org/abs/1911.08287`_.
    Code is modified from https://github.com/Zzh-tju/DIoU.
    Args:
        pred (Tensor): Predicted bboxes of format (x1, y1, x2, y2),
            shape (n, 4).
        target (Tensor): Corresponding gt bboxes, shape (n, 4).
        eps (float): Eps to avoid log(0).
    Return:
        Tensor: Loss tensor.
    """
    # overlap
    lt = torch.max(pred[:, :2], target[:, :2])
    rb = torch.min(pred[:, 2:], target[:, 2:])
    wh = (rb - lt).clamp(min=0)
    overlap = wh[:, 0] * wh[:, 1]
    # union
    ap = (pred[:, 2] - pred[:, 0]) * (pred[:, 3] - pred[:, 1])
    ag = (target[:, 2] - target[:, 0]) * (target[:, 3] - target[:, 1])
    union = ap + ag - overlap + eps
    # IoU
    ious = overlap / union
    # enclose area: smallest axis-aligned box covering both pred and target
    enclose_x1y1 = torch.min(pred[:, :2], target[:, :2])
    enclose_x2y2 = torch.max(pred[:, 2:], target[:, 2:])
    enclose_wh = (enclose_x2y2 - enclose_x1y1).clamp(min=0)
    cw = enclose_wh[:, 0]
    ch = enclose_wh[:, 1]
    # c2: squared diagonal of the enclosing box
    c2 = cw**2 + ch**2 + eps
    b1_x1, b1_y1 = pred[:, 0], pred[:, 1]
    b1_x2, b1_y2 = pred[:, 2], pred[:, 3]
    b2_x1, b2_y1 = target[:, 0], target[:, 1]
    b2_x2, b2_y2 = target[:, 2], target[:, 3]
    # rho2: squared distance between the two box centers
    left = ((b2_x1 + b2_x2) - (b1_x1 + b1_x2))**2 / 4
    right = ((b2_y1 + b2_y2) - (b1_y1 + b1_y2))**2 / 4
    rho2 = left + right
    # DIoU = IoU - (center distance / enclosing diagonal)^2
    dious = ious - rho2 / c2
    loss = 1 - dious
    return loss
@mmcv.jit(derivate=True, coderize=True)
@weighted_loss
def ciou_loss(pred, target, eps=1e-7):
    r"""`Implementation of paper `Enhancing Geometric Factors into
    Model Learning and Inference for Object Detection and Instance
    Segmentation <https://arxiv.org/abs/2005.03572>`_.
    Code is modified from https://github.com/Zzh-tju/CIoU.
    Args:
        pred (Tensor): Predicted bboxes of format (x1, y1, x2, y2),
            shape (n, 4).
        target (Tensor): Corresponding gt bboxes, shape (n, 4).
        eps (float): Eps to avoid log(0).
    Return:
        Tensor: Loss tensor.
    """
    # overlap
    lt = torch.max(pred[:, :2], target[:, :2])
    rb = torch.min(pred[:, 2:], target[:, 2:])
    wh = (rb - lt).clamp(min=0)
    overlap = wh[:, 0] * wh[:, 1]
    # union
    ap = (pred[:, 2] - pred[:, 0]) * (pred[:, 3] - pred[:, 1])
    ag = (target[:, 2] - target[:, 0]) * (target[:, 3] - target[:, 1])
    union = ap + ag - overlap + eps
    # IoU
    ious = overlap / union
    # enclose area: smallest axis-aligned box covering both pred and target
    enclose_x1y1 = torch.min(pred[:, :2], target[:, :2])
    enclose_x2y2 = torch.max(pred[:, 2:], target[:, 2:])
    enclose_wh = (enclose_x2y2 - enclose_x1y1).clamp(min=0)
    cw = enclose_wh[:, 0]
    ch = enclose_wh[:, 1]
    # c2: squared diagonal of the enclosing box
    c2 = cw**2 + ch**2 + eps
    b1_x1, b1_y1 = pred[:, 0], pred[:, 1]
    b1_x2, b1_y2 = pred[:, 2], pred[:, 3]
    b2_x1, b2_y1 = target[:, 0], target[:, 1]
    b2_x2, b2_y2 = target[:, 2], target[:, 3]
    w1, h1 = b1_x2 - b1_x1, b1_y2 - b1_y1 + eps
    w2, h2 = b2_x2 - b2_x1, b2_y2 - b2_y1 + eps
    # rho2: squared distance between the two box centers
    left = ((b2_x1 + b2_x2) - (b1_x1 + b1_x2))**2 / 4
    right = ((b2_y1 + b2_y2) - (b1_y1 + b1_y2))**2 / 4
    rho2 = left + right
    # v measures aspect-ratio consistency; alpha is its trade-off weight
    # (detached so the aspect term does not receive gradient through alpha).
    factor = 4 / math.pi**2
    v = factor * torch.pow(torch.atan(w2 / h2) - torch.atan(w1 / h1), 2)
    with torch.no_grad():
        alpha = (ious > 0.5).float() * v / (1 - ious + v)
    # CIoU
    cious = ious - (rho2 / c2 + alpha * v)
    loss = 1 - cious.clamp(min=-1.0, max=1.0)
    return loss
@LOSSES.register_module()
class IoULoss(nn.Module):
    """IoULoss.
    Computing the IoU loss between a set of predicted bboxes and target bboxes.
    Args:
        linear (bool): If True, use linear scale of loss else determined
            by mode. Default: False. Deprecated in favor of ``mode``.
        eps (float): Eps to avoid log(0).
        reduction (str): Options are "none", "mean" and "sum".
        loss_weight (float): Weight of loss.
        mode (str): Loss scaling mode, including "linear", "square", and "log".
            Default: 'log'
    """
    def __init__(self,
                 linear=False,
                 eps=1e-6,
                 reduction='mean',
                 loss_weight=1.0,
                 mode='log'):
        super(IoULoss, self).__init__()
        assert mode in ['linear', 'square', 'log']
        if linear:
            # Legacy flag: overrides ``mode`` for backward compatibility.
            mode = 'linear'
            warnings.warn('DeprecationWarning: Setting "linear=True" in '
                          'IOULoss is deprecated, please use "mode=`linear`" '
                          'instead.')
        self.mode = mode
        self.linear = linear
        self.eps = eps
        self.reduction = reduction
        self.loss_weight = loss_weight
    def forward(self,
                pred,
                target,
                weight=None,
                avg_factor=None,
                reduction_override=None,
                **kwargs):
        """Forward function.
        Args:
            pred (torch.Tensor): The prediction.
            target (torch.Tensor): The learning target of the prediction.
            weight (torch.Tensor, optional): The weight of loss for each
                prediction. Defaults to None.
            avg_factor (int, optional): Average factor that is used to average
                the loss. Defaults to None.
            reduction_override (str, optional): The reduction method used to
                override the original reduction method of the loss.
                Defaults to None. Options are "none", "mean" and "sum".
        """
        assert reduction_override in (None, 'none', 'mean', 'sum')
        reduction = (
            reduction_override if reduction_override else self.reduction)
        if (weight is not None) and (not torch.any(weight > 0)) and (
                reduction != 'none'):
            # All weights are zero: short-circuit with a zero-valued result
            # that still keeps pred in the autograd graph.
            if pred.dim() == weight.dim() + 1:
                weight = weight.unsqueeze(1)
            return (pred * weight).sum()  # 0
        if weight is not None and weight.dim() > 1:
            # TODO: remove this in the future
            # reduce the weight of shape (n, 4) to (n,) to match the
            # iou_loss of shape (n,)
            assert weight.shape == pred.shape
            weight = weight.mean(-1)
        loss = self.loss_weight * iou_loss(
            pred,
            target,
            weight,
            mode=self.mode,
            eps=self.eps,
            reduction=reduction,
            avg_factor=avg_factor,
            **kwargs)
        return loss
@LOSSES.register_module()
class BoundedIoULoss(nn.Module):
    """Bounded IoU loss module wrapping :func:`bounded_iou_loss`.

    Args:
        beta (float): Smooth-L1 transition threshold passed through to
            :func:`bounded_iou_loss`. Defaults to 0.2.
        eps (float): Small constant to avoid NaN. Defaults to 1e-3.
        reduction (str): One of "none", "mean" and "sum".
        loss_weight (float): Scale applied to the final loss.
    """

    def __init__(self, beta=0.2, eps=1e-3, reduction='mean', loss_weight=1.0):
        super().__init__()
        self.beta = beta
        self.eps = eps
        self.reduction = reduction
        self.loss_weight = loss_weight

    def forward(self,
                pred,
                target,
                weight=None,
                avg_factor=None,
                reduction_override=None,
                **kwargs):
        """Compute the weighted bounded IoU loss for ``pred`` vs ``target``."""
        if weight is not None and not torch.any(weight > 0):
            # All weights are zero: return a gradient-carrying zero instead
            # of running the full loss computation.
            if pred.dim() == weight.dim() + 1:
                weight = weight.unsqueeze(1)
            return (pred * weight).sum()
        assert reduction_override in (None, 'none', 'mean', 'sum')
        if reduction_override:
            reduction = reduction_override
        else:
            reduction = self.reduction
        raw_loss = bounded_iou_loss(
            pred,
            target,
            weight,
            beta=self.beta,
            eps=self.eps,
            reduction=reduction,
            avg_factor=avg_factor,
            **kwargs)
        return self.loss_weight * raw_loss
@LOSSES.register_module()
class GIoULoss(nn.Module):
    # Generalized IoU loss module wrapping :func:`giou_loss`.
    def __init__(self, eps=1e-6, reduction='mean', loss_weight=1.0):
        super(GIoULoss, self).__init__()
        self.eps = eps
        self.reduction = reduction
        self.loss_weight = loss_weight
    def forward(self,
                pred,
                target,
                weight=None,
                avg_factor=None,
                reduction_override=None,
                **kwargs):
        """Compute the weighted GIoU loss between ``pred`` and ``target``."""
        if weight is not None and not torch.any(weight > 0):
            # All weights are zero: short-circuit with a gradient-carrying
            # zero result.
            if pred.dim() == weight.dim() + 1:
                weight = weight.unsqueeze(1)
            return (pred * weight).sum()  # 0
        assert reduction_override in (None, 'none', 'mean', 'sum')
        reduction = (
            reduction_override if reduction_override else self.reduction)
        if weight is not None and weight.dim() > 1:
            # TODO: remove this in the future
            # reduce the weight of shape (n, 4) to (n,) to match the
            # giou_loss of shape (n,)
            assert weight.shape == pred.shape
            weight = weight.mean(-1)
        loss = self.loss_weight * giou_loss(
            pred,
            target,
            weight,
            eps=self.eps,
            reduction=reduction,
            avg_factor=avg_factor,
            **kwargs)
        return loss
@LOSSES.register_module()
class DIoULoss(nn.Module):
    # Distance-IoU loss module wrapping :func:`diou_loss`.
    def __init__(self, eps=1e-6, reduction='mean', loss_weight=1.0):
        super(DIoULoss, self).__init__()
        self.eps = eps
        self.reduction = reduction
        self.loss_weight = loss_weight
    def forward(self,
                pred,
                target,
                weight=None,
                avg_factor=None,
                reduction_override=None,
                **kwargs):
        """Compute the weighted DIoU loss between ``pred`` and ``target``."""
        if weight is not None and not torch.any(weight > 0):
            # All weights are zero: short-circuit with a gradient-carrying
            # zero result.
            if pred.dim() == weight.dim() + 1:
                weight = weight.unsqueeze(1)
            return (pred * weight).sum()  # 0
        assert reduction_override in (None, 'none', 'mean', 'sum')
        reduction = (
            reduction_override if reduction_override else self.reduction)
        if weight is not None and weight.dim() > 1:
            # TODO: remove this in the future
            # reduce the weight of shape (n, 4) to (n,) to match the
            # diou_loss of shape (n,)
            assert weight.shape == pred.shape
            weight = weight.mean(-1)
        loss = self.loss_weight * diou_loss(
            pred,
            target,
            weight,
            eps=self.eps,
            reduction=reduction,
            avg_factor=avg_factor,
            **kwargs)
        return loss
@LOSSES.register_module()
class CIoULoss(nn.Module):
    # Complete-IoU loss module wrapping :func:`ciou_loss`.
    def __init__(self, eps=1e-6, reduction='mean', loss_weight=1.0):
        super(CIoULoss, self).__init__()
        self.eps = eps
        self.reduction = reduction
        self.loss_weight = loss_weight
    def forward(self,
                pred,
                target,
                weight=None,
                avg_factor=None,
                reduction_override=None,
                **kwargs):
        """Compute the weighted CIoU loss between ``pred`` and ``target``."""
        if weight is not None and not torch.any(weight > 0):
            # All weights are zero: short-circuit with a gradient-carrying
            # zero result.
            if pred.dim() == weight.dim() + 1:
                weight = weight.unsqueeze(1)
            return (pred * weight).sum()  # 0
        assert reduction_override in (None, 'none', 'mean', 'sum')
        reduction = (
            reduction_override if reduction_override else self.reduction)
        if weight is not None and weight.dim() > 1:
            # TODO: remove this in the future
            # reduce the weight of shape (n, 4) to (n,) to match the
            # ciou_loss of shape (n,)
            assert weight.shape == pred.shape
            weight = weight.mean(-1)
        loss = self.loss_weight * ciou_loss(
            pred,
            target,
            weight,
            eps=self.eps,
            reduction=reduction,
            avg_factor=avg_factor,
            **kwargs)
        return loss
| 15,714 | 32.084211 | 79 | py |
PseCo | PseCo-master/thirdparty/mmdetection/mmdet/models/losses/smooth_l1_loss.py | # Copyright (c) OpenMMLab. All rights reserved.
import mmcv
import torch
import torch.nn as nn
from ..builder import LOSSES
from .utils import weighted_loss
@mmcv.jit(derivate=True, coderize=True)
@weighted_loss
def smooth_l1_loss(pred, target, beta=1.0):
    """Smooth L1 loss.
    Args:
        pred (torch.Tensor): The prediction.
        target (torch.Tensor): The learning target of the prediction.
        beta (float, optional): The threshold in the piecewise function.
            Defaults to 1.0.
    Returns:
        torch.Tensor: Calculated loss
    """
    assert beta > 0
    # Empty target: return a zero that still participates in autograd.
    if target.numel() == 0:
        return pred.sum() * 0
    assert pred.size() == target.size()
    diff = torch.abs(pred - target)
    # Quadratic below beta, linear above; the pieces join smoothly at beta.
    loss = torch.where(diff < beta, 0.5 * diff * diff / beta,
                       diff - 0.5 * beta)
    return loss
@mmcv.jit(derivate=True, coderize=True)
@weighted_loss
def l1_loss(pred, target):
    """Elementwise L1 loss.

    Args:
        pred (torch.Tensor): The prediction.
        target (torch.Tensor): The learning target of the prediction.

    Returns:
        torch.Tensor: Absolute difference ``|pred - target|`` (no reduction;
            the ``weighted_loss`` decorator handles weighting and reduction).
    """
    # Empty target: return a zero that still participates in autograd.
    if target.numel() == 0:
        return pred.sum() * 0
    assert pred.size() == target.size()
    return (pred - target).abs()
@LOSSES.register_module()
class SmoothL1Loss(nn.Module):
    """Smooth L1 loss module wrapping :func:`smooth_l1_loss`.

    Args:
        beta (float, optional): Threshold of the piecewise function; the loss
            is quadratic below it and linear above it. Defaults to 1.0.
        reduction (str, optional): How the loss is reduced to a scalar.
            One of "none", "mean" and "sum". Defaults to "mean".
        loss_weight (float, optional): Scale applied to the final loss.
    """

    def __init__(self, beta=1.0, reduction='mean', loss_weight=1.0):
        super().__init__()
        self.beta = beta
        self.reduction = reduction
        self.loss_weight = loss_weight

    def forward(self,
                pred,
                target,
                weight=None,
                avg_factor=None,
                reduction_override=None,
                **kwargs):
        """Compute the weighted smooth-L1 loss.

        Args:
            pred (torch.Tensor): The prediction.
            target (torch.Tensor): The learning target of the prediction.
            weight (torch.Tensor, optional): Per-element loss weight.
                Defaults to None.
            avg_factor (int, optional): Normalizer used when averaging the
                loss. Defaults to None.
            reduction_override (str, optional): Overrides the module's
                reduction for this call. Defaults to None.
        """
        assert reduction_override in (None, 'none', 'mean', 'sum')
        if reduction_override:
            reduction = reduction_override
        else:
            reduction = self.reduction
        raw_loss = smooth_l1_loss(
            pred,
            target,
            weight,
            beta=self.beta,
            reduction=reduction,
            avg_factor=avg_factor,
            **kwargs)
        return self.loss_weight * raw_loss
@LOSSES.register_module()
class L1Loss(nn.Module):
    """L1 loss module wrapping :func:`l1_loss`.

    Args:
        reduction (str, optional): How the loss is reduced to a scalar.
            One of "none", "mean" and "sum".
        loss_weight (float, optional): Scale applied to the final loss.
    """

    def __init__(self, reduction='mean', loss_weight=1.0):
        super().__init__()
        self.reduction = reduction
        self.loss_weight = loss_weight

    def forward(self,
                pred,
                target,
                weight=None,
                avg_factor=None,
                reduction_override=None):
        """Compute the weighted L1 loss.

        Args:
            pred (torch.Tensor): The prediction.
            target (torch.Tensor): The learning target of the prediction.
            weight (torch.Tensor, optional): Per-element loss weight.
                Defaults to None.
            avg_factor (int, optional): Normalizer used when averaging the
                loss. Defaults to None.
            reduction_override (str, optional): Overrides the module's
                reduction for this call. Defaults to None.
        """
        assert reduction_override in (None, 'none', 'mean', 'sum')
        if reduction_override:
            reduction = reduction_override
        else:
            reduction = self.reduction
        raw_loss = l1_loss(
            pred, target, weight, reduction=reduction, avg_factor=avg_factor)
        return self.loss_weight * raw_loss
| 4,635 | 30.537415 | 78 | py |
PseCo | PseCo-master/thirdparty/mmdetection/mmdet/models/losses/gfocal_loss.py | # Copyright (c) OpenMMLab. All rights reserved.
import mmcv
import torch.nn as nn
import torch.nn.functional as F
from ..builder import LOSSES
from .utils import weighted_loss
@mmcv.jit(derivate=True, coderize=True)
@weighted_loss
def quality_focal_loss(pred, target, beta=2.0):
    r"""Quality Focal Loss (QFL) is from `Generalized Focal Loss: Learning
    Qualified and Distributed Bounding Boxes for Dense Object Detection
    <https://arxiv.org/abs/2006.04388>`_.
    Args:
        pred (torch.Tensor): Predicted joint representation of classification
            and quality (IoU) estimation with shape (N, C), C is the number of
            classes.
        target (tuple([torch.Tensor])): Target category label with shape (N,)
            and target quality label with shape (N,).
        beta (float): The beta parameter for calculating the modulating factor.
            Defaults to 2.0.
    Returns:
        torch.Tensor: Loss tensor with shape (N,).
    """
    assert len(target) == 2, """target for QFL must be a tuple of two elements,
        including category label and quality label, respectively"""
    # label denotes the category id, score denotes the quality score
    label, score = target
    # negatives are supervised by 0 quality score
    pred_sigmoid = pred.sigmoid()
    scale_factor = pred_sigmoid
    zerolabel = scale_factor.new_zeros(pred.shape)
    # Start from the all-negative loss, then overwrite the positive entries.
    loss = F.binary_cross_entropy_with_logits(
        pred, zerolabel, reduction='none') * scale_factor.pow(beta)
    # FG cat_id: [0, num_classes -1], BG cat_id: num_classes
    bg_class_ind = pred.size(1)
    pos = ((label >= 0) & (label < bg_class_ind)).nonzero().squeeze(1)
    pos_label = label[pos].long()
    # positives are supervised by bbox quality (IoU) score
    scale_factor = score[pos] - pred_sigmoid[pos, pos_label]
    loss[pos, pos_label] = F.binary_cross_entropy_with_logits(
        pred[pos, pos_label], score[pos],
        reduction='none') * scale_factor.abs().pow(beta)
    # Sum over classes to obtain one loss value per sample.
    loss = loss.sum(dim=1, keepdim=False)
    return loss
@mmcv.jit(derivate=True, coderize=True)
@weighted_loss
def distribution_focal_loss(pred, label):
    r"""Distribution Focal Loss (DFL) is from `Generalized Focal Loss: Learning
    Qualified and Distributed Bounding Boxes for Dense Object Detection
    <https://arxiv.org/abs/2006.04388>`_.
    Args:
        pred (torch.Tensor): Predicted general distribution of bounding boxes
            (before softmax) with shape (N, n+1), n is the max value of the
            integral set `{0, ..., n}` in paper.
        label (torch.Tensor): Target distance label for bounding boxes with
            shape (N,).
    Returns:
        torch.Tensor: Loss tensor with shape (N,).
    """
    # The continuous label is split between its two neighboring integer
    # bins, weighted by its distance to each (linear interpolation).
    dis_left = label.long()
    dis_right = dis_left + 1
    weight_left = dis_right.float() - label
    weight_right = label - dis_left.float()
    loss = F.cross_entropy(pred, dis_left, reduction='none') * weight_left \
        + F.cross_entropy(pred, dis_right, reduction='none') * weight_right
    return loss
@LOSSES.register_module()
class QualityFocalLoss(nn.Module):
    r"""Quality Focal Loss (QFL) is a variant of `Generalized Focal Loss:
    Learning Qualified and Distributed Bounding Boxes for Dense Object
    Detection <https://arxiv.org/abs/2006.04388>`_.
    Args:
        use_sigmoid (bool): Whether sigmoid operation is conducted in QFL.
            Defaults to True. Only True is currently supported.
        beta (float): The beta parameter for calculating the modulating factor.
            Defaults to 2.0.
        reduction (str): Options are "none", "mean" and "sum".
        loss_weight (float): Loss weight of current loss.
    """
    def __init__(self,
                 use_sigmoid=True,
                 beta=2.0,
                 reduction='mean',
                 loss_weight=1.0):
        super(QualityFocalLoss, self).__init__()
        assert use_sigmoid is True, 'Only sigmoid in QFL supported now.'
        self.use_sigmoid = use_sigmoid
        self.beta = beta
        self.reduction = reduction
        self.loss_weight = loss_weight
    def forward(self,
                pred,
                target,
                weight=None,
                avg_factor=None,
                reduction_override=None):
        """Forward function.
        Args:
            pred (torch.Tensor): Predicted joint representation of
                classification and quality (IoU) estimation with shape (N, C),
                C is the number of classes.
            target (tuple([torch.Tensor])): Target category label with shape
                (N,) and target quality label with shape (N,).
            weight (torch.Tensor, optional): The weight of loss for each
                prediction. Defaults to None.
            avg_factor (int, optional): Average factor that is used to average
                the loss. Defaults to None.
            reduction_override (str, optional): The reduction method used to
                override the original reduction method of the loss.
                Defaults to None.
        """
        assert reduction_override in (None, 'none', 'mean', 'sum')
        reduction = (
            reduction_override if reduction_override else self.reduction)
        if self.use_sigmoid:
            loss_cls = self.loss_weight * quality_focal_loss(
                pred,
                target,
                weight,
                beta=self.beta,
                reduction=reduction,
                avg_factor=avg_factor)
        else:
            # Guarded by the assert in __init__; kept for clarity.
            raise NotImplementedError
        return loss_cls
@LOSSES.register_module()
class DistributionFocalLoss(nn.Module):
    r"""Distribution Focal Loss (DFL) is a variant of `Generalized Focal Loss:
    Learning Qualified and Distributed Bounding Boxes for Dense Object
    Detection <https://arxiv.org/abs/2006.04388>`_.

    Args:
        reduction (str): Options are `'none'`, `'mean'` and `'sum'`.
        loss_weight (float): Loss weight of current loss.
    """

    def __init__(self, reduction='mean', loss_weight=1.0):
        super(DistributionFocalLoss, self).__init__()
        self.reduction = reduction
        self.loss_weight = loss_weight

    def forward(self,
                pred,
                target,
                weight=None,
                avg_factor=None,
                reduction_override=None):
        """Forward function.

        Args:
            pred (torch.Tensor): Predicted general distribution of bounding
                boxes (before softmax) with shape (N, n+1), n is the max value
                of the integral set `{0, ..., n}` in paper.
            target (torch.Tensor): Target distance label for bounding boxes
                with shape (N,).
            weight (torch.Tensor, optional): The weight of loss for each
                prediction. Defaults to None.
            avg_factor (int, optional): Average factor that is used to average
                the loss. Defaults to None.
            reduction_override (str, optional): The reduction method used to
                override the original reduction method of the loss.
                Defaults to None.
        """
        assert reduction_override in (None, 'none', 'mean', 'sum')
        reduction = (
            reduction_override if reduction_override else self.reduction)
        return self.loss_weight * distribution_focal_loss(
            pred, target, weight, reduction=reduction, avg_factor=avg_factor)
| 7,458 | 38.257895 | 79 | py |
PseCo | PseCo-master/thirdparty/mmdetection/mmdet/models/losses/varifocal_loss.py | # Copyright (c) OpenMMLab. All rights reserved.
import mmcv
import torch.nn as nn
import torch.nn.functional as F
from ..builder import LOSSES
from .utils import weight_reduce_loss
@mmcv.jit(derivate=True, coderize=True)
def varifocal_loss(pred,
                   target,
                   weight=None,
                   alpha=0.75,
                   gamma=2.0,
                   iou_weighted=True,
                   reduction='mean',
                   avg_factor=None):
    """`Varifocal Loss <https://arxiv.org/abs/2008.13367>`_

    Args:
        pred (torch.Tensor): The prediction with shape (N, C), C is the
            number of classes.
        target (torch.Tensor): The learning target of the iou-aware
            classification score with shape (N, C), C is the number of
            classes.
        weight (torch.Tensor, optional): The weight of loss for each
            prediction. Defaults to None.
        alpha (float, optional): A balance factor for the negative part of
            Varifocal Loss, which is different from the alpha of Focal Loss.
            Defaults to 0.75.
        gamma (float, optional): The gamma for calculating the modulating
            factor. Defaults to 2.0.
        iou_weighted (bool, optional): Whether to weight the loss of the
            positive example with the iou target. Defaults to True.
        reduction (str, optional): The method used to reduce the loss into
            a scalar. Defaults to 'mean'. Options are "none", "mean" and
            "sum".
        avg_factor (int, optional): Average factor that is used to average
            the loss. Defaults to None.
    """
    # pred and target should be of the same size
    assert pred.size() == target.size()
    prob = pred.sigmoid()
    target = target.type_as(pred)
    pos_mask = (target > 0.0).float()
    neg_mask = (target <= 0.0).float()
    # Negatives are always down-weighted by the focal modulating factor.
    neg_part = alpha * (prob - target).abs().pow(gamma) * neg_mask
    if iou_weighted:
        # Positives are additionally weighted by the IoU target itself.
        focal_weight = target * pos_mask + neg_part
    else:
        focal_weight = pos_mask + neg_part
    loss = F.binary_cross_entropy_with_logits(
        pred, target, reduction='none') * focal_weight
    return weight_reduce_loss(loss, weight, reduction, avg_factor)
@LOSSES.register_module()
class VarifocalLoss(nn.Module):
    """`Varifocal Loss <https://arxiv.org/abs/2008.13367>`_

    Args:
        use_sigmoid (bool, optional): Whether the prediction is
            used for sigmoid or softmax. Defaults to True.
        alpha (float, optional): A balance factor for the negative part of
            Varifocal Loss, which is different from the alpha of Focal
            Loss. Defaults to 0.75.
        gamma (float, optional): The gamma for calculating the modulating
            factor. Defaults to 2.0.
        iou_weighted (bool, optional): Whether to weight the loss of the
            positive examples with the iou target. Defaults to True.
        reduction (str, optional): The method used to reduce the loss into
            a scalar. Defaults to 'mean'. Options are "none", "mean" and
            "sum".
        loss_weight (float, optional): Weight of loss. Defaults to 1.0.
    """

    def __init__(self,
                 use_sigmoid=True,
                 alpha=0.75,
                 gamma=2.0,
                 iou_weighted=True,
                 reduction='mean',
                 loss_weight=1.0):
        super(VarifocalLoss, self).__init__()
        # Only the sigmoid formulation is implemented.
        assert use_sigmoid is True, \
            'Only sigmoid varifocal loss supported now.'
        assert alpha >= 0.0
        self.use_sigmoid = use_sigmoid
        self.alpha = alpha
        self.gamma = gamma
        self.iou_weighted = iou_weighted
        self.reduction = reduction
        self.loss_weight = loss_weight

    def forward(self,
                pred,
                target,
                weight=None,
                avg_factor=None,
                reduction_override=None):
        """Forward function.

        Args:
            pred (torch.Tensor): The prediction.
            target (torch.Tensor): The learning target of the prediction.
            weight (torch.Tensor, optional): The weight of loss for each
                prediction. Defaults to None.
            avg_factor (int, optional): Average factor that is used to average
                the loss. Defaults to None.
            reduction_override (str, optional): The reduction method used to
                override the original reduction method of the loss.
                Options are "none", "mean" and "sum".

        Returns:
            torch.Tensor: The calculated loss
        """
        assert reduction_override in (None, 'none', 'mean', 'sum')
        reduction = (
            reduction_override if reduction_override else self.reduction)
        if not self.use_sigmoid:
            raise NotImplementedError
        return self.loss_weight * varifocal_loss(
            pred,
            target,
            weight,
            alpha=self.alpha,
            gamma=self.gamma,
            iou_weighted=self.iou_weighted,
            reduction=reduction,
            avg_factor=avg_factor)
| 5,365 | 38.748148 | 79 | py |
PseCo | PseCo-master/thirdparty/mmdetection/mmdet/models/losses/utils.py | # Copyright (c) OpenMMLab. All rights reserved.
import functools
import mmcv
import torch.nn.functional as F
def reduce_loss(loss, reduction):
    """Reduce loss as specified.

    Args:
        loss (Tensor): Elementwise loss tensor.
        reduction (str): Options are "none", "mean" and "sum".

    Return:
        Tensor: Reduced loss tensor.
    """
    # get_enum maps: 'none' -> 0, 'mean' -> 1, 'sum' -> 2,
    # and raises ValueError for any other string.
    reduction_enum = F._Reduction.get_enum(reduction)
    if reduction_enum == 1:
        return loss.mean()
    elif reduction_enum == 2:
        return loss.sum()
    # 'none': return the elementwise loss unchanged.
    return loss
@mmcv.jit(derivate=True, coderize=True)
def weight_reduce_loss(loss, weight=None, reduction='mean', avg_factor=None):
    """Apply element-wise weight and reduce loss.

    Args:
        loss (Tensor): Element-wise loss.
        weight (Tensor): Element-wise weights.
        reduction (str): Same as built-in losses of PyTorch.
        avg_factor (float): Avarage factor when computing the mean of losses.

    Returns:
        Tensor: Processed loss values.
    """
    # Apply the element-wise weight first, if given.
    if weight is not None:
        loss = loss * weight
    # Without an explicit averaging factor, fall back to plain reduction.
    if avg_factor is None:
        return reduce_loss(loss, reduction)
    # With avg_factor, 'mean' divides by it instead of by element count.
    if reduction == 'mean':
        return loss.sum() / avg_factor
    # 'none' leaves the loss untouched; 'sum' is incompatible.
    if reduction == 'none':
        return loss
    raise ValueError('avg_factor can not be used with reduction="sum"')
def weighted_loss(loss_func):
    """Create a weighted version of a given loss function.

    To use this decorator, the loss function must have the signature like
    `loss_func(pred, target, **kwargs)`. The function only needs to compute
    element-wise loss without any reduction. This decorator will add weight
    and reduction arguments to the function. The decorated function will have
    the signature like `loss_func(pred, target, weight=None, reduction='mean',
    avg_factor=None, **kwargs)`.

    :Example:

    >>> import torch
    >>> @weighted_loss
    >>> def l1_loss(pred, target):
    >>>     return (pred - target).abs()

    >>> pred = torch.Tensor([0, 2, 3])
    >>> target = torch.Tensor([1, 1, 1])
    >>> weight = torch.Tensor([1, 0, 1])

    >>> l1_loss(pred, target)
    tensor(1.3333)
    >>> l1_loss(pred, target, weight)
    tensor(1.)
    >>> l1_loss(pred, target, reduction='none')
    tensor([1., 1., 2.])
    >>> l1_loss(pred, target, weight, avg_factor=2)
    tensor(1.5000)
    """

    @functools.wraps(loss_func)
    def wrapper(pred,
                target,
                weight=None,
                reduction='mean',
                avg_factor=None,
                **kwargs):
        # Compute the raw element-wise loss, then weight and reduce it.
        elementwise = loss_func(pred, target, **kwargs)
        return weight_reduce_loss(elementwise, weight, reduction, avg_factor)

    return wrapper
PseCo | PseCo-master/thirdparty/mmdetection/mmdet/models/losses/seesaw_loss.py | # Copyright (c) OpenMMLab. All rights reserved.
import torch
import torch.nn as nn
import torch.nn.functional as F
from ..builder import LOSSES
from .accuracy import accuracy
from .cross_entropy_loss import cross_entropy
from .utils import weight_reduce_loss
def seesaw_ce_loss(cls_score,
                   labels,
                   label_weights,
                   cum_samples,
                   num_classes,
                   p,
                   q,
                   eps,
                   reduction='mean',
                   avg_factor=None):
    """Calculate the Seesaw CrossEntropy loss.

    Args:
        cls_score (torch.Tensor): The prediction with shape (N, C),
            C is the number of classes.
        labels (torch.Tensor): The learning label of the prediction.
        label_weights (torch.Tensor): Sample-wise loss weight.
        cum_samples (torch.Tensor): Cumulative samples for each category.
        num_classes (int): The number of classes.
        p (float): The ``p`` in the mitigation factor.
        q (float): The ``q`` in the compenstation factor.
        eps (float): The minimal value of divisor to smooth
            the computation of compensation factor.
        reduction (str, optional): The method used to reduce the loss.
        avg_factor (int, optional): Average factor that is used to average
            the loss. Defaults to None.

    Returns:
        torch.Tensor: The calculated loss
    """
    assert cls_score.size(-1) == num_classes
    assert len(cum_samples) == num_classes

    onehot = F.one_hot(labels, num_classes)
    seesaw_weights = cls_score.new_ones(onehot.size())

    # Mitigation factor: down-weight gradients from head classes onto
    # tail classes based on the ratio of accumulated sample counts.
    if p > 0:
        ratio = cum_samples[None, :].clamp(
            min=1) / cum_samples[:, None].clamp(min=1)
        lt_one = (ratio < 1.0).float()
        pair_weights = ratio.pow(p) * lt_one + (1 - lt_one)
        seesaw_weights = seesaw_weights * pair_weights[labels.long(), :]

    # Compensation factor: re-amplify the penalty on classes whose score
    # exceeds the score of the ground-truth class.
    if q > 0:
        scores = F.softmax(cls_score.detach(), dim=1)
        row_idx = torch.arange(0, len(scores)).to(scores.device).long()
        gt_scores = scores[row_idx, labels.long()]
        rel_scores = scores / gt_scores[:, None].clamp(min=eps)
        gt_one = (rel_scores > 1.0).float()
        seesaw_weights = seesaw_weights * (
            rel_scores.pow(q) * gt_one + (1 - gt_one))

    # Fold the seesaw weights into the logits of all non-target classes.
    cls_score = cls_score + (seesaw_weights.log() * (1 - onehot))

    loss = F.cross_entropy(cls_score, labels, weight=None, reduction='none')

    if label_weights is not None:
        label_weights = label_weights.float()
    return weight_reduce_loss(
        loss, weight=label_weights, reduction=reduction, avg_factor=avg_factor)
@LOSSES.register_module()
class SeesawLoss(nn.Module):
    """
    Seesaw Loss for Long-Tailed Instance Segmentation (CVPR 2021)
    arXiv: https://arxiv.org/abs/2008.10032

    The classifier is expected to output ``num_classes + 2`` channels:
    ``num_classes`` class logits plus a 2-channel objectness head
    (index 0 = positive/foreground, index 1 = negative/background).

    Args:
        use_sigmoid (bool, optional): Whether the prediction uses sigmoid
            of softmax. Only False is supported.
        p (float, optional): The ``p`` in the mitigation factor.
            Defaults to 0.8.
        q (float, optional): The ``q`` in the compenstation factor.
            Defaults to 2.0.
        num_classes (int, optional): The number of classes.
            Default to 1203 for LVIS v1 dataset.
        eps (float, optional): The minimal value of divisor to smooth
            the computation of compensation factor
        reduction (str, optional): The method that reduces the loss to a
            scalar. Options are "none", "mean" and "sum".
        loss_weight (float, optional): The weight of the loss. Defaults to 1.0
        return_dict (bool, optional): Whether return the losses as a dict.
            Default to True.
    """

    def __init__(self,
                 use_sigmoid=False,
                 p=0.8,
                 q=2.0,
                 num_classes=1203,
                 eps=1e-2,
                 reduction='mean',
                 loss_weight=1.0,
                 return_dict=True):
        super(SeesawLoss, self).__init__()
        assert not use_sigmoid
        self.use_sigmoid = False
        self.p = p
        self.q = q
        self.num_classes = num_classes
        self.eps = eps
        self.reduction = reduction
        self.loss_weight = loss_weight
        self.return_dict = return_dict

        # 0 for pos, 1 for neg
        self.cls_criterion = seesaw_ce_loss

        # cumulative samples for each category; kept as a buffer so it is
        # saved/loaded with the model state dict (updated in-place in forward)
        self.register_buffer(
            'cum_samples',
            torch.zeros(self.num_classes + 1, dtype=torch.float))

        # custom output channels of the classifier
        self.custom_cls_channels = True
        # custom activation of cls_score
        self.custom_activation = True
        # custom accuracy of the classsifier
        self.custom_accuracy = True

    def _split_cls_score(self, cls_score):
        # split cls_score to cls_score_classes and cls_score_objectness
        assert cls_score.size(-1) == self.num_classes + 2
        cls_score_classes = cls_score[..., :-2]
        cls_score_objectness = cls_score[..., -2:]
        return cls_score_classes, cls_score_objectness

    def get_cls_channels(self, num_classes):
        """Get custom classification channels.

        Args:
            num_classes (int): The number of classes.

        Returns:
            int: The custom classification channels.
        """
        assert num_classes == self.num_classes
        # num_classes class logits + 2 objectness logits
        return num_classes + 2

    def get_activation(self, cls_score):
        """Get custom activation of cls_score.

        Args:
            cls_score (torch.Tensor): The prediction with shape (N, C + 2).

        Returns:
            torch.Tensor: The custom activation of cls_score with shape
                 (N, C + 1).
        """
        cls_score_classes, cls_score_objectness = self._split_cls_score(
            cls_score)
        score_classes = F.softmax(cls_score_classes, dim=-1)
        score_objectness = F.softmax(cls_score_objectness, dim=-1)
        score_pos = score_objectness[..., [0]]
        score_neg = score_objectness[..., [1]]
        # class probabilities are gated by the foreground probability;
        # the background probability is appended as the last channel
        score_classes = score_classes * score_pos
        scores = torch.cat([score_classes, score_neg], dim=-1)
        return scores

    def get_accuracy(self, cls_score, labels):
        """Get custom accuracy w.r.t. cls_score and labels.

        Args:
            cls_score (torch.Tensor): The prediction with shape (N, C + 2).
            labels (torch.Tensor): The learning label of the prediction.

        Returns:
            Dict [str, torch.Tensor]: The accuracy for objectness and classes,
                 respectively.
        """
        pos_inds = labels < self.num_classes
        # label == num_classes means background (negative)
        obj_labels = (labels == self.num_classes).long()
        cls_score_classes, cls_score_objectness = self._split_cls_score(
            cls_score)
        acc_objectness = accuracy(cls_score_objectness, obj_labels)
        acc_classes = accuracy(cls_score_classes[pos_inds], labels[pos_inds])
        acc = dict()
        acc['acc_objectness'] = acc_objectness
        acc['acc_classes'] = acc_classes
        return acc

    def forward(self,
                cls_score,
                labels,
                label_weights=None,
                avg_factor=None,
                reduction_override=None):
        """Forward function.

        Args:
            cls_score (torch.Tensor): The prediction with shape (N, C + 2).
            labels (torch.Tensor): The learning label of the prediction.
            label_weights (torch.Tensor, optional): Sample-wise loss weight.
            avg_factor (int, optional): Average factor that is used to average
                 the loss. Defaults to None.
            reduction (str, optional): The method used to reduce the loss.
                 Options are "none", "mean" and "sum".

        Returns:
            torch.Tensor | Dict [str, torch.Tensor]:
                 if return_dict == False: The calculated loss |
                 if return_dict == True: The dict of calculated losses
                 for objectness and classes, respectively.
        """
        assert reduction_override in (None, 'none', 'mean', 'sum')
        reduction = (
            reduction_override if reduction_override else self.reduction)
        assert cls_score.size(-1) == self.num_classes + 2
        pos_inds = labels < self.num_classes
        # 0 for pos, 1 for neg
        obj_labels = (labels == self.num_classes).long()

        # accumulate the samples for each category (in-place buffer update;
        # this makes the loss stateful across iterations)
        unique_labels = labels.unique()
        for u_l in unique_labels:
            inds_ = labels == u_l.item()
            self.cum_samples[u_l] += inds_.sum()

        if label_weights is not None:
            label_weights = label_weights.float()
        else:
            label_weights = labels.new_ones(labels.size(), dtype=torch.float)

        cls_score_classes, cls_score_objectness = self._split_cls_score(
            cls_score)
        # calculate loss_cls_classes (only need pos samples)
        if pos_inds.sum() > 0:
            loss_cls_classes = self.loss_weight * self.cls_criterion(
                cls_score_classes[pos_inds], labels[pos_inds],
                label_weights[pos_inds], self.cum_samples[:self.num_classes],
                self.num_classes, self.p, self.q, self.eps, reduction,
                avg_factor)
        else:
            # no positives: produce a zero loss that keeps the graph connected
            loss_cls_classes = cls_score_classes[pos_inds].sum()
        # calculate loss_cls_objectness
        loss_cls_objectness = self.loss_weight * cross_entropy(
            cls_score_objectness, obj_labels, label_weights, reduction,
            avg_factor)

        if self.return_dict:
            loss_cls = dict()
            loss_cls['loss_cls_objectness'] = loss_cls_objectness
            loss_cls['loss_cls_classes'] = loss_cls_classes
        else:
            loss_cls = loss_cls_classes + loss_cls_objectness
        return loss_cls
| 10,136 | 37.543726 | 79 | py |
PseCo | PseCo-master/thirdparty/mmdetection/mmdet/models/losses/ae_loss.py | # Copyright (c) OpenMMLab. All rights reserved.
import mmcv
import torch
import torch.nn as nn
import torch.nn.functional as F
from ..builder import LOSSES
@mmcv.jit(derivate=True, coderize=True)
def ae_loss_per_image(tl_preds, br_preds, match):
    """Associative Embedding Loss in one image.

    Associative Embedding Loss including two parts: pull loss and push loss.
    Pull loss makes embedding vectors from same object closer to each other.
    Push loss distinguish embedding vector from different objects, and makes
    the gap between them is large enough.

    During computing, usually there are 3 cases:
        - no object in image: both pull loss and push loss will be 0.
        - one object in image: push loss will be 0 and pull loss is computed
            by the two corner of the only object.
        - more than one objects in image: pull loss is computed by corner pairs
            from each object, push loss is computed by each object with all
            other objects. We use confusion matrix with 0 in diagonal to
            compute the push loss.

    Args:
        tl_preds (tensor): Embedding feature map of left-top corner.
        br_preds (tensor): Embedding feature map of bottim-right corner.
        match (list): Downsampled coordinates pair of each ground truth box.
    """
    # No object: zero losses that stay attached to the graph.
    if len(match) == 0:
        return tl_preds.sum() * 0., tl_preds.sum() * 0.

    tl_embeds, br_embeds, centers = [], [], []
    for [tl_y, tl_x], [br_y, br_x] in match:
        tl_e = tl_preds[:, tl_y, tl_x].view(-1, 1)
        br_e = br_preds[:, br_y, br_x].view(-1, 1)
        tl_embeds.append(tl_e)
        br_embeds.append(br_e)
        centers.append((tl_e + br_e) / 2.0)

    tl_embeds = torch.cat(tl_embeds)
    br_embeds = torch.cat(br_embeds)
    centers = torch.cat(centers)
    assert tl_embeds.size() == br_embeds.size()

    # N is object number in image, M is dimension of embedding vector
    N, M = tl_embeds.size()
    pull_loss = ((tl_embeds - centers).pow(2) +
                 (br_embeds - centers).pow(2)).sum() / N

    margin = 1  # exp setting of CornerNet, details in section 3.3 of paper

    # confusion matrix of push loss; the diagonal (same object) is zeroed
    conf_mat = centers.expand((N, N, M)).permute(1, 0, 2) - centers
    conf_weight = 1 - torch.eye(N).type_as(centers)
    conf_mat = conf_weight * (margin - conf_mat.sum(-1).abs())

    if N > 1:  # more than one object in current image
        push_loss = F.relu(conf_mat).sum() / (N * (N - 1))
    else:
        push_loss = tl_preds.sum() * 0.

    return pull_loss, push_loss
@LOSSES.register_module()
class AssociativeEmbeddingLoss(nn.Module):
    """Associative Embedding Loss.

    More details can be found in
    `Associative Embedding <https://arxiv.org/abs/1611.05424>`_ and
    `CornerNet <https://arxiv.org/abs/1808.01244>`_ .
    Code is modified from `kp_utils.py <https://github.com/princeton-vl/CornerNet/blob/master/models/py_utils/kp_utils.py#L180>`_  # noqa: E501

    Args:
        pull_weight (float): Loss weight for corners from same object.
        push_weight (float): Loss weight for corners from different object.
    """

    def __init__(self, pull_weight=0.25, push_weight=0.25):
        super(AssociativeEmbeddingLoss, self).__init__()
        self.pull_weight = pull_weight
        self.push_weight = push_weight

    def forward(self, pred, target, match):
        """Forward function."""
        num_imgs = pred.size(0)
        total_pull, total_push = 0.0, 0.0
        # Accumulate the weighted per-image losses over the batch.
        for idx in range(num_imgs):
            pull, push = ae_loss_per_image(pred[idx], target[idx], match[idx])
            total_pull += self.pull_weight * pull
            total_push += self.push_weight * push
        return total_pull, total_push
| 3,857 | 36.096154 | 143 | py |
PseCo | PseCo-master/thirdparty/mmdetection/mmdet/models/losses/accuracy.py | # Copyright (c) OpenMMLab. All rights reserved.
import mmcv
import torch.nn as nn
@mmcv.jit(coderize=True)
def accuracy(pred, target, topk=1, thresh=None):
    """Calculate accuracy according to the prediction and target.

    Args:
        pred (torch.Tensor): The model prediction, shape (N, num_class)
        target (torch.Tensor): The target of each prediction, shape (N, )
        topk (int | tuple[int], optional): If the predictions in ``topk``
            matches the target, the predictions will be regarded as
            correct ones. Defaults to 1.
        thresh (float, optional): If not None, predictions with scores under
            this threshold are considered incorrect. Default to None.

    Returns:
        float | tuple[float]: If the input ``topk`` is a single integer,
            the function will return a single float as accuracy. If
            ``topk`` is a tuple containing multiple integers, the
            function will return a tuple containing accuracies of
            each ``topk`` number.
    """
    assert isinstance(topk, (int, tuple))
    return_single = isinstance(topk, int)
    if return_single:
        topk = (topk, )
    maxk = max(topk)

    # Empty batch: every requested top-k accuracy is 0.
    if pred.size(0) == 0:
        zeros = [pred.new_tensor(0.) for _ in range(len(topk))]
        return zeros[0] if return_single else zeros

    assert pred.ndim == 2 and target.ndim == 1
    assert pred.size(0) == target.size(0)
    assert maxk <= pred.size(1), \
        f'maxk {maxk} exceeds pred dimension {pred.size(1)}'

    pred_value, pred_label = pred.topk(maxk, dim=1)
    pred_label = pred_label.t()  # transpose to shape (maxk, N)
    correct = pred_label.eq(target.view(1, -1).expand_as(pred_label))
    if thresh is not None:
        # Only prediction values larger than thresh are counted as correct
        correct = correct & (pred_value > thresh).t()

    res = []
    for k in topk:
        correct_k = correct[:k].reshape(-1).float().sum(0, keepdim=True)
        res.append(correct_k.mul_(100.0 / pred.size(0)))
    return res[0] if return_single else res
class Accuracy(nn.Module):

    def __init__(self, topk=(1, ), thresh=None):
        """Module to calculate the accuracy.

        Args:
            topk (tuple, optional): The criterion used to calculate the
                accuracy. Defaults to (1,).
            thresh (float, optional): If not None, predictions with scores
                under this threshold are considered incorrect. Default to None.
        """
        super().__init__()
        self.topk = topk
        self.thresh = thresh

    def forward(self, pred, target):
        """Forward function to calculate accuracy.

        Args:
            pred (torch.Tensor): Prediction of models.
            target (torch.Tensor): Target for each prediction.

        Returns:
            tuple[float]: The accuracies under different topk criterions.
        """
        # Delegate to the functional implementation with stored settings.
        return accuracy(pred, target, topk=self.topk, thresh=self.thresh)
| 2,990 | 36.3875 | 79 | py |
PseCo | PseCo-master/thirdparty/mmdetection/mmdet/models/losses/focal_loss.py | # Copyright (c) OpenMMLab. All rights reserved.
import torch
import torch.nn as nn
import torch.nn.functional as F
from mmcv.ops import sigmoid_focal_loss as _sigmoid_focal_loss
from ..builder import LOSSES
from .utils import weight_reduce_loss
import ipdb
# This method is only for debugging
def py_sigmoid_focal_loss(pred,
                          target,
                          weight=None,
                          gamma=2.0,
                          alpha=0.25,
                          reduction='mean',
                          avg_factor=None):
    """PyTorch version of `Focal Loss <https://arxiv.org/abs/1708.02002>`_.

    Args:
        pred (torch.Tensor): The prediction with shape (N, C), C is the
            number of classes
        target (torch.Tensor): The learning label of the prediction.
        weight (torch.Tensor, optional): Sample-wise loss weight.
        gamma (float, optional): The gamma for calculating the modulating
            factor. Defaults to 2.0.
        alpha (float, optional): A balanced form for Focal Loss.
            Defaults to 0.25.
        reduction (str, optional): The method used to reduce the loss into
            a scalar. Defaults to 'mean'.
        avg_factor (int, optional): Average factor that is used to average
            the loss. Defaults to None.
    """
    prob = pred.sigmoid()
    target = target.type_as(pred)
    # pt is the probability assigned to the wrong outcome.
    pt = (1 - prob) * target + prob * (1 - target)
    focal_weight = (alpha * target + (1 - alpha) *
                    (1 - target)) * pt.pow(gamma)
    loss = F.binary_cross_entropy_with_logits(
        pred, target, reduction='none') * focal_weight
    if weight is not None:
        if weight.shape != loss.shape:
            if weight.size(0) == loss.size(0):
                # For most cases, weight is of shape (num_priors, ),
                # which means it does not have the second axis num_class
                weight = weight.view(-1, 1)
            else:
                # Sometimes, weight per anchor per class is also needed. e.g.
                # in FSAF. But it may be flattened of shape
                # (num_priors x num_class, ), while loss is still of shape
                # (num_priors, num_class).
                assert weight.numel() == loss.numel()
                weight = weight.view(loss.size(0), -1)
        assert weight.ndim == loss.ndim
    return weight_reduce_loss(loss, weight, reduction, avg_factor)
def sigmoid_focal_loss(pred,
                       target,
                       weight=None,
                       gamma=2.0,
                       alpha=0.25,
                       reduction='mean',
                       avg_factor=None):
    r"""A warpper of cuda version `Focal Loss
    <https://arxiv.org/abs/1708.02002>`_.

    Args:
        pred (torch.Tensor): The prediction with shape (N, C), C is the number
            of classes.
        target (torch.Tensor): The learning label of the prediction.
        weight (torch.Tensor, optional): Sample-wise loss weight.
        gamma (float, optional): The gamma for calculating the modulating
            factor. Defaults to 2.0.
        alpha (float, optional): A balanced form for Focal Loss.
            Defaults to 0.25.
        reduction (str, optional): The method used to reduce the loss into
            a scalar. Defaults to 'mean'. Options are "none", "mean" and "sum".
        avg_factor (int, optional): Average factor that is used to average
            the loss. Defaults to None.
    """
    # Function.apply does not accept keyword arguments, so the decorator
    # "weighted_loss" is not applicable
    loss = _sigmoid_focal_loss(pred.contiguous(), target.contiguous(), gamma,
                               alpha, None, 'none')
    if weight is not None:
        if weight.shape != loss.shape:
            if weight.size(0) == loss.size(0):
                # For most cases, weight is of shape (num_priors, ),
                # which means it does not have the second axis num_class
                weight = weight.view(-1, 1)
            else:
                # Sometimes, weight per anchor per class is also needed. e.g.
                # in FSAF. But it may be flattened of shape
                # (num_priors x num_class, ), while loss is still of shape
                # (num_priors, num_class).
                assert weight.numel() == loss.numel()
                weight = weight.view(loss.size(0), -1)
        assert weight.ndim == loss.ndim
    return weight_reduce_loss(loss, weight, reduction, avg_factor)
@LOSSES.register_module()
class FocalLoss(nn.Module):

    def __init__(self,
                 use_sigmoid=True,
                 gamma=2.0,
                 alpha=0.25,
                 reduction='mean',
                 loss_weight=1.0):
        """`Focal Loss <https://arxiv.org/abs/1708.02002>`_

        Args:
            use_sigmoid (bool, optional): Whether to the prediction is
                used for sigmoid or softmax. Defaults to True.
            gamma (float, optional): The gamma for calculating the modulating
                factor. Defaults to 2.0.
            alpha (float, optional): A balanced form for Focal Loss.
                Defaults to 0.25.
            reduction (str, optional): The method used to reduce the loss into
                a scalar. Defaults to 'mean'. Options are "none", "mean" and
                "sum".
            loss_weight (float, optional): Weight of loss. Defaults to 1.0.
        """
        super(FocalLoss, self).__init__()
        assert use_sigmoid is True, 'Only sigmoid focal loss supported now.'
        self.use_sigmoid = use_sigmoid
        self.gamma = gamma
        self.alpha = alpha
        self.reduction = reduction
        self.loss_weight = loss_weight

    def forward(self,
                pred,
                target,
                weight=None,
                avg_factor=None,
                reduction_override=None):
        """Forward function.

        Args:
            pred (torch.Tensor): The prediction.
            target (torch.Tensor): The learning label of the prediction.
            weight (torch.Tensor, optional): The weight of loss for each
                prediction. Defaults to None.
            avg_factor (int, optional): Average factor that is used to average
                the loss. Defaults to None.
            reduction_override (str, optional): The reduction method used to
                override the original reduction method of the loss.
                Options are "none", "mean" and "sum".

        Returns:
            torch.Tensor: The calculated loss
        """
        assert reduction_override in (None, 'none', 'mean', 'sum')
        reduction = (
            reduction_override if reduction_override else self.reduction)
        if not self.use_sigmoid:
            raise NotImplementedError
        if torch.cuda.is_available() and pred.is_cuda:
            # CUDA tensors use the fused mmcv kernel.
            loss_fn = sigmoid_focal_loss
        else:
            # CPU fallback: expand labels to one-hot (dropping the extra
            # background column) and use the pure-PyTorch implementation.
            num_classes = pred.size(1)
            target = F.one_hot(target, num_classes=num_classes + 1)
            target = target[:, :num_classes]
            loss_fn = py_sigmoid_focal_loss
        return self.loss_weight * loss_fn(
            pred,
            target,
            weight,
            gamma=self.gamma,
            alpha=self.alpha,
            reduction=reduction,
            avg_factor=avg_factor)
| 7,589 | 40.47541 | 79 | py |
PseCo | PseCo-master/thirdparty/mmdetection/mmdet/models/losses/cross_entropy_loss.py | # Copyright (c) OpenMMLab. All rights reserved.
import torch
import torch.nn as nn
import torch.nn.functional as F
from ..builder import LOSSES
from .utils import weight_reduce_loss
def cross_entropy(pred,
                  label,
                  weight=None,
                  reduction='mean',
                  avg_factor=None,
                  class_weight=None,
                  ignore_index=-100):
    """Calculate the CrossEntropy loss.

    Args:
        pred (torch.Tensor): The prediction with shape (N, C), C is the number
            of classes.
        label (torch.Tensor): The learning label of the prediction.
        weight (torch.Tensor, optional): Sample-wise loss weight.
        reduction (str, optional): The method used to reduce the loss.
        avg_factor (int, optional): Average factor that is used to average
            the loss. Defaults to None.
        class_weight (list[float], optional): The weight for each class.
        ignore_index (int | None): The label index to be ignored.
            If None, it will be set to default value. Default: -100.

    Returns:
        torch.Tensor: The calculated loss
    """
    # None falls back to the same default used by F.cross_entropy.
    if ignore_index is None:
        ignore_index = -100

    # Element-wise loss first; weighting/reduction is applied afterwards.
    elementwise = F.cross_entropy(
        pred,
        label,
        weight=class_weight,
        reduction='none',
        ignore_index=ignore_index)

    if weight is not None:
        weight = weight.float()
    return weight_reduce_loss(
        elementwise, weight=weight, reduction=reduction, avg_factor=avg_factor)
def _expand_onehot_labels(labels, label_weights, label_channels, ignore_index):
"""Expand onehot labels to match the size of prediction."""
bin_labels = labels.new_full((labels.size(0), label_channels), 0)
valid_mask = (labels >= 0) & (labels != ignore_index)
inds = torch.nonzero(
valid_mask & (labels < label_channels), as_tuple=False)
if inds.numel() > 0:
bin_labels[inds, labels[inds]] = 1
valid_mask = valid_mask.view(-1, 1).expand(labels.size(0),
label_channels).float()
if label_weights is None:
bin_label_weights = valid_mask
else:
bin_label_weights = label_weights.view(-1, 1).repeat(1, label_channels)
bin_label_weights *= valid_mask
return bin_labels, bin_label_weights
def binary_cross_entropy(pred,
                         label,
                         weight=None,
                         reduction='mean',
                         avg_factor=None,
                         class_weight=None,
                         ignore_index=-100):
    """Binary cross-entropy loss on logits.

    Args:
        pred (torch.Tensor): Predicted logits of shape (N, C) or (N, 1).
        label (torch.Tensor): Targets; class indices are expanded to one-hot
            when their dimensionality differs from ``pred``.
        weight (torch.Tensor, optional): Per-sample loss weight.
        reduction (str, optional): 'none', 'mean' or 'sum'.
        avg_factor (int, optional): Normalizer used when averaging the loss.
            Defaults to None.
        class_weight (list[float], optional): Per-class positive weights.
        ignore_index (int | None): Label value excluded from the loss; None
            falls back to the default value. Default: -100.

    Returns:
        torch.Tensor: The calculated loss.
    """
    # ``None`` maps to the same default that ``F.cross_entropy`` uses.
    if ignore_index is None:
        ignore_index = -100

    # Hard index labels are expanded to one-hot so they line up with the
    # (N, C) logits; the expansion also folds the validity mask into weight.
    if pred.dim() != label.dim():
        label, weight = _expand_onehot_labels(label, weight, pred.size(-1),
                                              ignore_index)

    sample_weight = None if weight is None else weight.float()
    elementwise = F.binary_cross_entropy_with_logits(
        pred, label.float(), pos_weight=class_weight, reduction='none')
    return weight_reduce_loss(
        elementwise, sample_weight, reduction=reduction, avg_factor=avg_factor)
def mask_cross_entropy(pred,
                       target,
                       label,
                       reduction='mean',
                       avg_factor=None,
                       class_weight=None,
                       ignore_index=None):
    """Binary cross-entropy loss for mask prediction.

    For each RoI, only the mask channel of its ground-truth class is
    compared against the target (class-specific mask heads).

    Args:
        pred (torch.Tensor): Predicted mask logits of shape (N, C, *).
        target (torch.Tensor): Binary mask targets, shape matching one
            class slice of ``pred``.
        label (torch.Tensor): Class index per RoI, used to pick the slice.
        reduction (str, optional): Only 'mean' is currently supported.
        avg_factor (int, optional): Reserved; must be None.
        class_weight (list[float], optional): Element-wise BCE weight.
        ignore_index (None): Placeholder for interface consistency.

    Returns:
        torch.Tensor: Loss tensor of shape (1,).
    """
    assert ignore_index is None, 'BCE loss does not support ignore_index'
    # TODO: handle these two reserved arguments
    assert reduction == 'mean' and avg_factor is None
    # Select each RoI's mask slice for its own class.
    roi_ids = torch.arange(pred.size(0), dtype=torch.long, device=pred.device)
    class_pred = pred[roi_ids, label].squeeze(1)
    loss = F.binary_cross_entropy_with_logits(
        class_pred, target, weight=class_weight, reduction='mean')
    # Return a 1-element tensor to match the other loss functions.
    return loss[None]
@LOSSES.register_module()
class CrossEntropyLoss(nn.Module):
    """Cross-entropy loss supporting softmax, sigmoid and mask variants.

    Args:
        use_sigmoid (bool, optional): Use binary cross entropy on sigmoid
            outputs instead of softmax cross entropy. Defaults to False.
        use_mask (bool, optional): Use the mask-specific cross entropy
            variant. Defaults to False.
        reduction (str, optional): 'none', 'mean' or 'sum'.
            Defaults to 'mean'.
        class_weight (list[float], optional): Per-class loss weights.
            Defaults to None.
        ignore_index (int | None): Label index excluded from the loss.
            Defaults to None.
        loss_weight (float, optional): Scale applied to the final loss.
            Defaults to 1.0.
    """

    def __init__(self,
                 use_sigmoid=False,
                 use_mask=False,
                 reduction='mean',
                 class_weight=None,
                 ignore_index=None,
                 loss_weight=1.0):
        super(CrossEntropyLoss, self).__init__()
        # Sigmoid and mask modes are mutually exclusive.
        assert (use_sigmoid is False) or (use_mask is False)
        self.use_sigmoid = use_sigmoid
        self.use_mask = use_mask
        self.reduction = reduction
        self.loss_weight = loss_weight
        self.class_weight = class_weight
        self.ignore_index = ignore_index

        # Resolve the concrete criterion once at construction time.
        if self.use_sigmoid:
            self.cls_criterion = binary_cross_entropy
        elif self.use_mask:
            self.cls_criterion = mask_cross_entropy
        else:
            self.cls_criterion = cross_entropy

    def forward(self,
                cls_score,
                label,
                weight=None,
                avg_factor=None,
                reduction_override=None,
                ignore_index=None,
                **kwargs):
        """Compute the classification loss.

        Args:
            cls_score (torch.Tensor): Predicted scores.
            label (torch.Tensor): Learning targets.
            weight (torch.Tensor, optional): Per-sample loss weight.
            avg_factor (int, optional): Normalizer used when averaging the
                loss. Defaults to None.
            reduction_override (str, optional): Overrides the configured
                reduction; one of 'none', 'mean', 'sum'.
            ignore_index (int | None): Overrides the configured ignore
                index when not None. Default: None.

        Returns:
            torch.Tensor: The calculated loss.
        """
        assert reduction_override in (None, 'none', 'mean', 'sum')
        reduction = reduction_override or self.reduction
        if ignore_index is None:
            ignore_index = self.ignore_index

        class_weight = None
        if self.class_weight is not None:
            # Materialize the class weights on the prediction's device/dtype.
            class_weight = cls_score.new_tensor(
                self.class_weight, device=cls_score.device)

        return self.loss_weight * self.cls_criterion(
            cls_score,
            label,
            weight,
            class_weight=class_weight,
            reduction=reduction,
            avg_factor=avg_factor,
            ignore_index=ignore_index,
            **kwargs)
| 9,696 | 37.480159 | 79 | py |
PseCo | PseCo-master/thirdparty/mmdetection/mmdet/models/losses/gaussian_focal_loss.py | # Copyright (c) OpenMMLab. All rights reserved.
import mmcv
import torch.nn as nn
from ..builder import LOSSES
from .utils import weighted_loss
@mmcv.jit(derivate=True, coderize=True)
@weighted_loss
def gaussian_focal_loss(pred, gaussian_target, alpha=2.0, gamma=4.0):
    """`Focal Loss <https://arxiv.org/abs/1708.02002>`_ variant for targets
    drawn as a gaussian heatmap.

    Only locations where the target equals exactly 1 are treated as
    positives; all other locations contribute a negative term that is
    down-weighted by ``(1 - target)^gamma``.

    Args:
        pred (torch.Tensor): Predicted heatmap (already in [0, 1]).
        gaussian_target (torch.Tensor): Gaussian-distributed learning target.
        alpha (float, optional): Focusing power on the prediction.
            Defaults to 2.0.
        gamma (float, optional): Power applied to the gaussian target when
            weighting negatives. Defaults to 4.0.
    """
    eps = 1e-12  # guards log() against exact 0/1 predictions
    pos_mask = gaussian_target.eq(1)
    neg_weight = (1 - gaussian_target).pow(gamma)
    positive_term = -(pred + eps).log() * (1 - pred).pow(alpha) * pos_mask
    negative_term = -(1 - pred + eps).log() * pred.pow(alpha) * neg_weight
    return positive_term + negative_term
@LOSSES.register_module()
class GaussianFocalLoss(nn.Module):
    """GaussianFocalLoss is a variant of focal loss.

    More details can be found in the `paper
    <https://arxiv.org/abs/1808.01244>`_
    Code is modified from `kp_utils.py
    <https://github.com/princeton-vl/CornerNet/blob/master/models/py_utils/kp_utils.py#L152>`_  # noqa: E501
    Note that the target here is a gaussian heatmap, not a 0/1 binary
    target.

    Args:
        alpha (float): Power of prediction.
        gamma (float): Power of target for negative samples.
        reduction (str): Options are "none", "mean" and "sum".
        loss_weight (float): Loss weight of current loss.
    """

    def __init__(self,
                 alpha=2.0,
                 gamma=4.0,
                 reduction='mean',
                 loss_weight=1.0):
        super(GaussianFocalLoss, self).__init__()
        self.alpha = alpha
        self.gamma = gamma
        self.reduction = reduction
        self.loss_weight = loss_weight

    def forward(self,
                pred,
                target,
                weight=None,
                avg_factor=None,
                reduction_override=None):
        """Compute the loss.

        Args:
            pred (torch.Tensor): The prediction.
            target (torch.Tensor): Gaussian-distributed learning target.
            weight (torch.Tensor, optional): Per-prediction loss weight.
                Defaults to None.
            avg_factor (int, optional): Normalizer used when averaging the
                loss. Defaults to None.
            reduction_override (str, optional): Overrides the configured
                reduction. Defaults to None.
        """
        assert reduction_override in (None, 'none', 'mean', 'sum')
        reduction = reduction_override or self.reduction
        return self.loss_weight * gaussian_focal_loss(
            pred,
            target,
            weight,
            alpha=self.alpha,
            gamma=self.gamma,
            reduction=reduction,
            avg_factor=avg_factor)
| 3,312 | 34.623656 | 108 | py |
PseCo | PseCo-master/thirdparty/mmdetection/mmdet/models/losses/semi_focal_loss.py | import mmcv
import torch
import torch.nn as nn
import torch.nn.functional as F
from mmdet.core import reduce_mean
from ..builder import LOSSES
from .utils import weighted_loss, weight_reduce_loss
import ipdb
def diff_focal_loss(pred, target, weight=None, beta=2.0, hard_filter=False,
                    reduction='mean',
                    avg_factor=None):
    # Difference focal loss for teacher/student training: every BCE term is
    # scaled by the (student - teacher) score gap raised to ``beta``.
    # Returns (loss, pre_filter_num, post_filter_num), the last two counting
    # positives before/after teacher-score filtering.
    assert len(target) == 3, """target for diff_focal_loss must be a tuple of three elements,
        including category label, student score and teacher score, respectively."""
    label, stu_score, tea_score = target
    # negatives: start from an all-background BCE, weighted by how much the
    # student over-scores the teacher.
    if hard_filter:
        # Hard mode: entries where the student does not exceed the teacher
        # get exactly zero weight.
        scale_factor = torch.clamp(stu_score - tea_score, min=0)
    else:
        scale_factor = stu_score - tea_score
        # Soft mode: recycle the smallest positive gap for non-positive
        # entries so every term keeps a small non-zero weight.
        # NOTE(review): ``torch.min`` on an empty tensor raises, so this
        # assumes at least one positive gap exists in the batch -- confirm.
        outlier_scale_factor = torch.min(scale_factor[scale_factor > 0].detach())
        scale_factor[scale_factor < 0] = outlier_scale_factor
    zerolabel = scale_factor.new_zeros(pred.shape)
    loss = F.binary_cross_entropy_with_logits(
        pred, zerolabel, reduction='none') * scale_factor.pow(beta)
    # positives: overwrite the entries belonging to labelled foreground
    # classes; labels in [0, num_classes) are foreground.
    bg_class_ind = pred.size(1)
    pos = ((label >= 0) & (label < bg_class_ind)).nonzero().squeeze(1)
    if pos.shape[0] > 0:
        pos_label = label[pos].long()
        # For positives the weight is how much the teacher out-scores the
        # student on the labelled class (clamped at 0 -> "filtered out").
        filter_flags = torch.clamp(tea_score[pos, pos_label] - stu_score[pos, pos_label], min=0)
        pre_filter_num = torch.tensor(pos.shape[0], device=pred.device, dtype=torch.float)
        post_filter_num = torch.sum(filter_flags > 0).float()
        if hard_filter:
            scale_factor = filter_flags
        else:
            scale_factor = tea_score[pos, pos_label] - stu_score[pos, pos_label]
            # As with negatives, recycle the smallest surviving gap for
            # filtered-out positives (skipped when nothing survives).
            if scale_factor[filter_flags > 0].shape[0] > 0:
                outlier_scale_factor = torch.min(scale_factor[filter_flags > 0].detach())
                scale_factor[filter_flags == 0] = outlier_scale_factor
        pos_pred = pred[pos, pos_label]
        onelabel = pos_pred.new_ones(pos_pred.shape)
        loss[pos, pos_label] = F.binary_cross_entropy_with_logits(
            pos_pred, onelabel, reduction='none') * scale_factor.pow(beta)
    else:
        # No positives: zero-valued counters built from ``pred`` so they stay
        # connected to the autograd graph and live on the right device.
        pre_filter_num, post_filter_num = pred.sum() * 0, pred.sum() * 0
    # Sum over classes, then reduce over samples.
    loss = loss.sum(dim=1, keepdim=False)
    loss = weight_reduce_loss(
        loss, weight=weight, reduction=reduction, avg_factor=avg_factor)
    return loss, pre_filter_num, post_filter_num
def robust_focal_loss(pred, target, weight=None, gamma=2.0, alpha=0.25,
                      reduction='mean',
                      avg_factor=None):
    # Sigmoid focal loss whose positive modulating factor is driven by the
    # teacher score rather than the student's own (1 - p), to be robust to
    # noisy pseudo labels in semi-supervised training.
    assert len(target) == 2, """target for tea_guided_focal_loss must be a tuple of two elements,
        including category label and teacher score, respectively."""
    label, tea_score = target
    num_classes = pred.size(1)
    # One-hot encode labels; index ``num_classes`` means background and the
    # extra column is dropped.
    target = F.one_hot(label, num_classes=num_classes + 1)
    target = target[:, :num_classes].type_as(pred)
    pred_sigmoid = pred.sigmoid()
    target = target.type_as(pred)
    # focal weight: teacher score modulates positives, the student's own
    # sigmoid score modulates negatives.
    # NOTE(review): the negative balance factor is hard-coded to 0.75, which
    # equals (1 - alpha) only for the default alpha=0.25 -- confirm intended.
    pt = tea_score * target + pred_sigmoid * (1 - target)
    focal_weight = (alpha * target + 0.75 *
                    (1 - target)) * pt.pow(gamma)
    loss = F.binary_cross_entropy_with_logits(
        pred, target, reduction='none') * focal_weight
    if weight is not None:
        if weight.shape != loss.shape:
            if weight.size(0) == loss.size(0):
                # For most cases, weight is of shape (num_priors, ),
                # which means it does not have the second axis num_class
                weight = weight.view(-1, 1)
            else:
                # Sometimes, weight per anchor per class is also needed. e.g.
                # in FSAF. But it may be flattened of shape
                # (num_priors x num_class, ), while loss is still of shape
                # (num_priors, num_class).
                assert weight.numel() == loss.numel()
                weight = weight.view(loss.size(0), -1)
        assert weight.ndim == loss.ndim
    loss = weight_reduce_loss(loss, weight, reduction, avg_factor)
    return loss
@LOSSES.register_module()
class DiffFocalLoss(nn.Module):
    """Module wrapper around :func:`diff_focal_loss`.

    Args:
        use_sigmoid (bool): Only sigmoid mode is supported. Default: True.
        beta (float): Power applied to the student/teacher score gap.
            Default: 2.0.
        hard_filter (bool): Whether to hard-filter non-positive score gaps.
            Default: True.
        reduction (str): 'none', 'mean' or 'sum'. Default: 'mean'.
        loss_weight (float): Scale applied to the loss. Default: 1.0.
    """

    def __init__(self,
                 use_sigmoid=True,
                 beta=2.0,
                 hard_filter=True,
                 reduction='mean',
                 loss_weight=1.0):
        super(DiffFocalLoss, self).__init__()
        assert use_sigmoid is True, 'Only sigmoid in DFL supported now.'
        self.use_sigmoid = use_sigmoid
        self.beta = beta
        self.hard_filter = hard_filter
        self.reduction = reduction
        self.loss_weight = loss_weight

    def forward(self,
                pred,
                target,
                weight=None,
                avg_factor=None,
                reduction_override=None):
        """Compute the loss.

        Args:
            pred (torch.Tensor): Predicted logits with shape (N, C), C is
                the number of classes.
            target (tuple[torch.Tensor]): ``(label, stu_score, tea_score)``
                as consumed by :func:`diff_focal_loss`.
            weight (torch.Tensor, optional): Per-prediction loss weight.
                Defaults to None.
            avg_factor (int, optional): Normalizer used when averaging the
                loss. Defaults to None.
            reduction_override (str, optional): Overrides the configured
                reduction. Defaults to None.

        Returns:
            tuple: ``(loss, pre_filter_number, post_filter_number)``.
        """
        assert reduction_override in (None, 'none', 'mean', 'sum')
        reduction = reduction_override or self.reduction
        if not self.use_sigmoid:
            raise NotImplementedError
        loss_cls, pre_filter_number, post_filter_number = diff_focal_loss(
            pred,
            target,
            weight,
            beta=self.beta,
            hard_filter=self.hard_filter,
            reduction=reduction,
            avg_factor=avg_factor)
        return self.loss_weight * loss_cls, pre_filter_number, \
            post_filter_number
@LOSSES.register_module()
class RobustFocalLoss(nn.Module):
    """Module wrapper around :func:`robust_focal_loss`.

    Args:
        use_sigmoid (bool): Only sigmoid mode is supported. Default: True.
        gamma (float): Focusing parameter. Default: 2.0.
        alpha (float): Balance factor for positive samples. Default: 0.25.
        reduction (str): 'none', 'mean' or 'sum'. Default: 'mean'.
        loss_weight (float): Scale applied to the loss. Default: 1.0.
    """

    def __init__(self,
                 use_sigmoid=True,
                 gamma=2.0,
                 alpha=0.25,
                 reduction='mean',
                 loss_weight=1.0):
        super(RobustFocalLoss, self).__init__()
        assert use_sigmoid is True, 'Only sigmoid in DFL supported now.'
        self.use_sigmoid = use_sigmoid
        self.gamma = gamma
        self.alpha = alpha
        self.reduction = reduction
        self.loss_weight = loss_weight

    def forward(self,
                pred,
                target,
                weight=None,
                avg_factor=None,
                reduction_override=None):
        """Compute the loss.

        Args:
            pred (torch.Tensor): Predicted logits with shape (N, C), C is
                the number of classes.
            target (tuple[torch.Tensor]): ``(label, tea_score)`` as consumed
                by :func:`robust_focal_loss`.
            weight (torch.Tensor, optional): Per-prediction loss weight.
                Defaults to None.
            avg_factor (int, optional): Normalizer used when averaging the
                loss. Defaults to None.
            reduction_override (str, optional): Overrides the configured
                reduction. Defaults to None.

        Returns:
            torch.Tensor: The calculated loss.
        """
        assert reduction_override in (None, 'none', 'mean', 'sum')
        reduction = reduction_override or self.reduction
        if not self.use_sigmoid:
            raise NotImplementedError
        return self.loss_weight * robust_focal_loss(
            pred,
            target,
            weight,
            gamma=self.gamma,
            alpha=self.alpha,
            reduction=reduction,
            avg_factor=avg_factor)
PseCo | PseCo-master/thirdparty/mmdetection/mmdet/models/losses/kd_loss.py | # Copyright (c) OpenMMLab. All rights reserved.
import mmcv
import torch
import torch.nn as nn
import torch.nn.functional as F
from ..builder import LOSSES
from .utils import weighted_loss
import ipdb
@mmcv.jit(derivate=True, coderize=True)
@weighted_loss
def knowledge_distillation_kl_div_loss(pred,
                                       soft_label,
                                       T,
                                       detach_target=True):
    r"""KL-divergence loss between temperature-softened distributions.

    Args:
        pred (Tensor): Predicted logits with shape (N, n + 1).
        soft_label (Tensor): Target logits with shape (N, n + 1).
        T (int): Temperature for distillation.
        detach_target (bool): Remove soft_label from automatic
            differentiation. Default: True.

    Returns:
        torch.Tensor: Loss tensor with shape (N,).
    """
    assert pred.size() == soft_label.size()
    # Teacher distribution at temperature T.
    teacher_probs = F.softmax(soft_label / T, dim=1)
    if detach_target:
        teacher_probs = teacher_probs.detach()
    # Per-sample mean over classes; T^2 rescaling keeps gradient magnitudes
    # comparable across temperatures.
    student_log_probs = F.log_softmax(pred / T, dim=1)
    return F.kl_div(
        student_log_probs, teacher_probs, reduction='none').mean(1) * (T * T)
@LOSSES.register_module()
class KnowledgeDistillationKLDivLoss(nn.Module):
    """KL-divergence loss for knowledge distillation.

    Args:
        reduction (str): Options are `'none'`, `'mean'` and `'sum'`.
        loss_weight (float): Loss weight of current loss.
        T (int): Temperature for distillation; must be >= 1.
    """

    def __init__(self, reduction='mean', loss_weight=1.0, T=10):
        super(KnowledgeDistillationKLDivLoss, self).__init__()
        assert T >= 1
        self.reduction = reduction
        self.loss_weight = loss_weight
        self.T = T

    def forward(self,
                pred,
                soft_label,
                weight=None,
                avg_factor=None,
                reduction_override=None):
        """Compute the loss.

        Args:
            pred (Tensor): Predicted logits with shape (N, n + 1).
            soft_label (Tensor): Target logits with shape (N, n + 1).
            weight (torch.Tensor, optional): Per-prediction loss weight.
                Defaults to None.
            avg_factor (int, optional): Normalizer used when averaging the
                loss. Defaults to None.
            reduction_override (str, optional): Overrides the configured
                reduction. Defaults to None.
        """
        assert reduction_override in (None, 'none', 'mean', 'sum')
        reduction = reduction_override or self.reduction
        kd = knowledge_distillation_kl_div_loss(
            pred,
            soft_label,
            weight,
            reduction=reduction,
            avg_factor=avg_factor,
            T=self.T)
        return self.loss_weight * kd
@LOSSES.register_module()
class FeatImitate_L2Loss(nn.Module):
    """Feature-imitation loss for knowledge distillation (normalized L2).

    Each feature map is passed through ReLU, flattened to (N, -1),
    L2-normalized per sample, and compared to its target with a squared
    error summed over features and averaged over the batch. When lists of
    feature maps are given (multi-level features), the loss is additionally
    averaged over the levels.

    Args:
        reduction (str): Kept for interface consistency with other losses;
            the reduction performed here is always the explicit batch/level
            mean described above. Options are `'none'`, `'mean'`, `'sum'`.
        loss_weight (float): Loss weight of current loss.
    """

    def __init__(self, reduction='mean', loss_weight=1.0):
        super(FeatImitate_L2Loss, self).__init__()
        self.reduction = reduction
        self.loss_weight = loss_weight
        self.relu = nn.ReLU()

    def forward(self,
                preds,
                targets,
                weight=None,
                avg_factor=None,
                reduction_override=None):
        """Compute the imitation loss.

        Args:
            preds (Tensor | list[Tensor]): Student feature map(s).
            targets (Tensor | list[Tensor]): Teacher feature map(s), with
                shapes matching ``preds``.
            weight, avg_factor, reduction_override: Accepted for interface
                consistency with other losses but currently unused -- the
                reduction is always the explicit mean described above.

        Returns:
            Tensor: Scalar loss.
        """
        # BUG FIX: the previous implementation assigned reduction_override
        # into ``self.reduction``, permanently mutating module state from a
        # forward pass. The resolved value was never read afterwards, so the
        # override is now simply ignored instead of corrupting state.
        if not isinstance(preds, list):
            preds = [preds]
            targets = [targets]
        total_loss = 0
        for pred, target in zip(preds, targets):
            pred = self.normalize_feature(self.relu(pred))
            target = self.normalize_feature(self.relu(target))
            # Squared L2 distance between unit-norm rows, batch-averaged.
            total_loss += torch.sum(torch.pow(
                torch.add(pred, target, alpha=-1), 2)) / len(pred)
        return self.loss_weight * (total_loss / len(preds))

    def normalize_feature(self, x, mult=1.0):
        """Flatten to (N, -1) and scale each row to L2 norm ``mult``."""
        x = x.reshape(x.size(0), -1)
        return x / x.norm(2, dim=1, keepdim=True) * mult
PseCo | PseCo-master/thirdparty/mmdetection/mmdet/models/backbones/hrnet.py | # Copyright (c) OpenMMLab. All rights reserved.
import warnings
import torch.nn as nn
from mmcv.cnn import build_conv_layer, build_norm_layer
from mmcv.runner import BaseModule, ModuleList, Sequential
from torch.nn.modules.batchnorm import _BatchNorm
from ..builder import BACKBONES
from .resnet import BasicBlock, Bottleneck
class HRModule(BaseModule):
    """High-Resolution Module for HRNet.

    In this module, every branch has 4 BasicBlocks/Bottlenecks. Fusion/Exchange
    is in this module.
    """

    def __init__(self,
                 num_branches,
                 blocks,
                 num_blocks,
                 in_channels,
                 num_channels,
                 multiscale_output=True,
                 with_cp=False,
                 conv_cfg=None,
                 norm_cfg=dict(type='BN'),
                 block_init_cfg=None,
                 init_cfg=None):
        super(HRModule, self).__init__(init_cfg)
        self.block_init_cfg = block_init_cfg
        # Fail fast on inconsistent per-branch configuration lists.
        self._check_branches(num_branches, num_blocks, in_channels,
                             num_channels)

        self.in_channels = in_channels
        self.num_branches = num_branches

        self.multiscale_output = multiscale_output
        self.norm_cfg = norm_cfg
        self.conv_cfg = conv_cfg
        self.with_cp = with_cp
        # One residual-block stack per resolution branch, plus the cross-
        # resolution fusion layers that exchange information between them.
        self.branches = self._make_branches(num_branches, blocks, num_blocks,
                                            num_channels)
        self.fuse_layers = self._make_fuse_layers()
        self.relu = nn.ReLU(inplace=False)

    def _check_branches(self, num_branches, num_blocks, in_channels,
                        num_channels):
        # Every per-branch list must have exactly num_branches entries.
        if num_branches != len(num_blocks):
            error_msg = f'NUM_BRANCHES({num_branches}) ' \
                f'!= NUM_BLOCKS({len(num_blocks)})'
            raise ValueError(error_msg)

        if num_branches != len(num_channels):
            error_msg = f'NUM_BRANCHES({num_branches}) ' \
                f'!= NUM_CHANNELS({len(num_channels)})'
            raise ValueError(error_msg)

        if num_branches != len(in_channels):
            error_msg = f'NUM_BRANCHES({num_branches}) ' \
                f'!= NUM_INCHANNELS({len(in_channels)})'
            raise ValueError(error_msg)

    def _make_one_branch(self,
                         branch_index,
                         block,
                         num_blocks,
                         num_channels,
                         stride=1):
        """Build the residual-block stack for a single branch."""
        downsample = None
        # A projection shortcut is needed when the residual path changes
        # spatial stride or channel count.
        if stride != 1 or \
                self.in_channels[branch_index] != \
                num_channels[branch_index] * block.expansion:
            downsample = nn.Sequential(
                build_conv_layer(
                    self.conv_cfg,
                    self.in_channels[branch_index],
                    num_channels[branch_index] * block.expansion,
                    kernel_size=1,
                    stride=stride,
                    bias=False),
                build_norm_layer(self.norm_cfg, num_channels[branch_index] *
                                 block.expansion)[1])

        layers = []
        layers.append(
            block(
                self.in_channels[branch_index],
                num_channels[branch_index],
                stride,
                downsample=downsample,
                with_cp=self.with_cp,
                norm_cfg=self.norm_cfg,
                conv_cfg=self.conv_cfg,
                init_cfg=self.block_init_cfg))
        # Track the branch's new channel count for subsequent blocks and for
        # the fuse layers built later.
        self.in_channels[branch_index] = \
            num_channels[branch_index] * block.expansion
        for i in range(1, num_blocks[branch_index]):
            layers.append(
                block(
                    self.in_channels[branch_index],
                    num_channels[branch_index],
                    with_cp=self.with_cp,
                    norm_cfg=self.norm_cfg,
                    conv_cfg=self.conv_cfg,
                    init_cfg=self.block_init_cfg))

        return Sequential(*layers)

    def _make_branches(self, num_branches, block, num_blocks, num_channels):
        """Build one residual stack per branch."""
        branches = []

        for i in range(num_branches):
            branches.append(
                self._make_one_branch(i, block, num_blocks, num_channels))

        return ModuleList(branches)

    def _make_fuse_layers(self):
        """Build cross-resolution fusion layers.

        ``fuse_layers[i][j]`` maps branch j's output to branch i's
        resolution/channels: upsampling (1x1 conv + nearest upsample) when
        j is coarser than i, identity (None) when i == j, and a chain of
        stride-2 3x3 convs when j is finer than i.
        """
        if self.num_branches == 1:
            # Nothing to fuse with a single branch.
            return None

        num_branches = self.num_branches
        in_channels = self.in_channels
        fuse_layers = []
        # If multiscale output is disabled, only the finest branch (i == 0)
        # needs fusion inputs.
        num_out_branches = num_branches if self.multiscale_output else 1
        for i in range(num_out_branches):
            fuse_layer = []
            for j in range(num_branches):
                if j > i:
                    # Coarser source: align channels, then upsample 2^(j-i)x.
                    fuse_layer.append(
                        nn.Sequential(
                            build_conv_layer(
                                self.conv_cfg,
                                in_channels[j],
                                in_channels[i],
                                kernel_size=1,
                                stride=1,
                                padding=0,
                                bias=False),
                            build_norm_layer(self.norm_cfg, in_channels[i])[1],
                            nn.Upsample(
                                scale_factor=2**(j - i), mode='nearest')))
                elif j == i:
                    # Same resolution: identity.
                    fuse_layer.append(None)
                else:
                    # Finer source: downsample with (i - j) stride-2 convs;
                    # only the last one changes the channel count and has no
                    # trailing ReLU.
                    conv_downsamples = []
                    for k in range(i - j):
                        if k == i - j - 1:
                            conv_downsamples.append(
                                nn.Sequential(
                                    build_conv_layer(
                                        self.conv_cfg,
                                        in_channels[j],
                                        in_channels[i],
                                        kernel_size=3,
                                        stride=2,
                                        padding=1,
                                        bias=False),
                                    build_norm_layer(self.norm_cfg,
                                                     in_channels[i])[1]))
                        else:
                            conv_downsamples.append(
                                nn.Sequential(
                                    build_conv_layer(
                                        self.conv_cfg,
                                        in_channels[j],
                                        in_channels[j],
                                        kernel_size=3,
                                        stride=2,
                                        padding=1,
                                        bias=False),
                                    build_norm_layer(self.norm_cfg,
                                                     in_channels[j])[1],
                                    nn.ReLU(inplace=False)))
                    fuse_layer.append(nn.Sequential(*conv_downsamples))
            fuse_layers.append(nn.ModuleList(fuse_layer))

        return nn.ModuleList(fuse_layers)

    def forward(self, x):
        """Forward function."""
        if self.num_branches == 1:
            # Single branch: no fusion, just run the residual stack.
            return [self.branches[0](x[0])]

        # Run each branch independently at its own resolution.
        for i in range(self.num_branches):
            x[i] = self.branches[i](x[i])

        # Fuse: each output i is the ReLU of the sum over all branches j,
        # each resampled to branch i's resolution.
        x_fuse = []
        for i in range(len(self.fuse_layers)):
            y = 0
            for j in range(self.num_branches):
                if i == j:
                    y += x[j]
                else:
                    y += self.fuse_layers[i][j](x[j])
            x_fuse.append(self.relu(y))
        return x_fuse
@BACKBONES.register_module()
class HRNet(BaseModule):
"""HRNet backbone.
`High-Resolution Representations for Labeling Pixels and Regions
arXiv: <https://arxiv.org/abs/1904.04514>`_.
Args:
extra (dict): Detailed configuration for each stage of HRNet.
There must be 4 stages, the configuration for each stage must have
5 keys:
- num_modules(int): The number of HRModule in this stage.
- num_branches(int): The number of branches in the HRModule.
- block(str): The type of convolution block.
- num_blocks(tuple): The number of blocks in each branch.
The length must be equal to num_branches.
- num_channels(tuple): The number of channels in each branch.
The length must be equal to num_branches.
in_channels (int): Number of input image channels. Default: 3.
conv_cfg (dict): Dictionary to construct and config conv layer.
norm_cfg (dict): Dictionary to construct and config norm layer.
norm_eval (bool): Whether to set norm layers to eval mode, namely,
freeze running stats (mean and var). Note: Effect on Batch Norm
and its variants only. Default: True.
with_cp (bool): Use checkpoint or not. Using checkpoint will save some
memory while slowing down the training speed. Default: False.
zero_init_residual (bool): Whether to use zero init for last norm layer
in resblocks to let them behave as identity. Default: False.
multiscale_output (bool): Whether to output multi-level features
produced by multiple branches. If False, only the first level
feature will be output. Default: True.
pretrained (str, optional): Model pretrained path. Default: None.
init_cfg (dict or list[dict], optional): Initialization config dict.
Default: None.
Example:
>>> from mmdet.models import HRNet
>>> import torch
>>> extra = dict(
>>> stage1=dict(
>>> num_modules=1,
>>> num_branches=1,
>>> block='BOTTLENECK',
>>> num_blocks=(4, ),
>>> num_channels=(64, )),
>>> stage2=dict(
>>> num_modules=1,
>>> num_branches=2,
>>> block='BASIC',
>>> num_blocks=(4, 4),
>>> num_channels=(32, 64)),
>>> stage3=dict(
>>> num_modules=4,
>>> num_branches=3,
>>> block='BASIC',
>>> num_blocks=(4, 4, 4),
>>> num_channels=(32, 64, 128)),
>>> stage4=dict(
>>> num_modules=3,
>>> num_branches=4,
>>> block='BASIC',
>>> num_blocks=(4, 4, 4, 4),
>>> num_channels=(32, 64, 128, 256)))
>>> self = HRNet(extra, in_channels=1)
>>> self.eval()
>>> inputs = torch.rand(1, 1, 32, 32)
>>> level_outputs = self.forward(inputs)
>>> for level_out in level_outputs:
... print(tuple(level_out.shape))
(1, 32, 8, 8)
(1, 64, 4, 4)
(1, 128, 2, 2)
(1, 256, 1, 1)
"""
blocks_dict = {'BASIC': BasicBlock, 'BOTTLENECK': Bottleneck}
    def __init__(self,
                 extra,
                 in_channels=3,
                 conv_cfg=None,
                 norm_cfg=dict(type='BN'),
                 norm_eval=True,
                 with_cp=False,
                 zero_init_residual=False,
                 multiscale_output=True,
                 pretrained=None,
                 init_cfg=None):
        super(HRNet, self).__init__(init_cfg)

        # ``pretrained`` is the legacy way of specifying weights; it is
        # mapped onto ``init_cfg`` and must not be combined with it.
        self.pretrained = pretrained
        assert not (init_cfg and pretrained), \
            'init_cfg and pretrained cannot be specified at the same time'
        if isinstance(pretrained, str):
            warnings.warn('DeprecationWarning: pretrained is deprecated, '
                          'please use "init_cfg" instead')
            self.init_cfg = dict(type='Pretrained', checkpoint=pretrained)
        elif pretrained is None:
            if init_cfg is None:
                # Default initialization: Kaiming for convs, constant 1 for
                # norm layers.
                self.init_cfg = [
                    dict(type='Kaiming', layer='Conv2d'),
                    dict(
                        type='Constant',
                        val=1,
                        layer=['_BatchNorm', 'GroupNorm'])
                ]
        else:
            raise TypeError('pretrained must be a str or None')

        # Assert configurations of 4 stages are in extra
        assert 'stage1' in extra and 'stage2' in extra \
               and 'stage3' in extra and 'stage4' in extra
        # Assert whether the length of `num_blocks` and `num_channels` are
        # equal to `num_branches`
        for i in range(4):
            cfg = extra[f'stage{i + 1}']
            assert len(cfg['num_blocks']) == cfg['num_branches'] and \
                   len(cfg['num_channels']) == cfg['num_branches']

        self.extra = extra
        self.conv_cfg = conv_cfg
        self.norm_cfg = norm_cfg
        self.norm_eval = norm_eval
        self.with_cp = with_cp
        self.zero_init_residual = zero_init_residual

        # stem net: two stride-2 3x3 convs reduce resolution by 4x and
        # expand to 64 channels.
        self.norm1_name, norm1 = build_norm_layer(self.norm_cfg, 64, postfix=1)
        self.norm2_name, norm2 = build_norm_layer(self.norm_cfg, 64, postfix=2)

        self.conv1 = build_conv_layer(
            self.conv_cfg,
            in_channels,
            64,
            kernel_size=3,
            stride=2,
            padding=1,
            bias=False)

        self.add_module(self.norm1_name, norm1)
        self.conv2 = build_conv_layer(
            self.conv_cfg,
            64,
            64,
            kernel_size=3,
            stride=2,
            padding=1,
            bias=False)

        self.add_module(self.norm2_name, norm2)
        self.relu = nn.ReLU(inplace=True)

        # stage 1: single-branch stack of residual blocks on the stem output.
        self.stage1_cfg = self.extra['stage1']
        num_channels = self.stage1_cfg['num_channels'][0]
        block_type = self.stage1_cfg['block']
        num_blocks = self.stage1_cfg['num_blocks'][0]

        block = self.blocks_dict[block_type]
        stage1_out_channels = num_channels * block.expansion
        self.layer1 = self._make_layer(block, 64, num_channels, num_blocks)

        # stage 2: a transition layer adapts/creates branches, then the
        # multi-branch HRModules run. Same pattern for stages 3 and 4.
        self.stage2_cfg = self.extra['stage2']
        num_channels = self.stage2_cfg['num_channels']
        block_type = self.stage2_cfg['block']

        block = self.blocks_dict[block_type]
        num_channels = [channel * block.expansion for channel in num_channels]
        self.transition1 = self._make_transition_layer([stage1_out_channels],
                                                       num_channels)
        self.stage2, pre_stage_channels = self._make_stage(
            self.stage2_cfg, num_channels)

        # stage 3
        self.stage3_cfg = self.extra['stage3']
        num_channels = self.stage3_cfg['num_channels']
        block_type = self.stage3_cfg['block']

        block = self.blocks_dict[block_type]
        num_channels = [channel * block.expansion for channel in num_channels]
        self.transition2 = self._make_transition_layer(pre_stage_channels,
                                                       num_channels)
        self.stage3, pre_stage_channels = self._make_stage(
            self.stage3_cfg, num_channels)

        # stage 4
        self.stage4_cfg = self.extra['stage4']
        num_channels = self.stage4_cfg['num_channels']
        block_type = self.stage4_cfg['block']

        block = self.blocks_dict[block_type]
        num_channels = [channel * block.expansion for channel in num_channels]
        self.transition3 = self._make_transition_layer(pre_stage_channels,
                                                       num_channels)
        self.stage4, pre_stage_channels = self._make_stage(
            self.stage4_cfg, num_channels, multiscale_output=multiscale_output)
    @property
    def norm1(self):
        """nn.Module: the normalization layer named "norm1" """
        # The layer was registered under a config-dependent name via
        # add_module, so it is looked up dynamically.
        return getattr(self, self.norm1_name)
    @property
    def norm2(self):
        """nn.Module: the normalization layer named "norm2" """
        # Registered under a config-dependent name; resolved dynamically.
        return getattr(self, self.norm2_name)
    def _make_transition_layer(self, num_channels_pre_layer,
                               num_channels_cur_layer):
        """Build layers adapting the previous stage's outputs to the current
        stage's branches.

        Existing branches get a 3x3 conv only when the channel counts
        differ (identity, stored as ``None``, otherwise); each newly added
        branch is derived from the previous stage's coarsest output through
        a chain of stride-2 3x3 convs.
        """
        num_branches_cur = len(num_channels_cur_layer)
        num_branches_pre = len(num_channels_pre_layer)

        transition_layers = []
        for i in range(num_branches_cur):
            if i < num_branches_pre:
                if num_channels_cur_layer[i] != num_channels_pre_layer[i]:
                    # Channel mismatch: stride-1 3x3 conv to adapt channels.
                    transition_layers.append(
                        nn.Sequential(
                            build_conv_layer(
                                self.conv_cfg,
                                num_channels_pre_layer[i],
                                num_channels_cur_layer[i],
                                kernel_size=3,
                                stride=1,
                                padding=1,
                                bias=False),
                            build_norm_layer(self.norm_cfg,
                                             num_channels_cur_layer[i])[1],
                            nn.ReLU(inplace=True)))
                else:
                    # Same channels: pass through unchanged.
                    transition_layers.append(None)
            else:
                # New branch: downsample the coarsest previous output; only
                # the final conv in the chain changes the channel count.
                conv_downsamples = []
                for j in range(i + 1 - num_branches_pre):
                    in_channels = num_channels_pre_layer[-1]
                    out_channels = num_channels_cur_layer[i] \
                        if j == i - num_branches_pre else in_channels
                    conv_downsamples.append(
                        nn.Sequential(
                            build_conv_layer(
                                self.conv_cfg,
                                in_channels,
                                out_channels,
                                kernel_size=3,
                                stride=2,
                                padding=1,
                                bias=False),
                            build_norm_layer(self.norm_cfg, out_channels)[1],
                            nn.ReLU(inplace=True)))
                transition_layers.append(nn.Sequential(*conv_downsamples))

        return nn.ModuleList(transition_layers)
    def _make_layer(self, block, inplanes, planes, blocks, stride=1):
        """Stack ``blocks`` residual blocks (stage-1 style, single branch)."""
        downsample = None
        # Projection shortcut when the first block changes stride/channels.
        if stride != 1 or inplanes != planes * block.expansion:
            downsample = nn.Sequential(
                build_conv_layer(
                    self.conv_cfg,
                    inplanes,
                    planes * block.expansion,
                    kernel_size=1,
                    stride=stride,
                    bias=False),
                build_norm_layer(self.norm_cfg, planes * block.expansion)[1])

        layers = []
        block_init_cfg = None
        # Zero-init the last norm layer of each residual block so blocks
        # start as identity mappings (only for default, non-pretrained init).
        if self.pretrained is None and not hasattr(
                self, 'init_cfg') and self.zero_init_residual:
            if block is BasicBlock:
                block_init_cfg = dict(
                    type='Constant', val=0, override=dict(name='norm2'))
            elif block is Bottleneck:
                block_init_cfg = dict(
                    type='Constant', val=0, override=dict(name='norm3'))

        layers.append(
            block(
                inplanes,
                planes,
                stride,
                downsample=downsample,
                with_cp=self.with_cp,
                norm_cfg=self.norm_cfg,
                conv_cfg=self.conv_cfg,
                init_cfg=block_init_cfg,
            ))
        inplanes = planes * block.expansion
        for i in range(1, blocks):
            layers.append(
                block(
                    inplanes,
                    planes,
                    with_cp=self.with_cp,
                    norm_cfg=self.norm_cfg,
                    conv_cfg=self.conv_cfg,
                    init_cfg=block_init_cfg))

        return Sequential(*layers)
def _make_stage(self, layer_config, in_channels, multiscale_output=True):
num_modules = layer_config['num_modules']
num_branches = layer_config['num_branches']
num_blocks = layer_config['num_blocks']
num_channels = layer_config['num_channels']
block = self.blocks_dict[layer_config['block']]
hr_modules = []
block_init_cfg = None
if self.pretrained is None and not hasattr(
self, 'init_cfg') and self.zero_init_residual:
if block is BasicBlock:
block_init_cfg = dict(
type='Constant', val=0, override=dict(name='norm2'))
elif block is Bottleneck:
block_init_cfg = dict(
type='Constant', val=0, override=dict(name='norm3'))
for i in range(num_modules):
# multi_scale_output is only used for the last module
if not multiscale_output and i == num_modules - 1:
reset_multiscale_output = False
else:
reset_multiscale_output = True
hr_modules.append(
HRModule(
num_branches,
block,
num_blocks,
in_channels,
num_channels,
reset_multiscale_output,
with_cp=self.with_cp,
norm_cfg=self.norm_cfg,
conv_cfg=self.conv_cfg,
block_init_cfg=block_init_cfg))
return Sequential(*hr_modules), in_channels
    def forward(self, x):
        """Forward function.

        Args:
            x (Tensor): Input image tensor.

        Returns:
            list[Tensor]: Per-branch features from the last stage.
        """
        # Stem: two stride-2 convs with norm/ReLU, then the first residual
        # stage.
        x = self.conv1(x)
        x = self.norm1(x)
        x = self.relu(x)
        x = self.conv2(x)
        x = self.norm2(x)
        x = self.relu(x)
        x = self.layer1(x)

        # Each transition adapts the previous stage's outputs to the new
        # branch count/channels; ``None`` means the branch passes through
        # unchanged.
        x_list = []
        for i in range(self.stage2_cfg['num_branches']):
            if self.transition1[i] is not None:
                x_list.append(self.transition1[i](x))
            else:
                x_list.append(x)
        y_list = self.stage2(x_list)

        x_list = []
        for i in range(self.stage3_cfg['num_branches']):
            if self.transition2[i] is not None:
                # New branches are derived from the last (lowest-resolution)
                # output of the previous stage.
                x_list.append(self.transition2[i](y_list[-1]))
            else:
                x_list.append(y_list[i])
        y_list = self.stage3(x_list)

        x_list = []
        for i in range(self.stage4_cfg['num_branches']):
            if self.transition3[i] is not None:
                x_list.append(self.transition3[i](y_list[-1]))
            else:
                x_list.append(y_list[i])
        y_list = self.stage4(x_list)

        return y_list
def train(self, mode=True):
"""Convert the model into training mode will keeping the normalization
layer freezed."""
super(HRNet, self).train(mode)
if mode and self.norm_eval:
for m in self.modules():
# trick: eval have effect on BatchNorm only
if isinstance(m, _BatchNorm):
m.eval()
| 23,106 | 38.164407 | 79 | py |
PseCo | PseCo-master/thirdparty/mmdetection/mmdet/models/backbones/regnet.py | # Copyright (c) OpenMMLab. All rights reserved.
import warnings
import numpy as np
import torch.nn as nn
from mmcv.cnn import build_conv_layer, build_norm_layer
from ..builder import BACKBONES
from .resnet import ResNet
from .resnext import Bottleneck
@BACKBONES.register_module()
class RegNet(ResNet):
    """RegNet backbone.

    More details can be found in `paper <https://arxiv.org/abs/2003.13678>`_ .

    Args:
        arch (dict): The parameter of RegNets.

            - w0 (int): initial width
            - wa (float): slope of width
            - wm (float): quantization parameter to quantize the width
            - depth (int): depth of the backbone
            - group_w (int): width of group
            - bot_mul (float): bottleneck ratio, i.e. expansion of bottleneck.
        strides (Sequence[int]): Strides of the first block of each stage.
        base_channels (int): Base channels after stem layer.
        in_channels (int): Number of input image channels. Default: 3.
        dilations (Sequence[int]): Dilation of each stage.
        out_indices (Sequence[int]): Output from which stages.
        style (str): `pytorch` or `caffe`. If set to "pytorch", the stride-two
            layer is the 3x3 conv layer, otherwise the stride-two layer is
            the first 1x1 conv layer.
        frozen_stages (int): Stages to be frozen (all param fixed). -1 means
            not freezing any parameters.
        norm_cfg (dict): dictionary to construct and config norm layer.
        norm_eval (bool): Whether to set norm layers to eval mode, namely,
            freeze running stats (mean and var). Note: Effect on Batch Norm
            and its variants only.
        with_cp (bool): Use checkpoint or not. Using checkpoint will save some
            memory while slowing down the training speed.
        zero_init_residual (bool): whether to use zero init for last norm layer
            in resblocks to let them behave as identity.
        pretrained (str, optional): model pretrained path. Default: None
        init_cfg (dict or list[dict], optional): Initialization config dict.
            Default: None

    Example:
        >>> from mmdet.models import RegNet
        >>> import torch
        >>> self = RegNet(
            arch=dict(
                w0=88,
                wa=26.31,
                wm=2.25,
                group_w=48,
                depth=25,
                bot_mul=1.0))
        >>> self.eval()
        >>> inputs = torch.rand(1, 3, 32, 32)
        >>> level_outputs = self.forward(inputs)
        >>> for level_out in level_outputs:
        ...     print(tuple(level_out.shape))
        (1, 96, 8, 8)
        (1, 192, 4, 4)
        (1, 432, 2, 2)
        (1, 1008, 1, 1)
    """

    # Named RegNetX variants, keyed by approximate FLOP budget.
    arch_settings = {
        'regnetx_400mf':
        dict(w0=24, wa=24.48, wm=2.54, group_w=16, depth=22, bot_mul=1.0),
        'regnetx_800mf':
        dict(w0=56, wa=35.73, wm=2.28, group_w=16, depth=16, bot_mul=1.0),
        'regnetx_1.6gf':
        dict(w0=80, wa=34.01, wm=2.25, group_w=24, depth=18, bot_mul=1.0),
        'regnetx_3.2gf':
        dict(w0=88, wa=26.31, wm=2.25, group_w=48, depth=25, bot_mul=1.0),
        'regnetx_4.0gf':
        dict(w0=96, wa=38.65, wm=2.43, group_w=40, depth=23, bot_mul=1.0),
        'regnetx_6.4gf':
        dict(w0=184, wa=60.83, wm=2.07, group_w=56, depth=17, bot_mul=1.0),
        'regnetx_8.0gf':
        dict(w0=80, wa=49.56, wm=2.88, group_w=120, depth=23, bot_mul=1.0),
        'regnetx_12gf':
        dict(w0=168, wa=73.36, wm=2.37, group_w=112, depth=19, bot_mul=1.0),
    }

    def __init__(self,
                 arch,
                 in_channels=3,
                 stem_channels=32,
                 base_channels=32,
                 strides=(2, 2, 2, 2),
                 dilations=(1, 1, 1, 1),
                 out_indices=(0, 1, 2, 3),
                 style='pytorch',
                 deep_stem=False,
                 avg_down=False,
                 frozen_stages=-1,
                 conv_cfg=None,
                 norm_cfg=dict(type='BN', requires_grad=True),
                 norm_eval=True,
                 dcn=None,
                 stage_with_dcn=(False, False, False, False),
                 plugins=None,
                 with_cp=False,
                 zero_init_residual=True,
                 pretrained=None,
                 init_cfg=None):
        # Deliberately skips ``ResNet.__init__`` and calls the grandparent
        # (``BaseModule``) directly: all stage construction is redone below
        # with RegNet-specific widths.
        super(ResNet, self).__init__(init_cfg)

        # Generate RegNet parameters first
        if isinstance(arch, str):
            assert arch in self.arch_settings, \
                f'"arch": "{arch}" is not one of the' \
                ' arch_settings'
            arch = self.arch_settings[arch]
        elif not isinstance(arch, dict):
            raise ValueError('Expect "arch" to be either a string '
                             f'or a dict, got {type(arch)}')

        widths, num_stages = self.generate_regnet(
            arch['w0'],
            arch['wa'],
            arch['wm'],
            arch['depth'],
        )
        # Convert to per stage format
        stage_widths, stage_blocks = self.get_stages_from_blocks(widths)
        # Generate group widths and bot muls
        group_widths = [arch['group_w'] for _ in range(num_stages)]
        self.bottleneck_ratio = [arch['bot_mul'] for _ in range(num_stages)]
        # Adjust the compatibility of stage_widths and group_widths
        stage_widths, group_widths = self.adjust_width_group(
            stage_widths, self.bottleneck_ratio, group_widths)

        # Group params by stage
        self.stage_widths = stage_widths
        self.group_widths = group_widths
        self.depth = sum(stage_blocks)
        self.stem_channels = stem_channels
        self.base_channels = base_channels
        self.num_stages = num_stages
        assert num_stages >= 1 and num_stages <= 4
        self.strides = strides
        self.dilations = dilations
        assert len(strides) == len(dilations) == num_stages
        self.out_indices = out_indices
        assert max(out_indices) < num_stages
        self.style = style
        self.deep_stem = deep_stem
        self.avg_down = avg_down
        self.frozen_stages = frozen_stages
        self.conv_cfg = conv_cfg
        self.norm_cfg = norm_cfg
        self.with_cp = with_cp
        self.norm_eval = norm_eval
        self.dcn = dcn
        self.stage_with_dcn = stage_with_dcn
        if dcn is not None:
            assert len(stage_with_dcn) == num_stages
        self.plugins = plugins
        self.zero_init_residual = zero_init_residual
        # RegNet handles the bottleneck ratio via ``bot_mul``, so the
        # ResNeXt Bottleneck's class-level expansion is forced to 1 here and
        # restored at the end of __init__.
        self.block = Bottleneck
        expansion_bak = self.block.expansion
        self.block.expansion = 1

        self.stage_blocks = stage_blocks[:num_stages]
        self._make_stem_layer(in_channels, stem_channels)

        block_init_cfg = None
        assert not (init_cfg and pretrained), \
            'init_cfg and pretrained cannot be specified at the same time'
        if isinstance(pretrained, str):
            warnings.warn('DeprecationWarning: pretrained is deprecated, '
                          'please use "init_cfg" instead')
            self.init_cfg = dict(type='Pretrained', checkpoint=pretrained)
        elif pretrained is None:
            if init_cfg is None:
                self.init_cfg = [
                    dict(type='Kaiming', layer='Conv2d'),
                    dict(
                        type='Constant',
                        val=1,
                        layer=['_BatchNorm', 'GroupNorm'])
                ]
                # Zero-init the last norm of each block so residual
                # branches start as identity mappings.
                if self.zero_init_residual:
                    block_init_cfg = dict(
                        type='Constant', val=0, override=dict(name='norm3'))
        else:
            raise TypeError('pretrained must be a str or None')

        self.inplanes = stem_channels
        self.res_layers = []
        for i, num_blocks in enumerate(self.stage_blocks):
            stride = self.strides[i]
            dilation = self.dilations[i]
            group_width = self.group_widths[i]
            width = int(round(self.stage_widths[i] * self.bottleneck_ratio[i]))
            # Number of groups for this stage's grouped convolutions.
            stage_groups = width // group_width

            dcn = self.dcn if self.stage_with_dcn[i] else None
            if self.plugins is not None:
                stage_plugins = self.make_stage_plugins(self.plugins, i)
            else:
                stage_plugins = None

            res_layer = self.make_res_layer(
                block=self.block,
                inplanes=self.inplanes,
                planes=self.stage_widths[i],
                num_blocks=num_blocks,
                stride=stride,
                dilation=dilation,
                style=self.style,
                avg_down=self.avg_down,
                with_cp=self.with_cp,
                conv_cfg=self.conv_cfg,
                norm_cfg=self.norm_cfg,
                dcn=dcn,
                plugins=stage_plugins,
                groups=stage_groups,
                base_width=group_width,
                base_channels=self.stage_widths[i],
                init_cfg=block_init_cfg)
            self.inplanes = self.stage_widths[i]
            layer_name = f'layer{i + 1}'
            self.add_module(layer_name, res_layer)
            self.res_layers.append(layer_name)

        self._freeze_stages()

        self.feat_dim = stage_widths[-1]
        self.block.expansion = expansion_bak

    def _make_stem_layer(self, in_channels, base_channels):
        # Single 3x3 stride-2 conv stem (simpler than ResNet's 7x7 stem).
        self.conv1 = build_conv_layer(
            self.conv_cfg,
            in_channels,
            base_channels,
            kernel_size=3,
            stride=2,
            padding=1,
            bias=False)
        self.norm1_name, norm1 = build_norm_layer(
            self.norm_cfg, base_channels, postfix=1)
        self.add_module(self.norm1_name, norm1)
        self.relu = nn.ReLU(inplace=True)

    def generate_regnet(self,
                        initial_width,
                        width_slope,
                        width_parameter,
                        depth,
                        divisor=8):
        """Generates per block width from RegNet parameters.

        Args:
            initial_width ([int]): Initial width of the backbone
            width_slope ([float]): Slope of the quantized linear function
            width_parameter ([int]): Parameter used to quantize the width.
            depth ([int]): Depth of the backbone.
            divisor (int, optional): The divisor of channels. Defaults to 8.

        Returns:
            list, int: return a list of widths of each stage and the number \
                of stages
        """
        assert width_slope >= 0
        assert initial_width > 0
        assert width_parameter > 1
        assert initial_width % divisor == 0
        # Continuous widths from the linear function u_j = w0 + wa * j,
        # then quantized to powers of wm and rounded to multiples of
        # ``divisor``.
        widths_cont = np.arange(depth) * width_slope + initial_width
        ks = np.round(
            np.log(widths_cont / initial_width) / np.log(width_parameter))
        widths = initial_width * np.power(width_parameter, ks)
        widths = np.round(np.divide(widths, divisor)) * divisor
        num_stages = len(np.unique(widths))
        widths, widths_cont = widths.astype(int).tolist(), widths_cont.tolist()
        return widths, num_stages

    @staticmethod
    def quantize_float(number, divisor):
        """Converts a float to closest non-zero int divisible by divisor.

        Args:
            number (int): Original number to be quantized.
            divisor (int): Divisor used to quantize the number.

        Returns:
            int: quantized number that is divisible by divisor.
        """
        return int(round(number / divisor) * divisor)

    def adjust_width_group(self, widths, bottleneck_ratio, groups):
        """Adjusts the compatibility of widths and groups.

        Args:
            widths (list[int]): Width of each stage.
            bottleneck_ratio (float): Bottleneck ratio.
            groups (int): number of groups in each stage

        Returns:
            tuple(list): The adjusted widths and groups of each stage.
        """
        bottleneck_width = [
            int(w * b) for w, b in zip(widths, bottleneck_ratio)
        ]
        # A group cannot be wider than the bottleneck itself.
        groups = [min(g, w_bot) for g, w_bot in zip(groups, bottleneck_width)]
        # Make each bottleneck width divisible by its group width.
        bottleneck_width = [
            self.quantize_float(w_bot, g)
            for w_bot, g in zip(bottleneck_width, groups)
        ]
        widths = [
            int(w_bot / b)
            for w_bot, b in zip(bottleneck_width, bottleneck_ratio)
        ]
        return widths, groups

    def get_stages_from_blocks(self, widths):
        """Gets widths/stage_blocks of network at each stage.

        Args:
            widths (list[int]): Width in each stage.

        Returns:
            tuple(list): width and depth of each stage
        """
        # ``width_diff`` marks positions where the per-block width changes,
        # i.e. stage boundaries.
        width_diff = [
            width != width_prev
            for width, width_prev in zip(widths + [0], [0] + widths)
        ]
        stage_widths = [
            width for width, diff in zip(widths, width_diff[:-1]) if diff
        ]
        stage_blocks = np.diff([
            depth for depth, diff in zip(range(len(width_diff)), width_diff)
            if diff
        ]).tolist()
        return stage_widths, stage_blocks

    def forward(self, x):
        """Forward function.

        Args:
            x (Tensor): Input image tensor.

        Returns:
            tuple[Tensor]: Feature maps of the stages listed in
            ``self.out_indices``.
        """
        x = self.conv1(x)
        x = self.norm1(x)
        x = self.relu(x)

        outs = []
        for i, layer_name in enumerate(self.res_layers):
            res_layer = getattr(self, layer_name)
            x = res_layer(x)
            if i in self.out_indices:
                outs.append(x)
        return tuple(outs)
| 13,605 | 37.112045 | 79 | py |
PseCo | PseCo-master/thirdparty/mmdetection/mmdet/models/backbones/mobilenet_v2.py | # Copyright (c) OpenMMLab. All rights reserved.
import warnings
import torch.nn as nn
from mmcv.cnn import ConvModule
from mmcv.runner import BaseModule
from torch.nn.modules.batchnorm import _BatchNorm
from ..builder import BACKBONES
from ..utils import InvertedResidual, make_divisible
@BACKBONES.register_module()
class MobileNetV2(BaseModule):
    """MobileNetV2 backbone.

    Args:
        widen_factor (float): Width multiplier, multiply number of
            channels in each layer by this amount. Default: 1.0.
        out_indices (Sequence[int], optional): Output from which stages.
            Default: (1, 2, 4, 7).
        frozen_stages (int): Stages to be frozen (all param fixed).
            Default: -1, which means not freezing any parameters.
        conv_cfg (dict, optional): Config dict for convolution layer.
            Default: None, which means using conv2d.
        norm_cfg (dict): Config dict for normalization layer.
            Default: dict(type='BN').
        act_cfg (dict): Config dict for activation layer.
            Default: dict(type='ReLU6').
        norm_eval (bool): Whether to set norm layers to eval mode, namely,
            freeze running stats (mean and var). Note: Effect on Batch Norm
            and its variants only. Default: False.
        with_cp (bool): Use checkpoint or not. Using checkpoint will save some
            memory while slowing down the training speed. Default: False.
        pretrained (str, optional): model pretrained path. Default: None
        init_cfg (dict or list[dict], optional): Initialization config dict.
            Default: None
    """

    # Parameters to build layers. 4 parameters are needed to construct a
    # layer, from left to right: expand_ratio, channel, num_blocks, stride.
    arch_settings = [[1, 16, 1, 1], [6, 24, 2, 2], [6, 32, 3, 2],
                     [6, 64, 4, 2], [6, 96, 3, 1], [6, 160, 3, 2],
                     [6, 320, 1, 1]]

    def __init__(self,
                 widen_factor=1.,
                 out_indices=(1, 2, 4, 7),
                 frozen_stages=-1,
                 conv_cfg=None,
                 norm_cfg=dict(type='BN'),
                 act_cfg=dict(type='ReLU6'),
                 norm_eval=False,
                 with_cp=False,
                 pretrained=None,
                 init_cfg=None):
        super(MobileNetV2, self).__init__(init_cfg)

        self.pretrained = pretrained
        assert not (init_cfg and pretrained), \
            'init_cfg and pretrained cannot be specified at the same time'
        if isinstance(pretrained, str):
            warnings.warn('DeprecationWarning: pretrained is deprecated, '
                          'please use "init_cfg" instead')
            self.init_cfg = dict(type='Pretrained', checkpoint=pretrained)
        elif pretrained is None:
            if init_cfg is None:
                self.init_cfg = [
                    dict(type='Kaiming', layer='Conv2d'),
                    dict(
                        type='Constant',
                        val=1,
                        layer=['_BatchNorm', 'GroupNorm'])
                ]
        else:
            raise TypeError('pretrained must be a str or None')

        self.widen_factor = widen_factor
        # Validate before storing (fixed: ``out_indices`` was previously
        # assigned twice, once before the validation below).
        if not set(out_indices).issubset(set(range(0, 8))):
            raise ValueError('out_indices must be a subset of range'
                             f'(0, 8). But received {out_indices}')

        if frozen_stages not in range(-1, 8):
            raise ValueError('frozen_stages must be in range(-1, 8). '
                             f'But received {frozen_stages}')
        self.out_indices = out_indices
        self.frozen_stages = frozen_stages
        self.conv_cfg = conv_cfg
        self.norm_cfg = norm_cfg
        self.act_cfg = act_cfg
        self.norm_eval = norm_eval
        self.with_cp = with_cp

        # Stem: 3x3 stride-2 conv; channels are scaled by ``widen_factor``
        # and rounded to a multiple of 8.
        self.in_channels = make_divisible(32 * widen_factor, 8)

        self.conv1 = ConvModule(
            in_channels=3,
            out_channels=self.in_channels,
            kernel_size=3,
            stride=2,
            padding=1,
            conv_cfg=self.conv_cfg,
            norm_cfg=self.norm_cfg,
            act_cfg=self.act_cfg)

        self.layers = []

        for i, layer_cfg in enumerate(self.arch_settings):
            expand_ratio, channel, num_blocks, stride = layer_cfg
            out_channels = make_divisible(channel * widen_factor, 8)
            inverted_res_layer = self.make_layer(
                out_channels=out_channels,
                num_blocks=num_blocks,
                stride=stride,
                expand_ratio=expand_ratio)
            layer_name = f'layer{i + 1}'
            self.add_module(layer_name, inverted_res_layer)
            self.layers.append(layer_name)

        # Final 1x1 conv expands to 1280 channels (scaled up, but never
        # down, by ``widen_factor``).
        if widen_factor > 1.0:
            self.out_channel = int(1280 * widen_factor)
        else:
            self.out_channel = 1280

        layer = ConvModule(
            in_channels=self.in_channels,
            out_channels=self.out_channel,
            kernel_size=1,
            stride=1,
            padding=0,
            conv_cfg=self.conv_cfg,
            norm_cfg=self.norm_cfg,
            act_cfg=self.act_cfg)
        self.add_module('conv2', layer)
        self.layers.append('conv2')

    def make_layer(self, out_channels, num_blocks, stride, expand_ratio):
        """Stack InvertedResidual blocks to build a layer for MobileNetV2.

        Args:
            out_channels (int): out_channels of block.
            num_blocks (int): number of blocks.
            stride (int): stride of the first block; the remaining blocks
                use stride 1.
            expand_ratio (int): Expand the number of channels of the
                hidden layer in InvertedResidual by this ratio.

        Returns:
            nn.Sequential: The stacked InvertedResidual layer.
        """
        layers = []
        for i in range(num_blocks):
            if i >= 1:
                stride = 1
            layers.append(
                InvertedResidual(
                    self.in_channels,
                    out_channels,
                    mid_channels=int(round(self.in_channels * expand_ratio)),
                    stride=stride,
                    with_expand_conv=expand_ratio != 1,
                    conv_cfg=self.conv_cfg,
                    norm_cfg=self.norm_cfg,
                    act_cfg=self.act_cfg,
                    with_cp=self.with_cp))
            self.in_channels = out_channels

        return nn.Sequential(*layers)

    def _freeze_stages(self):
        """Freeze the stem and the first ``frozen_stages`` layers."""
        if self.frozen_stages >= 0:
            for param in self.conv1.parameters():
                param.requires_grad = False
            for i in range(1, self.frozen_stages + 1):
                layer = getattr(self, f'layer{i}')
                layer.eval()
                for param in layer.parameters():
                    param.requires_grad = False

    def forward(self, x):
        """Forward function.

        Args:
            x (Tensor): Input image tensor.

        Returns:
            tuple[Tensor]: Feature maps of the layers listed in
            ``self.out_indices``.
        """
        x = self.conv1(x)

        outs = []
        for i, layer_name in enumerate(self.layers):
            layer = getattr(self, layer_name)
            x = layer(x)
            if i in self.out_indices:
                outs.append(x)

        return tuple(outs)

    def train(self, mode=True):
        """Convert the model into training mode while keep normalization layer
        frozen."""
        super(MobileNetV2, self).train(mode)
        self._freeze_stages()
        if mode and self.norm_eval:
            for m in self.modules():
                # trick: eval have effect on BatchNorm only
                if isinstance(m, _BatchNorm):
                    m.eval()
| 7,599 | 37.383838 | 78 | py |
PseCo | PseCo-master/thirdparty/mmdetection/mmdet/models/backbones/swin.py | import warnings
from collections import OrderedDict
from copy import deepcopy
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.checkpoint as cp
from mmcv.cnn import build_norm_layer, constant_init, trunc_normal_init
from mmcv.cnn.bricks.transformer import FFN, build_dropout
from mmcv.runner import BaseModule, ModuleList, _load_checkpoint
from mmcv.utils import to_2tuple
from ...utils import get_root_logger
from ..builder import BACKBONES
from ..utils.ckpt_convert import swin_converter
from ..utils.transformer import PatchEmbed, PatchMerging
class WindowMSA(BaseModule):
    """Window based multi-head self-attention (W-MSA) module with relative
    position bias.

    Args:
        embed_dims (int): Number of input channels.
        num_heads (int): Number of attention heads.
        window_size (tuple[int]): The height and width of the window.
        qkv_bias (bool, optional): If True, add a learnable bias to q, k, v.
            Default: True.
        qk_scale (float | None, optional): Override default qk scale of
            head_dim ** -0.5 if set. Default: None.
        attn_drop_rate (float, optional): Dropout ratio of attention weight.
            Default: 0.0
        proj_drop_rate (float, optional): Dropout ratio of output. Default: 0.
        init_cfg (dict | None, optional): The Config for initialization.
            Default: None.
    """

    def __init__(self,
                 embed_dims,
                 num_heads,
                 window_size,
                 qkv_bias=True,
                 qk_scale=None,
                 attn_drop_rate=0.,
                 proj_drop_rate=0.,
                 init_cfg=None):
        super().__init__()
        self.embed_dims = embed_dims
        self.window_size = window_size  # Wh, Ww
        self.num_heads = num_heads
        head_embed_dims = embed_dims // num_heads
        self.scale = qk_scale or head_embed_dims**-0.5
        self.init_cfg = init_cfg

        # define a parameter table of relative position bias
        self.relative_position_bias_table = nn.Parameter(
            torch.zeros((2 * window_size[0] - 1) * (2 * window_size[1] - 1),
                        num_heads))  # 2*Wh-1 * 2*Ww-1, nH

        # Precompute the lookup indices into the bias table for every pair
        # of positions inside a window (registered as a buffer so it moves
        # with the module but is not trained).
        # About 2x faster than original impl
        Wh, Ww = self.window_size
        rel_index_coords = self.double_step_seq(2 * Ww - 1, Wh, 1, Ww)
        rel_position_index = rel_index_coords + rel_index_coords.T
        rel_position_index = rel_position_index.flip(1).contiguous()
        self.register_buffer('relative_position_index', rel_position_index)

        self.qkv = nn.Linear(embed_dims, embed_dims * 3, bias=qkv_bias)
        self.attn_drop = nn.Dropout(attn_drop_rate)
        self.proj = nn.Linear(embed_dims, embed_dims)
        self.proj_drop = nn.Dropout(proj_drop_rate)

        self.softmax = nn.Softmax(dim=-1)

    def init_weights(self):
        """Initialize the relative position bias table with trunc-normal."""
        trunc_normal_init(self.relative_position_bias_table, std=0.02)

    def forward(self, x, mask=None):
        """
        Args:
            x (tensor): input features with shape of (num_windows*B, N, C)
            mask (tensor | None, Optional): mask with shape of (num_windows,
                Wh*Ww, Wh*Ww), value should be between (-inf, 0].
        """
        B, N, C = x.shape
        # qkv: (3, B, num_heads, N, C // num_heads) after the permute.
        qkv = self.qkv(x).reshape(B, N, 3, self.num_heads,
                                  C // self.num_heads).permute(2, 0, 3, 1, 4)
        # make torchscript happy (cannot use tensor as tuple)
        q, k, v = qkv[0], qkv[1], qkv[2]

        q = q * self.scale
        attn = (q @ k.transpose(-2, -1))

        relative_position_bias = self.relative_position_bias_table[
            self.relative_position_index.view(-1)].view(
                self.window_size[0] * self.window_size[1],
                self.window_size[0] * self.window_size[1],
                -1)  # Wh*Ww,Wh*Ww,nH
        relative_position_bias = relative_position_bias.permute(
            2, 0, 1).contiguous()  # nH, Wh*Ww, Wh*Ww
        attn = attn + relative_position_bias.unsqueeze(0)

        if mask is not None:
            # Additive mask per window: masked pairs get large negative
            # values, so they vanish after softmax.
            nW = mask.shape[0]
            attn = attn.view(B // nW, nW, self.num_heads, N,
                             N) + mask.unsqueeze(1).unsqueeze(0)
            attn = attn.view(-1, self.num_heads, N, N)
        attn = self.softmax(attn)

        attn = self.attn_drop(attn)

        x = (attn @ v).transpose(1, 2).reshape(B, N, C)
        x = self.proj(x)
        x = self.proj_drop(x)
        return x

    @staticmethod
    def double_step_seq(step1, len1, step2, len2):
        """Build a 1 x (len1*len2) sequence by summing two arithmetic
        progressions — used to index the relative position bias table."""
        seq1 = torch.arange(0, step1 * len1, step1)
        seq2 = torch.arange(0, step2 * len2, step2)
        return (seq1[:, None] + seq2[None, :]).reshape(1, -1)
class ShiftWindowMSA(BaseModule):
    """Shifted Window Multihead Self-Attention Module.

    Args:
        embed_dims (int): Number of input channels.
        num_heads (int): Number of attention heads.
        window_size (int): The height and width of the window.
        shift_size (int, optional): The shift step of each window towards
            right-bottom. If zero, act as regular window-msa. Defaults to 0.
        qkv_bias (bool, optional): If True, add a learnable bias to q, k, v.
            Default: True
        qk_scale (float | None, optional): Override default qk scale of
            head_dim ** -0.5 if set. Defaults: None.
        attn_drop_rate (float, optional): Dropout ratio of attention weight.
            Defaults: 0.
        proj_drop_rate (float, optional): Dropout ratio of output.
            Defaults: 0.
        dropout_layer (dict, optional): The dropout_layer used before output.
            Defaults: dict(type='DropPath', drop_prob=0.).
        init_cfg (dict, optional): The extra config for initialization.
            Default: None.
    """

    def __init__(self,
                 embed_dims,
                 num_heads,
                 window_size,
                 shift_size=0,
                 qkv_bias=True,
                 qk_scale=None,
                 attn_drop_rate=0,
                 proj_drop_rate=0,
                 dropout_layer=dict(type='DropPath', drop_prob=0.),
                 init_cfg=None):
        super().__init__(init_cfg)

        self.window_size = window_size
        self.shift_size = shift_size
        assert 0 <= self.shift_size < self.window_size

        self.w_msa = WindowMSA(
            embed_dims=embed_dims,
            num_heads=num_heads,
            window_size=to_2tuple(window_size),
            qkv_bias=qkv_bias,
            qk_scale=qk_scale,
            attn_drop_rate=attn_drop_rate,
            proj_drop_rate=proj_drop_rate,
            init_cfg=None)

        self.drop = build_dropout(dropout_layer)

    def forward(self, query, hw_shape):
        """Partition ``query`` into (optionally shifted) windows, run W-MSA
        inside each window, then merge windows back and undo the shift.

        Args:
            query (Tensor): Input of shape (B, H*W, C).
            hw_shape (tuple[int]): Spatial shape (H, W) of ``query``.

        Returns:
            Tensor: Output of shape (B, H*W, C).
        """
        B, L, C = query.shape
        H, W = hw_shape
        assert L == H * W, 'input feature has wrong size'
        query = query.view(B, H, W, C)

        # pad feature maps to multiples of window size
        pad_r = (self.window_size - W % self.window_size) % self.window_size
        pad_b = (self.window_size - H % self.window_size) % self.window_size
        query = F.pad(query, (0, 0, 0, pad_r, 0, pad_b))
        H_pad, W_pad = query.shape[1], query.shape[2]

        # cyclic shift
        if self.shift_size > 0:
            shifted_query = torch.roll(
                query,
                shifts=(-self.shift_size, -self.shift_size),
                dims=(1, 2))

            # calculate attention mask for SW-MSA: regions that were
            # wrapped around by the roll must not attend to each other, so
            # each region gets a distinct id and differing ids are masked.
            img_mask = torch.zeros((1, H_pad, W_pad, 1), device=query.device)
            h_slices = (slice(0, -self.window_size),
                        slice(-self.window_size,
                              -self.shift_size), slice(-self.shift_size, None))
            w_slices = (slice(0, -self.window_size),
                        slice(-self.window_size,
                              -self.shift_size), slice(-self.shift_size, None))
            cnt = 0
            for h in h_slices:
                for w in w_slices:
                    img_mask[:, h, w, :] = cnt
                    cnt += 1

            # nW, window_size, window_size, 1
            mask_windows = self.window_partition(img_mask)
            mask_windows = mask_windows.view(
                -1, self.window_size * self.window_size)
            attn_mask = mask_windows.unsqueeze(1) - mask_windows.unsqueeze(2)
            # -100.0 is an additive pre-softmax penalty that effectively
            # zeroes the masked attention weights.
            attn_mask = attn_mask.masked_fill(attn_mask != 0,
                                              float(-100.0)).masked_fill(
                                                  attn_mask == 0, float(0.0))
        else:
            shifted_query = query
            attn_mask = None

        # nW*B, window_size, window_size, C
        query_windows = self.window_partition(shifted_query)
        # nW*B, window_size*window_size, C
        query_windows = query_windows.view(-1, self.window_size**2, C)

        # W-MSA/SW-MSA (nW*B, window_size*window_size, C)
        attn_windows = self.w_msa(query_windows, mask=attn_mask)

        # merge windows
        attn_windows = attn_windows.view(-1, self.window_size,
                                         self.window_size, C)

        # B H' W' C
        shifted_x = self.window_reverse(attn_windows, H_pad, W_pad)
        # reverse cyclic shift
        if self.shift_size > 0:
            x = torch.roll(
                shifted_x,
                shifts=(self.shift_size, self.shift_size),
                dims=(1, 2))
        else:
            x = shifted_x

        # Strip the padding added above.
        if pad_r > 0 or pad_b:
            x = x[:, :H, :W, :].contiguous()

        x = x.view(B, H * W, C)

        x = self.drop(x)
        return x

    def window_reverse(self, windows, H, W):
        """
        Args:
            windows: (num_windows*B, window_size, window_size, C)
            H (int): Height of image
            W (int): Width of image
        Returns:
            x: (B, H, W, C)
        """
        window_size = self.window_size
        B = int(windows.shape[0] / (H * W / window_size / window_size))
        x = windows.view(B, H // window_size, W // window_size, window_size,
                         window_size, -1)
        x = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(B, H, W, -1)
        return x

    def window_partition(self, x):
        """
        Args:
            x: (B, H, W, C)
        Returns:
            windows: (num_windows*B, window_size, window_size, C)
        """
        B, H, W, C = x.shape
        window_size = self.window_size
        x = x.view(B, H // window_size, window_size, W // window_size,
                   window_size, C)
        windows = x.permute(0, 1, 3, 2, 4, 5).contiguous()
        windows = windows.view(-1, window_size, window_size, C)
        return windows
class SwinBlock(BaseModule):
    """One Swin Transformer block: (shifted-)window attention followed by an
    FFN, each with a pre-norm residual connection.

    Args:
        embed_dims (int): The feature dimension.
        num_heads (int): Parallel attention heads.
        feedforward_channels (int): The hidden dimension for FFNs.
        window_size (int, optional): The local window scale. Default: 7.
        shift (bool, optional): whether to shift window or not. Default False.
        qkv_bias (bool, optional): enable bias for qkv if True. Default: True.
        qk_scale (float | None, optional): Override default qk scale of
            head_dim ** -0.5 if set. Default: None.
        drop_rate (float, optional): Dropout rate. Default: 0.
        attn_drop_rate (float, optional): Attention dropout rate. Default: 0.
        drop_path_rate (float, optional): Stochastic depth rate. Default: 0.
        act_cfg (dict, optional): The config dict of activation function.
            Default: dict(type='GELU').
        norm_cfg (dict, optional): The config dict of normalization.
            Default: dict(type='LN').
        with_cp (bool, optional): Use checkpoint or not. Using checkpoint
            will save some memory while slowing down the training speed.
            Default: False.
        init_cfg (dict | list | None, optional): The init config.
            Default: None.
    """

    def __init__(self,
                 embed_dims,
                 num_heads,
                 feedforward_channels,
                 window_size=7,
                 shift=False,
                 qkv_bias=True,
                 qk_scale=None,
                 drop_rate=0.,
                 attn_drop_rate=0.,
                 drop_path_rate=0.,
                 act_cfg=dict(type='GELU'),
                 norm_cfg=dict(type='LN'),
                 with_cp=False,
                 init_cfg=None):

        super(SwinBlock, self).__init__()

        self.init_cfg = init_cfg
        self.with_cp = with_cp

        self.norm1 = build_norm_layer(norm_cfg, embed_dims)[1]
        self.attn = ShiftWindowMSA(
            embed_dims=embed_dims,
            num_heads=num_heads,
            window_size=window_size,
            # Shifted blocks move windows by half the window size.
            shift_size=window_size // 2 if shift else 0,
            qkv_bias=qkv_bias,
            qk_scale=qk_scale,
            attn_drop_rate=attn_drop_rate,
            proj_drop_rate=drop_rate,
            dropout_layer=dict(type='DropPath', drop_prob=drop_path_rate),
            init_cfg=None)

        self.norm2 = build_norm_layer(norm_cfg, embed_dims)[1]
        self.ffn = FFN(
            embed_dims=embed_dims,
            feedforward_channels=feedforward_channels,
            num_fcs=2,
            ffn_drop=drop_rate,
            dropout_layer=dict(type='DropPath', drop_prob=drop_path_rate),
            act_cfg=act_cfg,
            add_identity=True,
            init_cfg=None)

    def forward(self, x, hw_shape):
        """Apply pre-norm attention and FFN with residual connections.

        Args:
            x (Tensor): Input of shape (B, H*W, C).
            hw_shape (tuple[int]): Spatial shape (H, W).

        Returns:
            Tensor: Output of shape (B, H*W, C).
        """

        def _inner_forward(x):
            identity = x
            x = self.norm1(x)
            x = self.attn(x, hw_shape)

            x = x + identity

            identity = x
            x = self.norm2(x)
            x = self.ffn(x, identity=identity)

            return x

        # Gradient checkpointing trades compute for memory during training.
        if self.with_cp and x.requires_grad:
            x = cp.checkpoint(_inner_forward, x)
        else:
            x = _inner_forward(x)

        return x
class SwinBlockSequence(BaseModule):
    """Implements one stage in Swin Transformer.

    Args:
        embed_dims (int): The feature dimension.
        num_heads (int): Parallel attention heads.
        feedforward_channels (int): The hidden dimension for FFNs.
        depth (int): The number of blocks in this stage.
        window_size (int, optional): The local window scale. Default: 7.
        qkv_bias (bool, optional): enable bias for qkv if True. Default: True.
        qk_scale (float | None, optional): Override default qk scale of
            head_dim ** -0.5 if set. Default: None.
        drop_rate (float, optional): Dropout rate. Default: 0.
        attn_drop_rate (float, optional): Attention dropout rate. Default: 0.
        drop_path_rate (float | list[float], optional): Stochastic depth
            rate. Default: 0.
        downsample (BaseModule | None, optional): The downsample operation
            module. Default: None.
        act_cfg (dict, optional): The config dict of activation function.
            Default: dict(type='GELU').
        norm_cfg (dict, optional): The config dict of normalization.
            Default: dict(type='LN').
        with_cp (bool, optional): Use checkpoint or not. Using checkpoint
            will save some memory while slowing down the training speed.
            Default: False.
        init_cfg (dict | list | None, optional): The init config.
            Default: None.
    """

    def __init__(self,
                 embed_dims,
                 num_heads,
                 feedforward_channels,
                 depth,
                 window_size=7,
                 qkv_bias=True,
                 qk_scale=None,
                 drop_rate=0.,
                 attn_drop_rate=0.,
                 drop_path_rate=0.,
                 downsample=None,
                 act_cfg=dict(type='GELU'),
                 norm_cfg=dict(type='LN'),
                 with_cp=False,
                 init_cfg=None):
        super().__init__(init_cfg=init_cfg)

        # A scalar drop_path_rate is broadcast to every block; a list must
        # provide one rate per block.
        if isinstance(drop_path_rate, list):
            drop_path_rates = drop_path_rate
            assert len(drop_path_rates) == depth
        else:
            drop_path_rates = [deepcopy(drop_path_rate) for _ in range(depth)]

        self.blocks = ModuleList()
        for i in range(depth):
            block = SwinBlock(
                embed_dims=embed_dims,
                num_heads=num_heads,
                feedforward_channels=feedforward_channels,
                window_size=window_size,
                # Alternate regular (even) and shifted (odd) window blocks.
                shift=False if i % 2 == 0 else True,
                qkv_bias=qkv_bias,
                qk_scale=qk_scale,
                drop_rate=drop_rate,
                attn_drop_rate=attn_drop_rate,
                drop_path_rate=drop_path_rates[i],
                act_cfg=act_cfg,
                norm_cfg=norm_cfg,
                with_cp=with_cp,
                init_cfg=None)
            self.blocks.append(block)

        self.downsample = downsample

    def forward(self, x, hw_shape):
        """Run all blocks, then optionally downsample.

        Returns:
            tuple: ``(x_down, down_hw_shape, x, hw_shape)`` — the (possibly
            downsampled) output for the next stage and the pre-downsample
            output of this stage with their spatial shapes.
        """
        for block in self.blocks:
            x = block(x, hw_shape)

        if self.downsample:
            x_down, down_hw_shape = self.downsample(x, hw_shape)
            return x_down, down_hw_shape, x, hw_shape
        else:
            return x, hw_shape, x, hw_shape
@BACKBONES.register_module()
class SwinTransformer(BaseModule):
    """Swin Transformer backbone.

    A PyTorch implementation of `Swin Transformer: Hierarchical Vision
    Transformer using Shifted Windows <https://arxiv.org/abs/2103.14030>`_.

    Inspired by https://github.com/microsoft/Swin-Transformer

    Args:
        pretrain_img_size (int | tuple[int]): The size of input image when
            pretrain. Defaults: 224.
        in_channels (int): The num of input channels.
            Defaults: 3.
        embed_dims (int): The feature dimension. Default: 96.
        patch_size (int | tuple[int]): Patch size. Default: 4.
        window_size (int): Window size. Default: 7.
        mlp_ratio (int): Ratio of mlp hidden dim to embedding dim.
            Default: 4.
        depths (tuple[int]): Depths of each Swin Transformer stage.
            Default: (2, 2, 6, 2).
        num_heads (tuple[int]): Parallel attention heads of each Swin
            Transformer stage. Default: (3, 6, 12, 24).
        strides (tuple[int]): The patch merging or patch embedding stride of
            each Swin Transformer stage. (In swin, we set kernel size equal to
            stride.) Default: (4, 2, 2, 2).
        out_indices (tuple[int]): Output from which stages.
            Default: (0, 1, 2, 3).
        qkv_bias (bool, optional): If True, add a learnable bias to query, key,
            value. Default: True
        qk_scale (float | None, optional): Override default qk scale of
            head_dim ** -0.5 if set. Default: None.
        patch_norm (bool): If add a norm layer for patch embed and patch
            merging. Default: True.
        drop_rate (float): Dropout rate. Defaults: 0.
        attn_drop_rate (float): Attention dropout rate. Default: 0.
        drop_path_rate (float): Stochastic depth rate. Defaults: 0.1.
        use_abs_pos_embed (bool): If True, add absolute position embedding to
            the patch embedding. Defaults: False.
        act_cfg (dict): Config dict for activation layer.
            Default: dict(type='GELU').
        norm_cfg (dict): Config dict for normalization layer at
            output of backbone. Defaults: dict(type='LN').
        with_cp (bool, optional): Use checkpoint or not. Using checkpoint
            will save some memory while slowing down the training speed.
            Default: False.
        pretrained (str, optional): model pretrained path. Default: None.
        convert_weights (bool): The flag indicates whether the
            pre-trained model is from the original repo. We may need
            to convert some keys to make it compatible.
            Default: False.
        frozen_stages (int): Stages to be frozen (stop grad and set eval mode).
            -1 means not freezing any parameters.
        init_cfg (dict, optional): The Config for initialization.
            Defaults to None.
    """
    def __init__(self,
                 pretrain_img_size=224,
                 in_channels=3,
                 embed_dims=96,
                 patch_size=4,
                 window_size=7,
                 mlp_ratio=4,
                 depths=(2, 2, 6, 2),
                 num_heads=(3, 6, 12, 24),
                 strides=(4, 2, 2, 2),
                 out_indices=(0, 1, 2, 3),
                 qkv_bias=True,
                 qk_scale=None,
                 patch_norm=True,
                 drop_rate=0.,
                 attn_drop_rate=0.,
                 drop_path_rate=0.1,
                 use_abs_pos_embed=False,
                 act_cfg=dict(type='GELU'),
                 norm_cfg=dict(type='LN'),
                 with_cp=False,
                 pretrained=None,
                 convert_weights=False,
                 frozen_stages=-1,
                 init_cfg=None):
        self.convert_weights = convert_weights
        self.frozen_stages = frozen_stages
        # Normalize pretrain_img_size to an (H, W) pair.
        if isinstance(pretrain_img_size, int):
            pretrain_img_size = to_2tuple(pretrain_img_size)
        elif isinstance(pretrain_img_size, tuple):
            if len(pretrain_img_size) == 1:
                pretrain_img_size = to_2tuple(pretrain_img_size[0])
            assert len(pretrain_img_size) == 2, \
                f'The size of image should have length 1 or 2, ' \
                f'but got {len(pretrain_img_size)}'
        assert not (init_cfg and pretrained), \
            'init_cfg and pretrained cannot be specified at the same time'
        if isinstance(pretrained, str):
            warnings.warn('DeprecationWarning: pretrained is deprecated, '
                          'please use "init_cfg" instead')
            self.init_cfg = dict(type='Pretrained', checkpoint=pretrained)
        elif pretrained is None:
            self.init_cfg = init_cfg
        else:
            raise TypeError('pretrained must be a str or None')
        # NOTE(review): self.init_cfg is assigned above, then the parent is
        # given the *original* init_cfg argument; when `pretrained` is a str
        # the parent receives None while self.init_cfg holds the Pretrained
        # cfg — presumably intentional, but worth confirming against
        # BaseModule's handling of init_cfg.
        super(SwinTransformer, self).__init__(init_cfg=init_cfg)
        num_layers = len(depths)
        self.out_indices = out_indices
        self.use_abs_pos_embed = use_abs_pos_embed
        assert strides[0] == patch_size, 'Use non-overlapping patch embed.'
        self.patch_embed = PatchEmbed(
            in_channels=in_channels,
            embed_dims=embed_dims,
            conv_type='Conv2d',
            kernel_size=patch_size,
            stride=strides[0],
            norm_cfg=norm_cfg if patch_norm else None,
            init_cfg=None)
        if self.use_abs_pos_embed:
            # Learnable absolute position embedding, one vector per patch of
            # the pretrain-sized image; shape (1, num_patches, embed_dims).
            patch_row = pretrain_img_size[0] // patch_size
            patch_col = pretrain_img_size[1] // patch_size
            num_patches = patch_row * patch_col
            self.absolute_pos_embed = nn.Parameter(
                torch.zeros((1, num_patches, embed_dims)))
        self.drop_after_pos = nn.Dropout(p=drop_rate)
        # set stochastic depth decay rule: the drop-path rate grows linearly
        # from 0 to drop_path_rate over all blocks of all stages.
        total_depth = sum(depths)
        dpr = [
            x.item() for x in torch.linspace(0, drop_path_rate, total_depth)
        ]
        self.stages = ModuleList()
        in_channels = embed_dims
        for i in range(num_layers):
            # Every stage except the last ends with a PatchMerging that
            # halves the resolution and doubles the channel count.
            if i < num_layers - 1:
                downsample = PatchMerging(
                    in_channels=in_channels,
                    out_channels=2 * in_channels,
                    stride=strides[i + 1],
                    norm_cfg=norm_cfg if patch_norm else None,
                    init_cfg=None)
            else:
                downsample = None
            stage = SwinBlockSequence(
                embed_dims=in_channels,
                num_heads=num_heads[i],
                feedforward_channels=mlp_ratio * in_channels,
                depth=depths[i],
                window_size=window_size,
                qkv_bias=qkv_bias,
                qk_scale=qk_scale,
                drop_rate=drop_rate,
                attn_drop_rate=attn_drop_rate,
                drop_path_rate=dpr[sum(depths[:i]):sum(depths[:i + 1])],
                downsample=downsample,
                act_cfg=act_cfg,
                norm_cfg=norm_cfg,
                with_cp=with_cp,
                init_cfg=None)
            self.stages.append(stage)
            if downsample:
                in_channels = downsample.out_channels
        self.num_features = [int(embed_dims * 2**i) for i in range(num_layers)]
        # Add a norm layer for each output stage requested in out_indices.
        for i in out_indices:
            layer = build_norm_layer(norm_cfg, self.num_features[i])[1]
            layer_name = f'norm{i}'
            self.add_module(layer_name, layer)
    def train(self, mode=True):
        """Convert the model into training mode while keeping frozen stages
        in eval mode."""
        super(SwinTransformer, self).train(mode)
        self._freeze_stages()
    def _freeze_stages(self):
        # Freeze the patch embedding (and optional position embedding) plus
        # the first `frozen_stages` stages and their output norms.
        if self.frozen_stages >= 0:
            self.patch_embed.eval()
            for param in self.patch_embed.parameters():
                param.requires_grad = False
            if self.use_abs_pos_embed:
                self.absolute_pos_embed.requires_grad = False
            self.drop_after_pos.eval()
        for i in range(1, self.frozen_stages + 1):
            if (i - 1) in self.out_indices:
                norm_layer = getattr(self, f'norm{i-1}')
                norm_layer.eval()
                for param in norm_layer.parameters():
                    param.requires_grad = False
            m = self.stages[i - 1]
            m.eval()
            for param in m.parameters():
                param.requires_grad = False
    def init_weights(self):
        """Initialize weights from scratch or from a pre-trained checkpoint
        named in ``self.init_cfg``."""
        logger = get_root_logger()
        if self.init_cfg is None:
            # No checkpoint: truncated-normal init for linear layers and the
            # position embedding, constant init for LayerNorm.
            logger.warn(f'No pre-trained weights for '
                        f'{self.__class__.__name__}, '
                        f'training start from scratch')
            if self.use_abs_pos_embed:
                trunc_normal_init(self.absolute_pos_embed, std=0.02)
            for m in self.modules():
                if isinstance(m, nn.Linear):
                    trunc_normal_init(m.weight, std=.02)
                    if m.bias is not None:
                        constant_init(m.bias, 0)
                elif isinstance(m, nn.LayerNorm):
                    constant_init(m.bias, 0)
                    constant_init(m.weight, 1.0)
        else:
            assert 'checkpoint' in self.init_cfg, f'Only support ' \
                                                  f'specify `Pretrained` in ' \
                                                  f'`init_cfg` in ' \
                                                  f'{self.__class__.__name__} '
            ckpt = _load_checkpoint(
                self.init_cfg.checkpoint, logger=logger, map_location='cpu')
            # Checkpoints may nest the weights under 'state_dict' or 'model'.
            if 'state_dict' in ckpt:
                _state_dict = ckpt['state_dict']
            elif 'model' in ckpt:
                _state_dict = ckpt['model']
            else:
                _state_dict = ckpt
            if self.convert_weights:
                # supported loading weight from original repo,
                _state_dict = swin_converter(_state_dict)
            state_dict = OrderedDict()
            # Keep only backbone weights, dropping the 'backbone.' prefix
            # (len('backbone.') == 9).
            for k, v in _state_dict.items():
                if k.startswith('backbone.'):
                    state_dict[k[9:]] = v
            # strip prefix of state_dict
            if list(state_dict.keys())[0].startswith('module.'):
                state_dict = {k[7:]: v for k, v in state_dict.items()}
            # reshape absolute position embedding
            # NOTE(review): self.absolute_pos_embed is created above with 3
            # dims (1, num_patches, embed_dims), but 4 values are unpacked
            # from its size() here — this branch looks like it would raise if
            # ever reached with use_abs_pos_embed=True; confirm upstream.
            if state_dict.get('absolute_pos_embed') is not None:
                absolute_pos_embed = state_dict['absolute_pos_embed']
                N1, L, C1 = absolute_pos_embed.size()
                N2, C2, H, W = self.absolute_pos_embed.size()
                if N1 != N2 or C1 != C2 or L != H * W:
                    logger.warning('Error in loading absolute_pos_embed, pass')
                else:
                    state_dict['absolute_pos_embed'] = absolute_pos_embed.view(
                        N2, H, W, C2).permute(0, 3, 1, 2).contiguous()
            # interpolate position bias table if needed (window size of the
            # checkpoint may differ from the current model's).
            relative_position_bias_table_keys = [
                k for k in state_dict.keys()
                if 'relative_position_bias_table' in k
            ]
            for table_key in relative_position_bias_table_keys:
                table_pretrained = state_dict[table_key]
                table_current = self.state_dict()[table_key]
                L1, nH1 = table_pretrained.size()
                L2, nH2 = table_current.size()
                if nH1 != nH2:
                    logger.warning(f'Error in loading {table_key}, pass')
                elif L1 != L2:
                    # Tables are (2*Ws-1)**2 long; resize as a square map.
                    S1 = int(L1**0.5)
                    S2 = int(L2**0.5)
                    table_pretrained_resized = F.interpolate(
                        table_pretrained.permute(1, 0).reshape(1, nH1, S1, S1),
                        size=(S2, S2),
                        mode='bicubic')
                    state_dict[table_key] = table_pretrained_resized.view(
                        nH2, L2).permute(1, 0).contiguous()
            # load state_dict (strict=False: tolerate missing/extra keys)
            self.load_state_dict(state_dict, False)
    def forward(self, x):
        x, hw_shape = self.patch_embed(x)
        if self.use_abs_pos_embed:
            x = x + self.absolute_pos_embed
        x = self.drop_after_pos(x)
        outs = []
        # Each stage yields the (possibly downsampled) features for the next
        # stage plus its own pre-downsample output for the FPN.
        for i, stage in enumerate(self.stages):
            x, hw_shape, out, out_hw_shape = stage(x, hw_shape)
            if i in self.out_indices:
                norm_layer = getattr(self, f'norm{i}')
                out = norm_layer(out)
                # (B, L, C) -> (B, C, H, W)
                out = out.view(-1, *out_hw_shape,
                               self.num_features[i]).permute(0, 3, 1,
                                                             2).contiguous()
                outs.append(out)
        return outs
# ---- thirdparty/mmdetection/mmdet/models/backbones/trident_resnet.py ----
# Copyright (c) OpenMMLab. All rights reserved.
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.checkpoint as cp
from mmcv.cnn import build_conv_layer, build_norm_layer
from mmcv.runner import BaseModule
from torch.nn.modules.utils import _pair
from mmdet.models.backbones.resnet import Bottleneck, ResNet
from mmdet.models.builder import BACKBONES
class TridentConv(BaseModule):
    """Trident Convolution Module.

    A convolution whose single weight tensor is shared across several
    parallel branches, each branch applying its own dilation (with matching
    padding so outputs stay spatially aligned).

    Args:
        in_channels (int): Number of channels in input.
        out_channels (int): Number of channels in output.
        kernel_size (int): Size of convolution kernel.
        stride (int, optional): Convolution stride. Default: 1.
        trident_dilations (tuple[int, int, int], optional): Dilations of
            different trident branch. Default: (1, 2, 3).
        test_branch_idx (int, optional): In inference, all 3 branches will
            be used if `test_branch_idx==-1`, otherwise only branch with
            index `test_branch_idx` will be used. Default: 1.
        bias (bool, optional): Whether to use bias in convolution or not.
            Default: False.
        init_cfg (dict or list[dict], optional): Initialization config dict.
            Default: None
    """

    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size,
                 stride=1,
                 trident_dilations=(1, 2, 3),
                 test_branch_idx=1,
                 bias=False,
                 init_cfg=None):
        super(TridentConv, self).__init__(init_cfg)
        self.num_branch = len(trident_dilations)
        self.with_bias = bias
        self.test_branch_idx = test_branch_idx
        self.stride = _pair(stride)
        self.kernel_size = _pair(kernel_size)
        # Padding equals dilation per branch, keeping output sizes equal.
        self.paddings = _pair(trident_dilations)
        self.dilations = trident_dilations
        self.in_channels = in_channels
        self.out_channels = out_channels
        # One weight tensor shared by every branch.
        self.weight = nn.Parameter(
            torch.Tensor(out_channels, in_channels, *self.kernel_size))
        self.bias = nn.Parameter(torch.Tensor(out_channels)) if bias else None

    def extra_repr(self):
        fields = [
            ('in_channels', self.in_channels),
            ('out_channels', self.out_channels),
            ('kernel_size', self.kernel_size),
            ('num_branch', self.num_branch),
            ('test_branch_idx', self.test_branch_idx),
            ('stride', self.stride),
            ('paddings', self.paddings),
            ('dilations', self.dilations),
            ('bias', self.bias),
        ]
        return ', '.join(f'{key}={val}' for key, val in fields)

    def forward(self, inputs):
        if self.training or self.test_branch_idx == -1:
            # One output per branch, each with its own dilation/padding.
            outputs = []
            for branch_input, dilation, padding in zip(inputs, self.dilations,
                                                       self.paddings):
                outputs.append(
                    F.conv2d(branch_input, self.weight, self.bias,
                             self.stride, padding, dilation))
        else:
            # Single-branch inference: exactly one input is expected.
            assert len(inputs) == 1
            outputs = [
                F.conv2d(inputs[0], self.weight, self.bias, self.stride,
                         self.paddings[self.test_branch_idx],
                         self.dilations[self.test_branch_idx])
            ]
        return outputs
# Since TridentNet is defined over ResNet50 and ResNet101, here we
# only support TridentBottleneckBlock.
class TridentBottleneck(Bottleneck):
    """BottleBlock for TridentResNet.

    Replaces the parent's ``conv2`` with a :class:`TridentConv` so the 3x3
    conv runs once per trident branch with branch-specific dilations, while
    ``conv1``/``conv3`` (and their norms) are applied per-branch in a loop.

    Args:
        trident_dilations (tuple[int, int, int]): Dilations of different
            trident branch.
        test_branch_idx (int): In inference, all 3 branches will be used
            if `test_branch_idx==-1`, otherwise only branch with index
            `test_branch_idx` will be used.
        concat_output (bool): Whether to concat the output list to a Tensor.
            `True` only in the last Block.
    """
    def __init__(self, trident_dilations, test_branch_idx, concat_output,
                 **kwargs):
        super(TridentBottleneck, self).__init__(**kwargs)
        self.trident_dilations = trident_dilations
        self.num_branch = len(trident_dilations)
        self.concat_output = concat_output
        self.test_branch_idx = test_branch_idx
        # Overwrite the conv2 created by the parent Bottleneck with the
        # weight-shared multi-dilation trident convolution.
        self.conv2 = TridentConv(
            self.planes,
            self.planes,
            kernel_size=3,
            stride=self.conv2_stride,
            bias=False,
            trident_dilations=self.trident_dilations,
            test_branch_idx=test_branch_idx,
            init_cfg=dict(
                type='Kaiming',
                distribution='uniform',
                mode='fan_in',
                override=dict(name='conv2')))
    def forward(self, x):
        def _inner_forward(x):
            # In training (or test_branch_idx == -1) every branch is active;
            # otherwise only one branch is run.
            num_branch = (
                self.num_branch
                if self.training or self.test_branch_idx == -1 else 1)
            identity = x
            # A plain tensor input (first trident block) is replicated into
            # one copy per branch; afterwards x is already a list/tuple.
            if not isinstance(x, list):
                x = (x, ) * num_branch
                identity = x
                if self.downsample is not None:
                    identity = [self.downsample(b) for b in x]
            # conv1 / norm1 / relu applied independently per branch.
            out = [self.conv1(b) for b in x]
            out = [self.norm1(b) for b in out]
            out = [self.relu(b) for b in out]
            if self.with_plugins:
                for k in range(len(out)):
                    out[k] = self.forward_plugin(out[k],
                                                 self.after_conv1_plugin_names)
            # TridentConv consumes/returns the whole list of branches.
            out = self.conv2(out)
            out = [self.norm2(b) for b in out]
            out = [self.relu(b) for b in out]
            if self.with_plugins:
                for k in range(len(out)):
                    out[k] = self.forward_plugin(out[k],
                                                 self.after_conv2_plugin_names)
            out = [self.conv3(b) for b in out]
            out = [self.norm3(b) for b in out]
            if self.with_plugins:
                for k in range(len(out)):
                    out[k] = self.forward_plugin(out[k],
                                                 self.after_conv3_plugin_names)
            # Per-branch residual connection.
            out = [
                out_b + identity_b for out_b, identity_b in zip(out, identity)
            ]
            return out
        # NOTE(review): when x is a list (non-first block), x.requires_grad
        # would raise AttributeError, so the checkpoint path appears to rely
        # on x being a Tensor here — confirm with_cp usage in TridentNet.
        if self.with_cp and x.requires_grad:
            out = cp.checkpoint(_inner_forward, x)
        else:
            out = _inner_forward(x)
        out = [self.relu(b) for b in out]
        # Last block of the stage stacks branches along the batch dimension.
        if self.concat_output:
            out = torch.cat(out, dim=0)
        return out
def make_trident_res_layer(block,
                           inplanes,
                           planes,
                           num_blocks,
                           stride=1,
                           trident_dilations=(1, 2, 3),
                           style='pytorch',
                           with_cp=False,
                           conv_cfg=None,
                           norm_cfg=dict(type='BN'),
                           dcn=None,
                           plugins=None,
                           test_branch_idx=-1):
    """Build Trident Res Layers.

    The first block carries the stride and (if needed) a 1x1-conv + norm
    downsample for the identity path; only the last block concatenates the
    branch outputs into a single tensor.
    """
    # Identity-path projection is needed when spatial or channel shape of
    # the input differs from the block output.
    downsample = None
    if stride != 1 or inplanes != planes * block.expansion:
        downsample = nn.Sequential(
            build_conv_layer(
                conv_cfg,
                inplanes,
                planes * block.expansion,
                kernel_size=1,
                stride=stride,
                bias=False),
            build_norm_layer(norm_cfg, planes * block.expansion)[1])
    layers = []
    for block_idx in range(num_blocks):
        is_first = block_idx == 0
        layers.append(
            block(
                inplanes=inplanes,
                planes=planes,
                stride=stride if is_first else 1,
                trident_dilations=trident_dilations,
                downsample=downsample if is_first else None,
                style=style,
                with_cp=with_cp,
                conv_cfg=conv_cfg,
                norm_cfg=norm_cfg,
                dcn=dcn,
                plugins=plugins,
                test_branch_idx=test_branch_idx,
                concat_output=block_idx == num_blocks - 1))
        inplanes = planes * block.expansion
    return nn.Sequential(*layers)
@BACKBONES.register_module()
class TridentResNet(ResNet):
    """The stem layer, stage 1 and stage 2 in Trident ResNet are identical to
    ResNet, while in stage 3, Trident BottleBlock is utilized to replace the
    normal BottleBlock to yield trident output. Different branch shares the
    convolution weight but uses different dilations to achieve multi-scale
    output.

                               / stage3(b0) \
    x - stem - stage1 - stage2 - stage3(b1) - output
                               \ stage3(b2) /

    Args:
        depth (int): Depth of resnet, from {50, 101, 152}.
        num_branch (int): Number of branches in TridentNet.
        test_branch_idx (int): In inference, all 3 branches will be used
            if `test_branch_idx==-1`, otherwise only branch with index
            `test_branch_idx` will be used.
        trident_dilations (tuple[int]): Dilations of different trident branch.
            len(trident_dilations) should be equal to num_branch.
    """ # noqa
    def __init__(self, depth, num_branch, test_branch_idx, trident_dilations,
                 **kwargs):
        assert num_branch == len(trident_dilations)
        assert depth in (50, 101, 152)
        # Build a vanilla 3-stage ResNet first, then swap out its last stage.
        super(TridentResNet, self).__init__(depth, **kwargs)
        assert self.num_stages == 3
        self.test_branch_idx = test_branch_idx
        self.num_branch = num_branch
        last_stage_idx = self.num_stages - 1
        stride = self.strides[last_stage_idx]
        dilation = trident_dilations
        dcn = self.dcn if self.stage_with_dcn[last_stage_idx] else None
        if self.plugins is not None:
            stage_plugins = self.make_stage_plugins(self.plugins,
                                                    last_stage_idx)
        else:
            stage_plugins = None
        planes = self.base_channels * 2**last_stage_idx
        # Rebuild the final stage with TridentBottleneck blocks; the input
        # channel count is the expansion of the previous stage's planes.
        res_layer = make_trident_res_layer(
            TridentBottleneck,
            inplanes=(self.block.expansion * self.base_channels *
                      2**(last_stage_idx - 1)),
            planes=planes,
            num_blocks=self.stage_blocks[last_stage_idx],
            stride=stride,
            trident_dilations=dilation,
            style=self.style,
            with_cp=self.with_cp,
            conv_cfg=self.conv_cfg,
            norm_cfg=self.norm_cfg,
            dcn=dcn,
            plugins=stage_plugins,
            test_branch_idx=self.test_branch_idx)
        # Replace the parent's last res layer in-place (same attribute name
        # so checkpoint keys and forward() keep working).
        layer_name = f'layer{last_stage_idx + 1}'
        self.__setattr__(layer_name, res_layer)
        self.res_layers.pop(last_stage_idx)
        self.res_layers.insert(last_stage_idx, layer_name)
        # Re-apply freezing: the parent froze the old layer, not this one.
        self._freeze_stages()
# ---- thirdparty/mmdetection/mmdet/models/backbones/detectors_resnext.py ----
# Copyright (c) OpenMMLab. All rights reserved.
import math
from mmcv.cnn import build_conv_layer, build_norm_layer
from ..builder import BACKBONES
from .detectors_resnet import Bottleneck as _Bottleneck
from .detectors_resnet import DetectoRS_ResNet
class Bottleneck(_Bottleneck):
    # ResNeXt-style bottleneck for DetectoRS: after the parent builds the
    # plain layers, conv1/conv2/conv3 and their norms are re-created here
    # with the grouped "width" so checkpoints and forward() stay compatible.
    expansion = 4
    def __init__(self,
                 inplanes,
                 planes,
                 groups=1,
                 base_width=4,
                 base_channels=64,
                 **kwargs):
        """Bottleneck block for ResNeXt.

        If style is "pytorch", the stride-two layer is the 3x3 conv layer, if
        it is "caffe", the stride-two layer is the first 1x1 conv layer.

        Args:
            inplanes (int): Input channels of the block.
            planes (int): Base channels; output is ``planes * expansion``.
            groups (int): Number of groups of the 3x3 grouped conv.
            base_width (int): Base width per group (ResNeXt widening factor).
            base_channels (int): Reference channel count used to scale width.
        """
        super(Bottleneck, self).__init__(inplanes, planes, **kwargs)
        # ResNeXt inner width: planes scaled by base_width/base_channels,
        # multiplied by the number of groups (plain planes when groups == 1).
        if groups == 1:
            width = self.planes
        else:
            width = math.floor(self.planes *
                               (base_width / base_channels)) * groups
        # Re-create norms with the grouped width (replaces parent's modules).
        self.norm1_name, norm1 = build_norm_layer(
            self.norm_cfg, width, postfix=1)
        self.norm2_name, norm2 = build_norm_layer(
            self.norm_cfg, width, postfix=2)
        self.norm3_name, norm3 = build_norm_layer(
            self.norm_cfg, self.planes * self.expansion, postfix=3)
        self.conv1 = build_conv_layer(
            self.conv_cfg,
            self.inplanes,
            width,
            kernel_size=1,
            stride=self.conv1_stride,
            bias=False)
        self.add_module(self.norm1_name, norm1)
        fallback_on_stride = False
        # NOTE(review): with_modulated_dcn is reset to False here even though
        # the parent may have set it — presumably because the SAC/DCN branch
        # below re-decides the conv type; confirm against DetectoRS paper code.
        self.with_modulated_dcn = False
        if self.with_dcn:
            fallback_on_stride = self.dcn.pop('fallback_on_stride', False)
        # conv2 priority: SAC (switchable atrous conv) > plain conv (no DCN
        # or DCN falling back on stride) > DCN.
        if self.with_sac:
            self.conv2 = build_conv_layer(
                self.sac,
                width,
                width,
                kernel_size=3,
                stride=self.conv2_stride,
                padding=self.dilation,
                dilation=self.dilation,
                groups=groups,
                bias=False)
        elif not self.with_dcn or fallback_on_stride:
            self.conv2 = build_conv_layer(
                self.conv_cfg,
                width,
                width,
                kernel_size=3,
                stride=self.conv2_stride,
                padding=self.dilation,
                dilation=self.dilation,
                groups=groups,
                bias=False)
        else:
            assert self.conv_cfg is None, 'conv_cfg must be None for DCN'
            self.conv2 = build_conv_layer(
                self.dcn,
                width,
                width,
                kernel_size=3,
                stride=self.conv2_stride,
                padding=self.dilation,
                dilation=self.dilation,
                groups=groups,
                bias=False)
        self.add_module(self.norm2_name, norm2)
        self.conv3 = build_conv_layer(
            self.conv_cfg,
            width,
            self.planes * self.expansion,
            kernel_size=1,
            bias=False)
        self.add_module(self.norm3_name, norm3)
@BACKBONES.register_module()
class DetectoRS_ResNeXt(DetectoRS_ResNet):
    """ResNeXt backbone for DetectoRS.

    Args:
        groups (int): The number of groups in ResNeXt.
        base_width (int): The base width of ResNeXt.
    """

    # Depth -> (block type, number of blocks per stage).
    arch_settings = {
        50: (Bottleneck, (3, 4, 6, 3)),
        101: (Bottleneck, (3, 4, 23, 3)),
        152: (Bottleneck, (3, 8, 36, 3))
    }

    def __init__(self, groups=1, base_width=4, **kwargs):
        # Group settings must be stored before the parent constructor runs,
        # since it builds the residual layers via ``make_res_layer``.
        self.groups = groups
        self.base_width = base_width
        super().__init__(**kwargs)

    def make_res_layer(self, **kwargs):
        """Build a res layer, injecting the ResNeXt group settings."""
        return super().make_res_layer(
            groups=self.groups,
            base_width=self.base_width,
            base_channels=self.base_channels,
            **kwargs)
# ---- thirdparty/mmdetection/mmdet/models/backbones/resnet.py ----
# Copyright (c) OpenMMLab. All rights reserved.
import warnings
import torch.nn as nn
import torch.utils.checkpoint as cp
from mmcv.cnn import build_conv_layer, build_norm_layer, build_plugin_layer
from mmcv.runner import BaseModule
from torch.nn.modules.batchnorm import _BatchNorm
from ..builder import BACKBONES
from ..utils import ResLayer
class BasicBlock(BaseModule):
    """Basic residual block for ResNet (two 3x3 convolutions).

    Computes ``relu(norm2(conv2(relu(norm1(conv1(x))))) + identity)`` where
    ``identity`` is ``downsample(x)`` when a downsample module is given,
    otherwise ``x`` itself.
    """

    # Output channels are ``planes * expansion``.
    expansion = 1

    def __init__(self,
                 inplanes,
                 planes,
                 stride=1,
                 dilation=1,
                 downsample=None,
                 style='pytorch',
                 with_cp=False,
                 conv_cfg=None,
                 norm_cfg=dict(type='BN'),
                 dcn=None,
                 plugins=None,
                 init_cfg=None):
        super(BasicBlock, self).__init__(init_cfg)
        # DCN and plugins are only supported by the Bottleneck variant.
        assert dcn is None, 'Not implemented yet.'
        assert plugins is None, 'Not implemented yet.'
        self.norm1_name, norm1 = build_norm_layer(norm_cfg, planes, postfix=1)
        self.norm2_name, norm2 = build_norm_layer(norm_cfg, planes, postfix=2)
        self.conv1 = build_conv_layer(
            conv_cfg,
            inplanes,
            planes,
            3,
            stride=stride,
            padding=dilation,
            dilation=dilation,
            bias=False)
        self.add_module(self.norm1_name, norm1)
        self.conv2 = build_conv_layer(
            conv_cfg, planes, planes, 3, padding=1, bias=False)
        self.add_module(self.norm2_name, norm2)
        self.relu = nn.ReLU(inplace=True)
        self.downsample = downsample
        self.stride = stride
        self.dilation = dilation
        self.with_cp = with_cp

    @property
    def norm1(self):
        """nn.Module: normalization layer after the first convolution layer"""
        return getattr(self, self.norm1_name)

    @property
    def norm2(self):
        """nn.Module: normalization layer after the second convolution layer"""
        return getattr(self, self.norm2_name)

    def forward(self, x):
        """Forward function."""

        def _residual_branch(inp):
            shortcut = inp
            feat = self.relu(self.norm1(self.conv1(inp)))
            feat = self.norm2(self.conv2(feat))
            if self.downsample is not None:
                shortcut = self.downsample(inp)
            feat += shortcut
            return feat

        # Gradient checkpointing trades compute for memory during training.
        if self.with_cp and x.requires_grad:
            out = cp.checkpoint(_residual_branch, x)
        else:
            out = _residual_branch(x)
        return self.relu(out)
class Bottleneck(BaseModule):
    """Bottleneck residual block (1x1 -> 3x3 -> 1x1 convs) for ResNet-50+.

    Optionally replaces the 3x3 conv with a DCN and inserts plugin layers
    after conv1/conv2/conv3.
    """

    # Output channels are ``planes * expansion``.
    expansion = 4

    def __init__(self,
                 inplanes,
                 planes,
                 stride=1,
                 dilation=1,
                 downsample=None,
                 style='pytorch',
                 with_cp=False,
                 conv_cfg=None,
                 norm_cfg=dict(type='BN'),
                 dcn=None,
                 plugins=None,
                 init_cfg=None):
        """Bottleneck block for ResNet.

        If style is "pytorch", the stride-two layer is the 3x3 conv layer, if
        it is "caffe", the stride-two layer is the first 1x1 conv layer.
        """
        super(Bottleneck, self).__init__(init_cfg)
        assert style in ['pytorch', 'caffe']
        assert dcn is None or isinstance(dcn, dict)
        assert plugins is None or isinstance(plugins, list)
        if plugins is not None:
            allowed_position = ['after_conv1', 'after_conv2', 'after_conv3']
            assert all(p['position'] in allowed_position for p in plugins)

        self.inplanes = inplanes
        self.planes = planes
        self.stride = stride
        self.dilation = dilation
        self.style = style
        self.with_cp = with_cp
        self.conv_cfg = conv_cfg
        self.norm_cfg = norm_cfg
        self.dcn = dcn
        self.with_dcn = dcn is not None
        self.plugins = plugins
        self.with_plugins = plugins is not None

        if self.with_plugins:
            # collect plugins for conv1/conv2/conv3
            self.after_conv1_plugins = [
                plugin['cfg'] for plugin in plugins
                if plugin['position'] == 'after_conv1'
            ]
            self.after_conv2_plugins = [
                plugin['cfg'] for plugin in plugins
                if plugin['position'] == 'after_conv2'
            ]
            self.after_conv3_plugins = [
                plugin['cfg'] for plugin in plugins
                if plugin['position'] == 'after_conv3'
            ]

        # "pytorch" style strides in the 3x3 conv; "caffe" in the first 1x1.
        if self.style == 'pytorch':
            self.conv1_stride = 1
            self.conv2_stride = stride
        else:
            self.conv1_stride = stride
            self.conv2_stride = 1

        self.norm1_name, norm1 = build_norm_layer(norm_cfg, planes, postfix=1)
        self.norm2_name, norm2 = build_norm_layer(norm_cfg, planes, postfix=2)
        self.norm3_name, norm3 = build_norm_layer(
            norm_cfg, planes * self.expansion, postfix=3)

        self.conv1 = build_conv_layer(
            conv_cfg,
            inplanes,
            planes,
            kernel_size=1,
            stride=self.conv1_stride,
            bias=False)
        self.add_module(self.norm1_name, norm1)
        fallback_on_stride = False
        if self.with_dcn:
            fallback_on_stride = dcn.pop('fallback_on_stride', False)
        if not self.with_dcn or fallback_on_stride:
            self.conv2 = build_conv_layer(
                conv_cfg,
                planes,
                planes,
                kernel_size=3,
                stride=self.conv2_stride,
                padding=dilation,
                dilation=dilation,
                bias=False)
        else:
            assert self.conv_cfg is None, 'conv_cfg must be None for DCN'
            self.conv2 = build_conv_layer(
                dcn,
                planes,
                planes,
                kernel_size=3,
                stride=self.conv2_stride,
                padding=dilation,
                dilation=dilation,
                bias=False)
        self.add_module(self.norm2_name, norm2)
        self.conv3 = build_conv_layer(
            conv_cfg,
            planes,
            planes * self.expansion,
            kernel_size=1,
            bias=False)
        self.add_module(self.norm3_name, norm3)

        self.relu = nn.ReLU(inplace=True)
        self.downsample = downsample

        if self.with_plugins:
            self.after_conv1_plugin_names = self.make_block_plugins(
                planes, self.after_conv1_plugins)
            self.after_conv2_plugin_names = self.make_block_plugins(
                planes, self.after_conv2_plugins)
            self.after_conv3_plugin_names = self.make_block_plugins(
                planes * self.expansion, self.after_conv3_plugins)

    def make_block_plugins(self, in_channels, plugins):
        """make plugins for block.

        Args:
            in_channels (int): Input channels of plugin.
            plugins (list[dict]): List of plugins cfg to build.

        Returns:
            list[str]: List of the names of plugin.
        """
        assert isinstance(plugins, list)
        plugin_names = []
        for plugin in plugins:
            plugin = plugin.copy()
            name, layer = build_plugin_layer(
                plugin,
                in_channels=in_channels,
                postfix=plugin.pop('postfix', ''))
            assert not hasattr(self, name), f'duplicate plugin {name}'
            self.add_module(name, layer)
            plugin_names.append(name)
        return plugin_names

    def forward_plugin(self, x, plugin_names):
        """Apply the plugins registered under ``plugin_names`` in sequence."""
        out = x
        for name in plugin_names:
            # BUGFIX: feed the previous plugin's output (not the original
            # input ``x``) so multiple plugins at one position chain instead
            # of all but the last being discarded.
            out = getattr(self, name)(out)
        return out

    @property
    def norm1(self):
        """nn.Module: normalization layer after the first convolution layer"""
        return getattr(self, self.norm1_name)

    @property
    def norm2(self):
        """nn.Module: normalization layer after the second convolution layer"""
        return getattr(self, self.norm2_name)

    @property
    def norm3(self):
        """nn.Module: normalization layer after the third convolution layer"""
        return getattr(self, self.norm3_name)

    def forward(self, x):
        """Forward function."""

        def _inner_forward(x):
            identity = x
            out = self.conv1(x)
            out = self.norm1(out)
            out = self.relu(out)
            if self.with_plugins:
                out = self.forward_plugin(out, self.after_conv1_plugin_names)
            out = self.conv2(out)
            out = self.norm2(out)
            out = self.relu(out)
            if self.with_plugins:
                out = self.forward_plugin(out, self.after_conv2_plugin_names)
            out = self.conv3(out)
            out = self.norm3(out)
            if self.with_plugins:
                out = self.forward_plugin(out, self.after_conv3_plugin_names)
            if self.downsample is not None:
                identity = self.downsample(x)
            out += identity
            return out

        # Gradient checkpointing trades compute for memory during training.
        if self.with_cp and x.requires_grad:
            out = cp.checkpoint(_inner_forward, x)
        else:
            out = _inner_forward(x)
        out = self.relu(out)
        return out
@BACKBONES.register_module()
class ResNet(BaseModule):
"""ResNet backbone.
Args:
depth (int): Depth of resnet, from {18, 34, 50, 101, 152}.
stem_channels (int | None): Number of stem channels. If not specified,
it will be the same as `base_channels`. Default: None.
base_channels (int): Number of base channels of res layer. Default: 64.
in_channels (int): Number of input image channels. Default: 3.
num_stages (int): Resnet stages. Default: 4.
strides (Sequence[int]): Strides of the first block of each stage.
dilations (Sequence[int]): Dilation of each stage.
out_indices (Sequence[int]): Output from which stages.
style (str): `pytorch` or `caffe`. If set to "pytorch", the stride-two
layer is the 3x3 conv layer, otherwise the stride-two layer is
the first 1x1 conv layer.
deep_stem (bool): Replace 7x7 conv in input stem with 3 3x3 conv
avg_down (bool): Use AvgPool instead of stride conv when
downsampling in the bottleneck.
frozen_stages (int): Stages to be frozen (stop grad and set eval mode).
-1 means not freezing any parameters.
norm_cfg (dict): Dictionary to construct and config norm layer.
norm_eval (bool): Whether to set norm layers to eval mode, namely,
freeze running stats (mean and var). Note: Effect on Batch Norm
and its variants only.
plugins (list[dict]): List of plugins for stages, each dict contains:
- cfg (dict, required): Cfg dict to build plugin.
- position (str, required): Position inside block to insert
plugin, options are 'after_conv1', 'after_conv2', 'after_conv3'.
- stages (tuple[bool], optional): Stages to apply plugin, length
should be same as 'num_stages'.
with_cp (bool): Use checkpoint or not. Using checkpoint will save some
memory while slowing down the training speed.
zero_init_residual (bool): Whether to use zero init for last norm layer
in resblocks to let them behave as identity.
pretrained (str, optional): model pretrained path. Default: None
init_cfg (dict or list[dict], optional): Initialization config dict.
Default: None
Example:
>>> from mmdet.models import ResNet
>>> import torch
>>> self = ResNet(depth=18)
>>> self.eval()
>>> inputs = torch.rand(1, 3, 32, 32)
>>> level_outputs = self.forward(inputs)
>>> for level_out in level_outputs:
... print(tuple(level_out.shape))
(1, 64, 8, 8)
(1, 128, 4, 4)
(1, 256, 2, 2)
(1, 512, 1, 1)
"""
arch_settings = {
18: (BasicBlock, (2, 2, 2, 2)),
34: (BasicBlock, (3, 4, 6, 3)),
50: (Bottleneck, (3, 4, 6, 3)),
101: (Bottleneck, (3, 4, 23, 3)),
152: (Bottleneck, (3, 8, 36, 3))
}
    def __init__(self,
                 depth,
                 in_channels=3,
                 stem_channels=None,
                 base_channels=64,
                 num_stages=4,
                 strides=(1, 2, 2, 2),
                 dilations=(1, 1, 1, 1),
                 out_indices=(0, 1, 2, 3),
                 style='pytorch',
                 deep_stem=False,
                 avg_down=False,
                 frozen_stages=-1,
                 conv_cfg=None,
                 norm_cfg=dict(type='BN', requires_grad=True),
                 norm_eval=True,
                 dcn=None,
                 stage_with_dcn=(False, False, False, False),
                 plugins=None,
                 with_cp=False,
                 zero_init_residual=True,
                 pretrained=None,
                 init_cfg=None):
        super(ResNet, self).__init__(init_cfg)
        self.zero_init_residual = zero_init_residual
        if depth not in self.arch_settings:
            raise KeyError(f'invalid depth {depth} for resnet')
        # Per-block init cfg; only set for from-scratch training with
        # zero-init of the last norm in each residual branch.
        block_init_cfg = None
        assert not (init_cfg and pretrained), \
            'init_cfg and pretrained cannot be specified at the same time'
        if isinstance(pretrained, str):
            warnings.warn('DeprecationWarning: pretrained is deprecated, '
                          'please use "init_cfg" instead')
            self.init_cfg = dict(type='Pretrained', checkpoint=pretrained)
        elif pretrained is None:
            if init_cfg is None:
                # Default from-scratch init: Kaiming for convs, constant 1
                # for norm layers.
                self.init_cfg = [
                    dict(type='Kaiming', layer='Conv2d'),
                    dict(
                        type='Constant',
                        val=1,
                        layer=['_BatchNorm', 'GroupNorm'])
                ]
                block = self.arch_settings[depth][0]
                if self.zero_init_residual:
                    # Zero-init the last norm so each residual block starts
                    # as an identity mapping.
                    if block is BasicBlock:
                        block_init_cfg = dict(
                            type='Constant',
                            val=0,
                            override=dict(name='norm2'))
                    elif block is Bottleneck:
                        block_init_cfg = dict(
                            type='Constant',
                            val=0,
                            override=dict(name='norm3'))
        else:
            raise TypeError('pretrained must be a str or None')
        self.depth = depth
        if stem_channels is None:
            stem_channels = base_channels
        self.stem_channels = stem_channels
        self.base_channels = base_channels
        self.num_stages = num_stages
        assert num_stages >= 1 and num_stages <= 4
        self.strides = strides
        self.dilations = dilations
        assert len(strides) == len(dilations) == num_stages
        self.out_indices = out_indices
        assert max(out_indices) < num_stages
        self.style = style
        self.deep_stem = deep_stem
        self.avg_down = avg_down
        self.frozen_stages = frozen_stages
        self.conv_cfg = conv_cfg
        self.norm_cfg = norm_cfg
        self.with_cp = with_cp
        self.norm_eval = norm_eval
        self.dcn = dcn
        self.stage_with_dcn = stage_with_dcn
        if dcn is not None:
            assert len(stage_with_dcn) == num_stages
        self.plugins = plugins
        self.block, stage_blocks = self.arch_settings[depth]
        self.stage_blocks = stage_blocks[:num_stages]
        self.inplanes = stem_channels
        self._make_stem_layer(in_channels, stem_channels)
        # Build one residual stage per entry in stage_blocks; channel count
        # doubles every stage (base_channels * 2**i).
        self.res_layers = []
        for i, num_blocks in enumerate(self.stage_blocks):
            stride = strides[i]
            dilation = dilations[i]
            dcn = self.dcn if self.stage_with_dcn[i] else None
            if plugins is not None:
                stage_plugins = self.make_stage_plugins(plugins, i)
            else:
                stage_plugins = None
            planes = base_channels * 2**i
            res_layer = self.make_res_layer(
                block=self.block,
                inplanes=self.inplanes,
                planes=planes,
                num_blocks=num_blocks,
                stride=stride,
                dilation=dilation,
                style=self.style,
                avg_down=self.avg_down,
                with_cp=with_cp,
                conv_cfg=conv_cfg,
                norm_cfg=norm_cfg,
                dcn=dcn,
                plugins=stage_plugins,
                init_cfg=block_init_cfg)
            self.inplanes = planes * self.block.expansion
            layer_name = f'layer{i + 1}'
            self.add_module(layer_name, res_layer)
            self.res_layers.append(layer_name)
        self._freeze_stages()
        # Channel count of the deepest stage's output.
        self.feat_dim = self.block.expansion * base_channels * 2**(
            len(self.stage_blocks) - 1)
def make_stage_plugins(self, plugins, stage_idx):
"""Make plugins for ResNet ``stage_idx`` th stage.
Currently we support to insert ``context_block``,
``empirical_attention_block``, ``nonlocal_block`` into the backbone
like ResNet/ResNeXt. They could be inserted after conv1/conv2/conv3 of
Bottleneck.
An example of plugins format could be:
Examples:
>>> plugins=[
... dict(cfg=dict(type='xxx', arg1='xxx'),
... stages=(False, True, True, True),
... position='after_conv2'),
... dict(cfg=dict(type='yyy'),
... stages=(True, True, True, True),
... position='after_conv3'),
... dict(cfg=dict(type='zzz', postfix='1'),
... stages=(True, True, True, True),
... position='after_conv3'),
... dict(cfg=dict(type='zzz', postfix='2'),
... stages=(True, True, True, True),
... position='after_conv3')
... ]
>>> self = ResNet(depth=18)
>>> stage_plugins = self.make_stage_plugins(plugins, 0)
>>> assert len(stage_plugins) == 3
Suppose ``stage_idx=0``, the structure of blocks in the stage would be:
.. code-block:: none
conv1-> conv2->conv3->yyy->zzz1->zzz2
Suppose 'stage_idx=1', the structure of blocks in the stage would be:
.. code-block:: none
conv1-> conv2->xxx->conv3->yyy->zzz1->zzz2
If stages is missing, the plugin would be applied to all stages.
Args:
plugins (list[dict]): List of plugins cfg to build. The postfix is
required if multiple same type plugins are inserted.
stage_idx (int): Index of stage to build
Returns:
list[dict]: Plugins for current stage
"""
stage_plugins = []
for plugin in plugins:
plugin = plugin.copy()
stages = plugin.pop('stages', None)
assert stages is None or len(stages) == self.num_stages
# whether to insert plugin into current stage
if stages is None or stages[stage_idx]:
stage_plugins.append(plugin)
return stage_plugins
    def make_res_layer(self, **kwargs):
        """Pack all blocks in a stage into a ``ResLayer``.

        Subclasses (e.g. ``ResNeXt``/``ResNeSt``) override this hook to
        inject extra keyword arguments into the layer construction.
        """
        return ResLayer(**kwargs)
    @property
    def norm1(self):
        """nn.Module: the normalization layer named "norm1" """
        # The norm layer is registered under a generated name (built with a
        # postfix by ``build_norm_layer``), so it is resolved via getattr.
        return getattr(self, self.norm1_name)
    def _make_stem_layer(self, in_channels, stem_channels):
        """Build the stem: either three stacked 3x3 convs (``deep_stem``)
        or a single 7x7 conv, each followed by norm and ReLU, with a final
        stride-2 max pooling (overall stride 4 before ``layer1``)."""
        if self.deep_stem:
            # ResNetV1d-style stem: 3x3 (stride 2) -> 3x3 -> 3x3, growing
            # from stem_channels // 2 to stem_channels.
            self.stem = nn.Sequential(
                build_conv_layer(
                    self.conv_cfg,
                    in_channels,
                    stem_channels // 2,
                    kernel_size=3,
                    stride=2,
                    padding=1,
                    bias=False),
                build_norm_layer(self.norm_cfg, stem_channels // 2)[1],
                nn.ReLU(inplace=True),
                build_conv_layer(
                    self.conv_cfg,
                    stem_channels // 2,
                    stem_channels // 2,
                    kernel_size=3,
                    stride=1,
                    padding=1,
                    bias=False),
                build_norm_layer(self.norm_cfg, stem_channels // 2)[1],
                nn.ReLU(inplace=True),
                build_conv_layer(
                    self.conv_cfg,
                    stem_channels // 2,
                    stem_channels,
                    kernel_size=3,
                    stride=1,
                    padding=1,
                    bias=False),
                build_norm_layer(self.norm_cfg, stem_channels)[1],
                nn.ReLU(inplace=True))
        else:
            # Classic ResNet stem: one 7x7 stride-2 convolution.
            self.conv1 = build_conv_layer(
                self.conv_cfg,
                in_channels,
                stem_channels,
                kernel_size=7,
                stride=2,
                padding=3,
                bias=False)
            # The norm layer is registered under a generated name and is
            # exposed through the ``norm1`` property.
            self.norm1_name, norm1 = build_norm_layer(
                self.norm_cfg, stem_channels, postfix=1)
            self.add_module(self.norm1_name, norm1)
            self.relu = nn.ReLU(inplace=True)
        # Both stem variants end with a stride-2 max pooling.
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
    def _freeze_stages(self):
        """Freeze the stem and the first ``frozen_stages`` res layers.

        Frozen modules are put in eval mode (fixing norm statistics) and
        their parameters stop receiving gradients. ``frozen_stages == -1``
        freezes nothing.
        """
        if self.frozen_stages >= 0:
            if self.deep_stem:
                self.stem.eval()
                for param in self.stem.parameters():
                    param.requires_grad = False
            else:
                self.norm1.eval()
                for m in [self.conv1, self.norm1]:
                    for param in m.parameters():
                        param.requires_grad = False
        # Freeze res layers layer1 .. layer{frozen_stages}; the range is
        # empty when frozen_stages < 1.
        for i in range(1, self.frozen_stages + 1):
            m = getattr(self, f'layer{i}')
            m.eval()
            for param in m.parameters():
                param.requires_grad = False
def forward(self, x):
"""Forward function."""
if self.deep_stem:
x = self.stem(x)
else:
x = self.conv1(x)
x = self.norm1(x)
x = self.relu(x)
x = self.maxpool(x)
outs = []
for i, layer_name in enumerate(self.res_layers):
res_layer = getattr(self, layer_name)
x = res_layer(x)
if i in self.out_indices:
outs.append(x)
return tuple(outs)
def train(self, mode=True):
"""Convert the model into training mode while keep normalization layer
freezed."""
super(ResNet, self).train(mode)
self._freeze_stages()
if mode and self.norm_eval:
for m in self.modules():
# trick: eval have effect on BatchNorm only
if isinstance(m, _BatchNorm):
m.eval()
@BACKBONES.register_module()
class ResNetV1d(ResNet):
    r"""ResNetV1d variant described in `Bag of Tricks
    <https://arxiv.org/pdf/1812.01187.pdf>`_.

    Compared with default ResNet(ResNetV1b), ResNetV1d replaces the 7x7 conv in
    the input stem with three 3x3 convs. And in the downsampling block, a 2x2
    avg_pool with stride 2 is added before conv, whose stride is changed to 1.
    """

    def __init__(self, **kwargs):
        # deep_stem: three 3x3 convs instead of the 7x7 stem conv.
        # avg_down: average pooling before the 1x1 conv in downsample paths.
        super(ResNetV1d, self).__init__(
            deep_stem=True, avg_down=True, **kwargs)
| 23,838 | 34.421991 | 79 | py |
PseCo | PseCo-master/thirdparty/mmdetection/mmdet/models/backbones/detectors_resnet.py | # Copyright (c) OpenMMLab. All rights reserved.
import torch.nn as nn
import torch.utils.checkpoint as cp
from mmcv.cnn import (build_conv_layer, build_norm_layer, constant_init,
kaiming_init)
from mmcv.runner import Sequential, load_checkpoint
from torch.nn.modules.batchnorm import _BatchNorm
from mmdet.utils import get_root_logger
from ..builder import BACKBONES
from .resnet import BasicBlock
from .resnet import Bottleneck as _Bottleneck
from .resnet import ResNet
class Bottleneck(_Bottleneck):
    r"""Bottleneck for the ResNet backbone in `DetectoRS
    <https://arxiv.org/pdf/2006.02334.pdf>`_.

    This bottleneck allows the users to specify whether to use
    SAC (Switchable Atrous Convolution) and RFP (Recursive Feature Pyramid).

    Args:
        inplanes (int): The number of input channels.
        planes (int): The number of output channels before expansion.
        rfp_inplanes (int, optional): The number of channels from RFP.
            Default: None. If specified, an additional conv layer will be
            added for ``rfp_feat``. Otherwise, the structure is the same as
            base class.
        sac (dict, optional): Dictionary to construct SAC. Default: None.
        init_cfg (dict or list[dict], optional): Initialization config dict.
            Default: None
    """

    expansion = 4

    def __init__(self,
                 inplanes,
                 planes,
                 rfp_inplanes=None,
                 sac=None,
                 init_cfg=None,
                 **kwargs):
        super(Bottleneck, self).__init__(
            inplanes, planes, init_cfg=init_cfg, **kwargs)
        assert sac is None or isinstance(sac, dict)
        self.sac = sac
        self.with_sac = sac is not None
        if self.with_sac:
            # Replace the standard conv2 with a Switchable Atrous Conv.
            self.conv2 = build_conv_layer(
                self.sac,
                planes,
                planes,
                kernel_size=3,
                stride=self.conv2_stride,
                padding=self.dilation,
                dilation=self.dilation,
                bias=False)
        self.rfp_inplanes = rfp_inplanes
        if self.rfp_inplanes:
            # 1x1 conv projecting the recursive (RFP) feature onto the
            # residual output width.
            self.rfp_conv = build_conv_layer(
                None,
                self.rfp_inplanes,
                planes * self.expansion,
                1,
                stride=1,
                bias=True)
            if init_cfg is None:
                # Zero-init rfp_conv so the RFP connection starts as a
                # no-op and its contribution is learned.
                self.init_cfg = dict(
                    type='Constant', val=0, override=dict(name='rfp_conv'))

    def rfp_forward(self, x, rfp_feat):
        """The forward function that also takes the RFP features as input."""

        def _inner_forward(x):
            identity = x
            out = self.conv1(x)
            out = self.norm1(out)
            out = self.relu(out)
            if self.with_plugins:
                out = self.forward_plugin(out, self.after_conv1_plugin_names)
            out = self.conv2(out)
            out = self.norm2(out)
            out = self.relu(out)
            if self.with_plugins:
                out = self.forward_plugin(out, self.after_conv2_plugin_names)
            out = self.conv3(out)
            out = self.norm3(out)
            if self.with_plugins:
                out = self.forward_plugin(out, self.after_conv3_plugin_names)
            if self.downsample is not None:
                identity = self.downsample(x)
            out += identity
            return out

        if self.with_cp and x.requires_grad:
            # Trade compute for memory via gradient checkpointing.
            out = cp.checkpoint(_inner_forward, x)
        else:
            out = _inner_forward(x)
        if self.rfp_inplanes:
            # Inject the projected RFP feature before the final activation.
            rfp_feat = self.rfp_conv(rfp_feat)
            out = out + rfp_feat
        out = self.relu(out)
        return out
class ResLayer(Sequential):
    """ResLayer to build ResNet style backbone for RPF in detectoRS.

    The difference between this module and base class is that we pass
    ``rfp_inplanes`` to the first block.

    Args:
        block (nn.Module): block used to build ResLayer.
        inplanes (int): inplanes of block.
        planes (int): planes of block.
        num_blocks (int): number of blocks.
        stride (int): stride of the first block. Default: 1
        avg_down (bool): Use AvgPool instead of stride conv when
            downsampling in the bottleneck. Default: False
        conv_cfg (dict): dictionary to construct and config conv layer.
            Default: None
        norm_cfg (dict): dictionary to construct and config norm layer.
            Default: dict(type='BN')
        downsample_first (bool): Downsample at the first block or last block.
            False for Hourglass, True for ResNet. Default: True
        rfp_inplanes (int, optional): The number of channels from RFP.
            Default: None. If specified, an additional conv layer will be
            added for ``rfp_feat``. Otherwise, the structure is the same as
            base class.
    """

    def __init__(self,
                 block,
                 inplanes,
                 planes,
                 num_blocks,
                 stride=1,
                 avg_down=False,
                 conv_cfg=None,
                 norm_cfg=dict(type='BN'),
                 downsample_first=True,
                 rfp_inplanes=None,
                 **kwargs):
        self.block = block
        assert downsample_first, f'downsample_first={downsample_first} is ' \
            'not supported in DetectoRS'
        downsample = None
        if stride != 1 or inplanes != planes * block.expansion:
            # Shortcut projection is needed whenever spatial size or
            # channel count changes across the layer.
            downsample = []
            conv_stride = stride
            if avg_down and stride != 1:
                # ResNetV1d trick: average-pool first so the 1x1 conv can
                # keep stride 1.
                conv_stride = 1
                downsample.append(
                    nn.AvgPool2d(
                        kernel_size=stride,
                        stride=stride,
                        ceil_mode=True,
                        count_include_pad=False))
            downsample.extend([
                build_conv_layer(
                    conv_cfg,
                    inplanes,
                    planes * block.expansion,
                    kernel_size=1,
                    stride=conv_stride,
                    bias=False),
                build_norm_layer(norm_cfg, planes * block.expansion)[1]
            ])
            downsample = nn.Sequential(*downsample)
        layers = []
        # Only the first block takes the stride, the downsample shortcut
        # and the RFP input channels.
        layers.append(
            block(
                inplanes=inplanes,
                planes=planes,
                stride=stride,
                downsample=downsample,
                conv_cfg=conv_cfg,
                norm_cfg=norm_cfg,
                rfp_inplanes=rfp_inplanes,
                **kwargs))
        inplanes = planes * block.expansion
        for _ in range(1, num_blocks):
            layers.append(
                block(
                    inplanes=inplanes,
                    planes=planes,
                    stride=1,
                    conv_cfg=conv_cfg,
                    norm_cfg=norm_cfg,
                    **kwargs))
        super(ResLayer, self).__init__(*layers)
@BACKBONES.register_module()
class DetectoRS_ResNet(ResNet):
    """ResNet backbone for DetectoRS.

    Args:
        sac (dict, optional): Dictionary to construct SAC (Switchable Atrous
            Convolution). Default: None.
        stage_with_sac (list): Which stage to use sac. Default: (False, False,
            False, False).
        rfp_inplanes (int, optional): The number of channels from RFP.
            Default: None. If specified, an additional conv layer will be
            added for ``rfp_feat``. Otherwise, the structure is the same as
            base class.
        output_img (bool): If ``True``, the input image will be inserted into
            the starting position of output. Default: False.
    """

    arch_settings = {
        50: (Bottleneck, (3, 4, 6, 3)),
        101: (Bottleneck, (3, 4, 23, 3)),
        152: (Bottleneck, (3, 8, 36, 3))
    }

    def __init__(self,
                 sac=None,
                 stage_with_sac=(False, False, False, False),
                 rfp_inplanes=None,
                 output_img=False,
                 pretrained=None,
                 init_cfg=None,
                 **kwargs):
        assert not (init_cfg and pretrained), \
            'init_cfg and pretrained cannot be specified at the same time'
        self.pretrained = pretrained
        if init_cfg is not None:
            # Only 'Pretrained' init is supported; its checkpoint path is
            # stored for the custom init_weights below.
            assert isinstance(init_cfg, dict), \
                f'init_cfg must be a dict, but got {type(init_cfg)}'
            if 'type' in init_cfg:
                assert init_cfg.get('type') == 'Pretrained', \
                    'Only can initialize module by loading a pretrained model'
            else:
                raise KeyError('`init_cfg` must contain the key "type"')
            self.pretrained = init_cfg.get('checkpoint')
        self.sac = sac
        self.stage_with_sac = stage_with_sac
        self.rfp_inplanes = rfp_inplanes
        self.output_img = output_img
        super(DetectoRS_ResNet, self).__init__(**kwargs)

        # Rebuild every res layer (the base class already created them) so
        # that per-stage SAC and the RFP input channels are threaded into
        # each stage's first block.
        self.inplanes = self.stem_channels
        self.res_layers = []
        for i, num_blocks in enumerate(self.stage_blocks):
            stride = self.strides[i]
            dilation = self.dilations[i]
            dcn = self.dcn if self.stage_with_dcn[i] else None
            sac = self.sac if self.stage_with_sac[i] else None
            if self.plugins is not None:
                stage_plugins = self.make_stage_plugins(self.plugins, i)
            else:
                stage_plugins = None
            planes = self.base_channels * 2**i
            res_layer = self.make_res_layer(
                block=self.block,
                inplanes=self.inplanes,
                planes=planes,
                num_blocks=num_blocks,
                stride=stride,
                dilation=dilation,
                style=self.style,
                avg_down=self.avg_down,
                with_cp=self.with_cp,
                conv_cfg=self.conv_cfg,
                norm_cfg=self.norm_cfg,
                dcn=dcn,
                sac=sac,
                # RFP features are fed only into stages after the first.
                rfp_inplanes=rfp_inplanes if i > 0 else None,
                plugins=stage_plugins)
            self.inplanes = planes * self.block.expansion
            layer_name = f'layer{i + 1}'
            self.add_module(layer_name, res_layer)
            self.res_layers.append(layer_name)
        self._freeze_stages()

    # In order to be properly initialized by RFP
    def init_weights(self):
        """Initialize weights from ``self.pretrained`` (checkpoint path)
        or with Kaiming/constant defaults when no checkpoint is given."""
        # Calling this method will cause parameter initialization exception
        # super(DetectoRS_ResNet, self).init_weights()
        if isinstance(self.pretrained, str):
            logger = get_root_logger()
            load_checkpoint(self, self.pretrained, strict=False, logger=logger)
        elif self.pretrained is None:
            for m in self.modules():
                if isinstance(m, nn.Conv2d):
                    kaiming_init(m)
                elif isinstance(m, (_BatchNorm, nn.GroupNorm)):
                    constant_init(m, 1)
            if self.dcn is not None:
                for m in self.modules():
                    if isinstance(m, Bottleneck) and hasattr(
                            m.conv2, 'conv_offset'):
                        constant_init(m.conv2.conv_offset, 0)
            if self.zero_init_residual:
                # Zero-init the last norm of each block so residual
                # branches start as identity.
                for m in self.modules():
                    if isinstance(m, Bottleneck):
                        constant_init(m.norm3, 0)
                    elif isinstance(m, BasicBlock):
                        constant_init(m.norm2, 0)
        else:
            raise TypeError('pretrained must be a str or None')

    def make_res_layer(self, **kwargs):
        """Pack all blocks in a stage into a ``ResLayer`` for DetectoRS."""
        return ResLayer(**kwargs)

    def forward(self, x):
        """Forward function."""
        outs = list(super(DetectoRS_ResNet, self).forward(x))
        if self.output_img:
            # Expose the raw input image as the first output (used by RFP).
            outs.insert(0, x)
        return tuple(outs)

    def rfp_forward(self, x, rfp_feats):
        """Forward function for RFP."""
        if self.deep_stem:
            x = self.stem(x)
        else:
            x = self.conv1(x)
            x = self.norm1(x)
            x = self.relu(x)
        x = self.maxpool(x)
        outs = []
        for i, layer_name in enumerate(self.res_layers):
            res_layer = getattr(self, layer_name)
            # The first stage receives no RFP feature.
            rfp_feat = rfp_feats[i] if i > 0 else None
            for layer in res_layer:
                x = layer.rfp_forward(x, rfp_feat)
            if i in self.out_indices:
                outs.append(x)
        return tuple(outs)
| 12,736 | 34.980226 | 79 | py |
PseCo | PseCo-master/thirdparty/mmdetection/mmdet/models/backbones/ssd_vgg.py | # Copyright (c) OpenMMLab. All rights reserved.
import warnings
import torch.nn as nn
from mmcv.cnn import VGG
from mmcv.runner import BaseModule
from ..builder import BACKBONES
from ..necks import ssd_neck
@BACKBONES.register_module()
class SSDVGG(VGG, BaseModule):
    """VGG Backbone network for single-shot-detection.

    Args:
        depth (int): Depth of vgg, from {11, 13, 16, 19}.
        with_last_pool (bool): Whether to add a pooling layer at the last
            of the model
        ceil_mode (bool): When True, will use `ceil` instead of `floor`
            to compute the output shape.
        out_indices (Sequence[int]): Output from which stages.
        out_feature_indices (Sequence[int]): Output from which feature map.
        pretrained (str, optional): model pretrained path. Default: None
        init_cfg (dict or list[dict], optional): Initialization config dict.
            Default: None
        input_size (int, optional): Deprecated argument.
            Width and height of input, from {300, 512}.
        l2_norm_scale (float, optional) : Deprecated argument.
            L2 normalization layer init scale.

    Example:
        >>> self = SSDVGG(input_size=300, depth=11)
        >>> self.eval()
        >>> inputs = torch.rand(1, 3, 300, 300)
        >>> level_outputs = self.forward(inputs)
        >>> for level_out in level_outputs:
        ...     print(tuple(level_out.shape))
        (1, 1024, 19, 19)
        (1, 512, 10, 10)
        (1, 256, 5, 5)
        (1, 256, 3, 3)
        (1, 256, 1, 1)
    """
    # Extra-layer channel settings keyed by input size.
    # NOTE(review): not referenced in this class; presumably consumed by
    # the neck/legacy code — verify before removing.
    extra_setting = {
        300: (256, 'S', 512, 128, 'S', 256, 128, 256, 128, 256),
        512: (256, 'S', 512, 128, 'S', 256, 128, 'S', 256, 128, 'S', 256, 128),
    }

    def __init__(self,
                 depth,
                 with_last_pool=False,
                 ceil_mode=True,
                 out_indices=(3, 4),
                 out_feature_indices=(22, 34),
                 pretrained=None,
                 init_cfg=None,
                 input_size=None,
                 l2_norm_scale=None):
        # TODO: in_channels for mmcv.VGG
        super(SSDVGG, self).__init__(
            depth,
            with_last_pool=with_last_pool,
            ceil_mode=ceil_mode,
            out_indices=out_indices)

        # Extend the VGG features with a stride-1 pool, a dilated 3x3 conv
        # and a 1x1 conv (each conv followed by ReLU).
        self.features.add_module(
            str(len(self.features)),
            nn.MaxPool2d(kernel_size=3, stride=1, padding=1))
        self.features.add_module(
            str(len(self.features)),
            nn.Conv2d(512, 1024, kernel_size=3, padding=6, dilation=6))
        self.features.add_module(
            str(len(self.features)), nn.ReLU(inplace=True))
        self.features.add_module(
            str(len(self.features)), nn.Conv2d(1024, 1024, kernel_size=1))
        self.features.add_module(
            str(len(self.features)), nn.ReLU(inplace=True))
        self.out_feature_indices = out_feature_indices

        assert not (init_cfg and pretrained), \
            'init_cfg and pretrained cannot be specified at the same time'

        if init_cfg is not None:
            self.init_cfg = init_cfg
        elif isinstance(pretrained, str):
            warnings.warn('DeprecationWarning: pretrained is deprecated, '
                          'please use "init_cfg" instead')
            self.init_cfg = dict(type='Pretrained', checkpoint=pretrained)
        elif pretrained is None:
            self.init_cfg = [
                dict(type='Kaiming', layer='Conv2d'),
                dict(type='Constant', val=1, layer='BatchNorm2d'),
                dict(type='Normal', std=0.01, layer='Linear'),
            ]
        else:
            raise TypeError('pretrained must be a str or None')

        if input_size is not None:
            warnings.warn('DeprecationWarning: input_size is deprecated')
        if l2_norm_scale is not None:
            warnings.warn('DeprecationWarning: l2_norm_scale in VGG is '
                          'deprecated, it has been moved to SSDNeck.')

    def init_weights(self, pretrained=None):
        """Initialize weights via ``BaseModule``; the ``pretrained``
        argument is accepted for backward compatibility but ignored."""
        super(VGG, self).init_weights()

    def forward(self, x):
        """Forward function; returns the feature maps whose layer index is
        in ``self.out_feature_indices``."""
        outs = []
        for i, layer in enumerate(self.features):
            x = layer(x)
            if i in self.out_feature_indices:
                outs.append(x)
        if len(outs) == 1:
            return outs[0]
        else:
            return tuple(outs)
class L2Norm(ssd_neck.L2Norm):
    """Deprecated alias of :class:`mmdet.models.necks.ssd_neck.L2Norm`.

    Kept only for backward compatibility; constructing it forwards all
    arguments to the new class and emits a deprecation warning.
    """

    def __init__(self, **kwargs):
        super(L2Norm, self).__init__(**kwargs)
        warnings.warn('DeprecationWarning: L2Norm in ssd_vgg.py '
                      'is deprecated, please use L2Norm in '
                      'mmdet/models/necks/ssd_neck.py instead')
| 4,705 | 35.48062 | 79 | py |
PseCo | PseCo-master/thirdparty/mmdetection/mmdet/models/backbones/resnext.py | # Copyright (c) OpenMMLab. All rights reserved.
import math
from mmcv.cnn import build_conv_layer, build_norm_layer
from ..builder import BACKBONES
from ..utils import ResLayer
from .resnet import Bottleneck as _Bottleneck
from .resnet import ResNet
class Bottleneck(_Bottleneck):
    # Output channels are ``planes * expansion``.
    expansion = 4

    def __init__(self,
                 inplanes,
                 planes,
                 groups=1,
                 base_width=4,
                 base_channels=64,
                 **kwargs):
        """Bottleneck block for ResNeXt.

        If style is "pytorch", the stride-two layer is the 3x3 conv layer, if
        it is "caffe", the stride-two layer is the first 1x1 conv layer.
        """
        super(Bottleneck, self).__init__(inplanes, planes, **kwargs)

        if groups == 1:
            width = self.planes
        else:
            # Scale the bottleneck width by cardinality (groups) and base
            # width, then rebuild conv1/conv2 and their norms below.
            width = math.floor(self.planes *
                               (base_width / base_channels)) * groups

        self.norm1_name, norm1 = build_norm_layer(
            self.norm_cfg, width, postfix=1)
        self.norm2_name, norm2 = build_norm_layer(
            self.norm_cfg, width, postfix=2)
        self.norm3_name, norm3 = build_norm_layer(
            self.norm_cfg, self.planes * self.expansion, postfix=3)

        self.conv1 = build_conv_layer(
            self.conv_cfg,
            self.inplanes,
            width,
            kernel_size=1,
            stride=self.conv1_stride,
            bias=False)
        self.add_module(self.norm1_name, norm1)
        fallback_on_stride = False
        self.with_modulated_dcn = False
        if self.with_dcn:
            # A DCN config may request using a plain conv for strided
            # layers via 'fallback_on_stride'.
            fallback_on_stride = self.dcn.pop('fallback_on_stride', False)
        if not self.with_dcn or fallback_on_stride:
            self.conv2 = build_conv_layer(
                self.conv_cfg,
                width,
                width,
                kernel_size=3,
                stride=self.conv2_stride,
                padding=self.dilation,
                dilation=self.dilation,
                groups=groups,
                bias=False)
        else:
            assert self.conv_cfg is None, 'conv_cfg must be None for DCN'
            self.conv2 = build_conv_layer(
                self.dcn,
                width,
                width,
                kernel_size=3,
                stride=self.conv2_stride,
                padding=self.dilation,
                dilation=self.dilation,
                groups=groups,
                bias=False)
        self.add_module(self.norm2_name, norm2)
        self.conv3 = build_conv_layer(
            self.conv_cfg,
            width,
            self.planes * self.expansion,
            kernel_size=1,
            bias=False)
        self.add_module(self.norm3_name, norm3)

        if self.with_plugins:
            # Rebuild the plugins because the channel width of conv1/conv2
            # changed relative to the base class.
            self._del_block_plugins(self.after_conv1_plugin_names +
                                    self.after_conv2_plugin_names +
                                    self.after_conv3_plugin_names)
            self.after_conv1_plugin_names = self.make_block_plugins(
                width, self.after_conv1_plugins)
            self.after_conv2_plugin_names = self.make_block_plugins(
                width, self.after_conv2_plugins)
            self.after_conv3_plugin_names = self.make_block_plugins(
                self.planes * self.expansion, self.after_conv3_plugins)

    def _del_block_plugins(self, plugin_names):
        """delete plugins for block if exist.

        Args:
            plugin_names (list[str]): List of plugins name to delete.
        """
        assert isinstance(plugin_names, list)
        for plugin_name in plugin_names:
            del self._modules[plugin_name]
@BACKBONES.register_module()
class ResNeXt(ResNet):
    """ResNeXt backbone.

    Args:
        depth (int): Depth of resnet, from {18, 34, 50, 101, 152}.
        in_channels (int): Number of input image channels. Default: 3.
        num_stages (int): Resnet stages. Default: 4.
        groups (int): Group of resnext.
        base_width (int): Base width of resnext.
        strides (Sequence[int]): Strides of the first block of each stage.
        dilations (Sequence[int]): Dilation of each stage.
        out_indices (Sequence[int]): Output from which stages.
        style (str): `pytorch` or `caffe`. If set to "pytorch", the stride-two
            layer is the 3x3 conv layer, otherwise the stride-two layer is
            the first 1x1 conv layer.
        frozen_stages (int): Stages to be frozen (all param fixed). -1 means
            not freezing any parameters.
        norm_cfg (dict): dictionary to construct and config norm layer.
        norm_eval (bool): Whether to set norm layers to eval mode, namely,
            freeze running stats (mean and var). Note: Effect on Batch Norm
            and its variants only.
        with_cp (bool): Use checkpoint or not. Using checkpoint will save some
            memory while slowing down the training speed.
        zero_init_residual (bool): whether to use zero init for last norm layer
            in resblocks to let them behave as identity.
    """

    arch_settings = {
        50: (Bottleneck, (3, 4, 6, 3)),
        101: (Bottleneck, (3, 4, 23, 3)),
        152: (Bottleneck, (3, 8, 36, 3))
    }

    def __init__(self, groups=1, base_width=4, **kwargs):
        # These must be set before super().__init__(), which builds the
        # res layers through ``make_res_layer`` below.
        self.groups = groups
        self.base_width = base_width
        super(ResNeXt, self).__init__(**kwargs)

    def make_res_layer(self, **kwargs):
        """Pack all blocks in a stage into a ``ResLayer``"""
        return ResLayer(
            groups=self.groups,
            base_width=self.base_width,
            base_channels=self.base_channels,
            **kwargs)
| 5,712 | 35.858065 | 79 | py |
PseCo | PseCo-master/thirdparty/mmdetection/mmdet/models/backbones/resnest.py | # Copyright (c) OpenMMLab. All rights reserved.
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.checkpoint as cp
from mmcv.cnn import build_conv_layer, build_norm_layer
from mmcv.runner import BaseModule
from ..builder import BACKBONES
from ..utils import ResLayer
from .resnet import Bottleneck as _Bottleneck
from .resnet import ResNetV1d
class RSoftmax(nn.Module):
    """Radix Softmax module in ``SplitAttentionConv2d``.

    Normalizes the attention logits across the radix dimension: softmax
    over radix groups when ``radix > 1``, plain sigmoid otherwise.

    Args:
        radix (int): Radix of input.
        groups (int): Groups of input.
    """

    def __init__(self, radix, groups):
        super().__init__()
        self.radix = radix
        self.groups = groups

    def forward(self, x):
        batch_size = x.size(0)
        if self.radix <= 1:
            # Degenerate case (radix == 1): simple sigmoid gating.
            return torch.sigmoid(x)
        # (B, groups, radix, -1) -> (B, radix, groups, -1), then softmax
        # across the radix dimension and flatten back to (B, -1).
        grouped = x.view(batch_size, self.groups, self.radix, -1)
        grouped = grouped.transpose(1, 2)
        return F.softmax(grouped, dim=1).reshape(batch_size, -1)
class SplitAttentionConv2d(BaseModule):
    """Split-Attention Conv2d in ResNeSt.

    Args:
        in_channels (int): Number of channels in the input feature map.
        channels (int): Number of intermediate channels.
        kernel_size (int | tuple[int]): Size of the convolution kernel.
        stride (int | tuple[int]): Stride of the convolution.
        padding (int | tuple[int]): Zero-padding added to both sides of
            the input.
        dilation (int | tuple[int]): Spacing between kernel elements.
        groups (int): Number of blocked connections from input channels to
            output channels. Same as nn.Conv2d.
        radix (int): Radix of SplitAttentionConv2d. Default: 2
        reduction_factor (int): Reduction factor of inter_channels. Default: 4.
        conv_cfg (dict): Config dict for convolution layer. Default: None,
            which means using conv2d.
        norm_cfg (dict): Config dict for normalization layer. Default: None.
        dcn (dict): Config dict for DCN. Default: None.
        init_cfg (dict or list[dict], optional): Initialization config dict.
            Default: None
    """

    def __init__(self,
                 in_channels,
                 channels,
                 kernel_size,
                 stride=1,
                 padding=0,
                 dilation=1,
                 groups=1,
                 radix=2,
                 reduction_factor=4,
                 conv_cfg=None,
                 norm_cfg=dict(type='BN'),
                 dcn=None,
                 init_cfg=None):
        super(SplitAttentionConv2d, self).__init__(init_cfg)
        # Bottleneck width of the attention MLP, floored at 32 channels.
        inter_channels = max(in_channels * radix // reduction_factor, 32)
        self.radix = radix
        self.groups = groups
        self.channels = channels
        self.with_dcn = dcn is not None
        self.dcn = dcn
        fallback_on_stride = False
        if self.with_dcn:
            # A DCN config may request a plain conv for strided layers.
            fallback_on_stride = self.dcn.pop('fallback_on_stride', False)
        if self.with_dcn and not fallback_on_stride:
            assert conv_cfg is None, 'conv_cfg must be None for DCN'
            conv_cfg = dcn
        # The main conv produces ``radix`` feature splits at once.
        self.conv = build_conv_layer(
            conv_cfg,
            in_channels,
            channels * radix,
            kernel_size,
            stride=stride,
            padding=padding,
            dilation=dilation,
            groups=groups * radix,
            bias=False)
        # To be consistent with original implementation, starting from 0
        self.norm0_name, norm0 = build_norm_layer(
            norm_cfg, channels * radix, postfix=0)
        self.add_module(self.norm0_name, norm0)
        self.relu = nn.ReLU(inplace=True)
        # Two 1x1 convs compute the per-radix attention logits.
        self.fc1 = build_conv_layer(
            None, channels, inter_channels, 1, groups=self.groups)
        self.norm1_name, norm1 = build_norm_layer(
            norm_cfg, inter_channels, postfix=1)
        self.add_module(self.norm1_name, norm1)
        self.fc2 = build_conv_layer(
            None, inter_channels, channels * radix, 1, groups=self.groups)
        self.rsoftmax = RSoftmax(radix, groups)

    @property
    def norm0(self):
        """nn.Module: the normalization layer named "norm0" """
        return getattr(self, self.norm0_name)

    @property
    def norm1(self):
        """nn.Module: the normalization layer named "norm1" """
        return getattr(self, self.norm1_name)

    def forward(self, x):
        x = self.conv(x)
        x = self.norm0(x)
        x = self.relu(x)

        # Fix: the original read ``batch, rchannel = x.shape[:2]`` and then
        # immediately overwrote ``batch``; ``rchannel`` was never used.
        batch = x.size(0)
        if self.radix > 1:
            # Split the radix groups and sum them for the gate input.
            splits = x.view(batch, self.radix, -1, *x.shape[2:])
            gap = splits.sum(dim=1)
        else:
            gap = x
        # Global context -> fc1/norm1/relu -> fc2 -> radix-wise weights.
        gap = F.adaptive_avg_pool2d(gap, 1)
        gap = self.fc1(gap)
        gap = self.norm1(gap)
        gap = self.relu(gap)
        atten = self.fc2(gap)
        atten = self.rsoftmax(atten).view(batch, -1, 1, 1)

        if self.radix > 1:
            # Weighted sum of the radix splits.
            attens = atten.view(batch, self.radix, -1, *atten.shape[2:])
            out = torch.sum(attens * splits, dim=1)
        else:
            out = atten * x
        return out.contiguous()
class Bottleneck(_Bottleneck):
    """Bottleneck block for ResNeSt.

    Args:
        inplanes (int): Input planes of this block.
        planes (int): Middle planes of this block.
        groups (int): Groups of conv2.
        base_width (int): Base of width in terms of base channels. Default: 4.
        base_channels (int): Base of channels for calculating width.
            Default: 64.
        radix (int): Radix of SplitAttentionConv2d. Default: 2
        reduction_factor (int): Reduction factor of inter_channels in
            SplitAttentionConv2d. Default: 4.
        avg_down_stride (bool): Whether to use average pool for stride in
            Bottleneck. Default: True.
        kwargs (dict): Key word arguments for base class.
    """
    expansion = 4

    def __init__(self,
                 inplanes,
                 planes,
                 groups=1,
                 base_width=4,
                 base_channels=64,
                 radix=2,
                 reduction_factor=4,
                 avg_down_stride=True,
                 **kwargs):
        """Bottleneck block for ResNeSt."""
        super(Bottleneck, self).__init__(inplanes, planes, **kwargs)

        if groups == 1:
            width = self.planes
        else:
            # ResNeXt-style widening by cardinality and base width.
            width = math.floor(self.planes *
                               (base_width / base_channels)) * groups

        # Only move the stride into an avg-pool when conv2 is strided.
        self.avg_down_stride = avg_down_stride and self.conv2_stride > 1

        self.norm1_name, norm1 = build_norm_layer(
            self.norm_cfg, width, postfix=1)
        self.norm3_name, norm3 = build_norm_layer(
            self.norm_cfg, self.planes * self.expansion, postfix=3)

        self.conv1 = build_conv_layer(
            self.conv_cfg,
            self.inplanes,
            width,
            kernel_size=1,
            stride=self.conv1_stride,
            bias=False)
        self.add_module(self.norm1_name, norm1)
        self.with_modulated_dcn = False
        self.conv2 = SplitAttentionConv2d(
            width,
            width,
            kernel_size=3,
            stride=1 if self.avg_down_stride else self.conv2_stride,
            padding=self.dilation,
            dilation=self.dilation,
            groups=groups,
            radix=radix,
            reduction_factor=reduction_factor,
            conv_cfg=self.conv_cfg,
            norm_cfg=self.norm_cfg,
            dcn=self.dcn)
        # conv2 (SplitAttentionConv2d) owns its norm layers, so the
        # parent's norm2 is unused and removed.
        delattr(self, self.norm2_name)

        if self.avg_down_stride:
            # Apply the spatial stride via average pooling after conv2.
            self.avd_layer = nn.AvgPool2d(3, self.conv2_stride, padding=1)

        self.conv3 = build_conv_layer(
            self.conv_cfg,
            width,
            self.planes * self.expansion,
            kernel_size=1,
            bias=False)
        self.add_module(self.norm3_name, norm3)

    def forward(self, x):

        def _inner_forward(x):
            identity = x

            out = self.conv1(x)
            out = self.norm1(out)
            out = self.relu(out)

            if self.with_plugins:
                out = self.forward_plugin(out, self.after_conv1_plugin_names)

            out = self.conv2(out)

            if self.avg_down_stride:
                out = self.avd_layer(out)

            if self.with_plugins:
                out = self.forward_plugin(out, self.after_conv2_plugin_names)

            out = self.conv3(out)
            out = self.norm3(out)

            if self.with_plugins:
                out = self.forward_plugin(out, self.after_conv3_plugin_names)

            if self.downsample is not None:
                identity = self.downsample(x)

            out += identity

            return out

        if self.with_cp and x.requires_grad:
            # Trade compute for memory via gradient checkpointing.
            out = cp.checkpoint(_inner_forward, x)
        else:
            out = _inner_forward(x)

        out = self.relu(out)

        return out
@BACKBONES.register_module()
class ResNeSt(ResNetV1d):
    """ResNeSt backbone.

    Args:
        groups (int): Number of groups of Bottleneck. Default: 1
        base_width (int): Base width of Bottleneck. Default: 4
        radix (int): Radix of SplitAttentionConv2d. Default: 2
        reduction_factor (int): Reduction factor of inter_channels in
            SplitAttentionConv2d. Default: 4.
        avg_down_stride (bool): Whether to use average pool for stride in
            Bottleneck. Default: True.
        kwargs (dict): Keyword arguments for ResNet.
    """

    arch_settings = {
        50: (Bottleneck, (3, 4, 6, 3)),
        101: (Bottleneck, (3, 4, 23, 3)),
        152: (Bottleneck, (3, 8, 36, 3)),
        200: (Bottleneck, (3, 24, 36, 3))
    }

    def __init__(self,
                 groups=1,
                 base_width=4,
                 radix=2,
                 reduction_factor=4,
                 avg_down_stride=True,
                 **kwargs):
        # These must be set before super().__init__(), which builds the
        # res layers through ``make_res_layer`` below.
        self.groups = groups
        self.base_width = base_width
        self.radix = radix
        self.reduction_factor = reduction_factor
        self.avg_down_stride = avg_down_stride
        super(ResNeSt, self).__init__(**kwargs)

    def make_res_layer(self, **kwargs):
        """Pack all blocks in a stage into a ``ResLayer``."""
        return ResLayer(
            groups=self.groups,
            base_width=self.base_width,
            base_channels=self.base_channels,
            radix=self.radix,
            reduction_factor=self.reduction_factor,
            avg_down_stride=self.avg_down_stride,
            **kwargs)
| 10,579 | 31.755418 | 79 | py |
PseCo | PseCo-master/thirdparty/mmdetection/mmdet/models/backbones/csp_darknet.py | # Copyright (c) OpenMMLab. All rights reserved.
import math
import torch
import torch.nn as nn
from mmcv.cnn import ConvModule, DepthwiseSeparableConvModule
from mmcv.runner import BaseModule
from torch.nn.modules.batchnorm import _BatchNorm
from ..builder import BACKBONES
from ..utils import CSPLayer
class Focus(nn.Module):
    """Focus width and height information into channel space.

    Splits the input into its four pixel parities (even/odd rows x
    even/odd columns), stacks them along the channel axis and fuses them
    with a convolution, halving the spatial size.

    Args:
        in_channels (int): The input channels of this Module.
        out_channels (int): The output channels of this Module.
        kernel_size (int): The kernel size of the convolution. Default: 1
        stride (int): The stride of the convolution. Default: 1
        conv_cfg (dict): Config dict for convolution layer. Default: None,
            which means using conv2d.
        norm_cfg (dict): Config dict for normalization layer.
            Default: dict(type='BN', momentum=0.03, eps=0.001).
        act_cfg (dict): Config dict for activation layer.
            Default: dict(type='Swish').
    """

    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size=1,
                 stride=1,
                 conv_cfg=None,
                 norm_cfg=dict(type='BN', momentum=0.03, eps=0.001),
                 act_cfg=dict(type='Swish')):
        super().__init__()
        # Four parity patches are concatenated, hence 4x input channels.
        self.conv = ConvModule(
            in_channels * 4,
            out_channels,
            kernel_size,
            stride,
            padding=(kernel_size - 1) // 2,
            conv_cfg=conv_cfg,
            norm_cfg=norm_cfg,
            act_cfg=act_cfg)

    def forward(self, x):
        # (b, c, h, w) -> (b, 4c, h/2, w/2)
        top_left = x[..., ::2, ::2]
        top_right = x[..., ::2, 1::2]
        bot_left = x[..., 1::2, ::2]
        bot_right = x[..., 1::2, 1::2]
        stacked = torch.cat(
            (top_left, bot_left, top_right, bot_right), dim=1)
        return self.conv(stacked)
class SPPBottleneck(BaseModule):
    """Spatial pyramid pooling layer used in YOLOv3-SPP.

    Reduces channels with a 1x1 conv, runs several stride-1 max poolings
    of increasing kernel size in parallel, concatenates the results with
    the reduced input and fuses them with a second 1x1 conv.

    Args:
        in_channels (int): The input channels of this Module.
        out_channels (int): The output channels of this Module.
        kernel_sizes (tuple[int]): Sequential of kernel sizes of pooling
            layers. Default: (5, 9, 13).
        conv_cfg (dict): Config dict for convolution layer. Default: None,
            which means using conv2d.
        norm_cfg (dict): Config dict for normalization layer.
            Default: dict(type='BN').
        act_cfg (dict): Config dict for activation layer.
            Default: dict(type='Swish').
        init_cfg (dict or list[dict], optional): Initialization config dict.
            Default: None.
    """

    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_sizes=(5, 9, 13),
                 conv_cfg=None,
                 norm_cfg=dict(type='BN', momentum=0.03, eps=0.001),
                 act_cfg=dict(type='Swish'),
                 init_cfg=None):
        super().__init__(init_cfg)
        hidden_channels = in_channels // 2
        self.conv1 = ConvModule(
            in_channels,
            hidden_channels,
            1,
            stride=1,
            conv_cfg=conv_cfg,
            norm_cfg=norm_cfg,
            act_cfg=act_cfg)
        # Stride-1 poolings with 'same' padding keep the spatial size.
        self.poolings = nn.ModuleList([
            nn.MaxPool2d(kernel_size=ks, stride=1, padding=ks // 2)
            for ks in kernel_sizes
        ])
        # The fuse conv sees the reduced input plus one copy per pooling.
        self.conv2 = ConvModule(
            hidden_channels * (len(kernel_sizes) + 1),
            out_channels,
            1,
            conv_cfg=conv_cfg,
            norm_cfg=norm_cfg,
            act_cfg=act_cfg)

    def forward(self, x):
        reduced = self.conv1(x)
        pooled = [reduced] + [pool(reduced) for pool in self.poolings]
        return self.conv2(torch.cat(pooled, dim=1))
@BACKBONES.register_module()
class CSPDarknet(BaseModule):
    """CSP-Darknet backbone used in YOLOv5 and YOLOX.
    Args:
        arch (str): Architecture of CSP-Darknet, from {P5, P6}.
            Default: P5.
        deepen_factor (float): Depth multiplier, multiply number of
            blocks in CSP layer by this amount. Default: 1.0.
        widen_factor (float): Width multiplier, multiply number of
            channels in each layer by this amount. Default: 1.0.
        out_indices (Sequence[int]): Output from which stages.
            Default: (2, 3, 4).
        frozen_stages (int): Stages to be frozen (stop grad and set eval
            mode). -1 means not freezing any parameters. Default: -1.
        use_depthwise (bool): Whether to use depthwise separable convolution.
            Default: False.
        arch_ovewrite(list): Overwrite default arch settings. Default: None.
        spp_kernal_sizes: (tuple[int]): Sequential of kernel sizes of SPP
            layers. Default: (5, 9, 13).
        conv_cfg (dict): Config dict for convolution layer. Default: None.
        norm_cfg (dict): Dictionary to construct and config norm layer.
            Default: dict(type='BN', requires_grad=True).
        act_cfg (dict): Config dict for activation layer.
            Default: dict(type='LeakyReLU', negative_slope=0.1).
        norm_eval (bool): Whether to set norm layers to eval mode, namely,
            freeze running stats (mean and var). Note: Effect on Batch Norm
            and its variants only.
        init_cfg (dict or list[dict], optional): Initialization config dict.
            Default: None.
    Example:
        >>> from mmdet.models import CSPDarknet
        >>> import torch
        >>> self = CSPDarknet(arch='P5')
        >>> self.eval()
        >>> inputs = torch.rand(1, 3, 416, 416)
        >>> level_outputs = self.forward(inputs)
        >>> for level_out in level_outputs:
        ...     print(tuple(level_out.shape))
        ...
        (1, 256, 52, 52)
        (1, 512, 26, 26)
        (1, 1024, 13, 13)
    """
    # From left to right:
    # in_channels, out_channels, num_blocks, add_identity, use_spp
    arch_settings = {
        'P5': [[64, 128, 3, True, False], [128, 256, 9, True, False],
               [256, 512, 9, True, False], [512, 1024, 3, False, True]],
        'P6': [[64, 128, 3, True, False], [128, 256, 9, True, False],
               [256, 512, 9, True, False], [512, 768, 3, True, False],
               [768, 1024, 3, False, True]]
    }
    def __init__(self,
                 arch='P5',
                 deepen_factor=1.0,
                 widen_factor=1.0,
                 out_indices=(2, 3, 4),
                 frozen_stages=-1,
                 use_depthwise=False,
                 arch_ovewrite=None,
                 spp_kernal_sizes=(5, 9, 13),
                 conv_cfg=None,
                 norm_cfg=dict(type='BN', momentum=0.03, eps=0.001),
                 act_cfg=dict(type='Swish'),
                 norm_eval=False,
                 init_cfg=dict(
                     type='Kaiming',
                     layer='Conv2d',
                     a=math.sqrt(5),
                     distribution='uniform',
                     mode='fan_in',
                     nonlinearity='leaky_relu')):
        super().__init__(init_cfg)
        arch_setting = self.arch_settings[arch]
        if arch_ovewrite:
            arch_setting = arch_ovewrite
        # out_indices index into self.layers, where index 0 is the stem and
        # indices 1..len(arch_setting) are the CSP stages.
        assert set(out_indices).issubset(
            i for i in range(len(arch_setting) + 1))
        if frozen_stages not in range(-1, len(arch_setting) + 1):
            raise ValueError('frozen_stages must be in range(-1, '
                             'len(arch_setting) + 1). But received '
                             f'{frozen_stages}')
        self.out_indices = out_indices
        self.frozen_stages = frozen_stages
        self.use_depthwise = use_depthwise
        self.norm_eval = norm_eval
        conv = DepthwiseSeparableConvModule if use_depthwise else ConvModule
        # Stem: Focus module performing a 2x space-to-depth before the conv.
        self.stem = Focus(
            3,
            int(arch_setting[0][0] * widen_factor),
            kernel_size=3,
            conv_cfg=conv_cfg,
            norm_cfg=norm_cfg,
            act_cfg=act_cfg)
        self.layers = ['stem']
        for i, (in_channels, out_channels, num_blocks, add_identity,
                use_spp) in enumerate(arch_setting):
            # widen_factor scales channel counts; deepen_factor scales the
            # number of CSP blocks (at least one block per stage).
            in_channels = int(in_channels * widen_factor)
            out_channels = int(out_channels * widen_factor)
            num_blocks = max(round(num_blocks * deepen_factor), 1)
            stage = []
            # Stride-2 conv halves the resolution at the start of the stage.
            conv_layer = conv(
                in_channels,
                out_channels,
                3,
                stride=2,
                padding=1,
                conv_cfg=conv_cfg,
                norm_cfg=norm_cfg,
                act_cfg=act_cfg)
            stage.append(conv_layer)
            if use_spp:
                spp = SPPBottleneck(
                    out_channels,
                    out_channels,
                    kernel_sizes=spp_kernal_sizes,
                    conv_cfg=conv_cfg,
                    norm_cfg=norm_cfg,
                    act_cfg=act_cfg)
                stage.append(spp)
            csp_layer = CSPLayer(
                out_channels,
                out_channels,
                num_blocks=num_blocks,
                add_identity=add_identity,
                use_depthwise=use_depthwise,
                conv_cfg=conv_cfg,
                norm_cfg=norm_cfg,
                act_cfg=act_cfg)
            stage.append(csp_layer)
            self.add_module(f'stage{i + 1}', nn.Sequential(*stage))
            self.layers.append(f'stage{i + 1}')
    def _freeze_stages(self):
        # Freeze layers[0..frozen_stages] (stem counts as stage 0): eval mode
        # plus requires_grad=False on all parameters.
        if self.frozen_stages >= 0:
            for i in range(self.frozen_stages + 1):
                m = getattr(self, self.layers[i])
                m.eval()
                for param in m.parameters():
                    param.requires_grad = False
    def train(self, mode=True):
        """Set train/eval mode, keeping frozen stages and (optionally) all
        batch-norm layers in eval mode."""
        super(CSPDarknet, self).train(mode)
        self._freeze_stages()
        if mode and self.norm_eval:
            for m in self.modules():
                if isinstance(m, _BatchNorm):
                    m.eval()
    def forward(self, x):
        """Run the stem and every stage, collecting outputs at out_indices."""
        outs = []
        for i, layer_name in enumerate(self.layers):
            layer = getattr(self, layer_name)
            x = layer(x)
            if i in self.out_indices:
                outs.append(x)
        return tuple(outs)
| 10,544 | 36 | 77 | py |
PseCo | PseCo-master/thirdparty/mmdetection/mmdet/models/backbones/hourglass.py | # Copyright (c) OpenMMLab. All rights reserved.
import torch.nn as nn
import torch.nn.functional as F
from mmcv.cnn import ConvModule
from mmcv.runner import BaseModule
from ..builder import BACKBONES
from ..utils import ResLayer
from .resnet import BasicBlock
class HourglassModule(BaseModule):
    """Hourglass Module for HourglassNet backbone.
    Generate module recursively and use BasicBlock as the base unit.
    Args:
        depth (int): Depth of current HourglassModule.
        stage_channels (list[int]): Feature channels of sub-modules in current
            and follow-up HourglassModule.
        stage_blocks (list[int]): Number of sub-modules stacked in current and
            follow-up HourglassModule.
        norm_cfg (dict): Dictionary to construct and config norm layer.
        init_cfg (dict or list[dict], optional): Initialization config dict.
            Default: None
        upsample_cfg (dict, optional): Config dict for interpolate layer.
            Default: `dict(mode='nearest')`
    """
    def __init__(self,
                 depth,
                 stage_channels,
                 stage_blocks,
                 norm_cfg=dict(type='BN', requires_grad=True),
                 init_cfg=None,
                 upsample_cfg=dict(mode='nearest')):
        super(HourglassModule, self).__init__(init_cfg)
        self.depth = depth
        cur_block = stage_blocks[0]
        next_block = stage_blocks[1]
        cur_channel = stage_channels[0]
        next_channel = stage_channels[1]
        # High-resolution (skip) branch, kept at the current scale.
        self.up1 = ResLayer(
            BasicBlock, cur_channel, cur_channel, cur_block, norm_cfg=norm_cfg)
        # Stride-2 entry into the low-resolution branch.
        self.low1 = ResLayer(
            BasicBlock,
            cur_channel,
            next_channel,
            cur_block,
            stride=2,
            norm_cfg=norm_cfg)
        if self.depth > 1:
            # BUGFIX: forward norm_cfg and upsample_cfg to the recursive
            # sub-module; previously any non-default config was silently
            # dropped below the first recursion level.
            self.low2 = HourglassModule(
                depth - 1,
                stage_channels[1:],
                stage_blocks[1:],
                norm_cfg=norm_cfg,
                upsample_cfg=upsample_cfg)
        else:
            self.low2 = ResLayer(
                BasicBlock,
                next_channel,
                next_channel,
                next_block,
                norm_cfg=norm_cfg)
        # Back to the current channel count; downsample_first=False places
        # the channel-changing block last (decoder-style ResLayer).
        self.low3 = ResLayer(
            BasicBlock,
            next_channel,
            cur_channel,
            cur_block,
            norm_cfg=norm_cfg,
            downsample_first=False)
        self.up2 = F.interpolate
        self.upsample_cfg = upsample_cfg
    def forward(self, x):
        """Forward function."""
        up1 = self.up1(x)
        low1 = self.low1(x)
        low2 = self.low2(low1)
        low3 = self.low3(low2)
        # Fixing `scale_factor` (e.g. 2) is common for upsampling, but
        # in some cases the spatial size is mismatched and error will arise.
        if 'scale_factor' in self.upsample_cfg:
            up2 = self.up2(low3, **self.upsample_cfg)
        else:
            shape = up1.shape[2:]
            up2 = self.up2(low3, size=shape, **self.upsample_cfg)
        return up1 + up2
@BACKBONES.register_module()
class HourglassNet(BaseModule):
    """HourglassNet backbone.
    Stacked Hourglass Networks for Human Pose Estimation.
    More details can be found in the `paper
    <https://arxiv.org/abs/1603.06937>`_ .
    Args:
        downsample_times (int): Downsample times in a HourglassModule.
        num_stacks (int): Number of HourglassModule modules stacked,
            1 for Hourglass-52, 2 for Hourglass-104.
        stage_channels (list[int]): Feature channel of each sub-module in a
            HourglassModule.
        stage_blocks (list[int]): Number of sub-modules stacked in a
            HourglassModule.
        feat_channel (int): Feature channel of conv after a HourglassModule.
        norm_cfg (dict): Dictionary to construct and config norm layer.
        pretrained (str, optional): model pretrained path. Default: None
        init_cfg (dict or list[dict], optional): Initialization config dict.
            Default: None
    Example:
        >>> from mmdet.models import HourglassNet
        >>> import torch
        >>> self = HourglassNet()
        >>> self.eval()
        >>> inputs = torch.rand(1, 3, 511, 511)
        >>> level_outputs = self.forward(inputs)
        >>> for level_output in level_outputs:
        ...     print(tuple(level_output.shape))
        (1, 256, 128, 128)
        (1, 256, 128, 128)
    """
    def __init__(self,
                 downsample_times=5,
                 num_stacks=2,
                 stage_channels=(256, 256, 384, 384, 384, 512),
                 stage_blocks=(2, 2, 2, 2, 2, 4),
                 feat_channel=256,
                 norm_cfg=dict(type='BN', requires_grad=True),
                 pretrained=None,
                 init_cfg=None):
        assert init_cfg is None, 'To prevent abnormal initialization ' \
                                 'behavior, init_cfg is not allowed to be set'
        super(HourglassNet, self).__init__(init_cfg)
        self.num_stacks = num_stacks
        assert self.num_stacks >= 1
        assert len(stage_channels) == len(stage_blocks)
        # Each recursion level of HourglassModule consumes one entry, so
        # there must be more entries than downsample_times.
        assert len(stage_channels) > downsample_times
        cur_channel = stage_channels[0]
        # Stem downsamples by 4x overall (stride-2 conv + stride-2 ResLayer).
        self.stem = nn.Sequential(
            ConvModule(
                3, cur_channel // 2, 7, padding=3, stride=2,
                norm_cfg=norm_cfg),
            ResLayer(
                BasicBlock,
                cur_channel // 2,
                cur_channel,
                1,
                stride=2,
                norm_cfg=norm_cfg))
        self.hourglass_modules = nn.ModuleList([
            HourglassModule(downsample_times, stage_channels, stage_blocks)
            for _ in range(num_stacks)
        ])
        # Intermediate ResLayers between consecutive hourglass stacks
        # (num_stacks - 1 of them, used in the inter_feat chain below).
        self.inters = ResLayer(
            BasicBlock,
            cur_channel,
            cur_channel,
            num_stacks - 1,
            norm_cfg=norm_cfg)
        self.conv1x1s = nn.ModuleList([
            ConvModule(
                cur_channel, cur_channel, 1, norm_cfg=norm_cfg, act_cfg=None)
            for _ in range(num_stacks - 1)
        ])
        self.out_convs = nn.ModuleList([
            ConvModule(
                cur_channel, feat_channel, 3, padding=1, norm_cfg=norm_cfg)
            for _ in range(num_stacks)
        ])
        # Remaps each stack's output back to cur_channel so it can be fused
        # with the running inter_feat.
        self.remap_convs = nn.ModuleList([
            ConvModule(
                feat_channel, cur_channel, 1, norm_cfg=norm_cfg, act_cfg=None)
            for _ in range(num_stacks - 1)
        ])
        self.relu = nn.ReLU(inplace=True)
    def init_weights(self):
        """Init module weights."""
        # Training Centripetal Model needs to reset parameters for Conv2d
        super(HourglassNet, self).init_weights()
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                m.reset_parameters()
    def forward(self, x):
        """Forward function.

        Returns a list with one feature map per stack (intermediate
        supervision targets), each of shape (N, feat_channel, H/4, W/4).
        """
        inter_feat = self.stem(x)
        out_feats = []
        for ind in range(self.num_stacks):
            single_hourglass = self.hourglass_modules[ind]
            out_conv = self.out_convs[ind]
            hourglass_feat = single_hourglass(inter_feat)
            out_feat = out_conv(hourglass_feat)
            out_feats.append(out_feat)
            # Fuse this stack's output back into the trunk before the next
            # stack (skip after the last stack).
            if ind < self.num_stacks - 1:
                inter_feat = self.conv1x1s[ind](
                    inter_feat) + self.remap_convs[ind](
                        out_feat)
                inter_feat = self.inters[ind](self.relu(inter_feat))
        return out_feats
| 7,494 | 32.609865 | 79 | py |
PseCo | PseCo-master/thirdparty/mmdetection/mmdet/models/backbones/res2net.py | # Copyright (c) OpenMMLab. All rights reserved.
import math
import torch
import torch.nn as nn
import torch.utils.checkpoint as cp
from mmcv.cnn import build_conv_layer, build_norm_layer
from mmcv.runner import Sequential
from ..builder import BACKBONES
from .resnet import Bottleneck as _Bottleneck
from .resnet import ResNet
class Bottle2neck(_Bottleneck):
    """Res2Net ``Bottle2neck`` block.

    Replaces the single 3x3 conv of a ResNet Bottleneck with ``scales``
    channel splits processed by a hierarchy of 3x3 convs, giving
    multi-scale receptive fields within a single block.
    """
    expansion = 4
    def __init__(self,
                 inplanes,
                 planes,
                 scales=4,
                 base_width=26,
                 base_channels=64,
                 stage_type='normal',
                 **kwargs):
        """Bottle2neck block for Res2Net.
        If style is "pytorch", the stride-two layer is the 3x3 conv layer, if
        it is "caffe", the stride-two layer is the first 1x1 conv layer.
        """
        super(Bottle2neck, self).__init__(inplanes, planes, **kwargs)
        assert scales > 1, 'Res2Net degenerates to ResNet when scales = 1.'
        # Per-split channel width; conv1 expands to width * scales channels
        # which are then split into `scales` groups.
        width = int(math.floor(self.planes * (base_width / base_channels)))
        self.norm1_name, norm1 = build_norm_layer(
            self.norm_cfg, width * scales, postfix=1)
        self.norm3_name, norm3 = build_norm_layer(
            self.norm_cfg, self.planes * self.expansion, postfix=3)
        self.conv1 = build_conv_layer(
            self.conv_cfg,
            self.inplanes,
            width * scales,
            kernel_size=1,
            stride=self.conv1_stride,
            bias=False)
        self.add_module(self.norm1_name, norm1)
        # In strided 'stage' blocks the last split is average-pooled instead
        # of convolved (see forward).
        if stage_type == 'stage' and self.conv2_stride != 1:
            self.pool = nn.AvgPool2d(
                kernel_size=3, stride=self.conv2_stride, padding=1)
        convs = []
        bns = []
        fallback_on_stride = False
        if self.with_dcn:
            fallback_on_stride = self.dcn.pop('fallback_on_stride', False)
        # scales - 1 convs: the last split passes through without a conv.
        if not self.with_dcn or fallback_on_stride:
            for i in range(scales - 1):
                convs.append(
                    build_conv_layer(
                        self.conv_cfg,
                        width,
                        width,
                        kernel_size=3,
                        stride=self.conv2_stride,
                        padding=self.dilation,
                        dilation=self.dilation,
                        bias=False))
                bns.append(
                    build_norm_layer(self.norm_cfg, width, postfix=i + 1)[1])
            self.convs = nn.ModuleList(convs)
            self.bns = nn.ModuleList(bns)
        else:
            # Same layout as above, but the 3x3 convs are deformable (DCN).
            assert self.conv_cfg is None, 'conv_cfg must be None for DCN'
            for i in range(scales - 1):
                convs.append(
                    build_conv_layer(
                        self.dcn,
                        width,
                        width,
                        kernel_size=3,
                        stride=self.conv2_stride,
                        padding=self.dilation,
                        dilation=self.dilation,
                        bias=False))
                bns.append(
                    build_norm_layer(self.norm_cfg, width, postfix=i + 1)[1])
            self.convs = nn.ModuleList(convs)
            self.bns = nn.ModuleList(bns)
        self.conv3 = build_conv_layer(
            self.conv_cfg,
            width * scales,
            self.planes * self.expansion,
            kernel_size=1,
            bias=False)
        self.add_module(self.norm3_name, norm3)
        self.stage_type = stage_type
        self.scales = scales
        self.width = width
        # The parent Bottleneck's single conv2/norm2 are superseded by
        # self.convs/self.bns, so remove them.
        delattr(self, 'conv2')
        delattr(self, self.norm2_name)
    def forward(self, x):
        """Forward function."""
        def _inner_forward(x):
            identity = x
            out = self.conv1(x)
            out = self.norm1(out)
            out = self.relu(out)
            if self.with_plugins:
                out = self.forward_plugin(out, self.after_conv1_plugin_names)
            # Split into `scales` groups of `width` channels each and process
            # hierarchically: each split (except in 'stage' blocks) also
            # receives the previous split's output.
            spx = torch.split(out, self.width, 1)
            sp = self.convs[0](spx[0].contiguous())
            sp = self.relu(self.bns[0](sp))
            out = sp
            for i in range(1, self.scales - 1):
                if self.stage_type == 'stage':
                    sp = spx[i]
                else:
                    sp = sp + spx[i]
                sp = self.convs[i](sp.contiguous())
                sp = self.relu(self.bns[i](sp))
                out = torch.cat((out, sp), 1)
            # Last split: identity pass-through, or avg-pooled when the
            # block downsamples ('stage' type with stride > 1).
            if self.stage_type == 'normal' or self.conv2_stride == 1:
                out = torch.cat((out, spx[self.scales - 1]), 1)
            elif self.stage_type == 'stage':
                out = torch.cat((out, self.pool(spx[self.scales - 1])), 1)
            if self.with_plugins:
                out = self.forward_plugin(out, self.after_conv2_plugin_names)
            out = self.conv3(out)
            out = self.norm3(out)
            if self.with_plugins:
                out = self.forward_plugin(out, self.after_conv3_plugin_names)
            if self.downsample is not None:
                identity = self.downsample(x)
            out += identity
            return out
        if self.with_cp and x.requires_grad:
            out = cp.checkpoint(_inner_forward, x)
        else:
            out = _inner_forward(x)
        out = self.relu(out)
        return out
class Res2Layer(Sequential):
    """Res2Layer to build Res2Net style backbone.

    Args:
        block (nn.Module): block used to build ResLayer.
        inplanes (int): inplanes of block.
        planes (int): planes of block.
        num_blocks (int): number of blocks.
        stride (int): stride of the first block. Default: 1
        avg_down (bool): Use AvgPool instead of stride conv when
            downsampling in the bottle2neck. Default: False
        conv_cfg (dict): dictionary to construct and config conv layer.
            Default: None
        norm_cfg (dict): dictionary to construct and config norm layer.
            Default: dict(type='BN')
        scales (int): Scales used in Res2Net. Default: 4
        base_width (int): Basic width of each scale. Default: 26
    """

    def __init__(self,
                 block,
                 inplanes,
                 planes,
                 num_blocks,
                 stride=1,
                 avg_down=True,
                 conv_cfg=None,
                 norm_cfg=dict(type='BN'),
                 scales=4,
                 base_width=26,
                 **kwargs):
        self.block = block

        downsample = None
        if stride != 1 or inplanes != planes * block.expansion:
            # Shortcut path: avg-pool downsampling followed by a 1x1
            # projection + norm (instead of a strided conv).
            downsample = nn.Sequential(
                nn.AvgPool2d(
                    kernel_size=stride,
                    stride=stride,
                    ceil_mode=True,
                    count_include_pad=False),
                build_conv_layer(
                    conv_cfg,
                    inplanes,
                    planes * block.expansion,
                    kernel_size=1,
                    stride=1,
                    bias=False),
                build_norm_layer(norm_cfg, planes * block.expansion)[1],
            )

        shared = dict(
            planes=planes,
            conv_cfg=conv_cfg,
            norm_cfg=norm_cfg,
            scales=scales,
            base_width=base_width)
        # The first block handles the stride and channel change ('stage'
        # type); the remaining blocks keep resolution and channels.
        layers = [
            block(
                inplanes=inplanes,
                stride=stride,
                downsample=downsample,
                stage_type='stage',
                **shared,
                **kwargs)
        ]
        layers.extend(
            block(
                inplanes=planes * block.expansion,
                stride=1,
                **shared,
                **kwargs) for _ in range(num_blocks - 1))
        super(Res2Layer, self).__init__(*layers)
@BACKBONES.register_module()
class Res2Net(ResNet):
    """Res2Net backbone.
    Args:
        scales (int): Scales used in Res2Net. Default: 4
        base_width (int): Basic width of each scale. Default: 26
        depth (int): Depth of res2net, from {50, 101, 152}.
        in_channels (int): Number of input image channels. Default: 3.
        num_stages (int): Res2net stages. Default: 4.
        strides (Sequence[int]): Strides of the first block of each stage.
        dilations (Sequence[int]): Dilation of each stage.
        out_indices (Sequence[int]): Output from which stages.
        style (str): `pytorch` or `caffe`. If set to "pytorch", the stride-two
            layer is the 3x3 conv layer, otherwise the stride-two layer is
            the first 1x1 conv layer.
        deep_stem (bool): Replace 7x7 conv in input stem with 3 3x3 conv
        avg_down (bool): Use AvgPool instead of stride conv when
            downsampling in the bottle2neck.
        frozen_stages (int): Stages to be frozen (stop grad and set eval mode).
            -1 means not freezing any parameters.
        norm_cfg (dict): Dictionary to construct and config norm layer.
        norm_eval (bool): Whether to set norm layers to eval mode, namely,
            freeze running stats (mean and var). Note: Effect on Batch Norm
            and its variants only.
        plugins (list[dict]): List of plugins for stages, each dict contains:
            - cfg (dict, required): Cfg dict to build plugin.
            - position (str, required): Position inside block to insert
              plugin, options are 'after_conv1', 'after_conv2', 'after_conv3'.
            - stages (tuple[bool], optional): Stages to apply plugin, length
              should be same as 'num_stages'.
        with_cp (bool): Use checkpoint or not. Using checkpoint will save some
            memory while slowing down the training speed.
        zero_init_residual (bool): Whether to use zero init for last norm layer
            in resblocks to let them behave as identity.
        pretrained (str, optional): model pretrained path. Default: None
        init_cfg (dict or list[dict], optional): Initialization config dict.
            Default: None
    Example:
        >>> from mmdet.models import Res2Net
        >>> import torch
        >>> self = Res2Net(depth=50, scales=4, base_width=26)
        >>> self.eval()
        >>> inputs = torch.rand(1, 3, 32, 32)
        >>> level_outputs = self.forward(inputs)
        >>> for level_out in level_outputs:
        ...     print(tuple(level_out.shape))
        (1, 256, 8, 8)
        (1, 512, 4, 4)
        (1, 1024, 2, 2)
        (1, 2048, 1, 1)
    """

    # depth -> (block class, blocks per stage)
    arch_settings = {
        50: (Bottle2neck, (3, 4, 6, 3)),
        101: (Bottle2neck, (3, 4, 23, 3)),
        152: (Bottle2neck, (3, 8, 36, 3))
    }

    def __init__(self,
                 scales=4,
                 base_width=26,
                 style='pytorch',
                 deep_stem=True,
                 avg_down=True,
                 pretrained=None,
                 init_cfg=None,
                 **kwargs):
        self.scales = scales
        self.base_width = base_width
        # NOTE: Res2Net only supports the pytorch-style, deep-stem, avg-down
        # variant, so those three settings are pinned here regardless of the
        # values passed in.
        super(Res2Net, self).__init__(
            style='pytorch',
            deep_stem=True,
            avg_down=True,
            pretrained=pretrained,
            init_cfg=init_cfg,
            **kwargs)

    def make_res_layer(self, **kwargs):
        """Build a ``Res2Layer`` for one stage."""
        return Res2Layer(
            scales=self.scales,
            base_width=self.base_width,
            base_channels=self.base_channels,
            **kwargs)
| 11,659 | 34.54878 | 79 | py |
PseCo | PseCo-master/thirdparty/mmdetection/mmdet/models/backbones/darknet.py | # Copyright (c) OpenMMLab. All rights reserved.
# Copyright (c) 2019 Western Digital Corporation or its affiliates.
import warnings
import torch.nn as nn
from mmcv.cnn import ConvModule
from mmcv.runner import BaseModule
from torch.nn.modules.batchnorm import _BatchNorm
from ..builder import BACKBONES
class ResBlock(BaseModule):
    """The basic residual block used in Darknet.

    Two ConvModules (Conv + BN + LeakyReLU) with the input added to the
    final output.  The first conv halves the channel count with a 1x1
    kernel; the second restores it with a 3x3 kernel, matching the YOLOv3
    paper.

    Args:
        in_channels (int): The input channels. Must be even.
        conv_cfg (dict): Config dict for convolution layer. Default: None.
        norm_cfg (dict): Dictionary to construct and config norm layer.
            Default: dict(type='BN', requires_grad=True)
        act_cfg (dict): Config dict for activation layer.
            Default: dict(type='LeakyReLU', negative_slope=0.1).
        init_cfg (dict or list[dict], optional): Initialization config dict.
            Default: None
    """

    def __init__(self,
                 in_channels,
                 conv_cfg=None,
                 norm_cfg=dict(type='BN', requires_grad=True),
                 act_cfg=dict(type='LeakyReLU', negative_slope=0.1),
                 init_cfg=None):
        super(ResBlock, self).__init__(init_cfg)
        assert in_channels % 2 == 0  # ensure the in_channels is even
        hidden_channels = in_channels // 2
        common = dict(conv_cfg=conv_cfg, norm_cfg=norm_cfg, act_cfg=act_cfg)
        self.conv1 = ConvModule(in_channels, hidden_channels, 1, **common)
        self.conv2 = ConvModule(
            hidden_channels, in_channels, 3, padding=1, **common)

    def forward(self, x):
        identity = x
        out = self.conv2(self.conv1(x))
        return out + identity
@BACKBONES.register_module()
class Darknet(BaseModule):
    """Darknet backbone.
    Args:
        depth (int): Depth of Darknet. Currently only support 53.
        out_indices (Sequence[int]): Output from which stages.
        frozen_stages (int): Stages to be frozen (stop grad and set eval mode).
            -1 means not freezing any parameters. Default: -1.
        conv_cfg (dict): Config dict for convolution layer. Default: None.
        norm_cfg (dict): Dictionary to construct and config norm layer.
            Default: dict(type='BN', requires_grad=True)
        act_cfg (dict): Config dict for activation layer.
            Default: dict(type='LeakyReLU', negative_slope=0.1).
        norm_eval (bool): Whether to set norm layers to eval mode, namely,
            freeze running stats (mean and var). Note: Effect on Batch Norm
            and its variants only.
        pretrained (str, optional): model pretrained path. Default: None
        init_cfg (dict or list[dict], optional): Initialization config dict.
            Default: None
    Example:
        >>> from mmdet.models import Darknet
        >>> import torch
        >>> self = Darknet(depth=53)
        >>> self.eval()
        >>> inputs = torch.rand(1, 3, 416, 416)
        >>> level_outputs = self.forward(inputs)
        >>> for level_out in level_outputs:
        ...     print(tuple(level_out.shape))
        ...
        (1, 256, 52, 52)
        (1, 512, 26, 26)
        (1, 1024, 13, 13)
    """
    # Dict(depth: (layers, channels))
    arch_settings = {
        53: ((1, 2, 8, 8, 4), ((32, 64), (64, 128), (128, 256), (256, 512),
                               (512, 1024)))
    }
    def __init__(self,
                 depth=53,
                 out_indices=(3, 4, 5),
                 frozen_stages=-1,
                 conv_cfg=None,
                 norm_cfg=dict(type='BN', requires_grad=True),
                 act_cfg=dict(type='LeakyReLU', negative_slope=0.1),
                 norm_eval=True,
                 pretrained=None,
                 init_cfg=None):
        super(Darknet, self).__init__(init_cfg)
        if depth not in self.arch_settings:
            raise KeyError(f'invalid depth {depth} for darknet')
        self.depth = depth
        self.out_indices = out_indices
        self.frozen_stages = frozen_stages
        self.layers, self.channels = self.arch_settings[depth]
        cfg = dict(conv_cfg=conv_cfg, norm_cfg=norm_cfg, act_cfg=act_cfg)
        # Stage 0 is a plain 3x3 conv; out_indices/frozen_stages index into
        # self.cr_blocks, so conv1 counts as index 0.
        self.conv1 = ConvModule(3, 32, 3, padding=1, **cfg)
        self.cr_blocks = ['conv1']
        for i, n_layers in enumerate(self.layers):
            layer_name = f'conv_res_block{i + 1}'
            in_c, out_c = self.channels[i]
            self.add_module(
                layer_name,
                self.make_conv_res_block(in_c, out_c, n_layers, **cfg))
            self.cr_blocks.append(layer_name)
        self.norm_eval = norm_eval
        assert not (init_cfg and pretrained), \
            'init_cfg and pretrained cannot be specified at the same time'
        if isinstance(pretrained, str):
            warnings.warn('DeprecationWarning: pretrained is deprecated, '
                          'please use "init_cfg" instead')
            self.init_cfg = dict(type='Pretrained', checkpoint=pretrained)
        elif pretrained is None:
            if init_cfg is None:
                # Default initialization when neither pretrained nor
                # init_cfg is given.
                self.init_cfg = [
                    dict(type='Kaiming', layer='Conv2d'),
                    dict(
                        type='Constant',
                        val=1,
                        layer=['_BatchNorm', 'GroupNorm'])
                ]
        else:
            raise TypeError('pretrained must be a str or None')
    def forward(self, x):
        """Run all conv-res blocks, collecting outputs at out_indices."""
        outs = []
        for i, layer_name in enumerate(self.cr_blocks):
            cr_block = getattr(self, layer_name)
            x = cr_block(x)
            if i in self.out_indices:
                outs.append(x)
        return tuple(outs)
    def _freeze_stages(self):
        # Freezes cr_blocks[0:frozen_stages]; note the exclusive upper bound
        # (frozen_stages=1 freezes only conv1).
        if self.frozen_stages >= 0:
            for i in range(self.frozen_stages):
                m = getattr(self, self.cr_blocks[i])
                m.eval()
                for param in m.parameters():
                    param.requires_grad = False
    def train(self, mode=True):
        """Set train/eval mode, keeping frozen stages and (optionally) all
        batch-norm layers in eval mode."""
        super(Darknet, self).train(mode)
        self._freeze_stages()
        if mode and self.norm_eval:
            for m in self.modules():
                if isinstance(m, _BatchNorm):
                    m.eval()
    @staticmethod
    def make_conv_res_block(in_channels,
                            out_channels,
                            res_repeat,
                            conv_cfg=None,
                            norm_cfg=dict(type='BN', requires_grad=True),
                            act_cfg=dict(type='LeakyReLU',
                                         negative_slope=0.1)):
        """In Darknet backbone, ConvLayer is usually followed by ResBlock. This
        function will make that. The Conv layers always have 3x3 filters with
        stride=2. The number of the filters in Conv layer is the same as the
        out channels of the ResBlock.
        Args:
            in_channels (int): The number of input channels.
            out_channels (int): The number of output channels.
            res_repeat (int): The number of ResBlocks.
            conv_cfg (dict): Config dict for convolution layer. Default: None.
            norm_cfg (dict): Dictionary to construct and config norm layer.
                Default: dict(type='BN', requires_grad=True)
            act_cfg (dict): Config dict for activation layer.
                Default: dict(type='LeakyReLU', negative_slope=0.1).
        """
        cfg = dict(conv_cfg=conv_cfg, norm_cfg=norm_cfg, act_cfg=act_cfg)
        model = nn.Sequential()
        model.add_module(
            'conv',
            ConvModule(
                in_channels, out_channels, 3, stride=2, padding=1, **cfg))
        for idx in range(res_repeat):
            model.add_module('res{}'.format(idx),
                             ResBlock(out_channels, **cfg))
        return model
| 8,233 | 37.476636 | 79 | py |
PseCo | PseCo-master/thirdparty/mmdetection/mmdet/datasets/custom.py | # Copyright (c) OpenMMLab. All rights reserved.
import os.path as osp
import warnings
from collections import OrderedDict
import mmcv
import numpy as np
from mmcv.utils import print_log
from terminaltables import AsciiTable
from torch.utils.data import Dataset
from mmdet.core import eval_map, eval_recalls
from .builder import DATASETS
from .pipelines import Compose
import ipdb
@DATASETS.register_module()
class CustomDataset(Dataset):
"""Custom dataset for detection.
The annotation format is shown as follows. The `ann` field is optional for
testing.
.. code-block:: none
[
{
'filename': 'a.jpg',
'width': 1280,
'height': 720,
'ann': {
'bboxes': <np.ndarray> (n, 4) in (x1, y1, x2, y2) order.
'labels': <np.ndarray> (n, ),
'bboxes_ignore': <np.ndarray> (k, 4), (optional field)
'labels_ignore': <np.ndarray> (k, 4) (optional field)
}
},
...
]
Args:
ann_file (str): Annotation file path.
pipeline (list[dict]): Processing pipeline.
classes (str | Sequence[str], optional): Specify classes to load.
If is None, ``cls.CLASSES`` will be used. Default: None.
data_root (str, optional): Data root for ``ann_file``,
``img_prefix``, ``seg_prefix``, ``proposal_file`` if specified.
test_mode (bool, optional): If set True, annotation will not be loaded.
filter_empty_gt (bool, optional): If set true, images without bounding
boxes of the dataset's classes will be filtered out. This option
only works when `test_mode=False`, i.e., we never filter images
during tests.
"""
CLASSES = None
    def __init__(self,
                 ann_file,
                 pipeline,
                 classes=None,
                 data_root=None,
                 img_prefix='',
                 seg_prefix=None,
                 proposal_file=None,
                 test_mode=False,
                 filter_empty_gt=True):
        """Initialize the dataset: resolve paths, load annotations and
        optional proposals, filter small images (train only), and build the
        processing pipeline.  See the class docstring for argument details.
        """
        self.ann_file = ann_file
        self.data_root = data_root
        self.img_prefix = img_prefix
        self.seg_prefix = seg_prefix
        self.proposal_file = proposal_file
        self.test_mode = test_mode
        self.filter_empty_gt = filter_empty_gt
        self.CLASSES = self.get_classes(classes)
        # join paths if data_root is specified (relative paths only; absolute
        # paths are left untouched)
        if self.data_root is not None:
            if not osp.isabs(self.ann_file):
                self.ann_file = osp.join(self.data_root, self.ann_file)
            if not (self.img_prefix is None or osp.isabs(self.img_prefix)):
                self.img_prefix = osp.join(self.data_root, self.img_prefix)
            if not (self.seg_prefix is None or osp.isabs(self.seg_prefix)):
                self.seg_prefix = osp.join(self.data_root, self.seg_prefix)
            if not (self.proposal_file is None
                    or osp.isabs(self.proposal_file)):
                self.proposal_file = osp.join(self.data_root,
                                              self.proposal_file)
        # load annotations (and proposals)
        self.data_infos = self.load_annotations(self.ann_file)
        if self.proposal_file is not None:
            self.proposals = self.load_proposals(self.proposal_file)
        else:
            self.proposals = None
        # filter images too small and containing no annotations
        # (skipped entirely in test mode — we never drop test samples)
        if not test_mode:
            valid_inds = self._filter_imgs()
            self.data_infos = [self.data_infos[i] for i in valid_inds]
            if self.proposals is not None:
                self.proposals = [self.proposals[i] for i in valid_inds]
            # set group flag for the sampler
            self._set_group_flag()
        # processing pipeline
        self.pipeline = Compose(pipeline)
    def __len__(self):
        """Total number of samples of data (after any train-time filtering)."""
        return len(self.data_infos)
    def load_annotations(self, ann_file):
        """Load annotation from annotation file.

        Args:
            ann_file (str): Path of the annotation file.

        Returns:
            list[dict]: Annotation info, one dict per image (see the class
                docstring for the expected format).
        """
        return mmcv.load(ann_file)
    def load_proposals(self, proposal_file):
        """Load proposal from proposal file.

        Args:
            proposal_file (str): Path of the proposal file.

        Returns:
            list: Pre-computed proposals, one entry per image.
        """
        return mmcv.load(proposal_file)
    def get_ann_info(self, idx):
        """Get annotation by index.
        Args:
            idx (int): Index of data.
        Returns:
            dict: Annotation info of specified index (the ``'ann'`` field of
                the corresponding entry in ``self.data_infos``).
        """
        return self.data_infos[idx]['ann']
def get_cat_ids(self, idx):
"""Get category ids by index.
Args:
idx (int): Index of data.
Returns:
list[int]: All categories in the image of specified index.
"""
return self.data_infos[idx]['ann']['labels'].astype(np.int).tolist()
def pre_pipeline(self, results):
"""Prepare results dict for pipeline."""
results['img_prefix'] = self.img_prefix
results['seg_prefix'] = self.seg_prefix
results['proposal_file'] = self.proposal_file
results['bbox_fields'] = []
results['mask_fields'] = []
results['seg_fields'] = []
def _filter_imgs(self, min_size=32):
"""Filter images too small."""
if self.filter_empty_gt:
warnings.warn(
'CustomDataset does not support filtering empty gt images.')
valid_inds = []
for i, img_info in enumerate(self.data_infos):
if min(img_info['width'], img_info['height']) >= min_size:
valid_inds.append(i)
return valid_inds
def _set_group_flag(self):
"""Set flag according to image aspect ratio.
Images with aspect ratio greater than 1 will be set as group 1,
otherwise group 0.
"""
self.flag = np.zeros(len(self), dtype=np.uint8)
for i in range(len(self)):
img_info = self.data_infos[i]
if img_info['width'] / img_info['height'] > 1:
self.flag[i] = 1
def _rand_another(self, idx):
"""Get another random index from the same group as the given index."""
pool = np.where(self.flag == self.flag[idx])[0]
return np.random.choice(pool)
def __getitem__(self, idx):
"""Get training/test data after pipeline.
Args:
idx (int): Index of data.
Returns:
dict: Training/test data (with annotation if `test_mode` is set \
True).
"""
if self.test_mode:
return self.prepare_test_img(idx)
while True:
data = self.prepare_train_img(idx)
if data is None:
idx = self._rand_another(idx)
continue
return data
def prepare_train_img(self, idx):
"""Get training data and annotations after pipeline.
Args:
idx (int): Index of data.
Returns:
dict: Training data and annotation after pipeline with new keys \
introduced by pipeline.
"""
img_info = self.data_infos[idx]
ann_info = self.get_ann_info(idx)
results = dict(img_info=img_info, ann_info=ann_info)
if self.proposals is not None:
results['proposals'] = self.proposals[idx]
self.pre_pipeline(results)
return self.pipeline(results)
def prepare_test_img(self, idx):
"""Get testing data after pipeline.
Args:
idx (int): Index of data.
Returns:
dict: Testing data after pipeline with new keys introduced by \
pipeline.
"""
img_info = self.data_infos[idx]
results = dict(img_info=img_info)
if self.proposals is not None:
results['proposals'] = self.proposals[idx]
self.pre_pipeline(results)
return self.pipeline(results)
@classmethod
def get_classes(cls, classes=None):
"""Get class names of current dataset.
Args:
classes (Sequence[str] | str | None): If classes is None, use
default CLASSES defined by builtin dataset. If classes is a
string, take it as a file name. The file contains the name of
classes where each line contains one class name. If classes is
a tuple or list, override the CLASSES defined by the dataset.
Returns:
tuple[str] or list[str]: Names of categories of the dataset.
"""
if classes is None:
return cls.CLASSES
if isinstance(classes, str):
# take it as a file path
class_names = mmcv.list_from_file(classes)
elif isinstance(classes, (tuple, list)):
class_names = classes
else:
raise ValueError(f'Unsupported type {type(classes)} of classes.')
return class_names
    def format_results(self, results, **kwargs):
        """Placeholder to format results into dataset-specific output.

        This base implementation does nothing and returns ``None``;
        dataset-specific subclasses provide the real formatting.
        """
def evaluate(self,
results,
metric='mAP',
logger=None,
proposal_nums=(100, 300, 1000),
iou_thr=0.5,
scale_ranges=None):
"""Evaluate the dataset.
Args:
results (list): Testing results of the dataset.
metric (str | list[str]): Metrics to be evaluated.
logger (logging.Logger | None | str): Logger used for printing
related information during evaluation. Default: None.
proposal_nums (Sequence[int]): Proposal number used for evaluating
recalls, such as recall@100, recall@1000.
Default: (100, 300, 1000).
iou_thr (float | list[float]): IoU threshold. Default: 0.5.
scale_ranges (list[tuple] | None): Scale ranges for evaluating mAP.
Default: None.
"""
if not isinstance(metric, str):
assert len(metric) == 1
metric = metric[0]
allowed_metrics = ['mAP', 'recall']
if metric not in allowed_metrics:
raise KeyError(f'metric {metric} is not supported')
annotations = [self.get_ann_info(i) for i in range(len(self))]
eval_results = OrderedDict()
iou_thrs = [iou_thr] if isinstance(iou_thr, float) else iou_thr
if metric == 'mAP':
assert isinstance(iou_thrs, list)
mean_aps = []
for iou_thr in iou_thrs:
print_log(f'\n{"-" * 15}iou_thr: {iou_thr}{"-" * 15}')
mean_ap, _ = eval_map(
results,
annotations,
scale_ranges=scale_ranges,
iou_thr=iou_thr,
dataset=self.CLASSES,
logger=logger)
mean_aps.append(mean_ap)
eval_results[f'AP{int(iou_thr * 100):02d}'] = round(mean_ap, 3)
eval_results['mAP'] = sum(mean_aps) / len(mean_aps)
elif metric == 'recall':
gt_bboxes = [ann['bboxes'] for ann in annotations]
recalls = eval_recalls(
gt_bboxes, results, proposal_nums, iou_thr, logger=logger)
for i, num in enumerate(proposal_nums):
for j, iou in enumerate(iou_thrs):
eval_results[f'recall@{num}@{iou}'] = recalls[i, j]
if recalls.shape[1] > 1:
ar = recalls.mean(axis=1)
for i, num in enumerate(proposal_nums):
eval_results[f'AR@{num}'] = ar[i]
return eval_results
    def __repr__(self):
        """Return a human-readable summary with per-category instance counts.

        The summary includes the dataset type (Train/Test), the number of
        images, and an ASCII table of instance counts per category plus a
        trailing "background" entry counting images without any labels.
        """
        dataset_type = 'Test' if self.test_mode else 'Train'
        result = (f'\n{self.__class__.__name__} {dataset_type} dataset '
                  f'with number of images {len(self)}, '
                  f'and instance counts: \n')
        if self.CLASSES is None:
            result += 'Category names are not provided. \n'
            return result
        # One counter slot per class plus one trailing slot for images
        # whose annotation has an empty label list ("background").
        instance_count = np.zeros(len(self.CLASSES) + 1).astype(int)
        # count the instance number in each image
        for idx in range(len(self)):
            label = self.get_ann_info(idx)['labels']
            unique, counts = np.unique(label, return_counts=True)
            if len(unique) > 0:
                # add the occurrence number to each class
                instance_count[unique] += counts
            else:
                # background is the last index
                instance_count[-1] += 1
        # create a table with category count; each table row holds up to
        # five (category, count) column pairs (10 cells)
        table_data = [['category', 'count'] * 5]
        row_data = []
        for cls, count in enumerate(instance_count):
            if cls < len(self.CLASSES):
                row_data += [f'{cls} [{self.CLASSES[cls]}]', f'{count}']
            else:
                # add the background number
                row_data += ['-1 background', f'{count}']
            if len(row_data) == 10:
                # flush a full row of five column pairs
                table_data.append(row_data)
                row_data = []
        if len(row_data) >= 2:
            if row_data[-1] == '0':
                # drop the last (category, count) pair when its count is zero
                row_data = row_data[:-2]
            if len(row_data) >= 2:
                table_data.append([])
            table_data.append(row_data)
        table = AsciiTable(table_data)
        result += table.table
        return result
| 13,457 | 35.570652 | 79 | py |
PseCo | PseCo-master/thirdparty/mmdetection/mmdet/datasets/dataset_wrappers.py | # Copyright (c) OpenMMLab. All rights reserved.
import bisect
import collections
import copy
import math
from collections import defaultdict
import numpy as np
from mmcv.utils import build_from_cfg, print_log
from torch.utils.data.dataset import ConcatDataset as _ConcatDataset
from .builder import DATASETS, PIPELINES
from .coco import CocoDataset
import ipdb
@DATASETS.register_module()
class ConcatDataset(_ConcatDataset):
    """A wrapper of concatenated dataset.

    Same as :obj:`torch.utils.data.dataset.ConcatDataset`, but additionally
    concatenates the aspect-ratio group flag of the sub-datasets.

    Args:
        datasets (list[:obj:`Dataset`]): A list of datasets.
        separate_eval (bool): Whether to evaluate the results separately
            when used as a validation dataset. Defaults to True.
    """

    def __init__(self, datasets, separate_eval=True):
        super(ConcatDataset, self).__init__(datasets)
        # All sub-datasets are expected to share the class set of the first.
        self.CLASSES = datasets[0].CLASSES
        self.separate_eval = separate_eval
        if not separate_eval:
            # Joint evaluation is only supported for homogeneous,
            # non-COCO sub-datasets.
            if any([isinstance(ds, CocoDataset) for ds in datasets]):
                raise NotImplementedError(
                    'Evaluating concatenated CocoDataset as a whole is not'
                    ' supported! Please set "separate_eval=True"')
            elif len(set([type(ds) for ds in datasets])) != 1:
                raise NotImplementedError(
                    'All the datasets should have same types')

        if hasattr(datasets[0], 'flag'):
            # Concatenate aspect-ratio group flags so group samplers work
            # on the concatenated dataset.
            flags = []
            for i in range(0, len(datasets)):
                flags.append(datasets[i].flag)
            self.flag = np.concatenate(flags)

    def get_cat_ids(self, idx):
        """Get category ids of concatenated dataset by index.

        Args:
            idx (int): Index of data.

        Returns:
            list[int]: All categories in the image of specified index.

        Raises:
            ValueError: If a negative ``idx`` exceeds the dataset length.
        """
        if idx < 0:
            if -idx > len(self):
                raise ValueError(
                    'absolute value of index should not exceed dataset length')
            idx = len(self) + idx
        # Locate the sub-dataset containing `idx`, then translate `idx`
        # into a sample index local to that sub-dataset.
        dataset_idx = bisect.bisect_right(self.cumulative_sizes, idx)
        if dataset_idx == 0:
            sample_idx = idx
        else:
            sample_idx = idx - self.cumulative_sizes[dataset_idx - 1]
        return self.datasets[dataset_idx].get_cat_ids(sample_idx)

    def evaluate(self, results, logger=None, **kwargs):
        """Evaluate the results.

        Args:
            results (list[list | tuple]): Testing results of the dataset.
            logger (logging.Logger | str | None): Logger used for printing
                related information during evaluation. Default: None.

        Returns:
            dict[str: float]: AP results of the total dataset or each
                separate dataset if `self.separate_eval=True`.
        """
        assert len(results) == self.cumulative_sizes[-1], \
            ('Dataset and results have different sizes: '
             f'{self.cumulative_sizes[-1]} v.s. {len(results)}')

        # Check whether all the datasets support evaluation
        for dataset in self.datasets:
            assert hasattr(dataset, 'evaluate'), \
                f'{type(dataset)} does not implement evaluate function'

        if self.separate_eval:
            dataset_idx = -1
            total_eval_results = dict()
            for size, dataset in zip(self.cumulative_sizes, self.datasets):
                # Slice out the results belonging to this sub-dataset.
                start_idx = 0 if dataset_idx == -1 else \
                    self.cumulative_sizes[dataset_idx]
                end_idx = self.cumulative_sizes[dataset_idx + 1]
                results_per_dataset = results[start_idx:end_idx]
                # NOTE: fixed typo in the log message ("Evaluateing").
                print_log(
                    f'\nEvaluating {dataset.ann_file} with '
                    f'{len(results_per_dataset)} images now',
                    logger=logger)
                eval_results_per_dataset = dataset.evaluate(
                    results_per_dataset, logger=logger, **kwargs)
                dataset_idx += 1
                # Prefix metric names with the sub-dataset index to avoid
                # key collisions between sub-datasets.
                for k, v in eval_results_per_dataset.items():
                    total_eval_results.update({f'{dataset_idx}_{k}': v})
            return total_eval_results
        elif any([isinstance(ds, CocoDataset) for ds in self.datasets]):
            raise NotImplementedError(
                'Evaluating concatenated CocoDataset as a whole is not'
                ' supported! Please set "separate_eval=True"')
        elif len(set([type(ds) for ds in self.datasets])) != 1:
            raise NotImplementedError(
                'All the datasets should have same types')
        else:
            # Joint evaluation: temporarily merge all data_infos into the
            # first dataset, run its evaluate(), then restore its state.
            original_data_infos = self.datasets[0].data_infos
            self.datasets[0].data_infos = sum(
                [dataset.data_infos for dataset in self.datasets], [])
            eval_results = self.datasets[0].evaluate(
                results, logger=logger, **kwargs)
            self.datasets[0].data_infos = original_data_infos
            return eval_results
@DATASETS.register_module()
class RepeatDataset:
    """A wrapper that virtually repeats a dataset ``times`` times.

    The repeated dataset is ``times`` as long as the original one. This is
    useful when data loading time is long but the dataset is small: using
    RepeatDataset reduces the data loading time between epochs.

    Args:
        dataset (:obj:`Dataset`): The dataset to be repeated.
        times (int): Repeat times.
    """

    def __init__(self, dataset, times):
        self.dataset = dataset
        self.times = times
        self.CLASSES = dataset.CLASSES
        if hasattr(dataset, 'flag'):
            # Tile the aspect-ratio group flags to match the virtual length.
            self.flag = np.tile(dataset.flag, times)
        self._ori_len = len(dataset)

    def __getitem__(self, idx):
        # Map the virtual index back onto the underlying dataset.
        return self.dataset[idx % self._ori_len]

    def get_cat_ids(self, idx):
        """Get category ids of repeat dataset by index.

        Args:
            idx (int): Index of data.

        Returns:
            list[int]: All categories in the image of specified index.
        """
        return self.dataset.get_cat_ids(idx % self._ori_len)

    def __len__(self):
        """Length after repetition."""
        return self._ori_len * self.times
# Modified from https://github.com/facebookresearch/detectron2/blob/41d475b75a230221e21d9cac5d69655e3415e3a4/detectron2/data/samplers/distributed_sampler.py#L57 # noqa
@DATASETS.register_module()
class ClassBalancedDataset:
    """A wrapper of repeated dataset with repeat factor.

    Suitable for training on class imbalanced datasets like LVIS. Following
    the sampling strategy in the `paper <https://arxiv.org/abs/1908.03195>`_,
    in each epoch, an image may appear multiple times based on its
    "repeat factor".

    The repeat factor for an image is a function of the frequency of the
    rarest category labeled in that image. The "frequency of category c" in
    [0, 1] is defined as the fraction of images in the training set (without
    repeats) in which category c appears.

    The dataset needs to implement :func:`self.get_cat_ids` to support
    ClassBalancedDataset.

    The repeat factor is computed as follows.

    1. For each category c, compute the fraction of images that contain it:
       :math:`f(c)`
    2. For each category c, compute the category-level repeat factor:
       :math:`r(c) = max(1, sqrt(t/f(c)))`
    3. For each image I, compute the image-level repeat factor:
       :math:`r(I) = max_{c in I} r(c)`

    Args:
        dataset (:obj:`CustomDataset`): The dataset to be repeated.
        oversample_thr (float): frequency threshold below which data is
            repeated. For categories with ``f_c >= oversample_thr``, there is
            no oversampling. For categories with ``f_c < oversample_thr``, the
            degree of oversampling follows the square-root inverse frequency
            heuristic above.
        filter_empty_gt (bool, optional): If set true, images without bounding
            boxes will not be oversampled. Otherwise, they will be categorized
            as the pure background class and involved into the oversampling.
            Default: True.
    """

    def __init__(self, dataset, oversample_thr, filter_empty_gt=True):
        self.dataset = dataset
        self.oversample_thr = oversample_thr
        self.filter_empty_gt = filter_empty_gt
        self.CLASSES = dataset.CLASSES
        # One repeat factor per original image; ceil() below guarantees
        # every image appears at least once per epoch.
        repeat_factors = self._get_repeat_factors(dataset, oversample_thr)
        repeat_indices = []
        for dataset_idx, repeat_factor in enumerate(repeat_factors):
            repeat_indices.extend([dataset_idx] * math.ceil(repeat_factor))
        self.repeat_indices = repeat_indices
        # Expand the aspect-ratio group flags in lockstep with the repeated
        # indices so group samplers still line up with the data.
        flags = []
        if hasattr(self.dataset, 'flag'):
            for flag, repeat_factor in zip(self.dataset.flag, repeat_factors):
                flags.extend([flag] * int(math.ceil(repeat_factor)))
            assert len(flags) == len(repeat_indices)
        self.flag = np.asarray(flags, dtype=np.uint8)

    def _get_repeat_factors(self, dataset, repeat_thr):
        """Get repeat factor for each image in the dataset.

        Args:
            dataset (:obj:`CustomDataset`): The dataset.
            repeat_thr (float): The frequency threshold. If an image
                contains categories whose frequency is below the threshold,
                it is repeated.

        Returns:
            list[float]: The repeat factors for each image in the dataset.
        """
        # 1. For each category c, compute the fraction of images
        #    that contain it: f(c)
        category_freq = defaultdict(int)
        num_images = len(dataset)
        for idx in range(num_images):
            cat_ids = set(self.dataset.get_cat_ids(idx))
            if len(cat_ids) == 0 and not self.filter_empty_gt:
                # Images with no annotations count as a synthetic
                # "background" category (index len(CLASSES)).
                cat_ids = set([len(self.CLASSES)])
            for cat_id in cat_ids:
                category_freq[cat_id] += 1
        for k, v in category_freq.items():
            category_freq[k] = v / num_images

        # 2. For each category c, compute the category-level repeat factor:
        #    r(c) = max(1, sqrt(t/f(c)))
        category_repeat = {
            cat_id: max(1.0, math.sqrt(repeat_thr / cat_freq))
            for cat_id, cat_freq in category_freq.items()
        }

        # 3. For each image I, compute the image-level repeat factor:
        #    r(I) = max_{c in I} r(c)
        repeat_factors = []
        for idx in range(num_images):
            cat_ids = set(self.dataset.get_cat_ids(idx))
            if len(cat_ids) == 0 and not self.filter_empty_gt:
                cat_ids = set([len(self.CLASSES)])
            repeat_factor = 1
            if len(cat_ids) > 0:
                repeat_factor = max(
                    {category_repeat[cat_id]
                     for cat_id in cat_ids})
            repeat_factors.append(repeat_factor)

        return repeat_factors

    def __getitem__(self, idx):
        # Translate the (possibly repeated) index back to an original index.
        ori_index = self.repeat_indices[idx]
        return self.dataset[ori_index]

    def __len__(self):
        """Length after repetition."""
        return len(self.repeat_indices)
@DATASETS.register_module()
class MultiImageMixDataset:
    """A wrapper of multiple images mixed dataset.

    Suitable for training on multiple images mixed data augmentation like
    mosaic and mixup. For the augmentation pipeline of mixed image data,
    the `get_indexes` method needs to be provided to obtain the image
    indexes, and you can set `skip_flags` to change the pipeline running
    process. At the same time, we provide the `dynamic_scale` parameter
    to dynamically change the output image size.

    Args:
        dataset (:obj:`CustomDataset`): The dataset to be mixed.
        pipeline (Sequence[dict]): Sequence of transform objects or
            config dicts to be composed.
        dynamic_scale (tuple[int], optional): The image scale can be changed
            dynamically. Default to None.
        skip_type_keys (list[str], optional): Sequence of type strings whose
            transforms should be skipped. Default to None.
    """

    def __init__(self,
                 dataset,
                 pipeline,
                 dynamic_scale=None,
                 skip_type_keys=None):
        assert isinstance(pipeline, collections.abc.Sequence)
        if skip_type_keys is not None:
            assert all([
                isinstance(skip_type_key, str)
                for skip_type_key in skip_type_keys
            ])
        self._skip_type_keys = skip_type_keys
        # Keep the transform objects and their config 'type' strings in
        # parallel lists so transforms can be skipped by type at runtime.
        self.pipeline = []
        self.pipeline_types = []
        for transform in pipeline:
            if isinstance(transform, dict):
                self.pipeline_types.append(transform['type'])
                transform = build_from_cfg(transform, PIPELINES)
                self.pipeline.append(transform)
            else:
                raise TypeError('pipeline must be a dict')
        self.dataset = dataset
        self.CLASSES = dataset.CLASSES
        if hasattr(self.dataset, 'flag'):
            self.flag = dataset.flag
        self.num_samples = len(dataset)
        if dynamic_scale is not None:
            assert isinstance(dynamic_scale, tuple)
        # NOTE: _dynamic_scale is set unconditionally (None disables it).
        self._dynamic_scale = dynamic_scale

    def __len__(self):
        return self.num_samples

    def __getitem__(self, idx):
        # Deep-copy so pipeline transforms cannot mutate cached samples.
        results = copy.deepcopy(self.dataset[idx])
        for (transform, transform_type) in zip(self.pipeline,
                                               self.pipeline_types):
            if self._skip_type_keys is not None and \
                    transform_type in self._skip_type_keys:
                continue
            if hasattr(transform, 'get_indexes'):
                # Mixed-image transforms (e.g. Mosaic/MixUp) request extra
                # sample indexes and receive them via 'mix_results'.
                indexes = transform.get_indexes(self.dataset)
                if not isinstance(indexes, collections.abc.Sequence):
                    indexes = [indexes]
                mix_results = [
                    copy.deepcopy(self.dataset[index]) for index in indexes
                ]
                results['mix_results'] = mix_results
            if self._dynamic_scale is not None:
                # Used for subsequent pipeline to automatically change
                # the output image size. E.g MixUp, Resize.
                results['scale'] = self._dynamic_scale
            results = transform(results)
            # Drop per-transform bookkeeping keys before the next transform.
            if 'mix_results' in results:
                results.pop('mix_results')
            if 'img_scale' in results:
                results.pop('img_scale')
        return results

    def update_skip_type_keys(self, skip_type_keys):
        """Update skip_type_keys. It is called by an external hook.

        Args:
            skip_type_keys (list[str], optional): Sequence of type
                strings whose transforms should be skipped.
        """
        assert all([
            isinstance(skip_type_key, str) for skip_type_key in skip_type_keys
        ])
        self._skip_type_keys = skip_type_keys

    def update_dynamic_scale(self, dynamic_scale):
        """Update dynamic_scale. It is called by an external hook.

        Args:
            dynamic_scale (tuple[int]): The image scale that can be
                changed dynamically.
        """
        assert isinstance(dynamic_scale, tuple)
        self._dynamic_scale = dynamic_scale
| 15,324 | 37.602015 | 167 | py |
PseCo | PseCo-master/thirdparty/mmdetection/mmdet/datasets/builder.py | # Copyright (c) OpenMMLab. All rights reserved.
import copy
import platform
import random
from functools import partial
import numpy as np
from mmcv.parallel import collate
from mmcv.runner import get_dist_info
from mmcv.utils import Registry, build_from_cfg
from torch.utils.data import DataLoader
from .samplers import DistributedGroupSampler, DistributedSampler, GroupSampler
if platform.system() != 'Windows':
    # https://github.com/pytorch/pytorch/issues/973
    # Raise the soft limit on open file descriptors (see the linked
    # PyTorch issue about DataLoader workers hitting the default limit).
    import resource
    rlimit = resource.getrlimit(resource.RLIMIT_NOFILE)
    base_soft_limit = rlimit[0]
    hard_limit = rlimit[1]
    # Use at least 4096, but never exceed the hard limit.
    soft_limit = min(max(4096, base_soft_limit), hard_limit)
    resource.setrlimit(resource.RLIMIT_NOFILE, (soft_limit, hard_limit))

# Registries collecting dataset and data-pipeline classes for
# config-driven construction via ``build_from_cfg``.
DATASETS = Registry('dataset')
PIPELINES = Registry('pipeline')
def _concat_dataset(cfg, default_args=None):
    """Build a :obj:`ConcatDataset` from a config with multiple ann files.

    Args:
        cfg (dict): Dataset config whose ``ann_file`` (and optionally
            ``img_prefix``/``seg_prefix``/``proposal_file``) is a list or
            tuple; one sub-dataset is built per annotation file.
        default_args (dict, optional): Default arguments forwarded to
            ``build_dataset``.

    Returns:
        ConcatDataset: The concatenated dataset.
    """
    from .dataset_wrappers import ConcatDataset
    ann_files = cfg['ann_file']
    img_prefixes = cfg.get('img_prefix', None)
    seg_prefixes = cfg.get('seg_prefix', None)
    proposal_files = cfg.get('proposal_file', None)
    separate_eval = cfg.get('separate_eval', True)

    datasets = []
    for i in range(len(ann_files)):
        data_cfg = copy.deepcopy(cfg)
        # 'separate_eval' belongs to the wrapper, not to the sub-datasets.
        data_cfg.pop('separate_eval', None)
        data_cfg['ann_file'] = ann_files[i]
        if isinstance(img_prefixes, (list, tuple)):
            data_cfg['img_prefix'] = img_prefixes[i]
        if isinstance(seg_prefixes, (list, tuple)):
            data_cfg['seg_prefix'] = seg_prefixes[i]
        if isinstance(proposal_files, (list, tuple)):
            data_cfg['proposal_file'] = proposal_files[i]
        datasets.append(build_dataset(data_cfg, default_args))

    return ConcatDataset(datasets, separate_eval)
def build_dataset(cfg, default_args=None):
    """Build a dataset from config, resolving wrapper dataset types.

    Args:
        cfg (dict | list | tuple): Dataset config. A list/tuple of configs,
            or a config whose ``ann_file`` is a list/tuple, yields a
            :obj:`ConcatDataset`. The wrapper types 'ConcatDataset',
            'RepeatDataset', 'ClassBalancedDataset' and
            'MultiImageMixDataset' are built recursively.
        default_args (dict, optional): Default arguments forwarded to the
            constructed dataset(s).

    Returns:
        Dataset: The constructed dataset.
    """
    from .dataset_wrappers import (ConcatDataset, RepeatDataset,
                                   ClassBalancedDataset, MultiImageMixDataset)
    if isinstance(cfg, (list, tuple)):
        return ConcatDataset([build_dataset(c, default_args) for c in cfg])
    if cfg['type'] == 'ConcatDataset':
        return ConcatDataset(
            [build_dataset(c, default_args) for c in cfg['datasets']],
            cfg.get('separate_eval', True))
    if cfg['type'] == 'RepeatDataset':
        return RepeatDataset(
            build_dataset(cfg['dataset'], default_args), cfg['times'])
    if cfg['type'] == 'ClassBalancedDataset':
        return ClassBalancedDataset(
            build_dataset(cfg['dataset'], default_args), cfg['oversample_thr'])
    if cfg['type'] == 'MultiImageMixDataset':
        cp_cfg = copy.deepcopy(cfg)
        cp_cfg['dataset'] = build_dataset(cp_cfg['dataset'])
        cp_cfg.pop('type')
        return MultiImageMixDataset(**cp_cfg)
    if isinstance(cfg.get('ann_file'), (list, tuple)):
        return _concat_dataset(cfg, default_args)
    return build_from_cfg(cfg, DATASETS, default_args)
def build_dataloader(dataset,
                     samples_per_gpu,
                     workers_per_gpu,
                     num_gpus=1,
                     dist=True,
                     shuffle=True,
                     seed=None,
                     **kwargs):
    """Build a PyTorch DataLoader.

    In distributed training each GPU/process gets its own dataloader;
    in non-distributed training a single dataloader serves all GPUs.

    Args:
        dataset (Dataset): A PyTorch dataset.
        samples_per_gpu (int): Number of training samples on each GPU,
            i.e. the batch size of each GPU.
        workers_per_gpu (int): How many subprocesses to use for data
            loading for each GPU.
        num_gpus (int): Number of GPUs. Only used in non-distributed
            training.
        dist (bool): Distributed training/test or not. Default: True.
        shuffle (bool): Whether to shuffle the data at every epoch.
            Default: True.
        seed (int, optional): Seed shared by the sampler and workers.
        kwargs: Any keyword argument used to initialize the DataLoader.

    Returns:
        DataLoader: A PyTorch dataloader.
    """
    rank, world_size = get_dist_info()
    if dist:
        # DistributedGroupSampler shuffles while keeping all images of a
        # batch in the same aspect-ratio group.
        if shuffle:
            sampler = DistributedGroupSampler(
                dataset, samples_per_gpu, world_size, rank, seed=seed)
        else:
            sampler = DistributedSampler(
                dataset, world_size, rank, shuffle=False, seed=seed)
        batch_size = samples_per_gpu
        num_workers = workers_per_gpu
    else:
        sampler = GroupSampler(dataset, samples_per_gpu) if shuffle else None
        batch_size = num_gpus * samples_per_gpu
        num_workers = num_gpus * workers_per_gpu

    if seed is not None:
        # Give every worker a distinct but reproducible seed.
        init_fn = partial(
            worker_init_fn, num_workers=num_workers, rank=rank, seed=seed)
    else:
        init_fn = None

    return DataLoader(
        dataset,
        batch_size=batch_size,
        sampler=sampler,
        num_workers=num_workers,
        collate_fn=partial(collate, samples_per_gpu=samples_per_gpu),
        pin_memory=False,
        worker_init_fn=init_fn,
        **kwargs)
def worker_init_fn(worker_id, num_workers, rank, seed):
    """Seed numpy and the stdlib RNG for one DataLoader worker.

    The per-worker seed is ``num_workers * rank + worker_id + seed`` so
    every worker of every rank draws a distinct random stream while staying
    reproducible for a fixed user seed.
    """
    worker_seed = seed + rank * num_workers + worker_id
    np.random.seed(worker_seed)
    random.seed(worker_seed)
| 5,629 | 36.284768 | 79 | py |
PseCo | PseCo-master/thirdparty/mmdetection/mmdet/datasets/samplers/group_sampler.py | # Copyright (c) OpenMMLab. All rights reserved.
import math
import numpy as np
import torch
from mmcv.runner import get_dist_info
from torch.utils.data import Sampler
class GroupSampler(Sampler):
    """Sampler that keeps each GPU batch inside one aspect-ratio group.

    The dataset must expose a ``flag`` array assigning every sample to a
    group. Each group is shuffled independently, padded with random repeats
    until its size is a multiple of ``samples_per_gpu``, and the resulting
    fixed-size chunks are emitted in random order.
    """

    def __init__(self, dataset, samples_per_gpu=1):
        assert hasattr(dataset, 'flag')
        self.dataset = dataset
        self.samples_per_gpu = samples_per_gpu
        self.flag = dataset.flag.astype(np.int64)
        self.group_sizes = np.bincount(self.flag)
        # Epoch length: every group padded up to a multiple of
        # samples_per_gpu.
        self.num_samples = 0
        for size in self.group_sizes:
            self.num_samples += int(
                np.ceil(size / self.samples_per_gpu)) * self.samples_per_gpu

    def __iter__(self):
        per_group = []
        for group, size in enumerate(self.group_sizes):
            if size == 0:
                continue
            members = np.where(self.flag == group)[0]
            assert len(members) == size
            np.random.shuffle(members)
            # Pad with random repeats so the group splits evenly into
            # GPU-sized chunks.
            num_extra = int(np.ceil(size / self.samples_per_gpu)
                            ) * self.samples_per_gpu - len(members)
            members = np.concatenate(
                [members, np.random.choice(members, num_extra)])
            per_group.append(members)
        flat = np.concatenate(per_group)
        # Shuffle at chunk granularity so each batch stays inside a group.
        chunks = [
            flat[i * self.samples_per_gpu:(i + 1) * self.samples_per_gpu]
            for i in np.random.permutation(
                range(len(flat) // self.samples_per_gpu))
        ]
        ordered = np.concatenate(chunks).astype(np.int64).tolist()
        assert len(ordered) == self.num_samples
        return iter(ordered)

    def __len__(self):
        return self.num_samples
class DistributedGroupSampler(Sampler):
    """Sampler that restricts data loading to a subset of the dataset.

    It is especially useful in conjunction with
    :class:`torch.nn.parallel.DistributedDataParallel`. In such a case, each
    process can pass a DistributedSampler instance as a DataLoader sampler,
    and load a subset of the original dataset that is exclusive to it.

    Every emitted batch of ``samples_per_gpu`` indices stays within a single
    aspect-ratio group (given by ``dataset.flag``).

    .. note::
        Dataset is assumed to be of constant size.

    Arguments:
        dataset: Dataset used for sampling.
        samples_per_gpu (int): Number of samples per batch on each replica.
        num_replicas (optional): Number of processes participating in
            distributed training.
        rank (optional): Rank of the current process within num_replicas.
        seed (int, optional): random seed used to shuffle the sampler if
            ``shuffle=True``. This number should be identical across all
            processes in the distributed group. Default: 0.
    """

    def __init__(self,
                 dataset,
                 samples_per_gpu=1,
                 num_replicas=None,
                 rank=None,
                 seed=0):
        # Fall back to the current process group's rank/world size when not
        # given explicitly.
        _rank, _num_replicas = get_dist_info()
        if num_replicas is None:
            num_replicas = _num_replicas
        if rank is None:
            rank = _rank
        self.dataset = dataset
        self.samples_per_gpu = samples_per_gpu
        self.num_replicas = num_replicas
        self.rank = rank
        self.epoch = 0
        self.seed = seed if seed is not None else 0

        assert hasattr(self.dataset, 'flag')
        self.flag = self.dataset.flag
        self.group_sizes = np.bincount(self.flag)

        # Per-replica sample count: each group is padded up to a multiple
        # of samples_per_gpu * num_replicas.
        self.num_samples = 0
        for i, j in enumerate(self.group_sizes):
            self.num_samples += int(
                math.ceil(self.group_sizes[i] * 1.0 / self.samples_per_gpu /
                          self.num_replicas)) * self.samples_per_gpu
        self.total_size = self.num_samples * self.num_replicas

    def __iter__(self):
        # deterministically shuffle based on epoch
        g = torch.Generator()
        g.manual_seed(self.epoch + self.seed)

        indices = []
        for i, size in enumerate(self.group_sizes):
            if size > 0:
                indice = np.where(self.flag == i)[0]
                assert len(indice) == size
                # add .numpy() to avoid bug when selecting indice in parrots.
                # TODO: check whether torch.randperm() can be replaced by
                # numpy.random.permutation().
                indice = indice[list(
                    torch.randperm(int(size), generator=g).numpy())].tolist()
                extra = int(
                    math.ceil(
                        size * 1.0 / self.samples_per_gpu / self.num_replicas)
                ) * self.samples_per_gpu * self.num_replicas - len(indice)
                # pad indice by cycling through the shuffled group until the
                # padded size divides evenly across batches and replicas
                tmp = indice.copy()
                for _ in range(extra // size):
                    indice.extend(tmp)
                indice.extend(tmp[:extra % size])
                indices.extend(indice)

        assert len(indices) == self.total_size

        # Shuffle at batch (samples_per_gpu) granularity so each batch
        # keeps indices from a single group.
        indices = [
            indices[j] for i in list(
                torch.randperm(
                    len(indices) // self.samples_per_gpu, generator=g))
            for j in range(i * self.samples_per_gpu, (i + 1) *
                           self.samples_per_gpu)
        ]

        # subsample: each rank takes a contiguous block of num_samples
        offset = self.num_samples * self.rank
        indices = indices[offset:offset + self.num_samples]
        assert len(indices) == self.num_samples

        return iter(indices)

    def __len__(self):
        """Number of indices yielded on this replica."""
        return self.num_samples

    def set_epoch(self, epoch):
        # Called by the training loop so each epoch reshuffles differently.
        self.epoch = epoch
| 5,384 | 35.14094 | 78 | py |
PseCo | PseCo-master/thirdparty/mmdetection/mmdet/datasets/samplers/distributed_sampler.py | # Copyright (c) OpenMMLab. All rights reserved.
import math
import torch
from torch.utils.data import DistributedSampler as _DistributedSampler
class DistributedSampler(_DistributedSampler):
    """Distributed sampler with a deterministic, seedable shuffle.

    Behaves like :obj:`torch.utils.data.DistributedSampler` except that the
    shuffle order is derived from ``seed + epoch`` (so all ranks agree on
    the permutation of a given epoch) and the index list is tiled to reach
    ``total_size`` even when it is shorter than half of it.
    """

    def __init__(self,
                 dataset,
                 num_replicas=None,
                 rank=None,
                 shuffle=True,
                 seed=0):
        super().__init__(
            dataset, num_replicas=num_replicas, rank=rank, shuffle=shuffle)
        # for the compatibility from PyTorch 1.3+
        self.seed = seed if seed is not None else 0

    def __iter__(self):
        # Deterministic shuffle keyed on (epoch, seed).
        if self.shuffle:
            gen = torch.Generator()
            gen.manual_seed(self.epoch + self.seed)
            order = torch.randperm(len(self.dataset), generator=gen).tolist()
        else:
            order = torch.arange(len(self.dataset)).tolist()

        # Tile the index list so it divides evenly across replicas, even
        # when it is shorter than half of total_size.
        order = (order * math.ceil(self.total_size / len(order)))
        order = order[:self.total_size]
        assert len(order) == self.total_size

        # Each rank takes an interleaved slice of the padded list.
        rank_indices = order[self.rank:self.total_size:self.num_replicas]
        assert len(rank_indices) == self.num_samples
        return iter(rank_indices)
| 1,358 | 32.146341 | 79 | py |
PseCo | PseCo-master/thirdparty/mmdetection/mmdet/datasets/pipelines/formating.py | # Copyright (c) OpenMMLab. All rights reserved.
from collections.abc import Sequence
import mmcv
import numpy as np
import torch
from mmcv.parallel import DataContainer as DC
from ..builder import PIPELINES
def to_tensor(data):
    """Convert objects of various python types to :obj:`torch.Tensor`.

    Supported types are: :class:`numpy.ndarray`, :class:`torch.Tensor`,
    :class:`Sequence`, :class:`int` and :class:`float`.

    Args:
        data (torch.Tensor | numpy.ndarray | Sequence | int | float): Data
            to be converted.

    Returns:
        torch.Tensor: The converted tensor. A tensor input is returned
            unchanged; an ``int`` becomes a ``LongTensor`` and a ``float``
            becomes a ``FloatTensor``.

    Raises:
        TypeError: If ``data`` is of an unsupported type.
    """
    if isinstance(data, torch.Tensor):
        return data
    if isinstance(data, np.ndarray):
        return torch.from_numpy(data)
    if isinstance(data, int):
        return torch.LongTensor([data])
    if isinstance(data, float):
        return torch.FloatTensor([data])
    # Strings are sequences too, so exclude them explicitly.
    if isinstance(data, Sequence) and not mmcv.is_str(data):
        return torch.tensor(data)
    raise TypeError(f'type {type(data)} cannot be converted to tensor.')
@PIPELINES.register_module()
class ToTensor:
    """Convert some results to :obj:`torch.Tensor` by given keys.

    Args:
        keys (Sequence[str]): Keys that need to be converted to Tensor.
    """

    def __init__(self, keys):
        self.keys = keys

    def __call__(self, results):
        """Convert the value under every configured key to a tensor.

        Args:
            results (dict): Result dict containing the data to convert.

        Returns:
            dict: The same dict with the selected entries converted to
                :obj:`torch.Tensor`.
        """
        for k in self.keys:
            results[k] = to_tensor(results[k])
        return results

    def __repr__(self):
        return self.__class__.__name__ + f'(keys={self.keys})'
@PIPELINES.register_module()
class ImageToTensor:
    """Convert image to :obj:`torch.Tensor` by given keys.

    The dimension order of an input image is (H, W, C); it is converted to
    (C, H, W). A 2-D image (H, W) is expanded to (1, H, W).

    Args:
        keys (Sequence[str]): Keys of images to be converted to Tensor.
    """

    def __init__(self, keys):
        self.keys = keys

    def __call__(self, results):
        """Convert and transpose the images under the configured keys.

        Args:
            results (dict): Result dict containing the image data.

        Returns:
            dict: The same dict with each image converted to a contiguous
                (C, H, W) :obj:`torch.Tensor`.
        """
        for k in self.keys:
            image = results[k]
            if image.ndim < 3:
                # Grayscale (H, W) -> (H, W, 1) before the transpose.
                image = np.expand_dims(image, -1)
            results[k] = to_tensor(image.transpose(2, 0, 1)).contiguous()
        return results

    def __repr__(self):
        return self.__class__.__name__ + f'(keys={self.keys})'
@PIPELINES.register_module()
class Transpose:
    """Transpose some results by given keys.

    Args:
        keys (Sequence[str]): Keys of results to be transposed.
        order (Sequence[int]): Order of transpose.
    """

    def __init__(self, keys, order):
        self.keys = keys
        self.order = order

    def __call__(self, results):
        """Transpose the entry under every configured key.

        Args:
            results (dict): Result dict containing the data to transpose.

        Returns:
            dict: The same dict with each selected entry transposed to
                ``self.order``.
        """
        for k in self.keys:
            results[k] = results[k].transpose(self.order)
        return results

    def __repr__(self):
        return (self.__class__.__name__ +
                f'(keys={self.keys}, order={self.order})')
@PIPELINES.register_module()
class ToDataContainer:
    """Convert results to :obj:`mmcv.DataContainer` by given fields.

    Args:
        fields (Sequence[dict]): Each field is a dict like
            ``dict(key='xxx', **kwargs)``. The ``key`` in result will
            be converted to :obj:`mmcv.DataContainer` with ``**kwargs``.
            Default: ``(dict(key='img', stack=True), dict(key='gt_bboxes'),
            dict(key='gt_labels'))``.
    """

    def __init__(self,
                 fields=(dict(key='img', stack=True), dict(key='gt_bboxes'),
                         dict(key='gt_labels'))):
        self.fields = fields

    def __call__(self, results):
        """Wrap the configured entries in :obj:`mmcv.DataContainer`.

        Args:
            results (dict): Result dict containing the data to convert.

        Returns:
            dict: The same dict with each selected entry wrapped in a
                :obj:`mmcv.DataContainer` built with the field's kwargs.
        """
        for field in self.fields:
            # Pop the key from a copy so the configured field dicts are
            # never mutated; forward the remaining kwargs to DataContainer.
            opts = field.copy()
            name = opts.pop('key')
            results[name] = DC(results[name], **opts)
        return results

    def __repr__(self):
        return self.__class__.__name__ + f'(fields={self.fields})'
@PIPELINES.register_module()
class DefaultFormatBundle:
    """Default formatting bundle.

    Simplifies the pipeline of formatting common fields, including "img",
    "proposals", "gt_bboxes", "gt_labels", "gt_masks" and
    "gt_semantic_seg", which are formatted as follows.

    - img: (1)transpose, (2)to tensor, (3)to DataContainer (stack=True)
    - proposals: (1)to tensor, (2)to DataContainer
    - gt_bboxes: (1)to tensor, (2)to DataContainer
    - gt_bboxes_ignore: (1)to tensor, (2)to DataContainer
    - gt_labels: (1)to tensor, (2)to DataContainer
    - gt_masks: (1)to tensor, (2)to DataContainer (cpu_only=True)
    - gt_semantic_seg: (1)unsqueeze dim-0 (2)to tensor, \
                       (3)to DataContainer (stack=True)
    """

    def __call__(self, results):
        """Transform and format the common fields in ``results``.

        Args:
            results (dict): Result dict containing the data to convert.

        Returns:
            dict: The same dict with all recognized fields formatted into
                :obj:`mmcv.DataContainer`.
        """
        if 'img' in results:
            img = results['img']
            # Fill in meta keys that earlier transforms may have skipped.
            results = self._add_default_meta_keys(results)
            if len(img.shape) < 3:
                img = np.expand_dims(img, -1)
            # HWC -> CHW, contiguous for the tensor conversion.
            img = np.ascontiguousarray(img.transpose(2, 0, 1))
            results['img'] = DC(to_tensor(img), stack=True)
        for key in ('proposals', 'gt_bboxes', 'gt_bboxes_ignore',
                    'gt_labels'):
            if key in results:
                results[key] = DC(to_tensor(results[key]))
        if 'gt_masks' in results:
            results['gt_masks'] = DC(results['gt_masks'], cpu_only=True)
        if 'gt_semantic_seg' in results:
            results['gt_semantic_seg'] = DC(
                to_tensor(results['gt_semantic_seg'][None, ...]), stack=True)
        return results

    def _add_default_meta_keys(self, results):
        """Add default meta keys.

        Sets ``pad_shape``, ``scale_factor`` and ``img_norm_cfg`` so they
        exist even when no ``Resize``/``Normalize``/``Pad`` transform ran
        earlier in the pipeline.

        Args:
            results (dict): Result dict containing the image data.

        Returns:
            dict: The updated result dict.
        """
        img = results['img']
        results.setdefault('pad_shape', img.shape)
        results.setdefault('scale_factor', 1.0)
        num_channels = 1 if len(img.shape) < 3 else img.shape[2]
        results.setdefault(
            'img_norm_cfg',
            dict(
                mean=np.zeros(num_channels, dtype=np.float32),
                std=np.ones(num_channels, dtype=np.float32),
                to_rgb=False))
        return results

    def __repr__(self):
        return self.__class__.__name__
@PIPELINES.register_module()
class Collect:
    """Collect data from the loader relevant to the specific task.

    Usually the last stage of the data loader pipeline. Typically ``keys``
    is a subset of "img", "proposals", "gt_bboxes", "gt_bboxes_ignore",
    "gt_labels", and/or "gt_masks".

    The "img_meta" item is always populated; its contents depend on
    ``meta_keys`` and by default include:

    - "img_shape": shape of the network input image (h, w, c); images may
      be zero padded on the bottom/right if the batch tensor is larger
    - "scale_factor": a float indicating the preprocessing scale
    - "flip": a boolean indicating if image flip transform was used
    - "filename": path to the image file
    - "ori_shape": original shape of the image as a tuple (h, w, c)
    - "pad_shape": image shape after padding
    - "img_norm_cfg": a dict of normalization information
      (mean / std / to_rgb)

    Args:
        keys (Sequence[str]): Keys of results to be collected in ``data``.
        meta_keys (Sequence[str], optional): Meta keys to be converted to
            ``mmcv.DataContainer`` and collected in ``data[img_metas]``.
            Default: ``('filename', 'ori_filename', 'ori_shape', 'img_shape',
            'pad_shape', 'scale_factor', 'flip', 'flip_direction',
            'img_norm_cfg')``
    """

    def __init__(self,
                 keys,
                 meta_keys=('filename', 'ori_filename', 'ori_shape',
                            'img_shape', 'pad_shape', 'scale_factor', 'flip',
                            'flip_direction', 'img_norm_cfg')):
        self.keys = keys
        self.meta_keys = meta_keys

    def __call__(self, results):
        """Collect the configured keys out of ``results``.

        Keys in ``meta_keys`` are gathered into a cpu-only
        :obj:`mmcv.DataContainer` under ``img_metas``.

        Args:
            results (dict): Result dict containing the data to collect.

        Returns:
            dict: Dict with the keys in ``self.keys`` plus ``img_metas``.
        """
        img_meta = {key: results[key] for key in self.meta_keys}
        data = {'img_metas': DC(img_meta, cpu_only=True)}
        data.update((key, results[key]) for key in self.keys)
        return data

    def __repr__(self):
        return (f'{self.__class__.__name__}'
                f'(keys={self.keys}, meta_keys={self.meta_keys})')
@PIPELINES.register_module()
class WrapFieldsToLists:
    """Wrap fields of the data dictionary into lists for evaluation.

    Intended as the last step of a test or validation pipeline for
    single-image evaluation or inference.

    Example:
        >>> test_pipeline = [
        >>>    dict(type='LoadImageFromFile'),
        >>>    dict(type='Normalize',
                    mean=[123.675, 116.28, 103.53],
                    std=[58.395, 57.12, 57.375],
                    to_rgb=True),
        >>>    dict(type='Pad', size_divisor=32),
        >>>    dict(type='ImageToTensor', keys=['img']),
        >>>    dict(type='Collect', keys=['img']),
        >>>    dict(type='WrapFieldsToLists')
        >>> ]
    """

    def __call__(self, results):
        """Wrap every value of ``results`` into a one-element list.

        Args:
            results (dict): Result dict containing the data to wrap.

        Returns:
            dict: The same dict, mutated in place, with each value wrapped
            into a list.
        """
        # iterate over a snapshot of the keys since values are reassigned
        for key in list(results):
            results[key] = [results[key]]
        return results

    def __repr__(self):
        return self.__class__.__name__ + '()'
| 12,044 | 31.909836 | 79 | py |
PseCo | PseCo-master/thirdparty/mmdetection/mmdet/utils/contextmanagers.py | # Copyright (c) OpenMMLab. All rights reserved.
import asyncio
import contextlib
import logging
import os
import time
from typing import List
import torch
logger = logging.getLogger(__name__)
DEBUG_COMPLETED_TIME = bool(os.environ.get('DEBUG_COMPLETED_TIME', False))
@contextlib.asynccontextmanager
async def completed(trace_name='',
                    name='',
                    sleep_interval=0.05,
                    streams: List[torch.cuda.Stream] = None):
    """Async context manager that waits for work to complete on given CUDA
    streams.

    Args:
        trace_name (str): label used in log/debug messages.
        name (str): secondary label used in log/debug messages.
        sleep_interval (float): seconds to sleep between completion polls.
        streams (List[torch.cuda.Stream]): streams to wait on; a falsy list
            or ``None`` entries fall back to the stream current on entry.
    """
    # Without CUDA there is nothing to synchronize on; behave as a no-op.
    if not torch.cuda.is_available():
        yield
        return
    stream_before_context_switch = torch.cuda.current_stream()
    if not streams:
        streams = [stream_before_context_switch]
    else:
        # replace None entries with the stream that was current on entry
        streams = [s if s else stream_before_context_switch for s in streams]
    # one end-marker event per stream; timing only enabled when profiling
    end_events = [
        torch.cuda.Event(enable_timing=DEBUG_COMPLETED_TIME) for _ in streams
    ]
    if DEBUG_COMPLETED_TIME:
        # profiling: record a start event so elapsed GPU time can be reported
        start = torch.cuda.Event(enable_timing=True)
        stream_before_context_switch.record_event(start)
        cpu_start = time.monotonic()
    logger.debug('%s %s starting, streams: %s', trace_name, name, streams)
    grad_enabled_before = torch.is_grad_enabled()
    try:
        yield
    finally:
        current_stream = torch.cuda.current_stream()
        # the wrapped block must not leave a different stream current
        assert current_stream == stream_before_context_switch
        if DEBUG_COMPLETED_TIME:
            cpu_end = time.monotonic()
        # mark the end of the submitted work on every stream
        for i, stream in enumerate(streams):
            event = end_events[i]
            stream.record_event(event)
        grad_enabled_after = torch.is_grad_enabled()
        # observed change of torch.is_grad_enabled() during concurrent run of
        # async_test_bboxes code
        assert (grad_enabled_before == grad_enabled_after
                ), 'Unexpected is_grad_enabled() value change'
        are_done = [e.query() for e in end_events]
        logger.debug('%s %s completed: %s streams: %s', trace_name, name,
                     are_done, streams)
        with torch.cuda.stream(stream_before_context_switch):
            # poll the end events cooperatively so other coroutines can run
            # while the GPU work drains
            while not all(are_done):
                await asyncio.sleep(sleep_interval)
                are_done = [e.query() for e in end_events]
                logger.debug(
                    '%s %s completed: %s streams: %s',
                    trace_name,
                    name,
                    are_done,
                    streams,
                )
        current_stream = torch.cuda.current_stream()
        assert current_stream == stream_before_context_switch
        if DEBUG_COMPLETED_TIME:
            # report CPU wall time and per-stream GPU elapsed time
            cpu_time = (cpu_end - cpu_start) * 1000
            stream_times_ms = ''
            for i, stream in enumerate(streams):
                elapsed_time = start.elapsed_time(end_events[i])
                stream_times_ms += f' {stream} {elapsed_time:.2f} ms'
            logger.info('%s %s %.2f ms %s', trace_name, name, cpu_time,
                        stream_times_ms)
@contextlib.asynccontextmanager
async def concurrent(streamqueue: asyncio.Queue,
                     trace_name='concurrent',
                     name='stream'):
    """Run code concurrently in different streams.

    :param streamqueue: asyncio.Queue instance.
    Queue tasks define the pool of streams used for concurrent execution.
    """
    # Without CUDA, streams are meaningless; act as a plain pass-through.
    if not torch.cuda.is_available():
        yield
        return
    initial_stream = torch.cuda.current_stream()
    with torch.cuda.stream(initial_stream):
        # borrow a stream from the pool; it is recycled in the finally block
        stream = await streamqueue.get()
        assert isinstance(stream, torch.cuda.Stream)
        try:
            with torch.cuda.stream(stream):
                logger.debug('%s %s is starting, stream: %s', trace_name, name,
                             stream)
                yield
                # the wrapped block must not leave a different stream current
                current = torch.cuda.current_stream()
                assert current == stream
                logger.debug('%s %s has finished, stream: %s', trace_name,
                             name, stream)
        finally:
            # mark the queue task done and return the stream for reuse
            streamqueue.task_done()
            streamqueue.put_nowait(stream)
| 4,125 | 32.544715 | 79 | py |
PseCo | PseCo-master/thirdparty/mmdetection/mmdet/utils/profiling.py | # Copyright (c) OpenMMLab. All rights reserved.
import contextlib
import sys
import time
import torch
if sys.version_info >= (3, 7):
    @contextlib.contextmanager
    def profile_time(trace_name,
                     name,
                     enabled=True,
                     stream=None,
                     end_stream=None):
        """Print time spent by CPU and GPU.

        Useful as a temporary context manager to find sweet spots of code
        suitable for async implementation.
        """
        # no-op when disabled or when there is no GPU to time against
        if (not enabled) or not torch.cuda.is_available():
            yield
            return
        stream = stream if stream else torch.cuda.current_stream()
        end_stream = end_stream if end_stream else stream
        # CUDA events bracket the GPU work; timing must be enabled explicitly
        start = torch.cuda.Event(enable_timing=True)
        end = torch.cuda.Event(enable_timing=True)
        stream.record_event(start)
        try:
            cpu_start = time.monotonic()
            yield
        finally:
            cpu_end = time.monotonic()
            end_stream.record_event(end)
            # block until the end event is reached so elapsed_time is valid
            end.synchronize()
            cpu_time = (cpu_end - cpu_start) * 1000
            gpu_time = start.elapsed_time(end)
            msg = f'{trace_name} {name} cpu_time {cpu_time:.2f} ms '
            msg += f'gpu_time {gpu_time:.2f} ms stream {stream}'
            print(msg, end_stream)
| 1,336 | 31.609756 | 73 | py |
PseCo | PseCo-master/configs/supervised_baseline/base.py | mmdet_base = "../../thirdparty/mmdetection/configs/_base_"
# Inherit the Faster R-CNN R50-FPN model, COCO dataset, 1x schedule and
# default runtime from the mmdetection `_base_` configs.
_base_ = [
    f"{mmdet_base}/models/faster_rcnn_r50_fpn.py",
    f"{mmdet_base}/datasets/coco_detection.py",
    f"{mmdet_base}/schedules/schedule_1x.py",
    f"{mmdet_base}/default_runtime.py",
]
# Caffe-style ResNet-50 backbone with frozen, eval-mode BatchNorm and
# Detectron2 pretrained weights.
model = dict(
    backbone=dict(
        norm_cfg=dict(requires_grad=False),
        norm_eval=True,
        style="caffe",
        init_cfg=dict(
            type="Pretrained", checkpoint="open-mmlab://detectron2/resnet50_caffe"
        ),
    )
)
# Caffe-style normalization: mean subtraction only, BGR order (to_rgb=False).
img_norm_cfg = dict(mean=[103.530, 116.280, 123.675], std=[1.0, 1.0, 1.0], to_rgb=False)
# Supervised training pipeline: multi-scale resize + flip + one randomly
# chosen color augmentation, then pad/normalize and collect.
train_pipeline = [
    dict(type="LoadImageFromFile"),
    dict(type="LoadAnnotations", with_bbox=True),
    dict(
        type="Sequential",
        transforms=[
            dict(
                type="RandResize",
                img_scale=[(1333, 400), (1333, 1200)],
                multiscale_mode="range",
                keep_ratio=True,
            ),
            dict(type="RandFlip", flip_ratio=0.5),
            dict(
                type="OneOf",
                transforms=[
                    dict(type=k)
                    for k in [
                        "Identity",
                        "AutoContrast",
                        "RandEqualize",
                        "RandSolarize",
                        "RandColor",
                        "RandContrast",
                        "RandBrightness",
                        "RandSharpness",
                        "RandPosterize",
                    ]
                ],
            ),
        ],
    ),
    dict(type="Pad", size_divisor=32),
    dict(type="Normalize", **img_norm_cfg),
    # tag samples so the model can tell supervised data apart downstream
    dict(type="ExtraAttrs", tag="sup"),
    dict(type="DefaultFormatBundle"),
    dict(
        type="Collect",
        keys=["img", "gt_bboxes", "gt_labels"],
        meta_keys=(
            "filename",
            "ori_shape",
            "img_shape",
            "img_norm_cfg",
            "pad_shape",
            "scale_factor",
            "tag",
        ),
    ),
]
# Standard single-scale test pipeline without flipping.
test_pipeline = [
    dict(type="LoadImageFromFile"),
    dict(
        type="MultiScaleFlipAug",
        img_scale=(1333, 800),
        flip=False,
        transforms=[
            dict(type="Resize", keep_ratio=True),
            dict(type="RandomFlip"),
            dict(type="Normalize", **img_norm_cfg),
            dict(type="Pad", size_divisor=32),
            dict(type="ImageToTensor", keys=["img"]),
            dict(type="Collect", keys=["img"]),
        ],
    ),
]
data = dict(
    samples_per_gpu=1,
    workers_per_gpu=1,
    train=dict(pipeline=train_pipeline),
    val=dict(pipeline=test_pipeline),
    test=dict(pipeline=test_pipeline),
)
custom_hooks = [
    dict(type="WeightSummary"),
]
# Iteration-based schedule: 180k iters, LR drops at 120k and 160k.
optimizer = dict(type="SGD", lr=0.01, momentum=0.9, weight_decay=0.0001)
lr_config = dict(step=[120000, 160000])
runner = dict(_delete_=True, type="IterBasedRunner", max_iters=180000)
checkpoint_config = dict(by_epoch=False, interval=10000, max_keep_ckpts=5, create_symlink=False)
evaluation = dict(interval=10000)
# fp16 = dict(loss_scale="dynamic")
log_config = dict(
    interval=50,
    hooks=[
        dict(type="TextLoggerHook", by_epoch=False),
    ],
)
| 3,220 | 26.767241 | 96 | py |
PseCo | PseCo-master/configs/PseCo/base.py | mmdet_base = "../../thirdparty/mmdetection/configs/_base_"
# Inherit the Faster R-CNN R50-FPN model, COCO dataset, 1x schedule and
# default runtime from the mmdetection `_base_` configs.
_base_ = [
    f"{mmdet_base}/models/faster_rcnn_r50_fpn.py",
    f"{mmdet_base}/datasets/coco_detection.py",
    f"{mmdet_base}/schedules/schedule_1x.py",
    f"{mmdet_base}/default_runtime.py",
]
# Caffe-style ResNet-50 backbone with frozen, eval-mode BatchNorm and
# Detectron2 pretrained weights.
model = dict(
    backbone=dict(
        norm_cfg=dict(requires_grad=False),
        norm_eval=True,
        style="caffe",
        init_cfg=dict(
            type="Pretrained", checkpoint="open-mmlab://detectron2/resnet50_caffe"
        ),
    )
)
# Caffe-style normalization: mean subtraction only, BGR order (to_rgb=False).
img_norm_cfg = dict(mean=[103.530, 116.280, 123.675], std=[1.0, 1.0, 1.0], to_rgb=False)
# Supervised branch pipeline; record=True keeps the applied transform
# matrices in the results for later use.
train_pipeline = [
    dict(type="LoadImageFromFile", file_client_args=dict(backend="${backend}")),
    dict(type="LoadAnnotations", with_bbox=True),
    dict(
        type="Sequential",
        transforms=[
            dict(
                type="RandResize",
                img_scale=[(1333, 400), (1333, 1200)],
                multiscale_mode="range",
                keep_ratio=True,
            ),
            dict(type="RandFlip", flip_ratio=0.5),
            dict(
                type="OneOf",
                transforms=[
                    dict(type=k)
                    for k in [
                        "Identity",
                        "AutoContrast",
                        "RandEqualize",
                        "RandSolarize",
                        "RandColor",
                        "RandContrast",
                        "RandBrightness",
                        "RandSharpness",
                        "RandPosterize",
                    ]
                ],
            ),
        ],
        record=True,
    ),
    dict(type="Pad", size_divisor=32),
    dict(type="Normalize", **img_norm_cfg),
    dict(type="ExtraAttrs", tag="sup"),
    dict(type="DefaultFormatBundle"),
    dict(
        type="Collect",
        keys=["img", "gt_bboxes", "gt_labels"],
        meta_keys=(
            "filename",
            "ori_shape",
            "img_shape",
            "img_norm_cfg",
            "pad_shape",
            "scale_factor",
            "tag",
        ),
    ),
]
# Strong augmentation for the student branch on unlabeled images:
# color jitter + geometric transforms + random erasing.
strong_pipeline = [
    dict(
        type="Sequential",
        transforms=[
            dict(
                type="RandResize",
                img_scale=[(1333, 400), (1333, 1200)],
                multiscale_mode="range",
                keep_ratio=True,
            ),
            dict(type="RandFlip", flip_ratio=0.5),
            dict(
                type="ShuffledSequential",
                transforms=[
                    dict(
                        type="OneOf",
                        transforms=[
                            dict(type=k)
                            for k in [
                                "Identity",
                                "AutoContrast",
                                "RandEqualize",
                                "RandSolarize",
                                "RandColor",
                                "RandContrast",
                                "RandBrightness",
                                "RandSharpness",
                                "RandPosterize",
                            ]
                        ],
                    ),
                    dict(
                        type="OneOf",
                        transforms=[
                            dict(type="RandTranslate", x=(-0.1, 0.1)),
                            dict(type="RandTranslate", y=(-0.1, 0.1)),
                            dict(type="RandRotate", angle=(-30, 30)),
                            [
                                dict(type="RandShear", x=(-30, 30)),
                                dict(type="RandShear", y=(-30, 30)),
                            ],
                        ],
                    ),
                ],
            ),
            dict(
                type="RandErase",
                n_iterations=(1, 5),
                size=[0, 0.2],
                squared=True,
            ),
        ],
        record=True,
    ),
    dict(type="Pad", size_divisor=32),
    dict(type="Normalize", **img_norm_cfg),
    dict(type="ExtraAttrs", tag="unsup_student"),
    dict(type="DefaultFormatBundle"),
    dict(
        type="Collect",
        keys=["img", "gt_bboxes", "gt_labels"],
        meta_keys=(
            "filename",
            "ori_shape",
            "img_shape",
            "img_norm_cfg",
            "pad_shape",
            "scale_factor",
            "tag",
            "transform_matrix",
        ),
    ),
]
# Weak augmentation for the teacher branch: only resize + flip.
weak_pipeline = [
    dict(
        type="Sequential",
        transforms=[
            dict(
                type="RandResize",
                img_scale=[(1333, 400), (1333, 1200)],
                multiscale_mode="range",
                keep_ratio=True,
            ),
            dict(type="RandFlip", flip_ratio=0.5),
        ],
        record=True,
    ),
    dict(type="Pad", size_divisor=32),
    dict(type="Normalize", **img_norm_cfg),
    dict(type="ExtraAttrs", tag="unsup_teacher"),
    dict(type="DefaultFormatBundle"),
    dict(
        type="Collect",
        keys=["img", "gt_bboxes", "gt_labels"],
        meta_keys=(
            "filename",
            "ori_shape",
            "img_shape",
            "img_norm_cfg",
            "pad_shape",
            "scale_factor",
            "tag",
            "transform_matrix",
        ),
    ),
]
# Unlabeled-data pipeline: fan each image out into a student (strong) and
# a teacher (weak) branch.
unsup_pipeline = [
    dict(type="LoadImageFromFile"),
    # dict(type="LoadAnnotations", with_bbox=True),
    # generate fake labels for data format compatibility
    dict(type="PseudoSamples", with_bbox=True),
    dict(
        type="MultiBranch", unsup_student=strong_pipeline, unsup_teacher=weak_pipeline
    ),
]
# Standard single-scale test pipeline without flipping.
test_pipeline = [
    dict(type="LoadImageFromFile", file_client_args=dict(backend="${backend}")),
    dict(
        type="MultiScaleFlipAug",
        img_scale=(1333, 800),
        flip=False,
        transforms=[
            dict(type="Resize", keep_ratio=True),
            dict(type="RandomFlip"),
            dict(type="Normalize", **img_norm_cfg),
            dict(type="Pad", size_divisor=32),
            dict(type="ImageToTensor", keys=["img"]),
            dict(type="Collect", keys=["img"]),
        ],
    ),
]
# Semi-supervised dataset: labeled + unlabeled splits; the batch sampler
# mixes them at a 1:4 labeled/unlabeled ratio.
data = dict(
    samples_per_gpu=None,
    workers_per_gpu=None,
    train=dict(
        _delete_=True,
        type="SemiDataset",
        sup=dict(
            type="CocoDataset",
            ann_file=None,
            img_prefix=None,
            pipeline=train_pipeline,
        ),
        unsup=dict(
            type="CocoDataset",
            ann_file=None,
            img_prefix=None,
            pipeline=unsup_pipeline,
            filter_empty_gt=False,
        ),
    ),
    val=dict(
        ann_file="../data/annotations/instances_val2017.json",
        img_prefix="../data/val2017/",
        pipeline=test_pipeline),
    test=dict(
        ann_file="../data/annotations/instances_val2017.json",
        img_prefix="../data/val2017/",
        pipeline=test_pipeline),
    sampler=dict(
        train=dict(
            type="SemiBalanceSampler",
            sample_ratio=[1, 4],
            by_prob=True,
            # at_least_one=True,
            epoch_length=7330,
        )
    ),
)
# pseudo-label score threshold; presumably consumed by the PseCo detector
# wrapper — TODO confirm against the model config.
thres=0.9
refresh=False
custom_hooks = [
    dict(type="NumClassCheckHook"),
    dict(type="WeightSummary"),
    # EMA teacher update (mean-teacher) with momentum 0.999
    dict(type="MeanTeacher", momentum=0.999, warm_up=0),
]
evaluation = dict(type="SubModulesDistEvalHook", interval=10000, start=20000)
optimizer = dict(type="SGD", lr=0.01, momentum=0.9, weight_decay=0.0001)
lr_config = dict(step=[120000])
runner = dict(_delete_=True, type="IterBasedRunner", max_iters=180000)
checkpoint_config = dict(by_epoch=False, interval=5000, max_keep_ckpts=10, create_symlink=False)
fp16 = dict(loss_scale="dynamic")
log_config = dict(
    interval=50,
    hooks=[
        dict(type="TextLoggerHook", by_epoch=False),
    ],
)
| 7,897 | 28.580524 | 96 | py |
RLNLocalization | RLNLocalization-main/statistic_test.py | # ========================================
# Perform alignment based on Prior Library
# ========================================
import os
import numpy as np
import matplotlib.pyplot as plt
from tqdm import tqdm
from PIL import Image
from scipy.ndimage import center_of_mass
from medpy.metric import dc
from torchvision import transforms as T
from op.data_op import load_list
from dipy.align import imaffine
from dipy.align import transforms
TEXT_PATH = "Text"
VIEW = ['left']
VISUAL = 'Visual'
PRIOR_DATA = 'PRIOR'
def double_align(tissues_mask, segmentations_mask, rln_and_tissues_mask):
    """Translate two prior masks into the frame of a segmentation mask.

    A translation-only registration (initialized by center-of-mass matching
    and optimized with a multi-resolution mutual-information metric) is
    estimated between ``tissues_mask`` (moving) and ``segmentations_mask``
    (static), then the SAME transform is applied to both priors.

    Args:
        tissues_mask: moving label mask of the tissues (labels appear to be
            multiples of 50 — the re-quantization below assumes so).
        segmentations_mask: static/reference segmentation mask.
        rln_and_tissues_mask: second mask (tissues + RLN) warped with the
            transform estimated from ``tissues_mask``.

    Returns:
        tuple: (aligned tissues mask, aligned tissues+RLN mask), both
        re-quantized to integer multiples of 50.
    """
    identity = np.eye(3)
    # initialization: match the centers of mass of the two masks
    c_of_mass = imaffine.transform_centers_of_mass(segmentations_mask, identity, tissues_mask, identity)
    n_bins = 32
    sampling_prop = None
    metric = imaffine.MutualInformationMetric(n_bins, sampling_prop)
    # coarse-to-fine schedule: most iterations at the most smoothed level
    level_iter = [10000, 1000, 100]
    sigmas = [3.0, 1.0, 0.0]
    factors = [4, 2, 1]
    affine_reg = imaffine.AffineRegistration(metric=metric, level_iters=level_iter, sigmas=sigmas, factors=factors,
                                             verbosity=0)
    # only a 2-D translation is estimated (no rotation/scale)
    transform = transforms.TranslationTransform2D()
    params0 = None
    starting_affine = c_of_mass.affine
    translation = affine_reg.optimize(segmentations_mask, tissues_mask, transform, params0, identity, identity,
                                      starting_affine=starting_affine)
    # transformed_img = translation.transform(img, interpolation='linear')
    # nearest-neighbour interpolation keeps the label values discrete
    transformed_tissues_mask = translation.transform(tissues_mask, interpolation='nearest')
    transformed_rln_and_tissues_mask = translation.transform(rln_and_tissues_mask, interpolation='nearest')
    # re-quantize labels to multiples of 50 to drop any residue
    transformed_tissues_mask = transformed_tissues_mask / 50
    transformed_tissues_mask = transformed_tissues_mask.astype(np.int32)
    transformed_tissues_mask *= 50
    transformed_rln_and_tissues_mask = transformed_rln_and_tissues_mask / 50
    transformed_rln_and_tissues_mask = transformed_rln_and_tissues_mask.astype(np.int32)
    transformed_rln_and_tissues_mask *= 50
    return transformed_tissues_mask, transformed_rln_and_tissues_mask
if __name__ == '__main__':
    # test split = targets; train + val splits = prior library (sources)
    train_list, val_list, test_list = load_list(TEXT_PATH, VIEW)
    target_list = test_list
    source_list = val_list + train_list
    os.makedirs(VISUAL, exist_ok=True)
    os.makedirs(PRIOR_DATA, exist_ok=True)
    mask_transform = T.Compose([
        T.Resize((256, 256), Image.NEAREST),
    ])
    target_dice_list = []
    # NOTE(review): only the first 10 targets are processed — presumably a
    # debugging limit; confirm before running a full evaluation.
    for target_path in target_list[:10]:
        # paths use Windows separators: Data\patient\view\item
        patient_id, view_type, item_id = target_path.split('\\')[1:]
        segmentations_path = 'Results/{}-{}-{}.png'.format(patient_id, view_type, item_id)
        seg_msk = Image.open(segmentations_path).convert('L')
        seg_msk_arr = np.array(seg_msk, dtype=np.int32)
        target_item_dice_list = []
        prior_item_center_list = []
        for source_path in tqdm(source_list):
            temp_list = []
            # per-structure binary masks, scaled to labels 1..4
            for idx, mask_item in enumerate(["CCA", "thyroid", "trachea", "RLN"]):
                msk = Image.open(os.path.join(source_path, "MASK", "{}.jpg".format(mask_item)))
                msk = mask_transform(msk)
                msk = np.array(msk) / 255 * (idx + 1)
                temp_list.append(msk)
            # tissues-only mask (CCA/thyroid/trachea), labels x50 -> 50/100/150
            tissues_msk_arr = np.stack(temp_list[:-1], axis=0)
            tissues_msk_arr = np.max(tissues_msk_arr, axis=0) * 50
            tissues_msk_arr = tissues_msk_arr.astype(np.int32)
            # tissues + RLN mask; the RLN label becomes 200
            rln_msk_arr = np.stack(temp_list, axis=0)
            rln_msk_arr = np.max(rln_msk_arr, axis=0) * 50
            rln_msk_arr = rln_msk_arr.astype(np.int32)
            aligned_tissues_msk, aligned_rln_msk = double_align(tissues_msk_arr, seg_msk_arr, rln_msk_arr)
            # alignment quality: Dice between aligned tissues and segmentation
            dice_aligned_seg = dc(aligned_tissues_msk, seg_msk_arr)
            target_item_dice_list.append(dice_aligned_seg)
            # RLN center of mass in the aligned frame, stored with its Dice
            prior_rln_msk = aligned_rln_msk == 200
            prior_rln_msk = prior_rln_msk.astype(np.int32)
            center_coord = center_of_mass(prior_rln_msk)
            prior_item_center_list.append([center_coord[0], center_coord[1], dice_aligned_seg])
        target_item_dice_list.sort(reverse=True)
        target_dice_list.append(target_item_dice_list)
        plt.figure(figsize=(10, 5))
        plt.subplot(1, 2, 1)
        plt.imshow(seg_msk_arr)
        for temp in prior_item_center_list:
            # marker size encodes alignment quality
            # NOTE(review): `0.75 > temp[2] > 0.85` can never be True, so the
            # middle size is unreachable — likely meant 0.85 > temp[2] > 0.75.
            if temp[2] > 0.85:
                s = 15
            elif 0.75 > temp[2] > 0.85:
                s = 5
            else:
                s = 1
            plt.scatter(x=temp[1], y=temp[0], s=s, alpha=0.3, c="red")
        plt.subplot(1, 2, 2)
        plt.plot(target_item_dice_list)
        plt.title('Sorted Dice score of each aligned prior mask to the segmentation')
        plt.xlabel('Subj Id')
        plt.ylabel('Dice')
        # plt.show()
        plt.savefig(os.path.join(VISUAL, '{}-{}-{}.png'.format(patient_id, view_type, item_id)))
        plt.close()
        # persist [row, col, dice] per prior for later localization
        prior_item_center_arr = np.array(prior_item_center_list)
        np.save(os.path.join(PRIOR_DATA, '{}-{}-{}.npy'.format(patient_id, view_type, item_id)), prior_item_center_arr)
    # x = np.arange(1, len(target_item_dice_list) + 1)
    plt.figure()
    for temp in target_dice_list:
        plt.plot(temp)
    plt.show()
| 5,288 | 37.326087 | 120 | py |
RLNLocalization | RLNLocalization-main/utils.py | import os
import torch
import numpy as np
from matplotlib import pyplot as plt
from medpy.metric import dc
from dipy.align import imaffine
from dipy.align import transforms
def check_dir(path):
    """Create *path* (including parents) if it does not already exist.

    Parameters:
        path: [string] directory path to ensure
    """
    # FIX: the previous os.path.exists() pre-check was redundant and racy
    # (TOCTOU); exist_ok=True already makes this call idempotent.
    os.makedirs(path, exist_ok=True)
def set_device(cuda):
    """
    Set the torch gpu device
    TODO: parallel setup is requested
    ----------------------------
    Parameters:
        cuda: [int] id of the used GPU, where -1 is "cpu"
    Return:
        torch device
    ----------------------------
    """
    assert isinstance(cuda, int)
    # fall back to CPU either on explicit request (-1) or when CUDA is absent
    use_cpu = cuda == -1 or not torch.cuda.is_available()
    return torch.device('cpu' if use_cpu else 'cuda:{}'.format(cuda))
class Plotter:
    """
    Plot the loss/metric curves
    """
    def __init__(self, send_path):
        """
        send_path: [string] path to save the figures
        """
        self.send_path = send_path
        self.buffer = dict()
    def update(self, logs):
        """
        logs: [dict] metric dict that to be plot
        """
        # lazily create the history list for unseen metric names
        for name, value in logs.items():
            self.buffer.setdefault(name, []).append(value)
    def send(self):
        """
        function to plot the curve
        """
        # one figure per tracked metric, saved as <name>.png
        for name, history in self.buffer.items():
            plt.figure()
            plt.plot(history)
            plt.title(name)
            plt.xlabel("epoch")
            plt.savefig(os.path.join(self.send_path, name + ".png"))
            plt.close()
class Recorder:
    """
    record the metric and return the statistic results
    """
    def __init__(self, keys):
        """
        keys: [list] variables' name to be saved
        """
        self.keys = keys
        self.data = {key: [] for key in keys}
    def update(self, item):
        """
        item: [dict] data dict to update the buffer, the keys should be consistent
        """
        for name, value in item.items():
            self.data[name].append(value)
    def reset(self, keys=None):
        """
        keys: [list] variables to be cleared in the buffer
        """
        # clear everything when no explicit subset is given
        targets = self.data.keys() if keys is None else keys
        for name in targets:
            self.data[name] = []
    def call(self, key, return_std=False):
        """
        key: [string] variable to be calculated for the statistical results
        return_std: [bool] option to return variance
        """
        values = np.array(self.data[key])
        mean = np.mean(values)
        return (mean, np.std(values)) if return_std else mean
def array2tensor(array, dtype="float32"):
    """
    transfer the numpy array to the torch tensor
    TODO: more dtype is requested
    ----------------------------
    Parameters:
        array: [numpy.array] array to be transferred
        dtype: [string] type of the tensor, current only support Float32 and Int64
    Return:
        torch tensor
    ----------------------------
    """
    # dispatch table: target dtype name -> tensor cast
    casts = {
        "float32": lambda t: t.float(),
        "int64": lambda t: t.long(),
    }
    if dtype not in casts:
        raise NameError("Currently only support Float32 and Int64")
    return casts[dtype](torch.from_numpy(array))
def tensor2array(tensor, squeeze=False):
    """
    transfer the torch tensor to the numpy array
    ----------------------------
    Parameters:
        tensor: [torch.Tensor] tensor to be transferred
        squeeze: [bool] option for squeeze the tensor
    Return:
        numpy array
    ----------------------------
    """
    source = tensor.squeeze() if squeeze else tensor
    # detach from the graph and move to host memory before conversion
    return source.cpu().detach().numpy()
def procrustes_analysis(reference_mask, mask):
    """Rigidly align ``mask`` to ``reference_mask`` and return the result.

    Three-step registration: center-of-mass initialization, translation,
    then a full rigid (translation + rotation) fit, all driven by a
    multi-resolution mutual-information metric.

    Parameters:
        reference_mask: [numpy.array] fixed label mask (labels appear to be
            multiples of 50 — the re-quantization below assumes so)
        mask: [numpy.array] moving label mask to be aligned
    Return:
        numpy array holding the aligned mask, labels re-quantized to
        multiples of 50
    """
    identity = np.eye(3)
    # initialization: match the centers of mass of the two masks
    c_of_mass = imaffine.transform_centers_of_mass(reference_mask, identity, mask, identity)
    n_bins = 32
    sampling_prop = None
    metric = imaffine.MutualInformationMetric(n_bins, sampling_prop)
    # coarse-to-fine schedule: most iterations at the most smoothed level
    level_iter = [10000, 1000, 100]
    sigmas = [3.0, 1.0, 0.0]
    factors = [4, 2, 1]
    affine_reg = imaffine.AffineRegistration(metric=metric, level_iters=level_iter, sigmas=sigmas, factors=factors)
    transform = transforms.TranslationTransform2D()
    params0 = None
    translation = affine_reg.optimize(reference_mask, mask, transform, params0, identity, identity,
                                      starting_affine=c_of_mass.affine)
    # refine the translation-only fit with a rigid transform
    transform = transforms.RigidTransform2D()
    rigid = affine_reg.optimize(reference_mask, mask, transform, params0, identity, identity, starting_affine=translation.affine)
    # nearest-neighbour interpolation keeps the label values discrete
    transformed_mask = rigid.transform(mask, interpolation='nearest')
    # re-quantize labels to multiples of 50 to drop interpolation residue
    transformed_mask = transformed_mask / 50
    transformed_mask = transformed_mask.astype(np.int32)
    transformed_mask *= 50
    # FIX: the aligned mask was computed but never returned (bare `return`),
    # so callers always received None; the debug print was removed.
    return transformed_mask
| 4,994 | 26.75 | 129 | py |
RLNLocalization | RLNLocalization-main/prior_localize.py | import os
import numpy as np
from PIL import Image
from matplotlib import pyplot as plt
from torchvision import transforms as T
from scipy.ndimage import center_of_mass
# directories: per-target prior arrays in, result figures/params out,
# ground-truth masks for evaluation
PRIOR_PATH = 'PRIOR_right'
SAVE_PATH = 'Prior_Results_right'
GT_PATH = '../Dataset/Data'
mask_transform = T.Compose([
    T.Resize((256, 256), Image.NEAREST),
])
if __name__ == '__main__':
    os.makedirs(SAVE_PATH, exist_ok=True)
    prior_list = os.listdir(PRIOR_PATH)
    error_mean = 0.
    error_weighted_mean = 0.
    count = 0
    error_arr = []
    for prior_name in prior_list:
        # rows are [row, col, dice] for every aligned prior of this target
        prior_data = np.load(os.path.join(PRIOR_PATH, prior_name))
        # drop rows with NaN coordinates (empty RLN after alignment)
        nan_check = ~np.isnan(prior_data).any(axis=1)
        prior_data = prior_data[nan_check, :]
        # keep only well-aligned priors (Dice above threshold) ...
        dice_threshold = 0.8
        failed_check = prior_data[:, 2] > dice_threshold
        prior_data = prior_data[failed_check, :]
        # ... then the K best of those by Dice
        K = 10
        top_orders = np.argsort(prior_data[:, 2])
        top_orders = np.flipud(top_orders)
        top_orders = top_orders[:K]
        prior_data = prior_data[top_orders, :]
        # file name pattern: patient-<view-part1>-<view-part2>-item.npy
        prior_name = prior_name.split('.npy')[0]
        patient_id, view_type_1, view_type_2, item_id = prior_name.split('-')
        view_type = view_type_1 + '-' + view_type_2
        temp_list = []
        # rebuild the ground-truth combined mask with labels 50/100/150/200
        for idx, mask_item in enumerate(["CCA", "thyroid", "trachea", "RLN"]):
            msk = Image.open(os.path.join(GT_PATH, patient_id, view_type, item_id, "MASK", "{}.jpg".format(mask_item)))
            msk = mask_transform(msk)
            msk = np.array(msk) / 255 * (idx + 1)
            temp_list.append(msk)
        gt_msk_arr = np.stack(temp_list, axis=0)
        gt_msk_arr = np.max(gt_msk_arr, axis=0) * 50
        gt_msk_arr = gt_msk_arr.astype(np.int32)
        # ground-truth RLN center (label 200)
        gt_rln_msk = gt_msk_arr == 200
        gt_rln_msk = gt_rln_msk.astype(np.int32)
        gt_rln_coord = list(center_of_mass(gt_rln_msk))
        # two estimates: plain mean and Dice-softmax-weighted mean
        mean_rln_coord = np.mean(prior_data, axis=0)
        weight = np.exp(prior_data[:, 2])
        weight /= weight.sum()
        weighted_mean_rln_coord = np.sum(prior_data[:, :2] * weight[:, np.newaxis], axis=0)
        dist_mean_gt = np.sqrt(np.sum((mean_rln_coord[:2] - gt_rln_coord) ** 2))
        dist_weighted_mean_gt = np.sqrt(np.sum((weighted_mean_rln_coord - gt_rln_coord) ** 2))
        plt.figure()
        plt.imshow(gt_msk_arr)
        for temp in prior_data:
            plt.scatter(x=temp[1], y=temp[0], s=1, alpha=0.1, c="red")
        plt.scatter(x=gt_rln_coord[1], y=gt_rln_coord[0], s=5, c='blue', label='GT')
        plt.scatter(x=mean_rln_coord[1], y=mean_rln_coord[0], s=5, c='whitesmoke', label='Mean')
        plt.scatter(x=weighted_mean_rln_coord[1], y=weighted_mean_rln_coord[0], s=5, c='cyan', label='Weighted Mean')
        plt.legend()
        plt.savefig(os.path.join(SAVE_PATH, '{}-{}-{}.png'.format(patient_id, view_type, item_id)))
        plt.close()
        print(prior_name, dist_mean_gt, dist_weighted_mean_gt, gt_rln_coord, mean_rln_coord, weighted_mean_rln_coord)
        error_mean += dist_mean_gt
        error_weighted_mean += dist_weighted_mean_gt
        # NOTE(review): both branches append the same value — the NaN case
        # was presumably meant to be skipped or handled separately; confirm.
        if np.isnan(np.sum(dist_mean_gt)):
            error_arr.append(dist_mean_gt)
        else:
            error_arr.append(dist_mean_gt)
        count += 1
        save_list = {'Mean': mean_rln_coord,
                     'Weighted_Mean': weighted_mean_rln_coord,
                     'K': K,
                     'Dice_Threshold': dice_threshold}
        np.save(os.path.join(SAVE_PATH, '{}-{}-{}.npy'.format(patient_id, view_type, item_id)), save_list)
    # summary: mean/std localization error and fraction within 15 px
    print(np.mean(error_arr), np.std(error_arr))
    print(np.mean(np.array(error_arr) < 15))
    print("Avg", error_mean/count, error_weighted_mean/count)
| 3,704 | 35.683168 | 119 | py |
RLNLocalization | RLNLocalization-main/models/Regress/model.py | import torch
from torch import nn
from torch.nn import functional as F
import numpy as np
from utils import tensor2array
from medpy.metric import dc
class conv_block(nn.Module):
    """
    Convolution Block: two stacked 3x3 Conv -> InstanceNorm -> ReLU stages.
    """
    def __init__(self, in_ch, out_ch):
        super(conv_block, self).__init__()
        layers = []
        # first stage maps in_ch -> out_ch, second refines out_ch -> out_ch
        for src, dst in ((in_ch, out_ch), (out_ch, out_ch)):
            layers.extend([
                nn.Conv2d(src, dst, kernel_size=3, stride=1, padding=1, bias=True),
                nn.InstanceNorm2d(dst),
                nn.ReLU(inplace=True),
            ])
        self.conv = nn.Sequential(*layers)
    def forward(self, x):
        """Apply both conv stages; spatial size is preserved."""
        return self.conv(x)
class up_conv(nn.Module):
    """
    Up Convolution Block: 2x upsampling followed by Conv -> InstanceNorm -> ReLU.
    """
    def __init__(self, in_ch, out_ch):
        super(up_conv, self).__init__()
        stages = [
            nn.Upsample(scale_factor=2),
            nn.Conv2d(in_ch, out_ch, kernel_size=3, stride=1, padding=1, bias=True),
            nn.InstanceNorm2d(out_ch),
            nn.ReLU(inplace=True),
        ]
        self.up = nn.Sequential(*stages)
    def forward(self, x):
        """Double the spatial resolution and map channels in_ch -> out_ch."""
        return self.up(x)
class Locator(nn.Module):
    """Single-view CNN regressor that predicts a 2-D coordinate.

    A five-stage convolutional encoder (channel widths
    ``feat_n * [1, 2, 4, 8, 16]``) is globally average-pooled and fed to a
    three-layer MLP that outputs an (row, col)-style coordinate pair.
    """
    def __init__(self, in_ch, feat_n, loss_weight):
        """
        in_ch: [int] number of input channels
        feat_n: [int] base channel width of the encoder
        loss_weight: [dict] loss weighting; must contain key "Regression"
        """
        super(Locator, self).__init__()
        filters = [feat_n, feat_n * 2, feat_n * 4, feat_n * 8, feat_n * 16]
        self.loss_weight = loss_weight
        self.Maxpool1 = nn.MaxPool2d(kernel_size=2, stride=2)
        self.Maxpool2 = nn.MaxPool2d(kernel_size=2, stride=2)
        self.Maxpool3 = nn.MaxPool2d(kernel_size=2, stride=2)
        self.Maxpool4 = nn.MaxPool2d(kernel_size=2, stride=2)
        self.Conv1 = conv_block(in_ch, filters[0])
        self.Conv2 = conv_block(filters[0], filters[1])
        self.Conv3 = conv_block(filters[1], filters[2])
        self.Conv4 = conv_block(filters[2], filters[3])
        self.Conv5 = conv_block(filters[3], filters[4])
        self.ada_pool = nn.AdaptiveAvgPool2d(output_size=1)
        # FIX: the first linear layer previously hard-coded 1024 input
        # features, which is only correct when feat_n == 64 (64 * 16 = 1024).
        # Using filters[4] keeps the exact same layer for feat_n == 64 and
        # generalizes the model to any base width.
        self.fc = nn.Sequential(
            nn.Linear(filters[4], 256),
            nn.ReLU(inplace=True),
            nn.Linear(256, 64),
            nn.ReLU(inplace=True),
            nn.Linear(64, 2)
        )
    def forward(self, x):
        """Return a (batch, 2) coordinate prediction for input ``x``."""
        e1 = self.Conv1(x)
        e2 = self.Maxpool1(e1)
        e2 = self.Conv2(e2)
        e3 = self.Maxpool2(e2)
        e3 = self.Conv3(e3)
        e4 = self.Maxpool3(e3)
        e4 = self.Conv4(e4)
        e5 = self.Maxpool4(e4)
        e5 = self.Conv5(e5)
        # global average pool -> flat (batch, filters[4]) feature vector
        flat_e5 = self.ada_pool(e5).view(x.size(0), -1)
        out = self.fc(flat_e5)
        return out
    def evaluate(self, x, y):
        """Return the Euclidean distance between the prediction and ``y``.

        Predictions are clipped to [0, 64] — presumably the valid coordinate
        range of the target map; TODO confirm against the dataset.
        """
        out = self.forward(x)
        out_arr = tensor2array(out, True)
        out_arr = np.clip(out_arr, a_min=0, a_max=64)
        y_arr = tensor2array(y, True)
        dist = np.linalg.norm(out_arr - y_arr)
        return dist
    def loss_function(self, x, y):
        """Return (weighted total loss tensor, dict of scalar loss values)."""
        out = self.forward(x)
        regression = F.smooth_l1_loss(out, y)
        total = regression * self.loss_weight["Regression"]
        losses = {
            'Total': total.item(),
            'Regression': regression.item()
        }
        return total, losses
class MCLocator(nn.Module):
    """Multi-context CNN regressor fusing a small and a large field of view.

    Both views pass through the same three-stage encoder; the features are
    pooled to a common 6x6 grid, concatenated along channels, fused by two
    further conv stages, and regressed to a 2-D coordinate by an MLP.
    """
    def __init__(self, in_ch, feat_n, loss_weight):
        """
        in_ch: [int] number of input channels per view
        feat_n: [int] base channel width of the encoder
        loss_weight: [dict] loss weighting; must contain key "Regression"
        """
        super(MCLocator, self).__init__()
        filters = [feat_n, feat_n * 2, feat_n * 4, feat_n * 8, feat_n * 16]
        self.loss_weight = loss_weight
        self.Maxpool1 = nn.MaxPool2d(kernel_size=2, stride=2)
        self.Maxpool2 = nn.MaxPool2d(kernel_size=2, stride=2)
        self.Maxpool3 = nn.MaxPool2d(kernel_size=2, stride=2)
        self.Conv1 = conv_block(in_ch, filters[0])
        self.Conv2 = conv_block(filters[0], filters[1])
        self.Conv3 = conv_block(filters[1], filters[2])
        # fusion takes the channel-concatenated small+large view features
        self.Conv4 = conv_block(filters[2]*2, filters[3])
        self.Conv5 = nn.Sequential(
            conv_block(filters[3], filters[2]),
            conv_block(filters[2], filters[2]),
        )
        self.ada_pool = nn.AdaptiveAvgPool2d(output_size=6)
        # FIX: the first linear layer previously hard-coded 9216 input
        # features (= 256 channels * 6 * 6), only correct for feat_n == 64.
        # filters[2] * 6 * 6 keeps the exact same layer for feat_n == 64 and
        # generalizes the model to any base width.
        self.fc = nn.Sequential(
            nn.Linear(filters[2] * 6 * 6, 512),
            nn.ReLU(inplace=True),
            nn.Linear(512, 64),
            nn.ReLU(inplace=True),
            nn.Linear(64, 2)
        )
    def forward(self, x):
        """x: tuple (small_view, large_view) tensors; returns (batch, 2)."""
        s, l = x
        s_f = self.feature_extract(s)
        l_f = self.feature_extract(l)
        l_f = self.ada_pool(l_f)
        # FIX: pool the small-view features as well so both branches share
        # the 6x6 spatial size required by the concatenation and the FC
        # layer. For the originally supported 24x24 small-view input this is
        # an identity (24 -> 12 -> 6 after the two poolings); other input
        # sizes previously crashed with a shape mismatch.
        s_f = self.ada_pool(s_f)
        f = torch.cat((l_f, s_f), dim=1)
        f = self.Conv4(f)
        f = self.Conv5(f)
        flat_f = torch.flatten(f, start_dim=1)
        out = self.fc(flat_f)
        return out
    def feature_extract(self, x):
        """Shared encoder: two Conv/MaxPool stages + one conv block (1/4 res)."""
        e1 = self.Conv1(x)
        e2 = self.Maxpool1(e1)
        e2 = self.Conv2(e2)
        e3 = self.Maxpool2(e2)
        e3 = self.Conv3(e3)
        return e3
    def evaluate(self, x, y):
        """Return the Euclidean distance between the prediction and ``y``.

        Predictions are clipped to [0, 24] — presumably the valid coordinate
        range of the small view; TODO confirm against the dataset.
        """
        out = self.forward(x)
        out_arr = tensor2array(out, True)
        out_arr = np.clip(out_arr, a_min=0, a_max=24)
        y_arr = tensor2array(y, True)
        dist = np.linalg.norm(out_arr - y_arr)
        return dist
    def loss_function(self, x, y):
        """Return (weighted total loss tensor, dict of scalar loss values)."""
        out = self.forward(x)
        regression = F.smooth_l1_loss(out, y)
        total = regression * self.loss_weight["Regression"]
        losses = {
            'Total': total.item(),
            'Regression': regression.item()
        }
        return total, losses
| 5,554 | 26.775 | 85 | py |
RLNLocalization | RLNLocalization-main/models/AutoEncoder/model.py | import torch
from torch import nn
from torch.nn import functional as F
import numpy as np
from utils import tensor2array
from medpy.metric import dc
class conv_block(nn.Module):
    """Two stacked (3x3 conv -> InstanceNorm -> ReLU) layers.

    Spatial size is preserved (stride 1, padding 1); only the channel
    count changes from ``in_ch`` to ``out_ch``.
    """
    def __init__(self, in_ch, out_ch):
        super(conv_block, self).__init__()
        layers = []
        for c_in, c_out in ((in_ch, out_ch), (out_ch, out_ch)):
            layers.append(nn.Conv2d(c_in, c_out, kernel_size=3, stride=1,
                                    padding=1, bias=True))
            layers.append(nn.InstanceNorm2d(c_out))
            layers.append(nn.ReLU(inplace=True))
        # Same module order/indices as before, so state_dict keys match.
        self.conv = nn.Sequential(*layers)

    def forward(self, x):
        """Apply the convolutional stack."""
        return self.conv(x)
class up_conv(nn.Module):
    """Upsample by 2x, then 3x3 conv -> InstanceNorm -> ReLU."""
    def __init__(self, in_ch, out_ch):
        super(up_conv, self).__init__()
        stages = [
            nn.Upsample(scale_factor=2),
            nn.Conv2d(in_ch, out_ch, kernel_size=3, stride=1, padding=1,
                      bias=True),
            nn.InstanceNorm2d(out_ch),
            nn.ReLU(inplace=True),
        ]
        # Same module order/indices as before, so state_dict keys match.
        self.up = nn.Sequential(*stages)

    def forward(self, x):
        """Return the convolved feature map with doubled spatial dims."""
        return self.up(x)
class U_Net(nn.Module):
    """
    UNet - Basic Implementation
    Paper : https://arxiv.org/abs/1505.04597

    4-level encoder/decoder with skip connections; returns raw logits for
    ``out_ch`` classes at the input resolution (no softmax applied here).
    """
    def __init__(self, in_ch, out_ch, feat_n, loss_weight):
        super(U_Net, self).__init__()
        # Channel widths per resolution level.
        filters = [feat_n, feat_n * 2, feat_n * 4, feat_n * 8, feat_n * 16]
        self.loss_weight = loss_weight
        self.Maxpool1 = nn.MaxPool2d(kernel_size=2, stride=2)
        self.Maxpool2 = nn.MaxPool2d(kernel_size=2, stride=2)
        self.Maxpool3 = nn.MaxPool2d(kernel_size=2, stride=2)
        self.Maxpool4 = nn.MaxPool2d(kernel_size=2, stride=2)
        self.Conv1 = conv_block(in_ch, filters[0])
        self.Conv2 = conv_block(filters[0], filters[1])
        self.Conv3 = conv_block(filters[1], filters[2])
        self.Conv4 = conv_block(filters[2], filters[3])
        self.Conv5 = conv_block(filters[3], filters[4])
        # Decoder: each Up halves channels and doubles resolution; the skip
        # concatenation doubles channels again before Up_conv fuses them.
        self.Up5 = up_conv(filters[4], filters[3])
        self.Up_conv5 = conv_block(filters[4], filters[3])
        self.Up4 = up_conv(filters[3], filters[2])
        self.Up_conv4 = conv_block(filters[3], filters[2])
        self.Up3 = up_conv(filters[2], filters[1])
        self.Up_conv3 = conv_block(filters[2], filters[1])
        self.Up2 = up_conv(filters[1], filters[0])
        self.Up_conv2 = conv_block(filters[1], filters[0])
        # Final 1x1 projection to per-class logits.
        self.Conv = nn.Conv2d(filters[0], out_ch, kernel_size=1, stride=1, padding=0)
    def forward(self, x):
        """Return per-class logits with the same spatial size as ``x``."""
        e1 = self.Conv1(x)
        e2 = self.Maxpool1(e1)
        e2 = self.Conv2(e2)
        e3 = self.Maxpool2(e2)
        e3 = self.Conv3(e3)
        e4 = self.Maxpool3(e3)
        e4 = self.Conv4(e4)
        e5 = self.Maxpool4(e4)
        e5 = self.Conv5(e5)
        d5 = self.Up5(e5)
        d5 = torch.cat((e4, d5), dim=1)  # skip connection from level 4
        d5 = self.Up_conv5(d5)
        d4 = self.Up4(d5)
        d4 = torch.cat((e3, d4), dim=1)  # skip connection from level 3
        d4 = self.Up_conv4(d4)
        d3 = self.Up3(d4)
        d3 = torch.cat((e2, d3), dim=1)  # skip connection from level 2
        d3 = self.Up_conv3(d3)
        d2 = self.Up2(d3)
        d2 = torch.cat((e1, d2), dim=1)  # skip connection from level 1
        d2 = self.Up_conv2(d2)
        out = self.Conv(d2)
        # d1 = self.active(out)
        return out
    def evaluate(self, x, y):
        """Dice coefficient between the argmax prediction and the target.

        The argmax over axis 0 implies tensor2array(..., True) drops the
        batch dimension, i.e. a batch of one -- TODO confirm.
        """
        out = self.forward(x)
        out_arr = tensor2array(out, True)
        out_arr = np.argmax(out_arr, axis=0)
        y_arr = tensor2array(y, True)
        dice = dc(out_arr, y_arr)
        return dice
    def loss_function(self, x, y):
        """Weighted cross-entropy; returns (loss tensor, float log dict)."""
        out = self.forward(x)
        cross_entropy = F.cross_entropy(out, y)
        total = cross_entropy * self.loss_weight["CrossEntropy"]
        losses = {
            'Total': total.item(),
            'CrossEntropy': cross_entropy.item()
        }
        return total, losses
| 3,901 | 27.071942 | 85 | py |
RLNLocalization | RLNLocalization-main/op/data_op.py | import os
import torch
import numpy as np
import matplotlib.pyplot as plt
from PIL import Image
from scipy.ndimage import center_of_mass
from random import uniform
from torch.utils.data import Dataset
from torchvision import transforms as T
from torchvision.transforms.functional import crop, to_tensor
def load_list(text_path, view):
    """Load train/val/test sample paths for the requested view(s).

    Args:
        text_path: directory containing ``{split}_{L|R}-RLN.txt`` files.
        view: string selecting laterality; any value containing ``'left'``
            and/or ``'right'`` pulls in the corresponding file set.

    Returns:
        (train_list, val_list, test_list): lists of sample paths, with the
        left view (if requested) followed by the right view.
    """
    def _read_text(path):
        # Context manager ensures the handle is closed (the original used
        # open(path).readlines() and leaked the file object to the GC).
        with open(path, 'r') as f:
            return [line.replace('\n', '') for line in f]

    train_list, val_list, test_list = [], [], []
    # Left-then-right order matches the original if/if chain.
    for keyword, side in (('left', 'L'), ('right', 'R')):
        if keyword in view:
            train_list += _read_text(os.path.join(text_path, "train_{}-RLN.txt".format(side)))
            val_list += _read_text(os.path.join(text_path, "val_{}-RLN.txt".format(side)))
            test_list += _read_text(os.path.join(text_path, "test_{}-RLN.txt".format(side)))
    return train_list, val_list, test_list
class RLNDataset(Dataset):
    """Segmentation dataset: yields (image tensor, combined mask, item name).

    Each sample directory is expected to contain ``IMG/<file>`` and
    ``MASK/{CCA,thyroid,trachea}.jpg``; the three masks are merged into a
    single label map with values 1/2/3 (0 = background).
    """
    def __init__(self, data_list):
        self.data_list = data_list
        self.img_transform = T.Compose([
            T.Resize((256, 256), Image.BILINEAR),
            T.ToTensor(),
        ])
        # Nearest-neighbour resize keeps mask values unblended.
        self.mask_transform = T.Compose([
            T.Resize((256, 256), Image.NEAREST),
        ])
    def __getitem__(self, item):
        # Normalize Windows-style separators so split('/') works below.
        item_path = self.data_list[item].replace('\\', '/')
        img_name = os.listdir(os.path.join(item_path, "IMG"))[0]
        img_path = os.path.join(item_path, "IMG", img_name)
        # Item name built from the last three path components -- assumes
        # paths have at least six components; TODO confirm against lists.
        item_name = '{}-{}-{}'.format(*item_path.split('/')[3:])
        img = Image.open(img_path).convert('L')
        img_tensor = self.img_transform(img)
        msk_list = []
        for idx, mask_item in enumerate(["CCA", "thyroid", "trachea"]):
            msk = Image.open(os.path.join(item_path, "MASK", "{}.jpg".format(mask_item)))
            msk = self.mask_transform(msk)
            # Map 255 -> class id (idx + 1). NOTE(review): JPEG masks are
            # not strictly binary; intermediate values truncate to 0 under
            # .long() -- confirm masks are saved with clean 0/255 values.
            msk = np.array(msk) / 255 * (idx+1)
            msk_list.append(torch.from_numpy(msk).long())
        msk_tensor = torch.stack(msk_list, dim=0)
        # Where structures overlap, the class with the larger id wins.
        msk_tensor = torch.max(msk_tensor, dim=0)[0]
        return img_tensor, msk_tensor, item_name
    def __len__(self):
        return len(self.data_list)
class RLNRefineDataset(Dataset):
    """Training dataset for the RLN-coordinate refinement stage.

    Small (24px) and large (64px) patches are cropped around the
    ground-truth RLN center with a random shift of up to +/-20px; the true
    center, expressed in 64x64 large-crop coordinates, is returned as the
    regression target.
    """
    def __init__(self, data_list):
        self.data_list = data_list
        self.img_transform = T.Compose([
            T.Resize((256, 256), Image.BILINEAR),
            # T.ToTensor(),
        ])
        self.mask_transform = T.Compose([
            T.Resize((256, 256), Image.NEAREST),
        ])
    def __getitem__(self, item):
        # Normalize Windows-style separators so split('/') works below.
        item_path = self.data_list[item].replace('\\', '/')
        img_name = os.listdir(os.path.join(item_path, "IMG"))[0]
        img_path = os.path.join(item_path, "IMG", img_name)
        item_name = '{}-{}-{}'.format(*item_path.split('/')[3:])
        img = Image.open(img_path).convert('L')
        img = self.img_transform(img)
        rln_path = os.path.join(item_path, "MASK", "RLN.jpg")
        rln_msk = Image.open(rln_path).convert('L')
        rln_msk = self.mask_transform(rln_msk)
        rln_msk_arr = np.array(rln_msk)
        # Ground truth = centroid (row, col) of the RLN mask.
        rln_coord = list(center_of_mass(rln_msk_arr))
        # ==================== Single Crop =================================== #
        # top, left = rln_coord[0] - 32, rln_coord[1] - 32 # patch size is 64
        #
        # cropped_center_h, cropped_center_w = 32, 32
        # height, width = 64, 64
        #
        # random_shift_h, random_shift_w = uniform(-15, 15), uniform(-15, 15)
        #
        # top += random_shift_h
        # left += random_shift_w
        #
        # cropped_center_h -= random_shift_h
        # cropped_center_w -= random_shift_w
        #
        # cropped_img = crop(img, top=top, left=left, height=height, width=width)
        # ==================== Single Crop =================================== #
        # ==================== Multi Crop =================================== #
        # The target starts at the middle of the 64px patch; shifting the
        # crop window moves the target the opposite way inside the patch.
        cropped_center_h, cropped_center_w = 32, 32
        random_shift_h, random_shift_w = uniform(-20, 20), uniform(-20, 20)
        cropped_center_h -= random_shift_h
        cropped_center_w -= random_shift_w
        cropped_img_s = crop(
            img,
            top=rln_coord[0] + random_shift_h - 12,
            left=rln_coord[1] + random_shift_w - 12,
            height=24, width=24
        )
        # cropped_img_m = crop(
        #     img,
        #     top=rln_coord[0] + random_shift_h - 16,
        #     left=rln_coord[1] + random_shift_w - 16,
        #     height=32, width=32
        # )
        cropped_img_l = crop(
            img,
            top=rln_coord[0] + random_shift_h - 32,
            left=rln_coord[1] + random_shift_w - 32,
            height=64, width=64
        )
        # ==================== Multi Crop =================================== #
        # plt.figure()
        # plt.subplot(1, 2, 1)
        # plt.imshow(cropped_img, cmap='gray')
        # plt.scatter(x=cropped_center_w, y=cropped_center_h, s=5, c='cyan')
        # plt.subplot(1, 2, 2)
        # plt.imshow(img, cmap='gray')
        # plt.scatter(x=rln_coord[1], y=rln_coord[0], s=5, c='cyan')
        # plt.show()
        # img_tensor = to_tensor(img.convert('RGB'))
        cropped_img_s_tensor = to_tensor(cropped_img_s)
        # cropped_img_m_tensor = to_tensor(cropped_img_m)
        cropped_img_l_tensor = to_tensor(cropped_img_l)
        # Target center within the large-crop coordinate frame (row, col).
        center_coord = torch.from_numpy(np.array([cropped_center_h, cropped_center_w])).float()
        return [cropped_img_s_tensor, cropped_img_l_tensor], center_coord, item_name
    def __len__(self):
        return len(self.data_list)
class RLNRriorDataset(Dataset):
    """Test-time dataset that crops around a precomputed prior location.

    Loads the 'Weighted_Mean' coordinate from ``Prior_Results/<name>.npy``,
    crops small (24px) and large (64px) patches centered on it, and returns
    the ground-truth RLN center expressed in large-crop coordinates plus
    the crop offset ``move_coord`` = (top, left) needed to map predictions
    back to full-image space.

    NOTE(review): the name probably intends "RLNPriorDataset"; kept as-is
    because other modules import this identifier.
    """
    def __init__(self, data_list):
        self.data_list = data_list
        self.img_transform = T.Compose([
            T.Resize((256, 256), Image.BILINEAR),
            # T.ToTensor(),
        ])
        self.mask_transform = T.Compose([
            T.Resize((256, 256), Image.NEAREST),
        ])
    def __getitem__(self, item):
        # Normalize Windows-style separators so split('/') works below.
        item_path = self.data_list[item].replace('\\', '/')
        img_name = os.listdir(os.path.join(item_path, "IMG"))[0]
        img_path = os.path.join(item_path, "IMG", img_name)
        item_name = '{}-{}-{}'.format(*item_path.split('/')[3:])
        # Initial guess produced by a prior stage (relative path; assumes
        # the process runs from the project root -- TODO confirm).
        prior_result_path = os.path.join('Prior_Results', '{}-{}-{}.npy'.format(*item_path.split('/')[3:]))
        prior_result = np.load(prior_result_path, allow_pickle=True).item()
        init_coord = prior_result['Weighted_Mean']
        img = Image.open(img_path).convert('L')
        img = self.img_transform(img)
        rln_path = os.path.join(item_path, "MASK", "RLN.jpg")
        rln_msk = Image.open(rln_path).convert('L')
        rln_msk = self.mask_transform(rln_msk)
        rln_msk_arr = np.array(rln_msk)
        # Ground truth = centroid (row, col) of the RLN mask.
        rln_coord = list(center_of_mass(rln_msk_arr))
        # ==================== Single Crop =================================== #
        # top, left = init_coord[0] - 32, init_coord[1] - 32 # patch size is 64
        # height, width = 64, 64
        # cropped_rln_coord = [rln_coord[0] - top, rln_coord[1] - left]
        # cropped_img = crop(img, top=top, left=left, height=height, width=width)
        # ==================== Single Crop =================================== #
        # ==================== Multi Crop =================================== #
        # Crops are centered on the prior guess; the ground truth is then
        # re-expressed relative to the large-crop origin (top, left).
        top, left = init_coord[0] - 32, init_coord[1] - 32
        cropped_rln_coord = [rln_coord[0] - top, rln_coord[1] - left]
        cropped_img_s = crop(
            img,
            top=init_coord[0] - 12,
            left=init_coord[1] - 12,
            height=24, width=24
        )
        # cropped_img_m = crop(
        #     img,
        #     top=init_coord[0] - 16,
        #     left=init_coord[1] - 16,
        #     height=32, width=32
        # )
        cropped_img_l = crop(
            img,
            top=init_coord[0] - 32,
            left=init_coord[1] - 32,
            height=64, width=64
        )
        # ==================== Multi Crop =================================== #
        # plt.figure()
        # plt.subplot(1, 2, 1)
        # plt.imshow(cropped_img, cmap='gray')
        # plt.scatter(x=cropped_rln_coord[1], y=cropped_rln_coord[0], s=5, c='cyan')
        # plt.scatter(x=32, y=32, s=5, c='red')
        # plt.subplot(1, 2, 2)
        # plt.imshow(img, cmap='gray')
        # plt.scatter(x=rln_coord[1], y=rln_coord[0], s=5, c='cyan')
        # plt.show()
        img_tensor = to_tensor(img.convert('RGB'))
        # cropped_img_tensor = to_tensor(cropped_img)
        cropped_img_s_tensor = to_tensor(cropped_img_s)
        # cropped_img_m_tensor = to_tensor(cropped_img_m)
        cropped_img_l_tensor = to_tensor(cropped_img_l)
        center_coord = torch.from_numpy(np.array(cropped_rln_coord))
        move_coord = torch.from_numpy(np.array([top, left]))
        return img_tensor, [cropped_img_s_tensor, cropped_img_l_tensor], center_coord, move_coord, item_name
    def __len__(self):
        return len(self.data_list)
RLNLocalization | RLNLocalization-main/op/run_op.py | import os
import torch
from PIL import Image
import matplotlib.pyplot as plt
from torch.utils.data import DataLoader
from op.data_op import RLNDataset, RLNRefineDataset, RLNRriorDataset
from utils import Recorder, set_device, tensor2array
import os
import torch
import numpy as np
from medpy.metric import dc
from datetime import datetime
from op.model_op import load_model
from op.data_op import load_list
from utils import check_dir, Plotter
class Baser(object):
    """Shared setup for the train/test drivers below.

    Creates log/checkpoint/output directories, loads the data-split lists,
    builds the model on the selected device, optionally restores initial
    weights, and opens a timestamped text log plus a Plotter.
    """
    def __init__(self, args):
        self.log_path = args.log_path
        self.ckpt_path = args.ckpt_path
        self.save_path = args.save_path
        self.view = args.view
        check_dir(self.log_path)
        check_dir(self.ckpt_path)
        check_dir(self.save_path)
        self.train_list, self.val_list, self.test_list = load_list(args.text_path, args.view)
        self.device = set_device(args.gpu_id)
        self.model = load_model(args).to(self.device)
        if args.init_path:
            checkpoint = torch.load(args.init_path, map_location=self.device)
            print("Load from", args.init_path)
            self.model.load_state_dict(checkpoint["model"])
        now = datetime.now()
        # One log file per run, named by start time; created/truncated here.
        self.log = os.path.join(self.log_path, now.strftime("%m-%d-%Y-%H-%M-%S") + ".txt")
        open(self.log, "w+").close()
        self.plotter = Plotter(self.log_path)
class Trainer(Baser):
    """Training driver for the segmentation stage.

    Wraps data loaders over ``RLNDataset``, an Adam optimizer with a
    per-epoch StepLR decay, loss bookkeeping via ``Recorder`` and metric
    plotting via the inherited ``Plotter``.
    """
    def __init__(self, args):
        super(Trainer, self).__init__(args)
        train_set = RLNDataset(self.train_list)
        val_set = RLNDataset(self.val_list)
        self.train_loader = DataLoader(train_set, batch_size=args.batch_size, shuffle=True, num_workers=0)
        self.val_loader = DataLoader(val_set, batch_size=1, shuffle=False)
        self.optimizer = torch.optim.Adam(self.model.parameters(),
                                          lr=args.lr, weight_decay=args.weight_decay)
        self.recorder = Recorder(["Total", "CrossEntropy"])
        self.epoch_count = args.start_epoch
        self.args = args
        # Decays the learning rate by ``gamma`` once per epoch
        # (scheduler.step() is invoked at the end of train()).
        self.scheduler = torch.optim.lr_scheduler.StepLR(self.optimizer, 1, gamma=args.gamma)
        if args.reuse:
            self.load_weight(args.reuse)

    def load_weight(self, reuse):
        """Restore model and optimizer state.

        Args:
            reuse: an epoch number or the string "best" (resolved inside
                ``ckpt_path``), or a full checkpoint path.
        """
        if isinstance(reuse, int) or reuse == "best":
            weight_path = os.path.join(self.ckpt_path, "ckpt-{}.pth".format(reuse))
        elif isinstance(reuse, str):
            weight_path = reuse
        else:
            # Kept as NameError for backward compatibility with callers.
            raise NameError
        checkpoint = torch.load(weight_path, map_location=self.device)
        self.model.load_state_dict(checkpoint["model"])
        self.optimizer.load_state_dict(checkpoint["optimizer"])
        print("Load weight with {}".format(weight_path))

    def val(self):
        """Evaluate on the validation split; returns the mean metric."""
        print("Evaluating...")
        self.model.eval()
        running_metric = 0.0
        count = 0
        for idx, (x, y, subj_id) in enumerate(self.val_loader):
            with torch.no_grad():
                metric = self.model.evaluate(x.to(self.device), y.to(self.device))
            running_metric += metric
            count += 1
        running_metric /= count
        self.plotter.update({"val_metric": running_metric})
        self.plotter.send()
        return running_metric

    def train(self):
        """Run one training epoch; returns the running 'Total' loss."""
        self.model.train()
        self.recorder.reset()
        for idx, (x, y, subj_id) in enumerate(self.train_loader):
            loss, loss_info = self.model.loss_function(x.to(self.device), y.to(self.device))
            self.optimizer.zero_grad()
            loss.backward()
            self.optimizer.step()
            self.recorder.update({
                "Total": loss_info["Total"],
                "CrossEntropy": loss_info["CrossEntropy"],
            })
            if idx % self.args.print_freq == 0 and idx != 0:
                info = "Epoch {} Batch {} Total Loss: {:.4f} CrossEntropy Loss: {:.4f} ".format(
                    self.epoch_count, idx,
                    self.recorder.call("Total"), self.recorder.call("CrossEntropy"))
                print(info)
                # Fix: the original used ``open(...).write(...)`` and relied
                # on the GC to close the handle; use a context manager.
                with open(self.log, "a+") as log_file:
                    log_file.write(info + "\n")
        self.plotter.update({
            "TotalLoss": self.recorder.call("Total"),
            "CrossEntropyLoss": self.recorder.call("CrossEntropy"),
        })
        self.plotter.send()
        self.scheduler.step()
        return self.recorder.call("Total")

    def update_count(self, count_num=1):
        """Advance the epoch counter used in log messages."""
        self.epoch_count += count_num

    def save_weight(self, attr):
        """Save model+optimizer state to ckpt-<attr>.pth under ckpt_path."""
        weight_dict = dict()
        weight_dict["model"] = self.model.state_dict()
        weight_dict["optimizer"] = self.optimizer.state_dict()
        torch.save(weight_dict, os.path.join(self.ckpt_path, "ckpt-{}.pth".format(attr)))
        print("Saving model to {}".format(os.path.join(self.ckpt_path, "ckpt-{}.pth".format(attr))))
class Tester(Baser):
    """Evaluates a trained segmentation model on the test split.

    Saves per-case predicted label maps (PNG, class id * 50 for
    visibility) plus a side-by-side figure, then prints mean Dice per
    structure.
    """
    def __init__(self, args):
        super(Tester, self).__init__(args)
        test_set = RLNDataset(self.test_list)
        self.test_loader = DataLoader(test_set, batch_size=1, shuffle=False)
        self.results_path = args.results_path
        check_dir(self.results_path)
        self.args = args
        checkpoint = torch.load(args.weight_path, map_location=self.device)
        self.model.load_state_dict(checkpoint["model"])
        print("Load weight with {}".format(args.weight_path))
    def test(self):
        """Run inference on every test case and report per-class mean Dice."""
        print("Evaluating...")
        self.model.eval()
        # Rows: Dice for CCA / thyroid / trachea; one column per case.
        metric_arr = np.zeros((3, len(self.test_loader)), dtype=np.float64)
        for idx, (x, y, subj_id) in enumerate(self.test_loader):
            with torch.no_grad():
                out = self.model(x.to(self.device))
            img = tensor2array(x, True) * 255
            out = tensor2array(out, True)
            out = np.argmax(out, axis=0).astype(np.int32)  # logits -> label map
            # Class ids scaled by 50 so the saved PNG is visually inspectable.
            pil_out = Image.fromarray(out * 50).convert('L')
            pil_out.save(os.path.join(self.results_path, "{}.png".format(subj_id[0])))
            y = tensor2array(y, True)
            metric_arr[:, idx] = np.array([
                dc(out == 1, y == 1),
                dc(out == 2, y == 2),
                dc(out == 3, y == 3)
            ])
            # Diagnostic figure: input / prediction / ground truth.
            plt.figure()
            plt.subplot(1, 3, 1)
            plt.imshow(img, cmap="gray")
            plt.title("Img")
            plt.xticks([])
            plt.yticks([])
            plt.subplot(1, 3, 2)
            plt.imshow(out)
            plt.title("Out")
            plt.xticks([])
            plt.yticks([])
            plt.subplot(1, 3, 3)
            plt.imshow(y)
            plt.title("Msk")
            plt.xticks([])
            plt.yticks([])
            # plt.show()
            plt.savefig(os.path.join(self.save_path, "{}.png".format(subj_id[0])))
            plt.close()
            print(subj_id, metric_arr[:, idx])
        avg_metric = np.mean(metric_arr, axis=-1)
        print("CCA\tthyroid\ttrachea")
        print("{:.3f}\t{:.3f}\t{:.3f}".format(*avg_metric.tolist()))
class Refiner(Baser):
    """Training driver for the coordinate-refinement model.

    Same loop structure as ``Trainer`` but over ``RLNRefineDataset``
    (multi-crop inputs) and with a smooth-L1 "Regression" loss.
    """
    def __init__(self, args):
        super(Refiner, self).__init__(args)
        train_set = RLNRefineDataset(self.train_list)
        val_set = RLNRefineDataset(self.val_list)
        self.train_loader = DataLoader(train_set, batch_size=args.batch_size, shuffle=True, num_workers=0)
        self.val_loader = DataLoader(val_set, batch_size=1, shuffle=False)
        self.optimizer = torch.optim.Adam(self.model.parameters(),
                                          lr=args.lr, weight_decay=args.weight_decay)
        self.recorder = Recorder(["Total", "Regression"])
        self.epoch_count = args.start_epoch
        self.args = args
        self.scheduler = torch.optim.lr_scheduler.StepLR(self.optimizer, 1, gamma=args.gamma)  # todo useless
        if args.reuse:
            self.load_weight(args.reuse)
    def load_weight(self, reuse):
        """Restore model/optimizer state from an epoch id, "best", or a path."""
        if isinstance(reuse, int) or reuse == "best":
            weight_path = os.path.join(self.ckpt_path, "ckpt-{}.pth".format(reuse))
        elif isinstance(reuse, str):
            weight_path = reuse
        else:
            raise NameError
        checkpoint = torch.load(weight_path, map_location=self.device)
        self.model.load_state_dict(checkpoint["model"])
        self.optimizer.load_state_dict(checkpoint["optimizer"])
        print("Load weight with {}".format(weight_path))
    def val(self):
        """Mean evaluation metric over the validation split."""
        print("Evaluating...")
        self.model.eval()
        running_metric = 0.0
        count = 0
        for idx, (xs, y, subj_id) in enumerate(self.val_loader):
            with torch.no_grad():
                # xs is a list of crops (small, large); move each to device.
                metric = self.model.evaluate([x.to(self.device) for x in xs], y.to(self.device))
            running_metric += metric
            count += 1
        running_metric /= count
        self.plotter.update({"val_metric": running_metric})
        self.plotter.send()
        return running_metric
    def train(self):
        """Run one training epoch; returns the running 'Total' loss."""
        self.model.train()
        self.recorder.reset()
        for idx, (xs, y, subj_id) in enumerate(self.train_loader):
            loss, loss_info = self.model.loss_function([x.to(self.device) for x in xs], y.to(self.device))
            self.optimizer.zero_grad()
            loss.backward()
            self.optimizer.step()
            self.recorder.update({
                "Total": loss_info["Total"],
                "Regression": loss_info["Regression"],
            })
            if idx % self.args.print_freq == 0 and idx != 0:
                info = "Epoch {} Batch {} Total Loss: {:.4f} Regression Loss: {:.4f} ".format(
                    self.epoch_count, idx,
                    self.recorder.call("Total"), self.recorder.call("Regression"))
                print(info)
                # NOTE(review): handle is left for the GC to close; prefer
                # a with-statement around this write.
                open(self.log, "a+").write(info + "\n")
        self.plotter.update({
            "TotalLoss": self.recorder.call("Total"),
            "RegressionLoss": self.recorder.call("Regression"),
        })
        self.plotter.send()
        self.scheduler.step()
        return self.recorder.call("Total")
    def update_count(self, count_num=1):
        """Advance the epoch counter used in log messages."""
        self.epoch_count += count_num
    def save_weight(self, attr):
        """Save model+optimizer state to ckpt-<attr>.pth under ckpt_path."""
        weight_dict = dict()
        weight_dict["model"] = self.model.state_dict()
        weight_dict["optimizer"] = self.optimizer.state_dict()
        torch.save(weight_dict, os.path.join(self.ckpt_path, "ckpt-{}.pth".format(attr)))
        print("Saving model to {}".format(os.path.join(self.ckpt_path, "ckpt-{}.pth".format(attr))))
class RefineTester(Baser):
    """Evaluates the refinement model starting from precomputed priors.

    Predictions and targets are both mapped back to full-image coordinates
    via the crop offset ``move`` before the L2 error is computed; a figure
    is saved per case and mean/std plus a 15-pixel hit rate are printed.
    """
    def __init__(self, args):
        super(RefineTester, self).__init__(args)
        test_set = RLNRriorDataset(self.test_list)
        self.test_loader = DataLoader(test_set, batch_size=1, shuffle=False)
        self.results_path = args.results_path
        check_dir(self.results_path)
        self.args = args
        checkpoint = torch.load(args.weight_path, map_location=self.device)
        self.model.load_state_dict(checkpoint["model"])
        print("Load weight with {}".format(args.weight_path))
    def test(self):
        """Run localization on every test case and report L2 statistics."""
        print("Evaluating...")
        self.model.eval()
        metric_arr = np.zeros((1, len(self.test_loader)), dtype=np.float64)
        for idx, (img, xs, y, move, subj_id) in enumerate(self.test_loader):
            with torch.no_grad():
                out = self.model([x.to(self.device) for x in xs])
            img = tensor2array(img, True)
            out = tensor2array(out, True)
            # Predictions are clamped to the 64x64 crop coordinate range.
            out = np.clip(out, a_min=0, a_max=64)
            y = tensor2array(y, True)
            move = tensor2array(move, True)
            # Shift both back into full-image coordinates.
            y += move
            out += move
            metric_arr[:, idx] = np.linalg.norm(y - out)
            # plt.figure()
            # plt.imshow(img, cmap="gray")
            # plt.scatter(x=out[1], y=out[0], s=5, c='cyan', label='out')
            # plt.scatter(x=y[1], y=y[0], s=5, c='red', label='y')
            # plt.title("Cropped Img")
            # plt.legend()
            # plt.text(55, 15, '{:.3f}'.format(np.linalg.norm(y - out)))
            # # plt.show()
            # plt.savefig(os.path.join(self.save_path, "{}.png".format(subj_id[0])))
            # plt.close()
            # print(subj_id, metric_arr[:, idx])
            # Publication-style overlay: prediction vs ground truth.
            plt.figure()
            plt.imshow(img[0], cmap="gray")
            plt.scatter(x=out[1], y=out[0], s=30, c='cyan', label='prediction')
            plt.scatter(x=y[1], y=y[0], s=30, c='red', label='ground truth')
            # plt.title("Cropped Img")
            plt.legend(fontsize=18)
            plt.text(55, 15, '{:.3f}'.format(np.linalg.norm(y - out)))
            plt.axis('off')
            # plt.show()
            plt.savefig(os.path.join(self.save_path, "{}.png".format(subj_id[0])), dpi=300, bbox_inches='tight',
                        pad_inches=0.0)
            plt.close()
            print(subj_id, metric_arr[:, idx])
        avg_metric = np.mean(metric_arr, axis=-1)
        std_metric = np.std(metric_arr, axis=-1)
        print("L2 Dist")
        print("{:.4f}-{:.4f}".format(float(avg_metric), float(std_metric)))
        print("Hit 15")
        print("{:.3f}".format(np.mean(metric_arr < 15)))
| 13,171 | 34.6 | 112 | py |
Wasserstein2Barycenters | Wasserstein2Barycenters-main/src/distributions.py | import torch
import numpy as np
from scipy.linalg import sqrtm
import sklearn.datasets
import random
def symmetrize(X):
    """Project ``X`` onto the space of symmetric matrices: (X + X^T) / 2."""
    return np.real(0.5 * (X + X.T))
class Sampler:
    """Base class for distribution samplers.

    Subclasses must implement :meth:`sample`; the ``_estimate_*`` helpers
    then derive empirical mean/covariance statistics from drawn batches.
    """
    def __init__(
        self, device='cuda',
        requires_grad=False,
    ):
        self.device = device
        self.requires_grad = requires_grad

    def sample(self, batch_size=5):
        """Draw ``batch_size`` samples; must be overridden by subclasses.

        The original silently returned None, which surfaced later as a
        confusing AttributeError inside _estimate_mean/_estimate_cov;
        fail fast instead.
        """
        raise NotImplementedError

    def _estimate_mean(self, num_samples=100000):
        """Monte-Carlo estimate of the mean, stored as float32 ``self.mean``."""
        batch = self.sample(num_samples).cpu().detach().numpy()
        self.mean = batch.mean(axis=0).astype(np.float32)

    def _estimate_cov(self, num_samples=100000):
        """Monte-Carlo estimate of the covariance and total variance."""
        batch = self.sample(num_samples).cpu().detach().numpy()
        self.cov = np.cov(batch.T).astype(np.float32)
        self.var = np.trace(self.cov)
class StandardNormalSampler(Sampler):
    """Sampler for the standard Gaussian N(0, I) in ``dim`` dimensions."""
    def __init__(
        self, dim=2, device='cuda',
        requires_grad=False
    ):
        super(StandardNormalSampler, self).__init__(device, requires_grad)
        self.dim = dim
        # Mean and covariance are known in closed form; no estimation needed.
        self.mean = np.zeros(self.dim, dtype=np.float32)
        self.cov = np.identity(self.dim, dtype=np.float32)
        self.var = self.dim

    def sample(self, batch_size=10):
        """Draw ``batch_size`` i.i.d. standard-normal vectors."""
        return torch.randn(batch_size, self.dim, device=self.device,
                           requires_grad=self.requires_grad)
class NormalSampler(Sampler):
    """Sampler for a general Gaussian N(mean, cov).

    The covariance can be given directly (``cov``), as a factor ``weight``
    (cov = weight @ weight.T), or defaults to identity.  Samples are the
    affine push-forward z @ weight.T + mean of z ~ N(0, I).
    """
    def __init__(
        self, mean, cov=None, weight=None, device='cuda',
        requires_grad=False
    ):
        super(NormalSampler, self).__init__(device=device, requires_grad=requires_grad)
        self.mean = np.array(mean, dtype=np.float32)
        self.dim = self.mean.shape[0]
        if weight is not None:
            weight = np.array(weight, dtype=np.float32)
        # ``cov`` wins over ``weight`` if both are given.
        if cov is not None:
            self.cov = np.array(cov, dtype=np.float32)
        elif weight is not None:
            self.cov = weight @ weight.T
        else:
            self.cov = np.eye(self.dim, dtype=np.float32)
        if weight is None:
            # Symmetric matrix square root, so weight @ weight.T == cov.
            weight = symmetrize(sqrtm(self.cov))
        self.var = np.trace(self.cov)
        self.weight = torch.tensor(weight, device=self.device, dtype=torch.float32)
        self.bias = torch.tensor(self.mean, device=self.device, dtype=torch.float32)
    def sample(self, batch_size=4):
        """Draw ``batch_size`` Gaussian samples on ``self.device``."""
        batch = torch.randn(batch_size, self.dim, device=self.device)
        with torch.no_grad():
            batch = batch @ self.weight.T
            if self.bias is not None:
                batch += self.bias
        batch.requires_grad_(self.requires_grad)
        return batch
class CubeUniformSampler(Sampler):
    """Uniform distribution on a ``dim``-dimensional cube.

    ``centered`` puts the mean at the origin (otherwise 0.5 per axis);
    ``normalized`` rescales the cube so the total variance equals ``dim``
    (unit variance per coordinate) instead of dim/12.
    """
    def __init__(
        self, dim=1, centered=False, normalized=False, device='cuda',
        requires_grad=False
    ):
        super(CubeUniformSampler, self).__init__(
            device=device, requires_grad=requires_grad
        )
        self.dim = dim
        self.centered = centered
        self.normalized = normalized
        self.var = self.dim if self.normalized else (self.dim / 12)
        self.cov = np.eye(self.dim, dtype=np.float32) if self.normalized else np.eye(self.dim, dtype=np.float32) / 12
        self.mean = np.zeros(self.dim, dtype=np.float32) if self.centered else .5 * np.ones(self.dim, dtype=np.float32)
        self.bias = torch.tensor(self.mean, device=self.device)
    def sample(self, batch_size=10):
        """Draw uniform samples, rescaled/shifted to match mean/var above."""
        # (U - 0.5) has total variance dim/12; the prefactor rescales it to
        # self.var before the mean offset is added.
        return np.sqrt(self.var) * (torch.rand(
            batch_size, self.dim, device=self.device,
            requires_grad=self.requires_grad
        ) - .5) / np.sqrt(self.dim / 12) + self.bias
class BoxUniformSampler(Sampler):
    # A uniform box with axes components and the range on each
    # axis i is [a_min[i], a_max[i]].
    def __init__(
        self, components, a_min, a_max, estimate_size=100000,
        device='cuda', requires_grad=False
    ):
        """``components`` maps box coordinates to output space; mean and
        covariance are estimated empirically from ``estimate_size`` draws."""
        super(BoxUniformSampler, self).__init__(
            device=device, requires_grad=requires_grad
        )
        self.dim = components.shape[1]
        self.components = torch.from_numpy(components).float().to(device=device)
        self.a_min = torch.from_numpy(a_min).float().to(device=device)
        self.a_max = torch.from_numpy(a_max).float().to(device=device)
        self._estimate_mean(estimate_size)
        self._estimate_cov(estimate_size)
    def sample(self, batch_size):
        """Uniform draw inside the box, then a linear map through components."""
        with torch.no_grad():
            batch = torch.rand(
                batch_size, self.dim,
                device=self.device
            )
            # Stretch U[0,1) per axis into [a_min, a_max).
            batch = (torch.unsqueeze(self.a_min, 0) +
                     batch * torch.unsqueeze(self.a_max - self.a_min, 0))
            batch = torch.matmul(batch, self.components)
        # NOTE(review): torch.tensor(tensor, ...) copies and triggers a
        # UserWarning; clone().detach() is the recommended form.
        return torch.tensor(
            batch, device=self.device,
            requires_grad=self.requires_grad
        )
class EmpiricalSampler(Sampler):
    """Sampler that draws (with re-use) from a fixed set of data points.

    Args:
        data: numpy array of shape (N, D); stored on ``device`` as float32.
        estimate_size: number of draws used to estimate mean/covariance.
    """
    def __init__(
        self, data, estimate_size=100000,
        device='cuda', requires_grad=False
    ):
        super(EmpiricalSampler, self).__init__(
            device=device, requires_grad=requires_grad
        )
        # data is a np array NxD
        self.dim = data.shape[1]
        self.num_points = data.shape[0]
        self.data = torch.from_numpy(data).float().to(device=device)
        self._estimate_mean(estimate_size)
        self._estimate_cov(estimate_size)

    def sample(self, batch_size):
        """Return ``batch_size`` rows drawn from the stored data.

        If ``batch_size`` exceeds the number of stored points, every point
        is used once (in random order) and the remainder is filled with
        uniformly random repeats.
        """
        inds = torch.randperm(self.num_points)
        if batch_size <= self.num_points:
            inds = inds[:batch_size]
        else:
            # Fix: torch.randint's size argument must be a tuple; the
            # original passed a plain int `(batch_size - self.num_points)`
            # (missing trailing comma), raising TypeError on this branch.
            additional_inds = torch.randint(
                0, self.num_points, (batch_size - self.num_points,)
            )
            inds = torch.cat([inds, additional_inds], dim=0)
        inds_repeated = torch.unsqueeze(inds, 1).repeat(1, self.dim)
        batch = torch.gather(self.data, 0, inds_repeated.to(device=self.device))
        # clone().detach() replaces torch.tensor(tensor, ...), which PyTorch
        # warns against; semantics (fresh copy on self.device) are unchanged.
        return batch.clone().detach().requires_grad_(self.requires_grad)
class TensorDatasetSampler(Sampler):
    """Sampler over an in-memory dataset with an optional transform.

    The dataset is kept on ``storage`` (e.g. CPU) in ``storage_dtype``;
    batches are moved to ``device`` as float32 before ``transform`` runs.
    """
    def __init__(
        self, dataset, transform=None, storage='cpu', storage_dtype=torch.float,
        device='cuda', requires_grad=False, estimate_size=100000,
    ):
        super(TensorDatasetSampler, self).__init__(
            device=device, requires_grad=requires_grad
        )
        self.storage = storage
        if transform is not None:
            self.transform = transform
        else:
            self.transform = lambda t: t  # identity by default
        self.storage_dtype = storage_dtype
        self.dataset = torch.tensor(
            dataset, device=storage, dtype=storage_dtype, requires_grad=False
        )
        # Infer the (post-transform) sample dimension from a dry run.
        self.dim = self.sample(1).shape[1]
        self._estimate_mean(estimate_size)
        self._estimate_cov(estimate_size)
    def sample(self, batch_size=10):
        """Draw ``batch_size`` items with replacement (all items if falsy)."""
        if batch_size:
            ind = random.choices(range(len(self.dataset)), k=batch_size)
        else:
            ind = range(len(self.dataset))
        with torch.no_grad():
            batch = self.transform(torch.tensor(
                self.dataset[ind], device=self.device,
                dtype=torch.float32, requires_grad=False
            ))
        if self.requires_grad:
            batch.requires_grad_(True)
        return batch
class BallCrustUniformSampler(Sampler):
    """Uniform distribution over a spherical shell r_min <= |x| <= r_max."""
    def __init__(
        self, dim=2, r_min=0.8, r_max=1.2, estimate_size=100000,
        device='cuda', requires_grad=False
    ):
        super(BallCrustUniformSampler, self).__init__(
            device=device, requires_grad=requires_grad
        )
        self.dim = dim
        assert r_min >= 0
        assert r_min < r_max
        self.r_min, self.r_max = r_min, r_max
        self._estimate_mean(estimate_size)
        self._estimate_cov(estimate_size)
    def sample(self, batch_size=10):
        """Uniform direction times an inverse-CDF radius (uniform in volume)."""
        with torch.no_grad():
            batch = torch.randn(
                batch_size, self.dim,
                device=self.device
            )
            batch /= torch.norm(batch, dim=1)[:, None]  # project onto unit sphere
            # ratio = (r_min / r_max)^dim; drawing r^dim uniformly over
            # [ratio, 1] makes the density uniform over the shell volume.
            ratio = (1 - (self.r_max - self.r_min) / self.r_max) ** self.dim
            r = (torch.rand(
                batch_size, device=self.device
            ) * (1 - ratio) + ratio) ** (1. / self.dim)
        return torch.tensor(
            (batch.transpose(0, 1) * r * self.r_max).transpose(0, 1),
            device=self.device,
            requires_grad=self.requires_grad
        )
class MixN2GaussiansSampler(Sampler):
    """Mixture of n*n Gaussians centered on a regular 2-D grid.

    Grid spacing is ``step`` and every component has std ``std``.
    """
    def __init__(self, n=5, std=1, step=9, device='cuda', estimate_size=100000,
                 requires_grad=False
                 ):
        super(MixN2GaussiansSampler, self).__init__(
            device=device, requires_grad=requires_grad
        )
        self.dim = 2
        self.std, self.step = std, step
        self.n = n
        # n x n grid of unit-spaced centers, symmetric around the origin.
        grid_1d = np.linspace(-(n-1) / 2., (n-1) / 2., n)
        xx, yy = np.meshgrid(grid_1d, grid_1d)
        centers = np.stack([xx, yy]).reshape(2, -1).T
        self.centers = torch.tensor(
            centers,
            device=self.device,
            dtype=torch.float32
        )
        self._estimate_mean(estimate_size)
        self._estimate_cov(estimate_size)
    def sample(self, batch_size=10):
        """Pick a random center per sample and add N(0, std^2) noise."""
        batch = torch.randn(
            batch_size, self.dim,
            device=self.device
        )
        indices = random.choices(range(len(self.centers)), k=batch_size)
        batch *= self.std
        batch += self.step * self.centers[indices, :]
        return torch.tensor(
            batch, device=self.device,
            requires_grad=self.requires_grad
        )
class CubeCrustUniformSampler(Sampler):
    """Distribution over the crust between two nested axis-aligned cubes."""
    def __init__(
        self, dim=2, r_min=0.8, r_max=1.2, estimate_size=100000, device='cuda',
        requires_grad=False
    ):
        super(CubeCrustUniformSampler, self).__init__(
            device=device, requires_grad=requires_grad
        )
        self.dim = dim
        assert r_min >= 0
        assert r_min < r_max
        self.r_min, self.r_max = r_min, r_max
        self._estimate_mean(estimate_size)
        self._estimate_cov(estimate_size)
    def sample(self, batch_size=10):
        """Sample a point on the unit-cube surface, then scale its radius."""
        with torch.no_grad():
            batch = 2 * torch.rand(
                batch_size, self.dim,
                device=self.device
            ) - 1
            # Force one random coordinate to +/-1 (keeping its sign) so the
            # point lies on the surface of the unit cube.
            axes = torch.randint(0, self.dim, size=(batch_size, 1), device=self.device)
            batch.scatter_(
                1, axes,
                2 * ((batch.gather(1, axes) > 0)).type(torch.float32) - 1
            )
            # Same inverse-CDF radius law as BallCrustUniformSampler.
            ratio = (1 - (self.r_max - self.r_min) / self.r_max) ** self.dim
            r = (torch.rand(
                batch_size, device=self.device
            ) * (1 - ratio) + ratio) ** (1. / self.dim)
        return torch.tensor(
            (batch.transpose(0, 1) * self.r_max * r).transpose(0, 1),
            device=self.device,
            requires_grad=self.requires_grad
        )
class SwissRollSampler(Sampler):
    """2-D swiss-roll distribution.

    Uses sklearn's 3-D swiss roll, keeps the x/z columns and scales by
    1/7.5; mean/covariance are estimated empirically.
    """
    def __init__(
        self, estimate_size=100000, device='cuda', requires_grad=False
    ):
        super(SwissRollSampler, self).__init__(
            device=device, requires_grad=requires_grad
        )
        self.dim = 2
        self._estimate_mean(estimate_size)
        self._estimate_cov(estimate_size)
    def sample(self, batch_size=10):
        """Draw ``batch_size`` noisy swiss-roll points."""
        batch = sklearn.datasets.make_swiss_roll(
            n_samples=batch_size,
            noise=0.8
        )[0].astype(np.float32)[:, [0, 2]] / 7.5
        return torch.tensor(
            batch, device=self.device,
            requires_grad=self.requires_grad
        )
class Mix8GaussiansSampler(Sampler):
    """Mixture of 8 Gaussians on a circle of radius ``r``.

    Each component has std ``std``; ``with_central`` adds a 9th component
    at the origin.
    """
    def __init__(
        self, with_central=False, std=1, r=12,
        estimate_size=100000,
        device='cuda', requires_grad=False
    ):
        super(Mix8GaussiansSampler, self).__init__(
            device=device, requires_grad=requires_grad
        )
        self.dim = 2
        self.std, self.r = std, r
        self.with_central = with_central
        # Unit directions at 45-degree increments around the circle.
        centers = [
            (1, 0), (-1, 0), (0, 1), (0, -1),
            (1. / np.sqrt(2), 1. / np.sqrt(2)),
            (1. / np.sqrt(2), -1. / np.sqrt(2)),
            (-1. / np.sqrt(2), 1. / np.sqrt(2)),
            (-1. / np.sqrt(2), -1. / np.sqrt(2))
        ]
        if self.with_central:
            centers.append((0, 0))
        self.centers = torch.tensor(
            centers, device=self.device
        )
        self._estimate_mean(estimate_size)
        self._estimate_cov(estimate_size)
    def sample(self, batch_size=10):
        """Pick a random center per sample and add N(0, std^2) noise."""
        with torch.no_grad():
            batch = torch.randn(
                batch_size, self.dim,
                device=self.device
            )
            indices = random.choices(range(len(self.centers)), k=batch_size)
            batch *= self.std
            batch += self.r * self.centers[indices, :]
        if self.requires_grad:
            batch.requires_grad_(True)
        return batch
class Transformer(object):
    """Base class for push-forward transformations of a sampler.

    Stores the target torch device and whether sampled batches should
    require gradients; subclasses implement ``fit``/``sample``.
    """

    def __init__(self, device='cuda', requires_grad=False):
        self.device = device
        self.requires_grad = requires_grad
class LinearTransformer(Transformer):
    """Affine push-forward x -> weight @ x + bias of a base sampler.

    ``fit`` computes the transformed mean/covariance in closed form from
    the base sampler's statistics.
    """
    def __init__(
        self, weight, bias=None, base_sampler=None,
        device='cuda',
        requires_grad=False
    ):
        super(LinearTransformer, self).__init__(
            device=device,
            requires_grad=requires_grad
        )
        self.fitted = False
        self.dim = weight.shape[0]
        self.weight = torch.tensor(weight, device=device, dtype=torch.float32, requires_grad=False)
        if bias is not None:
            self.bias = torch.tensor(bias, device=device, dtype=torch.float32, requires_grad=False)
        else:
            self.bias = torch.zeros(self.dim, device=device, dtype=torch.float32, requires_grad=False)
        if base_sampler is not None:
            self.fit(base_sampler)
    def fit(self, base_sampler):
        """Attach a base sampler and derive the push-forward mean/cov."""
        self.base_sampler = base_sampler
        weight, bias = self.weight.cpu().numpy(), self.bias.cpu().numpy()
        # Closed-form statistics of the affine map: W m + b, W C W^T.
        self.mean = weight @ self.base_sampler.mean + bias
        self.cov = weight @ self.base_sampler.cov @ weight.T
        self.var = np.trace(self.cov)
        self.fitted = True
        return self
    def sample(self, batch_size=4):
        """Sample from the base sampler and apply the affine map."""
        assert self.fitted == True  # fit() must be called first
        # NOTE(review): torch.tensor(tensor, ...) copies and warns;
        # clone().detach() is the recommended form.
        batch = torch.tensor(
            self.base_sampler.sample(batch_size),
            device=self.device, requires_grad=False
        )
        with torch.no_grad():
            batch = batch @ self.weight.T
            if self.bias is not None:
                batch += self.bias
        batch = batch.detach()
        batch.requires_grad_(self.requires_grad)
        return batch
class StandardNormalScaler(Transformer):
    """Whitening transform: centers a base sampler and multiplies by
    cov^{-1/2}, so the output has zero mean and (approximately) identity
    covariance.
    """

    def __init__(
        self, base_sampler=None, device='cuda', requires_grad=False
    ):
        super(StandardNormalScaler, self).__init__(
            device=device, requires_grad=requires_grad
        )
        if base_sampler is not None:
            self.fit(base_sampler)

    def fit(self, base_sampler, batch_size=1000):
        """Compute the whitening weight/bias from the base sampler's moments.

        Returns self for chaining. `batch_size` is currently unused.
        """
        self.base_sampler = base_sampler
        self.dim = self.base_sampler.dim
        # Centering term: the base distribution's mean.
        self.bias = torch.tensor(
            self.base_sampler.mean, device=self.device, dtype=torch.float32
        )
        # Whitening matrix cov^{-1/2}, symmetrized for numerical stability.
        weight = symmetrize(np.linalg.inv(sqrtm(self.base_sampler.cov)))
        self.weight = torch.tensor(weight, device=self.device, dtype=torch.float32)
        self.mean = np.zeros(self.dim, dtype=np.float32)
        # Should be ~identity up to numerical error.
        self.cov = weight @ self.base_sampler.cov @ weight.T
        self.var = np.trace(self.cov)
        return self

    def sample(self, batch_size=10):
        """Sample from the base sampler, then center and whiten in place."""
        batch = torch.tensor(
            self.base_sampler.sample(batch_size),
            device=self.device, requires_grad=False
        )
        with torch.no_grad():
            batch -= self.bias
            batch @= self.weight
        if self.requires_grad:
            batch.requires_grad_(True)
        return batch
Wasserstein2Barycenters | Wasserstein2Barycenters-main/src/benchmarks.py | import torch
import torch.nn as nn
import numpy as np
from scipy.stats import ortho_group
from scipy.linalg import sqrtm
from .tools import calculate_frechet_distance
from tqdm import tqdm_notebook as tqdm
from . import distributions
def symmetrize(X):
    """Project a (possibly complex) square matrix onto real symmetric matrices."""
    sym = 0.5 * (X + X.T)
    return np.real(sym)
def get_barycenter_cov(covs, alphas, max_iter=1000, tol=1e-8, verbose=True):
    """Covariance of the Wasserstein-2 barycenter of centered Gaussians.

    Runs the classic fixed-point iteration on the barycenter covariance for
    Gaussians with covariances `covs` and weights `alphas`. Stops after
    `max_iter` iterations or once the update falls below `tol` in max-abs
    norm. With `verbose=True` a tqdm progress bar wraps the iteration.
    """
    def _sym(M):
        # Real symmetric part — kills numerical asymmetry from sqrtm/inv.
        return np.real((M + M.T) / 2)

    bar_cov = np.eye(covs[0].shape[0], dtype=np.float32)
    iterator = tqdm(range(max_iter)) if verbose else range(max_iter)
    for _ in iterator:
        prev = bar_cov
        root = _sym(sqrtm(bar_cov))
        inv_root = _sym(np.linalg.inv(root))
        # Weighted sum of sqrtm(S^{1/2} C_k S^{1/2}); a batched sqrtm would
        # remove this Python loop if one existed.
        acc = 0.
        for weight, cov in zip(alphas, covs):
            acc += weight * _sym(sqrtm(root @ cov @ root))
        acc = _sym(acc)
        acc = acc @ acc
        bar_cov = _sym(inv_root @ acc @ inv_root)
        if np.max(np.abs(bar_cov - prev)) < tol:
            break
    return bar_cov
def get_linear_transport(mean1, cov1, mean2, cov2):
    """Affine OT map between Gaussians N(mean1, cov1) -> N(mean2, cov2).

    Returns (weight, bias) of the Monge map x -> weight @ x + bias:
    weight = cov1^{-1/2} (cov1^{1/2} cov2 cov1^{1/2})^{1/2} cov1^{-1/2}.
    """
    def _sym(M):
        # Real symmetric part — kills numerical asymmetry from sqrtm/inv.
        return np.real((M + M.T) / 2)

    root1 = _sym(sqrtm(cov1))
    inv_root1 = _sym(np.linalg.inv(root1))
    middle = _sym(sqrtm(root1 @ cov2 @ root1))
    weight = inv_root1 @ middle @ inv_root1
    bias = mean2 - weight @ mean1
    return weight, bias
class Benchmark:
    """Marker base class for barycenter benchmarks."""
    pass
class Wasserstein2BarycenterBenchmark(Benchmark):
    """Benchmark for Wasserstein-2 barycenter estimation over `samplers`.

    With `compute_gaussian=True`, computes the Gaussian (Bures) barycenter of
    the samplers' moment approximations, the value of the barycenter
    functional, and linear transport maps to/from the barycenter.
    """

    def __init__(
        self, samplers, bar_sampler=None, alphas=None,
        compute_gaussian=True, max_iter=1000, tol=1e-6,
        device='cuda', requires_grad=False, verbose=False
    ):
        super(Wasserstein2BarycenterBenchmark, self).__init__()
        self.verbose = verbose
        self.dim = samplers[0].dim
        self.num = len(samplers)
        if alphas is not None:
            self.alphas = alphas
        else:
            # Default: uniform barycenter weights.
            self.alphas = np.ones(self.num, dtype=np.float32) / self.num
        self.device = device
        self.requires_grad = requires_grad
        self.samplers = samplers
        # Filled by _compute_gaussian_barycenter / subclasses; None until then.
        self.gauss_bar_sampler = None
        self.bar_sampler = bar_sampler
        self.bar_maps = None
        self.bar_maps_inv = None
        self.bar_cost = None
        if compute_gaussian:
            self._compute_gaussian_barycenter(max_iter=max_iter, tol=tol)

    def _compute_gaussian_barycenter(self, max_iter=1000, tol=1e-6):
        """Fixed-point Gaussian barycenter plus linear transport maps both ways."""
        if self.verbose:
            print(f'Computing Gaussian Barycenter Covariance, max_iter={max_iter}')
        gauss_bar_cov = get_barycenter_cov(
            [sampler.cov for sampler in self.samplers], self.alphas,
            max_iter, tol, verbose=self.verbose
        )
        # Barycenter approximated as a zero-mean normal with the fixed-point cov.
        self.gauss_bar_sampler = distributions.NormalSampler(
            np.zeros(self.dim, dtype=np.float32), cov=gauss_bar_cov,
            device=self.device, requires_grad=self.requires_grad
        )
        if self.verbose:
            print('Computing the Gaussian Barycenter Functional')
        # Barycenter functional: alpha-weighted sum of squared Bures-Wasserstein
        # distances from each input to the barycenter.
        self.gauss_bar_cost = np.sum([self.alphas[n] * calculate_frechet_distance(
            self.samplers[n].mean, self.samplers[n].cov,
            self.gauss_bar_sampler.mean, self.gauss_bar_sampler.cov,
        ) for n in range(self.num)])
        self.gauss_bar_maps_inv, self.gauss_bar_maps = [], []
        for n in tqdm(range(self.num)) if self.verbose else range(self.num):
            # Inverse map: barycenter -> sampler n, stored as an nn.Linear.
            weight_inv, bias_inv = get_linear_transport(
                self.gauss_bar_sampler.mean, self.gauss_bar_sampler.cov,
                self.samplers[n].mean, self.samplers[n].cov,
            )
            map_inv = nn.Linear(self.dim, self.dim).to(self.device)
            map_inv.weight.data = torch.tensor(weight_inv, device=self.device)
            map_inv.bias.data = torch.tensor(bias_inv, device=self.device)
            self.gauss_bar_maps_inv.append(map_inv)
            # Forward map: sampler n -> barycenter.
            weight, bias = get_linear_transport(
                self.samplers[n].mean, self.samplers[n].cov,
                self.gauss_bar_sampler.mean, self.gauss_bar_sampler.cov,
            )
            map_fwd = nn.Linear(self.dim, self.dim).to(self.device)
            map_fwd.weight.data = torch.tensor(weight, device=self.device)
            map_fwd.bias.data = torch.tensor(bias, device=self.device)
            self.gauss_bar_maps.append(map_fwd)
class LocationScatterBenchmark(Wasserstein2BarycenterBenchmark):
    """Benchmark whose inputs are location-scatter transforms of one base sampler.

    Each input distribution is an affine image of `sampler` with prescribed
    (mean, cov). For this family the Gaussian barycenter coincides with the
    true barycenter, so exact barycenter maps and a barycenter sampler are
    exposed when `compute_barycenter=True`.
    """

    def __init__(
        self, sampler, means, covs, alphas=None,
        compute_barycenter=True, max_iter=1000, tol=1e-6,
        device='cuda', requires_grad=False, verbose=False
    ):
        samplers = []
        for mean, cov in zip(means, covs):
            # Affine map taking the base sampler's moments to (mean, cov).
            weight, bias = get_linear_transport(sampler.mean, sampler.cov, mean, cov)
            samplers.append(
                distributions.LinearTransformer(
                    weight, bias, requires_grad=requires_grad
                ).fit(sampler)
            )
        super(LocationScatterBenchmark, self).__init__(
            samplers, alphas=alphas,
            compute_gaussian=compute_barycenter, max_iter=max_iter, tol=tol,
            device=device, requires_grad=requires_grad, verbose=verbose
        )
        if compute_barycenter:
            # In the location-scatter family the Gaussian barycenter is exact,
            # so reuse the Gaussian quantities as the ground truth.
            self.bar_cost = self.gauss_bar_cost
            self.bar_maps = self.gauss_bar_maps
            self.bar_maps_inv = self.gauss_bar_maps_inv
            weight, bias = get_linear_transport(
                sampler.mean, sampler.cov,
                self.gauss_bar_sampler.mean, self.gauss_bar_sampler.cov
            )
            self.bar_sampler = distributions.LinearTransformer(
                weight, bias,
                requires_grad=self.requires_grad,
                device=self.device
            ).fit(sampler)
class EigenWarpBenchmark(LocationScatterBenchmark):
    """Location-scatter benchmark with randomly rotated, log-spaced spectra.

    Each target covariance is W W^T with W = R diag(exp(linspace(log(min_eig),
    log(max_eig)))) for a random rotation R — note the resulting covariance
    eigenvalues therefore span [min_eig**2, max_eig**2]. Means are i.i.d.
    normal scaled by `shift` (0 by default, i.e. all centered).
    """

    def __init__(
        self, sampler, num=3, min_eig=0.5, max_eig=2., shift=0., alphas=None,
        compute_barycenter=True, max_iter=1000, tol=1e-6,
        device='cuda', requires_grad=False, verbose=False
    ):
        self.num = num
        self.dim = sampler.dim
        self.min_eig, self.max_eig = min_eig, max_eig
        self.shift = shift
        self.verbose = verbose
        means = self.shift * np.random.normal(size=(self.num, self.dim)).astype(np.float32)
        covs = np.zeros((self.num, self.dim, self.dim), dtype=np.float32)
        if self.verbose:
            print('Generating Covariance Matrices')
        for n in range(self.num):
            # Random orthogonal rotation of a fixed log-spaced eigenvalue grid.
            rotation = ortho_group.rvs(self.dim)
            weight = rotation @ np.diag(np.exp(np.linspace(np.log(min_eig), np.log(max_eig), self.dim)))
            covs[n] = weight @ weight.T
        super(EigenWarpBenchmark, self).__init__(
            sampler, means, covs, alphas=alphas,
            compute_barycenter=compute_barycenter, max_iter=max_iter, tol=tol,
            device=device, requires_grad=requires_grad, verbose=verbose
        )
# class RotatedGaussiansBenchmark(Wasserstein2BarycenterBenchmark):
# def __init__(
# self,
# dim=2, count=2, alphas=None,
# eig=(0.5, 2.), shift=3.,
# max_iter=1000, tol=1e-6,
# verbose=False,
# device='cuda',
# dtype=torch.float32,
# requires_grad=False
# ):
# super(RotatedGaussiansBenchmark, self).__init__(
# dim, count, alphas,
# device=device,
# requires_grad=requires_grad
# )
# self.eig = eig
# self.shift = shift
# self.verbose = verbose
# means = self.shift * np.random.normal(size=(self.count, self.dim)).astype(np.float32)
# means -= (means.T * self.alphas).sum(axis=1)
# transforms = np.zeros((self.count, self.dim, self.dim), dtype=np.float32)
# if self.verbose:
# print('Generating Covariance Matrices')
# for k in range(self.count):
# rotation = ortho_group.rvs(self.dim)
# transforms[k] = rotation @ np.diag(np.exp(np.linspace(np.log(eig[0]), np.log(eig[1]), self.dim)))
# if self.verbose:
# print('Initializing samplers')
# self.samplers = [
# distributions.NormalSampler(
# means[k], weight=transforms[k],
# device=self.device, requires_grad=self.requires_grad
# ) for k in range(count)
# ]
# self._compute_barycenter(max_iter, tol)
# def _compute_barycenter(self, max_iter=1000, tol=1e-6):
# if self.verbose:
# print(f'Computing Barycenter Covariance, max_iter={max_iter}')
# bar_cov = get_barycenter_cov(
# [sampler.cov for sampler in self.samplers], self.alphas,
# max_iter, tol, verbose=self.verbose
# )
# self.bar_sampler = distributions.NormalSampler(
# np.zeros(self.dim, dtype=np.float32), cov=bar_cov,
# device=self.device, requires_grad=self.requires_grad
# )
# if self.verbose:
# print('Computing inverse and forward maps to barycenter')
# self.bar_maps_inv, self.bar_maps = [], []
# self.bar_cost = 0.
# for k in tqdm(range(self.count)) if self.verbose else range(self.count):
# weight, bias, weight_inv, bias_inv = get_linear_transport(
# self.samplers[k].mean, self.samplers[k].cov,
# self.bar_sampler.mean, self.bar_sampler.cov,
# )
# map_inv = nn.Linear(self.dim, self.dim).to(self.device)
# map_inv.weight.data = torch.tensor(weight_inv, device=self.device)
# map_inv.bias.data = torch.tensor(bias_inv, device=self.device)
# self.bar_maps_inv.append(map_inv)
# map_fwd = nn.Linear(self.dim, self.dim).to(self.device)
# map_fwd.weight.data = torch.tensor(weight, device=self.device)
# map_fwd.bias.data = torch.tensor(bias, device=self.device)
# self.bar_maps.append(map_fwd)
# self.bar_cost += self.alphas[k] * calculate_frechet_distance(
# self.samplers[k].mean, self.samplers[k].cov,
# self.bar_sampler.mean, self.bar_sampler.cov,
# ) | 10,558 | 40.735178 | 111 | py |
Wasserstein2Barycenters | Wasserstein2Barycenters-main/src/plotters.py | import numpy as np
import matplotlib
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import torch
import gc
def plot_rgb_cloud(cloud, ax):
    """Scatter a point cloud in RGB color space on a 3-D axis.

    Each point is colored by its own (clipped) coordinates; axes are labeled
    Red/Green/Blue and limited to [0, 1].
    """
    point_colors = np.clip(cloud, 0, 1)
    ax.scatter(cloud[:, 0], cloud[:, 1], cloud[:, 2], c=point_colors)
    ax.set_xlabel('Red')
    ax.set_ylabel('Green')
    ax.set_zlabel('Blue')
    ax.set_xlim(0, 1)
    ax.set_ylim(0, 1)
    ax.set_zlim(0, 1)
def plot_training_phase(
    benchmark, pca, D_list, D_conj_list,
    G=None, Z_sampler=None,
    plot_batchsize=250, partsize=(5, 5), dpi=200
):
    """Visualize the current training state of the barycenter solver.

    Produces a 3 x (num + 2) grid: row 0 the input distributions, row 1 their
    pushforwards (and the generated barycenter when a generator is given),
    row 2 the inverse pushforwards. All point clouds are projected to 2-D
    with the fitted `pca`. Returns (fig, axes).
    """
    # Generator panels are drawn only when both G and its latent sampler exist.
    plot_G = True if ((G is not None) and (Z_sampler is not None)) else False
    plot_B = True if (benchmark.bar_sampler is not None) or (benchmark.gauss_bar_sampler is not None) else False
    fig, axes = plt.subplots(
        3, benchmark.num + 2,
        figsize=(partsize[0] * (benchmark.num+2), 3 * partsize[1]),
        sharex=True, sharey=True, dpi=dpi
    )
    # Original distributions, pushed and inverse pushed from G(Z)
    if plot_G:
        Z = Z_sampler.sample(plot_batchsize).detach()
        Y = G(Z).detach()
        Y.requires_grad_(True)  # inverse maps differentiate w.r.t. their input
        Y_pca = pca.transform(Y.cpu().detach().numpy())
        axes[1,-2].scatter(Y_pca[:, 0], Y_pca[:, 1], edgecolors='black', color='gold')
        axes[1,-2].set_title(f'Generated Barycenter', fontsize=12)
        Y_push_sum = 0.
    for n in range(benchmark.num):
        X = benchmark.samplers[n].sample(plot_batchsize)
        X_pca = pca.transform(X.cpu().detach().numpy())
        X_push_pca = pca.transform(D_list[n].push(X).cpu().detach().numpy())
        axes[0, n].scatter(X_pca[:, 0], X_pca[:, 1], edgecolors='black')
        axes[0, n].set_title(f'Initial distribution {n}', fontsize=12)
        axes[1, n].scatter(X_push_pca[:, 0], X_push_pca[:, 1], edgecolors='black', color='orange')
        axes[1, n].set_title(f'Pushed distribution {n}', fontsize=12)
        if plot_G:
            # Inverse maps applied to generated barycenter samples; their
            # alpha-weighted sum is the generator's regression target.
            Y_push = D_conj_list[n].push(Y).detach()
            Y_push_pca = pca.transform(Y_push.cpu().detach().numpy())
            with torch.no_grad():
                Y_push_sum += benchmark.alphas[n] * Y_push
            axes[2, n].set_title(f'Inverse pushed {n} from generated', fontsize=12)
        else:
            # No generator: invert the pushforward of this input instead.
            Y = D_list[n].push(X).detach()
            Y.requires_grad_(True)
            Y_push_pca = pca.transform(D_conj_list[n].push(Y).cpu().detach().numpy())
            axes[2, n].set_title(f'Inverse pushed {n}', fontsize=12)
        axes[2, n].scatter(Y_push_pca[:, 0], Y_push_pca[:, 1], edgecolors='black', color='lightblue')
    if plot_G:
        Y_push_sum_pca = pca.transform(Y_push_sum.cpu().detach().numpy())
        axes[2, -2].scatter(Y_push_sum_pca[:, 0], Y_push_sum_pca[:, 1], edgecolors='black', color='red')
        axes[2, -2].set_title(f'Generator Target', fontsize=12)
    if plot_B:
        # Reference barycenter: exact one when known, Gaussian otherwise.
        if benchmark.bar_sampler is not None:
            axes[1, -1].set_title(f'True Barycenter', fontsize=12)
            axes[2, -1].set_title(f'True Barycenter', fontsize=12)
            Y = benchmark.bar_sampler.sample(plot_batchsize).cpu().detach().numpy()
        else:
            axes[1, -1].set_title(f'Gaussian Barycenter', fontsize=12)
            axes[2, -1].set_title(f'Gaussian Barycenter', fontsize=12)
            Y = benchmark.gauss_bar_sampler.sample(plot_batchsize).cpu().detach().numpy()
        Y_pca = pca.transform(Y)
        axes[1, -1].scatter(Y_pca[:, 0], Y_pca[:, 1], edgecolors='black', color='green')
        axes[2, -1].scatter(Y_pca[:, 0], Y_pca[:, 1], edgecolors='black', color='green')
    # Free GPU memory held by intermediate graphs before returning.
    gc.collect()
    torch.cuda.empty_cache()
    return fig, axes
def plot_colored_cloud(cloud, ax):
    """Scatter a 3-D cloud colored by its per-axis min-max normalized coordinates."""
    ax._axis3don = False
    lo = cloud.min(axis=0)
    hi = cloud.max(axis=0)
    point_colors = (cloud - lo) / (hi - lo)
    ax.scatter(cloud[:, 0], cloud[:, 1], cloud[:, 2], c=point_colors)
    ax.set_xlabel('Red')
    ax.set_ylabel('Green')
    ax.set_zlabel('Blue')
def push_img(im, D):
    """Push every pixel of an RGB image through the gradient map `D.push`.

    Pixels are flattened to an (N, 3) float array in [0, 1], pushed on CUDA
    in chunks, then reassembled, clipped to [0, 1] and rescaled to 0-255
    integers. `im` is anything `np.asarray` turns into an (H, W, 3) array.
    """
    X = (np.asarray(im).transpose(2, 0, 1).reshape(3, -1) / 255.).T
    X_pushed = np.zeros_like(X)
    pos = 0; batch = 4999  # chunked to bound GPU memory usage
    while pos < len(X):
        X_pushed[pos:pos+batch] = D.push(
            torch.tensor(X[pos:pos+batch], device='cuda', requires_grad=True).float()
        ).detach().cpu().numpy()
        pos += batch
    # Undo the flattening: back to (H, W, 3) in 0-255 integer range.
    im_pushed = (
        np.clip(
            (X_pushed.T.reshape(
                np.asarray(im).transpose(2, 0, 1).shape
            )).transpose(1, 2, 0), 0, 1) * 255
    ).astype(int)
    return im_pushed
def plot_training_phase_palettes(
    benchmark, D_list, D_conj_list,
    plot_batchsize=250, partsize=(5, 5), dpi=200,
    elev=0., azim=40
):
    """Visualize training state for color-palette (RGB point cloud) benchmarks.

    3 x num grid of 3-D scatter plots: inputs, pushforwards, inverse
    pushforwards. `elev`/`azim` are currently unused. Returns (fig, axes).
    """
    fig, axes = plt.subplots(
        3, benchmark.num,
        figsize=(partsize[0] * (benchmark.num), 3 * partsize[1]),
        sharex=True, sharey=True, dpi=dpi,
        subplot_kw=dict(projection='3d')
    )
    for n in range(benchmark.num):
        X = benchmark.samplers[n].sample(plot_batchsize)
        X_np = np.clip(X.cpu().detach().numpy(), 0, 1)
        X_push_np = np.clip(D_list[n].push(X).cpu().detach().numpy(), 0, 1)
        plot_rgb_cloud(X_np, axes[0, n])
        axes[0, n].set_title(f'Initial distribution {n}', fontsize=12)
        plot_rgb_cloud(X_push_np, axes[1, n])
        axes[1, n].set_title(f'Pushed distribution {n}', fontsize=12)
        # Re-push and invert to check cycle consistency visually.
        Y = D_list[n].push(X).detach()
        Y.requires_grad_(True)
        Y_push_np = np.clip(D_conj_list[n].push(Y).cpu().detach().numpy(), 0, 1)
        axes[2, n].set_title(f'Inverse pushed {n}', fontsize=12)
        plot_rgb_cloud(Y_push_np, axes[2, n])
    # Free GPU memory held by intermediate graphs before returning.
    gc.collect()
    torch.cuda.empty_cache()
    return fig, axes
def plot_training_phase_im(
    imgs, D_list, D_conj_list,
    plot_batchsize=250, partsize=(5, 5), dpi=200,
    elev=0., azim=40
):
    """Visualize training state on whole images (color-transfer view).

    3 x len(imgs) grid: originals, pixel-wise pushed images, and inverse
    pushed images (cycle check). `plot_batchsize`, `elev`, `azim` are
    currently unused. Returns (fig, axes).
    """
    fig, axes = plt.subplots(
        3, len(imgs),
        figsize=(partsize[0] * (len(imgs)), 3 * partsize[1]),
        dpi=dpi
    )
    for n in range(len(imgs)):
        X = imgs[n]
        axes[0, n].imshow(X)
        axes[0, n].set_title(f'Initial distribution {n}', fontsize=12)
        X_push = push_img(X, D_list[n])
        axes[1, n].imshow(X_push)
        axes[1, n].set_title(f'Pushed distribution {n}', fontsize=12)
        X_push_inv = push_img(X_push, D_conj_list[n])
        axes[2, n].imshow(X_push_inv)
        axes[2, n].set_title(f'Inverse pushed {n}', fontsize=12)
    # Free GPU memory held by intermediate graphs before returning.
    gc.collect()
    torch.cuda.empty_cache()
    return fig, axes
Wasserstein2Barycenters | Wasserstein2Barycenters-main/src/tools.py | import os, sys
import torchvision.datasets as datasets
import numpy as np
import pandas as pd
from tqdm import tqdm
from scipy.linalg import sqrtm
import os, sys
import argparse
import collections
from scipy.io import savemat
from tqdm import trange
from torchvision.utils import save_image
from torch.utils.data import DataLoader
import multiprocessing
import itertools
import torch
from PIL import Image
sys.path.append("..")
import gc
def ewma(x, span=200):
    """Exponentially weighted moving average of a 1-D sequence.

    Uses pandas `ewm(span=...)` semantics and returns a numpy array of the
    same length as `x`.
    """
    series = pd.Series(x)
    return series.ewm(span=span).mean().values
def fig2data(fig):
    """
    @brief Convert a Matplotlib figure to a 4D numpy array with RGBA channels and return it
    @param fig a matplotlib figure
    @return a numpy 3D array of RGBA values
    """
    # Render the figure so the canvas pixel buffer is populated.
    fig.canvas.draw()
    # Grab the ARGB buffer. np.frombuffer replaces binary-mode np.fromstring,
    # which has been deprecated since NumPy 1.14 and removed in NumPy 2.0.
    # Copy because frombuffer returns a read-only view of the canvas memory.
    w, h = fig.canvas.get_width_height()
    buf = np.frombuffer(fig.canvas.tostring_argb(), dtype=np.uint8).copy()
    # NOTE(review): the buffer holds h rows of w pixels, so (h, w, 4) is the
    # natural shape; (w, h, 4) is kept from the original for compatibility
    # with fig2img — confirm before using on non-square figures.
    buf.shape = (w, h, 4)
    # canvas.tostring_argb gives ARGB; roll the alpha channel to get RGBA.
    buf = np.roll(buf, 3, axis=2)
    return buf
def fig2img(fig):
    """Convert a Matplotlib figure to a PIL RGBA Image via fig2data."""
    buf = fig2data(fig)
    w, h, d = buf.shape
    # ndarray.tostring() was deprecated in NumPy 1.19 and removed in 2.0;
    # tobytes() returns the identical byte string.
    return Image.frombytes("RGBA", (w, h), buf.tobytes())
def calculate_frechet_distance(mu1, sigma1, mu2, sigma2, eps=1e-6):
    """Numpy implementation of the Frechet Distance.

    The Frechet distance between two multivariate Gaussians X_1 ~ N(mu_1, C_1)
    and X_2 ~ N(mu_2, C_2) is

        d^2 = ||mu_1 - mu_2||^2 + Tr(C_1 + C_2 - 2*sqrt(C_1*C_2)).

    Stable version by Dougal J. Sutherland: if the matrix square root of the
    covariance product is non-finite, `eps` is added to both diagonals and the
    root is recomputed; a small imaginary residue is discarded, a large one
    raises ValueError.

    Params:
        mu1, sigma1 : mean vector and covariance of the first Gaussian.
        mu2, sigma2 : mean vector and covariance of the second Gaussian.
        eps         : diagonal regularizer for near-singular products.
    Returns:
        The squared Frechet (Bures-Wasserstein) distance.
    """
    mu1, mu2 = np.atleast_1d(mu1), np.atleast_1d(mu2)
    sigma1, sigma2 = np.atleast_2d(sigma1), np.atleast_2d(sigma2)

    assert mu1.shape == mu2.shape, \
        'Training and test mean vectors have different lengths'
    assert sigma1.shape == sigma2.shape, \
        'Training and test covariances have different dimensions'

    diff = mu1 - mu2

    # Product might be almost singular; disp=False suppresses scipy's warning
    # print and returns an error estimate instead.
    covmean, _ = sqrtm(sigma1.dot(sigma2), disp=False)
    if not np.isfinite(covmean).all():
        print(('fid calculation produces singular product; '
               'adding %s to diagonal of cov estimates') % eps)
        offset = np.eye(sigma1.shape[0]) * eps
        covmean = sqrtm((sigma1 + offset).dot(sigma2 + offset))

    # Numerical error might give a slight imaginary component; tolerate it.
    if np.iscomplexobj(covmean):
        if not np.allclose(np.diagonal(covmean).imag, 0, atol=1e-3):
            m = np.max(np.abs(covmean.imag))
            raise ValueError('Imaginary component {}'.format(m))
        covmean = covmean.real

    trace_term = np.trace(sigma1) + np.trace(sigma2) - 2 * np.trace(covmean)
    return diff.dot(diff) + trace_term
def score_forward_maps(benchmark, D_list, score_size=1024):
    """L2-UVP (%) of each learned forward map against the true barycenter maps.

    For every input distribution n, compares D_list[n].push(X) with the
    ground-truth linear map benchmark.bar_maps[n](X) on `score_size` samples,
    normalized by the barycenter variance.

    Fix: the original drew (and discarded) a full batch from the barycenter
    sampler into an unused local, wasting time and RNG state; removed.
    """
    assert (benchmark.bar_maps is not None) and (benchmark.bar_sampler is not None)
    L2_UVP = []
    for n in range(benchmark.num):
        X = benchmark.samplers[n].sample(score_size)
        X_push = D_list[n].push(X).detach()
        with torch.no_grad():
            X_push_true = benchmark.bar_maps[n](X)
            L2_UVP.append(
                100 * (((X_push - X_push_true) ** 2).sum(dim=1).mean() / benchmark.bar_sampler.var).item()
            )
    return L2_UVP
def score_pushforwards(benchmark, D_list, score_size=128*1024, batch_size=1024):
    """BW2^2-UVP (%) of each pushforward's Gaussian fit vs. the true barycenter.

    Pushes ~`score_size` samples per input (in `batch_size` chunks), fits a
    Gaussian (empirical mean/cov) to the pushed cloud, and measures its
    Frechet distance to the barycenter's moments, normalized by the
    barycenter variance.
    """
    assert (benchmark.bar_sampler is not None)
    BW2_UVP = []
    if score_size < batch_size:
        batch_size = score_size
    num_chunks = score_size // batch_size
    for n in range(benchmark.num):
        # Push in chunks to bound GPU memory; stack back on the CPU.
        X_push = np.vstack([
            D_list[n].push(benchmark.samplers[n].sample(batch_size)).cpu().detach().numpy()
            for _ in range(num_chunks)
        ])
        # Gaussian approximation of the pushforward distribution.
        X_push_cov = np.cov(X_push.T)
        X_push_mean = np.mean(X_push, axis=0)
        UVP = 100 * calculate_frechet_distance(
            X_push_mean, X_push_cov,
            benchmark.bar_sampler.mean, benchmark.bar_sampler.cov,
        ) / benchmark.bar_sampler.var
        BW2_UVP.append(UVP)
    return BW2_UVP
def score_cycle_consistency(benchmark, D_list, D_conj_list, score_size=1024):
    """Cycle-UVP (%): ||X - conj_n(push_n(X))||^2 averaged over samples,
    normalized by each input sampler's variance.
    """
    cycle_UVP = []
    for n in range(benchmark.num):
        X = benchmark.samplers[n].sample(score_size)
        X_push = D_list[n].push(X).detach()
        # push() differentiates w.r.t. its input, so grads must be enabled.
        X_push.requires_grad_(True)
        X_push_inv = D_conj_list[n].push(X_push).detach()
        with torch.no_grad():
            cycle_UVP.append(
                100 * (((X - X_push_inv) ** 2).sum(dim=1).mean() / benchmark.samplers[n].var).item()
            )
    return cycle_UVP
def score_congruence(benchmark, D_conj_list, score_size=1024):
    """Congruence-UVP (%): distance of the alpha-weighted sum of inverse maps
    (applied to barycenter samples) from the identity, normalized by the
    barycenter variance. Zero for a perfect barycenter solution.
    """
    assert benchmark.bar_sampler is not None
    Y = benchmark.bar_sampler.sample(score_size)
    Y_sum = torch.zeros_like(Y).detach()
    for n in range(benchmark.num):
        Y_push = D_conj_list[n].push(Y).detach()
        with torch.no_grad():
            Y_sum += benchmark.alphas[n] * Y_push
    return 100 * (((Y - Y_sum) ** 2).sum(dim=1).mean() / benchmark.bar_sampler.var).item()
Wasserstein2Barycenters | Wasserstein2Barycenters-main/src/layers.py | import torch
import torch.autograd as autograd
import torch.nn as nn
import torch.nn.functional as F
class ConvexQuadratic(nn.Module):
    '''Convex Quadratic Layer.

    Per output unit j, computes a rank-limited convex quadratic form plus an
    affine term:

        out_j = sum_r (x . Q[:, r, j])**2 + w_j . x + b_j

    which is convex in the input x.
    '''
    __constants__ = ['in_features', 'out_features', 'quadratic_decomposed', 'weight', 'bias']

    def __init__(self, in_features, out_features, bias=True, rank=1):
        super(ConvexQuadratic, self).__init__()
        self.in_features = in_features
        self.out_features = out_features
        self.rank = rank

        # Low-rank factors of the quadratic form, one rank-`rank` factor set
        # per output feature. (Dropped the redundant torch.Tensor(...) wrap
        # around torch.randn from the original.)
        self.quadratic_decomposed = nn.Parameter(
            torch.randn(in_features, rank, out_features)
        )
        self.weight = nn.Parameter(
            torch.randn(out_features, in_features)
        )
        if bias:
            # Fix: the original used nn.Parameter(torch.Tensor(out_features)),
            # i.e. UNINITIALIZED memory that may contain arbitrary garbage or
            # NaNs. Initialize to zero instead.
            self.bias = nn.Parameter(torch.zeros(out_features))
        else:
            self.register_parameter('bias', None)

    def forward(self, input):
        # (batch, in) -> (rank, batch, out) -> (batch, rank, out),
        # squared and summed over the rank dimension.
        quad = ((input.matmul(self.quadratic_decomposed.transpose(1, 0)).transpose(1, 0)) ** 2).sum(dim=1)
        linear = F.linear(input, self.weight, self.bias)
        return quad + linear
class View(nn.Module):
    """Reshape module: views the input as (batch, *shape), inferring batch size."""

    def __init__(self, *shape):
        super(View, self).__init__()
        self.shape = shape

    def forward(self, input):
        out_shape = (-1,) + tuple(self.shape)
        return input.view(out_shape)
class Conv2dConvexQuadratic(nn.Module):
    '''Convolutional Input-Convex Quadratic Layer.

    Output = sum over `rank` of squared convolutions of the input, plus an
    ordinary (linear) convolution — convex in the input, analogous to
    ConvexQuadratic but with conv kernels.
    '''

    def __init__(
        self, in_channels, out_channels, kernel_size, rank,
        stride=1, padding=0, dilation=1, groups=1, bias=True, padding_mode='zeros'
    ):
        super(Conv2dConvexQuadratic, self).__init__()
        assert rank > 0
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.kernel_size = kernel_size
        self.rank = rank
        self.stride = stride
        self.padding = padding
        self.dilation = dilation
        self.groups = groups
        self.bias = bias
        self.padding_mode = padding_mode
        # `rank` conv filters per output channel; their squares are summed in
        # forward(). bias=False keeps the quadratic term purely quadratic.
        self.quadratic_decomposed = nn.Conv2d(
            in_channels, out_channels * rank, kernel_size,
            stride=self.stride,
            padding=self.padding,
            dilation=self.dilation,
            groups=self.groups,
            bias=False,
            padding_mode=self.padding_mode
        )
        # Affine part of the layer.
        self.linear = nn.Conv2d(
            in_channels, out_channels, kernel_size,
            stride=self.stride,
            padding=self.padding,
            dilation=self.dilation,
            groups=self.groups,
            bias=self.bias,
            padding_mode=self.padding_mode
        )

    def forward(self, input):
        output = (self.quadratic_decomposed(input) ** 2)
        n, c, h, w = output.size()
        # Collapse the (out_channels * rank) axis: sum squares over rank.
        output = output.reshape(n, c // self.rank, self.rank, h, w).sum(2)
        output += self.linear(input)
        return output
Wasserstein2Barycenters | Wasserstein2Barycenters-main/src/icnn.py | import torch
import torch.autograd as autograd
import torch.nn as nn
import torch.nn.functional as F
from .layers import ConvexQuadratic, Conv2dConvexQuadratic
class DenseICNN(nn.Module):
    '''Fully Connected ICNN (input-convex neural network) with
    input-quadratic skip connections.

    Each hidden layer receives a non-negative linear map of the previous
    layer plus a convex-quadratic map of the raw input; with a convex
    monotone activation and `convexify()` applied after optimizer steps,
    the scalar output is convex in the input.
    '''

    def __init__(
        self, in_dim,
        hidden_layer_sizes=[32, 32, 32],  # NOTE(review): mutable default — shared across instances if mutated
        rank=1, activation='celu', dropout=0.03,
        strong_convexity=1e-6
    ):
        super(DenseICNN, self).__init__()
        self.strong_convexity = strong_convexity
        self.hidden_layer_sizes = hidden_layer_sizes
        self.droput = dropout  # NOTE(review): attribute name is a typo of 'dropout'; kept for compatibility
        self.activation = activation
        self.rank = rank
        # Convex-quadratic skip connections from the input to every layer.
        self.quadratic_layers = nn.ModuleList([
            nn.Sequential(
                ConvexQuadratic(in_dim, out_features, rank=rank, bias=True),
                nn.Dropout(dropout)
            )
            for out_features in hidden_layer_sizes
        ])
        sizes = zip(hidden_layer_sizes[:-1], hidden_layer_sizes[1:])
        # Hidden-to-hidden maps; clamped non-negative by convexify().
        self.convex_layers = nn.ModuleList([
            nn.Sequential(
                nn.Linear(in_features, out_features, bias=False),
                nn.Dropout(dropout)
            )
            for (in_features, out_features) in sizes
        ])
        self.final_layer = nn.Linear(hidden_layer_sizes[-1], 1, bias=False)

    def forward(self, input):
        output = self.quadratic_layers[0](input)
        for quadratic_layer, convex_layer in zip(self.quadratic_layers[1:], self.convex_layers):
            output = convex_layer(output) + quadratic_layer(input)
            if self.activation == 'celu':
                output = torch.celu(output)
            elif self.activation == 'softplus':
                output = F.softplus(output)
            elif self.activation == 'relu':
                output = F.relu(output)
            else:
                raise Exception('Activation is not specified or unknown.')
        # Strong-convexity regularizer keeps the function strictly convex.
        return self.final_layer(output) + .5 * self.strong_convexity * (input ** 2).sum(dim=1).reshape(-1, 1)

    def push(self, input):
        """Gradient map x -> grad_x f(x); `input` must require grad.

        NOTE(review): grad_outputs is created with .cuda(), so this method
        assumes CUDA tensors.
        """
        output = autograd.grad(
            outputs=self.forward(input), inputs=input,
            create_graph=True, retain_graph=True,
            only_inputs=True,
            grad_outputs=torch.ones((input.size()[0], 1)).cuda().float()
        )[0]
        return output

    def convexify(self):
        """Clamp hidden-to-hidden and final weights to be non-negative,
        restoring the ICNN convexity constraint after an optimizer step."""
        for layer in self.convex_layers:
            for sublayer in layer:
                if (isinstance(sublayer, nn.Linear)):
                    sublayer.weight.data.clamp_(0)
        self.final_layer.weight.data.clamp_(0)
class View(nn.Module):
    """Reshape incoming tensors to (batch, *shape); batch size is inferred."""

    def __init__(self, *shape):
        super(View, self).__init__()
        self.shape = shape

    def forward(self, input):
        batch_first = (-1,) + self.shape
        return input.view(batch_first)
class ConvICNN128(nn.Module):
    """Convolutional ICNN for 128x128 images.

    The input passes through a linear conv stack and a squared conv stack
    (input-quadratic term); their sum feeds a positive convolutional/linear
    tower (`convex`, clamped by convexify()) ending in a scalar.

    NOTE(review): `convex` is built with .cuda() and `push` allocates its
    grad_outputs on CUDA, so this module is CUDA-only as written.
    """

    def __init__(self, channels=3):
        super(ConvICNN128, self).__init__()
        # Affine part of the first ICNN layer (only the last conv has a bias).
        self.first_linear = nn.Sequential(
            nn.Conv2d(channels, 128, kernel_size=3, padding=1, bias=False),
            nn.Conv2d(128, 128, kernel_size=3, padding=1, bias=False),
            nn.Conv2d(128, 128, kernel_size=3, padding=1, bias=False),
            nn.Conv2d(128, 128, kernel_size=3, padding=1, bias=True),
        )
        # Quadratic part: output of this stack is squared in forward().
        self.first_squared = nn.Sequential(
            nn.Conv2d(channels, 128, kernel_size=3, padding=1, bias=False),
            nn.Conv2d(128, 128, kernel_size=3, padding=1, bias=False),
            nn.Conv2d(128, 128, kernel_size=3, padding=1, bias=False),
            nn.Conv2d(128, 128, kernel_size=3, padding=1, bias=False),
        )
        # Positive tower: strided convs downsample 128 -> 4 spatially, then
        # fully-connected layers reduce to a scalar per sample.
        self.convex = nn.Sequential(
            nn.CELU(),
            nn.Conv2d(128, 128, kernel_size=3, stride=2, bias=True, padding=1),
            nn.CELU(),
            nn.Conv2d(128, 128, kernel_size=3, stride=2, bias=True, padding=1),
            nn.CELU(),
            nn.Conv2d(128, 128, kernel_size=3, stride=2, bias=True, padding=1),
            nn.CELU(),
            nn.Conv2d(128, 128, kernel_size=3, stride=2, bias=True, padding=1),
            nn.CELU(),
            nn.Conv2d(128, 128, kernel_size=3, stride=2, bias=True, padding=1),
            nn.CELU(),
            View(32 * 8 * 8),
            nn.CELU(),
            nn.Linear(32 * 8 * 8, 128),
            nn.CELU(),
            nn.Linear(128, 64),
            nn.CELU(),
            nn.Linear(64, 32),
            nn.CELU(),
            nn.Linear(32, 1),
            View()
        ).cuda()

    def forward(self, input):
        # ICNN first layer: affine term + squared (input-quadratic) term.
        output = self.first_linear(input) + self.first_squared(input) ** 2
        output = self.convex(output)
        return output

    def push(self, input):
        """Gradient map x -> grad_x f(x); `input` must require grad (CUDA only)."""
        return autograd.grad(
            outputs=self.forward(input), inputs=input,
            create_graph=True, retain_graph=True,
            only_inputs=True, grad_outputs=torch.ones(input.size()[0]).cuda().float()
        )[0]

    def convexify(self):
        """Clamp the positive tower's weights to restore convexity."""
        for layer in self.convex:
            if (isinstance(layer, nn.Linear)) or (isinstance(layer, nn.Conv2d)):
                layer.weight.data.clamp_(0)
class ConvICNN16(nn.Module):
    '''
    ConvICNN for 1 x 16 x 16 images.
    Several convolutional layers with input-quadratic convolutional skip
    connections are followed by positive fully-connected layers.
    '''

    def __init__(self, strong_convexity=0.01, dropout=0.01, rank=1, unflatten=True):
        super(ConvICNN16, self).__init__()
        self.strong_convexity = strong_convexity
        self.dropout = dropout
        self.rank = rank
        # If True, forward() reshapes flat 256-dim inputs to 1x16x16 images.
        self.unflatten = unflatten
        # Hidden-to-hidden strided convs (clamped by convexify()).
        self.convex_layers = nn.ModuleList([
            nn.Conv2d(512, 512, 3, padding=1, stride=2),  # bs x 256 x 8 x 8
            nn.Conv2d(512, 512, 3, padding=1, stride=2),  # bs x 256 x 8 x 8
        ])
        # Input-quadratic skip connections; strides chosen so each output
        # matches the spatial size of the corresponding hidden layer.
        self.quadratic_layers = nn.ModuleList([
            Conv2dConvexQuadratic(1, 512, 5, rank=self.rank, padding=2, stride=1, bias=False),  # bs x 128 x 16 x16
            Conv2dConvexQuadratic(1, 512, 7, rank=self.rank, padding=3, stride=2, bias=False),  # bs x 128 x 8 x 8
            Conv2dConvexQuadratic(1, 512, 9, rank=self.rank, padding=4, stride=4, bias=False),  # bs x 128 x 8 x 8
        ])
        # Positive head: downsample and reduce to a scalar per sample.
        self.pos_features = nn.Sequential(
            nn.Dropout2d(self.dropout),
            nn.Conv2d(512, 256, 4, padding=1, stride=2),
            nn.CELU(0.2, True),
            nn.Dropout2d(self.dropout),
            nn.Conv2d(256, 1, 2, padding=0, stride=1),  # bs x 1 x 1 x 1
            View(1),
        )
        # img = torch.randn(5, 1, 16, 16)
        # print(self(img).shape)
        # print('Input Quadratic Convolutions Output shapes')
        # for layer in self.quadratic_layers:
        #     print(layer(img).shape)
        # print('Sequential Convolutions Output shapes\nEmpty')
        # img = self.quadratic_layers[0](img)
        # for layer in self.convex_layers:
        #     img = layer(img)
        #     print(img.shape)
        # print('Final Shape')
        # print(self.pos_features(img).shape)

    def forward(self, input):
        if self.unflatten:
            input = input.reshape(-1, 1, 16, 16)
        output = self.quadratic_layers[0](input)
        for quadratic_layer, convex_layer in zip(self.quadratic_layers[1:], self.convex_layers):
            output = convex_layer(output) + quadratic_layer(input)
            output = torch.celu(output)
            if self.training:
                output = F.dropout2d(output, p=self.dropout)
        output = self.pos_features(output)
        # Strong-convexity regularizer on the (possibly reshaped) input.
        return output + .5 * self.strong_convexity * (input ** 2).flatten(start_dim=1).sum(dim=1).reshape(-1, 1)

    def push(self, input):
        """Gradient map x -> grad_x f(x); `input` must require grad.

        NOTE(review): grad_outputs is created with .cuda(), so this method
        assumes CUDA tensors.
        """
        output = autograd.grad(
            outputs=self.forward(input), inputs=input,
            create_graph=True, retain_graph=True,
            only_inputs=True,
            grad_outputs=torch.ones((input.size()[0], 1)).cuda().float()
        )[0]
        return output

    def convexify(self):
        """Clamp hidden convs and the positive head to restore convexity."""
        for layer in self.convex_layers:
            if (isinstance(layer, nn.Linear)) or (isinstance(layer, nn.Conv2d)):
                layer.weight.data.clamp_(0)
        for layer in self.pos_features:
            if (isinstance(layer, nn.Linear)) or (isinstance(layer, nn.Conv2d)):
                layer.weight.data.clamp_(0)
SSC | SSC-master/main.py | #!/usr/bin/env python
# -*- coding: UTF-8 -*-
"""
python script to train the SSC model
---
Jie Li
jieli_cn@163.com
Nanjing University of Science and Technology
Aug 25, 2019
"""
from utils.seed import seed_torch
import os
import torch
import argparse
import numpy as np
from tqdm import tqdm
from torch.autograd import Variable
import datetime
from dataloaders import make_data_loader
import sscMetrics
from models import make_model
import config
# Command-line interface for SSC training; parsed at import time.
parser = argparse.ArgumentParser(description='PyTorch SSC Training')
parser.add_argument('--dataset', type=str, default='nyu', choices=['nyu', 'nyucad', 'debug'],
                    help='dataset name (default: nyu)')
parser.add_argument('--model', type=str, default='ddrnet', choices=['ddrnet', 'aicnet', 'grfnet', 'palnet'],
                    help='model name (default: palnet)')
# parser.add_argument('--data_augment', default=False, type=bool, help='data augment for training')
parser.add_argument('--epochs', default=50, type=int, metavar='N', help='number of total epochs to run')
parser.add_argument('--lr', default=0.01, type=float, metavar='LR', help='initial learning rate')
parser.add_argument('--lr_adj_n', default=100, type=int, metavar='LR', help='every n epochs adjust learning rate once')
parser.add_argument('--lr_adj_rate', default=0.1, type=float, metavar='LR', help='scale while adjusting learning rate')
parser.add_argument('--batch_size', default=4, type=int, metavar='N', help='mini-batch size (default: 4)')
parser.add_argument('--workers', default=4, type=int, metavar='N', help='number of data loading workers (default: 4)')
parser.add_argument('--resume', type=str, metavar='PATH', help='path to latest checkpoint (default: none)')
parser.add_argument('--checkpoint', default='./', metavar='DIR', help='path to checkpoint')
# parser.add_argument('--logdir', default='./logs_debug', metavar='DIR', help='path to logs')
parser.add_argument('--model_name', default='SSC_debug', type=str, help='name of model to save check points')
# parser.add_argument('--w', default=0.05, type=float, help='weight')

global args  # NOTE(review): no-op at module scope — names here are already global
args = parser.parse_args()

# Fix all RNG seeds for reproducibility before any model/data setup.
seed_torch(2019)
def main():
    """Entry point: report CUDA availability, run training, print elapsed time."""
    if torch.cuda.is_available():
        n_devices = torch.cuda.device_count()
        print("Great, You have {} CUDA device!".format(n_devices))
    else:
        print("Sorry, You DO NOT have a CUDA device!")
    started_at = datetime.datetime.now()
    train()
    elapsed = datetime.datetime.now() - started_at
    print('Training finished in: {}'.format(elapsed))
def train():
    """Train the SSC network, validating after every epoch and keeping the
    checkpoint with the best mean IoU.

    Relies on module-level ``args`` (CLI options) and ``config.class_weights``.
    Side effects: writes ``cp_<model_name>.pth.tar`` every epoch and
    ``cpBest_<model_name>.pth.tar`` whenever the validation mean IoU improves.
    """
    # ---- Data loader
    train_loader, val_loader = make_data_loader(args)
    # ---- create model ---------- ---------- ---------- ---------- ----------#
    net = make_model(args.model, num_classes=12).cuda()
    # net = torch.nn.DataParallel(net)  # Multi-GPU
    # ---- optionally resume from a checkpoint --------- ---------- ----------#
    if args.resume:
        if os.path.isfile(args.resume):
            print("=> loading checkpoint '{}'".format(args.resume))
            cp_states = torch.load(args.resume)
            net.load_state_dict(cp_states['state_dict'], strict=True)
        else:
            raise Exception("=> NO checkpoint found at '{}'".format(args.resume))
    # -------- ---------- --- Set checkpoint --------- ---------- ----------#
    cp_filename = args.checkpoint + 'cp_{}.pth.tar'.format(args.model_name)
    cp_best_filename = args.checkpoint + 'cpBest_{}.pth.tar'.format(args.model_name)
    # ---- Define loss function (criterion) and optimizer ---------- ----------#
    optimizer = torch.optim.SGD(filter(lambda p: p.requires_grad, net.parameters()), lr=args.lr, weight_decay=0.0005, momentum=0.9)
    # class 0 (empty space) is down-weighted via config.class_weights; 255 marks ignored voxels
    loss_func = torch.nn.CrossEntropyLoss(weight=config.class_weights, ignore_index=255).cuda()
    # ---- Print Settings for training -------- ---------- ---------- ----------#
    # BUGFIX: the format string had 4 placeholders but received 5 arguments
    # (the stray cp_filename was silently ignored; it is printed on the next line).
    print('Training epochs:{} \nInitial Learning rate:{} \nBatch size:{} \nNumber of workers:{}'.format(
        args.epochs,
        args.lr,
        args.batch_size,
        args.workers))
    print("Checkpoint filename:{}".format(cp_filename))
    # StepLR decays the LR by lr_adj_rate every lr_adj_n *epochs* (see --lr_adj_n help)
    scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=args.lr_adj_n, gamma=args.lr_adj_rate, last_epoch=-1)
    np.set_printoptions(precision=1)
    # ---- Train
    best_miou = 0
    print("Start training")
    for epoch in range(0, args.epochs):
        net.train()  # switch to train mode
        decs_str = 'Training epoch {}/{}'.format(epoch + 1, args.epochs)
        torch.cuda.empty_cache()
        for step, (rgb, depth, tsdf, target, position, _) in tqdm(enumerate(train_loader), desc=decs_str, unit='step'):
            # target should be a LongTensor. (bs, 60L, 36L, 60L)
            y_true = target.long().contiguous()
            y_true = Variable(y_true.view(-1)).cuda()  # bs * D * H * W
            # ---- (bs, C, D, H, W), channel first for Conv3d in pyTorch
            x_depth = Variable(depth.float()).cuda()
            position = position.long().cuda()
            if args.model == 'palnet':
                # PALNet consumes the flipped-TSDF volume instead of RGB
                x_tsdf = Variable(tsdf.float()).cuda()
                y_pred = net(x_depth=x_depth, x_tsdf=x_tsdf, p=position)
            else:
                x_rgb = Variable(rgb.float()).cuda()
                y_pred = net(x_depth=x_depth, x_rgb=x_rgb, p=position)
            # (BS, C, D, H, W) --> (BS, D, H, W, C) --> (N, 12) for CrossEntropyLoss
            y_pred = y_pred.permute(0, 2, 3, 4, 1).contiguous()
            y_pred = y_pred.view(-1, 12)  # C = 12
            optimizer.zero_grad()
            loss = loss_func(y_pred, y_true)
            loss.backward()
            optimizer.step()
        # BUGFIX: scheduler.step() was called once per *batch* inside the loop
        # above, so the LR decayed every lr_adj_n iterations instead of every
        # lr_adj_n epochs as the --lr_adj_n help text promises. Step per epoch.
        scheduler.step()
        # ---- Evaluate on validation set
        v_prec, v_recall, v_iou, v_acc, v_ssc_iou, v_mean_iou = validate_on_dataset_stsdf(net, val_loader)
        print('Validate with TSDF:epoch {}, p {:.1f}, r {:.1f}, IoU {:.1f}'.format(epoch + 1, v_prec*100.0, v_recall*100.0, v_iou*100.0))
        print('pixel-acc {:.4f}, mean IoU {:.1f}, SSC IoU:{}'.format(v_acc*100.0, v_mean_iou*100.0, v_ssc_iou*100.0))
        # ---- Save Checkpoint (latest every epoch, best separately)
        is_best = v_mean_iou > best_miou
        best_miou = max(v_mean_iou, best_miou)
        state = {'state_dict': net.state_dict()}
        torch.save(state, cp_filename)
        if is_best:
            print('Yeah! Got better mIoU {}% in epoch {}. State saved'.format(100.0*v_mean_iou, epoch + 1))
            torch.save(state, cp_best_filename)  # Save Checkpoint
# --------------------------------------------------------------------------------------------------------------
def validate_on_dataset_stsdf(model, date_loader, save_ply=False):
    """
    Evaluate on validation set.
    model: network with parameters loaded
    date_loader: TEST mode
    Returns (precision, recall, scene-completion IoU, pixel accuracy,
    per-class SSC IoU array, mean SSC IoU), all averaged over the set.
    """
    model.eval()  # switch to evaluate mode.
    _C = 12
    # running per-class accumulators across all frames
    cnt_per_class = np.zeros(_C, dtype=np.int32)    # count for each class
    iou_per_class = np.zeros(_C, dtype=np.float32)  # sum of iou for each class
    sum_acc, sum_p, sum_r, sum_iou = 0.0, 0.0, 0.0, 0.0
    n_frames = 0
    with torch.no_grad():
        # ---- STSDF depth, input, target, position, _
        for step, (rgb, depth, volume, y_true, nonempty, position, filename) in tqdm(enumerate(date_loader), desc='Validating', unit='frame'):
            var_x_depth = Variable(depth.float()).cuda()
            position = position.long().cuda()
            if args.model == 'palnet':
                var_x_volume = Variable(volume.float()).cuda()
                y_pred = model(x_depth=var_x_depth, x_tsdf=var_x_volume, p=position)
            else:
                var_x_rgb = Variable(rgb.float()).cuda()
                y_pred = model(x_depth=var_x_depth, x_rgb=var_x_rgb, p=position)  # (bs, C, W, H, D)
            # move everything to numpy on the CPU for metric computation
            y_pred = y_pred.cpu().data.numpy()
            y_true = y_true.numpy()
            nonempty = nonempty.numpy()
            p, r, iou, acc, iou_sum, cnt_class = validate_on_batch(y_pred, y_true, nonempty)
            n_frames += 1
            sum_acc += acc
            sum_p += p
            sum_r += r
            sum_iou += iou
            iou_per_class = iou_per_class + iou_sum
            cnt_per_class = cnt_per_class + cnt_class
    # average the scalar metrics over the number of frames seen
    val_acc = sum_acc / n_frames
    val_p = sum_p / n_frames
    val_r = sum_r / n_frames
    val_iou = sum_iou / n_frames
    val_iou_ssc, val_iou_ssc_mean = sscMetrics.get_iou(iou_per_class, cnt_per_class)
    return val_p, val_r, val_iou, val_acc, val_iou_ssc, val_iou_ssc_mean
def validate_on_batch(predict, target, nonempty=None):  # CPU
    """
    Compute completion and semantic metrics for one batch (numpy, on CPU).
    predict: (bs, channels, D, H, W)
    target: (bs, channels, D, H, W)
    Returns (precision, recall, iou, accuracy, per-class iou sums, per-class counts).
    """
    # TODO: validation will increase the usage of GPU memory!!!
    p, r, iou = sscMetrics.get_score_completion(predict, target, nonempty)
    # tp/fp/fn sums are computed by the helper but not consumed here
    acc, iou_sum, cnt_class, _tp_sum, _fp_sum, _fn_sum = sscMetrics.get_score_semantic_and_completion(predict, target, nonempty)
    return p, r, iou, acc, iou_sum, cnt_class
# static method
def adjust_learning_rate(optimizer, lr, epoch, n=10, rate=0.9):
    """Sets the learning rate to the initial LR decayed by rate=0.9 every n=10 epochs"""
    decayed_lr = lr * rate ** (epoch // n)
    for group in optimizer.param_groups:
        group['lr'] = decayed_lr
    # announce the (possibly new) rate at every decay boundary
    if epoch % n == 0:
        print('Current learning rate is: {}'.format(decayed_lr))
if __name__ == '__main__':
main()
| 10,137 | 39.552 | 142 | py |
SSC | SSC-master/test.py | #!/usr/bin/env python
# -*- coding: UTF-8 -*-
"""
python script to evaluate the SSC model
---
Jie Li
jieli_cn@163.com
Nanjing University of Science and Technology
Aug 25, 2019
"""
import os
import torch
import argparse
import datetime
from dataloaders import make_data_loader
from models import make_model
from main import validate_on_dataset_stsdf
import config
# ---- Command-line interface for evaluation; parsed at import time so the
# ---- module-level `args` is usable by main()/test() below.
parser = argparse.ArgumentParser(description='PyTorch SSC Training')
parser.add_argument('--dataset', type=str, default='nyu', choices=['nyu', 'nyucad', 'debug'],
                    help='dataset name (default: nyu)')
parser.add_argument('--model', type=str, default='ddrnet', choices=['ddrnet', 'aicnet', 'grfnet', 'palnet', 'lwddrnet'],
                    help='model name (default: palnet)')
parser.add_argument('--batch_size', default=4, type=int, metavar='N', help='mini-batch size (default: 4)')
parser.add_argument('--workers', default=4, type=int, metavar='N', help='number of data loading workers (default: 4)')
parser.add_argument('--resume', type=str, metavar='PATH', help='path to latest checkpoint (default: none)')
# NOTE(review): `global` at module level is a no-op; kept for byte-compatibility.
global args
args = parser.parse_args()
def main():
    """Entry point: report CUDA availability, run evaluation, and print elapsed time."""
    # ---- Check CUDA
    if torch.cuda.is_available():
        print("Great, You have {} CUDA device!".format(torch.cuda.device_count()))
    else:
        print("Sorry, You DO NOT have a CUDA device!")
    test_time_start = datetime.datetime.now()
    test()
    # BUGFIX: this script evaluates a trained model — the message previously
    # said "Training finished", which was copied from the training script.
    print('Testing finished in: {}'.format(datetime.datetime.now() - test_time_start))
def test():
    """Load a pretrained SSC model and evaluate it on the validation split."""
    # ---- create model ---------- ---------- ---------- ---------- ----------#
    net = make_model(args.model, num_classes=12).cuda()
    # net = torch.nn.DataParallel(net)  # Multi-GPU
    # ---- load pretrained model (mandatory for evaluation) ------- ----------#
    if not os.path.isfile(args.resume):
        raise Exception("=> NO checkpoint found at '{}'".format(args.resume))
    print("=> loading checkpoint '{}'".format(args.resume))
    cp_states = torch.load(args.resume)
    net.load_state_dict(cp_states['state_dict'], strict=True)
    # ---- Data loader (only the validation loader is used here)
    train_loader, val_loader = make_data_loader(args)
    torch.cuda.empty_cache()
    # ---- Evaluation
    v_prec, v_recall, v_iou, v_acc, v_ssc_iou, v_mean_iou = validate_on_dataset_stsdf(net, val_loader)
    print('Validate with TSDF:, p {:.1f}, r {:.1f}, IoU {:.1f}'.format(v_prec*100.0, v_recall*100.0, v_iou*100.0))
    print('pixel-acc {:.4f}, mean IoU {:.1f}, SSC IoU:{}'.format(v_acc*100.0, v_mean_iou*100.0, v_ssc_iou*100.0))
if __name__ == '__main__':
main() | 2,574 | 33.333333 | 120 | py |
SSC | SSC-master/config.py | import numpy as np
import torch
class Path(object):
    """Resolves dataset names to their train/val root directories on disk."""

    @staticmethod
    def db_root_dir(dataset):
        """Return {'train': dir, 'val': dir} for a known dataset name.

        Raises NotImplementedError (after printing a message) for unknown names.
        """
        roots = {
            'nyu': {'train': '/home/mcheem/data/datasets/NYU_SSC/NYUtrain_npz',
                    'val': '/home/mcheem/data/datasets/NYU_SSC/NYUtest_npz'},
            'nyucad': {'train': '/home/jsg/jie/Data_zoo/NYU_SSC/NYUCADtrain_npz',
                       'val': '/home/jsg/jie/Data_zoo/NYU_SSC/NYUCADtest_npz'},
            # debug: a small 40-frame split used for both train and val
            'debug': {'train': '/home/jsg/jie/Data_zoo/NYU_SSC/NYUCADval40_npz',
                      'val': '/home/jsg/jie/Data_zoo/NYU_SSC/NYUCADval40_npz'},
        }
        if dataset not in roots:
            print('Dataset {} not available.'.format(dataset))
            raise NotImplementedError
        return roots[dataset]
# ssc: color map
# RGB palette indexed by semantic class id (rows 0..11); row 12 is used for
# accessible area / voxels whose label is 255 (ignored).
colorMap = np.array([[22, 191, 206], # 0 empty, free space
[214, 38, 40], # 1 ceiling
[43, 160, 4], # 2 floor
[158, 216, 229], # 3 wall
[114, 158, 206], # 4 window
[204, 204, 91], # 5 chair new: 180, 220, 90
[255, 186, 119], # 6 bed
[147, 102, 188], # 7 sofa
[30, 119, 181], # 8 table
[188, 188, 33], # 9 tvs
[255, 127, 12], # 10 furn
[196, 175, 214], # 11 objects
[153, 153, 153], # 12 Accessible area, or label==255, ignore
]).astype(np.int32)
# ###########################################################################################
# Per-class weights for CrossEntropyLoss; empty space (class 0) is heavily
# down-weighted because it dominates the voxel volume.
class_weights = torch.FloatTensor([0.05, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1])
| 1,795 | 36.416667 | 93 | py |
SSC | SSC-master/infer_ros.py | #!/usr/bin/env python3
from utils.seed import seed_torch
import os
# Network dependencies
import torch
import argparse
import numpy as np
from torch.autograd import Variable
# ROS dependencies
import rospy
from sensor_msgs.msg import Image
import tf.transformations as tr
import tf
from cv_bridge import CvBridge
# local imports
from models import make_model
from utils import utils
from ssc_msgs.msg import SSCGrid
class ROSInfer:
    """ROS node wrapper around an SSC network.

    Subscribes to depth images, voxelizes each frame into a TSDF volume using
    the camera pose from tf, runs 3D CNN inference, and publishes the predicted
    semantic voxel grid as an SSCGrid message.
    """

    def __init__(self):
        # parse CLI args / ROS params first; everything below depends on self.args
        self._load_arguments()
        self.net = make_model(self.args.model, num_classes=12)
        self.depth_cam_frame = self.args.depth_cam_frame
        self.world_frame = self.args.world_frame
        self.listener = tf.TransformListener()
        self.ssc_pub = rospy.Publisher('ssc', SSCGrid, queue_size=10)
        self.bridge = CvBridge()

    def start(self):
        """
        Loads SSC Network model and start listening to depth images.
        """
        # load pretrained model
        self.load_network()
        # NOTE(review): the topic subscribed to is the *frame name* string —
        # presumably the depth image topic matches it; verify against publisher.
        self.depth_img_subscriber = rospy.Subscriber(
            self.depth_cam_frame, Image, self.callback)

    def callback(self, depth_image):
        """
        Receive a Depth image from the simulation, voxelize the depthmap as TSDF, 2D to 3D mapping
        and perform inference using 3D CNN. Publish the results as SSCGrid Message.
        """
        # get depth camera pose wrt odom; silently skip the frame if tf has no
        # transform at this timestamp
        try:
            position, orientation = self.listener.lookupTransform(
                self.world_frame, self.depth_cam_frame, depth_image.header.stamp)
        except (tf.LookupException, tf.ConnectivityException, tf.ExtrapolationException):
            return
        if torch.cuda.is_available():
            torch.cuda.empty_cache()
        # parse depth image
        cv_image = self.bridge.imgmsg_to_cv2(
            depth_image, desired_encoding='passthrough')
        # prepare 4x4 pose matrix from quaternion + translation
        pose_matrix = tr.quaternion_matrix(orientation)
        pose_matrix[0:3, -1] = position
        vox_origin, rgb, depth, tsdf, position, occupancy_grid = self._load_data_from_depth_image(
            cv_image, pose_matrix)
        x_depth = Variable(depth.float()).to(self.device)
        position = position.long().to(self.device)
        if self.args.model == 'palnet':
            x_tsdf = Variable(tsdf.float()).to(self.device)
            y_pred = self.net(x_depth=x_depth, x_tsdf=x_tsdf, p=position)
        else:
            # NOTE(review): rgb is None here (see _load_data_from_depth_image)
            # and the tensor is not moved to self.device — the non-palnet path
            # looks broken; confirm before using models other than palnet.
            x_rgb = Variable(rgb.float())
            y_pred = self.net(x_depth=x_depth, x_rgb=x_rgb, p=position)
        # per-voxel class = argmax over the softmaxed class scores
        scores = torch.nn.Softmax(dim=0)(y_pred.squeeze())
        preds = torch.argmax(scores, dim=0).cpu().numpy()
        # setup message: flatten the label volume plus its origin and extents
        msg = SSCGrid()
        msg.data = preds.reshape(-1).astype(np.float32).tolist()
        msg.origin_x = vox_origin[0]
        msg.origin_y = vox_origin[1]
        msg.origin_z = vox_origin[2]
        msg.frame = 'odom'
        msg.width = preds.shape[0]
        msg.height = preds.shape[1]
        msg.depth = preds.shape[2]
        # publish message
        self.ssc_pub.publish(msg)

    def _load_data_from_depth_image(self, depth, cam_pose, max_depth=8, cam_k=[[320, 0, 320], [0, 320, 240], [0, 0, 1]]):
        """
        Takes a depth map, pose as input and outputs the 3D voxeloccupancy, 2D to 3D mapping and TSDF grid.

        Returns (vox_origin, rgb(None), depth tensor (1,1,H,W), tsdf tensor,
        mapping-index tensor, occupancy tensor).
        """
        rgb = None
        depth_npy = np.array(depth)
        # discard inf/out-of-range points by clamping them to the minimum depth
        depth_npy[depth_npy > max_depth] = depth_npy.min()
        # get voxel grid origin
        vox_origin = utils.get_origin_from_depth_image(
            depth_npy, cam_k, cam_pose)
        # compute tsdf for the voxel grid from depth camera
        vox_tsdf, depth_mapping_idxs, voxel_occupancy = utils.compute_tsdf(
            depth_npy, vox_origin, cam_k, cam_pose)
        return vox_origin, rgb, torch.as_tensor(depth_npy).unsqueeze(0).unsqueeze(0), torch.as_tensor(vox_tsdf).unsqueeze(0), torch.as_tensor(depth_mapping_idxs).unsqueeze(0).unsqueeze(0), torch.as_tensor(voxel_occupancy.transpose(2, 1, 0)).unsqueeze(0)

    def load_network(self):
        """
        Loads a pretrained model for inference
        """
        if os.path.isfile(self.args.resume):
            print("=> loading checkpoint '{}'".format(self.args.resume))
            cp_states = torch.load(self.args.resume, map_location=torch.device('cpu'))
            self.net.load_state_dict(cp_states['state_dict'], strict=True)
        else:
            raise Exception("=> NO checkpoint found at '{}'".format(self.args.resume))
        if torch.cuda.is_available():
            # NOTE(review): this format string has no placeholder, so the
            # device count argument is dropped from the printed message.
            print("CUDA device found!".format(torch.cuda.device_count()))
            self.device = torch.device('cuda')
        else:
            print("Using CPU!")
            self.device = torch.device('cpu')
        self.net = self.net.to(self.device)
        # switch to test mode
        self.net.eval()

    def _load_arguments(self):
        # CLI defaults, overridable by ROS private parameters of the same name
        parser = argparse.ArgumentParser(description='PyTorch SSC Inference')
        parser.add_argument('--depth_cam_frame', type=str, default='/airsim_drone/Depth_cam',
                            help='depth cam frame name (default: /airsim_drone/Depth_cam)')
        parser.add_argument('--world_frame', type=str, default='/odom',
                            help='world frame name (default: /odom)')
        parser.add_argument('--model', type=str, default='palnet', choices=['ddrnet', 'palnet'],
                            help='model name (default: palnet)')
        parser.add_argument('--resume', type=str, metavar='PATH',
                            help='path to latest checkpoint (default: none)')
        args = parser.parse_args()
        # use argparse arguments as default and override with ros params
        args.world_frame = rospy.get_param('~world_frame', args.world_frame)
        args.depth_cam_frame = rospy.get_param('~depth_cam_frame', args.depth_cam_frame)
        args.model = rospy.get_param('~model', args.model)
        args.resume = rospy.get_param('~resume', args.resume)
        self.args = args
if __name__ == '__main__':
rospy.init_node("scene_completion")
ri = ROSInfer()
ri.start()
rospy.spin()
| 6,176 | 35.550296 | 253 | py |
SSC | SSC-master/infer.py | from utils.seed import seed_torch
import os
import torch
import argparse
import numpy as np
from pathlib import Path
import imageio
import glob
from tqdm import tqdm
from torch.autograd import Variable
import datetime
from models import make_model
import config
import VoxelUtils as vu
from utils import utils
# ---- Command-line interface for offline inference over saved depth frames.
parser = argparse.ArgumentParser(description='PyTorch SSC Inference')
parser.add_argument('--dataset', type=str, default='nyu', choices=['nyu', 'nyucad', 'debug'],
                    help='dataset name (default: nyu)')
parser.add_argument('--model', type=str, default='palnet', choices=['ddrnet', 'aicnet', 'grfnet', 'palnet'],
                    help='model name (default: palnet)')
parser.add_argument('--files', default="/home/mcheem/data/datasets/large_room/", help='Depth Images')
parser.add_argument('--resume', type=str, metavar='PATH', help='path to latest checkpoint (default: none)')
parser.add_argument('--save_completions', type=str, metavar='PATH', default="outputs", help='path to save completions (default: none)')
parser.add_argument('--model_name', default='SSC_debug', type=str, help='name of model to save check points')
# NOTE(review): `global` at module level is a no-op; kept for byte-compatibility.
global args
args = parser.parse_args()
def load_data_from_depth_image(filename, max_depth=8, cam_k=[[320, 0, 320], [0, 320, 240], [0, 0, 1]]):
    """
    Read depth and pose from a saved npz file and return TSDF voxels,
    the 2D->3D mapping indices and the voxel occupancy grid.
    """
    # NOTE(review): cam_k is a mutable default argument; safe only while it is
    # never mutated — confirm utils.* treat it as read-only.
    rgb = None
    # the .npz sibling of `filename` holds the raw depth map and camera pose
    frame_data = np.load(filename[:-4] + ".npz")
    depth_npy = frame_data["depth"]
    cam_pose = frame_data["pose"]
    # clamp out-of-range (e.g. inf) depth readings to the minimum depth
    depth_npy[depth_npy > max_depth] = depth_npy.min()
    vox_origin = utils.get_origin_from_depth_image(depth_npy, cam_k, cam_pose)
    vox_tsdf, depth_mapping_idxs, voxel_occupancy = utils.compute_tsdf(
        depth_npy, vox_origin, cam_k, cam_pose)
    return rgb, torch.as_tensor(depth_npy).unsqueeze(0).unsqueeze(0), torch.as_tensor(vox_tsdf).unsqueeze(0), torch.as_tensor(depth_mapping_idxs).unsqueeze(0).unsqueeze(0), torch.as_tensor(voxel_occupancy.transpose(2, 1, 0)).unsqueeze(0)
def infer():
    """
    Performan Inference on saved depth data and save the results
    to output directory specified in the arguments.
    """
    NUM_CLASSES = 12
    net = make_model(args.model, num_classes=NUM_CLASSES).cuda()
    if args.resume:
        if os.path.isfile(args.resume):
            print("=> loading checkpoint '{}'".format(args.resume))
            cp_states = torch.load(args.resume, map_location=torch.device('cpu'))
            net.load_state_dict(cp_states['state_dict'], strict=True)
        else:
            raise Exception("=> NO checkpoint found at '{}'".format(args.resume))
    # switch to eval mode
    net.eval()
    torch.cuda.empty_cache()
    # retrive list of saved depth/pose array files
    file_list = glob.glob(str(Path(args.files) / "*.npz"))
    for step, depth_file in enumerate(file_list):
        rgb, depth, tsdf, position, occupancy_grid = load_data_from_depth_image(depth_file)
        x_depth = Variable(depth.float()).cuda()
        position = position.long().cuda()
        if args.model == 'palnet':
            x_tsdf = Variable(tsdf.float()).cuda()
            y_pred = net(x_depth=x_depth, x_tsdf=x_tsdf, p=position)
        else:
            # NOTE(review): rgb is None here (load_data_from_depth_image returns
            # None for rgb) and the tensor is not moved to CUDA while the net
            # is — the non-palnet path looks broken; confirm before use.
            x_rgb = Variable(rgb.float())
            y_pred = net(x_depth=x_depth, x_rgb=x_rgb, p=position)
        # calculate per voxel class
        scores = torch.nn.Softmax(dim=0)(y_pred.squeeze())
        scores[0] += 0.3  # Increase offset of empty class to weed out low prob predictions
        preds = torch.argmax(scores, dim=0).cpu().numpy()
        # save completions: predicted labels plus a downsampled copy of the raw scan
        if args.save_completions:
            utils.labeled_voxel2ply(preds,"{}/{}_preds.ply".format(args.save_completions, Path(depth_file).stem))
            occupancy_grid_downsampled = utils.downsample_voxel(occupancy_grid.squeeze().numpy())
            utils.labeled_voxel2ply(occupancy_grid_downsampled,"{}/{}_scan.ply".format(args.save_completions, Path(depth_file).stem))
def main():
    """Entry point: report the compute device, then run inference."""
    # ---- Check CUDA
    if torch.cuda.is_available():
        # BUGFIX: the format string had no placeholder, so the device count
        # passed to .format() was silently dropped from the message.
        print("CUDA device found! ({} devices)".format(torch.cuda.device_count()))
    else:
        print("Using CPU!")
    infer()
if __name__ == '__main__':
main()
| 4,225 | 37.072072 | 237 | py |
SSC | SSC-master/models/PALNet.py | #!/usr/bin/env python
# -*- coding: UTF-8 -*-
"""
PALNet
jieli_cn@163.com
"""
import torch
import torch.nn as nn
from torch.nn import functional as F
from .projection_layer import Project2Dto3D
# ----------------------------------------------------------------------
# takes the depth and fTSDF as inputs
class SSC_PALNet(nn.Module):
    """PALNet: two-stream 3D CNN for semantic scene completion.

    One stream encodes a single-channel depth image (lifted to 3D via the
    projection layer); the other encodes the flipped-TSDF volume. The streams
    are fused at 1/4 resolution, refined by residual/dilated blocks, and
    decoded to per-voxel class logits of shape (BS, num_classes, 60, 36, 60).
    """

    def __init__(self, num_classes=12):
        super(SSC_PALNet, self).__init__()
        print("SSC_PALNet")
        # ---- depth stream: 2D convs on the depth image before 3D projection
        depth_out = 6
        self.conv2d_depth = nn.Sequential(
            nn.Conv2d(1, depth_out, 3, 1, 1),
            nn.ReLU(inplace=True),
        )
        # 2D bottleneck residual block (1x1 reduce -> 3x3 -> 1x1 expand)
        in_ch = depth_out // 2
        self.res_depth = nn.Sequential(
            nn.Conv2d(depth_out, in_ch, 1, 1, 0),
            nn.ReLU(inplace=True),
            nn.Conv2d(in_ch, in_ch, 3, 1, 1),
            nn.ReLU(inplace=True),
            nn.Conv2d(in_ch, depth_out, 1, 1, 0),
        )
        # scatters 2D features into the full-resolution voxel volume
        self.project_layer = Project2Dto3D(240, 144, 240)  # w=240, h=144, d=240
        in_channel_3d = depth_out
        stride = 2
        # stride-2 7x7x7 conv downsamples the projected volume to 1/2
        self.pool1 = nn.Conv3d(in_channel_3d, 8, 7, stride, 3)
        # residual pair: 1x1 shortcut (reduction) + bottleneck branch
        self.reduction2_1 = nn.Conv3d(8, 16, 1, 1, 0, bias=False)
        self.conv2_1 = nn.Sequential(
            nn.Conv3d(8, 8, 1, 1, 0),
            nn.ReLU(inplace=True),
            nn.Conv3d(8, 8, 3, 1, 1),
            nn.ReLU(inplace=True),
            nn.Conv3d(8, 16, 1, 1, 0)
        )
        # ---- flipped_tsdf stream: same topology, single-channel 3D input
        in_channel_3d = 1
        stride = 2
        self.pool2 = nn.Conv3d(in_channel_3d, 8, 7, stride, 3)
        self.reduction2_2 = nn.Conv3d(8, 16, 1, 1, 0, bias=False)
        self.conv2_2 = nn.Sequential(
            nn.Conv3d(8, 8, 1, 1, 0),
            nn.ReLU(inplace=True),
            nn.Conv3d(8, 8, 3, 1, 1),
            nn.ReLU(inplace=True),
            nn.Conv3d(8, 16, 1, 1, 0)
        )
        # downsample each stream to 1/4 with another residual pair
        stride = 2
        self.reduction3_1 = nn.Conv3d(16, 32, 1, stride, 0, bias=False)
        self.conv3_1 = nn.Sequential(
            nn.Conv3d(16, 8, 1, stride, 0),
            nn.ReLU(inplace=True),
            nn.Conv3d(8, 8, 3, 1, 1),
            nn.ReLU(inplace=True),
            nn.Conv3d(8, 32, 1, 1, 0),
        )
        stride = 2
        self.reduction3_2 = nn.Conv3d(16, 32, 1, stride, 0, bias=False)
        self.conv3_2 = nn.Sequential(
            nn.Conv3d(16, 8, 1, stride, 0),
            nn.ReLU(inplace=True),
            nn.Conv3d(8, 8, 3, 1, 1),
            nn.ReLU(inplace=True),
            nn.Conv3d(8, 32, 1, 1, 0),
        )
        # -------------1/4 resolution: fused (depth+tsdf = 64ch) refinement blocks
        self.conv3_3 = nn.Sequential(
            nn.Conv3d(64, 32, 1, 1, 0),
            nn.ReLU(inplace=True),
            nn.Conv3d(32, 32, 3, 1, 1),
            nn.ReLU(inplace=True),
            nn.Conv3d(32, 64, 1, 1, 0),
        )
        # dilated (rate-2) bottlenecks enlarge the receptive field without downsampling
        self.conv3_5 = nn.Sequential(
            nn.Conv3d(64, 32, 1, 1, 0),
            nn.ReLU(inplace=True),
            nn.Conv3d(32, 32, 3, 1, 2, 2),
            nn.Conv3d(32, 32, 3, 1, 2, 2),
            nn.ReLU(inplace=True),
            nn.Conv3d(32, 64, 1, 1, 0),
        )
        self.conv3_7 = nn.Sequential(
            nn.Conv3d(64, 32, 1, 1, 0),
            nn.ReLU(inplace=True),
            nn.Conv3d(32, 32, 3, 1, 2, 2),
            nn.Conv3d(32, 32, 3, 1, 2, 2),
            nn.ReLU(inplace=True),
            nn.Conv3d(32, 64, 1, 1, 0),
        )
        # head: 256ch (4x64 concat) -> 128 -> 128 -> num_classes logits
        self.conv4_1 = nn.Conv3d(256, 128, 1, 1, 0)
        self.relu4_1 = nn.ReLU(inplace=True)
        self.conv4_2 = nn.Conv3d(128, 128, 1, 1, 0)
        self.relu4_2 = nn.ReLU(inplace=True)
        self.fc12 = nn.Conv3d(128, num_classes, 1, 1, 0)  # C_NUM = 12, number of classes is 12
        self.softmax = nn.Softmax(dim=1)  # pytorch 0.3.0
        # self.logsoftmax = nn.LogSoftmax(dim=1)
        # ---- weights init: Xavier for all 3D convs, then re-init the head
        # layers with small gaussians (order matters — head overrides Xavier)
        for m in self.modules():
            if isinstance(m, nn.Conv3d):
                nn.init.xavier_uniform_(m.weight.data)  # gain=1
                # nn.init.constant(m.bias.data, 0)
        nn.init.normal_(self.conv4_1.weight.data, mean=0, std=0.1)
        nn.init.normal_(self.conv4_2.weight.data, mean=0, std=0.01)
        nn.init.normal_(self.fc12.weight.data, mean=0, std=0.01)

    def forward(self, x_depth, x_tsdf, p):
        """Forward pass.

        x_depth: (BS, 1, img_h, img_w) depth image.
        x_tsdf: flipped-TSDF volume (unsqueezed/permuted below before conv).
        p: per-pixel voxel indices for the 2D->3D projection layer.
        Returns per-voxel logits (BS, num_classes, 60, 36, 60).
        """
        # ---- depth stream: 2D features, residual add, then lift into 3D
        x0_depth = self.conv2d_depth(x_depth)
        x0_depth = F.relu(self.res_depth(x0_depth) + x0_depth, inplace=True)
        x0_depth = self.project_layer(x0_depth, p)
        x1_depth = self.pool1(x0_depth)
        x1_depth = F.relu(x1_depth, inplace=True)
        x2_1_depth = self.reduction2_1(x1_depth)  # (BS, 32L, 120L, 72L, 120L)
        x2_2_depth = self.conv2_1(x1_depth)
        x2_depth = x2_1_depth + x2_2_depth
        x2_depth = F.relu(x2_depth, inplace=True)
        # ---- tsdf stream: add a channel dimension, then mirror the depth stream
        x_tsdf_s = torch.unsqueeze(x_tsdf, 0).permute(1,0,2,3,4)
        x1_tsdf = self.pool2(x_tsdf_s)  # (BS, 16L, 120L, 72L, 120L)
        x1_tsdf = F.relu(x1_tsdf, inplace=True)
        x2_1_tsdf = self.reduction2_2(x1_tsdf)  # (BS, 32L, 120L, 72L, 120L)
        x2_2_tsdf = self.conv2_2(x1_tsdf)
        x2_tsdf = x2_1_tsdf + x2_2_tsdf
        x2_tsdf = F.relu(x2_tsdf, inplace=True)
        # downsample both streams to 1/4 resolution
        x3_1_depth = self.reduction3_1(x2_depth)  # (BS, 64L, 60L, 36L, 60L)
        x3_2_depth = self.conv3_1(x2_depth)
        x_3_depth = x3_1_depth + x3_2_depth
        x_3_depth = F.relu(x_3_depth, inplace=True)
        # print('SSC: x_3_depth', x_3_depth.size())
        x3_1_tsdf = self.reduction3_2(x2_tsdf)  # (BS, 64L, 60L, 36L, 60L)
        x3_2_tsdf = self.conv3_2(x2_tsdf)  #
        x_3_tsdf = x3_1_tsdf + x3_2_tsdf
        x_3_tsdf = F.relu(x_3_tsdf, inplace=True)
        # print('SSC: x_3_tsdf', x_3_tsdf.size())
        # fuse the two streams along channels
        x_3 = torch.cat((x_3_depth, x_3_tsdf), dim=1)
        # ---- 1/4 resolution residual refinement
        x_4 = self.conv3_3(x_3) + x_3
        x_4 = F.relu(x_4, inplace=True)
        # print 'SSC: x_4', x_4.size()
        x_5 = self.conv3_5(x_4) + x_4
        x_5 = F.relu(x_5, inplace=True)
        # print 'SSC: x_5', x_5.size()
        x_6 = self.conv3_7(x_5) + x_5
        x_6 = F.relu(x_6, inplace=True)
        # print 'SSC: x_6', x_6.size()
        # dense skip aggregation of all refinement stages
        x_6 = torch.cat((x_3, x_4, x_5, x_6), dim=1)  # channels concatenate
        # x_6 = F.relu(x_6)
        # print('SSC: channels concatenate x', x.size())  # (BS, 256L, 60L, 36L, 60L)
        x_6 = self.conv4_1(x_6)  # (BS, 128L, 60L, 36L, 60L)
        x_6 = F.relu(x_6, inplace=True)
        # x_6 = self.relu4_1(x_6)
        x_6 = self.conv4_2(x_6)  # (BS, 128L, 60L, 36L, 60L)
        x_6 = F.relu(x_6, inplace=True)
        # print 'SSC: x_6', x_6.size()
        y = self.fc12(x_6)  # (BS, 12L, 60L, 36L, 60L)
        return y
| 6,635 | 32.346734 | 95 | py |
SSC | SSC-master/models/DDR.py | #!/usr/bin/env python
# -*- coding: UTF-8 -*-
"""
DDR
jieli_cn@163.com
"""
import torch
import torch.nn as nn
from torch.nn import functional as F
# ----------------------------------------------------------------------
class BasicDDR2d(nn.Module):
    """Basic 2D DDR block: a kxk conv decomposed into (1,k) and (k,1) convs,
    with an optional residual connection around the pair."""

    def __init__(self, c, k=3, dilation=1, residual=True):
        super(BasicDDR2d, self).__init__()
        pad = k // 2 * dilation
        self.conv_1xk = nn.Conv2d(c, c, (1, k), stride=1, padding=(0, pad), bias=True, dilation=(1, dilation))
        self.conv_kx1 = nn.Conv2d(c, c, (k, 1), stride=1, padding=(pad, 0), bias=True, dilation=(dilation, 1))
        self.residual = residual

    def forward(self, x):
        out = F.relu(self.conv_1xk(x), inplace=True)
        out = self.conv_kx1(out)
        if self.residual:
            out = out + x
        return F.relu(out, inplace=True)
# ----------------------------------------------------------------------
class BasicDDR3d(nn.Module):
    """Basic 3D DDR block: a kxkxk conv decomposed into 1x1xk, 1xkx1 and kx1x1
    convs applied in sequence, with an optional residual connection."""

    def __init__(self, c, k=3, dilation=1, stride=1, residual=True):
        super(BasicDDR3d, self).__init__()
        pad = k // 2 * dilation
        self.conv_1x1xk = nn.Conv3d(c, c, (1, 1, k), stride=(1, 1, stride), padding=(0, 0, pad), bias=True, dilation=(1, 1, dilation))
        self.conv_1xkx1 = nn.Conv3d(c, c, (1, k, 1), stride=(1, stride, 1), padding=(0, pad, 0), bias=True, dilation=(1, dilation, 1))
        self.conv_kx1x1 = nn.Conv3d(c, c, (k, 1, 1), stride=(stride, 1, 1), padding=(pad, 0, 0), bias=True, dilation=(dilation, 1, 1))
        self.residual = residual

    def forward(self, x):
        out = F.relu(self.conv_1x1xk(x), inplace=True)
        out = F.relu(self.conv_1xkx1(out), inplace=True)
        out = self.conv_kx1x1(out)
        if self.residual:
            out = out + x
        return F.relu(out, inplace=True)
class BottleneckDDR2d(nn.Module):
    """2D DDR bottleneck: 1x1 reduce -> (1,k)/(k,1) decomposed conv -> 1x1
    expand, with an optional residual connection (requires c_in == c_out)."""

    def __init__(self, c_in, c, c_out, kernel=3, stride=1, dilation=1, residual=True):
        super(BottleneckDDR2d, self).__init__()
        pad = kernel // 2 * dilation
        self.conv_in = nn.Conv2d(c_in, c, kernel_size=1, bias=False)
        self.conv_1xk = nn.Conv2d(c, c, (1, kernel), stride=stride, padding=(0, pad), bias=True, dilation=(1, dilation))
        self.conv_kx1 = nn.Conv2d(c, c, (kernel, 1), stride=stride, padding=(pad, 0), bias=True, dilation=(dilation, 1))
        self.conv_out = nn.Conv2d(c, c_out, kernel_size=1, bias=False)
        self.residual = residual

    def forward(self, x):
        out = F.relu(self.conv_in(x), inplace=True)
        out = F.relu(self.conv_1xk(out), inplace=True)
        out = F.relu(self.conv_kx1(out), inplace=True)
        out = self.conv_out(out)
        if self.residual:
            out = out + x
        return F.relu(out, inplace=True)
class BottleneckDDR3d(nn.Module):
    """3D DDR bottleneck: 1x1x1 reduce -> decomposed axis convs with internal
    skip connections -> 1x1x1 expand, with an optional outer residual add
    (requires c_in == c_out)."""

    def __init__(self, c_in, c, c_out, kernel=3, stride=1, dilation=1, residual=True):
        super(BottleneckDDR3d, self).__init__()
        pad = kernel // 2 * dilation
        self.conv_in = nn.Conv3d(c_in, c, kernel_size=1, bias=False)
        self.conv1x1x3 = nn.Conv3d(c, c, (1, 1, kernel), stride=stride, padding=(0, 0, pad), bias=True, dilation=(1, 1, dilation))
        self.conv1x3x1 = nn.Conv3d(c, c, (1, kernel, 1), stride=stride, padding=(0, pad, 0), bias=True, dilation=(1, dilation, 1))
        self.conv3x1x1 = nn.Conv3d(c, c, (kernel, 1, 1), stride=stride, padding=(pad, 0, 0), bias=True, dilation=(dilation, 1, 1))
        self.conv_out = nn.Conv3d(c, c_out, kernel_size=1, bias=False)
        self.residual = residual

    def forward(self, x):
        reduced = F.relu(self.conv_in(x), inplace=True)
        t1 = F.relu(self.conv1x1x3(reduced), inplace=True)
        # each later axis-conv also sums the earlier axis outputs (dense skips)
        t2 = F.relu(self.conv1x3x1(t1) + t1, inplace=True)
        t3 = F.relu(self.conv3x1x1(t2) + t2 + t1, inplace=True)
        out = self.conv_out(t3)
        if self.residual:
            out = out + x
        return F.relu(out, inplace=True)
class DownsampleBlock3d(nn.Module):
    """Downsample a 3D volume by 2, concatenating a strided conv branch
    (c_out - c_in channels) with a max-pool branch (c_in channels)."""

    def __init__(self, c_in, c_out, k=3, s=2, p=1):
        super(DownsampleBlock3d, self).__init__()
        self.conv = nn.Conv3d(c_in, c_out-c_in, kernel_size=k, stride=s, padding=p, bias=False)
        self.pool = nn.MaxPool3d(2, stride=2)
        # self.bn = nn.BatchNorm2d(c_out, eps=1e-3)

    def forward(self, x):
        branches = [self.conv(x), self.pool(x)]
        merged = torch.cat(branches, 1)
        return F.relu(merged, inplace=True)
class DDR_ASPP3d(nn.Module):
    """3D ASPP head built from DDR bottlenecks: parallel branches at dilations
    6/12/18 plus a 1x1x1 conv and a global-average-pooling branch, all
    concatenated along channels (output has 5 * c_out channels)."""

    def __init__(self, c_in, c, c_out, residual=False):
        super(DDR_ASPP3d, self).__init__()
        print('DDR_ASPP3d: c_in:{}, c:{}, c_out:{}'.format(c_in, c, c_out))
        self.aspp0 = nn.Conv3d(c_in, c_out, kernel_size=1, stride=1, padding=0, dilation=1, bias=False)
        self.aspp1 = BottleneckDDR3d(c_in, c, c_out, dilation=6, residual=residual)
        self.aspp2 = BottleneckDDR3d(c_in, c, c_out, dilation=12, residual=residual)
        self.aspp3 = BottleneckDDR3d(c_in, c, c_out, dilation=18, residual=residual)
        self.global_avg_pool = nn.Sequential(nn.AdaptiveAvgPool3d((1, 1, 1)),
                                             nn.Conv3d(c_in, c_out, 1, stride=1, bias=False))

    def forward(self, x):
        # global-context branch, upsampled back to the input resolution
        pooled = self.global_avg_pool(x)
        pooled = F.interpolate(pooled, size=x.size()[2:], mode='trilinear', align_corners=True)
        branches = (self.aspp0(x), self.aspp1(x), self.aspp2(x), self.aspp3(x), pooled)
        return torch.cat(branches, dim=1)
| 5,848 | 36.254777 | 120 | py |
SSC | SSC-master/models/projection_layer.py | #!/usr/bin/env python
# -*- coding: UTF-8 -*-
"""
Project feature tensers of 2D image to 3D space
jieli_cn@163.com
"""
import torch.nn as nn
from torch_scatter import scatter_max
class Project2Dto3D(nn.Module):
    """Lift 2D image features into a (w, h, d) voxel volume.

    Each pixel carries a flat voxel index; features are scattered with a
    channel-wise max into the volume, then reordered to (BS, c, D, H, W).
    """

    def __init__(self, w=240, h=144, d=240):
        super(Project2Dto3D, self).__init__()
        self.w = w
        self.h = h
        self.d = d

    def forward(self, x2d, idx):
        bs, c = x2d.shape[0], x2d.shape[1]
        feats = x2d.view(bs, c, -1)
        # broadcast the per-pixel voxel index across all feature channels
        flat_idx = idx.view(bs, 1, -1).expand(-1, c, -1)
        volume = x2d.new_zeros((bs, c, self.w * self.h * self.d))
        volume, _ = scatter_max(feats, flat_idx, out=volume)  # dim_size = w*h*d
        volume = volume.view(bs, c, self.w, self.h, self.d)  # (BS, c, vW, vH, vD)
        # (BS, c, vW, vH, vD) --> (BS, c, vD, vH, vW)
        return volume.permute(0, 1, 4, 3, 2)
| 922 | 27.84375 | 89 | py |
SSC | SSC-master/models/AICNet.py | #!/usr/bin/env python
# -*- coding: UTF-8 -*-
"""
AICNet
jieli_cn@163.com
"""
import torch
import torch.nn as nn
from torch.nn import functional as F
from .projection_layer import Project2Dto3D
from .DDR import BottleneckDDR2d, BottleneckDDR3d, DownsampleBlock3d
class BasicAIC3d(nn.Module):
    """Anisotropic convolution block with learned kernel selection.

    For each spatial axis it runs `n = len(kernel)` decomposed convs of
    different kernel sizes and blends their outputs with per-voxel softmax
    weights predicted by a 1x1x1 "modulation" conv (conv_mx).
    """

    def __init__(self, channel, kernel=(3, 5, 7), dilation=(1, 1, 1), residual=True):
        super(BasicAIC3d, self).__init__()
        self.channel = channel
        self.residual = residual
        self.n = len(kernel)  # number of kernels
        # predicts 3*n mixing maps: one per (axis, kernel-size) pair
        self.conv_mx = nn.Conv3d(channel, 3 * self.n, (1, 1, 1), stride=1, padding=0, bias=False, dilation=1)
        self.softmax = nn.Softmax(dim=2)  # Applies the Softmax function in each axis
        # ---- Convs of each axis
        self.conv_1x1xk = nn.ModuleList()
        self.conv_1xkx1 = nn.ModuleList()
        self.conv_kx1x1 = nn.ModuleList()
        c = channel
        for _idx in range(self.n):
            k = kernel[_idx]
            d = dilation[_idx]
            p = k // 2 * d  # "same" padding for this kernel/dilation pair
            self.conv_1x1xk.append(nn.Conv3d(c, c, (1, 1, k), stride=1, padding=(0, 0, p), bias=True, dilation=(1, 1, d)))
            self.conv_1xkx1.append(nn.Conv3d(c, c, (1, k, 1), stride=1, padding=(0, p, 0), bias=True, dilation=(1, d, 1)))
            self.conv_kx1x1.append(nn.Conv3d(c, c, (k, 1, 1), stride=1, padding=(p, 0, 0), bias=True, dilation=(d, 1, 1)))

    def forward(self, x):
        # ---- predict softmax mixing weights over the n kernel sizes, per axis
        mx = self.conv_mx(x)  # (BS, 3n, D, H, W)
        _bs, _tn, _d, _h, _w = mx.size()
        mx = mx.view(_bs, 3, -1, _d, _h, _w)  # (BS, 3, n, D, H, W)
        mx = self.softmax(mx)  # dim=2: weights over the n kernels sum to 1
        mx_c = torch.unsqueeze(mx, dim=3)  # (BS, 3, n, 1, D, H, W)
        mx_c = mx_c.expand(-1, -1, -1, self.channel, -1, -1, -1)  # (BS, 3, n, c, D, H, W)
        mx_list = torch.split(mx_c, 1, dim=2)  # n x (BS, 3, 1, c, D, H, W)
        # split per-kernel weights into one tensor per axis (z, y, x)
        mx_z_list = []
        mx_y_list = []
        mx_x_list = []
        for i in range(self.n):
            mx_z, mx_y, mx_x = torch.split(torch.squeeze(mx_list[i], dim=2), 1, dim=1)  # 3 x (BS, 1, c, D, H, W)
            mx_z_list.append(torch.squeeze(mx_z, dim=1))  # (BS, c, D, H, W)
            mx_y_list.append(torch.squeeze(mx_y, dim=1))  # (BS, c, D, H, W)
            mx_x_list.append(torch.squeeze(mx_x, dim=1))  # (BS, c, D, H, W)
        # ------ x ------ weighted sum of the n kernel sizes along the x axis
        y_x = None
        for _idx in range(self.n):
            y1_x = self.conv_1x1xk[_idx](x)
            y1_x = F.relu(y1_x, inplace=True)
            y1_x = torch.mul(mx_x_list[_idx], y1_x)
            y_x = y1_x if y_x is None else y_x + y1_x
        # ------ y ------ same blend along the y axis, fed by the x result
        y_y = None
        for _idx in range(self.n):
            y1_y = self.conv_1xkx1[_idx](y_x)
            y1_y = F.relu(y1_y, inplace=True)
            y1_y = torch.mul(mx_y_list[_idx], y1_y)
            y_y = y1_y if y_y is None else y_y + y1_y
        # ------ z ------ final blend along the z axis
        y_z = None
        for _idx in range(self.n):
            y1_z = self.conv_kx1x1[_idx](y_y)
            y1_z = F.relu(y1_z, inplace=True)
            y1_z = torch.mul(mx_z_list[_idx], y1_z)
            y_z = y1_z if y_z is None else y_z + y1_z
        y = F.relu(y_z + x, inplace=True) if self.residual else F.relu(y_z, inplace=True)
        return y
class BottleneckAIC3d(nn.Module):
    """Bottleneck wrapper around BasicAIC3d.

    1x1x1 channel reduction -> anisotropic conv core -> 1x1x1 expansion,
    with an optional residual connection.  ``neighbours`` and
    ``pooling_kernel`` are accepted for interface compatibility but unused.
    """

    def __init__(self, c_in, c, c_out, kernel=(3, 5, 7), dilation=(1, 1, 1), residual=True, neighbours=0, pooling_kernel=0):
        super(BottleneckAIC3d, self).__init__()
        self.residual = residual
        # Channel bottleneck around the anisotropic convolution core.
        self.conv_in = nn.Conv3d(c_in, c, kernel_size=1, bias=False)
        self.basic_aic = BasicAIC3d(c, kernel=kernel, dilation=dilation, residual=True)
        self.conv_out = nn.Conv3d(c, c_out, kernel_size=1, bias=False)

    def forward(self, x):
        out = F.relu(self.conv_in(x), inplace=True)
        out = self.conv_out(self.basic_aic(out))
        if self.residual:
            out = out + x
        return F.relu(out, inplace=True)
class SSC_RGBD_AICNet(nn.Module):
    """Two-stream (RGB + depth) semantic scene completion network built from
    anisotropic convolution (AIC) bottlenecks.

    Each modality is encoded in 2D, projected into a (240, 144, 240) voxel
    volume via the per-pixel voxel indices ``p``, downsampled 4x in 3D,
    passed through three AIC bottleneck stages, and the stage-wise fused
    (depth + RGB) features are concatenated and decoded to per-voxel class
    scores at 1/4 resolution.
    """

    def __init__(self, num_classes=12):
        super(SSC_RGBD_AICNet, self).__init__()
        print('SSC_RGBD_AICNet.')
        w, h, d = 240, 144, 240  # full-resolution voxel grid
        k = ((3, 5, 7), (3, 5, 7), (3, 5, 7))  # kernel-size sets for the 3 AIC stages
        ks = (3, 5, 7)  # kernel-size set for the two decoder AIC blocks
        # --- depth
        c_in, c, c_out, dilation, residual = 1, 4, 8, 1, True
        self.dep_feature2d = nn.Sequential(
            nn.Conv2d(c_in, c_out, 1, 1, 0),  # reduction
            BottleneckDDR2d(c_out, c, c_out, dilation=dilation, residual=residual),
            BottleneckDDR2d(c_out, c, c_out, dilation=dilation, residual=residual),
        )
        self.project_layer_dep = Project2Dto3D(w, h, d)  # w=240, h=144, d=240
        # 2x downsample twice: 240^3-scale volume -> 1/4 resolution, 64 channels.
        self.dep_feature3d = nn.Sequential(
            DownsampleBlock3d(8, 16),
            BottleneckDDR3d(c_in=16, c=8, c_out=16, dilation=1, residual=True),
            DownsampleBlock3d(16, 64),  # nn.MaxPool3d(kernel_size=2, stride=2)
            BottleneckDDR3d(c_in=64, c=16, c_out=64, dilation=1, residual=True),
        )
        # --- RGB (same layout as the depth branch, 3 input channels)
        c_in, c, c_out, dilation, residual = 3, 4, 8, 1, True
        self.rgb_feature2d = nn.Sequential(
            nn.Conv2d(c_in, c_out, 1, 1, 0),  # reduction
            BottleneckDDR2d(c_out, c, c_out, dilation=dilation, residual=residual),
            BottleneckDDR2d(c_out, c, c_out, dilation=dilation, residual=residual),
        )
        self.project_layer_rgb = Project2Dto3D(w, h, d)  # w=240, h=144, d=240
        self.rgb_feature3d = nn.Sequential(
            DownsampleBlock3d(8, 16),
            BottleneckDDR3d(c_in=16, c=8, c_out=16, dilation=1, residual=True),
            DownsampleBlock3d(16, 64),  # nn.MaxPool3d(kernel_size=2, stride=2)
            BottleneckDDR3d(c_in=64, c=16, c_out=64, dilation=1, residual=True),
        )
        ck = 64  # working channel count of the 3D trunk
        c = int(ck / 2)
        dilation = ((1, 1, 1), (1, 1, 1), (1, 1, 1))
        # ---- depth stream
        self.res3d_1d = BottleneckAIC3d(c_in=ck, c=c, c_out=ck, kernel=k[0], dilation=dilation[0], residual=True)
        self.res3d_2d = BottleneckAIC3d(c_in=ck, c=c, c_out=ck, kernel=k[1], dilation=dilation[1], residual=True)
        self.res3d_3d = BottleneckAIC3d(c_in=ck, c=c, c_out=ck, kernel=k[2], dilation=dilation[2], residual=True)
        # ---- rgb stream
        self.res3d_1r = BottleneckAIC3d(c_in=ck, c=c, c_out=ck, kernel=k[0], dilation=dilation[0], residual=True)
        self.res3d_2r = BottleneckAIC3d(c_in=ck, c=c, c_out=ck, kernel=k[1], dilation=dilation[1], residual=True)
        self.res3d_3r = BottleneckAIC3d(c_in=ck, c=c, c_out=ck, kernel=k[2], dilation=dilation[2], residual=True)
        d = (1, 1, 1)
        # Decoder on the 4-stage concatenation (4 * ck channels).
        self.aspp_1 = BottleneckAIC3d(c_in=int(ck * 4), c=ck, c_out=int(ck * 4), kernel=ks, dilation=d, residual=True)
        self.aspp_2 = BottleneckAIC3d(c_in=int(ck * 4), c=ck, c_out=int(ck * 4), kernel=ks, dilation=d, residual=True)
        self.conv_out = nn.Sequential(
            nn.Conv3d(int(ck * 4), 128, 1, 1, 0),
            nn.ReLU(inplace=True),
            nn.Conv3d(128, 128, 1, 1, 0),
            nn.ReLU(inplace=True),
            nn.Conv3d(128, num_classes, 1, 1, 0)
        )
        # ---- weights init
        for m in self.modules():
            if isinstance(m, nn.Conv3d):
                nn.init.xavier_uniform_(m.weight.data)  # gain=1
                # nn.init.constant(m.bias.data, 0)
            if isinstance(m, nn.Conv2d):
                nn.init.normal_(m.weight.data, mean=0, std=0.1)

    def forward(self, x_depth=None, x_rgb=None, p=None):
        """x_depth/x_rgb: 2D inputs; p: per-pixel flat voxel indices for projection.

        Returns per-voxel class scores of shape (BS, num_classes, 60, 36, 60).
        NOTE(review): despite the None defaults, both inputs and ``p`` are
        required — there is no None handling below.
        """
        # input: x (BS, 3L, 240L, 144L, 240L)
        # print('SSC: x.shape', x.shape)
        f0_r = self.rgb_feature2d(x_rgb)
        f0_r = self.project_layer_rgb(f0_r, p)
        f0_r = self.rgb_feature3d(f0_r)
        f0_d = self.dep_feature2d(x_depth)
        f0_d = self.project_layer_dep(f0_d, p)
        f0_d = self.dep_feature3d(f0_d)
        # -------------------------------------------------------------------
        # Stage-wise fusion: element-wise sum of depth and RGB features.
        f0 = torch.add(f0_d, f0_r)
        f1_d = self.res3d_1d(f0_d)
        f1_r = self.res3d_1r(f0_r)
        f1 = torch.add(f1_d, f1_r)
        f2_d = self.res3d_2d(f1_d)
        f2_r = self.res3d_2r(f1_r)
        f2 = torch.add(f2_d, f2_r)
        f3_d = self.res3d_3d(f2_d)
        f3_r = self.res3d_3r(f2_r)
        f3 = torch.add(f3_d, f3_r)
        y = torch.cat((f0, f1, f2, f3), dim=1)  # channels concatenate
        # print('SSC: channels concatenate x', x.size())  # (BS, 256L, 60L, 36L, 60L)
        y = self.aspp_1(y)
        y = self.aspp_2(y)
        y = self.conv_out(y)  # (BS, 12L, 60L, 36L, 60L)
        return y
| 8,927 | 40.142857 | 124 | py |
SSC | SSC-master/models/GRFNet.py | #!/usr/bin/env python
# -*- coding: UTF-8 -*-
"""
GRFNet
jieli_cn@163.com
"""
import torch
import torch.nn as nn
from torch.nn import functional as F
from .projection_layer import Project2Dto3D
from .DDR import DDR_ASPP3d
from .DDR import BottleneckDDR2d, BottleneckDDR3d, DownsampleBlock3d
class Conv3dGRUCell(nn.Module):
    """Convolutional GRU cell for 5D tensors (B, C, D, H, W).

    The reset and update gates are produced by a single 3D convolution over
    the concatenation of the input and the current hidden state; spatial
    size is preserved via 'same' padding (odd kernel sizes assumed).
    """

    def __init__(self, input_channels, hidden_channels, kernel_size, bias=True):
        super(Conv3dGRUCell, self).__init__()
        self.input_channels = input_channels
        self.hidden_channels = hidden_channels
        self.kernel_size = kernel_size
        self.padding = kernel_size // 2  # 'same' padding for odd kernels
        self.bias = bias
        # One convolution emits both gates (2 * hidden channels).
        self.in_conv = nn.Conv3d(in_channels=self.input_channels + self.hidden_channels,
                                 out_channels=2 * self.hidden_channels,
                                 kernel_size=self.kernel_size,
                                 stride=1,
                                 dilation=1,
                                 padding=self.padding,
                                 bias=self.bias)
        # Separate convolution emits the candidate hidden state.
        self.out_conv = nn.Conv3d(in_channels=self.input_channels + self.hidden_channels,
                                  out_channels=self.hidden_channels,
                                  kernel_size=self.kernel_size,
                                  stride=1,
                                  dilation=1,
                                  padding=self.padding,
                                  bias=self.bias)

    def forward(self, input_tensor, hidden_state):
        """One GRU step; returns the next hidden state (same shape as hidden_state)."""
        gates = self.in_conv(torch.cat((input_tensor, hidden_state), dim=1))
        reset_pre, update_pre = torch.split(gates, self.hidden_channels, dim=1)
        r = torch.sigmoid(reset_pre)   # reset gate
        z = torch.sigmoid(update_pre)  # update gate
        # Candidate state from the input and the reset-modulated hidden state.
        candidate = torch.tanh(
            self.out_conv(torch.cat((input_tensor, hidden_state * r), dim=1)))
        # Convex blend of the previous state and the candidate, weighted by z.
        return z * hidden_state + (1 - z) * candidate
class SSC_RGBD_GRFNet(nn.Module):
    """Two-stream (RGB + depth) SSC network that fuses the streams with a
    shared convolutional GRU cell across four stages, then decodes the final
    hidden state with a DDR-ASPP head to per-voxel class scores.
    """

    def __init__(self, num_classes=12):
        super(SSC_RGBD_GRFNet, self).__init__()
        print('SSC_RGBD_GRFNet.')
        # --- depth
        c_in, c, c_out, dilation, residual = 1, 4, 8, 1, True
        self.dep_feature2d = nn.Sequential(
            nn.Conv2d(c_in, c_out, 1, 1, 0),  # reduction
            BottleneckDDR2d(c_out, c, c_out, dilation=dilation, residual=residual),
            BottleneckDDR2d(c_out, c, c_out, dilation=dilation, residual=residual),
        )
        self.project_layer_dep = Project2Dto3D(240, 144, 240)  # w=240, h=144, d=240
        # Two 2x downsamples: full-resolution volume -> 1/4 resolution, 64 ch.
        self.dep_feature3d = nn.Sequential(
            DownsampleBlock3d(8, 16),
            BottleneckDDR3d(c_in=16, c=8, c_out=16, dilation=1, residual=True),
            DownsampleBlock3d(16, 64),  # nn.MaxPool3d(kernel_size=2, stride=2)
            BottleneckDDR3d(c_in=64, c=16, c_out=64, dilation=1, residual=True),
        )
        # --- RGB (same layout, 3 input channels)
        c_in, c, c_out, dilation, residual = 3, 4, 8, 1, True
        self.rgb_feature2d = nn.Sequential(
            nn.Conv2d(c_in, c_out, 1, 1, 0),  # reduction
            BottleneckDDR2d(c_out, c, c_out, dilation=dilation, residual=residual),
            BottleneckDDR2d(c_out, c, c_out, dilation=dilation, residual=residual),
        )
        self.project_layer_rgb = Project2Dto3D(240, 144, 240)  # w=240, h=144, d=240
        self.rgb_feature3d = nn.Sequential(
            DownsampleBlock3d(8, 16),
            BottleneckDDR3d(c_in=16, c=8, c_out=16, dilation=1, residual=True),
            DownsampleBlock3d(16, 64),  # nn.MaxPool3d(kernel_size=2, stride=2)
            BottleneckDDR3d(c_in=64, c=16, c_out=64, dilation=1, residual=True),
        )
        # -------------1/4
        ck = 64
        c = ck // 4
        # --- RGB trunk: growing dilation to enlarge the receptive field
        self.res3d_1r = BottleneckDDR3d(c_in=ck, c=c, c_out=ck, dilation=2, residual=True)
        self.res3d_2r = BottleneckDDR3d(c_in=ck, c=c, c_out=ck, dilation=3, residual=True)
        self.res3d_3r = BottleneckDDR3d(c_in=ck, c=c, c_out=ck, dilation=5, residual=True)
        # --- Depth trunk (mirrors the RGB trunk)
        self.res3d_1d = BottleneckDDR3d(c_in=ck, c=c, c_out=ck, dilation=2, residual=True)
        self.res3d_2d = BottleneckDDR3d(c_in=ck, c=c, c_out=ck, dilation=3, residual=True)
        self.res3d_3d = BottleneckDDR3d(c_in=ck, c=c, c_out=ck, dilation=5, residual=True)
        # A single GRU cell is shared by all fusion stages (weights reused).
        # self.lstm = DDRConv3dLSTMCell(input_channels=128, hidden_channels=64, kernel_size=(3, 3, 3), bias=True)
        self.gru = Conv3dGRUCell(input_channels=64, hidden_channels=64, kernel_size=3, bias=True)
        self.aspp = DDR_ASPP3d(c_in=ck, c=16, c_out=64)
        self.conv_out = nn.Sequential(
            nn.Conv3d(320, 160, 1, 1, 0),
            nn.ReLU(inplace=True),
            nn.Conv3d(160, num_classes, 1, 1, 0)
        )
        # ---- weights init
        for m in self.modules():
            if isinstance(m, nn.Conv3d):
                n = m.kernel_size[0] * m.kernel_size[1] * m.kernel_size[2] * m.out_channels
                # nn.init.xavier_normal(m.weight.data, gain=math.sqrt(2. / n))
                # nn.init.xavier_uniform(m.weight.data, gain=math.sqrt(2. / n))
                nn.init.xavier_uniform_(m.weight.data)  # gain=1
                # nn.init.constant(m.bias.data, 0)
            if isinstance(m, nn.Conv2d):
                nn.init.normal_(m.weight.data, mean=0, std=0.1)

    def forward(self, x_depth=None, x_rgb=None, p=None):
        """Fuse depth and RGB features stage-by-stage with the shared GRU.

        NOTE(review): although the inputs default to None, the fusion code
        below uses x0_depth/x0_rgb unconditionally — passing None for either
        input raises NameError.  Both modalities are effectively required.
        """
        # input: x (BS, 3L, 240L, 144L, 240L)
        # print('SSC: x.shape', x.shape)
        if x_rgb is not None:
            x0_rgb = self.rgb_feature2d(x_rgb)
            x0_rgb = self.project_layer_rgb(x0_rgb, p)
            x0_rgb = self.rgb_feature3d(x0_rgb)
            # pass
        if x_depth is not None:
            x0_depth = self.dep_feature2d(x_depth)
            x0_depth = self.project_layer_dep(x0_depth, p)
            x0_depth = self.dep_feature3d(x0_depth)
        # -------------------------------------------------------------------
        # ---- 1/4
        x_4_d = self.res3d_1d(x0_depth)
        x_4_r = self.res3d_1r(x0_rgb)
        # f1 = torch.add(x_4_d, x_4_r)
        x_5_d = self.res3d_2d(x_4_d)
        x_5_r = self.res3d_2r(x_4_r)
        # f2 = torch.add(x_5_d, x_5_r)
        x_6_d = self.res3d_3d(x_5_d)
        x_6_r = self.res3d_3r(x_5_r)
        # f3 = torch.add(x_6_d, x_6_r)
        # NOTE(review): this doubles the depth features (x0_depth + x0_depth);
        # the surrounding depth/RGB fusion pattern suggests
        # torch.add(x0_depth, x0_rgb) was intended — confirm against the
        # released training code before changing (trained weights depend on it).
        h0 = torch.add(x0_depth, x0_depth)
        # Fusion stage: 1 — feed depth then RGB into the shared GRU.
        h1_1 = self.gru(input_tensor=x0_depth, hidden_state=h0)
        h1 = self.gru(input_tensor=x0_rgb, hidden_state=h1_1)
        # Fusion stage: 2
        h2_1 = self.gru(input_tensor=x_4_d, hidden_state=h1)
        h2 = self.gru(input_tensor=x_4_r, hidden_state=h2_1)
        # Fusion stage: 3
        h3_1 = self.gru(input_tensor=x_5_d, hidden_state=h2)
        h3 = self.gru(input_tensor=x_5_r, hidden_state=h3_1)
        # Fusion stage: 4
        h4_1 = self.gru(input_tensor=x_6_d, hidden_state=h3)
        h4 = self.gru(input_tensor=x_6_r, hidden_state=h4_1)
        y = self.aspp(h4)
        y = self.conv_out(y)  # (BS, 12L, 60L, 36L, 60L)
        return y
| 7,381 | 39.119565 | 113 | py |
SSC | SSC-master/models/DDRNet.py | #!/usr/bin/env python
# -*- coding: UTF-8 -*-
"""
DDRNet
jieli_cn@163.com
"""
import torch
import torch.nn as nn
from .projection_layer import Project2Dto3D
from .DDR import DDR_ASPP3d
from .DDR import BottleneckDDR2d, BottleneckDDR3d, DownsampleBlock3d
# DDRNet
# ----------------------------------------------------------------------
class SSC_RGBD_DDRNet(nn.Module):
    """Two-stream (RGB + depth) SSC network built from DDR bottlenecks.

    Each modality is encoded in 2D, projected into a (240, 144, 240) voxel
    volume via the per-pixel indices ``p``, downsampled 4x in 3D, run through
    three dilated DDR bottlenecks, and the stage-wise fused features are
    concatenated, refined by a DDR-ASPP head and decoded to class scores.
    """

    def __init__(self, num_classes=12):
        super(SSC_RGBD_DDRNet, self).__init__()
        print('SSC_RGBD_DDRNet: RGB and Depth streams with DDR blocks for Semantic Scene Completion')
        w, h, d = 240, 144, 240  # full-resolution voxel grid
        # --- depth
        c_in, c, c_out, dilation, residual = 1, 4, 8, 1, True
        self.dep_feature2d = nn.Sequential(
            nn.Conv2d(c_in, c_out, 1, 1, 0),  # reduction
            BottleneckDDR2d(c_out, c, c_out, dilation=dilation, residual=residual),
            BottleneckDDR2d(c_out, c, c_out, dilation=dilation, residual=residual),
        )
        self.project_layer_dep = Project2Dto3D(w, h, d)  # w=240, h=144, d=240
        # Two 2x downsamples: full-resolution volume -> 1/4 resolution, 64 ch.
        self.dep_feature3d = nn.Sequential(
            DownsampleBlock3d(8, 16),
            BottleneckDDR3d(c_in=16, c=4, c_out=16, dilation=1, residual=True),
            DownsampleBlock3d(16, 64),  # nn.MaxPool3d(kernel_size=2, stride=2)
            BottleneckDDR3d(c_in=64, c=16, c_out=64, dilation=1, residual=True),
        )
        # --- RGB (same layout, 3 input channels)
        c_in, c, c_out, dilation, residual = 3, 4, 8, 1, True
        self.rgb_feature2d = nn.Sequential(
            nn.Conv2d(c_in, c_out, 1, 1, 0),  # reduction
            BottleneckDDR2d(c_out, c, c_out, dilation=dilation, residual=residual),
            BottleneckDDR2d(c_out, c, c_out, dilation=dilation, residual=residual),
        )
        self.project_layer_rgb = Project2Dto3D(w, h, d)  # w=240, h=144, d=240
        self.rgb_feature3d = nn.Sequential(
            DownsampleBlock3d(8, 16),
            BottleneckDDR3d(c_in=16, c=4, c_out=16, dilation=1, residual=True),
            DownsampleBlock3d(16, 64),  # nn.MaxPool3d(kernel_size=2, stride=2)
            BottleneckDDR3d(c_in=64, c=16, c_out=64, dilation=1, residual=True),
        )
        # -------------1/4
        # ck = 256
        # self.ds = DownsamplerBlock_3d(64, ck)
        ck = 64
        c = 16
        # c_in, c, c_out, kernel=3, stride=1, dilation=1, residual=True
        # Growing dilation (2, 3, 5) enlarges the receptive field per stage.
        self.res3d_1d = BottleneckDDR3d(c_in=ck, c=c, c_out=ck, kernel=3, dilation=2, residual=True)
        self.res3d_2d = BottleneckDDR3d(c_in=ck, c=c, c_out=ck, kernel=3, dilation=3, residual=True)
        self.res3d_3d = BottleneckDDR3d(c_in=ck, c=c, c_out=ck, kernel=3, dilation=5, residual=True)
        self.res3d_1r = BottleneckDDR3d(c_in=ck, c=c, c_out=ck, kernel=3, dilation=2, residual=True)
        self.res3d_2r = BottleneckDDR3d(c_in=ck, c=c, c_out=ck, kernel=3, dilation=3, residual=True)
        self.res3d_3r = BottleneckDDR3d(c_in=ck, c=c, c_out=ck, kernel=3, dilation=5, residual=True)
        self.aspp = DDR_ASPP3d(c_in=int(ck * 4), c=16, c_out=64)
        # self.aspp = DDR_ASPP3d(c_in=int(ck * 4), c=64, c_out=int(ck * 4))
        # 64 * 5 = 320
        self.conv_out = nn.Sequential(
            nn.Conv3d(320, 128, 1, 1, 0),
            nn.ReLU(inplace=True),
            nn.Conv3d(128, 128, 1, 1, 0),
            nn.ReLU(inplace=True),
            nn.Conv3d(128, num_classes, 1, 1, 0)
        )
        # ---- weights init
        for m in self.modules():
            if isinstance(m, nn.Conv3d):
                nn.init.xavier_uniform_(m.weight.data)  # gain=1
            if isinstance(m, nn.Conv2d):
                nn.init.normal_(m.weight.data, mean=0, std=0.1)

    def forward(self, x_depth=None, x_rgb=None, p=None):
        """Returns per-voxel class scores of shape (BS, num_classes, 60, 36, 60).

        NOTE(review): despite the None defaults, both inputs and ``p`` are
        required — there is no None handling below.
        """
        # input: x (BS, 3L, 240L, 144L, 240L)
        # print('SSC: x.shape', x.shape)
        x0_rgb = self.rgb_feature2d(x_rgb)
        x0_rgb = self.project_layer_rgb(x0_rgb, p)
        x0_rgb = self.rgb_feature3d(x0_rgb)
        x0_depth = self.dep_feature2d(x_depth)
        x0_depth = self.project_layer_dep(x0_depth, p)
        x0_depth = self.dep_feature3d(x0_depth)
        # Stage-wise fusion: element-wise sum of depth and RGB features.
        f0 = torch.add(x0_depth, x0_rgb)
        x_4_d = self.res3d_1d(x0_depth)
        x_4_r = self.res3d_1r(x0_rgb)
        f1 = torch.add(x_4_d, x_4_r)
        x_5_d = self.res3d_2d(x_4_d)
        x_5_r = self.res3d_2r(x_4_r)
        f2 = torch.add(x_5_d, x_5_r)
        x_6_d = self.res3d_3d(x_5_d)
        x_6_r = self.res3d_3r(x_5_r)
        f3 = torch.add(x_6_d, x_6_r)
        x = torch.cat((f0, f1, f2, f3), dim=1)  # channels concatenate
        # print('SSC: channels concatenate x', x.size())  # (BS, 256L, 60L, 36L, 60L)
        x = self.aspp(x)
        y = self.conv_out(x)  # (BS, 12L, 60L, 36L, 60L)
        return y
| 4,722 | 36.784 | 101 | py |
SSC | SSC-master/dataloaders/dataloader.py | #!/usr/bin/env python
# -*- coding: UTF-8 -*-
"""
Class of pytorch data loader
---
Jie Li
jieli_cn@163.com
Nanjing University of Science and Technology
Aug 10, 2019
"""
import glob
import imageio
import numpy as np
import numpy.matlib
import torch.utils.data
from pathlib import Path
from torchvision import transforms
from config import colorMap
# C_NUM = 12 # number of classes
# 'empty','ceiling','floor','wall','window','chair','bed','sofa','table','tvs','furn','objs'
# 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20
# Look-up table mapping the 37 raw NYU label ids (list index) onto the
# 12-class SSC label set (list value).  The 255 "ignore" id is handled by
# the callers (e.g. _rle2voxel), not by this table.
seg_class_map = [0, 1, 2, 3, 4, 11, 5, 6, 7, 8, 8, 10, 10, 10, 11, 11, 9, 8, 11, 11, 11,
                 11, 11, 11, 11, 11, 11, 10, 10, 11, 8, 10, 11, 9, 11, 11, 11]  # 0 - 11
#                21  22  23  24  25  26  27  28  29  30  31  32  33  34  35  36
class NYUDataset(torch.utils.data.Dataset):
    def __init__(self, root, istest=False):
        """NYU semantic-scene-completion dataset.

        Args:
            root: directory (or list of directories) holding the data files.
            istest: if True, __getitem__ also returns a non-empty mask.
        """
        # Fixed NYU camera intrinsics and voxel-volume parameters.
        self.param = {'voxel_size': (240, 144, 240),
                      'voxel_unit': 0.02,  # 0.02m, length of each grid == 20mm
                      'cam_k': [[518.8579, 0, 320],  # K is [fx 0 cx; 0 fy cy; 0 0 1];
                                [0, 518.8579, 240],  # cx = K(1,3); cy = K(2,3);
                                [0, 0, 1]],          # fx = K(1,1); fy = K(2,2);
                      }
        #
        self.subfix = 'npz'
        self.istest = istest
        self.downsample = 4  # int, downsample = 4, in labeled data, get 1 voxel from each 4
        self.filepaths = self.get_filelist(root, self.subfix)
        # Converts a PIL Image or numpy.ndarray (H x W x C) in the range [0, 255] \
        # to a torch.FloatTensor of shape (C x H x W) in the range [0.0, 1.0].
        self.transforms_rgb = transforms.Compose([
            transforms.ToTensor(),
            transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
        ])
        print('Dataset:{} files'.format(len(self.filepaths)))
        # Disabled by default: one-off label-fixing pass (see _correct_labels).
        correct = False
        if self.subfix == 'npz' and correct:
            self._correct_labels()
    def _correct_labels(self):
        """One-off maintenance pass: recompute the 2D->3D projection indices for
        every .npz sample and save the result under a sibling *_fixed directory,
        skipping files that were already fixed.

        NOTE(review): the raw depth/.bin paths are hard-coded to a specific
        machine — adjust before reuse.
        """
        print ("Correcting labels with projection indices!")
        N = len(self.filepaths)
        for index in range(N):
            print ("Correcting {}/{} label..".format(index+1, N))
            #name = self.filepaths[index][48:-11]
            filepath = Path(self.filepaths[index])
            target_file = filepath.parent.with_name(filepath.parent.stem + "_fixed") / filepath.stem
            target_file.parent.mkdir(parents=True, exist_ok=True)
            if target_file.with_suffix('.npz').exists():
                print("{} exists! skipping...".format(target_file.with_suffix('.npz')))
                continue
            with np.load(self.filepaths[index]) as npz_file:
                # print(npz_file.files)
                rgb_tensor = npz_file['rgb']
                depth_tensor = npz_file['depth']
                tsdf_hr = npz_file['tsdf_hr']  # flipped TSDF, (240, 144, 240, 1)
                # target_hr = npz_file['target_hr']
                tsdf_lr = npz_file['tsdf_lr']
                target_lr = npz_file['target_lr']
                position = npz_file['position']
                # Re-derive the per-pixel voxel indices from the raw depth + camera pose.
                vox_origin, cam_pose, _ = self._read_rle('/media/scratch1/mcheem/datasets/depthbin/{}/{}.bin'.format("NYUtest" if self.istest else "NYUtrain", filepath.stem[:-7]))
                depth = self._read_depth('/media/scratch1/mcheem/datasets/depthbin/{}/{}.png'.format("NYUtest" if self.istest else "NYUtrain", filepath.stem[:-7]))
                _, _, position2, position4 = self._depth2voxel(depth, cam_pose, vox_origin, self.param)
                np.savez_compressed(target_file , rgb=rgb_tensor, depth=depth_tensor, tsdf_hr=tsdf_hr, target_lr=target_lr, position=position2, tsdf_lr=tsdf_lr)
    def __getitem__(self, index):
        """Load one sample.

        Returns (rgb, depth, tsdf_hr, target_lr.T, [nonempty.T,] position, png_path);
        the non-empty mask is only included when self.istest is True.
        """
        _name = self.filepaths[index][:-4]
        # print(_name)
        # ---------------------------------------------------------------------------
        # Processing repackaged data provided by DDRNet
        # ---------------------------------------------------------------------------
        if self.subfix == 'npz':
            with np.load(self.filepaths[index]) as npz_file:
                # print(npz_file.files)
                rgb_tensor = npz_file['rgb']
                depth_tensor = npz_file['depth']
                tsdf_hr = npz_file['tsdf_hr']  # flipped TSDF, (240, 144, 240, 1)
                # target_hr = npz_file['target_hr']
                target_lr = npz_file['target_lr']
                position = npz_file['position']
                if self.istest:
                    tsdf_lr = npz_file['tsdf_lr']  # ( 60, 36, 60)
                    # nonempty = self.get_nonempty(tsdf, 'TSDF')
                    nonempty = self.get_nonempty2(tsdf_lr, target_lr, 'TSDF')  # this matches the SUNCG processing more closely
                    return rgb_tensor, depth_tensor, tsdf_hr, target_lr.T, nonempty.T, position, _name + '.png'
                return rgb_tensor, depth_tensor, tsdf_hr, target_lr.T, position, _name + '.png'
        # else:
        #
        # ---------------------------------------------------------------------------
        # Processing data provided by SSCNet
        # ---------------------------------------------------------------------------
        # --- read depth, shape: (h, w)
        depth = self._read_depth(_name + '.png')  #
        depth_tensor = depth.reshape((1,) + depth.shape)
        # --- read rgb image, shape: (h, w, 3)
        # rgb = self._read_rgb(_name + '.jpg')  #
        rgb = self._read_rgb(_name[:-4] + 'rgb.png')
        rgb_tensor = self.transforms_rgb(rgb)  # channel first, shape: (3, h, w)
        # --- read ground truth (RLE-compressed labels + camera pose)
        vox_origin, cam_pose, rle = self._read_rle(_name + '.bin')
        target_hr = self._rle2voxel(rle, self.param['voxel_size'], _name + '.bin')
        target_lr = self._downsample_label(target_hr, self.param['voxel_size'], self.downsample)
        binary_vox, _, position, position4 = self._depth2voxel(depth, cam_pose, vox_origin, self.param)
        npz_file = np.load(_name + '.npz')
        tsdf_hr = npz_file['tsdf']  # SUNCG (W, H, D)
        if self.istest:
            tsdf_lr = self._downsample_tsdf(tsdf_hr, self.downsample)
            # nonempty = self.get_nonempty(tsdf, 'TSDF')
            nonempty = self.get_nonempty2(tsdf_lr, target_lr, 'TSDF')  # this matches the SUNCG processing more closely
            return rgb_tensor, depth_tensor, tsdf_hr, target_lr.T, nonempty.T, position, _name + '.png'
        return rgb_tensor, depth_tensor, tsdf_hr, target_lr.T, position, _name + '.png'
    def __len__(self):
        """Number of samples (files discovered at construction time)."""
        return len(self.filepaths)
def get_filelist(self, root, subfix):
if root is None:
raise Exception("Oops! 'root' is None, please set the right file path.")
_filepaths = list()
if isinstance(root, list): # 将多个root
for root_i in root:
fp = glob.glob(root_i + '/*.' + subfix)
fp.sort()
_filepaths.extend(fp)
elif isinstance(root, str):
_filepaths = glob.glob(root + '/*.' + subfix) # List all files in data folder
_filepaths.sort()
if len(_filepaths) == 0:
raise Exception("Oops! That was no valid data in '{}'.".format(root))
return _filepaths
    @staticmethod
    def _read_depth(depth_filename):
        r"""Read a depth image with size H x W
        and save the depth values (in millimeters) into a 2d numpy array.
        The depth image file is assumed to be in 16-bit PNG format, depth in millimeters.

        NOTE(review): the 8000.0 divisor converts raw units to meters —
        confirm against the dataset's depth encoding documentation.
        """
        # depth = misc.imread(depth_filename) / 8000.0  # numpy.float64
        depth = imageio.imread(depth_filename) / 8000.0  # numpy.float64
        # assert depth.shape == (img_h, img_w), 'incorrect default size'
        depth = np.asarray(depth)
        return depth
    @staticmethod
    def _read_rgb(rgb_filename):  # 0.01s
        r"""Read a RGB image with size H x W; returns uint8 (H, W, 3)."""
        # rgb = misc.imread(rgb_filename)  # <type 'numpy.ndarray'>, numpy.uint8, (480, 640, 3)
        rgb = imageio.imread(rgb_filename)  # <type 'numpy.ndarray'>, numpy.uint8, (480, 640, 3)
        # rgb = np.rollaxis(rgb, 2, 0)  # (H, W, 3)-->(3, H, W)
        return rgb
@staticmethod
def _read_rle(rle_filename): # 0.0005s
r"""Read RLE compression data
Return:
vox_origin,
cam_pose,
vox_rle, voxel label data from file
Shape:
vox_rle, (240, 144, 240)
"""
fid = open(rle_filename, 'rb')
vox_origin = np.fromfile(fid, np.float32, 3).T # Read voxel origin in world coordinates
cam_pose = np.fromfile(fid, np.float32, 16).reshape((4, 4)) # Read camera pose
vox_rle = np.fromfile(fid, np.uint32).reshape((-1, 1)).T # Read voxel label data from file
vox_rle = np.squeeze(vox_rle) # 2d array: (1 x N), to 1d array: (N , )
fid.close()
return vox_origin, cam_pose, vox_rle
# this version takes 0.9s
@classmethod
def _rle2voxel(cls, rle, voxel_size=(240, 144, 240), rle_filename=''):
r"""Read voxel label data from file (RLE compression), and convert it to fully occupancy labeled voxels.
In the data loader of pytorch, only single thread is allowed.
For multi-threads version and more details, see 'readRLE.py'.
output: seg_label: 3D numpy array, size 240 x 144 x 240
"""
# ---- Read RLE
# vox_origin, cam_pose, rle = cls._read_rle(rle_filename)
# ---- Uncompress RLE, 0.9s
seg_label = np.zeros(voxel_size[0] * voxel_size[1] * voxel_size[2], dtype=np.uint8) # segmentation label
vox_idx = 0
for idx in range(int(rle.shape[0] / 2)):
check_val = rle[idx * 2]
check_iter = rle[idx * 2 + 1]
if check_val >= 37 and check_val != 255: # 37 classes to 12 classes
print('RLE {} check_val: {}'.format(rle_filename, check_val))
# seg_label_val = 1 if check_val < 37 else 0 # 37 classes to 2 classes: empty or occupancy
# seg_label_val = 255 if check_val == 255 else seg_class_map[check_val]
seg_label_val = seg_class_map[check_val] if check_val != 255 else 255 # 37 classes to 12 classes
seg_label[vox_idx: vox_idx + check_iter] = np.matlib.repmat(seg_label_val, 1, check_iter)
vox_idx = vox_idx + check_iter
seg_label = seg_label.reshape(voxel_size) # 3D array, size 240 x 144 x 240
return seg_label
# this version takes 3s
@classmethod # method 2, new
def _depth2voxel(cls, depth, cam_pose, vox_origin, param):
cam_k = param['cam_k']
voxel_size = param['voxel_size'] # (240, 144, 240)
unit = param['voxel_unit'] # 0.02
# ---- Get point in camera coordinate
H, W = depth.shape
gx, gy = np.meshgrid(range(W), range(H))
pt_cam = np.zeros((H, W, 3), dtype=np.float32)
pt_cam[:, :, 0] = (gx - cam_k[0][2]) * depth / cam_k[0][0] # x
pt_cam[:, :, 1] = (gy - cam_k[1][2]) * depth / cam_k[1][1] # y
pt_cam[:, :, 2] = depth # z, in meter
# ---- Get point in world coordinate
p = cam_pose
pt_world = np.zeros((H, W, 3), dtype=np.float32)
pt_world[:, :, 0] = p[0][0] * pt_cam[:, :, 0] + p[0][1] * pt_cam[:, :, 1] + p[0][2] * pt_cam[:, :, 2] + p[0][3]
pt_world[:, :, 1] = p[1][0] * pt_cam[:, :, 0] + p[1][1] * pt_cam[:, :, 1] + p[1][2] * pt_cam[:, :, 2] + p[1][3]
pt_world[:, :, 2] = p[2][0] * pt_cam[:, :, 0] + p[2][1] * pt_cam[:, :, 1] + p[2][2] * pt_cam[:, :, 2] + p[2][3]
pt_world[:, :, 0] = pt_world[:, :, 0] - vox_origin[0]
pt_world[:, :, 1] = pt_world[:, :, 1] - vox_origin[1]
pt_world[:, :, 2] = pt_world[:, :, 2] - vox_origin[2]
# ---- Aline the coordinates with labeled data (RLE .bin file)
pt_world2 = np.zeros(pt_world.shape, dtype=np.float32) # (h, w, 3)
# pt_world2 = pt_world
pt_world2[:, :, 0] = pt_world[:, :, 0] # x 水平
pt_world2[:, :, 1] = pt_world[:, :, 2] # y 高低
pt_world2[:, :, 2] = pt_world[:, :, 1] # z 深度
# pt_world2[:, :, 0] = pt_world[:, :, 1] # x 原始paper方法
# pt_world2[:, :, 1] = pt_world[:, :, 2] # y
# pt_world2[:, :, 2] = pt_world[:, :, 0] # z
# ---- World coordinate to grid/voxel coordinate
point_grid = pt_world2 / unit # Get point in grid coordinate, each grid is a voxel
point_grid = np.rint(point_grid).astype(np.int32) # .reshape((-1, 3)) # (H*W, 3) (H, W, 3)
# ---- crop depth to grid/voxel
# binary encoding '01': 0 for empty, 1 for occupancy
# voxel_binary = np.zeros(voxel_size, dtype=np.uint8) # (W, H, D)
voxel_binary = np.zeros([_ + 1 for _ in voxel_size], dtype=np.float32) # (W, H, D)
voxel_xyz = np.zeros(voxel_size + (3,), dtype=np.float32) # (W, H, D, 3)
position = np.zeros((H, W), dtype=np.int32)
position4 = np.zeros((H, W), dtype=np.int32)
# position44 = np.zeros((H/4, W/4), dtype=np.int32)
voxel_size_lr = (voxel_size[0] // 4, voxel_size[1] // 4, voxel_size[2] // 4)
for h in range(H):
for w in range(W):
i_x, i_y, i_z = point_grid[h, w, :]
if 0 <= i_x < voxel_size[0] and 0 <= i_y < voxel_size[1] and 0 <= i_z < voxel_size[2]:
voxel_binary[i_x, i_y, i_z] = 1 # the bin has at least one point (bin is not empty)
voxel_xyz[i_x, i_y, i_z, :] = point_grid[h, w, :]
# position[h, w, :] = point_grid[h, w, :] # 记录图片上的每个像素对应的voxel位置
# 记录图片上的每个像素对应的voxel位置
position[h, w] = np.ravel_multi_index(point_grid[h, w, :], voxel_size)
# TODO 这个project的方式可以改进
position4[h, ] = np.ravel_multi_index((point_grid[h, w, :] / 4).astype(np.int32), voxel_size_lr)
# position44[h / 4, w / 4] = np.ravel_multi_index(point_grid[h, w, :] / 4, voxel_size_lr)
# output --- 3D Tensor, 240 x 144 x 240
del depth, gx, gy, pt_cam, pt_world, pt_world2, point_grid # Release Memory
return voxel_binary, voxel_xyz, position, position4 # (W, H, D), (W, H, D, 3)
# this version takes about 0.6s on CPU
@staticmethod
def _downsample_label(label, voxel_size=(240, 144, 240), downscale=4):
r"""downsample the labeled data,
Shape:
label, (240, 144, 240)
label_downscale, if downsample==4, then (60, 36, 60)
"""
if downscale == 1:
return label
ds = downscale
small_size = (voxel_size[0] // ds, voxel_size[1] // ds, voxel_size[2] // ds) # small size
label_downscale = np.zeros(small_size, dtype=np.uint8)
empty_t = 0.95 * ds * ds * ds # threshold
s01 = small_size[0] * small_size[1]
label_i = np.zeros((ds, ds, ds), dtype=np.int32)
for i in range(small_size[0]*small_size[1]*small_size[2]):
z = int(i / s01)
y = int((i - z * s01) / small_size[0])
x = int(i - z * s01 - y * small_size[0])
# z, y, x = np.unravel_index(i, small_size) # 速度更慢了
# print(x, y, z)
label_i[:, :, :] = label[x * ds:(x + 1) * ds, y * ds:(y + 1) * ds, z * ds:(z + 1) * ds]
label_bin = label_i.flatten() # faltten 返回的是真实的数组,需要分配新的内存空间
# label_bin = label_i.ravel() # 将多维数组变成 1维数组,而ravel 返回的是数组的视图
# zero_count_0 = np.sum(label_bin == 0)
# zero_count_255 = np.sum(label_bin == 255)
zero_count_0 = np.array(np.where(label_bin == 0)).size # 要比sum更快
zero_count_255 = np.array(np.where(label_bin == 255)).size
zero_count = zero_count_0 + zero_count_255
if zero_count > empty_t:
label_downscale[x, y, z] = 0 if zero_count_0 > zero_count_255 else 255
else:
# label_i_s = label_bin[np.nonzero(label_bin)] # get the none empty class labels
label_i_s = label_bin[np.where(np.logical_and(label_bin > 0, label_bin < 255))]
label_downscale[x, y, z] = np.argmax(np.bincount(label_i_s))
return label_downscale
@staticmethod
def _downsample_tsdf(tsdf, downscale=4): # 仅在Get None empty 时会用到
r"""
Shape:
tsdf, (240, 144, 240)
tsdf_downscale, (60, 36, 60), (stsdf.shape[0]/4, stsdf.shape[1]/4, stsdf.shape[2]/4)
"""
if downscale == 1:
return tsdf
# TSDF_EMPTY = np.float32(0.001)
# TSDF_SURFACE: 1, sign >= 0
# TSDF_OCCLUD: sign < 0 np.float32(-0.001)
ds = downscale
small_size = (int(tsdf.shape[0] / ds), int(tsdf.shape[1] / ds), int(tsdf.shape[2] / ds))
tsdf_downscale = np.ones(small_size, dtype=np.float32) * np.float32(0.001) # init 0.001 for empty
s01 = small_size[0] * small_size[1]
tsdf_sr = np.ones((ds, ds, ds), dtype=np.float32) # search region
for i in range(small_size[0] * small_size[1] * small_size[2]):
z = int(i / s01)
y = int((i - z * s01) / small_size[0])
x = int(i - z * s01 - y * small_size[0])
tsdf_sr[:, :, :] = tsdf[x * ds:(x + 1) * ds, y * ds:(y + 1) * ds, z * ds:(z + 1) * ds]
tsdf_bin = tsdf_sr.flatten()
# none_empty_count = np.array(np.where(tsdf_bin != TSDF_EMPTY)).size
none_empty_count = np.array(np.where(np.logical_or(tsdf_bin <= 0, tsdf_bin == 1))).size
if none_empty_count > 0:
# surface_count = np.array(np.where(stsdf_bin == 1)).size
# occluded_count = np.array(np.where(stsdf_bin == -2)).size
# surface_count = np.array(np.where(tsdf_bin > 0)).size # 这个存在问题
surface_count = np.array(np.where(tsdf_bin == 1)).size
# occluded_count = np.array(np.where(tsdf_bin < 0)).size
# tsdf_downscale[x, y, z] = 0 if surface_count > occluded_count else np.float32(-0.001)
tsdf_downscale[x, y, z] = 1 if surface_count > 2 else np.float32(-0.001) # 1 or 0 ?
# else:
# tsdf_downscale[x, y, z] = empty # TODO 不应该将所有值均设为0.001
return tsdf_downscale
@staticmethod
def get_nonempty(voxels, encoding): # Get none empty from depth voxels
data = np.zeros(voxels.shape, dtype=np.float32) # init 0 for empty
# if encoding == 'STSDF': # surface, empty, occulted: 1, 0, -1
# data[voxels == 1] = 1
# return data
if encoding == 'STSDF': # surface, empty, occulted: 1, 0, -1
data[voxels != 0] = 1
surface = np.array(np.where(voxels == 1)) # surface=1
elif encoding == 'TSDF':
data[np.where(np.logical_or(voxels <= 0, voxels == 1))] = 1
surface = np.array(np.where(voxels == 1)) # surface
# surface = np.array(np.where(np.logical_and(voxels > 0, voxels != np.float32(0.001)))) # surface
else:
raise Exception("Encoding error: {} is not validate".format(encoding))
min_idx = np.amin(surface, axis=1)
max_idx = np.amax(surface, axis=1)
# print('min_idx, max_idx', min_idx, max_idx)
# data[:a], data[a]不包含在内, data[b:], data[b]包含在内
# min_idx = min_idx
max_idx = max_idx + 1
# 本该扩大一圈就够了,但由于GT标注的不是很精确,故在高分辨率情况下,多加大一圈
# min_idx = min_idx - 1
# max_idx = max_idx + 2
min_idx[min_idx < 0] = 0
max_idx[0] = min(voxels.shape[0], max_idx[0])
max_idx[1] = min(voxels.shape[1], max_idx[1])
max_idx[2] = min(voxels.shape[2], max_idx[2])
data[:min_idx[0], :, :] = 0 # data[:a], data[a]不包含在内
data[:, :min_idx[1], :] = 0
data[:, :, :min_idx[2]] = 0
data[max_idx[0]:, :, :] = 0 # data[b:], data[b]包含在内
data[:, max_idx[1]:, :] = 0
data[:, :, max_idx[2]:] = 0
return data
@staticmethod
def get_nonempty2(voxels, target, encoding): # Get none empty from depth voxels
data = np.ones(voxels.shape, dtype=np.float32) # init 1 for none empty
data[target == 255] = 0
if encoding == 'STSDF': # surface, empty, occulted: 1, 0, -1
data[voxels == 0] = 0
elif encoding == 'TSDF':
# --0
# data[voxels == np.float32(0.001)] = 0
# --1
# data[voxels > 0] = 0
# --2
# data[voxels >= np.float32(0.001)] = 0
# --3
data[voxels >= np.float32(0.001)] = 0
data[voxels == 1] = 1
return data
@staticmethod
def _get_xyz(size):
"""x 水平 y高低 z深度"""
_x = np.zeros(size, dtype=np.int32)
_y = np.zeros(size, dtype=np.int32)
_z = np.zeros(size, dtype=np.int32)
for i_h in range(size[0]): # x, y, z
_x[i_h, :, :] = i_h # x, left-right flip
for i_w in range(size[1]):
_y[:, i_w, :] = i_w # y, up-down flip
for i_d in range(size[2]):
_z[:, :, i_d] = i_d # z, front-back flip
return _x, _y, _z
    @classmethod
    def labeled_voxel2ply(cls, vox_labeled, ply_filename):
        """Save labeled voxels to disk as a colored point cloud (ASCII PLY).

        Each non-empty voxel becomes one point ``x y z r g b``; colors come
        from the module-level ``colorMap`` table indexed by label id.

        Args:
            vox_labeled: (W, H, D) ndarray of integer class labels; 0 is empty
                and 255 is remapped to empty before export.
            ply_filename: output path (conventionally with a '.ply' suffix).

        Returns:
            None. Prints and returns early if every voxel is empty.
        """
        # ---- Check data type, numpy ndarray
        if type(vox_labeled) is not np.ndarray:
            raise Exception("Oops! Type of vox_labeled should be 'numpy.ndarray', not {}.".format(type(vox_labeled)))
        # ---- Check data validation: nothing to export if all labels are 0
        if np.amax(vox_labeled) == 0:
            print('Oops! All voxel is labeled empty.')
            return
        # ---- get size (before flattening, _get_xyz needs the 3-D shape)
        size = vox_labeled.shape
        # ---- Convert to a flat array; indices align with the flattened grids
        vox_labeled = vox_labeled.flatten()
        # ---- Get X Y Z coordinate grids, flattened to match
        _x, _y, _z = cls._get_xyz(size)
        _x = _x.flatten()
        _y = _y.flatten()
        _z = _z.flatten()
        # ---- Get R G B: 255 is treated as empty (label 0) before color lookup
        vox_labeled[vox_labeled == 255] = 0  # empty
        _rgb = colorMap[vox_labeled[:]]
        # ---- Get X Y Z R G B rows
        # (swap _x and _z here if axes need flipping for MeshLab display)
        xyz_rgb = zip(_x, _y, _z, _rgb[:, 0], _rgb[:, 1], _rgb[:, 2])  # python2.7
        xyz_rgb = list(xyz_rgb)  # python3: zip is lazy, materialize it
        # ---- Get ply data without empty voxels (label > 0 only)
        xyz_rgb = np.array(xyz_rgb)
        ply_data = xyz_rgb[np.where(vox_labeled > 0)]
        if len(ply_data) == 0:
            raise Exception("Oops! That was no valid ply data.")
        # ASCII PLY header; vertex count must match the exported rows
        ply_head = 'ply\n' \
                   'format ascii 1.0\n' \
                   'element vertex %d\n' \
                   'property float x\n' \
                   'property float y\n' \
                   'property float z\n' \
                   'property uchar red\n' \
                   'property uchar green\n' \
                   'property uchar blue\n' \
                   'end_header' % len(ply_data)
        # ---- Save ply data to disk (slow for large volumes: ~20s observed)
        np.savetxt(ply_filename, ply_data, fmt="%d %d %d %d %d %d", header=ply_head, comments='')  # It takes 20s
        # free the large intermediates eagerly
        del vox_labeled, _x, _y, _z, _rgb, xyz_rgb, ply_data, ply_head
if __name__ == '__main__':
    # Smoke test: iterate the NYU CAD validation split once and print filenames.
    # ---- Data loader (hard-coded local path)
    data_dir = '/home/amax/jie/Data_zoo/NYU_SSC/NYUCADval40'
    # ------------------------------------------------
    data_loader = torch.utils.data.DataLoader(
        dataset=NYUDataset(data_dir),
        batch_size=1,
        shuffle=False,
        num_workers=1
    )
    # NOTE(review): 'rgb_tesnor' is a typo for rgb_tensor (local name only).
    for step, (rgb_tesnor, depth, target_lr, position, _filename) in enumerate(data_loader):
        print('step:', step, _filename)
| 24,291 | 44.920605 | 180 | py |
SSC | SSC-master/dataloaders/__init__.py |
from .dataloader import NYUDataset
from config import Path
from torch.utils.data import DataLoader
def make_data_loader(args, **kwargs):
    """Build the train and validation DataLoaders for ``args.dataset``.

    Args:
        args: parsed CLI arguments; must provide ``dataset`` (name resolved via
            ``Path.db_root_dir``), ``batch_size`` and ``workers``.
        **kwargs: accepted for interface compatibility; currently unused.

    Returns:
        (train_loader, val_loader) tuple of ``torch.utils.data.DataLoader``.

    Raises:
        ValueError: if ``args.dataset`` is empty/unset. (Previously the
            function silently returned ``None`` in that case, which crashed
            callers on unpacking.)
    """
    if not args.dataset:
        raise ValueError('args.dataset must be set to build data loaders')
    base_dirs = Path.db_root_dir(args.dataset)
    print('Training data:{}'.format(base_dirs['train']))
    train_loader = DataLoader(
        dataset=NYUDataset(base_dirs['train'], istest=False),
        batch_size=args.batch_size,
        shuffle=True,
        num_workers=args.workers
    )
    print('Validate data:{}'.format(base_dirs['val']))
    val_loader = DataLoader(
        dataset=NYUDataset(base_dirs['val'], istest=True),
        batch_size=args.batch_size,  # 1 * torch.cuda.device_count(), 1 for each GPU
        shuffle=False,
        num_workers=args.workers  # 1 * torch.cuda.device_count()
    )
    return train_loader, val_loader
| 883 | 28.466667 | 88 | py |
SSC | SSC-master/utils/seed.py |
import numpy as np
import scipy.misc
import os
import random
import torch
def seed_torch(seed=3055):
    """Seed every RNG source (Python, NumPy, PyTorch CPU and GPU) so that
    runs are reproducible, and force cuDNN into deterministic mode."""
    os.environ['PYTHONHASHSEED'] = str(seed)
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    # The CUDA calls are safe no-ops on CPU-only machines.
    torch.cuda.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)  # if you are using multi-GPU.
    # Deterministic cuDNN kernels; disables autotuning for reproducibility.
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False
| 416 | 18.857143 | 66 | py |
Unilm | Unilm-master/conver_torch_to_tf.py | """
@author: liucong
@contact: logcongcong@gmail.com
@time: 2020/7/27 13:39
"""
from convert_unilm_pytorch_checkpoint_to_original_tf import convert_pytorch_checkpoint_to_tf
from modeling_unilm import UnilmForLM
import os
os.environ['CUDA_DEVICE_ORDER'] = 'PCI_BUS_ID'
os.environ['CUDA_VISIBLE_DEVICES'] = "-1"  # hide all GPUs: run the conversion on CPU
def f(torch_bert_dir, save_dir):
    """Load a PyTorch UniLM checkpoint from ``torch_bert_dir`` and export it
    to ``save_dir`` as a TensorFlow checkpoint named 'bert_model'."""
    model = UnilmForLM.from_pretrained(torch_bert_dir)
    convert_pytorch_checkpoint_to_tf(model, save_dir, "bert_model")
if __name__ == "__main__":
    # Hard-coded local input/output paths for the one-off conversion.
    torch_bert_dir = "yunwen_github/Unilm/model"
    save_dir = "yunwen_github/Unilm/model_tf"
    f(torch_bert_dir, save_dir)
| 626 | 25.125 | 92 | py |
Unilm | Unilm-master/modeling_unilm.py | # coding=utf-8
"""PyTorch UniLM model. """
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
import math
import logging
import numpy as np
import torch
from torch import nn
import torch.nn.functional as F
from torch.nn.modules.loss import _Loss
from transformers.modeling_utils import PreTrainedModel
from configuration_unilm import UnilmConfig
from transformers.modeling_bert import load_tf_weights_in_bert, BertPooler, BertIntermediate, BertOutput, BertPredictionHeadTransform, BertSelfOutput, BertLMPredictionHead, BertOnlyMLMHead, BertOnlyMLMHead, BertEmbeddings, BertOnlyNSPHead
logger = logging.getLogger(__name__)
# Download URLs for pretrained checkpoints; left empty here, so weights are
# expected to be loaded from local paths.
UNILM_PRETRAINED_MODEL_ARCHIVE_MAP = {
    'unilm-base-cased': "",
    'unilm-large-cased': ""
}
# UniLM uses the stock torch LayerNorm implementation.
BertLayerNorm = torch.nn.LayerNorm
class BertSelfAttention(nn.Module):
    """Multi-head scaled-dot-product self-attention.

    Differs from the stock transformers version in that ``forward`` accepts
    optional ``history_states``: cached hidden states of earlier positions
    that are prepended to the key/value inputs (queries are computed only for
    the new positions), enabling incremental decoding.
    """
    def __init__(self, config):
        super(BertSelfAttention, self).__init__()
        if config.hidden_size % config.num_attention_heads != 0:
            raise ValueError(
                "The hidden size (%d) is not a multiple of the number of attention "
                "heads (%d)" % (config.hidden_size, config.num_attention_heads))
        self.num_attention_heads = config.num_attention_heads
        self.attention_head_size = int(
            config.hidden_size / config.num_attention_heads)
        self.all_head_size = self.num_attention_heads * self.attention_head_size
        self.query = nn.Linear(config.hidden_size, self.all_head_size)
        self.key = nn.Linear(config.hidden_size, self.all_head_size)
        self.value = nn.Linear(config.hidden_size, self.all_head_size)
        self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
    def transpose_for_scores(self, x):
        # (batch, seq, hidden) -> (batch, heads, seq, head_size)
        sz = x.size()[:-1] + (self.num_attention_heads,
                              self.attention_head_size)
        x = x.view(*sz)
        return x.permute(0, 2, 1, 3)
    def forward(self, hidden_states, attention_mask, history_states=None):
        """Attend over hidden_states (plus cached history, if given).

        ``attention_mask`` is expected to be additive (large negative values
        at masked positions), as produced by get_extended_attention_mask.
        """
        if history_states is None:
            mixed_query_layer = self.query(hidden_states)
            mixed_key_layer = self.key(hidden_states)
            mixed_value_layer = self.value(hidden_states)
        else:
            # Keys/values cover history + current; queries only the current slice.
            x_states = torch.cat((history_states, hidden_states), dim=1)
            mixed_query_layer = self.query(hidden_states)
            mixed_key_layer = self.key(x_states)
            mixed_value_layer = self.value(x_states)
        query_layer = self.transpose_for_scores(mixed_query_layer)
        key_layer = self.transpose_for_scores(mixed_key_layer)
        value_layer = self.transpose_for_scores(mixed_value_layer)
        # Scale queries before the matmul (equivalent to scaling the scores).
        attention_scores = torch.matmul(
            query_layer / math.sqrt(self.attention_head_size), key_layer.transpose(-1, -2))
        attention_scores = attention_scores + attention_mask
        attention_probs = nn.Softmax(dim=-1)(attention_scores)
        # Dropout on the attention distribution, as in the original paper.
        attention_probs = self.dropout(attention_probs)
        context_layer = torch.matmul(attention_probs, value_layer)
        # (batch, heads, seq, head_size) -> (batch, seq, hidden)
        context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
        new_context_layer_shape = context_layer.size()[
            :-2] + (self.all_head_size,)
        context_layer = context_layer.view(*new_context_layer_shape)
        return context_layer
class BertAttention(nn.Module):
    """Self-attention sub-block plus its residual/output projection."""

    def __init__(self, config):
        super(BertAttention, self).__init__()
        self.self = BertSelfAttention(config)
        self.output = BertSelfOutput(config)

    def forward(self, input_tensor, attention_mask, history_states=None):
        """Run self-attention (optionally over cached history) and project
        the context back with a residual connection to ``input_tensor``."""
        attn_context = self.self(
            input_tensor, attention_mask, history_states=history_states)
        return self.output(attn_context, input_tensor)
class BertLayer(nn.Module):
    """One transformer encoder layer: attention followed by the feed-forward
    (intermediate + output) sub-block, each with its own residual path."""

    def __init__(self, config):
        super(BertLayer, self).__init__()
        self.attention = BertAttention(config)
        self.intermediate = BertIntermediate(config)
        self.output = BertOutput(config)

    def forward(self, hidden_states, attention_mask, history_states=None):
        """Apply attention (optionally over cached history), then the FFN."""
        attn_out = self.attention(
            hidden_states, attention_mask, history_states=history_states)
        ffn_hidden = self.intermediate(attn_out)
        return self.output(ffn_hidden, attn_out)
class BertEncoder(nn.Module):
    """Stack of ``BertLayer`` blocks with optional per-layer cached states
    (``prev_embedding`` / ``prev_encoded_layers``) for incremental decoding."""
    def __init__(self, config):
        super(BertEncoder, self).__init__()
        layer = BertLayer(config)
        # One deep copy per hidden layer; weights are independent after init.
        self.layer = nn.ModuleList([copy.deepcopy(layer)
                                    for _ in range(config.num_hidden_layers)])
    def forward(self, hidden_states, attention_mask, output_all_encoded_layers=True, prev_embedding=None, prev_encoded_layers=None):
        """Run all layers; returns a list of per-layer outputs (or a
        single-element list with the final output when
        ``output_all_encoded_layers`` is False)."""
        # Both caches must be supplied together (or not at all).
        assert (prev_embedding is None) == (prev_encoded_layers is None)
        all_encoder_layers = []
        if (prev_embedding is not None) and (prev_encoded_layers is not None):
            # Incremental path: layer 0 attends over the cached embeddings,
            # layer i+1 over the cached output of layer i.
            history_states = prev_embedding
            for i, layer_module in enumerate(self.layer):
                hidden_states = layer_module(
                    hidden_states, attention_mask, history_states=history_states)
                if output_all_encoded_layers:
                    all_encoder_layers.append(hidden_states)
                if prev_encoded_layers is not None:
                    history_states = prev_encoded_layers[i]
        else:
            for layer_module in self.layer:
                hidden_states = layer_module(
                    hidden_states, attention_mask)
                if output_all_encoded_layers:
                    all_encoder_layers.append(hidden_states)
        if not output_all_encoded_layers:
            all_encoder_layers.append(hidden_states)
        return all_encoder_layers
class UnilmPreTrainedModel(PreTrainedModel):
    """Abstract base class wiring UniLM models into the transformers loading
    machinery (config class, archive map, TF-weight loader, weight init)."""
    config_class = UnilmConfig
    pretrained_model_archive_map = UNILM_PRETRAINED_MODEL_ARCHIVE_MAP
    load_tf_weights = load_tf_weights_in_bert
    base_model_prefix = "unilm"
    def _init_weights(self, module):
        """BERT-style init: normal(0, initializer_range) for linear/embedding
        weights, ones/zeros for LayerNorm, zero for linear biases."""
        if isinstance(module, (nn.Linear, nn.Embedding)):
            module.weight.data.normal_(
                mean=0.0, std=self.config.initializer_range)
        elif isinstance(module, BertLayerNorm):
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)
        # Separate check so Linear biases are zeroed after the weight init above.
        if isinstance(module, nn.Linear) and module.bias is not None:
            module.bias.data.zero_()
class UnilmModel(UnilmPreTrainedModel):
    """UniLM backbone: embeddings + encoder stack + pooler (BERT layout)."""
    def __init__(self, config):
        super(UnilmModel, self).__init__(config)
        self.embeddings = BertEmbeddings(config)
        self.encoder = BertEncoder(config)
        self.pooler = BertPooler(config)
        self.init_weights()
    def get_extended_attention_mask(self, input_ids, token_type_ids, attention_mask):
        """Convert a 2-D (batch, seq) or 3-D (batch, seq, seq) mask into an
        additive 4-D mask: 0 where attended, -10000 where masked."""
        if attention_mask is None:
            attention_mask = torch.ones_like(input_ids)
        # NOTE(review): this local default is never used below -- the caller's
        # token_type_ids (possibly None) is what reaches the embeddings.
        if token_type_ids is None:
            token_type_ids = torch.zeros_like(input_ids)
        if attention_mask.dim() == 2:
            extended_attention_mask = attention_mask.unsqueeze(1).unsqueeze(2)
        elif attention_mask.dim() == 3:
            extended_attention_mask = attention_mask.unsqueeze(1)
        else:
            raise NotImplementedError
        extended_attention_mask = extended_attention_mask.to(
            dtype=next(self.parameters()).dtype)  # fp16 compatibility
        extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0
        return extended_attention_mask
    def forward(self, input_ids, token_type_ids=None, attention_mask=None, output_all_encoded_layers=True):
        """Encode ``input_ids``; returns (encoded_layers, pooled_output).

        ``encoded_layers`` is a list of per-layer outputs, or just the final
        (batch, seq, hidden) tensor when ``output_all_encoded_layers`` is False.
        """
        extended_attention_mask = self.get_extended_attention_mask(
            input_ids, token_type_ids, attention_mask)
        embedding_output = self.embeddings(
            input_ids, token_type_ids)
        encoded_layers = self.encoder(embedding_output, extended_attention_mask,
                                      output_all_encoded_layers=output_all_encoded_layers)
        sequence_output = encoded_layers[-1]
        pooled_output = self.pooler(sequence_output)
        if not output_all_encoded_layers:
            encoded_layers = encoded_layers[-1]
        return encoded_layers, pooled_output
class UnilmModelIncr(UnilmModel):
    """UniLM backbone variant for incremental decoding: ``forward`` takes
    explicit position ids plus per-layer caches and also returns the new
    embedding output so callers can extend the caches."""
    def __init__(self, config):
        super(UnilmModelIncr, self).__init__(config)
    def forward(self, input_ids, token_type_ids, position_ids, attention_mask, output_all_encoded_layers=True, prev_embedding=None,
                prev_encoded_layers=None):
        """Encode the new slice of tokens, attending over the cached states.

        Returns (embedding_output, encoded_layers, pooled_output); the first
        element is needed to grow ``prev_embedding`` on the next step.
        """
        extended_attention_mask = self.get_extended_attention_mask(
            input_ids, token_type_ids, attention_mask)
        # Explicit position_ids: cached decoding restarts positions mid-sequence.
        embedding_output = self.embeddings(
            input_ids, token_type_ids, position_ids)
        encoded_layers = self.encoder(embedding_output,
                                      extended_attention_mask,
                                      output_all_encoded_layers=output_all_encoded_layers,
                                      prev_embedding=prev_embedding,
                                      prev_encoded_layers=prev_encoded_layers)
        sequence_output = encoded_layers[-1]
        pooled_output = self.pooler(sequence_output)
        if not output_all_encoded_layers:
            encoded_layers = encoded_layers[-1]
        return embedding_output, encoded_layers, pooled_output
class LabelSmoothingLoss(_Loss):
    """KL-divergence loss against a label-smoothed target distribution.

    With smoothing eps, the target distribution puts ``1 - eps`` on the gold
    token and spreads ``eps`` uniformly over the remaining vocabulary
    (excluding the ignore/padding index).
    """
    def __init__(self, label_smoothing=0, tgt_vocab_size=0, ignore_index=0, size_average=None, reduce=None, reduction='mean'):
        assert 0.0 < label_smoothing <= 1.0
        self.ignore_index = ignore_index
        super(LabelSmoothingLoss, self).__init__(
            size_average=size_average, reduce=reduce, reduction=reduction)
        assert label_smoothing > 0
        assert tgt_vocab_size > 0
        # Mass per non-gold token; -2 excludes the gold token and the
        # ignore/padding index from the uniform share.
        smoothing_value = label_smoothing / (tgt_vocab_size - 2)
        one_hot = torch.full((tgt_vocab_size,), smoothing_value)
        one_hot[self.ignore_index] = 0
        # Registered as a buffer so it follows the module across devices.
        self.register_buffer('one_hot', one_hot.unsqueeze(0))
        self.confidence = 1.0 - label_smoothing
        self.tgt_vocab_size = tgt_vocab_size
    def forward(self, output, target):
        """``output``: (batch, pos, vocab) log-probabilities; ``target``:
        (batch, pos) gold ids. Returns per-position KL, shape (batch, pos)."""
        assert self.tgt_vocab_size == output.size(2)
        batch_size, num_pos = target.size(0), target.size(1)
        output = output.view(-1, self.tgt_vocab_size)
        target = target.view(-1)
        model_prob = self.one_hot.repeat(target.size(0), 1)
        model_prob.scatter_(1, target.unsqueeze(1), self.confidence)
        # Zero the entire target row at padding positions so they contribute 0.
        model_prob.masked_fill_((target == self.ignore_index).unsqueeze(1), 0)
        return F.kl_div(output, model_prob.type_as(output), reduction='none').view(batch_size, num_pos, -1).sum(2)
class UnilmForLM(UnilmPreTrainedModel):
    """UniLM with a masked-LM head and a next-sentence head for pretraining."""
    def __init__(self, config):
        super(UnilmForLM, self).__init__(config)
        self.bert = UnilmModel(config)
        self.cls = BertOnlyMLMHead(config)
        self.crit_mask_lm = nn.CrossEntropyLoss(reduction='none')
        if hasattr(config, 'label_smoothing') and config.label_smoothing:
            self.crit_mask_lm_smoothed = LabelSmoothingLoss(
                config.label_smoothing, config.vocab_size, ignore_index=0, reduction='none')
        else:
            self.crit_mask_lm_smoothed = None
        self.num_labels = 2
        self.cls2 = BertOnlyNSPHead(config)
        self.crit_next_sent = nn.CrossEntropyLoss(ignore_index=-1)
        self.init_weights()
        self.tie_weights()
    def tie_weights(self):
        """Share the LM-head decoder matrix with the input word embeddings."""
        self._tie_or_clone_weights(self.cls.predictions.decoder,
                                   self.bert.embeddings.word_embeddings)
    def forward(self, input_ids, token_type_ids=None, attention_mask=None, masked_lm_labels=None, masked_pos=None, masked_weights=None, next_sentence_label=None):
        """Return prediction scores when ``masked_lm_labels`` is None,
        otherwise the (masked-LM [+ NSP]) training loss."""
        sequence_output, pooled_output = self.bert(
            input_ids, token_type_ids, attention_mask, output_all_encoded_layers=False)
        def gather_seq_out_by_pos(seq, pos):
            # Pick the hidden vectors at the masked positions: (batch, n, hidden).
            return torch.gather(seq, 1, pos.unsqueeze(2).expand(-1, -1, seq.size(-1)))
        def gather_seq_out_by_pos_average(seq, pos, mask):
            # NOTE(review): defined but unused in this forward pass.
            batch_size, max_token_num = pos.size(0), pos.size(-1)
            pos_vec = torch.gather(seq, 1, pos.view(batch_size, -1).unsqueeze(
                2).expand(-1, -1, seq.size(-1))).view(batch_size, -1, max_token_num, seq.size(-1))
            mask = mask.type_as(pos_vec)
            pos_vec_masked_sum = (
                pos_vec * mask.unsqueeze(3).expand_as(pos_vec)).sum(2)
            return pos_vec_masked_sum / mask.sum(2, keepdim=True).expand_as(pos_vec_masked_sum)
        def loss_mask_and_normalize(loss, mask):
            # Weighted mean over the valid (unpadded) masked positions.
            mask = mask.type_as(loss)
            loss = loss * mask
            denominator = torch.sum(mask) + 1e-5
            return (loss / denominator).sum()
        if masked_lm_labels is None:
            # Inference: return raw scores (all positions, or just masked_pos).
            if masked_pos is None:
                prediction_scores = self.cls(sequence_output)
            else:
                sequence_output_masked = gather_seq_out_by_pos(
                    sequence_output, masked_pos)
                prediction_scores = self.cls(sequence_output_masked)
            return prediction_scores
        sequence_output_masked = gather_seq_out_by_pos(
            sequence_output, masked_pos)
        prediction_scores_masked = self.cls(sequence_output_masked)
        if self.crit_mask_lm_smoothed:
            # Smoothed loss expects log-probabilities, not raw logits.
            masked_lm_loss = self.crit_mask_lm_smoothed(
                F.log_softmax(prediction_scores_masked.float(), dim=-1), masked_lm_labels)
        else:
            masked_lm_loss = self.crit_mask_lm(
                prediction_scores_masked.transpose(1, 2).float(), masked_lm_labels)
        masked_lm_loss = loss_mask_and_normalize(
            masked_lm_loss.float(), masked_weights)
        seq_relationship_score = self.cls2(pooled_output)
        if next_sentence_label is None:
            total_loss = masked_lm_loss
        else:
            next_sentence_loss = self.crit_next_sent(
                seq_relationship_score.view(-1, self.num_labels).float(), next_sentence_label.view(-1))
            total_loss = next_sentence_loss + masked_lm_loss
        return total_loss
class UnilmForSeq2Seq(UnilmPreTrainedModel):
    """UniLM fine-tuning model for seq2seq: masked-LM head only
    (refer to BertForPreTraining, without the NSP head)."""
    def __init__(self, config):
        super(UnilmForSeq2Seq, self).__init__(config)
        self.bert = UnilmModel(config)
        self.cls = BertOnlyMLMHead(config)
        self.crit_mask_lm = nn.CrossEntropyLoss(reduction='none')
        if hasattr(config, 'label_smoothing') and config.label_smoothing:
            self.crit_mask_lm_smoothed = LabelSmoothingLoss(
                config.label_smoothing, config.vocab_size, ignore_index=0, reduction='none')
        else:
            self.crit_mask_lm_smoothed = None
        self.init_weights()
        self.tie_weights()
    def tie_weights(self):
        """ Make sure we are sharing the input and output embeddings.
            Export to TorchScript can't handle parameter sharing so we are cloning them instead.
        """
        self._tie_or_clone_weights(self.cls.predictions.decoder,
                                   self.bert.embeddings.word_embeddings)
    def forward(self, input_ids, token_type_ids=None, attention_mask=None, masked_lm_labels=None, masked_pos=None, masked_weights=None, num_tokens_a=None, num_tokens_b=None):
        """Return prediction scores when ``masked_lm_labels`` is None,
        otherwise the normalized masked-LM loss.

        ``num_tokens_a`` / ``num_tokens_b`` are accepted for interface
        compatibility but unused here.
        """
        sequence_output, __ = self.bert(
            input_ids, token_type_ids, attention_mask, output_all_encoded_layers=False)
        def gather_seq_out_by_pos(seq, pos):
            # Pick the hidden vectors at the masked positions: (batch, n, hidden).
            return torch.gather(seq, 1, pos.unsqueeze(2).expand(-1, -1, seq.size(-1)))
        def gather_seq_out_by_pos_average(seq, pos, mask):
            # NOTE(review): defined but unused in this forward pass.
            batch_size, max_token_num = pos.size(0), pos.size(-1)
            pos_vec = torch.gather(seq, 1, pos.view(batch_size, -1).unsqueeze(
                2).expand(-1, -1, seq.size(-1))).view(batch_size, -1, max_token_num, seq.size(-1))
            mask = mask.type_as(pos_vec)
            pos_vec_masked_sum = (
                pos_vec * mask.unsqueeze(3).expand_as(pos_vec)).sum(2)
            return pos_vec_masked_sum / mask.sum(2, keepdim=True).expand_as(pos_vec_masked_sum)
        def loss_mask_and_normalize(loss, mask):
            # Weighted mean over the valid (unpadded) masked positions.
            mask = mask.type_as(loss)
            loss = loss * mask
            denominator = torch.sum(mask) + 1e-5
            return (loss / denominator).sum()
        if masked_lm_labels is None:
            # Inference: return raw scores (all positions, or just masked_pos).
            if masked_pos is None:
                prediction_scores = self.cls(sequence_output)
            else:
                sequence_output_masked = gather_seq_out_by_pos(
                    sequence_output, masked_pos)
                prediction_scores = self.cls(sequence_output_masked)
            return prediction_scores
        sequence_output_masked = gather_seq_out_by_pos(
            sequence_output, masked_pos)
        prediction_scores_masked = self.cls(sequence_output_masked)
        if self.crit_mask_lm_smoothed:
            # Smoothed loss expects log-probabilities, not raw logits.
            masked_lm_loss = self.crit_mask_lm_smoothed(
                F.log_softmax(prediction_scores_masked.float(), dim=-1), masked_lm_labels)
        else:
            masked_lm_loss = self.crit_mask_lm(
                prediction_scores_masked.transpose(1, 2).float(), masked_lm_labels)
        masked_lm_loss = loss_mask_and_normalize(
            masked_lm_loss.float(), masked_weights)
        return masked_lm_loss
class UnilmForSeq2SeqDecode(UnilmPreTrainedModel):
    """UniLM decoder for seq2seq inference.

    ``forward`` performs greedy left-to-right generation, or delegates to
    ``beam_search`` when ``search_beam_size > 1``. Both paths cache the
    embedding output and per-layer hidden states between steps via
    ``UnilmModelIncr`` so each step only encodes the newly added tokens.
    """

    def __init__(self, config, mask_word_id=0,
                 search_beam_size=1, length_penalty=1.0, eos_id=0, sos_id=0,
                 forbid_duplicate_ngrams=False, forbid_ignore_set=None, ngram_size=3, min_len=0):
        """Store decoding hyper-parameters and build the backbone + LM head.

        Args:
            mask_word_id: id of the [MASK] token appended at the slot to predict.
            search_beam_size: beam width; 1 means greedy decoding.
            length_penalty: > 0 applies GNMT-style length normalization.
            eos_id / sos_id: end- / start-of-sequence token ids.
            forbid_duplicate_ngrams: if True, block n-grams already generated.
            forbid_ignore_set: token ids exempt from the duplicate-ngram rule.
            ngram_size: n for the duplicate-ngram rule.
            min_len: minimum generated length before EOS is allowed.
        """
        super(UnilmForSeq2SeqDecode, self).__init__(config)
        self.bert = UnilmModelIncr(config)
        self.cls = BertOnlyMLMHead(config)
        self.crit_mask_lm = nn.CrossEntropyLoss(reduction='none')
        self.mask_word_id = mask_word_id
        self.search_beam_size = search_beam_size
        self.length_penalty = length_penalty
        self.eos_id = eos_id
        self.sos_id = sos_id
        self.forbid_duplicate_ngrams = forbid_duplicate_ngrams
        self.forbid_ignore_set = forbid_ignore_set
        self.ngram_size = ngram_size
        self.min_len = min_len
        self.init_weights()
        self.tie_weights()

    def tie_weights(self):
        """Share the LM-head decoder matrix with the input word embeddings."""
        self._tie_or_clone_weights(self.cls.predictions.decoder,
                                   self.bert.embeddings.word_embeddings)

    def forward(self, input_ids, token_type_ids, position_ids, attention_mask):
        """Greedily decode until the length given by ``token_type_ids``.

        Returns a (batch, generated_len) tensor of token ids; delegates to
        ``beam_search`` when ``search_beam_size > 1``.
        """
        if self.search_beam_size > 1:
            return self.beam_search(input_ids, token_type_ids, position_ids, attention_mask)

        input_shape = list(input_ids.size())
        batch_size = input_shape[0]
        input_length = input_shape[1]
        output_shape = list(token_type_ids.size())
        output_length = output_shape[1]

        output_ids = []
        prev_embedding = None
        prev_encoded_layers = None
        curr_ids = input_ids
        mask_ids = input_ids.new(batch_size, 1).fill_(self.mask_word_id)
        next_pos = input_length

        while next_pos < output_length:
            curr_length = list(curr_ids.size())[1]
            start_pos = next_pos - curr_length
            # Append a [MASK] slot; its hidden state predicts the next token.
            x_input_ids = torch.cat((curr_ids, mask_ids), dim=1)
            curr_token_type_ids = token_type_ids[:, start_pos:next_pos+1]
            curr_attention_mask = attention_mask[:,
                                                 start_pos:next_pos+1, :next_pos+1]
            curr_position_ids = position_ids[:, start_pos:next_pos+1]
            new_embedding, new_encoded_layers, _ = \
                self.bert(x_input_ids, curr_token_type_ids, curr_position_ids, curr_attention_mask,
                          output_all_encoded_layers=True, prev_embedding=prev_embedding, prev_encoded_layers=prev_encoded_layers)

            last_hidden = new_encoded_layers[-1][:, -1:, :]
            prediction_scores = self.cls(last_hidden)
            _, max_ids = torch.max(prediction_scores, dim=-1)
            output_ids.append(max_ids)

            # Cache everything except the [MASK] slot (re-predicted next step).
            if prev_embedding is None:
                prev_embedding = new_embedding[:, :-1, :]
            else:
                prev_embedding = torch.cat(
                    (prev_embedding, new_embedding[:, :-1, :]), dim=1)
            if prev_encoded_layers is None:
                prev_encoded_layers = [x[:, :-1, :]
                                       for x in new_encoded_layers]
            else:
                prev_encoded_layers = [torch.cat((x[0], x[1][:, :-1, :]), dim=1)
                                       for x in zip(prev_encoded_layers, new_encoded_layers)]
            curr_ids = max_ids
            next_pos += 1

        return torch.cat(output_ids, dim=1)

    def beam_search(self, input_ids, token_type_ids, position_ids, attention_mask):
        """Beam-search decoding.

        Returns a trace dict with keys 'pred_seq', 'scores', 'wids', 'ptrs';
        each value is padded to the output length and moved to the device of
        ``input_ids``.
        """
        input_shape = list(input_ids.size())
        batch_size = input_shape[0]
        input_length = input_shape[1]
        output_shape = list(token_type_ids.size())
        output_length = output_shape[1]

        prev_embedding = None
        prev_encoded_layers = None
        curr_ids = input_ids
        mask_ids = input_ids.new(batch_size, 1).fill_(self.mask_word_id)
        next_pos = input_length

        K = self.search_beam_size
        total_scores = []
        beam_masks = []
        step_ids = []
        step_back_ptrs = []
        partial_seqs = []
        forbid_word_mask = None
        buf_matrix = None

        while next_pos < output_length:
            curr_length = list(curr_ids.size())[1]
            start_pos = next_pos - curr_length
            x_input_ids = torch.cat((curr_ids, mask_ids), dim=1)
            curr_token_type_ids = token_type_ids[:, start_pos:next_pos + 1]
            curr_attention_mask = attention_mask[:,
                                                 start_pos:next_pos + 1, :next_pos + 1]
            curr_position_ids = position_ids[:, start_pos:next_pos + 1]
            new_embedding, new_encoded_layers, _ = \
                self.bert(x_input_ids, curr_token_type_ids, curr_position_ids, curr_attention_mask,
                          output_all_encoded_layers=True, prev_embedding=prev_embedding, prev_encoded_layers=prev_encoded_layers)

            last_hidden = new_encoded_layers[-1][:, -1:, :]
            prediction_scores = self.cls(last_hidden)
            log_scores = torch.nn.functional.log_softmax(
                prediction_scores, dim=-1)
            if forbid_word_mask is not None:
                log_scores += (forbid_word_mask * -10000.0)
            if self.min_len and (next_pos-input_length+1 <= self.min_len):
                # Block EOS until the minimum length has been generated.
                log_scores[:, :, self.eos_id].fill_(-10000.0)
            kk_scores, kk_ids = torch.topk(log_scores, k=K)
            if len(total_scores) == 0:
                # First step: every beam starts from the same (expanded) state.
                k_ids = torch.reshape(kk_ids, [batch_size, K])
                back_ptrs = torch.zeros(batch_size, K, dtype=torch.long)
                k_scores = torch.reshape(kk_scores, [batch_size, K])
            else:
                # Finished beams (last token was EOS) get a huge penalty so
                # they are not extended further.
                last_eos = torch.reshape(
                    beam_masks[-1], [batch_size * K, 1, 1])
                last_seq_scores = torch.reshape(
                    total_scores[-1], [batch_size * K, 1, 1])
                kk_scores += last_eos * (-10000.0) + last_seq_scores
                kk_scores = torch.reshape(kk_scores, [batch_size, K * K])
                k_scores, k_ids = torch.topk(kk_scores, k=K)
                # BUGFIX: was torch.div(k_ids, K), which performs true division
                # on integer tensors in torch>=1.8 and yields floats that break
                # the gathers below; floor division recovers the beam index.
                back_ptrs = k_ids // K
                kk_ids = torch.reshape(kk_ids, [batch_size, K * K])
                k_ids = torch.gather(kk_ids, 1, k_ids)
            step_back_ptrs.append(back_ptrs)
            step_ids.append(k_ids)
            beam_masks.append(torch.eq(k_ids, self.eos_id).float())
            total_scores.append(k_scores)

            def first_expand(x):
                # Tile a (batch, ...) tensor K times -> (batch*K, ...).
                input_shape = list(x.size())
                expanded_shape = input_shape[:1] + [1] + input_shape[1:]
                x = torch.reshape(x, expanded_shape)
                repeat_count = [1, K] + [1] * (len(input_shape) - 1)
                x = x.repeat(*repeat_count)
                x = torch.reshape(x, [input_shape[0] * K] + input_shape[1:])
                return x

            def select_beam_items(x, ids):
                # Reorder the beam dimension of x according to back-pointers.
                id_shape = list(ids.size())
                id_rank = len(id_shape)
                assert len(id_shape) == 2
                x_shape = list(x.size())
                x = torch.reshape(x, [batch_size, K] + x_shape[1:])
                x_rank = len(x_shape) + 1
                assert x_rank >= 2
                if id_rank < x_rank:
                    ids = torch.reshape(
                        ids, id_shape + [1] * (x_rank - id_rank))
                    ids = ids.expand(id_shape + x_shape[1:])
                y = torch.gather(x, 1, ids)
                y = torch.reshape(y, x_shape)
                return y

            is_first = (prev_embedding is None)
            # Grow the caches (dropping the [MASK] slot) and, after the first
            # step, reorder them to follow the surviving beams.
            if prev_embedding is None:
                prev_embedding = first_expand(new_embedding[:, :-1, :])
            else:
                prev_embedding = torch.cat(
                    (prev_embedding, new_embedding[:, :-1, :]), dim=1)
                prev_embedding = select_beam_items(
                    prev_embedding, back_ptrs)
            if prev_encoded_layers is None:
                prev_encoded_layers = [first_expand(
                    x[:, :-1, :]) for x in new_encoded_layers]
            else:
                prev_encoded_layers = [torch.cat((x[0], x[1][:, :-1, :]), dim=1)
                                       for x in zip(prev_encoded_layers, new_encoded_layers)]
                prev_encoded_layers = [select_beam_items(
                    x, back_ptrs) for x in prev_encoded_layers]
            curr_ids = torch.reshape(k_ids, [batch_size * K, 1])
            if is_first:
                token_type_ids = first_expand(token_type_ids)
                position_ids = first_expand(position_ids)
                attention_mask = first_expand(attention_mask)
                mask_ids = first_expand(mask_ids)
            if self.forbid_duplicate_ngrams:
                wids = step_ids[-1].tolist()
                ptrs = step_back_ptrs[-1].tolist()
                if is_first:
                    partial_seqs = []
                    for b in range(batch_size):
                        for k in range(K):
                            partial_seqs.append([wids[b][k]])
                else:
                    new_partial_seqs = []
                    for b in range(batch_size):
                        for k in range(K):
                            new_partial_seqs.append(
                                partial_seqs[ptrs[b][k] + b * K] + [wids[b][k]])
                    partial_seqs = new_partial_seqs

                def get_dup_ngram_candidates(seq, n):
                    # Tokens that would complete an n-gram already present in seq.
                    cands = set()
                    if len(seq) < n:
                        return []
                    tail = seq[-(n-1):]
                    if self.forbid_ignore_set and any(tk in self.forbid_ignore_set for tk in tail):
                        return []
                    for i in range(len(seq) - (n - 1)):
                        mismatch = False
                        for j in range(n - 1):
                            if tail[j] != seq[i + j]:
                                mismatch = True
                                break
                        if (not mismatch) and not(self.forbid_ignore_set and (seq[i + n - 1] in self.forbid_ignore_set)):
                            cands.add(seq[i + n - 1])
                    return list(sorted(cands))

                if len(partial_seqs[0]) >= self.ngram_size:
                    dup_cands = []
                    for seq in partial_seqs:
                        dup_cands.append(
                            get_dup_ngram_candidates(seq, self.ngram_size))
                    if max(len(x) for x in dup_cands) > 0:
                        if buf_matrix is None:
                            vocab_size = list(log_scores.size())[-1]
                            buf_matrix = np.zeros(
                                (batch_size * K, vocab_size), dtype=float)
                        else:
                            buf_matrix.fill(0)
                        for bk, cands in enumerate(dup_cands):
                            for i, wid in enumerate(cands):
                                buf_matrix[bk, wid] = 1.0
                        forbid_word_mask = torch.tensor(
                            buf_matrix, dtype=log_scores.dtype)
                        # BUGFIX: was .cuda(), which crashed CPU-only decoding;
                        # move the mask to wherever the scores live instead.
                        forbid_word_mask = torch.reshape(
                            forbid_word_mask, [batch_size * K, 1, vocab_size]).to(log_scores.device)
                    else:
                        forbid_word_mask = None
            next_pos += 1

        # Move traces to Python lists and back-track the best hypothesis per batch.
        total_scores = [x.tolist() for x in total_scores]
        step_ids = [x.tolist() for x in step_ids]
        step_back_ptrs = [x.tolist() for x in step_back_ptrs]
        traces = {'pred_seq': [], 'scores': [], 'wids': [], 'ptrs': []}
        for b in range(batch_size):
            scores = [x[b] for x in total_scores]
            wids_list = [x[b] for x in step_ids]
            ptrs = [x[b] for x in step_back_ptrs]
            traces['scores'].append(scores)
            traces['wids'].append(wids_list)
            traces['ptrs'].append(ptrs)
            # The search effectively ends at the first frame where every beam
            # emitted EOS.
            last_frame_id = len(scores) - 1
            for i, wids in enumerate(wids_list):
                if all(wid == self.eos_id for wid in wids):
                    last_frame_id = i
                    break
            max_score = -math.inf
            frame_id = -1
            pos_in_frame = -1
            for fid in range(last_frame_id + 1):
                for i, wid in enumerate(wids_list[fid]):
                    if wid == self.eos_id or fid == last_frame_id:
                        s = scores[fid][i]
                        if self.length_penalty > 0:
                            # GNMT length normalization: ((5 + len) / 6) ** alpha.
                            s /= math.pow((5 + fid + 1) / 6.0,
                                          self.length_penalty)
                        if s > max_score:
                            max_score = s
                            frame_id = fid
                            pos_in_frame = i
            if frame_id == -1:
                traces['pred_seq'].append([0])
            else:
                # Follow back-pointers from the winning frame to step 0.
                seq = [wids_list[frame_id][pos_in_frame]]
                for fid in range(frame_id, 0, -1):
                    pos_in_frame = ptrs[fid][pos_in_frame]
                    seq.append(wids_list[fid - 1][pos_in_frame])
                seq.reverse()
                traces['pred_seq'].append(seq)

        def _pad_sequence(sequences, max_len, padding_value=0):
            # Right-pad a list of tensors to max_len along dim 0.
            trailing_dims = sequences[0].size()[1:]
            out_dims = (len(sequences), max_len) + trailing_dims
            out_tensor = sequences[0].data.new(*out_dims).fill_(padding_value)
            for i, tensor in enumerate(sequences):
                length = tensor.size(0)
                out_tensor[i, :length, ...] = tensor
            return out_tensor

        for k in ('pred_seq', 'scores', 'wids', 'ptrs'):
            ts_list = traces[k]
            if not isinstance(ts_list[0], torch.Tensor):
                dt = torch.float if k == 'scores' else torch.long
                ts_list = [torch.tensor(it, dtype=dt) for it in ts_list]
            traces[k] = _pad_sequence(
                ts_list, output_length, padding_value=0).to(input_ids.device)
        return traces
| 31,656 | 44.095442 | 238 | py |
Unilm | Unilm-master/run_seq2seq.py | # coding=utf-8
import os
import logging
import glob
import math
import json
import argparse
import random
from pathlib import Path
from tqdm import tqdm, trange
import numpy as np
import torch
from torch.utils.data import RandomSampler
from torch.utils.data.distributed import DistributedSampler
import torch.distributed as dist
from tokenization_unilm import UnilmTokenizer, WhitespaceTokenizer
from modeling_unilm import UnilmForSeq2Seq, UnilmConfig
from transformers import AdamW, get_linear_schedule_with_warmup
import utils_seq2seq
# All shortcut names advertised by the supported config classes.
ALL_MODELS = sum((tuple(conf.pretrained_config_archive_map.keys())
                  for conf in (UnilmConfig,)), ())
# model_type -> (config class, model class, tokenizer class)
MODEL_CLASSES = {
    'unilm': (UnilmConfig, UnilmForSeq2Seq, UnilmTokenizer)
}
logging.basicConfig(format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',
                    datefmt='%m/%d/%Y %H:%M:%S',
                    level=logging.INFO)
logger = logging.getLogger(__name__)
def _get_max_epoch_model(output_dir):
fn_model_list = glob.glob(os.path.join(output_dir, "model.*.bin"))
fn_optim_list = glob.glob(os.path.join(output_dir, "optim.*.bin"))
if (not fn_model_list) or (not fn_optim_list):
return None
both_set = set([int(Path(fn).stem.split('.')[-1]) for fn in fn_model_list]
) & set([int(Path(fn).stem.split('.')[-1]) for fn in fn_optim_list])
if both_set:
return max(both_set)
else:
return None
def main():
    """Fine-tune UniLM for sequence-to-sequence learning.

    Parses CLI arguments, sets up (optionally distributed / fp16) training,
    recovers the newest checkpoint found in --output_dir (or the file given
    by --model_recover_path), then runs the masked-LM seq2seq training loop
    and saves model/optimizer/scheduler (and amp) state once per epoch.
    """
    parser = argparse.ArgumentParser()

    # Required parameters
    parser.add_argument("--data_dir", default=None, type=str, required=True,
                        help="The input data dir. Should contain the .tsv files (or other data files) for the task.")
    parser.add_argument("--src_file", default=None, type=str,
                        help="The input data file name.")
    parser.add_argument("--model_type", default=None, type=str, required=True,
                        help="Model type selected in the list: " + ", ".join(MODEL_CLASSES.keys()))
    parser.add_argument("--model_name_or_path", default=None, type=str, required=True,
                        help="Path to pre-trained model or shortcut name selected in the list: " + ", ".join(ALL_MODELS))
    parser.add_argument("--output_dir", default=None, type=str, required=True,
                        help="The output directory where the model predictions and checkpoints will be written.")
    parser.add_argument("--log_dir", default='', type=str,
                        help="The output directory where the log will be written.")
    parser.add_argument("--model_recover_path", default=None, type=str,
                        help="The file of fine-tuned pretraining model.")
    parser.add_argument("--optim_recover_path", default=None, type=str,
                        help="The file of pretraining optimizer.")
    parser.add_argument("--config_name", default="", type=str,
                        help="Pretrained config name or path if not the same as model_name")
    parser.add_argument("--tokenizer_name", default="", type=str,
                        help="Pretrained tokenizer name or path if not the same as model_name")

    # Other parameters
    parser.add_argument("--max_seq_length", default=128, type=int,
                        help="The maximum total input sequence length after WordPiece tokenization. \n"
                        "Sequences longer than this will be truncated, and sequences shorter \n"
                        "than this will be padded.")
    parser.add_argument('--max_position_embeddings', type=int, default=None,
                        help="max position embeddings")
    parser.add_argument("--do_train", action='store_true',
                        help="Whether to run training.")
    parser.add_argument("--do_eval", action='store_true',
                        help="Whether to run eval on the dev set.")
    parser.add_argument("--do_lower_case", action='store_true',
                        help="Set this flag if you are using an uncased model.")
    parser.add_argument("--train_batch_size", default=32, type=int,
                        help="Total batch size for training.")
    parser.add_argument("--eval_batch_size", default=64, type=int,
                        help="Total batch size for eval.")
    parser.add_argument("--learning_rate", default=5e-5, type=float,
                        help="The initial learning rate for Adam.")
    parser.add_argument("--label_smoothing", default=0, type=float,
                        help="The initial learning rate for Adam.")
    parser.add_argument("--weight_decay", default=0.01, type=float,
                        help="The weight decay rate for Adam.")
    parser.add_argument("--adam_epsilon", default=1e-8, type=float,
                        help="Epsilon for Adam optimizer.")
    parser.add_argument("--max_grad_norm", default=1.0, type=float,
                        help="Max gradient norm.")
    parser.add_argument("--num_train_epochs", default=3.0, type=float,
                        help="Total number of training epochs to perform.")
    parser.add_argument("--warmup_proportion", default=0.1, type=float,
                        help="Proportion of training to perform linear learning rate warmup for. "
                        "E.g., 0.1 = 10%% of training.")
    parser.add_argument("--hidden_dropout_prob", default=0.1, type=float,
                        help="Dropout rate for hidden states.")
    parser.add_argument("--attention_probs_dropout_prob", default=0.1, type=float,
                        help="Dropout rate for attention probabilities.")
    parser.add_argument("--no_cuda", action='store_true',
                        help="Whether not to use CUDA when available")
    parser.add_argument("--local_rank", type=int, default=-1,
                        help="local_rank for distributed training on gpus")
    parser.add_argument('--seed', type=int, default=42,
                        help="random seed for initialization")
    parser.add_argument('--gradient_accumulation_steps', type=int, default=1,
                        help="Number of updates steps to accumulate before performing a backward/update pass.")
    parser.add_argument('--fp16', action='store_true',
                        help="Whether to use 16-bit float precision instead of 32-bit")
    parser.add_argument('--fp16_opt_level', type=str, default='O1',
                        help="For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']."
                        "See details at https://nvidia.github.io/apex/amp.html")
    parser.add_argument('--tokenized_input', action='store_true',
                        help="Whether the input is tokenized.")
    parser.add_argument('--max_len_a', type=int, default=0,
                        help="Truncate_config: maximum length of segment A.")
    parser.add_argument('--max_len_b', type=int, default=0,
                        help="Truncate_config: maximum length of segment B.")
    parser.add_argument('--trunc_seg', default='',
                        help="Truncate_config: first truncate segment A/B (option: a, b).")
    parser.add_argument('--always_truncate_tail', action='store_true',
                        help="Truncate_config: Whether we should always truncate tail.")
    parser.add_argument("--mask_prob", default=0.20, type=float,
                        help="Number of prediction is sometimes less than max_pred when sequence is short.")
    parser.add_argument("--mask_prob_eos", default=0, type=float,
                        help="Number of prediction is sometimes less than max_pred when sequence is short.")
    parser.add_argument('--max_pred', type=int, default=20,
                        help="Max tokens of prediction.")
    parser.add_argument("--num_workers", default=0, type=int,
                        help="Number of workers for the data loader.")
    parser.add_argument('--mask_source_words', action='store_true',
                        help="Whether to mask source words for training")
    parser.add_argument('--skipgram_prb', type=float, default=0.0,
                        help='prob of ngram mask')
    parser.add_argument('--skipgram_size', type=int, default=1,
                        help='the max size of ngram mask')
    parser.add_argument('--mask_whole_word', action='store_true',
                        help="Whether masking a whole word.")
    args = parser.parse_args()

    # Ignore a recover path that does not point at an existing file.
    if not(args.model_recover_path and Path(args.model_recover_path).exists()):
        args.model_recover_path = None

    args.output_dir = args.output_dir.replace(
        '[PT_OUTPUT_DIR]', os.getenv('PT_OUTPUT_DIR', ''))
    args.log_dir = args.log_dir.replace(
        '[PT_OUTPUT_DIR]', os.getenv('PT_OUTPUT_DIR', ''))

    os.makedirs(args.output_dir, exist_ok=True)
    if args.log_dir:
        os.makedirs(args.log_dir, exist_ok=True)
    # Persist the run configuration for reproducibility.
    # NOTE(review): the file handle opened inline here is never closed explicitly.
    json.dump(args.__dict__, open(os.path.join(
        args.output_dir, 'opt.json'), 'w'), sort_keys=True, indent=2)

    if args.local_rank == -1 or args.no_cuda:
        device = torch.device(
            "cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
        n_gpu = torch.cuda.device_count()
    else:
        torch.cuda.set_device(args.local_rank)
        device = torch.device("cuda", args.local_rank)
        n_gpu = 1
        # Initializes the distributed backend which will take care of sychronizing nodes/GPUs
        dist.init_process_group(backend='nccl')
    logger.info("device: {} n_gpu: {}, distributed training: {}, 16-bits training: {}".format(
        device, n_gpu, bool(args.local_rank != -1), args.fp16))

    if args.gradient_accumulation_steps < 1:
        raise ValueError("Invalid gradient_accumulation_steps parameter: {}, should be >= 1".format(
            args.gradient_accumulation_steps))

    # train_batch_size becomes the per-step micro-batch size after accumulation.
    args.train_batch_size = int(
        args.train_batch_size / args.gradient_accumulation_steps)

    random.seed(args.seed)
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    if n_gpu > 0:
        torch.cuda.manual_seed_all(args.seed)

    if not args.do_train and not args.do_eval:
        raise ValueError(
            "At least one of `do_train` or `do_eval` must be True.")

    if args.local_rank not in (-1, 0):
        # Make sure only the first process in distributed training will download model & vocab
        dist.barrier()
    args.model_type = args.model_type.lower()
    config_class, model_class, tokenizer_class = MODEL_CLASSES[args.model_type]
    config = config_class.from_pretrained(
        args.config_name if args.config_name else args.model_name_or_path, max_position_embeddings=args.max_position_embeddings, label_smoothing=args.label_smoothing)
    tokenizer = tokenizer_class.from_pretrained(
        args.tokenizer_name if args.tokenizer_name else args.model_name_or_path, do_lower_case=args.do_lower_case)
    data_tokenizer = WhitespaceTokenizer() if args.tokenized_input else tokenizer
    if args.local_rank == 0:
        dist.barrier()

    if args.do_train:
        print("Loading Train Dataset", args.data_dir)
        bi_uni_pipeline = [utils_seq2seq.Preprocess4Seq2seq(args.max_pred, args.mask_prob, list(tokenizer.vocab.keys()), tokenizer.convert_tokens_to_ids, args.max_seq_length, mask_source_words=False, skipgram_prb=args.skipgram_prb, skipgram_size=args.skipgram_size, mask_whole_word=args.mask_whole_word, tokenizer=data_tokenizer)]

        file = os.path.join(
            args.data_dir, args.src_file if args.src_file else 'train.tgt')
        train_dataset = utils_seq2seq.Seq2SeqDataset(
            file, args.train_batch_size, data_tokenizer, args.max_seq_length, bi_uni_pipeline=bi_uni_pipeline)
        if args.local_rank == -1:
            train_sampler = RandomSampler(train_dataset, replacement=False)
            _batch_size = args.train_batch_size
        else:
            train_sampler = DistributedSampler(train_dataset)
            _batch_size = args.train_batch_size // dist.get_world_size()
        train_dataloader = torch.utils.data.DataLoader(train_dataset, batch_size=_batch_size, sampler=train_sampler,
                                                       num_workers=args.num_workers, collate_fn=utils_seq2seq.batch_list_to_batch_tensors, pin_memory=False)

        # note: args.train_batch_size has been changed to (/= args.gradient_accumulation_steps)
        # t_total = int(math.ceil(len(train_dataset.ex_list) / args.train_batch_size)
        t_total = int(len(train_dataloader) * args.num_train_epochs /
                      args.gradient_accumulation_steps)

    # Prepare model
    recover_step = _get_max_epoch_model(args.output_dir)
    if args.local_rank not in (-1, 0):
        # Make sure only the first process in distributed training will download model & vocab
        dist.barrier()
    global_step = 0
    if (recover_step is None) and (args.model_recover_path is None):
        model_recover = None
    else:
        if recover_step:
            logger.info("***** Recover model: %d *****", recover_step)
            model_recover = torch.load(os.path.join(
                args.output_dir, "model.{0}.bin".format(recover_step)), map_location='cpu')
            # recover_step == number of epochs
            global_step = math.floor(
                recover_step * t_total / args.num_train_epochs)
        elif args.model_recover_path:
            logger.info("***** Recover model: %s *****",
                        args.model_recover_path)
            model_recover = torch.load(
                args.model_recover_path, map_location='cpu')
    model = model_class.from_pretrained(
        args.model_name_or_path, state_dict=model_recover, config=config)
    if args.local_rank == 0:
        dist.barrier()

    model.to(device)

    # Prepare optimizer: no weight decay on bias/LayerNorm parameters.
    param_optimizer = list(model.named_parameters())
    no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']
    optimizer_grouped_parameters = [
        {'params': [p for n, p in param_optimizer if not any(
            nd in n for nd in no_decay)], 'weight_decay': 0.01},
        {'params': [p for n, p in param_optimizer if any(
            nd in n for nd in no_decay)], 'weight_decay': 0.0}
    ]
    optimizer = AdamW(optimizer_grouped_parameters,
                      lr=args.learning_rate, eps=args.adam_epsilon)
    scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps=int(args.warmup_proportion*t_total), num_training_steps=t_total)

    if args.fp16:
        try:
            from apex import amp
        except ImportError:
            raise ImportError(
                "Please install apex from https://www.github.com/nvidia/apex to use fp16 training.")
        model, optimizer = amp.initialize(
            model, optimizer, opt_level=args.fp16_opt_level)

    if args.local_rank != -1:
        try:
            from torch.nn.parallel import DistributedDataParallel as DDP
        except ImportError:
            raise ImportError("DistributedDataParallel")
        model = DDP(model, device_ids=[
                    args.local_rank], output_device=args.local_rank, find_unused_parameters=True)
    elif n_gpu > 1:
        model = torch.nn.DataParallel(model)

    if recover_step:
        logger.info("***** Recover optimizer: %d *****", recover_step)
        optim_recover = torch.load(os.path.join(
            args.output_dir, "optim.{0}.bin".format(recover_step)), map_location='cpu')
        if hasattr(optim_recover, 'state_dict'):
            optim_recover = optim_recover.state_dict()
        optimizer.load_state_dict(optim_recover)
        # NOTE(review): amp state is restored unconditionally; this presumably
        # assumes the recovered run used --fp16 (amp is only imported when
        # args.fp16 is set) — confirm.
        logger.info("***** Recover amp: %d *****", recover_step)
        amp_recover = torch.load(os.path.join(
            args.output_dir, "amp.{0}.bin".format(recover_step)), map_location='cpu')
        amp.load_state_dict(amp_recover)
        logger.info("***** Recover scheduler: %d *****", recover_step)
        scheduler_recover = torch.load(os.path.join(
            args.output_dir, "sched.{0}.bin".format(recover_step)), map_location='cpu')
        scheduler.load_state_dict(scheduler_recover)

    logger.info("***** CUDA.empty_cache() *****")
    torch.cuda.empty_cache()

    if args.do_train:
        logger.info("***** Running training *****")
        logger.info(" Batch size = %d", args.train_batch_size)
        logger.info(" Num steps = %d", t_total)
        model.train()
        if recover_step:
            start_epoch = recover_step+1
        else:
            start_epoch = 1
        for i_epoch in trange(start_epoch, int(args.num_train_epochs)+1, desc="Epoch", disable=args.local_rank not in (-1, 0)):
            if args.local_rank != -1:
                train_sampler.set_epoch(i_epoch)
            iter_bar = tqdm(train_dataloader, desc='Iter (loss=X.XXX)',
                            disable=args.local_rank not in (-1, 0))
            for step, batch in enumerate(iter_bar):
                batch = [
                    t.to(device) if t is not None else None for t in batch]
                input_ids, segment_ids, input_mask, lm_label_ids, masked_pos, masked_weights, _ = batch
                masked_lm_loss = model(input_ids, segment_ids, input_mask, lm_label_ids,
                                       masked_pos=masked_pos, masked_weights=masked_weights)
                if n_gpu > 1:    # mean() to average on multi-gpu.
                    # loss = loss.mean()
                    masked_lm_loss = masked_lm_loss.mean()
                loss = masked_lm_loss

                # logging for each step (i.e., before normalization by args.gradient_accumulation_steps)
                iter_bar.set_description('Iter (loss=%5.3f)' % loss.item())

                # ensure that accumlated gradients are normalized
                if args.gradient_accumulation_steps > 1:
                    loss = loss / args.gradient_accumulation_steps

                if args.fp16:
                    with amp.scale_loss(loss, optimizer) as scaled_loss:
                        scaled_loss.backward()
                    torch.nn.utils.clip_grad_norm_(
                        amp.master_params(optimizer), args.max_grad_norm)
                else:
                    loss.backward()
                    torch.nn.utils.clip_grad_norm_(
                        model.parameters(), args.max_grad_norm)
                if (step + 1) % args.gradient_accumulation_steps == 0:
                    optimizer.step()
                    scheduler.step()  # Update learning rate schedule
                    optimizer.zero_grad()
                    global_step += 1

            # Save a trained model
            if (args.local_rank == -1 or torch.distributed.get_rank() == 0):
                logger.info(
                    "** ** * Saving fine-tuned model and optimizer ** ** * ")
                model_to_save = model.module if hasattr(
                    model, 'module') else model  # Only save the model it-self
                output_model_file = os.path.join(
                    args.output_dir, "model.{0}.bin".format(i_epoch))
                torch.save(model_to_save.state_dict(), output_model_file)
                output_optim_file = os.path.join(
                    args.output_dir, "optim.{0}.bin".format(i_epoch))
                torch.save(optimizer.state_dict(), output_optim_file)
                if args.fp16:
                    output_amp_file = os.path.join(
                        args.output_dir, "amp.{0}.bin".format(i_epoch))
                    torch.save(amp.state_dict(), output_amp_file)
                output_sched_file = os.path.join(
                    args.output_dir, "sched.{0}.bin".format(i_epoch))
                torch.save(scheduler.state_dict(), output_sched_file)

                logger.info("***** CUDA.empty_cache() *****")
                torch.cuda.empty_cache()
| 19,869 | 50.343669 | 330 | py |
Unilm | Unilm-master/decode_seq2seq.py | # coding=utf-8
# The MIT License (MIT)
# Copyright (c) Microsoft Corporation
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import logging
import glob
import argparse
import math
import random
from tqdm import tqdm, trange
import pickle
import numpy as np
import torch
from torch.utils.data import DataLoader, RandomSampler
from torch.utils.data.distributed import DistributedSampler
from tokenization_unilm import UnilmTokenizer, WhitespaceTokenizer
from transformers import AdamW, get_linear_schedule_with_warmup
from modeling_unilm import UnilmForSeq2SeqDecode, UnilmConfig
# from transformers import (UnilmTokenizer, WhitespaceTokenizer,
# UnilmForSeq2SeqDecode, AdamW, UnilmConfig)
import utils_seq2seq
ALL_MODELS = sum((tuple(conf.pretrained_config_archive_map.keys())
for conf in (UnilmConfig,)), ())
MODEL_CLASSES = {
'unilm': (UnilmConfig, UnilmForSeq2SeqDecode, UnilmTokenizer)
}
logging.basicConfig(format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',
datefmt='%m/%d/%Y %H:%M:%S',
level=logging.INFO)
logger = logging.getLogger(__name__)
def detokenize(tk_list):
    """Merge WordPiece continuation tokens ('##xxx') back onto the
    preceding token, returning a list of whole words."""
    words = []
    for token in tk_list:
        if words and token.startswith('##'):
            words[-1] += token[2:]
        else:
            words.append(token)
    return words
def main():
    """Decode with a fine-tuned UniLM seq2seq model.

    Loads each checkpoint matched by --model_recover_path (a glob pattern),
    reads --input_file one example per line, generates an output sequence
    per input via greedy decoding or beam search, and writes the
    detokenized results (plus optional beam-search score traces) to disk.
    """
    parser = argparse.ArgumentParser()

    # Required parameters
    parser.add_argument("--model_type", default=None, type=str, required=True,
                        help="Model type selected in the list: " + ", ".join(MODEL_CLASSES.keys()))
    parser.add_argument("--model_name_or_path", default=None, type=str, required=True,
                        help="Path to pre-trained model or shortcut name selected in the list: " + ", ".join(ALL_MODELS))
    parser.add_argument("--model_recover_path", default=None, type=str,
                        help="The file of fine-tuned pretraining model.")
    parser.add_argument("--config_name", default="", type=str,
                        help="Pretrained config name or path if not the same as model_name")
    parser.add_argument("--tokenizer_name", default="", type=str,
                        help="Pretrained tokenizer name or path if not the same as model_name")
    parser.add_argument("--max_seq_length", default=512, type=int,
                        help="The maximum total input sequence length after WordPiece tokenization. \n"
                        "Sequences longer than this will be truncated, and sequences shorter \n"
                        "than this will be padded.")

    # decoding parameters
    parser.add_argument('--fp16', action='store_true',
                        help="Whether to use 16-bit float precision instead of 32-bit")
    parser.add_argument('--fp16_opt_level', type=str, default='O1',
                        help="For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']."
                        "See details at https://nvidia.github.io/apex/amp.html")
    parser.add_argument("--input_file", type=str, help="Input file")
    parser.add_argument('--subset', type=int, default=0,
                        help="Decode a subset of the input dataset.")
    parser.add_argument("--output_file", type=str, help="output file")
    parser.add_argument("--split", type=str, default="",
                        help="Data split (train/val/test).")
    parser.add_argument('--tokenized_input', action='store_true',
                        help="Whether the input is tokenized.")
    parser.add_argument('--seed', type=int, default=123,
                        help="random seed for initialization")
    parser.add_argument("--do_lower_case", action='store_true',
                        help="Set this flag if you are using an uncased model.")
    parser.add_argument('--batch_size', type=int, default=4,
                        help="Batch size for decoding.")
    parser.add_argument('--beam_size', type=int, default=1,
                        help="Beam size for searching")
    parser.add_argument('--length_penalty', type=float, default=0,
                        help="Length penalty for beam search")
    parser.add_argument('--forbid_duplicate_ngrams', action='store_true')
    parser.add_argument('--forbid_ignore_word', type=str, default=None,
                        help="Forbid the word during forbid_duplicate_ngrams")
    parser.add_argument("--min_len", default=None, type=int)
    parser.add_argument('--need_score_traces', action='store_true')
    parser.add_argument('--ngram_size', type=int, default=3)
    parser.add_argument('--max_tgt_length', type=int, default=128,
                        help="maximum length of target sequence")
    args = parser.parse_args()

    if args.need_score_traces and args.beam_size <= 1:
        raise ValueError(
            "Score trace is only available for beam search with beam size > 1.")
    if args.max_tgt_length >= args.max_seq_length - 2:
        raise ValueError("Maximum tgt length exceeds max seq length - 2.")

    device = torch.device(
        "cuda" if torch.cuda.is_available() else "cpu")
    n_gpu = torch.cuda.device_count()

    random.seed(args.seed)
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    if n_gpu > 0:
        torch.cuda.manual_seed_all(args.seed)

    args.model_type = args.model_type.lower()
    config_class, model_class, tokenizer_class = MODEL_CLASSES[args.model_type]
    config = config_class.from_pretrained(
        args.config_name if args.config_name else args.model_name_or_path, max_position_embeddings=args.max_seq_length)
    tokenizer = tokenizer_class.from_pretrained(
        args.tokenizer_name if args.tokenizer_name else args.model_name_or_path, do_lower_case=args.do_lower_case)

    bi_uni_pipeline = []
    bi_uni_pipeline.append(utils_seq2seq.Preprocess4Seq2seqDecode(list(tokenizer.vocab.keys()), tokenizer.convert_tokens_to_ids,
                                                                  args.max_seq_length, max_tgt_length=args.max_tgt_length))

    # Prepare model
    mask_word_id, eos_word_ids, sos_word_id = tokenizer.convert_tokens_to_ids(
        ["[MASK]", "[SEP]", "[S2S_SOS]"])
    forbid_ignore_set = None
    if args.forbid_ignore_word:
        # '|'-separated word list; bracketed special tokens are upper-cased.
        w_list = []
        for w in args.forbid_ignore_word.split('|'):
            if w.startswith('[') and w.endswith(']'):
                w_list.append(w.upper())
            else:
                w_list.append(w)
        forbid_ignore_set = set(tokenizer.convert_tokens_to_ids(w_list))
    print(args.model_recover_path)
    for model_recover_path in glob.glob(args.model_recover_path.strip()):
        logger.info("***** Recover model: %s *****", model_recover_path)
        model_recover = torch.load(model_recover_path)
        model = model_class.from_pretrained(args.model_name_or_path, state_dict=model_recover, config=config, mask_word_id=mask_word_id, search_beam_size=args.beam_size, length_penalty=args.length_penalty,
                                            eos_id=eos_word_ids, sos_id=sos_word_id, forbid_duplicate_ngrams=args.forbid_duplicate_ngrams, forbid_ignore_set=forbid_ignore_set, ngram_size=args.ngram_size, min_len=args.min_len)
        del model_recover
        model.to(device)

        if args.fp16:
            try:
                from apex import amp
            except ImportError:
                raise ImportError(
                    "Please install apex from https://www.github.com/nvidia/apex to use fp16 training.")
            model = amp.initialize(model, opt_level=args.fp16_opt_level)
        if n_gpu > 1:
            model = torch.nn.DataParallel(model)

        torch.cuda.empty_cache()
        model.eval()
        next_i = 0
        # Room reserved for [CLS]/[SEP] markers plus the generated target.
        max_src_length = args.max_seq_length - 2 - args.max_tgt_length

        with open(args.input_file, encoding="utf-8") as fin:
            input_lines = [x.strip() for x in fin.readlines()]
            if args.subset > 0:
                logger.info("Decoding subset: %d", args.subset)
                input_lines = input_lines[:args.subset]
        data_tokenizer = WhitespaceTokenizer() if args.tokenized_input else tokenizer
        # Sort longest-first (keeping original indices) so batches have
        # similar lengths; results are written back via buf_id.
        input_lines = [data_tokenizer.tokenize(
            x)[:max_src_length] for x in input_lines]
        input_lines = sorted(list(enumerate(input_lines)),
                             key=lambda x: -len(x[1]))
        output_lines = [""] * len(input_lines)
        score_trace_list = [None] * len(input_lines)
        total_batch = math.ceil(len(input_lines) / args.batch_size)

        with tqdm(total=total_batch) as pbar:
            while next_i < len(input_lines):
                _chunk = input_lines[next_i:next_i + args.batch_size]
                buf_id = [x[0] for x in _chunk]
                buf = [x[1] for x in _chunk]
                next_i += args.batch_size
                max_a_len = max([len(x) for x in buf])
                instances = []
                for instance in [(x, max_a_len) for x in buf]:
                    for proc in bi_uni_pipeline:
                        instances.append(proc(instance))
                with torch.no_grad():
                    batch = utils_seq2seq.batch_list_to_batch_tensors(
                        instances)
                    batch = [
                        t.to(device) if t is not None else None for t in batch]
                    input_ids, token_type_ids, position_ids, input_mask = batch
                    traces = model(input_ids, token_type_ids,
                                   position_ids, input_mask)
                    if args.beam_size > 1:
                        traces = {k: v.tolist() for k, v in traces.items()}
                        output_ids = traces['pred_seq']
                    else:
                        output_ids = traces.tolist()
                    for i in range(len(buf)):
                        w_ids = output_ids[i]
                        output_buf = tokenizer.convert_ids_to_tokens(w_ids)
                        output_tokens = []
                        # Stop at the first end-of-sequence or padding token.
                        for t in output_buf:
                            if t in ("[SEP]", "[PAD]"):
                                break
                            output_tokens.append(t)
                        output_sequence = ' '.join(detokenize(output_tokens))
                        output_lines[buf_id[i]] = output_sequence
                        if args.need_score_traces:
                            score_trace_list[buf_id[i]] = {
                                'scores': traces['scores'][i], 'wids': traces['wids'][i], 'ptrs': traces['ptrs'][i]}
                pbar.update(1)
        if args.output_file:
            fn_out = args.output_file
        else:
            fn_out = model_recover_path+'.'+args.split
        with open(fn_out, "w", encoding="utf-8") as fout:
            for l in output_lines:
                fout.write(l)
                fout.write("\n")

        if args.need_score_traces:
            # Header record first, then one pickled trace per example.
            with open(fn_out + ".trace.pickle", "wb") as fout_trace:
                pickle.dump(
                    {"version": 0.0, "num_samples": len(input_lines)}, fout_trace)
                for x in score_trace_list:
                    pickle.dump(x, fout_trace)
| 12,437 | 46.473282 | 225 | py |
Unilm | Unilm-master/convert_unilm_pytorch_checkpoint_to_original_tf.py | """
@author: liucong
@contact: logcongcong@gmail.com
@time: 2020/7/27 13:53
"""
import argparse
import os
import numpy as np
import tensorflow as tf
import torch
from modeling_unilm import UnilmForLM
def convert_pytorch_checkpoint_to_tf(model: UnilmForLM, ckpt_dir: str, model_name: str):
    """Export the weights of a PyTorch UniLM model as a TF1 checkpoint.

    Variable names are rewritten from the PyTorch convention to the BERT
    TensorFlow naming (e.g. ``LayerNorm.weight`` -> ``LayerNorm/gamma``),
    and selected linear weights are transposed to match TF's layout. The
    checkpoint is saved to ``<ckpt_dir>/<model_name with '-' -> '_'>.ckpt``.
    """
    # Tensors whose names contain any of these substrings are transposed
    # before export (PyTorch nn.Linear stores (out, in); TF expects (in, out)).
    tensors_to_transpose = ("dense.weight", "attention.self.query", "attention.self.key", "attention.self.value")
    # Ordered (pattern, replacement) pairs applied to each state-dict key;
    # later rules operate on the output of earlier ones.
    var_map = (
        ("layer.", "layer_"),
        ("word_embeddings.weight", "word_embeddings"),
        ("position_embeddings.weight", "position_embeddings"),
        ("token_type_embeddings.weight", "token_type_embeddings"),
        (".", "/"),
        ("LayerNorm/weight", "LayerNorm/gamma"),
        ("LayerNorm/bias", "LayerNorm/beta"),
        ("weight", "kernel"),
    )

    if not os.path.isdir(ckpt_dir):
        os.makedirs(ckpt_dir)

    state_dict = model.state_dict()

    def to_tf_var_name(name: str):
        # Translate one PyTorch parameter name into its TF variable name.
        for patt, repl in iter(var_map):
            name = name.replace(patt, repl)
        return "{}".format(name)

    def create_tf_var(tensor: np.ndarray, name: str, session: tf.Session):
        # Declare a zero-initialized TF variable with matching dtype/shape;
        # the actual values are written afterwards via set_value().
        tf_dtype = tf.dtypes.as_dtype(tensor.dtype)
        tf_var = tf.get_variable(dtype=tf_dtype, shape=tensor.shape, name=name, initializer=tf.zeros_initializer())
        session.run(tf.variables_initializer([tf_var]))
        session.run(tf_var)
        return tf_var

    tf.reset_default_graph()
    with tf.Session() as session:
        for var_name in state_dict:
            tf_name = to_tf_var_name(var_name)
            torch_tensor = state_dict[var_name].numpy()
            if any([x in var_name for x in tensors_to_transpose]):
                torch_tensor = torch_tensor.T
            tf_var = create_tf_var(tensor=torch_tensor, name=tf_name, session=session)
            tf.keras.backend.set_value(tf_var, torch_tensor)
            # Read back and compare as a sanity check on the copy.
            tf_weight = session.run(tf_var)
            print("Successfully created {}: {}".format(tf_name, np.allclose(tf_weight, torch_tensor)))

        saver = tf.train.Saver(tf.trainable_variables())
        saver.save(session, os.path.join(ckpt_dir, model_name.replace("-", "_") + ".ckpt"))
def main(raw_args=None):
    """CLI entry point: load a PyTorch UniLM checkpoint and export it as a
    TensorFlow checkpoint via convert_pytorch_checkpoint_to_tf()."""
    parser = argparse.ArgumentParser()
    # (flag, required, help) — every option is a plain string argument.
    arg_specs = (
        ("--model_name", True, "model name e.g. bert-base-uncased"),
        ("--cache_dir", False, "Directory containing pytorch model"),
        ("--pytorch_model_path", True, "/path/to/<pytorch-model-name>.bin"),
        ("--tf_cache_dir", True, "Directory in which to save tensorflow model"),
    )
    for flag, is_required, help_text in arg_specs:
        parser.add_argument(flag, type=str, required=is_required,
                            default=None, help=help_text)
    args = parser.parse_args(raw_args)

    recovered_state = torch.load(args.pytorch_model_path)
    model = UnilmForLM.from_pretrained(
        pretrained_model_name_or_path=args.model_name,
        state_dict=recovered_state,
        cache_dir=args.cache_dir,
    )
    convert_pytorch_checkpoint_to_tf(
        model=model, ckpt_dir=args.tf_cache_dir, model_name=args.model_name)
| 3,081 | 35.258824 | 118 | py |
Unilm | Unilm-master/utils_seq2seq.py | # coding=utf-8
from random import randint, shuffle, choice
from random import random as rand
import math
import numpy as np
import torch
import torch.utils.data
def get_random_word(vocab_words):
    """Return a uniformly sampled word from `vocab_words`.

    Uses random.choice (already imported at module top) instead of the
    hand-rolled randint-based indexing; the distribution is identical.
    Note: an empty sequence raises IndexError here, where the original
    raised ValueError from randint(0, -1) — both are errors on bad input.
    """
    return choice(vocab_words)
def batch_list_to_batch_tensors(batch):
    """Collate a list of per-example tuples into per-field batch tensors.

    For each field position across the batch:
      * None in the first example  -> None for the whole field
      * torch.Tensor values        -> torch.stack of the field
      * anything else              -> torch.tensor(..., dtype=torch.long);
        fields that cannot be converted (e.g. ragged lists) become None.

    Fix: the original used a bare ``except:``, which also swallowed
    KeyboardInterrupt/SystemExit; only conversion failures are caught now.
    """
    batch_tensors = []
    for x in zip(*batch):
        if x[0] is None:
            batch_tensors.append(None)
        elif isinstance(x[0], torch.Tensor):
            batch_tensors.append(torch.stack(x))
        else:
            try:
                batch_tensors.append(torch.tensor(x, dtype=torch.long))
            except (TypeError, ValueError, RuntimeError):
                # field is not convertible to a rectangular long tensor
                batch_tensors.append(None)
    return batch_tensors
def _get_word_split_index(tokens, st, end):
split_idx = []
i = st
while i < end:
if (not tokens[i].startswith('##')) or (i == st):
split_idx.append(i)
i += 1
split_idx.append(end)
return split_idx
def _expand_whole_word(tokens, st, end):
new_st, new_end = st, end
while (new_st >= 0) and tokens[new_st].startswith('##'):
new_st -= 1
while (new_end < len(tokens)) and tokens[new_end].startswith('##'):
new_end += 1
return new_st, new_end
class Pipeline():
    """ Pre-process Pipeline Class : callable

    Base class for example pre-processors. Subclasses implement
    __call__(instance) and are expected to set the masking attributes
    below before using get_masked_pos()/replace_masked_tokens().
    """

    def __init__(self):
        super().__init__()
        self.skipgram_prb = None            # probability of applying an n-gram (skip-gram) mask
        self.skipgram_size = None           # maximum n-gram mask length
        self.pre_whole_word = None          # split into whole words BEFORE choosing masks
        self.mask_whole_word = None         # expand chosen spans to whole words AFTER choosing
        self.vocab_words = None             # vocabulary list for the 10% random-word replacement
        self.call_count = 0
        self.offline_mode = False
        self.skipgram_size_geo_list = None  # geometric distribution over n-gram sizes (or None)
        self.span_same_mask = False         # reuse one mask decision across an adjacent span

    def init_skipgram_size_geo_list(self, p):
        """Build a truncated, normalized geometric distribution with
        parameter `p` over n-gram sizes 1..skipgram_size."""
        if p > 0:
            g_list = []
            t = p
            for _ in range(self.skipgram_size):
                g_list.append(t)
                t *= (1-p)
            s = sum(g_list)
            self.skipgram_size_geo_list = [x/s for x in g_list]

    def __call__(self, instance):
        raise NotImplementedError

    # pre_whole_word: tokenize to words before masking
    # post whole word (--mask_whole_word): expand to words after masking
    def get_masked_pos(self, tokens, n_pred, add_skipgram=False, mask_segment=None, protect_range=None):
        """Randomly choose up to `n_pred` token positions to mask.

        Candidate units are whole words (if self.pre_whole_word) or single
        tokens. Special tokens ending in 'CLS]'/'SEP]' are never masked;
        `mask_segment` ('a'/'b'/'ab') restricts candidates to one side of
        the first [SEP]; `protect_range` (lo, hi) excludes positions
        lo <= p < hi. Returns a list of token positions.
        """
        if self.pre_whole_word:
            pre_word_split = _get_word_split_index(tokens, 0, len(tokens))
        else:
            pre_word_split = list(range(0, len(tokens)+1))
        # half-open (start, end) spans of the candidate units
        span_list = list(zip(pre_word_split[:-1], pre_word_split[1:]))

        # candidate positions of masked tokens
        cand_pos = []
        special_pos = set()

        if mask_segment:
            # locate the first single-token span ending with 'SEP]'; it
            # separates segment A from segment B
            for i, sp in enumerate(span_list):
                sp_st, sp_end = sp
                if (sp_end-sp_st == 1) and tokens[sp_st].endswith('SEP]'):
                    segment_index = i
                    break

        for i, sp in enumerate(span_list):
            sp_st, sp_end = sp
            if (sp_end-sp_st == 1) and (tokens[sp_st].endswith('CLS]') or tokens[sp_st].endswith('SEP]')):
                special_pos.add(i)
            else:
                if mask_segment:
                    if ((i < segment_index) and ('a' in mask_segment)) or ((i > segment_index) and ('b' in mask_segment)):
                        cand_pos.append(i)
                else:
                    cand_pos.append(i)
        shuffle(cand_pos)

        masked_pos = set()
        for i_span in cand_pos:
            if len(masked_pos) >= n_pred:
                break
            cand_st, cand_end = span_list[i_span]
            # skip spans that would overshoot the budget or overlap a pick
            if len(masked_pos)+cand_end-cand_st > n_pred:
                continue
            if any(p in masked_pos for p in range(cand_st, cand_end)):
                continue

            n_span = 1
            rand_skipgram_size = 0
            # ngram
            if self.skipgram_size_geo_list:
                # sampling ngram size from geometric distribution
                rand_skipgram_size = np.random.choice(
                    len(self.skipgram_size_geo_list), 1, p=self.skipgram_size_geo_list)[0] + 1
            else:
                if add_skipgram and (self.skipgram_prb > 0) and (self.skipgram_size >= 2) and (rand() < self.skipgram_prb):
                    rand_skipgram_size = min(
                        randint(2, self.skipgram_size), len(span_list)-i_span)
            # grow the span while staying within budget and off specials
            for n in range(2, rand_skipgram_size+1):
                tail_st, tail_end = span_list[i_span+n-1]
                if (tail_end-tail_st == 1) and (tail_st in special_pos):
                    break
                if len(masked_pos)+tail_end-cand_st > n_pred:
                    break
                n_span = n
            st_span, end_span = i_span, i_span + n_span

            if self.mask_whole_word:
                # pre_whole_word==False: position index of span_list is the same as tokens
                st_span, end_span = _expand_whole_word(
                    tokens, st_span, end_span)

            skip_pos = None
            for sp in range(st_span, end_span):
                for mp in range(span_list[sp][0], span_list[sp][1]):
                    if not(skip_pos and (mp in skip_pos)) and (mp not in special_pos) and not(protect_range and (protect_range[0] <= mp < protect_range[1])):
                        masked_pos.add(mp)

        if len(masked_pos) < n_pred:
            # top up with leftover candidates to reach the budget
            shuffle(cand_pos)
            for pos in cand_pos:
                if len(masked_pos) >= n_pred:
                    break
                if pos not in masked_pos:
                    masked_pos.add(pos)
        masked_pos = list(masked_pos)
        if len(masked_pos) > n_pred:
            # shuffle(masked_pos)
            masked_pos = masked_pos[:n_pred]
        return masked_pos

    def replace_masked_tokens(self, tokens, masked_pos):
        """In-place BERT-style replacement at `masked_pos`:
        80% '[MASK]', 10% a random vocabulary word, 10% unchanged."""
        if self.span_same_mask:
            # process positions in order so adjacent positions can reuse
            # the previous random draw (whole span gets one treatment)
            masked_pos = sorted(list(masked_pos))
        prev_pos, prev_rand = None, None
        for pos in masked_pos:
            if self.span_same_mask and (pos-1 == prev_pos):
                t_rand = prev_rand
            else:
                t_rand = rand()
            if t_rand < 0.8:  # 80%
                tokens[pos] = '[MASK]'
            elif t_rand < 0.9:  # 10%
                tokens[pos] = get_random_word(self.vocab_words)
            prev_pos, prev_rand = pos, t_rand
# Input file format :
# 1. One sentence per line. These should ideally be actual sentences,
# not entire paragraphs or arbitrary spans of text. (Because we use
# the sentence boundaries for the "next sentence prediction" task).
# 2. Blank lines between documents. Document boundaries are needed
# so that the "next sentence prediction" task doesn't span between documents.
def truncate_tokens_pair(tokens_a, tokens_b, max_len):
    """Trim the longer of two token lists until both fit into ``max_len``.

    The budget is ``max_len - 3`` to leave room for [CLS] and two [SEP]
    tokens; at each step one token is dropped from the end of whichever
    list is currently longer (ties trim ``tokens_b``).
    """
    budget = max_len - 3
    while len(tokens_a) + len(tokens_b) > budget:
        if len(tokens_a) > len(tokens_b):
            tokens_a = tokens_a[:-1]
        else:
            tokens_b = tokens_b[:-1]
    return tokens_a, tokens_b
def truncate_tokens_signle(tokens_a, max_len):
    """Trim a single token list to at most ``max_len - 2`` tokens.

    The two reserved slots leave room for the [CLS] and [SEP] specials
    added later.  (Name keeps the original's 'signle' spelling because
    callers elsewhere reference it.)
    """
    limit = max_len - 2
    return tokens_a if len(tokens_a) <= limit else tokens_a[:limit]
from functools import partial
from multiprocessing import Pool, cpu_count
from tqdm import tqdm
class Seq2SeqDataset(torch.utils.data.Dataset):
    """ Load sentence pair (sequential or random order) from corpus """
    # Each corpus line is a python-literal dict with "src_text"/"tgt_text";
    # parsing is parallelized over a process pool.  Tokenization is deferred
    # to the bi_uni_pipeline preprocessors applied in __getitem__.

    def __init__(self, file, batch_size, tokenizer, max_len, short_sampling_prob=0.1, sent_reverse_order=False, bi_uni_pipeline=None):
        super().__init__()
        self.tokenizer = tokenizer  # tokenize function
        self.max_len = max_len  # maximum length of tokens
        self.short_sampling_prob = short_sampling_prob
        # Default to a fresh list instead of a shared mutable default argument.
        self.bi_uni_pipeline = [] if bi_uni_pipeline is None else bi_uni_pipeline
        self.batch_size = batch_size
        self.sent_reverse_order = sent_reverse_order
        # Read the corpus exactly once and close the handle.  The original
        # called file_data.readlines() a second time (for tqdm's total) on an
        # already-exhausted handle, which always returned [] -> total=0, and
        # never closed the file.
        with open(file, "r", encoding='utf-8') as file_data:
            lines = file_data.readlines()
        threads = min(8, cpu_count())
        with Pool(threads) as p:
            annotate_ = partial(
                self.read_data,
                tokenizer=self.tokenizer)
            self.ex_list = list(
                tqdm(
                    p.imap(annotate_, lines, chunksize=32),
                    total=len(lines),
                    desc="convert squad examples to features",
                )
            )
        print('Load {0} documents'.format(len(self.ex_list)))

    def read_data(self, line, tokenizer):
        """Parse one corpus line into a (src_text, tgt_text) pair.

        NOTE(review): eval() on file content executes arbitrary code — only
        use with trusted corpora (ast.literal_eval would be safer).
        """
        sample = eval(line.strip())
        src_tk = sample["src_text"]
        tgt_tk = sample["tgt_text"]
        return (src_tk, tgt_tk)

    def __len__(self):
        return len(self.ex_list)

    def __getitem__(self, idx):
        """Run every configured preprocessor and concatenate their tuples."""
        instance = self.ex_list[idx]
        new_instance = ()
        for proc in self.bi_uni_pipeline:
            new_instance += proc(instance)
        return new_instance

    def __iter__(self):  # iterator to load data
        # Yields randomly sampled (with replacement) batches, already
        # collated to tensors by batch_list_to_batch_tensors.
        for __ in range(math.ceil(len(self.ex_list) / float(self.batch_size))):
            batch = []
            for __ in range(self.batch_size):
                idx = randint(0, len(self.ex_list)-1)
                batch.append(self.__getitem__(idx))
            # To Tensor
            yield batch_list_to_batch_tensors(batch)
class Preprocess4Seq2seq(Pipeline):
    """ Pre-processing steps for pretraining transformer """
    # Converts one (source_text, target_text) pair into fixed-length model
    # inputs for seq2seq masked-LM pretraining: tokenize, truncate, add
    # [CLS]/[SEP] specials, sample masked positions, and build a seq2seq
    # attention mask in which every position attends to the full source
    # segment while the target segment attends causally to itself.
    def __init__(self, max_pred, mask_prob, vocab_words, indexer, max_len=512, skipgram_prb=0, skipgram_size=0, mask_whole_word=False, mask_source_words=True, tokenizer=None):
        super().__init__()
        self.max_len = max_len
        self.max_pred = max_pred  # max tokens of prediction
        self.mask_prob = mask_prob  # masking probability
        self.vocab_words = vocab_words  # vocabulary (sub)words
        self.indexer = indexer  # function from token to token index
        # Lower-triangular template copied into the target part of the mask.
        self._tril_matrix = torch.tril(torch.ones(
            (max_len, max_len), dtype=torch.long))
        self.skipgram_prb = skipgram_prb
        self.skipgram_size = skipgram_size
        self.mask_whole_word = mask_whole_word
        self.mask_source_words = mask_source_words
        self.tokenizer = tokenizer
    def __call__(self, instance):
        # Returns (input_ids, segment_ids, input_mask, masked_ids, masked_pos,
        # masked_weights, next_sentence_label); the last is always None here.
        next_sentence_label = None
        tokens_a, tokens_b = instance[:2]
        tokens_a = self.tokenizer.tokenize(tokens_a)
        tokens_b = self.tokenizer.tokenize(tokens_b)
        # -3 for special tokens [CLS], [SEP], [SEP]
        tokens_a, tokens_b = truncate_tokens_pair(tokens_a, tokens_b, self.max_len)
        # Add Special Tokens
        tokens = ['[CLS]'] + tokens_a + ['[SEP]'] + tokens_b + ['[SEP]']
        # Segment ids 4/5 — presumably task markers distinguishing the s2s
        # objective from the LM variants below (0/1, 2, 3) — confirm in model.
        segment_ids = [4]*(len(tokens_a)+2) + [5]*(len(tokens_b)+1)
        # For masked Language Models
        # the number of prediction is sometimes less than max_pred when sequence is short
        effective_length = len(tokens_b)
        if self.mask_source_words:
            effective_length += len(tokens_a)
        n_pred = min(self.max_pred, max(1, int(round(effective_length*self.mask_prob))))
        # candidate positions of masked tokens
        cand_pos = []
        special_pos = set()
        for i, tk in enumerate(tokens):
            # only mask tokens_b (target sequence)
            # we will mask [SEP] as an ending symbol
            if (i >= len(tokens_a)+2) and (tk != '[CLS]'):
                cand_pos.append(i)
            elif self.mask_source_words and (i < len(tokens_a)+2) and (tk != '[CLS]') and (not tk.startswith('[SEP')):
                cand_pos.append(i)
            else:
                special_pos.add(i)
        shuffle(cand_pos)
        masked_pos = set()
        max_cand_pos = max(cand_pos)
        for pos in cand_pos:
            if len(masked_pos) >= n_pred:
                break
            if pos in masked_pos:
                continue
            # Grow a '##' wordpiece position range outward so whole words
            # are masked together.  (Redefined per iteration to close over
            # `tokens`.)
            def _expand_whole_word(st, end):
                new_st, new_end = st, end
                while (new_st >= 0) and tokens[new_st].startswith('##'):
                    new_st -= 1
                while (new_end < len(tokens)) and tokens[new_end].startswith('##'):
                    new_end += 1
                return new_st, new_end
            if (self.skipgram_prb > 0) and (self.skipgram_size >= 2) and (rand() < self.skipgram_prb):
                # ngram
                cur_skipgram_size = randint(2, self.skipgram_size)
                if self.mask_whole_word:
                    st_pos, end_pos = _expand_whole_word(
                        pos, pos + cur_skipgram_size)
                else:
                    st_pos, end_pos = pos, pos + cur_skipgram_size
            else:
                # directly mask
                if self.mask_whole_word:
                    st_pos, end_pos = _expand_whole_word(pos, pos + 1)
                else:
                    st_pos, end_pos = pos, pos + 1
            for mp in range(st_pos, end_pos):
                if (0 < mp <= max_cand_pos) and (mp not in special_pos):
                    masked_pos.add(mp)
                else:
                    break
        masked_pos = list(masked_pos)
        if len(masked_pos) > n_pred:
            shuffle(masked_pos)
            masked_pos = masked_pos[:n_pred]
        masked_tokens = [tokens[pos] for pos in masked_pos]
        # Two independent draws give, in expectation, 80% [MASK],
        # 20%*50% = 10% random word, and 10% unchanged.
        for pos in masked_pos:
            if rand() < 0.8:  # 80%
                tokens[pos] = '[MASK]'
            elif rand() < 0.5:  # 10%
                tokens[pos] = get_random_word(self.vocab_words)
        # when n_pred < max_pred, we only calculate loss within n_pred
        masked_weights = [1]*len(masked_tokens)
        # Token Indexing
        masked_ids = self.indexer(masked_tokens)
        # Token Indexing
        input_ids = self.indexer(tokens)
        # Zero Padding
        n_pad = self.max_len - len(input_ids)
        input_ids.extend([0]*n_pad)
        segment_ids.extend([0]*n_pad)
        # Seq2seq attention: all rows see the source prefix; the target span
        # attends causally (lower-triangular) to itself.
        input_mask = torch.zeros(self.max_len, self.max_len, dtype=torch.long)
        input_mask[:, :len(tokens_a)+2].fill_(1)
        second_st, second_end = len(
            tokens_a)+2, len(tokens_a)+len(tokens_b)+3
        input_mask[second_st:second_end, second_st:second_end].copy_(
            self._tril_matrix[:second_end-second_st, :second_end-second_st])
        # Zero Padding for masked target
        if self.max_pred > n_pred:
            n_pad = self.max_pred - n_pred
            if masked_ids is not None:
                masked_ids.extend([0]*n_pad)
            if masked_pos is not None:
                masked_pos.extend([0]*n_pad)
            if masked_weights is not None:
                masked_weights.extend([0]*n_pad)
        return (input_ids, segment_ids, input_mask, masked_ids, masked_pos, masked_weights, next_sentence_label)
class Preprocess4BiLM(Pipeline):
    """ Pre-processing steps for pretraining transformer """
    # Bidirectional masked-LM variant: with prob 0.5 the (a, b) pair is
    # swapped and labeled 0.0 (next_sentence_label), otherwise kept and
    # labeled 1.0; attention is fully bidirectional (all-ones mask) and
    # segment ids are the classic 0/1.
    def __init__(self, max_pred, mask_prob, vocab_words, indexer, max_len=512, skipgram_prb=0, skipgram_size=0, mask_whole_word=False, mask_source_words=True, tokenizer=None):
        super().__init__()
        self.max_len = max_len
        self.max_pred = max_pred  # max tokens of prediction
        self.mask_prob = mask_prob  # masking probability
        self.vocab_words = vocab_words  # vocabulary (sub)words
        self.indexer = indexer  # function from token to token index
        # Kept for parity with the other pipelines; unused below since the
        # attention mask here is all-ones.
        self._tril_matrix = torch.tril(torch.ones(
            (max_len, max_len), dtype=torch.long))
        self.skipgram_prb = skipgram_prb
        self.skipgram_size = skipgram_size
        self.mask_whole_word = mask_whole_word
        self.mask_source_words = mask_source_words
        self.tokenizer = tokenizer
    def __call__(self, instance):
        tokens_a, tokens_b = instance[:2]
        # Coin flip: keep original order (label 1.0) or swap (label 0.0).
        if rand() <= 0.5:
            next_sentence_label = 1.0
        else:
            tokens_a, tokens_b = tokens_b, tokens_a
            next_sentence_label = 0.0
        tokens_a = self.tokenizer.tokenize(tokens_a)
        tokens_b = self.tokenizer.tokenize(tokens_b)
        # -3 for special tokens [CLS], [SEP], [SEP]
        tokens_a, tokens_b = truncate_tokens_pair(tokens_a, tokens_b, self.max_len)
        # Add Special Tokens
        tokens = ['[CLS]'] + tokens_a + ['[SEP]'] + tokens_b + ['[SEP]']
        segment_ids = [0]*(len(tokens_a)+2) + [1]*(len(tokens_b)+1)
        # For masked Language Models
        # the number of prediction is sometimes less than max_pred when sequence is short
        effective_length = len(tokens_b)
        if self.mask_source_words:
            effective_length += len(tokens_a)
        n_pred = min(self.max_pred, max(
            1, int(round(effective_length*self.mask_prob))))
        # candidate positions of masked tokens
        cand_pos = []
        special_pos = set()
        for i, tk in enumerate(tokens):
            # only mask tokens_b (target sequence)
            # we will mask [SEP] as an ending symbol
            if (i >= len(tokens_a)+2) and (tk != '[CLS]'):
                cand_pos.append(i)
            elif self.mask_source_words and (i < len(tokens_a)+2) and (tk != '[CLS]') and (not tk.startswith('[SEP')):
                cand_pos.append(i)
            else:
                special_pos.add(i)
        shuffle(cand_pos)
        masked_pos = set()
        max_cand_pos = max(cand_pos)
        for pos in cand_pos:
            if len(masked_pos) >= n_pred:
                break
            if pos in masked_pos:
                continue
            # Grow a '##' wordpiece position range outward so whole words
            # are masked together.
            def _expand_whole_word(st, end):
                new_st, new_end = st, end
                while (new_st >= 0) and tokens[new_st].startswith('##'):
                    new_st -= 1
                while (new_end < len(tokens)) and tokens[new_end].startswith('##'):
                    new_end += 1
                return new_st, new_end
            if (self.skipgram_prb > 0) and (self.skipgram_size >= 2) and (rand() < self.skipgram_prb):
                # ngram
                cur_skipgram_size = randint(2, self.skipgram_size)
                if self.mask_whole_word:
                    st_pos, end_pos = _expand_whole_word(
                        pos, pos + cur_skipgram_size)
                else:
                    st_pos, end_pos = pos, pos + cur_skipgram_size
            else:
                # directly mask
                if self.mask_whole_word:
                    st_pos, end_pos = _expand_whole_word(pos, pos + 1)
                else:
                    st_pos, end_pos = pos, pos + 1
            for mp in range(st_pos, end_pos):
                if (0 < mp <= max_cand_pos) and (mp not in special_pos):
                    masked_pos.add(mp)
                else:
                    break
        masked_pos = list(masked_pos)
        if len(masked_pos) > n_pred:
            shuffle(masked_pos)
            masked_pos = masked_pos[:n_pred]
        masked_tokens = [tokens[pos] for pos in masked_pos]
        # Two independent draws give, in expectation, 80% [MASK],
        # 20%*50% = 10% random word, and 10% unchanged.
        for pos in masked_pos:
            if rand() < 0.8:  # 80%
                tokens[pos] = '[MASK]'
            elif rand() < 0.5:  # 10%
                tokens[pos] = get_random_word(self.vocab_words)
        # when n_pred < max_pred, we only calculate loss within n_pred
        masked_weights = [1]*len(masked_tokens)
        # Token Indexing
        masked_ids = self.indexer(masked_tokens)
        # Token Indexing
        input_ids = self.indexer(tokens)
        # Zero Padding
        n_pad = self.max_len - len(input_ids)
        input_ids.extend([0]*n_pad)
        segment_ids.extend([0]*n_pad)
        # Fully bidirectional attention: every position sees every position.
        input_mask = torch.ones(self.max_len, self.max_len, dtype=torch.long)
        # input_mask[:, :len(tokens_a)+2].fill_(1)
        # second_st, second_end = len(
        #     tokens_a)+2, len(tokens_a)+len(tokens_b)+3
        # input_mask[second_st:second_end, second_st:second_end].copy_(
        #     self._tril_matrix[:second_end-second_st, :second_end-second_st])
        # Zero Padding for masked target
        if self.max_pred > n_pred:
            n_pad = self.max_pred - n_pred
            if masked_ids is not None:
                masked_ids.extend([0]*n_pad)
            if masked_pos is not None:
                masked_pos.extend([0]*n_pad)
            if masked_weights is not None:
                masked_weights.extend([0]*n_pad)
        return (input_ids, segment_ids, input_mask, masked_ids, masked_pos, masked_weights, next_sentence_label)
class Preprocess4RightLM(Pipeline):
    """ Pre-processing steps for pretraining transformer """
    # Left-to-right LM variant on a single sequence: segment id 2, and a
    # lower-triangular (causal) attention mask over [CLS] a... [SEP].
    def __init__(self, max_pred, mask_prob, vocab_words, indexer, max_len=512, skipgram_prb=0, skipgram_size=0, mask_whole_word=False, mask_source_words=True, tokenizer=None):
        super().__init__()
        self.max_len = max_len
        self.max_pred = max_pred  # max tokens of prediction
        self.mask_prob = mask_prob  # masking probability
        self.vocab_words = vocab_words  # vocabulary (sub)words
        self.indexer = indexer  # function from token to token index
        # Causal (lower-triangular) attention template.
        self._tril_matrix = torch.tril(torch.ones(
            (max_len, max_len), dtype=torch.long))
        self.skipgram_prb = skipgram_prb
        self.skipgram_size = skipgram_size
        self.mask_whole_word = mask_whole_word
        self.mask_source_words = mask_source_words
        self.tokenizer = tokenizer
    def __call__(self, instance):
        next_sentence_label = None
        tokens_a, _ = instance[:2]
        tokens_a = self.tokenizer.tokenize(tokens_a)
        tokens_a = truncate_tokens_signle(tokens_a, self.max_len)
        # Add Special Tokens
        tokens = ['[CLS]'] + tokens_a + ['[SEP]']
        segment_ids = [2]*(len(tokens_a)+2)
        # For masked Language Models
        # the number of prediction is sometimes less than max_pred when sequence is short
        effective_length = 0
        if self.mask_source_words:
            effective_length += len(tokens_a)
        n_pred = min(self.max_pred, max(
            1, int(round(effective_length*self.mask_prob))))
        # candidate positions of masked tokens
        cand_pos = []
        special_pos = set()
        for i, tk in enumerate(tokens):
            # every non-special token is a masking candidate
            if (tk != '[CLS]') and (tk != '[SEP]'):
                cand_pos.append(i)
            else:
                special_pos.add(i)
        shuffle(cand_pos)
        masked_pos = set()
        # FIX: was a bare `except:` which would also swallow SystemExit /
        # KeyboardInterrupt; max() on an empty list raises ValueError only.
        try:
            max_cand_pos = max(cand_pos)
        except ValueError:  # no candidates (e.g. empty tokens_a)
            max_cand_pos = 0
        for pos in cand_pos:
            if len(masked_pos) >= n_pred:
                break
            if pos in masked_pos:
                continue
            # Grow a '##' wordpiece position range outward so whole words
            # are masked together.
            def _expand_whole_word(st, end):
                new_st, new_end = st, end
                while (new_st >= 0) and tokens[new_st].startswith('##'):
                    new_st -= 1
                while (new_end < len(tokens)) and tokens[new_end].startswith('##'):
                    new_end += 1
                return new_st, new_end
            if (self.skipgram_prb > 0) and (self.skipgram_size >= 2) and (rand() < self.skipgram_prb):
                # ngram
                cur_skipgram_size = randint(2, self.skipgram_size)
                if self.mask_whole_word:
                    st_pos, end_pos = _expand_whole_word(
                        pos, pos + cur_skipgram_size)
                else:
                    st_pos, end_pos = pos, pos + cur_skipgram_size
            else:
                # directly mask
                if self.mask_whole_word:
                    st_pos, end_pos = _expand_whole_word(pos, pos + 1)
                else:
                    st_pos, end_pos = pos, pos + 1
            for mp in range(st_pos, end_pos):
                if (0 < mp <= max_cand_pos) and (mp not in special_pos):
                    masked_pos.add(mp)
                else:
                    break
        masked_pos = list(masked_pos)
        if len(masked_pos) > n_pred:
            shuffle(masked_pos)
            masked_pos = masked_pos[:n_pred]
        masked_tokens = [tokens[pos] for pos in masked_pos]
        # Two independent draws give, in expectation, 80% [MASK],
        # 20%*50% = 10% random word, and 10% unchanged.
        for pos in masked_pos:
            if rand() < 0.8:  # 80%
                tokens[pos] = '[MASK]'
            elif rand() < 0.5:  # 10%
                tokens[pos] = get_random_word(self.vocab_words)
        # when n_pred < max_pred, we only calculate loss within n_pred
        masked_weights = [1]*len(masked_tokens)
        # Token Indexing
        masked_ids = self.indexer(masked_tokens)
        # Token Indexing
        input_ids = self.indexer(tokens)
        # Zero Padding
        n_pad = self.max_len - len(input_ids)
        input_ids.extend([0]*n_pad)
        segment_ids.extend([0]*n_pad)
        # Start from all-ones, then overwrite the real-token span with the
        # causal triangle (padding rows keep 1s, as in the original).
        input_mask = torch.ones(self.max_len, self.max_len, dtype=torch.long)
        second_st, second_end = 0, len(tokens_a)+2
        input_mask[second_st:second_end, second_st:second_end].copy_(
            self._tril_matrix[:second_end-second_st, :second_end-second_st])
        # Zero Padding for masked target
        if self.max_pred > n_pred:
            n_pad = self.max_pred - n_pred
            if masked_ids is not None:
                masked_ids.extend([0]*n_pad)
            if masked_pos is not None:
                masked_pos.extend([0]*n_pad)
            if masked_weights is not None:
                masked_weights.extend([0]*n_pad)
        return (input_ids, segment_ids, input_mask, masked_ids, masked_pos, masked_weights, next_sentence_label)
class Preprocess4LeftLM(Pipeline):
    """ Pre-processing steps for pretraining transformer """
    # Right-to-left LM variant on a single sequence: segment id 3, and an
    # UPPER-triangular attention mask (each token sees itself and later
    # positions).  Note: the attribute below keeps the historical name
    # `_tril_matrix` used by the sibling classes although it holds a triu.
    def __init__(self, max_pred, mask_prob, vocab_words, indexer, max_len=512, skipgram_prb=0, skipgram_size=0, mask_whole_word=False, mask_source_words=True, tokenizer=None):
        super().__init__()
        self.max_len = max_len
        self.max_pred = max_pred  # max tokens of prediction
        self.mask_prob = mask_prob  # masking probability
        self.vocab_words = vocab_words  # vocabulary (sub)words
        self.indexer = indexer  # function from token to token index
        # triu, not tril: reversed-direction causal attention.
        self._tril_matrix = torch.triu(torch.ones(
            (max_len, max_len), dtype=torch.long))
        self.skipgram_prb = skipgram_prb
        self.skipgram_size = skipgram_size
        self.mask_whole_word = mask_whole_word
        self.mask_source_words = mask_source_words
        self.tokenizer = tokenizer
    def __call__(self, instance):
        next_sentence_label = None
        tokens_a, _ = instance[:2]
        tokens_a = self.tokenizer.tokenize(tokens_a)
        tokens_a = truncate_tokens_signle(tokens_a, self.max_len)
        # Add Special Tokens
        tokens = ['[CLS]'] + tokens_a + ['[SEP]']
        segment_ids = [3]*(len(tokens_a)+2)
        # For masked Language Models
        # the number of prediction is sometimes less than max_pred when sequence is short
        effective_length = 0
        if self.mask_source_words:
            effective_length += len(tokens_a)
        n_pred = min(self.max_pred, max(
            1, int(round(effective_length*self.mask_prob))))
        # candidate positions of masked tokens
        cand_pos = []
        special_pos = set()
        for i, tk in enumerate(tokens):
            # every non-special token is a masking candidate
            if (tk != '[CLS]') and (tk != '[SEP]'):
                cand_pos.append(i)
            else:
                special_pos.add(i)
        shuffle(cand_pos)
        masked_pos = set()
        # FIX: was a bare `except:` which would also swallow SystemExit /
        # KeyboardInterrupt; max() on an empty list raises ValueError only.
        try:
            max_cand_pos = max(cand_pos)
        except ValueError:  # no candidates (e.g. empty tokens_a)
            max_cand_pos = 0
        for pos in cand_pos:
            if len(masked_pos) >= n_pred:
                break
            if pos in masked_pos:
                continue
            # Grow a '##' wordpiece position range outward so whole words
            # are masked together.
            def _expand_whole_word(st, end):
                new_st, new_end = st, end
                while (new_st >= 0) and tokens[new_st].startswith('##'):
                    new_st -= 1
                while (new_end < len(tokens)) and tokens[new_end].startswith('##'):
                    new_end += 1
                return new_st, new_end
            if (self.skipgram_prb > 0) and (self.skipgram_size >= 2) and (rand() < self.skipgram_prb):
                # ngram
                cur_skipgram_size = randint(2, self.skipgram_size)
                if self.mask_whole_word:
                    st_pos, end_pos = _expand_whole_word(
                        pos, pos + cur_skipgram_size)
                else:
                    st_pos, end_pos = pos, pos + cur_skipgram_size
            else:
                # directly mask
                if self.mask_whole_word:
                    st_pos, end_pos = _expand_whole_word(pos, pos + 1)
                else:
                    st_pos, end_pos = pos, pos + 1
            for mp in range(st_pos, end_pos):
                if (0 < mp <= max_cand_pos) and (mp not in special_pos):
                    masked_pos.add(mp)
                else:
                    break
        masked_pos = list(masked_pos)
        if len(masked_pos) > n_pred:
            shuffle(masked_pos)
            masked_pos = masked_pos[:n_pred]
        masked_tokens = [tokens[pos] for pos in masked_pos]
        # Two independent draws give, in expectation, 80% [MASK],
        # 20%*50% = 10% random word, and 10% unchanged.
        for pos in masked_pos:
            if rand() < 0.8:  # 80%
                tokens[pos] = '[MASK]'
            elif rand() < 0.5:  # 10%
                tokens[pos] = get_random_word(self.vocab_words)
        # when n_pred < max_pred, we only calculate loss within n_pred
        masked_weights = [1]*len(masked_tokens)
        # Token Indexing
        masked_ids = self.indexer(masked_tokens)
        # Token Indexing
        input_ids = self.indexer(tokens)
        # Zero Padding
        n_pad = self.max_len - len(input_ids)
        input_ids.extend([0]*n_pad)
        segment_ids.extend([0]*n_pad)
        # Start from all-ones, then overwrite the real-token span with the
        # reversed causal triangle (padding rows keep 1s, as in the original).
        input_mask = torch.ones(self.max_len, self.max_len, dtype=torch.long)
        second_st, second_end = 0, len(tokens_a)+2
        input_mask[second_st:second_end, second_st:second_end].copy_(
            self._tril_matrix[:second_end-second_st, :second_end-second_st])
        # Zero Padding for masked target
        if self.max_pred > n_pred:
            n_pad = self.max_pred - n_pred
            if masked_ids is not None:
                masked_ids.extend([0]*n_pad)
            if masked_pos is not None:
                masked_pos.extend([0]*n_pad)
            if masked_weights is not None:
                masked_weights.extend([0]*n_pad)
        return (input_ids, segment_ids, input_mask, masked_ids, masked_pos, masked_weights, next_sentence_label)
class Preprocess4Seq2seqDecode(Pipeline):
    """ Pre-processing steps for pretraining transformer """
    # Decode-time preprocessing for seq2seq generation: pads the source to a
    # batch-uniform length (max_a_len) and prepares position ids and the
    # seq2seq attention mask for a target of up to max_tgt_length tokens.
    def __init__(self, vocab_words, indexer, max_len=512, max_tgt_length=128):
        super().__init__()
        # FIX: self.max_len was assigned twice in the original; once suffices.
        self.max_len = max_len
        self.vocab_words = vocab_words  # vocabulary (sub)words
        self.indexer = indexer  # function from token to token index
        # Causal template for the (future) target span.
        self._tril_matrix = torch.tril(torch.ones(
            (max_len, max_len), dtype=torch.long))
        self.max_tgt_length = max_tgt_length
    def __call__(self, instance):
        tokens_a, max_a_len = instance
        # Add Special Tokens
        padded_tokens_a = ['[CLS]'] + tokens_a + ['[SEP]']
        assert len(padded_tokens_a) <= max_a_len + 2
        if max_a_len + 2 > len(padded_tokens_a):
            padded_tokens_a += ['[PAD]'] * \
                (max_a_len + 2 - len(padded_tokens_a))
        assert len(padded_tokens_a) == max_a_len + 2
        max_len_in_batch = min(self.max_tgt_length +
                               max_a_len + 2, self.max_len)
        tokens = padded_tokens_a
        # Segment ids 4/5 match the s2s training pipeline above.
        segment_ids = [4]*(len(padded_tokens_a)) + [5]*(max_len_in_batch - len(padded_tokens_a))
        # Position ids: real source tokens count up, [PAD] slots get 0, and
        # target positions continue the count as if no padding were present.
        position_ids = []
        for i in range(len(tokens_a) + 2):
            position_ids.append(i)
        for i in range(len(tokens_a) + 2, max_a_len + 2):
            position_ids.append(0)
        for i in range(max_a_len + 2, max_len_in_batch):
            position_ids.append(i - (max_a_len + 2) + len(tokens_a) + 2)
        # Token Indexing
        input_ids = self.indexer(tokens)
        # Seq2seq attention mask: everyone sees the real source prefix; the
        # target span attends causally to itself.
        input_mask = torch.zeros(
            max_len_in_batch, max_len_in_batch, dtype=torch.long)
        input_mask[:, :len(tokens_a)+2].fill_(1)
        second_st, second_end = len(padded_tokens_a), max_len_in_batch
        input_mask[second_st:second_end, second_st:second_end].copy_(
            self._tril_matrix[:second_end-second_st, :second_end-second_st])
        return (input_ids, segment_ids, position_ids, input_mask)
| 33,772 | 38.134415 | 175 | py |
DoTra | DoTra-main/latAEModels.py | #Source code for 'Domain Transformer: Predicting Samples of Unseen, Future Domains' by Johannes Schneider, IJCNN, 2022, https://arxiv.org/abs/2106.06057; Github; https://github.com/JohnTailor/DoTra
#Licence: Use it however you like, but cite the paper :-)
#Models for Cycle-GAN on encoded data
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
def lin(c_in, c_out, bn=True, dr=False):
    """Assemble one MLP stage as a list: [Dropout?] -> Linear -> [BatchNorm1d?].

    Dropout is prepended when ``dr > 0``; the Linear layer drops its bias
    when batch norm follows (BN's own shift makes it redundant).
    """
    drop = [nn.Dropout(dr)] if dr > 0 else []
    core = [nn.Linear(c_in, c_out, bias=not bn)]
    norm = [nn.BatchNorm1d(c_out)] if bn else []
    return drop + core + norm
class G(nn.Module):
    """Latent-space generator: an MLP mapping one encoded domain to another.

    Layer widths are cfg["aez"] scaled by [1] + cfg["glay"] + [1]; hidden
    activations are leaky ReLU, and the output gets a leaky ReLU too when
    cfg["laLeak"] is set.
    """
    def __init__(self, cfg):
        super(G, self).__init__()
        widths = cfg["aez"] * np.array([1] + cfg["glay"] + [1])
        self.laleak = cfg["laLeak"]
        modules = []
        for idx in range(len(widths) - 1):
            # gben == 2 means "batch norm everywhere except the last layer".
            use_bn = cfg["gben"] and not (cfg["gben"] == 2 and (idx + 1 == len(widths) - 1))
            modules.extend(lin(widths[idx], widths[idx + 1], use_bn, cfg["gdrop"][idx]))
        self.lays = nn.Sequential(*modules)
    def forward(self, x):
        for layer in self.lays[:-1]:
            x = F.leaky_relu(layer(x))
        x = self.lays[-1](x)
        return F.leaky_relu(x) if self.laleak else x
class D(nn.Module):
    """Latent-space discriminator MLP.

    Outputs 11 logits when ``use_labels`` (10 classes + fake), otherwise a
    single real/fake score.
    """
    def __init__(self, cfg, use_labels=False):
        super(D, self).__init__()
        n_out = 11 if use_labels else 1
        widths = cfg["aez"] * np.array([1] + cfg["dlay"] + [1])
        widths[-1] = n_out
        modules = []
        for idx in range(len(widths) - 1):
            # dben == 2 means "batch norm everywhere except the last layer".
            use_bn = cfg["dben"] and not (cfg["dben"] == 2 and (idx + 1 == len(widths) - 1))
            modules.extend(lin(widths[idx], widths[idx + 1], use_bn, cfg["ddrop"][idx]))
        self.lays = nn.Sequential(*modules)
    def forward(self, x):
        for layer in self.lays[:-1]:
            x = F.leaky_relu(layer(x))
        return self.lays[-1](x)
class LinCl(nn.Module):
    """MLP classifier head operating on latent codes.

    Widths go from cfg["aez"] through cfg["aez"]*cfg["cllay"] to the number
    of classes cfg["ds"][1]; hidden activations are leaky ReLU, the final
    layer emits raw logits.
    """
    def __init__(self, cfg):
        super(LinCl, self).__init__()
        widths = [cfg["aez"]] + list(cfg["aez"] * np.array(cfg["cllay"])) + [cfg["ds"][1]]
        modules = []
        for idx in range(len(widths) - 1):
            # clben == 2 means "batch norm everywhere except the last layer".
            use_bn = cfg["clben"] and not (cfg["clben"] == 2 and (idx + 1 == len(widths) - 1))
            modules.extend(lin(widths[idx], widths[idx + 1], use_bn, cfg["cldrop"][idx]))
        self.lays = nn.Sequential(*modules)
    def forward(self, x):
        for layer in self.lays[:-1]:
            x = F.leaky_relu(layer(x))
        return self.lays[-1](x)
| 2,462 | 31.84 | 198 | py |
DoTra | DoTra-main/main.py | # Source code for 'Domain Transformer: Predicting Samples of Unseen, Future Domains' by Johannes Schneider, IJCNN, 2022, https://arxiv.org/abs/2106.06057; Github; https://github.com/JohnTailor/DoTra
# Licence: Use it however you like, but cite the paper :-)
#Main routine to train models
import sklearn
import torch
from torch.utils.data import Dataset, TensorDataset
import torch.cuda.amp as tca
from optCycEncoded import Solver
import dutils
import AEModels as aut
import trainClassifiers,imgutil
from imgutil import *
def trainOne(cfg):
    """Run one full DoTra experiment configured by ``cfg``.

    Pipeline: load raw data -> train reference classifier -> autoencode the
    source and the domain-adapted data -> train a CycleGAN-style transformer
    between the two latent spaces -> apply the transformer repeatedly to
    synthesize future-domain training data -> train and evaluate a classifier
    per synthesized domain.  Results are written into cfg["result"].
    """
    # Build a DataLoader from numpy arrays, optionally normalizing first.
    def cds(X, Y, shuffle=False, norm=True):
        noX = imgutil.nor(X.astype(np.float32)) if norm else X
        ds = TensorDataset(torch.from_numpy(noX), torch.from_numpy(Y))
        return torch.utils.data.DataLoader(ds, batch_size=cfg["batchSize"], shuffle=shuffle, num_workers=0)
    # Same, but for a list of paired input arrays (multi-input batches).
    def cds2(X, Y, shuffle=False):
        noX = [imgutil.nor(cX.astype(np.float32)) for cX in X]
        ds = TensorDataset(*[torch.from_numpy(cX) for cX in noX], torch.from_numpy(Y))
        return torch.utils.data.DataLoader(ds, batch_size=cfg["batchSize"], shuffle=shuffle, num_workers=0)
    # Apply the first configured domain transformation to X (labels pass through).
    def modXY(toModteX,modteY):
        t = cfg["trans"][0]
        modteX=applyOp(toModteX,t[0],t[1])
        return modteX,modteY
    # Build the chain of progressively transformed domains used for evaluation.
    def cdsMod(X, Y):
        X = imgutil.nor(X)
        allX, allY = [X], [Y]
        for t in cfg["transEv"]:
            modtrX, Y = modXY(allX[-1].astype(np.float32), allY[-1])
            allX.append(modtrX)
            allY.append(Y)
        return allX, allY
    # Train (or load) an autoencoder and return loaders over the encoded data.
    def getAE(cfg, trX, trY, teX, teY, sname, dotrain,picname=""):
        origtr_iter = cds(trX, trY, True)
        origte_iter = cds(teX, teY, False)
        (aetrX, aetrY), (aeteX, aeteY), acfg, netAEorg = aut.runAE(cfg, origtr_iter, origte_iter, sname, dotrain,picname)
        aetr_iter = cds(aetrX, aetrY, True, False)
        aete_iter = cds(aeteX, aeteY, False, False)
        return aetr_iter, aete_iter, netAEorg
    cget=True
    #Get unmodified, raw training data and a base Classifier
    totdat= int(cfg["ds"][2])
    (trX, trY), (teX, teY) = dutils.getFullDS(cfg, totdat, None, cget)
    if cfg["baseZoom"]!=1:
        print("Zooming data for Proposition test")
        trX=applyOp(trX, "zoo1", cfg["baseZoom"])
        teX = applyOp(teX, "zoo1", cfg["baseZoom"])
    # Optionally split off a disjoint portion so source and adapted domains
    # do not share samples (controlled by cfg["distinctDat"]).
    disoff = int(totdat * cfg["distinctDat"]*0.5)
    nd = totdat-disoff
    baseCl, baseClRes = trainClassifiers.getclassifier(cfg, cds(trX, trY, True), cds(teX, teY, False), None, getc=False, loadCl=False, save=False, useLat=False) #get base classifier (only used for reference)
    toModtrX, modtrY = np.copy(trX[disoff:nd + disoff]), np.copy(trY[disoff:nd + disoff])
    toModteX, modteY = np.copy(teX), np.copy(teY)
    trX, trY = trX[:nd], trY[:nd]
    #Get auto encoding of raw training data and domain adapted data
    aetr_iter, aete_iter, netAEorg = getAE(cfg,trX, trY, teX, teY, sname=None, dotrain=cget,picname="onRawOrgDomain") #AE with tanh => -1,1
    t = cfg["trans"][0]
    modtrX, modtrY = modXY(toModtrX, modtrY)
    modteX, modteY = modXY(toModteX,modteY)
    failed = False
    # Full DoTra: autoencode, transform on latent, learn between domains
    aemodtr_iter, aemodte_iter, netAEdom = getAE(cfg,modtrX, modtrY, modteX, modteY, sname=None, dotrain=cget,picname="onRawAdaDomain")
    #Get Transformer between auto encodings of raw training data and for the same data in domain-adapted space
    solver = Solver(cfg) #if cfg["solvSim"]==0 else solverSimp.Solver(cfg)
    othlo, ax0, ax1, ay, atex0, atex1, atey = solver.train(netAEorg,netAEdom,aetr_iter,aete_iter,aemodtr_iter,modteX, modteY,cget,nd) #AE with tanh => -1,1
    # Get transformer from original space (not some encoding space) and domain space (not encoded)
    a2tr_iter = cds2([ax0, ax1], ay,shuffle=True) # Data where training data in orig space is mapped to data in domain space
    a2te_iter = cds2([atex0, atex1], atey) # modtrX, amodteX = cdsMod(trX), cdsMod(teX) #eteX, eteY = amodteX[-nFor:], [teY] * nFor # if cfg["doTra"]: #ntr=ntra+1 - nFor #origtr_iter = cds2(amodtrX,np.copy(trY), True) # Array is copied due to type cast to float32 in cds2 #origte_iter = cds2(amodteX, np.copy(teY), False)
    loadtrans = cget
    orgTransModel, cyccfg, loaded = trainClassifiers.getTrans(cfg, a2tr_iter, a2te_iter, ((ax0, ay), (atex0, atey)), None, loadtrans, selfTra=False)
    #Get labeled domain data, by applying transformer multiple times on source data
    nFor = len(cfg["transEv"])# nFor = ntra# if nFor <= 0: print("nothing to forecast", ntra, cfg["transEv"]) return
    atrX = [ax0]
    atrY = [ay]
    # Iterate the learned transformer: each pass synthesizes the next domain.
    for i in range(nFor):
        cajx, cajy = [], []
        orgTransModel.eval()
        cdataset = cds(atrX[-1], atrY[-1],shuffle=False,norm=cfg["evNorm"]) # citer=cds2([atrX[-2],atrX[-1]],atrY[-1])
        for data in cdataset:
            with tca.autocast():
                dsx = data[0].cuda()
                if not cfg["dirCyc"]: dsx=[None,dsx]
                output = orgTransModel(dsx).detach().cpu()
                cajx.append(output.clone().numpy())
                cajy.append(data[-1].clone().numpy())
        atrX.append(np.concatenate(cajx, axis=0))
        atrY.append(np.concatenate(cajy, axis=0))
    etrX, etrY = atrX[-nFor:], atrY[-nFor:] #print("nfo", nFor, len(atrX)) # imgutil.makeAndStore2(atrX[-3][:64],atrX[-2][:64],atrX[-1][:64], cfg["bFolder"] + "samples/", "FORCAST"+str(cfg["bid"]) + fs(cfg) + ".png")
    if not failed:
        #Get domain datasets used for prediction
        amodteX,amodteY = cdsMod(teX,teY)
        eteX, eteY = amodteX[-nFor:], amodteY[-nFor:]
        # Train a classifier on synthesized domain-j data, evaluate on the
        # true domain-j test data, and return its metrics dict.
        def evalCl(ltrX, ltrY, lteX, lteY,domid):
            def cods(lX, lY, shuffle=False):
                trX, trY = np.concatenate(lX, axis=0), np.concatenate(lY, axis=0) # trY=np.concatenate([np.ones(aj[0].shape[0],dtype=np.int)*j for j in range(len(aj))])
                trX, trY = sklearn.utils.shuffle(trX, trY)
                trit = cds(trX, trY, shuffle)
                return trit
            trit = cods(ltrX, ltrY, True)
            teit = cods(lteX, lteY, False)
            netCl, clcfg = trainClassifiers.getclassifier(cfg, trit, teit, None, getc=False, save=False, loadCl=False)
            return clcfg
        #Train classifier using labeled data being transformed and apply it to generated test data
        vals=np.arange(nFor)
        for j in reversed(vals):
            clcfg = evalCl([etrX[j]], [etrY[j]], [eteX[j]], [eteY[j]],j)
            #print("eval",j,nFor,np.sum(etrX[j]),clcfg,cyccfg)
            # ptrA/pteA/mteA: train / final-test / best-test accuracy on domain j.
            cyccfg["ptrA" + str(j)] = clcfg["trA"]
            cyccfg["pteA" + str(j)] = clcfg["teA"]
            cyccfg["mteA" + str(j)] = clcfg["mteA"]
        cyccfg = {**cyccfg, **othlo,**baseClRes}
        cfg["result"] = [cyccfg]
    print("\n\nBench:",cfg["trans"])
    print("Result")
    res=cfg["result"][0]
    print("All metrics",res)
    print("Accs (Source, target 0,..,2)",np.round([res["teA"]]+[res["pteA"+str(i)] for i in range(3)],3))
    print("MaxAccs", np.round([res["teA"]] + [res["mteA" + str(i)] for i in range(3)], 3))
    print("\n\n\n\n")
#print("\n\n\n\nFOR Accuracy: check pteA. (=Accuracy after last epoch) and mteA.(=max Accuracy accross all epochs) in results below")
#print("pteA0 denotes test accuracy on target domain 0, pteA1 target domain 1, pteA2 target domain 2, etc.")
#print("teA denotes test accuracy on source domain")
| 7,497 | 48.006536 | 366 | py |
DoTra | DoTra-main/optCycEncoded.py | #Source code for 'Domain Transformer: Predicting Samples of Unseen, Future Domains' by Johannes Schneider, IJCNN, 2022, https://arxiv.org/abs/2106.06057; Github; https://github.com/JohnTailor/DoTra
#Licence: Use it however you like, but cite the paper :-)
#Based on https://github.com/yunjey/mnist-svhn-transfer/
import torch
import torch.nn as nn
import os
import pickle
import numpy as np
from torch.autograd import Variable
from torch import optim
import torch.cuda.amp as tca
from trainClassifiers import getAcc
from latAEModels import G, D
from trainClassifiers import decay
from classifierModels import worNet
def getTrAcc( cfg, trds,val_dataset):
    """Train a fresh classifier on ``trds`` and return (val accuracy, net).

    Uses SGD with a short linear warmup (lr starts at base/10), mixed
    precision via GradScaler, and the shared per-epoch lr decay schedule.
    Trains for one third of cfg["epC"] epochs.
    """
    netCl = worNet(cfg).cuda()
    ccf = cfg["clcfg"]
    # closs: smoothed running loss; trep: number of epochs; clr: base lr.
    closs, teaccs, trep, loss, clr = 0, [], cfg["epC"]//3, nn.CrossEntropyLoss(), ccf["opt"][1]
    # optimizerCl = optim.SGD(netCl.parameters(), lr=0.05, momentum=0.9, weight_decay=1e-5) # elif ccf["opt"][0] == "A": optimizerCl = optim.Adam(netCl.parameters(), ccf["opt"][2], weight_decay=ccf["opt"][3])
    # warmup = (#epochs of warmup, lr division factor at the start).
    warmup = (max(2, trep // 40), 10)
    optimizerCl = optim.SGD(netCl.parameters(), lr=ccf["opt"][1] / warmup[1], momentum=0.9, weight_decay=ccf["opt"][2])
    scaler = tca.GradScaler()
    clAcc = lambda dataset: getAcc(netCl, dataset, niter=9999, cfg=cfg)
    crolo = nn.CrossEntropyLoss()
    closs = 0
    for epoch in range(trep):
        netCl.train()
        for i, data in enumerate(trds):
            with tca.autocast():
                optimizerCl.zero_grad(set_to_none=True)
                dsx, dsy = data[0].cuda(), data[1].cuda()
                output = netCl(dsx)
                errD_real = crolo(output, dsy.long())
                scaler.scale(errD_real).backward()
                scaler.step(optimizerCl)
                scaler.update()
                # Exponential moving average of the loss; heavier smoothing
                # once enough batches have been seen.
                closs = 0.97 * closs + 0.03 * errD_real.item() if i > 20 else 0.8 * closs + 0.2 * errD_real.item()
        decay(ccf, epoch, optimizerCl, warmup, trep)
        netCl.eval()
        if epoch<2 or epoch==trep-1 or epoch%15==0:
            print("Train Test CL","ep", epoch, closs, clAcc(val_dataset))
    return clAcc(val_dataset),netCl
class Solver(object):
    """CycleGAN-style trainer mapping between the original domain and an adapted
    (drifted) domain, operating either on images or on autoencoder latent codes
    (cfg["useLat"]). g12 maps original->adapted, g21 maps adapted->original;
    d1/d2 are the per-domain discriminators.
    """

    def __init__(self, cfg):
        self.cfg = cfg
        self.g12, self.g21 = None, None            # generators, created in build_model
        self.d1, self.d2 = None, None              # discriminators, created in build_model
        self.g_optimizer, self.d_optimizer = None, None
        self.num_classes = cfg["ds"][1]
        self.batch_size = cfg["batchSize"]

    def build_model(self):  # """Builds the generators and discriminators plus their Adam optimizers."""
        self.g12 = G(self.cfg).cuda()
        self.g21 = G(self.cfg).cuda()
        # d1 is optional (cfg["d1"]); label-conditioned variants depend on cfg["useLab"]
        if self.cfg["d1"]: self.d1 = D(self.cfg, use_labels=self.cfg["useLab"] in [1, 2]).cuda()
        self.d2 = D(self.cfg, use_labels=self.cfg["useLab"] == 2).cuda()
        # useLab == 3: an extra label-classifying discriminator on the original domain
        if self.cfg["useLab"] == 3: self.d1cl = D(self.cfg, use_labels=1).cuda()
        g_params = list(self.g12.parameters()) + list(self.g21.parameters())
        d_params = list(self.d2.parameters())
        if self.cfg["d1"]: d_params += list(self.d1.parameters())
        if self.cfg["useLab"] == 3: d_params += list(self.d1cl.parameters())
        self.d_optimizer = optim.Adam(d_params, self.cfg["DGlr"][0], self.cfg["DGBeta12"][0])
        self.g_optimizer = optim.Adam(g_params, self.cfg["DGlr"][1], self.cfg["DGBeta12"][1])

    def to_var(self, x): return Variable(x.cuda())  # move a tensor to GPU (legacy Variable wrapper)

    # def to_data(self, x): return x.cpu().data.numpy()  # """Converts variable to numpy."""

    def reset_grad(self):
        """Zero the gradients of both optimizers."""
        self.g_optimizer.zero_grad(set_to_none=True)
        self.d_optimizer.zero_grad(set_to_none=True)
        # if self.cfg["useLab"] == 3: self.d3_optimizer.zero_grad()

    def decay(self, epoch, total):
        """Multiplicative LR decay (x0.85 per call) once past cfg["lrdecay"] fraction of training."""
        if epoch > total * self.cfg["lrdecay"]:
            for opt in [self.g_optimizer, self.d_optimizer]:
                for g in opt.param_groups:
                    g['lr'] = 0.85 * g['lr']

    def train(self, netAEorg, netAEdom, aetr_iter, aete_iter, aemodtr_iter, modteX, modteY, cget, nd):
        """Run adversarial cycle training; returns last D losses plus the
        generated (original, domain-translated, label) arrays for train/test.
        """
        othlo, milo, bacc = -1, 1e99, 0
        reclo, reclo2 = torch.zeros(1), torch.zeros(1)  # cycle-reconstruction losses (for logging)
        print("Train Cyc GAN")
        if self.cfg["useClLo"]:
            # optional auxiliary classifier loss on the generators
            from trainClassifiers import getLinCl
            clloss = nn.CrossEntropyLoss()
            if self.cfg["trainCl"]:
                # train small linear classifiers jointly with the GAN
                ccf = self.cfg["clcfg"]
                from latAEModels import LinCl
                clDom = LinCl(self.cfg).cuda()
                loOrg, loDom = nn.CrossEntropyLoss(), nn.CrossEntropyLoss()
                clr = ccf["opt"][1] / 100  # much smaller LR than the main classifier schedule
                if self.cfg["trainCl"] != 3:
                    clOrg = LinCl(self.cfg).cuda()
                    optOrg = optim.SGD(clOrg.parameters(), lr=clr, momentum=0.8, weight_decay=ccf["opt"][2] / 5)  # clr / warmup[
                else:
                    linCl, lincfg = getLinCl(self.cfg, aetr_iter, aete_iter, "None", cget, save=True, loadCl=True)
                optDom = optim.SGD(clDom.parameters(), lr=clr, momentum=0.8, weight_decay=ccf["opt"][2] / 5)
            else:
                # use a fixed, pre-trained linear classifier
                linCl, lincfg = getLinCl(self.cfg, aetr_iter, aete_iter, "None", cget, save=True, loadCl=True)
        self.build_model()
        useLabLoss = nn.CrossEntropyLoss()  # loss if use_labels = True
        ax, ay = [], []
        tries = 0
        niterEp = nd // self.cfg["batchSize"]  # iterations per epoch
        train_iters = self.cfg["epG"] * niterEp + self.cfg["ntries"][0] * self.cfg["ntries"][1]  # config.train_iters
        # one-sided label smoothing noise for the LSGAN targets (0 when disabled)
        labSmo = lambda sh: 2 * (torch.rand(sh.shape[0]).cuda() - 0.5) * self.cfg["labSmo"] if self.cfg["labSmo"] > 0 else 0
        # reconstruction penalty: squared vs absolute, optionally on the channel-mean
        recF = torch.square if self.cfg["recLo"] % 10 == 2 else torch.abs
        recLoFct = lambda x: recF(torch.mean(torch.abs(x), dim=1)) if self.cfg["recLo"] >= 10 else recF(x)
        miter, siter = iter(aetr_iter), iter(aemodtr_iter)
        # separate grad scalers per optimization step (D real, D fake, G cycle 1, G cycle 2)
        sca0, sca1, sca2, sca3 = tca.GradScaler(), tca.GradScaler(), tca.GradScaler(), tca.GradScaler()
        def wrap(scaler, opt, lo):
            # one scaled backward + optimizer step
            scaler.scale(lo).backward()
            scaler.step(opt)
            scaler.update()
        for step in range(train_iters + 1):
            with tca.autocast():
                try:
                    adaDom, s_labels = next(siter)  # load adaDom and orgDom batches
                    orgDom, m_labels = next(miter)
                except StopIteration:
                    # restart both iterators together when either epoch ends
                    miter, siter = iter(aetr_iter), iter(aemodtr_iter)
                    adaDom, s_labels = next(siter)
                    orgDom, m_labels = next(miter)
                self.decay(step // niterEp, self.cfg["epG"])
                orgDom, adaDom = orgDom.float(), adaDom.float()
                if step == 0: code_org, code_dom = orgDom.clone().cuda(), adaDom.clone().cuda()  # save for outputting images
                adaDom, s_labels = self.to_var(adaDom), self.to_var(s_labels).long().squeeze()
                orgDom, m_labels = self.to_var(orgDom), self.to_var(m_labels.long())
                # extra "fake" class index (num_classes) for label-conditioned discriminators
                if self.cfg["useLab"]: orgDom_fake_labels = self.to_var(torch.Tensor([self.num_classes] * adaDom.size(0)).long())
                if self.cfg["useLab"] == 2: adaDom_fake_labels = self.to_var(torch.Tensor([self.num_classes] * orgDom.size(0)).long())
                # ============ train D ============#
                # train with real images
                self.reset_grad()
                d1_loss = 0
                if self.cfg["d1"]:
                    out = self.d1(orgDom)
                    if self.cfg["useLab"] == 1 or self.cfg["useLab"] == 2: d1_loss = useLabLoss(out, m_labels)
                    else: d1_loss = torch.mean((out - 1 + labSmo(out)) ** 2)  # LSGAN real target 1
                out = self.d2(adaDom)
                if self.cfg["useLab"] == 2: d2_loss = useLabLoss(out, s_labels)
                else: d2_loss = torch.mean((out - 1 + labSmo(out)) ** 2)
                d_orgDom_loss, d_adaDom_loss, d_real_loss = d1_loss, d2_loss, d1_loss + d2_loss
                if self.cfg["useLab"] == 3:
                    out = self.d1cl(orgDom)
                    d_real_loss += useLabLoss(out, m_labels)
                wrap(sca0, self.d_optimizer, d_real_loss)
                # train with fake images (LSGAN fake target 0)
                self.reset_grad()
                fake_adaDom = self.g12(orgDom)
                out = self.d2(fake_adaDom)
                if self.cfg["useLab"] == 2: d2_loss = useLabLoss(out, adaDom_fake_labels)
                else: d2_loss = torch.mean((out + labSmo(out)) ** 2)
                fake_orgDom = self.g21(adaDom)
                if self.cfg["d1"]:
                    out = self.d1(fake_orgDom)
                    if self.cfg["useLab"] == 1 or self.cfg["useLab"] == 2: d1_loss = useLabLoss(out, orgDom_fake_labels)
                    else: d1_loss = torch.mean((out + labSmo(out)) ** 2)
                else: d1_loss = 0
                d_fake_loss = d1_loss + d2_loss
                if self.cfg["useLab"] == 3:
                    out = self.d1cl(fake_orgDom)
                    d_fake_loss += useLabLoss(out, orgDom_fake_labels)
                # d_fake_loss.backward()
                # self.d_optimizer.step()
                wrap(sca1, self.d_optimizer, d_fake_loss)
                # ============ train G ============#
                # train orgDom-adaDom-orgDom cycle
                self.reset_grad()
                fake_adaDom = self.g12(orgDom)
                out = self.d2(fake_adaDom)
                if self.cfg["useLab"] == 2: g_loss = useLabLoss(out, m_labels)
                else: g_loss = torch.mean((out - 1) ** 2)
                if self.cfg["useRec"] > 0:
                    reconst_orgDom = self.g21(fake_adaDom)
                    reclo = self.cfg["cycFac"][0] * self.cfg["useRec"] * torch.mean(recLoFct(orgDom - reconst_orgDom))
                    g_loss += reclo
                if self.cfg["useLab"] == 3:
                    # NOTE(review): reconst_orgDom only exists when useRec > 0 — with
                    # useLab == 3 and useRec <= 0 this would raise; confirm config invariant
                    out = self.d1cl(reconst_orgDom)
                    g_loss += useLabLoss(out, m_labels)
                if self.cfg["useClLo"]:
                    # auxiliary classifier loss keeps generated samples classifiable
                    if self.cfg["trainCl"]:
                        actOrg = clOrg(reconst_orgDom)  # subtract loss on original maybe
                        actDom = clDom(fake_adaDom)  # subtract loss on original maybe
                        if self.cfg["smoo"]:
                            actOrg = actOrg + torch.mean(torch.abs(actOrg.detach()), dim=0) * self.cfg["smoo"]
                            actDom = actDom + torch.mean(torch.abs(actDom.detach()), dim=0) * self.cfg["smoo"]
                        li_loss = self.cfg["trainCl"] * (clloss(actOrg, m_labels) + clloss(actDom, m_labels))
                    else:
                        acti = linCl(reconst_orgDom)  # subtract loss on original maybe
                        if self.cfg["smoo"]:
                            acti = acti + torch.mean(torch.abs(acti.detach()), dim=0) * self.cfg["smoo"]
                        li_loss = self.cfg["useClLo"] * clloss(acti, m_labels)
                    g_loss += li_loss
                wrap(sca2, self.g_optimizer, g_loss)
                # train adaDom-orgDom-adaDom cycle
                self.reset_grad()
                fake_orgDom = self.g21(adaDom)
                if self.cfg["d1"]:
                    out = self.d1(fake_orgDom)
                    if self.cfg["useLab"] == 2: g_loss = useLabLoss(out, s_labels)
                    else: g_loss = torch.mean((out - 1) ** 2)
                else: g_loss = 0
                if self.cfg["useRec"] > 0:
                    reconst_adaDom = self.g12(fake_orgDom)
                    reclo2 = self.cfg["cycFac"][1] * self.cfg["useRec"] * torch.mean(recLoFct(adaDom - reconst_adaDom))
                    g_loss += reclo2
                wrap(sca3, self.g_optimizer, g_loss)
                if self.cfg["useClLo"]:
                    if self.cfg["trainCl"]:
                        # one SGD step for the jointly-trained classifiers on detached samples
                        def trCl(cl, dat, lo, opt):
                            opt.zero_grad(set_to_none=True)
                            cl.train()
                            act = cl(dat)
                            clo = lo(act, m_labels)
                            clo.backward()
                            opt.step()
                            cl.eval()
                        if not self.cfg["trainOrg"] == 2: trCl(clOrg, reconst_orgDom.detach(), loOrg, optOrg)
                        if self.cfg["trainOrg"] == 1: trCl(clOrg, orgDom.detach(), loOrg, optOrg)
                        trCl(clDom, fake_adaDom.detach(), loDom, optDom)
            if (step + 1) % self.cfg["ntries"][0] == 0:  # periodic logging / snapshotting
                useLat = self.cfg["useLat"]
                def getaeds(ds):
                    # run g12 over `ds`; return (original, domain-translated, labels) arrays,
                    # decoded to images via the autoencoders unless working in latent space
                    ax0, ax1, ay = [], [], []
                    self.g12.eval()
                    for bx, by in ds:
                        with tca.autocast():
                            cx = bx.float().cuda()
                            fake_code_dom = self.g12(cx)
                            if not useLat:
                                orgX = netAEorg.dec(cx).detach().cpu().numpy()
                                domGenX = netAEdom.dec(fake_code_dom).detach().cpu().numpy()
                            ax0.append(orgX if not useLat else bx)
                            ax1.append(domGenX if not useLat else fake_code_dom.detach().cpu().numpy())
                            ay.append(by.detach().cpu().numpy())
                    self.g12.train()
                    return [np.concatenate(cx, axis=0) for cx in [ax0, ax1, ay]]
                if (step + 1) % (10 * self.cfg["ntries"][0]) == 0:
                    print('Step [%d/%d], Losses: d_real: %.4f, d_OrgDom: %.4f, d_AdaDom: %.4f, '
                          'd_fake: %.4f, g: %.4f, r: %.4f, r2: %.4f' % (step + 1, train_iters, d_real_loss.item(), d_orgDom_loss.item() if self.cfg["d1"] else -1, d_adaDom_loss.item(), d_fake_loss.item(), g_loss.item(), reclo.item(), reclo2.item()), self.cfg["pr"])
                    if self.cfg["useClLo"]: print("LinCl Loss", li_loss.item())
                clo = g_loss.item()
                # after the nominal epochs, snapshot outputs whenever the G loss improves by >=15%
                if (step // niterEp >= self.cfg["epG"] and milo * 0.85 > clo):
                    milo = clo
                    tries += 1
                    [ax0, ax1, ay] = getaeds(aetr_iter)
                    [atex0, atex1, atey] = getaeds(aete_iter)
                    if tries == self.cfg["ntries"][1]: break  # stop after ntries[1] snapshots
                othlo = {"DOLo": d_orgDom_loss.item() if self.cfg["d1"] else 0, "DDLo": d_adaDom_loss.item(), "DFLo": d_fake_loss.item()}  # "DRLo":d_real_loss.item(),
        return othlo, ax0, ax1, ay, atex0, atex1, atey
| 14,096 | 47.947917 | 264 | py |
DoTra | DoTra-main/doTraModel.py | #Source code for 'Domain Transformer: Predicting Samples of Unseen, Future Domains' by Johannes Schneider, IJCNN, 2022, https://arxiv.org/abs/2106.06057; Github; https://github.com/JohnTailor/DoTra
#Licence: Use it however you like, but cite the paper :-)
import torch.nn as nn
import torch.nn.functional as F
import torch
def deconv(c_in, c_out, k_size, stride=2, pad=1, bn=True):
    """Upsampling building block: ConvTranspose2d, optionally followed by BatchNorm2d.

    When `bn` is set the conv bias is dropped (BatchNorm supplies the shift).
    """
    modules = [nn.ConvTranspose2d(c_in, c_out, k_size, stride, pad, bias=not bn)]
    if bn:
        modules.append(nn.BatchNorm2d(c_out))
    return nn.Sequential(*modules)
def conv(c_in, c_out, k_size, stride=2, pad=1, bn=True):
    """Downsampling building block: Conv2d, optionally followed by BatchNorm2d.

    When `bn` is set the conv bias is dropped (BatchNorm supplies the shift).
    """
    modules = [nn.Conv2d(c_in, c_out, k_size, stride, pad, bias=not bn)]
    if bn:
        modules.append(nn.BatchNorm2d(c_out))
    return nn.Sequential(*modules)
class BasicBlock(nn.Module):
    """Standard two-convolution residual block (ResNet v1 style).

    The shortcut is a 1x1 projection whenever the stride or channel count
    changes; otherwise it is the identity (an empty Sequential).
    """
    expansion = 1

    def __init__(self, in_planes, planes, stride=1):
        super(BasicBlock, self).__init__()
        self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)
        self.bn1 = nn.BatchNorm2d(planes)
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=1, padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(planes)
        needs_projection = stride != 1 or in_planes != self.expansion * planes
        if needs_projection:
            self.shortcut = nn.Sequential(
                nn.Conv2d(in_planes, self.expansion * planes, kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm2d(self.expansion * planes),
            )
        else:
            self.shortcut = nn.Sequential()

    def forward(self, x):
        residual = self.shortcut(x)
        y = F.relu(self.bn1(self.conv1(x)))
        y = self.bn2(self.conv2(y))
        return F.relu(y + residual)
class Trans(nn.Module):
    """Domain transformer: encoder -> (residual or plain) middle convs -> decoder.

    `forward` takes a pair (x1, x2). With cfg["singleIn"] only x2 is used
    (optionally concatenated with flipped copies per cfg["sym"]); otherwise both
    inputs are combined, either channel-concatenated before encoding
    (cfg["conca"]) or encoded separately and concatenated afterwards.
    Output is tanh-squashed to [-1, 1].
    """

    def __init__(self, cfg):
        super(Trans, self).__init__()
        self.cfg = cfg
        conv_dim = int(cfg["convdim"] * cfg["netSi"])
        self.leak = cfg["tleak"]
        # encoding blocks
        self.in1 = cfg["singleIn"]
        self.sym = cfg["sym"]
        # input channels grow with the number of flipped copies requested by cfg["sym"]
        insym = cfg["imCh"] * (1 + 2 * int(cfg["sym"] == 1) + int(cfg["sym"] == 2) + 3 * int(cfg["sym"] == 3))
        if self.in1:
            self.conv1 = conv(insym, conv_dim, 4)
            self.conv2 = conv(conv_dim, conv_dim * 2, 4)
        else:
            self.conca = cfg["conca"]
            co = self.conca
            # halve widths when encoding the two inputs separately (co == 0)
            self.conv1 = conv(insym * (co + 1), conv_dim // (2 - co), 4)
            self.conv2 = conv(conv_dim // (2 - co), conv_dim * 2 // (2 - co), 4)
        # residual blocks
        if cfg["resB"]:
            self.conv3 = BasicBlock(conv_dim * 2, conv_dim * 2)
            # NOTE(review): the third positional arg of BasicBlock is `stride`, so this
            # creates a stride-3 block while the non-residual path uses stride 1 —
            # looks unintentional; confirm before changing
            self.conv3a = BasicBlock(conv_dim * 2, conv_dim * 2, 3) if cfg["nExLay"] else nn.Identity()
            self.conv4 = BasicBlock(conv_dim * 2, conv_dim * 2)
        else:
            self.conv3 = conv(conv_dim * 2, conv_dim * 2, 3, 1, 1)
            self.conv3a = conv(conv_dim * 2, conv_dim * 2, 3, 1, 1) if cfg["nExLay"] else nn.Identity()
            self.conv4 = conv(conv_dim * 2, conv_dim * 2, 3, 1, 1)
        # decoding blocks
        self.deconv1 = deconv(conv_dim * 2, conv_dim, 4)
        self.deconv2 = deconv(conv_dim, cfg["imCh"], 4, bn=False)

    def geto(self, inx):
        """Two-stage strided encoding with leaky-ReLU activations."""
        out = F.leaky_relu(self.conv1(inx), self.leak)  # (?, 64, 16, 16)
        out = F.leaky_relu(self.conv2(out), self.leak)  # (?, 128, 8, 8)
        return out

    def forward(self, x):
        x1, x2 = x
        if self.in1:
            if self.sym:
                # augment x2 with horizontally/vertically flipped copies along channels
                xsym = torch.flip(x2, dims=(-1,))
                if self.sym == 1: x2 = torch.cat([x2, xsym, torch.flip(x2, dims=(-2,))], dim=1)
                if self.sym == 2: x2 = torch.cat([x2, torch.flip(xsym, dims=(-2,))], dim=1)
                if self.sym == 3: x2 = torch.cat([x2, xsym, torch.flip(x2, dims=(-2,)), torch.flip(xsym, dims=(-2,))], dim=1)
            out = self.geto(x2)
        else:
            if self.sym > 0: print("must flip etc for each input - not implemented see above how to do it")
            if self.conca:
                x = torch.cat([x1, x2], dim=1)
                out = self.geto(x)
            else:
                out = torch.cat([self.geto(x1), self.geto(x2)], dim=1)
        out = F.leaky_relu(self.conv3(out), self.leak)  # ( " )
        out = F.leaky_relu(self.conv3a(out), self.leak) if self.cfg["nExLay"] else out
        out = F.leaky_relu(self.conv4(out), self.leak)  # ( " )
        out = F.leaky_relu(self.deconv1(out), self.leak)  # (?, 64, 16, 16)
        # torch.tanh instead of the deprecated F.tanh (removed warning, identical values)
        out = torch.tanh(self.deconv2(out))  # (?, 3, 32, 32)
        return out
class D1(nn.Module):
    """Discriminator for mnist: three strided conv blocks then a 4x4 conv head.

    Head emits 11 logits (10 classes + fake) when `use_labels`, else a single score.
    """

    def __init__(self, conv_dim=64, use_labels=False):
        super(D1, self).__init__()
        self.conv1 = conv(1, conv_dim, 4, bn=False)
        self.conv2 = conv(conv_dim, conv_dim * 2, 4)
        self.conv3 = conv(conv_dim * 2, conv_dim * 4, 4)
        n_out = 11 if use_labels else 1
        self.fc = conv(conv_dim * 4, n_out, 4, 1, 0, False)

    def forward(self, x):
        h = x
        for stage in (self.conv1, self.conv2, self.conv3):  # 32->16->8->4 spatially
            h = F.leaky_relu(stage(h), 0.05)
        return self.fc(h).squeeze()
class D2(nn.Module):
    """Discriminator for svhn: identical topology to D1 (three strided conv blocks
    plus a 4x4 conv head emitting 11 logits with `use_labels`, else one score).
    """

    def __init__(self, conv_dim=64, use_labels=False):
        super(D2, self).__init__()
        self.conv1 = conv(1, conv_dim, 4, bn=False)
        self.conv2 = conv(conv_dim, conv_dim * 2, 4)
        self.conv3 = conv(conv_dim * 2, conv_dim * 4, 4)
        n_out = 11 if use_labels else 1
        self.fc = conv(conv_dim * 4, n_out, 4, 1, 0, False)

    def forward(self, x):
        h = x
        for stage in (self.conv1, self.conv2, self.conv3):  # 32->16->8->4 spatially
            h = F.leaky_relu(stage(h), 0.05)
        return self.fc(h).squeeze()
DoTra | DoTra-main/classifierModels.py | #Source code for 'Domain Transformer: Predicting Samples of Unseen, Future Domains' by Johannes Schneider, IJCNN, 2022, https://arxiv.org/abs/2106.06057; Github; https://github.com/JohnTailor/DoTra
#Licence: Use it however you like, but cite the paper :-)
#Classifier models
import numpy as np
import torch
import torch.nn as nn
class BBlock(nn.Module):
    """Conv -> BatchNorm -> ReLU, followed by an optional 2x2 max-pool (`down`)."""

    def __init__(self, in_planes, planes, ker=3, down=True, pad=1):
        super(BBlock, self).__init__()
        self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=ker, stride=1, padding=pad, bias=False)
        self.bn = nn.BatchNorm2d(planes)
        self.relu = nn.ReLU()
        self.mp = nn.MaxPool2d((2, 2), stride=2) if down else nn.Identity()

    def forward(self, x):
        return self.mp(self.relu(self.bn(self.conv1(x))))
class worNet(nn.Module):
    """VGG-like classifier built from BBlock units; widths are scaled by
    cfg["netSi"] via `tr`, and a deeper ("11"-style) variant with extra
    non-downsampling blocks is selected when "11" appears in clcfg["netT"].
    """

    def __init__(self, cfg):
        super(worNet, self).__init__()
        cf = cfg["clcfg"]
        # tr: scale a channel count by cfg["netSi"], minimum 1
        tr = lambda x: max(1, int(np.round(x * cfg["netSi"])))
        # self.addN=cfg["addN"] self.oneh = cfg["onehot"]
        self.in_channels = cfg["imCh"]
        in_channels = self.in_channels
        self.is11 = 1 if "11" in cf["netT"] else 0
        # chans = [in_channels, 32, 64, 64, 128, 128, 256, 256, 512, 512] if self.is11 else [in_channels, 32, 64, 128, 256, 512]
        chans = [in_channels, 64, 64, 64, 128, 128, 256, 256, 512, 512] if self.is11 else [in_channels, 64, 64, 128, 256, 512]
        i = -1  # shared index consumed by successive getConv calls (one chans entry each)
        def getConv(ker=cfg["ker"], down=True):
            nonlocal i
            i += 1  # return nn.Sequential(*[nn.Conv2d(in_channels=inoffs[i]+ (tr(chans[i]) if i>0 else chans[i]) , out_channels=tr(chans[i+1]), kernel_size=(ker, ker), padding=ker > 1), nn.BatchNorm2d(tr(chans[i+1])), relu] + ([mp] if down else []))
            # the raw (unscaled) channel count is kept for the image input (i == 0)
            return BBlock((tr(chans[i]) if i > 0 else chans[i]), tr(chans[i + 1]), ker=ker, down=down, pad=(ker - 1) // 2)  # inoffs[i]+
        # if self.is11: self.conv0a = nn.Identity()
        self.conv0 = getConv()
        if self.is11: self.conv1a = getConv(down=False)
        self.conv1 = getConv()
        if self.is11: self.conv2a = getConv(down=False)
        self.conv2 = getConv()
        if self.is11: self.conv3a = getConv(ker=3, down=False)
        self.conv3 = getConv(ker=3)
        if self.is11: self.conv4a = getConv(down=False, ker=3)
        self.conv4 = getConv(ker=3)
        self.allays = [self.conv0, self.conv1, self.conv2, self.conv3, self.conv4]
        if self.is11: self.allays = [self.conv0, self.conv1a, self.conv1, self.conv2a, self.conv2, self.conv3a, self.conv3, self.conv4a, self.conv4]
        i, ker = -1, 1  # NOTE(review): dead reset — i/ker are not read after this; presumably a leftover
        self.avgpool = nn.AdaptiveMaxPool2d((1, 1))
        self.dropout = nn.Dropout(0.5) if cfg["drop"] else nn.Identity()
        # one or two linear layers in the head depending on cfg["twoLin"]
        self.pred = nn.Linear(tr(512), tr(128)) if cfg["twoLin"] else nn.Identity()
        self.pred2 = nn.Linear(tr(128), cfg["ds"][1]) if cfg["twoLin"] else nn.Linear(tr(512), cfg["ds"][1])
        # self.k=0

    def forward(self, x):
        # import imgutil as imgu # print(np.sum(np.abs(x.cpu().numpy()))) # imgu.makeAndStore(x.cpu().numpy(),x.cpu().numpy(),"Img",str(self.k)+".png") # self.k+=1 # self.k=self.k%10
        for il, l in enumerate(self.allays): x = l(x)
        x = self.avgpool(x)  # global max-pool to 1x1
        x = torch.flatten(x, start_dim=1)
        x = self.dropout(x)
        x = self.pred(x)
        x = self.pred2(x)
        return x
def lin(c_in, c_out, bn=True, dr=False):
    """Linear building block as a flat module list: [Dropout?] Linear [BatchNorm1d?] ReLU.

    The Linear bias is dropped when `bn` is set (BatchNorm supplies the shift);
    a Dropout with probability `dr` is prepended when `dr > 0`.
    """
    parts = []
    if dr > 0:
        parts.append(nn.Dropout(dr))
    parts.append(nn.Linear(c_in, c_out, bias=not bn))
    if bn:
        parts.append(nn.BatchNorm1d(c_out))
    parts.append(nn.ReLU())
    return parts
class linNet(nn.Module):
    """MLP classifier over latent codes.

    Layer widths are cfg["aez"] scaled by the multipliers in cfg["llay"]; the
    final width is replaced by the class count cfg["ds"][1]. BatchNorm on the
    output layer can be suppressed via cfg["dben"] == 2.
    """

    def __init__(self, cfg):
        super(linNet, self).__init__()
        n_out = cfg["ds"][1]
        widths = cfg["aez"] * np.array([1] + cfg["llay"] + [1])
        widths[-1] = n_out
        modules = []
        for j in range(len(widths) - 1):
            is_last = j + 1 == len(widths) - 1
            use_bn = cfg["lben"] and not (cfg["dben"] == 2 and is_last)
            modules += lin(widths[j], widths[j + 1], use_bn, cfg["ldrop"][j])
        self.lays = nn.Sequential(*modules)

    def forward(self, x):
        return self.lays(x)
DoTra | DoTra-main/AEModels.py | #Source code for 'Domain Transformer: Predicting Samples of Unseen, Future Domains' by Johannes Schneider, IJCNN, 2022, https://arxiv.org/abs/2106.06057; Github; https://github.com/JohnTailor/DoTra
#Licence: Use it however you like, but cite the paper :-)
#Autoencoder models and training
import numpy as np
import pickle,os,copy
import torch.optim as optim
import torch.cuda.amp as tca
import torch
from torch import nn
from torch.nn import functional as F
from torch.autograd import Variable
import torch.nn.functional as F
from aecyc.OnlineRep import imgutil
class AEDisc(nn.Module):
    """Convolutional discriminator for the AE-GAN variant: five stride-2 convs
    (32x32 -> 1x1) followed by a single-logit linear head. BatchNorm between
    convs is toggled by cfg["aeganbn"].
    """

    def __init__(self, cfg, input_size=(1, 32, 32)):
        super(AEDisc, self).__init__()
        output_size = 1
        self.input_size = input_size
        self.channel_mult = int(64 * cfg["netSi"])
        cm = self.channel_mult
        norm = (lambda c: nn.BatchNorm2d(c)) if cfg["aeganbn"] else (lambda c: nn.Identity())
        slope = 0.2
        # (in_ch, out_ch, kernel, stride, pad) for each stage
        specs = [(input_size[0], cm, 4, 2, 1),
                 (cm, cm * 2, 4, 2, 1),
                 (cm * 2, cm * 4, 4, 2, 1),
                 (cm * 4, cm * 8, 4, 2, 1),
                 (cm * 8, cm * 8, 3, 2, 1)]
        stack = []
        for cin, cout, k, s, p in specs:
            stack += [nn.Conv2d(cin, cout, k, s, p), norm(cout), nn.LeakyReLU(slope, inplace=True)]
        self.conv = nn.Sequential(*stack)
        self.nin = cm * 8  # flattened feature count after the 1x1 spatial output
        self.linear = nn.Sequential(nn.Linear(self.nin, output_size))

    def forward(self, x):
        h = x
        for layer in self.conv:
            h = layer(h)
        return self.linear(torch.flatten(h, start_dim=1))
class CNN_Encoder(nn.Module):
    """Autoencoder encoder: five stride-2 convs (32x32 -> 1x1) and a linear head
    mapping to a cfg["aez"]-dimensional latent code. BatchNorm is toggled by
    cfg["aebn"]; widths get a bump for Fashion-MNIST ("F" in the dataset name).
    """

    def __init__(self, cfg, input_size=(1, 32, 32)):
        super(CNN_Encoder, self).__init__()
        output_size = cfg["aez"]
        self.input_size = input_size
        self.channel_mult = int(64 * (cfg["netSi"] + 0.25 * int("F" in cfg["ds"][0])))
        cm = self.channel_mult
        norm2d = (lambda c: nn.BatchNorm2d(c)) if cfg["aebn"] else (lambda c: nn.Identity())
        norm1d = (lambda c: nn.BatchNorm1d(c)) if cfg["aebn"] else (lambda c: nn.Identity())
        slope = 0.05
        # (in_ch, out_ch, kernel, stride, pad) for each stage
        specs = [(input_size[0], cm, 4, 2, 1),
                 (cm, cm * 2, 4, 2, 1),
                 (cm * 2, cm * 4, 4, 2, 1),
                 (cm * 4, cm * 8, 4, 2, 1),
                 (cm * 8, cm * 8, 3, 2, 1)]
        stack = []
        for cin, cout, k, s, p in specs:
            stack += [nn.Conv2d(cin, cout, k, s, p), norm2d(cout), nn.LeakyReLU(slope, inplace=True)]
        self.conv = nn.Sequential(*stack)
        self.nin = cm * 8  # flattened feature count after the 1x1 spatial output
        self.linear = nn.Sequential(nn.Linear(self.nin, output_size), norm1d(output_size), nn.LeakyReLU(slope), )

    def forward(self, x):
        h = x
        for layer in self.conv:
            h = layer(h)
        return self.linear(torch.flatten(h, start_dim=1))
class CNN_Decoder(nn.Module):
    """Autoencoder decoder, mirror of CNN_Encoder: latent code -> fc ->
    five stride-2 transposed convs (1x1 -> 32x32) -> tanh image in [-1, 1].
    """

    def __init__(self, cfg):
        super(CNN_Decoder, self).__init__()
        self.input_dim = cfg["aez"]
        self.channel_mult = int(64 * (cfg["netSi"] + 0.25 * int("F" in cfg["ds"][0])))
        cm = self.channel_mult
        self.fc_output_dim = cm * 16
        self.fc = nn.Sequential(nn.Linear(self.input_dim, self.fc_output_dim), nn.BatchNorm1d(self.fc_output_dim), nn.ReLU(True))
        norm = (lambda c: nn.BatchNorm2d(c)) if cfg["aebn"] else (lambda c: nn.Identity())
        slope = 0.05
        stack = []
        for cin, cout in [(self.fc_output_dim, cm * 8), (cm * 8, cm * 4), (cm * 4, cm * 2), (cm * 2, cm * 1)]:
            stack += [nn.ConvTranspose2d(cin, cout, 4, 2, 1, bias=False), norm(cout), nn.LeakyReLU(slope)]
        stack.append(nn.ConvTranspose2d(cm * 1, cfg["imCh"], 4, 2, 1, bias=False))
        self.deconv = nn.Sequential(*stack)

    def forward(self, x):
        h = self.fc(x)
        h = h.view(-1, self.fc_output_dim, 1, 1)
        for layer in self.deconv:
            h = layer(h)
        return F.tanh(h)
class AENetwork(nn.Module):
    """Convolutional autoencoder; forward returns (reconstruction, latent code)."""

    def __init__(self, cfg):
        super(AENetwork, self).__init__()
        self.encoder = CNN_Encoder(cfg, input_size=(cfg["imCh"], 32, 32))
        self.decoder = CNN_Decoder(cfg)

    def enc(self, x):
        """Encode an image batch to latent codes."""
        return self.encoder(x)

    def dec(self, z):
        """Decode latent codes to image space."""
        return self.decoder(z)

    def forward(self, x):
        code = self.enc(x)
        return self.dec(code), code
def getAEDat(netAE,dataset,encoded=True):
    """Run `netAE` over `dataset` and collect results as numpy arrays.

    Returns (X, Y): latent codes when `encoded`, else reconstructions, plus labels.
    """
    netAE.eval()
    aencx=[]
    aency = []
    # NOTE(review): `nor` is defined but never used in this function — presumably a leftover
    def nor(x): return (x - np.mean(x, axis=(0, 2, 3), keepdims=True)) / (np.std(x, axis=(0, 2, 3), keepdims=True) + 1e-7)
    with tca.autocast():
        with torch.no_grad():
            for i, data in enumerate(dataset):
                x = data[0].cuda()
                selfx, code = netAE(x)  # (reconstruction, latent code)
                tosave = code if encoded else selfx
                aencx.append(tosave.detach().cpu().numpy())
                aency.append(np.copy(data[1].cpu().numpy()))
    return np.concatenate(aencx, axis=0), np.concatenate(aency, axis=0)
def getAEDatIter(netAE, trdata, tedata, encoded=True, cds=None):
    """Encode train/test data with `netAE` and wrap each result via the
    `cds` loader factory (train loader shuffled, test loader not).

    Returns (train_iter, test_iter).
    """
    trX, trY = getAEDat(netAE, trdata, encoded=encoded)
    tr_iter = cds(trX, trY, True, False)
    teX, teY = getAEDat(netAE, tedata, encoded=encoded)
    te_iter = cds(teX, teY, False, False)
    return tr_iter, te_iter
def runAE(cfg, dataset, tedataset, sname, cget, picname):
    """Train (or load) an autoencoder, then encode both datasets.

    Returns ((trainX, trainY), (testX, testY), training stats dict, the AE model).
    """
    netAE, acfg = getAEModel(cfg, dataset, sname, cget, picname)
    trds = getAEDat(netAE, dataset)
    teds = getAEDat(netAE, tedataset)
    # imgutil.makeAndStore(trds[:64], trds[:64], cfg["bFolder"] + "samples/", "AE" + picname + fs(cfg) + ".png")
    return trds, teds, acfg, netAE
def decay(ccf, epoch, optimizerCl):
    """SGD step-decay: every (opt[1]//3 + opt[1]//10 + 2) epochs, scale lr by 0.1.

    No-op for non-SGD configurations (ccf["opt"][0] != "S").
    """
    if ccf["opt"][0] != "S":
        return
    period = ccf["opt"][1] // 3 + ccf["opt"][1] // 10 + 2
    if (epoch + 1) % period == 0:
        for group in optimizerCl.param_groups:
            group['lr'] *= 0.1
        print(" D", np.round(optimizerCl.param_groups[0]['lr'], 5))
def getAEModel(cfg, train_dataset, sname, cget, picname=""):  # Co,val_datasetMa,resFolder
    """Train an AENetwork on `train_dataset` (MSE reconstruction loss), optionally
    with an adversarial AEDisc critic when cfg["aeGAN"][0] is set.

    Returns (trained AE in eval mode, dict of final smoothed losses), or
    (None, None) if the loss diverges to NaN.
    """
    ccf = cfg["aecfg"]
    netAE = AENetwork(cfg).cuda()
    if cfg["aeGAN"][0]:
        # adversarial branch: a discriminator judging reconstructions vs. real images
        netD = AEDisc(cfg).cuda()
        optimizerD = optim.Adam(netD.parameters(), lr=cfg["aeGAN"][1]["lr"], betas=(0.5, 0.999))
        optimizerG = optim.Adam(netAE.parameters(), lr=cfg["aeGAN"][1]["lr"], betas=(0.5, 0.999))
        criterion = nn.BCEWithLogitsLoss()
        real_label, fake_label = 1., 0.
        gloss, drloss, dfloss = 0, 0, 0
    if ccf["opt"][0] == "S": optimizerCl = optim.SGD(netAE.parameters(), lr=ccf["opt"][1], momentum=0.8, weight_decay=ccf["opt"][2])
    elif ccf["opt"][0] == "A": optimizerCl = optim.Adam(netAE.parameters(), ccf["opt"][1], weight_decay=ccf["opt"][2])
    else: "Error opt not found"  # NOTE(review): bare string is a no-op — an unknown optimizer silently leaves optimizerCl unbound
    closs, trep, loss = 0, cfg["epA"], nn.MSELoss()  # nn.CrossEntropyLoss()
    print("Train AE")
    scaler = tca.GradScaler()
    # ulo: exponential smoothing of a loss value (heavier smoothing after epoch 1)
    ulo = lambda x, t, e: 0.97 * x + 0.03 * t.item() if e > 1 else 0.85 * x + 0.15 * t.item()
    torch.backends.cudnn.benchmark = True
    for epoch in range(trep):
        netAE.train()
        for i, data in enumerate(train_dataset):
            with tca.autocast():
                # reconstruction step
                optimizerCl.zero_grad()
                x = data[0].cuda()
                outAE, lo = netAE(x)
                errD_real = loss(torch.flatten(outAE, 1), torch.flatten(x, 1))
                scaler.scale(errD_real).backward()
                scaler.step(optimizerCl)
                scaler.update()
                if cfg["aeGAN"][0]:
                    ## Train with all-real batch
                    netD.zero_grad()
                    b_size = x.size(0)
                    label = torch.full((b_size,), real_label, dtype=torch.float).cuda()
                    outreal = netD(x).view(-1)
                    # NOTE(review): errD_real is re-bound here, so `closs` below tracks the
                    # discriminator's real loss (not the reconstruction loss) in GAN mode
                    errD_real = criterion(outreal, label)
                    scaler.scale(errD_real).backward()
                    ## Train with all-fake batch
                    label.fill_(fake_label)
                    outfake = netD(outAE.detach()).view(-1)
                    errD_fake = criterion(outfake, label)
                    scaler.scale(errD_fake).backward()
                    scaler.step(optimizerD)
                    # (2) Update G network: maximize log(D(G(z)))
                    optimizerG.zero_grad()
                    label.fill_(real_label)  # fake labels are real for generator cost
                    outAE, _ = netAE(x)
                    outfake = netD(outAE).view(-1)
                    errG = criterion(outfake, label)
                    scaler.scale(errG).backward()
                    scaler.step(optimizerG)
                    scaler.update()
                    gloss = ulo(gloss, errG, epoch)
                    drloss = ulo(drloss, errD_real, epoch)
                    dfloss = ulo(dfloss, errD_fake, epoch)
                closs = ulo(closs, errD_real, epoch)
        decay(ccf, epoch, optimizerCl)
        netAE.eval()
        if (epoch % 2 == 0 and epoch <= 10) or (epoch % 10 == 0 and epoch > 10):
            print(epoch, "AE", np.round(np.array([closs] + ([gloss, drloss, dfloss] if cfg["aeGAN"][0] else [])), 5), cfg["pr"])
        if np.isnan(closs):
            print("Failed!!!")
            return None, None
    lcfg = {"AELo": closs}
    if cfg["aeGAN"][0]: lcfg = {**lcfg, **{"glo": gloss, "drlo": drloss, "dflo": dfloss}}
    netAE.eval()
    return netAE, lcfg
| 10,633 | 49.398104 | 203 | py |
DoTra | DoTra-main/trainClassifiers.py | #Source code for 'Domain Transformer: Predicting Samples of Unseen, Future Domains' by Johannes Schneider, IJCNN, 2022, https://arxiv.org/abs/2106.06057; Github; https://github.com/JohnTailor/DoTra
#Licence: Use it however you like, but cite the paper :-)
#Training of classifiers (and also DoTra on paired samples)
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
import torch.cuda.amp as tca
from classifierModels import worNet,linNet
from torch.utils.data import Dataset,TensorDataset
from doTraModel import Trans
import imgutil
niter = 1e10  # effectively unbounded batch cap; default `niter` for evaluation loops below
def decay(ccf, epoch, optimizerCl, warmup, trep):
    """LR schedule with warm-up and step decay.

    At epoch == warmup[0], lr is scaled up by warmup[1] (a third of that for
    non-SGD configs). For SGD, lr is additionally multiplied by 0.1 every
    int(trep//3 + 10 + warmup[0]) epochs.
    """
    if epoch == warmup[0]:
        factor = warmup[1] if ccf["opt"][0] == "S" else warmup[1] / 3
        for group in optimizerCl.param_groups:
            group['lr'] *= factor
        print(" W", np.round(optimizerCl.param_groups[0]['lr'], 5))
    if ccf["opt"][0] == "S" and (epoch + 1) % int(trep // 3 + 10 + warmup[0]) == 0:
        for group in optimizerCl.param_groups:
            group['lr'] *= 0.1
        print(" D", np.round(optimizerCl.param_groups[0]['lr'], 5))
def getSingleAcc(net, dsx, labels, pool=None):
    """Count how many samples in the batch `dsx` are classified correctly.

    Runs `net` under autocast, takes the per-row argmax and compares it to
    `labels`. Returns the number of matches as a Python int.
    """
    with tca.autocast():
        predictions = net(dsx)
    _, top = torch.max(predictions.data, 1)
    return torch.eq(top, labels).sum().item()
def getAcc(net, dataset, niter=10000, cfg=None):
    """Accuracy of `net` over up to `niter` batches of `dataset` (on GPU, no grads).

    Returns the fraction of correctly classified samples as a Python float.
    """
    correct, total = 0, 0
    net.eval()
    with torch.no_grad():
        for cit, data in enumerate(dataset):
            with tca.autocast():
                dsx, dsy = data[0].cuda(), data[1].cuda().unsqueeze(-1)
                outputs = net(dsx)  # if useAtt: # errD_real = loss(output[0], dsy.long())+loss(output[1], dsy.long()) # output=output[1] #prediction outputs # else:
                total += dsy.size(0)
                _, predicted = torch.max(outputs, 1)
                correct += torch.eq(predicted, dsy.squeeze().long()).sum()
            if cit >= niter: break
    return float((correct * 1.0 / total).cpu().numpy())
def getCorr(net, dataset):
    """Per-sample correctness and confidence of `net` over `dataset`.

    Returns (correct, conf): a boolean array marking correct predictions and
    the corresponding max-logit values, concatenated over all batches.
    """
    correct = []
    conf = []
    net.eval()
    with torch.no_grad():
        for cit, data in enumerate(dataset):
            with tca.autocast():
                dsx, dsy = data[0].cuda(), data[1].cuda().unsqueeze(-1)
                outputs = net(dsx)
                preconf, predicted = torch.max(outputs, 1)  # (max logit, argmax class)
                correct.append(torch.eq(predicted, dsy.squeeze().long()).detach().cpu().numpy())
                conf.append(preconf.detach().cpu().numpy())
    return np.concatenate(correct, axis=0), np.concatenate(conf, axis=0)
def getCls(net, dataset):
    """Predicted class labels of `net` over `dataset`, alongside the inputs.

    Returns (X, Y_pred) as numpy arrays concatenated over all batches; note the
    ground-truth labels are discarded and replaced by the predictions.
    """
    net.eval()
    bx, by = [], []
    with torch.no_grad():
        for cit, data in enumerate(dataset):
            with tca.autocast():
                dsx, dsy = data[0].cuda(), data[1].cuda().unsqueeze(-1)
                outputs = net(dsx)  # if useAtt: # errD_real = loss(output[0], dsy.long())+loss(output[1], dsy.long()) # output=output[1] #prediction outputs # else:
                _, predicted = torch.max(outputs, 1)
                by.append(predicted.detach().cpu().numpy())
                bx.append(data[0].cpu().numpy())
    return np.concatenate(bx, axis=0), np.concatenate(by, axis=0)
def setEval(netCl):
    """Put `netCl` into eval mode and zero every dropout probability,
    including the recurrent dropout of LSTM/GRU modules.
    """
    netCl.eval()
    for _, module in netCl.named_modules():
        if isinstance(module, nn.Dropout):
            module.p = 0
        elif isinstance(module, (nn.LSTM, nn.GRU)):
            module.dropout = 0
def getTrans(cfg,train_dataset,val_dataset,dat,traname,cget,selfTra=False):
ccf=cfg["trcfg"]
NETWORK = Trans #if "V" in ccf["netT"] else (res.ResNet10 if ccf["netT"] == "R10" else res.ResNet18)
netCl = NETWORK(cfg).cuda()
closs, teaccs, trep, clr,telo = 0, [], cfg["epC"], ccf["opt"][1],0
loss = nn.MSELoss() if cfg["traLo"] else nn.L1Loss()
warmup = (max(2,trep//40), 10)
#if ccf["opt"][0] == "S": optimizerCl = optim.SGD(netCl.parameters(), lr=ccf["opt"][1]/warmup[1], momentum=0.8, weight_decay=ccf["opt"][2])
#elif ccf["opt"][0] == "A": #else: "Error opt not found"
optimizerCl = optim.Adam(netCl.parameters(), ccf["opt"][1], weight_decay=ccf["opt"][2])
print("Train Trans",ccf)
scaler = tca.GradScaler()
nDom=2#len(cfg["trans"])+1-cfg["nFor"] #Last for testing
inds=np.zeros(3,dtype=np.int)
torch.backends.cudnn.benchmark = True
for epoch in range(trep):
netCl.train()
for i, data in enumerate(train_dataset):
with tca.autocast():
optimizerCl.zero_grad()
xdata=data[:-1]
if cfg["singleIn"]:
inds[0] = np.random.choice(nDom - 1) # -4 = X0, -3=X1, -2=XTe, -1=XPreTest
inds[1] = inds[0]
inds[2] = inds[1] + 1
else:
inds[0]=np.random.choice(nDom - 2) #-4 = X0, -3=X1, -2=XTe, -1=XPreTest
inds[1] = inds[0] + 1
if cfg["ranCh"]: inds[1] +=np.random.choice(nDom - 2-inds[0])
inds[2]=inds[1]+1
dsx=[xdata[cind].cuda() for cind in inds] #dsy = data[1].cuda()
output = netCl(dsx[:2])
errD_real = loss(output,dsx[-1]) # errD_real.backward() # optimizerCl.step()
scaler.scale(errD_real).backward()
scaler.step(optimizerCl)
scaler.update()
closs = 0.97 * closs + 0.03 * errD_real.item() if i > 20 else 0.8 * closs + 0.2 * errD_real.item()
decay(ccf,epoch,optimizerCl,warmup,trep)
netCl.eval()
#if epoch%16==0: store(pre="Ep_" + str(epoch)+"_",dirapp="Tmp",output=output,xdata=xdata)
if (epoch % 2 == 0 and epoch<=10) or (epoch % 10==0 and epoch>10):
print(epoch, np.round(np.array([closs]),5), cfg["pr"])#teAccs[-1], clAcc(train_dataset)
if np.isnan(closs):
print("Failed!!!")
return None,None,None
def getLo(ds,off=0):
telo,nele=0,0
for i, xdata in enumerate(ds):
with tca.autocast():
with torch.no_grad():
ainds = [nDom - 3+off, nDom - 2+off, nDom - 1+off] # Shift by one to get test
dsx = [xdata[cind].cuda() for cind in ainds]
output = netCl(dsx[:2])
telo += loss(output, dsx[-1])
nele+=dsx[0].shape[0]
return (telo/nele).item()
    def transform(cfg, orgTransModel, traX, traY):
        """Apply the (frozen) transformer model twice to `traX` and return a DataLoader
        over the transformed data paired with the original labels `traY`.

        NOTE(review): the model is applied twice in a row (out1 then output) —
        presumably advancing two domain steps; confirm against the caller.
        """
        def cds(X, Y, shuffle=False, norm=True):
            # Wrap numpy arrays into a DataLoader; optionally normalize images first.
            noX = imgutil.nor(X.astype(np.float32)) if norm else X
            ds = TensorDataset(torch.from_numpy(noX), torch.from_numpy(Y))
            return torch.utils.data.DataLoader(ds, batch_size=cfg["batchSize"], shuffle=shuffle, num_workers=0)
        cajx, cajy = [], []
        orgTransModel.eval()
        for data in cds(traX, traY, shuffle=False):
            with tca.autocast():
            # with torch.no_grad():
                dsx = data[0].cuda()
                # Model expects a [prev, current] pair; None means "no previous domain".
                out1 = orgTransModel([None, dsx])
                output = orgTransModel([None, out1]).detach().cpu()
                cajx.append(output.clone().numpy())
                cajy.append(data[-1].clone().numpy())
        return cds(np.concatenate(cajx, axis=0), np.concatenate(cajy, axis=0))
lcfg = { "trLo": closs,"tetrLo": getLo(train_dataset),"teteLo": getLo(val_dataset)}#,"D1AccTra":traAcc
setEval(netCl)
return netCl, lcfg,False
def getclassifier(cfg,train_dataset,val_dataset,sname,getc,save=False,loadCl=True,useLat=False):
    """Train a classifier network with SGD + AMP and report accuracies.

    Args:
        cfg: experiment config; uses cfg["clcfg"] (optimizer spec), cfg["epC"]
            (number of epochs) and cfg["pr"] (print tag).
        train_dataset, val_dataset: iterables yielding (x, y) batches.
        sname: run name, used for logging only.
        getc, save, loadCl: kept for interface compatibility (unused here).
        useLat: if True use the latent-space linear net, else the full net.

    Returns:
        (netCl, lcfg) on success, where lcfg holds final/train/max accuracies
        and the smoothed loss; (None, None, None) if the loss diverged to NaN.
    """
    print(sname,"Cl")
    ccf=cfg["clcfg"]
    # Backbone choice; linNet/worNet are defined elsewhere in this module.
    if useLat: NETWORK = linNet
    else: NETWORK = worNet #if "V" in ccf["netT"] else (res.ResNet10 if ccf["netT"] == "R10" else res.ResNet18)
    netCl = NETWORK(cfg).cuda()
    closs, trep = 0, cfg["epC"]  # smoothed loss, number of epochs
    warmup = (max(2,trep//40), 10)  # (warmup epochs, lr divisor during warmup)
    if ccf["opt"][0] == "S":
        optimizerCl = optim.SGD(netCl.parameters(), lr=ccf["opt"][1]/warmup[1], momentum=0.9, weight_decay=ccf["opt"][2])
    else:
        # Fix: this branch was a bare string literal ("Error opt not found") that
        # silently did nothing and led to a confusing NameError for optimizerCl
        # below. Fail fast with a clear error instead.
        raise ValueError("Unsupported optimizer type: {}".format(ccf["opt"][0]))
    print("Train CL",sname,ccf)
    scaler = tca.GradScaler()
    teAccs=[]
    clAcc = lambda dataset: getAcc(netCl, dataset, niter=niter,cfg=cfg)
    crolo=nn.CrossEntropyLoss()
    torch.backends.cudnn.benchmark = True
    for epoch in range(trep):
        netCl.train()
        for i, data in enumerate(train_dataset):
            with tca.autocast():
                optimizerCl.zero_grad()
                dsx,dsy = data[0].cuda(),data[1].cuda()
                output = netCl(dsx)
                errD_real = crolo(output,dsy.long())
                # NOTE(review): backward/step run inside autocast (as in the
                # original); PyTorch AMP docs recommend moving them outside.
                scaler.scale(errD_real).backward()
                scaler.step(optimizerCl)
                scaler.update()
                # Exponential moving average of the loss; shorter memory early on.
                closs = 0.97 * closs + 0.03 * errD_real.item() if i > 20 else 0.8 * closs + 0.2 * errD_real.item()
        decay(ccf,epoch,optimizerCl,warmup,trep)
        netCl.eval()
        teAccs.append(clAcc(val_dataset))
        # Log frequently at the start, then every 10 epochs.
        if (epoch % 2 == 0 and epoch<=10) or (epoch % 10==0 and epoch>10):
            print(epoch, np.round(np.array([closs, teAccs[-1], clAcc(train_dataset)]), 5), cfg["pr"])
        if np.isnan(closs):
            print("Failed!!!")
            # NOTE(review): failure path returns 3 values while success returns 2;
            # kept as-is for backward compatibility with existing callers.
            return None,None,None
    mteA=np.max(np.array(teAccs))
    lcfg = {"teA": teAccs[-1], "trA": clAcc(train_dataset), "Lo": closs,"mteA":mteA}
    setEval(netCl)
    return netCl, lcfg
def getLinCl(cfg,train_dataset,val_dataset,sname,getc,save=True,loadCl=True):
    """Train the small linear classifier (LinCl) on latent features.

    Same training scheme as getclassifier but with a quartered learning rate
    and a fifth of the weight decay. Returns (netCl, lcfg) on success and
    (None, None, None) if the smoothed loss diverged to NaN.
    """
    ccf=cfg["clcfg"]
    from aecyc.latAEModels import LinCl
    netCl = LinCl(cfg).cuda()
    closs, trep, clr = 0, cfg["epC"], ccf["opt"][1]/4 #Train just 1/2 as long
    warmup = (max(2,trep//40), 10)  # (warmup epochs, lr divisor during warmup)
    if ccf["opt"][0] == "S":
        optimizerCl = optim.SGD(netCl.parameters(), lr=clr/warmup[1], momentum=0.8, weight_decay=ccf["opt"][2]/5)
    else:
        # Fix: this branch was a bare string literal that silently did nothing
        # and caused a NameError for optimizerCl below; fail fast instead.
        raise ValueError("Unsupported optimizer type: {}".format(ccf["opt"][0]))
    print("Train CL",sname,ccf)
    scaler = tca.GradScaler()
    teAccs=[]
    clAcc = lambda dataset: getAcc(netCl, dataset, niter=niter,cfg=cfg)
    crolo=nn.CrossEntropyLoss()
    for epoch in range(trep):
        netCl.train()
        for i, data in enumerate(train_dataset):
            with tca.autocast():
                optimizerCl.zero_grad()
                dsx,dsy = data[0].cuda(),data[1].cuda()
                output = netCl(dsx)
                errD_real = crolo(output,dsy.long())
                scaler.scale(errD_real).backward()
                scaler.step(optimizerCl)
                scaler.update()
                # Exponential moving average of the loss; shorter memory early on.
                closs = 0.97 * closs + 0.03 * errD_real.item() if i > 20 else 0.8 * closs + 0.2 * errD_real.item()
        decay(ccf,epoch,optimizerCl,warmup,trep)
        netCl.eval()
        teAccs.append(clAcc(val_dataset))
        if (epoch % 2 == 0 and epoch<=10) or (epoch % 10==0 and epoch>10):
            print(epoch, np.round(np.array([closs, teAccs[-1], clAcc(train_dataset)]), 5), cfg["pr"])
        if np.isnan(closs):
            print("Failed!!!")
            return None,None,None
    lcfg = {"LiteA": clAcc(val_dataset), "LitrA": clAcc(train_dataset), "LiLo": closs}
    setEval(netCl)
    return netCl, lcfg
DoTra | DoTra-main/dutils.py | #Source code for 'Domain Transformer: Predicting Samples of Unseen, Future Domains' by Johannes Schneider, IJCNN, 2022, https://arxiv.org/abs/2106.06057; Github; https://github.com/JohnTailor/DoTra
#Licence: Use it however you like, but cite the paper :-)
from scipy import ndimage
from torch.utils.data import Dataset
import numpy as np,os,sklearn #,pickle imageio,time,
import torchvision,torch
import torchvision.transforms as transforms
def getnorm(dname):
    """Return the (mean, std) normalization tensors for dataset `dname`.

    Both tensors are shaped (1, 1, 1, 1) for broadcasting over NCHW batches
    and are moved to the GPU. Falls through (returning None implicitly) for
    datasets without an entry.
    """
    if dname == "MNIST":
        mean = torch.from_numpy(np.array((0.1307), np.float32).reshape(1, 1, 1, 1)).cuda()
        std = torch.from_numpy(np.array((0.3081), np.float32).reshape(1, 1, 1, 1)).cuda()
        return (mean, std)
def getFullDS(cfg,ntrain,sname,cget):
    """Load (or download, resize and cache) the full dataset as normalized numpy arrays.

    Downloads via torchvision on first use (or when `cget` is truthy), shuffles,
    stores float16 copies to disk ("Mnist{tr,te}{X,Y}"), then loads and
    normalizes with the dataset's (mean, std) from getnorm().

    Args:
        cfg: config dict; uses cfg["ds"][0] (dataset name), cfg["imSi"] (image size)
            and writes cfg["imCh"] (channel count).
        ntrain: number of samples to materialize per split.
        sname: unused here (kept for interface compatibility with callers).
        cget: if truthy, force re-download/re-cache.

    Returns:
        ((trX, trY), (teX, teY)) as numpy arrays, X normalized.
    """
    dname=cfg["ds"][0]
    trans=transforms.Compose([transforms.ToTensor()])
    # NOTE(review): only MNIST is wired up here; other names leave `cdat` unset.
    if dname == "MNIST":
        cdat = torchvision.datasets.MNIST
        cfg["imCh"] = 1
    down=True
    cpa="."
    fname="Mnist"
    # (Re)build the on-disk cache if missing or explicitly requested.
    if not os.path.exists(fname+"teX") or cget:
        os.makedirs(cpa,exist_ok=True)
        def loadStore(isTrain,ndat):
            # Materialize one split: pull `ndat` samples in a single batch,
            # optionally resize, shuffle, and dump to disk as float16/int16.
            trainset = cdat(root=".", train=isTrain, download=down,transform=trans)
            train_dataset = torch.utils.data.DataLoader(trainset, batch_size=ndat, num_workers=0) # cfg["batchSize"]
            ds = next(iter(train_dataset))
            X,y=ds[0].clone().numpy(),ds[1].clone().numpy()
            print("Data stats",dname,X.shape,np.mean(X,axis=(0,2,3)),np.std(X,axis=(0,2,3)))
            # Resize 28x28 images to cfg["imSi"] via spline zoom when needed.
            if (dname == "MNIST" or dname == "Fash") and cfg["imSi"]!=28:
                X=[ndimage.zoom(X[i,0],cfg["imSi"]/28) for i in range(X.shape[0])]
                X=np.stack(X,axis=0)
                X=np.expand_dims(X,axis=1)
            ds = [X, y]
            ds = sklearn.utils.shuffle(*ds) # , random_state=cfg["seed"])
            t=np.float16
            preamb="tr" if isTrain else "te"
            with open(fname + preamb+"X", "wb") as f: np.save(f, ds[0].astype(t), allow_pickle=True)
            with open(fname + preamb+"Y", "wb") as f: np.save(f, ds[1].astype(np.int16), allow_pickle=True)
            #return trainset
        loadStore(True,ntrain)
        loadStore(False, ntrain)
    # Load cached arrays and normalize X with the dataset's (mean, std).
    lo = lambda na: np.load(open(fname + na, "rb"), allow_pickle=True)
    trX,trY=lo("trX"),lo("trY")
    teX,teY=lo("teX"),lo("teY")
    norm=getnorm(dname)
    trX = (trX - norm[0].cpu().numpy()) / norm[1].cpu().numpy()
    teX = (teX - norm[0].cpu().numpy()) / norm[1].cpu().numpy()
    return (trX, trY), (teX, teY)#, None,norm
| 2,472 | 44.796296 | 198 | py |
OpenFWI | OpenFWI-main/pytorch_ssim.py | # From https://github.com/Po-Hsun-Su/pytorch-ssim/blob/master/pytorch_ssim/__init__.py
import torch
import torch.nn.functional as F
from torch.autograd import Variable
import numpy as np
from math import exp
def gaussian(window_size, sigma):
    """Return a 1-D Gaussian kernel of length `window_size`, normalized to sum to 1."""
    center = window_size // 2
    weights = [exp(-((i - center) ** 2) / float(2 * sigma ** 2)) for i in range(window_size)]
    kernel = torch.Tensor(weights)
    return kernel / kernel.sum()
def create_window(window_size, channel):
    """Build a (channel, 1, window_size, window_size) Gaussian window (sigma=1.5)
    suitable as a depthwise conv kernel for the SSIM computation."""
    kernel_1d = gaussian(window_size, 1.5).unsqueeze(1)
    # Outer product of the 1-D kernel with itself gives the separable 2-D kernel.
    kernel_2d = kernel_1d.mm(kernel_1d.t()).float().unsqueeze(0).unsqueeze(0)
    return Variable(kernel_2d.expand(channel, 1, window_size, window_size).contiguous())
def _ssim(img1, img2, window, window_size, channel, size_average = True):
mu1 = F.conv2d(img1, window, padding = window_size//2, groups = channel)
mu2 = F.conv2d(img2, window, padding = window_size//2, groups = channel)
mu1_sq = mu1.pow(2)
mu2_sq = mu2.pow(2)
mu1_mu2 = mu1*mu2
sigma1_sq = F.conv2d(img1*img1, window, padding = window_size//2, groups = channel) - mu1_sq
sigma2_sq = F.conv2d(img2*img2, window, padding = window_size//2, groups = channel) - mu2_sq
sigma12 = F.conv2d(img1*img2, window, padding = window_size//2, groups = channel) - mu1_mu2
C1 = 0.01**2
C2 = 0.03**2
ssim_map = ((2*mu1_mu2 + C1)*(2*sigma12 + C2))/((mu1_sq + mu2_sq + C1)*(sigma1_sq + sigma2_sq + C2))
if size_average:
return ssim_map.mean()
else:
return ssim_map.mean(1).mean(1).mean(1)
class SSIM(torch.nn.Module):
    """Structural similarity (SSIM) as an nn.Module.

    Caches the Gaussian window and rebuilds it lazily whenever the input's
    channel count or dtype changes, moving it to the input's device.
    """
    def __init__(self, window_size = 11, size_average = True):
        super(SSIM, self).__init__()
        self.window_size = window_size
        self.size_average = size_average
        # Window is built for 1 channel up front and re-created on demand in forward().
        self.channel = 1
        self.window = create_window(window_size, self.channel)
    def forward(self, img1, img2):
        # Inputs are NCHW; derive channel count from img1.
        (_, channel, _, _) = img1.size()
        # Reuse the cached window only if both channel count and dtype match.
        if channel == self.channel and self.window.data.type() == img1.data.type():
            window = self.window
        else:
            window = create_window(self.window_size, channel)
            if img1.is_cuda:
                window = window.cuda(img1.get_device())
            window = window.type_as(img1)
            # Cache for subsequent calls with the same channel count/dtype.
            self.window = window
            self.channel = channel
        return _ssim(img1, img2, window, self.window_size, channel, self.size_average)
def ssim(img1, img2, window_size = 11, size_average = True):
    """Functional SSIM: builds a Gaussian window matching img1's channels,
    device and dtype, then delegates to _ssim."""
    (_, channel, _, _) = img1.size()
    window = create_window(window_size, channel)
    if img1.is_cuda:
        window = window.cuda(img1.get_device())
    window = window.type_as(img1)
    return _ssim(img1, img2, window, window_size, channel, size_average)
OpenFWI | OpenFWI-main/test.py | # © 2022. Triad National Security, LLC. All rights reserved.
# This program was produced under U.S. Government contract 89233218CNA000001 for Los Alamos
# National Laboratory (LANL), which is operated by Triad National Security, LLC for the U.S.
# Department of Energy/National Nuclear Security Administration. All rights in the program are
# reserved by Triad National Security, LLC, and the U.S. Department of Energy/National Nuclear
# Security Administration. The Government is granted for itself and others acting on its behalf a
# nonexclusive, paid-up, irrevocable worldwide license in this material to reproduce, prepare
# derivative works, distribute copies to the public, perform publicly and display publicly, and to permit
# others to do so.
import os
import sys
import time
import datetime
import json
import torch
import torch.nn as nn
from torch.utils.data import SequentialSampler
from torch.utils.data.dataloader import default_collate
import torchvision
from torchvision.transforms import Compose
import numpy as np
import utils
import network
from vis import *
from dataset import FWIDataset
import transforms as T
import pytorch_ssim
def evaluate(model, criterions, dataloader, device, k, ctx,
             vis_path, vis_batch, vis_sample, missing, std):
    """Run the inverse model over `dataloader`, print MAE/MSE/SSIM and the
    supplied `criterions` on denormalized predictions, and optionally save
    velocity/seismic visualizations.

    Args:
        model: trained inverse model (seismic data -> velocity map).
        criterions: dict name -> fn(label, pred) on denormalized numpy arrays.
        k: log-transform parameter used when denormalizing seismic data.
        ctx: dataset context with data/label min/max for denormalization.
        vis_path/vis_batch/vis_sample: where and how many samples to plot
            (no plotting when vis_path is falsy).
        missing: number of traces to mute; std: variance-like noise level.
            When either is nonzero the model is evaluated on perturbed input.
    """
    model.eval()
    label_list, label_pred_list= [], [] # store denormalized predcition & gt in numpy
    label_tensor, label_pred_tensor = [], [] # store normalized prediction & gt in tensor
    if missing or std:
        data_list, data_noise_list = [], [] # store original data and noisy/muted data
    with torch.no_grad():
        batch_idx = 0
        for data, label in dataloader:
            data = data.type(torch.FloatTensor).to(device, non_blocking=True)
            label = label.type(torch.FloatTensor).to(device, non_blocking=True)
            label_np = T.tonumpy_denormalize(label, ctx['label_min'], ctx['label_max'], exp=False)
            label_list.append(label_np)
            label_tensor.append(label)
            if missing or std:
                # Add gaussian noise
                data_noise = torch.clip(data + (std ** 0.5) * torch.randn(data.shape).to(device, non_blocking=True), min=-1, max=1)
                # Mute some traces
                mute_idx = np.random.choice(data.shape[3], size=missing, replace=False)
                data_noise[:, :, :, mute_idx] = data[0, 0, 0, 0]
                data_np = T.tonumpy_denormalize(data, ctx['data_min'], ctx['data_max'], k=k)
                data_noise_np = T.tonumpy_denormalize(data_noise, ctx['data_min'], ctx['data_max'], k=k)
                data_list.append(data_np)
                data_noise_list.append(data_noise_np)
                pred = model(data_noise)
            else:
                pred = model(data)
            label_pred_np = T.tonumpy_denormalize(pred, ctx['label_min'], ctx['label_max'], exp=False)
            label_pred_list.append(label_pred_np)
            label_pred_tensor.append(pred)
            # Visualization
            if vis_path and batch_idx < vis_batch:
                for i in range(vis_sample):
                    plot_velocity(label_pred_np[i, 0], label_np[i, 0], f'{vis_path}/V_{batch_idx}_{i}.png') #, vmin=ctx['label_min'], vmax=ctx['label_max'])
                    if missing or std:
                        for ch in [2]: # range(data.shape[1]):
                            plot_seismic(data_np[i, ch], data_noise_np[i, ch], f'{vis_path}/S_{batch_idx}_{i}_{ch}.png',
                                        vmin=ctx['data_min'] * 0.01, vmax=ctx['data_max'] * 0.01)
            batch_idx += 1
    # Metrics on denormalized numpy arrays (criterions) and normalized tensors (MAE/MSE/SSIM).
    label, label_pred = np.concatenate(label_list), np.concatenate(label_pred_list)
    label_t, pred_t = torch.cat(label_tensor), torch.cat(label_pred_tensor)
    l1 = nn.L1Loss()
    l2 = nn.MSELoss()
    print(f'MAE: {l1(label_t, pred_t)}')
    print(f'MSE: {l2(label_t, pred_t)}')
    ssim_loss = pytorch_ssim.SSIM(window_size=11)
    print(f'SSIM: {ssim_loss(label_t / 2 + 0.5, pred_t / 2 + 0.5)}') # (-1, 1) to (0, 1)
    for name, criterion in criterions.items():
        print(f' * Velocity {name}: {criterion(label, label_pred)}')
    # print(f' | Velocity 2 layers {name}: {criterion(label[:1000], label_pred[:1000])}')
    # print(f' | Velocity 3 layers {name}: {criterion(label[1000:2000], label_pred[1000:2000])}')
    # print(f' | Velocity 4 layers {name}: {criterion(label[2000:], label_pred[2000:])}')
def main(args):
    """Test-time entry point: build the validation loader and the inverse model,
    optionally restore a checkpoint, then run evaluate()."""
    print(args)
    print("torch version: ", torch.__version__)
    print("torchvision version: ", torchvision.__version__)
    utils.mkdir(args.output_path)
    device = torch.device(args.device)
    torch.backends.cudnn.benchmark = True
    # Per-dataset normalization ranges and file sizes come from dataset_config.json.
    with open('dataset_config.json') as f:
        try:
            ctx = json.load(f)[args.dataset]
        except KeyError:
            print('Unsupported dataset.')
            sys.exit()
    if args.file_size is not None:
        ctx['file_size'] = args.file_size
    print("Loading data")
    print("Loading validation data")
    # Seismic data is log-transformed then min-max normalized; labels only min-max.
    log_data_min = T.log_transform(ctx['data_min'], k=args.k)
    log_data_max = T.log_transform(ctx['data_max'], k=args.k)
    transform_valid_data = Compose([
        T.LogTransform(k=args.k),
        T.MinMaxNormalize(log_data_min, log_data_max),
    ])
    transform_valid_label = Compose([
        T.MinMaxNormalize(ctx['label_min'], ctx['label_max'])
    ])
    # A .txt annotation builds an FWIDataset; anything else is a pickled dataset.
    if args.val_anno[-3:] == 'txt':
        dataset_valid = FWIDataset(
            args.val_anno,
            sample_ratio=args.sample_temporal,
            file_size=ctx['file_size'],
            transform_data=transform_valid_data,
            transform_label=transform_valid_label
        )
    else:
        dataset_valid = torch.load(args.val_anno)
    print("Creating data loaders")
    valid_sampler = SequentialSampler(dataset_valid)
    dataloader_valid = torch.utils.data.DataLoader(
        dataset_valid, batch_size=args.batch_size,
        sampler=valid_sampler, num_workers=args.workers,
        pin_memory=True, collate_fn=default_collate)
    print("Creating model")
    if args.model not in network.model_dict:
        print('Unsupported model.')
        sys.exit()
    model = network.model_dict[args.model](upsample_mode=args.up_mode,
        sample_spatial=args.sample_spatial, sample_temporal=args.sample_temporal, norm=args.norm).to(device)
    # Extra numpy-side metrics evaluated on denormalized outputs.
    criterions = {
        'MAE': lambda x, y: np.mean(np.abs(x - y)),
        'MSE': lambda x, y: np.mean((x - y) ** 2)
    }
    if args.resume:
        print(args.resume)
        checkpoint = torch.load(args.resume, map_location='cpu')
        # replace_legacy re-keys checkpoints saved with older block names.
        model.load_state_dict(network.replace_legacy(checkpoint['model']))
        print('Loaded model checkpoint at Epoch {} / Step {}.'.format(checkpoint['epoch'], checkpoint['step']))
    if args.vis:
        # Create folder to store visualization results
        vis_folder = f'visualization_{args.vis_suffix}' if args.vis_suffix else 'visualization'
        vis_path = os.path.join(args.output_path, vis_folder)
        utils.mkdir(vis_path)
    else:
        vis_path = None
    print("Start testing")
    start_time = time.time()
    evaluate(model, criterions, dataloader_valid, device, args.k, ctx,
        vis_path, args.vis_batch, args.vis_sample, args.missing, args.std)
    total_time = time.time() - start_time
    total_time_str = str(datetime.timedelta(seconds=int(total_time)))
    print('Testing time {}'.format(total_time_str))
def parse_args():
    """Parse command-line arguments for testing and resolve derived paths."""
    import argparse
    parser = argparse.ArgumentParser(description='FCN Testing')
    parser.add_argument('-d', '--device', default='cuda', help='device')
    parser.add_argument('-ds', '--dataset', default='flatfault-b', type=str, help='dataset name')
    parser.add_argument('-fs', '--file-size', default=None, type=int, help='number of samples in each npy file')
    # Path related
    parser.add_argument('-ap', '--anno-path', default='split_files', help='annotation files location')
    parser.add_argument('-v', '--val-anno', default='flatfault_b_val_invnet.txt', help='name of val anno')
    parser.add_argument('-o', '--output-path', default='Invnet_models', help='path to parent folder to save checkpoints')
    parser.add_argument('-n', '--save-name', default='fcn_l1loss_ffb', help='folder name for this experiment')
    parser.add_argument('-s', '--suffix', type=str, default=None, help='subfolder name for this run')
    # Model related
    parser.add_argument('-m', '--model', type=str, help='inverse model name')
    parser.add_argument('-no', '--norm', default='bn', help='normalization layer type, support bn, in, ln (default: bn)')
    parser.add_argument('-um', '--up-mode', default=None, help='upsampling layer mode such as "nearest", "bicubic", etc.')
    parser.add_argument('-ss', '--sample-spatial', type=float, default=1.0, help='spatial sampling ratio')
    parser.add_argument('-st', '--sample-temporal', type=int, default=1, help='temporal sampling ratio')
    # Test related
    parser.add_argument('-b', '--batch-size', default=50, type=int)
    parser.add_argument('-j', '--workers', default=16, type=int, help='number of data loading workers (default: 16)')
    parser.add_argument('--k', default=1, type=float, help='k in log transformation')
    parser.add_argument('-r', '--resume', default=None, help='resume from checkpoint')
    parser.add_argument('--vis', help='visualization option', action="store_true")
    parser.add_argument('-vsu','--vis-suffix', default=None, type=str, help='visualization suffix')
    parser.add_argument('-vb','--vis-batch', help='number of batch to be visualized', default=0, type=int)
    parser.add_argument('-vsa', '--vis-sample', help='number of samples in a batch to be visualized', default=0, type=int)
    parser.add_argument('--missing', default=0, type=int, help='number of missing traces')
    parser.add_argument('--std', default=0, type=float, help='standard deviation of gaussian noise')
    args = parser.parse_args()
    # Resolve derived paths.
    args.output_path = os.path.join(args.output_path, args.save_name, args.suffix or '')
    args.val_anno = os.path.join(args.anno_path, args.val_anno)
    # Fix: only resolve the checkpoint path when one was given. Previously this
    # joined unconditionally and os.path.join(..., None) raised a TypeError
    # whenever -r/--resume was omitted (gan_train.py already guards this).
    if args.resume:
        args.resume = os.path.join(args.output_path, args.resume)
    return args
# Script entry point: parse CLI arguments and run testing.
if __name__ == '__main__':
    args = parse_args()
    main(args)
| 10,383 | 42.814346 | 156 | py |
OpenFWI | OpenFWI-main/gan_train.py | # © 2022. Triad National Security, LLC. All rights reserved.
# This program was produced under U.S. Government contract 89233218CNA000001 for Los Alamos
# National Laboratory (LANL), which is operated by Triad National Security, LLC for the U.S.
# Department of Energy/National Nuclear Security Administration. All rights in the program are
# reserved by Triad National Security, LLC, and the U.S. Department of Energy/National Nuclear
# Security Administration. The Government is granted for itself and others acting on its behalf a
# nonexclusive, paid-up, irrevocable worldwide license in this material to reproduce, prepare
# derivative works, distribute copies to the public, perform publicly and display publicly, and to permit
# others to do so.
import os
import sys
import time
import datetime
import json
import torch
from torch import nn
from torch.utils.data import RandomSampler, DataLoader
from torch.utils.data.dataloader import default_collate
from torch.utils.data.distributed import DistributedSampler
from torch.utils.tensorboard import SummaryWriter
import torchvision
from torchvision.transforms import Compose
import utils
import network
from dataset import FWIDataset
from scheduler import WarmupMultiStepLR
import transforms as T
# Need to use parallel in apex, torch ddp can cause bugs when computing gradient penalty
import apex.parallel as parallel
step = 0
def train_one_epoch(model, model_d, criterion_g, criterion_d, optimizer_g, optimizer_d,
                    lr_schedulers, dataloader, device, epoch, print_freq, writer, n_critic=5):
    """Run one WGAN-GP training epoch.

    The discriminator is updated every iteration; the generator only every
    `n_critic` iterations (and on the last iteration of the epoch). The global
    `step` counter is used for tensorboard logging and advanced per iteration.
    """
    global step
    model.train()
    model_d.train()
    # Logger setup
    metric_logger = utils.MetricLogger(delimiter=' ')
    metric_logger.add_meter('lr_g', utils.SmoothedValue(window_size=1, fmt='{value}'))
    metric_logger.add_meter('lr_d', utils.SmoothedValue(window_size=1, fmt='{value}'))
    metric_logger.add_meter('samples/s', utils.SmoothedValue(window_size=10, fmt='{value:.3f}'))
    header = 'Epoch: [{}]'.format(epoch)
    itr = 0 # step in this epoch
    max_itr = len(dataloader)
    for data, label in metric_logger.log_every(dataloader, print_freq, header):
        start_time = time.time()
        data, label = data.to(device), label.to(device)
        # Update discriminator first (generator is frozen via no_grad here).
        optimizer_d.zero_grad()
        with torch.no_grad():
            pred = model(data)
        loss_d, loss_diff, loss_gp = criterion_d(label, pred, model_d)
        loss_d.backward()
        optimizer_d.step()
        metric_logger.update(loss_diff=loss_diff, loss_gp=loss_gp)
        # Update generator occasionally
        if ((itr + 1) % n_critic == 0) or (itr == max_itr - 1):
            optimizer_g.zero_grad()
            pred = model(data)
            loss_g, loss_g1v, loss_g2v = criterion_g(pred, label, model_d)
            loss_g.backward()
            optimizer_g.step()
            metric_logger.update(loss_g1v=loss_g1v, loss_g2v=loss_g2v)
        batch_size = data.shape[0]
        metric_logger.update(lr_g=optimizer_g.param_groups[0]['lr'],
                             lr_d=optimizer_d.param_groups[0]['lr'])
        metric_logger.meters['samples/s'].update(batch_size / (time.time() - start_time))
        if writer:
            writer.add_scalar('loss_diff', loss_diff, step)
            writer.add_scalar('loss_gp', loss_gp, step)
            # Generator losses only exist on iterations where it was updated.
            if ((itr + 1) % n_critic == 0) or (itr == max_itr - 1):
                writer.add_scalar('loss_g1v', loss_g1v, step)
                writer.add_scalar('loss_g2v', loss_g2v, step)
        step += 1
        itr += 1
    # Schedulers are stepped per iteration-granular milestones once per epoch here.
    for lr_scheduler in lr_schedulers:
        lr_scheduler.step()
def evaluate(model, criterion, dataloader, device, writer):
    """Evaluate the generator on `dataloader`; log averaged losses and return
    the global average of the combined loss.

    `criterion` is called without the discriminator, so only the l1/l2 terms
    contribute here (see criterion_g in main()).
    """
    model.eval()
    metric_logger = utils.MetricLogger(delimiter=' ')
    header = 'Test:'
    with torch.no_grad():
        for data, label in metric_logger.log_every(dataloader, 20, header):
            data = data.to(device, non_blocking=True)
            label = label.to(device, non_blocking=True)
            pred = model(data)
            loss, loss_g1v, loss_g2v = criterion(pred, label)
            metric_logger.update(loss=loss.item(),
                loss_g1v=loss_g1v.item(), loss_g2v=loss_g2v.item())
    # Gather the stats from all processes
    metric_logger.synchronize_between_processes()
    print(' * Loss {loss.global_avg:.8f}\n'.format(loss=metric_logger.loss))
    if writer:
        writer.add_scalar('loss', metric_logger.loss.global_avg, step)
        writer.add_scalar('loss_g1v', metric_logger.loss_g1v.global_avg, step)
        writer.add_scalar('loss_g2v', metric_logger.loss_g2v.global_avg, step)
    return metric_logger.loss.global_avg
def main(args):
    """GAN training entry point: build datasets/loaders, generator and
    discriminator, optimizers and schedulers, optionally resume from a
    checkpoint, then train for args.epochs with per-epoch validation and
    checkpointing."""
    global step
    print(args)
    print('torch version: ', torch.__version__)
    print('torchvision version: ', torchvision.__version__)
    utils.mkdir(args.output_path) # create folder to store checkpoints
    utils.init_distributed_mode(args) # distributed mode initialization
    # Set up tensorboard summary writer
    train_writer, val_writer = None, None
    if args.tensorboard:
        utils.mkdir(args.log_path) # create folder to store tensorboard logs
        # Only the rank-0 process writes tensorboard logs in distributed mode.
        if not args.distributed or (args.rank == 0) and (args.local_rank == 0):
            train_writer = SummaryWriter(os.path.join(args.output_path, 'logs', 'train'))
            val_writer = SummaryWriter(os.path.join(args.output_path, 'logs', 'val'))
    device = torch.device(args.device)
    torch.backends.cudnn.benchmark = True
    # Per-dataset normalization ranges and file sizes come from dataset_config.json.
    with open('dataset_config.json') as f:
        try:
            ctx = json.load(f)[args.dataset]
        except KeyError:
            print('Unsupported dataset.')
            sys.exit()
    if args.file_size is not None:
        ctx['file_size'] = args.file_size
    # Create dataset and dataloader
    print('Loading data')
    print('Loading training data')
    # Seismic data is log-transformed then min-max normalized; labels only min-max.
    log_data_min = T.log_transform(ctx['data_min'], k=args.k)
    log_data_max = T.log_transform(ctx['data_max'], k=args.k)
    transform_data = Compose([
        T.LogTransform(k=args.k),
        T.MinMaxNormalize(log_data_min, log_data_max)
    ])
    transform_label = Compose([
        T.MinMaxNormalize(ctx['label_min'], ctx['label_max'])
    ])
    # A .txt annotation builds an FWIDataset; anything else is a pickled dataset.
    if args.train_anno[-3:] == 'txt':
        dataset_train = FWIDataset(
            args.train_anno,
            preload=True,
            sample_ratio=args.sample_temporal,
            file_size=ctx['file_size'],
            transform_data=transform_data,
            transform_label=transform_label
        )
    else:
        dataset_train = torch.load(args.train_anno)
    print('Loading validation data')
    if args.val_anno[-3:] == 'txt':
        dataset_valid = FWIDataset(
            args.val_anno,
            preload=True,
            sample_ratio=args.sample_temporal,
            file_size=ctx['file_size'],
            transform_data=transform_data,
            transform_label=transform_label
        )
    else:
        dataset_valid = torch.load(args.val_anno)
    print('Creating data loaders')
    if args.distributed:
        train_sampler = DistributedSampler(dataset_train, shuffle=True)
        valid_sampler = DistributedSampler(dataset_valid, shuffle=True)
    else:
        train_sampler = RandomSampler(dataset_train)
        valid_sampler = RandomSampler(dataset_valid)
    dataloader_train = DataLoader(
        dataset_train, batch_size=args.batch_size,
        sampler=train_sampler, num_workers=args.workers,
        pin_memory=True, drop_last=True, collate_fn=default_collate)
    dataloader_valid = DataLoader(
        dataset_valid, batch_size=args.batch_size,
        sampler=valid_sampler, num_workers=args.workers,
        pin_memory=True, collate_fn=default_collate)
    print('Creating model')
    if args.model not in network.model_dict or args.model_d not in network.model_dict:
        print('Unsupported model.')
        sys.exit()
    model = network.model_dict[args.model](upsample_mode=args.up_mode,
        sample_spatial=args.sample_spatial, sample_temporal=args.sample_temporal).to(device)
    model_d = network.model_dict[args.model_d]().to(device)
    if args.distributed and args.sync_bn:
        model = parallel.convert_syncbn_model(model)
        model_d = parallel.convert_syncbn_model(model_d)
    # Define loss function
    l1loss = nn.L1Loss()
    l2loss = nn.MSELoss()
    def criterion_g(pred, gt, model_d=None):
        # Generator loss: weighted l1 + l2, plus the adversarial term only when
        # a discriminator is passed (training); validation calls it without one.
        loss_g1v = l1loss(pred, gt)
        loss_g2v = l2loss(pred, gt)
        loss = args.lambda_g1v * loss_g1v + args.lambda_g2v * loss_g2v
        if model_d is not None:
            loss_adv = -torch.mean(model_d(pred))
            loss += args.lambda_adv * loss_adv
        return loss, loss_g1v, loss_g2v
    criterion_d = utils.Wasserstein_GP(device, args.lambda_gp)
    # Scale lr according to effective batch size
    lr_g = args.lr_g * args.world_size
    lr_d = args.lr_d * args.world_size
    optimizer_g = torch.optim.AdamW(model.parameters(), lr=lr_g, betas=(0, 0.9), weight_decay=args.weight_decay)
    optimizer_d = torch.optim.AdamW(model_d.parameters(), lr=lr_d, betas=(0, 0.9), weight_decay=args.weight_decay)
    # Convert scheduler to be per iteration instead of per epoch
    warmup_iters = args.lr_warmup_epochs * len(dataloader_train)
    lr_milestones = [len(dataloader_train) * m for m in args.lr_milestones]
    lr_schedulers = [WarmupMultiStepLR(
        optimizer, milestones=lr_milestones, gamma=args.lr_gamma,
        warmup_iters=warmup_iters, warmup_factor=1e-5) for optimizer in [optimizer_g, optimizer_d]]
    # Keep unwrapped references for checkpointing under DDP.
    model_without_ddp = model
    model_d_without_ddp = model_d
    if args.distributed:
        model = parallel.DistributedDataParallel(model)
        model_d = parallel.DistributedDataParallel(model_d)
        model_without_ddp = model.module
        model_d_without_ddp = model_d.module
    if args.resume:
        checkpoint = torch.load(args.resume, map_location='cpu')
        # replace_legacy re-keys checkpoints saved with older block names.
        model_without_ddp.load_state_dict(network.replace_legacy(checkpoint['model']))
        model_d_without_ddp.load_state_dict(network.replace_legacy(checkpoint['model_d']))
        optimizer_g.load_state_dict(checkpoint['optimizer_g'])
        optimizer_d.load_state_dict(checkpoint['optimizer_d'])
        args.start_epoch = checkpoint['epoch'] + 1
        step = checkpoint['step']
        for i in range(len(lr_schedulers)):
            lr_schedulers[i].load_state_dict(checkpoint['lr_schedulers'][i])
        # Re-apply current milestones in case CLI milestones differ from the checkpoint's.
        for lr_scheduler in lr_schedulers:
            lr_scheduler.milestones = lr_milestones
    print('Start training')
    start_time = time.time()
    for epoch in range(args.start_epoch, args.epochs):
        if args.distributed:
            train_sampler.set_epoch(epoch)
        train_one_epoch(model, model_d, criterion_g, criterion_d, optimizer_g, optimizer_d,
                        lr_schedulers, dataloader_train, device, epoch,
                        args.print_freq, train_writer, args.n_critic)
        evaluate(model, criterion_g, dataloader_valid, device, val_writer)
        checkpoint = {
            'model': model_without_ddp.state_dict(),
            'model_d': model_d_without_ddp.state_dict(),
            'optimizer_g': optimizer_g.state_dict(),
            'optimizer_d': optimizer_d.state_dict(),
            'lr_schedulers': [scheduler.state_dict() for scheduler in lr_schedulers],
            'epoch': epoch,
            'step': step,
            'args': args}
        # Save checkpoint per epoch
        utils.save_on_master(
            checkpoint,
            os.path.join(args.output_path, 'checkpoint.pth'))
        # Save checkpoint every epoch block
        if args.output_path and (epoch + 1) % args.epoch_block == 0:
            utils.save_on_master(
                checkpoint,
                os.path.join(args.output_path, 'model_{}.pth'.format(epoch + 1)))
    total_time = time.time() - start_time
    total_time_str = str(datetime.timedelta(seconds=int(total_time)))
    print('Training time {}'.format(total_time_str))
def parse_args():
    """Parse command-line arguments for GAN training and resolve derived paths."""
    import argparse
    parser = argparse.ArgumentParser(description='GAN Training')
    parser.add_argument('-d', '--device', default='cuda', help='device')
    parser.add_argument('-ds', '--dataset', default='flat', type=str, help='dataset name')
    # Fix: file-size is a sample count used for dataset length math; it was
    # declared type=str here while test.py declares the same flag type=int.
    parser.add_argument('-fs', '--file-size', default=None, type=int, help='number of samples in each npy file')
    # Path related
    parser.add_argument('-ap', '--anno-path', default='/vast/home/aicyd/Desktop/OpenFWI/src/', help='annotation files location')
    parser.add_argument('-t', '--train-anno', default='train_flatvel.json', help='name of train anno')
    parser.add_argument('-v', '--val-anno', default='val_flatvel.json', help='name of val anno')
    parser.add_argument('-o', '--output-path', default='models', help='path to parent folder to save checkpoints')
    parser.add_argument('-l', '--log-path', default='models', help='path to parent folder to save logs')
    parser.add_argument('-n', '--save-name', default='gan', help='folder name for this experiment')
    parser.add_argument('-s', '--suffix', type=str, default=None, help='subfolder name for this run')
    # Model related
    parser.add_argument('-m', '--model', type=str, help='generator name')
    parser.add_argument('-md', '--model-d', default='Discriminator', help='discriminator name')
    parser.add_argument('-um', '--up-mode', default=None, help='upsampling layer mode such as "nearest", "bicubic", etc.')
    parser.add_argument('-ss', '--sample-spatial', type=float, default=1.0, help='spatial sampling ratio')
    parser.add_argument('-st', '--sample-temporal', type=int, default=1, help='temporal sampling ratio')
    # Training related
    parser.add_argument('-nc', '--n_critic', default=5, type=int, help='generator & discriminator update ratio')
    parser.add_argument('-b', '--batch-size', default=64, type=int)
    parser.add_argument('--lr_g', default=0.0001, type=float, help='initial learning rate of generator')
    parser.add_argument('--lr_d', default=0.0001, type=float, help='initial learning rate of discriminator')
    parser.add_argument('-lm', '--lr-milestones', nargs='+', default=[], type=int, help='decrease lr on milestones')
    parser.add_argument('--momentum', default=0.9, type=float, help='momentum')
    parser.add_argument('--weight-decay', default=1e-4 , type=float, help='weight decay (default: 1e-4)')
    parser.add_argument('--lr-gamma', default=0.1, type=float, help='decrease lr by a factor of lr-gamma')
    parser.add_argument('--lr-warmup-epochs', default=0, type=int, help='number of warmup epochs')
    parser.add_argument('-eb', '--epoch_block', type=int, default=20, help='epochs in a saved block')
    parser.add_argument('-nb', '--num_block', type=int, default=25, help='number of saved block')
    parser.add_argument('-j', '--workers', default=16, type=int, help='number of data loading workers (default: 16)')
    parser.add_argument('--k', default=1, type=float, help='k in log transformation')
    parser.add_argument('--print-freq', default=20, type=int, help='print frequency')
    parser.add_argument('-r', '--resume', default=None, help='resume from checkpoint')
    parser.add_argument('--start-epoch', default=0, type=int, help='start epoch')
    # Loss related
    parser.add_argument('-g1v', '--lambda_g1v', type=float, default=100.0)
    parser.add_argument('-g2v', '--lambda_g2v', type=float, default=100.0)
    parser.add_argument('-adv', '--lambda_adv', type=float, default=1.0)
    parser.add_argument('-gp', '--lambda_gp', type=float, default=10.0)
    # Distributed training related
    parser.add_argument('--sync-bn', action='store_true', help='Use sync batch norm')
    parser.add_argument('--world-size', default=1, type=int, help='number of distributed processes')
    parser.add_argument('--dist-url', default='env://', help='url used to set up distributed training')
    # Tensorboard related
    parser.add_argument('--tensorboard', action='store_true', help='Use tensorboard for logging.')
    args = parser.parse_args()
    # Resolve derived paths and the total epoch count.
    args.output_path = os.path.join(args.output_path, args.save_name, args.suffix or '')
    args.log_path = os.path.join(args.log_path, args.save_name, args.suffix or '')
    args.train_anno = os.path.join(args.anno_path, args.train_anno)
    args.val_anno = os.path.join(args.anno_path, args.val_anno)
    args.epochs = args.epoch_block * args.num_block
    if args.resume:
        args.resume = os.path.join(args.output_path, args.resume)
    return args
# Script entry point: parse CLI arguments and run GAN training.
if __name__ == '__main__':
    args = parse_args()
    main(args)
| 16,662 | 43.553476 | 128 | py |
OpenFWI | OpenFWI-main/network.py | # © 2022. Triad National Security, LLC. All rights reserved.
# This program was produced under U.S. Government contract 89233218CNA000001 for Los Alamos
# National Laboratory (LANL), which is operated by Triad National Security, LLC for the U.S.
# Department of Energy/National Nuclear Security Administration. All rights in the program are
# reserved by Triad National Security, LLC, and the U.S. Department of Energy/National Nuclear
# Security Administration. The Government is granted for itself and others acting on its behalf a
# nonexclusive, paid-up, irrevocable worldwide license in this material to reproduce, prepare
# derivative works, distribute copies to the public, perform publicly and display publicly, and to permit
# others to do so.
import torch
import torch.nn as nn
import torch.nn.functional as F
from math import ceil
from collections import OrderedDict
# Normalization layers selectable via the `norm` argument of the blocks below.
NORM_LAYERS = { 'bn': nn.BatchNorm2d, 'in': nn.InstanceNorm2d, 'ln': nn.LayerNorm }
# Replace the key names in the checkpoint in which legacy network building blocks are used
def replace_legacy(old_dict):
    """Rename legacy building-block names in a checkpoint state dict to 'layers'.

    Returns a new OrderedDict (input order preserved); the input is untouched.
    NOTE: replacements are applied in a fixed order, so a key containing
    'Conv2DwithBN' as a substring of a longer legacy name (e.g.
    'ResizeConv2DwithBN') is rewritten by the first match, matching the
    historical chained-.replace() behavior.
    """
    legacy_names = ('Conv2DwithBN', 'Conv2DwithBN_Tanh',
                    'Deconv2DwithBN', 'ResizeConv2DwithBN')

    def modernize(key):
        # Same replacement order as the original chained calls.
        for name in legacy_names:
            key = key.replace(name, 'layers')
        return key

    return OrderedDict((modernize(k), v) for k, v in old_dict.items())
class Conv2DwithBN(nn.Module):
    """(Legacy) Conv2d -> optional BatchNorm2d -> LeakyReLU -> optional Dropout2d.

    NOTE: ``dropout`` only toggles the layer; the drop probability is fixed
    at 0.8 regardless of the value passed (historical behavior).
    """

    def __init__(self, in_fea, out_fea,
                 kernel_size=3, stride=1, padding=1,
                 bn=True, relu_slop=0.2, dropout=None):
        super(Conv2DwithBN, self).__init__()
        modules = [nn.Conv2d(in_channels=in_fea, out_channels=out_fea,
                             kernel_size=kernel_size, stride=stride,
                             padding=padding)]
        if bn:
            modules.append(nn.BatchNorm2d(num_features=out_fea))
        modules.append(nn.LeakyReLU(relu_slop, inplace=True))
        if dropout:
            modules.append(nn.Dropout2d(0.8))
        # Keep the historical attribute name so old checkpoints still load.
        self.Conv2DwithBN = nn.Sequential(*modules)

    def forward(self, x):
        return self.Conv2DwithBN(x)
class ResizeConv2DwithBN(nn.Module):
    """(Legacy) Upsample -> Conv2d(3x3) -> BatchNorm2d -> LeakyReLU(0.2)."""

    def __init__(self, in_fea, out_fea, scale_factor=2, mode='nearest'):
        super(ResizeConv2DwithBN, self).__init__()
        # Historical attribute name is preserved for checkpoint compatibility.
        self.ResizeConv2DwithBN = nn.Sequential(
            nn.Upsample(scale_factor=scale_factor, mode=mode),
            nn.Conv2d(in_channels=in_fea, out_channels=out_fea,
                      kernel_size=3, stride=1, padding=1),
            nn.BatchNorm2d(num_features=out_fea),
            nn.LeakyReLU(0.2, inplace=True),
        )

    def forward(self, x):
        return self.ResizeConv2DwithBN(x)
class Conv2DwithBN_Tanh(nn.Module):
    """(Legacy) Conv2d -> BatchNorm2d -> Tanh; output bounded to (-1, 1)."""

    def __init__(self, in_fea, out_fea, kernel_size=3, stride=1, padding=1):
        super(Conv2DwithBN_Tanh, self).__init__()
        # Historical attribute name is preserved for checkpoint compatibility.
        self.Conv2DwithBN = nn.Sequential(
            nn.Conv2d(in_channels=in_fea, out_channels=out_fea,
                      kernel_size=kernel_size, stride=stride, padding=padding),
            nn.BatchNorm2d(num_features=out_fea),
            nn.Tanh(),
        )

    def forward(self, x):
        return self.Conv2DwithBN(x)
class ConvBlock(nn.Module):
    """Conv2d -> optional normalization -> LeakyReLU -> optional Dropout2d.

    Args:
        in_fea, out_fea: input/output channel counts.
        kernel_size, stride, padding: forwarded to nn.Conv2d.
        norm: key into NORM_LAYERS ('bn', 'in', 'ln'); any other value
            disables normalization.
        relu_slop: negative slope of the LeakyReLU.
        dropout: if truthy, append a Dropout2d. A float value is used as the
            drop probability; any other truthy value (legacy usage such as
            ``True``) falls back to the historical default of 0.8.
    """
    def __init__(self, in_fea, out_fea, kernel_size=3, stride=1, padding=1, norm='bn', relu_slop=0.2, dropout=None):
        super(ConvBlock, self).__init__()
        layers = [nn.Conv2d(in_channels=in_fea, out_channels=out_fea, kernel_size=kernel_size, stride=stride, padding=padding)]
        if norm in NORM_LAYERS:
            layers.append(NORM_LAYERS[norm](out_fea))
        layers.append(nn.LeakyReLU(relu_slop, inplace=True))
        if dropout:
            # Bug fix: the requested probability used to be ignored and
            # hard-coded to 0.8; honor float values, keep 0.8 otherwise.
            layers.append(nn.Dropout2d(dropout if isinstance(dropout, float) else 0.8))
        self.layers = nn.Sequential(*layers)

    def forward(self, x):
        return self.layers(x)
class ConvBlock_Tanh(nn.Module):
    """Conv2d -> optional normalization -> Tanh; output bounded to (-1, 1)."""

    def __init__(self, in_fea, out_fea, kernel_size=3, stride=1, padding=1, norm='bn'):
        super(ConvBlock_Tanh, self).__init__()
        stages = [
            nn.Conv2d(in_channels=in_fea, out_channels=out_fea,
                      kernel_size=kernel_size, stride=stride, padding=padding),
        ]
        if norm in NORM_LAYERS:
            stages.append(NORM_LAYERS[norm](out_fea))
        stages.append(nn.Tanh())
        self.layers = nn.Sequential(*stages)

    def forward(self, x):
        return self.layers(x)
class DeconvBlock(nn.Module):
    """ConvTranspose2d -> optional normalization -> LeakyReLU(0.2) upsampling block."""

    def __init__(self, in_fea, out_fea, kernel_size=2, stride=2, padding=0, output_padding=0, norm='bn'):
        super(DeconvBlock, self).__init__()
        stages = [nn.ConvTranspose2d(in_channels=in_fea, out_channels=out_fea,
                                     kernel_size=kernel_size, stride=stride,
                                     padding=padding, output_padding=output_padding)]
        if norm in NORM_LAYERS:
            stages.append(NORM_LAYERS[norm](out_fea))
        stages.append(nn.LeakyReLU(0.2, inplace=True))
        self.layers = nn.Sequential(*stages)

    def forward(self, x):
        return self.layers(x)
class ResizeBlock(nn.Module):
    """Upsample -> Conv2d(3x3) -> optional normalization -> LeakyReLU(0.2)."""

    def __init__(self, in_fea, out_fea, scale_factor=2, mode='nearest', norm='bn'):
        super(ResizeBlock, self).__init__()
        stages = [
            nn.Upsample(scale_factor=scale_factor, mode=mode),
            nn.Conv2d(in_channels=in_fea, out_channels=out_fea,
                      kernel_size=3, stride=1, padding=1),
        ]
        if norm in NORM_LAYERS:
            stages.append(NORM_LAYERS[norm](out_fea))
        stages.append(nn.LeakyReLU(0.2, inplace=True))
        self.layers = nn.Sequential(*stages)

    def forward(self, x):
        return self.layers(x)
# FlatFault/CurveFault
# 1000, 70 -> 70, 70
class InversionNet(nn.Module):
    """Encoder-decoder CNN mapping seismic gathers to a 70x70 velocity map.

    Per the header comment and the shape annotations in forward(), the input
    is (N, 5, 1000, 70) and the output is (N, 1, 70, 70); the final
    ConvBlock_Tanh bounds the output to (-1, 1).

    Args:
        dim1..dim5: channel widths of the successive encoder/decoder stages.
        sample_spatial: spatial sampling ratio of the input; only changes the
            kernel width of convblock8 so the latent map stays 1x1.
    """
    def __init__(self, dim1=32, dim2=64, dim3=128, dim4=256, dim5=512, sample_spatial=1.0, **kwargs):
        super(InversionNet, self).__init__()
        # Encoder: (k, 1) kernels first collapse the time axis only, then
        # square kernels downsample both dimensions.
        self.convblock1 = ConvBlock(5, dim1, kernel_size=(7, 1), stride=(2, 1), padding=(3, 0))
        self.convblock2_1 = ConvBlock(dim1, dim2, kernel_size=(3, 1), stride=(2, 1), padding=(1, 0))
        self.convblock2_2 = ConvBlock(dim2, dim2, kernel_size=(3, 1), padding=(1, 0))
        self.convblock3_1 = ConvBlock(dim2, dim2, kernel_size=(3, 1), stride=(2, 1), padding=(1, 0))
        self.convblock3_2 = ConvBlock(dim2, dim2, kernel_size=(3, 1), padding=(1, 0))
        self.convblock4_1 = ConvBlock(dim2, dim3, kernel_size=(3, 1), stride=(2, 1), padding=(1, 0))
        self.convblock4_2 = ConvBlock(dim3, dim3, kernel_size=(3, 1), padding=(1, 0))
        self.convblock5_1 = ConvBlock(dim3, dim3, stride=2)
        self.convblock5_2 = ConvBlock(dim3, dim3)
        self.convblock6_1 = ConvBlock(dim3, dim4, stride=2)
        self.convblock6_2 = ConvBlock(dim4, dim4)
        self.convblock7_1 = ConvBlock(dim4, dim4, stride=2)
        self.convblock7_2 = ConvBlock(dim4, dim4)
        # Full-extent kernel: reduces the (8 x ceil(70*ratio/8)) map to 1x1.
        self.convblock8 = ConvBlock(dim4, dim5, kernel_size=(8, ceil(70 * sample_spatial / 8)), padding=0)
        self.deconv1_1 = DeconvBlock(dim5, dim5, kernel_size=5)
        self.deconv1_2 = ConvBlock(dim5, dim5)
        self.deconv2_1 = DeconvBlock(dim5, dim4, kernel_size=4, stride=2, padding=1)
        self.deconv2_2 = ConvBlock(dim4, dim4)
        self.deconv3_1 = DeconvBlock(dim4, dim3, kernel_size=4, stride=2, padding=1)
        self.deconv3_2 = ConvBlock(dim3, dim3)
        self.deconv4_1 = DeconvBlock(dim3, dim2, kernel_size=4, stride=2, padding=1)
        self.deconv4_2 = ConvBlock(dim2, dim2)
        self.deconv5_1 = DeconvBlock(dim2, dim1, kernel_size=4, stride=2, padding=1)
        self.deconv5_2 = ConvBlock(dim1, dim1)
        self.deconv6 = ConvBlock_Tanh(dim1, 1)
    def forward(self,x):
        # Encoder Part
        x = self.convblock1(x) # (None, 32, 500, 70)
        x = self.convblock2_1(x) # (None, 64, 250, 70)
        x = self.convblock2_2(x) # (None, 64, 250, 70)
        x = self.convblock3_1(x) # (None, 64, 125, 70)
        x = self.convblock3_2(x) # (None, 64, 125, 70)
        x = self.convblock4_1(x) # (None, 128, 63, 70)
        x = self.convblock4_2(x) # (None, 128, 63, 70)
        x = self.convblock5_1(x) # (None, 128, 32, 35)
        x = self.convblock5_2(x) # (None, 128, 32, 35)
        x = self.convblock6_1(x) # (None, 256, 16, 18)
        x = self.convblock6_2(x) # (None, 256, 16, 18)
        x = self.convblock7_1(x) # (None, 256, 8, 9)
        x = self.convblock7_2(x) # (None, 256, 8, 9)
        x = self.convblock8(x) # (None, 512, 1, 1)
        # Decoder Part
        x = self.deconv1_1(x) # (None, 512, 5, 5)
        x = self.deconv1_2(x) # (None, 512, 5, 5)
        x = self.deconv2_1(x) # (None, 256, 10, 10)
        x = self.deconv2_2(x) # (None, 256, 10, 10)
        x = self.deconv3_1(x) # (None, 128, 20, 20)
        x = self.deconv3_2(x) # (None, 128, 20, 20)
        x = self.deconv4_1(x) # (None, 64, 40, 40)
        x = self.deconv4_2(x) # (None, 64, 40, 40)
        x = self.deconv5_1(x) # (None, 32, 80, 80)
        x = self.deconv5_2(x) # (None, 32, 80, 80)
        # Negative padding crops the 80x80 map down to 70x70.
        x = F.pad(x, [-5, -5, -5, -5], mode="constant", value=0) # (None, 32, 70, 70) 125, 100
        x = self.deconv6(x) # (None, 1, 70, 70)
        return x
class FCN4_Deep_Resize_2(nn.Module):
    """Legacy InversionNet variant whose decoder upsamples with
    Upsample + Conv (ResizeConv2DwithBN) instead of transposed convolutions.

    Registered as 'UPFWI' in model_dict; same I/O contract as InversionNet
    per the shape annotations in forward(): (N, 5, 1000, 70) -> (N, 1, 70, 70).
    """
    def __init__(self, dim1=32, dim2=64, dim3=128, dim4=256, dim5=512, ratio=1.0, upsample_mode='nearest'):
        super(FCN4_Deep_Resize_2, self).__init__()
        self.convblock1 = Conv2DwithBN(5, dim1, kernel_size=(7, 1), stride=(2, 1), padding=(3, 0))
        self.convblock2_1 = Conv2DwithBN(dim1, dim2, kernel_size=(3, 1), stride=(2, 1), padding=(1, 0))
        self.convblock2_2 = Conv2DwithBN(dim2, dim2, kernel_size=(3, 1), padding=(1, 0))
        self.convblock3_1 = Conv2DwithBN(dim2, dim2, kernel_size=(3, 1), stride=(2, 1), padding=(1, 0))
        self.convblock3_2 = Conv2DwithBN(dim2, dim2, kernel_size=(3, 1), padding=(1, 0))
        self.convblock4_1 = Conv2DwithBN(dim2, dim3, kernel_size=(3, 1), stride=(2, 1), padding=(1, 0))
        self.convblock4_2 = Conv2DwithBN(dim3, dim3, kernel_size=(3, 1), padding=(1, 0))
        self.convblock5_1 = Conv2DwithBN(dim3, dim3, stride=2)
        self.convblock5_2 = Conv2DwithBN(dim3, dim3)
        self.convblock6_1 = Conv2DwithBN(dim3, dim4, stride=2)
        self.convblock6_2 = Conv2DwithBN(dim4, dim4)
        self.convblock7_1 = Conv2DwithBN(dim4, dim4, stride=2)
        self.convblock7_2 = Conv2DwithBN(dim4, dim4)
        # Full-extent kernel: reduces the (8 x ceil(70*ratio/8)) map to 1x1.
        self.convblock8 = Conv2DwithBN(dim4, dim5, kernel_size=(8, ceil(70 * ratio / 8)), padding=0)
        self.deconv1_1 = ResizeConv2DwithBN(dim5, dim5, scale_factor=5, mode=upsample_mode)
        self.deconv1_2 = Conv2DwithBN(dim5, dim5)
        self.deconv2_1 = ResizeConv2DwithBN(dim5, dim4, scale_factor=2, mode=upsample_mode)
        self.deconv2_2 = Conv2DwithBN(dim4, dim4)
        self.deconv3_1 = ResizeConv2DwithBN(dim4, dim3, scale_factor=2, mode=upsample_mode)
        self.deconv3_2 = Conv2DwithBN(dim3, dim3)
        self.deconv4_1 = ResizeConv2DwithBN(dim3, dim2, scale_factor=2, mode=upsample_mode)
        self.deconv4_2 = Conv2DwithBN(dim2, dim2)
        self.deconv5_1 = ResizeConv2DwithBN(dim2, dim1, scale_factor=2, mode=upsample_mode)
        self.deconv5_2 = Conv2DwithBN(dim1, dim1)
        self.deconv6 = Conv2DwithBN_Tanh(dim1, 1)
    def forward(self,x):
        # Encoder Part
        x = self.convblock1(x) # (None, 32, 500, 70)
        x = self.convblock2_1(x) # (None, 64, 250, 70)
        x = self.convblock2_2(x) # (None, 64, 250, 70)
        x = self.convblock3_1(x) # (None, 64, 125, 70)
        x = self.convblock3_2(x) # (None, 64, 125, 70)
        x = self.convblock4_1(x) # (None, 128, 63, 70)
        x = self.convblock4_2(x) # (None, 128, 63, 70)
        x = self.convblock5_1(x) # (None, 128, 32, 35)
        x = self.convblock5_2(x) # (None, 128, 32, 35)
        x = self.convblock6_1(x) # (None, 256, 16, 18)
        x = self.convblock6_2(x) # (None, 256, 16, 18)
        x = self.convblock7_1(x) # (None, 256, 8, 9)
        x = self.convblock7_2(x) # (None, 256, 8, 9)
        x = self.convblock8(x) # (None, 512, 1, 1)
        # Decoder Part
        x = self.deconv1_1(x) # (None, 512, 5, 5)
        x = self.deconv1_2(x) # (None, 512, 5, 5)
        x = self.deconv2_1(x) # (None, 256, 10, 10)
        x = self.deconv2_2(x) # (None, 256, 10, 10)
        x = self.deconv3_1(x) # (None, 128, 20, 20)
        x = self.deconv3_2(x) # (None, 128, 20, 20)
        x = self.deconv4_1(x) # (None, 64, 40, 40)
        x = self.deconv4_2(x) # (None, 64, 40, 40)
        x = self.deconv5_1(x) # (None, 32, 80, 80)
        x = self.deconv5_2(x) # (None, 32, 80, 80)
        # Negative padding crops the 80x80 map down to 70x70.
        x = F.pad(x, [-5, -5, -5, -5], mode="constant", value=0) # (None, 32, 70, 70)
        x = self.deconv6(x) # (None, 1, 70, 70)
        return x
class Discriminator(nn.Module):
    """Convolutional critic scoring each velocity map with a single scalar.

    Four stride-2 stages halve the spatial extent, then a 5x5 valid conv
    collapses the map; the result is flattened to (N, 1).
    """

    def __init__(self, dim1=32, dim2=64, dim3=128, dim4=256, **kwargs):
        super(Discriminator, self).__init__()
        self.convblock1_1 = ConvBlock(1, dim1, stride=2)
        self.convblock1_2 = ConvBlock(dim1, dim1)
        self.convblock2_1 = ConvBlock(dim1, dim2, stride=2)
        self.convblock2_2 = ConvBlock(dim2, dim2)
        self.convblock3_1 = ConvBlock(dim2, dim3, stride=2)
        self.convblock3_2 = ConvBlock(dim3, dim3)
        self.convblock4_1 = ConvBlock(dim3, dim4, stride=2)
        self.convblock4_2 = ConvBlock(dim4, dim4)
        self.convblock5 = ConvBlock(dim4, 1, kernel_size=5, padding=0)

    def forward(self, x):
        # Apply the stacked blocks in order, then flatten per sample.
        for block in (self.convblock1_1, self.convblock1_2,
                      self.convblock2_1, self.convblock2_2,
                      self.convblock3_1, self.convblock3_2,
                      self.convblock4_1, self.convblock4_2,
                      self.convblock5):
            x = block(x)
        return x.view(x.shape[0], -1)
class Conv_HPGNN(nn.Module):
    """Two ConvBlocks (slope 0.1, dropout) with an optional trailing MaxPool2d."""

    def __init__(self, in_fea, out_fea, kernel_size=None, stride=None, padding=None, **kwargs):
        super(Conv_HPGNN, self).__init__()
        stages = [
            ConvBlock(in_fea, out_fea, relu_slop=0.1, dropout=0.8),
            ConvBlock(out_fea, out_fea, relu_slop=0.1, dropout=0.8),
        ]
        if kernel_size is not None:
            # Pooling is only attached when a kernel size is supplied.
            stages.append(nn.MaxPool2d(kernel_size=kernel_size, stride=stride, padding=padding))
        self.layers = nn.Sequential(*stages)

    def forward(self, x):
        return self.layers(x)
class Deconv_HPGNN(nn.Module):
    """Stride-2 ConvTranspose2d upsampling followed by two ConvBlocks."""

    def __init__(self, in_fea, out_fea, kernel_size, **kwargs):
        super(Deconv_HPGNN, self).__init__()
        self.layers = nn.Sequential(
            nn.ConvTranspose2d(in_fea, in_fea, kernel_size=kernel_size, stride=2, padding=0),
            ConvBlock(in_fea, out_fea, relu_slop=0.1, dropout=0.8),
            ConvBlock(out_fea, out_fea, relu_slop=0.1, dropout=0.8),
        )

    def forward(self, x):
        return self.layers(x)
# Registry mapping the CLI model-name strings to their constructors.
model_dict = {
    'InversionNet': InversionNet,
    'Discriminator': Discriminator,
    'UPFWI': FCN4_Deep_Resize_2
}
| 14,861 | 45.15528 | 167 | py |
OpenFWI | OpenFWI-main/vis.py | import os
import torch
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import numpy as np
from matplotlib.colors import ListedColormap
# Load colormap for velocity map visualization
rainbow_cmap = ListedColormap(np.load('rainbow256.npy'))
def plot_velocity(output, target, path, vmin=None, vmax=None):
    """Save a side-by-side figure of predicted vs. ground-truth velocity maps."""
    fig, axes = plt.subplots(1, 2, figsize=(11, 5))
    if vmin is None or vmax is None:
        # Infer a shared color range from the ground truth.
        vmax, vmin = np.max(target), np.min(target)
    im = axes[0].matshow(output, cmap=rainbow_cmap, vmin=vmin, vmax=vmax)
    axes[0].set_title('Prediction', y=1.08)
    axes[1].matshow(target, cmap=rainbow_cmap, vmin=vmin, vmax=vmax)
    axes[1].set_title('Ground Truth', y=1.08)
    for axis in axes:
        # 70 grid cells labeled over a 700 m extent on both axes.
        axis.set_xticks(range(0, 70, 10))
        axis.set_xticklabels(range(0, 700, 100))
        axis.set_yticks(range(0, 70, 10))
        axis.set_yticklabels(range(0, 700, 100))
        axis.set_ylabel('Depth (m)', fontsize=12)
        axis.set_xlabel('Offset (m)', fontsize=12)
    fig.colorbar(im, ax=axes, shrink=0.75, label='Velocity(m/s)')
    plt.savefig(path)
    plt.close('all')
def plot_single_velocity(label, path):
    """Save a standalone velocity map with a colorbar."""
    plt.rcParams.update({'font.size': 16})
    fig, ax = plt.subplots(1, 1, figsize=(8, 6))
    # Color range spans the data itself.
    vmax, vmin = np.max(label), np.min(label)
    im = ax.matshow(label, cmap=rainbow_cmap, vmin=vmin, vmax=vmax)
    fig.colorbar(im, ax=ax, shrink=1.0, label='Velocity(m/s)')
    plt.savefig(path)
    plt.close('all')
# def plot_seismic(output, target, path, vmin=-1e-5, vmax=1e-5):
# fig, ax = plt.subplots(1, 3, figsize=(15, 6))
# im = ax[0].matshow(output, aspect='auto', cmap='gray', vmin=vmin, vmax=vmax)
# ax[0].set_title('Prediction')
# ax[1].matshow(target, aspect='auto', cmap='gray', vmin=vmin, vmax=vmax)
# ax[1].set_title('Ground Truth')
# ax[2].matshow(output - target, aspect='auto', cmap='gray', vmin=vmin, vmax=vmax)
# ax[2].set_title('Difference')
# fig.colorbar(im, ax=ax, format='%.1e')
# plt.savefig(path)
# plt.close('all')
def plot_seismic(output, target, path, vmin=-1e-5, vmax=1e-5):
    """Save ground truth, prediction and their difference as grayscale panels."""
    fig, ax = plt.subplots(1, 3, figsize=(20, 5))
    # fig, ax = plt.subplots(1, 2, figsize=(11, 5))
    # Width/height ratio keeps the (tall) gathers readable.
    aspect = output.shape[1]/output.shape[0]
    im = ax[0].matshow(target, aspect=aspect, cmap='gray', vmin=vmin, vmax=vmax)
    ax[0].set_title('Ground Truth')
    ax[1].matshow(output, aspect=aspect, cmap='gray', vmin=vmin, vmax=vmax)
    ax[1].set_title('Prediction')
    ax[2].matshow(output - target, aspect='auto', cmap='gray', vmin=vmin, vmax=vmax)
    ax[2].set_title('Difference')
    # for axis in ax:
    #     axis.set_xticks(range(0, 70, 10))
    #     axis.set_xticklabels(range(0, 1050, 150))
    #     axis.set_title('Offset (m)', y=1.1)
    #     axis.set_ylabel('Time (ms)', fontsize=12)
    # fig.colorbar(im, ax=ax, shrink=1.0, pad=0.01, label='Amplitude')
    fig.colorbar(im, ax=ax, shrink=0.75, label='Amplitude')
    plt.savefig(path)
    plt.close('all')
def plot_single_seismic(data, path):
    """Save a single seismic gather with physical axis labels (offset/time)."""
    nz, nx = data.shape
    plt.rcParams.update({'font.size': 18})
    vmin, vmax = np.min(data), np.max(data)
    fig, ax = plt.subplots(1, 1, figsize=(8, 6))
    # Clip the color range to 1% of the data extremes to boost contrast.
    im = ax.matshow(data, aspect='auto', cmap='gray', vmin=vmin * 0.01, vmax=vmax * 0.01)
    ax.set_aspect(aspect=nx/nz)
    # Tick labels assume a 1050 m offset extent and 1000 ms time extent.
    ax.set_xticks(range(0, nx, int(300//(1050/nx)))[:5])
    ax.set_xticklabels(range(0, 1050, 300))
    ax.set_title('Offset (m)', y=1.08)
    ax.set_yticks(range(0, nz, int(200//(1000/nz)))[:5])
    ax.set_yticklabels(range(0, 1000, 200))
    ax.set_ylabel('Time (ms)', fontsize=18)
    fig.colorbar(im, ax=ax, shrink=1.0, pad=0.01, label='Amplitude')
    plt.savefig(path)
    plt.close('all')
| 4,324 | 38.318182 | 89 | py |
OpenFWI | OpenFWI-main/utils.py | # © 2022. Triad National Security, LLC. All rights reserved.
# This program was produced under U.S. Government contract 89233218CNA000001 for Los Alamos
# National Laboratory (LANL), which is operated by Triad National Security, LLC for the U.S.
# Department of Energy/National Nuclear Security Administration. All rights in the program are
# reserved by Triad National Security, LLC, and the U.S. Department of Energy/National Nuclear
# Security Administration. The Government is granted for itself and others acting on its behalf a
# nonexclusive, paid-up, irrevocable worldwide license in this material to reproduce, prepare
# derivative works, distribute copies to the public, perform publicly and display publicly, and to permit
# others to do so.
from collections import defaultdict, deque
import datetime
import time
import torch
import torch.distributed as dist
import torch.autograd as autograd
from torch.autograd import Variable
import torch.nn as nn
import errno
import os
import itertools
from torchvision.models import vgg16
import numpy as np
class SmoothedValue(object):
    """Track a series of values and provide access to smoothed values over a
    window or the global series average.
    """
    def __init__(self, window_size=20, fmt=None):
        # fmt is the template used by __str__; it may reference the median,
        # avg, global_avg, max and value properties by name.
        if fmt is None:
            fmt = "{median:.4f} ({global_avg:.4f})"
        self.deque = deque(maxlen=window_size)  # sliding window of recent values
        self.total = 0.0  # running sum over the entire series
        self.count = 0  # number of accumulated values over the entire series
        self.fmt = fmt
    def update(self, value, n=1):
        # n is the weight of this value (e.g. batch size); the window stores
        # the raw value once, while total/count accumulate the weighted sum.
        self.deque.append(value)
        self.count += n
        self.total += value * n
    def synchronize_between_processes(self):
        """
        Warning: does not synchronize the deque!
        """
        if not is_dist_avail_and_initialized():
            return
        # Sum count/total across all ranks; the local window stays local.
        t = torch.tensor([self.count, self.total], dtype=torch.float64, device='cuda')
        dist.barrier()
        dist.all_reduce(t)
        t = t.tolist()
        self.count = int(t[0])
        self.total = t[1]
    @property
    def median(self):
        # Median over the current window only.
        d = torch.tensor(list(self.deque))
        return d.median().item()
    @property
    def avg(self):
        # Mean over the current window only.
        d = torch.tensor(list(self.deque), dtype=torch.float32)
        return d.mean().item()
    @property
    def global_avg(self):
        # Weighted mean over the entire series.
        return self.total / self.count
    @property
    def max(self):
        # Maximum within the current window.
        return max(self.deque)
    @property
    def value(self):
        # Most recently recorded value.
        return self.deque[-1]
    def __str__(self):
        return self.fmt.format(
            median=self.median,
            avg=self.avg,
            global_avg=self.global_avg,
            max=self.max,
            value=self.value)
class MetricLogger(object):
    """Collect named SmoothedValue meters and print periodic progress logs."""
    def __init__(self, delimiter="\t"):
        self.meters = defaultdict(SmoothedValue)
        self.delimiter = delimiter
    def update(self, **kwargs):
        # Accepts scalar tensors or plain numbers; each keyword becomes a meter.
        for k, v in kwargs.items():
            if isinstance(v, torch.Tensor):
                v = v.item()
            assert isinstance(v, (float, int))
            self.meters[k].update(v)
    def __getattr__(self, attr):
        # Allow meters to be read as attributes (logger.loss, logger.lr, ...).
        if attr in self.meters:
            return self.meters[attr]
        if attr in self.__dict__:
            return self.__dict__[attr]
        raise AttributeError("'{}' object has no attribute '{}'".format(
            type(self).__name__, attr))
    def __str__(self):
        loss_str = []
        for name, meter in self.meters.items():
            loss_str.append(
                "{}: {}".format(name, str(meter))
            )
        return self.delimiter.join(loss_str)
    def synchronize_between_processes(self):
        # Synchronize every meter's global statistics across ranks.
        for meter in self.meters.values():
            meter.synchronize_between_processes()
    def add_meter(self, name, meter):
        self.meters[name] = meter
    def log_every(self, iterable, print_freq, header=None):
        # Wrap an iterable (or a list of iterables iterated in lockstep, the
        # shorter ones cycled up to the longest length), yielding items while
        # printing progress, ETA, meter values and timing every print_freq steps.
        if isinstance(iterable, list):
            length = max(len(x) for x in iterable)
            iterable = [x if len(x) == length else itertools.cycle(x) for x in iterable]
            iterable = zip(*iterable)
        else:
            length = len(iterable)
        i = 0
        if not header:
            header = ''
        start_time = time.time()
        end = time.time()
        iter_time = SmoothedValue(fmt='{avg:.4f}')
        data_time = SmoothedValue(fmt='{avg:.4f}')
        space_fmt = ':' + str(len(str(length))) + 'd'
        if torch.cuda.is_available():
            log_msg = self.delimiter.join([
                header,
                '[{0' + space_fmt + '}/{1}]',
                'eta: {eta}',
                '{meters}',
                'time: {time}',
                'data: {data}',
                'max mem: {memory:.0f}'
            ])
        else:
            log_msg = self.delimiter.join([
                header,
                '[{0' + space_fmt + '}/{1}]',
                'eta: {eta}',
                '{meters}',
                'time: {time}',
                'data: {data}'
            ])
        MB = 1024.0 * 1024.0
        for obj in iterable:
            # data_time: time spent waiting for the next batch; iter_time:
            # total time per iteration including the caller's work.
            data_time.update(time.time() - end)
            yield obj  # <-- yield the batch in for loop
            iter_time.update(time.time() - end)
            if i % print_freq == 0:
                eta_seconds = iter_time.global_avg * (length - i)
                eta_string = str(datetime.timedelta(seconds=int(eta_seconds)))
                if torch.cuda.is_available():
                    print(log_msg.format(
                        i, length, eta=eta_string,
                        meters=str(self),
                        time=str(iter_time), data=str(data_time),
                        memory=torch.cuda.max_memory_allocated() / MB))
                else:
                    print(log_msg.format(
                        i, length, eta=eta_string,
                        meters=str(self),
                        time=str(iter_time), data=str(data_time)))
            i += 1
            end = time.time()
        total_time = time.time() - start_time
        total_time_str = str(datetime.timedelta(seconds=int(total_time)))
        print('{} Total time: {}'.format(header, total_time_str))
# Legacy code
class ContentLoss(nn.Module):
    """(Legacy) Weighted L1 + L2 loss between model(input) and target.

    Reads ``lambda_l1`` / ``lambda_l2`` weights from ``args``.
    forward() returns (total_loss, {'loss_l1': ..., 'loss_l2': ...}).
    """
    def __init__(self, args):
        super(ContentLoss, self).__init__()
        names = ['l1', 'l2']
        self.loss_names = ['loss_' + n for n in names]
        # Copy the per-term weights from the argparse namespace.
        for key in ['lambda_' + n for n in names]:
            setattr(self, key, getattr(args, key))
        self.l1loss = nn.L1Loss()
        self.l2loss = nn.MSELoss()
    def forward(self, model, input, target):
        pred = model(input)
        loss_l1 = self.l1loss(target, pred)
        loss_l2 = self.l2loss(target, pred)
        loss = loss_l1 * self.lambda_l1 + loss_l2 * self.lambda_l2
        # Build the per-term dict explicitly instead of eval()-ing local
        # variable names, which was fragile under refactoring.
        return loss, {'loss_l1': loss_l1, 'loss_l2': loss_l2}
# Legacy code
class IdenticalLoss(nn.Module):
    """(Legacy) Round-trip identity loss: input -> s2v -> v2s -> input.

    Reads ``lambda_id1s`` / ``lambda_id2s`` weights from ``args``.
    forward() returns (total_loss, {'loss_id1s': ..., 'loss_id2s': ...}).
    """
    def __init__(self, args):
        super(IdenticalLoss, self).__init__()
        names = ['id1s', 'id2s']
        self.loss_names = ['loss_' + n for n in names]
        for key in ['lambda_' + n for n in names]:
            setattr(self, key, getattr(args, key))
        self.l1loss = nn.L1Loss()
        self.l2loss = nn.MSELoss()
    def forward(self, model_s2v, model_v2s, input):
        # Reconstruct the input through both mapping networks.
        mid = model_s2v(input)
        pred = model_v2s(mid)
        loss_id1s = self.l1loss(input, pred)
        loss_id2s = self.l2loss(input, pred)
        loss = loss_id1s * self.lambda_id1s + loss_id2s * self.lambda_id2s
        # Explicit dict instead of the old eval()-based name lookup.
        return loss, {'loss_id1s': loss_id1s, 'loss_id2s': loss_id2s}
# Implemented according to H-PGNN, not useful
class NMSELoss(nn.Module):
    """Normalized MSE: squared error scaled by each sample's spatial max of gt.

    Implemented according to H-PGNN; not used in the main pipeline.
    """

    def __init__(self):
        super(NMSELoss, self).__init__()

    def forward(self, pred, gt):
        # Per-sample normalizer: max over the last two (spatial) dims,
        # with a small epsilon to avoid division by zero.
        scale = torch.amax(gt, (-2, -1), keepdim=True) + 1e-5
        return ((pred - gt) / scale).pow(2).mean()
class CycleLoss(nn.Module):
    """Weighted L1/L2 generation ('g') and cycle-consistency ('c') losses.

    Terms (weights read from ``args`` as ``lambda_<name>``):
        g1v/g2v: L1/L2 of pred_v vs label;  g1s/g2s: L1/L2 of pred_s vs data
        c1v/c2v: L1/L2 of recon_v vs label; c1s/c2s: L1/L2 of recon_s vs data
    Terms whose inputs are None contribute 0.
    forward() returns (total_loss, {term_name: value}).
    """
    def __init__(self, args):
        super(CycleLoss, self).__init__()
        names = ['g1v', 'g2v', 'g1s', 'g2s', 'c1v', 'c2v', 'c1s', 'c2s']
        self.loss_names = ['loss_' + n for n in names]
        for key in ['lambda_' + n for n in names]:
            setattr(self, key, getattr(args, key))
        self.l1loss = nn.L1Loss()
        self.l2loss = nn.MSELoss()
    def forward(self, data, label, pred_s=None, pred_v=None, recon_s=None, recon_v=None):
        cal_loss = lambda x, y: (self.l1loss(x, y), self.l2loss(x, y))
        loss_g1v, loss_g2v, loss_g1s, loss_g2s = [0] * 4
        if pred_v is not None:
            loss_g1v, loss_g2v = cal_loss(pred_v, label)
        if pred_s is not None:
            loss_g1s, loss_g2s = cal_loss(pred_s, data)
        loss_c1v, loss_c2v, loss_c1s, loss_c2s = [0] * 4
        if recon_v is not None:
            loss_c1v, loss_c2v = cal_loss(recon_v, label)
        if recon_s is not None:
            loss_c1s, loss_c2s = cal_loss(recon_s, data)
        loss = loss_g1v * self.lambda_g1v + loss_g2v * self.lambda_g2v + \
               loss_g1s * self.lambda_g1s + loss_g2s * self.lambda_g2s + \
               loss_c1v * self.lambda_c1v + loss_c2v * self.lambda_c2v + \
               loss_c1s * self.lambda_c1s + loss_c2s * self.lambda_c2s
        # Explicit per-term dict (same key order as self.loss_names); the old
        # eval()-based lookup was fragile and broke under refactoring.
        terms = {
            'loss_g1v': loss_g1v, 'loss_g2v': loss_g2v,
            'loss_g1s': loss_g1s, 'loss_g2s': loss_g2s,
            'loss_c1v': loss_c1v, 'loss_c2v': loss_c2v,
            'loss_c1s': loss_c1s, 'loss_c2s': loss_c2s,
        }
        return loss, terms
# Legacy code
class _CycleLoss(nn.Module):
    """(Legacy) Variant of CycleLoss that skips computing any term whose
    L1 and L2 weights are both zero.

    forward() returns (total_loss, {term_name: value}); see CycleLoss.
    """
    def __init__(self, args):
        super(_CycleLoss, self).__init__()
        names = ['g1v', 'g2v', 'g1s', 'g2s', 'c1v', 'c2v', 'c1s', 'c2s']
        self.loss_names = ['loss_' + n for n in names]
        for key in ['lambda_' + n for n in names]:
            setattr(self, key, getattr(args, key))
        self.l1loss = nn.L1Loss()
        self.l2loss = nn.MSELoss()
    def forward(self, data, label, pred_s=None, pred_v=None, recon_s=None, recon_v=None):
        cal_loss = lambda x, y: (self.l1loss(x, y), self.l2loss(x, y))
        loss_g1v, loss_g2v, loss_g1s, loss_g2s = [0] * 4
        # Each pair is only evaluated when its input exists AND at least one
        # of its two weights is non-zero (saves the forward L1/L2 compute).
        if pred_v is not None and (self.lambda_g1v != 0 or self.lambda_g2v != 0):
            loss_g1v, loss_g2v = cal_loss(pred_v, label)
        if pred_s is not None and (self.lambda_g1s != 0 or self.lambda_g2s != 0):
            loss_g1s, loss_g2s = cal_loss(pred_s, data)
        loss_c1v, loss_c2v, loss_c1s, loss_c2s = [0] * 4
        if recon_v is not None and (self.lambda_c1v != 0 or self.lambda_c2v != 0):
            loss_c1v, loss_c2v = cal_loss(recon_v, label)
        if recon_s is not None and (self.lambda_c1s != 0 or self.lambda_c2s != 0):
            loss_c1s, loss_c2s = cal_loss(recon_s, data)
        loss = loss_g1v * self.lambda_g1v + loss_g2v * self.lambda_g2v + \
               loss_g1s * self.lambda_g1s + loss_g2s * self.lambda_g2s + \
               loss_c1v * self.lambda_c1v + loss_c2v * self.lambda_c2v + \
               loss_c1s * self.lambda_c1s + loss_c2s * self.lambda_c2s
        # Explicit per-term dict replaces the old fragile eval()-based lookup.
        terms = {
            'loss_g1v': loss_g1v, 'loss_g2v': loss_g2v,
            'loss_g1s': loss_g1s, 'loss_g2s': loss_g2s,
            'loss_c1v': loss_c1v, 'loss_c2v': loss_c2v,
            'loss_c1s': loss_c1s, 'loss_c2s': loss_c2s,
        }
        return loss, terms
def accuracy(output, target, topk=(1,)):
    """Computes the accuracy over the k top predictions for the specified values of k"""
    with torch.no_grad():
        maxk = max(topk)
        batch_size = target.size(0)
        # (batch, maxk) indices of the top predictions, transposed so that
        # row r holds the rank-r prediction for every sample.
        _, pred = output.topk(maxk, 1, True, True)
        pred = pred.t()
        correct = pred.eq(target[None])
        # For each requested k, the fraction of samples whose target appears
        # among the top-k predictions, expressed as a percentage.
        return [
            correct[:k].flatten().sum(dtype=torch.float32) * (100.0 / batch_size)
            for k in topk
        ]
def mkdir(path):
    """Create *path* (including parents), ignoring the error if it already exists."""
    try:
        os.makedirs(path)
    except FileExistsError:
        # Already present (possibly created by a concurrent process).
        # FileExistsError is exactly the OSError with errno EEXIST that the
        # old errno-comparison idiom filtered for.
        pass
def setup_for_distributed(is_master):
    """
    This function disables printing when not in master process
    """
    import builtins as __builtin__
    builtin_print = __builtin__.print
    def print(*args, **kwargs):
        # 'force=True' lets any rank print (e.g. for debugging); the kwarg
        # is stripped before delegating to the real built-in print.
        force = kwargs.pop('force', False)
        if is_master or force:
            builtin_print(*args, **kwargs)
    # NOTE: this globally monkey-patches builtins.print for the whole process.
    __builtin__.print = print
def is_dist_avail_and_initialized():
    """Return True iff torch.distributed is both available and initialized."""
    # Short-circuit keeps is_initialized() from being queried when the
    # distributed package is unavailable.
    return dist.is_available() and dist.is_initialized()
def get_world_size():
    """Return the number of processes in the group (1 when not distributed)."""
    return dist.get_world_size() if is_dist_avail_and_initialized() else 1
def get_rank():
    """Return this process's rank (0 when not distributed)."""
    return dist.get_rank() if is_dist_avail_and_initialized() else 0
def is_main_process():
    # Rank 0 is the designated "master" for logging/checkpointing.
    return get_rank() == 0
def save_on_master(*args, **kwargs):
    # torch.save(...) executed only on the master rank so multiple
    # processes don't clobber the same checkpoint file.
    if is_main_process():
        torch.save(*args, **kwargs)
def init_distributed_mode(args):
    """Detect a distributed launcher and initialize the NCCL process group.

    Mutates ``args`` in place: sets rank / world_size / local_rank /
    distributed / dist_backend. Falls back to single-process mode when no
    launcher environment is detected.
    """
    if 'RANK' in os.environ and 'WORLD_SIZE' in os.environ:
        # Launched by torchrun / torch.distributed.launch.
        args.rank = int(os.environ["RANK"])
        args.world_size = int(os.environ['WORLD_SIZE'])
        args.local_rank = int(os.environ['LOCAL_RANK'])
    elif 'SLURM_PROCID' in os.environ and args.world_size > 1:
        # Launched under SLURM: derive the local rank from the global rank.
        args.rank = int(os.environ['SLURM_PROCID'])
        args.local_rank = args.rank % torch.cuda.device_count()
    elif hasattr(args, "rank"):
        # Rank supplied explicitly by the caller; nothing to detect.
        pass
    else:
        print('Not using distributed mode')
        args.distributed = False
        return
    args.distributed = True
    torch.cuda.set_device(args.local_rank)
    args.dist_backend = 'nccl'
    print('| distributed init (rank {}): {}'.format(
        args.rank, args.dist_url), flush=True)
    torch.distributed.init_process_group(backend=args.dist_backend, init_method=args.dist_url,
                                         world_size=args.world_size, rank=args.rank)
    # Silence print() on all non-master ranks.
    setup_for_distributed(args.rank == 0)
class Wasserstein_GP(nn.Module):
    """WGAN-GP critic loss with gradient penalty.

    forward() returns (critic_loss, wasserstein_estimate, gradient_penalty),
    where critic_loss = -E[D(real)] + E[D(fake)] + lambda_gp * GP.
    """
    def __init__(self, device, lambda_gp):
        super(Wasserstein_GP, self).__init__()
        self.device = device  # device used for the interpolation noise
        self.lambda_gp = lambda_gp  # weight of the gradient penalty term
    def forward(self, real, fake, model):
        gradient_penalty = self.compute_gradient_penalty(model, real, fake)
        loss_real = torch.mean(model(real))
        loss_fake = torch.mean(model(fake))
        loss = -loss_real + loss_fake + gradient_penalty * self.lambda_gp
        return loss, loss_real-loss_fake, gradient_penalty
    def compute_gradient_penalty(self, model, real_samples, fake_samples):
        # Penalize deviation of the critic's gradient norm from 1, evaluated
        # at random points interpolated between real and fake samples.
        alpha = torch.rand(real_samples.size(0), 1, 1, 1, device=self.device)
        interpolates = (alpha * real_samples + ((1 - alpha) * fake_samples)).requires_grad_(True)
        d_interpolates = model(interpolates)
        # create_graph=True so the penalty itself is differentiable w.r.t.
        # the critic parameters during the critic update.
        gradients = autograd.grad(
            outputs=d_interpolates,
            inputs=interpolates,
            grad_outputs=torch.ones(real_samples.size(0), d_interpolates.size(1)).to(self.device),
            create_graph=True,
            retain_graph=True,
            only_inputs=True,
        )[0]
        gradients = gradients.view(gradients.size(0), -1)
        gradient_penalty = ((gradients.norm(2, dim=1) - 1) ** 2).mean()
        return gradient_penalty
# Modified from https://gist.github.com/alper111/8233cdb0414b4cb5853f2f730ab95a49
class VGGPerceptualLoss(nn.Module):
    """Perceptual (VGG16 feature) loss between two single-channel images.

    Inputs are tiled to 3 channels, optionally rescaled from [-1, 1] to
    [0, 1], normalized with ImageNet statistics and (if ``resize``) resized
    to 224x224 before being compared in L1/L2 at the selected VGG16 feature
    blocks. forward() returns (loss_l1, loss_l2).
    """
    def __init__(self, resize=True):
        super(VGGPerceptualLoss, self).__init__()
        # Frozen slices of pretrained VGG16 features (weights are downloaded
        # on first use); indices below map each slice to its last ReLU.
        blocks = []
        blocks.append(vgg16(pretrained=True).features[:4].eval())  # relu1_2
        blocks.append(vgg16(pretrained=True).features[4:9].eval())  # relu2_2
        blocks.append(vgg16(pretrained=True).features[9:16].eval())  # relu3_3
        blocks.append(vgg16(pretrained=True).features[16:23].eval())  # relu4_3
        for bl in blocks:
            for p in bl:
                p.requires_grad = False
        self.blocks = nn.ModuleList(blocks)
        self.transform = nn.functional.interpolate
        self.resize = resize
        # ImageNet channel statistics used by torchvision's pretrained models.
        self.register_buffer("mean", torch.tensor([0.485, 0.456, 0.406]).view(1, 3, 1, 1))
        self.register_buffer("std", torch.tensor([0.229, 0.224, 0.225]).view(1, 3, 1, 1))
        self.l1loss = nn.L1Loss()
        self.l2loss = nn.MSELoss()
    def forward(self, input, target, rescale=True, feature_layers=[1]):
        # NOTE: feature_layers default is a mutable list but is never mutated
        # here; index 1 selects only the relu2_2 block by default.
        input = input.view(-1, 1, input.shape[-2], input.shape[-1]).repeat(1, 3, 1, 1)
        target = target.view(-1, 1, target.shape[-2], target.shape[-1]).repeat(1, 3, 1, 1)
        if rescale:  # from [-1, 1] to [0, 1]
            input = input / 2 + 0.5
            target = target / 2 + 0.5
        input = (input-self.mean) / self.std
        target = (target-self.mean) / self.std
        if self.resize:
            input = self.transform(input, mode='bilinear', size=(224, 224), align_corners=False)
            target = self.transform(target, mode='bilinear', size=(224, 224), align_corners=False)
        loss_l1, loss_l2 = 0.0, 0.0
        x = input
        y = target
        for i, block in enumerate(self.blocks):
            # Feed both images through successive VGG slices, accumulating
            # losses only at the requested feature depths.
            x = block(x)
            y = block(y)
            if i in feature_layers:
                loss_l1 += self.l1loss(x, y)
                loss_l2 += self.l2loss(x, y)
        return loss_l1, loss_l2
def cal_psnr(gt, data, max_value):
    """PSNR in dB between gt and data; returns the sentinel 100 for a perfect match."""
    mse = np.mean((gt - data) ** 2)
    if mse == 0:
        # Identical signals: avoid log of infinity.
        return 100
    return 20 * np.log10(max_value / np.sqrt(mse))
| 17,006 | 34.804211 | 105 | py |
OpenFWI | OpenFWI-main/dataset.py | # © 2022. Triad National Security, LLC. All rights reserved.
# This program was produced under U.S. Government contract 89233218CNA000001 for Los Alamos
# National Laboratory (LANL), which is operated by Triad National Security, LLC for the U.S.
# Department of Energy/National Nuclear Security Administration. All rights in the program are
# reserved by Triad National Security, LLC, and the U.S. Department of Energy/National Nuclear
# Security Administration. The Government is granted for itself and others acting on its behalf a
# nonexclusive, paid-up, irrevocable worldwide license in this material to reproduce, prepare
# derivative works, distribute copies to the public, perform publicly and display publicly, and to permit
# others to do so.
import os
import numpy as np
from torch.utils.data import Dataset
from torchvision.transforms import Compose
import transforms as T
class FWIDataset(Dataset):
    ''' FWI dataset
    For convenience, in this class, a batch refers to a npy file
    instead of the batch used during training.
    Args:
        anno: path to annotation file
        preload: whether to load the whole dataset into memory
        sample_ratio: downsample ratio for seismic data
        file_size: # of samples in each npy file
        transform_data|label: transformation applied to data or label
    '''
    def __init__(self, anno, preload=True, sample_ratio=1, file_size=500,
                transform_data=None, transform_label=None):
        if not os.path.exists(anno):
            # NOTE: this only warns; the subsequent open() will still raise.
            print(f'Annotation file {anno} does not exists')
        self.preload = preload
        self.sample_ratio = sample_ratio
        self.file_size = file_size
        self.transform_data = transform_data
        self.transform_label = transform_label
        with open(anno, 'r') as f:
            self.batches = f.readlines()
        if preload:
            # Eagerly load every npy file referenced by the annotation.
            self.data_list, self.label_list = [], []
            for batch in self.batches:
                data, label = self.load_every(batch)
                self.data_list.append(data)
                if label is not None:
                    self.label_list.append(label)
    # Load from one line
    def load_every(self, batch):
        # Each annotation line is '<data_path>\t<label_path>\n' or just
        # '<data_path>\n' (unlabeled); the [:-1] slices strip the newline.
        batch = batch.split('\t')
        data_path = batch[0] if len(batch) > 1 else batch[0][:-1]
        # Downsample the time axis (dim 2) by sample_ratio.
        data = np.load(data_path)[:, :, ::self.sample_ratio, :]
        data = data.astype('float32')
        if len(batch) > 1:
            label_path = batch[1][:-1]
            label = np.load(label_path)
            label = label.astype('float32')
        else:
            label = None
        return data, label
    def __getitem__(self, idx):
        # Map the flat sample index to (npy file, sample within that file).
        batch_idx, sample_idx = idx // self.file_size, idx % self.file_size
        if self.preload:
            data = self.data_list[batch_idx][sample_idx]
            label = self.label_list[batch_idx][sample_idx] if len(self.label_list) != 0 else None
        else:
            # Lazy path: re-load the whole npy file for every access.
            data, label = self.load_every(self.batches[batch_idx])
            data = data[sample_idx]
            label = label[sample_idx] if label is not None else None
        if self.transform_data:
            data = self.transform_data(data)
        if self.transform_label and label is not None:
            label = self.transform_label(label)
        # Unlabeled samples yield an empty array so default collate still works.
        return data, label if label is not None else np.array([])
    def __len__(self):
        return len(self.batches) * self.file_size
if __name__ == '__main__':
    # Smoke test: load one sample with log + min-max normalization.
    # Data is log-compressed, then mapped to [-1, 1] using the log-space bounds.
    transform_data = Compose([
        T.LogTransform(k=1),
        T.MinMaxNormalize(T.log_transform(-61, k=1), T.log_transform(120, k=1))
    ])
    # Velocity labels normalized with raw bounds (2000-6000 m/s).
    transform_label = Compose([
        T.MinMaxNormalize(2000, 6000)
    ])
    dataset = FWIDataset(f'relevant_files/temp.txt', transform_data=transform_data, transform_label=transform_label, file_size=1)
    data, label = dataset[0]
    print(data.shape)
    print(label is None)
| 3,920 | 37.441176 | 129 | py |
OpenFWI | OpenFWI-main/scheduler.py | # © 2022. Triad National Security, LLC. All rights reserved.
# This program was produced under U.S. Government contract 89233218CNA000001 for Los Alamos
# National Laboratory (LANL), which is operated by Triad National Security, LLC for the U.S.
# Department of Energy/National Nuclear Security Administration. All rights in the program are
# reserved by Triad National Security, LLC, and the U.S. Department of Energy/National Nuclear
# Security Administration. The Government is granted for itself and others acting on its behalf a
# nonexclusive, paid-up, irrevocable worldwide license in this material to reproduce, prepare
# derivative works, distribute copies to the public, perform publicly and display publicly, and to permit
# others to do so.
import torch
from bisect import bisect_right
# Scheduler adopted from the original repo
class WarmupMultiStepLR(torch.optim.lr_scheduler._LRScheduler):
    """Multi-step LR decay with an optional warmup phase.

    During the first ``warmup_iters`` steps the base LR is scaled by a
    warmup factor (constant, or linearly annealed from ``warmup_factor``
    to 1). Afterwards the LR is multiplied by ``gamma`` once per
    milestone passed (milestones are step counts compared against
    ``last_epoch``).
    """
    def __init__(
        self,
        optimizer,
        milestones,
        gamma=0.1,
        warmup_factor=1.0 / 3,
        warmup_iters=5,
        warmup_method="linear",
        last_epoch=-1,
    ):
        if not milestones == sorted(milestones):
            # BUG FIX: the template was previously passed unformatted as
            # ValueError("...Got {}", milestones) so the message never
            # contained the offending value; format it properly.
            raise ValueError(
                "Milestones should be a list of"
                " increasing integers. Got {}".format(milestones)
            )
        if warmup_method not in ("constant", "linear"):
            # BUG FIX: adjacent literals previously concatenated to
            # "...acceptedgot ..."; add the missing separator.
            raise ValueError(
                "Only 'constant' or 'linear' warmup_method accepted, "
                "got {}".format(warmup_method)
            )
        self.milestones = milestones
        self.gamma = gamma
        self.warmup_factor = warmup_factor
        self.warmup_iters = warmup_iters
        self.warmup_method = warmup_method
        super(WarmupMultiStepLR, self).__init__(optimizer, last_epoch)
    def get_lr(self):
        """Return the learning rate for each param group at this step."""
        warmup_factor = 1
        if self.last_epoch < self.warmup_iters:
            if self.warmup_method == "constant":
                warmup_factor = self.warmup_factor
            elif self.warmup_method == "linear":
                # Linear interpolation from warmup_factor up to 1.
                alpha = float(self.last_epoch) / self.warmup_iters
                warmup_factor = self.warmup_factor * (1 - alpha) + alpha
        return [
            base_lr *
            warmup_factor *
            self.gamma ** bisect_right(self.milestones, self.last_epoch)
            for base_lr in self.base_lrs
        ]
| 2,380 | 35.075758 | 105 | py |
OpenFWI | OpenFWI-main/train.py | # © 2022. Triad National Security, LLC. All rights reserved.
# This program was produced under U.S. Government contract 89233218CNA000001 for Los Alamos
# National Laboratory (LANL), which is operated by Triad National Security, LLC for the U.S.
# Department of Energy/National Nuclear Security Administration. All rights in the program are
# reserved by Triad National Security, LLC, and the U.S. Department of Energy/National Nuclear
# Security Administration. The Government is granted for itself and others acting on its behalf a
# nonexclusive, paid-up, irrevocable worldwide license in this material to reproduce, prepare
# derivative works, distribute copies to the public, perform publicly and display publicly, and to permit
# others to do so.
import os
import sys
import time
import datetime
import json
import torch
from torch import nn
from torch.utils.data import RandomSampler, DataLoader
from torch.utils.data.dataloader import default_collate
from torch.utils.data.distributed import DistributedSampler
from torch.nn.parallel import DistributedDataParallel
from torch.utils.tensorboard import SummaryWriter
import torchvision
from torchvision.transforms import Compose
import utils
import network
from dataset import FWIDataset
from scheduler import WarmupMultiStepLR
import transforms as T
step = 0
def train_one_epoch(model, criterion, optimizer, lr_scheduler,
                    dataloader, device, epoch, print_freq, writer):
    """Run one training epoch; logs per-iteration losses and advances
    the (per-iteration) LR scheduler. Increments the global `step`."""
    global step
    model.train()
    # Logger setup
    metric_logger = utils.MetricLogger(delimiter='  ')
    metric_logger.add_meter('lr', utils.SmoothedValue(window_size=1, fmt='{value}'))
    metric_logger.add_meter('samples/s', utils.SmoothedValue(window_size=10, fmt='{value:.3f}'))
    header = 'Epoch: [{}]'.format(epoch)
    for data, label in metric_logger.log_every(dataloader, print_freq, header):
        start_time = time.time()
        optimizer.zero_grad()
        data, label = data.to(device), label.to(device)
        output = model(data)
        # criterion returns (total, l1 term, l2 term) -- see main().
        loss, loss_g1v, loss_g2v = criterion(output, label)
        loss.backward()
        optimizer.step()
        loss_val = loss.item()
        loss_g1v_val = loss_g1v.item()
        loss_g2v_val = loss_g2v.item()
        batch_size = data.shape[0]
        metric_logger.update(loss=loss_val, loss_g1v=loss_g1v_val,
            loss_g2v=loss_g2v_val, lr=optimizer.param_groups[0]['lr'])
        metric_logger.meters['samples/s'].update(batch_size / (time.time() - start_time))
        if writer:
            writer.add_scalar('loss', loss_val, step)
            writer.add_scalar('loss_g1v', loss_g1v_val, step)
            writer.add_scalar('loss_g2v', loss_g2v_val, step)
        step += 1
        # NOTE: scheduler is stepped per iteration (milestones are scaled
        # accordingly in main()).
        lr_scheduler.step()
def evaluate(model, criterion, dataloader, device, writer):
    """Evaluate the model on `dataloader` and return the global-average
    total loss; logs losses to `writer` at the current global `step`."""
    model.eval()
    metric_logger = utils.MetricLogger(delimiter='  ')
    header = 'Test:'
    with torch.no_grad():
        for data, label in metric_logger.log_every(dataloader, 20, header):
            data = data.to(device, non_blocking=True)
            label = label.to(device, non_blocking=True)
            output = model(data)
            loss, loss_g1v, loss_g2v = criterion(output, label)
            metric_logger.update(loss=loss.item(),
                                 loss_g1v=loss_g1v.item(),
                                 loss_g2v=loss_g2v.item())
    # Gather the stats from all processes
    metric_logger.synchronize_between_processes()
    print(' * Loss {loss.global_avg:.8f}\n'.format(loss=metric_logger.loss))
    if writer:
        writer.add_scalar('loss', metric_logger.loss.global_avg, step)
        writer.add_scalar('loss_g1v', metric_logger.loss_g1v.global_avg, step)
        writer.add_scalar('loss_g2v', metric_logger.loss_g2v.global_avg, step)
    return metric_logger.loss.global_avg
def main(args):
    """End-to-end training driver: builds datasets/loaders, model, loss,
    optimizer and scheduler, then trains for args.epochs, checkpointing
    the best-validation model and periodic epoch blocks."""
    global step
    print(args)
    print('torch version: ', torch.__version__)
    print('torchvision version: ', torchvision.__version__)
    utils.mkdir(args.output_path) # create folder to store checkpoints
    utils.init_distributed_mode(args) # distributed mode initialization
    # Set up tensorboard summary writer
    train_writer, val_writer = None, None
    if args.tensorboard:
        utils.mkdir(args.log_path) # create folder to store tensorboard logs
        # Only rank 0 writes logs ("and" binds tighter than "or" here, so this
        # reads: not distributed, OR (rank == 0 AND local_rank == 0)).
        if not args.distributed or (args.rank == 0) and (args.local_rank == 0):
            train_writer = SummaryWriter(os.path.join(args.output_path, 'logs', 'train'))
            val_writer = SummaryWriter(os.path.join(args.output_path, 'logs', 'val'))
    device = torch.device(args.device)
    torch.backends.cudnn.benchmark = True
    # Per-dataset normalization bounds and file size come from this config.
    with open('dataset_config.json') as f:
        try:
            ctx = json.load(f)[args.dataset]
        except KeyError:
            print('Unsupported dataset.')
            sys.exit()
    if args.file_size is not None:
        ctx['file_size'] = args.file_size
    # Create dataset and dataloader
    print('Loading data')
    print('Loading training data')
    # Normalize data and label to [-1, 1]
    transform_data = Compose([
        T.LogTransform(k=args.k),
        T.MinMaxNormalize(T.log_transform(ctx['data_min'], k=args.k), T.log_transform(ctx['data_max'], k=args.k))
    ])
    transform_label = Compose([
        T.MinMaxNormalize(ctx['label_min'], ctx['label_max'])
    ])
    # A .txt annotation builds an FWIDataset; anything else is assumed to be
    # a torch-serialized dataset object.
    if args.train_anno[-3:] == 'txt':
        dataset_train = FWIDataset(
            args.train_anno,
            preload=True,
            sample_ratio=args.sample_temporal,
            file_size=ctx['file_size'],
            transform_data=transform_data,
            transform_label=transform_label
        )
    else:
        dataset_train = torch.load(args.train_anno)
    print('Loading validation data')
    if args.val_anno[-3:] == 'txt':
        dataset_valid = FWIDataset(
            args.val_anno,
            preload=True,
            sample_ratio=args.sample_temporal,
            file_size=ctx['file_size'],
            transform_data=transform_data,
            transform_label=transform_label
        )
    else:
        dataset_valid = torch.load(args.val_anno)
    print('Creating data loaders')
    if args.distributed:
        train_sampler = DistributedSampler(dataset_train, shuffle=True)
        valid_sampler = DistributedSampler(dataset_valid, shuffle=True)
    else:
        train_sampler = RandomSampler(dataset_train)
        valid_sampler = RandomSampler(dataset_valid)
    dataloader_train = DataLoader(
        dataset_train, batch_size=args.batch_size,
        sampler=train_sampler, num_workers=args.workers,
        pin_memory=True, drop_last=True, collate_fn=default_collate)
    dataloader_valid = DataLoader(
        dataset_valid, batch_size=args.batch_size,
        sampler=valid_sampler, num_workers=args.workers,
        pin_memory=True, collate_fn=default_collate)
    print('Creating model')
    if args.model not in network.model_dict:
        print('Unsupported model.')
        sys.exit()
    model = network.model_dict[args.model](upsample_mode=args.up_mode,
        sample_spatial=args.sample_spatial, sample_temporal=args.sample_temporal).to(device)
    if args.distributed and args.sync_bn:
        model = torch.nn.SyncBatchNorm.convert_sync_batchnorm(model)
    # Define loss function
    l1loss = nn.L1Loss()
    l2loss = nn.MSELoss()
    def criterion(pred, gt):
        # Weighted sum of L1 and L2; the individual terms are returned
        # for logging in train_one_epoch/evaluate.
        loss_g1v = l1loss(pred, gt)
        loss_g2v = l2loss(pred, gt)
        loss = args.lambda_g1v * loss_g1v + args.lambda_g2v * loss_g2v
        return loss, loss_g1v, loss_g2v
    # Scale lr according to effective batch size
    lr = args.lr * args.world_size
    optimizer = torch.optim.AdamW(model.parameters(), lr=lr, betas=(0.9, 0.999), weight_decay=args.weight_decay)
    # Convert scheduler to be per iteration instead of per epoch
    warmup_iters = args.lr_warmup_epochs * len(dataloader_train)
    lr_milestones = [len(dataloader_train) * m for m in args.lr_milestones]
    lr_scheduler = WarmupMultiStepLR(
        optimizer, milestones=lr_milestones, gamma=args.lr_gamma,
        warmup_iters=warmup_iters, warmup_factor=1e-5)
    model_without_ddp = model
    if args.distributed:
        model = DistributedDataParallel(model, device_ids=[args.local_rank])
        model_without_ddp = model.module
    if args.resume:
        # Restore model/optimizer/scheduler state and the global step counter.
        checkpoint = torch.load(args.resume, map_location='cpu')
        model_without_ddp.load_state_dict(network.replace_legacy(checkpoint['model']))
        optimizer.load_state_dict(checkpoint['optimizer'])
        lr_scheduler.load_state_dict(checkpoint['lr_scheduler'])
        args.start_epoch = checkpoint['epoch'] + 1
        step = checkpoint['step']
        # Override restored milestones with the (possibly changed) CLI values.
        lr_scheduler.milestones=lr_milestones
    print('Start training')
    start_time = time.time()
    best_loss = 10
    chp=1
    for epoch in range(args.start_epoch, args.epochs):
        if args.distributed:
            train_sampler.set_epoch(epoch)
        train_one_epoch(model, criterion, optimizer, lr_scheduler, dataloader_train,
                        device, epoch, args.print_freq, train_writer)
        loss = evaluate(model, criterion, dataloader_valid, device, val_writer)
        checkpoint = {
            'model': model_without_ddp.state_dict(),
            'optimizer': optimizer.state_dict(),
            'lr_scheduler': lr_scheduler.state_dict(),
            'epoch': epoch,
            'step': step,
            'args': args}
        # Save checkpoint per epoch
        if loss < best_loss:
            utils.save_on_master(
                checkpoint,
                os.path.join(args.output_path, 'checkpoint.pth'))
            print('saving checkpoint at epoch: ', epoch)
            chp = epoch
            best_loss = loss
        # Save checkpoint every epoch block
        print('current best loss: ', best_loss)
        print('current best epoch: ', chp)
        if args.output_path and (epoch + 1) % args.epoch_block == 0:
            utils.save_on_master(
                checkpoint,
                os.path.join(args.output_path, 'model_{}.pth'.format(epoch + 1)))
    total_time = time.time() - start_time
    total_time_str = str(datetime.timedelta(seconds=int(total_time)))
    print('Training time {}'.format(total_time_str))
def parse_args():
    """Parse CLI arguments and derive composite paths/epoch count.

    Note: output/log paths are joined with save-name and optional suffix,
    annotation names are joined with anno-path, and total epochs =
    epoch_block * num_block. A resume name is resolved inside output_path.
    """
    import argparse
    parser = argparse.ArgumentParser(description='FCN Training')
    parser.add_argument('-d', '--device', default='cuda', help='device')
    parser.add_argument('-ds', '--dataset', default='flatfault-b', type=str, help='dataset name')
    parser.add_argument('-fs', '--file-size', default=None, type=int, help='number of samples in each npy file')
    # Path related
    parser.add_argument('-ap', '--anno-path', default='split_files', help='annotation files location')
    parser.add_argument('-t', '--train-anno', default='flatfault_b_train_invnet.txt', help='name of train anno')
    parser.add_argument('-v', '--val-anno', default='flatfault_b_val_invnet.txt', help='name of val anno')
    parser.add_argument('-o', '--output-path', default='Invnet_models', help='path to parent folder to save checkpoints')
    parser.add_argument('-l', '--log-path', default='Invnet_models', help='path to parent folder to save logs')
    parser.add_argument('-n', '--save-name', default='fcn_l1loss_ffb', help='folder name for this experiment')
    parser.add_argument('-s', '--suffix', type=str, default=None, help='subfolder name for this run')
    # Model related
    parser.add_argument('-m', '--model', type=str, help='inverse model name')
    parser.add_argument('-um', '--up-mode', default=None, help='upsampling layer mode such as "nearest", "bicubic", etc.')
    parser.add_argument('-ss', '--sample-spatial', type=float, default=1.0, help='spatial sampling ratio')
    parser.add_argument('-st', '--sample-temporal', type=int, default=1, help='temporal sampling ratio')
    # Training related
    parser.add_argument('-b', '--batch-size', default=256, type=int)
    parser.add_argument('--lr', default=0.0001, type=float, help='initial learning rate')
    parser.add_argument('-lm', '--lr-milestones', nargs='+', default=[], type=int, help='decrease lr on milestones')
    parser.add_argument('--momentum', default=0.9, type=float, help='momentum')
    parser.add_argument('--weight-decay', default=1e-4 , type=float, help='weight decay (default: 1e-4)')
    parser.add_argument('--lr-gamma', default=0.1, type=float, help='decrease lr by a factor of lr-gamma')
    parser.add_argument('--lr-warmup-epochs', default=0, type=int, help='number of warmup epochs')
    parser.add_argument('-eb', '--epoch_block', type=int, default=40, help='epochs in a saved block')
    parser.add_argument('-nb', '--num_block', type=int, default=3, help='number of saved block')
    parser.add_argument('-j', '--workers', default=16, type=int, help='number of data loading workers (default: 16)')
    parser.add_argument('--k', default=1, type=float, help='k in log transformation')
    parser.add_argument('--print-freq', default=50, type=int, help='print frequency')
    parser.add_argument('-r', '--resume', default=None, help='resume from checkpoint')
    parser.add_argument('--start-epoch', default=0, type=int, help='start epoch')
    # Loss related
    parser.add_argument('-g1v', '--lambda_g1v', type=float, default=1.0)
    parser.add_argument('-g2v', '--lambda_g2v', type=float, default=1.0)
    # Distributed training related
    parser.add_argument('--sync-bn', action='store_true', help='Use sync batch norm')
    parser.add_argument('--world-size', default=1, type=int, help='number of distributed processes')
    parser.add_argument('--dist-url', default='env://', help='url used to set up distributed training')
    # Tensorboard related
    parser.add_argument('--tensorboard', action='store_true', help='Use tensorboard for logging.')
    args = parser.parse_args()
    # Derived values: composite paths and the total epoch count.
    args.output_path = os.path.join(args.output_path, args.save_name, args.suffix or '')
    args.log_path = os.path.join(args.log_path, args.save_name, args.suffix or '')
    args.train_anno = os.path.join(args.anno_path, args.train_anno)
    args.val_anno = os.path.join(args.anno_path, args.val_anno)
    args.epochs = args.epoch_block * args.num_block
    if args.resume:
        args.resume = os.path.join(args.output_path, args.resume)
    return args
if __name__ == '__main__':
    # Script entry point: parse CLI args, then run the training driver.
    args = parse_args()
    main(args)
| 14,469 | 41.558824 | 122 | py |
OpenFWI | OpenFWI-main/transforms.py | # © 2022. Triad National Security, LLC. All rights reserved.
# This program was produced under U.S. Government contract 89233218CNA000001 for Los Alamos
# National Laboratory (LANL), which is operated by Triad National Security, LLC for the U.S.
# Department of Energy/National Nuclear Security Administration. All rights in the program are
# reserved by Triad National Security, LLC, and the U.S. Department of Energy/National Nuclear
# Security Administration. The Government is granted for itself and others acting on its behalf a
# nonexclusive, paid-up, irrevocable worldwide license in this material to reproduce, prepare
# derivative works, distribute copies to the public, perform publicly and display publicly, and to permit
# others to do so.
import torch
import numpy as np
import random
from sklearn.decomposition import PCA
def crop(vid, i, j, h, w):
    """Return the h x w window of the last two dims starting at (i, j)."""
    rows = slice(i, i + h)
    cols = slice(j, j + w)
    return vid[..., rows, cols]
def center_crop(vid, output_size):
    """Crop a centered (th, tw) window from the last two (spatial) dims."""
    th, tw = output_size
    h, w = vid.shape[-2:]
    top = int(round((h - th) / 2.))
    left = int(round((w - tw) / 2.))
    return crop(vid, top, left, th, tw)
def hflip(vid):
    """Mirror the last (width) dimension."""
    return torch.flip(vid, dims=[-1])
# NOTE: for those functions, which generally expect mini-batches, we keep them
# as non-minibatch so that they are applied as if they were 4d (thus image).
# this way, we only apply the transformation in the spatial domain
def resize(vid, size, interpolation='bilinear'):
    """Resize spatial dims to `size`; an int sizes the shorter edge.

    Uses bilinear interpolation by default since inputs here are 4-D
    (not mini-batched videos).
    """
    if isinstance(size, int):
        # Integer size: scale so the shorter spatial edge matches it.
        factor = float(size) / min(vid.shape[-2:])
        size = None
    else:
        factor = None
    return torch.nn.functional.interpolate(
        vid, size=size, scale_factor=factor, mode=interpolation,
        align_corners=False)
def random_resize(vid, size, random_factor, interpolation='bilinear'):
    """Resize like `resize`, jittering the target by a random factor
    drawn uniformly from [1, random_factor)."""
    r = 1 + random.random() * (random_factor - 1)
    scale = None
    if isinstance(size, int):
        # Shorter-edge sizing, multiplied by the random jitter.
        scale = float(size) / min(vid.shape[-2:]) * r
        size = None
    else:
        size = tuple(int(dim * r) for dim in size)
    return torch.nn.functional.interpolate(
        vid, size=size, scale_factor=scale, mode=interpolation,
        align_corners=False)
def pad(vid, padding, fill=0, padding_mode="constant"):
    """Pad with torch semantics (padding tuple applies to the last dims
    first); temporal dim is untouched because input is kept 4-D."""
    return torch.nn.functional.pad(vid, padding, mode=padding_mode, value=fill)
def to_normalized_float_tensor(vid):
    """Move channels-last (T, H, W, C) to (C, T, H, W) and scale 0-255 to [0, 1]."""
    channels_first = vid.permute(3, 0, 1, 2)
    return channels_first.to(torch.float32) / 255
def normalize(vid, mean, std):
    """Standardize along the first dim: (vid - mean) / std, broadcasting
    the per-channel stats over all remaining dims."""
    bcast = (-1,) + (1,) * (vid.dim() - 1)
    mu = torch.as_tensor(mean).reshape(bcast)
    sigma = torch.as_tensor(std).reshape(bcast)
    return (vid - mu) / sigma
def minmax_normalize(vid, vmin, vmax, scale=2):
    """Min-max normalize to [0, 1] (scale != 2) or [-1, 1] (scale == 2).

    Fixed: the previous implementation used in-place augmented assignment
    (``vid -= vmin``), silently mutating the caller's array/tensor (and
    failing outright on integer arrays). The computation is now
    out-of-place; returned values are unchanged.
    """
    vid = (vid - vmin) / (vmax - vmin)
    return (vid - 0.5) * 2 if scale == 2 else vid
def minmax_denormalize(vid, vmin, vmax, scale=2):
    """Invert minmax_normalize: map [-1, 1] (scale == 2) or [0, 1] back
    to the original [vmin, vmax] range."""
    unit = vid / 2 + 0.5 if scale == 2 else vid
    return unit * (vmax - vmin) + vmin
def add_noise(data, snr):
    """Add white Gaussian noise at the given SNR (dB) relative to the
    mean power of `data`."""
    signal_power_db = 10 * np.log10(np.mean(data ** 2))
    noise_power = 10 ** ((signal_power_db - snr) / 10)
    perturbation = np.random.normal(0, np.sqrt(noise_power), data.shape)
    return data + perturbation
def log_transform(data, k=1, c=0):
    """Signed log compression: sign(x) * log(1 + |k*x| + c)."""
    magnitude = np.log1p(np.abs(k * data) + c)
    return magnitude * np.sign(data)
def log_transform_tensor(data, k=1, c=0):
    """Torch twin of log_transform: sign(x) * log(1 + |k*x| + c)."""
    magnitude = torch.log1p(torch.abs(k * data) + c)
    return magnitude * torch.sign(data)
def exp_transform(data, k=1, c=0):
    """Inverse of log_transform: sign(x) * (exp(|x|) - 1 - c) / k."""
    return np.sign(data) * (np.expm1(np.abs(data)) - c) / k
def tonumpy_denormalize(vid, vmin, vmax, exp=True, k=1, c=0, scale=2):
    """Move a tensor to numpy and undo min-max normalization, plus the
    log compression when `exp` is True."""
    if exp:
        # The bounds were normalized in log space, so map them there first.
        vmin = log_transform(vmin, k=k, c=c)
        vmax = log_transform(vmax, k=k, c=c)
    arr = minmax_denormalize(vid.cpu().numpy(), vmin, vmax, scale)
    if exp:
        arr = exp_transform(arr, k=k, c=c)
    return arr
# Class interface
class RandomCrop(object):
    # Callable transform: crop a random window of fixed `size` (th, tw)
    # from the last two dims of the input.
    def __init__(self, size):
        self.size = size
    @staticmethod
    def get_params(vid, output_size):
        """Get parameters for ``crop`` for a random crop.

        Returns (top, left, th, tw); the full frame if it already
        matches output_size.
        """
        h, w = vid.shape[-2:]
        th, tw = output_size
        if w == tw and h == th:
            return 0, 0, h, w
        i = random.randint(0, h - th)
        j = random.randint(0, w - tw)
        return i, j, th, tw
    def __call__(self, vid):
        i, j, h, w = self.get_params(vid, self.size)
        return crop(vid, i, j, h, w)
class CenterCrop(object):
    """Callable transform: crop a centered window of the stored size."""

    def __init__(self, size):
        self.size = size

    def __call__(self, vid):
        return center_crop(vid, self.size)
class Resize(object):
    """Callable transform: resize spatial dims to the stored size."""

    def __init__(self, size):
        self.size = size

    def __call__(self, vid):
        return resize(vid, self.size)
class RandomResize(object):
    """Callable transform: resize with a random jitter up to `random_factor`."""

    def __init__(self, size, random_factor=1.25):
        self.size = size
        self.factor = random_factor

    def __call__(self, vid):
        return random_resize(vid, self.size, self.factor)
class ToFloatTensorInZeroOne(object):
    """Callable transform: channels-first float tensor scaled to [0, 1]."""

    def __call__(self, vid):
        return to_normalized_float_tensor(vid)
class Normalize(object):
    """Callable transform: standardize with fixed per-channel mean/std."""

    def __init__(self, mean, std):
        self.mean = mean
        self.std = std

    def __call__(self, vid):
        return normalize(vid, self.mean, self.std)
class MinMaxNormalize(object):
    """Callable transform: min-max normalize with fixed bounds.

    scale == 2 maps to [-1, 1]; anything else maps to [0, 1].
    """

    def __init__(self, datamin, datamax, scale=2):
        self.datamin = datamin
        self.datamax = datamax
        self.scale = scale

    def __call__(self, vid):
        return minmax_normalize(vid, self.datamin, self.datamax, self.scale)
class RandomHorizontalFlip(object):
    """Callable transform: mirror the width axis with probability p."""

    def __init__(self, p=0.5):
        self.p = p

    def __call__(self, vid):
        # One uniform draw per call; flip only below the threshold.
        return hflip(vid) if random.random() < self.p else vid
class Pad(object):
    """Callable transform: constant-pad with a stored spec and fill value."""

    def __init__(self, padding, fill=0):
        self.padding = padding
        self.fill = fill

    def __call__(self, vid):
        return pad(vid, self.padding, self.fill)
class TemporalDownsample(object):
    """Callable transform: keep every `rate`-th element of the first axis."""

    def __init__(self, rate=1):
        self.rate = rate

    def __call__(self, vid):
        stride = self.rate
        return vid[::stride]
class AddNoise(object):
    """Callable transform: add Gaussian noise at a fixed SNR (dB)."""

    def __init__(self, snr=10):
        self.snr = snr

    def __call__(self, vid):
        return add_noise(vid, self.snr)
class PCD(object):
    # Callable transform: flatten each timestep, center the features, and
    # project onto `n_comp` principal components; returns the components
    # flattened with two trailing singleton axes (shape (T*n_comp, 1, 1)).
    def __init__(self, n_comp=8):
        self.pca = PCA(n_components=n_comp)
    def __call__(self, data):
        data= data.reshape((data.shape[0], -1))
        feat_mean = data.mean(axis=0)
        # NOTE(review): this centers `data` in place, mutating the caller's
        # array when reshape returns a view -- confirm that is intended.
        data -= np.tile(feat_mean, (data.shape[0], 1))
        pc = self.pca.fit_transform(data)
        pc = pc.reshape((-1,))
        pc = pc[:, np.newaxis, np.newaxis]
        return pc
class StackPCD(object):
    """Two-stage PCA reduction for 3-D inputs.

    Stage 1 reduces each sample (transposed so features vary along the
    last axis) to ``n_comp[0]`` components; stage 2 reduces the stacked,
    flattened stage-1 output to ``n_comp[1]`` components, returned with
    two trailing singleton axes.
    NOTE(review): like PCD, intermediate centering mutates arrays in place.
    """
    def __init__(self, n_comp=(32, 8)):
        self.primary_pca = PCA(n_components=n_comp[0])
        self.secondary_pca = PCA(n_components=n_comp[1])
    def __call__(self, data):
        data = np.transpose(data, (0, 2, 1))
        primary_pc = []
        for sample in data:
            # Center each sample before fitting the per-sample PCA.
            feat_mean = sample.mean(axis=0)
            sample -= np.tile(feat_mean, (sample.shape[0], 1))
            primary_pc.append(self.primary_pca.fit_transform(sample))
        primary_pc = np.array(primary_pc)
        data = primary_pc.reshape((data.shape[0], -1))
        feat_mean = data.mean(axis=0)
        data -= np.tile(feat_mean, (data.shape[0], 1))
        secondary_pc = self.secondary_pca.fit_transform(data)
        secondary_pc = secondary_pc.reshape((-1,))
        # BUG FIX: this line previously referenced an undefined name `pc`
        # (copy-paste from PCD), raising NameError at runtime.
        secondary_pc = secondary_pc[:, np.newaxis, np.newaxis]
        return secondary_pc
class LogTransform(object):
    """Callable transform: signed log compression with fixed k and c."""

    def __init__(self, k=1, c=0):
        self.k = k
        self.c = c

    def __call__(self, data):
        return log_transform(data, k=self.k, c=self.c)
class ToTensor(object):
    """Convert a numpy ndarray sample to a torch tensor (memory is shared)."""

    def __call__(self, sample):
        return torch.from_numpy(sample)
| 8,236 | 29.394834 | 105 | py |
hurricast | hurricast-master/utils/data_processing.py | from __future__ import print_function
import pandas as pd
import math
import torch
import numpy as np
import warnings
warnings.filterwarnings('ignore')
dtype = torch.float
device = torch.device("cpu")
#allows to keep only specific columns
def select_data(data):
    """Keep only the track columns used downstream, in a fixed order."""
    keep = ['SID', 'NUMBER', 'ISO_TIME', 'LAT', 'LON',
            'WMO_WIND', 'WMO_PRES', 'DIST2LAND', 'STORM_SPEED']
    return data[keep]
#convert columns to numeric values
#and interpolate missing values
def numeric_data(data):
    """Coerce the numeric track columns to float64 (bad values -> NaN)
    and linearly interpolate the gaps; mutates and returns `data`."""
    numeric_cols = ('LAT', 'LON', 'WMO_WIND', 'WMO_PRES',
                    'DIST2LAND', 'STORM_SPEED')
    for col in numeric_cols:
        as_float = pd.to_numeric(data[col], errors='coerce').astype('float64')
        data[col] = as_float.interpolate(method='linear')
    return data
#to have one-hot encoding of basin and nature of the storm
#to have one-hot encoding of basin and nature of the storm
def add_one_hot(data, df0):
    # One-hot encode BASIN and NATURE from the raw frame `data` and append
    # the dummy columns to `df0`.
    basin = pd.get_dummies(data['BASIN'],prefix='basin')
    # NOTE(review): assumes a blank ' ' category exists in both columns
    # (its dummy column is dropped); raises KeyError otherwise -- confirm.
    basin.drop(columns=['basin_ '], inplace = True)
    nature = pd.get_dummies(data['NATURE'],prefix='nature')
    nature.drop('nature_ ', axis=1, inplace = True)
    frames = [df0, basin, nature]
    df0 = pd.concat(frames, axis = 1)
    print("Basin and Nature of the storm are now added and one-hot.")
    return df0
#This code allows to get the maximum wind change in the last X hours.
#This code allows to get the maximum wind change in the last X hours.
def get_max_change(data, time, i):
    """Max WMO_WIND swing over the `time` hours (time//3 rows) before row i.

    Rows are assumed 3-hourly. Returns ``np.nan`` when the window is empty
    or out of range (e.g. near the start of the series). Fixed: the bare
    ``except`` previously returned the string 'NaN', forcing the resulting
    column to dtype object; a float NaN keeps it numeric, and only the
    expected exceptions are caught.
    """
    t = time // 3
    try:
        window = data['WMO_WIND'][i - t:i]
        # max()/min() of an empty slice raises ValueError -> NaN below.
        return max(window) - min(window)
    except (ValueError, KeyError):
        return np.nan
#please specify a multiple of 3h for the time
#please specify a multiple of 3h for the time
def get_max_wind_change(data, time):
    """Append a 'max_wind_change' column: the max wind swing over the
    trailing `time`-hour window at each row. Mutates and returns `data`."""
    data['max_wind_change'] = [
        get_max_change(data, time, row) for row in range(len(data))
    ]
    return data
#to use in the future: computes the wind category
def sust_wind_to_cat_one_hot(wind):
    """Storm-category label from max sustained wind in kt; 'nan' for NaN."""
    brackets = ((33., 'TD'), (63., 'TS'), (82., 'H1'),
                (95., 'H2'), (112., 'H3'), (136., 'H4'))
    for limit, label in brackets:
        if wind <= limit:
            return label
    if wind > 136.:
        return 'H5'
    # NaN compares False against everything, so it ends up here.
    return 'nan'
def sust_wind_to_cat_val(wind):
    """Numeric storm category 0-6 from max sustained wind in kt; NaN -> 0."""
    bounds = (33., 63., 82., 95., 112., 136.)
    for cat, limit in enumerate(bounds):
        if wind <= limit:
            return cat
    if wind > 136.:
        return 6
    # NaN falls through every comparison above.
    return 0
def add_storm_category_one_hot(data):
    # Append one-hot storm-category columns derived from WMO_WIND.
    df = pd.DataFrame()
    df['storm_category'] = [sust_wind_to_cat_one_hot(data['WMO_WIND'][i]) for i in range(len(data))]
    storm_cat = pd.get_dummies(df['storm_category'],prefix='storm_category')
    #storm_cat
    # NOTE(review): assumes at least one NaN wind so the 'nan' dummy
    # column exists; drop() raises KeyError otherwise -- confirm.
    storm_cat.drop('storm_category_nan', axis=1, inplace=True)
    frames = [data, storm_cat]
    df0 = pd.concat(frames, axis = 1)
    #df0.drop('storm_category', axis=1)
    print("Storm category is now added and one-hot.")
    return df0
def add_storm_category_val(data):
    """Append a numeric 'storm_category' column derived from WMO_WIND."""
    cats = pd.DataFrame()
    cats['storm_category'] = [
        sust_wind_to_cat_val(data['WMO_WIND'][i]) for i in range(len(data))
    ]
    return pd.concat([data, cats], axis=1)
def sort_storm(data, min_wind, min_steps = 5, max_steps = 120):
    '''function to create dictionary of storm matrices
    arguments:
        data we want to cut
        min_wind: the minimum wind speed to store data
        min_steps: storms with <= this many qualifying rows are dropped
        max_steps: trajectories are truncated to this many rows
    Returns {1: DataFrame, 2: DataFrame, ...} keyed by insertion order.
    '''
    #get unique storm_id:
    SID=pd.unique(data['SID']).tolist()
    #remove empty SID
    #if not dropna: SID.remove(' ')
    #create empty dictionary
    dict0={}
    ind = 0
    for i in range(len(SID)):
        #get data of a particular SID
        M = data.loc[data['SID'] == SID[i]]
        #cut off using min wind speed
        #TODO : cut everything before, ie look for the right date
        try:
            # t: label of the first row reaching min_wind; t0: first label of
            # the storm. t - t0 converts to a position within M for iloc.
            t = M.index[M['WMO_WIND']>= min_wind][0]
            t0 = M.index[0]
        except:
            # No row reaches min_wind; keep the storm from its start.
            # NOTE(review): t0 is then undefined from this iteration, so the
            # iloc below reuses the previous loop's t0 -- confirm intended.
            t = 0
        N = M.loc[M['WMO_WIND'] >= min_wind]
        #save matrix in dict0
        if N.shape[0] > min_steps:
            ind+=1
            dict0.update({ind:M.iloc[t-t0:max_steps+t-t0]})
    print("The dictionary of storms has been created.")
    return dict0
#Geographical difference features: i.e. feature_1(t) = feature(t)-feature(0)
# features: LAT, LON, DIST2LAND
def geo_diff(dict0):
    """Add LAT_1/LON_1/DIST2LAND_1 columns: offsets of each row from the
    storm's first timestep. Resets each frame's index in place."""
    dict1 = {}
    for key, df in dict0.items():
        df.reset_index(inplace=True, drop=True)
        # Offsets relative to t = 0 of the trajectory.
        for col in ('LAT', 'LON', 'DIST2LAND'):
            df[col + '_1'] = df[col] - df[col][0]
        dict1[key] = df
    return dict1
#instead of padding with 0, pad with latest values in loop
def pad_traj(dict0, max_steps, nan = False):
dict1={}
for t in dict0:
num_steps = dict0[t].shape[0]
steps2add = max_steps - num_steps
if steps2add > 0:
if nan:
dict1[t] = pd.concat([dict0[t], pd.DataFrame([[np.nan] * dict0[t].shape[1]]*steps2add, columns=dict0[t].columns)], ignore_index=True)
else:
dict1[t] = pd.concat([dict0[t], pd.DataFrame([[0] * dict0[t].shape[1]]*steps2add, columns=dict0[t].columns)], ignore_index=True)
#In fact it happens to be easier to make the change afterwards with repad
#dict1[t] = pd.concat([dict0[t], pd.DataFrame([dict0[t].tail(1)]*steps2add, columns=dict0[t].columns)], ignore_index=True)
else:
dict1[t] = dict0[t][:max_steps]
print("The trajectories have now been padded.")
return dict1
def get_distance_km(lon1, lat1, lon2, lat2):
    '''
    Great-circle distance in km between two (lon, lat) points, using the
    haversine formula (https://www.movable-type.co.uk/scripts/latlong.html)
    '''
    earth_radius_m = 6371e3
    phi1, phi2 = math.radians(lat1), math.radians(lat2)
    dphi = math.radians(lat2 - lat1)
    dlmb = math.radians(lon2 - lon1)
    a = math.sin(dphi / 2) ** 2 \
        + math.cos(phi1) * math.cos(phi2) * math.sin(dlmb / 2) ** 2
    c = 2 * math.atan2(math.sqrt(a), math.sqrt(1 - a))
    return earth_radius_m * c / 1000.
#compute the displacement from t=0
#compute the displacement from t=0
def add_displacement_distance(dict0):
    """Add a per-step 'DISPLACEMENT' column: km moved since the previous fix.

    The first row is 0 and implausible jumps (> 500 km between consecutive
    fixes) are clamped to 0. Fixed: values were previously written via
    chained indexing (``df['DISPLACEMENT'][j] = d``), which triggers
    SettingWithCopyWarning and silently fails under pandas copy-on-write;
    the column is now built once and assigned whole.
    """
    dict1 = {}
    for key in dict0:
        df = dict0[key]
        df.reset_index(inplace=True, drop=True)
        steps = [0]
        for j in range(1, len(df)):
            d = get_distance_km(df['LON'][j - 1], df['LAT'][j - 1],
                                df['LON'][j], df['LAT'][j])
            steps.append(0 if d > 500 else d)
        df['DISPLACEMENT'] = steps
        dict1[key] = df
    return dict1
def add_displacement_lat_lon2(dict0):
    """Add DISPLACEMENT_LAT / DISPLACEMENT_LON columns: per-step deltas
    in degrees (first row is 0). Resets each frame's index in place."""
    dict1 = {}
    for key in dict0:
        df = dict0[key]
        df.reset_index(inplace=True, drop=True)
        lat, lon = df['LAT'], df['LON']
        d_lat, d_lon = [0], [0]
        for j in range(1, len(df)):
            d_lat.append(lat[j] - lat[j - 1])
            d_lon.append(lon[j] - lon[j - 1])
        df['DISPLACEMENT_LAT'] = d_lat
        df['DISPLACEMENT_LON'] = d_lon
        dict1[key] = df
    return dict1
#function to calculate tensor shape
#input: dictionary of storm data
def tensor_shape(dict0):
#number of storms
num_storms=len(dict0) - 1
#number of features
num_features=dict0[next(iter(dict0))].shape[1]
#to compute min and max number of steps
t_max = 0 #initialise
t_min = 1000
t_hist = []
for i in dict0:
t0 = dict0[i].shape[0]
t_hist.append(t0)
if t0 > t_max:
t_max = t0
if t0 < t_min:
t_min = t0
print("There are %s storms with %s features, and maximum number of steps is %s and minimum is %s." %(num_storms,num_features,t_max, t_min))
return num_storms, num_features, t_max, t_min, t_hist
#create a tensor
def create_tensor(data, number_of_storms):
tensor = data[1]
for i in range(2,number_of_storms,1):
tensor=np.dstack((tensor, data[i]))
#return list of features
p_list = data[1].columns.tolist()
print("The tensor has now been created.")
return tensor, p_list
def repad(t):
    """Replace trailing zero-padding with the last real timestep, in place.

    A trajectory counts as zero-padded when channel 2 ends in 0; the pad
    start is located as the argmin of that channel.
    """
    n_storms, n_steps = t.shape[0], t.shape[2]
    for i in range(n_storms):
        if t[i][2][-1] == 0:
            start = np.argmin(t[i][2])
            for step in range(start, n_steps):
                t[i, :, step] = t[i, :, start - 1]
    return t
def prepare_data(path = "/data/ibtracs.last3years.list.v04r00.csv", max_wind_change = 12, min_wind = 50, min_steps = 15, max_steps = 120, secondary = False, one_hot=False, dropna = False):
    # Full pipeline: IBTrACS csv -> (torch.Tensor of shape
    # (num_storms, max_steps, num_features), feature-name list).
    data = pd.read_csv(path)
    #select interesting columns
    df0 = select_data(data)
    #transform data from String to numeric
    df0 = numeric_data(df0)
    #if dropna: df0 = df0.dropna()
    #add one_hot columns:
    if one_hot:
        #add one-hot storm category
        #df0 = add_storm_category_val(df0)
        df0 = add_storm_category_one_hot(df0)
        #transform basin and nature of the storm into one-hot vector
        df0 = add_one_hot(data, df0)
    if secondary:
        #add the max-wind-change column
        df0 = get_max_wind_change(df0, max_wind_change)
    #get a dict with the storms with a windspeed greater to a threshold
    storms = sort_storm(df0, min_wind, min_steps)
    #pad the trajectories to a fix length
    d = pad_traj(storms, max_steps)
    #print(d)
    if secondary:
        #d = add_displacement_distance(d)
        d = add_displacement_lat_lon2(d)
    #print the shape of the tensor
    m, n, t_max, t_min, t_hist = tensor_shape(d)
    #create the tensor
    t, p_list = create_tensor(d, m)
    #delete id and number of the storms
    # Drops the first three columns (SID, NUMBER, ISO_TIME) and casts to float.
    t2 = torch.Tensor(t[:,3:,:].astype('float64'))
    #match feature list
    p_list = p_list[3:]
    #transpose time and sample
    t3 = torch.transpose(t2,0,2)
    #replace 0 by latest values in the tensor
    t3 = repad(t3)
    return t3, p_list
def prepare_data2(path = "./data/ibtracs.last3years.list.v04r00.csv", max_wind_change = 12, min_wind = 50, min_steps = 15, max_steps = 120, secondary = False, one_hot=False, dropna = False):
    """Variant of ``prepare_data`` that stops before tensor conversion.

    Same preprocessing pipeline, but returns the raw stacked array sliced to
    feature columns 2:5 (presumably time/position columns — verify against
    ``select_data``'s column order) instead of a torch tensor.
    """
    data = pd.read_csv(path)
    #select interesting columns
    df0 = select_data(data)
    #transform data from String to numeric
    df0 = numeric_data(df0)
    #if dropna: df0 = df0.dropna()
    #add one_hot columns:
    if one_hot:
        #add one-hot storm category
        #df0 = add_storm_category_val(df0)
        df0 = add_storm_category_one_hot(df0)
        #transform basin and nature of the storm into one-hot vector
        df0 = add_one_hot(data, df0)
    if secondary:
        #add the max-wind-change column
        df0 = get_max_wind_change(df0, max_wind_change)
    #get a dict with the storms with a windspeed greater to a threshold
    storms = sort_storm(df0, min_wind, min_steps)
    #pad the trajectories to a fix length
    d = pad_traj(storms, max_steps)
    #print(d)
    if secondary:
        #d = add_displacement_distance(d)
        d = add_displacement_lat_lon2(d)
    #print the shape of the tensor
    m, n, t_max, t_min, t_hist = tensor_shape(d)
    #create the tensor
    t, p_list = create_tensor(d, m)
    return t[:,2:5,:]
def prepare_tabular_data_vision(path="./data/ibtracs.last3years.list.v04r00.csv", min_wind=50, min_steps=15,
                                max_steps=120, get_displacement=True):
    """Build a (storm, timestep, feature) array plus the padded storm dict.

    Keeps only id/time/position/intensity columns, filters and pads the
    storms, stacks them, then tries to parse the timestamp column before
    dropping it from the returned array.
    """
    data = pd.read_csv(path)
    # select interesting columns
    df0 = select_data(data)
    # transform data from String to numeric
    df0 = numeric_data(df0)
    df0 = df0[['SID', 'ISO_TIME', 'LAT', 'LON', 'WMO_WIND', 'WMO_PRES']]
    # get a dict with the storms with a windspeed and number of timesteps greater to a threshold
    storms = sort_storm(df0, min_wind, min_steps)
    # pad the trajectories to a fix length
    d = pad_traj(storms, max_steps)
    # print(d)
    if get_displacement:
        d = add_displacement_lat_lon2(d)
    # print the shape of the tensor
    m, n, t_max, t_min, t_hist = tensor_shape(d)
    # create the tensor
    t, p_list = create_tensor(d, m)
    #put t in format storm * timestep * features
    e = t.transpose((2, 0, 1))
    for tt in e:
        try:
            tt[0] = datetime.strptime(tt[0], "%Y-%m-%d %H:%M:%S")
        except (TypeError, ValueError):
            # Narrowed from a bare ``except`` so unrelated errors (and Ctrl-C)
            # are no longer silently swallowed; strptime fails with TypeError on
            # non-string input and ValueError on a format mismatch.
            # NOTE(review): ``tt[0]`` is a whole feature row, so strptime likely
            # always raises TypeError here — confirm a per-cell parse was intended.
            pass
    return e[:, :, 1:], d
| 12,412 | 30.585242 | 190 | py |
DialogID | DialogID-main/src/auto_text_classifier/atc/models/hf_base.py |
import torch
import torch.nn.functional as F
import torch.nn as nn
import os
import copy
import numpy as np
import pandas as pd
import random
import datetime
from tqdm import tqdm, trange
from transformers import BertConfig
from torch.utils.data import TensorDataset, DataLoader, RandomSampler, SequentialSampler
from transformers.data.data_collator import default_data_collator
from keras.preprocessing.sequence import pad_sequences
from transformers import BertForSequenceClassification, BertModel, BertTokenizer, AutoTokenizer, AutoModelForSequenceClassification
from transformers import AdamW
from transformers import get_linear_schedule_with_warmup
from atc.utils.data_utils import init_dir
from atc.models.base_model import BaseModel
from atc.utils.metrics_utils import get_model_metrics
from atc.utils.data_utils import load_df, load_df_1
from transformers import AutoConfig
from atc.utils.adt_utils import *
from atc.utils.data_utils import DFDataset
import gc
import time
import sys
# Optional dependency: record at import time whether NVIDIA apex (amp) is usable.
try:
    from apex import amp  # noqa: F401
    _has_apex = True
except ImportError:
    _has_apex = False
def is_apex_available():
    """Return True if NVIDIA apex (mixed-precision amp) could be imported."""
    return _has_apex
def get_model_report(preds, labels, num_labels, multi_label=False):
    """Compute an evaluation report for a batch of predictions.

    * multi-label: exact-match accuracy after thresholding at 0.5.
    * multi-class (num_labels != 2): top-1 accuracy.
    * binary: the full metric set from ``get_model_metrics`` using the
      positive-class probability ``preds[:, 1]``.

    Assumes ``preds`` and ``labels`` have the same number of rows.
    """
    if multi_label:
        binarized = HFBase.transfer_01(preds)
        n_exact = 0
        for row, gold in zip(binarized, labels):
            if sum(row == gold) == num_labels:
                n_exact += 1
        return {"Accuracy": n_exact / len(labels)}
    if num_labels != 2:
        top1 = np.argmax(preds, axis=1)
        return {"Accuracy": np.sum(top1 == labels) / len(labels)}
    return get_model_metrics(y_true=labels, y_pred=preds[:, 1])
def format_time(elapsed):
    """Render a duration given in seconds as an ``h:mm:ss`` string.

    The value is rounded to the nearest whole second before formatting.
    """
    whole_seconds = int(round(elapsed))
    return str(datetime.timedelta(seconds=whole_seconds))
class HFBase(BaseModel):
    """Base class for Hugging Face transformer text classifiers.

    Handles tokenizer/device setup, loss selection (focal / supcon / triplet /
    BCE / cross-entropy), optional class weighting, training with optional
    adversarial perturbation, evaluation and prediction.
    """

    def __init__(self, config):
        """Build tokenizer, device and loss function from *config* (common keys parsed by BaseModel)."""
        super().__init__(config)
        self.model = None
        self.tokenizer = self.get_tokenizer()
        self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
        self.model_path = self.save_dir
        self.config = config
        self.adt_emb_name = config.get("adt_emb_name","emb")
        self.adt_epsilon = config.get("adt_epsilon",1)
        #
        if self.pos_weight:
            self.pos_counts_dict = self.get_pos_count()  # per-class sample counts in the training set
            self.pos_weight = self.get_pos_weight(self.pos_counts_dict)  # per-class loss weights
        else:
            self.pos_weight = None
        #
        # Loss selection: the flags are checked in priority order.
        if self.focal_loss == 1:
            self._loss_fun = FocalLoss(logits=True, multilabel=self.multi_label)
            print("Training use focal loss ~~")
        elif self.supcon_loss == 1:
            self._loss_fun = SupConLoss(config["num_labels"])
            print("Training use supcon loss ~~")
        elif self.triplet_loss == 1:
            self._loss_fun = TripletLoss()
            print("Training use triplet loss ~~")
        elif self.multi_label:
            self._loss_fun = nn.BCEWithLogitsLoss(reduction="sum", pos_weight=self.pos_weight)
            print("Training use BCEWithLogitsLoss ~~, weight {}".format(self.pos_weight))
        else:
            self._loss_fun = nn.CrossEntropyLoss(weight=self.pos_weight)  # default: (optionally class-weighted) cross-entropy
            print("Training use origin loss ~~, weight {}".format(self.pos_weight))
        #
        self.model_to_save = None
def get_pos_count(self):
"""
计算训练集中每个类别的数量
"""
df_tmp = pd.read_csv(self.train_dir)
label_count_dict = dict() # {label: count}
if "label_index" in df_tmp.columns.tolist():
multilabel_list = df_tmp["label_index"].tolist()
for label_list in multilabel_list:
label_list = eval(label_list)
for label in label_list:
if label in label_count_dict:
label_count_dict[label] += 1
else:
label_count_dict[label] = 1
else:
label_list = df_tmp["label"].tolist()
for label in label_list:
if label in label_count_dict:
label_count_dict[label] += 1
else:
label_count_dict[label] = 1
#
return label_count_dict
    def get_pos_weight(self, pos_counts_dict):
        """Build per-class loss weights from per-class sample counts.

        weight[i] = max(counts) / counts[i]: the most frequent class gets
        weight 1 and rarer classes get proportionally larger weights.
        (The old docstring claimed min(counts)/counts[i]; the code uses max.)

        NOTE(review): assumes class indices 0..num_labels-1 all appear as keys;
        a missing class leaves a 0 count and divides by zero — confirm upstream.
        """
        pos_counts_list = [0] * len(pos_counts_dict)
        for index, count in pos_counts_dict.items():
            pos_counts_list[index] = count
        pos_weight = [max(pos_counts_list) / count for count in pos_counts_list]
        #
        return torch.Tensor(pos_weight).to(self.device)
    def get_tokenizer(self):
        """Return the tokenizer for this model family; subclasses must implement."""
        raise NotImplementedError
    def get_data_generator(self, data, shuffle=False, num_workers=1):
        """Wrap *data* (path or DataFrame, normalized by ``load_df_1``) in a DataLoader.

        Tokenization is done inside ``DFDataset``; batching uses the
        transformers ``default_data_collator``.
        """
        data = load_df_1(data)
        dataset = DFDataset(data,
                            tokenizer=self.tokenizer,
                            max_len=self.max_len,
                            multi_label=self.multi_label,
                            num_labels=self.num_labels)
        data_dataloader = torch.utils.data.DataLoader(
            dataset,
            num_workers=num_workers,
            shuffle=shuffle,
            collate_fn=default_data_collator,
            batch_size=self.batch_size,
        )
        return data_dataloader
def get_inputs(self, batch):
for k, v in batch.items():
if isinstance(v, torch.Tensor):
batch[k] = v.to(self.device)
return batch
def process_data(self, train_path, dev_path, test_path):
train_generator = self.get_data_generator(train_path, shuffle=True)
dev_generator = self.get_data_generator(dev_path)
test_generator = self.get_data_generator(test_path)
return train_generator, dev_generator, test_generator
def init_model(self):
print("HFBase init")
try:
self.model = AutoModelForSequenceClassification.from_pretrained(self.model_dir,
num_labels=self.num_labels)
#output_hidden_states=True)
except:
config = self.get_config()
self.model = AutoModelForSequenceClassification.from_pretrained(self.model_dir,
config=config)
#output_hidden_states=True)
    def train(self, train_path, dev_path, test_path):
        """Fine-tune on *train_path*, early-stop on *dev_path*, and report on *test_path*.

        Seeds RNGs, builds dataloaders and the AdamW optimizer, optionally
        wraps model/optimizer with apex amp (``self.fp16`` opt level), runs
        ``train_model``, reloads the best checkpoint and returns its test report.
        """
        self.set_seed(self.seed)  # for reproducibility
        train_generator, dev_generator, test_generator = self.process_data(
            train_path, dev_path, test_path)
        self.init_model()
        self.model = self.model.to(self.device)
        self.optimizer = AdamW(self.model.parameters(),
                               lr=self.lr, # args.learning_rate - default is 5e-5, our notebook had 2e-5
                               # args.adam_epsilon - default is 1e-8.
                               eps=1e-8
                               )
        if not self.fp16 is None:
            if not is_apex_available():
                raise ImportError("Please install apex from https://www.github.com/nvidia/apex to use fp16 training.")
            self.model, self.optimizer = amp.initialize(self.model, self.optimizer, opt_level=self.fp16)
            print("train model use fp16")
        self.train_model(train_generator,
                         dev_generator, test_generator)
        ## load best model
        self.load_model(self.save_dir)
        return self.evaluate(test_path)
    def get_sentence_embedding(self, text):
        """Run the current model on *text* and return its sentence embedding.

        Placeholder: not implemented yet.
        """
        pass
    def get_label_attention_sentence_embedding(self):
        """Attend sentence tokens over label text and pool into a sentence embedding.

        Placeholder (inspired by LEAM, https://arxiv.org/pdf/1805.04174.pdf);
        not implemented yet.
        """
        pass
def load_model(self, model_path):
# 有一些模型必须要指定num_labels,例如bart,但是有一些模型有没有这个参数,因此这里首先判断。和init_model很类似
try:
self.model = AutoModelForSequenceClassification.from_pretrained(model_path,
num_labels=self.num_labels)
except:
self.model = AutoModelForSequenceClassification.from_pretrained(model_path)
# Copy the model to the GPU.
self.model = self.model.to(self.device)
def _eval_model(self, dataloader, have_label=False):
self.model.eval()
total_loss = 0
pred_list = []
label_list = []
# Predict
batch_num = 0
for batch in tqdm(dataloader):
batch_num += 1
inputs = self.get_inputs(batch)
with torch.no_grad():
# Forward pass, calculate logit predictions
outputs = self.model(**inputs)
total_loss += outputs[0].item()
if self.multi_label:
pred = torch.sigmoid(outputs['logits']).detach().cpu().numpy()
else:
pred = F.softmax(outputs['logits']).detach().cpu().numpy()
pred_list.append(pred)
label_list.append(batch['labels'].detach().cpu().numpy())
#
y_pred = np.concatenate(pred_list)
labels = np.concatenate(label_list)
#
if have_label:
loss = total_loss/batch_num
else:
loss, labels = None, None
#
return loss, y_pred, labels
def demo(self, text, softmax_b=False):
"""
对单条数据进行预测
"""
if text.count("[SEP]") == 1:
text1, text2 = text.split("[SEP]")
else:
text1 = text
text2 = None
#
encoding = self.tokenizer.encode_plus(
text1,
text2,
add_special_tokens=True,
max_length=self.max_len,
padding='max_length',
return_token_type_ids=True,
truncation=True
)
for k, v in encoding.items():
v = torch.Tensor([v]).long()
encoding[k] = v.to(self.device)
#
self.model.eval()
with torch.no_grad():
outputs = self.model(**encoding)
if self.multi_label:
preds = torch.sigmoid(outputs['logits']).detach().cpu().numpy()
else:
preds = F.softmax(outputs['logits']).detach().cpu().numpy()
#
if self.config.get("后处理", False):
preds = self.post_thresholds(preds)
#
if softmax_b:
return preds
#
pred = []
if self.multi_label:
for p in preds.tolist()[0]:
if p >= 0.5:
pred.append(1)
else:
pred.append(0)
else:
pred = preds.argmax()
return pred
@staticmethod
def transfer_01(preds, threshold=0.5):
pred_list = []
if type(preds) != list:
preds = preds.tolist()
#
for p in preds:
pred = []
for val in p:
if val >= threshold:
pred.append(1)
else:
pred.append(0)
#
pred_list.append(np.array(pred))
#
return np.array(pred_list)
def demo_text_list(self, text_list, softmax_b=False):
"""
对多条数据进行预测
"""
df = pd.DataFrame({"text": text_list})
dataloader = self.get_data_generator(df, shuffle=False)
_,preds,_ = self._eval_model(dataloader, have_label=False)
#
# df1 = pd.DataFrame()
# df1["softmax"] = preds.tolist()
model_name = self.model_dir.split("/")[-2]
# df1.to_csv("/data1/sp/jupyter_data/exercise/output/softmax_result_{}_{}.csv".format(model_name, self.date))
#
if self.config.get("后处理", False):
preds = self.post_thresholds(preds)
#
if softmax_b:
return preds
#
pred_list = []
if self.multi_label:
# 多标签
pred_list = self.transfer_01(preds)
else:
if self.num_labels == 2:
# 二分类
pred_list = preds[:, 1]
else:
# 多分类
pred_list = np.argmax(preds, axis=1).flatten()
#
return pred_list
    def post_thresholds(self, input_preds):
        """Suppress low-confidence predictions by boosting the fallback class.

        For each sample: if the argmax score is below its class-specific
        threshold, add 0.9 to the last class (index 8, "other") so it wins
        instead; otherwise the prediction is left unchanged.
        """
        # Class order: encourage, guide, summarize, greeting, note,
        # restate, review, example, other.
        # thresholds = [0] * 9
        thresholds = [0.98, 0.98, 0.99, 0.9, 0.95, 0.99, 0.99, 0.99, 0]
        preds = input_preds.copy()
        origin_pred_class = np.argmax(input_preds, axis=1).flatten()
        for idx, i in enumerate(origin_pred_class):
            if input_preds[idx, i] < thresholds[i]:
                preds[idx, 8] += 0.9
        return preds
def labelEncoder(self, y, nClass):
"""
将label转换成one hot矩阵
[3, 4, 1] -> [[0,0,0,1,0], [0,0,0,0,1], [0,1,0,0,0]]
"""
tmp = torch.zeros(size=(y.shape[0], nClass))
for i in range(y.shape[0]):
tmp[i][y[i]] = 1
return tmp.to(self.device)
def k_is_in(self, t, keywords):
for k in keywords:
if k in t:
return True
return False
    def train_model(self, train_generator, dev_generator, test_generator):
        """Core training loop with optional adversarial training.

        Trains for ``self.epochs`` epochs with a linear-warmup scheduler,
        optionally applying FGM / PGD / FreeAT / FreeLB perturbations per
        batch depending on ``self.adt_type``.  Validates on *dev_generator*
        after each epoch (and optionally every ``self.eval_steps`` steps),
        saves the best checkpoint to ``self.save_dir`` by ``self.refit``
        score, and early-stops after ``self.patience`` stale epochs.
        *test_generator* is accepted but not used here.
        """
        patience_count = 0
        best_eval_score = 0
        best_loss = np.inf
        epochs = self.epochs
        output_dir = self.save_dir
        total_steps = len(train_generator) * epochs
        # Create the learning rate scheduler.
        # It is useful to release gpu momory.
        scheduler = get_linear_schedule_with_warmup(self.optimizer,
                                                    num_warmup_steps=0, # Default value in run_glue.py
                                                    num_training_steps=total_steps)
        # if self.adt_type=='fgm':
        #     fgm = FGM(self.model)
        #     print("Training use fgm ~~, fgm_epsilon is {}".format(self.fgm_epsilon))
        if self.adt_type=='fgm':
            fgm = FGM(self.model)
            print(f"Training use FGM ~~,self.adt_epsilon is {self.adt_epsilon}")
        elif self.adt_type == 'pgd':
            pgd = PGD(self.model)
            print("Training use PGD ~~")
        elif self.adt_type == 'freeat':
            freeat = FreeAT(self.model)
            print("Training use FreeAT ~~")
        elif self.adt_type == 'freelb':
            freelb = FreeLB(self.model)
            print("Training use FreeLB ~~")
        else:
            print("Training use none adt ~~")
        # Store the average loss after each epoch so we can plot them.
        loss_values = []
        # For each epoch...
        self.set_seed(self.seed)  # for reproducibility
        step_num = 0
        if(self.adt_type == "freeat"):
            # NOTE(review): the loop below iterates the local ``epochs`` captured
            # above, so shrinking ``self.epochs`` here has no effect on it — confirm intent.
            self.epochs = int(self.epochs / self.K)
        for epoch_i in range(0, epochs):
            print("")
            print(
                '======== Epoch {:} / {:} ========'.format(epoch_i + 1, epochs))
            print('Training...')
            print("learning rate: {}".format(self.lr))
            # Measure how long the training epoch takes.
            t_train = time.time()
            # Reset the total loss for this epoch.
            total_loss = 0
            # For each batch of training data...
            batch_i = 0
            for _, batch in tqdm(enumerate(train_generator)):
                batch_i += 1
                self.model.train()
                step_num += 1
                inputs = self.get_inputs(batch)
                outputs = self.model(**inputs)
                #
                logit = outputs[1]
                #
                loss = self._loss_fun(logit, inputs['labels'])
                # NOTE(review): accumulates the loss *tensor* (not loss.item()),
                # which keeps autograd references alive across the epoch — confirm intended.
                total_loss += loss
                loss.backward() # backprop: compute the normal gradients
                if batch_i % 100 == 0:
                    print("batch {} loss {} \n".format(batch_i, loss))
                    sys.stdout.flush()
                # Clip the norm of the gradients to 1.0.
                # This is to help prevent the "exploding gradients" problem.
                # torch.nn.utils.clip_grad_norm_(self.model.parameters(), 1.0)
                # if self.adt_type=='fgm':
                #     # adversarial training
                #     fgm.attack(epsilon=self.fgm_epsilon) # perturb the embedding
                #     outputs = self.model(**inputs)
                #     loss_adv = outputs[0]
                #     loss_adv.backward() # accumulate adversarial grads on top of the normal ones
                #     fgm.restore() # restore embedding parameters
                if self.adt_type=='fgm': # FGM adversarial training
                    # adversarial step
                    fgm.attack(self.adt_epsilon, self.adt_emb_name) # perturb the embedding weights
                    outputs = self.model(**inputs)
                    loss_adv = outputs[0]
                    loss_adv.backward() # accumulate adversarial grads on top of the normal ones
                    fgm.restore(self.adt_emb_name) # restore embedding parameters
                    # gradient step
                    self.optimizer.step()
                elif self.adt_type == 'pgd': # PGD adversarial training
                    pgd.backup_grad()
                    # adversarial steps
                    for t in range(self.K):
                        pgd.attack(is_first_attack=(t==0)) # perturb embedding; back up param.data on the first attack
                        if t != self.K - 1:
                            self.optimizer.zero_grad()
                        else:
                            pgd.restore_grad()
                        outputs = self.model(**inputs)
                        loss_adv = outputs[0]
                        loss_adv.backward() # accumulate adversarial grads on top of the normal ones
                    pgd.restore() # restore embedding parameters
                    self.optimizer.step()
                elif self.adt_type == 'freeat': # FreeAT adversarial training
                    # adversarial steps
                    for t in range(self.K):
                        freeat.attack(is_first_attack=(t==0)) # perturb embedding; back up param.data on the first attack
                        self.optimizer.zero_grad()
                        # freeat.restore_grad()
                        outputs = self.model(**inputs)
                        loss_adv = outputs[0]
                        loss_adv.backward() # accumulate adversarial grads
                        self.optimizer.step()
                    # freeat.restore() # restore embedding parameters
                elif self.adt_type == 'freelb': # FreeLB adversarial training
                    freelb.backup_grad()
                    # adversarial steps
                    for t in range(self.K):
                        freelb.attack(is_first_attack=(t==0)) # perturb embedding; back up param.data on the first attack
                        # self._optimizer.zero_grad()
                        outputs = self.model(**inputs)
                        loss_adv = outputs[0] / self.K
                        loss_adv.backward() # accumulate adversarial grads
                    freelb.restore() # restore embedding parameters
                    self.optimizer.step()
                else:
                    self.optimizer.step()
                # gradient descent / parameter update is handled per-branch above
                # self.optimizer.step()
                # Update the learning rate.
                scheduler.step()
                self.model.zero_grad()
                # evaluate every ``self.eval_steps`` steps (disabled when eval_steps is None)
                if self.eval_steps is not None and self.eval_steps == step_num:
                    t0 = time.time()
                    avg_eval_loss, y_pred, labels = self._eval_model(dev_generator, have_label=True)
                    model_report = get_model_report(y_pred, labels, self.num_labels, self.multi_label)
                    eval_score = model_report[self.refit]  # metric being optimized
                    # if best save self.model
                    if eval_score > best_eval_score:
                        best_eval_score = eval_score
                        if not os.path.exists(output_dir):
                            os.makedirs(output_dir)
                        print(" Get best result, saving self.model to %s" % output_dir)
                        self.model_to_save = self.model.module if hasattr(
                            self.model, 'module') else self.model
                        self.model_to_save.save_pretrained(output_dir)
                        self.tokenizer.save_pretrained(output_dir)
                    # Report the final accuracy for this validation run.
                    print(" Validation {}: {:.4f},Loss :{:.4f},best_eval_loss {} is {:.4f}".format(self.refit,
                                                                                                  eval_score,
                                                                                                  avg_eval_loss,
                                                                                                  self.refit,
                                                                                                  best_eval_score))
                    print(" Validation took: {:}".format(
                        format_time(time.time() - t0)))
                    step_num = 0  # reset step_num
            #
            # evaluate once per epoch
            # Calculate the average loss over the training data.
            avg_train_loss = total_loss / len(train_generator)
            # Store the loss value for plotting the learning curve.
            loss_values.append(avg_train_loss)
            print("")
            print(" Average training loss: {0:.2f}".format(avg_train_loss))
            print(" Training epcoh took: {:}".format(
                format_time(time.time() - t_train)))
            # ========================================
            # Validation
            # ========================================
            # After the completion of each training epoch, measure our performance on
            # our validation set.
            print("")
            print("Running Validation...")
            t0 = time.time()
            # do eval
            # Put the self.model in evaluation mode--the dropout layers behave differently
            # during evaluation.
            avg_eval_loss, y_pred, labels = self._eval_model(dev_generator, have_label=True)
            model_report = get_model_report(y_pred, labels, self.num_labels, self.multi_label)
            eval_score = model_report[self.refit]  # metric being optimized
            # Report the final accuracy for this validation run.
            print(" {}: {:.4f},Loss :{:.4f}".format(self.refit,eval_score,avg_eval_loss))
            print(" Validation took: {:}".format(
                format_time(time.time() - t0)))
            # if best save self.model
            if eval_score > best_eval_score + 0.001:
                patience_count = 0
                if not os.path.exists(output_dir):
                    os.makedirs(output_dir)
                print("Get best result, saving self.model to %s" % output_dir)
                self.model_to_save = self.model.module if hasattr(
                    self.model, 'module') else self.model
                self.model_to_save.save_pretrained(output_dir)
                self.tokenizer.save_pretrained(output_dir)
                best_eval_score = eval_score
            else:
                patience_count = patience_count + 1
                if patience_count > self.patience:
                    print("Epoch {}:early stopping Get best result, {} did not improve from {}".format(
                        epoch_i + 1,self.refit,best_eval_score))
                    break
            # learning-rate decay (NOTE(review): the optimizer's lr is driven by the
            # scheduler; this only changes the value printed next epoch — confirm intent)
            self.lr *= 0.9
        #
        del self.optimizer
        del self.model_to_save
        if self.adt_type == 'fgm':
            del fgm
    def release(self):
        """Free the model and release cached GPU memory."""
        # see this issue:https://github.com/huggingface/transformers/issues/1742
        print("Release model")
        del self.model
        gc.collect()
        if torch.cuda.is_available():
            torch.cuda.empty_cache()
    def load_raw_config(self):
        '''Load the pretrained model's original (unmodified) config.'''
        config = AutoConfig.from_pretrained(self.model_dir)
        return config
def get_config(self):
config = self.load_raw_config()
num_labels = self.num_labels
config_dict = {"num_labels": num_labels,
"id2label": {x: "LABEL_{}".format(x) for x in range(num_labels)},
"label2id": {"LABEL_{}".format(x): x for x in range(num_labels)},
"output_hidden_states": self.config["output_hidden_states"],
"label_text_filepath": self.config["label_text_filepath"],
"max_length": self.max_len,
"model_dir": self.model_dir,
}
for k, v in config_dict.items():
setattr(config, k, v)
return config | 25,472 | 38.493023 | 131 | py |
DialogID | DialogID-main/src/auto_text_classifier/atc/models/base_model.py | import numpy as np
from atc.utils.data_utils import init_dir, load_df, DataGet
from atc.utils.metrics_utils import get_model_metrics, get_multi_class_report,refit_map
import torch
import random
import os
import pandas as pd
import traceback
from tqdm import tqdm
import time
class BaseModel():
    """Common base for atc text classifiers: parses shared config keys and
    defines the train/load/predict/evaluate interface subclasses implement."""

    def __init__(self, config):
        """Read common hyper-parameters and paths from the *config* dict."""
        self.model = None
        self.config = config
        self.batch_size = int(self.config.get('batch_size', 32))
        self.max_len = int(self.config.get('max_len', 128))
        self.epochs = int(self.config.get("epochs", 100))
        self.patience = int(self.config.get("patience", 5))
        #
        self.save_dir = self.config.get('save_dir', "")
        self.train_dir = self.config.get('train_dir', "")
        self.dev_dir = self.config.get('dev_dir', "")
        self.test_dir = self.config.get('test_dir', "")
        #
        self.model_dir = self.config.get('model_dir', "")
        self.num_labels = int(self.config.get('num_labels', 2))
        self.seed = int(self.config.get('seed', 0))
        self.fp16 = self.config.get('fp16', None)
        self.token_type_ids_disable = self.config.get(
            'token_type_ids_disable', False)
        if self.num_labels == 2:
            refit = self.config.get('refit', 'acc') # metric name, mapped below
            self.refit = refit_map[refit]
        else:
            self.refit = refit_map['acc']
        self.adt_type = self.config.get('adt_type',None) # adversarial_training
        self.focal_loss = self.config.get('focal_loss', 0)
        self.supcon_loss = self.config.get('supcon_loss', 0)
        self.triplet_loss = self.config.get('triplet_loss', 0)
        self.K = self.config.get('K', 3)
        self.fgm_epsilon = self.config.get('fgm_epsilon', 3.5e-5)
        self.lr = self.config.get('lr',2e-5)
        self.eval_steps = self.config.get("eval_steps", None)
        self.multi_label = self.config.get('multi_label', False)
        #
        self.date = time.strftime("%Y-%m-%d", time.localtime())
        #
        self.pos_weight = self.config.get('pos_weight', False)
        #
        # whether to replace [CLS] with the mean of the model's top-layer token embeddings
        self.mean_top_level_embedding = self.config.get(
            'mean_top_level_embedding', False)
        #
        # whether to attend the model's top layer over the label text
        self.top_level_embedding_attention_with_label = self.config.get(
            'top_level_embedding_attention_with_label', False)
        #
        init_dir(self.save_dir)
    def train(self):
        """Train the model; subclasses must implement.

        Note: concrete subclasses override this with a
        ``train(train_path, dev_path, test_path)`` signature (see
        ``train_cv``) and return the performance report on the test set.
        """
        raise NotImplementedError
    def load_model(self, model_path):
        """Load model weights from *model_path*; subclasses must implement.

        Parameters
        ----------
        model_path: directory or file containing the saved model

        Returns
        -------
        None
        """
        raise NotImplementedError
    def demo(self, text):
        """Predict one text; subclasses must implement.

        Parameters
        ----------
        text: input text

        Returns
        -------
        p: the prediction (probability or class) for *text*
        """
        raise NotImplementedError
    def demo_text_list(self, text_list):
        """Predict a list of texts; subclasses must implement.

        Parameters
        ----------
        text_list: list of input texts

        Returns
        -------
        p_list: predictions for every text
        """
        raise NotImplementedError
    def predict(self, text):
        """Score a single text string (thin alias of ``demo``).

        text: str
        """
        return self.demo(text)
    def predict_list(self, text_list):
        """Score a list of texts (thin alias of ``demo_text_list``)."""
        return self.demo_text_list(text_list)
    def evaluate(self, df, single_sample=False):
        """Evaluate the model on *df* (path or DataFrame with text/label columns).

        *single_sample* switches to per-row ``demo`` calls instead of one
        batched ``demo_text_list`` call.  Returns the binary metric dict for
        two classes, otherwise a multi-class report.
        """
        df = load_df(df)
        y_pred = []
        if single_sample:
            for text in tqdm(df['text'].tolist()):
                y_pred.append(self.demo(text))
        else:
            y_pred = self.demo_text_list(df['text'].tolist())
        #
        if self.multi_label:
            y_pred = np.array(y_pred)
            # multi-label ground truth is stored as stringified lists
            y_true = [eval(x) for x in df['label'].tolist()]
        else:
            y_pred = np.array(y_pred)
            y_true = np.array(df['label'])
        #
        if self.num_labels == 2:
            report = get_model_metrics(y_true, y_pred)
        else:
            report = get_multi_class_report(y_true, y_pred)
        return report
    def release(self):
        """Free model resources; default is a no-op, subclasses may override."""
        pass
def set_seed(self, seed=-1):
if seed != -1:
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
if torch.cuda.is_available():
torch.cuda.manual_seed_all(seed)
    def train_cv(self, df, cv):
        """Cross-validated training: train once per fold, return a report DataFrame.

        Each fold gets its own save sub-directory ``save_dir/<fold>``; the
        original ``save_dir`` is restored afterwards.
        """
        df = load_df(df)
        data_get = DataGet(df=df,n_splits=cv,random_state=self.seed)
        root_dir = self.save_dir
        report_list = []
        try:
            for kf_i in range(cv):
                print("Start cv {}/{}".format(kf_i+1,cv))
                self.save_dir = os.path.join(root_dir, str(kf_i))
                df_train, df_dev, df_test = data_get.get_data(kf_i=kf_i)
                report = self.train(df_train, df_dev, df_test)
                report['kf_i'] = kf_i
                report_list.append(report)
                print("Finish cv {}/{}".format(kf_i+1,cv))
                self.release()
        except Exception as e:
            print(traceback.format_exc())
        finally:
            self.save_dir = root_dir # restore save_dir; avoid mutating shared state
        return pd.DataFrame(report_list)
    def eval_cv(self, df, cv):
        """Cross-validated inference: one prediction column per fold plus their mean.

        Loads the checkpoint from ``save_dir/<fold>`` for each fold, adds a
        ``kf_<fold>`` column to *df*, then an averaged ``kf_avg`` column.
        """
        df = load_df(df)
        root_dir = self.save_dir
        try:
            kf_name_list = []
            for kf_i in range(cv):
                print("Start cv {}/{}".format(kf_i+1,cv))
                model_dir = os.path.join(root_dir, str(kf_i))
                _ = self.load_model(model_dir)
                kf_name = 'kf_{}'.format(kf_i)
                kf_name_list.append(kf_name)
                df[kf_name] = self.predict_list(df['text'].tolist())
                self.release()
                print("Finish cv {}/{}".format(kf_i+1,cv))
            df['kf_avg'] = df[kf_name_list].mean(axis=1)
        except Exception as e:
            print(traceback.format_exc())
        finally:
            self.save_dir = root_dir # restore save_dir; avoid mutating shared state
        return df
| 6,316 | 32.247368 | 87 | py |
DialogID | DialogID-main/src/auto_text_classifier/atc/models/aml.py | import os
import copy
import time
import pandas as pd
import numpy as np
from tqdm import tqdm
from keras.layers import Lambda, Dense
from atc.utils.data_utils import init_dir
from atc.models.base_model import BaseModel
from atc.utils.metrics_utils import get_model_metrics,get_multi_class_report
from atc.utils.data_utils import load_df
from atc.configs.aml_config import model_dict, default_model_list
from atc.utils.data_utils import load_df
import traceback
import json
class AML():
    """Auto-model driver: trains/evaluates a list of registered atc classifiers."""

    def __init__(self, save_dir, config={}):
        """Create the driver; *config* entries override each model's default config.

        NOTE(review): the mutable default ``config={}`` is shared between
        calls; it appears to be only read here, but confirm before mutating it.
        """
        self.model_dict = model_dict
        self.save_dir = save_dir
        self.config = config
        self.batch_size = int(self.config.get('batch_size', 32))
        self.max_len = int(self.config.get('max_len', 128))
        self.epochs = int(self.config.get("epochs",100))
        self.patience = int(self.config.get("patience", 5))
        self.num_labels = int(self.config.get('num_labels',2))
        init_dir(self.save_dir)
def get_model_config(self, model_name):
model_class = copy.deepcopy(self.model_dict[model_name]['model_class'])
config = copy.deepcopy(self.model_dict[model_name]['config'])
return model_class, config
    def __evaluate_one_model(self, model, df, model_name, data_set):
        """Evaluate *model* on *df*, timing per-sample latency.

        Returns the metric report annotated with model_name / data_set /
        avg_time_s.
        """
        df = load_df(df)
        # add time
        tic = time.time()
        y_pred = model.demo_text_list(df['text'].tolist())
        toc = time.time()
        # cal avg time
        avg_time_s = (toc-tic)/df.shape[0]
        # get report
        #
        if model.multi_label:
            y_pred = np.array(y_pred)
            # multi-label ground truth is stored as stringified lists
            y_true = [eval(x) for x in df['label'].tolist()]
        else:
            y_pred = np.array(y_pred)
            y_true = np.array(df['label'])
        #
        if self.num_labels == 2:
            report = get_model_metrics(y_true, y_pred)
        else:
            report = get_multi_class_report(y_true, y_pred)
        report['model_name'] = model_name
        report['data_set'] = data_set
        report['avg_time_s'] = avg_time_s
        return report
def __check_model_list(self, model_list):
if len(model_list) == 0:
return default_model_list
for model_name in model_list:
if model_name not in self.model_dict:
raise Exception(
"model:{} is not support now!".format(model_name))
return model_list
    def __get_one_model(self, model_name, df_train, df_dev, df_test, train=True):
        """Instantiate *model_name*; train it when *train* is True, else load its checkpoint."""
        model_class, config = self.get_model_config(model_name)
        # driver-level config overrides the model's defaults
        config.update(self.config)
        config['save_dir'] = os.path.join(self.save_dir, model_name)
        print("config is :{}".format(config))
        model = model_class(config)
        if train:
            print('Training...')
            print("Start train {}".format(model_name))
            _ = model.train(df_train, df_dev, df_test)
            print("release after train")
        else:
            print("Load model")
            model.load_model(model.model_path)
            print("Load finish")
        return model
def __get_report(self, train_path, dev_path, test_path, model_list=[], train=True):
model_list = self.__check_model_list(model_list)
# load data
df_train = load_df(train_path)
df_dev = load_df(dev_path)
df_test = load_df(test_path)
# train or eval all model
self.all_report = []
for model_name in tqdm(model_list):
try:
# get model
model = self.__get_one_model(
model_name, df_train, df_dev, df_test, train=train)
# get dev/test report
dev_report = self.__evaluate_one_model(
model, df_dev, model_name, "dev")
test_report = self.__evaluate_one_model(
model, df_test, model_name, "test")
# append report to list
self.all_report.append(dev_report)
self.all_report.append(test_report)
# release
model.release()
print("model_name:{} eval finish!,dev_report:{},test_report:{}".format(
model_name, dev_report, test_report))
except:
print("model_name:{},fail,detail is {}".format(model_name,traceback.format_exc()))
if self.num_labels == 2:
df_report = pd.DataFrame(self.all_report)
cols = ["Accuracy", "Precision", "Recall",
"F_meansure", "AUC_Value", "avg_time_s"]
df_report_table = df_report.pivot_table(
index=["data_set", "model_name"], values=cols)[cols]
else:
df_report_table = pd.concat(self.all_report)
return df_report_table
    def fit(self, train_path, dev_path, test_path, model_list=[]):
        """Train every model in *model_list* and return the dev/test report table (equivalent to ``train()``)."""
        df_report = self.__get_report(
            train_path, dev_path, test_path, model_list=model_list, train=True)
        return df_report
    def train(self, train_path, dev_path, test_path, model_list=[]):
        '''Equivalent to ``fit()``.'''
        return self.fit(train_path, dev_path, test_path, model_list=model_list)
def evaluate(self, df_path, model_list):
'''在df_path上使用model_list进行评估,返回结果。'''
model_list = self.__check_model_list(model_list)
train = False
df = load_df(df_path)
all_report = []
for model_name in tqdm(model_list):
try:
# get model
model = self.__get_one_model(
model_name, df_train=None, df_dev=None, df_test=None, train=train)
model_report = self.__evaluate_one_model(model, df, model_name, "")
all_report.append(model_report)
# release
model.release()
print("model_name:{} eval finish!,model_report:{}".format(
model_name, model_report))
except:
print("model_name:{},fail,detail is {}".format(model_name,traceback.format_exc()))
if self.num_labels==2:
cols = ["model_name", "Accuracy", "Precision",
"Recall", "F_meansure", "AUC_Value", "avg_time_s"]
df_report = pd.DataFrame(all_report)[cols]
else:
df_report = pd.concat(all_report)
return df_report
def get_list_result(self, df_list, model_list):
'''获取所有模型的输出结果'''
model_list = self.__check_model_list(model_list)
train = False
df_list = [load_df(x) for x in df_list]
for model_name in tqdm(model_list):
# get model
try:
model = self.__get_one_model(
model_name, df_train=None, df_dev=None, df_test=None, train=train)
for df in df_list:
df[model_name] = model.predict_list(df['text'].tolist())
# release
model.release()
except:
print("model_name:{},fail,detail is {}".format(model_name,traceback.format_exc()))
return df_list
def pred_model_list(self, df_list, model_list):
'''输入df list和model list,返回每个模型在每个df中的预测结果'''
return self.get_list_result(df_list,model_list) | 7,288 | 38.61413 | 98 | py |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.