DSLA-DSLA | DSLA-DSLA/mmdet/models/roi_heads/mask_heads/feature_relay_head.py | # Copyright (c) OpenMMLab. All rights reserved.
import torch.nn as nn
from mmcv.runner import BaseModule, auto_fp16
from mmdet.models.builder import HEADS
@HEADS.register_module()
class FeatureRelayHead(BaseModule):
"""Feature Relay Head used in `SCNet <https://arxiv.org/abs/2012.10150>`_.
Args:
in_channels (int, optional): number of input channels. Default: 1024.
out_conv_channels (int, optional): number of output channels of the
reshaped spatial feature. Default: 256.
roi_feat_size (int, optional): roi feat size at box head. Default: 7.
scale_factor (int, optional): scale factor to match roi feat size
at mask head. Default: 2.
init_cfg (dict or list[dict], optional): Initialization config dict.
"""
def __init__(self,
in_channels=1024,
out_conv_channels=256,
roi_feat_size=7,
scale_factor=2,
init_cfg=dict(type='Kaiming', layer='Linear')):
super(FeatureRelayHead, self).__init__(init_cfg)
assert isinstance(roi_feat_size, int)
self.in_channels = in_channels
self.out_conv_channels = out_conv_channels
self.roi_feat_size = roi_feat_size
self.out_channels = (roi_feat_size**2) * out_conv_channels
self.scale_factor = scale_factor
self.fp16_enabled = False
self.fc = nn.Linear(self.in_channels, self.out_channels)
self.upsample = nn.Upsample(
scale_factor=scale_factor, mode='bilinear', align_corners=True)
@auto_fp16()
def forward(self, x):
"""Forward function."""
N, in_C = x.shape
if N > 0:
out_C = self.out_conv_channels
out_HW = self.roi_feat_size
x = self.fc(x)
x = x.reshape(N, out_C, out_HW, out_HW)
x = self.upsample(x)
return x
return None
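# A minimal usage sketch for FeatureRelayHead (illustrative shapes, assuming
# mmcv/mmdet are importable): the fc layer inflates the flattened box-head
# feature back into a spatial map, which is then upsampled to the RoI size
# expected by the mask head.
import torch
relay_head = FeatureRelayHead(
    in_channels=1024, out_conv_channels=256, roi_feat_size=7, scale_factor=2)
box_feats = torch.rand(4, 1024)  # 4 RoIs with flattened box-head features
relayed = relay_head(box_feats)
assert relayed.shape == (4, 256, 14, 14)  # 7 * scale_factor = 14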
DSLA-DSLA | DSLA-DSLA/mmdet/models/roi_heads/mask_heads/global_context_head.py | # Copyright (c) OpenMMLab. All rights reserved.
import torch.nn as nn
from mmcv.cnn import ConvModule
from mmcv.runner import BaseModule, auto_fp16, force_fp32
from mmdet.models.builder import HEADS
from mmdet.models.utils import ResLayer, SimplifiedBasicBlock
@HEADS.register_module()
class GlobalContextHead(BaseModule):
"""Global context head used in `SCNet <https://arxiv.org/abs/2012.10150>`_.
Args:
num_convs (int, optional): number of convolutional layers in GlbCtxHead.
Default: 4.
in_channels (int, optional): number of input channels. Default: 256.
conv_out_channels (int, optional): number of output channels before
classification layer. Default: 256.
num_classes (int, optional): number of classes. Default: 80.
loss_weight (float, optional): global context loss weight. Default: 1.
conv_cfg (dict, optional): config to init conv layer. Default: None.
norm_cfg (dict, optional): config to init norm layer. Default: None.
conv_to_res (bool, optional): if True, 2 convs will be grouped into
1 `SimplifiedBasicBlock` using a skip connection. Default: False.
init_cfg (dict or list[dict], optional): Initialization config dict.
"""
def __init__(self,
num_convs=4,
in_channels=256,
conv_out_channels=256,
num_classes=80,
loss_weight=1.0,
conv_cfg=None,
norm_cfg=None,
conv_to_res=False,
init_cfg=dict(
type='Normal', std=0.01, override=dict(name='fc'))):
super(GlobalContextHead, self).__init__(init_cfg)
self.num_convs = num_convs
self.in_channels = in_channels
self.conv_out_channels = conv_out_channels
self.num_classes = num_classes
self.loss_weight = loss_weight
self.conv_cfg = conv_cfg
self.norm_cfg = norm_cfg
self.conv_to_res = conv_to_res
self.fp16_enabled = False
if self.conv_to_res:
num_res_blocks = num_convs // 2
self.convs = ResLayer(
SimplifiedBasicBlock,
in_channels,
self.conv_out_channels,
num_res_blocks,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg)
self.num_convs = num_res_blocks
else:
self.convs = nn.ModuleList()
for i in range(self.num_convs):
in_channels = self.in_channels if i == 0 else conv_out_channels
self.convs.append(
ConvModule(
in_channels,
conv_out_channels,
3,
padding=1,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg))
self.pool = nn.AdaptiveAvgPool2d(1)
self.fc = nn.Linear(conv_out_channels, num_classes)
self.criterion = nn.BCEWithLogitsLoss()
@auto_fp16()
def forward(self, feats):
"""Forward function."""
x = feats[-1]
for i in range(self.num_convs):
x = self.convs[i](x)
x = self.pool(x)
# multi-class prediction
mc_pred = x.reshape(x.size(0), -1)
mc_pred = self.fc(mc_pred)
return mc_pred, x
@force_fp32(apply_to=('pred', ))
def loss(self, pred, labels):
"""Loss function."""
labels = [lbl.unique() for lbl in labels]
targets = pred.new_zeros(pred.size())
for i, label in enumerate(labels):
targets[i, label] = 1.0
loss = self.loss_weight * self.criterion(pred, targets)
return loss
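# A minimal usage sketch for GlobalContextHead (illustrative shapes, assuming
# mmcv/mmdet are importable): the head consumes the coarsest FPN level, pools
# it globally and predicts image-level multi-label classes, trained with BCE
# against the set of GT labels present in each image.
import torch
ctx_head = GlobalContextHead(num_convs=4, in_channels=256, num_classes=80)
fpn_feats = [torch.rand(2, 256, s, s) for s in (64, 32, 16, 8, 4)]
mc_pred, pooled = ctx_head(fpn_feats)
assert mc_pred.shape == (2, 80) and pooled.shape == (2, 256, 1, 1)
gt_labels = [torch.tensor([3, 3, 17]), torch.tensor([5])]  # per-image GTs
loss = ctx_head.loss(mc_pred, gt_labels)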
DSLA-DSLA | DSLA-DSLA/mmdet/models/roi_heads/mask_heads/fcn_mask_head.py | # Copyright (c) OpenMMLab. All rights reserved.
from warnings import warn
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from mmcv.cnn import ConvModule, build_conv_layer, build_upsample_layer
from mmcv.ops.carafe import CARAFEPack
from mmcv.runner import BaseModule, ModuleList, auto_fp16, force_fp32
from torch.nn.modules.utils import _pair
from mmdet.core import mask_target
from mmdet.models.builder import HEADS, build_loss
BYTES_PER_FLOAT = 4
# TODO: This memory limit may be too much or too little. It would be better to
# determine it based on available resources.
GPU_MEM_LIMIT = 1024**3 # 1 GB memory limit
@HEADS.register_module()
class FCNMaskHead(BaseModule):
def __init__(self,
num_convs=4,
roi_feat_size=14,
in_channels=256,
conv_kernel_size=3,
conv_out_channels=256,
num_classes=80,
class_agnostic=False,
upsample_cfg=dict(type='deconv', scale_factor=2),
conv_cfg=None,
norm_cfg=None,
predictor_cfg=dict(type='Conv'),
loss_mask=dict(
type='CrossEntropyLoss', use_mask=True, loss_weight=1.0),
init_cfg=None):
assert init_cfg is None, 'To prevent abnormal initialization ' \
'behavior, init_cfg is not allowed to be set'
super(FCNMaskHead, self).__init__(init_cfg)
self.upsample_cfg = upsample_cfg.copy()
if self.upsample_cfg['type'] not in [
None, 'deconv', 'nearest', 'bilinear', 'carafe'
]:
raise ValueError(
f'Invalid upsample method {self.upsample_cfg["type"]}, '
'accepted methods are "deconv", "nearest", "bilinear", '
'"carafe"')
self.num_convs = num_convs
# WARN: roi_feat_size is reserved and not used
self.roi_feat_size = _pair(roi_feat_size)
self.in_channels = in_channels
self.conv_kernel_size = conv_kernel_size
self.conv_out_channels = conv_out_channels
self.upsample_method = self.upsample_cfg.get('type')
self.scale_factor = self.upsample_cfg.pop('scale_factor', None)
self.num_classes = num_classes
self.class_agnostic = class_agnostic
self.conv_cfg = conv_cfg
self.norm_cfg = norm_cfg
self.predictor_cfg = predictor_cfg
self.fp16_enabled = False
self.loss_mask = build_loss(loss_mask)
self.convs = ModuleList()
for i in range(self.num_convs):
in_channels = (
self.in_channels if i == 0 else self.conv_out_channels)
padding = (self.conv_kernel_size - 1) // 2
self.convs.append(
ConvModule(
in_channels,
self.conv_out_channels,
self.conv_kernel_size,
padding=padding,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg))
upsample_in_channels = (
self.conv_out_channels if self.num_convs > 0 else in_channels)
upsample_cfg_ = self.upsample_cfg.copy()
if self.upsample_method is None:
self.upsample = None
elif self.upsample_method == 'deconv':
upsample_cfg_.update(
in_channels=upsample_in_channels,
out_channels=self.conv_out_channels,
kernel_size=self.scale_factor,
stride=self.scale_factor)
self.upsample = build_upsample_layer(upsample_cfg_)
elif self.upsample_method == 'carafe':
upsample_cfg_.update(
channels=upsample_in_channels, scale_factor=self.scale_factor)
self.upsample = build_upsample_layer(upsample_cfg_)
else:
# suppress warnings
align_corners = (None
if self.upsample_method == 'nearest' else False)
upsample_cfg_.update(
scale_factor=self.scale_factor,
mode=self.upsample_method,
align_corners=align_corners)
self.upsample = build_upsample_layer(upsample_cfg_)
out_channels = 1 if self.class_agnostic else self.num_classes
logits_in_channel = (
self.conv_out_channels
if self.upsample_method == 'deconv' else upsample_in_channels)
self.conv_logits = build_conv_layer(self.predictor_cfg,
logits_in_channel, out_channels, 1)
self.relu = nn.ReLU(inplace=True)
self.debug_imgs = None
def init_weights(self):
super(FCNMaskHead, self).init_weights()
for m in [self.upsample, self.conv_logits]:
if m is None:
continue
elif isinstance(m, CARAFEPack):
m.init_weights()
elif hasattr(m, 'weight') and hasattr(m, 'bias'):
nn.init.kaiming_normal_(
m.weight, mode='fan_out', nonlinearity='relu')
nn.init.constant_(m.bias, 0)
@auto_fp16()
def forward(self, x):
for conv in self.convs:
x = conv(x)
if self.upsample is not None:
x = self.upsample(x)
if self.upsample_method == 'deconv':
x = self.relu(x)
mask_pred = self.conv_logits(x)
return mask_pred
def get_targets(self, sampling_results, gt_masks, rcnn_train_cfg):
pos_proposals = [res.pos_bboxes for res in sampling_results]
pos_assigned_gt_inds = [
res.pos_assigned_gt_inds for res in sampling_results
]
mask_targets = mask_target(pos_proposals, pos_assigned_gt_inds,
gt_masks, rcnn_train_cfg)
return mask_targets
@force_fp32(apply_to=('mask_pred', ))
def loss(self, mask_pred, mask_targets, labels):
"""
Example:
>>> from mmdet.models.roi_heads.mask_heads.fcn_mask_head import * # NOQA
>>> N = 7 # N = number of extracted ROIs
>>> C, H, W = 11, 32, 32
>>> # Create example instance of FCN Mask Head.
>>> # There are lots of variations depending on the configuration
>>> self = FCNMaskHead(num_classes=C, num_convs=1)
>>> inputs = torch.rand(N, self.in_channels, H, W)
>>> mask_pred = self.forward(inputs)
>>> sf = self.scale_factor
>>> labels = torch.randint(0, C, size=(N,))
>>> # With the default properties the mask targets should indicate
>>> # a (potentially soft) single-class label
>>> mask_targets = torch.rand(N, H * sf, W * sf)
>>> loss = self.loss(mask_pred, mask_targets, labels)
>>> print('loss = {!r}'.format(loss))
"""
loss = dict()
if mask_pred.size(0) == 0:
loss_mask = mask_pred.sum()
else:
if self.class_agnostic:
loss_mask = self.loss_mask(mask_pred, mask_targets,
torch.zeros_like(labels))
else:
loss_mask = self.loss_mask(mask_pred, mask_targets, labels)
loss['loss_mask'] = loss_mask
return loss
def get_seg_masks(self, mask_pred, det_bboxes, det_labels, rcnn_test_cfg,
ori_shape, scale_factor, rescale):
"""Get segmentation masks from mask_pred and bboxes.
Args:
mask_pred (Tensor or ndarray): shape (n, #class, h, w).
For single-scale testing, mask_pred is the direct output of
model, whose type is Tensor, while for multi-scale testing,
it will be converted to numpy array outside of this method.
det_bboxes (Tensor): shape (n, 4/5)
det_labels (Tensor): shape (n, )
rcnn_test_cfg (dict): rcnn testing config
ori_shape (Tuple): original image height and width, shape (2,)
scale_factor(ndarray | Tensor): If ``rescale is True``, box
coordinates are divided by this scale factor to fit
``ori_shape``.
rescale (bool): If True, the resulting masks will be rescaled to
``ori_shape``.
Returns:
list[list]: encoded masks. The c-th item in the outer list
corresponds to the c-th class. Given the c-th outer list, the
i-th item in that inner list is the mask for the i-th box with
class label c.
Example:
>>> import mmcv
>>> from mmdet.models.roi_heads.mask_heads.fcn_mask_head import * # NOQA
>>> N = 7 # N = number of extracted ROIs
>>> C, H, W = 11, 32, 32
>>> # Create example instance of FCN Mask Head.
>>> self = FCNMaskHead(num_classes=C, num_convs=0)
>>> inputs = torch.rand(N, self.in_channels, H, W)
>>> mask_pred = self.forward(inputs)
>>> # Each input is associated with some bounding box
>>> det_bboxes = torch.Tensor([[1, 1, 42, 42 ]] * N)
>>> det_labels = torch.randint(0, C, size=(N,))
>>> rcnn_test_cfg = mmcv.Config({'mask_thr_binary': 0, })
>>> ori_shape = (H * 4, W * 4)
>>> scale_factor = torch.FloatTensor((1, 1))
>>> rescale = False
>>> # Encoded masks are a list for each category.
>>> encoded_masks = self.get_seg_masks(
>>> mask_pred, det_bboxes, det_labels, rcnn_test_cfg, ori_shape,
>>> scale_factor, rescale
>>> )
>>> assert len(encoded_masks) == C
>>> assert sum(list(map(len, encoded_masks))) == N
"""
if isinstance(mask_pred, torch.Tensor):
mask_pred = mask_pred.sigmoid()
else:
# In AugTest, mask_pred has already been activated before
mask_pred = det_bboxes.new_tensor(mask_pred)
device = mask_pred.device
cls_segms = [[] for _ in range(self.num_classes)
] # BG is not included in num_classes
bboxes = det_bboxes[:, :4]
labels = det_labels
# In most cases, scale_factor should have been
# converted to Tensor when rescale the bbox
if not isinstance(scale_factor, torch.Tensor):
if isinstance(scale_factor, float):
scale_factor = np.array([scale_factor] * 4)
warn('scale_factor should be a Tensor or ndarray '
'with shape (4,); a plain float is deprecated.')
assert isinstance(scale_factor, np.ndarray)
scale_factor = torch.Tensor(scale_factor)
if rescale:
img_h, img_w = ori_shape[:2]
bboxes = bboxes / scale_factor.to(bboxes)
else:
w_scale, h_scale = scale_factor[0], scale_factor[1]
img_h = np.round(ori_shape[0] * h_scale.item()).astype(np.int32)
img_w = np.round(ori_shape[1] * w_scale.item()).astype(np.int32)
N = len(mask_pred)
# The actual implementation splits the input into chunks
# and pastes them chunk by chunk.
if device.type == 'cpu':
# CPU is most efficient when masks are pasted one by one with
# skip_empty=True, so that the minimal number of operations
# is performed.
num_chunks = N
else:
# GPU benefits from parallelism for larger chunks,
# but may have memory issues.
# The types of img_w and img_h are np.int32, so when the
# image resolution is large, the calculation of num_chunks
# would overflow; hence cast img_w and img_h to Python int.
# See https://github.com/open-mmlab/mmdetection/pull/5191
num_chunks = int(
np.ceil(N * int(img_h) * int(img_w) * BYTES_PER_FLOAT /
GPU_MEM_LIMIT))
assert (num_chunks <=
N), 'Default GPU_MEM_LIMIT is too small; try increasing it'
chunks = torch.chunk(torch.arange(N, device=device), num_chunks)
threshold = rcnn_test_cfg.mask_thr_binary
im_mask = torch.zeros(
N,
img_h,
img_w,
device=device,
dtype=torch.bool if threshold >= 0 else torch.uint8)
if not self.class_agnostic:
mask_pred = mask_pred[range(N), labels][:, None]
for inds in chunks:
masks_chunk, spatial_inds = _do_paste_mask(
mask_pred[inds],
bboxes[inds],
img_h,
img_w,
skip_empty=device.type == 'cpu')
if threshold >= 0:
masks_chunk = (masks_chunk >= threshold).to(dtype=torch.bool)
else:
# for visualization and debugging
masks_chunk = (masks_chunk * 255).to(dtype=torch.uint8)
im_mask[(inds, ) + spatial_inds] = masks_chunk
for i in range(N):
cls_segms[labels[i]].append(im_mask[i].detach().cpu().numpy())
return cls_segms
def onnx_export(self, mask_pred, det_bboxes, det_labels, rcnn_test_cfg,
ori_shape, **kwargs):
"""Get segmentation masks from mask_pred and bboxes.
Args:
mask_pred (Tensor): shape (n, #class, h, w).
det_bboxes (Tensor): shape (n, 4/5)
det_labels (Tensor): shape (n, )
rcnn_test_cfg (dict): rcnn testing config
ori_shape (Tuple): original image height and width, shape (2,)
Returns:
Tensor: a mask of shape (N, img_h, img_w).
"""
mask_pred = mask_pred.sigmoid()
bboxes = det_bboxes[:, :4]
labels = det_labels
# No need to consider rescale and scale_factor while exporting to ONNX
img_h, img_w = ori_shape[:2]
threshold = rcnn_test_cfg.mask_thr_binary
if not self.class_agnostic:
box_inds = torch.arange(mask_pred.shape[0])
mask_pred = mask_pred[box_inds, labels][:, None]
masks, _ = _do_paste_mask(
mask_pred, bboxes, img_h, img_w, skip_empty=False)
if threshold >= 0:
# should convert to float to avoid problems in TRT
masks = (masks >= threshold).to(dtype=torch.float)
return masks
def _do_paste_mask(masks, boxes, img_h, img_w, skip_empty=True):
"""Paste instance masks according to boxes.
This implementation is modified from
https://github.com/facebookresearch/detectron2/
Args:
masks (Tensor): N, 1, H, W
boxes (Tensor): N, 4
img_h (int): Height of the image to be pasted.
img_w (int): Width of the image to be pasted.
skip_empty (bool): Only paste masks within the region that
tightly bounds all boxes, and return the results for this
region only. An important optimization for CPU.
Returns:
tuple: (Tensor, tuple). The first item is mask tensor, the second one
is the slice object.
If skip_empty == False, the whole image will be pasted. It will
return a mask of shape (N, img_h, img_w) and an empty tuple.
If skip_empty == True, only area around the mask will be pasted.
A mask of shape (N, h', w') and its start and end coordinates
in the original image will be returned.
"""
# On GPU, paste all masks together (up to chunk size)
# by using the entire image to sample the masks
# Compared to pasting them one by one,
# this has more operations but is faster on COCO-scale datasets.
device = masks.device
if skip_empty:
x0_int, y0_int = torch.clamp(
boxes.min(dim=0).values.floor()[:2] - 1,
min=0).to(dtype=torch.int32)
x1_int = torch.clamp(
boxes[:, 2].max().ceil() + 1, max=img_w).to(dtype=torch.int32)
y1_int = torch.clamp(
boxes[:, 3].max().ceil() + 1, max=img_h).to(dtype=torch.int32)
else:
x0_int, y0_int = 0, 0
x1_int, y1_int = img_w, img_h
x0, y0, x1, y1 = torch.split(boxes, 1, dim=1) # each is Nx1
N = masks.shape[0]
img_y = torch.arange(y0_int, y1_int, device=device).to(torch.float32) + 0.5
img_x = torch.arange(x0_int, x1_int, device=device).to(torch.float32) + 0.5
img_y = (img_y - y0) / (y1 - y0) * 2 - 1
img_x = (img_x - x0) / (x1 - x0) * 2 - 1
# img_x, img_y have shapes (N, w), (N, h)
# IsInf op is not supported with ONNX<=1.7.0
if not torch.onnx.is_in_onnx_export():
if torch.isinf(img_x).any():
inds = torch.where(torch.isinf(img_x))
img_x[inds] = 0
if torch.isinf(img_y).any():
inds = torch.where(torch.isinf(img_y))
img_y[inds] = 0
gx = img_x[:, None, :].expand(N, img_y.size(1), img_x.size(1))
gy = img_y[:, :, None].expand(N, img_y.size(1), img_x.size(1))
grid = torch.stack([gx, gy], dim=3)
img_masks = F.grid_sample(
masks.to(dtype=torch.float32), grid, align_corners=False)
if skip_empty:
return img_masks[:, 0], (slice(y0_int, y1_int), slice(x0_int, x1_int))
else:
return img_masks[:, 0], ()
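# A toy check of _do_paste_mask (illustrative values): with skip_empty=False
# every 28x28 RoI mask is resampled onto the full 32x32 canvas via
# grid_sample, and the returned spatial slice tuple is empty.
toy_masks = torch.rand(2, 1, 28, 28)
toy_boxes = torch.tensor([[4., 4., 20., 20.],
                          [10., 8., 30., 24.]])
pasted, spatial_inds = _do_paste_mask(
    toy_masks, toy_boxes, img_h=32, img_w=32, skip_empty=False)
assert pasted.shape == (2, 32, 32) and spatial_inds == ()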
DSLA-DSLA | DSLA-DSLA/mmdet/models/roi_heads/mask_heads/fused_semantic_head.py | # Copyright (c) OpenMMLab. All rights reserved.
import warnings
import torch.nn as nn
import torch.nn.functional as F
from mmcv.cnn import ConvModule
from mmcv.runner import BaseModule, auto_fp16, force_fp32
from mmdet.models.builder import HEADS, build_loss
@HEADS.register_module()
class FusedSemanticHead(BaseModule):
r"""Multi-level fused semantic segmentation head.
.. code-block:: none
in_1 -> 1x1 conv ---
|
in_2 -> 1x1 conv -- |
||
in_3 -> 1x1 conv - ||
||| /-> 1x1 conv (mask prediction)
in_4 -> 1x1 conv -----> 3x3 convs (*4)
| \-> 1x1 conv (feature)
in_5 -> 1x1 conv ---
""" # noqa: W605
def __init__(self,
num_ins,
fusion_level,
num_convs=4,
in_channels=256,
conv_out_channels=256,
num_classes=183,
conv_cfg=None,
norm_cfg=None,
ignore_label=None,
loss_weight=None,
loss_seg=dict(
type='CrossEntropyLoss',
ignore_index=255,
loss_weight=0.2),
init_cfg=dict(
type='Kaiming', override=dict(name='conv_logits'))):
super(FusedSemanticHead, self).__init__(init_cfg)
self.num_ins = num_ins
self.fusion_level = fusion_level
self.num_convs = num_convs
self.in_channels = in_channels
self.conv_out_channels = conv_out_channels
self.num_classes = num_classes
self.conv_cfg = conv_cfg
self.norm_cfg = norm_cfg
self.fp16_enabled = False
self.lateral_convs = nn.ModuleList()
for i in range(self.num_ins):
self.lateral_convs.append(
ConvModule(
self.in_channels,
self.in_channels,
1,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg,
inplace=False))
self.convs = nn.ModuleList()
for i in range(self.num_convs):
in_channels = self.in_channels if i == 0 else conv_out_channels
self.convs.append(
ConvModule(
in_channels,
conv_out_channels,
3,
padding=1,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg))
self.conv_embedding = ConvModule(
conv_out_channels,
conv_out_channels,
1,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg)
self.conv_logits = nn.Conv2d(conv_out_channels, self.num_classes, 1)
if ignore_label:
loss_seg['ignore_index'] = ignore_label
if loss_weight:
loss_seg['loss_weight'] = loss_weight
if ignore_label or loss_weight:
warnings.warn('``ignore_label`` and ``loss_weight`` will be '
'deprecated soon. Please set ``ignore_index`` and '
'``loss_weight`` in ``loss_seg`` instead.')
self.criterion = build_loss(loss_seg)
@auto_fp16()
def forward(self, feats):
x = self.lateral_convs[self.fusion_level](feats[self.fusion_level])
fused_size = tuple(x.shape[-2:])
for i, feat in enumerate(feats):
if i != self.fusion_level:
feat = F.interpolate(
feat, size=fused_size, mode='bilinear', align_corners=True)
x += self.lateral_convs[i](feat)
for i in range(self.num_convs):
x = self.convs[i](x)
mask_pred = self.conv_logits(x)
x = self.conv_embedding(x)
return mask_pred, x
@force_fp32(apply_to=('mask_pred', ))
def loss(self, mask_pred, labels):
labels = labels.squeeze(1).long()
loss_semantic_seg = self.criterion(mask_pred, labels)
return loss_semantic_seg
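# A minimal forward sketch for FusedSemanticHead (illustrative shapes,
# assuming mmcv/mmdet are importable): five levels are fused at
# ``fusion_level`` and the head returns a 183-way semantic map plus an
# embedding feature at that level's resolution.
import torch
sem_head = FusedSemanticHead(num_ins=5, fusion_level=1, in_channels=256)
fpn_feats = [torch.rand(1, 256, s, s) for s in (64, 32, 16, 8, 4)]
sem_pred, sem_feat = sem_head(fpn_feats)
assert sem_pred.shape == (1, 183, 32, 32)
assert sem_feat.shape == (1, 256, 32, 32)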
DSLA-DSLA | DSLA-DSLA/mmdet/models/roi_heads/mask_heads/mask_point_head.py | # Copyright (c) OpenMMLab. All rights reserved.
# Modified from https://github.com/facebookresearch/detectron2/tree/master/projects/PointRend/point_head/point_head.py # noqa
import torch
import torch.nn as nn
from mmcv.cnn import ConvModule
from mmcv.ops import point_sample, rel_roi_point_to_rel_img_point
from mmcv.runner import BaseModule
from mmdet.models.builder import HEADS, build_loss
@HEADS.register_module()
class MaskPointHead(BaseModule):
"""A mask point head used in PointRend.
``MaskPointHead`` uses a shared multi-layer perceptron (equivalent to
nn.Conv1d) to predict the logits of input points. The fine-grained feature
and coarse feature will be concatenated together for prediction.
Args:
num_fcs (int): Number of fc layers in the head. Default: 3.
in_channels (int): Number of input channels. Default: 256.
fc_channels (int): Number of fc channels. Default: 256.
num_classes (int): Number of classes for logits. Default: 80.
class_agnostic (bool): Whether to use class-agnostic classification.
If so, the output channels of logits will be 1. Default: False.
coarse_pred_each_layer (bool): Whether concatenate coarse feature with
the output of each fc layer. Default: True.
conv_cfg (dict | None): Dictionary to construct and config conv layer.
Default: dict(type='Conv1d'))
norm_cfg (dict | None): Dictionary to construct and config norm layer.
Default: None.
loss_point (dict): Dictionary to construct and config loss layer of
point head. Default: dict(type='CrossEntropyLoss', use_mask=True,
loss_weight=1.0).
init_cfg (dict or list[dict], optional): Initialization config dict.
"""
def __init__(self,
num_classes,
num_fcs=3,
in_channels=256,
fc_channels=256,
class_agnostic=False,
coarse_pred_each_layer=True,
conv_cfg=dict(type='Conv1d'),
norm_cfg=None,
act_cfg=dict(type='ReLU'),
loss_point=dict(
type='CrossEntropyLoss', use_mask=True, loss_weight=1.0),
init_cfg=dict(
type='Normal', std=0.001,
override=dict(name='fc_logits'))):
super().__init__(init_cfg)
self.num_fcs = num_fcs
self.in_channels = in_channels
self.fc_channels = fc_channels
self.num_classes = num_classes
self.class_agnostic = class_agnostic
self.coarse_pred_each_layer = coarse_pred_each_layer
self.conv_cfg = conv_cfg
self.norm_cfg = norm_cfg
self.loss_point = build_loss(loss_point)
fc_in_channels = in_channels + num_classes
self.fcs = nn.ModuleList()
for _ in range(num_fcs):
fc = ConvModule(
fc_in_channels,
fc_channels,
kernel_size=1,
stride=1,
padding=0,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
act_cfg=act_cfg)
self.fcs.append(fc)
fc_in_channels = fc_channels
fc_in_channels += num_classes if self.coarse_pred_each_layer else 0
out_channels = 1 if self.class_agnostic else self.num_classes
self.fc_logits = nn.Conv1d(
fc_in_channels, out_channels, kernel_size=1, stride=1, padding=0)
def forward(self, fine_grained_feats, coarse_feats):
"""Classify each point based on fine-grained and coarse feats.
Args:
fine_grained_feats (Tensor): Fine grained feature sampled from FPN,
shape (num_rois, in_channels, num_points).
coarse_feats (Tensor): Coarse feature sampled from CoarseMaskHead,
shape (num_rois, num_classes, num_points).
Returns:
Tensor: Point classification results,
shape (num_rois, num_class, num_points).
"""
x = torch.cat([fine_grained_feats, coarse_feats], dim=1)
for fc in self.fcs:
x = fc(x)
if self.coarse_pred_each_layer:
x = torch.cat((x, coarse_feats), dim=1)
return self.fc_logits(x)
def get_targets(self, rois, rel_roi_points, sampling_results, gt_masks,
cfg):
"""Get training targets of MaskPointHead for all images.
Args:
rois (Tensor): Region of Interest, shape (num_rois, 5).
rel_roi_points (Tensor): Points coordinates relative to RoI, shape
(num_rois, num_points, 2).
sampling_results (:obj:`SamplingResult`): Sampling result after
sampling and assignment.
gt_masks (Tensor): Ground truth segmentation masks of
corresponding boxes, shape (num_rois, height, width).
cfg (dict): Training cfg.
Returns:
Tensor: Point target, shape (num_rois, num_points).
"""
num_imgs = len(sampling_results)
rois_list = []
rel_roi_points_list = []
for batch_ind in range(num_imgs):
inds = (rois[:, 0] == batch_ind)
rois_list.append(rois[inds])
rel_roi_points_list.append(rel_roi_points[inds])
pos_assigned_gt_inds_list = [
res.pos_assigned_gt_inds for res in sampling_results
]
cfg_list = [cfg for _ in range(num_imgs)]
point_targets = map(self._get_target_single, rois_list,
rel_roi_points_list, pos_assigned_gt_inds_list,
gt_masks, cfg_list)
point_targets = list(point_targets)
if len(point_targets) > 0:
point_targets = torch.cat(point_targets)
return point_targets
def _get_target_single(self, rois, rel_roi_points, pos_assigned_gt_inds,
gt_masks, cfg):
"""Get training target of MaskPointHead for each image."""
num_pos = rois.size(0)
num_points = cfg.num_points
if num_pos > 0:
gt_masks_th = (
gt_masks.to_tensor(rois.dtype, rois.device).index_select(
0, pos_assigned_gt_inds))
gt_masks_th = gt_masks_th.unsqueeze(1)
rel_img_points = rel_roi_point_to_rel_img_point(
rois, rel_roi_points, gt_masks_th)
point_targets = point_sample(gt_masks_th,
rel_img_points).squeeze(1)
else:
point_targets = rois.new_zeros((0, num_points))
return point_targets
def loss(self, point_pred, point_targets, labels):
"""Calculate loss for MaskPointHead.
Args:
point_pred (Tensor): Point prediction result, shape
(num_rois, num_classes, num_points).
point_targets (Tensor): Point targets, shape (num_roi, num_points).
labels (Tensor): Class label of corresponding boxes,
shape (num_rois, )
Returns:
dict[str, Tensor]: a dictionary of point loss components
"""
loss = dict()
if self.class_agnostic:
loss_point = self.loss_point(point_pred, point_targets,
torch.zeros_like(labels))
else:
loss_point = self.loss_point(point_pred, point_targets, labels)
loss['loss_point'] = loss_point
return loss
def _get_uncertainty(self, mask_pred, labels):
"""Estimate uncertainty based on pred logits.
We estimate uncertainty as the L1 distance between 0.0 and the logit
prediction in 'mask_pred' for the foreground class in `classes`.
Args:
mask_pred (Tensor): mask prediction logits, shape (num_rois,
num_classes, mask_height, mask_width).
labels (list[Tensor]): Either predicted or ground truth label for
each predicted mask, of length num_rois.
Returns:
scores (Tensor): Uncertainty scores with the most uncertain
locations having the highest uncertainty score,
shape (num_rois, 1, mask_height, mask_width)
"""
if mask_pred.shape[1] == 1:
gt_class_logits = mask_pred.clone()
else:
inds = torch.arange(mask_pred.shape[0], device=mask_pred.device)
gt_class_logits = mask_pred[inds, labels].unsqueeze(1)
return -torch.abs(gt_class_logits)
def get_roi_rel_points_train(self, mask_pred, labels, cfg):
"""Get ``num_points`` most uncertain points with random points during
train.
Sample points in [0, 1] x [0, 1] coordinate space based on their
uncertainty. The uncertainties are calculated for each point using
'_get_uncertainty()' function that takes point's logit prediction as
input.
Args:
mask_pred (Tensor): A tensor of shape (num_rois, num_classes,
mask_height, mask_width) for class-specific or class-agnostic
prediction.
labels (list): The ground truth class for each instance.
cfg (dict): Training config of point head.
Returns:
point_coords (Tensor): A tensor of shape (num_rois, num_points, 2)
that contains the coordinates of sampled points.
"""
num_points = cfg.num_points
oversample_ratio = cfg.oversample_ratio
importance_sample_ratio = cfg.importance_sample_ratio
assert oversample_ratio >= 1
assert 0 <= importance_sample_ratio <= 1
batch_size = mask_pred.shape[0]
num_sampled = int(num_points * oversample_ratio)
point_coords = torch.rand(
batch_size, num_sampled, 2, device=mask_pred.device)
point_logits = point_sample(mask_pred, point_coords)
# It is crucial to calculate uncertainty based on the sampled
# prediction value for the points. Calculating uncertainties of the
# coarse predictions first and sampling them for points leads to
# incorrect results. To illustrate this: assume uncertainty func(
# logits)=-abs(logits), a sampled point between two coarse
# predictions with -1 and 1 logits has 0 logits, and therefore 0
# uncertainty value. However, if we calculate uncertainties for the
# coarse predictions first, both will have -1 uncertainty,
# and sampled point will get -1 uncertainty.
point_uncertainties = self._get_uncertainty(point_logits, labels)
num_uncertain_points = int(importance_sample_ratio * num_points)
num_random_points = num_points - num_uncertain_points
idx = torch.topk(
point_uncertainties[:, 0, :], k=num_uncertain_points, dim=1)[1]
shift = num_sampled * torch.arange(
batch_size, dtype=torch.long, device=mask_pred.device)
idx += shift[:, None]
point_coords = point_coords.view(-1, 2)[idx.view(-1), :].view(
batch_size, num_uncertain_points, 2)
if num_random_points > 0:
rand_roi_coords = torch.rand(
batch_size, num_random_points, 2, device=mask_pred.device)
point_coords = torch.cat((point_coords, rand_roi_coords), dim=1)
return point_coords
def get_roi_rel_points_test(self, mask_pred, pred_label, cfg):
"""Get ``num_points`` most uncertain points during test.
Args:
mask_pred (Tensor): A tensor of shape (num_rois, num_classes,
mask_height, mask_width) for class-specific or class-agnostic
prediction.
pred_label (list): The predicted class for each instance.
cfg (dict): Testing config of point head.
Returns:
point_indices (Tensor): A tensor of shape (num_rois, num_points)
that contains indices from [0, mask_height x mask_width) of the
most uncertain points.
point_coords (Tensor): A tensor of shape (num_rois, num_points, 2)
that contains [0, 1] x [0, 1] normalized coordinates of the
most uncertain points from the [mask_height, mask_width] grid.
"""
num_points = cfg.subdivision_num_points
uncertainty_map = self._get_uncertainty(mask_pred, pred_label)
num_rois, _, mask_height, mask_width = uncertainty_map.shape
# During ONNX exporting, the type of each element of 'shape' is
# `Tensor(float)`, while it is `float` during PyTorch inference.
if isinstance(mask_height, torch.Tensor):
h_step = 1.0 / mask_height.float()
w_step = 1.0 / mask_width.float()
else:
h_step = 1.0 / mask_height
w_step = 1.0 / mask_width
# cast to int to avoid dynamic K for TopK op in ONNX
mask_size = int(mask_height * mask_width)
uncertainty_map = uncertainty_map.view(num_rois, mask_size)
num_points = min(mask_size, num_points)
point_indices = uncertainty_map.topk(num_points, dim=1)[1]
xs = w_step / 2.0 + (point_indices % mask_width).float() * w_step
ys = h_step / 2.0 + (point_indices // mask_width).float() * h_step
point_coords = torch.stack([xs, ys], dim=2)
return point_indices, point_coords
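# A minimal forward sketch for MaskPointHead (illustrative shapes, assuming
# mmcv/mmdet are importable): per-point fine-grained FPN features and coarse
# mask logits are concatenated along the channel axis and classified by the
# shared MLP.
point_head = MaskPointHead(num_classes=80, in_channels=256, fc_channels=256)
fine = torch.rand(4, 256, 196)   # (num_rois, in_channels, num_points)
coarse = torch.rand(4, 80, 196)  # (num_rois, num_classes, num_points)
point_logits = point_head(fine, coarse)
assert point_logits.shape == (4, 80, 196)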
DSLA-DSLA | DSLA-DSLA/mmdet/models/losses/ghm_loss.py | # Copyright (c) OpenMMLab. All rights reserved.
import torch
import torch.nn as nn
import torch.nn.functional as F
from ..builder import LOSSES
from .utils import weight_reduce_loss
def _expand_onehot_labels(labels, label_weights, label_channels):
bin_labels = labels.new_full((labels.size(0), label_channels), 0)
inds = torch.nonzero(
(labels >= 0) & (labels < label_channels), as_tuple=False).squeeze()
if inds.numel() > 0:
bin_labels[inds, labels[inds]] = 1
bin_label_weights = label_weights.view(-1, 1).expand(
label_weights.size(0), label_channels)
return bin_labels, bin_label_weights
# TODO: code refactoring to make it consistent with other losses
@LOSSES.register_module()
class GHMC(nn.Module):
"""GHM Classification Loss.
Details of the theorem can be viewed in the paper
`Gradient Harmonized Single-stage Detector
<https://arxiv.org/abs/1811.05181>`_.
Args:
bins (int): Number of the unit regions for distribution calculation.
momentum (float): The parameter for moving average.
use_sigmoid (bool): Can only be true for BCE based loss now.
loss_weight (float): The weight of the total GHM-C loss.
reduction (str): Options are "none", "mean" and "sum".
Defaults to "mean"
"""
def __init__(self,
bins=10,
momentum=0,
use_sigmoid=True,
loss_weight=1.0,
reduction='mean'):
super(GHMC, self).__init__()
self.bins = bins
self.momentum = momentum
edges = torch.arange(bins + 1).float() / bins
self.register_buffer('edges', edges)
self.edges[-1] += 1e-6
if momentum > 0:
acc_sum = torch.zeros(bins)
self.register_buffer('acc_sum', acc_sum)
self.use_sigmoid = use_sigmoid
if not self.use_sigmoid:
raise NotImplementedError
self.loss_weight = loss_weight
self.reduction = reduction
def forward(self,
pred,
target,
label_weight,
reduction_override=None,
**kwargs):
"""Calculate the GHM-C loss.
Args:
pred (float tensor of size [batch_num, class_num]):
The direct prediction of classification fc layer.
target (float tensor of size [batch_num, class_num]):
Binary class target for each sample.
label_weight (float tensor of size [batch_num, class_num]):
the value is 1 if the sample is valid and 0 if ignored.
reduction_override (str, optional): The reduction method used to
override the original reduction method of the loss.
Defaults to None.
Returns:
The gradient harmonized loss.
"""
assert reduction_override in (None, 'none', 'mean', 'sum')
reduction = (
reduction_override if reduction_override else self.reduction)
# the target should be binary class label
if pred.dim() != target.dim():
target, label_weight = _expand_onehot_labels(
target, label_weight, pred.size(-1))
target, label_weight = target.float(), label_weight.float()
edges = self.edges
mmt = self.momentum
weights = torch.zeros_like(pred)
# gradient length
g = torch.abs(pred.sigmoid().detach() - target)
valid = label_weight > 0
tot = max(valid.float().sum().item(), 1.0)
n = 0 # n valid bins
for i in range(self.bins):
inds = (g >= edges[i]) & (g < edges[i + 1]) & valid
num_in_bin = inds.sum().item()
if num_in_bin > 0:
if mmt > 0:
self.acc_sum[i] = mmt * self.acc_sum[i] \
+ (1 - mmt) * num_in_bin
weights[inds] = tot / self.acc_sum[i]
else:
weights[inds] = tot / num_in_bin
n += 1
if n > 0:
weights = weights / n
loss = F.binary_cross_entropy_with_logits(
pred, target, reduction='none')
loss = weight_reduce_loss(
loss, weights, reduction=reduction, avg_factor=tot)
return loss * self.loss_weight
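# A toy GHM-C call (illustrative values): integer class labels are expanded
# to one-hot inside the loss when ``pred`` carries an extra class dimension,
# and per-sample weights are set inversely to the gradient density per bin.
ghmc = GHMC(bins=10, momentum=0.0)
toy_pred = torch.randn(8, 5)            # logits for 8 samples, 5 classes
toy_labels = torch.randint(0, 5, (8, ))
toy_label_weight = torch.ones(8)        # all samples valid
assert ghmc(toy_pred, toy_labels, toy_label_weight).dim() == 0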
# TODO: code refactoring to make it consistent with other losses
@LOSSES.register_module()
class GHMR(nn.Module):
"""GHM Regression Loss.
Details of the theorem can be viewed in the paper
`Gradient Harmonized Single-stage Detector
<https://arxiv.org/abs/1811.05181>`_.
Args:
mu (float): The parameter for the Authentic Smooth L1 loss.
bins (int): Number of the unit regions for distribution calculation.
momentum (float): The parameter for moving average.
loss_weight (float): The weight of the total GHM-R loss.
reduction (str): Options are "none", "mean" and "sum".
Defaults to "mean"
"""
def __init__(self,
mu=0.02,
bins=10,
momentum=0,
loss_weight=1.0,
reduction='mean'):
super(GHMR, self).__init__()
self.mu = mu
self.bins = bins
edges = torch.arange(bins + 1).float() / bins
self.register_buffer('edges', edges)
self.edges[-1] = 1e3
self.momentum = momentum
if momentum > 0:
acc_sum = torch.zeros(bins)
self.register_buffer('acc_sum', acc_sum)
self.loss_weight = loss_weight
self.reduction = reduction
# TODO: support reduction parameter
def forward(self,
pred,
target,
label_weight,
avg_factor=None,
reduction_override=None):
"""Calculate the GHM-R loss.
Args:
pred (float tensor of size [batch_num, 4 (* class_num)]):
The prediction of box regression layer. Channel number can be 4
or 4 * class_num depending on whether it is class-agnostic.
target (float tensor of size [batch_num, 4 (* class_num)]):
The target regression values with the same size of pred.
label_weight (float tensor of size [batch_num, 4 (* class_num)]):
The weight of each sample, 0 if ignored.
reduction_override (str, optional): The reduction method used to
override the original reduction method of the loss.
Defaults to None.
Returns:
The gradient harmonized loss.
"""
assert reduction_override in (None, 'none', 'mean', 'sum')
reduction = (
reduction_override if reduction_override else self.reduction)
mu = self.mu
edges = self.edges
mmt = self.momentum
# ASL1 loss
diff = pred - target
loss = torch.sqrt(diff * diff + mu * mu) - mu
# gradient length
g = torch.abs(diff / torch.sqrt(mu * mu + diff * diff)).detach()
weights = torch.zeros_like(g)
valid = label_weight > 0
tot = max(label_weight.float().sum().item(), 1.0)
n = 0 # n: valid bins
for i in range(self.bins):
inds = (g >= edges[i]) & (g < edges[i + 1]) & valid
num_in_bin = inds.sum().item()
if num_in_bin > 0:
n += 1
if mmt > 0:
self.acc_sum[i] = mmt * self.acc_sum[i] \
+ (1 - mmt) * num_in_bin
weights[inds] = tot / self.acc_sum[i]
else:
weights[inds] = tot / num_in_bin
if n > 0:
weights /= n
loss = weight_reduce_loss(
loss, weights, reduction=reduction, avg_factor=tot)
return loss * self.loss_weight
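# A toy GHM-R call (illustrative values): the ASL1 regression loss is
# reweighted by the inverse gradient density over ``bins`` and then averaged
# over the valid samples.
ghmr = GHMR(mu=0.02, bins=10)
toy_pred = torch.rand(8, 4)
toy_target = torch.rand(8, 4)
toy_weight = torch.ones(8, 4)
assert ghmr(toy_pred, toy_target, toy_weight).dim() == 0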
DSLA-DSLA | DSLA-DSLA/mmdet/models/losses/mse_loss.py | # Copyright (c) OpenMMLab. All rights reserved.
import torch.nn as nn
import torch.nn.functional as F
from ..builder import LOSSES
from .utils import weighted_loss
@weighted_loss
def mse_loss(pred, target):
"""Wrapper of MSE loss."""
return F.mse_loss(pred, target, reduction='none')
@LOSSES.register_module()
class MSELoss(nn.Module):
"""MSELoss.
Args:
reduction (str, optional): The method that reduces the loss to a
scalar. Options are "none", "mean" and "sum".
loss_weight (float, optional): The weight of the loss. Defaults to 1.0
"""
def __init__(self, reduction='mean', loss_weight=1.0):
super().__init__()
self.reduction = reduction
self.loss_weight = loss_weight
def forward(self,
pred,
target,
weight=None,
avg_factor=None,
reduction_override=None):
"""Forward function of loss.
Args:
pred (torch.Tensor): The prediction.
target (torch.Tensor): The learning target of the prediction.
weight (torch.Tensor, optional): Weight of the loss for each
prediction. Defaults to None.
avg_factor (int, optional): Average factor that is used to average
the loss. Defaults to None.
reduction_override (str, optional): The reduction method used to
override the original reduction method of the loss.
Defaults to None.
Returns:
torch.Tensor: The calculated loss
"""
assert reduction_override in (None, 'none', 'mean', 'sum')
reduction = (
reduction_override if reduction_override else self.reduction)
loss = self.loss_weight * mse_loss(
pred, target, weight, reduction=reduction, avg_factor=avg_factor)
return loss
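# A sketch of per-element weighting (illustrative values): zero-weight
# entries contribute nothing, so masking out the only mismatched element
# drives the mean loss to zero.
import torch
crit = MSELoss(reduction='mean')
toy_pred = torch.tensor([1.0, 2.0, 3.0])
toy_target = torch.tensor([1.0, 0.0, 3.0])
toy_weight = torch.tensor([1.0, 0.0, 1.0])  # mask the mismatched element
assert crit(toy_pred, toy_target, weight=toy_weight).item() == 0.0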
DSLA-DSLA | DSLA-DSLA/mmdet/models/losses/dice_loss.py | # Copyright (c) OpenMMLab. All rights reserved.
import torch
import torch.nn as nn
from ..builder import LOSSES
from .utils import weight_reduce_loss
def dice_loss(pred,
target,
weight=None,
eps=1e-3,
reduction='mean',
avg_factor=None):
"""Calculate dice loss, which is proposed in
`V-Net: Fully Convolutional Neural Networks for Volumetric
Medical Image Segmentation <https://arxiv.org/abs/1606.04797>`_.
Args:
pred (torch.Tensor): The prediction, has a shape (n, *)
target (torch.Tensor): The learning label of the prediction,
shape (n, *), same shape of pred.
weight (torch.Tensor, optional): The weight of loss for each
prediction, has a shape (n,). Defaults to None.
eps (float): Avoid dividing by zero. Default: 1e-3.
reduction (str, optional): The method used to reduce the loss into
a scalar. Defaults to 'mean'.
Options are "none", "mean" and "sum".
avg_factor (int, optional): Average factor that is used to average
the loss. Defaults to None.
"""
input = pred.flatten(1)
target = target.flatten(1).float()
a = torch.sum(input * target, 1)
b = torch.sum(input * input, 1) + eps
c = torch.sum(target * target, 1) + eps
d = (2 * a) / (b + c)
loss = 1 - d
if weight is not None:
assert weight.ndim == loss.ndim
assert len(weight) == len(pred)
loss = weight_reduce_loss(loss, weight, reduction, avg_factor)
return loss
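# A hand-checked example: identical prediction and target give a near-zero
# dice loss (eps / (2 + eps) rather than exactly 0, because of the eps terms
# in the denominator).
toy_pred = torch.tensor([[1., 0., 1., 0.]])
toy_target = torch.tensor([[1., 0., 1., 0.]])
# a = 2, b = c = 2 + eps  ->  loss = 1 - 4 / (4 + 2 * eps) ~ 5e-4
assert dice_loss(toy_pred, toy_target, eps=1e-3).item() < 1e-3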
@LOSSES.register_module()
class DiceLoss(nn.Module):
def __init__(self,
use_sigmoid=True,
activate=True,
reduction='mean',
loss_weight=1.0,
eps=1e-3):
"""Dice Loss, which is proposed in
`V-Net: Fully Convolutional Neural Networks for Volumetric
Medical Image Segmentation <https://arxiv.org/abs/1606.04797>`_.
Args:
use_sigmoid (bool, optional): Whether the prediction is activated
with sigmoid (True) or softmax (False). Defaults to True.
activate (bool): Whether to activate the predictions inside the
loss; if False, the predictions are assumed to be already
activated. Defaults to True.
reduction (str, optional): The method used
to reduce the loss. Options are "none",
"mean" and "sum". Defaults to 'mean'.
loss_weight (float, optional): Weight of loss. Defaults to 1.0.
eps (float): Avoid dividing by zero. Defaults to 1e-3.
"""
super(DiceLoss, self).__init__()
self.use_sigmoid = use_sigmoid
self.reduction = reduction
self.loss_weight = loss_weight
self.eps = eps
self.activate = activate
def forward(self,
pred,
target,
weight=None,
reduction_override=None,
avg_factor=None):
"""Forward function.
Args:
pred (torch.Tensor): The prediction, has a shape (n, *).
target (torch.Tensor): The label of the prediction,
shape (n, *), same shape of pred.
weight (torch.Tensor, optional): The weight of loss for each
prediction, has a shape (n,). Defaults to None.
avg_factor (int, optional): Average factor that is used to average
the loss. Defaults to None.
reduction_override (str, optional): The reduction method used to
override the original reduction method of the loss.
Options are "none", "mean" and "sum".
Returns:
torch.Tensor: The calculated loss
"""
assert reduction_override in (None, 'none', 'mean', 'sum')
reduction = (
reduction_override if reduction_override else self.reduction)
if self.activate:
if self.use_sigmoid:
pred = pred.sigmoid()
else:
raise NotImplementedError
loss = self.loss_weight * dice_loss(
pred,
target,
weight,
eps=self.eps,
reduction=reduction,
avg_factor=avg_factor)
return loss
DSLA-DSLA | DSLA-DSLA/mmdet/models/losses/pisa_loss.py | # Copyright (c) OpenMMLab. All rights reserved.
import mmcv
import torch
from mmdet.core import bbox_overlaps
@mmcv.jit(derivate=True, coderize=True)
def isr_p(cls_score,
bbox_pred,
bbox_targets,
rois,
sampling_results,
loss_cls,
bbox_coder,
k=2,
bias=0,
num_class=80):
"""Importance-based Sample Reweighting (ISR_P), positive part.
Args:
cls_score (Tensor): Predicted classification scores.
bbox_pred (Tensor): Predicted bbox deltas.
bbox_targets (tuple[Tensor]): A tuple of bbox targets, which are
labels, label_weights, bbox_targets and bbox_weights, respectively.
rois (Tensor): Anchors (single_stage) in shape (n, 4) or RoIs
(two_stage) in shape (n, 5).
sampling_results (obj): Sampling results.
loss_cls (func): Classification loss func of the head.
bbox_coder (obj): BBox coder of the head.
k (float): Power of the non-linear mapping.
bias (float): Shift of the non-linear mapping.
num_class (int): Number of classes, default: 80.
Return:
tuple([Tensor]): labels, imp_based_label_weights, bbox_targets,
bbox_target_weights
"""
labels, label_weights, bbox_targets, bbox_weights = bbox_targets
pos_label_inds = ((labels >= 0) &
(labels < num_class)).nonzero().reshape(-1)
pos_labels = labels[pos_label_inds]
# if no positive samples, return the original targets
num_pos = float(pos_label_inds.size(0))
if num_pos == 0:
return labels, label_weights, bbox_targets, bbox_weights
# merge pos_assigned_gt_inds of per image to a single tensor
gts = list()
last_max_gt = 0
for i in range(len(sampling_results)):
gt_i = sampling_results[i].pos_assigned_gt_inds
gts.append(gt_i + last_max_gt)
if len(gt_i) != 0:
last_max_gt = gt_i.max() + 1
gts = torch.cat(gts)
assert len(gts) == num_pos
cls_score = cls_score.detach()
bbox_pred = bbox_pred.detach()
# For single stage detectors, rois here indicate anchors, in shape (N, 4)
# For two stage detectors, rois are in shape (N, 5)
if rois.size(-1) == 5:
pos_rois = rois[pos_label_inds][:, 1:]
else:
pos_rois = rois[pos_label_inds]
if bbox_pred.size(-1) > 4:
bbox_pred = bbox_pred.view(bbox_pred.size(0), -1, 4)
pos_delta_pred = bbox_pred[pos_label_inds, pos_labels].view(-1, 4)
else:
pos_delta_pred = bbox_pred[pos_label_inds].view(-1, 4)
# compute iou of the predicted bbox and the corresponding GT
pos_delta_target = bbox_targets[pos_label_inds].view(-1, 4)
pos_bbox_pred = bbox_coder.decode(pos_rois, pos_delta_pred)
target_bbox_pred = bbox_coder.decode(pos_rois, pos_delta_target)
ious = bbox_overlaps(pos_bbox_pred, target_bbox_pred, is_aligned=True)
pos_imp_weights = label_weights[pos_label_inds]
# Two steps to compute IoU-HLR. Samples are first sorted by IoU locally,
# then sorted again within the same-rank group
max_l_num = pos_labels.bincount().max()
for label in pos_labels.unique():
l_inds = (pos_labels == label).nonzero().view(-1)
l_gts = gts[l_inds]
for t in l_gts.unique():
t_inds = l_inds[l_gts == t]
t_ious = ious[t_inds]
_, t_iou_rank_idx = t_ious.sort(descending=True)
_, t_iou_rank = t_iou_rank_idx.sort()
ious[t_inds] += max_l_num - t_iou_rank.float()
l_ious = ious[l_inds]
_, l_iou_rank_idx = l_ious.sort(descending=True)
_, l_iou_rank = l_iou_rank_idx.sort() # IoU-HLR
# linearly map HLR to label weights
pos_imp_weights[l_inds] *= (max_l_num - l_iou_rank.float()) / max_l_num
pos_imp_weights = (bias + pos_imp_weights * (1 - bias)).pow(k)
# normalize to make the new weighted loss value equal to the original loss
pos_loss_cls = loss_cls(
cls_score[pos_label_inds], pos_labels, reduction_override='none')
if pos_loss_cls.dim() > 1:
ori_pos_loss_cls = pos_loss_cls * label_weights[pos_label_inds][:,
None]
new_pos_loss_cls = pos_loss_cls * pos_imp_weights[:, None]
else:
ori_pos_loss_cls = pos_loss_cls * label_weights[pos_label_inds]
new_pos_loss_cls = pos_loss_cls * pos_imp_weights
pos_loss_cls_ratio = ori_pos_loss_cls.sum() / new_pos_loss_cls.sum()
pos_imp_weights = pos_imp_weights * pos_loss_cls_ratio
label_weights[pos_label_inds] = pos_imp_weights
bbox_targets = labels, label_weights, bbox_targets, bbox_weights
return bbox_targets
@mmcv.jit(derivate=True, coderize=True)
def carl_loss(cls_score,
labels,
bbox_pred,
bbox_targets,
loss_bbox,
k=1,
bias=0.2,
avg_factor=None,
sigmoid=False,
num_class=80):
"""Classification-Aware Regression Loss (CARL).
Args:
cls_score (Tensor): Predicted classification scores.
labels (Tensor): Targets of classification.
bbox_pred (Tensor): Predicted bbox deltas.
bbox_targets (Tensor): Target of bbox regression.
loss_bbox (func): Regression loss func of the head.
k (float): Power of the non-linear mapping.
bias (float): Shift of the non-linear mapping.
avg_factor (int): Average factor used in regression loss.
sigmoid (bool): Activation of the classification score.
num_class (int): Number of classes, default: 80.
Return:
dict: CARL loss dict.
"""
pos_label_inds = ((labels >= 0) &
(labels < num_class)).nonzero().reshape(-1)
if pos_label_inds.numel() == 0:
return dict(loss_carl=cls_score.sum()[None] * 0.)
pos_labels = labels[pos_label_inds]
# multiply pos_cls_score with the corresponding bbox weight
# and retain the gradient
if sigmoid:
pos_cls_score = cls_score.sigmoid()[pos_label_inds, pos_labels]
else:
pos_cls_score = cls_score.softmax(-1)[pos_label_inds, pos_labels]
carl_loss_weights = (bias + (1 - bias) * pos_cls_score).pow(k)
# normalize carl_loss_weight to make its sum equal to num positive
num_pos = float(pos_cls_score.size(0))
weight_ratio = num_pos / carl_loss_weights.sum()
carl_loss_weights *= weight_ratio
if avg_factor is None:
avg_factor = bbox_targets.size(0)
# if is class agnostic, bbox pred is in shape (N, 4)
# otherwise, bbox pred is in shape (N, #classes, 4)
if bbox_pred.size(-1) > 4:
bbox_pred = bbox_pred.view(bbox_pred.size(0), -1, 4)
pos_bbox_preds = bbox_pred[pos_label_inds, pos_labels]
else:
pos_bbox_preds = bbox_pred[pos_label_inds]
ori_loss_reg = loss_bbox(
pos_bbox_preds,
bbox_targets[pos_label_inds],
reduction_override='none') / avg_factor
loss_carl = (ori_loss_reg * carl_loss_weights[:, None]).sum()
return dict(loss_carl=loss_carl[None])
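# A toy CARL call (illustrative values). A plain element-wise L1 stands in
# for the head's regression loss here; any callable accepting
# ``reduction_override`` works. Higher classification scores up-weight the
# corresponding box regression losses.
def _l1_loss(pred, target, reduction_override='none'):
    return (pred - target).abs()
toy_cls_score = torch.randn(6, 81)                 # 80 classes + background
toy_labels = torch.tensor([2, 80, 5, 80, 7, 1])    # label 80 = background
toy_bbox_pred = torch.rand(6, 4)
toy_bbox_targets = torch.rand(6, 4)
out = carl_loss(toy_cls_score, toy_labels, toy_bbox_pred, toy_bbox_targets,
                _l1_loss, k=1, bias=0.2, num_class=80)
assert out['loss_carl'].shape == (1, )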
DSLA-DSLA | DSLA-DSLA/mmdet/models/losses/balanced_l1_loss.py | # Copyright (c) OpenMMLab. All rights reserved.
import mmcv
import numpy as np
import torch
import torch.nn as nn
from ..builder import LOSSES
from .utils import weighted_loss
@mmcv.jit(derivate=True, coderize=True)
@weighted_loss
def balanced_l1_loss(pred,
target,
beta=1.0,
alpha=0.5,
gamma=1.5,
reduction='mean'):
"""Calculate balanced L1 loss.
Please see the `Libra R-CNN <https://arxiv.org/pdf/1904.02701.pdf>`_
Args:
pred (torch.Tensor): The prediction with shape (N, 4).
target (torch.Tensor): The learning target of the prediction with
shape (N, 4).
beta (float): The loss is a piecewise function of prediction and target
and ``beta`` serves as a threshold for the difference between the
prediction and target. Defaults to 1.0.
alpha (float): The denominator ``alpha`` in the balanced L1 loss.
Defaults to 0.5.
gamma (float): The ``gamma`` in the balanced L1 loss.
Defaults to 1.5.
reduction (str, optional): The method that reduces the loss to a
scalar. Options are "none", "mean" and "sum".
Returns:
torch.Tensor: The calculated loss
"""
assert beta > 0
if target.numel() == 0:
return pred.sum() * 0
assert pred.size() == target.size()
diff = torch.abs(pred - target)
b = np.e**(gamma / alpha) - 1
loss = torch.where(
diff < beta, alpha / b *
(b * diff + 1) * torch.log(b * diff / beta + 1) - alpha * diff,
gamma * diff + gamma / b - alpha * beta)
return loss
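# A continuity check at the beta boundary (illustrative values): with the
# defaults alpha=0.5 and gamma=1.5, both branches of the piecewise loss meet
# at |diff| == beta, where the value is
# gamma * beta + gamma / b - alpha * beta with b = e**(gamma / alpha) - 1.
toy_pred = torch.ones(1, 4)
toy_target = torch.zeros(1, 4)  # diff == beta == 1 everywhere
toy_loss = balanced_l1_loss(toy_pred, toy_target, beta=1.0, reduction='none')
b = np.e**(1.5 / 0.5) - 1
assert torch.allclose(toy_loss, torch.full((1, 4), 1.5 + 1.5 / b - 0.5))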
@LOSSES.register_module()
class BalancedL1Loss(nn.Module):
"""Balanced L1 Loss.
arXiv: https://arxiv.org/pdf/1904.02701.pdf (CVPR 2019)
Args:
alpha (float): The denominator ``alpha`` in the balanced L1 loss.
Defaults to 0.5.
gamma (float): The ``gamma`` in the balanced L1 loss. Defaults to 1.5.
beta (float, optional): The loss is a piecewise function of prediction
and target. ``beta`` serves as a threshold for the difference
between the prediction and target. Defaults to 1.0.
reduction (str, optional): The method that reduces the loss to a
scalar. Options are "none", "mean" and "sum".
loss_weight (float, optional): The weight of the loss. Defaults to 1.0
"""
def __init__(self,
alpha=0.5,
gamma=1.5,
beta=1.0,
reduction='mean',
loss_weight=1.0):
super(BalancedL1Loss, self).__init__()
self.alpha = alpha
self.gamma = gamma
self.beta = beta
self.reduction = reduction
self.loss_weight = loss_weight
def forward(self,
pred,
target,
weight=None,
avg_factor=None,
reduction_override=None,
**kwargs):
"""Forward function of loss.
Args:
pred (torch.Tensor): The prediction with shape (N, 4).
target (torch.Tensor): The learning target of the prediction with
shape (N, 4).
weight (torch.Tensor, optional): Sample-wise loss weight with
shape (N, ).
avg_factor (int, optional): Average factor that is used to average
the loss. Defaults to None.
reduction_override (str, optional): The reduction method used to
override the original reduction method of the loss.
Options are "none", "mean" and "sum".
Returns:
torch.Tensor: The calculated loss
"""
assert reduction_override in (None, 'none', 'mean', 'sum')
reduction = (
reduction_override if reduction_override else self.reduction)
loss_bbox = self.loss_weight * balanced_l1_loss(
pred,
target,
weight,
alpha=self.alpha,
gamma=self.gamma,
beta=self.beta,
reduction=reduction,
avg_factor=avg_factor,
**kwargs)
return loss_bbox
DSLA-DSLA | DSLA-DSLA/mmdet/models/losses/iou_loss.py | # Copyright (c) OpenMMLab. All rights reserved.
import math
import warnings
import mmcv
import torch
import torch.nn as nn
from mmdet.core import bbox_overlaps
from ..builder import LOSSES
from .utils import weighted_loss
@mmcv.jit(derivate=True, coderize=True)
@weighted_loss
def iou_loss(pred, target, linear=False, mode='log', eps=1e-6):
"""IoU loss.
Computing the IoU loss between a set of predicted bboxes and target bboxes.
In 'log' mode the loss is the negative log of IoU; 'linear' and
'square' modes use 1 - IoU and 1 - IoU**2, respectively.
Args:
pred (torch.Tensor): Predicted bboxes of format (x1, y1, x2, y2),
shape (n, 4).
target (torch.Tensor): Corresponding gt bboxes, shape (n, 4).
linear (bool, optional): If True, use linear scale of loss instead of
log scale. Default: False.
mode (str): Loss scaling mode, including "linear", "square", and "log".
Default: 'log'
eps (float): Eps to avoid log(0).
Return:
torch.Tensor: Loss tensor.
"""
assert mode in ['linear', 'square', 'log']
if linear:
mode = 'linear'
warnings.warn('DeprecationWarning: Setting "linear=True" in '
'iou_loss is deprecated, please use "mode=`linear`" '
'instead.')
ious = bbox_overlaps(pred, target, is_aligned=True).clamp(min=eps)
if mode == 'linear':
loss = 1 - ious
elif mode == 'square':
loss = 1 - ious**2
elif mode == 'log':
loss = -ious.log()
else:
raise NotImplementedError
return loss
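# A quick check of the 'linear' mode (illustrative values): a perfect match
# has IoU 1 and zero loss; a box overlapping half of the target has IoU 0.5,
# so the linear loss is 0.5.
toy_target = torch.tensor([[0., 0., 10., 10.]])
assert iou_loss(toy_target, toy_target, mode='linear').item() == 0
toy_half = torch.tensor([[0., 0., 10., 5.]])
assert abs(iou_loss(toy_half, toy_target, mode='linear').item() - 0.5) < 1e-6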
@mmcv.jit(derivate=True, coderize=True)
@weighted_loss
def bounded_iou_loss(pred, target, beta=0.2, eps=1e-3):
"""BIoULoss.
This is an implementation of paper
`Improving Object Localization with Fitness NMS and Bounded IoU Loss.
<https://arxiv.org/abs/1711.00164>`_.
Args:
pred (torch.Tensor): Predicted bboxes.
target (torch.Tensor): Target bboxes.
beta (float): beta parameter in smoothl1.
eps (float): eps to avoid NaN.
"""
pred_ctrx = (pred[:, 0] + pred[:, 2]) * 0.5
pred_ctry = (pred[:, 1] + pred[:, 3]) * 0.5
pred_w = pred[:, 2] - pred[:, 0]
pred_h = pred[:, 3] - pred[:, 1]
with torch.no_grad():
target_ctrx = (target[:, 0] + target[:, 2]) * 0.5
target_ctry = (target[:, 1] + target[:, 3]) * 0.5
target_w = target[:, 2] - target[:, 0]
target_h = target[:, 3] - target[:, 1]
dx = target_ctrx - pred_ctrx
dy = target_ctry - pred_ctry
loss_dx = 1 - torch.max(
(target_w - 2 * dx.abs()) /
(target_w + 2 * dx.abs() + eps), torch.zeros_like(dx))
loss_dy = 1 - torch.max(
(target_h - 2 * dy.abs()) /
(target_h + 2 * dy.abs() + eps), torch.zeros_like(dy))
loss_dw = 1 - torch.min(target_w / (pred_w + eps), pred_w /
(target_w + eps))
loss_dh = 1 - torch.min(target_h / (pred_h + eps), pred_h /
(target_h + eps))
# view(..., -1) does not work for empty tensor
loss_comb = torch.stack([loss_dx, loss_dy, loss_dw, loss_dh],
dim=-1).flatten(1)
loss = torch.where(loss_comb < beta, 0.5 * loss_comb * loss_comb / beta,
loss_comb - 0.5 * beta)
return loss
@mmcv.jit(derivate=True, coderize=True)
@weighted_loss
def giou_loss(pred, target, eps=1e-7):
r"""`Generalized Intersection over Union: A Metric and A Loss for Bounding
Box Regression <https://arxiv.org/abs/1902.09630>`_.
Args:
pred (torch.Tensor): Predicted bboxes of format (x1, y1, x2, y2),
shape (n, 4).
target (torch.Tensor): Corresponding gt bboxes, shape (n, 4).
eps (float): Eps to avoid log(0).
Return:
Tensor: Loss tensor.
"""
gious = bbox_overlaps(pred, target, mode='giou', is_aligned=True, eps=eps)
loss = 1 - gious
return loss
@mmcv.jit(derivate=True, coderize=True)
@weighted_loss
def diou_loss(pred, target, eps=1e-7):
r"""Implementation of `Distance-IoU Loss: Faster and Better
Learning for Bounding Box Regression <https://arxiv.org/abs/1911.08287>`_.
Code is modified from https://github.com/Zzh-tju/DIoU.
Args:
pred (Tensor): Predicted bboxes of format (x1, y1, x2, y2),
shape (n, 4).
target (Tensor): Corresponding gt bboxes, shape (n, 4).
eps (float): Eps to avoid log(0).
Return:
Tensor: Loss tensor.
"""
# overlap
lt = torch.max(pred[:, :2], target[:, :2])
rb = torch.min(pred[:, 2:], target[:, 2:])
wh = (rb - lt).clamp(min=0)
overlap = wh[:, 0] * wh[:, 1]
# union
ap = (pred[:, 2] - pred[:, 0]) * (pred[:, 3] - pred[:, 1])
ag = (target[:, 2] - target[:, 0]) * (target[:, 3] - target[:, 1])
union = ap + ag - overlap + eps
# IoU
ious = overlap / union
# enclose area
enclose_x1y1 = torch.min(pred[:, :2], target[:, :2])
enclose_x2y2 = torch.max(pred[:, 2:], target[:, 2:])
enclose_wh = (enclose_x2y2 - enclose_x1y1).clamp(min=0)
cw = enclose_wh[:, 0]
ch = enclose_wh[:, 1]
c2 = cw**2 + ch**2 + eps
b1_x1, b1_y1 = pred[:, 0], pred[:, 1]
b1_x2, b1_y2 = pred[:, 2], pred[:, 3]
b2_x1, b2_y1 = target[:, 0], target[:, 1]
b2_x2, b2_y2 = target[:, 2], target[:, 3]
left = ((b2_x1 + b2_x2) - (b1_x1 + b1_x2))**2 / 4
right = ((b2_y1 + b2_y2) - (b1_y1 + b1_y2))**2 / 4
rho2 = left + right
# DIoU
dious = ious - rho2 / c2
loss = 1 - dious
return loss
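# Hedged worked example (illustrative addition): for these boxes the
# centers are (5, 5) and (7, 5), so rho2 = 4; the enclosing box is
# 12 x 10 giving c2 = 244, and IoU = 80 / 120, hence
# loss = 1 - (2 / 3 - 4 / 244) ~= 0.35. `_diou_example` is hypothetical.
def _diou_example():
    import torch
    pred = torch.tensor([[0., 0., 10., 10.]])
    target = torch.tensor([[2., 0., 12., 10.]])
    return diou_loss(pred, target)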
@mmcv.jit(derivate=True, coderize=True)
@weighted_loss
def ciou_loss(pred, target, eps=1e-7):
r"""`Implementation of paper `Enhancing Geometric Factors into
Model Learning and Inference for Object Detection and Instance
Segmentation <https://arxiv.org/abs/2005.03572>`_.
Code is modified from https://github.com/Zzh-tju/CIoU.
Args:
pred (Tensor): Predicted bboxes of format (x1, y1, x2, y2),
shape (n, 4).
target (Tensor): Corresponding gt bboxes, shape (n, 4).
eps (float): Eps to avoid division by zero.
Returns:
Tensor: Loss tensor.
"""
# overlap
lt = torch.max(pred[:, :2], target[:, :2])
rb = torch.min(pred[:, 2:], target[:, 2:])
wh = (rb - lt).clamp(min=0)
overlap = wh[:, 0] * wh[:, 1]
# union
ap = (pred[:, 2] - pred[:, 0]) * (pred[:, 3] - pred[:, 1])
ag = (target[:, 2] - target[:, 0]) * (target[:, 3] - target[:, 1])
union = ap + ag - overlap + eps
# IoU
ious = overlap / union
# enclose area
enclose_x1y1 = torch.min(pred[:, :2], target[:, :2])
enclose_x2y2 = torch.max(pred[:, 2:], target[:, 2:])
enclose_wh = (enclose_x2y2 - enclose_x1y1).clamp(min=0)
cw = enclose_wh[:, 0]
ch = enclose_wh[:, 1]
c2 = cw**2 + ch**2 + eps
b1_x1, b1_y1 = pred[:, 0], pred[:, 1]
b1_x2, b1_y2 = pred[:, 2], pred[:, 3]
b2_x1, b2_y1 = target[:, 0], target[:, 1]
b2_x2, b2_y2 = target[:, 2], target[:, 3]
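# eps is added to the heights only (an upstream convention) so that
# atan(w / h) below stays finite for degenerate zero-height boxes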
w1, h1 = b1_x2 - b1_x1, b1_y2 - b1_y1 + eps
w2, h2 = b2_x2 - b2_x1, b2_y2 - b2_y1 + eps
left = ((b2_x1 + b2_x2) - (b1_x1 + b1_x2))**2 / 4
right = ((b2_y1 + b2_y2) - (b1_y1 + b1_y2))**2 / 4
rho2 = left + right
factor = 4 / math.pi**2
v = factor * torch.pow(torch.atan(w2 / h2) - torch.atan(w1 / h1), 2)
with torch.no_grad():
alpha = (ious > 0.5).float() * v / (1 - ious + v)
# CIoU
cious = ious - (rho2 / c2 + alpha * v)
loss = 1 - cious.clamp(min=-1.0, max=1.0)
return loss
@LOSSES.register_module()
class IoULoss(nn.Module):
"""IoULoss.
Computing the IoU loss between a set of predicted bboxes and target bboxes.
Args:
linear (bool): If True, use the linear scale of loss instead of the
one determined by ``mode``. Default: False.
eps (float): Eps to avoid log(0).
reduction (str): Options are "none", "mean" and "sum".
loss_weight (float): Weight of loss.
mode (str): Loss scaling mode, including "linear", "square", and "log".
Default: 'log'.
"""
def __init__(self,
linear=False,
eps=1e-6,
reduction='mean',
loss_weight=1.0,
mode='log'):
super(IoULoss, self).__init__()
assert mode in ['linear', 'square', 'log']
if linear:
mode = 'linear'
warnings.warn('DeprecationWarning: Setting "linear=True" in '
'IoULoss is deprecated, please use "mode=`linear`" '
'instead.')
self.mode = mode
self.linear = linear
self.eps = eps
self.reduction = reduction
self.loss_weight = loss_weight
def forward(self,
pred,
target,
weight=None,
avg_factor=None,
reduction_override=None,
**kwargs):
"""Forward function.
Args:
pred (torch.Tensor): The prediction.
target (torch.Tensor): The learning target of the prediction.
weight (torch.Tensor, optional): The weight of loss for each
prediction. Defaults to None.
avg_factor (int, optional): Average factor that is used to average
the loss. Defaults to None.
reduction_override (str, optional): The reduction method used to
override the original reduction method of the loss.
Defaults to None. Options are "none", "mean" and "sum".
"""
assert reduction_override in (None, 'none', 'mean', 'sum')
reduction = (
reduction_override if reduction_override else self.reduction)
if (weight is not None) and (not torch.any(weight > 0)) and (
reduction != 'none'):
if pred.dim() == weight.dim() + 1:
weight = weight.unsqueeze(1)
return (pred * weight).sum() # 0
if weight is not None and weight.dim() > 1:
# TODO: remove this in the future
# reduce the weight of shape (n, 4) to (n,) to match the
# iou_loss of shape (n,)
assert weight.shape == pred.shape
weight = weight.mean(-1)
loss = self.loss_weight * iou_loss(
pred,
target,
weight,
mode=self.mode,
eps=self.eps,
reduction=reduction,
avg_factor=avg_factor,
**kwargs)
return loss
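# Hedged usage sketch (illustrative addition): the registered module
# handles weighting and reduction around `iou_loss`; per-coordinate
# weights of shape (n, 4) are averaged down to (n,).
# `_iou_loss_module_example` is a hypothetical helper.
def _iou_loss_module_example():
    import torch
    criterion = IoULoss(mode='linear', loss_weight=1.0)
    pred = torch.tensor([[0., 0., 10., 10.]])
    target = torch.tensor([[0., 0., 10., 5.]])
    weight = torch.ones(1, 4)
    return criterion(pred, target, weight=weight)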
@LOSSES.register_module()
class BoundedIoULoss(nn.Module):
"""Bounded IoU loss module wrapping :func:`bounded_iou_loss`.
Args:
beta (float): Beta parameter in the piecewise function. Default: 0.2.
eps (float): Eps to avoid NaN. Default: 1e-3.
reduction (str): Options are "none", "mean" and "sum".
loss_weight (float): Weight of loss. Default: 1.0.
"""
def __init__(self, beta=0.2, eps=1e-3, reduction='mean', loss_weight=1.0):
super(BoundedIoULoss, self).__init__()
self.beta = beta
self.eps = eps
self.reduction = reduction
self.loss_weight = loss_weight
def forward(self,
pred,
target,
weight=None,
avg_factor=None,
reduction_override=None,
**kwargs):
if weight is not None and not torch.any(weight > 0):
if pred.dim() == weight.dim() + 1:
weight = weight.unsqueeze(1)
return (pred * weight).sum() # 0
assert reduction_override in (None, 'none', 'mean', 'sum')
reduction = (
reduction_override if reduction_override else self.reduction)
loss = self.loss_weight * bounded_iou_loss(
pred,
target,
weight,
beta=self.beta,
eps=self.eps,
reduction=reduction,
avg_factor=avg_factor,
**kwargs)
return loss
@LOSSES.register_module()
class GIoULoss(nn.Module):
"""Generalized IoU loss module wrapping :func:`giou_loss`.
Args:
eps (float): Eps to avoid division by zero. Default: 1e-6.
reduction (str): Options are "none", "mean" and "sum".
loss_weight (float): Weight of loss. Default: 1.0.
"""
def __init__(self, eps=1e-6, reduction='mean', loss_weight=1.0):
super(GIoULoss, self).__init__()
self.eps = eps
self.reduction = reduction
self.loss_weight = loss_weight
def forward(self,
pred,
target,
weight=None,
avg_factor=None,
reduction_override=None,
**kwargs):
if weight is not None and not torch.any(weight > 0):
if pred.dim() == weight.dim() + 1:
weight = weight.unsqueeze(1)
return (pred * weight).sum() # 0
assert reduction_override in (None, 'none', 'mean', 'sum')
reduction = (
reduction_override if reduction_override else self.reduction)
if weight is not None and weight.dim() > 1:
# TODO: remove this in the future
# reduce the weight of shape (n, 4) to (n,) to match the
# giou_loss of shape (n,)
assert weight.shape == pred.shape
weight = weight.mean(-1)
loss = self.loss_weight * giou_loss(
pred,
target,
weight,
eps=self.eps,
reduction=reduction,
avg_factor=avg_factor,
**kwargs)
return loss
@LOSSES.register_module()
class DIoULoss(nn.Module):
"""Distance-IoU loss module wrapping :func:`diou_loss`.
Args:
eps (float): Eps to avoid division by zero. Default: 1e-6.
reduction (str): Options are "none", "mean" and "sum".
loss_weight (float): Weight of loss. Default: 1.0.
"""
def __init__(self, eps=1e-6, reduction='mean', loss_weight=1.0):
super(DIoULoss, self).__init__()
self.eps = eps
self.reduction = reduction
self.loss_weight = loss_weight
def forward(self,
pred,
target,
weight=None,
avg_factor=None,
reduction_override=None,
**kwargs):
if weight is not None and not torch.any(weight > 0):
if pred.dim() == weight.dim() + 1:
weight = weight.unsqueeze(1)
return (pred * weight).sum() # 0
assert reduction_override in (None, 'none', 'mean', 'sum')
reduction = (
reduction_override if reduction_override else self.reduction)
if weight is not None and weight.dim() > 1:
# TODO: remove this in the future
# reduce the weight of shape (n, 4) to (n,) to match the
# diou_loss of shape (n,)
assert weight.shape == pred.shape
weight = weight.mean(-1)
loss = self.loss_weight * diou_loss(
pred,
target,
weight,
eps=self.eps,
reduction=reduction,
avg_factor=avg_factor,
**kwargs)
return loss
@LOSSES.register_module()
class CIoULoss(nn.Module):
"""Complete IoU loss module wrapping :func:`ciou_loss`.
Args:
eps (float): Eps to avoid division by zero. Default: 1e-6.
reduction (str): Options are "none", "mean" and "sum".
loss_weight (float): Weight of loss. Default: 1.0.
"""
def __init__(self, eps=1e-6, reduction='mean', loss_weight=1.0):
super(CIoULoss, self).__init__()
self.eps = eps
self.reduction = reduction
self.loss_weight = loss_weight
def forward(self,
pred,
target,
weight=None,
avg_factor=None,
reduction_override=None,
**kwargs):
if weight is not None and not torch.any(weight > 0):
if pred.dim() == weight.dim() + 1:
weight = weight.unsqueeze(1)
return (pred * weight).sum() # 0
assert reduction_override in (None, 'none', 'mean', 'sum')
reduction = (
reduction_override if reduction_override else self.reduction)
if weight is not None and weight.dim() > 1:
# TODO: remove this in the future
# reduce the weight of shape (n, 4) to (n,) to match the
# ciou_loss of shape (n,)
assert weight.shape == pred.shape
weight = weight.mean(-1)
loss = self.loss_weight * ciou_loss(
pred,
target,
weight,
eps=self.eps,
reduction=reduction,
avg_factor=avg_factor,
**kwargs)
return loss
| 15,714 | 32.084211 | 79 | py |
DSLA-DSLA | DSLA-DSLA/mmdet/models/losses/smooth_l1_loss.py | # Copyright (c) OpenMMLab. All rights reserved.
import mmcv
import torch
import torch.nn as nn
from ..builder import LOSSES
from .utils import weighted_loss
@mmcv.jit(derivate=True, coderize=True)
@weighted_loss
def smooth_l1_loss(pred, target, beta=1.0):
"""Smooth L1 loss.
Args:
pred (torch.Tensor): The prediction.
target (torch.Tensor): The learning target of the prediction.
beta (float, optional): The threshold in the piecewise function.
Defaults to 1.0.
Returns:
torch.Tensor: Calculated loss
"""
assert beta > 0
if target.numel() == 0:
return pred.sum() * 0
assert pred.size() == target.size()
diff = torch.abs(pred - target)
loss = torch.where(diff < beta, 0.5 * diff * diff / beta,
diff - 0.5 * beta)
return loss
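# Hedged worked example (illustrative addition): with beta = 1, an error
# of 0.5 falls in the quadratic branch (0.5 * 0.5 ** 2 = 0.125) and an
# error of 2.0 falls in the linear branch (2.0 - 0.5 = 1.5);
# `_smooth_l1_example` is a hypothetical helper.
def _smooth_l1_example():
    import torch
    pred = torch.tensor([0.5, 2.0])
    target = torch.tensor([0.0, 0.0])
    return smooth_l1_loss(pred, target, reduction='none')  # [0.125, 1.5]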
@mmcv.jit(derivate=True, coderize=True)
@weighted_loss
def l1_loss(pred, target):
"""L1 loss.
Args:
pred (torch.Tensor): The prediction.
target (torch.Tensor): The learning target of the prediction.
Returns:
torch.Tensor: Calculated loss
"""
if target.numel() == 0:
return pred.sum() * 0
assert pred.size() == target.size()
loss = torch.abs(pred - target)
return loss
@LOSSES.register_module()
class SmoothL1Loss(nn.Module):
"""Smooth L1 loss.
Args:
beta (float, optional): The threshold in the piecewise function.
Defaults to 1.0.
reduction (str, optional): The method to reduce the loss.
Options are "none", "mean" and "sum". Defaults to "mean".
loss_weight (float, optional): The weight of loss. Defaults to 1.0.
"""
def __init__(self, beta=1.0, reduction='mean', loss_weight=1.0):
super(SmoothL1Loss, self).__init__()
self.beta = beta
self.reduction = reduction
self.loss_weight = loss_weight
def forward(self,
pred,
target,
weight=None,
avg_factor=None,
reduction_override=None,
**kwargs):
"""Forward function.
Args:
pred (torch.Tensor): The prediction.
target (torch.Tensor): The learning target of the prediction.
weight (torch.Tensor, optional): The weight of loss for each
prediction. Defaults to None.
avg_factor (int, optional): Average factor that is used to average
the loss. Defaults to None.
reduction_override (str, optional): The reduction method used to
override the original reduction method of the loss.
Defaults to None.
"""
assert reduction_override in (None, 'none', 'mean', 'sum')
reduction = (
reduction_override if reduction_override else self.reduction)
loss_bbox = self.loss_weight * smooth_l1_loss(
pred,
target,
weight,
beta=self.beta,
reduction=reduction,
avg_factor=avg_factor,
**kwargs)
return loss_bbox
@LOSSES.register_module()
class L1Loss(nn.Module):
"""L1 loss.
Args:
reduction (str, optional): The method to reduce the loss.
Options are "none", "mean" and "sum". Defaults to "mean".
loss_weight (float, optional): The weight of loss. Defaults to 1.0.
"""
def __init__(self, reduction='mean', loss_weight=1.0):
super(L1Loss, self).__init__()
self.reduction = reduction
self.loss_weight = loss_weight
def forward(self,
pred,
target,
weight=None,
avg_factor=None,
reduction_override=None):
"""Forward function.
Args:
pred (torch.Tensor): The prediction.
target (torch.Tensor): The learning target of the prediction.
weight (torch.Tensor, optional): The weight of loss for each
prediction. Defaults to None.
avg_factor (int, optional): Average factor that is used to average
the loss. Defaults to None.
reduction_override (str, optional): The reduction method used to
override the original reduction method of the loss.
Defaults to None.
"""
assert reduction_override in (None, 'none', 'mean', 'sum')
reduction = (
reduction_override if reduction_override else self.reduction)
loss_bbox = self.loss_weight * l1_loss(
pred, target, weight, reduction=reduction, avg_factor=avg_factor)
return loss_bbox
| 4,635 | 30.537415 | 78 | py |
DSLA-DSLA | DSLA-DSLA/mmdet/models/losses/gfocal_loss.py | # Copyright (c) OpenMMLab. All rights reserved.
import mmcv
import torch.nn as nn
import torch.nn.functional as F
from ..builder import LOSSES
from .utils import weighted_loss
@mmcv.jit(derivate=True, coderize=True)
@weighted_loss
def quality_focal_loss(pred, target, beta=2.0):
r"""Quality Focal Loss (QFL) is from `Generalized Focal Loss: Learning
Qualified and Distributed Bounding Boxes for Dense Object Detection
<https://arxiv.org/abs/2006.04388>`_.
Args:
pred (torch.Tensor): Predicted joint representation of classification
and quality (IoU) estimation with shape (N, C), C is the number of
classes.
target (tuple([torch.Tensor])): Target category label with shape (N,)
and target quality label with shape (N,).
beta (float): The beta parameter for calculating the modulating factor.
Defaults to 2.0.
Returns:
torch.Tensor: Loss tensor with shape (N,).
"""
assert len(target) == 2, """target for QFL must be a tuple of two elements,
including category label and quality label, respectively"""
# label denotes the category id, score denotes the quality score
label, score = target
# negatives are supervised by 0 quality score
pred_sigmoid = pred.sigmoid()
scale_factor = pred_sigmoid
zerolabel = scale_factor.new_zeros(pred.shape)
loss = F.binary_cross_entropy_with_logits(
pred, zerolabel, reduction='none') * scale_factor.pow(beta)
# FG cat_id: [0, num_classes - 1], BG cat_id: num_classes
bg_class_ind = pred.size(1)
pos = ((label >= 0) & (label < bg_class_ind)).nonzero().squeeze(1)
pos_label = label[pos].long()
# positives are supervised by bbox quality (IoU) score
scale_factor = score[pos] - pred_sigmoid[pos, pos_label]
loss[pos, pos_label] = F.binary_cross_entropy_with_logits(
pred[pos, pos_label], score[pos],
reduction='none') * scale_factor.abs().pow(beta)
loss = loss.sum(dim=1, keepdim=False)
return loss
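# Hedged usage sketch (illustrative addition): QFL targets are a
# (label, score) pair; background samples carry label == num_classes and
# quality score 0. `_qfl_example` is a hypothetical helper.
def _qfl_example():
    import torch
    pred = torch.randn(4, 3)                     # logits for 3 classes
    labels = torch.tensor([0, 2, 3, 3])          # 3 marks background here
    scores = torch.tensor([0.9, 0.6, 0.0, 0.0])  # IoU quality targets
    return quality_focal_loss(pred, (labels, scores), beta=2.0)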
@weighted_loss
def quality_focal_loss_with_prob(pred, target, beta=2.0):
r"""Quality Focal Loss (QFL) is from `Generalized Focal Loss: Learning
Qualified and Distributed Bounding Boxes for Dense Object Detection
<https://arxiv.org/abs/2006.04388>`_.
Different from `quality_focal_loss`, this function accepts probability
as input.
Args:
pred (torch.Tensor): Predicted joint representation of classification
and quality (IoU) estimation with shape (N, C), C is the number of
classes.
target (tuple([torch.Tensor])): Target category label with shape (N,)
and target quality label with shape (N,).
beta (float): The beta parameter for calculating the modulating factor.
Defaults to 2.0.
Returns:
torch.Tensor: Loss tensor with shape (N,).
"""
assert len(target) == 2, """target for QFL must be a tuple of two elements,
including category label and quality label, respectively"""
# label denotes the category id, score denotes the quality score
label, score = target
# negatives are supervised by 0 quality score
pred_sigmoid = pred
scale_factor = pred_sigmoid
zerolabel = scale_factor.new_zeros(pred.shape)
loss = F.binary_cross_entropy(
pred, zerolabel, reduction='none') * scale_factor.pow(beta)
# FG cat_id: [0, num_classes - 1], BG cat_id: num_classes
bg_class_ind = pred.size(1)
pos = ((label >= 0) & (label < bg_class_ind)).nonzero().squeeze(1)
pos_label = label[pos].long()
# positives are supervised by bbox quality (IoU) score
scale_factor = score[pos] - pred_sigmoid[pos, pos_label]
loss[pos, pos_label] = F.binary_cross_entropy(
pred[pos, pos_label], score[pos],
reduction='none') * scale_factor.abs().pow(beta)
loss = loss.sum(dim=1, keepdim=False)
return loss
@mmcv.jit(derivate=True, coderize=True)
@weighted_loss
def distribution_focal_loss(pred, label):
r"""Distribution Focal Loss (DFL) is from `Generalized Focal Loss: Learning
Qualified and Distributed Bounding Boxes for Dense Object Detection
<https://arxiv.org/abs/2006.04388>`_.
Args:
pred (torch.Tensor): Predicted general distribution of bounding boxes
(before softmax) with shape (N, n+1), n is the max value of the
integral set `{0, ..., n}` in paper.
label (torch.Tensor): Target distance label for bounding boxes with
shape (N,).
Returns:
torch.Tensor: Loss tensor with shape (N,).
"""
dis_left = label.long()
dis_right = dis_left + 1
weight_left = dis_right.float() - label
weight_right = label - dis_left.float()
loss = F.cross_entropy(pred, dis_left, reduction='none') * weight_left \
+ F.cross_entropy(pred, dis_right, reduction='none') * weight_right
return loss
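# Hedged worked example (illustrative addition): a continuous target of
# 2.3 is supervised by its two nearest integer bins, 2 (weight 0.7) and
# 3 (weight 0.3); `_dfl_example` is a hypothetical helper.
def _dfl_example():
    import torch
    pred = torch.randn(1, 8)  # distribution logits over {0, ..., 7}
    label = torch.tensor([2.3])
    return distribution_focal_loss(pred, label)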
@LOSSES.register_module()
class QualityFocalLoss(nn.Module):
r"""Quality Focal Loss (QFL) is a variant of `Generalized Focal Loss:
Learning Qualified and Distributed Bounding Boxes for Dense Object
Detection <https://arxiv.org/abs/2006.04388>`_.
Args:
use_sigmoid (bool): Whether sigmoid operation is conducted in QFL.
Defaults to True.
beta (float): The beta parameter for calculating the modulating factor.
Defaults to 2.0.
reduction (str): Options are "none", "mean" and "sum".
loss_weight (float): Loss weight of current loss.
activated (bool, optional): Whether the input is activated.
If True, it means the input has been activated and can be
treated as probabilities. Else, it should be treated as logits.
Defaults to False.
"""
def __init__(self,
use_sigmoid=True,
beta=2.0,
reduction='mean',
loss_weight=1.0,
activated=False):
super(QualityFocalLoss, self).__init__()
assert use_sigmoid is True, 'Only sigmoid in QFL supported now.'
self.use_sigmoid = use_sigmoid
self.beta = beta
self.reduction = reduction
self.loss_weight = loss_weight
self.activated = activated
def forward(self,
pred,
target,
weight=None,
avg_factor=None,
reduction_override=None):
"""Forward function.
Args:
pred (torch.Tensor): Predicted joint representation of
classification and quality (IoU) estimation with shape (N, C),
C is the number of classes.
target (tuple([torch.Tensor])): Target category label with shape
(N,) and target quality label with shape (N,).
weight (torch.Tensor, optional): The weight of loss for each
prediction. Defaults to None.
avg_factor (int, optional): Average factor that is used to average
the loss. Defaults to None.
reduction_override (str, optional): The reduction method used to
override the original reduction method of the loss.
Defaults to None.
"""
assert reduction_override in (None, 'none', 'mean', 'sum')
reduction = (
reduction_override if reduction_override else self.reduction)
if self.use_sigmoid:
if self.activated:
calculate_loss_func = quality_focal_loss_with_prob
else:
calculate_loss_func = quality_focal_loss
loss_cls = self.loss_weight * calculate_loss_func(
pred,
target,
weight,
beta=self.beta,
reduction=reduction,
avg_factor=avg_factor)
else:
raise NotImplementedError
return loss_cls
@LOSSES.register_module()
class DistributionFocalLoss(nn.Module):
r"""Distribution Focal Loss (DFL) is a variant of `Generalized Focal Loss:
Learning Qualified and Distributed Bounding Boxes for Dense Object
Detection <https://arxiv.org/abs/2006.04388>`_.
Args:
reduction (str): Options are `'none'`, `'mean'` and `'sum'`.
loss_weight (float): Loss weight of current loss.
"""
def __init__(self, reduction='mean', loss_weight=1.0):
super(DistributionFocalLoss, self).__init__()
self.reduction = reduction
self.loss_weight = loss_weight
def forward(self,
pred,
target,
weight=None,
avg_factor=None,
reduction_override=None):
"""Forward function.
Args:
pred (torch.Tensor): Predicted general distribution of bounding
boxes (before softmax) with shape (N, n+1), n is the max value
of the integral set `{0, ..., n}` in paper.
target (torch.Tensor): Target distance label for bounding boxes
with shape (N,).
weight (torch.Tensor, optional): The weight of loss for each
prediction. Defaults to None.
avg_factor (int, optional): Average factor that is used to average
the loss. Defaults to None.
reduction_override (str, optional): The reduction method used to
override the original reduction method of the loss.
Defaults to None.
"""
assert reduction_override in (None, 'none', 'mean', 'sum')
reduction = (
reduction_override if reduction_override else self.reduction)
loss_cls = self.loss_weight * distribution_focal_loss(
pred, target, weight, reduction=reduction, avg_factor=avg_factor)
return loss_cls
| 9,834 | 38.979675 | 79 | py |
DSLA-DSLA | DSLA-DSLA/mmdet/models/losses/varifocal_loss.py | # Copyright (c) OpenMMLab. All rights reserved.
import mmcv
import torch.nn as nn
import torch.nn.functional as F
from ..builder import LOSSES
from .utils import weight_reduce_loss
@mmcv.jit(derivate=True, coderize=True)
def varifocal_loss(pred,
target,
weight=None,
alpha=0.75,
gamma=2.0,
iou_weighted=True,
reduction='mean',
avg_factor=None):
"""`Varifocal Loss <https://arxiv.org/abs/2008.13367>`_
Args:
pred (torch.Tensor): The prediction with shape (N, C), C is the
number of classes
target (torch.Tensor): The learning target of the iou-aware
classification score with shape (N, C), C is the number of classes.
weight (torch.Tensor, optional): The weight of loss for each
prediction. Defaults to None.
alpha (float, optional): A balance factor for the negative part of
Varifocal Loss, which is different from the alpha of Focal Loss.
Defaults to 0.75.
gamma (float, optional): The gamma for calculating the modulating
factor. Defaults to 2.0.
iou_weighted (bool, optional): Whether to weight the loss of the
positive example with the iou target. Defaults to True.
reduction (str, optional): The method used to reduce the loss into
a scalar. Defaults to 'mean'. Options are "none", "mean" and
"sum".
avg_factor (int, optional): Average factor that is used to average
the loss. Defaults to None.
"""
# pred and target should be of the same size
assert pred.size() == target.size()
pred_sigmoid = pred.sigmoid()
target = target.type_as(pred)
if iou_weighted:
focal_weight = target * (target > 0.0).float() + \
alpha * (pred_sigmoid - target).abs().pow(gamma) * \
(target <= 0.0).float()
else:
focal_weight = (target > 0.0).float() + \
alpha * (pred_sigmoid - target).abs().pow(gamma) * \
(target <= 0.0).float()
loss = F.binary_cross_entropy_with_logits(
pred, target, reduction='none') * focal_weight
loss = weight_reduce_loss(loss, weight, reduction, avg_factor)
return loss
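# Hedged usage sketch (illustrative addition): the target is an IoU-aware
# classification score map, where positive entries carry the IoU and
# negatives are 0; `_varifocal_example` is a hypothetical helper.
def _varifocal_example():
    import torch
    pred = torch.randn(4, 3)
    target = torch.zeros(4, 3)
    target[0, 1] = 0.8  # one positive sample with IoU 0.8
    return varifocal_loss(pred, target)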
@LOSSES.register_module()
class VarifocalLoss(nn.Module):
def __init__(self,
use_sigmoid=True,
alpha=0.75,
gamma=2.0,
iou_weighted=True,
reduction='mean',
loss_weight=1.0):
"""`Varifocal Loss <https://arxiv.org/abs/2008.13367>`_
Args:
use_sigmoid (bool, optional): Whether the prediction is used with
sigmoid instead of softmax. Defaults to True.
alpha (float, optional): A balance factor for the negative part of
Varifocal Loss, which is different from the alpha of Focal
Loss. Defaults to 0.75.
gamma (float, optional): The gamma for calculating the modulating
factor. Defaults to 2.0.
iou_weighted (bool, optional): Whether to weight the loss of the
positive examples with the iou target. Defaults to True.
reduction (str, optional): The method used to reduce the loss into
a scalar. Defaults to 'mean'. Options are "none", "mean" and
"sum".
loss_weight (float, optional): Weight of loss. Defaults to 1.0.
"""
super(VarifocalLoss, self).__init__()
assert use_sigmoid is True, \
'Only sigmoid varifocal loss supported now.'
assert alpha >= 0.0
self.use_sigmoid = use_sigmoid
self.alpha = alpha
self.gamma = gamma
self.iou_weighted = iou_weighted
self.reduction = reduction
self.loss_weight = loss_weight
def forward(self,
pred,
target,
weight=None,
avg_factor=None,
reduction_override=None):
"""Forward function.
Args:
pred (torch.Tensor): The prediction.
target (torch.Tensor): The learning target of the prediction.
weight (torch.Tensor, optional): The weight of loss for each
prediction. Defaults to None.
avg_factor (int, optional): Average factor that is used to average
the loss. Defaults to None.
reduction_override (str, optional): The reduction method used to
override the original reduction method of the loss.
Options are "none", "mean" and "sum".
Returns:
torch.Tensor: The calculated loss
"""
assert reduction_override in (None, 'none', 'mean', 'sum')
reduction = (
reduction_override if reduction_override else self.reduction)
if self.use_sigmoid:
loss_cls = self.loss_weight * varifocal_loss(
pred,
target,
weight,
alpha=self.alpha,
gamma=self.gamma,
iou_weighted=self.iou_weighted,
reduction=reduction,
avg_factor=avg_factor)
else:
raise NotImplementedError
return loss_cls
| 5,365 | 38.748148 | 79 | py |
DSLA-DSLA | DSLA-DSLA/mmdet/models/losses/utils.py | # Copyright (c) OpenMMLab. All rights reserved.
import functools
import mmcv
import torch.nn.functional as F
def reduce_loss(loss, reduction):
"""Reduce loss as specified.
Args:
loss (Tensor): Elementwise loss tensor.
reduction (str): Options are "none", "mean" and "sum".
Returns:
Tensor: Reduced loss tensor.
"""
reduction_enum = F._Reduction.get_enum(reduction)
# none: 0, elementwise_mean:1, sum: 2
if reduction_enum == 0:
return loss
elif reduction_enum == 1:
return loss.mean()
elif reduction_enum == 2:
return loss.sum()
@mmcv.jit(derivate=True, coderize=True)
def weight_reduce_loss(loss, weight=None, reduction='mean', avg_factor=None):
"""Apply element-wise weight and reduce loss.
Args:
loss (Tensor): Element-wise loss.
weight (Tensor): Element-wise weights.
reduction (str): Same as built-in losses of PyTorch.
avg_factor (float): Average factor when computing the mean of losses.
Returns:
Tensor: Processed loss values.
"""
# if weight is specified, apply element-wise weight
if weight is not None:
loss = loss * weight
# if avg_factor is not specified, just reduce the loss
if avg_factor is None:
loss = reduce_loss(loss, reduction)
else:
# if reduction is mean, then average the loss by avg_factor
if reduction == 'mean':
loss = loss.sum() / avg_factor
# if reduction is 'none', then do nothing, otherwise raise an error
elif reduction != 'none':
raise ValueError('avg_factor cannot be used with reduction="sum"')
return loss
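# Hedged worked example (illustrative addition): with avg_factor the mean
# is taken over a caller-supplied count (e.g. the number of positives)
# rather than over all elements; `_weight_reduce_example` is hypothetical.
def _weight_reduce_example():
    import torch
    loss = torch.tensor([1.0, 2.0, 3.0, 0.0])
    weight = torch.tensor([1.0, 1.0, 1.0, 0.0])
    # weighted sum is 6.0; dividing by avg_factor = 3 gives 2.0
    return weight_reduce_loss(loss, weight, reduction='mean', avg_factor=3)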
def weighted_loss(loss_func):
"""Create a weighted version of a given loss function.
To use this decorator, the loss function must have the signature like
`loss_func(pred, target, **kwargs)`. The function only needs to compute
element-wise loss without any reduction. This decorator will add weight
and reduction arguments to the function. The decorated function will have
the signature like `loss_func(pred, target, weight=None, reduction='mean',
avg_factor=None, **kwargs)`.
:Example:
>>> import torch
>>> @weighted_loss
>>> def l1_loss(pred, target):
>>> return (pred - target).abs()
>>> pred = torch.Tensor([0, 2, 3])
>>> target = torch.Tensor([1, 1, 1])
>>> weight = torch.Tensor([1, 0, 1])
>>> l1_loss(pred, target)
tensor(1.3333)
>>> l1_loss(pred, target, weight)
tensor(1.)
>>> l1_loss(pred, target, reduction='none')
tensor([1., 1., 2.])
>>> l1_loss(pred, target, weight, avg_factor=2)
tensor(1.5000)
"""
@functools.wraps(loss_func)
def wrapper(pred,
target,
weight=None,
reduction='mean',
avg_factor=None,
**kwargs):
# get element-wise loss
loss = loss_func(pred, target, **kwargs)
loss = weight_reduce_loss(loss, weight, reduction, avg_factor)
return loss
return wrapper
| 3,103 | 29.431373 | 79 | py |
DSLA-DSLA | DSLA-DSLA/mmdet/models/losses/seesaw_loss.py | # Copyright (c) OpenMMLab. All rights reserved.
import torch
import torch.nn as nn
import torch.nn.functional as F
from ..builder import LOSSES
from .accuracy import accuracy
from .cross_entropy_loss import cross_entropy
from .utils import weight_reduce_loss
def seesaw_ce_loss(cls_score,
labels,
label_weights,
cum_samples,
num_classes,
p,
q,
eps,
reduction='mean',
avg_factor=None):
"""Calculate the Seesaw CrossEntropy loss.
Args:
cls_score (torch.Tensor): The prediction with shape (N, C),
C is the number of classes.
labels (torch.Tensor): The learning label of the prediction.
label_weights (torch.Tensor): Sample-wise loss weight.
cum_samples (torch.Tensor): Cumulative samples for each category.
num_classes (int): The number of classes.
p (float): The ``p`` in the mitigation factor.
q (float): The ``q`` in the compensation factor.
eps (float): The minimal value of divisor to smooth
the computation of the compensation factor.
reduction (str, optional): The method used to reduce the loss.
avg_factor (int, optional): Average factor that is used to average
the loss. Defaults to None.
Returns:
torch.Tensor: The calculated loss
"""
assert cls_score.size(-1) == num_classes
assert len(cum_samples) == num_classes
onehot_labels = F.one_hot(labels, num_classes)
seesaw_weights = cls_score.new_ones(onehot_labels.size())
# mitigation factor
if p > 0:
sample_ratio_matrix = cum_samples[None, :].clamp(
min=1) / cum_samples[:, None].clamp(min=1)
index = (sample_ratio_matrix < 1.0).float()
sample_weights = sample_ratio_matrix.pow(p) * index + (1 - index)
mitigation_factor = sample_weights[labels.long(), :]
seesaw_weights = seesaw_weights * mitigation_factor
# compensation factor
if q > 0:
scores = F.softmax(cls_score.detach(), dim=1)
self_scores = scores[
torch.arange(0, len(scores)).to(scores.device).long(),
labels.long()]
score_matrix = scores / self_scores[:, None].clamp(min=eps)
index = (score_matrix > 1.0).float()
compensation_factor = score_matrix.pow(q) * index + (1 - index)
seesaw_weights = seesaw_weights * compensation_factor
cls_score = cls_score + (seesaw_weights.log() * (1 - onehot_labels))
loss = F.cross_entropy(cls_score, labels, weight=None, reduction='none')
if label_weights is not None:
label_weights = label_weights.float()
loss = weight_reduce_loss(
loss, weight=label_weights, reduction=reduction, avg_factor=avg_factor)
return loss
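# Hedged usage sketch (illustrative addition): `cum_samples` holds running
# per-class sample counts, so rare classes have their negative gradients
# mitigated; `_seesaw_example` is a hypothetical helper.
def _seesaw_example():
    import torch
    cls_score = torch.randn(4, 3)
    labels = torch.tensor([0, 1, 2, 0])
    label_weights = torch.ones(4)
    cum_samples = torch.tensor([1000., 100., 10.])  # long-tailed counts
    return seesaw_ce_loss(cls_score, labels, label_weights, cum_samples,
                          num_classes=3, p=0.8, q=2.0, eps=1e-2)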
@LOSSES.register_module()
class SeesawLoss(nn.Module):
"""
Seesaw Loss for Long-Tailed Instance Segmentation (CVPR 2021)
arXiv: https://arxiv.org/abs/2008.10032
Args:
use_sigmoid (bool, optional): Whether the prediction uses sigmoid
or softmax. Only False is supported.
p (float, optional): The ``p`` in the mitigation factor.
Defaults to 0.8.
q (float, optional): The ``q`` in the compensation factor.
Defaults to 2.0.
num_classes (int, optional): The number of classes.
Defaults to 1203 for the LVIS v1 dataset.
eps (float, optional): The minimal value of divisor to smooth
the computation of the compensation factor.
reduction (str, optional): The method that reduces the loss to a
scalar. Options are "none", "mean" and "sum".
loss_weight (float, optional): The weight of the loss. Defaults to 1.0.
return_dict (bool, optional): Whether to return the losses as a dict.
Defaults to True.
"""
def __init__(self,
use_sigmoid=False,
p=0.8,
q=2.0,
num_classes=1203,
eps=1e-2,
reduction='mean',
loss_weight=1.0,
return_dict=True):
super(SeesawLoss, self).__init__()
assert not use_sigmoid
self.use_sigmoid = False
self.p = p
self.q = q
self.num_classes = num_classes
self.eps = eps
self.reduction = reduction
self.loss_weight = loss_weight
self.return_dict = return_dict
# 0 for pos, 1 for neg
self.cls_criterion = seesaw_ce_loss
# cumulative samples for each category
self.register_buffer(
'cum_samples',
torch.zeros(self.num_classes + 1, dtype=torch.float))
# custom output channels of the classifier
self.custom_cls_channels = True
# custom activation of cls_score
self.custom_activation = True
# custom accuracy of the classifier
self.custom_accuracy = True
def _split_cls_score(self, cls_score):
# split cls_score to cls_score_classes and cls_score_objectness
assert cls_score.size(-1) == self.num_classes + 2
cls_score_classes = cls_score[..., :-2]
cls_score_objectness = cls_score[..., -2:]
return cls_score_classes, cls_score_objectness
def get_cls_channels(self, num_classes):
"""Get custom classification channels.
Args:
num_classes (int): The number of classes.
Returns:
int: The custom classification channels.
"""
assert num_classes == self.num_classes
return num_classes + 2
def get_activation(self, cls_score):
"""Get custom activation of cls_score.
Args:
cls_score (torch.Tensor): The prediction with shape (N, C + 2).
Returns:
torch.Tensor: The custom activation of cls_score with shape
(N, C + 1).
"""
cls_score_classes, cls_score_objectness = self._split_cls_score(
cls_score)
score_classes = F.softmax(cls_score_classes, dim=-1)
score_objectness = F.softmax(cls_score_objectness, dim=-1)
score_pos = score_objectness[..., [0]]
score_neg = score_objectness[..., [1]]
score_classes = score_classes * score_pos
scores = torch.cat([score_classes, score_neg], dim=-1)
return scores
def get_accuracy(self, cls_score, labels):
"""Get custom accuracy w.r.t. cls_score and labels.
Args:
cls_score (torch.Tensor): The prediction with shape (N, C + 2).
labels (torch.Tensor): The learning label of the prediction.
Returns:
Dict [str, torch.Tensor]: The accuracy for objectness and classes,
respectively.
"""
pos_inds = labels < self.num_classes
obj_labels = (labels == self.num_classes).long()
cls_score_classes, cls_score_objectness = self._split_cls_score(
cls_score)
acc_objectness = accuracy(cls_score_objectness, obj_labels)
acc_classes = accuracy(cls_score_classes[pos_inds], labels[pos_inds])
acc = dict()
acc['acc_objectness'] = acc_objectness
acc['acc_classes'] = acc_classes
return acc
def forward(self,
cls_score,
labels,
label_weights=None,
avg_factor=None,
reduction_override=None):
"""Forward function.
Args:
cls_score (torch.Tensor): The prediction with shape (N, C + 2).
labels (torch.Tensor): The learning label of the prediction.
label_weights (torch.Tensor, optional): Sample-wise loss weight.
avg_factor (int, optional): Average factor that is used to average
the loss. Defaults to None.
reduction_override (str, optional): The reduction method used to
override the original reduction method of the loss.
Options are "none", "mean" and "sum".
Returns:
torch.Tensor | Dict [str, torch.Tensor]:
if return_dict == False: The calculated loss |
if return_dict == True: The dict of calculated losses
for objectness and classes, respectively.
"""
assert reduction_override in (None, 'none', 'mean', 'sum')
reduction = (
reduction_override if reduction_override else self.reduction)
assert cls_score.size(-1) == self.num_classes + 2
pos_inds = labels < self.num_classes
# 0 for pos, 1 for neg
obj_labels = (labels == self.num_classes).long()
# accumulate the samples for each category
unique_labels = labels.unique()
for u_l in unique_labels:
inds_ = labels == u_l.item()
self.cum_samples[u_l] += inds_.sum()
if label_weights is not None:
label_weights = label_weights.float()
else:
label_weights = labels.new_ones(labels.size(), dtype=torch.float)
cls_score_classes, cls_score_objectness = self._split_cls_score(
cls_score)
# calculate loss_cls_classes (only need pos samples)
if pos_inds.sum() > 0:
loss_cls_classes = self.loss_weight * self.cls_criterion(
cls_score_classes[pos_inds], labels[pos_inds],
label_weights[pos_inds], self.cum_samples[:self.num_classes],
self.num_classes, self.p, self.q, self.eps, reduction,
avg_factor)
else:
loss_cls_classes = cls_score_classes[pos_inds].sum()
# calculate loss_cls_objectness
loss_cls_objectness = self.loss_weight * cross_entropy(
cls_score_objectness, obj_labels, label_weights, reduction,
avg_factor)
if self.return_dict:
loss_cls = dict()
loss_cls['loss_cls_objectness'] = loss_cls_objectness
loss_cls['loss_cls_classes'] = loss_cls_classes
else:
loss_cls = loss_cls_classes + loss_cls_objectness
return loss_cls
| 10,136 | 37.543726 | 79 | py |
DSLA-DSLA | DSLA-DSLA/mmdet/models/losses/ae_loss.py | # Copyright (c) OpenMMLab. All rights reserved.
import mmcv
import torch
import torch.nn as nn
import torch.nn.functional as F
from ..builder import LOSSES
@mmcv.jit(derivate=True, coderize=True)
def ae_loss_per_image(tl_preds, br_preds, match):
"""Associative Embedding Loss in one image.
Associative Embedding Loss includes two parts: pull loss and push loss.
Pull loss makes embedding vectors from the same object closer to each
other. Push loss distinguishes embedding vectors from different objects,
and makes the gap between them large enough.
During computing, usually there are 3 cases:
- no object in image: both pull loss and push loss will be 0.
- one object in image: push loss will be 0 and pull loss is computed
by the two corners of the only object.
- more than one object in image: pull loss is computed by corner pairs
from each object, push loss is computed by each object with all
other objects. We use a confusion matrix with 0 on the diagonal to
compute the push loss.
Args:
tl_preds (tensor): Embedding feature map of left-top corner.
br_preds (tensor): Embedding feature map of bottom-right corner.
match (list): Downsampled coordinates pair of each ground truth box.
"""
tl_list, br_list, me_list = [], [], []
if len(match) == 0: # no object in image
pull_loss = tl_preds.sum() * 0.
push_loss = tl_preds.sum() * 0.
else:
for m in match:
[tl_y, tl_x], [br_y, br_x] = m
tl_e = tl_preds[:, tl_y, tl_x].view(-1, 1)
br_e = br_preds[:, br_y, br_x].view(-1, 1)
tl_list.append(tl_e)
br_list.append(br_e)
me_list.append((tl_e + br_e) / 2.0)
tl_list = torch.cat(tl_list)
br_list = torch.cat(br_list)
me_list = torch.cat(me_list)
assert tl_list.size() == br_list.size()
# N is object number in image, M is dimension of embedding vector
N, M = tl_list.size()
pull_loss = (tl_list - me_list).pow(2) + (br_list - me_list).pow(2)
pull_loss = pull_loss.sum() / N
margin = 1 # exp setting of CornerNet, details in section 3.3 of paper
# confusion matrix of push loss
conf_mat = me_list.expand((N, N, M)).permute(1, 0, 2) - me_list
conf_weight = 1 - torch.eye(N).type_as(me_list)
conf_mat = conf_weight * (margin - conf_mat.sum(-1).abs())
if N > 1: # more than one object in current image
push_loss = F.relu(conf_mat).sum() / (N * (N - 1))
else:
push_loss = tl_preds.sum() * 0.
return pull_loss, push_loss
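# Hedged worked example (illustrative addition): two objects whose matched
# corners carry identical embeddings give zero pull loss, and their mean
# embeddings differ by 2 > margin, so push loss is zero as well;
# `_ae_example` is a hypothetical helper.
def _ae_example():
    import torch
    tl_preds = torch.zeros(1, 4, 4)
    br_preds = torch.zeros(1, 4, 4)
    tl_preds[0, 0, 0], br_preds[0, 1, 1] = 1.0, 1.0    # object 1
    tl_preds[0, 2, 2], br_preds[0, 3, 3] = -1.0, -1.0  # object 2
    match = [[[0, 0], [1, 1]], [[2, 2], [3, 3]]]
    return ae_loss_per_image(tl_preds, br_preds, match)  # (0., 0.)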
@LOSSES.register_module()
class AssociativeEmbeddingLoss(nn.Module):
"""Associative Embedding Loss.
More details can be found in
`Associative Embedding <https://arxiv.org/abs/1611.05424>`_ and
`CornerNet <https://arxiv.org/abs/1808.01244>`_ .
Code is modified from `kp_utils.py <https://github.com/princeton-vl/CornerNet/blob/master/models/py_utils/kp_utils.py#L180>`_ # noqa: E501
Args:
pull_weight (float): Loss weight for corners from same object.
push_weight (float): Loss weight for corners from different object.
"""
def __init__(self, pull_weight=0.25, push_weight=0.25):
super(AssociativeEmbeddingLoss, self).__init__()
self.pull_weight = pull_weight
self.push_weight = push_weight
def forward(self, pred, target, match):
"""Forward function."""
batch = pred.size(0)
pull_all, push_all = 0.0, 0.0
for i in range(batch):
pull, push = ae_loss_per_image(pred[i], target[i], match[i])
pull_all += self.pull_weight * pull
push_all += self.push_weight * push
return pull_all, push_all
| 3,857 | 36.096154 | 143 | py |
DSLA-DSLA | DSLA-DSLA/mmdet/models/losses/accuracy.py | # Copyright (c) OpenMMLab. All rights reserved.
import mmcv
import torch.nn as nn
@mmcv.jit(coderize=True)
def accuracy(pred, target, topk=1, thresh=None):
"""Calculate accuracy according to the prediction and target.
Args:
pred (torch.Tensor): The model prediction, shape (N, num_class)
target (torch.Tensor): The target of each prediction, shape (N, )
topk (int | tuple[int], optional): If the predictions in ``topk``
matches the target, the predictions will be regarded as
correct ones. Defaults to 1.
thresh (float, optional): If not None, predictions with scores under
this threshold are considered incorrect. Default to None.
Returns:
float | tuple[float]: If the input ``topk`` is a single integer,
the function will return a single float as accuracy. If
``topk`` is a tuple containing multiple integers, the
function will return a tuple containing accuracies of
each ``topk`` number.
"""
assert isinstance(topk, (int, tuple))
if isinstance(topk, int):
topk = (topk, )
return_single = True
else:
return_single = False
maxk = max(topk)
if pred.size(0) == 0:
accu = [pred.new_tensor(0.) for _ in range(len(topk))]
return accu[0] if return_single else accu
assert pred.ndim == 2 and target.ndim == 1
assert pred.size(0) == target.size(0)
assert maxk <= pred.size(1), \
f'maxk {maxk} exceeds pred dimension {pred.size(1)}'
pred_value, pred_label = pred.topk(maxk, dim=1)
pred_label = pred_label.t() # transpose to shape (maxk, N)
correct = pred_label.eq(target.view(1, -1).expand_as(pred_label))
if thresh is not None:
# Only prediction values larger than thresh are counted as correct
correct = correct & (pred_value > thresh).t()
res = []
for k in topk:
correct_k = correct[:k].reshape(-1).float().sum(0, keepdim=True)
res.append(correct_k.mul_(100.0 / pred.size(0)))
return res[0] if return_single else res
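# Hedged worked example (illustrative addition): top-1 misses the second
# sample while top-2 recovers it; `_accuracy_example` is hypothetical.
def _accuracy_example():
    import torch
    pred = torch.tensor([[0.1, 0.9], [0.6, 0.4]])
    target = torch.tensor([1, 1])
    return accuracy(pred, target, topk=(1, 2))  # [tensor([50.]), tensor([100.])]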
class Accuracy(nn.Module):
def __init__(self, topk=(1, ), thresh=None):
"""Module to calculate the accuracy.
Args:
topk (tuple, optional): The criterion used to calculate the
accuracy. Defaults to (1,).
thresh (float, optional): If not None, predictions with scores
under this threshold are considered incorrect. Default to None.
"""
super().__init__()
self.topk = topk
self.thresh = thresh
def forward(self, pred, target):
"""Forward function to calculate accuracy.
Args:
pred (torch.Tensor): Prediction of models.
target (torch.Tensor): Target for each prediction.
Returns:
tuple[float]: The accuracies under different topk criterions.
"""
return accuracy(pred, target, self.topk, self.thresh)
| 2,990 | 36.3875 | 79 | py |
DSLA-DSLA | DSLA-DSLA/mmdet/models/losses/focal_loss.py | # Copyright (c) OpenMMLab. All rights reserved.
import torch
import torch.nn as nn
import torch.nn.functional as F
from mmcv.ops import sigmoid_focal_loss as _sigmoid_focal_loss
from ..builder import LOSSES
from .utils import weight_reduce_loss
# This method is only for debugging
def py_sigmoid_focal_loss(pred,
target,
weight=None,
gamma=2.0,
alpha=0.25,
reduction='mean',
avg_factor=None):
"""PyTorch version of `Focal Loss <https://arxiv.org/abs/1708.02002>`_.
Args:
pred (torch.Tensor): The prediction with shape (N, C), C is the
number of classes
target (torch.Tensor): The learning label of the prediction.
weight (torch.Tensor, optional): Sample-wise loss weight.
gamma (float, optional): The gamma for calculating the modulating
factor. Defaults to 2.0.
alpha (float, optional): A balanced form for Focal Loss.
Defaults to 0.25.
reduction (str, optional): The method used to reduce the loss into
a scalar. Defaults to 'mean'.
avg_factor (int, optional): Average factor that is used to average
the loss. Defaults to None.
"""
pred_sigmoid = pred.sigmoid()
target = target.type_as(pred)
pt = (1 - pred_sigmoid) * target + pred_sigmoid * (1 - target)
focal_weight = (alpha * target + (1 - alpha) *
(1 - target)) * pt.pow(gamma)
loss = F.binary_cross_entropy_with_logits(
pred, target, reduction='none') * focal_weight
if weight is not None:
if weight.shape != loss.shape:
if weight.size(0) == loss.size(0):
# For most cases, weight is of shape (num_priors, ),
# which means it does not have the second axis num_class
weight = weight.view(-1, 1)
else:
# Sometimes, weight per anchor per class is also needed. e.g.
# in FSAF. But it may be flattened of shape
# (num_priors x num_class, ), while loss is still of shape
# (num_priors, num_class).
assert weight.numel() == loss.numel()
weight = weight.view(loss.size(0), -1)
assert weight.ndim == loss.ndim
loss = weight_reduce_loss(loss, weight, reduction, avg_factor)
return loss
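# Hedged usage sketch (illustrative addition): this PyTorch reference
# implementation expects one-hot targets of the same shape as the logits;
# `_py_focal_example` is a hypothetical helper.
def _py_focal_example():
    import torch
    pred = torch.randn(4, 3)
    target = F.one_hot(torch.tensor([0, 1, 2, 1]), num_classes=3)
    return py_sigmoid_focal_loss(pred, target, gamma=2.0, alpha=0.25)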
def py_focal_loss_with_prob(pred,
target,
weight=None,
gamma=2.0,
alpha=0.25,
reduction='mean',
avg_factor=None):
"""PyTorch version of `Focal Loss <https://arxiv.org/abs/1708.02002>`_.
Different from `py_sigmoid_focal_loss`, this function accepts probability
as input.
Args:
pred (torch.Tensor): The prediction probability with shape (N, C),
C is the number of classes.
target (torch.Tensor): The learning label of the prediction.
weight (torch.Tensor, optional): Sample-wise loss weight.
gamma (float, optional): The gamma for calculating the modulating
factor. Defaults to 2.0.
alpha (float, optional): A balanced form for Focal Loss.
Defaults to 0.25.
reduction (str, optional): The method used to reduce the loss into
a scalar. Defaults to 'mean'.
avg_factor (int, optional): Average factor that is used to average
the loss. Defaults to None.
"""
num_classes = pred.size(1)
target = F.one_hot(target, num_classes=num_classes + 1)
target = target[:, :num_classes]
target = target.type_as(pred)
pt = (1 - pred) * target + pred * (1 - target)
focal_weight = (alpha * target + (1 - alpha) *
(1 - target)) * pt.pow(gamma)
loss = F.binary_cross_entropy(
pred, target, reduction='none') * focal_weight
if weight is not None:
if weight.shape != loss.shape:
if weight.size(0) == loss.size(0):
# For most cases, weight is of shape (num_priors, ),
# which means it does not have the second axis num_class
weight = weight.view(-1, 1)
else:
# Sometimes, weight per anchor per class is also needed. e.g.
# in FSAF. But it may be flattened of shape
# (num_priors x num_class, ), while loss is still of shape
# (num_priors, num_class).
assert weight.numel() == loss.numel()
weight = weight.view(loss.size(0), -1)
assert weight.ndim == loss.ndim
loss = weight_reduce_loss(loss, weight, reduction, avg_factor)
return loss
def sigmoid_focal_loss(pred,
target,
weight=None,
gamma=2.0,
alpha=0.25,
reduction='mean',
avg_factor=None):
r"""A warpper of cuda version `Focal Loss
<https://arxiv.org/abs/1708.02002>`_.
Args:
pred (torch.Tensor): The prediction with shape (N, C), C is the number
of classes.
target (torch.Tensor): The learning label of the prediction.
weight (torch.Tensor, optional): Sample-wise loss weight.
gamma (float, optional): The gamma for calculating the modulating
factor. Defaults to 2.0.
alpha (float, optional): A balanced form for Focal Loss.
Defaults to 0.25.
reduction (str, optional): The method used to reduce the loss into
a scalar. Defaults to 'mean'. Options are "none", "mean" and "sum".
avg_factor (int, optional): Average factor that is used to average
the loss. Defaults to None.
"""
# Function.apply does not accept keyword arguments, so the decorator
# "weighted_loss" is not applicable
loss = _sigmoid_focal_loss(pred.contiguous(), target.contiguous(), gamma,
alpha, None, 'none')
if weight is not None:
if weight.shape != loss.shape:
if weight.size(0) == loss.size(0):
# For most cases, weight is of shape (num_priors, ),
# which means it does not have the second axis num_class
weight = weight.view(-1, 1)
else:
# Sometimes, weight per anchor per class is also needed. e.g.
# in FSAF. But it may be flattened of shape
# (num_priors x num_class, ), while loss is still of shape
# (num_priors, num_class).
assert weight.numel() == loss.numel()
weight = weight.view(loss.size(0), -1)
assert weight.ndim == loss.ndim
loss = weight_reduce_loss(loss, weight, reduction, avg_factor)
return loss
@LOSSES.register_module()
class FocalLoss(nn.Module):
def __init__(self,
use_sigmoid=True,
gamma=2.0,
alpha=0.25,
reduction='mean',
loss_weight=1.0,
activated=False):
"""`Focal Loss <https://arxiv.org/abs/1708.02002>`_
Args:
use_sigmoid (bool, optional): Whether the prediction is used with
sigmoid instead of softmax. Defaults to True.
gamma (float, optional): The gamma for calculating the modulating
factor. Defaults to 2.0.
alpha (float, optional): A balanced form for Focal Loss.
Defaults to 0.25.
reduction (str, optional): The method used to reduce the loss into
a scalar. Defaults to 'mean'. Options are "none", "mean" and
"sum".
loss_weight (float, optional): Weight of loss. Defaults to 1.0.
activated (bool, optional): Whether the input is activated.
If True, it means the input has been activated and can be
treated as probabilities. Else, it should be treated as logits.
Defaults to False.
"""
super(FocalLoss, self).__init__()
assert use_sigmoid is True, 'Only sigmoid focal loss supported now.'
self.use_sigmoid = use_sigmoid
self.gamma = gamma
self.alpha = alpha
self.reduction = reduction
self.loss_weight = loss_weight
self.activated = activated
def forward(self,
pred,
target,
weight=None,
avg_factor=None,
reduction_override=None):
"""Forward function.
Args:
pred (torch.Tensor): The prediction.
target (torch.Tensor): The learning label of the prediction.
weight (torch.Tensor, optional): The weight of loss for each
prediction. Defaults to None.
avg_factor (int, optional): Average factor that is used to average
the loss. Defaults to None.
reduction_override (str, optional): The reduction method used to
override the original reduction method of the loss.
Options are "none", "mean" and "sum".
Returns:
torch.Tensor: The calculated loss
"""
assert reduction_override in (None, 'none', 'mean', 'sum')
reduction = (
reduction_override if reduction_override else self.reduction)
if self.use_sigmoid:
if self.activated:
calculate_loss_func = py_focal_loss_with_prob
else:
if torch.cuda.is_available() and pred.is_cuda:
calculate_loss_func = sigmoid_focal_loss
else:
num_classes = pred.size(1)
target = F.one_hot(target, num_classes=num_classes + 1)
target = target[:, :num_classes]
calculate_loss_func = py_sigmoid_focal_loss
loss_cls = self.loss_weight * calculate_loss_func(
pred,
target,
weight,
gamma=self.gamma,
alpha=self.alpha,
reduction=reduction,
avg_factor=avg_factor)
else:
raise NotImplementedError
return loss_cls
| 10,420 | 41.534694 | 79 | py |
DSLA-DSLA | DSLA-DSLA/mmdet/models/losses/cross_entropy_loss.py | # Copyright (c) OpenMMLab. All rights reserved.
import torch
import torch.nn as nn
import torch.nn.functional as F
from ..builder import LOSSES
from .utils import weight_reduce_loss
def cross_entropy(pred,
label,
weight=None,
reduction='mean',
avg_factor=None,
class_weight=None,
ignore_index=-100):
"""Calculate the CrossEntropy loss.
Args:
pred (torch.Tensor): The prediction with shape (N, C), C is the number
of classes.
label (torch.Tensor): The learning label of the prediction.
weight (torch.Tensor, optional): Sample-wise loss weight.
reduction (str, optional): The method used to reduce the loss.
avg_factor (int, optional): Average factor that is used to average
the loss. Defaults to None.
class_weight (list[float], optional): The weight for each class.
ignore_index (int | None): The label index to be ignored.
If None, it will be set to default value. Default: -100.
Returns:
torch.Tensor: The calculated loss
"""
# The default value of ignore_index is the same as F.cross_entropy
ignore_index = -100 if ignore_index is None else ignore_index
# element-wise losses
loss = F.cross_entropy(
pred,
label,
weight=class_weight,
reduction='none',
ignore_index=ignore_index)
# apply weights and do the reduction
if weight is not None:
weight = weight.float()
loss = weight_reduce_loss(
loss, weight=weight, reduction=reduction, avg_factor=avg_factor)
return loss
def _expand_onehot_labels(labels, label_weights, label_channels, ignore_index):
"""Expand onehot labels to match the size of prediction."""
bin_labels = labels.new_full((labels.size(0), label_channels), 0)
valid_mask = (labels >= 0) & (labels != ignore_index)
inds = torch.nonzero(
valid_mask & (labels < label_channels), as_tuple=False)
if inds.numel() > 0:
bin_labels[inds, labels[inds]] = 1
valid_mask = valid_mask.view(-1, 1).expand(labels.size(0),
label_channels).float()
if label_weights is None:
bin_label_weights = valid_mask
else:
bin_label_weights = label_weights.view(-1, 1).repeat(1, label_channels)
bin_label_weights *= valid_mask
return bin_labels, bin_label_weights
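# Hedged worked example (illustrative addition): label 1 of 3 channels
# becomes the one-hot row [0, 1, 0], while an ignored label yields an
# all-zero weight row; `_expand_onehot_example` is hypothetical.
def _expand_onehot_example():
    import torch
    labels = torch.tensor([1, -100])  # the second sample is ignored
    return _expand_onehot_labels(labels, None, 3, ignore_index=-100)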
def binary_cross_entropy(pred,
label,
weight=None,
reduction='mean',
avg_factor=None,
class_weight=None,
ignore_index=-100):
"""Calculate the binary CrossEntropy loss.
Args:
pred (torch.Tensor): The prediction with shape (N, 1).
label (torch.Tensor): The learning label of the prediction.
weight (torch.Tensor, optional): Sample-wise loss weight.
reduction (str, optional): The method used to reduce the loss.
Options are "none", "mean" and "sum".
avg_factor (int, optional): Average factor that is used to average
the loss. Defaults to None.
class_weight (list[float], optional): The weight for each class.
ignore_index (int | None): The label index to be ignored.
If None, it will be set to default value. Default: -100.
Returns:
torch.Tensor: The calculated loss.
"""
# The default value of ignore_index is the same as F.cross_entropy
ignore_index = -100 if ignore_index is None else ignore_index
if pred.dim() != label.dim():
label, weight = _expand_onehot_labels(label, weight, pred.size(-1),
ignore_index)
# weighted element-wise losses
if weight is not None:
weight = weight.float()
loss = F.binary_cross_entropy_with_logits(
pred, label.float(), pos_weight=class_weight, reduction='none')
# do the reduction for the weighted loss
loss = weight_reduce_loss(
loss, weight, reduction=reduction, avg_factor=avg_factor)
return loss
def mask_cross_entropy(pred,
target,
label,
reduction='mean',
avg_factor=None,
class_weight=None,
ignore_index=None):
"""Calculate the CrossEntropy loss for masks.
Args:
pred (torch.Tensor): The prediction with shape (N, C, *), C is the
number of classes. The trailing * indicates arbitrary shape.
target (torch.Tensor): The learning label of the prediction.
label (torch.Tensor): ``label`` indicates the class label of the mask's
corresponding object. It is used to select the mask of the class
the object belongs to when the mask prediction is not
class-agnostic.
reduction (str, optional): The method used to reduce the loss.
Options are "none", "mean" and "sum".
avg_factor (int, optional): Average factor that is used to average
the loss. Defaults to None.
class_weight (list[float], optional): The weight for each class.
ignore_index (None): Placeholder, to be consistent with other loss.
Default: None.
Returns:
torch.Tensor: The calculated loss
Example:
>>> N, C = 3, 11
>>> H, W = 2, 2
>>> pred = torch.randn(N, C, H, W) * 1000
>>> target = torch.rand(N, H, W)
>>> label = torch.randint(0, C, size=(N,))
>>> reduction = 'mean'
>>> avg_factor = None
>>> class_weights = None
>>> loss = mask_cross_entropy(pred, target, label, reduction,
>>> avg_factor, class_weights)
>>> assert loss.shape == (1,)
"""
assert ignore_index is None, 'BCE loss does not support ignore_index'
# TODO: handle these two reserved arguments
assert reduction == 'mean' and avg_factor is None
num_rois = pred.size()[0]
inds = torch.arange(0, num_rois, dtype=torch.long, device=pred.device)
pred_slice = pred[inds, label].squeeze(1)
return F.binary_cross_entropy_with_logits(
pred_slice, target, weight=class_weight, reduction='mean')[None]
@LOSSES.register_module()
class CrossEntropyLoss(nn.Module):
def __init__(self,
use_sigmoid=False,
use_mask=False,
reduction='mean',
class_weight=None,
ignore_index=None,
loss_weight=1.0):
"""CrossEntropyLoss.
Args:
use_sigmoid (bool, optional): Whether the prediction uses sigmoid
or softmax. Defaults to False.
use_mask (bool, optional): Whether to use mask cross entropy loss.
Defaults to False.
reduction (str, optional): The method used to reduce the loss.
Defaults to 'mean'. Options are "none", "mean" and "sum".
class_weight (list[float], optional): Weight of each class.
Defaults to None.
ignore_index (int | None): The label index to be ignored.
Defaults to None.
loss_weight (float, optional): Weight of the loss. Defaults to 1.0.
"""
super(CrossEntropyLoss, self).__init__()
assert (use_sigmoid is False) or (use_mask is False)
self.use_sigmoid = use_sigmoid
self.use_mask = use_mask
self.reduction = reduction
self.loss_weight = loss_weight
self.class_weight = class_weight
self.ignore_index = ignore_index
if self.use_sigmoid:
self.cls_criterion = binary_cross_entropy
elif self.use_mask:
self.cls_criterion = mask_cross_entropy
else:
self.cls_criterion = cross_entropy
def forward(self,
cls_score,
label,
weight=None,
avg_factor=None,
reduction_override=None,
ignore_index=None,
**kwargs):
"""Forward function.
Args:
cls_score (torch.Tensor): The prediction.
label (torch.Tensor): The learning label of the prediction.
weight (torch.Tensor, optional): Sample-wise loss weight.
avg_factor (int, optional): Average factor that is used to average
the loss. Defaults to None.
reduction_override (str, optional): The method used to reduce the
loss. Options are "none", "mean" and "sum".
ignore_index (int | None): The label index to be ignored.
If not None, it will override the default value. Default: None.
Returns:
torch.Tensor: The calculated loss.
"""
assert reduction_override in (None, 'none', 'mean', 'sum')
reduction = (
reduction_override if reduction_override else self.reduction)
if ignore_index is None:
ignore_index = self.ignore_index
if self.class_weight is not None:
class_weight = cls_score.new_tensor(
self.class_weight, device=cls_score.device)
else:
class_weight = None
loss_cls = self.loss_weight * self.cls_criterion(
cls_score,
label,
weight,
class_weight=class_weight,
reduction=reduction,
avg_factor=avg_factor,
ignore_index=ignore_index,
**kwargs)
return loss_cls
| 9,696 | 37.480159 | 79 | py |
DSLA-DSLA | DSLA-DSLA/mmdet/models/losses/gaussian_focal_loss.py | # Copyright (c) OpenMMLab. All rights reserved.
import mmcv
import torch.nn as nn
from ..builder import LOSSES
from .utils import weighted_loss
@mmcv.jit(derivate=True, coderize=True)
@weighted_loss
def gaussian_focal_loss(pred, gaussian_target, alpha=2.0, gamma=4.0):
"""`Focal Loss <https://arxiv.org/abs/1708.02002>`_ for targets in gaussian
distribution.
Args:
pred (torch.Tensor): The prediction.
gaussian_target (torch.Tensor): The learning target of the prediction
in gaussian distribution.
alpha (float, optional): A balanced form for Focal Loss.
Defaults to 2.0.
gamma (float, optional): The gamma for calculating the modulating
factor. Defaults to 4.0.
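    Example:
        >>> # Illustrative sketch: ``pred`` is expected to be probabilities
        >>> # in (0, 1) (e.g. after a sigmoid); shapes are assumptions.
        >>> import torch
        >>> pred = torch.rand(2, 10, 8, 8).clamp(1e-4, 1 - 1e-4)
        >>> heatmap = torch.rand(2, 10, 8, 8)
        >>> loss = gaussian_focal_loss(pred, heatmap)  # mean-reduced scalar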
"""
eps = 1e-12
pos_weights = gaussian_target.eq(1)
neg_weights = (1 - gaussian_target).pow(gamma)
pos_loss = -(pred + eps).log() * (1 - pred).pow(alpha) * pos_weights
neg_loss = -(1 - pred + eps).log() * pred.pow(alpha) * neg_weights
return pos_loss + neg_loss
@LOSSES.register_module()
class GaussianFocalLoss(nn.Module):
"""GaussianFocalLoss is a variant of focal loss.
More details can be found in the `paper
<https://arxiv.org/abs/1808.01244>`_
Code is modified from `kp_utils.py
<https://github.com/princeton-vl/CornerNet/blob/master/models/py_utils/kp_utils.py#L152>`_ # noqa: E501
    Note that the target in GaussianFocalLoss is a gaussian heatmap,
    not a 0/1 binary target.
Args:
alpha (float): Power of prediction.
gamma (float): Power of target for negative samples.
reduction (str): Options are "none", "mean" and "sum".
loss_weight (float): Loss weight of current loss.
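    Example:
        >>> # Illustrative sketch: the target is a gaussian heatmap whose
        >>> # peaks (values equal to 1) mark positive locations; shapes are
        >>> # assumptions.
        >>> import torch
        >>> loss_center = GaussianFocalLoss(loss_weight=1.0)
        >>> pred = torch.rand(1, 80, 128, 128).clamp(1e-4, 1 - 1e-4)
        >>> heatmap = torch.rand(1, 80, 128, 128)
        >>> loss = loss_center(pred, heatmap)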
"""
def __init__(self,
alpha=2.0,
gamma=4.0,
reduction='mean',
loss_weight=1.0):
super(GaussianFocalLoss, self).__init__()
self.alpha = alpha
self.gamma = gamma
self.reduction = reduction
self.loss_weight = loss_weight
def forward(self,
pred,
target,
weight=None,
avg_factor=None,
reduction_override=None):
"""Forward function.
Args:
pred (torch.Tensor): The prediction.
target (torch.Tensor): The learning target of the prediction
in gaussian distribution.
weight (torch.Tensor, optional): The weight of loss for each
prediction. Defaults to None.
avg_factor (int, optional): Average factor that is used to average
the loss. Defaults to None.
reduction_override (str, optional): The reduction method used to
override the original reduction method of the loss.
Defaults to None.
"""
assert reduction_override in (None, 'none', 'mean', 'sum')
reduction = (
reduction_override if reduction_override else self.reduction)
loss_reg = self.loss_weight * gaussian_focal_loss(
pred,
target,
weight,
alpha=self.alpha,
gamma=self.gamma,
reduction=reduction,
avg_factor=avg_factor)
return loss_reg
| 3,312 | 34.623656 | 108 | py |
DSLA-DSLA | DSLA-DSLA/mmdet/models/losses/kd_loss.py | # Copyright (c) OpenMMLab. All rights reserved.
import mmcv
import torch.nn as nn
import torch.nn.functional as F
from ..builder import LOSSES
from .utils import weighted_loss
@mmcv.jit(derivate=True, coderize=True)
@weighted_loss
def knowledge_distillation_kl_div_loss(pred,
soft_label,
T,
detach_target=True):
r"""Loss function for knowledge distilling using KL divergence.
Args:
pred (Tensor): Predicted logits with shape (N, n + 1).
        soft_label (Tensor): Target logits with shape (N, n + 1).
T (int): Temperature for distillation.
        detach_target (bool): Remove soft_label from automatic
            differentiation. Default: True.
Returns:
torch.Tensor: Loss tensor with shape (N,).
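    Example:
        >>> # Illustrative sketch: the loss is near zero when student and
        >>> # teacher logits agree; the shape (4, 81) is an assumption.
        >>> import torch
        >>> logits = torch.randn(4, 81)
        >>> loss = knowledge_distillation_kl_div_loss(logits, logits, T=10)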
"""
assert pred.size() == soft_label.size()
target = F.softmax(soft_label / T, dim=1)
if detach_target:
target = target.detach()
kd_loss = F.kl_div(
F.log_softmax(pred / T, dim=1), target, reduction='none').mean(1) * (
T * T)
return kd_loss
@LOSSES.register_module()
class KnowledgeDistillationKLDivLoss(nn.Module):
"""Loss function for knowledge distilling using KL divergence.
Args:
reduction (str): Options are `'none'`, `'mean'` and `'sum'`.
loss_weight (float): Loss weight of current loss.
T (int): Temperature for distillation.
"""
def __init__(self, reduction='mean', loss_weight=1.0, T=10):
super(KnowledgeDistillationKLDivLoss, self).__init__()
assert T >= 1
self.reduction = reduction
self.loss_weight = loss_weight
self.T = T
def forward(self,
pred,
soft_label,
weight=None,
avg_factor=None,
reduction_override=None):
"""Forward function.
Args:
pred (Tensor): Predicted logits with shape (N, n + 1).
            soft_label (Tensor): Target logits with shape (N, n + 1).
weight (torch.Tensor, optional): The weight of loss for each
prediction. Defaults to None.
avg_factor (int, optional): Average factor that is used to average
the loss. Defaults to None.
reduction_override (str, optional): The reduction method used to
override the original reduction method of the loss.
Defaults to None.
"""
assert reduction_override in (None, 'none', 'mean', 'sum')
reduction = (
reduction_override if reduction_override else self.reduction)
loss_kd = self.loss_weight * knowledge_distillation_kl_div_loss(
pred,
soft_label,
weight,
reduction=reduction,
avg_factor=avg_factor,
T=self.T)
return loss_kd
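# Usage sketch (illustrative): distilling student logits towards teacher
# logits; the loss_weight and temperature values are assumptions.
#   import torch
#   loss_kd = KnowledgeDistillationKLDivLoss(loss_weight=0.5, T=10)
#   student = torch.randn(16, 81)
#   teacher = torch.randn(16, 81)
#   loss = loss_kd(student, teacher)   # scalar, averaged over the 16 rows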
| 2,912 | 31.730337 | 78 | py |
DSLA-DSLA | DSLA-DSLA/mmdet/models/backbones/pvt.py | # Copyright (c) OpenMMLab. All rights reserved.
import math
import warnings
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from mmcv.cnn import (Conv2d, build_activation_layer, build_norm_layer,
constant_init, normal_init, trunc_normal_init)
from mmcv.cnn.bricks.drop import build_dropout
from mmcv.cnn.bricks.transformer import MultiheadAttention
from mmcv.cnn.utils.weight_init import trunc_normal_
from mmcv.runner import (BaseModule, ModuleList, Sequential, _load_checkpoint,
load_state_dict)
from torch.nn.modules.utils import _pair as to_2tuple
from ...utils import get_root_logger
from ..builder import BACKBONES
from ..utils import PatchEmbed, nchw_to_nlc, nlc_to_nchw, pvt_convert
class MixFFN(BaseModule):
"""An implementation of MixFFN of PVT.
The differences between MixFFN & FFN:
1. Use 1X1 Conv to replace Linear layer.
2. Introduce 3X3 Depth-wise Conv to encode positional information.
Args:
embed_dims (int): The feature dimension. Same as
`MultiheadAttention`.
feedforward_channels (int): The hidden dimension of FFNs.
act_cfg (dict, optional): The activation config for FFNs.
Default: dict(type='GELU').
ffn_drop (float, optional): Probability of an element to be
zeroed in FFN. Default 0.0.
dropout_layer (obj:`ConfigDict`): The dropout_layer used
when adding the shortcut.
Default: None.
use_conv (bool): If True, add 3x3 DWConv between two Linear layers.
Defaults: False.
init_cfg (obj:`mmcv.ConfigDict`): The Config for initialization.
Default: None.
"""
def __init__(self,
embed_dims,
feedforward_channels,
act_cfg=dict(type='GELU'),
ffn_drop=0.,
dropout_layer=None,
use_conv=False,
init_cfg=None):
super(MixFFN, self).__init__(init_cfg=init_cfg)
self.embed_dims = embed_dims
self.feedforward_channels = feedforward_channels
self.act_cfg = act_cfg
activate = build_activation_layer(act_cfg)
in_channels = embed_dims
fc1 = Conv2d(
in_channels=in_channels,
out_channels=feedforward_channels,
kernel_size=1,
stride=1,
bias=True)
if use_conv:
# 3x3 depth wise conv to provide positional encode information
dw_conv = Conv2d(
in_channels=feedforward_channels,
out_channels=feedforward_channels,
kernel_size=3,
stride=1,
padding=(3 - 1) // 2,
bias=True,
groups=feedforward_channels)
fc2 = Conv2d(
in_channels=feedforward_channels,
out_channels=in_channels,
kernel_size=1,
stride=1,
bias=True)
drop = nn.Dropout(ffn_drop)
layers = [fc1, activate, drop, fc2, drop]
if use_conv:
layers.insert(1, dw_conv)
self.layers = Sequential(*layers)
self.dropout_layer = build_dropout(
dropout_layer) if dropout_layer else torch.nn.Identity()
def forward(self, x, hw_shape, identity=None):
out = nlc_to_nchw(x, hw_shape)
out = self.layers(out)
out = nchw_to_nlc(out)
if identity is None:
identity = x
return identity + self.dropout_layer(out)
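# Usage sketch (illustrative shapes): MixFFN consumes flattened tokens plus
# their 2D shape so the optional depth-wise conv can run on the NCHW map.
#   import torch
#   ffn = MixFFN(embed_dims=64, feedforward_channels=256, use_conv=True)
#   x = torch.rand(2, 56 * 56, 64)   # (B, H*W, C)
#   out = ffn(x, (56, 56))           # same shape as ``x``, residual added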
class SpatialReductionAttention(MultiheadAttention):
"""An implementation of Spatial Reduction Attention of PVT.
This module is modified from MultiheadAttention which is a module from
mmcv.cnn.bricks.transformer.
Args:
embed_dims (int): The embedding dimension.
num_heads (int): Parallel attention heads.
attn_drop (float): A Dropout layer on attn_output_weights.
Default: 0.0.
proj_drop (float): A Dropout layer after `nn.MultiheadAttention`.
Default: 0.0.
dropout_layer (obj:`ConfigDict`): The dropout_layer used
when adding the shortcut. Default: None.
batch_first (bool): Key, Query and Value are shape of
(batch, n, embed_dim)
            or (n, batch, embed_dim). Default: True.
qkv_bias (bool): enable bias for qkv if True. Default: True.
norm_cfg (dict): Config dict for normalization layer.
Default: dict(type='LN').
sr_ratio (int): The ratio of spatial reduction of Spatial Reduction
Attention of PVT. Default: 1.
init_cfg (obj:`mmcv.ConfigDict`): The Config for initialization.
Default: None.
"""
def __init__(self,
embed_dims,
num_heads,
attn_drop=0.,
proj_drop=0.,
dropout_layer=None,
batch_first=True,
qkv_bias=True,
norm_cfg=dict(type='LN'),
sr_ratio=1,
init_cfg=None):
super().__init__(
embed_dims,
num_heads,
attn_drop,
proj_drop,
batch_first=batch_first,
dropout_layer=dropout_layer,
bias=qkv_bias,
init_cfg=init_cfg)
self.sr_ratio = sr_ratio
if sr_ratio > 1:
self.sr = Conv2d(
in_channels=embed_dims,
out_channels=embed_dims,
kernel_size=sr_ratio,
stride=sr_ratio)
# The ret[0] of build_norm_layer is norm name.
self.norm = build_norm_layer(norm_cfg, embed_dims)[1]
# handle the BC-breaking from https://github.com/open-mmlab/mmcv/pull/1418 # noqa
from mmdet import mmcv_version, digit_version
if mmcv_version < digit_version('1.3.17'):
            warnings.warn('The legacy version of forward function in '
                          'SpatialReductionAttention is deprecated in '
                          'mmcv>=1.3.17 and will no longer be supported in '
                          'the future. Please upgrade your mmcv.')
self.forward = self.legacy_forward
def forward(self, x, hw_shape, identity=None):
x_q = x
if self.sr_ratio > 1:
x_kv = nlc_to_nchw(x, hw_shape)
x_kv = self.sr(x_kv)
x_kv = nchw_to_nlc(x_kv)
x_kv = self.norm(x_kv)
else:
x_kv = x
if identity is None:
identity = x_q
# Because the dataflow('key', 'query', 'value') of
# ``torch.nn.MultiheadAttention`` is (num_query, batch,
# embed_dims), We should adjust the shape of dataflow from
# batch_first (batch, num_query, embed_dims) to num_query_first
        # (num_query, batch, embed_dims), and recover ``attn_output``
# from num_query_first to batch_first.
if self.batch_first:
x_q = x_q.transpose(0, 1)
x_kv = x_kv.transpose(0, 1)
out = self.attn(query=x_q, key=x_kv, value=x_kv)[0]
if self.batch_first:
out = out.transpose(0, 1)
return identity + self.dropout_layer(self.proj_drop(out))
def legacy_forward(self, x, hw_shape, identity=None):
"""multi head attention forward in mmcv version < 1.3.17."""
x_q = x
if self.sr_ratio > 1:
x_kv = nlc_to_nchw(x, hw_shape)
x_kv = self.sr(x_kv)
x_kv = nchw_to_nlc(x_kv)
x_kv = self.norm(x_kv)
else:
x_kv = x
if identity is None:
identity = x_q
out = self.attn(query=x_q, key=x_kv, value=x_kv)[0]
return identity + self.dropout_layer(self.proj_drop(out))
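# Usage sketch (illustrative): with sr_ratio=8 the 56x56 key/value map is
# reduced by a strided conv to 7x7 (49 tokens), cutting attention cost while
# the query keeps all 56 * 56 tokens; the numbers are assumptions.
#   import torch
#   attn = SpatialReductionAttention(embed_dims=64, num_heads=1, sr_ratio=8)
#   x = torch.rand(2, 56 * 56, 64)
#   out = attn(x, (56, 56))          # same shape as ``x``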
class PVTEncoderLayer(BaseModule):
"""Implements one encoder layer in PVT.
Args:
embed_dims (int): The feature dimension.
num_heads (int): Parallel attention heads.
feedforward_channels (int): The hidden dimension for FFNs.
drop_rate (float): Probability of an element to be zeroed.
after the feed forward layer. Default: 0.0.
attn_drop_rate (float): The drop out rate for attention layer.
Default: 0.0.
drop_path_rate (float): stochastic depth rate. Default: 0.0.
qkv_bias (bool): enable bias for qkv if True.
Default: True.
act_cfg (dict): The activation config for FFNs.
Default: dict(type='GELU').
norm_cfg (dict): Config dict for normalization layer.
Default: dict(type='LN').
sr_ratio (int): The ratio of spatial reduction of Spatial Reduction
Attention of PVT. Default: 1.
use_conv_ffn (bool): If True, use Convolutional FFN to replace FFN.
Default: False.
init_cfg (dict, optional): Initialization config dict.
Default: None.
"""
def __init__(self,
embed_dims,
num_heads,
feedforward_channels,
drop_rate=0.,
attn_drop_rate=0.,
drop_path_rate=0.,
qkv_bias=True,
act_cfg=dict(type='GELU'),
norm_cfg=dict(type='LN'),
sr_ratio=1,
use_conv_ffn=False,
init_cfg=None):
super(PVTEncoderLayer, self).__init__(init_cfg=init_cfg)
# The ret[0] of build_norm_layer is norm name.
self.norm1 = build_norm_layer(norm_cfg, embed_dims)[1]
self.attn = SpatialReductionAttention(
embed_dims=embed_dims,
num_heads=num_heads,
attn_drop=attn_drop_rate,
proj_drop=drop_rate,
dropout_layer=dict(type='DropPath', drop_prob=drop_path_rate),
qkv_bias=qkv_bias,
norm_cfg=norm_cfg,
sr_ratio=sr_ratio)
# The ret[0] of build_norm_layer is norm name.
self.norm2 = build_norm_layer(norm_cfg, embed_dims)[1]
self.ffn = MixFFN(
embed_dims=embed_dims,
feedforward_channels=feedforward_channels,
ffn_drop=drop_rate,
dropout_layer=dict(type='DropPath', drop_prob=drop_path_rate),
use_conv=use_conv_ffn,
act_cfg=act_cfg)
def forward(self, x, hw_shape):
x = self.attn(self.norm1(x), hw_shape, identity=x)
x = self.ffn(self.norm2(x), hw_shape, identity=x)
return x
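# Usage sketch (illustrative): one pre-norm block, spatial-reduction
# attention followed by MixFFN, each with a residual; values are assumptions.
#   import torch
#   layer = PVTEncoderLayer(embed_dims=64, num_heads=1,
#                           feedforward_channels=512, sr_ratio=8)
#   x = torch.rand(2, 56 * 56, 64)
#   out = layer(x, (56, 56))         # same shape as ``x``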
class AbsolutePositionEmbedding(BaseModule):
"""An implementation of the absolute position embedding in PVT.
Args:
pos_shape (int): The shape of the absolute position embedding.
pos_dim (int): The dimension of the absolute position embedding.
drop_rate (float): Probability of an element to be zeroed.
Default: 0.0.
"""
def __init__(self, pos_shape, pos_dim, drop_rate=0., init_cfg=None):
super().__init__(init_cfg=init_cfg)
if isinstance(pos_shape, int):
pos_shape = to_2tuple(pos_shape)
elif isinstance(pos_shape, tuple):
if len(pos_shape) == 1:
pos_shape = to_2tuple(pos_shape[0])
assert len(pos_shape) == 2, \
f'The size of image should have length 1 or 2, ' \
f'but got {len(pos_shape)}'
self.pos_shape = pos_shape
self.pos_dim = pos_dim
self.pos_embed = nn.Parameter(
torch.zeros(1, pos_shape[0] * pos_shape[1], pos_dim))
self.drop = nn.Dropout(p=drop_rate)
def init_weights(self):
trunc_normal_(self.pos_embed, std=0.02)
def resize_pos_embed(self, pos_embed, input_shape, mode='bilinear'):
"""Resize pos_embed weights.
Resize pos_embed using bilinear interpolate method.
Args:
pos_embed (torch.Tensor): Position embedding weights.
input_shape (tuple): Tuple for (downsampled input image height,
downsampled input image width).
mode (str): Algorithm used for upsampling:
``'nearest'`` | ``'linear'`` | ``'bilinear'`` | ``'bicubic'`` |
``'trilinear'``. Default: ``'bilinear'``.
Return:
torch.Tensor: The resized pos_embed of shape [B, L_new, C].
"""
assert pos_embed.ndim == 3, 'shape of pos_embed must be [B, L, C]'
pos_h, pos_w = self.pos_shape
pos_embed_weight = pos_embed[:, (-1 * pos_h * pos_w):]
pos_embed_weight = pos_embed_weight.reshape(
1, pos_h, pos_w, self.pos_dim).permute(0, 3, 1, 2).contiguous()
pos_embed_weight = F.interpolate(
pos_embed_weight, size=input_shape, mode=mode)
pos_embed_weight = torch.flatten(pos_embed_weight,
2).transpose(1, 2).contiguous()
pos_embed = pos_embed_weight
return pos_embed
def forward(self, x, hw_shape, mode='bilinear'):
pos_embed = self.resize_pos_embed(self.pos_embed, hw_shape, mode)
return self.drop(x + pos_embed)
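# Usage sketch (illustrative): the learned table is defined on a fixed grid
# (here 56x56) and bilinearly resized to the runtime feature resolution.
#   import torch
#   ape = AbsolutePositionEmbedding(pos_shape=56, pos_dim=64)
#   x = torch.rand(2, 48 * 48, 64)
#   out = ape(x, (48, 48))           # pos_embed resized from 56x56 to 48x48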
@BACKBONES.register_module()
class PyramidVisionTransformer(BaseModule):
"""Pyramid Vision Transformer (PVT)
Implementation of `Pyramid Vision Transformer: A Versatile Backbone for
Dense Prediction without Convolutions
<https://arxiv.org/pdf/2102.12122.pdf>`_.
Args:
pretrain_img_size (int | tuple[int]): The size of input image when
pretrain. Defaults: 224.
in_channels (int): Number of input channels. Default: 3.
embed_dims (int): Embedding dimension. Default: 64.
        num_stages (int): The number of stages. Default: 4.
num_layers (Sequence[int]): The layer number of each transformer encode
layer. Default: [3, 4, 6, 3].
num_heads (Sequence[int]): The attention heads of each transformer
encode layer. Default: [1, 2, 5, 8].
patch_sizes (Sequence[int]): The patch_size of each patch embedding.
Default: [4, 2, 2, 2].
strides (Sequence[int]): The stride of each patch embedding.
Default: [4, 2, 2, 2].
paddings (Sequence[int]): The padding of each patch embedding.
Default: [0, 0, 0, 0].
sr_ratios (Sequence[int]): The spatial reduction rate of each
transformer encode layer. Default: [8, 4, 2, 1].
out_indices (Sequence[int] | int): Output from which stages.
Default: (0, 1, 2, 3).
mlp_ratios (Sequence[int]): The ratio of the mlp hidden dim to the
embedding dim of each transformer encode layer.
Default: [8, 8, 4, 4].
qkv_bias (bool): Enable bias for qkv if True. Default: True.
drop_rate (float): Probability of an element to be zeroed.
Default 0.0.
attn_drop_rate (float): The drop out rate for attention layer.
Default 0.0.
drop_path_rate (float): stochastic depth rate. Default 0.1.
use_abs_pos_embed (bool): If True, add absolute position embedding to
the patch embedding. Defaults: True.
use_conv_ffn (bool): If True, use Convolutional FFN to replace FFN.
Default: False.
act_cfg (dict): The activation config for FFNs.
Default: dict(type='GELU').
norm_cfg (dict): Config dict for normalization layer.
Default: dict(type='LN').
pretrained (str, optional): model pretrained path. Default: None.
convert_weights (bool): The flag indicates whether the
pre-trained model is from the original repo. We may need
to convert some keys to make it compatible.
Default: True.
init_cfg (dict or list[dict], optional): Initialization config dict.
Default: None.
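    Example:
        >>> # Illustrative sketch in the style of the other backbone
        >>> # docstrings; output shapes assume the default (PVT-small-like)
        >>> # configuration.
        >>> from mmdet.models import PyramidVisionTransformer
        >>> import torch
        >>> self = PyramidVisionTransformer()
        >>> self.eval()
        >>> inputs = torch.rand(1, 3, 224, 224)
        >>> level_outputs = self.forward(inputs)
        >>> for level_out in level_outputs:
        ...     print(tuple(level_out.shape))
        (1, 64, 56, 56)
        (1, 128, 28, 28)
        (1, 320, 14, 14)
        (1, 512, 7, 7)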
"""
def __init__(self,
pretrain_img_size=224,
in_channels=3,
embed_dims=64,
num_stages=4,
num_layers=[3, 4, 6, 3],
num_heads=[1, 2, 5, 8],
patch_sizes=[4, 2, 2, 2],
strides=[4, 2, 2, 2],
paddings=[0, 0, 0, 0],
sr_ratios=[8, 4, 2, 1],
out_indices=(0, 1, 2, 3),
mlp_ratios=[8, 8, 4, 4],
qkv_bias=True,
drop_rate=0.,
attn_drop_rate=0.,
drop_path_rate=0.1,
use_abs_pos_embed=True,
norm_after_stage=False,
use_conv_ffn=False,
act_cfg=dict(type='GELU'),
norm_cfg=dict(type='LN', eps=1e-6),
pretrained=None,
convert_weights=True,
init_cfg=None):
super().__init__(init_cfg=init_cfg)
self.convert_weights = convert_weights
if isinstance(pretrain_img_size, int):
pretrain_img_size = to_2tuple(pretrain_img_size)
elif isinstance(pretrain_img_size, tuple):
if len(pretrain_img_size) == 1:
pretrain_img_size = to_2tuple(pretrain_img_size[0])
assert len(pretrain_img_size) == 2, \
f'The size of image should have length 1 or 2, ' \
f'but got {len(pretrain_img_size)}'
assert not (init_cfg and pretrained), \
            'init_cfg and pretrained cannot be set at the same time'
if isinstance(pretrained, str):
warnings.warn('DeprecationWarning: pretrained is deprecated, '
'please use "init_cfg" instead')
self.init_cfg = dict(type='Pretrained', checkpoint=pretrained)
elif pretrained is None:
self.init_cfg = init_cfg
else:
raise TypeError('pretrained must be a str or None')
self.embed_dims = embed_dims
self.num_stages = num_stages
self.num_layers = num_layers
self.num_heads = num_heads
self.patch_sizes = patch_sizes
self.strides = strides
self.sr_ratios = sr_ratios
assert num_stages == len(num_layers) == len(num_heads) \
== len(patch_sizes) == len(strides) == len(sr_ratios)
self.out_indices = out_indices
assert max(out_indices) < self.num_stages
self.pretrained = pretrained
# transformer encoder
dpr = [
x.item()
for x in torch.linspace(0, drop_path_rate, sum(num_layers))
] # stochastic num_layer decay rule
cur = 0
self.layers = ModuleList()
for i, num_layer in enumerate(num_layers):
embed_dims_i = embed_dims * num_heads[i]
patch_embed = PatchEmbed(
in_channels=in_channels,
embed_dims=embed_dims_i,
kernel_size=patch_sizes[i],
stride=strides[i],
padding=paddings[i],
bias=True,
norm_cfg=norm_cfg)
layers = ModuleList()
if use_abs_pos_embed:
pos_shape = pretrain_img_size // np.prod(patch_sizes[:i + 1])
pos_embed = AbsolutePositionEmbedding(
pos_shape=pos_shape,
pos_dim=embed_dims_i,
drop_rate=drop_rate)
layers.append(pos_embed)
layers.extend([
PVTEncoderLayer(
embed_dims=embed_dims_i,
num_heads=num_heads[i],
feedforward_channels=mlp_ratios[i] * embed_dims_i,
drop_rate=drop_rate,
attn_drop_rate=attn_drop_rate,
drop_path_rate=dpr[cur + idx],
qkv_bias=qkv_bias,
act_cfg=act_cfg,
norm_cfg=norm_cfg,
sr_ratio=sr_ratios[i],
use_conv_ffn=use_conv_ffn) for idx in range(num_layer)
])
in_channels = embed_dims_i
# The ret[0] of build_norm_layer is norm name.
if norm_after_stage:
norm = build_norm_layer(norm_cfg, embed_dims_i)[1]
else:
norm = nn.Identity()
self.layers.append(ModuleList([patch_embed, layers, norm]))
cur += num_layer
def init_weights(self):
logger = get_root_logger()
if self.init_cfg is None:
logger.warn(f'No pre-trained weights for '
f'{self.__class__.__name__}, '
                        f'training starts from scratch')
for m in self.modules():
if isinstance(m, nn.Linear):
trunc_normal_init(m, std=.02, bias=0.)
elif isinstance(m, nn.LayerNorm):
constant_init(m, 1.0)
elif isinstance(m, nn.Conv2d):
fan_out = m.kernel_size[0] * m.kernel_size[
1] * m.out_channels
fan_out //= m.groups
normal_init(m, 0, math.sqrt(2.0 / fan_out))
elif isinstance(m, AbsolutePositionEmbedding):
m.init_weights()
else:
assert 'checkpoint' in self.init_cfg, f'Only support ' \
                                                  f'specifying `Pretrained` in ' \
f'`init_cfg` in ' \
f'{self.__class__.__name__} '
checkpoint = _load_checkpoint(
self.init_cfg.checkpoint, logger=logger, map_location='cpu')
logger.warn(f'Load pre-trained model for '
f'{self.__class__.__name__} from original repo')
if 'state_dict' in checkpoint:
state_dict = checkpoint['state_dict']
elif 'model' in checkpoint:
state_dict = checkpoint['model']
else:
state_dict = checkpoint
if self.convert_weights:
# Because pvt backbones are not supported by mmcls,
                # we need to convert pre-trained weights to match this
# implementation.
state_dict = pvt_convert(state_dict)
load_state_dict(self, state_dict, strict=False, logger=logger)
def forward(self, x):
outs = []
for i, layer in enumerate(self.layers):
x, hw_shape = layer[0](x)
for block in layer[1]:
x = block(x, hw_shape)
x = layer[2](x)
x = nlc_to_nchw(x, hw_shape)
if i in self.out_indices:
outs.append(x)
return outs
@BACKBONES.register_module()
class PyramidVisionTransformerV2(PyramidVisionTransformer):
"""Implementation of `PVTv2: Improved Baselines with Pyramid Vision
Transformer <https://arxiv.org/pdf/2106.13797.pdf>`_."""
def __init__(self, **kwargs):
super(PyramidVisionTransformerV2, self).__init__(
patch_sizes=[7, 3, 3, 3],
paddings=[3, 1, 1, 1],
use_abs_pos_embed=False,
norm_after_stage=True,
use_conv_ffn=True,
**kwargs)
| 23,217 | 38.219595 | 89 | py |
DSLA-DSLA | DSLA-DSLA/mmdet/models/backbones/hrnet.py | # Copyright (c) OpenMMLab. All rights reserved.
import warnings
import torch.nn as nn
from mmcv.cnn import build_conv_layer, build_norm_layer
from mmcv.runner import BaseModule, ModuleList, Sequential
from torch.nn.modules.batchnorm import _BatchNorm
from ..builder import BACKBONES
from .resnet import BasicBlock, Bottleneck
class HRModule(BaseModule):
"""High-Resolution Module for HRNet.
In this module, every branch has 4 BasicBlocks/Bottlenecks. Fusion/Exchange
is in this module.
"""
def __init__(self,
num_branches,
blocks,
num_blocks,
in_channels,
num_channels,
multiscale_output=True,
with_cp=False,
conv_cfg=None,
norm_cfg=dict(type='BN'),
block_init_cfg=None,
init_cfg=None):
super(HRModule, self).__init__(init_cfg)
self.block_init_cfg = block_init_cfg
self._check_branches(num_branches, num_blocks, in_channels,
num_channels)
self.in_channels = in_channels
self.num_branches = num_branches
self.multiscale_output = multiscale_output
self.norm_cfg = norm_cfg
self.conv_cfg = conv_cfg
self.with_cp = with_cp
self.branches = self._make_branches(num_branches, blocks, num_blocks,
num_channels)
self.fuse_layers = self._make_fuse_layers()
self.relu = nn.ReLU(inplace=False)
def _check_branches(self, num_branches, num_blocks, in_channels,
num_channels):
if num_branches != len(num_blocks):
error_msg = f'NUM_BRANCHES({num_branches}) ' \
f'!= NUM_BLOCKS({len(num_blocks)})'
raise ValueError(error_msg)
if num_branches != len(num_channels):
error_msg = f'NUM_BRANCHES({num_branches}) ' \
f'!= NUM_CHANNELS({len(num_channels)})'
raise ValueError(error_msg)
if num_branches != len(in_channels):
error_msg = f'NUM_BRANCHES({num_branches}) ' \
f'!= NUM_INCHANNELS({len(in_channels)})'
raise ValueError(error_msg)
def _make_one_branch(self,
branch_index,
block,
num_blocks,
num_channels,
stride=1):
downsample = None
if stride != 1 or \
self.in_channels[branch_index] != \
num_channels[branch_index] * block.expansion:
downsample = nn.Sequential(
build_conv_layer(
self.conv_cfg,
self.in_channels[branch_index],
num_channels[branch_index] * block.expansion,
kernel_size=1,
stride=stride,
bias=False),
build_norm_layer(self.norm_cfg, num_channels[branch_index] *
block.expansion)[1])
layers = []
layers.append(
block(
self.in_channels[branch_index],
num_channels[branch_index],
stride,
downsample=downsample,
with_cp=self.with_cp,
norm_cfg=self.norm_cfg,
conv_cfg=self.conv_cfg,
init_cfg=self.block_init_cfg))
self.in_channels[branch_index] = \
num_channels[branch_index] * block.expansion
for i in range(1, num_blocks[branch_index]):
layers.append(
block(
self.in_channels[branch_index],
num_channels[branch_index],
with_cp=self.with_cp,
norm_cfg=self.norm_cfg,
conv_cfg=self.conv_cfg,
init_cfg=self.block_init_cfg))
return Sequential(*layers)
def _make_branches(self, num_branches, block, num_blocks, num_channels):
branches = []
for i in range(num_branches):
branches.append(
self._make_one_branch(i, block, num_blocks, num_channels))
return ModuleList(branches)
def _make_fuse_layers(self):
if self.num_branches == 1:
return None
num_branches = self.num_branches
in_channels = self.in_channels
fuse_layers = []
num_out_branches = num_branches if self.multiscale_output else 1
for i in range(num_out_branches):
fuse_layer = []
for j in range(num_branches):
if j > i:
fuse_layer.append(
nn.Sequential(
build_conv_layer(
self.conv_cfg,
in_channels[j],
in_channels[i],
kernel_size=1,
stride=1,
padding=0,
bias=False),
build_norm_layer(self.norm_cfg, in_channels[i])[1],
nn.Upsample(
scale_factor=2**(j - i), mode='nearest')))
elif j == i:
fuse_layer.append(None)
else:
conv_downsamples = []
for k in range(i - j):
if k == i - j - 1:
conv_downsamples.append(
nn.Sequential(
build_conv_layer(
self.conv_cfg,
in_channels[j],
in_channels[i],
kernel_size=3,
stride=2,
padding=1,
bias=False),
build_norm_layer(self.norm_cfg,
in_channels[i])[1]))
else:
conv_downsamples.append(
nn.Sequential(
build_conv_layer(
self.conv_cfg,
in_channels[j],
in_channels[j],
kernel_size=3,
stride=2,
padding=1,
bias=False),
build_norm_layer(self.norm_cfg,
in_channels[j])[1],
nn.ReLU(inplace=False)))
fuse_layer.append(nn.Sequential(*conv_downsamples))
fuse_layers.append(nn.ModuleList(fuse_layer))
return nn.ModuleList(fuse_layers)
def forward(self, x):
"""Forward function."""
if self.num_branches == 1:
return [self.branches[0](x[0])]
for i in range(self.num_branches):
x[i] = self.branches[i](x[i])
x_fuse = []
for i in range(len(self.fuse_layers)):
y = 0
for j in range(self.num_branches):
if i == j:
y += x[j]
else:
y += self.fuse_layers[i][j](x[j])
x_fuse.append(self.relu(y))
return x_fuse
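# Usage sketch (illustrative; channel and resolution numbers are
# assumptions): with two branches, each fused output aggregates both
# resolutions via 1x1-conv + upsample or strided 3x3 convs.
#   import torch
#   module = HRModule(num_branches=2, blocks=BasicBlock, num_blocks=(4, 4),
#                     in_channels=[32, 64], num_channels=(32, 64))
#   x = [torch.rand(1, 32, 64, 64), torch.rand(1, 64, 32, 32)]
#   out = module(x)   # [(1, 32, 64, 64), (1, 64, 32, 32)], both fused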
@BACKBONES.register_module()
class HRNet(BaseModule):
"""HRNet backbone.
`High-Resolution Representations for Labeling Pixels and Regions
arXiv: <https://arxiv.org/abs/1904.04514>`_.
Args:
extra (dict): Detailed configuration for each stage of HRNet.
There must be 4 stages, the configuration for each stage must have
5 keys:
- num_modules(int): The number of HRModule in this stage.
- num_branches(int): The number of branches in the HRModule.
- block(str): The type of convolution block.
- num_blocks(tuple): The number of blocks in each branch.
The length must be equal to num_branches.
- num_channels(tuple): The number of channels in each branch.
The length must be equal to num_branches.
in_channels (int): Number of input image channels. Default: 3.
conv_cfg (dict): Dictionary to construct and config conv layer.
norm_cfg (dict): Dictionary to construct and config norm layer.
norm_eval (bool): Whether to set norm layers to eval mode, namely,
freeze running stats (mean and var). Note: Effect on Batch Norm
and its variants only. Default: True.
with_cp (bool): Use checkpoint or not. Using checkpoint will save some
memory while slowing down the training speed. Default: False.
zero_init_residual (bool): Whether to use zero init for last norm layer
in resblocks to let them behave as identity. Default: False.
multiscale_output (bool): Whether to output multi-level features
produced by multiple branches. If False, only the first level
feature will be output. Default: True.
pretrained (str, optional): Model pretrained path. Default: None.
init_cfg (dict or list[dict], optional): Initialization config dict.
Default: None.
Example:
>>> from mmdet.models import HRNet
>>> import torch
>>> extra = dict(
>>> stage1=dict(
>>> num_modules=1,
>>> num_branches=1,
>>> block='BOTTLENECK',
>>> num_blocks=(4, ),
>>> num_channels=(64, )),
>>> stage2=dict(
>>> num_modules=1,
>>> num_branches=2,
>>> block='BASIC',
>>> num_blocks=(4, 4),
>>> num_channels=(32, 64)),
>>> stage3=dict(
>>> num_modules=4,
>>> num_branches=3,
>>> block='BASIC',
>>> num_blocks=(4, 4, 4),
>>> num_channels=(32, 64, 128)),
>>> stage4=dict(
>>> num_modules=3,
>>> num_branches=4,
>>> block='BASIC',
>>> num_blocks=(4, 4, 4, 4),
>>> num_channels=(32, 64, 128, 256)))
>>> self = HRNet(extra, in_channels=1)
>>> self.eval()
>>> inputs = torch.rand(1, 1, 32, 32)
>>> level_outputs = self.forward(inputs)
>>> for level_out in level_outputs:
... print(tuple(level_out.shape))
(1, 32, 8, 8)
(1, 64, 4, 4)
(1, 128, 2, 2)
(1, 256, 1, 1)
"""
blocks_dict = {'BASIC': BasicBlock, 'BOTTLENECK': Bottleneck}
def __init__(self,
extra,
in_channels=3,
conv_cfg=None,
norm_cfg=dict(type='BN'),
norm_eval=True,
with_cp=False,
zero_init_residual=False,
multiscale_output=True,
pretrained=None,
init_cfg=None):
super(HRNet, self).__init__(init_cfg)
self.pretrained = pretrained
assert not (init_cfg and pretrained), \
'init_cfg and pretrained cannot be specified at the same time'
if isinstance(pretrained, str):
warnings.warn('DeprecationWarning: pretrained is deprecated, '
'please use "init_cfg" instead')
self.init_cfg = dict(type='Pretrained', checkpoint=pretrained)
elif pretrained is None:
if init_cfg is None:
self.init_cfg = [
dict(type='Kaiming', layer='Conv2d'),
dict(
type='Constant',
val=1,
layer=['_BatchNorm', 'GroupNorm'])
]
else:
raise TypeError('pretrained must be a str or None')
# Assert configurations of 4 stages are in extra
assert 'stage1' in extra and 'stage2' in extra \
and 'stage3' in extra and 'stage4' in extra
# Assert whether the length of `num_blocks` and `num_channels` are
# equal to `num_branches`
for i in range(4):
cfg = extra[f'stage{i + 1}']
assert len(cfg['num_blocks']) == cfg['num_branches'] and \
len(cfg['num_channels']) == cfg['num_branches']
self.extra = extra
self.conv_cfg = conv_cfg
self.norm_cfg = norm_cfg
self.norm_eval = norm_eval
self.with_cp = with_cp
self.zero_init_residual = zero_init_residual
# stem net
self.norm1_name, norm1 = build_norm_layer(self.norm_cfg, 64, postfix=1)
self.norm2_name, norm2 = build_norm_layer(self.norm_cfg, 64, postfix=2)
self.conv1 = build_conv_layer(
self.conv_cfg,
in_channels,
64,
kernel_size=3,
stride=2,
padding=1,
bias=False)
self.add_module(self.norm1_name, norm1)
self.conv2 = build_conv_layer(
self.conv_cfg,
64,
64,
kernel_size=3,
stride=2,
padding=1,
bias=False)
self.add_module(self.norm2_name, norm2)
self.relu = nn.ReLU(inplace=True)
# stage 1
self.stage1_cfg = self.extra['stage1']
num_channels = self.stage1_cfg['num_channels'][0]
block_type = self.stage1_cfg['block']
num_blocks = self.stage1_cfg['num_blocks'][0]
block = self.blocks_dict[block_type]
stage1_out_channels = num_channels * block.expansion
self.layer1 = self._make_layer(block, 64, num_channels, num_blocks)
# stage 2
self.stage2_cfg = self.extra['stage2']
num_channels = self.stage2_cfg['num_channels']
block_type = self.stage2_cfg['block']
block = self.blocks_dict[block_type]
num_channels = [channel * block.expansion for channel in num_channels]
self.transition1 = self._make_transition_layer([stage1_out_channels],
num_channels)
self.stage2, pre_stage_channels = self._make_stage(
self.stage2_cfg, num_channels)
# stage 3
self.stage3_cfg = self.extra['stage3']
num_channels = self.stage3_cfg['num_channels']
block_type = self.stage3_cfg['block']
block = self.blocks_dict[block_type]
num_channels = [channel * block.expansion for channel in num_channels]
self.transition2 = self._make_transition_layer(pre_stage_channels,
num_channels)
self.stage3, pre_stage_channels = self._make_stage(
self.stage3_cfg, num_channels)
# stage 4
self.stage4_cfg = self.extra['stage4']
num_channels = self.stage4_cfg['num_channels']
block_type = self.stage4_cfg['block']
block = self.blocks_dict[block_type]
num_channels = [channel * block.expansion for channel in num_channels]
self.transition3 = self._make_transition_layer(pre_stage_channels,
num_channels)
self.stage4, pre_stage_channels = self._make_stage(
self.stage4_cfg, num_channels, multiscale_output=multiscale_output)
@property
def norm1(self):
"""nn.Module: the normalization layer named "norm1" """
return getattr(self, self.norm1_name)
@property
def norm2(self):
"""nn.Module: the normalization layer named "norm2" """
return getattr(self, self.norm2_name)
def _make_transition_layer(self, num_channels_pre_layer,
num_channels_cur_layer):
num_branches_cur = len(num_channels_cur_layer)
num_branches_pre = len(num_channels_pre_layer)
transition_layers = []
for i in range(num_branches_cur):
if i < num_branches_pre:
if num_channels_cur_layer[i] != num_channels_pre_layer[i]:
transition_layers.append(
nn.Sequential(
build_conv_layer(
self.conv_cfg,
num_channels_pre_layer[i],
num_channels_cur_layer[i],
kernel_size=3,
stride=1,
padding=1,
bias=False),
build_norm_layer(self.norm_cfg,
num_channels_cur_layer[i])[1],
nn.ReLU(inplace=True)))
else:
transition_layers.append(None)
else:
conv_downsamples = []
for j in range(i + 1 - num_branches_pre):
in_channels = num_channels_pre_layer[-1]
out_channels = num_channels_cur_layer[i] \
if j == i - num_branches_pre else in_channels
conv_downsamples.append(
nn.Sequential(
build_conv_layer(
self.conv_cfg,
in_channels,
out_channels,
kernel_size=3,
stride=2,
padding=1,
bias=False),
build_norm_layer(self.norm_cfg, out_channels)[1],
nn.ReLU(inplace=True)))
transition_layers.append(nn.Sequential(*conv_downsamples))
return nn.ModuleList(transition_layers)
def _make_layer(self, block, inplanes, planes, blocks, stride=1):
downsample = None
if stride != 1 or inplanes != planes * block.expansion:
downsample = nn.Sequential(
build_conv_layer(
self.conv_cfg,
inplanes,
planes * block.expansion,
kernel_size=1,
stride=stride,
bias=False),
build_norm_layer(self.norm_cfg, planes * block.expansion)[1])
layers = []
block_init_cfg = None
if self.pretrained is None and not hasattr(
self, 'init_cfg') and self.zero_init_residual:
if block is BasicBlock:
block_init_cfg = dict(
type='Constant', val=0, override=dict(name='norm2'))
elif block is Bottleneck:
block_init_cfg = dict(
type='Constant', val=0, override=dict(name='norm3'))
layers.append(
block(
inplanes,
planes,
stride,
downsample=downsample,
with_cp=self.with_cp,
norm_cfg=self.norm_cfg,
conv_cfg=self.conv_cfg,
init_cfg=block_init_cfg,
))
inplanes = planes * block.expansion
for i in range(1, blocks):
layers.append(
block(
inplanes,
planes,
with_cp=self.with_cp,
norm_cfg=self.norm_cfg,
conv_cfg=self.conv_cfg,
init_cfg=block_init_cfg))
return Sequential(*layers)
def _make_stage(self, layer_config, in_channels, multiscale_output=True):
num_modules = layer_config['num_modules']
num_branches = layer_config['num_branches']
num_blocks = layer_config['num_blocks']
num_channels = layer_config['num_channels']
block = self.blocks_dict[layer_config['block']]
hr_modules = []
block_init_cfg = None
if self.pretrained is None and not hasattr(
self, 'init_cfg') and self.zero_init_residual:
if block is BasicBlock:
block_init_cfg = dict(
type='Constant', val=0, override=dict(name='norm2'))
elif block is Bottleneck:
block_init_cfg = dict(
type='Constant', val=0, override=dict(name='norm3'))
for i in range(num_modules):
# multi_scale_output is only used for the last module
if not multiscale_output and i == num_modules - 1:
reset_multiscale_output = False
else:
reset_multiscale_output = True
hr_modules.append(
HRModule(
num_branches,
block,
num_blocks,
in_channels,
num_channels,
reset_multiscale_output,
with_cp=self.with_cp,
norm_cfg=self.norm_cfg,
conv_cfg=self.conv_cfg,
block_init_cfg=block_init_cfg))
return Sequential(*hr_modules), in_channels
def forward(self, x):
"""Forward function."""
x = self.conv1(x)
x = self.norm1(x)
x = self.relu(x)
x = self.conv2(x)
x = self.norm2(x)
x = self.relu(x)
x = self.layer1(x)
x_list = []
for i in range(self.stage2_cfg['num_branches']):
if self.transition1[i] is not None:
x_list.append(self.transition1[i](x))
else:
x_list.append(x)
y_list = self.stage2(x_list)
x_list = []
for i in range(self.stage3_cfg['num_branches']):
if self.transition2[i] is not None:
x_list.append(self.transition2[i](y_list[-1]))
else:
x_list.append(y_list[i])
y_list = self.stage3(x_list)
x_list = []
for i in range(self.stage4_cfg['num_branches']):
if self.transition3[i] is not None:
x_list.append(self.transition3[i](y_list[-1]))
else:
x_list.append(y_list[i])
y_list = self.stage4(x_list)
return y_list
def train(self, mode=True):
"""Convert the model into training mode will keeping the normalization
layer freezed."""
super(HRNet, self).train(mode)
if mode and self.norm_eval:
for m in self.modules():
                # trick: eval() has effect on BatchNorm only
if isinstance(m, _BatchNorm):
m.eval()
| 23,106 | 38.164407 | 79 | py |
DSLA-DSLA | DSLA-DSLA/mmdet/models/backbones/regnet.py | # Copyright (c) OpenMMLab. All rights reserved.
import warnings
import numpy as np
import torch.nn as nn
from mmcv.cnn import build_conv_layer, build_norm_layer
from ..builder import BACKBONES
from .resnet import ResNet
from .resnext import Bottleneck
@BACKBONES.register_module()
class RegNet(ResNet):
"""RegNet backbone.
More details can be found in `paper <https://arxiv.org/abs/2003.13678>`_ .
Args:
arch (dict): The parameter of RegNets.
- w0 (int): initial width
- wa (float): slope of width
- wm (float): quantization parameter to quantize the width
- depth (int): depth of the backbone
- group_w (int): width of group
- bot_mul (float): bottleneck ratio, i.e. expansion of bottleneck.
strides (Sequence[int]): Strides of the first block of each stage.
base_channels (int): Base channels after stem layer.
in_channels (int): Number of input image channels. Default: 3.
dilations (Sequence[int]): Dilation of each stage.
out_indices (Sequence[int]): Output from which stages.
style (str): `pytorch` or `caffe`. If set to "pytorch", the stride-two
layer is the 3x3 conv layer, otherwise the stride-two layer is
the first 1x1 conv layer.
frozen_stages (int): Stages to be frozen (all param fixed). -1 means
not freezing any parameters.
norm_cfg (dict): dictionary to construct and config norm layer.
norm_eval (bool): Whether to set norm layers to eval mode, namely,
freeze running stats (mean and var). Note: Effect on Batch Norm
and its variants only.
with_cp (bool): Use checkpoint or not. Using checkpoint will save some
memory while slowing down the training speed.
zero_init_residual (bool): whether to use zero init for last norm layer
in resblocks to let them behave as identity.
pretrained (str, optional): model pretrained path. Default: None
init_cfg (dict or list[dict], optional): Initialization config dict.
Default: None
Example:
>>> from mmdet.models import RegNet
>>> import torch
        >>> self = RegNet(
        ...     arch=dict(
        ...         w0=88,
        ...         wa=26.31,
        ...         wm=2.25,
        ...         group_w=48,
        ...         depth=25,
        ...         bot_mul=1.0))
>>> self.eval()
>>> inputs = torch.rand(1, 3, 32, 32)
>>> level_outputs = self.forward(inputs)
>>> for level_out in level_outputs:
... print(tuple(level_out.shape))
(1, 96, 8, 8)
(1, 192, 4, 4)
(1, 432, 2, 2)
(1, 1008, 1, 1)
"""
arch_settings = {
'regnetx_400mf':
dict(w0=24, wa=24.48, wm=2.54, group_w=16, depth=22, bot_mul=1.0),
'regnetx_800mf':
dict(w0=56, wa=35.73, wm=2.28, group_w=16, depth=16, bot_mul=1.0),
'regnetx_1.6gf':
dict(w0=80, wa=34.01, wm=2.25, group_w=24, depth=18, bot_mul=1.0),
'regnetx_3.2gf':
dict(w0=88, wa=26.31, wm=2.25, group_w=48, depth=25, bot_mul=1.0),
'regnetx_4.0gf':
dict(w0=96, wa=38.65, wm=2.43, group_w=40, depth=23, bot_mul=1.0),
'regnetx_6.4gf':
dict(w0=184, wa=60.83, wm=2.07, group_w=56, depth=17, bot_mul=1.0),
'regnetx_8.0gf':
dict(w0=80, wa=49.56, wm=2.88, group_w=120, depth=23, bot_mul=1.0),
'regnetx_12gf':
dict(w0=168, wa=73.36, wm=2.37, group_w=112, depth=19, bot_mul=1.0),
}
def __init__(self,
arch,
in_channels=3,
stem_channels=32,
base_channels=32,
strides=(2, 2, 2, 2),
dilations=(1, 1, 1, 1),
out_indices=(0, 1, 2, 3),
style='pytorch',
deep_stem=False,
avg_down=False,
frozen_stages=-1,
conv_cfg=None,
norm_cfg=dict(type='BN', requires_grad=True),
norm_eval=True,
dcn=None,
stage_with_dcn=(False, False, False, False),
plugins=None,
with_cp=False,
zero_init_residual=True,
pretrained=None,
init_cfg=None):
super(ResNet, self).__init__(init_cfg)
# Generate RegNet parameters first
if isinstance(arch, str):
assert arch in self.arch_settings, \
f'"arch": "{arch}" is not one of the' \
' arch_settings'
arch = self.arch_settings[arch]
elif not isinstance(arch, dict):
raise ValueError('Expect "arch" to be either a string '
f'or a dict, got {type(arch)}')
widths, num_stages = self.generate_regnet(
arch['w0'],
arch['wa'],
arch['wm'],
arch['depth'],
)
# Convert to per stage format
stage_widths, stage_blocks = self.get_stages_from_blocks(widths)
# Generate group widths and bot muls
group_widths = [arch['group_w'] for _ in range(num_stages)]
self.bottleneck_ratio = [arch['bot_mul'] for _ in range(num_stages)]
# Adjust the compatibility of stage_widths and group_widths
stage_widths, group_widths = self.adjust_width_group(
stage_widths, self.bottleneck_ratio, group_widths)
# Group params by stage
self.stage_widths = stage_widths
self.group_widths = group_widths
self.depth = sum(stage_blocks)
self.stem_channels = stem_channels
self.base_channels = base_channels
self.num_stages = num_stages
assert num_stages >= 1 and num_stages <= 4
self.strides = strides
self.dilations = dilations
assert len(strides) == len(dilations) == num_stages
self.out_indices = out_indices
assert max(out_indices) < num_stages
self.style = style
self.deep_stem = deep_stem
self.avg_down = avg_down
self.frozen_stages = frozen_stages
self.conv_cfg = conv_cfg
self.norm_cfg = norm_cfg
self.with_cp = with_cp
self.norm_eval = norm_eval
self.dcn = dcn
self.stage_with_dcn = stage_with_dcn
if dcn is not None:
assert len(stage_with_dcn) == num_stages
self.plugins = plugins
self.zero_init_residual = zero_init_residual
self.block = Bottleneck
expansion_bak = self.block.expansion
self.block.expansion = 1
self.stage_blocks = stage_blocks[:num_stages]
self._make_stem_layer(in_channels, stem_channels)
block_init_cfg = None
assert not (init_cfg and pretrained), \
'init_cfg and pretrained cannot be specified at the same time'
if isinstance(pretrained, str):
warnings.warn('DeprecationWarning: pretrained is deprecated, '
'please use "init_cfg" instead')
self.init_cfg = dict(type='Pretrained', checkpoint=pretrained)
elif pretrained is None:
if init_cfg is None:
self.init_cfg = [
dict(type='Kaiming', layer='Conv2d'),
dict(
type='Constant',
val=1,
layer=['_BatchNorm', 'GroupNorm'])
]
if self.zero_init_residual:
block_init_cfg = dict(
type='Constant', val=0, override=dict(name='norm3'))
else:
raise TypeError('pretrained must be a str or None')
self.inplanes = stem_channels
self.res_layers = []
for i, num_blocks in enumerate(self.stage_blocks):
stride = self.strides[i]
dilation = self.dilations[i]
group_width = self.group_widths[i]
width = int(round(self.stage_widths[i] * self.bottleneck_ratio[i]))
stage_groups = width // group_width
dcn = self.dcn if self.stage_with_dcn[i] else None
if self.plugins is not None:
stage_plugins = self.make_stage_plugins(self.plugins, i)
else:
stage_plugins = None
res_layer = self.make_res_layer(
block=self.block,
inplanes=self.inplanes,
planes=self.stage_widths[i],
num_blocks=num_blocks,
stride=stride,
dilation=dilation,
style=self.style,
avg_down=self.avg_down,
with_cp=self.with_cp,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg,
dcn=dcn,
plugins=stage_plugins,
groups=stage_groups,
base_width=group_width,
base_channels=self.stage_widths[i],
init_cfg=block_init_cfg)
self.inplanes = self.stage_widths[i]
layer_name = f'layer{i + 1}'
self.add_module(layer_name, res_layer)
self.res_layers.append(layer_name)
self._freeze_stages()
self.feat_dim = stage_widths[-1]
self.block.expansion = expansion_bak
def _make_stem_layer(self, in_channels, base_channels):
self.conv1 = build_conv_layer(
self.conv_cfg,
in_channels,
base_channels,
kernel_size=3,
stride=2,
padding=1,
bias=False)
self.norm1_name, norm1 = build_norm_layer(
self.norm_cfg, base_channels, postfix=1)
self.add_module(self.norm1_name, norm1)
self.relu = nn.ReLU(inplace=True)
def generate_regnet(self,
initial_width,
width_slope,
width_parameter,
depth,
divisor=8):
"""Generates per block width from RegNet parameters.
Args:
initial_width ([int]): Initial width of the backbone
width_slope ([float]): Slope of the quantized linear function
width_parameter ([int]): Parameter used to quantize the width.
depth ([int]): Depth of the backbone.
divisor (int, optional): The divisor of channels. Defaults to 8.
Returns:
list, int: return a list of widths of each stage and the number \
of stages
"""
assert width_slope >= 0
assert initial_width > 0
assert width_parameter > 1
assert initial_width % divisor == 0
widths_cont = np.arange(depth) * width_slope + initial_width
ks = np.round(
np.log(widths_cont / initial_width) / np.log(width_parameter))
widths = initial_width * np.power(width_parameter, ks)
widths = np.round(np.divide(widths, divisor)) * divisor
num_stages = len(np.unique(widths))
widths, widths_cont = widths.astype(int).tolist(), widths_cont.tolist()
return widths, num_stages
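    # Worked sketch (illustrative numbers): generate_regnet(24, 24.48, 2.54, 4)
    # gives widths_cont = [24.0, 48.48, 72.96, 97.44]; rounding the log-ratios
    # yields ks = [0, 1, 1, 2], so after snapping to the divisor 8 the widths
    # become [24, 64, 64, 152] and num_stages = 3 (three unique widths).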
@staticmethod
def quantize_float(number, divisor):
"""Converts a float to closest non-zero int divisible by divisor.
Args:
number (int): Original number to be quantized.
divisor (int): Divisor used to quantize the number.
Returns:
            int: quantized number that is divisible by divisor.
"""
return int(round(number / divisor) * divisor)
def adjust_width_group(self, widths, bottleneck_ratio, groups):
"""Adjusts the compatibility of widths and groups.
Args:
widths (list[int]): Width of each stage.
bottleneck_ratio (float): Bottleneck ratio.
groups (int): number of groups in each stage
Returns:
tuple(list): The adjusted widths and groups of each stage.
"""
bottleneck_width = [
int(w * b) for w, b in zip(widths, bottleneck_ratio)
]
groups = [min(g, w_bot) for g, w_bot in zip(groups, bottleneck_width)]
bottleneck_width = [
self.quantize_float(w_bot, g)
for w_bot, g in zip(bottleneck_width, groups)
]
widths = [
int(w_bot / b)
for w_bot, b in zip(bottleneck_width, bottleneck_ratio)
]
return widths, groups
def get_stages_from_blocks(self, widths):
"""Gets widths/stage_blocks of network at each stage.
Args:
widths (list[int]): Width in each stage.
Returns:
tuple(list): width and depth of each stage
"""
width_diff = [
width != width_prev
for width, width_prev in zip(widths + [0], [0] + widths)
]
stage_widths = [
width for width, diff in zip(widths, width_diff[:-1]) if diff
]
stage_blocks = np.diff([
depth for depth, diff in zip(range(len(width_diff)), width_diff)
if diff
]).tolist()
return stage_widths, stage_blocks
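    # Worked sketch (continuing the numbers above): widths [24, 64, 64, 152]
    # change value at positions 0, 1 and 3, so stage_widths = [24, 64, 152]
    # and stage_blocks = [1, 2, 1] (consecutive equal widths share a stage).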
def forward(self, x):
"""Forward function."""
x = self.conv1(x)
x = self.norm1(x)
x = self.relu(x)
outs = []
for i, layer_name in enumerate(self.res_layers):
res_layer = getattr(self, layer_name)
x = res_layer(x)
if i in self.out_indices:
outs.append(x)
return tuple(outs)
| 13,605 | 37.112045 | 79 | py |
DSLA-DSLA | DSLA-DSLA/mmdet/models/backbones/mobilenet_v2.py | # Copyright (c) OpenMMLab. All rights reserved.
import warnings
import torch.nn as nn
from mmcv.cnn import ConvModule
from mmcv.runner import BaseModule
from torch.nn.modules.batchnorm import _BatchNorm
from ..builder import BACKBONES
from ..utils import InvertedResidual, make_divisible
@BACKBONES.register_module()
class MobileNetV2(BaseModule):
"""MobileNetV2 backbone.
Args:
widen_factor (float): Width multiplier, multiply number of
channels in each layer by this amount. Default: 1.0.
out_indices (Sequence[int], optional): Output from which stages.
Default: (1, 2, 4, 7).
frozen_stages (int): Stages to be frozen (all param fixed).
Default: -1, which means not freezing any parameters.
conv_cfg (dict, optional): Config dict for convolution layer.
Default: None, which means using conv2d.
norm_cfg (dict): Config dict for normalization layer.
Default: dict(type='BN').
act_cfg (dict): Config dict for activation layer.
Default: dict(type='ReLU6').
norm_eval (bool): Whether to set norm layers to eval mode, namely,
freeze running stats (mean and var). Note: Effect on Batch Norm
and its variants only. Default: False.
with_cp (bool): Use checkpoint or not. Using checkpoint will save some
memory while slowing down the training speed. Default: False.
pretrained (str, optional): model pretrained path. Default: None
init_cfg (dict or list[dict], optional): Initialization config dict.
Default: None
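    Example:
        >>> # Illustrative sketch in the style of the other backbone
        >>> # docstrings; output shapes assume the defaults
        >>> # (widen_factor=1.0, out_indices=(1, 2, 4, 7)).
        >>> from mmdet.models import MobileNetV2
        >>> import torch
        >>> self = MobileNetV2()
        >>> self.eval()
        >>> inputs = torch.rand(1, 3, 224, 224)
        >>> level_outputs = self.forward(inputs)
        >>> for level_out in level_outputs:
        ...     print(tuple(level_out.shape))
        (1, 24, 56, 56)
        (1, 32, 28, 28)
        (1, 96, 14, 14)
        (1, 1280, 7, 7)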
"""
# Parameters to build layers. 4 parameters are needed to construct a
# layer, from left to right: expand_ratio, channel, num_blocks, stride.
arch_settings = [[1, 16, 1, 1], [6, 24, 2, 2], [6, 32, 3, 2],
[6, 64, 4, 2], [6, 96, 3, 1], [6, 160, 3, 2],
[6, 320, 1, 1]]
def __init__(self,
widen_factor=1.,
out_indices=(1, 2, 4, 7),
frozen_stages=-1,
conv_cfg=None,
norm_cfg=dict(type='BN'),
act_cfg=dict(type='ReLU6'),
norm_eval=False,
with_cp=False,
pretrained=None,
init_cfg=None):
super(MobileNetV2, self).__init__(init_cfg)
self.pretrained = pretrained
assert not (init_cfg and pretrained), \
'init_cfg and pretrained cannot be specified at the same time'
if isinstance(pretrained, str):
warnings.warn('DeprecationWarning: pretrained is deprecated, '
'please use "init_cfg" instead')
self.init_cfg = dict(type='Pretrained', checkpoint=pretrained)
elif pretrained is None:
if init_cfg is None:
self.init_cfg = [
dict(type='Kaiming', layer='Conv2d'),
dict(
type='Constant',
val=1,
layer=['_BatchNorm', 'GroupNorm'])
]
else:
raise TypeError('pretrained must be a str or None')
self.widen_factor = widen_factor
self.out_indices = out_indices
if not set(out_indices).issubset(set(range(0, 8))):
raise ValueError('out_indices must be a subset of range'
f'(0, 8). But received {out_indices}')
if frozen_stages not in range(-1, 8):
raise ValueError('frozen_stages must be in range(-1, 8). '
f'But received {frozen_stages}')
self.out_indices = out_indices
self.frozen_stages = frozen_stages
self.conv_cfg = conv_cfg
self.norm_cfg = norm_cfg
self.act_cfg = act_cfg
self.norm_eval = norm_eval
self.with_cp = with_cp
self.in_channels = make_divisible(32 * widen_factor, 8)
self.conv1 = ConvModule(
in_channels=3,
out_channels=self.in_channels,
kernel_size=3,
stride=2,
padding=1,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg,
act_cfg=self.act_cfg)
self.layers = []
for i, layer_cfg in enumerate(self.arch_settings):
expand_ratio, channel, num_blocks, stride = layer_cfg
out_channels = make_divisible(channel * widen_factor, 8)
inverted_res_layer = self.make_layer(
out_channels=out_channels,
num_blocks=num_blocks,
stride=stride,
expand_ratio=expand_ratio)
layer_name = f'layer{i + 1}'
self.add_module(layer_name, inverted_res_layer)
self.layers.append(layer_name)
if widen_factor > 1.0:
self.out_channel = int(1280 * widen_factor)
else:
self.out_channel = 1280
layer = ConvModule(
in_channels=self.in_channels,
out_channels=self.out_channel,
kernel_size=1,
stride=1,
padding=0,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg,
act_cfg=self.act_cfg)
self.add_module('conv2', layer)
self.layers.append('conv2')
def make_layer(self, out_channels, num_blocks, stride, expand_ratio):
"""Stack InvertedResidual blocks to build a layer for MobileNetV2.
Args:
out_channels (int): out_channels of block.
num_blocks (int): number of blocks.
stride (int): stride of the first block. Default: 1
expand_ratio (int): Expand the number of channels of the
hidden layer in InvertedResidual by this ratio. Default: 6.
"""
layers = []
for i in range(num_blocks):
if i >= 1:
stride = 1
layers.append(
InvertedResidual(
self.in_channels,
out_channels,
mid_channels=int(round(self.in_channels * expand_ratio)),
stride=stride,
with_expand_conv=expand_ratio != 1,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg,
act_cfg=self.act_cfg,
with_cp=self.with_cp))
self.in_channels = out_channels
return nn.Sequential(*layers)
def _freeze_stages(self):
if self.frozen_stages >= 0:
for param in self.conv1.parameters():
param.requires_grad = False
for i in range(1, self.frozen_stages + 1):
layer = getattr(self, f'layer{i}')
layer.eval()
for param in layer.parameters():
param.requires_grad = False
def forward(self, x):
"""Forward function."""
x = self.conv1(x)
outs = []
for i, layer_name in enumerate(self.layers):
layer = getattr(self, layer_name)
x = layer(x)
if i in self.out_indices:
outs.append(x)
return tuple(outs)
def train(self, mode=True):
"""Convert the model into training mode while keep normalization layer
frozen."""
super(MobileNetV2, self).train(mode)
self._freeze_stages()
if mode and self.norm_eval:
for m in self.modules():
                # trick: eval() has effect on BatchNorm only
if isinstance(m, _BatchNorm):
m.eval()
| 7,599 | 37.383838 | 78 | py |
DSLA-DSLA | DSLA-DSLA/mmdet/models/backbones/swin.py | # Copyright (c) OpenMMLab. All rights reserved.
import warnings
from collections import OrderedDict
from copy import deepcopy
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.checkpoint as cp
from mmcv.cnn import build_norm_layer, constant_init, trunc_normal_init
from mmcv.cnn.bricks.transformer import FFN, build_dropout
from mmcv.cnn.utils.weight_init import trunc_normal_
from mmcv.runner import BaseModule, ModuleList, _load_checkpoint
from mmcv.utils import to_2tuple
from ...utils import get_root_logger
from ..builder import BACKBONES
from ..utils.ckpt_convert import swin_converter
from ..utils.transformer import PatchEmbed, PatchMerging
class WindowMSA(BaseModule):
"""Window based multi-head self-attention (W-MSA) module with relative
position bias.
Args:
embed_dims (int): Number of input channels.
num_heads (int): Number of attention heads.
window_size (tuple[int]): The height and width of the window.
qkv_bias (bool, optional): If True, add a learnable bias to q, k, v.
Default: True.
qk_scale (float | None, optional): Override default qk scale of
head_dim ** -0.5 if set. Default: None.
attn_drop_rate (float, optional): Dropout ratio of attention weight.
Default: 0.0
proj_drop_rate (float, optional): Dropout ratio of output. Default: 0.
init_cfg (dict | None, optional): The Config for initialization.
Default: None.
"""
def __init__(self,
embed_dims,
num_heads,
window_size,
qkv_bias=True,
qk_scale=None,
attn_drop_rate=0.,
proj_drop_rate=0.,
init_cfg=None):
super().__init__()
self.embed_dims = embed_dims
self.window_size = window_size # Wh, Ww
self.num_heads = num_heads
head_embed_dims = embed_dims // num_heads
self.scale = qk_scale or head_embed_dims**-0.5
self.init_cfg = init_cfg
# define a parameter table of relative position bias
self.relative_position_bias_table = nn.Parameter(
torch.zeros((2 * window_size[0] - 1) * (2 * window_size[1] - 1),
num_heads)) # 2*Wh-1 * 2*Ww-1, nH
# About 2x faster than original impl
Wh, Ww = self.window_size
rel_index_coords = self.double_step_seq(2 * Ww - 1, Wh, 1, Ww)
rel_position_index = rel_index_coords + rel_index_coords.T
rel_position_index = rel_position_index.flip(1).contiguous()
self.register_buffer('relative_position_index', rel_position_index)
self.qkv = nn.Linear(embed_dims, embed_dims * 3, bias=qkv_bias)
self.attn_drop = nn.Dropout(attn_drop_rate)
self.proj = nn.Linear(embed_dims, embed_dims)
self.proj_drop = nn.Dropout(proj_drop_rate)
self.softmax = nn.Softmax(dim=-1)
def init_weights(self):
trunc_normal_(self.relative_position_bias_table, std=0.02)
def forward(self, x, mask=None):
"""
Args:
x (tensor): input features with shape of (num_windows*B, N, C)
mask (tensor | None, Optional): mask with shape of (num_windows,
Wh*Ww, Wh*Ww), value should be between (-inf, 0].
"""
B, N, C = x.shape
qkv = self.qkv(x).reshape(B, N, 3, self.num_heads,
C // self.num_heads).permute(2, 0, 3, 1, 4)
# make torchscript happy (cannot use tensor as tuple)
q, k, v = qkv[0], qkv[1], qkv[2]
q = q * self.scale
attn = (q @ k.transpose(-2, -1))
relative_position_bias = self.relative_position_bias_table[
self.relative_position_index.view(-1)].view(
self.window_size[0] * self.window_size[1],
self.window_size[0] * self.window_size[1],
-1) # Wh*Ww,Wh*Ww,nH
relative_position_bias = relative_position_bias.permute(
2, 0, 1).contiguous() # nH, Wh*Ww, Wh*Ww
attn = attn + relative_position_bias.unsqueeze(0)
if mask is not None:
nW = mask.shape[0]
attn = attn.view(B // nW, nW, self.num_heads, N,
N) + mask.unsqueeze(1).unsqueeze(0)
attn = attn.view(-1, self.num_heads, N, N)
attn = self.softmax(attn)
attn = self.attn_drop(attn)
x = (attn @ v).transpose(1, 2).reshape(B, N, C)
x = self.proj(x)
x = self.proj_drop(x)
return x
@staticmethod
def double_step_seq(step1, len1, step2, len2):
seq1 = torch.arange(0, step1 * len1, step1)
seq2 = torch.arange(0, step2 * len2, step2)
return (seq1[:, None] + seq2[None, :]).reshape(1, -1)
class ShiftWindowMSA(BaseModule):
"""Shifted Window Multihead Self-Attention Module.
Args:
embed_dims (int): Number of input channels.
num_heads (int): Number of attention heads.
window_size (int): The height and width of the window.
shift_size (int, optional): The shift step of each window towards
right-bottom. If zero, act as regular window-msa. Defaults to 0.
qkv_bias (bool, optional): If True, add a learnable bias to q, k, v.
Default: True
qk_scale (float | None, optional): Override default qk scale of
head_dim ** -0.5 if set. Defaults: None.
attn_drop_rate (float, optional): Dropout ratio of attention weight.
Defaults: 0.
proj_drop_rate (float, optional): Dropout ratio of output.
Defaults: 0.
dropout_layer (dict, optional): The dropout_layer used before output.
Defaults: dict(type='DropPath', drop_prob=0.).
init_cfg (dict, optional): The extra config for initialization.
Default: None.
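    Example:
        >>> # Usage sketch (sizes assumed): tokens arrive flattened as
        >>> # (B, H*W, C) together with the spatial shape; padding to a
        >>> # multiple of the window size is handled internally.
        >>> import torch
        >>> attn = ShiftWindowMSA(embed_dims=96, num_heads=3,
        ...                       window_size=7, shift_size=3)
        >>> x = torch.rand(2, 56 * 56, 96)
        >>> tuple(attn(x, (56, 56)).shape)
        (2, 3136, 96)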
"""
def __init__(self,
embed_dims,
num_heads,
window_size,
shift_size=0,
qkv_bias=True,
qk_scale=None,
attn_drop_rate=0,
proj_drop_rate=0,
dropout_layer=dict(type='DropPath', drop_prob=0.),
init_cfg=None):
super().__init__(init_cfg)
self.window_size = window_size
self.shift_size = shift_size
assert 0 <= self.shift_size < self.window_size
self.w_msa = WindowMSA(
embed_dims=embed_dims,
num_heads=num_heads,
window_size=to_2tuple(window_size),
qkv_bias=qkv_bias,
qk_scale=qk_scale,
attn_drop_rate=attn_drop_rate,
proj_drop_rate=proj_drop_rate,
init_cfg=None)
self.drop = build_dropout(dropout_layer)
def forward(self, query, hw_shape):
B, L, C = query.shape
H, W = hw_shape
assert L == H * W, 'input feature has wrong size'
query = query.view(B, H, W, C)
# pad feature maps to multiples of window size
pad_r = (self.window_size - W % self.window_size) % self.window_size
pad_b = (self.window_size - H % self.window_size) % self.window_size
query = F.pad(query, (0, 0, 0, pad_r, 0, pad_b))
H_pad, W_pad = query.shape[1], query.shape[2]
# cyclic shift
if self.shift_size > 0:
shifted_query = torch.roll(
query,
shifts=(-self.shift_size, -self.shift_size),
dims=(1, 2))
# calculate attention mask for SW-MSA
img_mask = torch.zeros((1, H_pad, W_pad, 1), device=query.device)
h_slices = (slice(0, -self.window_size),
slice(-self.window_size,
-self.shift_size), slice(-self.shift_size, None))
w_slices = (slice(0, -self.window_size),
slice(-self.window_size,
-self.shift_size), slice(-self.shift_size, None))
cnt = 0
for h in h_slices:
for w in w_slices:
img_mask[:, h, w, :] = cnt
cnt += 1
# nW, window_size, window_size, 1
mask_windows = self.window_partition(img_mask)
mask_windows = mask_windows.view(
-1, self.window_size * self.window_size)
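            # tokens that came from different regions before the cyclic
            # shift get a nonzero difference below and are suppressed
            # with a large negative value prior to softmax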
attn_mask = mask_windows.unsqueeze(1) - mask_windows.unsqueeze(2)
attn_mask = attn_mask.masked_fill(attn_mask != 0,
float(-100.0)).masked_fill(
attn_mask == 0, float(0.0))
else:
shifted_query = query
attn_mask = None
# nW*B, window_size, window_size, C
query_windows = self.window_partition(shifted_query)
# nW*B, window_size*window_size, C
query_windows = query_windows.view(-1, self.window_size**2, C)
# W-MSA/SW-MSA (nW*B, window_size*window_size, C)
attn_windows = self.w_msa(query_windows, mask=attn_mask)
# merge windows
attn_windows = attn_windows.view(-1, self.window_size,
self.window_size, C)
# B H' W' C
shifted_x = self.window_reverse(attn_windows, H_pad, W_pad)
# reverse cyclic shift
if self.shift_size > 0:
x = torch.roll(
shifted_x,
shifts=(self.shift_size, self.shift_size),
dims=(1, 2))
else:
x = shifted_x
        if pad_r > 0 or pad_b > 0:
x = x[:, :H, :W, :].contiguous()
x = x.view(B, H * W, C)
x = self.drop(x)
return x
def window_reverse(self, windows, H, W):
"""
Args:
windows: (num_windows*B, window_size, window_size, C)
H (int): Height of image
W (int): Width of image
Returns:
x: (B, H, W, C)
"""
window_size = self.window_size
B = int(windows.shape[0] / (H * W / window_size / window_size))
x = windows.view(B, H // window_size, W // window_size, window_size,
window_size, -1)
x = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(B, H, W, -1)
return x
def window_partition(self, x):
"""
Args:
x: (B, H, W, C)
Returns:
windows: (num_windows*B, window_size, window_size, C)
"""
B, H, W, C = x.shape
window_size = self.window_size
x = x.view(B, H // window_size, window_size, W // window_size,
window_size, C)
windows = x.permute(0, 1, 3, 2, 4, 5).contiguous()
windows = windows.view(-1, window_size, window_size, C)
return windows
class SwinBlock(BaseModule):
""""
Args:
embed_dims (int): The feature dimension.
num_heads (int): Parallel attention heads.
feedforward_channels (int): The hidden dimension for FFNs.
window_size (int, optional): The local window scale. Default: 7.
shift (bool, optional): whether to shift window or not. Default False.
qkv_bias (bool, optional): enable bias for qkv if True. Default: True.
qk_scale (float | None, optional): Override default qk scale of
head_dim ** -0.5 if set. Default: None.
drop_rate (float, optional): Dropout rate. Default: 0.
attn_drop_rate (float, optional): Attention dropout rate. Default: 0.
drop_path_rate (float, optional): Stochastic depth rate. Default: 0.
act_cfg (dict, optional): The config dict of activation function.
Default: dict(type='GELU').
norm_cfg (dict, optional): The config dict of normalization.
Default: dict(type='LN').
with_cp (bool, optional): Use checkpoint or not. Using checkpoint
will save some memory while slowing down the training speed.
Default: False.
init_cfg (dict | list | None, optional): The init config.
Default: None.
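    Example:
        >>> # Illustrative only: a block preserves the (B, L, C) shape.
        >>> import torch
        >>> block = SwinBlock(embed_dims=96, num_heads=3,
        ...                   feedforward_channels=384, shift=True)
        >>> x = torch.rand(2, 56 * 56, 96)
        >>> tuple(block(x, (56, 56)).shape)
        (2, 3136, 96)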
"""
def __init__(self,
embed_dims,
num_heads,
feedforward_channels,
window_size=7,
shift=False,
qkv_bias=True,
qk_scale=None,
drop_rate=0.,
attn_drop_rate=0.,
drop_path_rate=0.,
act_cfg=dict(type='GELU'),
norm_cfg=dict(type='LN'),
with_cp=False,
init_cfg=None):
super(SwinBlock, self).__init__()
self.init_cfg = init_cfg
self.with_cp = with_cp
self.norm1 = build_norm_layer(norm_cfg, embed_dims)[1]
self.attn = ShiftWindowMSA(
embed_dims=embed_dims,
num_heads=num_heads,
window_size=window_size,
shift_size=window_size // 2 if shift else 0,
qkv_bias=qkv_bias,
qk_scale=qk_scale,
attn_drop_rate=attn_drop_rate,
proj_drop_rate=drop_rate,
dropout_layer=dict(type='DropPath', drop_prob=drop_path_rate),
init_cfg=None)
self.norm2 = build_norm_layer(norm_cfg, embed_dims)[1]
self.ffn = FFN(
embed_dims=embed_dims,
feedforward_channels=feedforward_channels,
num_fcs=2,
ffn_drop=drop_rate,
dropout_layer=dict(type='DropPath', drop_prob=drop_path_rate),
act_cfg=act_cfg,
add_identity=True,
init_cfg=None)
def forward(self, x, hw_shape):
def _inner_forward(x):
identity = x
x = self.norm1(x)
x = self.attn(x, hw_shape)
x = x + identity
identity = x
x = self.norm2(x)
x = self.ffn(x, identity=identity)
return x
if self.with_cp and x.requires_grad:
x = cp.checkpoint(_inner_forward, x)
else:
x = _inner_forward(x)
return x
class SwinBlockSequence(BaseModule):
"""Implements one stage in Swin Transformer.
Args:
embed_dims (int): The feature dimension.
num_heads (int): Parallel attention heads.
feedforward_channels (int): The hidden dimension for FFNs.
depth (int): The number of blocks in this stage.
window_size (int, optional): The local window scale. Default: 7.
qkv_bias (bool, optional): enable bias for qkv if True. Default: True.
qk_scale (float | None, optional): Override default qk scale of
head_dim ** -0.5 if set. Default: None.
drop_rate (float, optional): Dropout rate. Default: 0.
attn_drop_rate (float, optional): Attention dropout rate. Default: 0.
drop_path_rate (float | list[float], optional): Stochastic depth
rate. Default: 0.
downsample (BaseModule | None, optional): The downsample operation
module. Default: None.
act_cfg (dict, optional): The config dict of activation function.
Default: dict(type='GELU').
norm_cfg (dict, optional): The config dict of normalization.
Default: dict(type='LN').
with_cp (bool, optional): Use checkpoint or not. Using checkpoint
will save some memory while slowing down the training speed.
Default: False.
init_cfg (dict | list | None, optional): The init config.
Default: None.
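    Example:
        >>> # Sketch with assumed sizes: with a PatchMerging downsample
        >>> # the stage returns both the merged and the pre-merge output.
        >>> import torch
        >>> downsample = PatchMerging(in_channels=96, out_channels=192)
        >>> stage = SwinBlockSequence(embed_dims=96, num_heads=3,
        ...                           feedforward_channels=384, depth=2,
        ...                           downsample=downsample)
        >>> x = torch.rand(2, 56 * 56, 96)
        >>> x_down, down_hw, out, hw = stage(x, (56, 56))
        >>> tuple(x_down.shape), tuple(down_hw)
        ((2, 784, 192), (28, 28))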
"""
def __init__(self,
embed_dims,
num_heads,
feedforward_channels,
depth,
window_size=7,
qkv_bias=True,
qk_scale=None,
drop_rate=0.,
attn_drop_rate=0.,
drop_path_rate=0.,
downsample=None,
act_cfg=dict(type='GELU'),
norm_cfg=dict(type='LN'),
with_cp=False,
init_cfg=None):
super().__init__(init_cfg=init_cfg)
if isinstance(drop_path_rate, list):
drop_path_rates = drop_path_rate
assert len(drop_path_rates) == depth
else:
drop_path_rates = [deepcopy(drop_path_rate) for _ in range(depth)]
self.blocks = ModuleList()
for i in range(depth):
block = SwinBlock(
embed_dims=embed_dims,
num_heads=num_heads,
feedforward_channels=feedforward_channels,
window_size=window_size,
shift=False if i % 2 == 0 else True,
qkv_bias=qkv_bias,
qk_scale=qk_scale,
drop_rate=drop_rate,
attn_drop_rate=attn_drop_rate,
drop_path_rate=drop_path_rates[i],
act_cfg=act_cfg,
norm_cfg=norm_cfg,
with_cp=with_cp,
init_cfg=None)
self.blocks.append(block)
self.downsample = downsample
def forward(self, x, hw_shape):
for block in self.blocks:
x = block(x, hw_shape)
if self.downsample:
x_down, down_hw_shape = self.downsample(x, hw_shape)
return x_down, down_hw_shape, x, hw_shape
else:
return x, hw_shape, x, hw_shape
@BACKBONES.register_module()
class SwinTransformer(BaseModule):
""" Swin Transformer
A PyTorch implement of : `Swin Transformer:
Hierarchical Vision Transformer using Shifted Windows` -
https://arxiv.org/abs/2103.14030
Inspiration from
https://github.com/microsoft/Swin-Transformer
Args:
pretrain_img_size (int | tuple[int]): The size of input image when
pretrain. Defaults: 224.
in_channels (int): The num of input channels.
Defaults: 3.
embed_dims (int): The feature dimension. Default: 96.
patch_size (int | tuple[int]): Patch size. Default: 4.
window_size (int): Window size. Default: 7.
mlp_ratio (int): Ratio of mlp hidden dim to embedding dim.
Default: 4.
depths (tuple[int]): Depths of each Swin Transformer stage.
Default: (2, 2, 6, 2).
num_heads (tuple[int]): Parallel attention heads of each Swin
Transformer stage. Default: (3, 6, 12, 24).
strides (tuple[int]): The patch merging or patch embedding stride of
each Swin Transformer stage. (In swin, we set kernel size equal to
stride.) Default: (4, 2, 2, 2).
out_indices (tuple[int]): Output from which stages.
Default: (0, 1, 2, 3).
qkv_bias (bool, optional): If True, add a learnable bias to query, key,
value. Default: True
qk_scale (float | None, optional): Override default qk scale of
head_dim ** -0.5 if set. Default: None.
patch_norm (bool): If add a norm layer for patch embed and patch
merging. Default: True.
drop_rate (float): Dropout rate. Defaults: 0.
attn_drop_rate (float): Attention dropout rate. Default: 0.
drop_path_rate (float): Stochastic depth rate. Defaults: 0.1.
use_abs_pos_embed (bool): If True, add absolute position embedding to
the patch embedding. Defaults: False.
act_cfg (dict): Config dict for activation layer.
            Default: dict(type='GELU').
norm_cfg (dict): Config dict for normalization layer at
            output of backbone. Defaults: dict(type='LN').
with_cp (bool, optional): Use checkpoint or not. Using checkpoint
will save some memory while slowing down the training speed.
Default: False.
pretrained (str, optional): model pretrained path. Default: None.
convert_weights (bool): The flag indicates whether the
pre-trained model is from the original repo. We may need
to convert some keys to make it compatible.
Default: False.
frozen_stages (int): Stages to be frozen (stop grad and set eval mode).
Default: -1 (-1 means not freezing any parameters).
init_cfg (dict, optional): The Config for initialization.
Defaults to None.
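    Example:
        >>> # Smoke test with the default (tiny-scale) settings; shapes
        >>> # assume a 224x224 input and are illustrative only.
        >>> import torch
        >>> self = SwinTransformer()
        >>> self.eval()
        >>> inputs = torch.rand(1, 3, 224, 224)
        >>> level_outputs = self.forward(inputs)
        >>> for level_out in level_outputs:
        ...     print(tuple(level_out.shape))
        (1, 96, 56, 56)
        (1, 192, 28, 28)
        (1, 384, 14, 14)
        (1, 768, 7, 7)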
"""
def __init__(self,
pretrain_img_size=224,
in_channels=3,
embed_dims=96,
patch_size=4,
window_size=7,
mlp_ratio=4,
depths=(2, 2, 6, 2),
num_heads=(3, 6, 12, 24),
strides=(4, 2, 2, 2),
out_indices=(0, 1, 2, 3),
qkv_bias=True,
qk_scale=None,
patch_norm=True,
drop_rate=0.,
attn_drop_rate=0.,
drop_path_rate=0.1,
use_abs_pos_embed=False,
act_cfg=dict(type='GELU'),
norm_cfg=dict(type='LN'),
with_cp=False,
pretrained=None,
convert_weights=False,
frozen_stages=-1,
init_cfg=None):
self.convert_weights = convert_weights
self.frozen_stages = frozen_stages
if isinstance(pretrain_img_size, int):
pretrain_img_size = to_2tuple(pretrain_img_size)
elif isinstance(pretrain_img_size, tuple):
if len(pretrain_img_size) == 1:
pretrain_img_size = to_2tuple(pretrain_img_size[0])
assert len(pretrain_img_size) == 2, \
f'The size of image should have length 1 or 2, ' \
f'but got {len(pretrain_img_size)}'
assert not (init_cfg and pretrained), \
'init_cfg and pretrained cannot be specified at the same time'
if isinstance(pretrained, str):
warnings.warn('DeprecationWarning: pretrained is deprecated, '
'please use "init_cfg" instead')
self.init_cfg = dict(type='Pretrained', checkpoint=pretrained)
elif pretrained is None:
self.init_cfg = init_cfg
else:
raise TypeError('pretrained must be a str or None')
super(SwinTransformer, self).__init__(init_cfg=init_cfg)
num_layers = len(depths)
self.out_indices = out_indices
self.use_abs_pos_embed = use_abs_pos_embed
assert strides[0] == patch_size, 'Use non-overlapping patch embed.'
self.patch_embed = PatchEmbed(
in_channels=in_channels,
embed_dims=embed_dims,
conv_type='Conv2d',
kernel_size=patch_size,
stride=strides[0],
norm_cfg=norm_cfg if patch_norm else None,
init_cfg=None)
if self.use_abs_pos_embed:
patch_row = pretrain_img_size[0] // patch_size
patch_col = pretrain_img_size[1] // patch_size
num_patches = patch_row * patch_col
self.absolute_pos_embed = nn.Parameter(
torch.zeros((1, num_patches, embed_dims)))
self.drop_after_pos = nn.Dropout(p=drop_rate)
# set stochastic depth decay rule
total_depth = sum(depths)
dpr = [
x.item() for x in torch.linspace(0, drop_path_rate, total_depth)
]
self.stages = ModuleList()
in_channels = embed_dims
for i in range(num_layers):
if i < num_layers - 1:
downsample = PatchMerging(
in_channels=in_channels,
out_channels=2 * in_channels,
stride=strides[i + 1],
norm_cfg=norm_cfg if patch_norm else None,
init_cfg=None)
else:
downsample = None
stage = SwinBlockSequence(
embed_dims=in_channels,
num_heads=num_heads[i],
feedforward_channels=mlp_ratio * in_channels,
depth=depths[i],
window_size=window_size,
qkv_bias=qkv_bias,
qk_scale=qk_scale,
drop_rate=drop_rate,
attn_drop_rate=attn_drop_rate,
drop_path_rate=dpr[sum(depths[:i]):sum(depths[:i + 1])],
downsample=downsample,
act_cfg=act_cfg,
norm_cfg=norm_cfg,
with_cp=with_cp,
init_cfg=None)
self.stages.append(stage)
if downsample:
in_channels = downsample.out_channels
self.num_features = [int(embed_dims * 2**i) for i in range(num_layers)]
# Add a norm layer for each output
for i in out_indices:
layer = build_norm_layer(norm_cfg, self.num_features[i])[1]
layer_name = f'norm{i}'
self.add_module(layer_name, layer)
def train(self, mode=True):
"""Convert the model into training mode while keep layers freezed."""
super(SwinTransformer, self).train(mode)
self._freeze_stages()
def _freeze_stages(self):
if self.frozen_stages >= 0:
self.patch_embed.eval()
for param in self.patch_embed.parameters():
param.requires_grad = False
if self.use_abs_pos_embed:
self.absolute_pos_embed.requires_grad = False
self.drop_after_pos.eval()
for i in range(1, self.frozen_stages + 1):
if (i - 1) in self.out_indices:
norm_layer = getattr(self, f'norm{i-1}')
norm_layer.eval()
for param in norm_layer.parameters():
param.requires_grad = False
m = self.stages[i - 1]
m.eval()
for param in m.parameters():
param.requires_grad = False
def init_weights(self):
logger = get_root_logger()
if self.init_cfg is None:
            logger.warning(f'No pre-trained weights for '
                           f'{self.__class__.__name__}, '
                           f'training start from scratch')
if self.use_abs_pos_embed:
trunc_normal_(self.absolute_pos_embed, std=0.02)
for m in self.modules():
if isinstance(m, nn.Linear):
trunc_normal_init(m, std=.02, bias=0.)
elif isinstance(m, nn.LayerNorm):
constant_init(m, 1.0)
else:
            assert 'checkpoint' in self.init_cfg, \
                f'Only `Pretrained` init_cfg with a checkpoint is ' \
                f'supported in {self.__class__.__name__}'
ckpt = _load_checkpoint(
self.init_cfg.checkpoint, logger=logger, map_location='cpu')
if 'state_dict' in ckpt:
_state_dict = ckpt['state_dict']
elif 'model' in ckpt:
_state_dict = ckpt['model']
else:
_state_dict = ckpt
if self.convert_weights:
                # support loading weights from the original repo
_state_dict = swin_converter(_state_dict)
state_dict = OrderedDict()
for k, v in _state_dict.items():
if k.startswith('backbone.'):
state_dict[k[9:]] = v
# strip prefix of state_dict
if list(state_dict.keys())[0].startswith('module.'):
state_dict = {k[7:]: v for k, v in state_dict.items()}
            # check absolute position embedding
            if state_dict.get('absolute_pos_embed') is not None:
                absolute_pos_embed = state_dict['absolute_pos_embed']
                N1, L1, C1 = absolute_pos_embed.size()
                # self.absolute_pos_embed is stored as (1, num_patches, C),
                # so compare the flattened shapes directly
                N2, L2, C2 = self.absolute_pos_embed.size()
                if N1 != N2 or C1 != C2 or L1 != L2:
                    logger.warning('Error in loading absolute_pos_embed, pass')
# interpolate position bias table if needed
relative_position_bias_table_keys = [
k for k in state_dict.keys()
if 'relative_position_bias_table' in k
]
for table_key in relative_position_bias_table_keys:
table_pretrained = state_dict[table_key]
table_current = self.state_dict()[table_key]
L1, nH1 = table_pretrained.size()
L2, nH2 = table_current.size()
if nH1 != nH2:
logger.warning(f'Error in loading {table_key}, pass')
elif L1 != L2:
S1 = int(L1**0.5)
S2 = int(L2**0.5)
table_pretrained_resized = F.interpolate(
table_pretrained.permute(1, 0).reshape(1, nH1, S1, S1),
size=(S2, S2),
mode='bicubic')
state_dict[table_key] = table_pretrained_resized.view(
nH2, L2).permute(1, 0).contiguous()
# load state_dict
self.load_state_dict(state_dict, False)
def forward(self, x):
x, hw_shape = self.patch_embed(x)
if self.use_abs_pos_embed:
x = x + self.absolute_pos_embed
x = self.drop_after_pos(x)
outs = []
for i, stage in enumerate(self.stages):
x, hw_shape, out, out_hw_shape = stage(x, hw_shape)
if i in self.out_indices:
norm_layer = getattr(self, f'norm{i}')
out = norm_layer(out)
out = out.view(-1, *out_hw_shape,
self.num_features[i]).permute(0, 3, 1,
2).contiguous()
outs.append(out)
return outs
| 30,138 | 38.448953 | 79 | py |
DSLA-DSLA | DSLA-DSLA/mmdet/models/backbones/trident_resnet.py | # Copyright (c) OpenMMLab. All rights reserved.
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.checkpoint as cp
from mmcv.cnn import build_conv_layer, build_norm_layer
from mmcv.runner import BaseModule
from torch.nn.modules.utils import _pair
from mmdet.models.backbones.resnet import Bottleneck, ResNet
from mmdet.models.builder import BACKBONES
class TridentConv(BaseModule):
"""Trident Convolution Module.
Args:
in_channels (int): Number of channels in input.
out_channels (int): Number of channels in output.
kernel_size (int): Size of convolution kernel.
stride (int, optional): Convolution stride. Default: 1.
trident_dilations (tuple[int, int, int], optional): Dilations of
different trident branch. Default: (1, 2, 3).
test_branch_idx (int, optional): In inference, all 3 branches will
be used if `test_branch_idx==-1`, otherwise only branch with
index `test_branch_idx` will be used. Default: 1.
bias (bool, optional): Whether to use bias in convolution or not.
Default: False.
init_cfg (dict or list[dict], optional): Initialization config dict.
Default: None
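    Example:
        >>> # Hedged sketch: in training mode (or test_branch_idx == -1)
        >>> # each dilation branch runs on its own input, so inputs and
        >>> # outputs are lists of equal length; spatial size is kept
        >>> # because padding always equals dilation for the 3x3 kernel.
        >>> import torch
        >>> conv = TridentConv(16, 16, kernel_size=3,
        ...                    trident_dilations=(1, 2, 3))
        >>> inputs = [torch.rand(1, 16, 32, 32) for _ in range(3)]
        >>> [tuple(out.shape) for out in conv(inputs)]
        [(1, 16, 32, 32), (1, 16, 32, 32), (1, 16, 32, 32)]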
"""
def __init__(self,
in_channels,
out_channels,
kernel_size,
stride=1,
trident_dilations=(1, 2, 3),
test_branch_idx=1,
bias=False,
init_cfg=None):
super(TridentConv, self).__init__(init_cfg)
self.num_branch = len(trident_dilations)
self.with_bias = bias
self.test_branch_idx = test_branch_idx
self.stride = _pair(stride)
self.kernel_size = _pair(kernel_size)
self.paddings = _pair(trident_dilations)
self.dilations = trident_dilations
self.in_channels = in_channels
self.out_channels = out_channels
self.weight = nn.Parameter(
torch.Tensor(out_channels, in_channels, *self.kernel_size))
if bias:
self.bias = nn.Parameter(torch.Tensor(out_channels))
else:
self.bias = None
def extra_repr(self):
tmpstr = f'in_channels={self.in_channels}'
tmpstr += f', out_channels={self.out_channels}'
tmpstr += f', kernel_size={self.kernel_size}'
tmpstr += f', num_branch={self.num_branch}'
tmpstr += f', test_branch_idx={self.test_branch_idx}'
tmpstr += f', stride={self.stride}'
tmpstr += f', paddings={self.paddings}'
tmpstr += f', dilations={self.dilations}'
        tmpstr += f', bias={self.with_bias}'
return tmpstr
def forward(self, inputs):
if self.training or self.test_branch_idx == -1:
outputs = [
F.conv2d(input, self.weight, self.bias, self.stride, padding,
dilation) for input, dilation, padding in zip(
inputs, self.dilations, self.paddings)
]
else:
assert len(inputs) == 1
outputs = [
F.conv2d(inputs[0], self.weight, self.bias, self.stride,
self.paddings[self.test_branch_idx],
self.dilations[self.test_branch_idx])
]
return outputs
# Since TridentNet is defined over ResNet50 and ResNet101, here we
# only support TridentBottleneckBlock.
class TridentBottleneck(Bottleneck):
"""BottleBlock for TridentResNet.
Args:
trident_dilations (tuple[int, int, int]): Dilations of different
trident branch.
test_branch_idx (int): In inference, all 3 branches will be used
if `test_branch_idx==-1`, otherwise only branch with index
`test_branch_idx` will be used.
concat_output (bool): Whether to concat the output list to a Tensor.
`True` only in the last Block.
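    Example:
        >>> # Sketch with assumed channel sizes: the block maps a list of
        >>> # per-branch tensors to a list (or, for the last block with
        >>> # ``concat_output=True``, to one tensor stacked along batch).
        >>> import torch
        >>> block = TridentBottleneck(
        ...     trident_dilations=(1, 2, 3), test_branch_idx=1,
        ...     concat_output=False, inplanes=64, planes=16)
        >>> outs = block([torch.rand(1, 64, 8, 8) for _ in range(3)])
        >>> len(outs), tuple(outs[0].shape)
        (3, (1, 64, 8, 8))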
"""
def __init__(self, trident_dilations, test_branch_idx, concat_output,
**kwargs):
super(TridentBottleneck, self).__init__(**kwargs)
self.trident_dilations = trident_dilations
self.num_branch = len(trident_dilations)
self.concat_output = concat_output
self.test_branch_idx = test_branch_idx
self.conv2 = TridentConv(
self.planes,
self.planes,
kernel_size=3,
stride=self.conv2_stride,
bias=False,
trident_dilations=self.trident_dilations,
test_branch_idx=test_branch_idx,
init_cfg=dict(
type='Kaiming',
distribution='uniform',
mode='fan_in',
override=dict(name='conv2')))
def forward(self, x):
def _inner_forward(x):
num_branch = (
self.num_branch
if self.training or self.test_branch_idx == -1 else 1)
identity = x
if not isinstance(x, list):
x = (x, ) * num_branch
identity = x
if self.downsample is not None:
identity = [self.downsample(b) for b in x]
out = [self.conv1(b) for b in x]
out = [self.norm1(b) for b in out]
out = [self.relu(b) for b in out]
if self.with_plugins:
for k in range(len(out)):
out[k] = self.forward_plugin(out[k],
self.after_conv1_plugin_names)
out = self.conv2(out)
out = [self.norm2(b) for b in out]
out = [self.relu(b) for b in out]
if self.with_plugins:
for k in range(len(out)):
out[k] = self.forward_plugin(out[k],
self.after_conv2_plugin_names)
out = [self.conv3(b) for b in out]
out = [self.norm3(b) for b in out]
if self.with_plugins:
for k in range(len(out)):
out[k] = self.forward_plugin(out[k],
self.after_conv3_plugin_names)
out = [
out_b + identity_b for out_b, identity_b in zip(out, identity)
]
return out
if self.with_cp and x.requires_grad:
out = cp.checkpoint(_inner_forward, x)
else:
out = _inner_forward(x)
out = [self.relu(b) for b in out]
if self.concat_output:
out = torch.cat(out, dim=0)
return out
def make_trident_res_layer(block,
inplanes,
planes,
num_blocks,
stride=1,
trident_dilations=(1, 2, 3),
style='pytorch',
with_cp=False,
conv_cfg=None,
norm_cfg=dict(type='BN'),
dcn=None,
plugins=None,
test_branch_idx=-1):
"""Build Trident Res Layers."""
downsample = None
if stride != 1 or inplanes != planes * block.expansion:
downsample = []
conv_stride = stride
downsample.extend([
build_conv_layer(
conv_cfg,
inplanes,
planes * block.expansion,
kernel_size=1,
stride=conv_stride,
bias=False),
build_norm_layer(norm_cfg, planes * block.expansion)[1]
])
downsample = nn.Sequential(*downsample)
layers = []
for i in range(num_blocks):
layers.append(
block(
inplanes=inplanes,
planes=planes,
stride=stride if i == 0 else 1,
trident_dilations=trident_dilations,
downsample=downsample if i == 0 else None,
style=style,
with_cp=with_cp,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
dcn=dcn,
plugins=plugins,
test_branch_idx=test_branch_idx,
concat_output=True if i == num_blocks - 1 else False))
inplanes = planes * block.expansion
return nn.Sequential(*layers)
@BACKBONES.register_module()
class TridentResNet(ResNet):
"""The stem layer, stage 1 and stage 2 in Trident ResNet are identical to
ResNet, while in stage 3, Trident BottleBlock is utilized to replace the
    normal BottleBlock to yield trident output. Different branches share the
    convolution weights but use different dilations to achieve multi-scale
    output.
/ stage3(b0) \
x - stem - stage1 - stage2 - stage3(b1) - output
\ stage3(b2) /
Args:
depth (int): Depth of resnet, from {50, 101, 152}.
num_branch (int): Number of branches in TridentNet.
test_branch_idx (int): In inference, all 3 branches will be used
if `test_branch_idx==-1`, otherwise only branch with index
`test_branch_idx` will be used.
trident_dilations (tuple[int]): Dilations of different trident branch.
len(trident_dilations) should be equal to num_branch.
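    Example:
        >>> # Illustrative config (values assumed): the three branches of
        >>> # the last stage are concatenated along the batch dimension.
        >>> import torch
        >>> self = TridentResNet(50, num_branch=3, test_branch_idx=1,
        ...                      trident_dilations=(1, 2, 3), num_stages=3,
        ...                      strides=(1, 2, 2), dilations=(1, 1, 1),
        ...                      out_indices=(2, ))
        >>> outs = self.forward(torch.rand(1, 3, 64, 64))
        >>> tuple(outs[0].shape)
        (3, 1024, 4, 4)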
""" # noqa
def __init__(self, depth, num_branch, test_branch_idx, trident_dilations,
**kwargs):
assert num_branch == len(trident_dilations)
assert depth in (50, 101, 152)
super(TridentResNet, self).__init__(depth, **kwargs)
assert self.num_stages == 3
self.test_branch_idx = test_branch_idx
self.num_branch = num_branch
last_stage_idx = self.num_stages - 1
stride = self.strides[last_stage_idx]
dilation = trident_dilations
dcn = self.dcn if self.stage_with_dcn[last_stage_idx] else None
if self.plugins is not None:
stage_plugins = self.make_stage_plugins(self.plugins,
last_stage_idx)
else:
stage_plugins = None
planes = self.base_channels * 2**last_stage_idx
res_layer = make_trident_res_layer(
TridentBottleneck,
inplanes=(self.block.expansion * self.base_channels *
2**(last_stage_idx - 1)),
planes=planes,
num_blocks=self.stage_blocks[last_stage_idx],
stride=stride,
trident_dilations=dilation,
style=self.style,
with_cp=self.with_cp,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg,
dcn=dcn,
plugins=stage_plugins,
test_branch_idx=self.test_branch_idx)
layer_name = f'layer{last_stage_idx + 1}'
self.__setattr__(layer_name, res_layer)
self.res_layers.pop(last_stage_idx)
self.res_layers.insert(last_stage_idx, layer_name)
self._freeze_stages()
| 11,129 | 36.22408 | 79 | py |
DSLA-DSLA | DSLA-DSLA/mmdet/models/backbones/detectors_resnext.py | # Copyright (c) OpenMMLab. All rights reserved.
import math
from mmcv.cnn import build_conv_layer, build_norm_layer
from ..builder import BACKBONES
from .detectors_resnet import Bottleneck as _Bottleneck
from .detectors_resnet import DetectoRS_ResNet
class Bottleneck(_Bottleneck):
expansion = 4
def __init__(self,
inplanes,
planes,
groups=1,
base_width=4,
base_channels=64,
**kwargs):
"""Bottleneck block for ResNeXt.
If style is "pytorch", the stride-two layer is the 3x3 conv layer, if
it is "caffe", the stride-two layer is the first 1x1 conv layer.
"""
super(Bottleneck, self).__init__(inplanes, planes, **kwargs)
if groups == 1:
width = self.planes
else:
width = math.floor(self.planes *
(base_width / base_channels)) * groups
self.norm1_name, norm1 = build_norm_layer(
self.norm_cfg, width, postfix=1)
self.norm2_name, norm2 = build_norm_layer(
self.norm_cfg, width, postfix=2)
self.norm3_name, norm3 = build_norm_layer(
self.norm_cfg, self.planes * self.expansion, postfix=3)
self.conv1 = build_conv_layer(
self.conv_cfg,
self.inplanes,
width,
kernel_size=1,
stride=self.conv1_stride,
bias=False)
self.add_module(self.norm1_name, norm1)
fallback_on_stride = False
self.with_modulated_dcn = False
if self.with_dcn:
fallback_on_stride = self.dcn.pop('fallback_on_stride', False)
if self.with_sac:
self.conv2 = build_conv_layer(
self.sac,
width,
width,
kernel_size=3,
stride=self.conv2_stride,
padding=self.dilation,
dilation=self.dilation,
groups=groups,
bias=False)
elif not self.with_dcn or fallback_on_stride:
self.conv2 = build_conv_layer(
self.conv_cfg,
width,
width,
kernel_size=3,
stride=self.conv2_stride,
padding=self.dilation,
dilation=self.dilation,
groups=groups,
bias=False)
else:
assert self.conv_cfg is None, 'conv_cfg must be None for DCN'
self.conv2 = build_conv_layer(
self.dcn,
width,
width,
kernel_size=3,
stride=self.conv2_stride,
padding=self.dilation,
dilation=self.dilation,
groups=groups,
bias=False)
self.add_module(self.norm2_name, norm2)
self.conv3 = build_conv_layer(
self.conv_cfg,
width,
self.planes * self.expansion,
kernel_size=1,
bias=False)
self.add_module(self.norm3_name, norm3)
@BACKBONES.register_module()
class DetectoRS_ResNeXt(DetectoRS_ResNet):
"""ResNeXt backbone for DetectoRS.
Args:
groups (int): The number of groups in ResNeXt.
base_width (int): The base width of ResNeXt.
"""
arch_settings = {
50: (Bottleneck, (3, 4, 6, 3)),
101: (Bottleneck, (3, 4, 23, 3)),
152: (Bottleneck, (3, 8, 36, 3))
}
def __init__(self, groups=1, base_width=4, **kwargs):
self.groups = groups
self.base_width = base_width
super(DetectoRS_ResNeXt, self).__init__(**kwargs)
def make_res_layer(self, **kwargs):
return super().make_res_layer(
groups=self.groups,
base_width=self.base_width,
base_channels=self.base_channels,
**kwargs)
| 3,920 | 30.620968 | 77 | py |
DSLA-DSLA | DSLA-DSLA/mmdet/models/backbones/resnet.py | # Copyright (c) OpenMMLab. All rights reserved.
import warnings
import torch.nn as nn
import torch.utils.checkpoint as cp
from mmcv.cnn import build_conv_layer, build_norm_layer, build_plugin_layer
from mmcv.runner import BaseModule
from torch.nn.modules.batchnorm import _BatchNorm
from ..builder import BACKBONES
from ..utils import ResLayer
class BasicBlock(BaseModule):
expansion = 1
def __init__(self,
inplanes,
planes,
stride=1,
dilation=1,
downsample=None,
style='pytorch',
with_cp=False,
conv_cfg=None,
norm_cfg=dict(type='BN'),
dcn=None,
plugins=None,
init_cfg=None):
super(BasicBlock, self).__init__(init_cfg)
assert dcn is None, 'Not implemented yet.'
assert plugins is None, 'Not implemented yet.'
self.norm1_name, norm1 = build_norm_layer(norm_cfg, planes, postfix=1)
self.norm2_name, norm2 = build_norm_layer(norm_cfg, planes, postfix=2)
self.conv1 = build_conv_layer(
conv_cfg,
inplanes,
planes,
3,
stride=stride,
padding=dilation,
dilation=dilation,
bias=False)
self.add_module(self.norm1_name, norm1)
self.conv2 = build_conv_layer(
conv_cfg, planes, planes, 3, padding=1, bias=False)
self.add_module(self.norm2_name, norm2)
self.relu = nn.ReLU(inplace=True)
self.downsample = downsample
self.stride = stride
self.dilation = dilation
self.with_cp = with_cp
@property
def norm1(self):
"""nn.Module: normalization layer after the first convolution layer"""
return getattr(self, self.norm1_name)
@property
def norm2(self):
"""nn.Module: normalization layer after the second convolution layer"""
return getattr(self, self.norm2_name)
def forward(self, x):
"""Forward function."""
def _inner_forward(x):
identity = x
out = self.conv1(x)
out = self.norm1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.norm2(out)
if self.downsample is not None:
identity = self.downsample(x)
out += identity
return out
if self.with_cp and x.requires_grad:
out = cp.checkpoint(_inner_forward, x)
else:
out = _inner_forward(x)
out = self.relu(out)
return out
class Bottleneck(BaseModule):
expansion = 4
def __init__(self,
inplanes,
planes,
stride=1,
dilation=1,
downsample=None,
style='pytorch',
with_cp=False,
conv_cfg=None,
norm_cfg=dict(type='BN'),
dcn=None,
plugins=None,
init_cfg=None):
"""Bottleneck block for ResNet.
If style is "pytorch", the stride-two layer is the 3x3 conv layer, if
it is "caffe", the stride-two layer is the first 1x1 conv layer.
"""
super(Bottleneck, self).__init__(init_cfg)
assert style in ['pytorch', 'caffe']
assert dcn is None or isinstance(dcn, dict)
assert plugins is None or isinstance(plugins, list)
if plugins is not None:
allowed_position = ['after_conv1', 'after_conv2', 'after_conv3']
assert all(p['position'] in allowed_position for p in plugins)
self.inplanes = inplanes
self.planes = planes
self.stride = stride
self.dilation = dilation
self.style = style
self.with_cp = with_cp
self.conv_cfg = conv_cfg
self.norm_cfg = norm_cfg
self.dcn = dcn
self.with_dcn = dcn is not None
self.plugins = plugins
self.with_plugins = plugins is not None
if self.with_plugins:
# collect plugins for conv1/conv2/conv3
self.after_conv1_plugins = [
plugin['cfg'] for plugin in plugins
if plugin['position'] == 'after_conv1'
]
self.after_conv2_plugins = [
plugin['cfg'] for plugin in plugins
if plugin['position'] == 'after_conv2'
]
self.after_conv3_plugins = [
plugin['cfg'] for plugin in plugins
if plugin['position'] == 'after_conv3'
]
if self.style == 'pytorch':
self.conv1_stride = 1
self.conv2_stride = stride
else:
self.conv1_stride = stride
self.conv2_stride = 1
self.norm1_name, norm1 = build_norm_layer(norm_cfg, planes, postfix=1)
self.norm2_name, norm2 = build_norm_layer(norm_cfg, planes, postfix=2)
self.norm3_name, norm3 = build_norm_layer(
norm_cfg, planes * self.expansion, postfix=3)
self.conv1 = build_conv_layer(
conv_cfg,
inplanes,
planes,
kernel_size=1,
stride=self.conv1_stride,
bias=False)
self.add_module(self.norm1_name, norm1)
fallback_on_stride = False
if self.with_dcn:
fallback_on_stride = dcn.pop('fallback_on_stride', False)
if not self.with_dcn or fallback_on_stride:
self.conv2 = build_conv_layer(
conv_cfg,
planes,
planes,
kernel_size=3,
stride=self.conv2_stride,
padding=dilation,
dilation=dilation,
bias=False)
else:
assert self.conv_cfg is None, 'conv_cfg must be None for DCN'
self.conv2 = build_conv_layer(
dcn,
planes,
planes,
kernel_size=3,
stride=self.conv2_stride,
padding=dilation,
dilation=dilation,
bias=False)
self.add_module(self.norm2_name, norm2)
self.conv3 = build_conv_layer(
conv_cfg,
planes,
planes * self.expansion,
kernel_size=1,
bias=False)
self.add_module(self.norm3_name, norm3)
self.relu = nn.ReLU(inplace=True)
self.downsample = downsample
if self.with_plugins:
self.after_conv1_plugin_names = self.make_block_plugins(
planes, self.after_conv1_plugins)
self.after_conv2_plugin_names = self.make_block_plugins(
planes, self.after_conv2_plugins)
self.after_conv3_plugin_names = self.make_block_plugins(
planes * self.expansion, self.after_conv3_plugins)
def make_block_plugins(self, in_channels, plugins):
"""make plugins for block.
Args:
in_channels (int): Input channels of plugin.
plugins (list[dict]): List of plugins cfg to build.
Returns:
list[str]: List of the names of plugin.
"""
assert isinstance(plugins, list)
plugin_names = []
for plugin in plugins:
plugin = plugin.copy()
name, layer = build_plugin_layer(
plugin,
in_channels=in_channels,
postfix=plugin.pop('postfix', ''))
assert not hasattr(self, name), f'duplicate plugin {name}'
self.add_module(name, layer)
plugin_names.append(name)
return plugin_names
    def forward_plugin(self, x, plugin_names):
        """Forward function for plugins."""
        out = x
        for name in plugin_names:
            # chain the plugins: each one consumes the previous output,
            # not the original input, so that all plugins take effect
            out = getattr(self, name)(out)
        return out
@property
def norm1(self):
"""nn.Module: normalization layer after the first convolution layer"""
return getattr(self, self.norm1_name)
@property
def norm2(self):
"""nn.Module: normalization layer after the second convolution layer"""
return getattr(self, self.norm2_name)
@property
def norm3(self):
"""nn.Module: normalization layer after the third convolution layer"""
return getattr(self, self.norm3_name)
def forward(self, x):
"""Forward function."""
def _inner_forward(x):
identity = x
out = self.conv1(x)
out = self.norm1(out)
out = self.relu(out)
if self.with_plugins:
out = self.forward_plugin(out, self.after_conv1_plugin_names)
out = self.conv2(out)
out = self.norm2(out)
out = self.relu(out)
if self.with_plugins:
out = self.forward_plugin(out, self.after_conv2_plugin_names)
out = self.conv3(out)
out = self.norm3(out)
if self.with_plugins:
out = self.forward_plugin(out, self.after_conv3_plugin_names)
if self.downsample is not None:
identity = self.downsample(x)
out += identity
return out
if self.with_cp and x.requires_grad:
out = cp.checkpoint(_inner_forward, x)
else:
out = _inner_forward(x)
out = self.relu(out)
return out
@BACKBONES.register_module()
class ResNet(BaseModule):
"""ResNet backbone.
Args:
depth (int): Depth of resnet, from {18, 34, 50, 101, 152}.
stem_channels (int | None): Number of stem channels. If not specified,
it will be the same as `base_channels`. Default: None.
base_channels (int): Number of base channels of res layer. Default: 64.
in_channels (int): Number of input image channels. Default: 3.
num_stages (int): Resnet stages. Default: 4.
strides (Sequence[int]): Strides of the first block of each stage.
dilations (Sequence[int]): Dilation of each stage.
out_indices (Sequence[int]): Output from which stages.
style (str): `pytorch` or `caffe`. If set to "pytorch", the stride-two
layer is the 3x3 conv layer, otherwise the stride-two layer is
the first 1x1 conv layer.
        deep_stem (bool): Replace the 7x7 conv in the input stem with
            three 3x3 convs.
avg_down (bool): Use AvgPool instead of stride conv when
downsampling in the bottleneck.
frozen_stages (int): Stages to be frozen (stop grad and set eval mode).
-1 means not freezing any parameters.
norm_cfg (dict): Dictionary to construct and config norm layer.
norm_eval (bool): Whether to set norm layers to eval mode, namely,
freeze running stats (mean and var). Note: Effect on Batch Norm
and its variants only.
plugins (list[dict]): List of plugins for stages, each dict contains:
- cfg (dict, required): Cfg dict to build plugin.
- position (str, required): Position inside block to insert
plugin, options are 'after_conv1', 'after_conv2', 'after_conv3'.
- stages (tuple[bool], optional): Stages to apply plugin, length
should be same as 'num_stages'.
with_cp (bool): Use checkpoint or not. Using checkpoint will save some
memory while slowing down the training speed.
zero_init_residual (bool): Whether to use zero init for last norm layer
in resblocks to let them behave as identity.
pretrained (str, optional): model pretrained path. Default: None
init_cfg (dict or list[dict], optional): Initialization config dict.
Default: None
Example:
>>> from mmdet.models import ResNet
>>> import torch
>>> self = ResNet(depth=18)
>>> self.eval()
>>> inputs = torch.rand(1, 3, 32, 32)
>>> level_outputs = self.forward(inputs)
>>> for level_out in level_outputs:
... print(tuple(level_out.shape))
(1, 64, 8, 8)
(1, 128, 4, 4)
(1, 256, 2, 2)
(1, 512, 1, 1)
"""
arch_settings = {
18: (BasicBlock, (2, 2, 2, 2)),
34: (BasicBlock, (3, 4, 6, 3)),
50: (Bottleneck, (3, 4, 6, 3)),
101: (Bottleneck, (3, 4, 23, 3)),
152: (Bottleneck, (3, 8, 36, 3))
}
def __init__(self,
depth,
in_channels=3,
stem_channels=None,
base_channels=64,
num_stages=4,
strides=(1, 2, 2, 2),
dilations=(1, 1, 1, 1),
out_indices=(0, 1, 2, 3),
style='pytorch',
deep_stem=False,
avg_down=False,
frozen_stages=-1,
conv_cfg=None,
norm_cfg=dict(type='BN', requires_grad=True),
norm_eval=True,
dcn=None,
stage_with_dcn=(False, False, False, False),
plugins=None,
with_cp=False,
zero_init_residual=True,
pretrained=None,
init_cfg=None):
super(ResNet, self).__init__(init_cfg)
self.zero_init_residual = zero_init_residual
if depth not in self.arch_settings:
raise KeyError(f'invalid depth {depth} for resnet')
block_init_cfg = None
assert not (init_cfg and pretrained), \
'init_cfg and pretrained cannot be specified at the same time'
if isinstance(pretrained, str):
warnings.warn('DeprecationWarning: pretrained is deprecated, '
'please use "init_cfg" instead')
self.init_cfg = dict(type='Pretrained', checkpoint=pretrained)
elif pretrained is None:
if init_cfg is None:
self.init_cfg = [
dict(type='Kaiming', layer='Conv2d'),
dict(
type='Constant',
val=1,
layer=['_BatchNorm', 'GroupNorm'])
]
block = self.arch_settings[depth][0]
if self.zero_init_residual:
if block is BasicBlock:
block_init_cfg = dict(
type='Constant',
val=0,
override=dict(name='norm2'))
elif block is Bottleneck:
block_init_cfg = dict(
type='Constant',
val=0,
override=dict(name='norm3'))
else:
raise TypeError('pretrained must be a str or None')
self.depth = depth
if stem_channels is None:
stem_channels = base_channels
self.stem_channels = stem_channels
self.base_channels = base_channels
self.num_stages = num_stages
assert num_stages >= 1 and num_stages <= 4
self.strides = strides
self.dilations = dilations
assert len(strides) == len(dilations) == num_stages
self.out_indices = out_indices
assert max(out_indices) < num_stages
self.style = style
self.deep_stem = deep_stem
self.avg_down = avg_down
self.frozen_stages = frozen_stages
self.conv_cfg = conv_cfg
self.norm_cfg = norm_cfg
self.with_cp = with_cp
self.norm_eval = norm_eval
self.dcn = dcn
self.stage_with_dcn = stage_with_dcn
if dcn is not None:
assert len(stage_with_dcn) == num_stages
self.plugins = plugins
self.block, stage_blocks = self.arch_settings[depth]
self.stage_blocks = stage_blocks[:num_stages]
self.inplanes = stem_channels
self._make_stem_layer(in_channels, stem_channels)
self.res_layers = []
for i, num_blocks in enumerate(self.stage_blocks):
stride = strides[i]
dilation = dilations[i]
dcn = self.dcn if self.stage_with_dcn[i] else None
if plugins is not None:
stage_plugins = self.make_stage_plugins(plugins, i)
else:
stage_plugins = None
planes = base_channels * 2**i
res_layer = self.make_res_layer(
block=self.block,
inplanes=self.inplanes,
planes=planes,
num_blocks=num_blocks,
stride=stride,
dilation=dilation,
style=self.style,
avg_down=self.avg_down,
with_cp=with_cp,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
dcn=dcn,
plugins=stage_plugins,
init_cfg=block_init_cfg)
self.inplanes = planes * self.block.expansion
layer_name = f'layer{i + 1}'
self.add_module(layer_name, res_layer)
self.res_layers.append(layer_name)
self._freeze_stages()
self.feat_dim = self.block.expansion * base_channels * 2**(
len(self.stage_blocks) - 1)
def make_stage_plugins(self, plugins, stage_idx):
"""Make plugins for ResNet ``stage_idx`` th stage.
        Currently we support inserting ``context_block``,
        ``empirical_attention_block`` and ``nonlocal_block`` into the backbone
like ResNet/ResNeXt. They could be inserted after conv1/conv2/conv3 of
Bottleneck.
An example of plugins format could be:
Examples:
>>> plugins=[
... dict(cfg=dict(type='xxx', arg1='xxx'),
... stages=(False, True, True, True),
... position='after_conv2'),
... dict(cfg=dict(type='yyy'),
... stages=(True, True, True, True),
... position='after_conv3'),
... dict(cfg=dict(type='zzz', postfix='1'),
... stages=(True, True, True, True),
... position='after_conv3'),
... dict(cfg=dict(type='zzz', postfix='2'),
... stages=(True, True, True, True),
... position='after_conv3')
... ]
>>> self = ResNet(depth=18)
>>> stage_plugins = self.make_stage_plugins(plugins, 0)
>>> assert len(stage_plugins) == 3
Suppose ``stage_idx=0``, the structure of blocks in the stage would be:
.. code-block:: none
conv1-> conv2->conv3->yyy->zzz1->zzz2
        Suppose ``stage_idx=1``, the structure of blocks in the stage would be:
.. code-block:: none
conv1-> conv2->xxx->conv3->yyy->zzz1->zzz2
If stages is missing, the plugin would be applied to all stages.
Args:
plugins (list[dict]): List of plugins cfg to build. The postfix is
required if multiple same type plugins are inserted.
stage_idx (int): Index of stage to build
Returns:
list[dict]: Plugins for current stage
"""
stage_plugins = []
for plugin in plugins:
plugin = plugin.copy()
stages = plugin.pop('stages', None)
assert stages is None or len(stages) == self.num_stages
# whether to insert plugin into current stage
if stages is None or stages[stage_idx]:
stage_plugins.append(plugin)
return stage_plugins
def make_res_layer(self, **kwargs):
"""Pack all blocks in a stage into a ``ResLayer``."""
return ResLayer(**kwargs)
@property
def norm1(self):
"""nn.Module: the normalization layer named "norm1" """
return getattr(self, self.norm1_name)
def _make_stem_layer(self, in_channels, stem_channels):
if self.deep_stem:
self.stem = nn.Sequential(
build_conv_layer(
self.conv_cfg,
in_channels,
stem_channels // 2,
kernel_size=3,
stride=2,
padding=1,
bias=False),
build_norm_layer(self.norm_cfg, stem_channels // 2)[1],
nn.ReLU(inplace=True),
build_conv_layer(
self.conv_cfg,
stem_channels // 2,
stem_channels // 2,
kernel_size=3,
stride=1,
padding=1,
bias=False),
build_norm_layer(self.norm_cfg, stem_channels // 2)[1],
nn.ReLU(inplace=True),
build_conv_layer(
self.conv_cfg,
stem_channels // 2,
stem_channels,
kernel_size=3,
stride=1,
padding=1,
bias=False),
build_norm_layer(self.norm_cfg, stem_channels)[1],
nn.ReLU(inplace=True))
else:
self.conv1 = build_conv_layer(
self.conv_cfg,
in_channels,
stem_channels,
kernel_size=7,
stride=2,
padding=3,
bias=False)
self.norm1_name, norm1 = build_norm_layer(
self.norm_cfg, stem_channels, postfix=1)
self.add_module(self.norm1_name, norm1)
self.relu = nn.ReLU(inplace=True)
self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
def _freeze_stages(self):
if self.frozen_stages >= 0:
if self.deep_stem:
self.stem.eval()
for param in self.stem.parameters():
param.requires_grad = False
else:
self.norm1.eval()
for m in [self.conv1, self.norm1]:
for param in m.parameters():
param.requires_grad = False
for i in range(1, self.frozen_stages + 1):
m = getattr(self, f'layer{i}')
m.eval()
for param in m.parameters():
param.requires_grad = False
def forward(self, x):
"""Forward function."""
if self.deep_stem:
x = self.stem(x)
else:
x = self.conv1(x)
x = self.norm1(x)
x = self.relu(x)
x = self.maxpool(x)
outs = []
for i, layer_name in enumerate(self.res_layers):
res_layer = getattr(self, layer_name)
x = res_layer(x)
if i in self.out_indices:
outs.append(x)
return tuple(outs)
def train(self, mode=True):
"""Convert the model into training mode while keep normalization layer
freezed."""
super(ResNet, self).train(mode)
self._freeze_stages()
if mode and self.norm_eval:
for m in self.modules():
# trick: eval have effect on BatchNorm only
if isinstance(m, _BatchNorm):
m.eval()
@BACKBONES.register_module()
class ResNetV1d(ResNet):
r"""ResNetV1d variant described in `Bag of Tricks
<https://arxiv.org/pdf/1812.01187.pdf>`_.
    Compared with default ResNet (ResNetV1b), ResNetV1d replaces the 7x7
    conv in the input stem with three 3x3 convs. And in the downsampling
    block, a 2x2 avg_pool with stride 2 is added before conv, whose stride
    is changed to 1.
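    Example:
        >>> # Interface sketch: identical to ResNet apart from the stem
        >>> # and downsample structure (shapes below are illustrative).
        >>> import torch
        >>> self = ResNetV1d(depth=50)
        >>> self.eval()
        >>> outs = self.forward(torch.rand(1, 3, 32, 32))
        >>> tuple(outs[-1].shape)
        (1, 2048, 1, 1)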
"""
def __init__(self, **kwargs):
super(ResNetV1d, self).__init__(
deep_stem=True, avg_down=True, **kwargs)
| 23,838 | 34.421991 | 79 | py |
DSLA-DSLA | DSLA-DSLA/mmdet/models/backbones/detectors_resnet.py | # Copyright (c) OpenMMLab. All rights reserved.
import torch.nn as nn
import torch.utils.checkpoint as cp
from mmcv.cnn import (build_conv_layer, build_norm_layer, constant_init,
kaiming_init)
from mmcv.runner import Sequential, load_checkpoint
from torch.nn.modules.batchnorm import _BatchNorm
from mmdet.utils import get_root_logger
from ..builder import BACKBONES
from .resnet import BasicBlock
from .resnet import Bottleneck as _Bottleneck
from .resnet import ResNet
class Bottleneck(_Bottleneck):
r"""Bottleneck for the ResNet backbone in `DetectoRS
<https://arxiv.org/pdf/2006.02334.pdf>`_.
This bottleneck allows the users to specify whether to use
SAC (Switchable Atrous Convolution) and RFP (Recursive Feature Pyramid).
Args:
inplanes (int): The number of input channels.
planes (int): The number of output channels before expansion.
rfp_inplanes (int, optional): The number of channels from RFP.
Default: None. If specified, an additional conv layer will be
added for ``rfp_feat``. Otherwise, the structure is the same as
base class.
sac (dict, optional): Dictionary to construct SAC. Default: None.
init_cfg (dict or list[dict], optional): Initialization config dict.
Default: None
"""
expansion = 4
def __init__(self,
inplanes,
planes,
rfp_inplanes=None,
sac=None,
init_cfg=None,
**kwargs):
super(Bottleneck, self).__init__(
inplanes, planes, init_cfg=init_cfg, **kwargs)
assert sac is None or isinstance(sac, dict)
self.sac = sac
self.with_sac = sac is not None
if self.with_sac:
self.conv2 = build_conv_layer(
self.sac,
planes,
planes,
kernel_size=3,
stride=self.conv2_stride,
padding=self.dilation,
dilation=self.dilation,
bias=False)
self.rfp_inplanes = rfp_inplanes
if self.rfp_inplanes:
self.rfp_conv = build_conv_layer(
None,
self.rfp_inplanes,
planes * self.expansion,
1,
stride=1,
bias=True)
if init_cfg is None:
self.init_cfg = dict(
type='Constant', val=0, override=dict(name='rfp_conv'))
def rfp_forward(self, x, rfp_feat):
"""The forward function that also takes the RFP features as input."""
def _inner_forward(x):
identity = x
out = self.conv1(x)
out = self.norm1(out)
out = self.relu(out)
if self.with_plugins:
out = self.forward_plugin(out, self.after_conv1_plugin_names)
out = self.conv2(out)
out = self.norm2(out)
out = self.relu(out)
if self.with_plugins:
out = self.forward_plugin(out, self.after_conv2_plugin_names)
out = self.conv3(out)
out = self.norm3(out)
if self.with_plugins:
out = self.forward_plugin(out, self.after_conv3_plugin_names)
if self.downsample is not None:
identity = self.downsample(x)
out += identity
return out
if self.with_cp and x.requires_grad:
out = cp.checkpoint(_inner_forward, x)
else:
out = _inner_forward(x)
if self.rfp_inplanes:
rfp_feat = self.rfp_conv(rfp_feat)
out = out + rfp_feat
out = self.relu(out)
return out
class ResLayer(Sequential):
"""ResLayer to build ResNet style backbone for RPF in detectoRS.
The difference between this module and base class is that we pass
``rfp_inplanes`` to the first block.
Args:
block (nn.Module): block used to build ResLayer.
inplanes (int): inplanes of block.
planes (int): planes of block.
num_blocks (int): number of blocks.
stride (int): stride of the first block. Default: 1
avg_down (bool): Use AvgPool instead of stride conv when
downsampling in the bottleneck. Default: False
conv_cfg (dict): dictionary to construct and config conv layer.
Default: None
norm_cfg (dict): dictionary to construct and config norm layer.
Default: dict(type='BN')
downsample_first (bool): Downsample at the first block or last block.
False for Hourglass, True for ResNet. Default: True
rfp_inplanes (int, optional): The number of channels from RFP.
Default: None. If specified, an additional conv layer will be
added for ``rfp_feat``. Otherwise, the structure is the same as
base class.
"""
def __init__(self,
block,
inplanes,
planes,
num_blocks,
stride=1,
avg_down=False,
conv_cfg=None,
norm_cfg=dict(type='BN'),
downsample_first=True,
rfp_inplanes=None,
**kwargs):
self.block = block
assert downsample_first, f'downsample_first={downsample_first} is ' \
'not supported in DetectoRS'
downsample = None
if stride != 1 or inplanes != planes * block.expansion:
downsample = []
conv_stride = stride
if avg_down and stride != 1:
conv_stride = 1
downsample.append(
nn.AvgPool2d(
kernel_size=stride,
stride=stride,
ceil_mode=True,
count_include_pad=False))
downsample.extend([
build_conv_layer(
conv_cfg,
inplanes,
planes * block.expansion,
kernel_size=1,
stride=conv_stride,
bias=False),
build_norm_layer(norm_cfg, planes * block.expansion)[1]
])
downsample = nn.Sequential(*downsample)
layers = []
layers.append(
block(
inplanes=inplanes,
planes=planes,
stride=stride,
downsample=downsample,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
rfp_inplanes=rfp_inplanes,
**kwargs))
inplanes = planes * block.expansion
for _ in range(1, num_blocks):
layers.append(
block(
inplanes=inplanes,
planes=planes,
stride=1,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
**kwargs))
super(ResLayer, self).__init__(*layers)
@BACKBONES.register_module()
class DetectoRS_ResNet(ResNet):
"""ResNet backbone for DetectoRS.
Args:
sac (dict, optional): Dictionary to construct SAC (Switchable Atrous
Convolution). Default: None.
stage_with_sac (list): Which stage to use sac. Default: (False, False,
False, False).
rfp_inplanes (int, optional): The number of channels from RFP.
Default: None. If specified, an additional conv layer will be
added for ``rfp_feat``. Otherwise, the structure is the same as
base class.
output_img (bool): If ``True``, the input image will be inserted into
the starting position of output. Default: False.
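    Example:
        >>> # Hedged sketch: with ``output_img=True`` the raw image is
        >>> # prepended to the usual multi-level outputs so that RFP can
        >>> # feed it back into the backbone.
        >>> import torch
        >>> self = DetectoRS_ResNet(depth=50, output_img=True)
        >>> self.eval()
        >>> outs = self.forward(torch.rand(1, 3, 32, 32))
        >>> len(outs)  # input image + 4 stage outputs
        5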
"""
arch_settings = {
50: (Bottleneck, (3, 4, 6, 3)),
101: (Bottleneck, (3, 4, 23, 3)),
152: (Bottleneck, (3, 8, 36, 3))
}
def __init__(self,
sac=None,
stage_with_sac=(False, False, False, False),
rfp_inplanes=None,
output_img=False,
pretrained=None,
init_cfg=None,
**kwargs):
assert not (init_cfg and pretrained), \
'init_cfg and pretrained cannot be specified at the same time'
self.pretrained = pretrained
if init_cfg is not None:
assert isinstance(init_cfg, dict), \
f'init_cfg must be a dict, but got {type(init_cfg)}'
if 'type' in init_cfg:
assert init_cfg.get('type') == 'Pretrained', \
'Only can initialize module by loading a pretrained model'
else:
raise KeyError('`init_cfg` must contain the key "type"')
self.pretrained = init_cfg.get('checkpoint')
self.sac = sac
self.stage_with_sac = stage_with_sac
self.rfp_inplanes = rfp_inplanes
self.output_img = output_img
super(DetectoRS_ResNet, self).__init__(**kwargs)
self.inplanes = self.stem_channels
self.res_layers = []
for i, num_blocks in enumerate(self.stage_blocks):
stride = self.strides[i]
dilation = self.dilations[i]
dcn = self.dcn if self.stage_with_dcn[i] else None
sac = self.sac if self.stage_with_sac[i] else None
if self.plugins is not None:
stage_plugins = self.make_stage_plugins(self.plugins, i)
else:
stage_plugins = None
planes = self.base_channels * 2**i
res_layer = self.make_res_layer(
block=self.block,
inplanes=self.inplanes,
planes=planes,
num_blocks=num_blocks,
stride=stride,
dilation=dilation,
style=self.style,
avg_down=self.avg_down,
with_cp=self.with_cp,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg,
dcn=dcn,
sac=sac,
rfp_inplanes=rfp_inplanes if i > 0 else None,
plugins=stage_plugins)
self.inplanes = planes * self.block.expansion
layer_name = f'layer{i + 1}'
self.add_module(layer_name, res_layer)
self.res_layers.append(layer_name)
self._freeze_stages()
# In order to be properly initialized by RFP
def init_weights(self):
        # Calling the parent's init_weights() here would raise a parameter
        # initialization exception, so it is deliberately skipped:
        # super(DetectoRS_ResNet, self).init_weights()
if isinstance(self.pretrained, str):
logger = get_root_logger()
load_checkpoint(self, self.pretrained, strict=False, logger=logger)
elif self.pretrained is None:
for m in self.modules():
if isinstance(m, nn.Conv2d):
kaiming_init(m)
elif isinstance(m, (_BatchNorm, nn.GroupNorm)):
constant_init(m, 1)
if self.dcn is not None:
for m in self.modules():
if isinstance(m, Bottleneck) and hasattr(
m.conv2, 'conv_offset'):
constant_init(m.conv2.conv_offset, 0)
if self.zero_init_residual:
for m in self.modules():
if isinstance(m, Bottleneck):
constant_init(m.norm3, 0)
elif isinstance(m, BasicBlock):
constant_init(m.norm2, 0)
else:
raise TypeError('pretrained must be a str or None')
def make_res_layer(self, **kwargs):
"""Pack all blocks in a stage into a ``ResLayer`` for DetectoRS."""
return ResLayer(**kwargs)
def forward(self, x):
"""Forward function."""
outs = list(super(DetectoRS_ResNet, self).forward(x))
if self.output_img:
outs.insert(0, x)
return tuple(outs)
def rfp_forward(self, x, rfp_feats):
"""Forward function for RFP."""
if self.deep_stem:
x = self.stem(x)
else:
x = self.conv1(x)
x = self.norm1(x)
x = self.relu(x)
x = self.maxpool(x)
outs = []
for i, layer_name in enumerate(self.res_layers):
res_layer = getattr(self, layer_name)
rfp_feat = rfp_feats[i] if i > 0 else None
for layer in res_layer:
x = layer.rfp_forward(x, rfp_feat)
if i in self.out_indices:
outs.append(x)
return tuple(outs)
| 12,736 | 34.980226 | 79 | py |
DSLA-DSLA | DSLA-DSLA/mmdet/models/backbones/ssd_vgg.py | # Copyright (c) OpenMMLab. All rights reserved.
import warnings
import torch.nn as nn
from mmcv.cnn import VGG
from mmcv.runner import BaseModule
from ..builder import BACKBONES
from ..necks import ssd_neck
@BACKBONES.register_module()
class SSDVGG(VGG, BaseModule):
"""VGG Backbone network for single-shot-detection.
Args:
depth (int): Depth of vgg, from {11, 13, 16, 19}.
        with_last_pool (bool): Whether to add a pooling layer at the end
            of the model.
ceil_mode (bool): When True, will use `ceil` instead of `floor`
to compute the output shape.
out_indices (Sequence[int]): Output from which stages.
out_feature_indices (Sequence[int]): Output from which feature map.
pretrained (str, optional): model pretrained path. Default: None
init_cfg (dict or list[dict], optional): Initialization config dict.
Default: None
        input_size (int, optional): Deprecated argument.
            Width and height of input, from {300, 512}.
        l2_norm_scale (float, optional): Deprecated argument.
            L2 normalization layer init scale.
Example:
        >>> import torch
        >>> self = SSDVGG(depth=11)
>>> self.eval()
>>> inputs = torch.rand(1, 3, 300, 300)
>>> level_outputs = self.forward(inputs)
>>> for level_out in level_outputs:
... print(tuple(level_out.shape))
(1, 1024, 19, 19)
(1, 512, 10, 10)
(1, 256, 5, 5)
(1, 256, 3, 3)
(1, 256, 1, 1)
"""
    # Channel configs of the extra layers; 'S' marks that the following
    # conv uses stride 2 for downsampling.
    extra_setting = {
300: (256, 'S', 512, 128, 'S', 256, 128, 256, 128, 256),
512: (256, 'S', 512, 128, 'S', 256, 128, 'S', 256, 128, 'S', 256, 128),
}
def __init__(self,
depth,
with_last_pool=False,
ceil_mode=True,
out_indices=(3, 4),
out_feature_indices=(22, 34),
pretrained=None,
init_cfg=None,
input_size=None,
l2_norm_scale=None):
# TODO: in_channels for mmcv.VGG
super(SSDVGG, self).__init__(
depth,
with_last_pool=with_last_pool,
ceil_mode=ceil_mode,
out_indices=out_indices)
self.features.add_module(
str(len(self.features)),
nn.MaxPool2d(kernel_size=3, stride=1, padding=1))
self.features.add_module(
str(len(self.features)),
nn.Conv2d(512, 1024, kernel_size=3, padding=6, dilation=6))
self.features.add_module(
str(len(self.features)), nn.ReLU(inplace=True))
self.features.add_module(
str(len(self.features)), nn.Conv2d(1024, 1024, kernel_size=1))
self.features.add_module(
str(len(self.features)), nn.ReLU(inplace=True))
self.out_feature_indices = out_feature_indices
assert not (init_cfg and pretrained), \
'init_cfg and pretrained cannot be specified at the same time'
if init_cfg is not None:
self.init_cfg = init_cfg
elif isinstance(pretrained, str):
warnings.warn('DeprecationWarning: pretrained is deprecated, '
'please use "init_cfg" instead')
self.init_cfg = dict(type='Pretrained', checkpoint=pretrained)
elif pretrained is None:
self.init_cfg = [
dict(type='Kaiming', layer='Conv2d'),
dict(type='Constant', val=1, layer='BatchNorm2d'),
dict(type='Normal', std=0.01, layer='Linear'),
]
else:
raise TypeError('pretrained must be a str or None')
if input_size is not None:
warnings.warn('DeprecationWarning: input_size is deprecated')
if l2_norm_scale is not None:
warnings.warn('DeprecationWarning: l2_norm_scale in VGG is '
'deprecated, it has been moved to SSDNeck.')
def init_weights(self, pretrained=None):
super(VGG, self).init_weights()
def forward(self, x):
"""Forward function."""
outs = []
for i, layer in enumerate(self.features):
x = layer(x)
if i in self.out_feature_indices:
outs.append(x)
if len(outs) == 1:
return outs[0]
else:
return tuple(outs)
class L2Norm(ssd_neck.L2Norm):
def __init__(self, **kwargs):
super(L2Norm, self).__init__(**kwargs)
warnings.warn('DeprecationWarning: L2Norm in ssd_vgg.py '
'is deprecated, please use L2Norm in '
'mmdet/models/necks/ssd_neck.py instead')
| 4,705 | 35.48062 | 79 | py |
DSLA-DSLA | DSLA-DSLA/mmdet/models/backbones/resnext.py | # Copyright (c) OpenMMLab. All rights reserved.
import math
from mmcv.cnn import build_conv_layer, build_norm_layer
from ..builder import BACKBONES
from ..utils import ResLayer
from .resnet import Bottleneck as _Bottleneck
from .resnet import ResNet
class Bottleneck(_Bottleneck):
expansion = 4
def __init__(self,
inplanes,
planes,
groups=1,
base_width=4,
base_channels=64,
**kwargs):
"""Bottleneck block for ResNeXt.
If style is "pytorch", the stride-two layer is the 3x3 conv layer, if
it is "caffe", the stride-two layer is the first 1x1 conv layer.
"""
super(Bottleneck, self).__init__(inplanes, planes, **kwargs)
if groups == 1:
width = self.planes
else:
width = math.floor(self.planes *
(base_width / base_channels)) * groups
self.norm1_name, norm1 = build_norm_layer(
self.norm_cfg, width, postfix=1)
self.norm2_name, norm2 = build_norm_layer(
self.norm_cfg, width, postfix=2)
self.norm3_name, norm3 = build_norm_layer(
self.norm_cfg, self.planes * self.expansion, postfix=3)
self.conv1 = build_conv_layer(
self.conv_cfg,
self.inplanes,
width,
kernel_size=1,
stride=self.conv1_stride,
bias=False)
self.add_module(self.norm1_name, norm1)
fallback_on_stride = False
self.with_modulated_dcn = False
if self.with_dcn:
fallback_on_stride = self.dcn.pop('fallback_on_stride', False)
if not self.with_dcn or fallback_on_stride:
self.conv2 = build_conv_layer(
self.conv_cfg,
width,
width,
kernel_size=3,
stride=self.conv2_stride,
padding=self.dilation,
dilation=self.dilation,
groups=groups,
bias=False)
else:
assert self.conv_cfg is None, 'conv_cfg must be None for DCN'
self.conv2 = build_conv_layer(
self.dcn,
width,
width,
kernel_size=3,
stride=self.conv2_stride,
padding=self.dilation,
dilation=self.dilation,
groups=groups,
bias=False)
self.add_module(self.norm2_name, norm2)
self.conv3 = build_conv_layer(
self.conv_cfg,
width,
self.planes * self.expansion,
kernel_size=1,
bias=False)
self.add_module(self.norm3_name, norm3)
if self.with_plugins:
self._del_block_plugins(self.after_conv1_plugin_names +
self.after_conv2_plugin_names +
self.after_conv3_plugin_names)
self.after_conv1_plugin_names = self.make_block_plugins(
width, self.after_conv1_plugins)
self.after_conv2_plugin_names = self.make_block_plugins(
width, self.after_conv2_plugins)
self.after_conv3_plugin_names = self.make_block_plugins(
self.planes * self.expansion, self.after_conv3_plugins)
def _del_block_plugins(self, plugin_names):
"""delete plugins for block if exist.
Args:
plugin_names (list[str]): List of plugins name to delete.
"""
assert isinstance(plugin_names, list)
for plugin_name in plugin_names:
del self._modules[plugin_name]
@BACKBONES.register_module()
class ResNeXt(ResNet):
"""ResNeXt backbone.
Args:
        depth (int): Depth of resnext, from {50, 101, 152}.
in_channels (int): Number of input image channels. Default: 3.
num_stages (int): Resnet stages. Default: 4.
groups (int): Group of resnext.
base_width (int): Base width of resnext.
strides (Sequence[int]): Strides of the first block of each stage.
dilations (Sequence[int]): Dilation of each stage.
out_indices (Sequence[int]): Output from which stages.
style (str): `pytorch` or `caffe`. If set to "pytorch", the stride-two
layer is the 3x3 conv layer, otherwise the stride-two layer is
the first 1x1 conv layer.
frozen_stages (int): Stages to be frozen (all param fixed). -1 means
not freezing any parameters.
norm_cfg (dict): dictionary to construct and config norm layer.
norm_eval (bool): Whether to set norm layers to eval mode, namely,
freeze running stats (mean and var). Note: Effect on Batch Norm
and its variants only.
with_cp (bool): Use checkpoint or not. Using checkpoint will save some
memory while slowing down the training speed.
zero_init_residual (bool): whether to use zero init for last norm layer
in resblocks to let them behave as identity.
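    Example:
        >>> # A minimal sketch; output shapes assume the default
        >>> # ``out_indices`` of the parent ResNet.
        >>> from mmdet.models import ResNeXt
        >>> import torch
        >>> self = ResNeXt(depth=50, groups=32, base_width=4)
        >>> self.eval()
        >>> inputs = torch.rand(1, 3, 32, 32)
        >>> level_outputs = self.forward(inputs)
        >>> for level_out in level_outputs:
        ...     print(tuple(level_out.shape))
        (1, 256, 8, 8)
        (1, 512, 4, 4)
        (1, 1024, 2, 2)
        (1, 2048, 1, 1)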
"""
arch_settings = {
50: (Bottleneck, (3, 4, 6, 3)),
101: (Bottleneck, (3, 4, 23, 3)),
152: (Bottleneck, (3, 8, 36, 3))
}
def __init__(self, groups=1, base_width=4, **kwargs):
self.groups = groups
self.base_width = base_width
super(ResNeXt, self).__init__(**kwargs)
def make_res_layer(self, **kwargs):
"""Pack all blocks in a stage into a ``ResLayer``"""
return ResLayer(
groups=self.groups,
base_width=self.base_width,
base_channels=self.base_channels,
**kwargs)
| 5,712 | 35.858065 | 79 | py |
DSLA-DSLA | DSLA-DSLA/mmdet/models/backbones/resnest.py | # Copyright (c) OpenMMLab. All rights reserved.
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.checkpoint as cp
from mmcv.cnn import build_conv_layer, build_norm_layer
from mmcv.runner import BaseModule
from ..builder import BACKBONES
from ..utils import ResLayer
from .resnet import Bottleneck as _Bottleneck
from .resnet import ResNetV1d
class RSoftmax(nn.Module):
"""Radix Softmax module in ``SplitAttentionConv2d``.
Args:
radix (int): Radix of input.
groups (int): Groups of input.
"""
def __init__(self, radix, groups):
super().__init__()
self.radix = radix
self.groups = groups
def forward(self, x):
batch = x.size(0)
if self.radix > 1:
x = x.view(batch, self.groups, self.radix, -1).transpose(1, 2)
x = F.softmax(x, dim=1)
x = x.reshape(batch, -1)
else:
x = torch.sigmoid(x)
return x
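# A worked shape sketch for RSoftmax (radix=2, groups=1; sizes illustrative):
# an input of shape (B, radix * C, ...) is viewed as (B, groups, radix, -1)
# and transposed so the softmax runs over the radix axis, then flattened back.
#
# >>> import torch
# >>> m = RSoftmax(radix=2, groups=1)
# >>> x = torch.rand(4, 2 * 8)  # (batch, radix * channels)
# >>> m(x).shape
# torch.Size([4, 16])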
class SplitAttentionConv2d(BaseModule):
"""Split-Attention Conv2d in ResNeSt.
Args:
in_channels (int): Number of channels in the input feature map.
channels (int): Number of intermediate channels.
kernel_size (int | tuple[int]): Size of the convolution kernel.
stride (int | tuple[int]): Stride of the convolution.
        padding (int | tuple[int]): Zero-padding added to both sides of
            the input. Default: 0.
        dilation (int | tuple[int]): Spacing between kernel elements.
        groups (int): Number of blocked connections from input channels to
            output channels. Same as nn.Conv2d.
        radix (int): Radix of SplitAttentionConv2d. Default: 2.
reduction_factor (int): Reduction factor of inter_channels. Default: 4.
conv_cfg (dict): Config dict for convolution layer. Default: None,
which means using conv2d.
norm_cfg (dict): Config dict for normalization layer. Default: None.
dcn (dict): Config dict for DCN. Default: None.
init_cfg (dict or list[dict], optional): Initialization config dict.
Default: None
"""
def __init__(self,
in_channels,
channels,
kernel_size,
stride=1,
padding=0,
dilation=1,
groups=1,
radix=2,
reduction_factor=4,
conv_cfg=None,
norm_cfg=dict(type='BN'),
dcn=None,
init_cfg=None):
super(SplitAttentionConv2d, self).__init__(init_cfg)
inter_channels = max(in_channels * radix // reduction_factor, 32)
self.radix = radix
self.groups = groups
self.channels = channels
self.with_dcn = dcn is not None
self.dcn = dcn
fallback_on_stride = False
if self.with_dcn:
fallback_on_stride = self.dcn.pop('fallback_on_stride', False)
if self.with_dcn and not fallback_on_stride:
assert conv_cfg is None, 'conv_cfg must be None for DCN'
conv_cfg = dcn
self.conv = build_conv_layer(
conv_cfg,
in_channels,
channels * radix,
kernel_size,
stride=stride,
padding=padding,
dilation=dilation,
groups=groups * radix,
bias=False)
# To be consistent with original implementation, starting from 0
self.norm0_name, norm0 = build_norm_layer(
norm_cfg, channels * radix, postfix=0)
self.add_module(self.norm0_name, norm0)
self.relu = nn.ReLU(inplace=True)
self.fc1 = build_conv_layer(
None, channels, inter_channels, 1, groups=self.groups)
self.norm1_name, norm1 = build_norm_layer(
norm_cfg, inter_channels, postfix=1)
self.add_module(self.norm1_name, norm1)
self.fc2 = build_conv_layer(
None, inter_channels, channels * radix, 1, groups=self.groups)
self.rsoftmax = RSoftmax(radix, groups)
@property
def norm0(self):
"""nn.Module: the normalization layer named "norm0" """
return getattr(self, self.norm0_name)
@property
def norm1(self):
"""nn.Module: the normalization layer named "norm1" """
return getattr(self, self.norm1_name)
def forward(self, x):
x = self.conv(x)
x = self.norm0(x)
x = self.relu(x)
        batch = x.size(0)
if self.radix > 1:
splits = x.view(batch, self.radix, -1, *x.shape[2:])
gap = splits.sum(dim=1)
else:
gap = x
gap = F.adaptive_avg_pool2d(gap, 1)
gap = self.fc1(gap)
gap = self.norm1(gap)
gap = self.relu(gap)
atten = self.fc2(gap)
atten = self.rsoftmax(atten).view(batch, -1, 1, 1)
if self.radix > 1:
attens = atten.view(batch, self.radix, -1, *atten.shape[2:])
out = torch.sum(attens * splits, dim=1)
else:
out = atten * x
return out.contiguous()
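# A minimal usage sketch for SplitAttentionConv2d (default BN config; values
# illustrative): the spatial size is preserved for stride=1 and the output
# has ``channels`` channels regardless of ``radix``.
#
# >>> import torch
# >>> conv = SplitAttentionConv2d(64, 64, kernel_size=3, padding=1, radix=2)
# >>> conv.eval()
# >>> conv(torch.rand(2, 64, 16, 16)).shape
# torch.Size([2, 64, 16, 16])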
class Bottleneck(_Bottleneck):
"""Bottleneck block for ResNeSt.
Args:
        inplanes (int): Input planes of this block.
planes (int): Middle planes of this block.
groups (int): Groups of conv2.
base_width (int): Base of width in terms of base channels. Default: 4.
base_channels (int): Base of channels for calculating width.
Default: 64.
        radix (int): Radix of SplitAttentionConv2d. Default: 2.
reduction_factor (int): Reduction factor of inter_channels in
SplitAttentionConv2d. Default: 4.
avg_down_stride (bool): Whether to use average pool for stride in
Bottleneck. Default: True.
kwargs (dict): Key word arguments for base class.
"""
expansion = 4
def __init__(self,
inplanes,
planes,
groups=1,
base_width=4,
base_channels=64,
radix=2,
reduction_factor=4,
avg_down_stride=True,
**kwargs):
"""Bottleneck block for ResNeSt."""
super(Bottleneck, self).__init__(inplanes, planes, **kwargs)
if groups == 1:
width = self.planes
else:
width = math.floor(self.planes *
(base_width / base_channels)) * groups
self.avg_down_stride = avg_down_stride and self.conv2_stride > 1
self.norm1_name, norm1 = build_norm_layer(
self.norm_cfg, width, postfix=1)
self.norm3_name, norm3 = build_norm_layer(
self.norm_cfg, self.planes * self.expansion, postfix=3)
self.conv1 = build_conv_layer(
self.conv_cfg,
self.inplanes,
width,
kernel_size=1,
stride=self.conv1_stride,
bias=False)
self.add_module(self.norm1_name, norm1)
self.with_modulated_dcn = False
self.conv2 = SplitAttentionConv2d(
width,
width,
kernel_size=3,
stride=1 if self.avg_down_stride else self.conv2_stride,
padding=self.dilation,
dilation=self.dilation,
groups=groups,
radix=radix,
reduction_factor=reduction_factor,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg,
dcn=self.dcn)
delattr(self, self.norm2_name)
if self.avg_down_stride:
self.avd_layer = nn.AvgPool2d(3, self.conv2_stride, padding=1)
self.conv3 = build_conv_layer(
self.conv_cfg,
width,
self.planes * self.expansion,
kernel_size=1,
bias=False)
self.add_module(self.norm3_name, norm3)
def forward(self, x):
def _inner_forward(x):
identity = x
out = self.conv1(x)
out = self.norm1(out)
out = self.relu(out)
if self.with_plugins:
out = self.forward_plugin(out, self.after_conv1_plugin_names)
out = self.conv2(out)
if self.avg_down_stride:
out = self.avd_layer(out)
if self.with_plugins:
out = self.forward_plugin(out, self.after_conv2_plugin_names)
out = self.conv3(out)
out = self.norm3(out)
if self.with_plugins:
out = self.forward_plugin(out, self.after_conv3_plugin_names)
if self.downsample is not None:
identity = self.downsample(x)
out += identity
return out
if self.with_cp and x.requires_grad:
out = cp.checkpoint(_inner_forward, x)
else:
out = _inner_forward(x)
out = self.relu(out)
return out
@BACKBONES.register_module()
class ResNeSt(ResNetV1d):
"""ResNeSt backbone.
Args:
groups (int): Number of groups of Bottleneck. Default: 1
base_width (int): Base width of Bottleneck. Default: 4
radix (int): Radix of SplitAttentionConv2d. Default: 2
reduction_factor (int): Reduction factor of inter_channels in
SplitAttentionConv2d. Default: 4.
avg_down_stride (bool): Whether to use average pool for stride in
Bottleneck. Default: True.
kwargs (dict): Keyword arguments for ResNet.
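    Example:
        >>> # A minimal sketch; output shapes assume the default
        >>> # ``out_indices`` of the parent ResNetV1d.
        >>> from mmdet.models import ResNeSt
        >>> import torch
        >>> self = ResNeSt(depth=50, radix=2, reduction_factor=4)
        >>> self.eval()
        >>> inputs = torch.rand(1, 3, 32, 32)
        >>> level_outputs = self.forward(inputs)
        >>> for level_out in level_outputs:
        ...     print(tuple(level_out.shape))
        (1, 256, 8, 8)
        (1, 512, 4, 4)
        (1, 1024, 2, 2)
        (1, 2048, 1, 1)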
"""
arch_settings = {
50: (Bottleneck, (3, 4, 6, 3)),
101: (Bottleneck, (3, 4, 23, 3)),
152: (Bottleneck, (3, 8, 36, 3)),
200: (Bottleneck, (3, 24, 36, 3))
}
def __init__(self,
groups=1,
base_width=4,
radix=2,
reduction_factor=4,
avg_down_stride=True,
**kwargs):
self.groups = groups
self.base_width = base_width
self.radix = radix
self.reduction_factor = reduction_factor
self.avg_down_stride = avg_down_stride
super(ResNeSt, self).__init__(**kwargs)
def make_res_layer(self, **kwargs):
"""Pack all blocks in a stage into a ``ResLayer``."""
return ResLayer(
groups=self.groups,
base_width=self.base_width,
base_channels=self.base_channels,
radix=self.radix,
reduction_factor=self.reduction_factor,
avg_down_stride=self.avg_down_stride,
**kwargs)
| 10,579 | 31.755418 | 79 | py |
DSLA-DSLA | DSLA-DSLA/mmdet/models/backbones/csp_darknet.py | # Copyright (c) OpenMMLab. All rights reserved.
import math
import torch
import torch.nn as nn
from mmcv.cnn import ConvModule, DepthwiseSeparableConvModule
from mmcv.runner import BaseModule
from torch.nn.modules.batchnorm import _BatchNorm
from ..builder import BACKBONES
from ..utils import CSPLayer
class Focus(nn.Module):
"""Focus width and height information into channel space.
Args:
in_channels (int): The input channels of this Module.
out_channels (int): The output channels of this Module.
kernel_size (int): The kernel size of the convolution. Default: 1
stride (int): The stride of the convolution. Default: 1
conv_cfg (dict): Config dict for convolution layer. Default: None,
which means using conv2d.
norm_cfg (dict): Config dict for normalization layer.
Default: dict(type='BN', momentum=0.03, eps=0.001).
act_cfg (dict): Config dict for activation layer.
Default: dict(type='Swish').
"""
def __init__(self,
in_channels,
out_channels,
kernel_size=1,
stride=1,
conv_cfg=None,
norm_cfg=dict(type='BN', momentum=0.03, eps=0.001),
act_cfg=dict(type='Swish')):
super().__init__()
self.conv = ConvModule(
in_channels * 4,
out_channels,
kernel_size,
stride,
padding=(kernel_size - 1) // 2,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
act_cfg=act_cfg)
def forward(self, x):
# shape of x (b,c,w,h) -> y(b,4c,w/2,h/2)
patch_top_left = x[..., ::2, ::2]
patch_top_right = x[..., ::2, 1::2]
patch_bot_left = x[..., 1::2, ::2]
patch_bot_right = x[..., 1::2, 1::2]
x = torch.cat(
(
patch_top_left,
patch_bot_left,
patch_top_right,
patch_bot_right,
),
dim=1,
)
return self.conv(x)
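# A worked sketch of the Focus rearrangement (sizes illustrative): each 2x2
# pixel neighbourhood moves onto the channel axis, so the spatial size halves
# and the channels quadruple before the fused conv.
#
# >>> import torch
# >>> focus = Focus(3, 32, kernel_size=3)
# >>> focus.eval()
# >>> focus(torch.rand(1, 3, 64, 64)).shape
# torch.Size([1, 32, 32, 32])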
class SPPBottleneck(BaseModule):
"""Spatial pyramid pooling layer used in YOLOv3-SPP.
Args:
in_channels (int): The input channels of this Module.
out_channels (int): The output channels of this Module.
        kernel_sizes (tuple[int]): Sequence of kernel sizes of the pooling
            layers. Default: (5, 9, 13).
conv_cfg (dict): Config dict for convolution layer. Default: None,
which means using conv2d.
norm_cfg (dict): Config dict for normalization layer.
Default: dict(type='BN').
act_cfg (dict): Config dict for activation layer.
Default: dict(type='Swish').
init_cfg (dict or list[dict], optional): Initialization config dict.
Default: None.
"""
def __init__(self,
in_channels,
out_channels,
kernel_sizes=(5, 9, 13),
conv_cfg=None,
norm_cfg=dict(type='BN', momentum=0.03, eps=0.001),
act_cfg=dict(type='Swish'),
init_cfg=None):
super().__init__(init_cfg)
mid_channels = in_channels // 2
self.conv1 = ConvModule(
in_channels,
mid_channels,
1,
stride=1,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
act_cfg=act_cfg)
self.poolings = nn.ModuleList([
nn.MaxPool2d(kernel_size=ks, stride=1, padding=ks // 2)
for ks in kernel_sizes
])
conv2_channels = mid_channels * (len(kernel_sizes) + 1)
self.conv2 = ConvModule(
conv2_channels,
out_channels,
1,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
act_cfg=act_cfg)
def forward(self, x):
x = self.conv1(x)
x = torch.cat([x] + [pooling(x) for pooling in self.poolings], dim=1)
x = self.conv2(x)
return x
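# A minimal shape sketch for SPPBottleneck (values illustrative): the
# stride-1 max pools keep the spatial size, so only the channel count
# changes.
#
# >>> import torch
# >>> spp = SPPBottleneck(512, 1024)
# >>> spp.eval()
# >>> spp(torch.rand(1, 512, 13, 13)).shape
# torch.Size([1, 1024, 13, 13])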
@BACKBONES.register_module()
class CSPDarknet(BaseModule):
"""CSP-Darknet backbone used in YOLOv5 and YOLOX.
Args:
arch (str): Architecture of CSP-Darknet, from {P5, P6}.
Default: P5.
        deepen_factor (float): Depth multiplier, multiply number of
            blocks in CSP layer by this amount. Default: 1.0.
        widen_factor (float): Width multiplier, multiply number of
            channels in each layer by this amount. Default: 1.0.
out_indices (Sequence[int]): Output from which stages.
Default: (2, 3, 4).
frozen_stages (int): Stages to be frozen (stop grad and set eval
mode). -1 means not freezing any parameters. Default: -1.
use_depthwise (bool): Whether to use depthwise separable convolution.
Default: False.
        arch_ovewrite (list): Overwrite default arch settings. Default: None.
        spp_kernal_sizes (tuple[int]): Sequence of kernel sizes of SPP
            layers. Default: (5, 9, 13).
conv_cfg (dict): Config dict for convolution layer. Default: None.
norm_cfg (dict): Dictionary to construct and config norm layer.
Default: dict(type='BN', requires_grad=True).
act_cfg (dict): Config dict for activation layer.
Default: dict(type='LeakyReLU', negative_slope=0.1).
norm_eval (bool): Whether to set norm layers to eval mode, namely,
freeze running stats (mean and var). Note: Effect on Batch Norm
and its variants only.
init_cfg (dict or list[dict], optional): Initialization config dict.
Default: None.
Example:
>>> from mmdet.models import CSPDarknet
>>> import torch
        >>> self = CSPDarknet(arch='P5')
>>> self.eval()
>>> inputs = torch.rand(1, 3, 416, 416)
>>> level_outputs = self.forward(inputs)
>>> for level_out in level_outputs:
... print(tuple(level_out.shape))
...
(1, 256, 52, 52)
(1, 512, 26, 26)
(1, 1024, 13, 13)
"""
# From left to right:
# in_channels, out_channels, num_blocks, add_identity, use_spp
arch_settings = {
'P5': [[64, 128, 3, True, False], [128, 256, 9, True, False],
[256, 512, 9, True, False], [512, 1024, 3, False, True]],
'P6': [[64, 128, 3, True, False], [128, 256, 9, True, False],
[256, 512, 9, True, False], [512, 768, 3, True, False],
[768, 1024, 3, False, True]]
}
def __init__(self,
arch='P5',
deepen_factor=1.0,
widen_factor=1.0,
out_indices=(2, 3, 4),
frozen_stages=-1,
use_depthwise=False,
arch_ovewrite=None,
spp_kernal_sizes=(5, 9, 13),
conv_cfg=None,
norm_cfg=dict(type='BN', momentum=0.03, eps=0.001),
act_cfg=dict(type='Swish'),
norm_eval=False,
init_cfg=dict(
type='Kaiming',
layer='Conv2d',
a=math.sqrt(5),
distribution='uniform',
mode='fan_in',
nonlinearity='leaky_relu')):
super().__init__(init_cfg)
arch_setting = self.arch_settings[arch]
if arch_ovewrite:
arch_setting = arch_ovewrite
assert set(out_indices).issubset(
i for i in range(len(arch_setting) + 1))
if frozen_stages not in range(-1, len(arch_setting) + 1):
raise ValueError('frozen_stages must be in range(-1, '
'len(arch_setting) + 1). But received '
f'{frozen_stages}')
self.out_indices = out_indices
self.frozen_stages = frozen_stages
self.use_depthwise = use_depthwise
self.norm_eval = norm_eval
conv = DepthwiseSeparableConvModule if use_depthwise else ConvModule
self.stem = Focus(
3,
int(arch_setting[0][0] * widen_factor),
kernel_size=3,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
act_cfg=act_cfg)
self.layers = ['stem']
for i, (in_channels, out_channels, num_blocks, add_identity,
use_spp) in enumerate(arch_setting):
in_channels = int(in_channels * widen_factor)
out_channels = int(out_channels * widen_factor)
num_blocks = max(round(num_blocks * deepen_factor), 1)
stage = []
conv_layer = conv(
in_channels,
out_channels,
3,
stride=2,
padding=1,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
act_cfg=act_cfg)
stage.append(conv_layer)
if use_spp:
spp = SPPBottleneck(
out_channels,
out_channels,
kernel_sizes=spp_kernal_sizes,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
act_cfg=act_cfg)
stage.append(spp)
csp_layer = CSPLayer(
out_channels,
out_channels,
num_blocks=num_blocks,
add_identity=add_identity,
use_depthwise=use_depthwise,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
act_cfg=act_cfg)
stage.append(csp_layer)
self.add_module(f'stage{i + 1}', nn.Sequential(*stage))
self.layers.append(f'stage{i + 1}')
def _freeze_stages(self):
if self.frozen_stages >= 0:
for i in range(self.frozen_stages + 1):
m = getattr(self, self.layers[i])
m.eval()
for param in m.parameters():
param.requires_grad = False
def train(self, mode=True):
super(CSPDarknet, self).train(mode)
self._freeze_stages()
if mode and self.norm_eval:
for m in self.modules():
if isinstance(m, _BatchNorm):
m.eval()
def forward(self, x):
outs = []
for i, layer_name in enumerate(self.layers):
layer = getattr(self, layer_name)
x = layer(x)
if i in self.out_indices:
outs.append(x)
return tuple(outs)
| 10,543 | 35.996491 | 77 | py |
DSLA-DSLA | DSLA-DSLA/mmdet/models/backbones/hourglass.py | # Copyright (c) OpenMMLab. All rights reserved.
import torch.nn as nn
import torch.nn.functional as F
from mmcv.cnn import ConvModule
from mmcv.runner import BaseModule
from ..builder import BACKBONES
from ..utils import ResLayer
from .resnet import BasicBlock
class HourglassModule(BaseModule):
"""Hourglass Module for HourglassNet backbone.
Generate module recursively and use BasicBlock as the base unit.
Args:
depth (int): Depth of current HourglassModule.
stage_channels (list[int]): Feature channels of sub-modules in current
and follow-up HourglassModule.
stage_blocks (list[int]): Number of sub-modules stacked in current and
follow-up HourglassModule.
norm_cfg (dict): Dictionary to construct and config norm layer.
init_cfg (dict or list[dict], optional): Initialization config dict.
Default: None
upsample_cfg (dict, optional): Config dict for interpolate layer.
Default: `dict(mode='nearest')`
"""
def __init__(self,
depth,
stage_channels,
stage_blocks,
norm_cfg=dict(type='BN', requires_grad=True),
init_cfg=None,
upsample_cfg=dict(mode='nearest')):
super(HourglassModule, self).__init__(init_cfg)
self.depth = depth
cur_block = stage_blocks[0]
next_block = stage_blocks[1]
cur_channel = stage_channels[0]
next_channel = stage_channels[1]
self.up1 = ResLayer(
BasicBlock, cur_channel, cur_channel, cur_block, norm_cfg=norm_cfg)
self.low1 = ResLayer(
BasicBlock,
cur_channel,
next_channel,
cur_block,
stride=2,
norm_cfg=norm_cfg)
if self.depth > 1:
self.low2 = HourglassModule(depth - 1, stage_channels[1:],
stage_blocks[1:])
else:
self.low2 = ResLayer(
BasicBlock,
next_channel,
next_channel,
next_block,
norm_cfg=norm_cfg)
self.low3 = ResLayer(
BasicBlock,
next_channel,
cur_channel,
cur_block,
norm_cfg=norm_cfg,
downsample_first=False)
self.up2 = F.interpolate
self.upsample_cfg = upsample_cfg
def forward(self, x):
"""Forward function."""
up1 = self.up1(x)
low1 = self.low1(x)
low2 = self.low2(low1)
low3 = self.low3(low2)
        # Fixing a `scale_factor` (e.g. 2) is common for upsampling, but in
        # some cases the spatial sizes mismatch and an error would arise.
if 'scale_factor' in self.upsample_cfg:
up2 = self.up2(low3, **self.upsample_cfg)
else:
shape = up1.shape[2:]
up2 = self.up2(low3, size=shape, **self.upsample_cfg)
return up1 + up2
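# A minimal sketch of the recursion (values illustrative): modules nest until
# ``depth == 1``, and the input and output spatial sizes match, which the
# ``up1 + up2`` residual relies on.
#
# >>> import torch
# >>> hg = HourglassModule(2, [256, 256, 384], [2, 2, 2])
# >>> hg.eval()
# >>> hg(torch.rand(1, 256, 32, 32)).shape
# torch.Size([1, 256, 32, 32])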
@BACKBONES.register_module()
class HourglassNet(BaseModule):
"""HourglassNet backbone.
Stacked Hourglass Networks for Human Pose Estimation.
More details can be found in the `paper
<https://arxiv.org/abs/1603.06937>`_ .
Args:
downsample_times (int): Downsample times in a HourglassModule.
num_stacks (int): Number of HourglassModule modules stacked,
1 for Hourglass-52, 2 for Hourglass-104.
stage_channels (list[int]): Feature channel of each sub-module in a
HourglassModule.
stage_blocks (list[int]): Number of sub-modules stacked in a
HourglassModule.
feat_channel (int): Feature channel of conv after a HourglassModule.
norm_cfg (dict): Dictionary to construct and config norm layer.
pretrained (str, optional): model pretrained path. Default: None
init_cfg (dict or list[dict], optional): Initialization config dict.
Default: None
Example:
>>> from mmdet.models import HourglassNet
>>> import torch
>>> self = HourglassNet()
>>> self.eval()
>>> inputs = torch.rand(1, 3, 511, 511)
>>> level_outputs = self.forward(inputs)
>>> for level_output in level_outputs:
... print(tuple(level_output.shape))
(1, 256, 128, 128)
(1, 256, 128, 128)
"""
def __init__(self,
downsample_times=5,
num_stacks=2,
stage_channels=(256, 256, 384, 384, 384, 512),
stage_blocks=(2, 2, 2, 2, 2, 4),
feat_channel=256,
norm_cfg=dict(type='BN', requires_grad=True),
pretrained=None,
init_cfg=None):
assert init_cfg is None, 'To prevent abnormal initialization ' \
'behavior, init_cfg is not allowed to be set'
super(HourglassNet, self).__init__(init_cfg)
self.num_stacks = num_stacks
assert self.num_stacks >= 1
assert len(stage_channels) == len(stage_blocks)
assert len(stage_channels) > downsample_times
cur_channel = stage_channels[0]
self.stem = nn.Sequential(
ConvModule(
3, cur_channel // 2, 7, padding=3, stride=2,
norm_cfg=norm_cfg),
ResLayer(
BasicBlock,
cur_channel // 2,
cur_channel,
1,
stride=2,
norm_cfg=norm_cfg))
self.hourglass_modules = nn.ModuleList([
HourglassModule(downsample_times, stage_channels, stage_blocks)
for _ in range(num_stacks)
])
self.inters = ResLayer(
BasicBlock,
cur_channel,
cur_channel,
num_stacks - 1,
norm_cfg=norm_cfg)
self.conv1x1s = nn.ModuleList([
ConvModule(
cur_channel, cur_channel, 1, norm_cfg=norm_cfg, act_cfg=None)
for _ in range(num_stacks - 1)
])
self.out_convs = nn.ModuleList([
ConvModule(
cur_channel, feat_channel, 3, padding=1, norm_cfg=norm_cfg)
for _ in range(num_stacks)
])
self.remap_convs = nn.ModuleList([
ConvModule(
feat_channel, cur_channel, 1, norm_cfg=norm_cfg, act_cfg=None)
for _ in range(num_stacks - 1)
])
self.relu = nn.ReLU(inplace=True)
def init_weights(self):
"""Init module weights."""
# Training Centripetal Model needs to reset parameters for Conv2d
super(HourglassNet, self).init_weights()
for m in self.modules():
if isinstance(m, nn.Conv2d):
m.reset_parameters()
def forward(self, x):
"""Forward function."""
inter_feat = self.stem(x)
out_feats = []
for ind in range(self.num_stacks):
single_hourglass = self.hourglass_modules[ind]
out_conv = self.out_convs[ind]
hourglass_feat = single_hourglass(inter_feat)
out_feat = out_conv(hourglass_feat)
out_feats.append(out_feat)
if ind < self.num_stacks - 1:
inter_feat = self.conv1x1s[ind](
inter_feat) + self.remap_convs[ind](
out_feat)
inter_feat = self.inters[ind](self.relu(inter_feat))
return out_feats
| 7,494 | 32.609865 | 79 | py |
DSLA-DSLA | DSLA-DSLA/mmdet/models/backbones/res2net.py | # Copyright (c) OpenMMLab. All rights reserved.
import math
import torch
import torch.nn as nn
import torch.utils.checkpoint as cp
from mmcv.cnn import build_conv_layer, build_norm_layer
from mmcv.runner import Sequential
from ..builder import BACKBONES
from .resnet import Bottleneck as _Bottleneck
from .resnet import ResNet
class Bottle2neck(_Bottleneck):
expansion = 4
def __init__(self,
inplanes,
planes,
scales=4,
base_width=26,
base_channels=64,
stage_type='normal',
**kwargs):
"""Bottle2neck block for Res2Net.
If style is "pytorch", the stride-two layer is the 3x3 conv layer, if
it is "caffe", the stride-two layer is the first 1x1 conv layer.
"""
super(Bottle2neck, self).__init__(inplanes, planes, **kwargs)
assert scales > 1, 'Res2Net degenerates to ResNet when scales = 1.'
width = int(math.floor(self.planes * (base_width / base_channels)))
self.norm1_name, norm1 = build_norm_layer(
self.norm_cfg, width * scales, postfix=1)
self.norm3_name, norm3 = build_norm_layer(
self.norm_cfg, self.planes * self.expansion, postfix=3)
self.conv1 = build_conv_layer(
self.conv_cfg,
self.inplanes,
width * scales,
kernel_size=1,
stride=self.conv1_stride,
bias=False)
self.add_module(self.norm1_name, norm1)
if stage_type == 'stage' and self.conv2_stride != 1:
self.pool = nn.AvgPool2d(
kernel_size=3, stride=self.conv2_stride, padding=1)
convs = []
bns = []
fallback_on_stride = False
if self.with_dcn:
fallback_on_stride = self.dcn.pop('fallback_on_stride', False)
if not self.with_dcn or fallback_on_stride:
for i in range(scales - 1):
convs.append(
build_conv_layer(
self.conv_cfg,
width,
width,
kernel_size=3,
stride=self.conv2_stride,
padding=self.dilation,
dilation=self.dilation,
bias=False))
bns.append(
build_norm_layer(self.norm_cfg, width, postfix=i + 1)[1])
self.convs = nn.ModuleList(convs)
self.bns = nn.ModuleList(bns)
else:
assert self.conv_cfg is None, 'conv_cfg must be None for DCN'
for i in range(scales - 1):
convs.append(
build_conv_layer(
self.dcn,
width,
width,
kernel_size=3,
stride=self.conv2_stride,
padding=self.dilation,
dilation=self.dilation,
bias=False))
bns.append(
build_norm_layer(self.norm_cfg, width, postfix=i + 1)[1])
self.convs = nn.ModuleList(convs)
self.bns = nn.ModuleList(bns)
self.conv3 = build_conv_layer(
self.conv_cfg,
width * scales,
self.planes * self.expansion,
kernel_size=1,
bias=False)
self.add_module(self.norm3_name, norm3)
self.stage_type = stage_type
self.scales = scales
self.width = width
delattr(self, 'conv2')
delattr(self, self.norm2_name)
def forward(self, x):
"""Forward function."""
def _inner_forward(x):
identity = x
out = self.conv1(x)
out = self.norm1(out)
out = self.relu(out)
if self.with_plugins:
out = self.forward_plugin(out, self.after_conv1_plugin_names)
spx = torch.split(out, self.width, 1)
sp = self.convs[0](spx[0].contiguous())
sp = self.relu(self.bns[0](sp))
out = sp
for i in range(1, self.scales - 1):
if self.stage_type == 'stage':
sp = spx[i]
else:
sp = sp + spx[i]
sp = self.convs[i](sp.contiguous())
sp = self.relu(self.bns[i](sp))
out = torch.cat((out, sp), 1)
if self.stage_type == 'normal' or self.conv2_stride == 1:
out = torch.cat((out, spx[self.scales - 1]), 1)
elif self.stage_type == 'stage':
out = torch.cat((out, self.pool(spx[self.scales - 1])), 1)
if self.with_plugins:
out = self.forward_plugin(out, self.after_conv2_plugin_names)
out = self.conv3(out)
out = self.norm3(out)
if self.with_plugins:
out = self.forward_plugin(out, self.after_conv3_plugin_names)
if self.downsample is not None:
identity = self.downsample(x)
out += identity
return out
if self.with_cp and x.requires_grad:
out = cp.checkpoint(_inner_forward, x)
else:
out = _inner_forward(x)
out = self.relu(out)
return out
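# A worked sketch of the hierarchical split (values illustrative): for
# planes=64, base_width=26, base_channels=64, width = floor(64 * 26 / 64)
# = 26 and conv1 expands to width * scales = 104 channels, which are split
# into 4 groups; each later group is summed with the previous output before
# its 3x3 conv, so receptive fields grow group by group.
#
# >>> import torch
# >>> block = Bottle2neck(256, 64, scales=4, base_width=26)
# >>> block.eval()
# >>> block(torch.rand(1, 256, 8, 8)).shape
# torch.Size([1, 256, 8, 8])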
class Res2Layer(Sequential):
"""Res2Layer to build Res2Net style backbone.
Args:
block (nn.Module): block used to build ResLayer.
inplanes (int): inplanes of block.
planes (int): planes of block.
num_blocks (int): number of blocks.
stride (int): stride of the first block. Default: 1
avg_down (bool): Use AvgPool instead of stride conv when
downsampling in the bottle2neck. Default: False
conv_cfg (dict): dictionary to construct and config conv layer.
Default: None
norm_cfg (dict): dictionary to construct and config norm layer.
Default: dict(type='BN')
scales (int): Scales used in Res2Net. Default: 4
base_width (int): Basic width of each scale. Default: 26
"""
def __init__(self,
block,
inplanes,
planes,
num_blocks,
stride=1,
avg_down=True,
conv_cfg=None,
norm_cfg=dict(type='BN'),
scales=4,
base_width=26,
**kwargs):
self.block = block
downsample = None
if stride != 1 or inplanes != planes * block.expansion:
downsample = nn.Sequential(
nn.AvgPool2d(
kernel_size=stride,
stride=stride,
ceil_mode=True,
count_include_pad=False),
build_conv_layer(
conv_cfg,
inplanes,
planes * block.expansion,
kernel_size=1,
stride=1,
bias=False),
build_norm_layer(norm_cfg, planes * block.expansion)[1],
)
layers = []
layers.append(
block(
inplanes=inplanes,
planes=planes,
stride=stride,
downsample=downsample,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
scales=scales,
base_width=base_width,
stage_type='stage',
**kwargs))
inplanes = planes * block.expansion
for i in range(1, num_blocks):
layers.append(
block(
inplanes=inplanes,
planes=planes,
stride=1,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
scales=scales,
base_width=base_width,
**kwargs))
super(Res2Layer, self).__init__(*layers)
@BACKBONES.register_module()
class Res2Net(ResNet):
"""Res2Net backbone.
Args:
scales (int): Scales used in Res2Net. Default: 4
base_width (int): Basic width of each scale. Default: 26
depth (int): Depth of res2net, from {50, 101, 152}.
in_channels (int): Number of input image channels. Default: 3.
num_stages (int): Res2net stages. Default: 4.
strides (Sequence[int]): Strides of the first block of each stage.
dilations (Sequence[int]): Dilation of each stage.
out_indices (Sequence[int]): Output from which stages.
style (str): `pytorch` or `caffe`. If set to "pytorch", the stride-two
layer is the 3x3 conv layer, otherwise the stride-two layer is
the first 1x1 conv layer.
        deep_stem (bool): Replace the 7x7 conv in the input stem with three
            3x3 convs.
avg_down (bool): Use AvgPool instead of stride conv when
downsampling in the bottle2neck.
frozen_stages (int): Stages to be frozen (stop grad and set eval mode).
-1 means not freezing any parameters.
norm_cfg (dict): Dictionary to construct and config norm layer.
norm_eval (bool): Whether to set norm layers to eval mode, namely,
freeze running stats (mean and var). Note: Effect on Batch Norm
and its variants only.
plugins (list[dict]): List of plugins for stages, each dict contains:
- cfg (dict, required): Cfg dict to build plugin.
- position (str, required): Position inside block to insert
plugin, options are 'after_conv1', 'after_conv2', 'after_conv3'.
- stages (tuple[bool], optional): Stages to apply plugin, length
should be same as 'num_stages'.
with_cp (bool): Use checkpoint or not. Using checkpoint will save some
memory while slowing down the training speed.
zero_init_residual (bool): Whether to use zero init for last norm layer
in resblocks to let them behave as identity.
pretrained (str, optional): model pretrained path. Default: None
init_cfg (dict or list[dict], optional): Initialization config dict.
Default: None
Example:
>>> from mmdet.models import Res2Net
>>> import torch
>>> self = Res2Net(depth=50, scales=4, base_width=26)
>>> self.eval()
>>> inputs = torch.rand(1, 3, 32, 32)
>>> level_outputs = self.forward(inputs)
>>> for level_out in level_outputs:
... print(tuple(level_out.shape))
(1, 256, 8, 8)
(1, 512, 4, 4)
(1, 1024, 2, 2)
(1, 2048, 1, 1)
"""
arch_settings = {
50: (Bottle2neck, (3, 4, 6, 3)),
101: (Bottle2neck, (3, 4, 23, 3)),
152: (Bottle2neck, (3, 8, 36, 3))
}
def __init__(self,
scales=4,
base_width=26,
style='pytorch',
deep_stem=True,
avg_down=True,
pretrained=None,
init_cfg=None,
**kwargs):
self.scales = scales
self.base_width = base_width
super(Res2Net, self).__init__(
style='pytorch',
deep_stem=True,
avg_down=True,
pretrained=pretrained,
init_cfg=init_cfg,
**kwargs)
def make_res_layer(self, **kwargs):
return Res2Layer(
scales=self.scales,
base_width=self.base_width,
base_channels=self.base_channels,
**kwargs)
| 11,659 | 34.54878 | 79 | py |
DSLA-DSLA | DSLA-DSLA/mmdet/models/backbones/darknet.py | # Copyright (c) OpenMMLab. All rights reserved.
# Copyright (c) 2019 Western Digital Corporation or its affiliates.
import warnings
import torch.nn as nn
from mmcv.cnn import ConvModule
from mmcv.runner import BaseModule
from torch.nn.modules.batchnorm import _BatchNorm
from ..builder import BACKBONES
class ResBlock(BaseModule):
"""The basic residual block used in Darknet. Each ResBlock consists of two
ConvModules and the input is added to the final output. Each ConvModule is
composed of Conv, BN, and LeakyReLU. In YoloV3 paper, the first convLayer
has half of the number of the filters as much as the second convLayer. The
first convLayer has filter size of 1x1 and the second one has the filter
size of 3x3.
Args:
in_channels (int): The input channels. Must be even.
conv_cfg (dict): Config dict for convolution layer. Default: None.
norm_cfg (dict): Dictionary to construct and config norm layer.
Default: dict(type='BN', requires_grad=True)
act_cfg (dict): Config dict for activation layer.
Default: dict(type='LeakyReLU', negative_slope=0.1).
init_cfg (dict or list[dict], optional): Initialization config dict.
Default: None
"""
def __init__(self,
in_channels,
conv_cfg=None,
norm_cfg=dict(type='BN', requires_grad=True),
act_cfg=dict(type='LeakyReLU', negative_slope=0.1),
init_cfg=None):
super(ResBlock, self).__init__(init_cfg)
assert in_channels % 2 == 0 # ensure the in_channels is even
half_in_channels = in_channels // 2
# shortcut
cfg = dict(conv_cfg=conv_cfg, norm_cfg=norm_cfg, act_cfg=act_cfg)
self.conv1 = ConvModule(in_channels, half_in_channels, 1, **cfg)
self.conv2 = ConvModule(
half_in_channels, in_channels, 3, padding=1, **cfg)
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.conv2(out)
out = out + residual
return out
@BACKBONES.register_module()
class Darknet(BaseModule):
"""Darknet backbone.
Args:
        depth (int): Depth of Darknet. Currently only depth 53 is supported.
out_indices (Sequence[int]): Output from which stages.
frozen_stages (int): Stages to be frozen (stop grad and set eval mode).
-1 means not freezing any parameters. Default: -1.
conv_cfg (dict): Config dict for convolution layer. Default: None.
norm_cfg (dict): Dictionary to construct and config norm layer.
Default: dict(type='BN', requires_grad=True)
act_cfg (dict): Config dict for activation layer.
Default: dict(type='LeakyReLU', negative_slope=0.1).
norm_eval (bool): Whether to set norm layers to eval mode, namely,
freeze running stats (mean and var). Note: Effect on Batch Norm
and its variants only.
pretrained (str, optional): model pretrained path. Default: None
init_cfg (dict or list[dict], optional): Initialization config dict.
Default: None
Example:
>>> from mmdet.models import Darknet
>>> import torch
>>> self = Darknet(depth=53)
>>> self.eval()
>>> inputs = torch.rand(1, 3, 416, 416)
>>> level_outputs = self.forward(inputs)
>>> for level_out in level_outputs:
... print(tuple(level_out.shape))
...
(1, 256, 52, 52)
(1, 512, 26, 26)
(1, 1024, 13, 13)
"""
# Dict(depth: (layers, channels))
arch_settings = {
53: ((1, 2, 8, 8, 4), ((32, 64), (64, 128), (128, 256), (256, 512),
(512, 1024)))
}
def __init__(self,
depth=53,
out_indices=(3, 4, 5),
frozen_stages=-1,
conv_cfg=None,
norm_cfg=dict(type='BN', requires_grad=True),
act_cfg=dict(type='LeakyReLU', negative_slope=0.1),
norm_eval=True,
pretrained=None,
init_cfg=None):
super(Darknet, self).__init__(init_cfg)
if depth not in self.arch_settings:
raise KeyError(f'invalid depth {depth} for darknet')
self.depth = depth
self.out_indices = out_indices
self.frozen_stages = frozen_stages
self.layers, self.channels = self.arch_settings[depth]
cfg = dict(conv_cfg=conv_cfg, norm_cfg=norm_cfg, act_cfg=act_cfg)
self.conv1 = ConvModule(3, 32, 3, padding=1, **cfg)
self.cr_blocks = ['conv1']
for i, n_layers in enumerate(self.layers):
layer_name = f'conv_res_block{i + 1}'
in_c, out_c = self.channels[i]
self.add_module(
layer_name,
self.make_conv_res_block(in_c, out_c, n_layers, **cfg))
self.cr_blocks.append(layer_name)
self.norm_eval = norm_eval
assert not (init_cfg and pretrained), \
'init_cfg and pretrained cannot be specified at the same time'
if isinstance(pretrained, str):
warnings.warn('DeprecationWarning: pretrained is deprecated, '
'please use "init_cfg" instead')
self.init_cfg = dict(type='Pretrained', checkpoint=pretrained)
elif pretrained is None:
if init_cfg is None:
self.init_cfg = [
dict(type='Kaiming', layer='Conv2d'),
dict(
type='Constant',
val=1,
layer=['_BatchNorm', 'GroupNorm'])
]
else:
raise TypeError('pretrained must be a str or None')
def forward(self, x):
outs = []
for i, layer_name in enumerate(self.cr_blocks):
cr_block = getattr(self, layer_name)
x = cr_block(x)
if i in self.out_indices:
outs.append(x)
return tuple(outs)
def _freeze_stages(self):
if self.frozen_stages >= 0:
for i in range(self.frozen_stages):
m = getattr(self, self.cr_blocks[i])
m.eval()
for param in m.parameters():
param.requires_grad = False
def train(self, mode=True):
super(Darknet, self).train(mode)
self._freeze_stages()
if mode and self.norm_eval:
for m in self.modules():
if isinstance(m, _BatchNorm):
m.eval()
@staticmethod
def make_conv_res_block(in_channels,
out_channels,
res_repeat,
conv_cfg=None,
norm_cfg=dict(type='BN', requires_grad=True),
act_cfg=dict(type='LeakyReLU',
negative_slope=0.1)):
"""In Darknet backbone, ConvLayer is usually followed by ResBlock. This
function will make that. The Conv layers always have 3x3 filters with
stride=2. The number of the filters in Conv layer is the same as the
out channels of the ResBlock.
Args:
in_channels (int): The number of input channels.
out_channels (int): The number of output channels.
res_repeat (int): The number of ResBlocks.
conv_cfg (dict): Config dict for convolution layer. Default: None.
norm_cfg (dict): Dictionary to construct and config norm layer.
Default: dict(type='BN', requires_grad=True)
act_cfg (dict): Config dict for activation layer.
Default: dict(type='LeakyReLU', negative_slope=0.1).
"""
cfg = dict(conv_cfg=conv_cfg, norm_cfg=norm_cfg, act_cfg=act_cfg)
model = nn.Sequential()
model.add_module(
'conv',
ConvModule(
in_channels, out_channels, 3, stride=2, padding=1, **cfg))
for idx in range(res_repeat):
model.add_module('res{}'.format(idx),
ResBlock(out_channels, **cfg))
return model
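# A minimal shape sketch for make_conv_res_block (sizes illustrative): the
# leading conv halves the spatial size via stride 2 and the ResBlocks
# preserve it.
#
# >>> import torch
# >>> block = Darknet.make_conv_res_block(32, 64, res_repeat=1)
# >>> block.eval()
# >>> block(torch.rand(1, 32, 64, 64)).shape
# torch.Size([1, 64, 32, 32])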
| 8,233 | 37.476636 | 79 | py |
DSLA-DSLA | DSLA-DSLA/mmdet/datasets/custom.py | # Copyright (c) OpenMMLab. All rights reserved.
import os.path as osp
import warnings
from collections import OrderedDict
import mmcv
import numpy as np
from mmcv.utils import print_log
from terminaltables import AsciiTable
from torch.utils.data import Dataset
from mmdet.core import eval_map, eval_recalls
from .builder import DATASETS
from .pipelines import Compose
@DATASETS.register_module()
class CustomDataset(Dataset):
"""Custom dataset for detection.
The annotation format is shown as follows. The `ann` field is optional for
testing.
.. code-block:: none
[
{
'filename': 'a.jpg',
'width': 1280,
'height': 720,
'ann': {
'bboxes': <np.ndarray> (n, 4) in (x1, y1, x2, y2) order.
'labels': <np.ndarray> (n, ),
'bboxes_ignore': <np.ndarray> (k, 4), (optional field)
                'labels_ignore': <np.ndarray> (k, ) (optional field)
}
},
...
]
Args:
ann_file (str): Annotation file path.
pipeline (list[dict]): Processing pipeline.
classes (str | Sequence[str], optional): Specify classes to load.
If is None, ``cls.CLASSES`` will be used. Default: None.
data_root (str, optional): Data root for ``ann_file``,
``img_prefix``, ``seg_prefix``, ``proposal_file`` if specified.
test_mode (bool, optional): If set True, annotation will not be loaded.
filter_empty_gt (bool, optional): If set true, images without bounding
boxes of the dataset's classes will be filtered out. This option
only works when `test_mode=False`, i.e., we never filter images
during tests.
"""
CLASSES = None
def __init__(self,
ann_file,
pipeline,
classes=None,
data_root=None,
img_prefix='',
seg_prefix=None,
proposal_file=None,
test_mode=False,
filter_empty_gt=True,
file_client_args=dict(backend='disk')):
self.ann_file = ann_file
self.data_root = data_root
self.img_prefix = img_prefix
self.seg_prefix = seg_prefix
self.proposal_file = proposal_file
self.test_mode = test_mode
self.filter_empty_gt = filter_empty_gt
self.CLASSES = self.get_classes(classes)
self.file_client = mmcv.FileClient(**file_client_args)
# join paths if data_root is specified
if self.data_root is not None:
if not osp.isabs(self.ann_file):
self.ann_file = osp.join(self.data_root, self.ann_file)
if not (self.img_prefix is None or osp.isabs(self.img_prefix)):
self.img_prefix = osp.join(self.data_root, self.img_prefix)
if not (self.seg_prefix is None or osp.isabs(self.seg_prefix)):
self.seg_prefix = osp.join(self.data_root, self.seg_prefix)
if not (self.proposal_file is None
or osp.isabs(self.proposal_file)):
self.proposal_file = osp.join(self.data_root,
self.proposal_file)
# load annotations (and proposals)
if hasattr(self.file_client, 'get_local_path'):
with self.file_client.get_local_path(self.ann_file) as local_path:
self.data_infos = self.load_annotations(local_path)
else:
warnings.warn(
'The used MMCV version does not have get_local_path. '
f'We treat the {self.ann_file} as local paths and it '
'might cause errors if the path is not a local path. '
'Please use MMCV>= 1.3.16 if you meet errors.')
self.data_infos = self.load_annotations(self.ann_file)
if self.proposal_file is not None:
if hasattr(self.file_client, 'get_local_path'):
with self.file_client.get_local_path(
self.proposal_file) as local_path:
self.proposals = self.load_proposals(local_path)
else:
warnings.warn(
'The used MMCV version does not have get_local_path. '
                    f'We treat the {self.proposal_file} as local paths and it '
'might cause errors if the path is not a local path. '
'Please use MMCV>= 1.3.16 if you meet errors.')
self.proposals = self.load_proposals(self.proposal_file)
else:
self.proposals = None
# filter images too small and containing no annotations
if not test_mode:
valid_inds = self._filter_imgs()
self.data_infos = [self.data_infos[i] for i in valid_inds]
if self.proposals is not None:
self.proposals = [self.proposals[i] for i in valid_inds]
# set group flag for the sampler
self._set_group_flag()
# processing pipeline
self.pipeline = Compose(pipeline)
def __len__(self):
"""Total number of samples of data."""
return len(self.data_infos)
def load_annotations(self, ann_file):
"""Load annotation from annotation file."""
return mmcv.load(ann_file)
def load_proposals(self, proposal_file):
"""Load proposal from proposal file."""
return mmcv.load(proposal_file)
def get_ann_info(self, idx):
"""Get annotation by index.
Args:
idx (int): Index of data.
Returns:
dict: Annotation info of specified index.
"""
return self.data_infos[idx]['ann']
def get_cat_ids(self, idx):
"""Get category ids by index.
Args:
idx (int): Index of data.
Returns:
list[int]: All categories in the image of specified index.
"""
        return self.data_infos[idx]['ann']['labels'].astype(np.int64).tolist()
def pre_pipeline(self, results):
"""Prepare results dict for pipeline."""
results['img_prefix'] = self.img_prefix
results['seg_prefix'] = self.seg_prefix
results['proposal_file'] = self.proposal_file
results['bbox_fields'] = []
results['mask_fields'] = []
results['seg_fields'] = []
def _filter_imgs(self, min_size=32):
"""Filter images too small."""
if self.filter_empty_gt:
warnings.warn(
'CustomDataset does not support filtering empty gt images.')
valid_inds = []
for i, img_info in enumerate(self.data_infos):
if min(img_info['width'], img_info['height']) >= min_size:
valid_inds.append(i)
return valid_inds
def _set_group_flag(self):
"""Set flag according to image aspect ratio.
Images with aspect ratio greater than 1 will be set as group 1,
otherwise group 0.
"""
self.flag = np.zeros(len(self), dtype=np.uint8)
for i in range(len(self)):
img_info = self.data_infos[i]
if img_info['width'] / img_info['height'] > 1:
self.flag[i] = 1
def _rand_another(self, idx):
"""Get another random index from the same group as the given index."""
pool = np.where(self.flag == self.flag[idx])[0]
return np.random.choice(pool)
def __getitem__(self, idx):
"""Get training/test data after pipeline.
Args:
idx (int): Index of data.
Returns:
            dict: Training/test data (with annotation if ``test_mode`` is \
                set False).
"""
if self.test_mode:
return self.prepare_test_img(idx)
while True:
data = self.prepare_train_img(idx)
if data is None:
idx = self._rand_another(idx)
continue
return data
def prepare_train_img(self, idx):
"""Get training data and annotations after pipeline.
Args:
idx (int): Index of data.
Returns:
dict: Training data and annotation after pipeline with new keys \
introduced by pipeline.
"""
img_info = self.data_infos[idx]
ann_info = self.get_ann_info(idx)
results = dict(img_info=img_info, ann_info=ann_info)
if self.proposals is not None:
results['proposals'] = self.proposals[idx]
self.pre_pipeline(results)
return self.pipeline(results)
def prepare_test_img(self, idx):
"""Get testing data after pipeline.
Args:
idx (int): Index of data.
Returns:
dict: Testing data after pipeline with new keys introduced by \
pipeline.
"""
img_info = self.data_infos[idx]
results = dict(img_info=img_info)
if self.proposals is not None:
results['proposals'] = self.proposals[idx]
self.pre_pipeline(results)
return self.pipeline(results)
@classmethod
def get_classes(cls, classes=None):
"""Get class names of current dataset.
Args:
classes (Sequence[str] | str | None): If classes is None, use
default CLASSES defined by builtin dataset. If classes is a
string, take it as a file name. The file contains the name of
classes where each line contains one class name. If classes is
a tuple or list, override the CLASSES defined by the dataset.
Returns:
tuple[str] or list[str]: Names of categories of the dataset.
"""
if classes is None:
return cls.CLASSES
if isinstance(classes, str):
# take it as a file path
class_names = mmcv.list_from_file(classes)
elif isinstance(classes, (tuple, list)):
class_names = classes
else:
raise ValueError(f'Unsupported type {type(classes)} of classes.')
return class_names
def format_results(self, results, **kwargs):
"""Place holder to format result to dataset specific output."""
def evaluate(self,
results,
metric='mAP',
logger=None,
proposal_nums=(100, 300, 1000),
iou_thr=0.5,
scale_ranges=None):
"""Evaluate the dataset.
Args:
results (list): Testing results of the dataset.
metric (str | list[str]): Metrics to be evaluated.
logger (logging.Logger | None | str): Logger used for printing
related information during evaluation. Default: None.
proposal_nums (Sequence[int]): Proposal number used for evaluating
recalls, such as recall@100, recall@1000.
Default: (100, 300, 1000).
iou_thr (float | list[float]): IoU threshold. Default: 0.5.
scale_ranges (list[tuple] | None): Scale ranges for evaluating mAP.
Default: None.
"""
if not isinstance(metric, str):
assert len(metric) == 1
metric = metric[0]
allowed_metrics = ['mAP', 'recall']
if metric not in allowed_metrics:
raise KeyError(f'metric {metric} is not supported')
annotations = [self.get_ann_info(i) for i in range(len(self))]
eval_results = OrderedDict()
iou_thrs = [iou_thr] if isinstance(iou_thr, float) else iou_thr
if metric == 'mAP':
assert isinstance(iou_thrs, list)
mean_aps = []
for iou_thr in iou_thrs:
print_log(f'\n{"-" * 15}iou_thr: {iou_thr}{"-" * 15}')
mean_ap, _ = eval_map(
results,
annotations,
scale_ranges=scale_ranges,
iou_thr=iou_thr,
dataset=self.CLASSES,
logger=logger)
mean_aps.append(mean_ap)
eval_results[f'AP{int(iou_thr * 100):02d}'] = round(mean_ap, 3)
eval_results['mAP'] = sum(mean_aps) / len(mean_aps)
elif metric == 'recall':
gt_bboxes = [ann['bboxes'] for ann in annotations]
recalls = eval_recalls(
gt_bboxes, results, proposal_nums, iou_thr, logger=logger)
for i, num in enumerate(proposal_nums):
for j, iou in enumerate(iou_thrs):
eval_results[f'recall@{num}@{iou}'] = recalls[i, j]
if recalls.shape[1] > 1:
ar = recalls.mean(axis=1)
for i, num in enumerate(proposal_nums):
eval_results[f'AR@{num}'] = ar[i]
return eval_results
def __repr__(self):
"""Print the number of instance number."""
dataset_type = 'Test' if self.test_mode else 'Train'
result = (f'\n{self.__class__.__name__} {dataset_type} dataset '
f'with number of images {len(self)}, '
f'and instance counts: \n')
if self.CLASSES is None:
result += 'Category names are not provided. \n'
return result
instance_count = np.zeros(len(self.CLASSES) + 1).astype(int)
# count the instance number in each image
for idx in range(len(self)):
label = self.get_ann_info(idx)['labels']
unique, counts = np.unique(label, return_counts=True)
if len(unique) > 0:
# add the occurrence number to each class
instance_count[unique] += counts
else:
# background is the last index
instance_count[-1] += 1
# create a table with category count
table_data = [['category', 'count'] * 5]
row_data = []
for cls, count in enumerate(instance_count):
if cls < len(self.CLASSES):
row_data += [f'{cls} [{self.CLASSES[cls]}]', f'{count}']
else:
# add the background number
row_data += ['-1 background', f'{count}']
if len(row_data) == 10:
table_data.append(row_data)
row_data = []
if len(row_data) >= 2:
if row_data[-1] == '0':
row_data = row_data[:-2]
if len(row_data) >= 2:
table_data.append([])
table_data.append(row_data)
table = AsciiTable(table_data)
result += table.table
return result
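# A minimal subclassing sketch (``MyDataset`` and its fields are
# hypothetical; the returned dicts follow the middle format documented in
# the class docstring above):
#
# >>> @DATASETS.register_module()
# ... class MyDataset(CustomDataset):
# ...     CLASSES = ('person', 'car')
# ...
# ...     def load_annotations(self, ann_file):
# ...         # one dict per image; ``ann`` holds boxes/labels as ndarrays
# ...         return [
# ...             dict(
# ...                 filename='a.jpg',
# ...                 width=1280,
# ...                 height=720,
# ...                 ann=dict(
# ...                     bboxes=np.array([[10, 10, 60, 80]],
# ...                                     dtype=np.float32),
# ...                     labels=np.array([0], dtype=np.int64)))
# ...         ]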
| 14,679 | 36.641026 | 79 | py |
DSLA-DSLA | DSLA-DSLA/mmdet/datasets/dataset_wrappers.py | # Copyright (c) OpenMMLab. All rights reserved.
import bisect
import collections
import copy
import math
from collections import defaultdict
import numpy as np
from mmcv.utils import build_from_cfg, print_log
from torch.utils.data.dataset import ConcatDataset as _ConcatDataset
from .builder import DATASETS, PIPELINES
from .coco import CocoDataset
@DATASETS.register_module()
class ConcatDataset(_ConcatDataset):
"""A wrapper of concatenated dataset.
Same as :obj:`torch.utils.data.dataset.ConcatDataset`, but
concat the group flag for image aspect ratio.
Args:
datasets (list[:obj:`Dataset`]): A list of datasets.
separate_eval (bool): Whether to evaluate the results
separately if it is used as validation dataset.
Defaults to True.
"""
def __init__(self, datasets, separate_eval=True):
super(ConcatDataset, self).__init__(datasets)
self.CLASSES = datasets[0].CLASSES
self.separate_eval = separate_eval
if not separate_eval:
if any([isinstance(ds, CocoDataset) for ds in datasets]):
raise NotImplementedError(
'Evaluating concatenated CocoDataset as a whole is not'
' supported! Please set "separate_eval=True"')
elif len(set([type(ds) for ds in datasets])) != 1:
raise NotImplementedError(
'All the datasets should have same types')
if hasattr(datasets[0], 'flag'):
flags = []
for i in range(0, len(datasets)):
flags.append(datasets[i].flag)
self.flag = np.concatenate(flags)
def get_cat_ids(self, idx):
"""Get category ids of concatenated dataset by index.
Args:
idx (int): Index of data.
Returns:
list[int]: All categories in the image of specified index.
"""
if idx < 0:
if -idx > len(self):
raise ValueError(
'absolute value of index should not exceed dataset length')
idx = len(self) + idx
dataset_idx = bisect.bisect_right(self.cumulative_sizes, idx)
if dataset_idx == 0:
sample_idx = idx
else:
sample_idx = idx - self.cumulative_sizes[dataset_idx - 1]
return self.datasets[dataset_idx].get_cat_ids(sample_idx)
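    # A worked example of the global-to-local index mapping (illustrative
    # sizes): with two datasets of lengths 3 and 4, ``cumulative_sizes`` is
    # [3, 7]; a global ``idx = 5`` gives ``bisect_right([3, 7], 5) == 1``, so
    # the sample comes from ``datasets[1]`` at local index ``5 - 3 == 2``.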
def get_ann_info(self, idx):
"""Get annotation of concatenated dataset by index.
Args:
idx (int): Index of data.
Returns:
dict: Annotation info of specified index.
"""
if idx < 0:
if -idx > len(self):
raise ValueError(
'absolute value of index should not exceed dataset length')
idx = len(self) + idx
dataset_idx = bisect.bisect_right(self.cumulative_sizes, idx)
if dataset_idx == 0:
sample_idx = idx
else:
sample_idx = idx - self.cumulative_sizes[dataset_idx - 1]
return self.datasets[dataset_idx].get_ann_info(sample_idx)
def evaluate(self, results, logger=None, **kwargs):
"""Evaluate the results.
Args:
results (list[list | tuple]): Testing results of the dataset.
logger (logging.Logger | str | None): Logger used for printing
related information during evaluation. Default: None.
Returns:
dict[str: float]: AP results of the total dataset or each separate
dataset if `self.separate_eval=True`.
"""
assert len(results) == self.cumulative_sizes[-1], \
('Dataset and results have different sizes: '
f'{self.cumulative_sizes[-1]} v.s. {len(results)}')
# Check whether all the datasets support evaluation
for dataset in self.datasets:
assert hasattr(dataset, 'evaluate'), \
f'{type(dataset)} does not implement evaluate function'
if self.separate_eval:
dataset_idx = -1
total_eval_results = dict()
for size, dataset in zip(self.cumulative_sizes, self.datasets):
start_idx = 0 if dataset_idx == -1 else \
self.cumulative_sizes[dataset_idx]
end_idx = self.cumulative_sizes[dataset_idx + 1]
results_per_dataset = results[start_idx:end_idx]
print_log(
f'\nEvaluating {dataset.ann_file} with '
f'{len(results_per_dataset)} images now',
logger=logger)
eval_results_per_dataset = dataset.evaluate(
results_per_dataset, logger=logger, **kwargs)
dataset_idx += 1
for k, v in eval_results_per_dataset.items():
total_eval_results.update({f'{dataset_idx}_{k}': v})
return total_eval_results
elif any([isinstance(ds, CocoDataset) for ds in self.datasets]):
raise NotImplementedError(
'Evaluating concatenated CocoDataset as a whole is not'
' supported! Please set "separate_eval=True"')
elif len(set([type(ds) for ds in self.datasets])) != 1:
raise NotImplementedError(
'All the datasets should have same types')
else:
original_data_infos = self.datasets[0].data_infos
self.datasets[0].data_infos = sum(
[dataset.data_infos for dataset in self.datasets], [])
eval_results = self.datasets[0].evaluate(
results, logger=logger, **kwargs)
self.datasets[0].data_infos = original_data_infos
return eval_results
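# Illustrative sketch (editor's addition, not part of the original file): how a
# global sample index is mapped to (dataset_idx, sample_idx) via the cumulative
# sizes, mirroring the bisect logic in `get_cat_ids`/`get_ann_info` above. The
# dataset sizes below are hypothetical.
def _demo_concat_index_mapping():
    cumulative_sizes = [3, 7]  # dataset 0 holds 3 images, dataset 1 holds 4
    idx = 5
    dataset_idx = bisect.bisect_right(cumulative_sizes, idx)
    sample_idx = idx if dataset_idx == 0 else idx - cumulative_sizes[dataset_idx - 1]
    assert (dataset_idx, sample_idx) == (1, 2)  # global index 5 -> 3rd image of dataset 1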
@DATASETS.register_module()
class RepeatDataset:
"""A wrapper of repeated dataset.
The length of repeated dataset will be `times` larger than the original
dataset. This is useful when the data loading time is long but the dataset
is small. Using RepeatDataset can reduce the data loading time between
epochs.
Args:
dataset (:obj:`Dataset`): The dataset to be repeated.
times (int): Repeat times.
"""
def __init__(self, dataset, times):
self.dataset = dataset
self.times = times
self.CLASSES = dataset.CLASSES
if hasattr(self.dataset, 'flag'):
self.flag = np.tile(self.dataset.flag, times)
self._ori_len = len(self.dataset)
def __getitem__(self, idx):
return self.dataset[idx % self._ori_len]
def get_cat_ids(self, idx):
"""Get category ids of repeat dataset by index.
Args:
idx (int): Index of data.
Returns:
list[int]: All categories in the image of specified index.
"""
return self.dataset.get_cat_ids(idx % self._ori_len)
def get_ann_info(self, idx):
"""Get annotation of repeat dataset by index.
Args:
idx (int): Index of data.
Returns:
dict: Annotation info of specified index.
"""
return self.dataset.get_ann_info(idx % self._ori_len)
def __len__(self):
"""Length after repetition."""
return self.times * self._ori_len
# Modified from https://github.com/facebookresearch/detectron2/blob/41d475b75a230221e21d9cac5d69655e3415e3a4/detectron2/data/samplers/distributed_sampler.py#L57 # noqa
@DATASETS.register_module()
class ClassBalancedDataset:
"""A wrapper of repeated dataset with repeat factor.
Suitable for training on class imbalanced datasets like LVIS. Following
the sampling strategy in the `paper <https://arxiv.org/abs/1908.03195>`_,
in each epoch, an image may appear multiple times based on its
"repeat factor".
The repeat factor for an image is a function of the frequency of the rarest
category labeled in that image. The "frequency of category c" in [0, 1]
is defined by the fraction of images in the training set (without repeats)
in which category c appears.
The dataset needs to instantiate :func:`self.get_cat_ids` to support
ClassBalancedDataset.
The repeat factor is computed as follows.
1. For each category c, compute the fraction of images
that contain it: :math:`f(c)`
2. For each category c, compute the category-level repeat factor:
:math:`r(c) = max(1, sqrt(t/f(c)))`
3. For each image I, compute the image-level repeat factor:
:math:`r(I) = max_{c in I} r(c)`
Args:
dataset (:obj:`CustomDataset`): The dataset to be repeated.
oversample_thr (float): frequency threshold below which data is
repeated. For categories with ``f_c >= oversample_thr``, there is
no oversampling. For categories with ``f_c < oversample_thr``, the
degree of oversampling follows the square-root inverse frequency
heuristic above.
filter_empty_gt (bool, optional): If set true, images without bounding
boxes will not be oversampled. Otherwise, they will be categorized
as the pure background class and included in the oversampling.
Default: True.
"""
def __init__(self, dataset, oversample_thr, filter_empty_gt=True):
self.dataset = dataset
self.oversample_thr = oversample_thr
self.filter_empty_gt = filter_empty_gt
self.CLASSES = dataset.CLASSES
repeat_factors = self._get_repeat_factors(dataset, oversample_thr)
repeat_indices = []
for dataset_idx, repeat_factor in enumerate(repeat_factors):
repeat_indices.extend([dataset_idx] * math.ceil(repeat_factor))
self.repeat_indices = repeat_indices
flags = []
if hasattr(self.dataset, 'flag'):
for flag, repeat_factor in zip(self.dataset.flag, repeat_factors):
flags.extend([flag] * int(math.ceil(repeat_factor)))
assert len(flags) == len(repeat_indices)
self.flag = np.asarray(flags, dtype=np.uint8)
def _get_repeat_factors(self, dataset, repeat_thr):
"""Get repeat factor for each images in the dataset.
Args:
dataset (:obj:`CustomDataset`): The dataset
repeat_thr (float): The frequency threshold. If an image
contains categories whose frequency is below the threshold,
it is repeated.
Returns:
list[float]: The repeat factors for each image in the dataset.
"""
# 1. For each category c, compute the fraction of images
# that contain it: f(c)
category_freq = defaultdict(int)
num_images = len(dataset)
for idx in range(num_images):
cat_ids = set(self.dataset.get_cat_ids(idx))
if len(cat_ids) == 0 and not self.filter_empty_gt:
cat_ids = set([len(self.CLASSES)])
for cat_id in cat_ids:
category_freq[cat_id] += 1
for k, v in category_freq.items():
category_freq[k] = v / num_images
# 2. For each category c, compute the category-level repeat factor:
# r(c) = max(1, sqrt(t/f(c)))
category_repeat = {
cat_id: max(1.0, math.sqrt(repeat_thr / cat_freq))
for cat_id, cat_freq in category_freq.items()
}
# 3. For each image I, compute the image-level repeat factor:
# r(I) = max_{c in I} r(c)
repeat_factors = []
for idx in range(num_images):
cat_ids = set(self.dataset.get_cat_ids(idx))
if len(cat_ids) == 0 and not self.filter_empty_gt:
cat_ids = set([len(self.CLASSES)])
repeat_factor = 1
if len(cat_ids) > 0:
repeat_factor = max(
{category_repeat[cat_id]
for cat_id in cat_ids})
repeat_factors.append(repeat_factor)
return repeat_factors
def __getitem__(self, idx):
ori_index = self.repeat_indices[idx]
return self.dataset[ori_index]
def get_ann_info(self, idx):
"""Get annotation of dataset by index.
Args:
idx (int): Index of data.
Returns:
dict: Annotation info of specified index.
"""
ori_index = self.repeat_indices[idx]
return self.dataset.get_ann_info(ori_index)
def __len__(self):
"""Length after repetition."""
return len(self.repeat_indices)
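# Illustrative sketch (editor's addition, not part of the original file): the
# three-step repeat-factor computation from the docstring on a toy two-category
# dataset. The frequencies and threshold below are hypothetical.
def _demo_repeat_factor():
    repeat_thr = 0.01                     # t in the formulas above
    category_freq = {0: 0.0025, 1: 0.5}   # f(c) for a rare and a common category
    # r(c) = max(1, sqrt(t / f(c)))
    category_repeat = {
        c: max(1.0, math.sqrt(repeat_thr / f)) for c, f in category_freq.items()
    }
    assert abs(category_repeat[0] - 2.0) < 1e-9  # rare category repeated ~2x
    assert category_repeat[1] == 1.0             # common category not oversampled
    # r(I) = max_{c in I} r(c): an image with both categories inherits r = 2.0
    assert max(category_repeat[c] for c in {0, 1}) == 2.0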
@DATASETS.register_module()
class MultiImageMixDataset:
"""A wrapper of multiple images mixed dataset.
Suitable for training on multi-image mixed data augmentations like
mosaic and mixup. For the augmentation pipeline of mixed image data,
the `get_indexes` method needs to be provided to obtain the image
indexes, and you can set `skip_type_keys` to skip selected pipeline
steps. The `dynamic_scale` parameter is deprecated; use the `Resize`
pipeline to change the output image size instead.
Args:
dataset (:obj:`CustomDataset`): The dataset to be mixed.
pipeline (Sequence[dict]): Sequence of transform object or
config dict to be composed.
dynamic_scale (tuple[int], optional): The image scale can be changed
dynamically. Default to None. It is deprecated.
skip_type_keys (list[str], optional): Sequence of pipeline type
strings to be skipped. Default to None.
"""
def __init__(self,
dataset,
pipeline,
dynamic_scale=None,
skip_type_keys=None):
if dynamic_scale is not None:
raise RuntimeError(
'dynamic_scale is deprecated. Please use Resize pipeline '
'to achieve similar functions')
assert isinstance(pipeline, collections.abc.Sequence)
if skip_type_keys is not None:
assert all([
isinstance(skip_type_key, str)
for skip_type_key in skip_type_keys
])
self._skip_type_keys = skip_type_keys
self.pipeline = []
self.pipeline_types = []
for transform in pipeline:
if isinstance(transform, dict):
self.pipeline_types.append(transform['type'])
transform = build_from_cfg(transform, PIPELINES)
self.pipeline.append(transform)
else:
raise TypeError('pipeline must be a dict')
self.dataset = dataset
self.CLASSES = dataset.CLASSES
if hasattr(self.dataset, 'flag'):
self.flag = dataset.flag
self.num_samples = len(dataset)
def __len__(self):
return self.num_samples
def __getitem__(self, idx):
results = copy.deepcopy(self.dataset[idx])
for (transform, transform_type) in zip(self.pipeline,
self.pipeline_types):
if self._skip_type_keys is not None and \
transform_type in self._skip_type_keys:
continue
if hasattr(transform, 'get_indexes'):
indexes = transform.get_indexes(self.dataset)
if not isinstance(indexes, collections.abc.Sequence):
indexes = [indexes]
mix_results = [
copy.deepcopy(self.dataset[index]) for index in indexes
]
results['mix_results'] = mix_results
results = transform(results)
if 'mix_results' in results:
results.pop('mix_results')
return results
def update_skip_type_keys(self, skip_type_keys):
"""Update skip_type_keys. It is called by an external hook.
Args:
skip_type_keys (list[str], optional): Sequence of pipeline
type strings to be skipped.
"""
assert all([
isinstance(skip_type_key, str) for skip_type_key in skip_type_keys
])
self._skip_type_keys = skip_type_keys
| 16,052 | 36.683099 | 167 | py |
DSLA-DSLA | DSLA-DSLA/mmdet/datasets/builder.py | # Copyright (c) OpenMMLab. All rights reserved.
import copy
import platform
import random
import warnings
from functools import partial
import numpy as np
from mmcv.parallel import collate
from mmcv.runner import get_dist_info
from mmcv.utils import TORCH_VERSION, Registry, build_from_cfg, digit_version
from torch.utils.data import DataLoader
from .samplers import (DistributedGroupSampler, DistributedSampler,
GroupSampler, InfiniteBatchSampler,
InfiniteGroupBatchSampler)
if platform.system() != 'Windows':
# https://github.com/pytorch/pytorch/issues/973
import resource
rlimit = resource.getrlimit(resource.RLIMIT_NOFILE)
base_soft_limit = rlimit[0]
hard_limit = rlimit[1]
soft_limit = min(max(4096, base_soft_limit), hard_limit)
resource.setrlimit(resource.RLIMIT_NOFILE, (soft_limit, hard_limit))
DATASETS = Registry('dataset')
PIPELINES = Registry('pipeline')
def _concat_dataset(cfg, default_args=None):
from .dataset_wrappers import ConcatDataset
ann_files = cfg['ann_file']
img_prefixes = cfg.get('img_prefix', None)
seg_prefixes = cfg.get('seg_prefix', None)
proposal_files = cfg.get('proposal_file', None)
separate_eval = cfg.get('separate_eval', True)
datasets = []
num_dset = len(ann_files)
for i in range(num_dset):
data_cfg = copy.deepcopy(cfg)
# pop 'separate_eval' since it is not a valid key for common datasets.
if 'separate_eval' in data_cfg:
data_cfg.pop('separate_eval')
data_cfg['ann_file'] = ann_files[i]
if isinstance(img_prefixes, (list, tuple)):
data_cfg['img_prefix'] = img_prefixes[i]
if isinstance(seg_prefixes, (list, tuple)):
data_cfg['seg_prefix'] = seg_prefixes[i]
if isinstance(proposal_files, (list, tuple)):
data_cfg['proposal_file'] = proposal_files[i]
datasets.append(build_dataset(data_cfg, default_args))
return ConcatDataset(datasets, separate_eval)
def build_dataset(cfg, default_args=None):
from .dataset_wrappers import (ConcatDataset, RepeatDataset,
ClassBalancedDataset, MultiImageMixDataset)
if isinstance(cfg, (list, tuple)):
dataset = ConcatDataset([build_dataset(c, default_args) for c in cfg])
elif cfg['type'] == 'ConcatDataset':
dataset = ConcatDataset(
[build_dataset(c, default_args) for c in cfg['datasets']],
cfg.get('separate_eval', True))
elif cfg['type'] == 'RepeatDataset':
dataset = RepeatDataset(
build_dataset(cfg['dataset'], default_args), cfg['times'])
elif cfg['type'] == 'ClassBalancedDataset':
dataset = ClassBalancedDataset(
build_dataset(cfg['dataset'], default_args), cfg['oversample_thr'])
elif cfg['type'] == 'MultiImageMixDataset':
cp_cfg = copy.deepcopy(cfg)
cp_cfg['dataset'] = build_dataset(cp_cfg['dataset'])
cp_cfg.pop('type')
dataset = MultiImageMixDataset(**cp_cfg)
elif isinstance(cfg.get('ann_file'), (list, tuple)):
dataset = _concat_dataset(cfg, default_args)
else:
dataset = build_from_cfg(cfg, DATASETS, default_args)
return dataset
def build_dataloader(dataset,
samples_per_gpu,
workers_per_gpu,
num_gpus=1,
dist=True,
shuffle=True,
seed=None,
runner_type='EpochBasedRunner',
persistent_workers=False,
**kwargs):
"""Build PyTorch DataLoader.
In distributed training, each GPU/process has a dataloader.
In non-distributed training, there is only one dataloader for all GPUs.
Args:
dataset (Dataset): A PyTorch dataset.
samples_per_gpu (int): Number of training samples on each GPU, i.e.,
batch size of each GPU.
workers_per_gpu (int): How many subprocesses to use for data loading
for each GPU.
num_gpus (int): Number of GPUs. Only used in non-distributed training.
dist (bool): Distributed training/test or not. Default: True.
shuffle (bool): Whether to shuffle the data at every epoch.
Default: True.
seed (int, Optional): Seed to be used. Default: None.
runner_type (str): Type of runner. Default: `EpochBasedRunner`
persistent_workers (bool): If True, the data loader will not shutdown
the worker processes after a dataset has been consumed once.
This keeps the workers' `Dataset` instances alive.
This argument is only valid when PyTorch>=1.7.0. Default: False.
kwargs: any keyword argument to be used to initialize DataLoader
Returns:
DataLoader: A PyTorch dataloader.
"""
rank, world_size = get_dist_info()
if dist:
# When model is :obj:`DistributedDataParallel`,
# `batch_size` of :obj:`dataloader` is the
# number of training samples on each GPU.
batch_size = samples_per_gpu
num_workers = workers_per_gpu
else:
# When model is obj:`DataParallel`
# the batch size is samples on all the GPUS
batch_size = num_gpus * samples_per_gpu
num_workers = num_gpus * workers_per_gpu
if runner_type == 'IterBasedRunner':
# this is a batch sampler, which yields
# a mini-batch of indices each time.
# it can be used in both `DataParallel` and
# `DistributedDataParallel`
if shuffle:
batch_sampler = InfiniteGroupBatchSampler(
dataset, batch_size, world_size, rank, seed=seed)
else:
batch_sampler = InfiniteBatchSampler(
dataset,
batch_size,
world_size,
rank,
seed=seed,
shuffle=False)
batch_size = 1
sampler = None
else:
if dist:
# DistributedGroupSampler always shuffles the data while ensuring
# that images on each GPU belong to the same group
if shuffle:
sampler = DistributedGroupSampler(
dataset, samples_per_gpu, world_size, rank, seed=seed)
else:
sampler = DistributedSampler(
dataset, world_size, rank, shuffle=False, seed=seed)
else:
sampler = GroupSampler(dataset,
samples_per_gpu) if shuffle else None
batch_sampler = None
init_fn = partial(
worker_init_fn, num_workers=num_workers, rank=rank,
seed=seed) if seed is not None else None
if (TORCH_VERSION != 'parrots'
and digit_version(TORCH_VERSION) >= digit_version('1.7.0')):
kwargs['persistent_workers'] = persistent_workers
elif persistent_workers is True:
warnings.warn('persistent_workers is invalid because your pytorch '
'version is lower than 1.7.0')
data_loader = DataLoader(
dataset,
batch_size=batch_size,
sampler=sampler,
num_workers=num_workers,
batch_sampler=batch_sampler,
collate_fn=partial(collate, samples_per_gpu=samples_per_gpu),
pin_memory=False,
worker_init_fn=init_fn,
**kwargs)
return data_loader
def worker_init_fn(worker_id, num_workers, rank, seed):
# The seed of each worker equals to
# num_worker * rank + worker_id + user_seed
worker_seed = num_workers * rank + worker_id + seed
np.random.seed(worker_seed)
random.seed(worker_seed)
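# Illustrative sketch (editor's addition, not part of the original file): the
# per-worker seed produced by `worker_init_fn` for hypothetical rank/worker
# counts, showing that every (rank, worker) pair gets a distinct seed.
def _demo_worker_seeds():
    num_workers, seed = 4, 42
    seeds = {(rank, wid): num_workers * rank + wid + seed
             for rank in range(2) for wid in range(num_workers)}
    assert len(set(seeds.values())) == len(seeds)  # all seeds distinct
    assert seeds[(1, 2)] == 48                     # 4 * 1 + 2 + 42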
| 7,707 | 37.54 | 79 | py |
DSLA-DSLA | DSLA-DSLA/mmdet/datasets/samplers/group_sampler.py | # Copyright (c) OpenMMLab. All rights reserved.
import math
import numpy as np
import torch
from mmcv.runner import get_dist_info
from torch.utils.data import Sampler
class GroupSampler(Sampler):
def __init__(self, dataset, samples_per_gpu=1):
assert hasattr(dataset, 'flag')
self.dataset = dataset
self.samples_per_gpu = samples_per_gpu
self.flag = dataset.flag.astype(np.int64)
self.group_sizes = np.bincount(self.flag)
self.num_samples = 0
for i, size in enumerate(self.group_sizes):
self.num_samples += int(np.ceil(
size / self.samples_per_gpu)) * self.samples_per_gpu
def __iter__(self):
indices = []
for i, size in enumerate(self.group_sizes):
if size == 0:
continue
indice = np.where(self.flag == i)[0]
assert len(indice) == size
np.random.shuffle(indice)
num_extra = int(np.ceil(size / self.samples_per_gpu)
) * self.samples_per_gpu - len(indice)
indice = np.concatenate(
[indice, np.random.choice(indice, num_extra)])
indices.append(indice)
indices = np.concatenate(indices)
indices = [
indices[i * self.samples_per_gpu:(i + 1) * self.samples_per_gpu]
for i in np.random.permutation(
range(len(indices) // self.samples_per_gpu))
]
indices = np.concatenate(indices)
indices = indices.astype(np.int64).tolist()
assert len(indices) == self.num_samples
return iter(indices)
def __len__(self):
return self.num_samples
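# Illustrative sketch (editor's addition, not part of the original file): the
# padding rule from `GroupSampler.__iter__`, which extends each aspect-ratio
# group to a multiple of `samples_per_gpu`. The sizes below are hypothetical.
def _demo_group_padding():
    samples_per_gpu = 4
    size = 10  # hypothetical group size
    num_extra = int(np.ceil(size / samples_per_gpu)) * samples_per_gpu - size
    assert num_extra == 2  # 10 is padded to 12, i.e. 3 full batches of 4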
class DistributedGroupSampler(Sampler):
"""Sampler that restricts data loading to a subset of the dataset.
It is especially useful in conjunction with
:class:`torch.nn.parallel.DistributedDataParallel`. In such case, each
process can pass a DistributedSampler instance as a DataLoader sampler,
and load a subset of the original dataset that is exclusive to it.
.. note::
Dataset is assumed to be of constant size.
Arguments:
dataset: Dataset used for sampling.
num_replicas (optional): Number of processes participating in
distributed training.
rank (optional): Rank of the current process within num_replicas.
seed (int, optional): random seed used to shuffle the sampler if
``shuffle=True``. This number should be identical across all
processes in the distributed group. Default: 0.
"""
def __init__(self,
dataset,
samples_per_gpu=1,
num_replicas=None,
rank=None,
seed=0):
_rank, _num_replicas = get_dist_info()
if num_replicas is None:
num_replicas = _num_replicas
if rank is None:
rank = _rank
self.dataset = dataset
self.samples_per_gpu = samples_per_gpu
self.num_replicas = num_replicas
self.rank = rank
self.epoch = 0
self.seed = seed if seed is not None else 0
assert hasattr(self.dataset, 'flag')
self.flag = self.dataset.flag
self.group_sizes = np.bincount(self.flag)
self.num_samples = 0
for i, j in enumerate(self.group_sizes):
self.num_samples += int(
math.ceil(self.group_sizes[i] * 1.0 / self.samples_per_gpu /
self.num_replicas)) * self.samples_per_gpu
self.total_size = self.num_samples * self.num_replicas
def __iter__(self):
# deterministically shuffle based on epoch
g = torch.Generator()
g.manual_seed(self.epoch + self.seed)
indices = []
for i, size in enumerate(self.group_sizes):
if size > 0:
indice = np.where(self.flag == i)[0]
assert len(indice) == size
# add .numpy() to avoid bug when selecting indice in parrots.
# TODO: check whether torch.randperm() can be replaced by
# numpy.random.permutation().
indice = indice[list(
torch.randperm(int(size), generator=g).numpy())].tolist()
extra = int(
math.ceil(
size * 1.0 / self.samples_per_gpu / self.num_replicas)
) * self.samples_per_gpu * self.num_replicas - len(indice)
# pad indice
tmp = indice.copy()
for _ in range(extra // size):
indice.extend(tmp)
indice.extend(tmp[:extra % size])
indices.extend(indice)
assert len(indices) == self.total_size
indices = [
indices[j] for i in list(
torch.randperm(
len(indices) // self.samples_per_gpu, generator=g))
for j in range(i * self.samples_per_gpu, (i + 1) *
self.samples_per_gpu)
]
# subsample
offset = self.num_samples * self.rank
indices = indices[offset:offset + self.num_samples]
assert len(indices) == self.num_samples
return iter(indices)
def __len__(self):
return self.num_samples
def set_epoch(self, epoch):
self.epoch = epoch
| 5,384 | 35.14094 | 78 | py |
DSLA-DSLA | DSLA-DSLA/mmdet/datasets/samplers/infinite_sampler.py | # Copyright (c) OpenMMLab. All rights reserved.
import itertools
import numpy as np
import torch
from mmcv.runner import get_dist_info
from torch.utils.data.sampler import Sampler
class InfiniteGroupBatchSampler(Sampler):
"""Similar to `BatchSampler` warping a `GroupSampler. It is designed for
iteration-based runners like `IterBasedRunner` and yields a mini-batch
indices each time, all indices in a batch should be in the same group.
The implementation logic is adapted from
https://github.com/facebookresearch/detectron2/blob/main/detectron2/data/samplers/grouped_batch_sampler.py
Args:
dataset (object): The dataset.
batch_size (int): When model is :obj:`DistributedDataParallel`,
it is the number of training samples on each GPU.
When model is :obj:`DataParallel`, it is
`num_gpus * samples_per_gpu`.
Default : 1.
world_size (int, optional): Number of processes participating in
distributed training. Default: None.
rank (int, optional): Rank of current process. Default: None.
seed (int): Random seed. Default: 0.
shuffle (bool): Whether to shuffle the indices of a dummy `epoch`. Note
that `shuffle` cannot guarantee sequential indices, because all
indices in a batch must belong to the same group. Default: True.
""" # noqa: W605
def __init__(self,
dataset,
batch_size=1,
world_size=None,
rank=None,
seed=0,
shuffle=True):
_rank, _world_size = get_dist_info()
if world_size is None:
world_size = _world_size
if rank is None:
rank = _rank
self.rank = rank
self.world_size = world_size
self.dataset = dataset
self.batch_size = batch_size
self.seed = seed if seed is not None else 0
self.shuffle = shuffle
assert hasattr(self.dataset, 'flag')
self.flag = self.dataset.flag
self.group_sizes = np.bincount(self.flag)
# buffer used to save indices of each group
self.buffer_per_group = {k: [] for k in range(len(self.group_sizes))}
self.size = len(dataset)
self.indices = self._indices_of_rank()
def _infinite_indices(self):
"""Infinitely yield a sequence of indices."""
g = torch.Generator()
g.manual_seed(self.seed)
while True:
if self.shuffle:
yield from torch.randperm(self.size, generator=g).tolist()
else:
yield from torch.arange(self.size).tolist()
def _indices_of_rank(self):
"""Slice the infinite indices by rank."""
yield from itertools.islice(self._infinite_indices(), self.rank, None,
self.world_size)
def __iter__(self):
# once batch size is reached, yield the indices
for idx in self.indices:
flag = self.flag[idx]
group_buffer = self.buffer_per_group[flag]
group_buffer.append(idx)
if len(group_buffer) == self.batch_size:
yield group_buffer[:]
del group_buffer[:]
def __len__(self):
"""Length of base dataset."""
return self.size
def set_epoch(self, epoch):
"""Not supported in `IterationBased` runner."""
raise NotImplementedError
class InfiniteBatchSampler(Sampler):
"""Similar to `BatchSampler` warping a `DistributedSampler. It is designed
iteration-based runners like `IterBasedRunner` and yields a mini-batch
indices each time.
The implementation logic is adapted from
https://github.com/facebookresearch/detectron2/blob/main/detectron2/data/samplers/grouped_batch_sampler.py
Args:
dataset (object): The dataset.
batch_size (int): When model is :obj:`DistributedDataParallel`,
it is the number of training samples on each GPU,
When model is :obj:`DataParallel`, it is
`num_gpus * samples_per_gpu`.
Default : 1.
world_size (int, optional): Number of processes participating in
distributed training. Default: None.
rank (int, optional): Rank of current process. Default: None.
seed (int): Random seed. Default: 0.
shuffle (bool): Whether shuffle the dataset or not. Default: True.
""" # noqa: W605
def __init__(self,
dataset,
batch_size=1,
world_size=None,
rank=None,
seed=0,
shuffle=True):
_rank, _world_size = get_dist_info()
if world_size is None:
world_size = _world_size
if rank is None:
rank = _rank
self.rank = rank
self.world_size = world_size
self.dataset = dataset
self.batch_size = batch_size
self.seed = seed if seed is not None else 0
self.shuffle = shuffle
self.size = len(dataset)
self.indices = self._indices_of_rank()
def _infinite_indices(self):
"""Infinitely yield a sequence of indices."""
g = torch.Generator()
g.manual_seed(self.seed)
while True:
if self.shuffle:
yield from torch.randperm(self.size, generator=g).tolist()
else:
yield from torch.arange(self.size).tolist()
def _indices_of_rank(self):
"""Slice the infinite indices by rank."""
yield from itertools.islice(self._infinite_indices(), self.rank, None,
self.world_size)
def __iter__(self):
# once batch size is reached, yield the indices
batch_buffer = []
for idx in self.indices:
batch_buffer.append(idx)
if len(batch_buffer) == self.batch_size:
yield batch_buffer
batch_buffer = []
def __len__(self):
"""Length of base dataset."""
return self.size
def set_epoch(self, epoch):
"""Not supported in `IterationBased` runner."""
raise NotImplementedError
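# Illustrative sketch (editor's addition, not part of the original file): how
# `_indices_of_rank` deals indices round-robin across ranks with
# `itertools.islice`. The finite stream below stands in for the infinite
# shuffled index stream.
def _demo_rank_slicing():
    world_size, rank = 2, 1
    stream = iter(range(10))  # stand-in for _infinite_indices()
    mine = list(itertools.islice(stream, rank, None, world_size))
    assert mine == [1, 3, 5, 7, 9]  # rank 1 receives every 2nd index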
| 6,267 | 35.231214 | 110 | py |
DSLA-DSLA | DSLA-DSLA/mmdet/datasets/samplers/distributed_sampler.py | # Copyright (c) OpenMMLab. All rights reserved.
import math
import torch
from torch.utils.data import DistributedSampler as _DistributedSampler
class DistributedSampler(_DistributedSampler):
def __init__(self,
dataset,
num_replicas=None,
rank=None,
shuffle=True,
seed=0):
super().__init__(
dataset, num_replicas=num_replicas, rank=rank, shuffle=shuffle)
# for compatibility with PyTorch 1.3+
self.seed = seed if seed is not None else 0
def __iter__(self):
# deterministically shuffle based on epoch
if self.shuffle:
g = torch.Generator()
g.manual_seed(self.epoch + self.seed)
indices = torch.randperm(len(self.dataset), generator=g).tolist()
else:
indices = torch.arange(len(self.dataset)).tolist()
# add extra samples to make it evenly divisible
# in case that indices is shorter than half of total_size
indices = (indices *
math.ceil(self.total_size / len(indices)))[:self.total_size]
assert len(indices) == self.total_size
# subsample
indices = indices[self.rank:self.total_size:self.num_replicas]
assert len(indices) == self.num_samples
return iter(indices)
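# Illustrative sketch (editor's addition, not part of the original file): the
# repeat-then-truncate padding and strided subsampling from `__iter__`, on a
# toy index list. Replica count and dataset size are hypothetical.
def _demo_pad_and_subsample():
    indices = [0, 1, 2, 3, 4]  # 5 samples shared by 2 replicas
    num_replicas, rank = 2, 1
    num_samples = math.ceil(len(indices) / num_replicas)  # 3 per replica
    total_size = num_samples * num_replicas               # 6
    padded = (indices * math.ceil(total_size / len(indices)))[:total_size]
    assert padded == [0, 1, 2, 3, 4, 0]
    assert padded[rank:total_size:num_replicas] == [1, 3, 0]  # rank 1's share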
| 1,358 | 32.146341 | 79 | py |
DSLA-DSLA | DSLA-DSLA/mmdet/datasets/pipelines/formating.py | # Copyright (c) OpenMMLab. All rights reserved.
from collections.abc import Sequence
import mmcv
import numpy as np
import torch
from mmcv.parallel import DataContainer as DC
from ..builder import PIPELINES
def to_tensor(data):
"""Convert objects of various python types to :obj:`torch.Tensor`.
Supported types are: :class:`numpy.ndarray`, :class:`torch.Tensor`,
:class:`Sequence`, :class:`int` and :class:`float`.
Args:
data (torch.Tensor | numpy.ndarray | Sequence | int | float): Data to
be converted.
"""
if isinstance(data, torch.Tensor):
return data
elif isinstance(data, np.ndarray):
return torch.from_numpy(data)
elif isinstance(data, Sequence) and not mmcv.is_str(data):
return torch.tensor(data)
elif isinstance(data, int):
return torch.LongTensor([data])
elif isinstance(data, float):
return torch.FloatTensor([data])
else:
raise TypeError(f'type {type(data)} cannot be converted to tensor.')
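# Illustrative sketch (editor's addition, not part of the original file):
# the type dispatch of `to_tensor` on a few toy inputs.
def _demo_to_tensor():
    assert to_tensor(np.zeros((2, 2))).shape == (2, 2)     # ndarray -> Tensor
    assert to_tensor([1, 2, 3]).tolist() == [1, 2, 3]      # sequence -> Tensor
    assert to_tensor(1).item() == 1                        # int -> LongTensor([1])
    assert abs(to_tensor(0.5).item() - 0.5) < 1e-6         # float -> FloatTensor([0.5])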
@PIPELINES.register_module()
class ToTensor:
"""Convert some results to :obj:`torch.Tensor` by given keys.
Args:
keys (Sequence[str]): Keys that need to be converted to Tensor.
"""
def __init__(self, keys):
self.keys = keys
def __call__(self, results):
"""Call function to convert data in results to :obj:`torch.Tensor`.
Args:
results (dict): Result dict contains the data to convert.
Returns:
dict: The result dict contains the data converted
to :obj:`torch.Tensor`.
"""
for key in self.keys:
results[key] = to_tensor(results[key])
return results
def __repr__(self):
return self.__class__.__name__ + f'(keys={self.keys})'
@PIPELINES.register_module()
class ImageToTensor:
"""Convert image to :obj:`torch.Tensor` by given keys.
The dimension order of the input image is (H, W, C). The pipeline converts
it to (C, H, W). If the image has only 2 dimensions (H, W), the output
will be (1, H, W).
Args:
keys (Sequence[str]): Key of images to be converted to Tensor.
"""
def __init__(self, keys):
self.keys = keys
def __call__(self, results):
"""Call function to convert image in results to :obj:`torch.Tensor` and
transpose the channel order.
Args:
results (dict): Result dict contains the image data to convert.
Returns:
dict: The result dict contains the image converted
to :obj:`torch.Tensor` and transposed to (C, H, W) order.
"""
for key in self.keys:
img = results[key]
if len(img.shape) < 3:
img = np.expand_dims(img, -1)
results[key] = (to_tensor(img.transpose(2, 0, 1))).contiguous()
return results
def __repr__(self):
return self.__class__.__name__ + f'(keys={self.keys})'
@PIPELINES.register_module()
class Transpose:
"""Transpose some results by given keys.
Args:
keys (Sequence[str]): Keys of results to be transposed.
order (Sequence[int]): Order of transpose.
"""
def __init__(self, keys, order):
self.keys = keys
self.order = order
def __call__(self, results):
"""Call function to transpose the channel order of data in results.
Args:
results (dict): Result dict contains the data to transpose.
Returns:
dict: The result dict contains the data transposed to \
``self.order``.
"""
for key in self.keys:
results[key] = results[key].transpose(self.order)
return results
def __repr__(self):
return self.__class__.__name__ + \
f'(keys={self.keys}, order={self.order})'
@PIPELINES.register_module()
class ToDataContainer:
"""Convert results to :obj:`mmcv.DataContainer` by given fields.
Args:
fields (Sequence[dict]): Each field is a dict like
``dict(key='xxx', **kwargs)``. The ``key`` in result will
be converted to :obj:`mmcv.DataContainer` with ``**kwargs``.
Default: ``(dict(key='img', stack=True), dict(key='gt_bboxes'),
dict(key='gt_labels'))``.
"""
def __init__(self,
fields=(dict(key='img', stack=True), dict(key='gt_bboxes'),
dict(key='gt_labels'))):
self.fields = fields
def __call__(self, results):
"""Call function to convert data in results to
:obj:`mmcv.DataContainer`.
Args:
results (dict): Result dict contains the data to convert.
Returns:
dict: The result dict contains the data converted to \
:obj:`mmcv.DataContainer`.
"""
for field in self.fields:
field = field.copy()
key = field.pop('key')
results[key] = DC(results[key], **field)
return results
def __repr__(self):
return self.__class__.__name__ + f'(fields={self.fields})'
@PIPELINES.register_module()
class DefaultFormatBundle:
"""Default formatting bundle.
It simplifies the pipeline of formatting common fields, including "img",
"proposals", "gt_bboxes", "gt_labels", "gt_masks" and "gt_semantic_seg".
These fields are formatted as follows.
- img: (1)transpose, (2)to tensor, (3)to DataContainer (stack=True)
- proposals: (1)to tensor, (2)to DataContainer
- gt_bboxes: (1)to tensor, (2)to DataContainer
- gt_bboxes_ignore: (1)to tensor, (2)to DataContainer
- gt_labels: (1)to tensor, (2)to DataContainer
- gt_masks: (1)to tensor, (2)to DataContainer (cpu_only=True)
- gt_semantic_seg: (1)unsqueeze dim-0 (2)to tensor, \
(3)to DataContainer (stack=True)
Args:
img_to_float (bool): Whether to force the image to be converted to
float type. Default: True.
pad_val (dict): A dict for padding value in batch collating,
the default value is `dict(img=0, masks=0, seg=255)`.
Without this argument, the padding value of "gt_semantic_seg"
will be set to 0 by default, which should be 255.
"""
def __init__(self,
img_to_float=True,
pad_val=dict(img=0, masks=0, seg=255)):
self.img_to_float = img_to_float
self.pad_val = pad_val
def __call__(self, results):
"""Call function to transform and format common fields in results.
Args:
results (dict): Result dict contains the data to convert.
Returns:
dict: The result dict contains the data that is formatted with \
default bundle.
"""
if 'img' in results:
img = results['img']
if self.img_to_float is True and img.dtype == np.uint8:
# Normally, the image is of uint8 type without normalization.
# In this case, it must be converted to float32, otherwise
# model training and inference will be wrong. Only used for
# YOLOX currently.
img = img.astype(np.float32)
# add default meta keys
results = self._add_default_meta_keys(results)
if len(img.shape) < 3:
img = np.expand_dims(img, -1)
img = np.ascontiguousarray(img.transpose(2, 0, 1))
results['img'] = DC(
to_tensor(img), padding_value=self.pad_val['img'], stack=True)
for key in ['proposals', 'gt_bboxes', 'gt_bboxes_ignore', 'gt_labels']:
if key not in results:
continue
results[key] = DC(to_tensor(results[key]))
if 'gt_masks' in results:
results['gt_masks'] = DC(
results['gt_masks'],
padding_value=self.pad_val['masks'],
cpu_only=True)
if 'gt_semantic_seg' in results:
results['gt_semantic_seg'] = DC(
to_tensor(results['gt_semantic_seg'][None, ...]),
padding_value=self.pad_val['seg'],
stack=True)
return results
def _add_default_meta_keys(self, results):
"""Add default meta keys.
We set default meta keys including `pad_shape`, `scale_factor` and
`img_norm_cfg` to cover the case where `Resize`, `Normalize` and
`Pad` are not applied in the pipeline.
Args:
results (dict): Result dict contains the data to convert.
Returns:
results (dict): Updated result dict contains the data to convert.
"""
img = results['img']
results.setdefault('pad_shape', img.shape)
results.setdefault('scale_factor', 1.0)
num_channels = 1 if len(img.shape) < 3 else img.shape[2]
results.setdefault(
'img_norm_cfg',
dict(
mean=np.zeros(num_channels, dtype=np.float32),
std=np.ones(num_channels, dtype=np.float32),
to_rgb=False))
return results
def __repr__(self):
return self.__class__.__name__ + \
f'(img_to_float={self.img_to_float})'
@PIPELINES.register_module()
class Collect:
"""Collect data from the loader relevant to the specific task.
This is usually the last stage of the data loader pipeline. Typically keys
is set to some subset of "img", "proposals", "gt_bboxes",
"gt_bboxes_ignore", "gt_labels", and/or "gt_masks".
The "img_meta" item is always populated. The contents of the "img_meta"
dictionary depends on "meta_keys". By default this includes:
- "img_shape": shape of the image input to the network as a tuple \
(h, w, c). Note that images may be zero padded on the \
bottom/right if the batch tensor is larger than this shape.
- "scale_factor": a float indicating the preprocessing scale
- "flip": a boolean indicating if image flip transform was used
- "filename": path to the image file
- "ori_shape": original shape of the image as a tuple (h, w, c)
- "pad_shape": image shape after padding
- "img_norm_cfg": a dict of normalization information:
- mean - per channel mean subtraction
- std - per channel std divisor
- to_rgb - bool indicating if bgr was converted to rgb
Args:
keys (Sequence[str]): Keys of results to be collected in ``data``.
meta_keys (Sequence[str], optional): Meta keys to be converted to
``mmcv.DataContainer`` and collected in ``data[img_metas]``.
Default: ``('filename', 'ori_filename', 'ori_shape', 'img_shape',
'pad_shape', 'scale_factor', 'flip', 'flip_direction',
'img_norm_cfg')``
"""
def __init__(self,
keys,
meta_keys=('filename', 'ori_filename', 'ori_shape',
'img_shape', 'pad_shape', 'scale_factor', 'flip',
'flip_direction', 'img_norm_cfg')):
self.keys = keys
self.meta_keys = meta_keys
def __call__(self, results):
"""Call function to collect keys in results. The keys in ``meta_keys``
will be converted to :obj:mmcv.DataContainer.
Args:
results (dict): Result dict contains the data to collect.
Returns:
dict: The result dict contains the following keys
- keys in``self.keys``
- ``img_metas``
"""
data = {}
img_meta = {}
for key in self.meta_keys:
img_meta[key] = results[key]
data['img_metas'] = DC(img_meta, cpu_only=True)
for key in self.keys:
data[key] = results[key]
return data
def __repr__(self):
return self.__class__.__name__ + \
f'(keys={self.keys}, meta_keys={self.meta_keys})'
@PIPELINES.register_module()
class WrapFieldsToLists:
"""Wrap fields of the data dictionary into lists for evaluation.
This class can be used as a last step of a test or validation
pipeline for single image evaluation or inference.
Example:
>>> test_pipeline = [
>>> dict(type='LoadImageFromFile'),
>>> dict(type='Normalize',
mean=[123.675, 116.28, 103.53],
std=[58.395, 57.12, 57.375],
to_rgb=True),
>>> dict(type='Pad', size_divisor=32),
>>> dict(type='ImageToTensor', keys=['img']),
>>> dict(type='Collect', keys=['img']),
>>> dict(type='WrapFieldsToLists')
>>> ]
"""
def __call__(self, results):
"""Call function to wrap fields into lists.
Args:
results (dict): Result dict contains the data to wrap.
Returns:
dict: The result dict where value of ``self.keys`` are wrapped \
into list.
"""
# Wrap dict fields into lists
for key, val in results.items():
results[key] = [val]
return results
def __repr__(self):
return f'{self.__class__.__name__}()'
| 13,291 | 32.821883 | 79 | py |
DSLA-DSLA | DSLA-DSLA/mmdet/utils/contextmanagers.py | # Copyright (c) OpenMMLab. All rights reserved.
import asyncio
import contextlib
import logging
import os
import time
from typing import List
import torch
logger = logging.getLogger(__name__)
DEBUG_COMPLETED_TIME = bool(os.environ.get('DEBUG_COMPLETED_TIME', False))
@contextlib.asynccontextmanager
async def completed(trace_name='',
name='',
sleep_interval=0.05,
streams: List[torch.cuda.Stream] = None):
"""Async context manager that waits for work to complete on given CUDA
streams."""
if not torch.cuda.is_available():
yield
return
stream_before_context_switch = torch.cuda.current_stream()
if not streams:
streams = [stream_before_context_switch]
else:
streams = [s if s else stream_before_context_switch for s in streams]
end_events = [
torch.cuda.Event(enable_timing=DEBUG_COMPLETED_TIME) for _ in streams
]
if DEBUG_COMPLETED_TIME:
start = torch.cuda.Event(enable_timing=True)
stream_before_context_switch.record_event(start)
cpu_start = time.monotonic()
logger.debug('%s %s starting, streams: %s', trace_name, name, streams)
grad_enabled_before = torch.is_grad_enabled()
try:
yield
finally:
current_stream = torch.cuda.current_stream()
assert current_stream == stream_before_context_switch
if DEBUG_COMPLETED_TIME:
cpu_end = time.monotonic()
for i, stream in enumerate(streams):
event = end_events[i]
stream.record_event(event)
grad_enabled_after = torch.is_grad_enabled()
# observed change of torch.is_grad_enabled() during concurrent run of
# async_test_bboxes code
assert (grad_enabled_before == grad_enabled_after
), 'Unexpected is_grad_enabled() value change'
are_done = [e.query() for e in end_events]
logger.debug('%s %s completed: %s streams: %s', trace_name, name,
are_done, streams)
with torch.cuda.stream(stream_before_context_switch):
while not all(are_done):
await asyncio.sleep(sleep_interval)
are_done = [e.query() for e in end_events]
logger.debug(
'%s %s completed: %s streams: %s',
trace_name,
name,
are_done,
streams,
)
current_stream = torch.cuda.current_stream()
assert current_stream == stream_before_context_switch
if DEBUG_COMPLETED_TIME:
cpu_time = (cpu_end - cpu_start) * 1000
stream_times_ms = ''
for i, stream in enumerate(streams):
elapsed_time = start.elapsed_time(end_events[i])
stream_times_ms += f' {stream} {elapsed_time:.2f} ms'
logger.info('%s %s %.2f ms %s', trace_name, name, cpu_time,
stream_times_ms)
@contextlib.asynccontextmanager
async def concurrent(streamqueue: asyncio.Queue,
trace_name='concurrent',
name='stream'):
"""Run code concurrently in different streams.
:param streamqueue: asyncio.Queue instance.
Queue tasks define the pool of streams used for concurrent execution.
"""
if not torch.cuda.is_available():
yield
return
initial_stream = torch.cuda.current_stream()
with torch.cuda.stream(initial_stream):
stream = await streamqueue.get()
assert isinstance(stream, torch.cuda.Stream)
try:
with torch.cuda.stream(stream):
logger.debug('%s %s is starting, stream: %s', trace_name, name,
stream)
yield
current = torch.cuda.current_stream()
assert current == stream
logger.debug('%s %s has finished, stream: %s', trace_name,
name, stream)
finally:
streamqueue.task_done()
streamqueue.put_nowait(stream)
| 4,125 | 32.544715 | 79 | py |
DSLA-DSLA | DSLA-DSLA/mmdet/utils/profiling.py | # Copyright (c) OpenMMLab. All rights reserved.
import contextlib
import sys
import time
import torch
if sys.version_info >= (3, 7):
@contextlib.contextmanager
def profile_time(trace_name,
name,
enabled=True,
stream=None,
end_stream=None):
"""Print time spent by CPU and GPU.
Useful as a temporary context manager to find sweet spots of code
suitable for async implementation.
"""
if (not enabled) or not torch.cuda.is_available():
yield
return
stream = stream if stream else torch.cuda.current_stream()
end_stream = end_stream if end_stream else stream
start = torch.cuda.Event(enable_timing=True)
end = torch.cuda.Event(enable_timing=True)
stream.record_event(start)
try:
cpu_start = time.monotonic()
yield
finally:
cpu_end = time.monotonic()
end_stream.record_event(end)
end.synchronize()
cpu_time = (cpu_end - cpu_start) * 1000
gpu_time = start.elapsed_time(end)
msg = f'{trace_name} {name} cpu_time {cpu_time:.2f} ms '
msg += f'gpu_time {gpu_time:.2f} ms stream {stream}'
print(msg, end_stream)
| 1,336 | 31.609756 | 73 | py |
BS-Net | BS-Net-main/loaddata.py | import pandas as pd
import numpy as np
from torch.utils.data import Dataset, DataLoader
from torchvision import transforms, utils
from PIL import Image
import random
from nyu_transform import *
import pdb
from scipy import io
class depthDataset(Dataset):
"""Face Landmarks dataset."""
def __init__(self, csv_file, transform=None):
self.frame = pd.read_csv(csv_file, header=None)
self.transform = transform
def __getitem__(self, idx):
image_name = self.frame.iloc[idx, 0]
depth_name = self.frame.iloc[idx, 1]
image = Image.open(image_name)
depth = Image.open(depth_name)
sample = {'image': image, 'depth': depth}
if self.transform:
sample = self.transform(sample)
return sample
def __len__(self):
return len(self.frame)
class depthDataset_iBims1(Dataset):
"""Face Landmarks dataset."""
def __init__(self, imagelist, transform=None):
with open(imagelist) as f:
image_names = f.readlines()
self.image_names = [x.strip() for x in image_names]
#self.frame = pd.read_csv(csv_file, header=None)
self.transform = transform
def __getitem__(self, idx):
image_name = self.image_names[idx]
image_data = io.loadmat('./data/iBims1/ibims1_core_mat/'+image_name)
data = image_data['data']
image = data['rgb'][0][0] # RGB image
depth = data['depth'][0][0] # Raw depth map
edges = data['edges'][0][0] # Ground truth edges
calib = data['calib'][0][0] # Calibration parameters
mask_invalid = data['mask_invalid'][0][0] # Mask for invalid pixels
mask_transp = data['mask_transp'][0][0] # Mask for transparent pixels
mask_wall = data['mask_wall'][0][0] # RGB image
mask_wall_paras = data['mask_wall_paras'][0][0] # Raw depth map
mask_table = data['mask_table'][0][0] # Ground truth edges
mask_table_paras = data['mask_table_paras'][0][0] # Calibration parameters
mask_floor = data['mask_floor'][0][0] # Mask for invalid pixels
mask_floor_paras = data['mask_floor_paras'][0][0]
#print(image_name,mask_wall_paras)
image = Image.fromarray(image)
depth = Image.fromarray(depth)
edges = Image.fromarray(edges)
calib = Image.fromarray(calib)
mask_invalid = Image.fromarray(mask_invalid)
mask_transp = Image.fromarray(mask_transp)
mask_wall=Image.fromarray(mask_wall)
mask_table=Image.fromarray(mask_table)
mask_floor=Image.fromarray(mask_floor)
sample = {'image': image, 'depth': depth,'edges': edges,'calib': calib,
'mask_invalid': mask_invalid,'mask_transp':mask_transp,"mask_wall":mask_wall,
"mask_wall_paras":mask_wall_paras,"mask_table":mask_table,"mask_table_paras":mask_table_paras,
"mask_floor":mask_floor,"mask_floor_paras":mask_floor_paras}
if self.transform:
sample = self.transform(sample)
return sample
def __len__(self):
return len(self.image_names)
def getTrainingData(batch_size=64):
__imagenet_pca = {
'eigval': torch.Tensor([0.2175, 0.0188, 0.0045]),
'eigvec': torch.Tensor([
[-0.5675, 0.7192, 0.4009],
[-0.5808, -0.0045, -0.8140],
[-0.5836, -0.6948, 0.4203],
])
}
__imagenet_stats = {'mean': [0.485, 0.456, 0.406],
'std': [0.229, 0.224, 0.225]}
transformed_training = depthDataset(csv_file='./data/nyu2_train.csv',
transform=transforms.Compose([
Scale(240),
RandomHorizontalFlip(),
RandomRotate(5),
CenterCrop([304, 228], [152, 114]),
ToTensor(),
Lighting(0.1, __imagenet_pca[
'eigval'], __imagenet_pca['eigvec']),
ColorJitter(
brightness=0.4,
contrast=0.4,
saturation=0.4,
),
Normalize(__imagenet_stats['mean'],
__imagenet_stats['std'])
]))
dataloader_training = DataLoader(transformed_training, batch_size,
shuffle=True, num_workers=16, pin_memory=True)
return dataloader_training
def getTestingData(batch_size=64):
__imagenet_stats = {'mean': [0.485, 0.456, 0.406],
'std': [0.229, 0.224, 0.225]}
# scale = random.uniform(1, 1.5)
transformed_testing = depthDataset(csv_file='./data/nyu2_test.csv',
transform=transforms.Compose([
Scale(240),
CenterCrop([304, 228], [304, 228]),
#CenterCrop([304, 228], [152, 114]),
ToTensor(is_test=True),
Normalize(__imagenet_stats['mean'],
__imagenet_stats['std'])
]))
dataloader_testing = DataLoader(transformed_testing, batch_size,
shuffle=False, num_workers=0, pin_memory=False)
return dataloader_testing
def getTestingData_iBims1(batch_size=64):
__imagenet_stats = {'mean': [0.485, 0.456, 0.406],
'std': [0.229, 0.224, 0.225]}
# scale = random.uniform(1, 1.5)
transformed_testing = depthDataset_iBims1(imagelist='./data/iBims1/imagelist.txt',
transform=transforms.Compose([
Scale_iBims1(240),
CenterCrop_iBims1([304, 228], [304, 228]),
#CenterCrop_iBims1([304, 228], [152, 114]),
ToTensor_iBims1(is_test=True),
Normalize_iBims1(__imagenet_stats['mean'],
__imagenet_stats['std'])
]))
dataloader_testing = DataLoader(transformed_testing, batch_size,shuffle=False, num_workers=0, pin_memory=False)
return dataloader_testing
| 6,915 | 42.772152 | 115 | py |
BS-Net | BS-Net-main/sobel.py | import torch
import torch.nn as nn
import numpy as np
class Sobel(nn.Module):
def __init__(self):
super(Sobel, self).__init__()
self.edge_conv=nn.Conv2d(1, 2, kernel_size=3, stride=1, padding=1, bias=False)
# edge_kx = np.array([[1, 0, -1], [2, 0, -2], [1, 0, -1]])
edge_kx=np.array([[-1, 0, 1], [-2, 0, 2], [-1, 0, 1]])
edge_ky=np.array([[1, 2, 1], [0, 0, 0], [-1, -2, -1]])
edge_k=np.stack((edge_kx, edge_ky))
edge_k=torch.from_numpy(edge_k).float().view(2, 1, 3, 3)
self.edge_conv.weight=nn.Parameter(edge_k)
for param in self.parameters():
param.requires_grad=False
def forward(self, x):
out=self.edge_conv(x)
out=out.contiguous().view(-1, 2, x.size(2), x.size(3))
return out
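# Illustrative sketch (editor's addition, not part of the original file): using
# the fixed-kernel Sobel module to compute a per-pixel gradient magnitude, as
# the edge-based metrics do. The input size is arbitrary.
def _demo_sobel_magnitude():
    depth = torch.ones(1, 1, 8, 8)  # flat depth map
    grad = Sobel()(depth)           # (N, 2, H, W): x- and y-gradients
    magnitude = torch.sqrt(grad[:, 0] ** 2 + grad[:, 1] ** 2)
    assert magnitude.shape == (1, 8, 8)
    # interior gradients are exactly zero on a constant map (borders are not,
    # because of zero padding)
    assert float(magnitude[:, 1:-1, 1:-1].abs().max()) == 0.0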
| 815 | 29.222222 | 86 | py |
BS-Net | BS-Net-main/test_iBims1.py | import warnings
warnings.filterwarnings("ignore")
import torch
import numpy as np
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.optim
import loaddata
import sobel
import os
import argparse
from models import modules as modules, net as net, dilation_resnet as resnet
from util import compute_global_errors,\
compute_directed_depth_error,\
compute_depth_boundary_error,\
compute_planarity_error,\
compute_distance_related_errors
parser = argparse.ArgumentParser(description='BS-Net iBims-1 testing')
parser.add_argument('--path', '--p', default="BSN_NYUD.pth.tar", type=str,help='path to the checkpoint (default: BSN_NYUD.pth.tar)')
os.environ['CUDA_VISIBLE_DEVICES']='1'
with open('./data/iBims1/imagelist.txt') as f:
image_names = f.readlines()
image_names = [x.strip() for x in image_names]
num_samples = len(image_names) # number of images
# Initialize global and geometric errors ...
rms = np.zeros(num_samples, np.float32)
log10 = np.zeros(num_samples, np.float32)
abs_rel = np.zeros(num_samples, np.float32)
sq_rel = np.zeros(num_samples, np.float32)
thr1 = np.zeros(num_samples, np.float32)
thr2 = np.zeros(num_samples, np.float32)
thr3 = np.zeros(num_samples, np.float32)
abs_rel_vec = np.zeros((num_samples,20),np.float32)
log10_vec = np.zeros((num_samples,20),np.float32)
rms_vec = np.zeros((num_samples,20),np.float32)
dde_0 = np.zeros(num_samples, np.float32)
dde_m = np.zeros(num_samples, np.float32)
dde_p = np.zeros(num_samples, np.float32)
dbe_acc = np.zeros(num_samples, np.float32)
dbe_com = np.zeros(num_samples, np.float32)
pe_fla = np.empty(0)
pe_ori = np.empty(0)
def define_model(pre_train=True):
original_model = resnet.resnet50(pretrained=pre_train)
Encoder = modules.E_resnet(original_model)
model = net.model(Encoder, num_features=2048, block_channel=[256, 512, 1024, 2048])
return model
def main():
model = define_model(pre_train=False)
cudnn.benchmark = True
global args
args=parser.parse_args()
val_loader = loaddata.getTestingData_iBims1(1)
checkpoint = torch.load(args.path)
state_dict = checkpoint['state_dict']
model.load_state_dict(state_dict)
model.cuda()
model.eval() # switch to evaluate mode
print("=> loaded model (epoch {})".format(checkpoint["epoch"]))
validate(val_loader, model)
validate_PRF(val_loader,model)
validate_VP(val_loader,model)
def validate(val_loader, model):
for i, sample_batched in enumerate(val_loader):
# print('Processing sample {0}'.format(i))
input, target, edges, calib, mask_invalid, mask_transp, mask_wall, \
paras_wall, mask_table, paras_table, mask_floor, paras_floor=sample_batched['image'], sample_batched['depth'], \
sample_batched['edges'], sample_batched['calib'], \
sample_batched['mask_invalid'], sample_batched['mask_transp'], \
sample_batched['mask_wall'], sample_batched['mask_wall_paras'], \
sample_batched['mask_table'], sample_batched['mask_table_paras'], \
sample_batched['mask_floor'], sample_batched['mask_floor_paras']
with torch.no_grad():
input = torch.autograd.Variable(input)
input = input.cuda()
pred = model(input)
pred=torch.nn.functional.interpolate(pred, size=[target.size(2), target.size(3)], mode='bilinear',align_corners=True)
pred=pred.data[0].cpu().numpy().squeeze()
depth=target.cpu().numpy().squeeze()
edges=edges.numpy().squeeze()
calib=calib.numpy().squeeze()
mask_transp=mask_transp.numpy().squeeze()
mask_invalid=mask_invalid.numpy().squeeze()
mask_wall=mask_wall.numpy().squeeze()
paras_wall=paras_wall.numpy().squeeze()
mask_table=mask_table.numpy().squeeze()
paras_table=paras_table.numpy().squeeze()
mask_floor=mask_floor.numpy().squeeze()
paras_floor=paras_floor.numpy().squeeze()
pred[np.isnan(pred)] = 0
pred_invalid = pred.copy()
pred_invalid[pred_invalid != 0] = 1
mask_missing = depth.copy() # Mask for further missing depth values in depth map
mask_missing[mask_missing != 0] = 1
mask_valid = mask_transp * mask_invalid * mask_missing * pred_invalid # Combine masks
# Apply 'valid_mask' to raw depth map
depth_valid = depth * mask_valid
gt = depth_valid
gt_vec = gt.flatten()
# Apply 'valid_mask' to raw depth map
pred = pred * mask_valid
pred_vec = pred.flatten()
# Compute errors ...
abs_rel[i], sq_rel[i], rms[i], log10[i], thr1[i], thr2[i], thr3[i] = compute_global_errors(gt_vec, pred_vec)
abs_rel_vec[i, :], log10_vec[i, :], rms_vec[i, :] = compute_distance_related_errors(gt, pred)
dde_0[i], dde_m[i], dde_p[i] = compute_directed_depth_error(gt_vec, pred_vec, 3.0)
dbe_acc[i], dbe_com[i], est_edges = compute_depth_boundary_error(edges, pred)
mask_wall = mask_wall * mask_valid
global pe_fla,pe_ori
if paras_wall.size > 0:
pe_fla_wall, pe_ori_wall = compute_planarity_error(gt, pred, paras_wall, mask_wall, calib)
pe_fla = np.append(pe_fla, pe_fla_wall)
pe_ori = np.append(pe_ori, pe_ori_wall)
mask_table =mask_table * mask_valid
if paras_table.size > 0:
pe_fla_table, pe_ori_table = compute_planarity_error(gt, pred, paras_table, mask_table, calib)
pe_fla = np.append(pe_fla, pe_fla_table)
pe_ori = np.append(pe_ori, pe_ori_table)
mask_floor = mask_floor * mask_valid
if paras_floor.size > 0:
pe_fla_floor, pe_ori_floor = compute_planarity_error(gt, pred, paras_floor, mask_floor, calib)
pe_fla = np.append(pe_fla, pe_fla_floor)
pe_ori = np.append(pe_ori, pe_ori_floor)
print('Results:')
print ('############ Global Error Metrics #################')
print ('rel = ', np.nanmean(abs_rel))
print('sq_rel = ', np.nanmean(sq_rel))
print ('log10 = ', np.nanmean(log10))
print ('rms = ', np.nanmean(rms))
print ('thr1 = ', np.nanmean(thr1))
print ('thr2 = ', np.nanmean(thr2))
print ('thr3 = ', np.nanmean(thr3))
print ('############ Planarity Error Metrics #################')
print('pe_fla = ', np.nanmean(pe_fla))
print('pe_ori = ', np.nanmean(pe_ori))
print ('############ Depth Boundary Error Metrics #################')
print ('dbe_acc = ', np.nanmean(dbe_acc))
print ('dbe_com = ', np.nanmean(dbe_com))
print ('############ Directed Depth Error Metrics #################')
print ('dde_0 = ', np.nanmean(dde_0) * 100.)
print ('dde_m = ', np.nanmean(dde_m) * 100.)
print ('dde_p = ', np.nanmean(dde_p) * 100.)
def validate_PRF(val_loader, model):
for th in [0.25,0.5,1]:
totalNumber = 0
Ae = 0
Pe = 0
Re = 0
Fe = 0
for i, sample_batched in enumerate(val_loader):
input, target, edges, calib, mask_invalid, mask_transp, mask_wall, \
paras_wall, mask_table, paras_table, mask_floor, paras_floor=sample_batched['image'], sample_batched['depth'], \
sample_batched['edges'], sample_batched['calib'], \
sample_batched['mask_invalid'], sample_batched['mask_transp'], \
sample_batched['mask_wall'], sample_batched['mask_wall_paras'], \
sample_batched['mask_table'], sample_batched['mask_table_paras'], \
sample_batched['mask_floor'], sample_batched['mask_floor_paras']
totalNumber = totalNumber + input.size(0)
target = target.cuda(non_blocking=True)
input = input.cuda()
with torch.no_grad():
pred = model(input)
pred=torch.nn.functional.interpolate(pred, size=[target.size(2), target.size(3)], mode='bilinear',align_corners=True)
depth_edge = edge_detection(target)
output_edge = edge_detection(pred)
edge1_valid = (depth_edge > th)
edge2_valid = (output_edge > th)
edge1_valid = np.array(edge1_valid.data.cpu().numpy(), dtype=np.uint8)
edge2_valid = np.array(edge2_valid.data.cpu().numpy(), dtype=np.uint8)
equal=edge1_valid==edge2_valid
nvalid = np.sum(equal)
A = nvalid / (target.size(2) * target.size(3))
nvalid2 = np.sum(((edge1_valid + edge2_valid) == 2))
P = nvalid2 / (np.sum(edge2_valid))
R = nvalid2 / (np.sum(edge1_valid))
F = (2 * P * R) / (P + R)
Ae += A
Pe += P
Re += R
Fe += F
Av = Ae / totalNumber
Pv = Pe / totalNumber
Rv = Re / totalNumber
Fv = Fe / totalNumber
print(th,'###################')
print('avgPV:', Pv)
print('avgRV:', Rv)
print('avgFV:', Fv,end="\n")
def validate_VP(val_loader, model):
totalNumber = 0
De_6 = 0
De_12 = 0
De_24 = 0
for i, sample_batched in enumerate(val_loader):
input, target = sample_batched['image'], sample_batched['depth']
totalNumber = totalNumber + input.size(0)
target = target.cuda(non_blocking=True)
input = input.cuda()
with torch.no_grad():
pred = model(input)
pred=torch.nn.functional.interpolate(pred, size=[target.size(2), target.size(3)], mode='bilinear',align_corners=True)
pred_6=torch.nn.functional.adaptive_avg_pool2d(pred,(6,6))
pred_12=torch.nn.functional.adaptive_avg_pool2d(pred,(12,12))
pred_24=torch.nn.functional.adaptive_avg_pool2d(pred,(24,24))
gt_6=torch.nn.functional.adaptive_avg_pool2d(target, (6,6))
gt_12=torch.nn.functional.adaptive_avg_pool2d(target, (12,12))
gt_24=torch.nn.functional.adaptive_avg_pool2d(target, (24,24))
# normalize each distance by the grid diagonal:
# sqrt(2)*6 ~ 8.48, sqrt(2)*12 ~ 16.97, sqrt(2)*24 ~ 33.94
D6=vp_dis(pred_6,gt_6)/8.48
D12=vp_dis(pred_12, gt_12)/16.97
D24=vp_dis(pred_24, gt_24)/33.94
De_6+=D6
De_12+=D12
De_24+=D24
De_6 = De_6 / totalNumber
De_12 = De_12 / totalNumber
De_24 = De_24 / totalNumber
print("###################")
print('De_6:', De_6)
print('De_12:', De_12)
print('De_24:', De_24)
def vp_dis(pred,gt):
pred=pred.squeeze().cpu().detach().numpy()
gt=gt.squeeze().cpu().detach().numpy()
pred_index=np.unravel_index(pred.argmax(), pred.shape)
gt_index=np.unravel_index(gt.argmax(), gt.shape)
return ((pred_index[0]-gt_index[0])**2+(pred_index[1]-gt_index[1])**2)**0.5
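# Illustrative sketch (editor's addition, not part of the original file):
# `vp_dis` returns the Euclidean distance between the argmax positions of the
# predicted and ground-truth maps. The toy tensors below are hypothetical.
def _demo_vp_dis():
    pred = torch.zeros(1, 1, 6, 6)
    pred[0, 0, 0, 0] = 1.0
    gt = torch.zeros(1, 1, 6, 6)
    gt[0, 0, 3, 4] = 1.0
    assert vp_dis(pred, gt) == 5.0  # sqrt(3**2 + 4**2)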
def edge_detection(depth):
get_edge = sobel.Sobel().cuda()
edge_xy = get_edge(depth)
edge_sobel = torch.pow(edge_xy[:, 0, :, :], 2) + \
torch.pow(edge_xy[:, 1, :, :], 2)
edge_sobel = torch.sqrt(edge_sobel)
return edge_sobel
if __name__ == '__main__':
main() | 11,843 | 41 | 140 | py |
BS-Net | BS-Net-main/util.py | import torch
from PIL import Image,ImageDraw,ImageFont
import matplotlib.pyplot as plt
import torch.nn as nn
import numpy as np
from skimage import feature
from scipy import ndimage
from sklearn.decomposition import PCA
import math
cmap = plt.cm.viridis
def lg10(x):
return torch.div(torch.log(x), math.log(10))
def maxOfTwo(x, y):
z = x.clone()
maskYLarger = torch.lt(x, y)
z[maskYLarger.detach()] = y[maskYLarger.detach()]
return z
def nValid(x):
return torch.sum(torch.eq(x, x).float())
def nNanElement(x):
return torch.sum(torch.ne(x, x).float())
def getNanMask(x):
return torch.ne(x, x)
def setNanToZero(input, target):
nanMask = getNanMask(target)
nValidElement = nValid(target)
_input = input.clone()
_target = target.clone()
_input[nanMask] = 0
_target[nanMask] = 0
return _input, _target, nanMask, nValidElement
def evaluateError(output, target):
errors = {'MSE': 0, 'RMSE': 0, 'ABS_REL': 0, 'LG10': 0,
'MAE': 0, 'DELTA1': 0, 'DELTA2': 0, 'DELTA3': 0}
_output, _target, nanMask, nValidElement = setNanToZero(output, target)
if (nValidElement.data.cpu().numpy() > 0):
diffMatrix = torch.abs(_output - _target)
errors['MSE'] = torch.sum(torch.pow(diffMatrix, 2)) / nValidElement
errors['MAE'] = torch.sum(diffMatrix) / nValidElement
realMatrix = torch.div(diffMatrix, _target)
realMatrix[nanMask] = 0
errors['ABS_REL'] = torch.sum(realMatrix) / nValidElement
LG10Matrix = torch.abs(lg10(_output) - lg10(_target))
LG10Matrix[nanMask] = 0
errors['LG10'] = torch.sum(LG10Matrix) / nValidElement
yOverZ = torch.div(_output, _target)
zOverY = torch.div(_target, _output)
maxRatio = maxOfTwo(yOverZ, zOverY)
errors['DELTA1'] = torch.sum(
torch.le(maxRatio, 1.25).float()) / nValidElement
errors['DELTA2'] = torch.sum(
torch.le(maxRatio, math.pow(1.25, 2)).float()) / nValidElement
errors['DELTA3'] = torch.sum(
torch.le(maxRatio, math.pow(1.25, 3)).float()) / nValidElement
errors['MSE'] = float(errors['MSE'].data.cpu().numpy())
errors['ABS_REL'] = float(errors['ABS_REL'].data.cpu().numpy())
errors['LG10'] = float(errors['LG10'].data.cpu().numpy())
errors['MAE'] = float(errors['MAE'].data.cpu().numpy())
errors['DELTA1'] = float(errors['DELTA1'].data.cpu().numpy())
errors['DELTA2'] = float(errors['DELTA2'].data.cpu().numpy())
errors['DELTA3'] = float(errors['DELTA3'].data.cpu().numpy())
return errors
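# Illustrative usage sketch (not part of the original pipeline): evaluateError
# on two toy CPU tensors. With output = 2 * target everywhere, ABS_REL is 1.0
# and every DELTA threshold fails, since max(output/target, target/output) = 2
# exceeds 1.25**3.
def _demo_evaluate_error():
    target = torch.ones(1, 1, 2, 2)
    output = 2 * target
    errors = evaluateError(output, target)
    assert abs(errors['ABS_REL'] - 1.0) < 1e-6
    assert errors['DELTA3'] == 0.0
    return errors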
def addErrors(errorSum, errors, batchSize):
errorSum['MSE']=errorSum['MSE'] + errors['MSE'] * batchSize
errorSum['ABS_REL']=errorSum['ABS_REL'] + errors['ABS_REL'] * batchSize
errorSum['LG10']=errorSum['LG10'] + errors['LG10'] * batchSize
errorSum['MAE']=errorSum['MAE'] + errors['MAE'] * batchSize
errorSum['DELTA1']=errorSum['DELTA1'] + errors['DELTA1'] * batchSize
errorSum['DELTA2']=errorSum['DELTA2'] + errors['DELTA2'] * batchSize
errorSum['DELTA3']=errorSum['DELTA3'] + errors['DELTA3'] * batchSize
return errorSum
def averageErrors(errorSum, N):
averageError={'MSE': 0, 'RMSE': 0, 'ABS_REL': 0, 'LG10': 0,
'MAE': 0, 'DELTA1': 0, 'DELTA2': 0, 'DELTA3': 0}
averageError['MSE'] = errorSum['MSE'] / N
averageError['ABS_REL'] = errorSum['ABS_REL'] / N
averageError['LG10'] = errorSum['LG10'] / N
averageError['MAE'] = errorSum['MAE'] / N
averageError['DELTA1'] = errorSum['DELTA1'] / N
averageError['DELTA2'] = errorSum['DELTA2'] / N
averageError['DELTA3'] = errorSum['DELTA3'] / N
return averageError
def colored_depthmap(depth, d_min=None, d_max=None):
if d_min is None:
d_min=np.min(depth)
if d_max is None:
d_max=np.max(depth)
depth_relative=(depth-d_min)/(d_max-d_min)
return 255*cmap(depth_relative)[:, :, :3] # H, W, C
def merge_into_row(input, depth_target, depth_pred,object_mask,object_nums):
rgb=np.transpose(np.squeeze(input), (2, 1, 0)) # H, W, C
depth_target_cpu=np.squeeze(depth_target.cpu().numpy())
depth_pred_cpu=np.squeeze(depth_pred.data.cpu().numpy())
mask=object_mask==object_nums
target_mse=depth_target_cpu[mask].mean()
pred_mse=depth_pred_cpu[mask].mean()
print(target_mse,pred_mse)
indexs=np.argwhere(object_mask==object_nums)
print(indexs.shape)
min_x=np.min(indexs[:,0])
min_y=np.min(indexs[:,1])
max_x=np.max(indexs[:,0])
max_y=np.max(indexs[:,1])
print(min_x,min_y)
print(max_x,max_y)
d_min=min(np.min(depth_target_cpu), np.min(depth_pred_cpu))
d_max=max(np.max(depth_target_cpu), np.max(depth_pred_cpu))
depth_target_col=colored_depthmap(depth_target_cpu, d_min, d_max)
depth_pred_col=colored_depthmap(depth_pred_cpu, d_min, d_max)
depth_target_col=Image.fromarray(depth_target_col.astype('uint8'))
depth_pred_col=Image.fromarray(depth_pred_col.astype('uint8'))
font=ImageFont.truetype('LiberationSans-Regular.ttf', 35)
draw=ImageDraw.Draw(depth_target_col)
draw.rectangle((min_y, min_x, max_y, max_x), fill=None, outline='red')
draw.text((min_y, min_x-50), str(target_mse)[0:3], font=font,fill=(255, 0, 0))
draw=ImageDraw.Draw(depth_pred_col)
draw.rectangle((min_y,min_x, max_y, max_x), fill=None, outline='red')
draw.text((min_y, min_x-50), str(pred_mse)[0:3], font=font,fill=(255, 0, 0))
depth_target_col=np.array(depth_target_col)
depth_pred_col=np.array(depth_pred_col)
img_merge=np.hstack([rgb, depth_target_col, depth_pred_col])
return img_merge
def merge_into_row_with_gt(input, depth_input, depth_target, depth_pred):
rgb=255*np.transpose(np.squeeze(input.cpu().numpy()), (1, 2, 0)) # H, W, C
depth_input_cpu=np.squeeze(depth_input.cpu().numpy())
depth_target_cpu=np.squeeze(depth_target.cpu().numpy())
depth_pred_cpu=np.squeeze(depth_pred.data.cpu().numpy())
d_min=min(np.min(depth_input_cpu), np.min(depth_target_cpu), np.min(depth_pred_cpu))
d_max=max(np.max(depth_input_cpu), np.max(depth_target_cpu), np.max(depth_pred_cpu))
depth_input_col=colored_depthmap(depth_input_cpu, d_min, d_max)
depth_target_col=colored_depthmap(depth_target_cpu, d_min, d_max)
depth_pred_col=colored_depthmap(depth_pred_cpu, d_min, d_max)
img_merge=np.hstack([rgb, depth_input_col, depth_target_col, depth_pred_col])
return img_merge
def add_row(img_merge, row):
return np.vstack([img_merge, row])
def save_image(img_merge, filename):
img_merge=Image.fromarray(img_merge.astype('uint8'))
img_merge.save(filename)
class Sobel(nn.Module):
def __init__(self):
super(Sobel, self).__init__()
self.edge_conv=nn.Conv2d(1, 2, kernel_size=3, stride=1, padding=1, bias=False)
# edge_kx = np.array([[1, 0, -1], [2, 0, -2], [1, 0, -1]])
edge_kx=np.array([[-1, 0, 1], [-2, 0, 2], [-1, 0, 1]])
edge_ky=np.array([[1, 2, 1], [0, 0, 0], [-1, -2, -1]])
edge_k=np.stack((edge_kx, edge_ky))
edge_k=torch.from_numpy(edge_k).float().view(2, 1, 3, 3)
self.edge_conv.weight=nn.Parameter(edge_k)
for param in self.parameters():
param.requires_grad=False
def forward(self, x):
out=self.edge_conv(x)
out=out.contiguous().view(-1, 2, x.size(2), x.size(3))
return out
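# Minimal CPU sanity check for Sobel (illustrative, not called by the pipeline):
# the module maps an (N, 1, H, W) depth map to (N, 2, H, W) gradients, with
# channel 0 the horizontal and channel 1 the vertical Sobel response.
def _demo_sobel():
    ramp = (torch.arange(16, dtype=torch.float32) % 4).view(1, 1, 4, 4)
    edges = Sobel()(ramp)
    assert edges.shape == (1, 2, 4, 4)
    return edges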
def compute_distance_related_errors(gt, pred):
# initialize output
abs_rel_vec_tmp = np.zeros(20, np.float32)
log10_vec_tmp = np.zeros(20, np.float32)
rms_vec_tmp = np.zeros(20, np.float32)
# exclude masked invalid and missing measurements
gt = gt[gt != 0]
pred = pred[pred != 0]
gt_all = gt
pred_all = pred
bot = 0.0
idx = 0
for top in range(1, 21):
mask = np.logical_and(gt_all >= bot, gt_all <= top)
gt_tmp = gt_all[mask]
pred_tmp = pred_all[mask]
# calc errors
abs_rel_vec_tmp[idx], tmp, rms_vec_tmp[idx], log10_vec_tmp[idx], tmp, tmp, tmp = compute_global_errors(gt_tmp,
pred_tmp)
bot = top # re-assign bottom threshold
idx = idx + 1
return abs_rel_vec_tmp, log10_vec_tmp, rms_vec_tmp
def compute_global_errors(gt, pred):
# exclude masked invalid and missing measurements
gt = gt[gt != 0]
pred = pred[pred != 0]
# compute global relative errors
thresh = np.maximum((gt / pred), (pred / gt))
thr1 = (thresh < 1.25).mean()
thr2 = (thresh < 1.25 ** 2).mean()
thr3 = (thresh < 1.25 ** 3).mean()
rmse = (gt - pred) ** 2
rmse = np.sqrt(rmse.mean())
rmse_log = (np.log(gt) - np.log(pred)) ** 2
rmse_log = np.sqrt(rmse_log.mean())
log10 = np.mean(np.abs(np.log10(gt) - np.log10(pred)))
abs_rel = np.mean(np.abs(gt - pred) / gt)
sq_rel = np.mean(((gt - pred) ** 2) / gt)
return abs_rel, sq_rel, rmse, log10, thr1, thr2, thr3
def compute_directed_depth_error(gt, pred, thr):
# exclude masked invalid and missing measurements
gt = gt[gt != 0]
pred = pred[pred != 0]
# number of valid depth values
nPx = float(len(gt))
    gt[gt <= thr] = 1  # assign depths closer than 'thr' the label '1'
    gt[gt > thr] = 0  # assign depths farther than 'thr' the label '0'
pred[pred <= thr] = 1
pred[pred > thr] = 0
diff = pred - gt # compute difference map
dde_0 = np.sum(diff == 0) / nPx
dde_m = np.sum(diff == 1) / nPx
dde_p = np.sum(diff == -1) / nPx
return dde_0, dde_m, dde_p
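# Reading the three ratios above: dde_0 is the fraction of pixels classified on
# the correct side of the 'thr' plane, dde_m (diff == 1) counts pixels predicted
# closer than 'thr' whose ground truth lies beyond it, and dde_p (diff == -1) is
# the opposite failure case.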
def compute_depth_boundary_error(edges_gt, pred):
# skip dbe if there is no ground truth distinct edge
if np.sum(edges_gt) == 0:
dbe_acc = np.nan
dbe_com = np.nan
edges_est = np.empty(pred.shape).astype(int)
else:
# normalize est depth map from 0 to 1
pred_normalized = pred.copy().astype('f')
pred_normalized[pred_normalized == 0] = np.nan
pred_normalized = pred_normalized - np.nanmin(pred_normalized)
pred_normalized = pred_normalized / np.nanmax(pred_normalized)
# apply canny filter
edges_est = feature.canny(pred_normalized, sigma=np.sqrt(2), low_threshold=0.15, high_threshold=0.3)
# compute distance transform for chamfer metric
D_gt = ndimage.distance_transform_edt(1 - edges_gt)
D_est = ndimage.distance_transform_edt(1 - edges_est)
max_dist_thr = 10. # Threshold for local neighborhood
mask_D_gt = D_gt < max_dist_thr # truncate distance transform map
E_fin_est_filt = edges_est * mask_D_gt # compute shortest distance for all predicted edges
if np.sum(E_fin_est_filt) == 0: # assign MAX value if no edges could be detected in prediction
dbe_acc = max_dist_thr
dbe_com = max_dist_thr
else:
# accuracy: directed chamfer distance of predicted edges towards gt edges
dbe_acc = np.nansum(D_gt * E_fin_est_filt) / np.nansum(E_fin_est_filt)
# completeness: sum of undirected chamfer distances of predicted and gt edges
ch1 = D_gt * edges_est # dist(predicted,gt)
ch1[ch1 > max_dist_thr] = max_dist_thr # truncate distances
ch2 = D_est * edges_gt # dist(gt, predicted)
ch2[ch2 > max_dist_thr] = max_dist_thr # truncate distances
res = ch1 + ch2 # summed distances
dbe_com = np.nansum(res) / (np.nansum(edges_est) + np.nansum(edges_gt)) # normalized
return dbe_acc, dbe_com, edges_est
def compute_planarity_error(gt, pred, paras, mask, calib):
# mask invalid and missing depth values
pred[pred == 0] = np.nan
gt[gt == 0] = np.nan
# number of planes of the current plane type
    if paras.ndim == 1:
        paras = np.expand_dims(paras, 0)
nr_planes = paras.shape[0]
# initialize PE errors
pe_fla = np.empty(0)
pe_ori = np.empty(0)
for j in range(nr_planes): # loop over number of planes
# only consider depth values for this specific planar mask
curr_plane_mask = mask.copy()
curr_plane_mask[curr_plane_mask < (j + 1)] = 0
curr_plane_mask[curr_plane_mask > (j + 1)] = 0
remain_mask = curr_plane_mask.astype(float)
remain_mask[remain_mask == 0] = np.nan
remain_mask[np.isnan(remain_mask) == 0] = 1
# only consider plane masks which are bigger than 5% of the image dimension
if np.nansum(remain_mask) / (640. * 480.) < 0.05:
flat = np.nan
orie = np.nan
else:
# scale remaining depth map of current plane towards gt depth map
mean_depth_est = np.nanmedian(pred * remain_mask)
mean_depth_gt = np.nanmedian(gt * remain_mask)
est_depth_scaled = pred / (mean_depth_est / mean_depth_gt) * remain_mask
# project masked and scaled depth values to 3D points
fx_d = calib[0, 0]
fy_d = calib[1, 1]
cx_d = calib[2, 0]
cy_d = calib[2, 1]
# c,r = np.meshgrid(range(gt.shape[1]),range(gt.shape[0]))
c, r = np.meshgrid(range(1, gt.shape[1] + 1), range(1, gt.shape[0] + 1))
tmp_x = ((c - cx_d) * est_depth_scaled / fx_d)
tmp_y = est_depth_scaled
tmp_z = (-(r - cy_d) * est_depth_scaled / fy_d)
X = tmp_x.flatten()
Y = tmp_y.flatten()
Z = tmp_z.flatten()
X = X[~np.isnan(X)]
Y = Y[~np.isnan(Y)]
Z = Z[~np.isnan(Z)]
pointCloud = np.stack((X, Y, Z))
# fit 3D plane to 3D points (normal, d)
pca = PCA(n_components=3)
pca.fit(pointCloud.T)
normal = -pca.components_[2, :]
point = np.mean(pointCloud, axis=1)
            d = -np.dot(normal, point)
# PE_flat: deviation of fitted 3D plane
flat = np.std(np.dot(pointCloud.T, normal.T) + d) * 100.
n_gt = paras[j, 4:7]
if np.dot(normal, n_gt) < 0:
normal = -normal
# PE_ori: 3D angle error between ground truth plane and normal vector of fitted plane
orie = math.atan2(np.linalg.norm(np.cross(n_gt, normal)), np.dot(n_gt, normal)) * 180. / np.pi
pe_fla = np.append(pe_fla, flat) # append errors
pe_ori = np.append(pe_ori, orie)
return pe_fla, pe_ori | 14,525 | 34.257282 | 120 | py |
BS-Net | BS-Net-main/nyu_transform.py | import torch
import numpy as np
from PIL import Image
import collections.abc
try:
import accimage
except ImportError:
accimage = None
import random
import scipy.ndimage as ndimage
def _is_pil_image(img):
if accimage is not None:
return isinstance(img, (Image.Image, accimage.Image))
else:
return isinstance(img, Image.Image)
def _is_numpy_image(img):
return isinstance(img, np.ndarray) and (img.ndim in {2, 3})
class RandomRotate(object):
"""Random rotation of the image from -angle to angle (in degrees)
This is useful for dataAugmentation, especially for geometric problems such as FlowEstimation
angle: max angle of the rotation
interpolation order: Default: 2 (bilinear)
reshape: Default: false. If set to true, image size will be set to keep every pixel in the image.
diff_angle: Default: 0. Must stay less than 10 degrees, or linear approximation of flowmap will be off.
"""
def __init__(self, angle, diff_angle=0, order=2, reshape=False):
self.angle = angle
self.reshape = reshape
self.order = order
def __call__(self, sample):
image, depth = sample['image'], sample['depth']
applied_angle = random.uniform(-self.angle, self.angle)
angle1 = applied_angle
angle1_rad = angle1 * np.pi / 180
image = ndimage.interpolation.rotate(
image, angle1, reshape=self.reshape, order=self.order)
depth = ndimage.interpolation.rotate(
depth, angle1, reshape=self.reshape, order=self.order)
image = Image.fromarray(image)
depth = Image.fromarray(depth)
return {'image': image, 'depth': depth}
class RandomHorizontalFlip(object):
def __call__(self, sample):
image, depth = sample['image'], sample['depth']
if not _is_pil_image(image):
raise TypeError(
                'img should be PIL Image. Got {}'.format(type(image)))
if not _is_pil_image(depth):
raise TypeError(
'img should be PIL Image. Got {}'.format(type(depth)))
if random.random() < 0.5:
image = image.transpose(Image.FLIP_LEFT_RIGHT)
depth = depth.transpose(Image.FLIP_LEFT_RIGHT)
return {'image': image, 'depth': depth}
class Scale(object):
""" Rescales the inputs and target arrays to the given 'size'.
'size' will be the size of the smaller edge.
For example, if height > width, then image will be
rescaled to (size * height / width, size)
size: size of the smaller edge
interpolation order: Default: 2 (bilinear)
"""
def __init__(self, size):
self.size = size
def __call__(self, sample):
image, depth = sample['image'], sample['depth']
image = self.changeScale(image, self.size)
depth = self.changeScale(depth, self.size, Image.NEAREST)
return {'image': image, 'depth': depth}
def changeScale(self, img, size, interpolation=Image.BILINEAR):
if not _is_pil_image(img):
raise TypeError(
'img should be PIL Image. Got {}'.format(type(img)))
        if not (isinstance(size, int) or (isinstance(size, collections.abc.Iterable) and len(size) == 2)):
raise TypeError('Got inappropriate size arg: {}'.format(size))
if isinstance(size, int):
w, h = img.size
if (w <= h and w == size) or (h <= w and h == size):
return img
if w < h:
ow = size
oh = int(size * h / w)
return img.resize((ow, oh), interpolation)
else:
oh = size
ow = int(size * w / h)
return img.resize((ow, oh), interpolation)
else:
return img.resize(size[::-1], interpolation)
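# Illustrative example (PIL is already imported above): Scale(240) resizes the
# smaller edge to 240 and keeps the aspect ratio, so a 640x480 sample becomes
# 320x240; the depth map uses nearest-neighbour interpolation so no new depth
# values are interpolated into existence.
def _demo_scale():
    sample = {'image': Image.new('RGB', (640, 480)),
              'depth': Image.new('F', (640, 480))}
    out = Scale(240)(sample)
    assert out['image'].size == (320, 240)
    assert out['depth'].size == (320, 240)
    return out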
class CenterCrop(object):
def __init__(self, size_image, size_depth):
self.size_image = size_image
self.size_depth = size_depth
def __call__(self, sample):
image, depth = sample['image'], sample['depth']
image = self.centerCrop(image, self.size_image)
depth = self.centerCrop(depth, self.size_image)
ow, oh = self.size_depth
depth = depth.resize((ow, oh))
return {'image': image, 'depth': depth}
def centerCrop(self, image, size):
w1, h1 = image.size
tw, th = size
if w1 == tw and h1 == th:
return image
x1 = int(round((w1 - tw) / 2.))
y1 = int(round((h1 - th) / 2.))
image = image.crop((x1, y1, tw + x1, th + y1))
return image
class ToTensor(object):
"""Convert a ``PIL.Image`` or ``numpy.ndarray`` to tensor.
Converts a PIL.Image or numpy.ndarray (H x W x C) in the range
[0, 255] to a torch.FloatTensor of shape (C x H x W) in the range [0.0, 1.0].
"""
def __init__(self, is_test=False):
self.is_test = is_test
def __call__(self, sample):
image, depth = sample['image'], sample['depth']
"""
Args:
pic (PIL.Image or numpy.ndarray): Image to be converted to tensor.
Returns:
Tensor: Converted image.
"""
# ground truth depth of training samples is stored in 8-bit while test samples are saved in 16 bit
image = self.to_tensor(image)
if self.is_test:
depth = self.to_tensor(depth).float() / 1000
else:
depth = self.to_tensor(depth).float() * 10
return {'image': image, 'depth': depth}
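    # Note on the scale factors above (inferred from the dataset format rather
    # than documented in the original code): NYUDv2 test depths are 16-bit
    # millimetre images, so dividing by 1000 yields metres, while the 8-bit
    # training depths -- already mapped to [0, 1] by to_tensor's div(255) --
    # are multiplied by 10 to recover the dataset's roughly 0-10 m range.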
def to_tensor(self, pic):
if not (_is_pil_image(pic) or _is_numpy_image(pic)):
raise TypeError(
'pic should be PIL Image or ndarray. Got {}'.format(type(pic)))
if isinstance(pic, np.ndarray):
img = torch.from_numpy(pic.transpose((2, 0, 1)))
return img.float().div(255)
if accimage is not None and isinstance(pic, accimage.Image):
nppic = np.zeros(
[pic.channels, pic.height, pic.width], dtype=np.float32)
pic.copyto(nppic)
return torch.from_numpy(nppic)
# handle PIL Image
if pic.mode == 'I':
img = torch.from_numpy(np.array(pic, np.int32, copy=False))
elif pic.mode == 'I;16':
img = torch.from_numpy(np.array(pic, np.int16, copy=False))
else:
img = torch.ByteTensor(
torch.ByteStorage.from_buffer(pic.tobytes()))
# PIL image mode: 1, L, P, I, F, RGB, YCbCr, RGBA, CMYK
if pic.mode == 'YCbCr':
nchannel = 3
elif pic.mode == 'I;16':
nchannel = 1
else:
nchannel = len(pic.mode)
img = img.view(pic.size[1], pic.size[0], nchannel)
# put it from HWC to CHW format
# yikes, this transpose takes 80% of the loading time/CPU
img = img.transpose(0, 1).transpose(0, 2).contiguous()
if isinstance(img, torch.ByteTensor):
return img.float().div(255)
else:
return img
class Lighting(object):
def __init__(self, alphastd, eigval, eigvec):
self.alphastd = alphastd
self.eigval = eigval
self.eigvec = eigvec
def __call__(self, sample):
image, depth = sample['image'], sample['depth']
if self.alphastd == 0:
return image
alpha = image.new().resize_(3).normal_(0, self.alphastd)
rgb = self.eigvec.type_as(image).clone() \
.mul(alpha.view(1, 3).expand(3, 3)) \
.mul(self.eigval.view(1, 3).expand(3, 3)) \
.sum(1).squeeze()
image = image.add(rgb.view(3, 1, 1).expand_as(image))
return {'image': image, 'depth': depth}
class Grayscale(object):
def __call__(self, img):
gs = img.clone()
        gs[0].mul_(0.299).add_(gs[1], alpha=0.587).add_(gs[2], alpha=0.114)
gs[1].copy_(gs[0])
gs[2].copy_(gs[0])
return gs
class Saturation(object):
def __init__(self, var):
self.var = var
def __call__(self, img):
gs = Grayscale()(img)
alpha = random.uniform(-self.var, self.var)
return img.lerp(gs, alpha)
class Brightness(object):
def __init__(self, var):
self.var = var
def __call__(self, img):
gs = img.new().resize_as_(img).zero_()
alpha = random.uniform(-self.var, self.var)
return img.lerp(gs, alpha)
class Contrast(object):
def __init__(self, var):
self.var = var
def __call__(self, img):
gs = Grayscale()(img)
gs.fill_(gs.mean())
alpha = random.uniform(-self.var, self.var)
return img.lerp(gs, alpha)
class RandomOrder(object):
""" Composes several transforms together in random order.
"""
def __init__(self, transforms):
self.transforms = transforms
def __call__(self, sample):
image, depth = sample['image'], sample['depth']
if self.transforms is None:
return {'image': image, 'depth': depth}
order = torch.randperm(len(self.transforms))
for i in order:
image = self.transforms[i](image)
return {'image': image, 'depth': depth}
class ColorJitter(RandomOrder):
def __init__(self, brightness=0.4, contrast=0.4, saturation=0.4):
self.transforms = []
if brightness != 0:
self.transforms.append(Brightness(brightness))
if contrast != 0:
self.transforms.append(Contrast(contrast))
if saturation != 0:
self.transforms.append(Saturation(saturation))
class Normalize(object):
def __init__(self, mean, std):
self.mean = mean
self.std = std
def __call__(self, sample):
"""
Args:
tensor (Tensor): Tensor image of size (C, H, W) to be normalized.
Returns:
Tensor: Normalized image.
"""
image, depth = sample['image'], sample['depth']
image = self.normalize(image, self.mean, self.std)
return {'image': image, 'depth': depth}
def normalize(self, tensor, mean, std):
"""Normalize a tensor image with mean and standard deviation.
See ``Normalize`` for more details.
Args:
tensor (Tensor): Tensor image of size (C, H, W) to be normalized.
mean (sequence): Sequence of means for R, G, B channels respecitvely.
std (sequence): Sequence of standard deviations for R, G, B channels
respecitvely.
Returns:
Tensor: Normalized image.
"""
# TODO: make efficient
for t, m, s in zip(tensor, mean, std):
t.sub_(m).div_(s)
return tensor
class Scale_iBims1(object):
""" Rescales the inputs and target arrays to the given 'size'.
'size' will be the size of the smaller edge.
For example, if height > width, then image will be
rescaled to (size * height / width, size)
size: size of the smaller edge
interpolation order: Default: 2 (bilinear)
"""
def __init__(self, size):
self.size = size
def __call__(self, sample):
        image, depth, edges, calib = sample['image'], sample['depth'], sample['edges'], sample['calib']
        mask_invalid, mask_transp = sample['mask_invalid'], sample['mask_transp']
        mask_wall, mask_wall_paras = sample['mask_wall'], sample['mask_wall_paras']
        mask_table, mask_table_paras = sample['mask_table'], sample['mask_table_paras']
        mask_floor, mask_floor_paras = sample['mask_floor'], sample['mask_floor_paras']
image = self.changeScale(image, self.size)
depth = self.changeScale(depth, self.size, Image.NEAREST)
edges = self.changeScale(edges, self.size, Image.NEAREST)
#calib = self.changeScale(calib, self.size)
mask_invalid = self.changeScale(mask_invalid, self.size, Image.NEAREST)
mask_transp = self.changeScale(mask_transp, self.size, Image.NEAREST)
mask_wall=self.changeScale(mask_wall, self.size, Image.NEAREST)
mask_table=self.changeScale(mask_table, self.size, Image.NEAREST)
mask_floor=self.changeScale(mask_floor, self.size, Image.NEAREST)
return {'image': image, 'depth': depth, 'edges': edges, 'calib': calib,
'mask_invalid': mask_invalid, 'mask_transp': mask_transp,
"mask_wall": mask_wall, "mask_wall_paras": mask_wall_paras, "mask_table": mask_table,
"mask_table_paras": mask_table_paras, "mask_floor": mask_floor, "mask_floor_paras": mask_floor_paras}
def changeScale(self, img, size, interpolation=Image.BILINEAR):
if not _is_pil_image(img):
raise TypeError(
'img should be PIL Image. Got {}'.format(type(img)))
        if not (isinstance(size, int) or (isinstance(size, collections.abc.Iterable) and len(size) == 2)):
raise TypeError('Got inappropriate size arg: {}'.format(size))
if isinstance(size, int):
w, h = img.size
if (w <= h and w == size) or (h <= w and h == size):
return img
if w < h:
ow = size
oh = int(size * h / w)
return img.resize((ow, oh), interpolation)
else:
oh = size
ow = int(size * w / h)
return img.resize((ow, oh), interpolation)
else:
return img.resize(size[::-1], interpolation)
class CenterCrop_iBims1(object):
def __init__(self, size_image, size_depth):
self.size_image = size_image
self.size_depth = size_depth
def __call__(self, sample):
        image, depth, edges, calib = sample['image'], sample['depth'], sample['edges'], sample['calib']
        mask_invalid, mask_transp = sample['mask_invalid'], sample['mask_transp']
        mask_wall, mask_wall_paras = sample['mask_wall'], sample['mask_wall_paras']
        mask_table, mask_table_paras = sample['mask_table'], sample['mask_table_paras']
        mask_floor, mask_floor_paras = sample['mask_floor'], sample['mask_floor_paras']
image = self.centerCrop(image, self.size_image)
depth = self.centerCrop(depth, self.size_image)
edges = self.centerCrop(edges, self.size_image)
#calib = self.centerCrop(calib, self.size_image)
mask_invalid = self.centerCrop(mask_invalid, self.size_image)
mask_transp = self.centerCrop(mask_transp, self.size_image)
mask_wall=self.centerCrop(mask_wall, self.size_image)
mask_table=self.centerCrop(mask_table, self.size_image)
mask_floor=self.centerCrop(mask_floor, self.size_image)
ow, oh = self.size_depth
depth = depth.resize((ow, oh))
edges = edges.resize((ow, oh))
mask_invalid = mask_invalid.resize((ow, oh))
mask_transp = mask_transp.resize((ow, oh))
mask_wall=mask_wall.resize((ow, oh))
mask_table=mask_table.resize((ow, oh))
mask_floor=mask_floor.resize((ow, oh))
return {'image': image, 'depth': depth, 'edges': edges, 'calib': calib,
'mask_invalid': mask_invalid, 'mask_transp': mask_transp,
"mask_wall": mask_wall, "mask_wall_paras": mask_wall_paras, "mask_table": mask_table,
"mask_table_paras": mask_table_paras, "mask_floor": mask_floor, "mask_floor_paras": mask_floor_paras}
def centerCrop(self, image, size):
w1, h1 = image.size
tw, th = size
if w1 == tw and h1 == th:
return image
x1 = int(round((w1 - tw) / 2.))
y1 = int(round((h1 - th) / 2.))
image = image.crop((x1, y1, tw + x1, th + y1))
return image
class ToTensor_iBims1(object):
"""Convert a ``PIL.Image`` or ``numpy.ndarray`` to tensor.
Converts a PIL.Image or numpy.ndarray (H x W x C) in the range
[0, 255] to a torch.FloatTensor of shape (C x H x W) in the range [0.0, 1.0].
"""
def __init__(self,is_test=False):
self.is_test = is_test
def __call__(self, sample):
        image, depth, edges, calib = sample['image'], sample['depth'], sample['edges'], sample['calib']
        mask_invalid, mask_transp = sample['mask_invalid'], sample['mask_transp']
        mask_wall, mask_wall_paras = sample['mask_wall'], sample['mask_wall_paras']
        mask_table, mask_table_paras = sample['mask_table'], sample['mask_table_paras']
        mask_floor, mask_floor_paras = sample['mask_floor'], sample['mask_floor_paras']
"""
Args:
pic (PIL.Image or numpy.ndarray): Image to be converted to tensor.
Returns:
Tensor: Converted image.
"""
# ground truth depth of training samples is stored in 8-bit while test samples are saved in 16 bit
image = self.to_tensor(image)
depth = self.to_tensor(depth).float()
edges=self.to_tensor(edges)
calib=self.to_tensor(calib).float()
mask_invalid=self.to_tensor(mask_invalid)
mask_transp=self.to_tensor(mask_transp)
mask_wall=self.to_tensor(mask_wall)
mask_table=self.to_tensor(mask_table)
mask_floor=self.to_tensor(mask_floor)
mask_wall_paras=torch.from_numpy(mask_wall_paras)
mask_table_paras=torch.from_numpy(mask_table_paras)
mask_floor_paras=torch.from_numpy(mask_floor_paras)
return {'image': image, 'depth': depth, 'edges': edges, 'calib': calib,
'mask_invalid': mask_invalid, 'mask_transp': mask_transp,
"mask_wall": mask_wall,"mask_wall_paras": mask_wall_paras, "mask_table": mask_table,
"mask_table_paras": mask_table_paras,"mask_floor": mask_floor, "mask_floor_paras": mask_floor_paras}
def to_tensor(self, pic):
if not(_is_pil_image(pic) or _is_numpy_image(pic)):
raise TypeError(
'pic should be PIL Image or ndarray. Got {}'.format(type(pic)))
if isinstance(pic, np.ndarray):
img = torch.from_numpy(pic.transpose((2, 0, 1)))
return img.float()
if accimage is not None and isinstance(pic, accimage.Image):
nppic = np.zeros(
[pic.channels, pic.height, pic.width], dtype=np.float32)
pic.copyto(nppic)
return torch.from_numpy(nppic)
# handle PIL Image
if pic.mode == 'I':
            img = torch.from_numpy(np.array(pic, np.int32, copy=False))
elif pic.mode == 'F':
#print np.array(pic, np.uint8, copy=False)
#img = torch.ByteTensor(torch.ByteStorage.from_buffer(pic.tobytes()))
img = torch.from_numpy(np.array(pic, np.float64, copy=False))
        elif pic.mode == '1':
            # binary masks: load as uint8 (the original 'boolen' was an undefined name)
            img = torch.from_numpy(np.array(pic, np.uint8, copy=False))
else:
img = torch.from_numpy(np.array(pic, np.uint8, copy=False))
# PIL image mode: 1, L, P, I, F, RGB, YCbCr, RGBA, CMYK
if pic.mode == 'YCbCr':
nchannel = 3
elif pic.mode == 'I;16':
nchannel = 1
else:
nchannel = len(pic.mode)
img = img.view(pic.size[1], pic.size[0], nchannel)
# put it from HWC to CHW format
# yikes, this transpose takes 80% of the loading time/CPU
img = img.transpose(0, 1).transpose(0, 2).contiguous()
if pic.mode == 'RGB':
return img.float()/255
else:
return img.float()
class Normalize_iBims1(object):
def __init__(self, mean, std):
self.mean = mean
self.std = std
def __call__(self, sample):
"""
Args:
tensor (Tensor): Tensor image of size (C, H, W) to be normalized.
Returns:
Tensor: Normalized image.
"""
        image, depth, edges, calib = sample['image'], sample['depth'], sample['edges'], sample['calib']
        mask_invalid, mask_transp = sample['mask_invalid'], sample['mask_transp']
        mask_wall, mask_wall_paras = sample['mask_wall'], sample['mask_wall_paras']
        mask_table, mask_table_paras = sample['mask_table'], sample['mask_table_paras']
        mask_floor, mask_floor_paras = sample['mask_floor'], sample['mask_floor_paras']
image = self.normalize(image, self.mean, self.std)
return {'image': image, 'depth': depth, 'edges': edges, 'calib': calib,
'mask_invalid': mask_invalid, 'mask_transp': mask_transp,
"mask_wall": mask_wall, "mask_wall_paras": mask_wall_paras, "mask_table": mask_table,
"mask_table_paras": mask_table_paras, "mask_floor": mask_floor, "mask_floor_paras": mask_floor_paras}
def normalize(self, tensor, mean, std):
"""Normalize a tensor image with mean and standard deviation.
See ``Normalize`` for more details.
Args:
tensor (Tensor): Tensor image of size (C, H, W) to be normalized.
mean (sequence): Sequence of means for R, G, B channels respecitvely.
std (sequence): Sequence of standard deviations for R, G, B channels
respecitvely.
Returns:
Tensor: Normalized image.
"""
# TODO: make efficient
for t, m, s in zip(tensor, mean, std):
t.sub_(m).div_(s)
return tensor | 23,434 | 38.058333 | 137 | py |
BS-Net | BS-Net-main/metrics.py | import torch
import math
import numpy as np
def log10(x):
"""Convert a new tensor with the base-10 logarithm of the elements of x. """
return torch.log(x) / math.log(10)
class Result(object):
def __init__(self):
self.irmse, self.imae = 0, 0
self.mse, self.rmse, self.mae = 0, 0, 0
self.absrel, self.lg10 = 0, 0
self.delta1, self.delta2, self.delta3 = 0, 0, 0
self.data_time, self.gpu_time = 0, 0
def set_to_worst(self):
self.irmse, self.imae = np.inf, np.inf
self.mse, self.rmse, self.mae = np.inf, np.inf, np.inf
self.absrel, self.lg10 = np.inf, np.inf
self.delta1, self.delta2, self.delta3 = 0, 0, 0
self.data_time, self.gpu_time = 0, 0
def update(self, irmse, imae, mse, rmse, mae, absrel, lg10, delta1, delta2, delta3, gpu_time, data_time):
self.irmse, self.imae = irmse, imae
self.mse, self.rmse, self.mae = mse, rmse, mae
self.absrel, self.lg10 = absrel, lg10
self.delta1, self.delta2, self.delta3 = delta1, delta2, delta3
self.data_time, self.gpu_time = data_time, gpu_time
def evaluate(self, output, target):
        valid_mask = ((target > 0) + (output > 0)) > 0  # elementwise OR: keep pixels where either map is non-zero
output = output[valid_mask]
target = target[valid_mask]
abs_diff = (output - target).abs()
self.mse = float((torch.pow(abs_diff, 2)).mean())
self.rmse = math.sqrt(self.mse)
self.mae = float(abs_diff.mean())
self.lg10 = float((log10(output) - log10(target)).abs().mean())
self.absrel = float((abs_diff / target).mean())
maxRatio = torch.max(output / target, target / output)
self.delta1 = float((maxRatio < 1.25).float().mean())
self.delta2 = float((maxRatio < 1.25 ** 2).float().mean())
self.delta3 = float((maxRatio < 1.25 ** 3).float().mean())
self.data_time = 0
self.gpu_time = 0
inv_output = 1 / output
inv_target = 1 / target
abs_inv_diff = (inv_output - inv_target).abs()
self.irmse = math.sqrt((torch.pow(abs_inv_diff, 2)).mean())
self.imae = float(abs_inv_diff.mean())
class AverageMeter(object):
def __init__(self):
self.reset()
def reset(self):
self.count = 0.0
self.count_lg10=0
self.count_rel=0
self.sum_irmse, self.sum_imae = 0, 0
self.sum_mse, self.sum_rmse, self.sum_mae = 0, 0, 0
self.sum_absrel, self.sum_lg10 = 0, 0
self.sum_delta1, self.sum_delta2, self.sum_delta3 = 0, 0, 0
self.sum_data_time, self.sum_gpu_time = 0, 0
def update(self, result, gpu_time, data_time, n=1):
self.count += n
if np.isinf(result.lg10):
self.count_lg10+=n
else:
self.sum_lg10 += n * result.lg10
if np.isinf(result.absrel):
self.count_rel+=n
else:
self.sum_absrel += n * result.absrel
# self.sum_absrel += n * result.absrel
# self.sum_lg10 += n * result.lg10
self.sum_irmse += n*result.irmse
self.sum_imae += n*result.imae
self.sum_mse += n*result.mse
self.sum_rmse += n*result.rmse
self.sum_mae += n*result.mae
self.sum_delta1 += n*result.delta1
self.sum_delta2 += n*result.delta2
self.sum_delta3 += n*result.delta3
self.sum_data_time += n*data_time
self.sum_gpu_time += n*gpu_time
def average(self):
avg = Result()
avg.update(
self.sum_irmse / self.count, self.sum_imae / self.count,
self.sum_mse / self.count, (self.sum_mse / self.count)**0.5, self.sum_mae / self.count,
self.sum_absrel /(self.count-self.count_rel), self.sum_lg10 / (self.count-self.count_lg10),
#self.sum_absrel / self.count, self.sum_lg10 / self.count,
self.sum_delta1 / self.count, self.sum_delta2 / self.count, self.sum_delta3 / self.count,
self.sum_gpu_time / self.count, self.sum_data_time / self.count)
return avg | 4,037 | 37.09434 | 109 | py |
BS-Net | BS-Net-main/train.py | # -*- coding: UTF-8 -*-
import warnings
warnings.filterwarnings("ignore")
import argparse
import time
import os
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.optim
import loaddata
import random
import numpy as np
import util
from models import modules as modules, net as net, dilation_resnet as resnet
parser = argparse.ArgumentParser(description='BS-Net training')
parser.add_argument('--epochs', default=20, type=int,
help='number of total epochs to run')
parser.add_argument('--start-epoch', default=0, type=int,
help='manual epoch number (useful on restarts)')
parser.add_argument('--lr', '--learning-rate', default=0.0001, type=float,
help='initial learning rate')
parser.add_argument('--momentum', default=0.9, type=float, help='momentum')
parser.add_argument('--weight-decay', '--wd', default=1e-4, type=float,
help='weight decay (default: 1e-4)')
parser.add_argument('--seed', '--rs', default=1024, type=int,
help='random seed (default: 0)')
parser.add_argument('--resume', '--r', default="", type=str,
help='resume_root (default:"")')
########################################################
def define_model(pre_train=True):
original_model = resnet.resnet50(pretrained=pre_train)
Encoder = modules.E_resnet(original_model)
model = net.model(Encoder, num_features=2048, block_channel=[256, 512, 1024, 2048])
return model
def main():
global args
args = parser.parse_args()
torch.manual_seed(args.seed)
torch.cuda.manual_seed(args.seed)
torch.cuda.manual_seed_all(args.seed)
np.random.seed(args.seed) # Numpy module.
random.seed(args.seed) # Python random module.
torch.manual_seed(args.seed)
torch.backends.cudnn.benchmark = True
torch.backends.cudnn.deterministic = True
model = define_model(pre_train=True)
####################load pretrained model
if args.resume!="":
Checkpoint=torch.load(args.resume)
state_dict = Checkpoint['state_dict']
model.load_state_dict(state_dict)
args.start_epoch=Checkpoint["epoch"]+1
print('parameter loaded successfully!!')
if torch.cuda.device_count() == 8:
model = torch.nn.DataParallel(model, device_ids=[0, 1, 2, 3, 4, 5, 6, 7]).cuda()
batch_size = 64
elif torch.cuda.device_count() == 4:
model = torch.nn.DataParallel(model,device_ids=[0,1,2,3]).cuda()
batch_size = 16
elif torch.cuda.device_count() == 2:
model = torch.nn.DataParallel(model, device_ids=[0,1]).cuda()
batch_size = 8
else:
model = model.cuda()
batch_size = 4 # batch size
optimizer = torch.optim.Adam(model.parameters(), args.lr, weight_decay=args.weight_decay)
train_loader = loaddata.getTrainingData(batch_size)
losses={}
for epoch in range(args.start_epoch, args.epochs):
adjust_learning_rate(optimizer, epoch)
loss=train(train_loader, model, optimizer, epoch)
losses[str(epoch)]=loss
save_checkpoint({"epoch": epoch, "state_dict": model.state_dict(),"loss_avg":loss},
filename='midCheckpoint_{}.pth.tar'.format(epoch))
def train(train_loader, model, optimizer, epoch):
batch_time = AverageMeter()
losses = AverageMeter()
model.train()
cos = nn.CosineSimilarity(dim=1, eps=0)
get_gradient =util.Sobel().cuda()
end = time.time()
for i, sample_batched in enumerate(train_loader):
image, depth = sample_batched['image'], sample_batched['depth']
depth = depth.cuda()
image = image.cuda()
image = torch.autograd.Variable(image)
depth = torch.autograd.Variable(depth)
ones = torch.ones(depth.size(0), 1, depth.size(2), depth.size(3)).float().cuda()
ones = torch.autograd.Variable(ones)
optimizer.zero_grad()
#pdb.set_trace()
output = model(image)
#pdb.set_trace()
depth_grad = get_gradient(depth)
output_grad = get_gradient(output)
depth_grad_dx = depth_grad[:, 0, :, :].contiguous().view_as(depth)
depth_grad_dy = depth_grad[:, 1, :, :].contiguous().view_as(depth)
output_grad_dx = output_grad[:, 0, :, :].contiguous().view_as(depth)
output_grad_dy = output_grad[:, 1, :, :].contiguous().view_as(depth)
depth_normal = torch.cat((-depth_grad_dx, -depth_grad_dy, ones), 1)
output_normal = torch.cat((-output_grad_dx, -output_grad_dy, ones), 1)
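        # Loss design (describing the terms below): an L1-style penalty in log
        # space on the depth itself, the same penalty on the Sobel gradients in
        # x and y, and a cosine term aligning the surface normals built as
        # (-dx, -dy, 1) for prediction and ground truth.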
loss_depth = torch.log(torch.abs(output - depth) + 0.5).mean()
loss_dx = torch.log(torch.abs(output_grad_dx - depth_grad_dx) + 0.5).mean()
loss_dy = torch.log(torch.abs(output_grad_dy - depth_grad_dy) + 0.5).mean()
loss_normal = torch.abs(1 - cos(output_normal, depth_normal)).mean()
loss = loss_depth + loss_normal + (loss_dx + loss_dy)
losses.update(loss.data, image.size(0))
loss.backward()
optimizer.step()
batch_time.update(time.time() - end)
end = time.time()
print('Epoch: [{0}][{1}/{2}]\t'
'Time {batch_time.val:.3f} ({batch_time.sum:.3f})\t'
'Loss {loss.val:.4f} ({loss.avg:.4f})'
.format(epoch, i, len(train_loader), batch_time=batch_time, loss=losses))
return losses.avg
# adjust the learning rate every 5 epochs
def adjust_learning_rate(optimizer, epoch):
lr = args.lr * (0.1 ** (epoch // 5))
for param_group in optimizer.param_groups:
param_group['lr'] = lr
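# e.g. with the default --lr 1e-4: epochs 0-4 run at 1e-4, epochs 5-9 at 1e-5,
# epochs 10-14 at 1e-6, and so on.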
# define a useful data structure
class AverageMeter(object):
def __init__(self):
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
# save the model parameters
def save_checkpoint(state, filename='res50.pth.tar'):
torch.save(state, filename)
if __name__ == '__main__':
main() | 6,175 | 35.544379 | 93 | py |
BS-Net | BS-Net-main/test_NYUDv2.py | import warnings
warnings.filterwarnings("ignore")
import time
import torch
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.optim
import loaddata
import numpy as np
from metrics import AverageMeter, Result
from models import modules as modules, net as net, dilation_resnet as resnet
import torch.nn.functional as F
import argparse
import sobel
parser = argparse.ArgumentParser(description='BS-Net NYUDv2 testing')
parser.add_argument('--path', '--p', default="BSN_NYUD.pth.tar", type=str,help='results_root (default:BSN_NYUD.pth.tar)')
def define_model(pre_train=True):
original_model = resnet.resnet50(pretrained=pre_train)
Encoder = modules.E_resnet(original_model)
model = net.model(Encoder, num_features=2048, block_channel=[256, 512, 1024, 2048])
return model
def main():
global args
args = parser.parse_args()
model = define_model(pre_train=False)
cudnn.benchmark = True
val_loader = loaddata.getTestingData(1)
checkpoint = torch.load(args.path)
state_dict = checkpoint['state_dict']
model.load_state_dict(state_dict)
model.cuda()
print("=> loaded model (epoch {})".format(checkpoint["epoch"]))
model.eval() # switch to evaluate mode
validate(val_loader,model)
validate_PRF(val_loader,model)
validate_VP(val_loader,model)
def validate(val_loader, model):
average_meter = AverageMeter()
end = time.time()
for i, sample_batched in enumerate(val_loader):
data_time = time.time() - end
input, target = sample_batched['image'], sample_batched['depth']
        target = target.cuda(non_blocking=True)
input = input.cuda()
        # compute output without building the autograd graph
        # (torch.no_grad() replaces the deprecated volatile=True Variables)
        end = time.time()
        with torch.no_grad():
            pred = model(input)
            pred = torch.nn.functional.interpolate(pred, size=[target.size(2), target.size(3)], mode='bilinear', align_corners=True)
        gpu_time = time.time() - end
# measure accuracy and record loss
result = Result()
result.evaluate(pred, target.data)
average_meter.update(result, gpu_time, data_time, input.size(0))
end = time.time()
if (i+1) % 300 == 0:
print('Test: [{0}/{1}]\t'
't_GPU={gpu_time:.3f}({average.gpu_time:.3f})\n\t'
'RMSE={result.rmse:.2f}({average.rmse:.2f}) '
'MSE={result.mse:.2f}({average.mse:.2f}) '
'MAE={result.mae:.2f}({average.mae:.2f}) '
'Delta1={result.delta1:.3f}({average.delta1:.3f}) '
'REL={result.absrel:.3f}({average.absrel:.3f}) '
'Lg10={result.lg10:.3f}({average.lg10:.3f}) '.format(
i+1, len(val_loader), gpu_time=gpu_time, result=result, average=average_meter.average()))
avg = average_meter.average()
print('\n*\n'
'RMSE={average.rmse:.3f}\n'
'MAE={average.mae:.3f}\n'
'REL={average.absrel:.3f}\n'
'Lg10={average.lg10:.3f}\n'
'Delta1={average.delta1:.3f}\n'
'Delta2={average.delta2:.3f}\n'
'Delta3={average.delta3:.3f}\n'
't_GPU={time:.3f}\n'.format(
average=avg, time=avg.gpu_time))
def validate_PRF(val_loader, model):
for th in [0.25,0.5,1]:
totalNumber = 0
Ae = 0
Pe = 0
Re = 0
Fe = 0
for i, sample_batched in enumerate(val_loader):
input, target = sample_batched['image'], sample_batched['depth']
totalNumber = totalNumber + input.size(0)
            target = target.cuda(non_blocking=True)
input = input.cuda()
with torch.no_grad():
pred = model(input)
pred=torch.nn.functional.interpolate(pred, size=[target.size(2), target.size(3)], mode='bilinear',align_corners=True)
depth_edge = edge_detection(target)
output_edge = edge_detection(pred)
edge1_valid = (depth_edge > th)
edge2_valid = (output_edge > th)
edge1_valid = np.array(edge1_valid.data.cpu().numpy(), dtype=np.uint8)
edge2_valid = np.array(edge2_valid.data.cpu().numpy(), dtype=np.uint8)
equal=edge1_valid==edge2_valid
nvalid = np.sum(equal)
A = nvalid / (target.size(2) * target.size(3))
nvalid2 = np.sum(((edge1_valid + edge2_valid) == 2))
P = nvalid2 / (np.sum(edge2_valid))
R = nvalid2 / (np.sum(edge1_valid))
F = (2 * P * R) / (P + R)
Ae += A
Pe += P
Re += R
Fe += F
Av = Ae / totalNumber
Pv = Pe / totalNumber
Rv = Re / totalNumber
Fv = Fe / totalNumber
print(th,'###################')
print('avgPV:', Pv)
print('avgRV:', Rv)
print('avgFV:', Fv,end="\n")
def validate_VP(val_loader, model):
totalNumber = 0
De_6 = 0
De_12 = 0
De_24 = 0
for i, sample_batched in enumerate(val_loader):
input, target = sample_batched['image'], sample_batched['depth']
totalNumber = totalNumber + input.size(0)
        target = target.cuda(non_blocking=True)
input = input.cuda()
with torch.no_grad():
pred = model(input)
pred=torch.nn.functional.interpolate(pred, size=[target.size(2), target.size(3)], mode='bilinear',align_corners=True)
pred_6=torch.nn.functional.adaptive_avg_pool2d(pred,(6,6))
pred_12=torch.nn.functional.adaptive_avg_pool2d(pred,(12,12))
pred_24=torch.nn.functional.adaptive_avg_pool2d(pred,(24,24))
gt_6=torch.nn.functional.adaptive_avg_pool2d(target, (6,6))
gt_12=torch.nn.functional.adaptive_avg_pool2d(target, (12,12))
gt_24=torch.nn.functional.adaptive_avg_pool2d(target, (24,24))
D6=vp_dis(pred_6,gt_6)/8.48
D12=vp_dis(pred_12, gt_12)/16.97
D24=vp_dis(pred_24, gt_24)/33.94
De_6+=D6
De_12+=D12
De_24+=D24
De_6 = De_6 / totalNumber
De_12 = De_12 / totalNumber
De_24 = De_24 / totalNumber
print("###################")
print('De_6:', De_6)
print('De_12:', De_12)
print('De_24:', De_24)
def vp_dis(pred,gt):
pred=pred.squeeze().cpu().detach().numpy()
gt=gt.squeeze().cpu().detach().numpy()
pred_index=np.unravel_index(pred.argmax(), pred.shape)
gt_index=np.unravel_index(gt.argmax(), gt.shape)
return ((pred_index[0]-gt_index[0])**2+(pred_index[1]-gt_index[1])**2)**0.5
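# Note on the divisors 8.48 / 16.97 / 33.94 used in validate_VP above: they are
# (approximately) the grid diagonals 6*sqrt(2), 12*sqrt(2) and 24*sqrt(2), i.e.
# the largest displacement possible on each pooled map, so every distance is
# reported as a fraction of the worst case.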
def edge_detection(depth):
get_edge = sobel.Sobel().cuda()
edge_xy = get_edge(depth)
edge_sobel = torch.pow(edge_xy[:, 0, :, :], 2) + \
torch.pow(edge_xy[:, 1, :, :], 2)
edge_sobel = torch.sqrt(edge_sobel)
return edge_sobel
if __name__ == '__main__':
main() | 6,987 | 35.395833 | 133 | py |
BS-Net | BS-Net-main/models/dilation_resnet.py | """Dilated ResNet"""
import math
import torch
import torch.utils.model_zoo as model_zoo
import torch.nn as nn
__all__ = ['ResNet', 'resnet18', 'resnet34', 'resnet50', 'resnet101',
'resnet152', 'BasicBlock', 'Bottleneck']
model_urls = {
'resnet18': 'https://download.pytorch.org/models/resnet18-5c106cde.pth',
'resnet34': 'https://download.pytorch.org/models/resnet34-333f7ec4.pth',
'resnet50': 'https://download.pytorch.org/models/resnet50-19c8e357.pth',
'resnet101': 'https://download.pytorch.org/models/resnet101-5d3b4d8f.pth',
'resnet152': 'https://download.pytorch.org/models/resnet152-b121ed2d.pth',
}
def conv3x3(in_planes, out_planes, stride=1):
"3x3 convolution with padding"
return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
padding=1, bias=False)
class BasicBlock(nn.Module):
"""ResNet BasicBlock
"""
expansion = 1
def __init__(self, inplanes, planes, stride=1, dilation=1, downsample=None, previous_dilation=1,
norm_layer=None):
super(BasicBlock, self).__init__()
self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=3, stride=stride,
padding=dilation, dilation=dilation, bias=False)
self.bn1 = norm_layer(planes)
self.relu = nn.ReLU(inplace=True)
self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=1,
padding=previous_dilation, dilation=previous_dilation, bias=False)
self.bn2 = norm_layer(planes)
self.downsample = downsample
self.stride = stride
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
if self.downsample is not None:
residual = self.downsample(x)
out += residual
out = self.relu(out)
return out
class Bottleneck(nn.Module):
"""ResNet Bottleneck
"""
# pylint: disable=unused-argument
expansion = 4
def __init__(self, inplanes, planes, stride=1, dilation=1,
downsample=None, previous_dilation=1, norm_layer=None):
super(Bottleneck, self).__init__()
self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)
self.bn1 = norm_layer(planes)
self.conv2 = nn.Conv2d(
planes, planes, kernel_size=3, stride=stride,
padding=dilation, dilation=dilation, bias=False)
self.bn2 = norm_layer(planes)
self.conv3 = nn.Conv2d(
planes, planes * 4, kernel_size=1, bias=False)
self.bn3 = norm_layer(planes * 4)
self.relu = nn.ReLU(inplace=True)
self.downsample = downsample
self.dilation = dilation
self.stride = stride
def _sum_each(self, x, y):
assert(len(x) == len(y))
z = []
for i in range(len(x)):
z.append(x[i]+y[i])
return z
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.relu(out)
out = self.conv3(out)
out = self.bn3(out)
if self.downsample is not None:
residual = self.downsample(x)
out += residual
out = self.relu(out)
return out
class ResNet(nn.Module):
"""Dilated Pre-trained ResNet Model, which preduces the stride of 8 featuremaps at conv5.
Parameters
----------
block : Block
Class for the residual block. Options are BasicBlockV1, BottleneckV1.
layers : list of int
Numbers of layers in each block
classes : int, default 1000
Number of classification classes.
dilated : bool, default False
Applying dilation strategy to pretrained ResNet yielding a stride-8 model,
typically used in Semantic Segmentation.
norm_layer : object
        Normalization layer used in the backbone network (default: :class:`torch.nn.BatchNorm2d`;
        a synchronized cross-GPU BatchNormalization layer can be substituted).
Reference:
- He, Kaiming, et al. "Deep residual learning for image recognition." Proceedings of the IEEE conference on computer vision and pattern recognition. 2016.
- Yu, Fisher, and Vladlen Koltun. "Multi-scale context aggregation by dilated convolutions."
"""
# pylint: disable=unused-variable
def __init__(self, block, layers, num_classes=1000, dilated=True, norm_layer=nn.BatchNorm2d, multi_grid=False, multi_dilation=None):
self.inplanes = 64
super(ResNet, self).__init__()
self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3,
bias=False)
self.bn1 = norm_layer(64)
self.relu = nn.ReLU(inplace=True)
self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
self.layer1 = self._make_layer(block, 64, layers[0], norm_layer=norm_layer,layer_num=1)
self.layer2 = self._make_layer(block, 128, layers[1], stride=2, norm_layer=norm_layer,layer_num=2)
if dilated:
if multi_grid:
self.layer3 = self._make_layer(block,256,layers[2],stride=1,
dilation=2, norm_layer=norm_layer,layer_num=3)
self.layer4 = self._make_layer(block,512,layers[3],stride=1,
dilation=4,norm_layer=norm_layer,
multi_grid=multi_grid, multi_dilation=multi_dilation,layer_num=4)
else:
self.layer3 = self._make_layer(block, 256, layers[2], stride=1,
dilation=2, norm_layer=norm_layer,layer_num=3)
self.layer4 = self._make_layer(block, 512, layers[3], stride=1,
dilation=4, norm_layer=norm_layer,layer_num=4)
else:
self.layer3 = self._make_layer(block, 256, layers[2], stride=2,
norm_layer=norm_layer,layer_num=3)
self.layer4 = self._make_layer(block, 512, layers[3], stride=2,
norm_layer=norm_layer,layer_num=4)
self.avgpool = nn.AvgPool2d(7)
self.fc = nn.Linear(512 * block.expansion, num_classes)
for m in self.modules():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2. / n))
elif isinstance(m, norm_layer):
m.weight.data.fill_(1)
m.bias.data.zero_()
def _make_layer(self, block, planes, blocks, stride=1, dilation=1, norm_layer=None, multi_grid=False, multi_dilation=None,layer_num=1):
downsample = None
if stride != 1 or self.inplanes != planes * block.expansion:
downsample = nn.Sequential(
nn.Conv2d(self.inplanes, planes * block.expansion,
kernel_size=1, stride=stride, bias=False),
norm_layer(planes * block.expansion),
)
layers = []
if multi_grid == False:
if dilation == 1 or dilation == 2:
layers.append(block(self.inplanes, planes, stride, dilation=1,
downsample=downsample, previous_dilation=dilation, norm_layer=norm_layer))
elif dilation == 4:
layers.append(block(self.inplanes, planes, stride, dilation=2,
downsample=downsample, previous_dilation=dilation, norm_layer=norm_layer))
else:
raise RuntimeError("=> unknown dilation size: {}".format(dilation))
else:
layers.append(block(self.inplanes, planes, stride, dilation=multi_dilation[0],
downsample=downsample, previous_dilation=dilation, norm_layer=norm_layer))
self.inplanes = planes * block.expansion
if multi_grid:
div = len(multi_dilation)
for i in range(1,blocks):
layers.append(block(self.inplanes, planes, dilation=multi_dilation[i%div], previous_dilation=dilation,
norm_layer=norm_layer))
else:
for i in range(1, blocks):
if layer_num==4:
#layers.append(block(self.inplanes, planes, dilation=2 ** i, previous_dilation=dilation,norm_layer=norm_layer))
layers.append(block(self.inplanes, planes, dilation=dilation, previous_dilation=dilation,norm_layer=norm_layer))
else:
layers.append(block(self.inplanes, planes, dilation=dilation, previous_dilation=dilation,norm_layer=norm_layer))
return nn.Sequential(*layers)
def forward(self, x):
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.maxpool(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
x = self.avgpool(x)
x = x.view(x.size(0), -1)
x = self.fc(x)
return x
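# Note (an observation about this port, not in the original comments): with
# dilated=True the stride-8 layer3/layer4 enlarge the final feature map, so the
# 7x7 average pool plus fc classification head above only fits a 224x224 input
# when dilation is disabled. BS-Net uses this network purely as a backbone via
# E_resnet in models/modules.py, which discards avgpool and fc.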
def resnet18(pretrained=False, **kwargs):
"""Constructs a ResNet-18 model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
model = ResNet(BasicBlock, [2, 2, 2, 2], **kwargs)
if pretrained:
model.load_state_dict(model_zoo.load_url(model_urls['resnet18']))
return model
def resnet34(pretrained=False, **kwargs):
"""Constructs a ResNet-34 model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
model = ResNet(BasicBlock, [3, 4, 6, 3], **kwargs)
if pretrained:
model.load_state_dict(model_zoo.load_url(model_urls['resnet34']))
return model
def resnet50(pretrained=False, root='./pretrain_models', **kwargs):
"""Constructs a ResNet-50 model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
model = ResNet(Bottleneck, [3, 4, 6, 3], **kwargs)
if pretrained:
# from ..models.model_store import get_model_file
# model.load_state_dict(torch.load(
# get_model_file('resnet50', root=root)), strict=False)
model.load_state_dict(model_zoo.load_url(model_urls['resnet50'], 'pretrained_model/encoder'))
return model
def resnet101(pretrained=False, root='./pretrain_models', **kwargs):
"""Constructs a ResNet-101 model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
model = ResNet(Bottleneck, [3, 4, 23, 3], **kwargs)
#Remove the following lines of comments
#if u want to train from a pretrained model
if pretrained:
# from ..models.model_store import get_model_file
# model.load_state_dict(torch.load(
# get_model_file('resnet101', root=root)), strict=False)
model.load_state_dict(model_zoo.load_url(model_urls['resnet101'], 'pretrained_model/encoder'))
return model
def resnet152(pretrained=False, root='~/.encoding/models', **kwargs):
"""Constructs a ResNet-152 model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
model = ResNet(Bottleneck, [3, 8, 36, 3], **kwargs)
if pretrained:
# model.load_state_dict(model_zoo.load_url(model_urls['resnet152']))
model.load_state_dict(torch.load(
'./pretrain_models/resnet152-b121ed2d.pth'), strict=False)
return model | 11,689 | 37.837209 | 162 | py |
BS-Net | BS-Net-main/models/modules.py | import torch
import torch.nn.functional as F
import torch.nn as nn
class _UpProjection(nn.Sequential):
def __init__(self, num_input_features, num_output_features):
super(_UpProjection, self).__init__()
self.conv1 = nn.Conv2d(num_input_features, num_output_features,
kernel_size=5, stride=1, padding=2, bias=False)
self.bn1 = nn.BatchNorm2d(num_output_features)
self.relu = nn.ReLU(inplace=True)
self.conv1_2 = nn.Conv2d(num_output_features, num_output_features,
kernel_size=3, stride=1, padding=1, bias=False)
self.bn1_2 = nn.BatchNorm2d(num_output_features)
self.conv2 = nn.Conv2d(num_input_features, num_output_features,
kernel_size=5, stride=1, padding=2, bias=False)
self.bn2 = nn.BatchNorm2d(num_output_features)
def forward(self, x, size):
        x = F.interpolate(x, size=size, mode='bilinear', align_corners=True)  # F.upsample is deprecated
x_conv1 = self.relu(self.bn1(self.conv1(x)))
bran1 = self.bn1_2(self.conv1_2(x_conv1))
bran2 = self.bn2(self.conv2(x))
out = self.relu(bran1 + bran2)
return out
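# _UpProjection follows the up-projection block of Laina et al. (FCRN): after
# bilinear upsampling, a 5x5 -> 3x3 branch and a parallel 5x5 shortcut branch
# are summed and passed through ReLU, giving a residual-style upsampling unit.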
class E_resnet(nn.Module):
def __init__(self, original_model, num_features=2048):
super(E_resnet, self).__init__()
self.conv1 = original_model.conv1
self.bn1 = original_model.bn1
self.relu = original_model.relu
self.maxpool = original_model.maxpool
self.layer1 = original_model.layer1
self.layer2 = original_model.layer2
self.layer3 = original_model.layer3
self.layer4 = original_model.layer4
def forward(self, x):
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.maxpool(x)
x_block1 = self.layer1(x)
x_block2 = self.layer2(x_block1)
x_block3 = self.layer3(x_block2)
x_block4 = self.layer4(x_block3)
return x_block1, x_block2, x_block3, x_block4
class multi_dilated_layer(nn.Module):
def __init__(self, input_channels,dilation_rate=[6, 12, 18]):
super(multi_dilated_layer, self).__init__()
self.rates = dilation_rate
self.layer1 = nn.Sequential(
nn.Conv2d(input_channels, input_channels//4, 1),
nn.ReLU(inplace=True),
nn.Conv2d(input_channels//4, input_channels//4, 1),
nn.ReLU(inplace=True)
)
self.layer2 = nn.Sequential(
nn.Conv2d(input_channels, input_channels//4, 3, padding=6, dilation=self.rates[0]),
nn.ReLU(inplace=True),
nn.Conv2d(input_channels//4, input_channels//4, 1),
nn.ReLU(inplace=True)
)
self.layer3 = nn.Sequential(
nn.Conv2d(input_channels, input_channels//4, 3, padding=12, dilation=self.rates[1]),
nn.ReLU(inplace=True),
nn.Conv2d(input_channels//4, input_channels//4, 1),
nn.ReLU(inplace=True)
)
self.layer4 = nn.Sequential(
nn.Conv2d(input_channels, input_channels//4, 3, padding=18, dilation=self.rates[2]),
nn.ReLU(inplace=True),
nn.Conv2d(input_channels//4, input_channels//4, 1),
nn.ReLU(inplace=True)
)
self.concat_process = nn.Sequential(
nn.Conv2d(input_channels, 1024, 1),
nn.ReLU(inplace=True),
)
def forward(self, x):
x1 = self.layer1(x)
x2 = self.layer2(x)
x3 = self.layer3(x)
x4 = self.layer4(x)
x4_cat = torch.cat((x1, x2, x3, x4), 1)
return x4_cat
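# Each dilated 3x3 branch above pads by its dilation rate (6, 12, 18), so all
# four branch outputs keep the input resolution and concatenate back to the
# original channel count (4 * input_channels // 4). Note that concat_process is
# defined but never applied in forward; the concatenated features are fused
# downstream in DCE instead.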
class DCE(nn.Module): #DepthCorrelation Encoder
def __init__(self, features, out_features, sizes=(1, 2, 3, 6)):
super(DCE,self).__init__()
self.stages = []
self.stages = nn.ModuleList([self._make_stage(features, size) for size in sizes])
self.ups = nn.ModuleList([_UpProjection(out_features//2,out_features//2) for i in range(4)])
self.bottleneck = nn.Conv2d(features//4*len(sizes), out_features//2, kernel_size=3,padding=1,bias=False)
self.relu = nn.ReLU(inplace=True)
self.multi_layers = multi_dilated_layer(features)
self.fusion = nn.Sequential(
nn.Conv2d(in_channels=features//4*5, out_channels=features, kernel_size=3, stride=1, padding=1),
nn.BatchNorm2d(features),
nn.ReLU(inplace=True)
)
def _make_stage(self, features, size):
prior = nn.AdaptiveAvgPool2d(output_size=(size, size))
conv = nn.Conv2d(features, features//4, kernel_size=1, bias=False)
return nn.Sequential(prior, conv)
def forward(self, feats):
h, w = feats.size(2), feats.size(3)
        x4_cat = self.multi_layers(feats)  # channels = features (four features//4 branches concatenated)
priors = [up(stage(feats), [h, w]) for (stage,up) in zip(self.stages,self.ups)]
bottle = self.bottleneck(torch.cat(priors, 1))
        psp = self.relu(bottle)  # channels = out_features // 2
fusion_feat = torch.cat((psp,x4_cat), 1)
return self.fusion(fusion_feat)
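# Data-flow sketch for DCE(features=2048, out_features=1024), as wired in net.py:
#   feats (B, 2048, H, W) -> multi_dilated_layer -> x4_cat (B, 2048, H, W)
#   feats -> 4 pooled stages -> bottleneck + relu -> psp   (B,  512, H, W)
#   cat(psp, x4_cat) (B, 2560, H, W) -> fusion 3x3 conv -> (B, 2048, H, W)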
class Decoder(nn.Module):
def __init__(self, num_features=2048):
super(Decoder, self).__init__()
self.conv = nn.Conv2d(num_features, num_features //2, kernel_size=1, stride=1, bias=False)
num_features = num_features // 2
self.bn = nn.BatchNorm2d(num_features)
self.relu = nn.ReLU(inplace=True)
self.up1 = _UpProjection(
num_input_features=num_features, num_output_features=num_features // 2)
num_features = num_features // 2
self.up2 = _UpProjection(
num_input_features=num_features, num_output_features=num_features // 2)
num_features = num_features // 2
self.up3 = _UpProjection(
num_input_features=num_features, num_output_features=num_features // 2)
num_features = num_features // 2
self.up4 = _UpProjection(
num_input_features=num_features, num_output_features=num_features // 2)
num_features = num_features // 2
def forward(self, x_block1, x_block2, x_block3, x_block4, x_dce):
x_d1 = self.relu(self.bn(self.conv(x_dce)))
x_d1 = self.up1(x_d1, [x_block3.size(2), x_block3.size(3)])
x_d2 = self.up2(x_d1, [x_block2.size(2), x_block2.size(3)])
x_d3 = self.up3(x_d2, [x_block1.size(2), x_block1.size(3)])
x_d4 = self.up4(x_d3, [x_block1.size(2) * 2, x_block1.size(3) * 2])
return x_d4
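# Channel sketch with num_features=2048 (the value used in net.py): the 1x1 conv
# halves 2048 -> 1024, then up1..up4 halve the channels at each step
# (1024 -> 512 -> 256 -> 128 -> 64) while resizing to the block3, block2, block1
# and 2x block1 resolutions respectively, i.e. half the input resolution for a
# standard ResNet encoder.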
class SRM(nn.Module):#Stripe Refinement
def __init__(self,num_feature):
super(SRM,self).__init__()
self.ssp = SSP(64+num_feature//32)
self.R = RP(num_feature//32)
def forward(self,x_decoder,x_bubf):
out = self.R(self.ssp(torch.cat((x_decoder, x_bubf), 1)))
return out
class RP(nn.Module): #Residual prediction
def __init__(self, block_channel=184):
super(RP, self).__init__()
num_features = 64 + block_channel
self.conv0 = nn.Conv2d(num_features, num_features,kernel_size=5, stride=1, padding=2, bias=False)
self.bn0 = nn.BatchNorm2d(num_features)
self.conv1 = nn.Conv2d(num_features, num_features,kernel_size=5, stride=1, padding=2, bias=False)
self.bn1 = nn.BatchNorm2d(num_features)
self.conv2 = nn.Conv2d(
num_features, 1, kernel_size=5, stride=1, padding=2, bias=False)
self.relu = nn.ReLU(inplace=True)
def forward(self, x):
x0 = self.conv0(x)
x0 = self.bn0(x0)
x0 = self.relu(x0)
x1 = self.conv1(x0)
x1 = self.bn1(x1)
x1 = self.relu(x1)
x1 = x + x1
x2 = self.conv2(x1)
return x2
class SSP(nn.Module):#Strip Spatial Perception
def __init__(self,inchannels,midchannels=21, k=11, w=3):
super(SSP,self).__init__()
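        # the hard-wired padding (5, 1)/(1, 5) matches the default k=11, w=3 only;
        # other kernel sizes would need ((k - 1) // 2, (w - 1) // 2) to keep the spatial size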
self.conv1 = nn.Conv2d(in_channels=inchannels, out_channels=inchannels, kernel_size=(k, w), stride=1,
padding=(5, 1))
self.conv2 = nn.Conv2d(in_channels=inchannels, out_channels=inchannels, kernel_size=(w, k), stride=1,
padding=(1, 5))
self.conv5 = nn.Conv2d(in_channels=inchannels, out_channels=inchannels, kernel_size=3, stride=1,padding=1,bias=False)
self.bn = nn.BatchNorm2d(num_features=inchannels)
self.relu = nn.ReLU(inplace=True)
def forward(self,x):
b1 = self.conv1(x)
b2 = self.conv2(x)
x = b1 + b2
x = self.relu(self.bn(self.conv5(x)))
return x
class lRB(nn.Module):  # large Refinement Block
def __init__(self, in_channels, out_channels):
super(lRB, self).__init__()
self.conv1 = nn.Conv2d(in_channels, out_channels, kernel_size=1, stride=1, padding=0)
self.conv2 = nn.Conv2d(out_channels, out_channels, kernel_size=3, stride=1, padding=1)
self.relu = nn.ReLU(inplace=True)
self.bn = nn.BatchNorm2d(out_channels)
self.conv3 = nn.Conv2d(out_channels, out_channels, kernel_size=3, stride=1, padding=1)
def forward(self, x):
x = self.conv1(x)
res = self.conv2(x)
res = self.bn(res)
res = self.relu(res)
res = self.conv3(res)
return self.relu(x + res)
class BUBF(nn.Module): #Bottom-Up Boundary Fusion
def __init__(self, channels, out_channel):
super(BUBF, self).__init__()
self.lrb_1 = lRB(channels//8, out_channel)
self.lrb_2 = lRB(channels//4, out_channel)
self.lrb_3 = lRB(channels//2, out_channel)
self.lrb_4 = lRB(channels, out_channel)
self.lrb_5 = lRB(out_channel, out_channel)
self.lrb_6 = lRB(out_channel, out_channel)
self.lrb_7 = lRB(out_channel, out_channel)
self.up1 = _UpProjection(out_channel, out_channel)
self.up2 = _UpProjection(out_channel, out_channel)
self.up3 = _UpProjection(out_channel, out_channel)
self.up4 = _UpProjection(out_channel, out_channel)
def forward(self, x_block1, x_block2, x_block3, x_block4):
x1 = self.lrb_1(x_block1)
x1 = self.up4(x1, [x_block1.size(2) * 2, x_block1.size(3) * 2])
x2 = self.lrb_2(x_block2)
x2 = self.up1(x2, [x_block1.size(2) * 2, x_block1.size(3) * 2])
x2 = x1 + x2
x2 = self.lrb_5(x2)
x3 = self.lrb_3(x_block3)
x3 = self.up2(x3, [x_block1.size(2) * 2, x_block1.size(3) * 2])
x3 = x2 + x3
x3 = self.lrb_6(x3)
x4 = self.lrb_4(x_block4)
x4 = self.up3(x4, [x_block1.size(2) * 2, x_block1.size(3) * 2])
x4 = x3 + x4
x4 = self.lrb_7(x4)
return x4
| 10,669 | 38.227941 | 125 | py |
BS-Net | BS-Net-main/models/net.py | import torch.nn as nn
import models.modules as modules
class model(nn.Module):
def __init__(self, Encoder, num_features, block_channel):
super(model, self).__init__()
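        # `block_channel` is accepted for interface compatibility but is not used here;
        # all sub-module widths are derived from `num_features`.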
        self.E = Encoder  # backbone encoder, e.g. a 2048-channel feature map of size 8x10 for a 256x320 input
self.DCE = modules.DCE(num_features,num_features//2, sizes=(1, 2, 3, 6))
self.BUBF = modules.BUBF(num_features,64)
self.D = modules.Decoder(num_features)
self.SRM = modules.SRM(num_features)
def forward(self, x):
x_block1, x_block2, x_block3, x_block4 = self.E(x)
x_dce = self.DCE(x_block4)
x_bubf = self.BUBF(x_block1, x_block2, x_block3, x_block4)
x_decoder = self.D(x_block1, x_block2, x_block3, x_block4,x_dce)
out = self.SRM(x_decoder,x_bubf)
return out
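# Hypothetical usage sketch (the torchvision backbone below is an assumption for
# illustration, not part of this repository):
#   import torch
#   import torchvision.models
#   backbone = torchvision.models.resnet50(pretrained=True)
#   encoder = modules.E_resnet(backbone, num_features=2048)
#   net = model(encoder, num_features=2048, block_channel=[256, 512, 1024, 2048])
#   out = net(torch.randn(1, 3, 256, 320))  # 1-channel map at half the input resolution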
| 763 | 35.380952 | 80 | py |
correlate | correlate-master/setup.py | """
Install tigramite
"""
from __future__ import print_function
import pathlib
from setuptools import setup, Extension
from setuptools.command.build_ext import build_ext
# Handle building against numpy headers before installing numpy
class UseNumpyHeadersBuildExt(build_ext):
"""
Subclassed build_ext command.
Allows for numpy to be imported after it is automatically installed.
This lets us use numpy.get_include() while listing numpy as a needed
dependency.
"""
def run(self):
self.distribution.fetch_build_eggs(["numpy"])
# Import numpy here, only when headers are needed
import numpy
# Add numpy headers to include_dirs
self.include_dirs.append(numpy.get_include())
# Call original build_ext command
build_ext.run(self)
# Handle cythonizing code only in development mode
def define_extension(extension_name, source_files=None):
    """
    Define an extension from the pre-generated *.c files, unless Cython is
    available (as when "setup.py develop" is run in a dev environment), in which
    case the *.c files are regenerated from the *.pyx sources.
    :return: single-element list of the needed extension
    """
# Default source file
if source_files is None:
source_files = [str((pathlib.Path(__file__).parent / extension_name.replace(".", "/")).with_suffix(".c"))]
    # Try to import Cython and regenerate the C code from the .pyx sources
try:
from Cython.Build import cythonize
# Return the cythonized extension
pyx_path = str((pathlib.Path(__file__).parent / extension_name.replace(".", "/")).with_suffix(".pyx"))
return cythonize([pyx_path], language_level = "3")
except ImportError:
print(
"Cython cannot be found. Skipping generation of C code from"
+ " cython and using pre-compiled C code instead"
)
return [Extension(extension_name, source_files,
extra_compile_args=['-fopenmp'],
extra_link_args=['-fopenmp'],)]
with open("README.md", "r", encoding="utf-8") as fh:
long_description = fh.read()
# Define the minimal classes needed to install and run tigramite
INSTALL_REQUIRES = ["numpy", "scipy", "six"]
# Define all the possible extras needed
EXTRAS_REQUIRE = {
"all": [
"scikit-learn>=0.21", # Gaussian Process (GP) Regression
"matplotlib>=3.4.0", # plotting
"networkx>=2.4", # plotting
"torch>=1.7", # GPDC torch version
"gpytorch>=1.4", # GPDC gpytorch version
"dcor>=0.5.3", # GPDC distance correlation version
]
}
# Define the packages needed for testing
TESTS_REQUIRE = ["nose", "pytest", "networkx>=2.4", "scikit-learn>=0.21",
"torch>=1.7", "gpytorch>=1.4", "dcor>=0.5.3"]
EXTRAS_REQUIRE["test"] = TESTS_REQUIRE
# Define the extras needed for development
EXTRAS_REQUIRE["dev"] = EXTRAS_REQUIRE["all"] + TESTS_REQUIRE + ["cython"]
# Use a custom build to handle numpy.include_dirs() when building
CMDCLASS = {"build_ext": UseNumpyHeadersBuildExt}
# Define the external modules to build
EXT_MODULES = []
EXT_MODULES += define_extension("tigramite.tigramite_cython_code")
# Run the setup
setup(
name="tigramite",
version="4.2.2.1",
packages=["tigramite", "tigramite.independence_tests"],
license="GNU General Public License v3.0",
description="Tigramite causal discovery for time series",
author="Jakob Runge",
author_email="jakob@jakob-runge.com",
url="https://github.com/jakobrunge/tigramite/",
long_description=long_description,
long_description_content_type="text/markdown",
keywords="causal inference, causal discovery, prediction, time series",
cmdclass=CMDCLASS,
ext_modules=EXT_MODULES,
install_requires=INSTALL_REQUIRES,
extras_require=EXTRAS_REQUIRE,
test_suite="tests",
tests_require=TESTS_REQUIRE,
classifiers=[
"Development Status :: 4 - Beta",
"Intended Audience :: Science/Research",
"Topic :: Scientific/Engineering :: Artificial Intelligence",
"Topic :: Scientific/Engineering :: Mathematics",
"License "
":: OSI Approved "
":: GNU General Public License v3 or later (GPLv3+)",
"Programming Language :: Python",
],
)
| 4,316 | 34.677686 | 114 | py |
correlate | correlate-master/prediction/fully_connected.py | import math
import numpy as np
import torch
import torch.utils.data as data_utils
from sklearn.preprocessing import MinMaxScaler
from torch import nn
from torch.utils.tensorboard import SummaryWriter
from config import target_label, fully_connected_nn_prediction_on
writer = SummaryWriter()
epochs = 4115
lr = 0.0001
torch.manual_seed(0)
weight_decay = 0.101
# Define model
class NeuralNetwork(nn.Module):
def __init__(self, num_features):
super(NeuralNetwork, self).__init__()
# self.flatten = nn.Flatten()
self.linear_relu_stack = nn.Sequential(
nn.Linear(num_features, 16),
nn.LeakyReLU(),
nn.Linear(16, 8),
# nn.LeakyReLU(),
# nn.Linear(16, 8),
# nn.LeakyReLU(),
# nn.Linear(8, 3),
nn.LeakyReLU(),
nn.Linear(8, 1),
)
def forward(self, x):
# x = self.flatten(x)
logits = self.linear_relu_stack(x)
return logits
def fully_connected_nn_prediction(df):
if fully_connected_nn_prediction_on:
# dataframes to tensors
target_tensor = torch.tensor(df[target_label].values.astype(np.float32))
target_tensor = torch.unsqueeze(target_tensor, 1) # due to one dim target tensor
# print('train_target', train_target)
input_df = df.drop([target_label], axis=1)
num_features = len(input_df.columns)
input_tensor = torch.tensor(input_df.values.astype(np.float32))
# input normalization
scaler = MinMaxScaler()
scaler.fit(input_tensor)
input_tensor = torch.tensor(scaler.transform(input_tensor).astype(np.float32))
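        # Caveat: the scaler is fitted on the full dataset before the train/test split
        # below, so test statistics leak into the normalization; fitting on the
        # training split only would be the stricter protocol.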
tensor_dataset = data_utils.TensorDataset(input_tensor, target_tensor)
# train test split
print('dataset_size:', len(tensor_dataset))
train_size = int(0.9 * len(tensor_dataset))
test_size = len(tensor_dataset) - train_size
train_dataset, test_dataset = torch.utils.data.random_split(tensor_dataset, [train_size, test_size])
# load data
batch_size = math.floor(train_size)
train_dataloader = data_utils.DataLoader(dataset=train_dataset, batch_size=batch_size, shuffle=True)
test_dataloader = data_utils.DataLoader(dataset=test_dataset, batch_size=batch_size, shuffle=True)
        for X, y in test_dataloader:  # iterate the loader so X carries a batch dimension
print("Shape of X [BatchSize, #params]: ", X.shape)
print("Shape of y: ", y.shape, y.dtype)
break
# Get cpu or gpu device for training.
device = "cuda" if torch.cuda.is_available() else "cpu"
print("Using {} device".format(device))
model = NeuralNetwork(num_features).to(device)
print(model)
loss_fn = nn.MSELoss()
optimizer = torch.optim.AdamW(model.parameters(), lr=lr, weight_decay=weight_decay)
for epoch in range(epochs):
print(f"Epoch {epoch + 1}\n-------------------------------")
train(train_dataloader, model, loss_fn, optimizer, epoch + 1, device)
test(test_dataloader, model, loss_fn, epoch + 1, device)
writer.flush()
writer.close()
print("Done Training!")
# save model
torch.save(model.state_dict(), "model.pth")
print("Saved PyTorch Model State to model.pth")
# load model
model = NeuralNetwork(num_features)
model.load_state_dict(torch.load("model.pth"))
model.eval()
for day in range(len(test_dataset)):
x = test_dataset[day][0]
y = test_dataset[day][1]
with torch.no_grad():
pred = model(x)
predicted, actual = pred[0], y
# print(f'Predicted: {predicted}; Actual: {actual[0]}')
def train(dataloader, model, loss_fn, optimizer, epoch, device):
size = len(dataloader.dataset)
for batch, (X, y) in enumerate(dataloader):
X, y = X.to(device), y.to(device)
# Compute prediction error
pred = model(X)
loss = loss_fn(pred, y)
# Backpropagation
optimizer.zero_grad()
loss.backward()
optimizer.step()
if batch % 100 == 0:
loss, current = loss.item(), batch * len(X)
print(f"train loss: {loss:>7f} [{current:>5d}/{size:>5d}]")
writer.add_scalar("Loss/train", loss, epoch)
def test(dataloader, model, loss_fn, epoch, device):
num_batches = len(dataloader)
model.eval()
test_loss = 0
with torch.no_grad():
for X, y in dataloader:
X, y = X.to(device), y.to(device)
pred = model(X)
test_loss += loss_fn(pred, y).item()
test_loss /= num_batches
print(f"Avg test loss: {test_loss:>8f} \n")
writer.add_scalar("Loss/test", test_loss, epoch)
| 4,817 | 32.227586 | 108 | py |
DeepOnto | DeepOnto-main/src/deeponto/subs/bertsubs/pipeline_inter.py | # Copyright 2023 Jiaoyan Chen. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# @paper(
# "Contextual Semantic Embeddings for Ontology Subsumption Prediction (World Wide Web Journal)",
# )
import os
import sys
import random
import datetime
import warnings
import math
from yacs.config import CfgNode
from typing import List
import numpy as np
import torch
from transformers import TrainingArguments
from deeponto.onto import Ontology
from .bert_classifier import BERTSubsumptionClassifierTrainer
from .text_semantics import SubsumptionSampler
from .pipeline_intra import BERTSubsIntraPipeline
DEFAULT_CONFIG_FILE_INTER = os.path.join(os.path.dirname(__file__), "default_config_inter.yaml")
class BERTSubsInterPipeline:
r"""Class for the model training and prediction/validation pipeline of inter-ontology subsumption of BERTSubs.
Attributes:
src_onto (Ontology): Source ontology (the sub-class side).
tgt_onto (Ontology): Target ontology (the super-class side).
config (CfgNode): Configuration.
src_sampler (SubsumptionSampler): Object for sampling-related functions of the source ontology.
tgt_sampler (SubsumptionSampler): Object for sampling-related functions of the target ontology.
"""
def __init__(self, src_onto: Ontology, tgt_onto: Ontology, config: CfgNode):
self.src_onto = src_onto
self.tgt_onto = tgt_onto
self.config = config
self.config.label_property = self.config.src_label_property
self.src_sampler = SubsumptionSampler(onto=self.src_onto, config=self.config)
self.config.label_property = self.config.tgt_label_property
self.tgt_sampler = SubsumptionSampler(onto=self.tgt_onto, config=self.config)
start_time = datetime.datetime.now()
read_subsumptions = lambda file_name: [line.strip().split(',') for line in open(file_name).readlines()]
test_subsumptions = None if config.test_subsumption_file is None or config.test_subsumption_file == 'None' \
else read_subsumptions(config.test_subsumption_file)
valid_subsumptions = None if config.valid_subsumption_file is None or config.valid_subsumption_file == 'None' \
else read_subsumptions(config.valid_subsumption_file)
if config.use_ontology_subsumptions_training:
src_subsumptions = BERTSubsIntraPipeline.extract_subsumptions_from_ontology(onto=self.src_onto,
subsumption_type=config.subsumption_type)
tgt_subsumptions = BERTSubsIntraPipeline.extract_subsumptions_from_ontology(onto=self.tgt_onto,
subsumption_type=config.subsumption_type)
src_subsumptions0, tgt_subsumptions0 = [], []
if config.subsumption_type == 'named_class':
for subs in src_subsumptions:
c1, c2 = subs.getSubClass(), subs.getSuperClass()
src_subsumptions0.append([str(c1.getIRI()), str(c2.getIRI())])
for subs in tgt_subsumptions:
c1, c2 = subs.getSubClass(), subs.getSuperClass()
tgt_subsumptions0.append([str(c1.getIRI()), str(c2.getIRI())])
elif config.subsumption_type == 'restriction':
for subs in src_subsumptions:
c1, c2 = subs.getSubClass(), subs.getSuperClass()
src_subsumptions0.append([str(c1.getIRI()), str(c2)])
for subs in tgt_subsumptions:
c1, c2 = subs.getSubClass(), subs.getSuperClass()
tgt_subsumptions0.append([str(c1.getIRI()), str(c2)])
restrictions = BERTSubsIntraPipeline.extract_restrictions_from_ontology(onto=self.tgt_onto)
print('restrictions in the target ontology: %d' % len(restrictions))
else:
warnings.warn('Unknown subsumption type %s' % config.subsumption_type)
sys.exit(0)
print('Positive train subsumptions from the source/target ontology: %d/%d' % (
len(src_subsumptions0), len(tgt_subsumptions0)))
src_tr = self.src_sampler.generate_samples(subsumptions=src_subsumptions0)
tgt_tr = self.tgt_sampler.generate_samples(subsumptions=tgt_subsumptions0)
else:
src_tr, tgt_tr = [], []
if config.train_subsumption_file is None or config.train_subsumption_file == 'None':
tr = src_tr + tgt_tr
else:
train_subsumptions = read_subsumptions(config.train_subsumption_file)
tr = self.inter_ontology_sampling(subsumptions=train_subsumptions, pos_dup=config.fine_tune.train_pos_dup,
neg_dup=config.fine_tune.train_neg_dup)
tr = tr + src_tr + tgt_tr
if len(tr) == 0:
warnings.warn('No training samples extracted')
if config.fine_tune.do_fine_tune:
sys.exit(0)
end_time = datetime.datetime.now()
print('data pre-processing costs %.1f minutes' % ((end_time - start_time).seconds / 60))
start_time = datetime.datetime.now()
torch.cuda.empty_cache()
bert_trainer = BERTSubsumptionClassifierTrainer(config.fine_tune.pretrained, train_data=tr,
val_data=tr[0:int(len(tr) / 5)],
max_length=config.prompt.max_length,
early_stop=config.fine_tune.early_stop)
epoch_steps = len(bert_trainer.tra) // config.fine_tune.batch_size # total steps of an epoch
logging_steps = int(epoch_steps * 0.02) if int(epoch_steps * 0.02) > 0 else 5
eval_steps = 5 * logging_steps
training_args = TrainingArguments(
output_dir=config.fine_tune.output_dir,
num_train_epochs=config.fine_tune.num_epochs,
per_device_train_batch_size=config.fine_tune.batch_size,
per_device_eval_batch_size=config.fine_tune.batch_size,
warmup_ratio=config.fine_tune.warm_up_ratio,
weight_decay=0.01,
logging_steps=logging_steps,
logging_dir=f"{config.fine_tune.output_dir}/tb",
eval_steps=eval_steps,
evaluation_strategy="steps",
do_train=True,
do_eval=True,
save_steps=eval_steps,
load_best_model_at_end=True,
save_total_limit=1,
metric_for_best_model="accuracy",
greater_is_better=True
)
if config.fine_tune.do_fine_tune and (config.prompt.prompt_type == 'traversal' or (
config.prompt.prompt_type == 'path' and config.prompt.use_sub_special_token)):
bert_trainer.add_special_tokens(['<SUB>'])
bert_trainer.train(train_args=training_args, do_fine_tune=config.fine_tune.do_fine_tune)
if config.fine_tune.do_fine_tune:
bert_trainer.trainer.save_model(
output_dir=os.path.join(config.fine_tune.output_dir, 'fine-tuned-checkpoint'))
print('fine-tuning done, fine-tuned model saved')
else:
print('pretrained or fine-tuned model loaded.')
end_time = datetime.datetime.now()
print('Fine-tuning costs %.1f minutes' % ((end_time - start_time).seconds / 60))
bert_trainer.model.eval()
self.device = torch.device(f"cuda") if torch.cuda.is_available() else torch.device("cpu")
bert_trainer.model.to(self.device)
self.tokenize = lambda x: bert_trainer.tokenizer(x, max_length=config.prompt.max_length, truncation=True,
padding=True, return_tensors="pt")
softmax = torch.nn.Softmax(dim=1)
self.classifier = lambda x: softmax(bert_trainer.model(**x).logits)[:, 1]
if valid_subsumptions is not None:
self.evaluate(target_subsumptions=valid_subsumptions, test_type='valid')
if test_subsumptions is not None:
if config.test_type == 'evaluation':
self.evaluate(target_subsumptions=test_subsumptions, test_type='test')
elif config.test_type == 'prediction':
self.predict(target_subsumptions=test_subsumptions)
else:
warnings.warn("Unknown test_type: %s" % config.test_type)
print('\n ------------------------- done! ---------------------------\n\n\n')
def inter_ontology_sampling(self, subsumptions: List[List], pos_dup: int = 1, neg_dup: int = 1):
r"""Transform inter-ontology subsumptions to two-string samples
Args:
subsumptions (List[List]): A list of subsumptions; each subsumption is composed of two IRIs.
pos_dup (int): Positive sample duplication.
neg_dup (int): Negative sample duplication.
"""
pos_samples = list()
for subs in subsumptions:
sub_strs = self.src_sampler.subclass_to_strings(subcls=subs[0])
sup_strs = self.tgt_sampler.supclass_to_strings(supcls=subs[1],
subsumption_type=self.config.subsumption_type)
for sub_str in sub_strs:
for sup_str in sup_strs:
pos_samples.append([sub_str, sup_str, 1])
pos_samples = pos_dup * pos_samples
neg_subsumptions = list()
for subs in subsumptions:
for _ in range(neg_dup):
neg_c = self.tgt_sampler.get_negative_sample(subclass_iri=subs[1],
subsumption_type=self.config.subsumption_type)
neg_subsumptions.append([subs[0], neg_c])
neg_samples = list()
for subs in neg_subsumptions:
sub_strs = self.src_sampler.subclass_to_strings(subcls=subs[0])
sup_strs = self.tgt_sampler.supclass_to_strings(supcls=subs[1],
subsumption_type=self.config.subsumption_type)
for sub_str in sub_strs:
for sup_str in sup_strs:
neg_samples.append([sub_str, sup_str, 0])
if len(neg_samples) < len(pos_samples):
neg_samples = neg_samples + [random.choice(neg_samples) for _ in range(len(pos_samples) - len(neg_samples))]
if len(neg_samples) > len(pos_samples):
pos_samples = pos_samples + [random.choice(pos_samples) for _ in range(len(neg_samples) - len(pos_samples))]
print('training mappings, pos_samples: %d, neg_samples: %d' % (len(pos_samples), len(neg_samples)))
all_samples = [s for s in pos_samples + neg_samples if s[0] != '' and s[1] != '']
return all_samples
def inter_ontology_subsumption_to_sample(self, subsumption: List):
r"""Transform an inter ontology subsumption into a sample (a two-string list).
Args:
subsumption (List): a subsumption composed of two IRIs.
"""
subcls, supcls = subsumption[0], subsumption[1]
substrs = self.src_sampler.subclass_to_strings(subcls=subcls)
supstrs = self.tgt_sampler.supclass_to_strings(supcls=supcls, subsumption_type='named_class')
samples = list()
for substr in substrs:
for supstr in supstrs:
samples.append([substr, supstr])
return samples
def score(self, samples):
r"""Score the samples with the classifier.
Args:
samples (List[List]): Each item is a list with two strings (input).
"""
sample_size = len(samples)
scores = np.zeros(sample_size)
batch_num = math.ceil(sample_size / self.config.evaluation.batch_size)
for i in range(batch_num):
j = (i + 1) * self.config.evaluation.batch_size \
if (i + 1) * self.config.evaluation.batch_size <= sample_size else sample_size
inputs = self.tokenize(samples[i * self.config.evaluation.batch_size:j])
inputs.to(self.device)
with torch.no_grad():
batch_scores = self.classifier(inputs)
scores[i * self.config.evaluation.batch_size:j] = batch_scores.cpu().numpy()
return scores
def evaluate(self, target_subsumptions: List[List], test_type: str = 'test'):
r"""Test and calculate the metrics according to a given list of subsumptions.
Args:
            target_subsumptions (List[List]): A list of subsumptions, each of which is a two-component list `(subclass_iri, super_class_iri_or_str)`.
test_type (str): `"test"` or `"valid"`.
"""
MRR_sum, hits1_sum, hits5_sum, hits10_sum = 0, 0, 0, 0
MRR, Hits1, Hits5, Hits10 = 0, 0, 0, 0
size_sum, size_n = 0, 0
for k0, test in enumerate(target_subsumptions):
subcls, gt = test[0], test[1]
candidates = test[1:]
candidate_subsumptions = [[subcls, c] for c in candidates]
candidate_scores = np.zeros(len(candidate_subsumptions))
for k1, candidate_subsumption in enumerate(candidate_subsumptions):
samples = self.inter_ontology_subsumption_to_sample(subsumption=candidate_subsumption)
size_sum += len(samples)
size_n += 1
scores = self.score(samples=samples)
candidate_scores[k1] = np.average(scores)
sorted_indexes = np.argsort(candidate_scores)[::-1]
sorted_classes = [candidates[i] for i in sorted_indexes]
rank = sorted_classes.index(gt) + 1
MRR_sum += 1.0 / rank
hits1_sum += 1 if gt in sorted_classes[:1] else 0
hits5_sum += 1 if gt in sorted_classes[:5] else 0
hits10_sum += 1 if gt in sorted_classes[:10] else 0
num = k0 + 1
MRR, Hits1, Hits5, Hits10 = MRR_sum / num, hits1_sum / num, hits5_sum / num, hits10_sum / num
if num % 500 == 0:
print('\n%d tested, MRR: %.3f, Hits@1: %.3f, Hits@5: %.3f, Hits@10: %.3f\n' % (
num, MRR, Hits1, Hits5, Hits10))
print('\n[%s], MRR: %.3f, Hits@1: %.3f, Hits@5: %.3f, Hits@10: %.3f\n' % (test_type, MRR, Hits1, Hits5, Hits10))
print('%.2f samples per testing subsumption' % (size_sum / size_n))
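    # Worked example of the metrics above: if the ground truth is ranked 2nd among
    # the candidates, it contributes 1/2 to MRR_sum, 0 to hits1_sum, and 1 to both
    # hits5_sum and hits10_sum; each sum is then divided by the number of tested
    # subsumptions.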
def predict(self, target_subsumptions: List[List]):
r"""Predict a score for each given subsumption.
The scores will be saved in `test_subsumption_scores.csv`.
Args:
target_subsumptions (List[List]): Each item is a list with the first element as the sub-class,
and the remaining elements as n candidate super-classes.
"""
out_lines = []
for test in target_subsumptions:
subcls, candidates = test[0], test[1:]
candidate_subsumptions = [[subcls, c] for c in candidates]
candidate_scores = []
for candidate_subsumption in candidate_subsumptions:
samples = self.inter_ontology_subsumption_to_sample(subsumption=candidate_subsumption)
scores = self.score(samples=samples)
candidate_scores.append(np.average(scores))
out_lines.append(','.join([str(i) for i in candidate_scores]))
out_file = 'test_subsumption_scores.csv'
with open(out_file, 'w') as f:
for line in out_lines:
f.write('%s\n' % line)
print('Predicted subsumption scores are saved to %s' % out_file)
| 16,303 | 50.432177 | 152 | py |
DeepOnto | DeepOnto-main/src/deeponto/subs/bertsubs/pipeline_intra.py | # Copyright 2023 Jiaoyan Chen. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# @paper(
# "Contextual Semantic Embeddings for Ontology Subsumption Prediction (World Wide Web Journal)",
# )
import os
import sys
import warnings
import random
import torch
import math
import datetime
import numpy as np
from typing import List
from transformers import TrainingArguments
from yacs.config import CfgNode
from deeponto.onto import Ontology
from .bert_classifier import BERTSubsumptionClassifierTrainer
from .text_semantics import SubsumptionSampler
DEFAULT_CONFIG_FILE_INTRA = os.path.join(os.path.dirname(__file__), "default_config_intra.yaml")
class BERTSubsIntraPipeline:
r"""Class for the intra-ontology subsumption prediction setting of BERTSubs.
Attributes:
onto (Ontology): The target ontology.
config (CfgNode): The configuration for BERTSubs.
sampler (SubsumptionSample): The subsumption sampler for BERTSubs.
"""
def __init__(self, onto: Ontology, config: CfgNode):
self.onto = onto
self.config = config
self.sampler = SubsumptionSampler(onto=onto, config=config)
start_time = datetime.datetime.now()
n = 0
for k in self.sampler.named_classes:
n += len(self.sampler.iri_label[k])
print(
"%d named classes, %.1f labels per class"
% (len(self.sampler.named_classes), n / len(self.sampler.named_classes))
)
read_subsumptions = lambda file_name: [line.strip().split(",") for line in open(file_name).readlines()]
test_subsumptions = (
None
if config.test_subsumption_file is None or config.test_subsumption_file == "None"
else read_subsumptions(config.test_subsumption_file)
)
# The train/valid subsumptions are not given. They will be extracted from the given ontology:
if config.train_subsumption_file is None or config.train_subsumption_file == "None":
subsumptions0 = self.extract_subsumptions_from_ontology(
onto=onto, subsumption_type=config.subsumption_type
)
random.shuffle(subsumptions0)
valid_size = int(len(subsumptions0) * config.valid.valid_ratio)
train_subsumptions0, valid_subsumptions0 = subsumptions0[valid_size:], subsumptions0[0:valid_size]
train_subsumptions, valid_subsumptions = [], []
if config.subsumption_type == "named_class":
for subs in train_subsumptions0:
c1, c2 = subs.getSubClass(), subs.getSuperClass()
train_subsumptions.append([str(c1.getIRI()), str(c2.getIRI())])
size_sum = 0
for subs in valid_subsumptions0:
c1, c2 = subs.getSubClass(), subs.getSuperClass()
neg_candidates = BERTSubsIntraPipeline.get_test_neg_candidates_named_class(
subclass=c1, gt=c2, max_neg_size=config.valid.max_neg_size, onto=onto
)
size = len(neg_candidates)
size_sum += size
if size > 0:
item = [str(c1.getIRI()), str(c2.getIRI())] + [str(c.getIRI()) for c in neg_candidates]
valid_subsumptions.append(item)
print("\t average neg candidate size in validation: %.2f" % (size_sum / len(valid_subsumptions)))
elif config.subsumption_type == "restriction":
for subs in train_subsumptions0:
c1, c2 = subs.getSubClass(), subs.getSuperClass()
train_subsumptions.append([str(c1.getIRI()), str(c2)])
restrictions = BERTSubsIntraPipeline.extract_restrictions_from_ontology(onto=onto)
print("restrictions: %d" % len(restrictions))
size_sum = 0
for subs in valid_subsumptions0:
c1, c2 = subs.getSubClass(), subs.getSuperClass()
c2_neg = BERTSubsIntraPipeline.get_test_neg_candidates_restriction(
subcls=c1, max_neg_size=config.valid.max_neg_size, restrictions=restrictions, onto=onto
)
size_sum += len(c2_neg)
item = [str(c1.getIRI()), str(c2)] + [str(r) for r in c2_neg]
valid_subsumptions.append(item)
print("valid candidate negative avg. size: %.1f" % (size_sum / len(valid_subsumptions)))
else:
warnings.warn("Unknown subsumption type %s" % config.subsumption_type)
sys.exit(0)
# The train/valid subsumptions are given:
else:
train_subsumptions = read_subsumptions(config.train_subsumption_file)
valid_subsumptions = read_subsumptions(config.valid_subsumption_file)
print("Positive train/valid subsumptions: %d/%d" % (len(train_subsumptions), len(valid_subsumptions)))
tr = self.sampler.generate_samples(subsumptions=train_subsumptions)
va = self.sampler.generate_samples(subsumptions=valid_subsumptions, duplicate=False)
end_time = datetime.datetime.now()
print("data pre-processing costs %.1f minutes" % ((end_time - start_time).seconds / 60))
start_time = datetime.datetime.now()
torch.cuda.empty_cache()
bert_trainer = BERTSubsumptionClassifierTrainer(
config.fine_tune.pretrained,
train_data=tr,
val_data=va,
max_length=config.prompt.max_length,
early_stop=config.fine_tune.early_stop,
)
epoch_steps = len(bert_trainer.tra) // config.fine_tune.batch_size # total steps of an epoch
logging_steps = int(epoch_steps * 0.02) if int(epoch_steps * 0.02) > 0 else 5
eval_steps = 5 * logging_steps
training_args = TrainingArguments(
output_dir=config.fine_tune.output_dir,
num_train_epochs=config.fine_tune.num_epochs,
per_device_train_batch_size=config.fine_tune.batch_size,
per_device_eval_batch_size=config.fine_tune.batch_size,
warmup_ratio=config.fine_tune.warm_up_ratio,
weight_decay=0.01,
logging_steps=logging_steps,
logging_dir=f"{config.fine_tune.output_dir}/tb",
eval_steps=eval_steps,
evaluation_strategy="steps",
do_train=True,
do_eval=True,
save_steps=eval_steps,
load_best_model_at_end=True,
save_total_limit=1,
metric_for_best_model="accuracy",
greater_is_better=True,
)
if config.fine_tune.do_fine_tune and (
config.prompt.prompt_type == "traversal"
or (config.prompt.prompt_type == "path" and config.prompt.use_sub_special_token)
):
bert_trainer.add_special_tokens(["<SUB>"])
bert_trainer.train(train_args=training_args, do_fine_tune=config.fine_tune.do_fine_tune)
if config.fine_tune.do_fine_tune:
bert_trainer.trainer.save_model(
output_dir=os.path.join(config.fine_tune.output_dir, "fine-tuned-checkpoint")
)
print("fine-tuning done, fine-tuned model saved")
else:
print("pretrained or fine-tuned model loaded.")
end_time = datetime.datetime.now()
print("Fine-tuning costs %.1f minutes" % ((end_time - start_time).seconds / 60))
bert_trainer.model.eval()
self.device = torch.device(f"cuda") if torch.cuda.is_available() else torch.device("cpu")
bert_trainer.model.to(self.device)
self.tokenize = lambda x: bert_trainer.tokenizer(
x, max_length=config.prompt.max_length, truncation=True, padding=True, return_tensors="pt"
)
softmax = torch.nn.Softmax(dim=1)
self.classifier = lambda x: softmax(bert_trainer.model(**x).logits)[:, 1]
self.evaluate(target_subsumptions=valid_subsumptions, test_type="valid")
if test_subsumptions is not None:
if config.test_type == "evaluation":
self.evaluate(target_subsumptions=test_subsumptions, test_type="test")
elif config.test_type == "prediction":
self.predict(target_subsumptions=test_subsumptions)
else:
warnings.warn("Unknown test_type: %s" % config.test_type)
print("\n ------------------------- done! ---------------------------\n\n\n")
def score(self, samples: List[List]):
r"""The scoring function based on the fine-tuned BERT classifier.
Args:
samples (List[Tuple]): A list of input sentence pairs to be scored.
"""
sample_size = len(samples)
scores = np.zeros(sample_size)
batch_num = math.ceil(sample_size / self.config.evaluation.batch_size)
for i in range(batch_num):
j = (
(i + 1) * self.config.evaluation.batch_size
if (i + 1) * self.config.evaluation.batch_size <= sample_size
else sample_size
)
inputs = self.tokenize(samples[i * self.config.evaluation.batch_size : j])
inputs.to(self.device)
with torch.no_grad():
batch_scores = self.classifier(inputs)
scores[i * self.config.evaluation.batch_size : j] = batch_scores.cpu().numpy()
return scores
def evaluate(self, target_subsumptions: List[List], test_type: str = "test"):
r"""Test and calculate the metrics for a given list of subsumption pairs.
Args:
target_subsumptions (List[Tuple]): A list of subsumption pairs.
test_type (str): `test` for testing or `valid` for validation.
"""
MRR_sum, hits1_sum, hits5_sum, hits10_sum = 0, 0, 0, 0
MRR, Hits1, Hits5, Hits10 = 0, 0, 0, 0
size_sum, size_n = 0, 0
for k0, test in enumerate(target_subsumptions):
subcls, gt = test[0], test[1]
candidates = test[1:]
candidate_subsumptions = [[subcls, c] for c in candidates]
candidate_scores = np.zeros(len(candidate_subsumptions))
for k1, candidate_subsumption in enumerate(candidate_subsumptions):
samples = self.sampler.subsumptions_to_samples(subsumptions=[candidate_subsumption], sample_label=None)
size_sum += len(samples)
size_n += 1
scores = self.score(samples=samples)
candidate_scores[k1] = np.average(scores)
sorted_indexes = np.argsort(candidate_scores)[::-1]
sorted_classes = [candidates[i] for i in sorted_indexes]
rank = sorted_classes.index(gt) + 1
MRR_sum += 1.0 / rank
hits1_sum += 1 if gt in sorted_classes[:1] else 0
hits5_sum += 1 if gt in sorted_classes[:5] else 0
hits10_sum += 1 if gt in sorted_classes[:10] else 0
num = k0 + 1
MRR, Hits1, Hits5, Hits10 = MRR_sum / num, hits1_sum / num, hits5_sum / num, hits10_sum / num
if num % 500 == 0:
print(
"\n%d tested, MRR: %.3f, Hits@1: %.3f, Hits@5: %.3f, Hits@10: %.3f\n"
% (num, MRR, Hits1, Hits5, Hits10)
)
print(
"\n[%s], MRR: %.3f, Hits@1: %.3f, Hits@5: %.3f, Hits@10: %.3f\n" % (test_type, MRR, Hits1, Hits5, Hits10)
)
print("%.2f samples per testing subsumption" % (size_sum / size_n))
def predict(self, target_subsumptions: List[List]):
r"""Predict a score for each given subsumption in the list.
The scores will be saved in `test_subsumption_scores.csv`.
Args:
target_subsumptions (List[List]): Each item is a list where the first element is a fixed ontology class $C$,
and the remaining elements are potential (candidate) super-classes of $C$.
"""
out_lines = []
for test in target_subsumptions:
subcls, candidates = test[0], test[1:]
candidate_subsumptions = [[subcls, c] for c in candidates]
candidate_scores = []
for candidate_subsumption in candidate_subsumptions:
samples = self.sampler.subsumptions_to_samples(subsumptions=[candidate_subsumption], sample_label=None)
scores = self.score(samples=samples)
candidate_scores.append(np.average(scores))
out_lines.append(",".join([str(i) for i in candidate_scores]))
out_file = "test_subsumption_scores.csv"
with open(out_file, "w") as f:
for line in out_lines:
f.write("%s\n" % line)
print("Predicted subsumption scores are saved to %s" % out_file)
@staticmethod
def extract_subsumptions_from_ontology(onto: Ontology, subsumption_type: str):
r"""Extract target subsumptions from a given ontology.
Args:
onto (Ontology): The target ontology.
subsumption_type (str): the type of subsumptions, options are `"named_class"` or `"restriction"`.
"""
all_subsumptions = onto.get_subsumption_axioms(entity_type="Classes")
subsumptions = []
if subsumption_type == "restriction":
for subs in all_subsumptions:
if (
not onto.check_deprecated(owl_object=subs.getSubClass())
and not onto.check_named_entity(owl_object=subs.getSuperClass())
and SubsumptionSampler.is_basic_existential_restriction(
complex_class_str=str(subs.getSuperClass())
)
):
subsumptions.append(subs)
elif subsumption_type == "named_class":
for subs in all_subsumptions:
c1, c2 = subs.getSubClass(), subs.getSuperClass()
if (
onto.check_named_entity(owl_object=c1)
and not onto.check_deprecated(owl_object=c1)
and onto.check_named_entity(owl_object=c2)
and not onto.check_deprecated(owl_object=c2)
):
subsumptions.append(subs)
else:
warnings.warn("\nUnknown subsumption type: %s\n" % subsumption_type)
return subsumptions
@staticmethod
def extract_restrictions_from_ontology(onto: Ontology):
r"""Extract basic existential restriction from an ontology.
Args:
onto (Ontology): The target ontology.
Returns:
restrictions (List): a list of existential restrictions.
"""
restrictions = []
for complexC in onto.get_asserted_complex_classes():
if SubsumptionSampler.is_basic_existential_restriction(complex_class_str=str(complexC)):
restrictions.append(complexC)
return restrictions
@staticmethod
def get_test_neg_candidates_restriction(subcls, max_neg_size, restrictions, onto):
"""Get a list of negative candidate class restrictions for testing."""
neg_restrictions = list()
n = max_neg_size * 2 if max_neg_size * 2 <= len(restrictions) else len(restrictions)
for r in random.sample(restrictions, n):
if not onto.reasoner.check_subsumption(sub_entity=subcls, super_entity=r):
neg_restrictions.append(r)
if len(neg_restrictions) >= max_neg_size:
break
return neg_restrictions
@staticmethod
def get_test_neg_candidates_named_class(subclass, gt, max_neg_size, onto, max_depth=3, max_width=8):
"""Get a list of negative candidate named classes for testing."""
all_nebs, seeds = set(), [gt]
depth = 1
while depth <= max_depth:
new_seeds = set()
for seed in seeds:
nebs = set()
for nc_iri in onto.reasoner.get_inferred_sub_entities(
seed, direct=True
) + onto.reasoner.get_inferred_super_entities(seed, direct=True):
nc = onto.owl_classes[nc_iri]
if onto.check_named_entity(owl_object=nc) and not onto.check_deprecated(owl_object=nc):
nebs.add(nc)
new_seeds = new_seeds.union(nebs)
all_nebs = all_nebs.union(nebs)
depth += 1
            # random.sample requires a sequence; sampling directly from a set is an error in Python 3.11+
            seeds = random.sample(list(new_seeds), max_width) if len(new_seeds) > max_width else new_seeds
all_nebs = (
all_nebs
- {onto.owl_classes[iri] for iri in onto.reasoner.get_inferred_super_entities(subclass, direct=False)}
- {subclass}
)
if len(all_nebs) > max_neg_size:
            return random.sample(list(all_nebs), max_neg_size)
else:
return list(all_nebs)
| 17,435 | 44.76378 | 120 | py |
DeepOnto | DeepOnto-main/src/deeponto/utils/logging.py | # Copyright 2021 Yuan He. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import annotations
import logging
import datetime
import time
# subclass of logging.Formatter
class RuntimeFormatter(logging.Formatter):
"""Auxiliary class for runtime formatting in the logger."""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.start_time = time.time()
def formatTime(self, record, datefmt=None):
"""Record relative runtime in hr:min:sec format。"""
duration = datetime.datetime.utcfromtimestamp(record.created - self.start_time)
elapsed = duration.strftime("%H:%M:%S")
return "{}".format(elapsed)
def create_logger(model_name: str, saved_path: str):
"""Create logger for both console info and saved info.
The pre-existed log file will be cleared before writing into new messages.
"""
logger = logging.getLogger(model_name)
logger.setLevel(logging.DEBUG)
# create file handler which logs even debug messages
fh = logging.FileHandler(f"{saved_path}/{model_name}.log", mode="w") # "w" means clear the log file before writing
fh.setLevel(logging.DEBUG)
# create console handler with a higher log level
ch = logging.StreamHandler()
ch.setLevel(logging.INFO)
# create formatter and add it to the handlers
formatter = RuntimeFormatter("[Time: %(asctime)s] - [PID: %(process)d] - [Model: %(name)s] \n%(message)s")
fh.setFormatter(formatter)
ch.setFormatter(formatter)
# add the handlers to the logger
logger.addHandler(fh)
logger.addHandler(ch)
return logger
def banner_message(message: str, sym="^"):
"""Print a banner message surrounded by special symbols."""
print()
message = message.upper()
banner_len = len(message) + 4
message = " " * ((banner_len - len(message)) // 2) + message
message = message + " " * (banner_len - len(message))
print(message)
print(sym * banner_len)
print()
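# Hypothetical usage sketch:
#   logger = create_logger("bertmap", saved_path="./experiments")
#   logger.info("started")  # INFO+ to console; DEBUG+ to ./experiments/bertmap.log (overwritten per run)
#   banner_message("loading ontology")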
| 2,609 | 34.27027 | 119 | py |
DeepOnto | DeepOnto-main/src/deeponto/align/bertmap/mapping_prediction.py | # Copyright 2021 Yuan He. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import annotations
from typing import Optional, List, Set, Tuple
from yacs.config import CfgNode
import os
from textdistance import levenshtein
from logging import Logger
import itertools
import torch
import pandas as pd
import enlighten
from deeponto.align.mapping import EntityMapping
from deeponto.onto import Ontology
from deeponto.utils import FileUtils, Tokenizer
from .bert_classifier import BERTSynonymClassifier
# @paper(
# "BERTMap: A BERT-based Ontology Alignment System (AAAI-2022)",
# "https://ojs.aaai.org/index.php/AAAI/article/view/20510",
# )
class MappingPredictor:
r"""Class for the mapping prediction module of $\textsf{BERTMap}$ and $\textsf{BERTMapLt}$ models.
Attributes:
tokenizer (Tokenizer): The tokenizer used for constructing the inverted annotation index and candidate selection.
src_annotation_index (dict): A dictionary that stores the `(class_iri, class_annotations)` pairs from `src_onto` according to `annotation_property_iris`.
tgt_annotation_index (dict): A dictionary that stores the `(class_iri, class_annotations)` pairs from `tgt_onto` according to `annotation_property_iris`.
tgt_inverted_annotation_index (InvertedIndex): The inverted index built from `tgt_annotation_index` used for target class candidate selection.
bert_synonym_classifier (BERTSynonymClassifier, optional): The BERT synonym classifier fine-tuned on text semantics corpora.
num_raw_candidates (int): The maximum number of selected target class candidates for a source class.
num_best_predictions (int): The maximum number of best scored mappings presevred for a source class.
batch_size_for_prediction (int): The batch size of class annotation pairs for computing synonym scores.
"""
def __init__(
self,
output_path: str,
tokenizer_path: str,
src_annotation_index: dict,
tgt_annotation_index: dict,
bert_synonym_classifier: Optional[BERTSynonymClassifier],
num_raw_candidates: Optional[int],
num_best_predictions: Optional[int],
batch_size_for_prediction: int,
logger: Logger,
enlighten_manager: enlighten.Manager,
enlighten_status: enlighten.StatusBar,
):
self.logger = logger
self.enlighten_manager = enlighten_manager
self.enlighten_status = enlighten_status
self.tokenizer = Tokenizer.from_pretrained(tokenizer_path)
self.logger.info("Build inverted annotation index for candidate selection.")
self.src_annotation_index = src_annotation_index
self.tgt_annotation_index = tgt_annotation_index
self.tgt_inverted_annotation_index = Ontology.build_inverted_annotation_index(
tgt_annotation_index, self.tokenizer
)
# the fundamental judgement for whether bertmap or bertmaplt is loaded
self.bert_synonym_classifier = bert_synonym_classifier
self.num_raw_candidates = num_raw_candidates
self.num_best_predictions = num_best_predictions
self.batch_size_for_prediction = batch_size_for_prediction
self.output_path = output_path
self.init_class_mapping = lambda head, tail, score: EntityMapping(head, tail, "<EquivalentTo>", score)
def bert_mapping_score(
self,
src_class_annotations: Set[str],
tgt_class_annotations: Set[str],
):
r"""$\textsf{BERTMap}$'s main mapping score module which utilises the fine-tuned BERT synonym
classifier.
Compute the **synonym score** for each pair of src-tgt class annotations, and return
the **average** score as the mapping score. Apply string matching before applying the
BERT module to filter easy mappings (with scores $1.0$).
"""
# apply string matching before applying the bert module
prelim_score = self.edit_similarity_mapping_score(
src_class_annotations,
tgt_class_annotations,
string_match_only=True,
)
if prelim_score == 1.0:
return prelim_score
# apply BERT classifier and define mapping score := Average(SynonymScores)
class_annotation_pairs = list(itertools.product(src_class_annotations, tgt_class_annotations))
synonym_scores = self.bert_synonym_classifier.predict(class_annotation_pairs)
# only one element tensor is able to be extracted as a scalar by .item()
return float(torch.mean(synonym_scores).item())
@staticmethod
def edit_similarity_mapping_score(
src_class_annotations: Set[str],
tgt_class_annotations: Set[str],
string_match_only: bool = False,
):
r"""$\textsf{BERTMap}$'s string match module and $\textsf{BERTMapLt}$'s mapping prediction function.
Compute the **normalised edit similarity** `(1 - normalised edit distance)` for each pair
of src-tgt class annotations, and return the **maximum** score as the mapping score.
"""
# edge case when src and tgt classes have an exact match of annotation
if len(src_class_annotations.intersection(tgt_class_annotations)) > 0:
return 1.0
# a shortcut to save time for $\textsf{BERTMap}$
if string_match_only:
return 0.0
annotation_pairs = itertools.product(src_class_annotations, tgt_class_annotations)
sim_scores = [levenshtein.normalized_similarity(src, tgt) for src, tgt in annotation_pairs]
return max(sim_scores)
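    # Worked example: for annotations {"hart"} vs {"heart"}, the Levenshtein distance
    # is 1 and the longer string has length 5, so the normalised similarity is
    # 1 - 1/5 = 0.8, which becomes the mapping score.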
def mapping_prediction_for_src_class(self, src_class_iri: str) -> List[EntityMapping]:
r"""Predict $N$ best scored mappings for a source ontology class, where
$N$ is specified in `self.num_best_predictions`.
1. Apply the **string matching** module to compute "easy" mappings.
2. Return the mappings if found any, or if there is no BERT synonym classifier
as in $\textsf{BERTMapLt}$.
3. If using the BERT synonym classifier module:
- Generate batches for class annotation pairs. Each batch contains the combinations of the
source class annotations and $M$ target candidate classes' annotations. $M$ is determined
by `batch_size_for_prediction`, i.e., stop adding annotations of a target class candidate into
the current batch if this operation will cause the size of current batch to exceed the limit.
- Compute the synonym scores for each batch and aggregate them into mapping scores; preserve
$N$ best scored candidates and update them in the next batch. By this dynamic process, we eventually
get $N$ best scored mappings for a source ontology class.
"""
src_class_annotations = self.src_annotation_index[src_class_iri]
        # note: an earlier version mistakenly passed the tokenizer in again here
tgt_class_candidates = self.tgt_inverted_annotation_index.idf_select(
list(src_class_annotations), pool_size=self.num_raw_candidates
) # [(tgt_class_iri, idf_score)]
best_scored_mappings = []
# for string matching: save time if already found string-matched candidates
def string_match():
"""Compute string-matched mappings."""
string_matched_mappings = []
for tgt_candidate_iri, _ in tgt_class_candidates:
tgt_candidate_annotations = self.tgt_annotation_index[tgt_candidate_iri]
prelim_score = self.edit_similarity_mapping_score(
src_class_annotations,
tgt_candidate_annotations,
string_match_only=True,
)
if prelim_score > 0.0:
# if src_class_annotations.intersection(tgt_candidate_annotations):
string_matched_mappings.append(
self.init_class_mapping(src_class_iri, tgt_candidate_iri, prelim_score)
)
return string_matched_mappings
best_scored_mappings += string_match()
# return string-matched mappings if found or if there is no bert module (bertmaplt)
if best_scored_mappings or not self.bert_synonym_classifier:
self.logger.info(f"The best scored class mappings for {src_class_iri} are\n{best_scored_mappings}")
return best_scored_mappings
def generate_batched_annotations(batch_size: int):
"""Generate batches of class annotations for the input source class and its
target candidates.
"""
batches = []
# the `nums`` parameter determines how the annotations are grouped
current_batch = CfgNode({"annotations": [], "nums": []})
for i, (tgt_candidate_iri, _) in enumerate(tgt_class_candidates):
tgt_candidate_annotations = self.tgt_annotation_index[tgt_candidate_iri]
annotation_pairs = list(itertools.product(src_class_annotations, tgt_candidate_annotations))
current_batch.annotations += annotation_pairs
num_annotation_pairs = len(annotation_pairs)
current_batch.nums.append(num_annotation_pairs)
# collect when the batch is full or for the last target class candidate
if sum(current_batch.nums) > batch_size or i == len(tgt_class_candidates) - 1:
batches.append(current_batch)
current_batch = CfgNode({"annotations": [], "nums": []})
return batches
def bert_match():
"""Compute mappings with fine-tuned BERT synonym classifier."""
bert_matched_mappings = []
class_annotation_batches = generate_batched_annotations(self.batch_size_for_prediction)
batch_base_candidate_idx = (
0 # after each batch, the base index will be increased by # of covered target candidates
)
device = self.bert_synonym_classifier.device
# intialize N prediction scores and N corresponding indices w.r.t `tgt_class_candidates`
final_best_scores = torch.tensor([-1] * self.num_best_predictions).to(device)
final_best_idxs = torch.tensor([-1] * self.num_best_predictions).to(device)
for annotation_batch in class_annotation_batches:
synonym_scores = self.bert_synonym_classifier.predict(annotation_batch.annotations)
# aggregating to mappings cores
grouped_synonym_scores = torch.split(
synonym_scores,
split_size_or_sections=annotation_batch.nums,
)
mapping_scores = torch.stack([torch.mean(chunk) for chunk in grouped_synonym_scores])
assert len(mapping_scores) == len(annotation_batch.nums)
# preserve N best scored mappings
# scale N in case there are less than N tgt candidates in this batch
N = min(len(mapping_scores), self.num_best_predictions)
batch_best_scores, batch_best_idxs = torch.topk(mapping_scores, k=N)
batch_best_idxs += batch_base_candidate_idx
# we do the substitution for every batch to prevent from memory overflow
final_best_scores, _idxs = torch.topk(
torch.cat([batch_best_scores, final_best_scores]),
k=self.num_best_predictions,
)
final_best_idxs = torch.cat([batch_best_idxs, final_best_idxs])[_idxs]
# update the index for target candidate classes
batch_base_candidate_idx += len(annotation_batch.nums)
for candidate_idx, mapping_score in zip(final_best_idxs, final_best_scores):
                # ignore initial values (-1.0) of the dummy mappings
# the threshold 0.9 is for mapping extension
if mapping_score.item() >= 0.9:
tgt_candidate_iri = tgt_class_candidates[candidate_idx.item()][0]
bert_matched_mappings.append(
self.init_class_mapping(
src_class_iri,
tgt_candidate_iri,
mapping_score.item(),
)
)
assert len(bert_matched_mappings) <= self.num_best_predictions
self.logger.info(f"The best scored class mappings for {src_class_iri} are\n{bert_matched_mappings}")
return bert_matched_mappings
return bert_match()
def mapping_prediction(self):
r"""Apply global matching for each class in the source ontology.
See [`mapping_prediction_for_src_class`][deeponto.align.bertmap.mapping_prediction.MappingPredictor.mapping_prediction_for_src_class].
If this process is accidentally stopped, it can be resumed from already saved predictions. The progress
bar keeps track of the number of source ontology classes that have been matched.
"""
self.logger.info("Start global matching for each class in the source ontology.")
match_dir = os.path.join(self.output_path, "match")
try:
mapping_index = FileUtils.load_file(os.path.join(match_dir, "raw_mappings.json"))
self.logger.info("Load the existing mapping prediction file.")
except:
mapping_index = dict()
FileUtils.create_path(match_dir)
progress_bar = self.enlighten_manager.counter(
total=len(self.src_annotation_index), desc="Mapping Prediction", unit="per src class"
)
self.enlighten_status.update(demo="Mapping Prediction")
for i, src_class_iri in enumerate(self.src_annotation_index.keys()):
if src_class_iri in mapping_index.keys():
self.logger.info(f"[Class {i}] Skip matching {src_class_iri} as already computed.")
progress_bar.update()
continue
mappings = self.mapping_prediction_for_src_class(src_class_iri)
mapping_index[src_class_iri] = [m.to_tuple(with_score=True) for m in mappings]
if i % 100 == 0 or i == len(self.src_annotation_index) - 1:
FileUtils.save_file(mapping_index, os.path.join(match_dir, "raw_mappings.json"))
# also save a .tsv version
mapping_in_tuples = list(itertools.chain.from_iterable(mapping_index.values()))
mapping_df = pd.DataFrame(mapping_in_tuples, columns=["SrcEntity", "TgtEntity", "Score"])
mapping_df.to_csv(os.path.join(match_dir, "raw_mappings.tsv"), sep="\t", index=False)
self.logger.info("Save currently computed mappings to prevent undesirable loss.")
progress_bar.update()
self.logger.info("Finished mapping prediction for each class in the source ontology.")
progress_bar.close()
| 15,548 | 49.980328 | 161 | py |
DeepOnto | DeepOnto-main/src/deeponto/align/bertmap/bert_classifier.py | # Copyright 2021 Yuan He. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Tuple, List, Optional, Union
import torch
from transformers import TrainingArguments, AutoModelForSequenceClassification, Trainer
from datasets import Dataset
from sklearn.metrics import accuracy_score
import numpy as np
import random
from deeponto.utils import Tokenizer, FileUtils
from deeponto.utils.decorators import paper
# @paper(
# "BERTMap: A BERT-based Ontology Alignment System (AAAI-2022)",
# "https://ojs.aaai.org/index.php/AAAI/article/view/20510",
# )
class BERTSynonymClassifier:
r"""Class for BERT synonym classifier.
The main scoring module of $\textsf{BERTMap}$ consisting of a BERT model and a binary synonym classifier.
Attributes:
loaded_path (str): The path to the checkpoint of a pre-trained BERT model.
output_path (str): The path to the output BERT model (usually fine-tuned).
eval_mode (bool): Set to `False` if the model is loaded for training.
max_length_for_input (int): The maximum length of an input sequence.
num_epochs_for_training (int): The number of epochs for training a BERT model.
batch_size_for_training (int): The batch size for training a BERT model.
batch_size_for_prediction (int): The batch size for making predictions.
training_data (Dataset, optional): Data for training the model if `for_training` is set to `True`. Defaults to `None`.
validation_data (Dataset, optional): Data for validating the model if `for_training` is set to `True`. Defaults to `None`.
training_args (TrainingArguments, optional): Training arguments for training the model if `for_training` is set to `True`. Defaults to `None`.
trainer (Trainer, optional): The model trainer fed with `training_args` and data samples. Defaults to `None`.
softmax (torch.nn.SoftMax, optional): The softmax layer used for normalising synonym scores. Defaults to `None`.
"""
def __init__(
self,
loaded_path: str,
output_path: str,
eval_mode: bool,
max_length_for_input: int,
num_epochs_for_training: Optional[float] = None,
batch_size_for_training: Optional[int] = None,
batch_size_for_prediction: Optional[int] = None,
training_data: Optional[List[Tuple[str, str, int]]] = None, # (sentence1, sentence2, label)
validation_data: Optional[List[Tuple[str, str, int]]] = None,
):
# Load the pretrained BERT model from the given path
self.loaded_path = loaded_path
print(f"Loading a BERT model from: {self.loaded_path}.")
self.model = AutoModelForSequenceClassification.from_pretrained(
self.loaded_path, output_hidden_states=eval_mode
)
self.tokenizer = Tokenizer.from_pretrained(loaded_path)
self.output_path = output_path
self.eval_mode = eval_mode
self.max_length_for_input = max_length_for_input
self.num_epochs_for_training = num_epochs_for_training
self.batch_size_for_training = batch_size_for_training
self.batch_size_for_prediction = batch_size_for_prediction
self.training_data = None
self.validation_data = None
self.data_stat = {}
self.training_args = None
self.trainer = None
self.softmax = None
# load the pre-trained BERT model and set it to eval mode (static)
if self.eval_mode:
self.eval()
# load the pre-trained BERT model for fine-tuning
else:
if not training_data:
raise RuntimeError("Training data should be provided when `for_training` is `True`.")
if not validation_data:
raise RuntimeError("Validation data should be provided when `for_training` is `True`.")
# load data (max_length is used for truncation)
self.training_data = self.load_dataset(training_data, "training")
self.validation_data = self.load_dataset(validation_data, "validation")
self.data_stat = {
"num_training": len(self.training_data),
"num_validation": len(self.validation_data),
}
# generate training arguments
epoch_steps = len(self.training_data) // self.batch_size_for_training # total steps of an epoch
if torch.cuda.device_count() > 0:
                epoch_steps = epoch_steps // torch.cuda.device_count()  # to deal with the multi-GPU case
            # keep logging steps consistent even for small batch size
# report logging on every 0.02 epoch
logging_steps = int(epoch_steps * 0.02)
# eval on every 0.2 epoch
eval_steps = 10 * logging_steps
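            # worked example (illustrative, single GPU): 20,000 training samples
            # with batch size 32 give epoch_steps = 625, so
            # logging_steps = int(625 * 0.02) = 12 and eval_steps = 120,
            # i.e. evaluation roughly every 0.2 epoch as intended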
# generate the training arguments
self.training_args = TrainingArguments(
output_dir=self.output_path,
num_train_epochs=self.num_epochs_for_training,
per_device_train_batch_size=self.batch_size_for_training,
per_device_eval_batch_size=self.batch_size_for_training,
warmup_ratio=0.0,
weight_decay=0.01,
logging_steps=logging_steps,
logging_dir=f"{self.output_path}/tensorboard",
eval_steps=eval_steps,
evaluation_strategy="steps",
do_train=True,
do_eval=True,
save_steps=eval_steps,
save_total_limit=2,
load_best_model_at_end=True,
)
# build the trainer
self.trainer = Trainer(
model=self.model,
args=self.training_args,
train_dataset=self.training_data,
eval_dataset=self.validation_data,
compute_metrics=self.compute_metrics,
tokenizer=self.tokenizer._tokenizer,
)
def train(self, resume_from_checkpoint: Optional[Union[bool, str]] = None):
"""Start training the BERT model."""
if self.eval_mode:
raise RuntimeError("Training cannot be started in `eval` mode.")
self.trainer.train(resume_from_checkpoint=resume_from_checkpoint)
def eval(self):
"""To eval mode."""
print("The BERT model is set to eval mode for making predictions.")
self.model.eval()
# TODO: to implement multi-gpus for inference
self.device = self.get_device(device_num=0)
self.model.to(self.device)
self.softmax = torch.nn.Softmax(dim=1).to(self.device)
def predict(self, sent_pairs: List[Tuple[str, str]]):
r"""Run prediction pipeline for synonym classification.
        Return the `softmax` probabilities of predicting pairs as synonyms (`index=1`).
"""
inputs = self.process_inputs(sent_pairs)
with torch.no_grad():
return self.softmax(self.model(**inputs).logits)[:, 1]
def load_dataset(self, data: List[Tuple[str, str, int]], split: str) -> Dataset:
r"""Load the list of `(annotation1, annotation2, label)` samples into a `datasets.Dataset`."""
def iterate():
for sample in data:
yield {"annotation1": sample[0], "annotation2": sample[1], "labels": sample[2]}
dataset = Dataset.from_generator(iterate)
# NOTE: no padding here because the Trainer class supports dynamic padding
dataset = dataset.map(
lambda examples: self.tokenizer._tokenizer(
examples["annotation1"], examples["annotation2"], max_length=self.max_length_for_input, truncation=True
),
batched=True,
desc=f"Load {split} data:",
)
return dataset
def process_inputs(self, sent_pairs: List[Tuple[str, str]]):
r"""Process input sentence pairs for the BERT model.
Transform the sentences into BERT input embeddings and load them into the device.
This function is called only when the BERT model is about to make predictions (`eval` mode).
"""
return self.tokenizer._tokenizer(
sent_pairs,
return_tensors="pt",
max_length=self.max_length_for_input,
padding=True,
truncation=True,
).to(self.device)
@staticmethod
def compute_metrics(pred):
"""Add more evaluation metrics into the training log."""
# TODO: currently only accuracy is added, will expect more in the future if needed
labels = pred.label_ids
preds = pred.predictions.argmax(-1)
acc = accuracy_score(labels, preds)
return {"accuracy": acc}
@staticmethod
def get_device(device_num: int = 0):
"""Get a device (GPU or CPU) for the torch model"""
# If there's a GPU available...
if torch.cuda.is_available():
# Tell PyTorch to use the GPU.
device = torch.device(f"cuda:{device_num}")
print("There are %d GPU(s) available." % torch.cuda.device_count())
print("We will use the GPU:", torch.cuda.get_device_name(device_num))
# If not...
else:
print("No GPU available, using the CPU instead.")
device = torch.device("cpu")
return device
@staticmethod
def set_seed(seed_val: int = 888):
"""Set random seed for reproducible results."""
random.seed(seed_val)
np.random.seed(seed_val)
torch.manual_seed(seed_val)
torch.cuda.manual_seed_all(seed_val)
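# A minimal usage sketch (model path and annotation pair are assumptions, not
# part of the original file):
#   clf = BERTSynonymClassifier(
#       loaded_path="bert-base-uncased", output_path="./bertmap/bert",
#       eval_mode=True, max_length_for_input=128, batch_size_for_prediction=32)
#   scores = clf.predict([("heart attack", "myocardial infarction")])
#   # `scores` is a 1-D tensor with one synonym probability per input pair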
| 10,098 | 44.084821 | 150 | py |
ACE | ACE-main/example.py | import torch
import torch.nn.functional as F
import timm
from PIL import Image
from timm.data import resolve_data_config
from timm.data.transforms_factory import create_transform
from ace import attack_confidence_estimation
def attack_example(file_name, true_label, transform, normalization):
image = Image.open(f'./images/{file_name}.jpg').convert('RGB')
input = transform(image).unsqueeze(0).cuda() # transform and add batch dimension
with torch.no_grad():
output = model(normalization(input))
orig_prediction = torch.nn.functional.softmax(output, dim=1).max(1)
print(f'Ground truth label is {true_label}. The predicted label is {orig_prediction[1].item()} with a confidence of {orig_prediction[0].item()}')
adversarial_example = attack_confidence_estimation(model=model, input=input, label=torch.tensor(true_label), normalization=normalization)
with torch.no_grad():
attacked_prediction = torch.nn.functional.softmax(model(normalization(adversarial_example)), dim=1).max(1)
print(f'After using ACE, the predicted label is still {attacked_prediction[1].item()} with a confidence of {attacked_prediction[0].item()}')
if __name__ == '__main__':
model = timm.create_model('efficientnet_b0', pretrained=True).cuda()
model.eval()
config = resolve_data_config({}, model=model)
transform = create_transform(**config)
normalization = transform.transforms.pop(3)
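    # the Normalize transform is removed from the pipeline so that inputs stay in
    # [0, 1]; ACE clamps perturbed images to [0, 1] and applies `normalization`
    # explicitly before every model call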
# A correct prediction example
print('=============== A correct prediction example: ===============')
attack_example(file_name='tank', true_label=847, transform=transform, normalization=normalization)
# An incorrect prediction example
print('=============== An incorrect prediction example: ===============')
attack_example(file_name='binoculars', true_label=447, transform=transform, normalization=normalization) | 1,864 | 57.28125 | 149 | py |
ACE | ACE-main/ace.py | import torch
def softmax_response(logits):
return torch.nn.functional.softmax(logits, dim=1)
def attack_confidence_estimation(model, input, label, normalization, proxy=None, epsilon=0.005, epsilon_decay=0.5, max_iterations=15, confidence_score_function=softmax_response, device='cuda'):
input = input.to(device)
label = label.to(device)
model = model.to(device)
data = normalization(input)
data.requires_grad = True
if proxy:
# Black-box setting, use proxy to calculate the gradients
proxy = proxy.to(device)
output = proxy(data)
proxy.zero_grad()
with torch.no_grad():
model_output = model(normalization(input))
else:
# White-box setting, use model itself to calculate the gradients
output = model(data)
model.zero_grad()
model_output = output
init_prediction = model_output.argmax()
output = confidence_score_function(output)
# Calculate gradients of model in backward pass
output[0][init_prediction.item()].backward(retain_graph=True)
# Collect gradients
jacobian = data.grad.data
if init_prediction == label:
# If the model is correct, we wish to make it less confident of its prediction
attack_direction = -1
else:
# Otherwise, we wish to make it more confident of its misprediction
attack_direction = 1
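    # illustrative step-size schedule: with epsilon=0.005 and epsilon_decay=0.5,
    # the loop below tries 0.005, 0.0025, 0.00125, ... for at most max_iterations
    # attempts, returning the first perturbation that preserves the prediction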
with torch.no_grad():
for i in range(max_iterations):
jacobian_sign = jacobian.sign()
perturbed_image = input + epsilon * jacobian_sign * attack_direction
perturbed_image = torch.clamp(perturbed_image, 0, 1)
new_output = model(normalization(perturbed_image))
if new_output.argmax() == init_prediction:
# This adversarial example does not change the prediction as required, return it
return perturbed_image
else:
epsilon = epsilon * epsilon_decay
# The attack has failed; either the epsilon was too large, epsilon_decay too small,
# or max_iterations was insufficient. Return original input.
return input | 2,151 | 42.04 | 193 | py |
Fengshenbang-LM | Fengshenbang-LM-main/setup.py | from setuptools import setup, find_packages
setup(
name="fengshen",
version="0.0.1",
description="fengshen",
long_description="fengshen",
license="MIT Licence",
url="https://idea.edu.cn",
author="gaoxinyu",
author_email="gaoxinyu@idea.edu.cn",
packages=find_packages(),
include_package_data=True,
platforms="any",
install_requires=[
'transformers >= 4.17.0',
'datasets >= 2.0.0',
'pytorch_lightning >= 1.5.10',
'deepspeed >= 0.5.10',
'jieba-fast >= 0.53',
'jieba >= 0.40.0',
],
scripts=[],
entry_points={
'console_scripts': [
'fengshen-pipeline = fengshen.cli.fengshen_pipeline:main'
]
}
)
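# Usage note (not part of the original file): after `pip install .`, the
# console script declared above is available on the PATH as `fengshen-pipeline`.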
| 733 | 21.9375 | 69 | py |
Fengshenbang-LM | Fengshenbang-LM-main/fengshen/strategies/megatron_deepspeed.py | # Copyright The Lightning AI team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import logging
from collections import OrderedDict
from typing import Any, Dict, List, Optional, Tuple, TYPE_CHECKING, Union
import torch
from torch.nn import Module
from torch.optim import Optimizer
import pytorch_lightning as pl
from pytorch_lightning.plugins import ClusterEnvironment
from pytorch_lightning.strategies.deepspeed import _DEEPSPEED_AVAILABLE
from pytorch_lightning.utilities.types import _PATH, LRSchedulerTypeUnion
from pytorch_lightning.utilities.optimizer import optimizers_to_device
from pytorch_lightning.plugins.precision import PrecisionPlugin
from pytorch_lightning.strategies.deepspeed import DeepSpeedStrategy as OriginDeepSpeedStrategy
from pytorch_lightning.utilities.exceptions import MisconfigurationException
from fengshen.models.megatron import mpu, fused_kernels
log = logging.getLogger(__name__)
if _DEEPSPEED_AVAILABLE:
import deepspeed
def remove_module_hooks(model: torch.nn.Module) -> None:
# todo (tchaton) awaiting this feature to move upstream to DeepSpeed
for module in model.modules():
module._backward_hooks = OrderedDict()
module._is_full_backward_hook = None
module._forward_hooks = OrderedDict()
module._forward_pre_hooks = OrderedDict()
module._state_dict_hooks = OrderedDict()
module._load_state_dict_pre_hooks = OrderedDict()
class DeepSpeedStrategy(OriginDeepSpeedStrategy):
strategy_name = "megatron_deepspeed"
DEEPSPEED_ENV_VAR = "PL_DEEPSPEED_CONFIG_PATH"
def __init__(
self,
pipe_model_parallel_size,
tensor_model_parallel_size,
mpu_seed,
accelerator: Optional["pl.accelerators.Accelerator"] = None,
zero_optimization: bool = True,
stage: int = 2,
remote_device: str = "cpu",
offload_optimizer: bool = False,
offload_parameters: bool = False,
offload_params_device: str = "cpu",
nvme_path: str = "/local_nvme",
params_buffer_count: int = 5,
params_buffer_size: int = 100_000_000,
max_in_cpu: int = 1_000_000_000,
offload_optimizer_device: str = "cpu",
optimizer_buffer_count: int = 4,
block_size: int = 1048576,
queue_depth: int = 8,
single_submit: bool = False,
overlap_events: bool = True,
thread_count: int = 1,
pin_memory: bool = False,
sub_group_size: int = 1_000_000_000_000,
contiguous_gradients: bool = True,
overlap_comm: bool = True,
allgather_partitions: bool = True,
reduce_scatter: bool = True,
allgather_bucket_size: int = 200_000_000,
reduce_bucket_size: int = 200_000_000,
zero_allow_untested_optimizer: bool = True,
logging_batch_size_per_gpu: Union[str, int] = "auto",
config: Optional[Union[_PATH, Dict[str, Any]]] = None,
logging_level: int = logging.WARN,
parallel_devices: Optional[List[torch.device]] = None,
cluster_environment: Optional[ClusterEnvironment] = None,
loss_scale: float = 0,
initial_scale_power: int = 16,
loss_scale_window: int = 1000,
hysteresis: int = 2,
min_loss_scale: int = 1,
partition_activations: bool = False,
cpu_checkpointing: bool = False,
contiguous_memory_optimization: bool = False,
synchronize_checkpoint_boundary: bool = False,
load_full_weights: bool = False,
precision_plugin: Optional[PrecisionPlugin] = None,
process_group_backend: Optional[str] = None,
) -> None:
"""Provides capabilities to run training using the DeepSpeed library, with training optimizations for large
billion parameter models. `For more information: https://pytorch-
lightning.readthedocs.io/en/stable/advanced/model_parallel.html#deepspeed`.
.. warning:: This is an :ref:`experimental <versioning:Experimental API>` feature.
Defaults have been set to enable ZeRO-Offload and some have been taken from the link below.
These defaults have been set generally, but may require tuning for optimum performance based on your model size.
`For more information: https://www.deepspeed.ai/docs/config-json/#zero-optimizations-for-fp16-training`.
Arguments:
zero_optimization: Enable ZeRO optimization. This is compatible with either `precision="16-mixed"` or
`precision="bf16-mixed"`.
stage: Different stages of the ZeRO Optimizer. 0 is disabled,
1 is optimizer state partitioning, 2 is optimizer+gradient state partitioning,
3 is optimizer+gradient_parameter partitioning using the infinity engine.
remote_device: Device to instantiate the model on initially (``cpu`` or ``nvme``).
offload_optimizer: Enable offloading optimizer memory and computation to CPU or NVMe
based on ``offload_optimizer_device``.
offload_parameters: When using ZeRO Stage 3, Enable offloading parameter memory and computation
to CPU or NVMe based on ``offload_params_device``.
offload_params_device: When offloading parameters choose the device to offload to, ``cpu`` or ``nvme``.
offload_optimizer_device: When offloading optimizer state choose the device to offload to,
``cpu`` or ``nvme``.
params_buffer_count: Number of buffers in buffer pool for
parameter offloading when ``offload_params_device`` is ``nvme``.
params_buffer_size: Size of buffers in buffer pool for parameter offloading
when ``offload_params_device`` is ``nvme``.
max_in_cpu: Number of parameter elements to maintain in CPU memory when offloading to NVMe is enabled.
nvme_path: Filesystem path for NVMe device for optimizer/parameter state offloading.
optimizer_buffer_count: Number of buffers in buffer pool for optimizer state offloading
                when ``offload_optimizer_device`` is set to ``nvme``.
This should be at least the number of states maintained per parameter by the optimizer.
For example, Adam optimizer has 4 states (parameter, gradient, momentum, and variance).
block_size: When using NVMe Offloading, the I/O block size in bytes.
queue_depth: When using NVMe Offloading, the I/O queue depth.
single_submit: When using NVMe Offloading,
submit requests to storage device as multiple individual requests,
as opposed to one block of requests.
overlap_events: When using NVMe Offloading,
submit requests to storage device in an overlapped fashion
without waiting for completion of earlier requests.
thread_count: When using NVMe Offloading,
Intra-request parallelism for each read/write submitted by a user thread.
pin_memory: When using ZeRO stage 3, pin optimizer state memory on CPU.
This could boost throughput at the cost of extra memory overhead.
sub_group_size: When using ZeRO stage 3, defines the number of parameters
within a sub group to offload at a time.
Smaller numbers require more communication, but improve memory efficiency.
contiguous_gradients: Copies gradients to a continuous buffer as they are produced.
Avoids memory fragmentation during backwards. Useful when training large models.
overlap_comm: Overlap the reduction (synchronization) of gradients with the backwards computation.
This is a speed optimization when training across multiple GPUs/machines.
allgather_partitions: All gather updated parameters at the end of training step,
instead of using a series of broadcast collectives.
reduce_scatter: Use reduce/scatter instead of allreduce to average gradients.
allgather_bucket_size: Number of elements to allgather at once.
Used to limit the memory required for larger model sizes, with a tradeoff with speed.
reduce_bucket_size: Number of elements to reduce at once.
Used to limit the memory required for larger model sizes, with a tradeoff with speed.
zero_allow_untested_optimizer: Allow untested optimizers to be used with ZeRO. Currently only Adam is a
DeepSpeed supported optimizer when using ZeRO.
logging_batch_size_per_gpu: Config used in DeepSpeed to calculate verbose timing for logging
on a per sample per second basis (only displayed if logging=logging.INFO).
If set to "auto", the plugin tries to infer this from
the train DataLoader's BatchSampler, else defaults to 1.
To obtain accurate logs when using datasets that do not support batch samplers,
set this to the actual per gpu batch size (trainer.batch_size).
config: Pass in a deepspeed formatted config dict,
or path to a deepspeed config: https://www.deepspeed.ai/docs/config-json.
All defaults will be ignored if a config is passed in.
logging_level: Set logging level for deepspeed.
loss_scale: Loss scaling value for FP16 training.
0.0 results in dynamic loss scaling, otherwise static.
initial_scale_power: Power of the initial dynamic loss scale value. Loss scale is computed
by ``2^initial_scale_power``.
loss_scale_window: Window in which to raise/lower the dynamic FP16 loss scaling value.
hysteresis: FP16 Delay shift in Dynamic Loss scaling.
min_loss_scale: The minimum FP16 dynamic loss scaling value.
partition_activations: Enables partition activation when used with ZeRO stage 3 and model parallelism.
Still requires you to wrap your forward functions in deepspeed.checkpointing.checkpoint.
See `deepspeed tutorial
<https://www.deepspeed.ai/tutorials/megatron/#deepspeed-activation-checkpoints-optional>`_.
cpu_checkpointing: Offloads partitioned activations to CPU if ``partition_activations`` is enabled.
contiguous_memory_optimization: Copies partitioned activations so that they are contiguous in memory.
Not supported by all models.
synchronize_checkpoint_boundary: Insert :func:`torch.cuda.synchronize` at each checkpoint boundary.
load_full_weights: True when loading a single checkpoint file containing the model state dict
when using ZeRO Stage 3. This differs from the DeepSpeed checkpoint which contains shards
per worker.
"""
if not _DEEPSPEED_AVAILABLE:
raise MisconfigurationException(
"To use the `DeepSpeedStrategy`, you must have DeepSpeed installed."
" Install it by running `pip install -U deepspeed`."
)
super().__init__(
accelerator=accelerator,
parallel_devices=parallel_devices,
cluster_environment=cluster_environment,
precision_plugin=precision_plugin,
process_group_backend=process_group_backend,
)
self.config = self._load_config(config)
if self.config is None:
# User has not overridden config, set defaults
self.config = self._create_default_config(
zero_optimization,
zero_allow_untested_optimizer,
logging_batch_size_per_gpu,
offload_optimizer=offload_optimizer,
offload_parameters=offload_parameters,
nvme_path=nvme_path,
offload_params_device=offload_params_device,
params_buffer_count=params_buffer_count,
params_buffer_size=params_buffer_size,
max_in_cpu=max_in_cpu,
pin_memory=pin_memory,
offload_optimizer_device=offload_optimizer_device,
optimizer_buffer_count=optimizer_buffer_count,
block_size=block_size,
queue_depth=queue_depth,
single_submit=single_submit,
overlap_events=overlap_events,
thread_count=thread_count,
partition_activations=partition_activations,
cpu_checkpointing=cpu_checkpointing,
contiguous_memory_optimization=contiguous_memory_optimization,
synchronize_checkpoint_boundary=synchronize_checkpoint_boundary,
stage=stage,
contiguous_gradients=contiguous_gradients,
overlap_comm=overlap_comm,
allgather_partitions=allgather_partitions,
reduce_scatter=reduce_scatter,
allgather_bucket_size=allgather_bucket_size,
reduce_bucket_size=reduce_bucket_size,
sub_group_size=sub_group_size,
)
import deepspeed
self._config_initialized = False
deepspeed.utils.logging.logger.setLevel(logging_level)
self.remote_device = remote_device
self.load_full_weights = load_full_weights
# default FP16 parameters.
self.loss_scale = loss_scale
self.initial_scale_power = initial_scale_power
self.loss_scale_window = loss_scale_window
self.hysteresis = hysteresis
self.min_loss_scale = min_loss_scale
self.pipe_model_parallel_size = pipe_model_parallel_size
self.tensor_model_parallel_size = tensor_model_parallel_size
self.mpu_seed = mpu_seed
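    # A minimal usage sketch (values are assumptions, not from the original file):
    #   strategy = DeepSpeedStrategy(pipe_model_parallel_size=2,
    #                                tensor_model_parallel_size=2,
    #                                mpu_seed=42, stage=1)
    #   trainer = pl.Trainer(strategy=strategy, ...)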
def _setup_model_and_optimizer(
self, model: Module, optimizer: Optimizer, lr_scheduler: Optional[LRSchedulerTypeUnion] = None
):
"""Initialize one model and one optimizer with an optional learning rate scheduler.
This calls :func:`deepspeed.initialize` internally.
"""
model_parameters = filter(lambda p: p.requires_grad, model.parameters())
deepspeed_engine, deepspeed_optimizer, _, _ = deepspeed.initialize(
args=argparse.Namespace(device_rank=self.root_device.index),
config=self.config,
model=model,
model_parameters=model_parameters, # type: ignore
optimizer=optimizer,
lr_scheduler=lr_scheduler,
dist_init_required=False,
mpu=mpu
)
return deepspeed_engine, deepspeed_optimizer
def _set_deepspeed_activation_checkpointing(self) -> None:
import deepspeed
assert isinstance(self.config, dict)
assert self.config.get(
"activation_checkpointing"), 'megatron_deepspeed stratygy need activation_checkpointing config'
if self.config.get("activation_checkpointing"):
checkpoint_config = self.config["activation_checkpointing"]
deepspeed.checkpointing.configure(
mpu_=mpu,
num_checkpoints=checkpoint_config.get("num_checkpoints"),
partition_activations=checkpoint_config.get("partition_activations"),
contiguous_checkpointing=checkpoint_config.get("contiguous_memory_optimization"),
checkpoint_in_cpu=checkpoint_config.get("cpu_checkpointing"),
profile=checkpoint_config.get("profile"),
)
def setup_environment(self) -> None:
super().setup_environment()
self.setup_mpu()
def setup_mpu(self) -> None:
fused_kernels.load_fused_kernels()
rank = self.cluster_environment.global_rank()
world_size = self.cluster_environment.world_size()
from deepspeed.runtime.pipe.topology import PipeModelDataParallelTopology
# this does pipe on the most outside, then data, then model.
# PipeModelDataParallelTopology is just a wrapper over ProcessTopology that predefines this order.
dp = world_size // self.pipe_model_parallel_size // self.tensor_model_parallel_size
topo = PipeModelDataParallelTopology(num_pp=self.pipe_model_parallel_size,
num_mp=self.tensor_model_parallel_size,
num_dp=dp)
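        # worked example: world_size=16 with pipe_model_parallel_size=2 and
        # tensor_model_parallel_size=2 yields dp = 16 // 2 // 2 = 4 data-parallel
        # replicas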
# Offset base seeds for the interior pipeline stages.
# TODO: adjust last stage too once IO is improved.
        stage_id = topo.get_coord(rank=rank).pipe
        seed = self.mpu_seed
        if 0 < stage_id < topo.get_dim("pipe") - 1:
            offset = seed + 1138
            seed = offset + (stage_id * self.tensor_model_parallel_size)
mpu.initialize_model_parallel(
self.tensor_model_parallel_size,
topology=topo,
fp32_allreduce=False)
self._set_deepspeed_activation_checkpointing()
        mpu.model_parallel_cuda_manual_seed(seed)
def _initialize_deepspeed_inference(self, model: Module) -> None:
import deepspeed
assert isinstance(self.config, dict)
# todo: this is required for DeepSpeed throughput timers
inference_config = {"train_micro_batch_size_per_gpu": 1}
if "fp16" in self.config:
inference_config.update({"fp16": self.config["fp16"]})
if self.zero_stage_3:
inference_config.update(
{
"zero_allow_untested_optimizer": self.config["zero_allow_untested_optimizer"],
"zero_optimization": self.config["zero_optimization"],
}
)
# Remove all module hooks before initializing new model
remove_module_hooks(model)
model, _, _, _ = deepspeed.initialize(
args=argparse.Namespace(device_rank=self.root_device.index),
config=inference_config,
model=model,
optimizer=None,
lr_scheduler=None,
model_parameters=[],
dist_init_required=False,
mpu=mpu
)
self.model = model
| 18,750 | 45.8775 | 120 | py |
Fengshenbang-LM | Fengshenbang-LM-main/fengshen/examples/pretrain_t5/pretrain_t5.py | import time
from builtins import print
import sys
import os
import torch
import argparse
import json
import pytorch_lightning as pl
from transformers import MT5Config, MT5Tokenizer
from pytorch_lightning import Trainer, loggers
from transformers import MT5ForConditionalGeneration
from pytorch_lightning.callbacks import LearningRateMonitor
# os.environ["CUDA_VISIBLE_DEVICES"] = '3'
class MT5PretrainModel(pl.LightningModule):
@staticmethod
def add_model_specific_args(parent_args):
parser = parent_args.add_argument_group('BaseModel')
parser.add_argument('--keep_tokens_path', default=None, type=str)
return parent_args
def __init__(self, args):
super().__init__()
self.save_hyperparameters(args)
if args.tokenizer_type == 't5_tokenizer':
if args.new_vocab_path is not None:
                # For continuing training from mT5: keep only the Chinese/English vocabulary and use the new SentencePiece model
assert args.keep_tokens_path is not None
keep_tokens = json.load(open(args.keep_tokens_path))
self.model = MT5ForConditionalGeneration.from_pretrained(
args.pretrained_model_path)
new_config = self.model.config
new_config.vocab_size = len(keep_tokens)
print('vocab_size:', new_config.vocab_size)
new_state_dict = self.model.state_dict()
select_index = torch.tensor(keep_tokens)
new_state_dict['encoder.embed_tokens.weight'] = torch.index_select(
new_state_dict['encoder.embed_tokens.weight'], dim=0, index=select_index)
new_state_dict['shared.weight'] = torch.index_select(
new_state_dict['shared.weight'], dim=0, index=select_index)
new_state_dict['decoder.embed_tokens.weight'] = torch.index_select(
new_state_dict['decoder.embed_tokens.weight'], dim=0, index=select_index)
new_state_dict['lm_head.weight'] = torch.index_select(
new_state_dict['lm_head.weight'], dim=0, index=select_index)
self.model = MT5ForConditionalGeneration.from_pretrained(
args.pretrained_model_path, config=new_config, state_dict=new_state_dict)
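                # worked example (illustrative): if keep_tokens = [0, 1, 5, ...]
                # lists the original mT5 vocabulary ids to keep, each embedding
                # matrix above is shrunk to len(keep_tokens) rows via index_select
                # along dim 0, and the model is rebuilt with the smaller vocab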
# self.model = MT5ForConditionalGeneration(config=new_config)
else:
                # For continuing training from an existing checkpoint
self.model = MT5ForConditionalGeneration.from_pretrained(
args.pretrained_model_path
)
else:
self.model = MT5ForConditionalGeneration(
MT5Config.from_pretrained(args.pretrained_model_path)
)
def setup(self, stage) -> None:
if stage == 'fit':
train_loader = self.trainer._data_connector._train_dataloader_source.dataloader()
# Calculate total steps
if self.trainer.max_epochs > 0:
world_size = self.trainer.world_size
tb_size = self.hparams.train_batchsize * max(1, world_size)
ab_size = self.trainer.accumulate_grad_batches * float(self.trainer.max_epochs)
self.total_steps = (len(train_loader.dataset) *
self.trainer.max_epochs // tb_size) // ab_size
else:
self.total_steps = self.trainer.max_steps // self.trainer.accumulate_grad_batches
print('Total steps: {}' .format(self.total_steps))
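            # worked example (illustrative): 1,000,000 samples, train_batchsize=8,
            # world_size=8 and accumulate_grad_batches=2 over 1 epoch give
            # tb_size = 64 and total_steps = (1_000_000 // 64) // 2 = 7812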
def configure_optimizers(self):
from fengshen.models.model_utils import configure_optimizers
return configure_optimizers(self)
def training_step(self, batch, batch_idx):
output = self.model(
input_ids=batch['input_ids'], labels=batch['labels'])
acc = self.comput_metrix(output.logits, batch['labels'])
self.log('train_loss', output.loss, sync_dist=True)
self.log('train_acc', acc, sync_dist=True)
return output.loss
def validation_step(self, batch, batch_idx):
# print('is out of index: ', batch['input_ids'][batch['input_ids'] >= 32598])
output = self.model(
input_ids=batch['input_ids'], labels=batch['labels'])
acc = self.comput_metrix(output.logits, batch['labels'])
self.log('val_loss', output.loss, sync_dist=True)
self.log('val_acc', acc, sync_dist=True)
def comput_metrix(self, logits, labels):
y_pred = torch.argmax(logits, dim=-1)
y_pred = y_pred.view(size=(-1,))
y_true = labels.view(size=(-1,)).float()
corr = torch.eq(y_pred, y_true)
acc = torch.sum(corr.float())/y_true.shape[0]
return acc
def on_save_checkpoint(self, checkpoint) -> None:
        # Save the current loop info in the middle of an epoch.
        # If your Lightning version is <= 1.6.0, uncomment the line below:
# checkpoint['loops'] = self.trainer.checkpoint_connector._get_loops_state_dict()
if self.trainer.global_rank == 0 and self.trainer.global_step % self.hparams.every_n_train_steps == 0:
self.model.save_pretrained(os.path.join(
self.trainer.checkpoint_callback.dirpath,
'hf_pretrained_epoch{}_step{}'.format(self.trainer.current_epoch, self.trainer.global_step)))
def on_load_checkpoint(self, checkpoint) -> None:
global_step_offset = checkpoint["global_step"]
if 'global_samples' in checkpoint:
self.consumed_samples = checkpoint['global_samples']
self.trainer.fit_loop.epoch_loop._batches_that_stepped = global_step_offset
def get_time_str():
return time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
def main():
total_parser = argparse.ArgumentParser("Pretrain Unsupervise.")
total_parser.add_argument(
'--do_eval_only', action='store_true', default=False)
total_parser.add_argument(
'--pretrained_model_path', default=None, type=str)
total_parser.add_argument(
'--new_vocab_path', default=None, type=str)
total_parser.add_argument('--max_seq_length', default=1024, type=int)
total_parser.add_argument('--ckpt_path', default=None, type=str)
sys.path.append('../../../')
from fengshen.data.t5_dataloader.t5_datasets import UnsuperviseT5DataModel
from fengshen.utils.universal_checkpoint import UniversalCheckpoint
# * Args for data preprocessing
total_parser = UnsuperviseT5DataModel.add_data_specific_args(total_parser)
# * Args for training
total_parser = Trainer.add_argparse_args(total_parser)
total_parser = UniversalCheckpoint.add_argparse_args(total_parser)
total_parser = MT5PretrainModel.add_model_specific_args(total_parser)
# * Args for base model
args = total_parser.parse_args()
print('Argument parse success.')
print('UnsuperviseT5DataModel load start {}'.format(get_time_str()))
data_model = UnsuperviseT5DataModel(args)
print('UnsuperviseT5DataModel load end {}'.format(get_time_str()))
if not args.do_eval_only:
model = MT5PretrainModel(args)
checkpoint_callback = UniversalCheckpoint(args)
lr_monitor = LearningRateMonitor(logging_interval='step')
logger = loggers.TensorBoardLogger(save_dir=os.path.join(
args.default_root_dir, 'logs/'))
trainer = Trainer.from_argparse_args(args,
logger=logger,
callbacks=[checkpoint_callback, lr_monitor]
)
trainer.fit(model, data_model, ckpt_path=args.ckpt_path)
else:
tokenizer = MT5Tokenizer.from_pretrained(args.new_vocab_path, extra_ids=0)
model = MT5PretrainModel(args=args, num_data=len(data_model.predict_dataloader()))
trainer = Trainer.from_argparse_args(args)
result = trainer.predict(model, data_model)
result = result[0]
for i in range(4):
print(tokenizer.batch_decode(result['input_ids'][i]))
print(tokenizer.batch_decode(result['predict_ids'][i]))
print(tokenizer.batch_decode(result['labels'][i]))
if __name__ == '__main__':
main()
| 8,139 | 45.25 | 110 | py |
Fengshenbang-LM | Fengshenbang-LM-main/fengshen/examples/pretrain_t5/convert_ckpt_to_bin.py | import time
from builtins import print
import argparse
import torch
# os.environ["CUDA_VISIBLE_DEVICES"] = '3'
def get_time_str():
return time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
def main():
total_parser = argparse.ArgumentParser("Pretrain Unsupervise.")
total_parser.add_argument('--ckpt_path', default=None, type=str)
total_parser.add_argument('--bin_path', default=None, type=str)
total_parser.add_argument('--rm_prefix', default=None, type=str)
# * Args for base model
args = total_parser.parse_args()
print('Argument parse success.')
state_dict = torch.load(args.ckpt_path)['module']
new_state_dict = {}
if args.rm_prefix is not None:
prefix_len = len(args.rm_prefix)
for k, v in state_dict.items():
if k[:prefix_len] == args.rm_prefix:
new_state_dict[k[prefix_len:]] = v
else:
new_state_dict[k] = v
else:
new_state_dict = state_dict
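    # e.g. (illustrative), with --rm_prefix "model." a checkpoint key such as
    # "model.shared.weight" is written out as "shared.weight"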
torch.save(new_state_dict, args.bin_path)
if __name__ == '__main__':
main()
| 1,071 | 27.210526 | 68 | py |
Fengshenbang-LM | Fengshenbang-LM-main/fengshen/examples/pretrain_t5/finetune_t5.py | import time
from builtins import print
import sys
import os
import torch
import argparse
import pytorch_lightning as pl
from pytorch_lightning import Trainer, loggers
from transformers import MT5ForConditionalGeneration
from pytorch_lightning.callbacks import LearningRateMonitor
# os.environ["CUDA_VISIBLE_DEVICES"] = '3'
class MT5FinetuneModel(pl.LightningModule):
@staticmethod
def add_model_specific_args(parent_args):
parser = parent_args.add_argument_group('BaseModel')
parser.add_argument('--keep_tokens_path', default=None, type=str)
return parent_args
def __init__(self, args):
super().__init__()
self.save_hyperparameters(args)
self.model = MT5ForConditionalGeneration.from_pretrained(
args.pretrained_model_path
)
def setup(self, stage) -> None:
if stage == 'fit':
train_loader = self.trainer._data_connector._train_dataloader_source.dataloader()
# Calculate total steps
if self.trainer.max_epochs > 0:
world_size = self.trainer.world_size
tb_size = self.hparams.train_batchsize * max(1, world_size)
ab_size = self.trainer.accumulate_grad_batches * float(self.trainer.max_epochs)
self.total_steps = (len(train_loader.dataset) *
self.trainer.max_epochs // tb_size) // ab_size
else:
self.total_steps = self.trainer.max_steps // self.trainer.accumulate_grad_batches
print('Total steps: {}' .format(self.total_steps))
def configure_optimizers(self):
from fengshen.models.model_utils import configure_optimizers
return configure_optimizers(self)
def training_step(self, batch, batch_idx):
output = self.model(
input_ids=batch['input_ids'],
attention_mask=batch['attention_mask'],
labels=batch['labels'])
acc = self.comput_metrix(output.logits, batch['labels'])
self.log('train_loss', output.loss, sync_dist=True)
self.log('train_acc', acc, sync_dist=True)
return output.loss
def validation_step(self, batch, batch_idx):
# print('is out of index: ', batch['input_ids'][batch['input_ids'] >= 32598])
output = self.model(
input_ids=batch['input_ids'],
attention_mask=batch['attention_mask'],
labels=batch['labels'])
acc = self.comput_metrix(output.logits, batch['labels'])
cond_output = self.model.generate(
input_ids=batch['input_ids'],
attention_mask=batch['attention_mask'],
force_words_ids=batch['force_words_ids'],
num_beams=2,
)
cond_acc = self.comput_metrix(cond_output, batch['labels'])
self.log('val_loss', output.loss, sync_dist=True)
self.log('val_acc', acc, sync_dist=True)
self.log('cond_acc', cond_acc, sync_dist=True)
def comput_metrix(self, logits, labels):
y_pred = torch.argmax(logits, dim=-1)
y_pred = y_pred.view(size=(-1,))
y_true = labels.view(size=(-1,)).float()
corr = torch.eq(y_pred, y_true)
acc = torch.sum(corr.float())/y_true.shape[0]
return acc
def on_save_checkpoint(self, checkpoint) -> None:
        # Save the current loop info in the middle of an epoch.
        # If your Lightning version is <= 1.6.0, uncomment the line below:
# checkpoint['loops'] = self.trainer.checkpoint_connector._get_loops_state_dict()
if self.trainer.global_rank == 0 and self.trainer.global_step % self.hparams.every_n_train_steps == 0:
self.model.save_pretrained(os.path.join(
self.trainer.checkpoint_callback.dirpath,
'hf_pretrained_epoch{}_step{}'.format(self.trainer.current_epoch, self.trainer.global_step)))
def on_load_checkpoint(self, checkpoint) -> None:
global_step_offset = checkpoint["global_step"]
if 'global_samples' in checkpoint:
self.consumed_samples = checkpoint['global_samples']
self.trainer.fit_loop.epoch_loop._batches_that_stepped = global_step_offset
def get_time_str():
return time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
def main():
total_parser = argparse.ArgumentParser("Pretrain Unsupervise.")
total_parser.add_argument(
'--do_eval_only', action='store_true', default=False)
total_parser.add_argument(
'--pretrained_model_path', default=None, type=str)
total_parser.add_argument(
'--new_vocab_path', default=None, type=str)
total_parser.add_argument('--max_seq_length', default=1024, type=int)
total_parser.add_argument('--ckpt_path', default=None, type=str)
sys.path.append('../../../')
from fengshen.data.t5_dataloader.t5_datasets import TaskT5DataModel
from fengshen.utils.universal_checkpoint import UniversalCheckpoint
# * Args for data preprocessing
total_parser = TaskT5DataModel.add_data_specific_args(total_parser)
# * Args for training
total_parser = Trainer.add_argparse_args(total_parser)
total_parser = UniversalCheckpoint.add_argparse_args(total_parser)
total_parser = MT5FinetuneModel.add_model_specific_args(total_parser)
# * Args for base model
args = total_parser.parse_args()
print('Argument parse success.')
print('TaskT5DataModel load start {}'.format(get_time_str()))
data_model = TaskT5DataModel(args)
print('TaskT5DataModel load end {}'.format(get_time_str()))
if not args.do_eval_only:
model = MT5FinetuneModel(args)
checkpoint_callback = UniversalCheckpoint(args)
lr_monitor = LearningRateMonitor(logging_interval='step')
logger = loggers.TensorBoardLogger(save_dir=os.path.join(
args.default_root_dir, 'logs/'))
trainer = Trainer.from_argparse_args(args,
logger=logger,
callbacks=[checkpoint_callback, lr_monitor]
)
trainer.fit(model, data_model, ckpt_path=args.ckpt_path)
if __name__ == '__main__':
main()
| 6,184 | 41.655172 | 110 | py |
Fengshenbang-LM | Fengshenbang-LM-main/fengshen/examples/stable_diffusion_dreambooth/train.py | # -*- encoding: utf-8 -*-
'''
Copyright 2022 The International Digital Economy Academy (IDEA). CCNL team. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
@File : train.py
@Time : 2022/11/09 22:27
@Author : Gan Ruyi
@Version : 1.0
@Contact : ganruyi@idea.edu.cn
@License : (C)Copyright 2022-2023, CCNL-IDEA
'''
import hashlib
import itertools
import os
from pathlib import Path
from tqdm.auto import tqdm
import torch
import argparse
from pytorch_lightning import (
LightningModule,
Trainer,
)
from pytorch_lightning.callbacks import (
LearningRateMonitor,
)
from transformers import BertTokenizer, BertModel, CLIPTokenizer, CLIPTextModel
from diffusers import AutoencoderKL, DDPMScheduler, StableDiffusionPipeline, UNet2DConditionModel
from torch.nn import functional as F
from fengshen.data.dreambooth_datasets.dreambooth_datasets import PromptDataset, DreamBoothDataset
from fengshen.data.universal_datamodule import UniversalDataModule
from fengshen.models.model_utils import (
add_module_args,
configure_optimizers,
get_total_steps,
)
from fengshen.utils.universal_checkpoint import UniversalCheckpoint
from fengshen.data.dreambooth_datasets.dreambooth_datasets import add_data_args
class StableDiffusionDreamBooth(LightningModule):
@staticmethod
def add_module_specific_args(parent_parser):
parser = parent_parser.add_argument_group('Taiyi Stable Diffusion Module')
parser.add_argument('--train_text_encoder', action='store_true', default=False)
# dreambooth train unet only default
parser.add_argument('--train_unet', action='store_true', default=True)
return parent_parser
def __init__(self, args):
super().__init__()
if 'Taiyi-Stable-Diffusion-1B-Chinese-v0.1' in args.model_path:
self.tokenizer = BertTokenizer.from_pretrained(
args.model_path, subfolder="tokenizer")
self.text_encoder = BertModel.from_pretrained(
args.model_path, subfolder="text_encoder") # load from taiyi_finetune-v0
else:
self.tokenizer = CLIPTokenizer.from_pretrained(
args.model_path, subfolder="tokenizer")
self.text_encoder = CLIPTextModel.from_pretrained(
args.model_path, subfolder="text_encoder")
self.vae = AutoencoderKL.from_pretrained(
args.model_path, subfolder="vae")
self.unet = UNet2DConditionModel.from_pretrained(
args.model_path, subfolder="unet")
self.noise_scheduler = DDPMScheduler.from_config(
args.model_path, subfolder="scheduler")
# set model
self.vae.requires_grad_(False)
        if not args.train_text_encoder:
            self.text_encoder.requires_grad_(False)
        if not args.train_unet:
            self.unet.requires_grad_(False)
self.save_hyperparameters(args)
def generate_extra_data(self):
global_rank = self.global_rank
device = self.trainer.device_ids[global_rank]
print('generate on device {} of global_rank {}'.format(device, global_rank))
class_images_dir = Path(self.hparams.class_data_dir)
if not class_images_dir.exists():
class_images_dir.mkdir(parents=True)
cur_class_images = len(list(class_images_dir.iterdir()))
if cur_class_images < self.hparams.num_class_images:
pipeline = StableDiffusionPipeline.from_pretrained(
self.hparams.model_path,
safety_checker=None,
)
pipeline.set_progress_bar_config(disable=True)
num_new_images = self.hparams.num_class_images - cur_class_images
print(f"Number of class images to sample: {num_new_images}.")
sample_dataset = PromptDataset(self.hparams.class_prompt, num_new_images)
sample_dataloader = torch.utils.data.DataLoader(sample_dataset, batch_size=self.hparams.sample_batch_size)
pipeline.to(device)
for example in tqdm(
sample_dataloader, desc="Generating class images", disable=global_rank != 0
):
images = pipeline(example["prompt"]).images
for i, image in enumerate(images):
hash_image = hashlib.sha1(image.tobytes()).hexdigest()
image_filename = class_images_dir / f"{example['index'][i] + cur_class_images}-{hash_image}.jpg"
image.save(image_filename)
del pipeline
# if torch.cuda.is_available():
# torch.cuda.empty_cache()
def setup(self, stage) -> None:
if self.hparams.with_prior_preservation:
self.generate_extra_data()
if stage == 'fit':
self.total_steps = get_total_steps(self.trainer, self.hparams)
print('Total steps: {}' .format(self.total_steps))
def configure_optimizers(self):
model_params = []
if self.hparams.train_unet and self.hparams.train_text_encoder:
model_params = itertools.chain(self.unet.parameters(), self.text_encoder.parameters())
elif self.hparams.train_unet:
model_params = self.unet.parameters()
elif self.hparams.train_text_encoder:
model_params = self.text_encoder.parameters()
return configure_optimizers(self, model_params=model_params)
def training_step(self, batch, batch_idx):
if self.hparams.train_text_encoder:
self.text_encoder.train()
if self.hparams.train_unet:
self.unet.train()
latents = self.vae.encode(batch["pixel_values"]).latent_dist.sample()
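        # scale latents by 0.18215, the latent scaling factor of the Stable
        # Diffusion VAE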
latents = latents * 0.18215
# Sample noise that we'll add to the latents
noise = torch.randn(latents.shape).to(latents.device)
noise = noise.to(dtype=self.unet.dtype)
bsz = latents.shape[0]
# Sample a random timestep for each image
timesteps = torch.randint(
0, self.noise_scheduler.config.num_train_timesteps, (bsz,), device=latents.device)
timesteps = timesteps.long()
# Add noise to the latents according to the noise magnitude at each timestep
# (this is the forward diffusion process)
noisy_latents = self.noise_scheduler.add_noise(latents, noise, timesteps)
noisy_latents = noisy_latents.to(dtype=self.unet.dtype)
# Get the text embedding for conditioning
# with torch.no_grad():
encoder_hidden_states = self.text_encoder(batch["input_ids"])[0]
# Predict the noise residual
noise_pred = self.unet(noisy_latents, timesteps, encoder_hidden_states).sample
if self.hparams.with_prior_preservation:
# Chunk the noise and noise_pred into two parts and compute the loss on each part separately.
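            # (instance examples come first and class examples second in the
            # batch, as assembled by collate_fn below, so chunking along dim 0
            # separates the two halves)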
noise_pred, noise_pred_prior = torch.chunk(noise_pred, 2, dim=0)
noise, noise_prior = torch.chunk(noise, 2, dim=0)
# Compute instance loss
loss = F.mse_loss(noise_pred, noise, reduction="none").mean([1, 2, 3]).mean()
# Compute prior loss
prior_loss = F.mse_loss(noise_pred_prior, noise_prior, reduction="mean")
# Add the prior loss to the instance loss.
loss = loss + args.prior_loss_weight * prior_loss
else:
loss = F.mse_loss(noise_pred, noise, reduction="mean")
self.log("train_loss", loss.item(), on_epoch=False, prog_bar=True, logger=True)
if self.trainer.global_rank == 0:
if (self.global_step+1) % 5000 == 0:
print('saving model...')
pipeline = StableDiffusionPipeline.from_pretrained(
args.model_path, unet=self.unet, text_encoder=self.text_encoder, tokenizer=self.tokenizer,
)
pipeline.save_pretrained(os.path.join(
args.default_root_dir, f'hf_out_{self.trainer.current_epoch}'))
return {"loss": loss}
def on_train_end(self) -> None:
if self.trainer.global_rank == 0:
print('saving model...')
pipeline = StableDiffusionPipeline.from_pretrained(
args.model_path, unet=self.unet, text_encoder=self.text_encoder, tokenizer=self.tokenizer,
)
pipeline.save_pretrained(os.path.join(
args.default_root_dir, f'hf_out_{self.trainer.current_epoch}'))
def on_load_checkpoint(self, checkpoint) -> None:
        # Compatibility with older Lightning versions, where resuming from a ckpt resets the step count to 0
global_step_offset = checkpoint["global_step"]
if 'global_samples' in checkpoint:
self.consumed_samples = checkpoint['global_samples']
self.trainer.fit_loop.epoch_loop._batches_that_stepped = global_step_offset
if __name__ == '__main__':
args_parser = argparse.ArgumentParser()
args_parser = add_module_args(args_parser)
args_parser = add_data_args(args_parser)
args_parser = UniversalDataModule.add_data_specific_args(args_parser)
args_parser = Trainer.add_argparse_args(args_parser)
args_parser = StableDiffusionDreamBooth.add_module_specific_args(args_parser)
args_parser = UniversalCheckpoint.add_argparse_args(args_parser)
args = args_parser.parse_args()
model = StableDiffusionDreamBooth(args)
tokenizer = model.tokenizer
datasets = DreamBoothDataset(
instance_data_dir=args.instance_data_dir,
instance_prompt=args.instance_prompt,
tokenizer=tokenizer,
class_data_dir=args.class_data_dir,
class_prompt=args.class_prompt,
size=512,
center_crop=args.center_crop,
)
# construct the datasets to a dict for universal_datamodule
datasets = {'train': datasets}
def collate_fn(examples):
# print(examples)
input_ids = [example["instance_prompt_ids"] for example in examples]
pixel_values = [example["instance_images"] for example in examples]
# Concat class and instance examples for prior preservation.
# We do this to avoid doing two forward passes.
if args.with_prior_preservation:
input_ids += [example["class_prompt_ids"] for example in examples]
pixel_values += [example["class_images"] for example in examples]
pixel_values = torch.stack(pixel_values)
pixel_values = pixel_values.to(memory_format=torch.contiguous_format).float()
input_ids = tokenizer.pad(
{"input_ids": input_ids},
padding="max_length",
max_length=tokenizer.model_max_length,
return_tensors="pt",
).input_ids
batch = {
"input_ids": input_ids,
"pixel_values": pixel_values,
}
return batch
datamodule = UniversalDataModule(
tokenizer=tokenizer, collate_fn=collate_fn, args=args, datasets=datasets)
lr_monitor = LearningRateMonitor(logging_interval='step')
checkpoint_callback = UniversalCheckpoint(args)
trainer = Trainer.from_argparse_args(args,
callbacks=[
lr_monitor,
checkpoint_callback])
trainer.fit(model, datamodule, ckpt_path=args.load_ckpt_path)
| 11,678 | 41.162455 | 118 | py |
Fengshenbang-LM | Fengshenbang-LM-main/fengshen/examples/zen2_finetune/fengshen_token_level_ft_task.py | # coding=utf-8
# Copyright 2021 The IDEA Authors. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from fengshen.models.zen2.modeling import ZenForTokenClassification
from fengshen.metric.metric import SeqEntityScore
from fengshen.models.zen2.tokenization import BertTokenizer
from fengshen.models.zen2.ngram_utils import ZenNgramDict
from pytorch_lightning.callbacks import LearningRateMonitor
from dataclasses import dataclass
import logging
import math
import numpy as np
import os
import json
import torch
import pytorch_lightning as pl
import argparse
from pytorch_lightning.callbacks import ModelCheckpoint
from torch.utils.data import Dataset, DataLoader
import torch.nn.functional as F
logging.basicConfig(format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',
datefmt='%m/%d/%Y %H:%M:%S',
level=logging.ERROR)
logger = logging.getLogger(__name__)
class InputExample(object):
"""A single training/test example for simple sequence classification."""
def __init__(self, guid, text_a, text_b=None, label=None):
"""Constructs a InputExample.
Args:
guid: Unique id for the example.
text_a: string. The untokenized text of the first sequence. For single
sequence tasks, only this sequence must be specified.
text_b: (Optional) string. The untokenized text of the second sequence.
Only must be specified for sequence pair tasks.
label: (Optional) string. The label of the example. This should be
specified for train and dev examples, but not for test examples.
"""
self.guid = guid
self.text_a = text_a
self.text_b = text_b
self.label = label
class InputFeatures(object):
"""A single set of features of data."""
def __init__(self, input_ids, input_mask, segment_ids, label_id, ngram_ids, ngram_positions, ngram_lengths,
ngram_tuples, ngram_seg_ids, ngram_masks, valid_ids=None, label_mask=None, b_use_valid_filter=False):
self.input_ids = input_ids
self.input_mask = input_mask
self.segment_ids = segment_ids
self.label_id = label_id
self.valid_ids = valid_ids
self.label_mask = label_mask
self.ngram_ids = ngram_ids
self.ngram_positions = ngram_positions
self.ngram_lengths = ngram_lengths
self.ngram_tuples = ngram_tuples
self.ngram_seg_ids = ngram_seg_ids
self.ngram_masks = ngram_masks
self.b_use_valid_filter = b_use_valid_filter
def convert_examples_to_features(examples, label_map, max_seq_length, tokenizer, ngram_dict):
"""Loads a data file into a list of `InputBatch`s."""
# label_map = {label: i for i, label in enumerate(label_list, 1)}
# label_map["[PAD]"] = 0
features = []
b_use_valid_filter = False
for (ex_index, example) in enumerate(examples):
textlist = example.text_a
labellist = example.label
tokens = []
labels = []
valid = []
label_mask = []
for i, word in enumerate(textlist):
token = tokenizer.tokenize(word)
if len(tokens) + len(token) > max_seq_length - 2:
break
tokens.extend(token)
label_1 = labellist[i]
for m in range(len(token)):
if m == 0:
labels.append(label_1)
valid.append(1)
label_mask.append(1)
else:
valid.append(0)
b_use_valid_filter = True
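        # illustrative example: if a word tokenizes to ["play", "##ing"], the
        # label is attached to "play" (valid=1) and "##ing" gets valid=0 so the
        # valid filter can drop it downstream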
ntokens = []
segment_ids = []
label_ids = []
ntokens.append("[CLS]")
segment_ids.append(0)
valid.insert(0, 1)
label_mask.insert(0, 1)
label_ids.append(label_map["[CLS]"])
for i, token in enumerate(tokens):
ntokens.append(token)
segment_ids.append(0)
if len(labels) > i:
label_ids.append(label_map[labels[i]])
ntokens.append("[SEP]")
segment_ids.append(0)
valid.append(1)
label_mask.append(1)
label_ids.append(label_map["[SEP]"])
input_ids = tokenizer.convert_tokens_to_ids(ntokens)
input_mask = [1] * len(input_ids)
label_mask = [1] * len(label_ids)
while len(input_ids) < max_seq_length:
input_ids.append(0)
input_mask.append(0)
segment_ids.append(0)
label_ids.append(0)
valid.append(1)
label_mask.append(0)
while len(label_ids) < max_seq_length:
label_ids.append(0)
label_mask.append(0)
assert len(input_ids) == max_seq_length
assert len(input_mask) == max_seq_length
assert len(segment_ids) == max_seq_length
assert len(label_ids) == max_seq_length
assert len(valid) == max_seq_length
assert len(label_mask) == max_seq_length
# ----------- code for ngram BEGIN-----------
ngram_matches = []
        # Scan ngram segments of length 2 up to max_gram_n and check whether each appears in the ngram dictionary
max_gram_n = ngram_dict.max_ngram_len
for p in range(2, max_gram_n):
for q in range(0, len(tokens) - p + 1):
character_segment = tokens[q:q + p]
# j is the starting position of the ngram
# i is the length of the current ngram
character_segment = tuple(character_segment)
if character_segment in ngram_dict.ngram_to_id_dict:
ngram_index = ngram_dict.ngram_to_id_dict[character_segment]
ngram_freq = ngram_dict.ngram_to_freq_dict[character_segment]
ngram_matches.append([ngram_index, q, p, character_segment, ngram_freq])
ngram_matches = sorted(ngram_matches, key=lambda s: s[0])
max_ngram_in_seq_proportion = math.ceil((len(tokens) / max_seq_length) * ngram_dict.max_ngram_in_seq)
if len(ngram_matches) > max_ngram_in_seq_proportion:
ngram_matches = ngram_matches[:max_ngram_in_seq_proportion]
ngram_ids = [ngram[0] for ngram in ngram_matches]
ngram_positions = [ngram[1] for ngram in ngram_matches]
ngram_lengths = [ngram[2] for ngram in ngram_matches]
ngram_tuples = [ngram[3] for ngram in ngram_matches]
ngram_freqs = [ngram[4] for ngram in ngram_matches]
ngram_seg_ids = [0 if position < (len(tokens) + 2) else 1 for position in ngram_positions]
        ngram_mask_array = np.zeros(ngram_dict.max_ngram_in_seq, dtype=bool)  # np.bool was removed in NumPy >= 1.24
ngram_mask_array[:len(ngram_ids)] = 1
# record the masked positions
ngram_positions_matrix = np.zeros(shape=(max_seq_length, ngram_dict.max_ngram_in_seq), dtype=np.int32)
for i in range(len(ngram_ids)):
ngram_positions_matrix[ngram_positions[i]:ngram_positions[i] + ngram_lengths[i], i] = ngram_freqs[i]
        ngram_positions_matrix = torch.from_numpy(ngram_positions_matrix.astype(float))  # np.float alias removed in NumPy >= 1.24
ngram_positions_matrix = torch.div(ngram_positions_matrix, torch.stack(
[torch.sum(ngram_positions_matrix, 1)] * ngram_positions_matrix.size(1)).t() + 1e-10)
ngram_positions_matrix = ngram_positions_matrix.numpy()
# Zero-pad up to the max ngram in seq length.
padding = [0] * (ngram_dict.max_ngram_in_seq - len(ngram_ids))
ngram_ids += padding
ngram_lengths += padding
ngram_seg_ids += padding
# ----------- code for ngram END-----------
if ex_index < 5:
logger.info("*** Example ***")
logger.info("guid: %s" % (example.guid))
logger.info("tokens: %s" % " ".join([str(x) for x in tokens]))
logger.info("input_ids: %s" % " ".join([str(x) for x in input_ids]))
logger.info("input_mask: %s" % " ".join([str(x) for x in input_mask]))
logger.info("segment_ids: %s" % " ".join([str(x) for x in segment_ids]))
logger.info("label: %s (id = %s)" % (",".join([str(x) for x in example.label]), ",".join([str(x) for x in label_ids])))
logger.info("valid: %s" % " ".join([str(x) for x in valid]))
logger.info("b_use_valid_filter: %s" % str(b_use_valid_filter))
logger.info("ngram_ids: %s" % " ".join([str(x) for x in ngram_ids]))
logger.info("ngram_positions: %s" % " ".join([str(x) for x in ngram_positions]))
logger.info("ngram_lengths: %s" % " ".join([str(x) for x in ngram_lengths]))
logger.info("ngram_tuples: %s" % " ".join([str(x) for x in ngram_tuples]))
logger.info("ngram_seg_ids: %s" % " ".join([str(x) for x in ngram_seg_ids]))
features.append(
InputFeatures(input_ids=input_ids,
input_mask=input_mask,
segment_ids=segment_ids,
label_id=label_ids,
ngram_ids=ngram_ids,
ngram_positions=ngram_positions_matrix,
ngram_lengths=ngram_lengths,
ngram_tuples=ngram_tuples,
ngram_seg_ids=ngram_seg_ids,
ngram_masks=ngram_mask_array,
valid_ids=valid,
label_mask=label_mask,
b_use_valid_filter=b_use_valid_filter))
return features
class DataProcessor(object):
"""Base class for data converters for sequence classification data sets."""
def get_examples(self, data_path, set_type, quotechar=' '):
"""See base class."""
return self._create_examples(
self._read_tsv(data_path, self.get_quotechar()), set_type)
def _create_examples(self, lines, set_type):
examples = []
for i, (sentence, label) in enumerate(lines):
guid = "%s-%s" % (set_type, i)
text_a = sentence
label = label
examples.append(InputExample(guid=guid, text_a=text_a, label=label))
return examples
def get_labels(self):
"""Gets the list of labels for this data set."""
raise NotImplementedError()
def get_quotechar(self):
return ' '
@classmethod
def _read_tsv(cls, input_file, quotechar=None):
        '''
        Read a CoNLL-style file and return one (tokens, labels) pair per
        sentence, e.g. (['EU', 'rejects', ...], ['B-ORG', 'O', ...]).
        '''
        with open(input_file, encoding='utf-8') as f:
            data = []
            sentence = []
            label = []
            for line in f:
                if len(line) == 0 or line.startswith('-DOCSTART') or line[0] == "\n":
                    if len(sentence) > 0:
                        data.append((sentence, label))
                        sentence = []
                        label = []
                    continue
                splits = line.split(quotechar)
                sentence.append(splits[0])
                label.append(splits[-1][:-1])  # strip the trailing newline from the tag
            if len(sentence) > 0:
                data.append((sentence, label))
        return data
class MSRAProcessor(DataProcessor):
"""Processor for the msra data set."""
def get_labels(self):
return ['B-NR', 'B-NS', 'B-NT', 'E-NR', 'E-NS', 'E-NT', 'M-NR',
'M-NS', 'M-NT', 'O', 'S-NR', 'S-NS', 'S-NT', '[CLS]', '[SEP]']
class OntoNotes4Processor(DataProcessor):
"""Processor for the OntoNotes4 data set."""
def get_labels(self):
return ['B-GPE', 'B-LOC', 'B-ORG', 'B-PER', 'E-GPE', 'E-LOC',
'E-ORG', 'E-PER', 'M-GPE', 'M-LOC', 'M-ORG', 'M-PER', 'O',
'S-GPE', 'S-LOC', 'S-ORG', 'S-PER', '[CLS]', '[SEP]']
class WeiboProcessor(DataProcessor):
"""Processor for the Weibo data set."""
def get_labels(self):
return ['B-GPE.NAM', 'B-GPE.NOM', 'B-LOC.NAM', 'B-LOC.NOM',
'B-ORG.NAM', 'B-ORG.NOM', 'B-PER.NAM', 'B-PER.NOM', 'E-GPE.NAM',
'E-GPE.NOM', 'E-LOC.NAM', 'E-LOC.NOM', 'E-ORG.NAM', 'E-ORG.NOM',
'E-PER.NAM', 'E-PER.NOM', 'M-GPE.NAM', 'M-LOC.NAM', 'M-LOC.NOM',
'M-ORG.NAM', 'M-ORG.NOM', 'M-PER.NAM', 'M-PER.NOM', 'O',
'S-GPE.NAM', 'S-LOC.NOM', 'S-PER.NAM', 'S-PER.NOM', '[CLS]', '[SEP]']
class ResumeProcessor(DataProcessor):
"""Processor for the resume data set."""
def get_labels(self):
return ['B-CONT', 'B-EDU', 'B-LOC', 'B-NAME', 'B-ORG', 'B-PRO',
'B-RACE', 'B-TITLE', 'E-CONT', 'E-EDU', 'E-LOC', 'E-NAME',
'E-ORG', 'E-PRO', 'E-RACE', 'E-TITLE', 'M-CONT', 'M-EDU',
'M-LOC', 'M-NAME', 'M-ORG', 'M-PRO', 'M-RACE', 'M-TITLE',
'O', 'S-NAME', 'S-ORG', 'S-RACE', '[CLS]', '[SEP]']
class CMeEEProcessor(DataProcessor):
"""Processor for the CMeEE data set."""
def get_quotechar(self):
return '\t'
def get_labels(self):
return ['B-临床表现', 'B-医学检验项目', 'B-医疗程序', 'B-医疗设备',
'B-微生物类', 'B-疾病', 'B-科室', 'B-药物', 'B-身体', 'I-临床表现',
'I-医学检验项目', 'I-医疗程序', 'I-医疗设备', 'I-微生物类',
'I-疾病', 'I-科室', 'I-药物', 'I-身体', 'O', '[CLS]', '[SEP]']
class CLUENERProcessor(DataProcessor):
"""Processor for the CLUENER data set."""
def get_quotechar(self):
return '\t'
def get_labels(self):
return ['B-书名', 'B-公司', 'B-地址', 'B-姓名', 'B-政府', 'B-景点',
'B-游戏', 'B-电影', 'B-组织机构', 'B-职位', 'I-书名', 'I-公司',
'I-地址', 'I-姓名', 'I-政府', 'I-景点', 'I-游戏', 'I-电影',
'I-组织机构', 'I-职位', 'O', '[CLS]', '[SEP]']
class TaskDataset(Dataset):
def __init__(self, data_path, processor, mode='train'):
super().__init__()
self.data = self.load_data(data_path, processor, mode)
def __len__(self):
return len(self.data)
def __getitem__(self, index):
return self.data[index]
def load_data(self, data_path, processor, mode):
if mode == "train":
examples = processor.get_examples(data_path, mode)
elif mode == "test":
examples = processor.get_examples(data_path, mode)
elif mode == "dev":
examples = processor.get_examples(data_path, mode)
return examples
@dataclass
class TaskCollator:
args = None
tokenizer = None
ngram_dict = None
label2id = None
def __call__(self, samples):
features = convert_examples_to_features(samples, self.label2id, self.args.max_seq_length, self.tokenizer, self.ngram_dict)
# logger.info(" Num examples = %d", len(samples))
input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long)
input_mask = torch.tensor([f.input_mask for f in features], dtype=torch.long)
segment_ids = torch.tensor([f.segment_ids for f in features], dtype=torch.long)
label_ids = torch.tensor([f.label_id for f in features], dtype=torch.long)
valid_ids = torch.tensor([f.valid_ids for f in features], dtype=torch.long)
ngram_ids = torch.tensor([f.ngram_ids for f in features], dtype=torch.long)
ngram_positions = torch.tensor([f.ngram_positions for f in features], dtype=torch.long)
# ngram_lengths = torch.tensor([f.ngram_lengths for f in features], dtype=torch.long)
# ngram_seg_ids = torch.tensor([f.ngram_seg_ids for f in features], dtype=torch.long)
# ngram_masks = torch.tensor([f.ngram_masks for f in features], dtype=torch.long)
# label_mask = torch.tensor([f.label_mask for f in features], dtype=torch.long)
b_use_valid_filter = torch.tensor([f.b_use_valid_filter for f in features], dtype=torch.bool)
        # Every feature in the batch carries the same flag, so keep the first one.
        # b_use_valid_filter = b_use_valid_filter.detach().cpu().numpy()[0]
        b_use_valid_filter = b_use_valid_filter[0]
return {
'input_ids': input_ids,
'input_ngram_ids': ngram_ids,
'ngram_position_matrix': ngram_positions,
'attention_mask': input_mask,
'token_type_ids': segment_ids,
'labels': label_ids,
'valid_ids': valid_ids,
'b_use_valid_filter': b_use_valid_filter,
}
class TaskDataModel(pl.LightningDataModule):
@staticmethod
def add_data_specific_args(parent_args):
parser = parent_args.add_argument_group('TASK NAME DataModel')
parser.add_argument('--data_dir', default='./data', type=str)
parser.add_argument('--num_workers', default=8, type=int)
parser.add_argument('--train_data', default='train.json', type=str)
parser.add_argument('--valid_data', default='dev.json', type=str)
parser.add_argument('--test_data', default='test.json', type=str)
parser.add_argument('--train_batchsize', default=16, type=int)
parser.add_argument('--valid_batchsize', default=32, type=int)
parser.add_argument('--max_seq_length', default=128, type=int)
parser.add_argument('--texta_name', default='text', type=str)
parser.add_argument('--textb_name', default='sentence2', type=str)
parser.add_argument('--label_name', default='label', type=str)
parser.add_argument('--id_name', default='id', type=str)
parser.add_argument('--dataset_name', default=None, type=str)
parser.add_argument('--vocab_file',
type=str, default=None,
help="Vocabulary mapping/file BERT was pretrainined on")
parser.add_argument("--do_lower_case",
action='store_true',
help="Set this flag if you are using an uncased model.")
parser.add_argument('--task_name', default='weibo', type=str)
return parent_args
def __init__(self, args):
super().__init__()
self.train_batchsize = args.train_batchsize
self.valid_batchsize = args.valid_batchsize
self.collator = TaskCollator()
self.collator.args = args
self.collator.tokenizer = BertTokenizer.from_pretrained(args.pretrained_model_path, do_lower_case=args.do_lower_case)
self.collator.ngram_dict = ZenNgramDict.from_pretrained(args.pretrained_model_path, tokenizer=self.collator.tokenizer)
processors = {
'weibo': WeiboProcessor,
'resume': ResumeProcessor,
'msra': MSRAProcessor,
'ontonotes4': OntoNotes4Processor,
'cmeee': CMeEEProcessor,
'cluener': CLUENERProcessor,
}
if args.task_name not in processors:
raise ValueError("Task not found: %s" % (args.task_name))
processor = processors[args.task_name]()
        # Build the label-to-id mapping
label_list = processor.get_labels()
label2id = {label: i for i, label in enumerate(label_list, 1)}
label2id["[PAD]"] = 0
self.id2label = {v: k for k, v in label2id.items()}
self.collator.label2id = label2id
if args.dataset_name is None:
self.train_data = TaskDataset(os.path.join(
args.data_dir, args.train_data), processor, mode='train')
self.valid_data = TaskDataset(os.path.join(
args.data_dir, args.valid_data), processor, mode='dev')
self.test_data = TaskDataset(os.path.join(
args.data_dir, args.test_data), processor, mode='test')
else:
import datasets
ds = datasets.load_dataset(args.dataset_name)
self.train_data = ds['train']
self.valid_data = ds['validation']
self.test_data = ds['test']
self.save_hyperparameters(args)
def train_dataloader(self):
return DataLoader(self.train_data, shuffle=True, batch_size=self.train_batchsize, pin_memory=False,
collate_fn=self.collator)
def val_dataloader(self):
return DataLoader(self.valid_data, shuffle=False, batch_size=self.valid_batchsize, pin_memory=False,
collate_fn=self.collator)
def predict_dataloader(self):
return DataLoader(self.test_data, shuffle=False, batch_size=self.valid_batchsize, pin_memory=False,
collate_fn=self.collator)
class LitModel(pl.LightningModule):
@staticmethod
def add_model_specific_args(parent_args):
parser = parent_args.add_argument_group('BaseModel')
parser.add_argument('--markup', default='bios', type=str)
parser.add_argument('--middle_prefix', default='I-', type=str)
return parent_args
def __init__(self, args, id2label):
super().__init__()
# config = ZenConfig(os.path.join(args.pretrained_model_path, 'config.json'))
self.model = ZenForTokenClassification.from_pretrained(args.pretrained_model_path, num_labels=len(id2label))
self.seq_entity_score = SeqEntityScore(id2label, markup=args.markup, middle_prefix=args.middle_prefix)
self.train_seq_entity_score = SeqEntityScore(id2label, markup=args.markup, middle_prefix=args.middle_prefix)
self.id2label = id2label
self.label2id = {v: k for k, v in id2label.items()}
self.save_hyperparameters(args)
def setup(self, stage) -> None:
if stage == 'fit':
train_loader = self.trainer._data_connector._train_dataloader_source.dataloader()
# Calculate total steps
if self.trainer.max_epochs > 0:
world_size = self.trainer.world_size
tb_size = self.hparams.train_batchsize * max(1, world_size)
ab_size = self.trainer.accumulate_grad_batches
self.total_steps = (len(train_loader.dataset) *
self.trainer.max_epochs // tb_size) // ab_size
else:
self.total_steps = self.trainer.max_steps // self.trainer.accumulate_grad_batches
print('Total steps: {}' .format(self.total_steps))
def training_step(self, batch, batch_idx):
outputs = self.model(**batch)
loss = outputs.loss
# logits = outputs.logits
# preds = torch.argmax(F.log_softmax(logits, dim=2), dim=2)
# preds = preds.detach().cpu().numpy()
# labels = batch['labels'].detach().cpu().numpy()
# num_labels = len(self.label2id)
# y_true = []
# y_pred = []
# for i, label in enumerate(labels):
# temp_1 = []
# temp_2 = []
# for j, m in enumerate(label):
# if j == 0:
# continue
# elif labels[i][j] == num_labels - 1:
# y_true.append(temp_1)
# y_pred.append(temp_2)
# break
# else:
# temp_1.append(self.id2label[labels[i][j]])
# temp_2.append(self.id2label[preds[i][j]])
# self.train_seq_entity_score.update(y_true, y_pred)
# result = self.train_seq_entity_score.result()
# self.train_seq_entity_score.reset()
self.log('train_loss', loss)
return loss
def validation_step(self, batch, batch_idx):
outputs = self.model(**batch)
loss = outputs.loss
logits = outputs.logits
preds = torch.argmax(F.log_softmax(logits, dim=2), dim=2)
preds = preds.detach().cpu().numpy()
labels = batch['labels'].detach().cpu().numpy()
num_labels = len(self.label2id)
y_true = []
y_pred = []
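        # Decode id sequences back to tag strings: position 0 is [CLS]; the id
        # num_labels - 1 corresponds to [SEP] and terminates the sentence.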
for i, label in enumerate(labels):
temp_1 = []
temp_2 = []
for j, m in enumerate(label):
if j == 0:
continue
elif labels[i][j] == num_labels - 1:
y_true.append(temp_1)
y_pred.append(temp_2)
break
else:
temp_1.append(self.id2label[labels[i][j]])
temp_2.append(self.id2label[preds[i][j]])
self.seq_entity_score.update(y_true, y_pred)
self.log('val_loss', loss)
def validation_epoch_end(self, outputs):
# compute metric for all process
score_dict, _ = self.seq_entity_score.result()
if self.trainer._accelerator_connector.cluster_environment.global_rank() == 0:
print('score_dict:\n', score_dict)
# reset the metric after once validation
self.seq_entity_score.reset()
for k, v in score_dict.items():
self.log('val_{}'.format(k), v)
def configure_optimizers(self):
from fengshen.models.model_utils import configure_optimizers
return configure_optimizers(self)
class TaskModelCheckpoint:
@staticmethod
def add_argparse_args(parent_args):
parser = parent_args.add_argument_group('BaseModel')
parser.add_argument('--monitor', default='train_loss', type=str)
parser.add_argument('--mode', default='min', type=str)
parser.add_argument('--dirpath', default='./log/', type=str)
parser.add_argument(
'--filename', default='model-{epoch:02d}-{train_loss:.4f}', type=str)
        parser.add_argument('--save_top_k', default=3, type=int)
        parser.add_argument('--every_n_train_steps', default=100, type=int)
parser.add_argument('--save_weights_only', default=True, type=bool)
return parent_args
def __init__(self, args):
self.callbacks = ModelCheckpoint(monitor=args.monitor,
save_top_k=args.save_top_k,
mode=args.mode,
every_n_train_steps=args.every_n_train_steps,
save_weights_only=args.save_weights_only,
dirpath=args.dirpath,
filename=args.filename)
def save_test(data, args, data_model):
with open(args.output_save_path, 'w', encoding='utf-8') as f:
idx = 0
for i in range(len(data)):
batch = data[i]
for sample in batch:
tmp_result = dict()
label_id = np.argmax(sample.numpy())
tmp_result['id'] = data_model.test_data.data[idx]['id']
tmp_result['label'] = data_model.id2label[label_id]
json_data = json.dumps(tmp_result, ensure_ascii=False)
f.write(json_data+'\n')
idx += 1
print('save the result to '+args.output_save_path)
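# Example invocation (paths and flags are illustrative; adjust to your setup):
#   python this_script.py --pretrained_model_path /path/to/zen2 \
#       --data_dir ./data/weibo --task_name weibo --max_seq_length 128 \
#       --train_batchsize 16 --gpus 1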
def main():
total_parser = argparse.ArgumentParser("TASK NAME")
total_parser.add_argument('--pretrained_model_path', default='', type=str)
total_parser.add_argument('--output_save_path',
default='./predict.json', type=str)
# * Args for data preprocessing
total_parser = TaskDataModel.add_data_specific_args(total_parser)
# * Args for training
total_parser = pl.Trainer.add_argparse_args(total_parser)
total_parser = TaskModelCheckpoint.add_argparse_args(total_parser)
# * Args for base model
from fengshen.models.model_utils import add_module_args
total_parser = add_module_args(total_parser)
total_parser = LitModel.add_model_specific_args(total_parser)
args = total_parser.parse_args()
checkpoint_callback = TaskModelCheckpoint(args).callbacks
lr_monitor = LearningRateMonitor(logging_interval='step')
trainer = pl.Trainer.from_argparse_args(args,
callbacks=[checkpoint_callback, lr_monitor]
)
data_model = TaskDataModel(args)
id2label = data_model.id2label
print('id2label:', id2label)
model = LitModel(args, id2label)
trainer.fit(model, data_model)
if __name__ == "__main__":
main()
| 28,463 | 40.920471 | 163 | py |
Fengshenbang-LM | Fengshenbang-LM-main/fengshen/examples/zen2_finetune/fengshen_sequence_level_ft_task.py | # coding=utf-8
# Copyright 2021 The IDEA Authors. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from fengshen.models.zen2.modeling import ZenForSequenceClassification
from fengshen.models.zen2.ngram_utils import ZenNgramDict
from fengshen.models.zen2.tokenization import BertTokenizer
from pytorch_lightning.callbacks import LearningRateMonitor
import csv
from dataclasses import dataclass
import logging
import math
import numpy as np
import os
from tqdm import tqdm
import json
import torch
import pytorch_lightning as pl
import argparse
from pytorch_lightning.callbacks import ModelCheckpoint
from torch.utils.data import Dataset, DataLoader
logging.basicConfig(format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',
datefmt='%m/%d/%Y %H:%M:%S',
level=logging.INFO)
logger = logging.getLogger(__name__)
class InputExample(object):
"""A single training/test example for simple sequence classification."""
def __init__(self, guid, text_a, text_b=None, label=None, qid=0):
"""Constructs a InputExample.
Args:
guid: Unique id for the example.
text_a: string. The untokenized text of the first sequence. For single
sequence tasks, only this sequence must be specified.
text_b: (Optional) string. The untokenized text of the second sequence.
Only must be specified for sequence pair tasks.
label: (Optional) string. The label of the example. This should be
specified for train and dev examples, but not for test examples.
"""
self.guid = guid
self.text_a = text_a
self.text_b = text_b
self.label = label
self.qid = qid
class InputFeatures(object):
"""A single set of features of data."""
def __init__(self, input_ids, input_mask, segment_ids, label_id,
ngram_ids, ngram_starts, ngram_lengths, ngram_tuples, ngram_seg_ids, ngram_masks, ngram_freqs,
qid=-1):
self.input_ids = input_ids
self.input_mask = input_mask
self.segment_ids = segment_ids
self.label_id = label_id
self.qid = qid
self.ngram_ids = ngram_ids
self.ngram_starts = ngram_starts
self.ngram_lengths = ngram_lengths
self.ngram_tuples = ngram_tuples
self.ngram_seg_ids = ngram_seg_ids
self.ngram_masks = ngram_masks
self.ngram_freqs = ngram_freqs
class DataProcessor(object):
"""Base class for data converters for sequence classification data sets."""
def get_examples(self, data_path, mode):
"""Gets a collection of `InputExample`s for the train set."""
raise NotImplementedError()
@classmethod
def _read_tsv(cls, input_file, quotechar=None):
"""Reads a tab separated value file."""
with open(input_file, "r") as f:
reader = csv.reader(f, delimiter="\t", quotechar=quotechar)
lines = []
for line in reader:
# if sys.version_info[0] == 2:
# line = list(unicode(cell, 'utf-8') for cell in line)
lines.append(line)
return lines
@classmethod
def _read_json(cls, input_file):
"""Reads a jsonl file."""
with open(input_file, "r", encoding="utf-8") as f:
lines = f.readlines()
samples = []
for line in tqdm(lines):
data = json.loads(line)
samples.append(data)
return samples
class TnewsProcessor(DataProcessor):
"""Processor for the tnews data set (HIT version)."""
def get_train_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_json(os.path.join(data_dir, "train.json")), "train")
def get_examples(self, data_path, mode):
return self._create_examples(
self._read_json(data_path),
set_type=mode
)
def _create_examples(self, lines, set_type):
"""Creates examples for the training and dev sets."""
examples = []
for (i, line) in enumerate(lines):
# if i == 0:
# continue
guid = "%s-%s" % (set_type, i)
# text_a = line[0]
text_a = line['sentence']
label = line['label'] if 'label' in line.keys() else None
examples.append(
InputExample(guid=guid, text_a=text_a, label=label))
return examples
class OcnliProcessor(DataProcessor):
"""Processor for the ocnli or cmnli data set (HIT version)."""
def get_examples(self, data_path, mode):
return self._create_examples(
self._read_json(data_path),
set_type=mode
)
def _create_examples(self, lines, set_type):
"""Creates examples for the training and dev sets."""
examples = []
for (i, line) in enumerate(lines):
# if i == 0:
# continue
guid = "%s-%s" % (set_type, i)
# text_a = line[0]
text_a = line['sentence1']
text_b = line['sentence2']
label = line['label'] if 'label' in line.keys() else None
            # Special case: cmnli contains samples labeled '-'; treat them as unlabeled
if label == '-':
label = None
examples.append(
InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
return examples
class IflytekProcessor(DataProcessor):
"""Processor for the iflytek data set (HIT version)."""
def get_examples(self, data_path, mode):
return self._create_examples(
self._read_json(data_path),
set_type=mode
)
def _create_examples(self, lines, set_type):
"""Creates examples for the training and dev sets."""
examples = []
for (i, line) in enumerate(lines):
# if i == 0:
# continue
guid = "%s-%s" % (set_type, i)
# text_a = line[0]
text_a = line['sentence']
label = line['label'] if 'label' in line.keys() else None
examples.append(
InputExample(guid=guid, text_a=text_a, label=label))
return examples
def convert_examples_to_features(examples, label_map, max_seq_length, tokenizer, ngram_dict):
"""Loads a data file into a list of `InputBatch`s."""
# label_map = {label : i for i, label in enumerate(label_list)}
features = []
for (ex_index, example) in enumerate(examples):
tokens_a = tokenizer.tokenize(example.text_a)
tokens_b = None
if example.text_b:
tokens_b = tokenizer.tokenize(example.text_b)
# Modifies `tokens_a` and `tokens_b` in place so that the total
# length is less than the specified length.
# Account for [CLS], [SEP], [SEP] with "- 3"
_truncate_seq_pair(tokens_a, tokens_b, max_seq_length - 3)
else:
# Account for [CLS] and [SEP] with "- 2"
if len(tokens_a) > max_seq_length - 2:
tokens_a = tokens_a[:(max_seq_length - 2)]
# The convention in BERT is:
# (a) For sequence pairs:
# tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]
# type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1
# (b) For single sequences:
# tokens: [CLS] the dog is hairy . [SEP]
# type_ids: 0 0 0 0 0 0 0
#
# Where "type_ids" are used to indicate whether this is the first
# sequence or the second sequence. The embedding vectors for `type=0` and
# `type=1` were learned during pre-training and are added to the wordpiece
# embedding vector (and position vector). This is not *strictly* necessary
        # since the [SEP] token unambiguously separates the sequences, but it makes
# it easier for the model to learn the concept of sequences.
#
# For classification tasks, the first vector (corresponding to [CLS]) is
# used as as the "sentence vector". Note that this only makes sense because
# the entire model is fine-tuned.
tokens = ["[CLS]"] + tokens_a + ["[SEP]"]
segment_ids = [0] * len(tokens)
if tokens_b:
tokens += tokens_b + ["[SEP]"]
segment_ids += [1] * (len(tokens_b) + 1)
input_ids = tokenizer.convert_tokens_to_ids(tokens)
# The mask has 1 for real tokens and 0 for padding tokens. Only real
# tokens are attended to.
input_mask = [1] * len(input_ids)
# Zero-pad up to the sequence length.
padding = [0] * (max_seq_length - len(input_ids))
input_ids += padding
input_mask += padding
segment_ids += padding
assert len(input_ids) == max_seq_length
assert len(input_mask) == max_seq_length
assert len(segment_ids) == max_seq_length
# ----------- code for ngram BEGIN-----------
ngram_matches = []
        # Scan segments of length 2 .. max_ngram_len - 1 and keep those present in the ngram dict
max_gram_n = ngram_dict.max_ngram_len
for p in range(2, max_gram_n):
for q in range(0, len(tokens) - p + 1):
character_segment = tokens[q:q + p]
                # q is the starting position of the segment, p is its length
character_segment = tuple(character_segment)
if character_segment in ngram_dict.ngram_to_id_dict:
ngram_index = ngram_dict.ngram_to_id_dict[character_segment]
ngram_freq = ngram_dict.ngram_to_freq_dict[character_segment]
ngram_matches.append([ngram_index, q, p, character_segment, ngram_freq])
# shuffle(ngram_matches)
ngram_matches = sorted(ngram_matches, key=lambda s: s[0])
# max_word_in_seq_proportion = max_word_in_seq
max_word_in_seq_proportion = math.ceil((len(tokens) / max_seq_length) * ngram_dict.max_ngram_in_seq)
if len(ngram_matches) > max_word_in_seq_proportion:
ngram_matches = ngram_matches[:max_word_in_seq_proportion]
ngram_ids = [ngram[0] for ngram in ngram_matches]
ngram_positions = [ngram[1] for ngram in ngram_matches]
ngram_lengths = [ngram[2] for ngram in ngram_matches]
ngram_tuples = [ngram[3] for ngram in ngram_matches]
ngram_freqs = [ngram[4] for ngram in ngram_matches]
ngram_seg_ids = [0 if position < len([id for id in segment_ids if id == 0]) else 1 for position in
ngram_positions]
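        # An ngram whose start falls inside the first segment (token_type 0)
        # gets seg id 0, otherwise 1.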
        ngram_mask_array = np.zeros(ngram_dict.max_ngram_in_seq, dtype=bool)  # np.bool was removed in NumPy 1.24
ngram_mask_array[:len(ngram_ids)] = 1
# Zero-pad up to the max word in seq length.
padding = [0] * (ngram_dict.max_ngram_in_seq - len(ngram_ids))
ngram_ids += padding
ngram_positions += padding
ngram_lengths += padding
ngram_seg_ids += padding
ngram_freqs += padding
# ----------- code for ngram END-----------
label_id = label_map[example.label] if example.label is not None else 0
# if ex_index < 5:
# logger.info("*** Example ***")
# logger.info("guid: %s" % (example.guid))
# logger.info("tokens: %s" % " ".join(
# [str(x) for x in tokens]))
# logger.info("input_ids: %s" % " ".join([str(x) for x in input_ids]))
# logger.info("input_mask: %s" % " ".join([str(x) for x in input_mask]))
# logger.info(
# "segment_ids: %s" % " ".join([str(x) for x in segment_ids]))
# logger.info("label: %s (id = %d)" % (example.label, label_id))
# logger.info("ngram_ids: %s" % " ".join([str(x) for x in ngram_ids]))
# logger.info("ngram_positions: %s" % " ".join([str(x) for x in ngram_positions]))
# logger.info("ngram_lengths: %s" % " ".join([str(x) for x in ngram_lengths]))
# logger.info("ngram_tuples: %s" % " ".join([str(x) for x in ngram_tuples]))
# logger.info("ngram_seg_ids: %s" % " ".join([str(x) for x in ngram_seg_ids]))
# logger.info("ngram_freqs: %s" % " ".join([str(x) for x in ngram_freqs]))
features.append(
InputFeatures(input_ids=input_ids,
input_mask=input_mask,
segment_ids=segment_ids,
label_id=label_id,
ngram_ids=ngram_ids,
ngram_starts=ngram_positions,
ngram_lengths=ngram_lengths,
ngram_tuples=ngram_tuples,
ngram_seg_ids=ngram_seg_ids,
ngram_masks=ngram_mask_array,
ngram_freqs=ngram_freqs,
qid=example.qid))
return features
def _truncate_seq_pair(tokens_a, tokens_b, max_length):
"""Truncates a sequence pair in place to the maximum length."""
# This is a simple heuristic which will always truncate the longer sequence
# one token at a time. This makes more sense than truncating an equal percent
# of tokens from each, since if one sequence is very short then each token
# that's truncated likely contains more information than a longer sequence.
while True:
total_length = len(tokens_a) + len(tokens_b)
if total_length <= max_length:
break
if len(tokens_a) > len(tokens_b):
tokens_a.pop()
else:
tokens_b.pop()
class TaskDataset(Dataset):
def __init__(self, data_path, processor, mode='train'):
super().__init__()
self.data = self.load_data(data_path, processor, mode)
def __len__(self):
return len(self.data)
def __getitem__(self, index):
return self.data[index]
def load_data(self, data_path, processor, mode):
if mode == "train":
examples = processor.get_examples(data_path, mode)
elif mode == "test":
examples = processor.get_examples(data_path, mode)
elif mode == "dev":
examples = processor.get_examples(data_path, mode)
return examples
@dataclass
class TaskCollator:
args = None
tokenizer = None
ngram_dict = None
label2id = None
def __call__(self, samples):
features = convert_examples_to_features(samples, self.label2id, self.args.max_seq_length, self.tokenizer, self.ngram_dict)
# logger.info(" Num examples = %d", len(samples))
input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long)
input_mask = torch.tensor([f.input_mask for f in features], dtype=torch.long)
segment_ids = torch.tensor([f.segment_ids for f in features], dtype=torch.long)
label_ids = torch.tensor([f.label_id for f in features], dtype=torch.long)
# qids = torch.tensor([f.qid for f in features], dtype=torch.long)
ngram_ids = torch.tensor([f.ngram_ids for f in features], dtype=torch.long)
ngram_starts = torch.tensor([f.ngram_starts for f in features], dtype=torch.long)
ngram_lengths = torch.tensor([f.ngram_lengths for f in features], dtype=torch.long)
# ngram_seg_ids = torch.tensor([f.ngram_seg_ids for f in features], dtype=torch.long)
# ngram_masks = torch.tensor([f.ngram_masks for f in features], dtype=torch.long)
ngram_freqs = torch.tensor([f.ngram_freqs for f in features], dtype=torch.long)
batch_size = len(samples)
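        # Build a (batch, seq_len, max_ngram) matrix where entry [t, k] holds the
        # frequency of ngram k wherever it covers token t; each row is then
        # normalized so that the weights per token sum to 1.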
ngram_positions_matrix = torch.zeros(
size=(batch_size, self.args.max_seq_length, self.ngram_dict.max_ngram_in_seq),
dtype=torch.int)
for batch_id in range(batch_size):
ngram_id = ngram_ids[batch_id]
ngram_start = ngram_starts[batch_id]
ngram_length = ngram_lengths[batch_id]
for i in range(len(ngram_id)):
ngram_positions_matrix[batch_id][ngram_start[i]:ngram_start[i] + ngram_length[i], i] = ngram_freqs[batch_id][i]
ngram_positions_matrix[batch_id] \
= torch.div(ngram_positions_matrix[batch_id],
torch.stack([torch.sum(ngram_positions_matrix[batch_id], 1)] *
ngram_positions_matrix[batch_id].size(1)).t() + 1e-10)
return {
'input_ids': input_ids,
'input_ngram_ids': ngram_ids,
'ngram_position_matrix': ngram_positions_matrix,
'attention_mask': input_mask,
'token_type_ids': segment_ids,
'labels': label_ids
}
# return default_collate(sample_list)
class TaskDataModel(pl.LightningDataModule):
@staticmethod
def add_data_specific_args(parent_args):
parser = parent_args.add_argument_group('TASK NAME DataModel')
parser.add_argument('--data_dir', default='./data', type=str)
parser.add_argument('--num_workers', default=8, type=int)
parser.add_argument('--train_data', default='train.json', type=str)
parser.add_argument('--valid_data', default='dev.json', type=str)
parser.add_argument('--test_data', default='test.json', type=str)
parser.add_argument('--train_batchsize', default=16, type=int)
parser.add_argument('--valid_batchsize', default=32, type=int)
parser.add_argument('--max_seq_length', default=128, type=int)
parser.add_argument('--texta_name', default='text', type=str)
parser.add_argument('--textb_name', default='sentence2', type=str)
parser.add_argument('--label_name', default='label', type=str)
parser.add_argument('--id_name', default='id', type=str)
parser.add_argument('--dataset_name', default=None, type=str)
parser.add_argument('--vocab_file',
type=str, default=None,
help="Vocabulary mapping/file BERT was pretrainined on")
parser.add_argument("--do_lower_case",
action='store_true',
help="Set this flag if you are using an uncased model.")
parser.add_argument('--task_name', default='tnews', type=str)
return parent_args
def __init__(self, args):
super().__init__()
self.train_batchsize = args.train_batchsize
self.valid_batchsize = args.valid_batchsize
self.collator = TaskCollator()
self.collator.args = args
self.collator.tokenizer = BertTokenizer.from_pretrained(args.pretrained_model_path, do_lower_case=args.do_lower_case)
self.collator.ngram_dict = ZenNgramDict.from_pretrained(args.pretrained_model_path, tokenizer=self.collator.tokenizer)
processors = {
'afqmc': OcnliProcessor,
'tnews': TnewsProcessor,
'ocnli': OcnliProcessor,
'cmnli': OcnliProcessor,
'iflytek': IflytekProcessor,
}
if args.task_name not in processors:
raise ValueError("Task not found: %s" % (args.task_name))
processor = processors[args.task_name]()
if args.dataset_name is None:
self.label2id, self.id2label = self.load_schema(os.path.join(
args.data_dir, args.train_data), args)
self.train_data = TaskDataset(os.path.join(
args.data_dir, args.train_data), processor, mode='train')
self.valid_data = TaskDataset(os.path.join(
args.data_dir, args.valid_data), processor, mode='dev')
self.test_data = TaskDataset(os.path.join(
args.data_dir, args.test_data), processor, mode='test')
self.collator.label2id = self.label2id
else:
import datasets
ds = datasets.load_dataset(args.dataset_name)
self.train_data = ds['train']
self.valid_data = ds['validation']
self.test_data = ds['test']
self.save_hyperparameters(args)
def train_dataloader(self):
return DataLoader(self.train_data, shuffle=True, batch_size=self.train_batchsize, pin_memory=False,
collate_fn=self.collator)
def val_dataloader(self):
return DataLoader(self.valid_data, shuffle=False, batch_size=self.valid_batchsize, pin_memory=False,
collate_fn=self.collator)
def predict_dataloader(self):
return DataLoader(self.test_data, shuffle=False, batch_size=self.valid_batchsize, pin_memory=False,
collate_fn=self.collator)
def load_schema(self, data_path, args):
with open(data_path, 'r', encoding='utf8') as f:
lines = f.readlines()
label_list = []
for line in tqdm(lines):
data = json.loads(line)
labels = data[args.label_name] if args.label_name in data.keys(
) else 0
if labels not in label_list:
label_list.append(labels)
label2id, id2label = {}, {}
for i, k in enumerate(label_list):
label2id[k] = i
id2label[i] = k
return label2id, id2label
class LitModel(pl.LightningModule):
@staticmethod
def add_model_specific_args(parent_args):
parser = parent_args.add_argument_group('BaseModel')
parser.add_argument('--num_labels', default=2, type=int)
return parent_args
def __init__(self, args):
super().__init__()
self.model = ZenForSequenceClassification.from_pretrained(args.pretrained_model_path, num_labels=args.num_labels)
self.save_hyperparameters(args)
def setup(self, stage) -> None:
if stage == 'fit':
train_loader = self.trainer._data_connector._train_dataloader_source.dataloader()
# Calculate total steps
if self.trainer.max_epochs > 0:
world_size = self.trainer.world_size
tb_size = self.hparams.train_batchsize * max(1, world_size)
ab_size = self.trainer.accumulate_grad_batches
self.total_steps = (len(train_loader.dataset) *
self.trainer.max_epochs // tb_size) // ab_size
else:
self.total_steps = self.trainer.max_steps // self.trainer.accumulate_grad_batches
print('Total steps: {}' .format(self.total_steps))
def training_step(self, batch, batch_idx):
loss, logits = self.model(**batch)
acc = self.comput_metrix(logits, batch['labels'])
self.log('train_loss', loss)
self.log('train_acc', acc)
return loss
def comput_metrix(self, logits, labels):
y_pred = torch.argmax(logits, dim=-1)
y_pred = y_pred.view(size=(-1,))
y_true = labels.view(size=(-1,)).float()
corr = torch.eq(y_pred, y_true)
acc = torch.sum(corr.float())/labels.size()[0]
return acc
def validation_step(self, batch, batch_idx):
loss, logits = self.model(**batch)
acc = self.comput_metrix(logits, batch['labels'])
self.log('val_loss', loss)
self.log('val_acc', acc)
def predict_step(self, batch, batch_idx):
output = self.model(**batch)
return output.logits
def configure_optimizers(self):
from fengshen.models.model_utils import configure_optimizers
return configure_optimizers(self)
class TaskModelCheckpoint:
@staticmethod
def add_argparse_args(parent_args):
parser = parent_args.add_argument_group('BaseModel')
parser.add_argument('--monitor', default='train_loss', type=str)
parser.add_argument('--mode', default='min', type=str)
parser.add_argument('--dirpath', default='./log/', type=str)
parser.add_argument(
'--filename', default='model-{epoch:02d}-{train_loss:.4f}', type=str)
        parser.add_argument('--save_top_k', default=3, type=int)
        parser.add_argument('--every_n_train_steps', default=100, type=int)
parser.add_argument('--save_weights_only', default=True, type=bool)
return parent_args
def __init__(self, args):
self.callbacks = ModelCheckpoint(monitor=args.monitor,
save_top_k=args.save_top_k,
mode=args.mode,
every_n_train_steps=args.every_n_train_steps,
save_weights_only=args.save_weights_only,
dirpath=args.dirpath,
filename=args.filename)
def save_test(data, args, data_model):
with open(args.output_save_path, 'w', encoding='utf-8') as f:
idx = 0
for i in range(len(data)):
batch = data[i]
for sample in batch:
tmp_result = dict()
label_id = np.argmax(sample.numpy())
tmp_result['id'] = data_model.test_data.data[idx]['id']
tmp_result['label'] = data_model.id2label[label_id]
json_data = json.dumps(tmp_result, ensure_ascii=False)
f.write(json_data+'\n')
idx += 1
print('save the result to '+args.output_save_path)
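# Example invocation (paths and flags are illustrative; adjust to your setup):
#   python fengshen_sequence_level_ft_task.py --pretrained_model_path /path/to/zen2 \
#       --data_dir ./data/tnews --task_name tnews --num_labels 15 \
#       --max_seq_length 128 --train_batchsize 16 --gpus 1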
def main():
total_parser = argparse.ArgumentParser("TASK NAME")
total_parser.add_argument('--pretrained_model_path', default='', type=str)
total_parser.add_argument('--output_save_path',
default='./predict.json', type=str)
# * Args for data preprocessing
total_parser = TaskDataModel.add_data_specific_args(total_parser)
# * Args for training
total_parser = pl.Trainer.add_argparse_args(total_parser)
total_parser = TaskModelCheckpoint.add_argparse_args(total_parser)
# * Args for base model
from fengshen.models.model_utils import add_module_args
total_parser = add_module_args(total_parser)
total_parser = LitModel.add_model_specific_args(total_parser)
args = total_parser.parse_args()
checkpoint_callback = TaskModelCheckpoint(args).callbacks
lr_monitor = LearningRateMonitor(logging_interval='step')
trainer = pl.Trainer.from_argparse_args(args,
callbacks=[checkpoint_callback, lr_monitor]
)
data_model = TaskDataModel(args)
model = LitModel(args)
trainer.fit(model, data_model)
if __name__ == "__main__":
main()
| 27,189 | 40.830769 | 130 | py |
Fengshenbang-LM | Fengshenbang-LM-main/fengshen/examples/classification/finetune_classification.py | # coding=utf-8
# Copyright 2021 The IDEA Authors. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# from fengshen.models.zen1 import ZenModel
from dataclasses import dataclass
from fengshen.models.megatron_t5 import T5EncoderModel
from fengshen.models.roformer import RoFormerModel
from fengshen.models.longformer import LongformerModel
# from fengshen.models.cocolm.modeling_cocolm import COCOLMForSequenceClassification
import numpy as np
import os
from tqdm import tqdm
import json
import torch
import pytorch_lightning as pl
import argparse
from pytorch_lightning.callbacks import ModelCheckpoint, EarlyStopping, LearningRateMonitor
from torch.utils.data import Dataset, DataLoader
from torch.utils.data._utils.collate import default_collate
from transformers import (
BertModel,
BertConfig,
MegatronBertModel,
MegatronBertConfig,
AutoModel,
AutoConfig,
AutoTokenizer,
AutoModelForSequenceClassification,
)
# os.environ["CUDA_VISIBLE_DEVICES"] = '6'
model_dict = {'huggingface-bert': BertModel,
'fengshen-roformer': RoFormerModel,
'huggingface-megatron_bert': MegatronBertModel,
'fengshen-megatron_t5': T5EncoderModel,
'fengshen-longformer': LongformerModel,
# 'fengshen-zen1': ZenModel,
'huggingface-auto': AutoModelForSequenceClassification,
}
class TaskDataset(Dataset):
def __init__(self, data_path, args, label2id):
super().__init__()
self.args = args
self.label2id = label2id
self.max_length = args.max_length
self.data = self.load_data(data_path, args)
def __len__(self):
return len(self.data)
def __getitem__(self, index):
return self.data[index]
def load_data(self, data_path, args):
with open(data_path, 'r', encoding='utf8') as f:
lines = f.readlines()
samples = []
for line in tqdm(lines):
data = json.loads(line)
text_id = int(data[args.id_name]
) if args.id_name in data.keys() else 0
texta = data[args.texta_name] if args.texta_name in data.keys(
) else ''
textb = data[args.textb_name] if args.textb_name in data.keys(
) else ''
labels = self.label2id[data[args.label_name]
] if args.label_name in data.keys() else 0
samples.append({args.texta_name: texta, args.textb_name: textb,
args.label_name: labels, 'id': text_id})
return samples
@dataclass
class TaskCollator:
args = None
tokenizer = None
def __call__(self, samples):
sample_list = []
for item in samples:
if item[self.args.texta_name] != '' and item[self.args.textb_name] != '':
if self.args.model_type != 'fengshen-roformer':
encode_dict = self.tokenizer.encode_plus(
[item[self.args.texta_name], item[self.args.textb_name]],
max_length=self.args.max_length,
padding='max_length',
truncation='longest_first')
else:
encode_dict = self.tokenizer.encode_plus(
[item[self.args.texta_name] +
self.tokenizer.eos_token+item[self.args.textb_name]],
max_length=self.args.max_length,
padding='max_length',
truncation='longest_first')
else:
encode_dict = self.tokenizer.encode_plus(
item[self.args.texta_name],
max_length=self.args.max_length,
padding='max_length',
truncation='longest_first')
sample = {}
for k, v in encode_dict.items():
sample[k] = torch.tensor(v)
sample['labels'] = torch.tensor(item[self.args.label_name]).long()
sample['id'] = item['id']
sample_list.append(sample)
return default_collate(sample_list)
class TaskDataModel(pl.LightningDataModule):
@staticmethod
def add_data_specific_args(parent_args):
parser = parent_args.add_argument_group('TASK NAME DataModel')
parser.add_argument('--data_dir', default='./data', type=str)
parser.add_argument('--num_workers', default=8, type=int)
parser.add_argument('--train_data', default='train.json', type=str)
parser.add_argument('--valid_data', default='dev.json', type=str)
parser.add_argument('--test_data', default='test.json', type=str)
parser.add_argument('--train_batchsize', default=16, type=int)
parser.add_argument('--valid_batchsize', default=32, type=int)
parser.add_argument('--max_length', default=128, type=int)
parser.add_argument('--texta_name', default='text', type=str)
parser.add_argument('--textb_name', default='sentence2', type=str)
parser.add_argument('--label_name', default='label', type=str)
parser.add_argument('--id_name', default='id', type=str)
parser.add_argument('--dataset_name', default=None, type=str)
return parent_args
def __init__(self, args):
super().__init__()
self.train_batchsize = args.train_batchsize
self.valid_batchsize = args.valid_batchsize
self.tokenizer = AutoTokenizer.from_pretrained(
args.pretrained_model_path)
self.collator = TaskCollator()
self.collator.args = args
self.collator.tokenizer = self.tokenizer
if args.dataset_name is None:
self.label2id, self.id2label = self.load_schema(os.path.join(
args.data_dir, args.train_data), args)
self.train_data = TaskDataset(os.path.join(
args.data_dir, args.train_data), args, self.label2id)
self.valid_data = TaskDataset(os.path.join(
args.data_dir, args.valid_data), args, self.label2id)
self.test_data = TaskDataset(os.path.join(
args.data_dir, args.test_data), args, self.label2id)
else:
import datasets
ds = datasets.load_dataset(args.dataset_name)
self.train_data = ds['train']
self.valid_data = ds['validation']
self.test_data = ds['test']
self.save_hyperparameters(args)
def train_dataloader(self):
return DataLoader(self.train_data, shuffle=True, batch_size=self.train_batchsize, pin_memory=False,
collate_fn=self.collator)
def val_dataloader(self):
return DataLoader(self.valid_data, shuffle=False, batch_size=self.valid_batchsize, pin_memory=False,
collate_fn=self.collator)
def predict_dataloader(self):
return DataLoader(self.test_data, shuffle=False, batch_size=self.valid_batchsize, pin_memory=False,
collate_fn=self.collator)
def load_schema(self, data_path, args):
with open(data_path, 'r', encoding='utf8') as f:
lines = f.readlines()
label_list = []
for line in tqdm(lines):
data = json.loads(line)
labels = data[args.label_name] if args.label_name in data.keys(
) else 0
if labels not in label_list:
label_list.append(labels)
label2id, id2label = {}, {}
for i, k in enumerate(label_list):
label2id[k] = i
id2label[i] = k
return label2id, id2label
class taskModel(torch.nn.Module):
def __init__(self, args):
super().__init__()
self.args = args
        print('args model type:', args.model_type)
self.bert_encoder = model_dict[args.model_type].from_pretrained(
args.pretrained_model_path)
self.config = self.bert_encoder.config
self.cls_layer = torch.nn.Linear(
in_features=self.config.hidden_size, out_features=self.args.num_labels)
self.loss_func = torch.nn.CrossEntropyLoss()
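    # The T5 encoder has no pooled output, so the hidden state at the first
    # ([CLS]) position is used; BERT-style models expose a pooled vector at
    # index 1 of their output tuple.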
def forward(self, input_ids, attention_mask, token_type_ids, labels=None):
if self.args.model_type == 'fengshen-megatron_t5':
bert_output = self.bert_encoder(
input_ids=input_ids, attention_mask=attention_mask) # (bsz, seq, dim)
encode = bert_output.last_hidden_state[:, 0, :]
else:
bert_output = self.bert_encoder(
input_ids=input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids) # (bsz, seq, dim)
encode = bert_output[1]
logits = self.cls_layer(encode)
if labels is not None:
loss = self.loss_func(logits, labels.view(-1,))
return loss, logits
else:
return 0, logits
class LitModel(pl.LightningModule):
@staticmethod
def add_model_specific_args(parent_args):
parser = parent_args.add_argument_group('BaseModel')
parser.add_argument('--num_labels', default=2, type=int)
return parent_args
def __init__(self, args, num_data):
super().__init__()
self.args = args
self.num_data = num_data
self.model = model_dict[args.model_type].from_pretrained(
args.pretrained_model_path)
self.save_hyperparameters(args)
def setup(self, stage) -> None:
train_loader = self.trainer._data_connector._train_dataloader_source.dataloader()
# Calculate total steps
if self.trainer.max_epochs > 0:
world_size = self.trainer.world_size
tb_size = self.hparams.train_batchsize * max(1, world_size)
ab_size = self.trainer.accumulate_grad_batches
self.total_steps = (len(train_loader.dataset) *
self.trainer.max_epochs // tb_size) // ab_size
else:
self.total_steps = self.trainer.max_steps // self.trainer.accumulate_grad_batches
print('Total steps: {}' .format(self.total_steps))
def training_step(self, batch, batch_idx):
del batch['id']
output = self.model(**batch)
loss, logits = output[0], output[1]
acc = self.comput_metrix(logits, batch['labels'])
self.log('train_loss', loss)
self.log('train_acc', acc)
return loss
def comput_metrix(self, logits, labels):
y_pred = torch.argmax(logits, dim=-1)
y_pred = y_pred.view(size=(-1,))
y_true = labels.view(size=(-1,)).float()
corr = torch.eq(y_pred, y_true)
acc = torch.sum(corr.float())/labels.size()[0]
return acc
def validation_step(self, batch, batch_idx):
del batch['id']
output = self.model(**batch)
loss, logits = output[0], output[1]
acc = self.comput_metrix(logits, batch['labels'])
self.log('val_loss', loss)
self.log('val_acc', acc, sync_dist=True)
def predict_step(self, batch, batch_idx):
ids = batch['id']
del batch['id']
output = self.model(**batch)
        return ids, output.logits  # tuple, not a set: {ids, logits} would unpack in arbitrary order
def configure_optimizers(self):
from fengshen.models.model_utils import configure_optimizers
return configure_optimizers(self)
class TaskModelCheckpoint:
@staticmethod
def add_argparse_args(parent_args):
parser = parent_args.add_argument_group('BaseModel')
parser.add_argument('--monitor', default='train_loss', type=str)
parser.add_argument('--mode', default='min', type=str)
parser.add_argument('--dirpath', default='./log/', type=str)
parser.add_argument(
'--filename', default='model-{epoch:02d}-{train_loss:.4f}', type=str)
        parser.add_argument('--save_top_k', default=3, type=int)
        parser.add_argument('--every_n_train_steps', default=100, type=int)
parser.add_argument('--save_weights_only', default=True, type=bool)
return parent_args
def __init__(self, args):
self.callbacks = ModelCheckpoint(monitor=args.monitor,
save_top_k=args.save_top_k,
mode=args.mode,
every_n_train_steps=args.every_n_train_steps,
save_weights_only=args.save_weights_only,
dirpath=args.dirpath,
every_n_epochs=1,
filename=args.filename)
def save_test(data, args, data_model, rank):
file_name = args.output_save_path + f'.{rank}'
with open(file_name, 'w', encoding='utf-8') as f:
idx = 0
for i in range(len(data)):
ids, batch = data[i]
for id, sample in zip(ids, batch):
tmp_result = dict()
label_id = np.argmax(sample.cpu().numpy())
tmp_result['id'] = id.item()
tmp_result['label'] = data_model.id2label[label_id]
json_data = json.dumps(tmp_result, ensure_ascii=False)
f.write(json_data+'\n')
idx += 1
print('save the result to '+file_name)
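# Example invocation (paths and flags are illustrative; adjust to your setup):
#   python finetune_classification.py --pretrained_model_path /path/to/bert \
#       --model_type huggingface-auto --data_dir ./data/afqmc --num_labels 2 \
#       --max_length 128 --train_batchsize 16 --gpus 1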
def main():
pl.seed_everything(42)
total_parser = argparse.ArgumentParser("TASK NAME")
total_parser.add_argument('--pretrained_model_path', default='', type=str)
total_parser.add_argument('--output_save_path',
default='./predict.json', type=str)
total_parser.add_argument('--model_type',
default='huggingface-bert', type=str)
# * Args for data preprocessing
total_parser = TaskDataModel.add_data_specific_args(total_parser)
# * Args for training
total_parser = pl.Trainer.add_argparse_args(total_parser)
total_parser = TaskModelCheckpoint.add_argparse_args(total_parser)
# * Args for base model
from fengshen.models.model_utils import add_module_args
total_parser = add_module_args(total_parser)
total_parser = LitModel.add_model_specific_args(total_parser)
args = total_parser.parse_args()
print(args.pretrained_model_path)
checkpoint_callback = TaskModelCheckpoint(args).callbacks
early_stop_callback = EarlyStopping(
monitor="val_acc", min_delta=0.00, patience=5, verbose=False, mode="max")
lr_monitor = LearningRateMonitor(logging_interval='step')
trainer = pl.Trainer.from_argparse_args(args,
callbacks=[
checkpoint_callback,
lr_monitor,
early_stop_callback]
)
data_model = TaskDataModel(args)
model = LitModel(args, len(data_model.train_dataloader()))
trainer.fit(model, data_model)
result = trainer.predict(
model, data_model, ckpt_path=trainer.checkpoint_callback.best_model_path)
save_test(result, args, data_model, trainer.global_rank)
if __name__ == "__main__":
main()
| 15,787 | 39.482051 | 117 | py |
Fengshenbang-LM | Fengshenbang-LM-main/fengshen/examples/DAVAE/generate.py | # -*- encoding: utf-8 -*-
'''
Copyright 2022 The International Digital Economy Academy (IDEA). CCNL team. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
@File : generate.py
@Time : 2022/11/04 19:17
@Author : Liang Yuxin
@Version : 1.0
@Contact : liangyuxin@idea.edu.cn
@License : (C)Copyright 2022-2023, CCNL-IDEA
'''
# here put the import lib
import torch
from fengshen.models.DAVAE.DAVAEModel import DAVAEModel
from transformers import BertTokenizer,T5Tokenizer
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
encoder_tokenizer = BertTokenizer.from_pretrained("IDEA-CCNL/Randeng-DAVAE-1.2B-General-Chinese")
decoder_tokenizer = T5Tokenizer.from_pretrained("IDEA-CCNL/Randeng-DAVAE-1.2B-General-Chinese", eos_token='<|endoftext|>', pad_token='<pad>', extra_ids=0)
decoder_tokenizer.add_special_tokens({'bos_token':'<bos>'})
vae_model = DAVAEModel.from_pretrained("IDEA-CCNL/Randeng-DAVAE-1.2B-General-Chinese").to(device)
input_texts = [
"针对电力系统中的混沌振荡对整个互联电网的危害问题,提出了一种基于非线性光滑函数的滑模控制方法.",
"超市面积不算大.挺方便附近的居民购买的. 生活用品也比较齐全.价格适用中.",
]
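# simulate_batch encodes each input text into the VAE latent space and decodes
# a new sentence from it, yielding one generated string per input.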
output_texts = vae_model.simulate_batch(encoder_tokenizer,decoder_tokenizer,input_texts)
print(output_texts)
| 1,595 | 42.135135 | 157 | py |
Fengshenbang-LM | Fengshenbang-LM-main/fengshen/examples/disco_project/disco.py | import os
import sys
# sys.path.insert(0, f'{PROJECT_DIR}/guided-diffusion') # prepend so the local copy shadows any installed library version
import subprocess
import io
import torch.nn as nn
from torch.nn import functional as F
import torch
import torchvision.transforms.functional as TF
import torchvision.transforms as T
import math
import requests
import cv2
from resize_right import resize
from guided_diffusion.guided_diffusion.script_util import model_and_diffusion_defaults
from types import SimpleNamespace
from PIL import Image
import argparse
from guided_diffusion.guided_diffusion.unet import HFUNetModel
from tqdm.notebook import tqdm
from datetime import datetime
from guided_diffusion.guided_diffusion.script_util import create_model_and_diffusion
import clip
from transformers import BertForSequenceClassification, BertTokenizer
import gc
import random
# ======================== GLOBAL SETTING ========================
PROJECT_DIR = os.path.dirname(os.path.abspath(__file__))
useCPU = False # @param {type:"boolean"}
skip_augs = False # @param{type: 'boolean'}
perlin_init = False # @param{type: 'boolean'}
use_secondary_model = False
diffusion_model = "custom"
# Dimensions must be multiples of 64.
side_x = 512
side_y = 512
diffusion_sampling_mode = 'ddim' # @param ['plms','ddim']
use_checkpoint = True # @param {type: 'boolean'}
ViTB32 = False # @param{type:"boolean"}
ViTB16 = False # @param{type:"boolean"}
ViTL14 = True # @param{type:"boolean"}
ViTL14_336px = False # @param{type:"boolean"}
RN101 = False # @param{type:"boolean"}
RN50 = False # @param{type:"boolean"}
RN50x4 = False # @param{type:"boolean"}
RN50x16 = False # @param{type:"boolean"}
RN50x64 = False # @param{type:"boolean"}
# @markdown #####**OpenCLIP settings:**
ViTB32_laion2b_e16 = False # @param{type:"boolean"}
ViTB32_laion400m_e31 = False # @param{type:"boolean"}
ViTB32_laion400m_32 = False # @param{type:"boolean"}
ViTB32quickgelu_laion400m_e31 = False # @param{type:"boolean"}
ViTB32quickgelu_laion400m_e32 = False # @param{type:"boolean"}
ViTB16_laion400m_e31 = False # @param{type:"boolean"}
ViTB16_laion400m_e32 = False # @param{type:"boolean"}
RN50_yffcc15m = False # @param{type:"boolean"}
RN50_cc12m = False # @param{type:"boolean"}
RN50_quickgelu_yfcc15m = False # @param{type:"boolean"}
RN50_quickgelu_cc12m = False # @param{type:"boolean"}
RN101_yfcc15m = False # @param{type:"boolean"}
RN101_quickgelu_yfcc15m = False # @param{type:"boolean"}
# @markdown ####**Basic Settings:**
# NOTE: `steps` can be changed here, but the models must be re-initialized afterwards; the interface does not expose this yet.
steps = 100 # @param [25,50,100,150,250,500,1000]{type: 'raw', allow-input: true}
tv_scale = 0 # @param{type: 'number'}
range_scale = 150 # @param{type: 'number'}
sat_scale = 0 # @param{type: 'number'}
cutn_batches = 1 # @param{type: 'number'} # NOTE: augments the image and accumulates CLIP gradients over n batches of cutouts to form the guidance signal.
skip_augs = False # @param{type: 'boolean'}
# @markdown ####**Saving:**
intermediate_saves = 0 # @param{type: 'raw'}
intermediates_in_subfolder = True # @param{type: 'boolean'}
# perlin_init = False # @param{type: 'boolean'}
perlin_mode = 'mixed' # @param ['mixed', 'color', 'gray']
set_seed = 'random_seed' # @param{type: 'string'}
eta = 0.8 # @param{type: 'number'}
clamp_grad = True # @param{type: 'boolean'}
clamp_max = 0.05 # @param{type: 'number'}
# EXTRA ADVANCED SETTINGS:
randomize_class = True
clip_denoised = False
fuzzy_prompt = False
rand_mag = 0.05
# @markdown ---
cut_overview = "[12]*400+[4]*600" # @param {type: 'string'}
cut_innercut = "[4]*400+[12]*600" # @param {type: 'string'}
cut_ic_pow = "[1]*1000" # @param {type: 'string'}
cut_icgray_p = "[0.2]*400+[0]*600" # @param {type: 'string'}
# @markdown ####**Transformation Settings:**
use_vertical_symmetry = False # @param {type:"boolean"}
use_horizontal_symmetry = False # @param {type:"boolean"}
transformation_percent = [0.09] # @param
display_rate = 3 # @param{type: 'number'}
n_batches = 1 # @param{type: 'number'}
# @markdown If you're having issues with model downloads, check this to compare SHA's:
check_model_SHA = False # @param{type:"boolean"}
interp_spline = 'Linear' # Do not change, currently will not look good. param ['Linear','Quadratic','Cubic']{type:"string"}
resume_run = False
batch_size = 1
def createPath(filepath):
os.makedirs(filepath, exist_ok=True)
def wget(url, outputdir):
res = subprocess.run(['wget', url, '-P', f'{outputdir}'], stdout=subprocess.PIPE).stdout.decode('utf-8')
print(res)
def alpha_sigma_to_t(alpha, sigma):
return torch.atan2(sigma, alpha) * 2 / math.pi
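# NOTE maps an (alpha, sigma) noise pair to a timestep in [0, 1]:
# alpha_sigma_to_t(torch.tensor(1.), torch.tensor(0.)) == 0 (no noise) and
# alpha_sigma_to_t(torch.tensor(0.), torch.tensor(1.)) == 1 (pure noise).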
def interp(t):
return 3 * t**2 - 2 * t ** 3
def perlin(width, height, scale=10, device=None):
gx, gy = torch.randn(2, width + 1, height + 1, 1, 1, device=device)
xs = torch.linspace(0, 1, scale + 1)[:-1, None].to(device)
ys = torch.linspace(0, 1, scale + 1)[None, :-1].to(device)
wx = 1 - interp(xs)
wy = 1 - interp(ys)
dots = 0
dots += wx * wy * (gx[:-1, :-1] * xs + gy[:-1, :-1] * ys)
dots += (1 - wx) * wy * (-gx[1:, :-1] * (1 - xs) + gy[1:, :-1] * ys)
dots += wx * (1 - wy) * (gx[:-1, 1:] * xs - gy[:-1, 1:] * (1 - ys))
dots += (1 - wx) * (1 - wy) * (-gx[1:, 1:] * (1 - xs) - gy[1:, 1:] * (1 - ys))
return dots.permute(0, 2, 1, 3).contiguous().view(width * scale, height * scale)
def perlin_ms(octaves, width, height, grayscale, device=None):
out_array = [0.5] if grayscale else [0.5, 0.5, 0.5]
# out_array = [0.0] if grayscale else [0.0, 0.0, 0.0]
for i in range(1 if grayscale else 3):
scale = 2 ** len(octaves)
oct_width = width
oct_height = height
for oct in octaves:
p = perlin(oct_width, oct_height, scale, device)
out_array[i] += p * oct
scale //= 2
oct_width *= 2
oct_height *= 2
return torch.cat(out_array)
def fetch(url_or_path):
if str(url_or_path).startswith('http://') or str(url_or_path).startswith('https://'):
r = requests.get(url_or_path)
r.raise_for_status()
fd = io.BytesIO()
fd.write(r.content)
fd.seek(0)
return fd
return open(url_or_path, 'rb')
def read_image_workaround(path):
"""OpenCV reads images as BGR, Pillow saves them as RGB. Work around
this incompatibility to avoid colour inversions."""
im_tmp = cv2.imread(path)
return cv2.cvtColor(im_tmp, cv2.COLOR_BGR2RGB)
def parse_prompt(prompt):
if prompt.startswith('http://') or prompt.startswith('https://'):
vals = prompt.rsplit(':', 2)
vals = [vals[0] + ':' + vals[1], *vals[2:]]
else:
vals = prompt.rsplit(':', 1)
vals = vals + ['', '1'][len(vals):]
return vals[0], float(vals[1])
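# NOTE illustrative behaviour of parse_prompt (prompt text plus optional weight):
#   parse_prompt('a sunset over the sea:2')  -> ('a sunset over the sea', 2.0)
#   parse_prompt('a sunset over the sea')    -> ('a sunset over the sea', 1.0)
#   parse_prompt('http://host/img.png:1.5')  -> ('http://host/img.png', 1.5)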
def sinc(x):
return torch.where(x != 0, torch.sin(math.pi * x) / (math.pi * x), x.new_ones([]))
def lanczos(x, a):
cond = torch.logical_and(-a < x, x < a)
out = torch.where(cond, sinc(x) * sinc(x / a), x.new_zeros([]))
return out / out.sum()
def ramp(ratio, width):
n = math.ceil(width / ratio + 1)
out = torch.empty([n])
cur = 0
for i in range(out.shape[0]):
out[i] = cur
cur += ratio
return torch.cat([-out[1:].flip([0]), out])[1:-1]
def resample(input, size, align_corners=True):
n, c, h, w = input.shape
dh, dw = size
input = input.reshape([n * c, 1, h, w])
if dh < h:
kernel_h = lanczos(ramp(dh / h, 2), 2).to(input.device, input.dtype)
pad_h = (kernel_h.shape[0] - 1) // 2
input = F.pad(input, (0, 0, pad_h, pad_h), 'reflect')
input = F.conv2d(input, kernel_h[None, None, :, None])
if dw < w:
kernel_w = lanczos(ramp(dw / w, 2), 2).to(input.device, input.dtype)
pad_w = (kernel_w.shape[0] - 1) // 2
input = F.pad(input, (pad_w, pad_w, 0, 0), 'reflect')
input = F.conv2d(input, kernel_w[None, None, None, :])
input = input.reshape([n, c, h, w])
return F.interpolate(input, size, mode='bicubic', align_corners=align_corners)
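# NOTE resample() applies a Lanczos low-pass filter before bicubic interpolation when
# downscaling, to reduce aliasing. A minimal sketch (shapes are illustrative):
#   x = torch.randn(1, 3, 512, 512)
#   y = resample(x, (224, 224))  # -> [1, 3, 224, 224]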
class MakeCutouts(nn.Module):
def __init__(self, cut_size, cutn, skip_augs=False):
super().__init__()
self.cut_size = cut_size
self.cutn = cutn
self.skip_augs = skip_augs
self.augs = T.Compose([
T.RandomHorizontalFlip(p=0.5),
T.Lambda(lambda x: x + torch.randn_like(x) * 0.01),
T.RandomAffine(degrees=15, translate=(0.1, 0.1)),
T.Lambda(lambda x: x + torch.randn_like(x) * 0.01),
T.RandomPerspective(distortion_scale=0.4, p=0.7),
T.Lambda(lambda x: x + torch.randn_like(x) * 0.01),
T.RandomGrayscale(p=0.15),
T.Lambda(lambda x: x + torch.randn_like(x) * 0.01),
# T.ColorJitter(brightness=0.1, contrast=0.1, saturation=0.1, hue=0.1),
])
def forward(self, input):
input = T.Pad(input.shape[2] // 4, fill=0)(input)
sideY, sideX = input.shape[2:4]
max_size = min(sideX, sideY)
cutouts = []
for ch in range(self.cutn):
if ch > self.cutn - self.cutn // 4:
cutout = input.clone()
else:
size = int(max_size * torch.zeros(1,).normal_(mean=.8, std=.3).clip(float(self.cut_size / max_size), 1.))
offsetx = torch.randint(0, abs(sideX - size + 1), ())
offsety = torch.randint(0, abs(sideY - size + 1), ())
cutout = input[:, :, offsety:offsety + size, offsetx:offsetx + size]
if not self.skip_augs:
cutout = self.augs(cutout)
cutouts.append(resample(cutout, (self.cut_size, self.cut_size)))
del cutout
cutouts = torch.cat(cutouts, dim=0)
return cutouts
class MakeCutoutsDango(nn.Module):
def __init__(self, cut_size, args,
Overview=4,
InnerCrop=0, IC_Size_Pow=0.5, IC_Grey_P=0.2,
):
super().__init__()
self.padargs = {}
self.cutout_debug = False
self.cut_size = cut_size
self.Overview = Overview
self.InnerCrop = InnerCrop
self.IC_Size_Pow = IC_Size_Pow
self.IC_Grey_P = IC_Grey_P
self.augs = T.Compose([
T.RandomHorizontalFlip(p=0.5),
T.Lambda(lambda x: x + torch.randn_like(x) * 0.01),
T.RandomAffine(degrees=10, translate=(0.05, 0.05), interpolation=T.InterpolationMode.BILINEAR),
T.Lambda(lambda x: x + torch.randn_like(x) * 0.01),
T.RandomGrayscale(p=0.1),
T.Lambda(lambda x: x + torch.randn_like(x) * 0.01),
T.ColorJitter(brightness=0.1, contrast=0.1, saturation=0.1, hue=0.1),
])
def forward(self, input):
cutouts = []
gray = T.Grayscale(3)
sideY, sideX = input.shape[2:4]
max_size = min(sideX, sideY)
min_size = min(sideX, sideY, self.cut_size)
output_shape = [1, 3, self.cut_size, self.cut_size]
pad_input = F.pad(input, ((sideY - max_size) // 2, (sideY - max_size) // 2, (sideX - max_size) // 2, (sideX - max_size) // 2), **self.padargs)
cutout = resize(pad_input, out_shape=output_shape)
if self.Overview > 0:
if self.Overview <= 4:
if self.Overview >= 1:
cutouts.append(cutout)
if self.Overview >= 2:
cutouts.append(gray(cutout))
if self.Overview >= 3:
cutouts.append(TF.hflip(cutout))
if self.Overview == 4:
cutouts.append(gray(TF.hflip(cutout)))
else:
cutout = resize(pad_input, out_shape=output_shape)
for _ in range(self.Overview):
cutouts.append(cutout)
if self.cutout_debug:
# if is_colab:
# TF.to_pil_image(cutouts[0].clamp(0, 1).squeeze(0)).save("/content/cutout_overview0.jpg",quality=99)
# else:
TF.to_pil_image(cutouts[0].clamp(0, 1).squeeze(0)).save("cutout_overview0.jpg", quality=99)
if self.InnerCrop > 0:
for i in range(self.InnerCrop):
size = int(torch.rand([])**self.IC_Size_Pow * (max_size - min_size) + min_size)
offsetx = torch.randint(0, sideX - size + 1, ())
offsety = torch.randint(0, sideY - size + 1, ())
cutout = input[:, :, offsety:offsety + size, offsetx:offsetx + size]
if i <= int(self.IC_Grey_P * self.InnerCrop):
cutout = gray(cutout)
cutout = resize(cutout, out_shape=output_shape)
cutouts.append(cutout)
if self.cutout_debug:
# if is_colab:
# TF.to_pil_image(cutouts[-1].clamp(0, 1).squeeze(0)).save("/content/cutout_InnerCrop.jpg",quality=99)
# else:
TF.to_pil_image(cutouts[-1].clamp(0, 1).squeeze(0)).save("cutout_InnerCrop.jpg", quality=99)
cutouts = torch.cat(cutouts)
if skip_augs is not True:
cutouts = self.augs(cutouts)
return cutouts
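# NOTE minimal usage sketch for MakeCutoutsDango (the values here are illustrative,
# not the schedule used below):
#   cuts = MakeCutoutsDango(224, args, Overview=12, InnerCrop=4)
#   batch = cuts(img)  # img: [1, 3, H, W] in [0, 1] -> batch: [16, 3, 224, 224]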
def spherical_dist_loss(x, y):
x = F.normalize(x, dim=-1)
y = F.normalize(y, dim=-1)
return (x - y).norm(dim=-1).div(2).arcsin().pow(2).mul(2)
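# NOTE spherical_dist_loss is half the squared geodesic angle between the normalized
# embeddings: with chord length d = ||x_hat - y_hat||, the angle is
# theta = 2 * arcsin(d / 2), and the returned value equals theta**2 / 2
# (0 for embeddings pointing in the same direction).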
def tv_loss(input):
"""L2 total variation loss, as in Mahendran et al."""
input = F.pad(input, (0, 1, 0, 1), 'replicate')
x_diff = input[..., :-1, 1:] - input[..., :-1, :-1]
y_diff = input[..., 1:, :-1] - input[..., :-1, :-1]
return (x_diff**2 + y_diff**2).mean([1, 2, 3])
def range_loss(input):
return (input - input.clamp(-1, 1)).pow(2).mean([1, 2, 3])
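# NOTE range_loss penalizes values outside the diffusion model's [-1, 1] pixel range,
# e.g. a pixel at 1.5 contributes (1.5 - 1.0)**2 = 0.25 before averaging.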
def symmetry_transformation_fn(x):
    # NOTE force the generated image to be symmetric (controlled by the global
    # use_horizontal_symmetry / use_vertical_symmetry flags)
if use_horizontal_symmetry:
[n, c, h, w] = x.size()
x = torch.concat((x[:, :, :, :w // 2], torch.flip(x[:, :, :, :w // 2], [-1])), -1)
print("horizontal symmetry applied")
if use_vertical_symmetry:
[n, c, h, w] = x.size()
x = torch.concat((x[:, :, :h // 2, :], torch.flip(x[:, :, :h // 2, :], [-2])), -2)
print("vertical symmetry applied")
return x
# def split_prompts(prompts):
# prompt_series = pd.Series([np.nan for a in range(max_frames)])
# for i, prompt in prompts.items():
# prompt_series[i] = prompt
# # prompt_series = prompt_series.astype(str)
# prompt_series = prompt_series.ffill().bfill()
# return prompt_series
"""
other chaos settings
"""
# dir settings
outDirPath = f'{PROJECT_DIR}/images_out'
createPath(outDirPath)
model_path = f'{PROJECT_DIR}/models'
createPath(model_path)
# GPU setup
DEVICE = torch.device('cuda:0' if (torch.cuda.is_available() and not useCPU) else 'cpu')
print('Using device:', DEVICE)
device = DEVICE # At least one of the modules expects this name..
if not useCPU:
if torch.cuda.get_device_capability(DEVICE) == (8, 0): # A100 fix thanks to Emad
print('Disabling CUDNN for A100 gpu', file=sys.stderr)
torch.backends.cudnn.enabled = False
model_config = model_and_diffusion_defaults()
model_config.update({
'attention_resolutions': '32, 16, 8',
'class_cond': False,
'diffusion_steps': 1000, # No need to edit this, it is taken care of later.
'rescale_timesteps': True,
'timestep_respacing': 250, # No need to edit this, it is taken care of later.
'image_size': 512,
'learn_sigma': True,
'noise_schedule': 'linear',
'num_channels': 256,
'num_head_channels': 64,
'num_res_blocks': 2,
'resblock_updown': True,
'use_checkpoint': use_checkpoint,
'use_fp16': not useCPU,
'use_scale_shift_norm': True,
})
model_default = model_config['image_size']
normalize = T.Normalize(mean=[0.48145466, 0.4578275, 0.40821073], std=[0.26862954, 0.26130258, 0.27577711])
# Make folder for batch
steps_per_checkpoint = steps + 10
# Update Model Settings
timestep_respacing = f'ddim{steps}'
diffusion_steps = (1000 // steps) * steps if steps < 1000 else steps
model_config.update({
'timestep_respacing': timestep_respacing,
'diffusion_steps': diffusion_steps,
})
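# NOTE worked example of the respacing above: steps = 100 gives
# timestep_respacing = 'ddim100' and diffusion_steps = (1000 // 100) * 100 = 1000,
# i.e. the full 1000-step schedule is subsampled down to 100 DDIM steps.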
start_frame = 0
print('Starting Run:')
if set_seed == 'random_seed':
random.seed()
seed = random.randint(0, 2**32)
# print(f'Using seed: {seed}')
else:
seed = int(set_seed)
args = {
# 'seed': seed,
'display_rate': display_rate,
'n_batches': n_batches,
'batch_size': batch_size,
'steps': steps,
'diffusion_sampling_mode': diffusion_sampling_mode,
# 'width_height': width_height,
'tv_scale': tv_scale,
'range_scale': range_scale,
'sat_scale': sat_scale,
'cutn_batches': cutn_batches,
# 'side_x': side_x,
# 'side_y': side_y,
'timestep_respacing': timestep_respacing,
'diffusion_steps': diffusion_steps,
'cut_overview': eval(cut_overview),
'cut_innercut': eval(cut_innercut),
'cut_ic_pow': eval(cut_ic_pow),
'cut_icgray_p': eval(cut_icgray_p),
'intermediate_saves': intermediate_saves,
'intermediates_in_subfolder': intermediates_in_subfolder,
'steps_per_checkpoint': steps_per_checkpoint,
'set_seed': set_seed,
'eta': eta,
'clamp_grad': clamp_grad,
'clamp_max': clamp_max,
'skip_augs': skip_augs,
'randomize_class': randomize_class,
'clip_denoised': clip_denoised,
'fuzzy_prompt': fuzzy_prompt,
'rand_mag': rand_mag,
'use_vertical_symmetry': use_vertical_symmetry,
'use_horizontal_symmetry': use_horizontal_symmetry,
'transformation_percent': transformation_percent,
}
args = SimpleNamespace(**args)
# ======================== GLOBAL SETTING END ========================
class Diffuser:
    def __init__(self, custom_path='IDEA-CCNL/Taiyi-Diffusion-532M-Nature'):
        self.model_setup(custom_path)
def model_setup(self, custom_path):
# LOADING MODEL
os.environ['KMP_DUPLICATE_LIB_OK'] = 'TRUE'
print(f'Prepping model...model name: {custom_path}')
__, self.diffusion = create_model_and_diffusion(**model_config)
self.model = HFUNetModel.from_pretrained(custom_path)
# total = get_parameter_num(self.model)
# print("Number of parameter: %.2fM" % (total/1e6))
# print("Number of parameter: %.2fM" % (total/1024/1024))
self.model.requires_grad_(False).eval().to(device)
for name, param in self.model.named_parameters():
if 'qkv' in name or 'norm' in name or 'proj' in name:
param.requires_grad_()
if model_config['use_fp16']:
self.model.convert_to_fp16()
        print(f'Diffusion model loaded: {diffusion_model}')
# NOTE Directly Load The Text Encoder From Hugging Face
print('Prepping model...model name: CLIP')
self.taiyi_tokenizer = BertTokenizer.from_pretrained("IDEA-CCNL/Taiyi-CLIP-Roberta-large-326M-Chinese")
self.taiyi_transformer = BertForSequenceClassification.from_pretrained("IDEA-CCNL/Taiyi-CLIP-Roberta-large-326M-Chinese").eval().to(device)
self.clip_models = []
if ViTB32:
self.clip_models.append(clip.load('ViT-B/32', jit=False)[0].eval().requires_grad_(False).to(device))
if ViTB16:
self.clip_models.append(clip.load('ViT-B/16', jit=False)[0].eval().requires_grad_(False).to(device))
if ViTL14:
self.clip_models.append(clip.load('ViT-L/14', jit=False)[0].eval().requires_grad_(False).to(device))
if ViTL14_336px:
self.clip_models.append(clip.load('ViT-L/14@336px', jit=False)[0].eval().requires_grad_(False).to(device))
print('CLIP Loaded')
# self.lpips_model = lpips.LPIPS(net='vgg').to(device)
def generate(self,
input_text_prompts=['夕阳西下'],
init_image=None,
skip_steps=10,
clip_guidance_scale=7500,
init_scale=2000,
st_dynamic_image=None,
seed=None,
side_x=512,
side_y=512,
):
        frame_num = 0
loss_values = []
# if seed is not None:
# np.random.seed(seed)
# random.seed(seed)
# torch.manual_seed(seed)
# torch.cuda.manual_seed_all(seed)
# torch.backends.cudnn.deterministic = True
# target_embeds, weights = [], []
frame_prompt = input_text_prompts
print(f'Frame {frame_num} Prompt: {frame_prompt}')
model_stats = []
for clip_model in self.clip_models:
# cutn = 16
model_stat = {"clip_model": None, "target_embeds": [], "make_cutouts": None, "weights": []}
model_stat["clip_model"] = clip_model
for prompt in frame_prompt:
txt, weight = parse_prompt(prompt)
# txt = clip_model.encode_text(clip.tokenize(prompt).to(device)).float()
                # NOTE use the Chinese CLIP text encoder instead of the OpenAI one
txt = self.taiyi_transformer(self.taiyi_tokenizer(txt, return_tensors='pt')['input_ids'].to(device)).logits
if args.fuzzy_prompt:
for i in range(25):
model_stat["target_embeds"].append((txt + torch.randn(txt.shape).cuda() * args.rand_mag).clamp(0, 1))
model_stat["weights"].append(weight)
else:
model_stat["target_embeds"].append(txt)
model_stat["weights"].append(weight)
model_stat["target_embeds"] = torch.cat(model_stat["target_embeds"])
model_stat["weights"] = torch.tensor(model_stat["weights"], device=device)
if model_stat["weights"].sum().abs() < 1e-3:
raise RuntimeError('The weights must not sum to 0.')
model_stat["weights"] /= model_stat["weights"].sum().abs()
model_stats.append(model_stat)
init = None
if init_image is not None:
            # init = Image.open(fetch(init_image)).convert('RGB')  # NOTE an already-loaded PIL image is passed in, not a path
init = init_image
init = init.resize((side_x, side_y), Image.LANCZOS)
init = TF.to_tensor(init).to(device).unsqueeze(0).mul(2).sub(1)
cur_t = None
def cond_fn(x, t, y=None):
with torch.enable_grad():
x_is_NaN = False
x = x.detach().requires_grad_()
n = x.shape[0]
my_t = torch.ones([n], device=device, dtype=torch.long) * cur_t
out = self.diffusion.p_mean_variance(self.model, x, my_t, clip_denoised=False, model_kwargs={'y': y})
fac = self.diffusion.sqrt_one_minus_alphas_cumprod[cur_t]
x_in = out['pred_xstart'] * fac + x * (1 - fac)
x_in_grad = torch.zeros_like(x_in)
for model_stat in model_stats:
for i in range(args.cutn_batches):
t_int = int(t.item()) + 1 # errors on last step without +1, need to find source
# try:
input_resolution = model_stat["clip_model"].visual.input_resolution
# except:
# input_resolution = 224
cuts = MakeCutoutsDango(input_resolution,
Overview=args.cut_overview[1000 - t_int],
InnerCrop=args.cut_innercut[1000 - t_int],
IC_Size_Pow=args.cut_ic_pow[1000 - t_int],
IC_Grey_P=args.cut_icgray_p[1000 - t_int],
args=args,
)
clip_in = normalize(cuts(x_in.add(1).div(2)))
image_embeds = model_stat["clip_model"].encode_image(clip_in).float()
dists = spherical_dist_loss(image_embeds.unsqueeze(1), model_stat["target_embeds"].unsqueeze(0))
dists = dists.view([args.cut_overview[1000 - t_int] + args.cut_innercut[1000 - t_int], n, -1])
losses = dists.mul(model_stat["weights"]).sum(2).mean(0)
loss_values.append(losses.sum().item()) # log loss, probably shouldn't do per cutn_batch
x_in_grad += torch.autograd.grad(losses.sum() * clip_guidance_scale, x_in)[0] / cutn_batches
tv_losses = tv_loss(x_in)
range_losses = range_loss(out['pred_xstart'])
sat_losses = torch.abs(x_in - x_in.clamp(min=-1, max=1)).mean()
loss = tv_losses.sum() * tv_scale + range_losses.sum() * range_scale + sat_losses.sum() * sat_scale
                if init is not None and init_scale:
                    # NOTE self.lpips_model is commented out in model_setup above, so
                    # guard here to avoid an AttributeError when an init image is supplied
                    if hasattr(self, 'lpips_model'):
                        init_losses = self.lpips_model(x_in, init)
                        loss = loss + init_losses.sum() * init_scale
x_in_grad += torch.autograd.grad(loss, x_in)[0]
if not torch.isnan(x_in_grad).any():
grad = -torch.autograd.grad(x_in, x, x_in_grad)[0]
else:
x_is_NaN = True
grad = torch.zeros_like(x)
if args.clamp_grad and not x_is_NaN:
magnitude = grad.square().mean().sqrt()
return grad * magnitude.clamp(max=args.clamp_max) / magnitude # min=-0.02, min=-clamp_max,
return grad
if args.diffusion_sampling_mode == 'ddim':
sample_fn = self.diffusion.ddim_sample_loop_progressive
else:
sample_fn = self.diffusion.plms_sample_loop_progressive
for i in range(args.n_batches):
current_time = datetime.now().strftime('%y%m%d-%H%M%S_%f')
batchBar = tqdm(range(args.n_batches), desc="Batches")
batchBar.n = i
batchBar.refresh()
gc.collect()
torch.cuda.empty_cache()
cur_t = self.diffusion.num_timesteps - skip_steps - 1
# total_steps = cur_t
if args.diffusion_sampling_mode == 'ddim':
samples = sample_fn(
self.model,
(batch_size, 3, side_y, side_x),
clip_denoised=clip_denoised,
model_kwargs={},
cond_fn=cond_fn,
progress=True,
skip_timesteps=skip_steps,
init_image=init,
randomize_class=randomize_class,
eta=eta,
transformation_fn=symmetry_transformation_fn,
transformation_percent=args.transformation_percent
)
else:
samples = sample_fn(
self.model,
(batch_size, 3, side_y, side_x),
clip_denoised=clip_denoised,
model_kwargs={},
cond_fn=cond_fn,
progress=True,
skip_timesteps=skip_steps,
init_image=init,
randomize_class=randomize_class,
order=2,
)
for j, sample in enumerate(samples):
cur_t -= 1
intermediateStep = False
if args.steps_per_checkpoint is not None:
if j % steps_per_checkpoint == 0 and j > 0:
intermediateStep = True
elif j in args.intermediate_saves:
intermediateStep = True
if j % args.display_rate == 0 or cur_t == -1 or intermediateStep:
for k, image in enumerate(sample['pred_xstart']):
# tqdm.write(f'Batch {i}, step {j}, output {k}:')
# percent = math.ceil(j / total_steps * 100)
if args.n_batches > 0:
filename = f'{current_time}-{parse_prompt(prompt)[0]}.png'
image = TF.to_pil_image(image.add(1).div(2).clamp(0, 1))
if j % args.display_rate == 0 or cur_t == -1:
image.save(f'{outDirPath}/{filename}')
if st_dynamic_image:
st_dynamic_image.image(image, use_column_width=True)
# self.current_image = image
return image
if __name__ == '__main__':
parser = argparse.ArgumentParser(description="setting")
parser.add_argument('--prompt', type=str, required=True)
parser.add_argument('--text_scale', type=int, default=5000)
parser.add_argument('--model_path', type=str, default="IDEA-CCNL/Taiyi-Diffusion-532M-Nature")
parser.add_argument('--width', type=int, default=512)
parser.add_argument('--height', type=int, default=512)
user_args = parser.parse_args()
dd = Diffuser(user_args.model_path)
dd.generate([user_args.prompt],
clip_guidance_scale=user_args.text_scale,
side_x=user_args.width,
side_y=user_args.height,
)
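# NOTE example invocation (the script filename is an assumption):
#   python disco.py --prompt "夕阳西下" --text_scale 5000 --width 512 --height 512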
| 29,225 | 38.709239 | 150 | py |
Fengshenbang-LM | Fengshenbang-LM-main/fengshen/examples/disco_project/guided_diffusion/guided_diffusion/resample.py | from abc import ABC, abstractmethod
import numpy as np
import torch as th
import torch.distributed as dist
def create_named_schedule_sampler(name, diffusion):
"""
Create a ScheduleSampler from a library of pre-defined samplers.
:param name: the name of the sampler.
:param diffusion: the diffusion object to sample for.
"""
if name == "uniform":
return UniformSampler(diffusion)
elif name == "loss-second-moment":
return LossSecondMomentResampler(diffusion)
else:
raise NotImplementedError(f"unknown schedule sampler: {name}")
class ScheduleSampler(ABC):
"""
A distribution over timesteps in the diffusion process, intended to reduce
variance of the objective.
By default, samplers perform unbiased importance sampling, in which the
objective's mean is unchanged.
However, subclasses may override sample() to change how the resampled
terms are reweighted, allowing for actual changes in the objective.
"""
@abstractmethod
def weights(self):
"""
Get a numpy array of weights, one per diffusion step.
The weights needn't be normalized, but must be positive.
"""
def sample(self, batch_size, device):
"""
Importance-sample timesteps for a batch.
:param batch_size: the number of timesteps.
:param device: the torch device to save to.
:return: a tuple (timesteps, weights):
- timesteps: a tensor of timestep indices.
- weights: a tensor of weights to scale the resulting losses.
"""
w = self.weights()
p = w / np.sum(w)
indices_np = np.random.choice(len(p), size=(batch_size,), p=p)
indices = th.from_numpy(indices_np).long().to(device)
weights_np = 1 / (len(p) * p[indices_np])
weights = th.from_numpy(weights_np).float().to(device)
return indices, weights
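# NOTE minimal usage sketch (assumes a diffusion object exposing num_timesteps):
#   sampler = UniformSampler(diffusion)
#   t, weights = sampler.sample(batch_size=8, device=th.device('cpu'))
#   # t: [8] timestep indices; weights: [8] importance weights (all 1.0 for uniform)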
class UniformSampler(ScheduleSampler):
def __init__(self, diffusion):
self.diffusion = diffusion
self._weights = np.ones([diffusion.num_timesteps])
def weights(self):
return self._weights
class LossAwareSampler(ScheduleSampler):
def update_with_local_losses(self, local_ts, local_losses):
"""
Update the reweighting using losses from a model.
Call this method from each rank with a batch of timesteps and the
corresponding losses for each of those timesteps.
This method will perform synchronization to make sure all of the ranks
maintain the exact same reweighting.
:param local_ts: an integer Tensor of timesteps.
:param local_losses: a 1D Tensor of losses.
"""
batch_sizes = [
th.tensor([0], dtype=th.int32, device=local_ts.device)
for _ in range(dist.get_world_size())
]
dist.all_gather(
batch_sizes,
th.tensor([len(local_ts)], dtype=th.int32, device=local_ts.device),
)
# Pad all_gather batches to be the maximum batch size.
batch_sizes = [x.item() for x in batch_sizes]
max_bs = max(batch_sizes)
timestep_batches = [th.zeros(max_bs).to(local_ts) for bs in batch_sizes]
loss_batches = [th.zeros(max_bs).to(local_losses) for bs in batch_sizes]
dist.all_gather(timestep_batches, local_ts)
dist.all_gather(loss_batches, local_losses)
timesteps = [
x.item() for y, bs in zip(timestep_batches, batch_sizes) for x in y[:bs]
]
losses = [x.item() for y, bs in zip(loss_batches, batch_sizes) for x in y[:bs]]
self.update_with_all_losses(timesteps, losses)
@abstractmethod
def update_with_all_losses(self, ts, losses):
"""
Update the reweighting using losses from a model.
Sub-classes should override this method to update the reweighting
using losses from the model.
This method directly updates the reweighting without synchronizing
between workers. It is called by update_with_local_losses from all
ranks with identical arguments. Thus, it should have deterministic
behavior to maintain state across workers.
:param ts: a list of int timesteps.
:param losses: a list of float losses, one per timestep.
"""
class LossSecondMomentResampler(LossAwareSampler):
def __init__(self, diffusion, history_per_term=10, uniform_prob=0.001):
self.diffusion = diffusion
self.history_per_term = history_per_term
self.uniform_prob = uniform_prob
self._loss_history = np.zeros(
[diffusion.num_timesteps, history_per_term], dtype=np.float64
)
        self._loss_counts = np.zeros([diffusion.num_timesteps], dtype=np.int64)
def weights(self):
if not self._warmed_up():
return np.ones([self.diffusion.num_timesteps], dtype=np.float64)
weights = np.sqrt(np.mean(self._loss_history ** 2, axis=-1))
weights /= np.sum(weights)
weights *= 1 - self.uniform_prob
weights += self.uniform_prob / len(weights)
return weights
def update_with_all_losses(self, ts, losses):
for t, loss in zip(ts, losses):
if self._loss_counts[t] == self.history_per_term:
# Shift out the oldest loss term.
self._loss_history[t, :-1] = self._loss_history[t, 1:]
self._loss_history[t, -1] = loss
else:
self._loss_history[t, self._loss_counts[t]] = loss
self._loss_counts[t] += 1
def _warmed_up(self):
return (self._loss_counts == self.history_per_term).all()
| 5,689 | 35.709677 | 87 | py |
Fengshenbang-LM | Fengshenbang-LM-main/fengshen/examples/disco_project/guided_diffusion/guided_diffusion/losses.py | """
Helpers for various likelihood-based losses. These are ported from the original
Ho et al. diffusion models codebase:
https://github.com/hojonathanho/diffusion/blob/1e0dceb3b3495bbe19116a5e1b3596cd0706c543/diffusion_tf/utils.py
"""
import numpy as np
import torch as th
def normal_kl(mean1, logvar1, mean2, logvar2):
"""
Compute the KL divergence between two gaussians.
Shapes are automatically broadcasted, so batches can be compared to
scalars, among other use cases.
"""
tensor = None
for obj in (mean1, logvar1, mean2, logvar2):
if isinstance(obj, th.Tensor):
tensor = obj
break
assert tensor is not None, "at least one argument must be a Tensor"
# Force variances to be Tensors. Broadcasting helps convert scalars to
# Tensors, but it does not work for th.exp().
logvar1, logvar2 = [
x if isinstance(x, th.Tensor) else th.tensor(x).to(tensor)
for x in (logvar1, logvar2)
]
return 0.5 * (
-1.0 + logvar2 - logvar1 + th.exp(logvar1 - logvar2) + ((mean1 - mean2) ** 2) * th.exp(-logvar2)
)
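# NOTE closed form computed above, for reference (with logvar = 2 * log(s)):
#   KL(N(m1, s1^2) || N(m2, s2^2))
#     = log(s2 / s1) + (s1^2 + (m1 - m2)^2) / (2 * s2^2) - 1/2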
def approx_standard_normal_cdf(x):
"""
A fast approximation of the cumulative distribution function of the
standard normal.
"""
return 0.5 * (1.0 + th.tanh(np.sqrt(2.0 / np.pi) * (x + 0.044715 * th.pow(x, 3))))
def discretized_gaussian_log_likelihood(x, *, means, log_scales):
"""
Compute the log-likelihood of a Gaussian distribution discretizing to a
given image.
:param x: the target images. It is assumed that this was uint8 values,
rescaled to the range [-1, 1].
:param means: the Gaussian mean Tensor.
:param log_scales: the Gaussian log stddev Tensor.
:return: a tensor like x of log probabilities (in nats).
"""
assert x.shape == means.shape == log_scales.shape
centered_x = x - means
inv_stdv = th.exp(-log_scales)
plus_in = inv_stdv * (centered_x + 1.0 / 255.0)
cdf_plus = approx_standard_normal_cdf(plus_in)
min_in = inv_stdv * (centered_x - 1.0 / 255.0)
cdf_min = approx_standard_normal_cdf(min_in)
log_cdf_plus = th.log(cdf_plus.clamp(min=1e-12))
log_one_minus_cdf_min = th.log((1.0 - cdf_min).clamp(min=1e-12))
cdf_delta = cdf_plus - cdf_min
log_probs = th.where(
x < -0.999,
log_cdf_plus,
th.where(x > 0.999, log_one_minus_cdf_min, th.log(cdf_delta.clamp(min=1e-12))),
)
assert log_probs.shape == x.shape
return log_probs
| 2,502 | 32.824324 | 109 | py |
Fengshenbang-LM | Fengshenbang-LM-main/fengshen/examples/disco_project/guided_diffusion/guided_diffusion/nn.py | """
Various utilities for neural networks.
"""
import math
import torch as th
import torch.nn as nn
# PyTorch 1.7 has SiLU, but we support PyTorch 1.5.
class SiLU(nn.Module):
def forward(self, x):
return x * th.sigmoid(x)
class GroupNorm32(nn.GroupNorm):
def forward(self, x):
return super().forward(x.float()).type(x.dtype)
def conv_nd(dims, *args, **kwargs):
"""
Create a 1D, 2D, or 3D convolution module.
"""
if dims == 1:
return nn.Conv1d(*args, **kwargs)
elif dims == 2:
return nn.Conv2d(*args, **kwargs)
elif dims == 3:
return nn.Conv3d(*args, **kwargs)
raise ValueError(f"unsupported dimensions: {dims}")
def linear(*args, **kwargs):
"""
Create a linear module.
"""
return nn.Linear(*args, **kwargs)
def avg_pool_nd(dims, *args, **kwargs):
"""
Create a 1D, 2D, or 3D average pooling module.
"""
if dims == 1:
return nn.AvgPool1d(*args, **kwargs)
elif dims == 2:
return nn.AvgPool2d(*args, **kwargs)
elif dims == 3:
return nn.AvgPool3d(*args, **kwargs)
raise ValueError(f"unsupported dimensions: {dims}")
def update_ema(target_params, source_params, rate=0.99):
"""
Update target parameters to be closer to those of source parameters using
an exponential moving average.
:param target_params: the target parameter sequence.
:param source_params: the source parameter sequence.
:param rate: the EMA rate (closer to 1 means slower).
"""
for targ, src in zip(target_params, source_params):
targ.detach().mul_(rate).add_(src, alpha=1 - rate)
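# NOTE minimal usage sketch: keep a detached copy of the parameters and call this once
# per optimizer step, e.g.
#   ema_params = [p.clone().detach() for p in model.parameters()]
#   update_ema(ema_params, model.parameters(), rate=0.999)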
def zero_module(module):
"""
Zero out the parameters of a module and return it.
"""
for p in module.parameters():
p.detach().zero_()
return module
def scale_module(module, scale):
"""
Scale the parameters of a module and return it.
"""
for p in module.parameters():
p.detach().mul_(scale)
return module
def mean_flat(tensor):
"""
Take the mean over all non-batch dimensions.
"""
return tensor.mean(dim=list(range(1, len(tensor.shape))))
def normalization(channels):
"""
Make a standard normalization layer.
:param channels: number of input channels.
:return: an nn.Module for normalization.
"""
return GroupNorm32(32, channels)
def timestep_embedding(timesteps, dim, max_period=10000):
"""
Create sinusoidal timestep embeddings.
:param timesteps: a 1-D Tensor of N indices, one per batch element.
These may be fractional.
:param dim: the dimension of the output.
:param max_period: controls the minimum frequency of the embeddings.
:return: an [N x dim] Tensor of positional embeddings.
"""
half = dim // 2
freqs = th.exp(
-math.log(max_period) * th.arange(start=0, end=half, dtype=th.float32) / half
).to(device=timesteps.device)
args = timesteps[:, None].float() * freqs[None]
embedding = th.cat([th.cos(args), th.sin(args)], dim=-1)
if dim % 2:
embedding = th.cat([embedding, th.zeros_like(embedding[:, :1])], dim=-1)
return embedding
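# NOTE shape sketch: timestep_embedding(th.tensor([0, 10, 999]), dim=128) returns a
# [3, 128] tensor whose first 64 columns are cosines and last 64 are sines.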
def checkpoint(func, inputs, params, flag):
"""
Evaluate a function without caching intermediate activations, allowing for
reduced memory at the expense of extra compute in the backward pass.
:param func: the function to evaluate.
:param inputs: the argument sequence to pass to `func`.
:param params: a sequence of parameters `func` depends on but does not
explicitly take as arguments.
:param flag: if False, disable gradient checkpointing.
"""
if flag:
args = tuple(inputs) + tuple(params)
return CheckpointFunction.apply(func, len(inputs), *args)
else:
return func(*inputs)
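# NOTE usage sketch, as in the UNet blocks in unet.py: wrap a forward pass so that
# activations are recomputed during backward instead of being stored:
#   out = checkpoint(self._forward, (x, emb), self.parameters(), self.use_checkpoint)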
class CheckpointFunction(th.autograd.Function):
@staticmethod
@th.cuda.amp.custom_fwd
def forward(ctx, run_function, length, *args):
ctx.run_function = run_function
ctx.input_length = length
ctx.save_for_backward(*args)
with th.no_grad():
output_tensors = ctx.run_function(*args[:length])
return output_tensors
@staticmethod
@th.cuda.amp.custom_bwd
def backward(ctx, *output_grads):
args = list(ctx.saved_tensors)
# Filter for inputs that require grad. If none, exit early.
input_indices = [i for (i, x) in enumerate(args) if x.requires_grad]
if not input_indices:
return (None, None) + tuple(None for _ in args)
with th.enable_grad():
for i in input_indices:
if i < ctx.input_length:
# Not sure why the OAI code does this little
# dance. It might not be necessary.
args[i] = args[i].detach().requires_grad_()
args[i] = args[i].view_as(args[i])
output_tensors = ctx.run_function(*args[:ctx.input_length])
if isinstance(output_tensors, th.Tensor):
output_tensors = [output_tensors]
# Filter for outputs that require grad. If none, exit early.
out_and_grads = [(o, g) for (o, g) in zip(output_tensors, output_grads) if o.requires_grad]
if not out_and_grads:
return (None, None) + tuple(None for _ in args)
# Compute gradients on the filtered tensors.
computed_grads = th.autograd.grad(
[o for (o, g) in out_and_grads],
[args[i] for i in input_indices],
[g for (o, g) in out_and_grads]
)
# Reassemble the complete gradient tuple.
input_grads = [None for _ in args]
for (i, g) in zip(input_indices, computed_grads):
input_grads[i] = g
return (None, None) + tuple(input_grads)
| 5,835 | 29.554974 | 99 | py |
Fengshenbang-LM | Fengshenbang-LM-main/fengshen/examples/disco_project/guided_diffusion/guided_diffusion/fp16_util.py | """
Helpers to train with 16-bit precision.
"""
import numpy as np
import torch as th
import torch.nn as nn
from torch._utils import _flatten_dense_tensors, _unflatten_dense_tensors
from . import logger
INITIAL_LOG_LOSS_SCALE = 20.0
def convert_module_to_f16(ll):
"""
Convert primitive modules to float16.
"""
if isinstance(ll, (nn.Conv1d, nn.Conv2d, nn.Conv3d)):
ll.weight.data = ll.weight.data.half()
if ll.bias is not None:
ll.bias.data = ll.bias.data.half()
def convert_module_to_f32(ll):
"""
Convert primitive modules to float32, undoing convert_module_to_f16().
"""
if isinstance(ll, (nn.Conv1d, nn.Conv2d, nn.Conv3d)):
ll.weight.data = ll.weight.data.float()
if ll.bias is not None:
ll.bias.data = ll.bias.data.float()
def make_master_params(param_groups_and_shapes):
"""
Copy model parameters into a (differently-shaped) list of full-precision
parameters.
"""
master_params = []
for param_group, shape in param_groups_and_shapes:
master_param = nn.Parameter(
_flatten_dense_tensors(
[param.detach().float() for (_, param) in param_group]
).view(shape)
)
master_param.requires_grad = True
master_params.append(master_param)
return master_params
def model_grads_to_master_grads(param_groups_and_shapes, master_params):
"""
Copy the gradients from the model parameters into the master parameters
from make_master_params().
"""
for master_param, (param_group, shape) in zip(
master_params, param_groups_and_shapes
):
master_param.grad = _flatten_dense_tensors(
[param_grad_or_zeros(param) for (_, param) in param_group]
).view(shape)
def master_params_to_model_params(param_groups_and_shapes, master_params):
"""
Copy the master parameter data back into the model parameters.
"""
# Without copying to a list, if a generator is passed, this will
# silently not copy any parameters.
for master_param, (param_group, _) in zip(master_params, param_groups_and_shapes):
for (_, param), unflat_master_param in zip(
param_group, unflatten_master_params(param_group, master_param.view(-1))
):
param.detach().copy_(unflat_master_param)
def unflatten_master_params(param_group, master_param):
return _unflatten_dense_tensors(master_param, [param for (_, param) in param_group])
def get_param_groups_and_shapes(named_model_params):
named_model_params = list(named_model_params)
scalar_vector_named_params = (
[(n, p) for (n, p) in named_model_params if p.ndim <= 1],
(-1),
)
matrix_named_params = (
[(n, p) for (n, p) in named_model_params if p.ndim > 1],
(1, -1),
)
return [scalar_vector_named_params, matrix_named_params]
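# NOTE sketch of the fp16 round trip these helpers implement (loss scaling omitted;
# see MixedPrecisionTrainer below for the full version):
#   groups = get_param_groups_and_shapes(model.named_parameters())
#   master = make_master_params(groups)            # flat fp32 copies
#   # ...forward/backward on the fp16 model...
#   model_grads_to_master_grads(groups, master)    # fp16 grads -> flat fp32 grads
#   opt.step()                                     # update the fp32 masters
#   master_params_to_model_params(groups, master)  # copy back into the fp16 model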
def master_params_to_state_dict(
model, param_groups_and_shapes, master_params, use_fp16
):
if use_fp16:
state_dict = model.state_dict()
for master_param, (param_group, _) in zip(
master_params, param_groups_and_shapes
):
for (name, _), unflat_master_param in zip(
param_group, unflatten_master_params(param_group, master_param.view(-1))
):
assert name in state_dict
state_dict[name] = unflat_master_param
else:
state_dict = model.state_dict()
for i, (name, _value) in enumerate(model.named_parameters()):
assert name in state_dict
state_dict[name] = master_params[i]
return state_dict
def state_dict_to_master_params(model, state_dict, use_fp16):
if use_fp16:
named_model_params = [
(name, state_dict[name]) for name, _ in model.named_parameters()
]
param_groups_and_shapes = get_param_groups_and_shapes(named_model_params)
master_params = make_master_params(param_groups_and_shapes)
else:
master_params = [state_dict[name] for name, _ in model.named_parameters()]
return master_params
def zero_master_grads(master_params):
for param in master_params:
param.grad = None
def zero_grad(model_params):
for param in model_params:
# Taken from https://pytorch.org/docs/stable/_modules/torch/optim/optimizer.html#Optimizer.add_param_group
if param.grad is not None:
param.grad.detach_()
param.grad.zero_()
def param_grad_or_zeros(param):
if param.grad is not None:
return param.grad.data.detach()
else:
return th.zeros_like(param)
class MixedPrecisionTrainer:
def __init__(
self,
*,
model,
use_fp16=False,
fp16_scale_growth=1e-3,
initial_lg_loss_scale=INITIAL_LOG_LOSS_SCALE,
):
self.model = model
self.use_fp16 = use_fp16
self.fp16_scale_growth = fp16_scale_growth
self.model_params = list(self.model.parameters())
self.master_params = self.model_params
self.param_groups_and_shapes = None
self.lg_loss_scale = initial_lg_loss_scale
if self.use_fp16:
self.param_groups_and_shapes = get_param_groups_and_shapes(
self.model.named_parameters()
)
self.master_params = make_master_params(self.param_groups_and_shapes)
self.model.convert_to_fp16()
def zero_grad(self):
zero_grad(self.model_params)
def backward(self, loss: th.Tensor):
if self.use_fp16:
loss_scale = 2 ** self.lg_loss_scale
(loss * loss_scale).backward()
else:
loss.backward()
def optimize(self, opt: th.optim.Optimizer):
if self.use_fp16:
return self._optimize_fp16(opt)
else:
return self._optimize_normal(opt)
def _optimize_fp16(self, opt: th.optim.Optimizer):
logger.logkv_mean("lg_loss_scale", self.lg_loss_scale)
model_grads_to_master_grads(self.param_groups_and_shapes, self.master_params)
grad_norm, param_norm = self._compute_norms(grad_scale=2 ** self.lg_loss_scale)
if check_overflow(grad_norm):
self.lg_loss_scale -= 1
logger.log(f"Found NaN, decreased lg_loss_scale to {self.lg_loss_scale}")
zero_master_grads(self.master_params)
return False
logger.logkv_mean("grad_norm", grad_norm)
logger.logkv_mean("param_norm", param_norm)
self.master_params[0].grad.mul_(1.0 / (2 ** self.lg_loss_scale))
opt.step()
zero_master_grads(self.master_params)
master_params_to_model_params(self.param_groups_and_shapes, self.master_params)
self.lg_loss_scale += self.fp16_scale_growth
return True
def _optimize_normal(self, opt: th.optim.Optimizer):
grad_norm, param_norm = self._compute_norms()
logger.logkv_mean("grad_norm", grad_norm)
logger.logkv_mean("param_norm", param_norm)
opt.step()
return True
def _compute_norms(self, grad_scale=1.0):
grad_norm = 0.0
param_norm = 0.0
for p in self.master_params:
with th.no_grad():
param_norm += th.norm(p, p=2, dtype=th.float32).item() ** 2
if p.grad is not None:
grad_norm += th.norm(p.grad, p=2, dtype=th.float32).item() ** 2
return np.sqrt(grad_norm) / grad_scale, np.sqrt(param_norm)
def master_params_to_state_dict(self, master_params):
return master_params_to_state_dict(
self.model, self.param_groups_and_shapes, master_params, self.use_fp16
)
def state_dict_to_master_params(self, state_dict):
return state_dict_to_master_params(self.model, state_dict, self.use_fp16)
def check_overflow(value):
return (value == float("inf")) or (value == -float("inf")) or (value != value)
| 7,955 | 32.56962 | 114 | py |
Fengshenbang-LM | Fengshenbang-LM-main/fengshen/examples/disco_project/guided_diffusion/guided_diffusion/unet.py | from abc import abstractmethod
import math
import numpy as np
import torch as th
import torch.nn as nn
import torch.nn.functional as F
from .fp16_util import convert_module_to_f16, convert_module_to_f32
from .nn import (
checkpoint,
conv_nd,
linear,
avg_pool_nd,
zero_module,
normalization,
timestep_embedding,
)
from transformers import PreTrainedModel, PretrainedConfig
class AttentionPool2d(nn.Module):
"""
Adapted from CLIP: https://github.com/openai/CLIP/blob/main/clip/model.py
"""
def __init__(
self,
spacial_dim: int,
embed_dim: int,
num_heads_channels: int,
output_dim: int = None,
):
super().__init__()
self.positional_embedding = nn.Parameter(
th.randn(embed_dim, spacial_dim ** 2 + 1) / embed_dim ** 0.5
)
self.qkv_proj = conv_nd(1, embed_dim, 3 * embed_dim, 1)
self.c_proj = conv_nd(1, embed_dim, output_dim or embed_dim, 1)
self.num_heads = embed_dim // num_heads_channels
self.attention = QKVAttention(self.num_heads)
def forward(self, x):
b, c, *_spatial = x.shape
x = x.reshape(b, c, -1) # NC(HW)
x = th.cat([x.mean(dim=-1, keepdim=True), x], dim=-1) # NC(HW+1)
x = x + self.positional_embedding[None, :, :].to(x.dtype) # NC(HW+1)
x = self.qkv_proj(x)
x = self.attention(x)
x = self.c_proj(x)
return x[:, :, 0]
class TimestepBlock(nn.Module):
"""
Any module where forward() takes timestep embeddings as a second argument.
"""
@abstractmethod
def forward(self, x, emb):
"""
Apply the module to `x` given `emb` timestep embeddings.
"""
class TimestepEmbedSequential(nn.Sequential, TimestepBlock):
"""
A sequential module that passes timestep embeddings to the children that
support it as an extra input.
"""
def forward(self, x, emb):
for layer in self:
if isinstance(layer, TimestepBlock):
x = layer(x, emb)
else:
x = layer(x)
return x
class Upsample(nn.Module):
"""
An upsampling layer with an optional convolution.
:param channels: channels in the inputs and outputs.
:param use_conv: a bool determining if a convolution is applied.
:param dims: determines if the signal is 1D, 2D, or 3D. If 3D, then
upsampling occurs in the inner-two dimensions.
"""
def __init__(self, channels, use_conv, dims=2, out_channels=None):
super().__init__()
self.channels = channels
self.out_channels = out_channels or channels
self.use_conv = use_conv
self.dims = dims
if use_conv:
self.conv = conv_nd(dims, self.channels, self.out_channels, 3, padding=1)
def forward(self, x):
assert x.shape[1] == self.channels
if self.dims == 3:
x = F.interpolate(
x, (x.shape[2], x.shape[3] * 2, x.shape[4] * 2), mode="nearest"
)
else:
x = F.interpolate(x, scale_factor=2, mode="nearest")
if self.use_conv:
x = self.conv(x)
return x
class Downsample(nn.Module):
"""
A downsampling layer with an optional convolution.
:param channels: channels in the inputs and outputs.
:param use_conv: a bool determining if a convolution is applied.
:param dims: determines if the signal is 1D, 2D, or 3D. If 3D, then
downsampling occurs in the inner-two dimensions.
"""
def __init__(self, channels, use_conv, dims=2, out_channels=None):
super().__init__()
self.channels = channels
self.out_channels = out_channels or channels
self.use_conv = use_conv
self.dims = dims
stride = 2 if dims != 3 else (1, 2, 2)
if use_conv:
self.op = conv_nd(
dims, self.channels, self.out_channels, 3, stride=stride, padding=1
)
else:
assert self.channels == self.out_channels
self.op = avg_pool_nd(dims, kernel_size=stride, stride=stride)
def forward(self, x):
assert x.shape[1] == self.channels
return self.op(x)
class ResBlock(TimestepBlock):
"""
A residual block that can optionally change the number of channels.
:param channels: the number of input channels.
:param emb_channels: the number of timestep embedding channels.
:param dropout: the rate of dropout.
:param out_channels: if specified, the number of out channels.
:param use_conv: if True and out_channels is specified, use a spatial
convolution instead of a smaller 1x1 convolution to change the
channels in the skip connection.
:param dims: determines if the signal is 1D, 2D, or 3D.
:param use_checkpoint: if True, use gradient checkpointing on this module.
:param up: if True, use this block for upsampling.
:param down: if True, use this block for downsampling.
"""
def __init__(
self,
channels,
emb_channels,
dropout,
out_channels=None,
use_conv=False,
use_scale_shift_norm=False,
dims=2,
use_checkpoint=False,
up=False,
down=False,
):
super().__init__()
self.channels = channels
self.emb_channels = emb_channels
self.dropout = dropout
self.out_channels = out_channels or channels
self.use_conv = use_conv
self.use_checkpoint = use_checkpoint
self.use_scale_shift_norm = use_scale_shift_norm
self.in_layers = nn.Sequential(
normalization(channels),
nn.SiLU(),
conv_nd(dims, channels, self.out_channels, 3, padding=1),
)
self.updown = up or down
if up:
self.h_upd = Upsample(channels, False, dims)
self.x_upd = Upsample(channels, False, dims)
elif down:
self.h_upd = Downsample(channels, False, dims)
self.x_upd = Downsample(channels, False, dims)
else:
self.h_upd = self.x_upd = nn.Identity()
self.emb_layers = nn.Sequential(
nn.SiLU(),
linear(
emb_channels,
2 * self.out_channels if use_scale_shift_norm else self.out_channels,
),
)
self.out_layers = nn.Sequential(
normalization(self.out_channels),
nn.SiLU(),
nn.Dropout(p=dropout),
zero_module(
conv_nd(dims, self.out_channels, self.out_channels, 3, padding=1)
),
)
if self.out_channels == channels:
self.skip_connection = nn.Identity()
elif use_conv:
self.skip_connection = conv_nd(
dims, channels, self.out_channels, 3, padding=1
)
else:
self.skip_connection = conv_nd(dims, channels, self.out_channels, 1)
def forward(self, x, emb):
"""
Apply the block to a Tensor, conditioned on a timestep embedding.
:param x: an [N x C x ...] Tensor of features.
:param emb: an [N x emb_channels] Tensor of timestep embeddings.
:return: an [N x C x ...] Tensor of outputs.
"""
return checkpoint(
self._forward, (x, emb), self.parameters(), self.use_checkpoint
)
def _forward(self, x, emb):
if self.updown:
in_rest, in_conv = self.in_layers[:-1], self.in_layers[-1]
h = in_rest(x)
h = self.h_upd(h)
x = self.x_upd(x)
h = in_conv(h)
else:
h = self.in_layers(x)
emb_out = self.emb_layers(emb).type(h.dtype)
while len(emb_out.shape) < len(h.shape):
emb_out = emb_out[..., None]
if self.use_scale_shift_norm:
out_norm, out_rest = self.out_layers[0], self.out_layers[1:]
scale, shift = th.chunk(emb_out, 2, dim=1)
h = out_norm(h) * (1 + scale) + shift
h = out_rest(h)
else:
h = h + emb_out
h = self.out_layers(h)
return self.skip_connection(x) + h
class AttentionBlock(nn.Module):
"""
An attention block that allows spatial positions to attend to each other.
Originally ported from here, but adapted to the N-d case.
https://github.com/hojonathanho/diffusion/blob/1e0dceb3b3495bbe19116a5e1b3596cd0706c543/diffusion_tf/models/unet.py#L66.
"""
def __init__(
self,
channels,
num_heads=1,
num_head_channels=-1,
use_checkpoint=False,
use_new_attention_order=False,
):
super().__init__()
self.channels = channels
if num_head_channels == -1:
self.num_heads = num_heads
else:
assert (
channels % num_head_channels == 0
), f"q,k,v channels {channels} is not divisible by num_head_channels {num_head_channels}"
self.num_heads = channels // num_head_channels
self.use_checkpoint = use_checkpoint
self.norm = normalization(channels)
self.qkv = conv_nd(1, channels, channels * 3, 1)
if use_new_attention_order:
# split qkv before split heads
self.attention = QKVAttention(self.num_heads)
else:
# split heads before split qkv
self.attention = QKVAttentionLegacy(self.num_heads)
self.proj_out = zero_module(conv_nd(1, channels, channels, 1))
def forward(self, x):
return checkpoint(self._forward, (x,), self.parameters(), self.use_checkpoint)
def _forward(self, x):
b, c, *spatial = x.shape
x = x.reshape(b, c, -1)
qkv = self.qkv(self.norm(x))
h = self.attention(qkv)
h = self.proj_out(h)
return (x + h).reshape(b, c, *spatial)
def count_flops_attn(model, _x, y):
"""
A counter for the `thop` package to count the operations in an
attention operation.
Meant to be used like:
macs, params = thop.profile(
model,
inputs=(inputs, timestamps),
custom_ops={QKVAttention: QKVAttention.count_flops},
)
"""
b, c, *spatial = y[0].shape
num_spatial = int(np.prod(spatial))
# We perform two matmuls with the same number of ops.
# The first computes the weight matrix, the second computes
# the combination of the value vectors.
matmul_ops = 2 * b * (num_spatial ** 2) * c
model.total_ops += th.DoubleTensor([matmul_ops])
class QKVAttentionLegacy(nn.Module):
"""
    A module which performs QKV attention. Matches legacy QKVAttention + input/output heads shaping
"""
def __init__(self, n_heads):
super().__init__()
self.n_heads = n_heads
def forward(self, qkv):
"""
Apply QKV attention.
:param qkv: an [N x (H * 3 * C) x T] tensor of Qs, Ks, and Vs.
:return: an [N x (H * C) x T] tensor after attention.
"""
bs, width, length = qkv.shape
assert width % (3 * self.n_heads) == 0
ch = width // (3 * self.n_heads)
q, k, v = qkv.reshape(bs * self.n_heads, ch * 3, length).split(ch, dim=1)
scale = 1 / math.sqrt(math.sqrt(ch))
weight = th.einsum(
"bct,bcs->bts", q * scale, k * scale
) # More stable with f16 than dividing afterwards
weight = th.softmax(weight.float(), dim=-1).type(weight.dtype)
a = th.einsum("bts,bcs->bct", weight, v)
return a.reshape(bs, -1, length)
@staticmethod
def count_flops(model, _x, y):
return count_flops_attn(model, _x, y)
class QKVAttention(nn.Module):
"""
A module which performs QKV attention and splits in a different order.
"""
def __init__(self, n_heads):
super().__init__()
self.n_heads = n_heads
def forward(self, qkv):
"""
Apply QKV attention.
:param qkv: an [N x (3 * H * C) x T] tensor of Qs, Ks, and Vs.
:return: an [N x (H * C) x T] tensor after attention.
"""
bs, width, length = qkv.shape
assert width % (3 * self.n_heads) == 0
ch = width // (3 * self.n_heads)
q, k, v = qkv.chunk(3, dim=1)
scale = 1 / math.sqrt(math.sqrt(ch))
weight = th.einsum(
"bct,bcs->bts",
(q * scale).view(bs * self.n_heads, ch, length),
(k * scale).view(bs * self.n_heads, ch, length),
) # More stable with f16 than dividing afterwards
weight = th.softmax(weight.float(), dim=-1).type(weight.dtype)
a = th.einsum("bts,bcs->bct", weight, v.reshape(bs * self.n_heads, ch, length))
return a.reshape(bs, -1, length)
@staticmethod
def count_flops(model, _x, y):
return count_flops_attn(model, _x, y)
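# NOTE shape walk-through for both attention variants: qkv is [N, 3*H*C, T]
# (legacy order: [N, H*3*C, T]); after splitting into H heads of C channels and
# attending over the T spatial positions, the output is [N, H*C, T].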
class UNetModel(nn.Module):
"""
The full UNet model with attention and timestep embedding.
:param in_channels: channels in the input Tensor.
:param model_channels: base channel count for the model.
:param out_channels: channels in the output Tensor.
:param num_res_blocks: number of residual blocks per downsample.
:param attention_resolutions: a collection of downsample rates at which
attention will take place. May be a set, list, or tuple.
For example, if this contains 4, then at 4x downsampling, attention
will be used.
:param dropout: the dropout probability.
:param channel_mult: channel multiplier for each level of the UNet.
:param conv_resample: if True, use learned convolutions for upsampling and
downsampling.
:param dims: determines if the signal is 1D, 2D, or 3D.
:param num_classes: if specified (as an int), then this model will be
class-conditional with `num_classes` classes.
:param use_checkpoint: use gradient checkpointing to reduce memory usage.
:param num_heads: the number of attention heads in each attention layer.
:param num_heads_channels: if specified, ignore num_heads and instead use
a fixed channel width per attention head.
:param num_heads_upsample: works with num_heads to set a different number
of heads for upsampling. Deprecated.
:param use_scale_shift_norm: use a FiLM-like conditioning mechanism.
:param resblock_updown: use residual blocks for up/downsampling.
:param use_new_attention_order: use a different attention pattern for potentially
increased efficiency.
"""
def __init__(
self,
image_size,
in_channels,
model_channels,
out_channels,
num_res_blocks,
attention_resolutions,
dropout=0,
channel_mult=(1, 2, 4, 8),
conv_resample=True,
dims=2,
num_classes=None,
use_checkpoint=False,
use_fp16=False,
num_heads=1,
num_head_channels=-1,
num_heads_upsample=-1,
use_scale_shift_norm=False,
resblock_updown=False,
use_new_attention_order=False,
):
super().__init__()
if num_heads_upsample == -1:
num_heads_upsample = num_heads
self.image_size = image_size
self.in_channels = in_channels
self.model_channels = model_channels
self.out_channels = out_channels
self.num_res_blocks = num_res_blocks
self.attention_resolutions = attention_resolutions
self.dropout = dropout
self.channel_mult = channel_mult
self.conv_resample = conv_resample
self.num_classes = num_classes
self.use_checkpoint = use_checkpoint
self.dtype = th.float16 if use_fp16 else th.float32
self.num_heads = num_heads
self.num_head_channels = num_head_channels
self.num_heads_upsample = num_heads_upsample
time_embed_dim = model_channels * 4
self.time_embed = nn.Sequential(
linear(model_channels, time_embed_dim),
nn.SiLU(),
linear(time_embed_dim, time_embed_dim),
)
if self.num_classes is not None:
self.label_emb = nn.Embedding(num_classes, time_embed_dim)
ch = input_ch = int(channel_mult[0] * model_channels)
self.input_blocks = nn.ModuleList(
[TimestepEmbedSequential(conv_nd(dims, in_channels, ch, 3, padding=1))]
)
self._feature_size = ch
input_block_chans = [ch]
ds = 1
for level, mult in enumerate(channel_mult):
for _ in range(num_res_blocks):
layers = [
ResBlock(
ch,
time_embed_dim,
dropout,
out_channels=int(mult * model_channels),
dims=dims,
use_checkpoint=use_checkpoint,
use_scale_shift_norm=use_scale_shift_norm,
)
]
ch = int(mult * model_channels)
if ds in attention_resolutions:
layers.append(
AttentionBlock(
ch,
use_checkpoint=use_checkpoint,
num_heads=num_heads,
num_head_channels=num_head_channels,
use_new_attention_order=use_new_attention_order,
)
)
self.input_blocks.append(TimestepEmbedSequential(*layers))
self._feature_size += ch
input_block_chans.append(ch)
if level != len(channel_mult) - 1:
out_ch = ch
self.input_blocks.append(
TimestepEmbedSequential(
ResBlock(
ch,
time_embed_dim,
dropout,
out_channels=out_ch,
dims=dims,
use_checkpoint=use_checkpoint,
use_scale_shift_norm=use_scale_shift_norm,
down=True,
)
if resblock_updown
else Downsample(
ch, conv_resample, dims=dims, out_channels=out_ch
)
)
)
ch = out_ch
input_block_chans.append(ch)
ds *= 2
self._feature_size += ch
self.middle_block = TimestepEmbedSequential(
ResBlock(
ch,
time_embed_dim,
dropout,
dims=dims,
use_checkpoint=use_checkpoint,
use_scale_shift_norm=use_scale_shift_norm,
),
AttentionBlock(
ch,
use_checkpoint=use_checkpoint,
num_heads=num_heads,
num_head_channels=num_head_channels,
use_new_attention_order=use_new_attention_order,
),
ResBlock(
ch,
time_embed_dim,
dropout,
dims=dims,
use_checkpoint=use_checkpoint,
use_scale_shift_norm=use_scale_shift_norm,
),
)
self._feature_size += ch
self.output_blocks = nn.ModuleList([])
for level, mult in list(enumerate(channel_mult))[::-1]:
for i in range(num_res_blocks + 1):
ich = input_block_chans.pop()
layers = [
ResBlock(
ch + ich,
time_embed_dim,
dropout,
out_channels=int(model_channels * mult),
dims=dims,
use_checkpoint=use_checkpoint,
use_scale_shift_norm=use_scale_shift_norm,
)
]
ch = int(model_channels * mult)
if ds in attention_resolutions:
layers.append(
AttentionBlock(
ch,
use_checkpoint=use_checkpoint,
num_heads=num_heads_upsample,
num_head_channels=num_head_channels,
use_new_attention_order=use_new_attention_order,
)
)
if level and i == num_res_blocks:
out_ch = ch
layers.append(
ResBlock(
ch,
time_embed_dim,
dropout,
out_channels=out_ch,
dims=dims,
use_checkpoint=use_checkpoint,
use_scale_shift_norm=use_scale_shift_norm,
up=True,
)
if resblock_updown
else Upsample(ch, conv_resample, dims=dims, out_channels=out_ch)
)
ds //= 2
self.output_blocks.append(TimestepEmbedSequential(*layers))
self._feature_size += ch
self.out = nn.Sequential(
normalization(ch),
nn.SiLU(),
zero_module(conv_nd(dims, input_ch, out_channels, 3, padding=1)),
)
def convert_to_fp16(self):
"""
Convert the torso of the model to float16.
"""
self.input_blocks.apply(convert_module_to_f16)
self.middle_block.apply(convert_module_to_f16)
self.output_blocks.apply(convert_module_to_f16)
def convert_to_fp32(self):
"""
Convert the torso of the model to float32.
"""
self.input_blocks.apply(convert_module_to_f32)
self.middle_block.apply(convert_module_to_f32)
self.output_blocks.apply(convert_module_to_f32)
def forward(self, x, timesteps, y=None):
"""
Apply the model to an input batch.
:param x: an [N x C x ...] Tensor of inputs.
:param timesteps: a 1-D batch of timesteps.
:param y: an [N] Tensor of labels, if class-conditional.
:return: an [N x C x ...] Tensor of outputs.
"""
assert (y is not None) == (
self.num_classes is not None
), "must specify y if and only if the model is class-conditional"
hs = []
emb = self.time_embed(timestep_embedding(timesteps, self.model_channels))
if self.num_classes is not None:
assert y.shape == (x.shape[0],)
emb = emb + self.label_emb(y)
h = x.type(self.dtype)
for module in self.input_blocks:
h = module(h, emb)
hs.append(h)
h = self.middle_block(h, emb)
for module in self.output_blocks:
h = th.cat([h, hs.pop()], dim=1)
h = module(h, emb)
h = h.type(x.dtype)
return self.out(h)
class SuperResModel(UNetModel):
"""
A UNetModel that performs super-resolution.
Expects an extra kwarg `low_res` to condition on a low-resolution image.
"""
def __init__(self, image_size, in_channels, *args, **kwargs):
super().__init__(image_size, in_channels * 2, *args, **kwargs)
def forward(self, x, timesteps, low_res=None, **kwargs):
_, _, new_height, new_width = x.shape
upsampled = F.interpolate(low_res, (new_height, new_width), mode="bilinear")
x = th.cat([x, upsampled], dim=1)
return super().forward(x, timesteps, **kwargs)
class EncoderUNetModel(nn.Module):
"""
The half UNet model with attention and timestep embedding.
For usage, see UNet.
"""
def __init__(
self,
image_size,
in_channels,
model_channels,
out_channels,
num_res_blocks,
attention_resolutions,
dropout=0,
channel_mult=(1, 2, 4, 8),
conv_resample=True,
dims=2,
use_checkpoint=False,
use_fp16=False,
num_heads=1,
num_head_channels=-1,
num_heads_upsample=-1,
use_scale_shift_norm=False,
resblock_updown=False,
use_new_attention_order=False,
pool="adaptive",
):
super().__init__()
if num_heads_upsample == -1:
num_heads_upsample = num_heads
self.in_channels = in_channels
self.model_channels = model_channels
self.out_channels = out_channels
self.num_res_blocks = num_res_blocks
self.attention_resolutions = attention_resolutions
self.dropout = dropout
self.channel_mult = channel_mult
self.conv_resample = conv_resample
self.use_checkpoint = use_checkpoint
self.dtype = th.float16 if use_fp16 else th.float32
self.num_heads = num_heads
self.num_head_channels = num_head_channels
self.num_heads_upsample = num_heads_upsample
time_embed_dim = model_channels * 4
self.time_embed = nn.Sequential(
linear(model_channels, time_embed_dim),
nn.SiLU(),
linear(time_embed_dim, time_embed_dim),
)
ch = int(channel_mult[0] * model_channels)
self.input_blocks = nn.ModuleList(
[TimestepEmbedSequential(conv_nd(dims, in_channels, ch, 3, padding=1))]
)
self._feature_size = ch
input_block_chans = [ch]
ds = 1
for level, mult in enumerate(channel_mult):
for _ in range(num_res_blocks):
layers = [
ResBlock(
ch,
time_embed_dim,
dropout,
out_channels=int(mult * model_channels),
dims=dims,
use_checkpoint=use_checkpoint,
use_scale_shift_norm=use_scale_shift_norm,
)
]
ch = int(mult * model_channels)
if ds in attention_resolutions:
layers.append(
AttentionBlock(
ch,
use_checkpoint=use_checkpoint,
num_heads=num_heads,
num_head_channels=num_head_channels,
use_new_attention_order=use_new_attention_order,
)
)
self.input_blocks.append(TimestepEmbedSequential(*layers))
self._feature_size += ch
input_block_chans.append(ch)
if level != len(channel_mult) - 1:
out_ch = ch
self.input_blocks.append(
TimestepEmbedSequential(
ResBlock(
ch,
time_embed_dim,
dropout,
out_channels=out_ch,
dims=dims,
use_checkpoint=use_checkpoint,
use_scale_shift_norm=use_scale_shift_norm,
down=True,
)
if resblock_updown
else Downsample(
ch, conv_resample, dims=dims, out_channels=out_ch
)
)
)
ch = out_ch
input_block_chans.append(ch)
ds *= 2
self._feature_size += ch
self.middle_block = TimestepEmbedSequential(
ResBlock(
ch,
time_embed_dim,
dropout,
dims=dims,
use_checkpoint=use_checkpoint,
use_scale_shift_norm=use_scale_shift_norm,
),
AttentionBlock(
ch,
use_checkpoint=use_checkpoint,
num_heads=num_heads,
num_head_channels=num_head_channels,
use_new_attention_order=use_new_attention_order,
),
ResBlock(
ch,
time_embed_dim,
dropout,
dims=dims,
use_checkpoint=use_checkpoint,
use_scale_shift_norm=use_scale_shift_norm,
),
)
self._feature_size += ch
self.pool = pool
if pool == "adaptive":
self.out = nn.Sequential(
normalization(ch),
nn.SiLU(),
nn.AdaptiveAvgPool2d((1, 1)),
zero_module(conv_nd(dims, ch, out_channels, 1)),
nn.Flatten(),
)
elif pool == "attention":
assert num_head_channels != -1
self.out = nn.Sequential(
normalization(ch),
nn.SiLU(),
AttentionPool2d(
(image_size // ds), ch, num_head_channels, out_channels
),
)
elif pool == "spatial":
self.out = nn.Sequential(
nn.Linear(self._feature_size, 2048),
nn.ReLU(),
nn.Linear(2048, self.out_channels),
)
elif pool == "spatial_v2":
self.out = nn.Sequential(
nn.Linear(self._feature_size, 2048),
normalization(2048),
nn.SiLU(),
nn.Linear(2048, self.out_channels),
)
else:
raise NotImplementedError(f"Unexpected {pool} pooling")
def convert_to_fp16(self):
"""
Convert the torso of the model to float16.
"""
self.input_blocks.apply(convert_module_to_f16)
self.middle_block.apply(convert_module_to_f16)
def convert_to_fp32(self):
"""
Convert the torso of the model to float32.
"""
self.input_blocks.apply(convert_module_to_f32)
self.middle_block.apply(convert_module_to_f32)
def forward(self, x, timesteps):
"""
Apply the model to an input batch.
:param x: an [N x C x ...] Tensor of inputs.
:param timesteps: a 1-D batch of timesteps.
:return: an [N x K] Tensor of outputs.
"""
emb = self.time_embed(timestep_embedding(timesteps, self.model_channels))
results = []
h = x.type(self.dtype)
for module in self.input_blocks:
h = module(h, emb)
if self.pool.startswith("spatial"):
results.append(h.type(x.dtype).mean(dim=(2, 3)))
h = self.middle_block(h, emb)
if self.pool.startswith("spatial"):
results.append(h.type(x.dtype).mean(dim=(2, 3)))
h = th.cat(results, axis=-1)
return self.out(h)
else:
h = h.type(x.dtype)
return self.out(h)
class UNetConfig(PretrainedConfig):
def __init__(
self,
image_size=512,
in_channels=3,
model_channels=256,
out_channels=6,
num_res_blocks=2,
attention_resolutions=[16, 32, 64],
dropout=0.0,
channel_mult=(0.5, 1, 1, 2, 2, 4, 4),
num_classes=None,
use_checkpoint=False,
use_fp16=True,
num_heads=4,
num_head_channels=64,
num_heads_upsample=-1,
use_scale_shift_norm=True,
resblock_updown=True,
use_new_attention_order=False,
**kwargs
):
self.image_size = image_size
self.in_channels = in_channels
self.model_channels = model_channels
self.out_channels = out_channels
self.num_res_blocks = num_res_blocks
self.attention_resolutions = attention_resolutions
self.dropout = dropout
self.channel_mult = channel_mult
self.num_classes = num_classes
self.use_checkpoint = use_checkpoint
self.use_fp16 = use_fp16
self.num_heads = num_heads
self.num_head_channels = num_head_channels
self.num_heads_upsample = num_heads_upsample
self.use_scale_shift_norm = use_scale_shift_norm
self.resblock_updown = resblock_updown
self.use_new_attention_order = use_new_attention_order
super().__init__(**kwargs)
class HFUNetModel(PreTrainedModel):
config_class = UNetConfig
def __init__(self, config):
super().__init__(config)
self.model = UNetModel(
image_size=config.image_size,
in_channels=config.in_channels,
model_channels=config.model_channels,
out_channels=config.out_channels,
num_res_blocks=config.num_res_blocks,
attention_resolutions=config.attention_resolutions,
dropout=config.dropout,
channel_mult=config.channel_mult,
num_classes=config.num_classes,
use_checkpoint=config.use_checkpoint,
use_fp16=config.use_fp16,
num_heads=config.num_heads,
num_head_channels=config.num_head_channels,
num_heads_upsample=config.num_heads_upsample,
use_scale_shift_norm=config.use_scale_shift_norm,
resblock_updown=config.resblock_updown,
use_new_attention_order=config.use_new_attention_order,
)
def forward(self, x, timesteps, y=None):
return self.model.forward(x, timesteps, y)
def convert_to_fp16(self):
"""
Convert the torso of the model to float16.
"""
self.model.input_blocks.apply(convert_module_to_f16)
self.model.middle_block.apply(convert_module_to_f16)
self.model.output_blocks.apply(convert_module_to_f16)
| 34,109 | 33.94877 | 124 | py |
Fengshenbang-LM | Fengshenbang-LM-main/fengshen/examples/disco_project/guided_diffusion/guided_diffusion/gaussian_diffusion.py | """
This code started out as a PyTorch port of Ho et al's diffusion models:
https://github.com/hojonathanho/diffusion/blob/1e0dceb3b3495bbe19116a5e1b3596cd0706c543/diffusion_tf/diffusion_utils_2.py
Docstrings have been added, as well as DDIM sampling and a new collection of beta schedules.
"""
import enum
import math
import numpy as np
import torch as th
from .nn import mean_flat
from .losses import normal_kl, discretized_gaussian_log_likelihood
def get_named_beta_schedule(schedule_name, num_diffusion_timesteps):
"""
Get a pre-defined beta schedule for the given name.
The beta schedule library consists of beta schedules which remain similar
in the limit of num_diffusion_timesteps.
Beta schedules may be added, but should not be removed or changed once
they are committed to maintain backwards compatibility.
"""
if schedule_name == "linear":
# Linear schedule from Ho et al, extended to work for any number of
# diffusion steps.
scale = 1000 / num_diffusion_timesteps
beta_start = scale * 0.0001
beta_end = scale * 0.02
return np.linspace(
beta_start, beta_end, num_diffusion_timesteps, dtype=np.float64
)
elif schedule_name == "cosine":
return betas_for_alpha_bar(
num_diffusion_timesteps,
lambda t: math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2,
)
else:
raise NotImplementedError(f"unknown beta schedule: {schedule_name}")
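# A minimal usage sketch (illustrative): both schedules return a length-T float64
# array of betas in (0, 1].
#
# betas = get_named_beta_schedule("cosine", 1000)
# assert betas.shape == (1000,) and (betas > 0).all() and (betas <= 0.999).all()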
def betas_for_alpha_bar(num_diffusion_timesteps, alpha_bar, max_beta=0.999):
"""
Create a beta schedule that discretizes the given alpha_t_bar function,
which defines the cumulative product of (1-beta) over time from t = [0,1].
:param num_diffusion_timesteps: the number of betas to produce.
:param alpha_bar: a lambda that takes an argument t from 0 to 1 and
produces the cumulative product of (1-beta) up to that
part of the diffusion process.
:param max_beta: the maximum beta to use; use values lower than 1 to
prevent singularities.
"""
betas = []
for i in range(num_diffusion_timesteps):
t1 = i / num_diffusion_timesteps
t2 = (i + 1) / num_diffusion_timesteps
betas.append(min(1 - alpha_bar(t2) / alpha_bar(t1), max_beta))
return np.array(betas)
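# Example (a sketch): reusing the cosine alpha_bar from above,
#
# betas = betas_for_alpha_bar(
#     10, lambda t: math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2)
# # np.cumprod(1 - betas) then tracks alpha_bar((i + 1) / 10), up to max_beta clipping.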
class ModelMeanType(enum.Enum):
"""
Which type of output the model predicts.
"""
PREVIOUS_X = enum.auto() # the model predicts x_{t-1}
START_X = enum.auto() # the model predicts x_0
EPSILON = enum.auto() # the model predicts epsilon
class ModelVarType(enum.Enum):
"""
What is used as the model's output variance.
The LEARNED_RANGE option has been added to allow the model to predict
values between FIXED_SMALL and FIXED_LARGE, making its job easier.
"""
LEARNED = enum.auto()
FIXED_SMALL = enum.auto()
FIXED_LARGE = enum.auto()
LEARNED_RANGE = enum.auto()
class LossType(enum.Enum):
MSE = enum.auto() # use raw MSE loss (and KL when learning variances)
RESCALED_MSE = (
enum.auto()
) # use raw MSE loss (with RESCALED_KL when learning variances)
KL = enum.auto() # use the variational lower-bound
RESCALED_KL = enum.auto() # like KL, but rescale to estimate the full VLB
def is_vb(self):
return self == LossType.KL or self == LossType.RESCALED_KL
class GaussianDiffusion:
"""
Utilities for training and sampling diffusion models.
Ported directly from here, and then adapted over time to further experimentation.
https://github.com/hojonathanho/diffusion/blob/1e0dceb3b3495bbe19116a5e1b3596cd0706c543/diffusion_tf/diffusion_utils_2.py#L42
:param betas: a 1-D numpy array of betas for each diffusion timestep,
starting at T and going to 1.
:param model_mean_type: a ModelMeanType determining what the model outputs.
:param model_var_type: a ModelVarType determining how variance is output.
:param loss_type: a LossType determining the loss function to use.
:param rescale_timesteps: if True, pass floating point timesteps into the
model so that they are always scaled like in the
original paper (0 to 1000).
"""
def __init__(
self,
*,
betas,
model_mean_type,
model_var_type,
loss_type,
rescale_timesteps=False,
):
self.model_mean_type = model_mean_type
self.model_var_type = model_var_type
self.loss_type = loss_type
self.rescale_timesteps = rescale_timesteps
# Use float64 for accuracy.
betas = np.array(betas, dtype=np.float64)
self.betas = betas
assert len(betas.shape) == 1, "betas must be 1-D"
assert (betas > 0).all() and (betas <= 1).all()
self.num_timesteps = int(betas.shape[0])
alphas = 1.0 - betas
self.alphas_cumprod = np.cumprod(alphas, axis=0)
self.alphas_cumprod_prev = np.append(1.0, self.alphas_cumprod[:-1])
self.alphas_cumprod_next = np.append(self.alphas_cumprod[1:], 0.0)
assert self.alphas_cumprod_prev.shape == (self.num_timesteps,)
# calculations for diffusion q(x_t | x_{t-1}) and others
self.sqrt_alphas_cumprod = np.sqrt(self.alphas_cumprod)
self.sqrt_one_minus_alphas_cumprod = np.sqrt(1.0 - self.alphas_cumprod)
self.log_one_minus_alphas_cumprod = np.log(1.0 - self.alphas_cumprod)
self.sqrt_recip_alphas_cumprod = np.sqrt(1.0 / self.alphas_cumprod)
self.sqrt_recipm1_alphas_cumprod = np.sqrt(1.0 / self.alphas_cumprod - 1)
# calculations for posterior q(x_{t-1} | x_t, x_0)
self.posterior_variance = (
betas * (1.0 - self.alphas_cumprod_prev) / (1.0 - self.alphas_cumprod)
)
# log calculation clipped because the posterior variance is 0 at the
# beginning of the diffusion chain.
self.posterior_log_variance_clipped = np.log(
np.append(self.posterior_variance[1], self.posterior_variance[1:])
)
self.posterior_mean_coef1 = (
betas * np.sqrt(self.alphas_cumprod_prev) / (1.0 - self.alphas_cumprod)
)
self.posterior_mean_coef2 = (
(1.0 - self.alphas_cumprod_prev) * np.sqrt(alphas) / (1.0 - self.alphas_cumprod)
)
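        # These coefficients implement the closed-form posterior
        # q(x_{t-1} | x_t, x_0) = N(coef1 * x_0 + coef2 * x_t, posterior_variance),
        # with posterior_variance = beta_t * (1 - alpha_bar_{t-1}) / (1 - alpha_bar_t)
        # (Ho et al., 2020, Eqs. 6-7).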
def q_mean_variance(self, x_start, t):
"""
Get the distribution q(x_t | x_0).
:param x_start: the [N x C x ...] tensor of noiseless inputs.
:param t: the number of diffusion steps (minus 1). Here, 0 means one step.
:return: A tuple (mean, variance, log_variance), all of x_start's shape.
"""
mean = (
_extract_into_tensor(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start
)
variance = _extract_into_tensor(1.0 - self.alphas_cumprod, t, x_start.shape)
log_variance = _extract_into_tensor(
self.log_one_minus_alphas_cumprod, t, x_start.shape
)
return mean, variance, log_variance
def q_sample(self, x_start, t, noise=None):
"""
Diffuse the data for a given number of diffusion steps.
In other words, sample from q(x_t | x_0).
:param x_start: the initial data batch.
:param t: the number of diffusion steps (minus 1). Here, 0 means one step.
:param noise: if specified, the split-out normal noise.
:return: A noisy version of x_start.
"""
if noise is None:
noise = th.randn_like(x_start)
assert noise.shape == x_start.shape
return (
            _extract_into_tensor(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start
            + _extract_into_tensor(self.sqrt_one_minus_alphas_cumprod, t, x_start.shape) * noise
)
def q_posterior_mean_variance(self, x_start, x_t, t):
"""
Compute the mean and variance of the diffusion posterior:
q(x_{t-1} | x_t, x_0)
"""
assert x_start.shape == x_t.shape
posterior_mean = (
            _extract_into_tensor(self.posterior_mean_coef1, t, x_t.shape) * x_start
            + _extract_into_tensor(self.posterior_mean_coef2, t, x_t.shape) * x_t
)
posterior_variance = _extract_into_tensor(self.posterior_variance, t, x_t.shape)
posterior_log_variance_clipped = _extract_into_tensor(
self.posterior_log_variance_clipped, t, x_t.shape
)
assert (
posterior_mean.shape[0] == posterior_variance.shape[0] == posterior_log_variance_clipped.shape[0] == x_start.shape[0]
)
return posterior_mean, posterior_variance, posterior_log_variance_clipped
def p_mean_variance(
self, model, x, t, clip_denoised=True, denoised_fn=None, model_kwargs=None
):
"""
Apply the model to get p(x_{t-1} | x_t), as well as a prediction of
the initial x, x_0.
:param model: the model, which takes a signal and a batch of timesteps
as input.
:param x: the [N x C x ...] tensor at time t.
:param t: a 1-D Tensor of timesteps.
:param clip_denoised: if True, clip the denoised signal into [-1, 1].
:param denoised_fn: if not None, a function which applies to the
x_start prediction before it is used to sample. Applies before
clip_denoised.
:param model_kwargs: if not None, a dict of extra keyword arguments to
pass to the model. This can be used for conditioning.
:return: a dict with the following keys:
- 'mean': the model mean output.
- 'variance': the model variance output.
- 'log_variance': the log of 'variance'.
- 'pred_xstart': the prediction for x_0.
"""
if model_kwargs is None:
model_kwargs = {}
B, C = x.shape[:2]
assert t.shape == (B,)
model_output = model(x, self._scale_timesteps(t), **model_kwargs)
if self.model_var_type in [ModelVarType.LEARNED, ModelVarType.LEARNED_RANGE]:
assert model_output.shape == (B, C * 2, *x.shape[2:])
model_output, model_var_values = th.split(model_output, C, dim=1)
if self.model_var_type == ModelVarType.LEARNED:
model_log_variance = model_var_values
model_variance = th.exp(model_log_variance)
else:
min_log = _extract_into_tensor(
self.posterior_log_variance_clipped, t, x.shape
)
max_log = _extract_into_tensor(np.log(self.betas), t, x.shape)
# The model_var_values is [-1, 1] for [min_var, max_var].
frac = (model_var_values + 1) / 2
model_log_variance = frac * max_log + (1 - frac) * min_log
model_variance = th.exp(model_log_variance)
else:
model_variance, model_log_variance = {
# for fixedlarge, we set the initial (log-)variance like so
# to get a better decoder log likelihood.
ModelVarType.FIXED_LARGE: (
np.append(self.posterior_variance[1], self.betas[1:]),
np.log(np.append(self.posterior_variance[1], self.betas[1:])),
),
ModelVarType.FIXED_SMALL: (
self.posterior_variance,
self.posterior_log_variance_clipped,
),
}[self.model_var_type]
model_variance = _extract_into_tensor(model_variance, t, x.shape)
model_log_variance = _extract_into_tensor(model_log_variance, t, x.shape)
def process_xstart(x):
if denoised_fn is not None:
x = denoised_fn(x)
if clip_denoised:
return x.clamp(-1, 1)
return x
if self.model_mean_type == ModelMeanType.PREVIOUS_X:
pred_xstart = process_xstart(
self._predict_xstart_from_xprev(x_t=x, t=t, xprev=model_output)
)
model_mean = model_output
elif self.model_mean_type in [ModelMeanType.START_X, ModelMeanType.EPSILON]:
if self.model_mean_type == ModelMeanType.START_X:
pred_xstart = process_xstart(model_output)
else:
pred_xstart = process_xstart(
self._predict_xstart_from_eps(x_t=x, t=t, eps=model_output)
)
model_mean, _, _ = self.q_posterior_mean_variance(
x_start=pred_xstart, x_t=x, t=t
)
else:
raise NotImplementedError(self.model_mean_type)
assert (
model_mean.shape == model_log_variance.shape == pred_xstart.shape == x.shape
)
return {
"mean": model_mean,
"variance": model_variance,
"log_variance": model_log_variance,
"pred_xstart": pred_xstart,
}
def _predict_xstart_from_eps(self, x_t, t, eps):
assert x_t.shape == eps.shape
return (
            _extract_into_tensor(self.sqrt_recip_alphas_cumprod, t, x_t.shape) * x_t
            - _extract_into_tensor(self.sqrt_recipm1_alphas_cumprod, t, x_t.shape) * eps
)
def _predict_xstart_from_xprev(self, x_t, t, xprev):
assert x_t.shape == xprev.shape
return ( # (xprev - coef2*x_t) / coef1
            _extract_into_tensor(1.0 / self.posterior_mean_coef1, t, x_t.shape) * xprev
            - _extract_into_tensor(self.posterior_mean_coef2 / self.posterior_mean_coef1, t, x_t.shape) * x_t
)
def _predict_eps_from_xstart(self, x_t, t, pred_xstart):
return (
            _extract_into_tensor(self.sqrt_recip_alphas_cumprod, t, x_t.shape) * x_t
            - pred_xstart
        ) / _extract_into_tensor(self.sqrt_recipm1_alphas_cumprod, t, x_t.shape)
def _scale_timesteps(self, t):
if self.rescale_timesteps:
return t.float() * (1000.0 / self.num_timesteps)
return t
def condition_mean(self, cond_fn, p_mean_var, x, t, model_kwargs=None):
"""
Compute the mean for the previous step, given a function cond_fn that
computes the gradient of a conditional log probability with respect to
x. In particular, cond_fn computes grad(log(p(y|x))), and we want to
condition on y.
This uses the conditioning strategy from Sohl-Dickstein et al. (2015).
"""
gradient = cond_fn(x, self._scale_timesteps(t), **model_kwargs)
new_mean = (
p_mean_var["mean"].float() + p_mean_var["variance"] * gradient.float()
)
return new_mean
def condition_mean_with_grad(self, cond_fn, p_mean_var, x, t, model_kwargs=None):
"""
Compute the mean for the previous step, given a function cond_fn that
computes the gradient of a conditional log probability with respect to
x. In particular, cond_fn computes grad(log(p(y|x))), and we want to
condition on y.
This uses the conditioning strategy from Sohl-Dickstein et al. (2015).
"""
gradient = cond_fn(x, t, p_mean_var, **model_kwargs)
new_mean = (
p_mean_var["mean"].float() + p_mean_var["variance"] * gradient.float()
)
return new_mean
def condition_score(self, cond_fn, p_mean_var, x, t, model_kwargs=None):
"""
Compute what the p_mean_variance output would have been, should the
model's score function be conditioned by cond_fn.
See condition_mean() for details on cond_fn.
Unlike condition_mean(), this instead uses the conditioning strategy
from Song et al (2020).
"""
alpha_bar = _extract_into_tensor(self.alphas_cumprod, t, x.shape)
eps = self._predict_eps_from_xstart(x, t, p_mean_var["pred_xstart"])
eps = eps - (1 - alpha_bar).sqrt() * cond_fn(
x, self._scale_timesteps(t), **model_kwargs
)
out = p_mean_var.copy()
out["pred_xstart"] = self._predict_xstart_from_eps(x, t, eps)
out["mean"], _, _ = self.q_posterior_mean_variance(
x_start=out["pred_xstart"], x_t=x, t=t
)
return out
def condition_score_with_grad(self, cond_fn, p_mean_var, x, t, model_kwargs=None):
"""
Compute what the p_mean_variance output would have been, should the
model's score function be conditioned by cond_fn.
See condition_mean() for details on cond_fn.
Unlike condition_mean(), this instead uses the conditioning strategy
from Song et al (2020).
"""
alpha_bar = _extract_into_tensor(self.alphas_cumprod, t, x.shape)
eps = self._predict_eps_from_xstart(x, t, p_mean_var["pred_xstart"])
eps = eps - (1 - alpha_bar).sqrt() * cond_fn(
x, t, p_mean_var, **model_kwargs
)
out = p_mean_var.copy()
out["pred_xstart"] = self._predict_xstart_from_eps(x, t, eps)
out["mean"], _, _ = self.q_posterior_mean_variance(
x_start=out["pred_xstart"], x_t=x, t=t
)
return out
def p_sample(
self,
model,
x,
t,
clip_denoised=True,
denoised_fn=None,
cond_fn=None,
model_kwargs=None,
):
"""
Sample x_{t-1} from the model at the given timestep.
:param model: the model to sample from.
:param x: the current tensor at x_{t-1}.
:param t: the value of t, starting at 0 for the first diffusion step.
:param clip_denoised: if True, clip the x_start prediction to [-1, 1].
:param denoised_fn: if not None, a function which applies to the
x_start prediction before it is used to sample.
:param cond_fn: if not None, this is a gradient function that acts
similarly to the model.
:param model_kwargs: if not None, a dict of extra keyword arguments to
pass to the model. This can be used for conditioning.
:return: a dict containing the following keys:
- 'sample': a random sample from the model.
- 'pred_xstart': a prediction of x_0.
"""
out = self.p_mean_variance(
model,
x,
t,
clip_denoised=clip_denoised,
denoised_fn=denoised_fn,
model_kwargs=model_kwargs,
)
noise = th.randn_like(x)
nonzero_mask = (
(t != 0).float().view(-1, *([1] * (len(x.shape) - 1)))
) # no noise when t == 0
if cond_fn is not None:
out["mean"] = self.condition_mean(
cond_fn, out, x, t, model_kwargs=model_kwargs
)
sample = out["mean"] + nonzero_mask * th.exp(0.5 * out["log_variance"]) * noise
return {"sample": sample, "pred_xstart": out["pred_xstart"]}
def p_sample_with_grad(
self,
model,
x,
t,
clip_denoised=True,
denoised_fn=None,
cond_fn=None,
model_kwargs=None,
):
"""
Sample x_{t-1} from the model at the given timestep.
:param model: the model to sample from.
:param x: the current tensor at x_{t-1}.
:param t: the value of t, starting at 0 for the first diffusion step.
:param clip_denoised: if True, clip the x_start prediction to [-1, 1].
:param denoised_fn: if not None, a function which applies to the
x_start prediction before it is used to sample.
:param cond_fn: if not None, this is a gradient function that acts
similarly to the model.
:param model_kwargs: if not None, a dict of extra keyword arguments to
pass to the model. This can be used for conditioning.
:return: a dict containing the following keys:
- 'sample': a random sample from the model.
- 'pred_xstart': a prediction of x_0.
"""
with th.enable_grad():
x = x.detach().requires_grad_()
out = self.p_mean_variance(
model,
x,
t,
clip_denoised=clip_denoised,
denoised_fn=denoised_fn,
model_kwargs=model_kwargs,
)
noise = th.randn_like(x)
nonzero_mask = (
(t != 0).float().view(-1, *([1] * (len(x.shape) - 1)))
) # no noise when t == 0
if cond_fn is not None:
out["mean"] = self.condition_mean_with_grad(
cond_fn, out, x, t, model_kwargs=model_kwargs
)
sample = out["mean"] + nonzero_mask * th.exp(0.5 * out["log_variance"]) * noise
return {"sample": sample, "pred_xstart": out["pred_xstart"].detach()}
def p_sample_loop(
self,
model,
shape,
noise=None,
clip_denoised=True,
denoised_fn=None,
cond_fn=None,
model_kwargs=None,
device=None,
progress=False,
skip_timesteps=0,
init_image=None,
randomize_class=False,
cond_fn_with_grad=False,
):
"""
Generate samples from the model.
:param model: the model module.
:param shape: the shape of the samples, (N, C, H, W).
:param noise: if specified, the noise from the encoder to sample.
Should be of the same shape as `shape`.
:param clip_denoised: if True, clip x_start predictions to [-1, 1].
:param denoised_fn: if not None, a function which applies to the
x_start prediction before it is used to sample.
:param cond_fn: if not None, this is a gradient function that acts
similarly to the model.
:param model_kwargs: if not None, a dict of extra keyword arguments to
pass to the model. This can be used for conditioning.
:param device: if specified, the device to create the samples on.
If not specified, use a model parameter's device.
:param progress: if True, show a tqdm progress bar.
:return: a non-differentiable batch of samples.
"""
final = None
for sample in self.p_sample_loop_progressive(
model,
shape,
noise=noise,
clip_denoised=clip_denoised,
denoised_fn=denoised_fn,
cond_fn=cond_fn,
model_kwargs=model_kwargs,
device=device,
progress=progress,
skip_timesteps=skip_timesteps,
init_image=init_image,
randomize_class=randomize_class,
cond_fn_with_grad=cond_fn_with_grad,
):
final = sample
return final["sample"]
def p_sample_loop_progressive(
self,
model,
shape,
noise=None,
clip_denoised=True,
denoised_fn=None,
cond_fn=None,
model_kwargs=None,
device=None,
progress=False,
skip_timesteps=0,
init_image=None,
randomize_class=False,
cond_fn_with_grad=False,
):
"""
Generate samples from the model and yield intermediate samples from
each timestep of diffusion.
Arguments are the same as p_sample_loop().
Returns a generator over dicts, where each dict is the return value of
p_sample().
"""
if device is None:
device = next(model.parameters()).device
assert isinstance(shape, (tuple, list))
if noise is not None:
img = noise
else:
img = th.randn(*shape, device=device)
if skip_timesteps and init_image is None:
init_image = th.zeros_like(img)
indices = list(range(self.num_timesteps - skip_timesteps))[::-1]
if init_image is not None:
my_t = th.ones([shape[0]], device=device, dtype=th.long) * indices[0]
img = self.q_sample(init_image, my_t, img)
if progress:
# Lazy import so that we don't depend on tqdm.
from tqdm.auto import tqdm
indices = tqdm(indices, desc="Steps")
for i in indices:
t = th.tensor([i] * shape[0], device=device)
if randomize_class and 'y' in model_kwargs:
model_kwargs['y'] = th.randint(low=0, high=model.num_classes,
size=model_kwargs['y'].shape,
device=model_kwargs['y'].device)
with th.no_grad():
sample_fn = self.p_sample_with_grad if cond_fn_with_grad else self.p_sample
out = sample_fn(
model,
img,
t,
clip_denoised=clip_denoised,
denoised_fn=denoised_fn,
cond_fn=cond_fn,
model_kwargs=model_kwargs,
)
yield out
img = out["sample"]
def ddim_sample(
self,
model,
x,
t,
clip_denoised=True,
denoised_fn=None,
cond_fn=None,
model_kwargs=None,
eta=0.0,
inpainting_mode=False,
orig_img=None,
mask_inpaint=None,
):
"""
Sample x_{t-1} from the model using DDIM.
Same usage as p_sample().
"""
alpha_bar = _extract_into_tensor(self.alphas_cumprod, t, x.shape)
if inpainting_mode:
noised_orig_img = th.sqrt(alpha_bar) * orig_img + \
th.sqrt(1 - alpha_bar) * th.randn_like(x)
# noised_orig_img_pil = TF.to_pil_image(noised_orig_img[0].add(1).div(2).clamp(0, 1))
# noised_orig_img_pil.save(f'/content/drive/MyDrive/AI/Disco_Diffusion/images_out/InpaintingTest/inpainting_dump/noised_orig_{t[0].item()}.png')
x = (1 - mask_inpaint) * noised_orig_img + mask_inpaint * x
# mixed_x = TF.to_pil_image(x[0].add(1).div(2).clamp(0, 1))
# mixed_x.save(f'/content/drive/MyDrive/AI/Disco_Diffusion/images_out/InpaintingTest/inpainting_dump/mixed_x_{t[0].item()}.png')
out_orig = self.p_mean_variance(
model,
x,
t,
clip_denoised=clip_denoised,
denoised_fn=denoised_fn,
model_kwargs=model_kwargs,
)
if cond_fn is not None:
out = self.condition_score(cond_fn, out_orig, x, t, model_kwargs=model_kwargs)
else:
out = out_orig
# Usually our model outputs epsilon, but we re-derive it
# in case we used x_start or x_prev prediction.
eps = self._predict_eps_from_xstart(x, t, out["pred_xstart"])
alpha_bar_prev = _extract_into_tensor(self.alphas_cumprod_prev, t, x.shape)
sigma = (
eta * th.sqrt((1 - alpha_bar_prev) / (1 - alpha_bar)) * th.sqrt(1 - alpha_bar / alpha_bar_prev)
)
        # Equation 12 of the DDIM paper (Song et al., 2020):
        # x_{t-1} = sqrt(alpha_bar_prev) * x0_hat
        #           + sqrt(1 - alpha_bar_prev - sigma^2) * eps + sigma * z.
noise = th.randn_like(x)
mean_pred = (
out["pred_xstart"] * th.sqrt(alpha_bar_prev) + th.sqrt(1 - alpha_bar_prev - sigma ** 2) * eps
)
nonzero_mask = (
(t != 0).float().view(-1, *([1] * (len(x.shape) - 1)))
) # no noise when t == 0
sample = mean_pred + nonzero_mask * sigma * noise
return {"sample": sample, "pred_xstart": out_orig["pred_xstart"]}
def ddim_sample_with_grad(
self,
model,
x,
t,
clip_denoised=True,
denoised_fn=None,
cond_fn=None,
model_kwargs=None,
eta=0.0,
):
"""
Sample x_{t-1} from the model using DDIM.
Same usage as p_sample().
"""
with th.enable_grad():
x = x.detach().requires_grad_()
out_orig = self.p_mean_variance(
model,
x,
t,
clip_denoised=clip_denoised,
denoised_fn=denoised_fn,
model_kwargs=model_kwargs,
)
if cond_fn is not None:
out = self.condition_score_with_grad(cond_fn, out_orig, x, t,
model_kwargs=model_kwargs)
else:
out = out_orig
out["pred_xstart"] = out["pred_xstart"].detach()
# Usually our model outputs epsilon, but we re-derive it
# in case we used x_start or x_prev prediction.
eps = self._predict_eps_from_xstart(x, t, out["pred_xstart"])
alpha_bar = _extract_into_tensor(self.alphas_cumprod, t, x.shape)
alpha_bar_prev = _extract_into_tensor(self.alphas_cumprod_prev, t, x.shape)
sigma = (
eta * th.sqrt((1 - alpha_bar_prev) / (1 - alpha_bar)) * th.sqrt(1 - alpha_bar / alpha_bar_prev)
)
        # Equation 12 of the DDIM paper (Song et al., 2020).
noise = th.randn_like(x)
mean_pred = (
out["pred_xstart"] * th.sqrt(alpha_bar_prev) + th.sqrt(1 - alpha_bar_prev - sigma ** 2) * eps
)
nonzero_mask = (
(t != 0).float().view(-1, *([1] * (len(x.shape) - 1)))
) # no noise when t == 0
sample = mean_pred + nonzero_mask * sigma * noise
return {"sample": sample, "pred_xstart": out_orig["pred_xstart"].detach()}
def ddim_reverse_sample(
self,
model,
x,
t,
clip_denoised=True,
denoised_fn=None,
model_kwargs=None,
eta=0.0,
):
"""
Sample x_{t+1} from the model using DDIM reverse ODE.
"""
assert eta == 0.0, "Reverse ODE only for deterministic path"
out = self.p_mean_variance(
model,
x,
t,
clip_denoised=clip_denoised,
denoised_fn=denoised_fn,
model_kwargs=model_kwargs,
)
# Usually our model outputs epsilon, but we re-derive it
# in case we used x_start or x_prev prediction.
eps = (
            _extract_into_tensor(self.sqrt_recip_alphas_cumprod, t, x.shape) * x
            - out["pred_xstart"]
        ) / _extract_into_tensor(self.sqrt_recipm1_alphas_cumprod, t, x.shape)
alpha_bar_next = _extract_into_tensor(self.alphas_cumprod_next, t, x.shape)
        # Equation 12 of the DDIM paper, run in reverse (deterministic, sigma = 0)
mean_pred = (
out["pred_xstart"] * th.sqrt(alpha_bar_next) + th.sqrt(1 - alpha_bar_next) * eps
)
return {"sample": mean_pred, "pred_xstart": out["pred_xstart"]}
def ddim_sample_loop(
self,
model,
shape,
noise=None,
clip_denoised=True,
denoised_fn=None,
cond_fn=None,
model_kwargs=None,
device=None,
progress=False,
eta=0.0,
skip_timesteps=0,
init_image=None,
randomize_class=False,
cond_fn_with_grad=False,
):
"""
Generate samples from the model using DDIM.
Same usage as p_sample_loop().
"""
final = None
for sample in self.ddim_sample_loop_progressive(
model,
shape,
noise=noise,
clip_denoised=clip_denoised,
denoised_fn=denoised_fn,
cond_fn=cond_fn,
model_kwargs=model_kwargs,
device=device,
progress=progress,
eta=eta,
skip_timesteps=skip_timesteps,
init_image=init_image,
randomize_class=randomize_class,
cond_fn_with_grad=cond_fn_with_grad,
):
final = sample
return final["sample"]
def ddim_sample_loop_progressive(
self,
model,
shape,
noise=None,
clip_denoised=True,
denoised_fn=None,
cond_fn=None,
model_kwargs=None,
device=None,
progress=False,
eta=0.0,
skip_timesteps=0,
init_image=None,
randomize_class=False,
cond_fn_with_grad=False,
transformation_fn=None,
transformation_percent=[],
inpainting_mode=False,
mask_inpaint=None,
skip_timesteps_orig=None
):
"""
Use DDIM to sample from the model and yield intermediate samples from
each timestep of DDIM.
Same usage as p_sample_loop_progressive().
"""
if device is None:
device = next(model.parameters()).device
assert isinstance(shape, (tuple, list))
if noise is not None:
img = noise
else:
img = th.randn(*shape, device=device)
if skip_timesteps and init_image is None:
init_image = th.zeros_like(img)
indices = list(range(self.num_timesteps - skip_timesteps))[::-1]
transformation_steps = [int(len(indices) * (1 - i)) for i in transformation_percent]
if init_image is not None:
my_t = th.ones([shape[0]], device=device, dtype=th.long) * indices[0]
img = self.q_sample(init_image, my_t, img)
if progress:
# Lazy import so that we don't depend on tqdm.
from tqdm.auto import tqdm
indices = tqdm(indices, desc="Steps")
if inpainting_mode and skip_timesteps_orig is None:
skip_timesteps_orig = self.num_timesteps
for i in indices:
t = th.tensor([i] * shape[0], device=device)
if randomize_class and 'y' in model_kwargs:
model_kwargs['y'] = th.randint(low=0, high=model.num_classes,
size=model_kwargs['y'].shape,
device=model_kwargs['y'].device)
with th.no_grad():
if i in transformation_steps and transformation_fn is not None:
img = transformation_fn(img)
sample_fn = self.ddim_sample_with_grad if cond_fn_with_grad else self.ddim_sample
if inpainting_mode \
and i >= self.num_timesteps - skip_timesteps_orig \
and not cond_fn_with_grad:
out = sample_fn(
model,
img,
t,
clip_denoised=clip_denoised,
denoised_fn=denoised_fn,
cond_fn=cond_fn,
model_kwargs=model_kwargs,
eta=eta,
inpainting_mode=inpainting_mode,
orig_img=init_image,
mask_inpaint=mask_inpaint,
)
else:
out = sample_fn(
model,
img,
t,
clip_denoised=clip_denoised,
denoised_fn=denoised_fn,
cond_fn=cond_fn,
model_kwargs=model_kwargs,
eta=eta,
)
yield out
img = out["sample"]
def plms_sample(
self,
model,
x,
t,
clip_denoised=True,
denoised_fn=None,
cond_fn=None,
model_kwargs=None,
cond_fn_with_grad=False,
order=2,
old_out=None,
):
"""
Sample x_{t-1} from the model using Pseudo Linear Multistep.
Same usage as p_sample().
"""
        if not isinstance(order, int) or not 1 <= order <= 4:
            raise ValueError('order is invalid (should be an int from 1-4).')
def get_model_output(x, t):
with th.set_grad_enabled(cond_fn_with_grad and cond_fn is not None):
x = x.detach().requires_grad_() if cond_fn_with_grad else x
out_orig = self.p_mean_variance(
model,
x,
t,
clip_denoised=clip_denoised,
denoised_fn=denoised_fn,
model_kwargs=model_kwargs,
)
if cond_fn is not None:
if cond_fn_with_grad:
out = self.condition_score_with_grad(cond_fn, out_orig, x, t, model_kwargs=model_kwargs)
x = x.detach()
else:
out = self.condition_score(cond_fn, out_orig, x, t, model_kwargs=model_kwargs)
else:
out = out_orig
# Usually our model outputs epsilon, but we re-derive it
# in case we used x_start or x_prev prediction.
eps = self._predict_eps_from_xstart(x, t, out["pred_xstart"])
return eps, out, out_orig
# alpha_bar = _extract_into_tensor(self.alphas_cumprod, t, x.shape)
alpha_bar_prev = _extract_into_tensor(self.alphas_cumprod_prev, t, x.shape)
eps, out, out_orig = get_model_output(x, t)
if order > 1 and old_out is None:
# Pseudo Improved Euler
old_eps = [eps]
mean_pred = out["pred_xstart"] * th.sqrt(alpha_bar_prev) + th.sqrt(1 - alpha_bar_prev) * eps
eps_2, _, _ = get_model_output(mean_pred, t - 1)
eps_prime = (eps + eps_2) / 2
pred_prime = self._predict_xstart_from_eps(x, t, eps_prime)
mean_pred = pred_prime * th.sqrt(alpha_bar_prev) + th.sqrt(1 - alpha_bar_prev) * eps_prime
else:
# Pseudo Linear Multistep (Adams-Bashforth)
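            # The weights below are the standard Adams-Bashforth coefficients:
            # AB2 = (3, -1)/2, AB3 = (23, -16, 5)/12, AB4 = (55, -59, 37, -9)/24.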
old_eps = old_out["old_eps"]
old_eps.append(eps)
cur_order = min(order, len(old_eps))
if cur_order == 1:
eps_prime = old_eps[-1]
elif cur_order == 2:
eps_prime = (3 * old_eps[-1] - old_eps[-2]) / 2
elif cur_order == 3:
eps_prime = (23 * old_eps[-1] - 16 * old_eps[-2] + 5 * old_eps[-3]) / 12
elif cur_order == 4:
eps_prime = (55 * old_eps[-1] - 59 * old_eps[-2] + 37 * old_eps[-3] - 9 * old_eps[-4]) / 24
else:
raise RuntimeError('cur_order is invalid.')
pred_prime = self._predict_xstart_from_eps(x, t, eps_prime)
mean_pred = pred_prime * th.sqrt(alpha_bar_prev) + th.sqrt(1 - alpha_bar_prev) * eps_prime
if len(old_eps) >= order:
old_eps.pop(0)
nonzero_mask = (t != 0).float().view(-1, *([1] * (len(x.shape) - 1)))
sample = mean_pred * nonzero_mask + out["pred_xstart"] * (1 - nonzero_mask)
return {"sample": sample, "pred_xstart": out_orig["pred_xstart"], "old_eps": old_eps}
def plms_sample_loop(
self,
model,
shape,
noise=None,
clip_denoised=True,
denoised_fn=None,
cond_fn=None,
model_kwargs=None,
device=None,
progress=False,
skip_timesteps=0,
init_image=None,
randomize_class=False,
cond_fn_with_grad=False,
order=2,
):
"""
Generate samples from the model using Pseudo Linear Multistep.
Same usage as p_sample_loop().
"""
final = None
for sample in self.plms_sample_loop_progressive(
model,
shape,
noise=noise,
clip_denoised=clip_denoised,
denoised_fn=denoised_fn,
cond_fn=cond_fn,
model_kwargs=model_kwargs,
device=device,
progress=progress,
skip_timesteps=skip_timesteps,
init_image=init_image,
randomize_class=randomize_class,
cond_fn_with_grad=cond_fn_with_grad,
order=order,
):
final = sample
return final["sample"]
def plms_sample_loop_progressive(
self,
model,
shape,
noise=None,
clip_denoised=True,
denoised_fn=None,
cond_fn=None,
model_kwargs=None,
device=None,
progress=False,
skip_timesteps=0,
init_image=None,
randomize_class=False,
cond_fn_with_grad=False,
order=2,
):
"""
Use PLMS to sample from the model and yield intermediate samples from each
timestep of PLMS.
Same usage as p_sample_loop_progressive().
"""
if device is None:
device = next(model.parameters()).device
assert isinstance(shape, (tuple, list))
if noise is not None:
img = noise
else:
img = th.randn(*shape, device=device)
if skip_timesteps and init_image is None:
init_image = th.zeros_like(img)
indices = list(range(self.num_timesteps - skip_timesteps))[::-1]
if init_image is not None:
my_t = th.ones([shape[0]], device=device, dtype=th.long) * indices[0]
img = self.q_sample(init_image, my_t, img)
if progress:
# Lazy import so that we don't depend on tqdm.
from tqdm.auto import tqdm
indices = tqdm(indices, desc="Steps")
old_out = None
for i in indices:
t = th.tensor([i] * shape[0], device=device)
if randomize_class and 'y' in model_kwargs:
model_kwargs['y'] = th.randint(low=0, high=model.num_classes,
size=model_kwargs['y'].shape,
device=model_kwargs['y'].device)
with th.no_grad():
out = self.plms_sample(
model,
img,
t,
clip_denoised=clip_denoised,
denoised_fn=denoised_fn,
cond_fn=cond_fn,
model_kwargs=model_kwargs,
cond_fn_with_grad=cond_fn_with_grad,
order=order,
old_out=old_out,
)
yield out
old_out = out
img = out["sample"]
def _vb_terms_bpd(
self, model, x_start, x_t, t, clip_denoised=True, model_kwargs=None
):
"""
Get a term for the variational lower-bound.
The resulting units are bits (rather than nats, as one might expect).
This allows for comparison to other papers.
:return: a dict with the following keys:
- 'output': a shape [N] tensor of NLLs or KLs.
- 'pred_xstart': the x_0 predictions.
"""
true_mean, _, true_log_variance_clipped = self.q_posterior_mean_variance(
x_start=x_start, x_t=x_t, t=t
)
out = self.p_mean_variance(
model, x_t, t, clip_denoised=clip_denoised, model_kwargs=model_kwargs
)
kl = normal_kl(
true_mean, true_log_variance_clipped, out["mean"], out["log_variance"]
)
kl = mean_flat(kl) / np.log(2.0)
decoder_nll = -discretized_gaussian_log_likelihood(
x_start, means=out["mean"], log_scales=0.5 * out["log_variance"]
)
assert decoder_nll.shape == x_start.shape
decoder_nll = mean_flat(decoder_nll) / np.log(2.0)
# At the first timestep return the decoder NLL,
# otherwise return KL(q(x_{t-1}|x_t,x_0) || p(x_{t-1}|x_t))
output = th.where((t == 0), decoder_nll, kl)
return {"output": output, "pred_xstart": out["pred_xstart"]}
def training_losses(self, model, x_start, t, model_kwargs=None, noise=None):
"""
Compute training losses for a single timestep.
:param model: the model to evaluate loss on.
:param x_start: the [N x C x ...] tensor of inputs.
:param t: a batch of timestep indices.
:param model_kwargs: if not None, a dict of extra keyword arguments to
pass to the model. This can be used for conditioning.
:param noise: if specified, the specific Gaussian noise to try to remove.
:return: a dict with the key "loss" containing a tensor of shape [N].
Some mean or variance settings may also have other keys.
"""
if model_kwargs is None:
model_kwargs = {}
if noise is None:
noise = th.randn_like(x_start)
x_t = self.q_sample(x_start, t, noise=noise)
terms = {}
if self.loss_type == LossType.KL or self.loss_type == LossType.RESCALED_KL:
terms["loss"] = self._vb_terms_bpd(
model=model,
x_start=x_start,
x_t=x_t,
t=t,
clip_denoised=False,
model_kwargs=model_kwargs,
)["output"]
if self.loss_type == LossType.RESCALED_KL:
terms["loss"] *= self.num_timesteps
elif self.loss_type == LossType.MSE or self.loss_type == LossType.RESCALED_MSE:
model_output = model(x_t, self._scale_timesteps(t), **model_kwargs)
if self.model_var_type in [
ModelVarType.LEARNED,
ModelVarType.LEARNED_RANGE,
]:
B, C = x_t.shape[:2]
assert model_output.shape == (B, C * 2, *x_t.shape[2:])
model_output, model_var_values = th.split(model_output, C, dim=1)
# Learn the variance using the variational bound, but don't let
# it affect our mean prediction.
frozen_out = th.cat([model_output.detach(), model_var_values], dim=1)
terms["vb"] = self._vb_terms_bpd(
model=lambda *args, r=frozen_out: r,
x_start=x_start,
x_t=x_t,
t=t,
clip_denoised=False,
)["output"]
if self.loss_type == LossType.RESCALED_MSE:
# Divide by 1000 for equivalence with initial implementation.
# Without a factor of 1/1000, the VB term hurts the MSE term.
terms["vb"] *= self.num_timesteps / 1000.0
target = {
ModelMeanType.PREVIOUS_X: self.q_posterior_mean_variance(
x_start=x_start, x_t=x_t, t=t
)[0],
ModelMeanType.START_X: x_start,
ModelMeanType.EPSILON: noise,
}[self.model_mean_type]
assert model_output.shape == target.shape == x_start.shape
terms["mse"] = mean_flat((target - model_output) ** 2)
if "vb" in terms:
terms["loss"] = terms["mse"] + terms["vb"]
else:
terms["loss"] = terms["mse"]
else:
raise NotImplementedError(self.loss_type)
return terms
def _prior_bpd(self, x_start):
"""
Get the prior KL term for the variational lower-bound, measured in
bits-per-dim.
This term can't be optimized, as it only depends on the encoder.
:param x_start: the [N x C x ...] tensor of inputs.
:return: a batch of [N] KL values (in bits), one per batch element.
"""
batch_size = x_start.shape[0]
t = th.tensor([self.num_timesteps - 1] * batch_size, device=x_start.device)
qt_mean, _, qt_log_variance = self.q_mean_variance(x_start, t)
kl_prior = normal_kl(
mean1=qt_mean, logvar1=qt_log_variance, mean2=0.0, logvar2=0.0
)
return mean_flat(kl_prior) / np.log(2.0)
def calc_bpd_loop(self, model, x_start, clip_denoised=True, model_kwargs=None):
"""
Compute the entire variational lower-bound, measured in bits-per-dim,
as well as other related quantities.
:param model: the model to evaluate loss on.
:param x_start: the [N x C x ...] tensor of inputs.
:param clip_denoised: if True, clip denoised samples.
:param model_kwargs: if not None, a dict of extra keyword arguments to
pass to the model. This can be used for conditioning.
:return: a dict containing the following keys:
- total_bpd: the total variational lower-bound, per batch element.
- prior_bpd: the prior term in the lower-bound.
- vb: an [N x T] tensor of terms in the lower-bound.
- xstart_mse: an [N x T] tensor of x_0 MSEs for each timestep.
- mse: an [N x T] tensor of epsilon MSEs for each timestep.
"""
device = x_start.device
batch_size = x_start.shape[0]
vb = []
xstart_mse = []
mse = []
for t in list(range(self.num_timesteps))[::-1]:
t_batch = th.tensor([t] * batch_size, device=device)
noise = th.randn_like(x_start)
x_t = self.q_sample(x_start=x_start, t=t_batch, noise=noise)
# Calculate VLB term at the current timestep
with th.no_grad():
out = self._vb_terms_bpd(
model,
x_start=x_start,
x_t=x_t,
t=t_batch,
clip_denoised=clip_denoised,
model_kwargs=model_kwargs,
)
vb.append(out["output"])
xstart_mse.append(mean_flat((out["pred_xstart"] - x_start) ** 2))
eps = self._predict_eps_from_xstart(x_t, t_batch, out["pred_xstart"])
mse.append(mean_flat((eps - noise) ** 2))
vb = th.stack(vb, dim=1)
xstart_mse = th.stack(xstart_mse, dim=1)
mse = th.stack(mse, dim=1)
prior_bpd = self._prior_bpd(x_start)
total_bpd = vb.sum(dim=1) + prior_bpd
return {
"total_bpd": total_bpd,
"prior_bpd": prior_bpd,
"vb": vb,
"xstart_mse": xstart_mse,
"mse": mse,
}
def _extract_into_tensor(arr, timesteps, broadcast_shape):
"""
Extract values from a 1-D numpy array for a batch of indices.
:param arr: the 1-D numpy array.
:param timesteps: a tensor of indices into the array to extract.
:param broadcast_shape: a larger shape of K dimensions with the batch
dimension equal to the length of timesteps.
:return: a tensor of shape [batch_size, 1, ...] where the shape has K dims.
"""
res = th.from_numpy(arr).to(device=timesteps.device)[timesteps].float()
while len(res.shape) < len(broadcast_shape):
res = res[..., None]
return res.expand(broadcast_shape)
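# Example (a sketch): with arr of shape (T,), timesteps of shape (N,) and
# broadcast_shape (N, C, H, W), res has shape (N, 1, 1, 1) and is expanded to
# (N, C, H, W), so it broadcasts elementwise against image batches.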
| 50,680 | 37.482156 | 185 | py |
Fengshenbang-LM | Fengshenbang-LM-main/fengshen/examples/disco_project/guided_diffusion/guided_diffusion/respace.py | import numpy as np
import torch as th
from .gaussian_diffusion import GaussianDiffusion
def space_timesteps(num_timesteps, section_counts):
"""
Create a list of timesteps to use from an original diffusion process,
given the number of timesteps we want to take from equally-sized portions
of the original process.
For example, if there's 300 timesteps and the section counts are [10,15,20]
then the first 100 timesteps are strided to be 10 timesteps, the second 100
are strided to be 15 timesteps, and the final 100 are strided to be 20.
If the stride is a string starting with "ddim", then the fixed striding
from the DDIM paper is used, and only one section is allowed.
:param num_timesteps: the number of diffusion steps in the original
process to divide up.
:param section_counts: either a list of numbers, or a string containing
comma-separated numbers, indicating the step count
per section. As a special case, use "ddimN" where N
is a number of steps to use the striding from the
DDIM paper.
:return: a set of diffusion steps from the original process to use.
"""
if isinstance(section_counts, str):
if section_counts.startswith("ddim"):
desired_count = int(section_counts[len("ddim"):])
for i in range(1, num_timesteps):
if len(range(0, num_timesteps, i)) == desired_count:
return set(range(0, num_timesteps, i))
raise ValueError(
f"cannot create exactly {num_timesteps} steps with an integer stride"
)
section_counts = [int(x) for x in section_counts.split(",")]
size_per = num_timesteps // len(section_counts)
extra = num_timesteps % len(section_counts)
start_idx = 0
all_steps = []
for i, section_count in enumerate(section_counts):
size = size_per + (1 if i < extra else 0)
if size < section_count:
raise ValueError(
f"cannot divide section of {size} steps into {section_count}"
)
if section_count <= 1:
frac_stride = 1
else:
frac_stride = (size - 1) / (section_count - 1)
cur_idx = 0.0
taken_steps = []
for _ in range(section_count):
taken_steps.append(start_idx + round(cur_idx))
cur_idx += frac_stride
all_steps += taken_steps
start_idx += size
return set(all_steps)
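# Doctest-style sanity checks (illustrative, matching the docstring example):
# >>> len(space_timesteps(300, [10, 15, 20]))
# 45
# >>> len(space_timesteps(1000, "ddim50"))
# 50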
class SpacedDiffusion(GaussianDiffusion):
"""
A diffusion process which can skip steps in a base diffusion process.
:param use_timesteps: a collection (sequence or set) of timesteps from the
original diffusion process to retain.
:param kwargs: the kwargs to create the base diffusion process.
"""
def __init__(self, use_timesteps, **kwargs):
self.use_timesteps = set(use_timesteps)
self.timestep_map = []
self.original_num_steps = len(kwargs["betas"])
base_diffusion = GaussianDiffusion(**kwargs) # pylint: disable=missing-kwoa
last_alpha_cumprod = 1.0
new_betas = []
for i, alpha_cumprod in enumerate(base_diffusion.alphas_cumprod):
if i in self.use_timesteps:
new_betas.append(1 - alpha_cumprod / last_alpha_cumprod)
last_alpha_cumprod = alpha_cumprod
self.timestep_map.append(i)
kwargs["betas"] = np.array(new_betas)
super().__init__(**kwargs)
def p_mean_variance(
self, model, *args, **kwargs
): # pylint: disable=signature-differs
return super().p_mean_variance(self._wrap_model(model), *args, **kwargs)
def training_losses(
self, model, *args, **kwargs
): # pylint: disable=signature-differs
return super().training_losses(self._wrap_model(model), *args, **kwargs)
def condition_mean(self, cond_fn, *args, **kwargs):
return super().condition_mean(self._wrap_model(cond_fn), *args, **kwargs)
def condition_score(self, cond_fn, *args, **kwargs):
return super().condition_score(self._wrap_model(cond_fn), *args, **kwargs)
def _wrap_model(self, model):
if isinstance(model, _WrappedModel):
return model
return _WrappedModel(
model, self.timestep_map, self.rescale_timesteps, self.original_num_steps
)
def _scale_timesteps(self, t):
# Scaling is done by the wrapped model.
return t
class _WrappedModel:
def __init__(self, model, timestep_map, rescale_timesteps, original_num_steps):
self.model = model
self.timestep_map = timestep_map
self.rescale_timesteps = rescale_timesteps
self.original_num_steps = original_num_steps
def __call__(self, x, ts, **kwargs):
map_tensor = th.tensor(self.timestep_map, device=ts.device, dtype=ts.dtype)
new_ts = map_tensor[ts]
if self.rescale_timesteps:
new_ts = new_ts.float() * (1000.0 / self.original_num_steps)
return self.model(x, new_ts, **kwargs)
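# Minimal usage sketch (illustrative; assumes the helpers defined in
# gaussian_diffusion.py, from which this module already imports):
#
# from .gaussian_diffusion import (
#     get_named_beta_schedule, ModelMeanType, ModelVarType, LossType)
# diffusion = SpacedDiffusion(
#     use_timesteps=space_timesteps(1000, "ddim50"),
#     betas=get_named_beta_schedule("linear", 1000),
#     model_mean_type=ModelMeanType.EPSILON,
#     model_var_type=ModelVarType.LEARNED_RANGE,
#     loss_type=LossType.MSE,
# )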
| 5,192 | 39.255814 | 85 | py |
Fengshenbang-LM | Fengshenbang-LM-main/fengshen/examples/pretrain_erlangshen_deberta_v2/pretrain_deberta.py | from dataclasses import dataclass
from transformers import (
DebertaV2Config,
DebertaV2ForMaskedLM,
AutoTokenizer,
)
from pytorch_lightning import (
LightningModule,
Trainer,
)
from pytorch_lightning.callbacks import (
LearningRateMonitor,
)
import argparse
import torch
import os
import numpy as np
from fengshen.data.universal_datamodule import UniversalDataModule
from fengshen.data.data_utils.truncate_utils import truncate_segments
from fengshen.data.data_utils.token_type_utils import create_tokens_and_tokentypes
from fengshen.data.data_utils.mask_utils import create_masked_lm_predictions
from fengshen.models.model_utils import (
add_module_args,
configure_optimizers,
get_total_steps,
)
from fengshen.utils.universal_checkpoint import UniversalCheckpoint
from torch.utils.data._utils.collate import default_collate
SHOW_DATA = False
@dataclass
class DeBERTaV2Collator:
'''
    Turns raw inputs into samples, i.e. the final model inputs.
    The main processing logic lives in __call__.
    Includes the masking task, using Whole Word Mask.
'''
    tokenizer: object  # the tokenizer used for word segmentation
    max_seq_length: int = 512
    masked_lm_prob: float = 0.15
content_key: str = 'text'
    # Some preprocessing steps
def setup(self):
self.np_rng = np.random.RandomState(seed=42)
inv_vocab = {v: k for k, v in self.tokenizer.vocab.items()}
self.vocab_id_list = list(inv_vocab.keys())
self.vocab_id_to_token_dict = inv_vocab
import jieba_fast
self.zh_tokenizer = jieba_fast.lcut
def __call__(self, samples):
'''
        samples: each sample looks like {"text": "hello world"}
'''
model_inputs = []
for s in samples:
tokenized_sentences = self.tokenizer.convert_tokens_to_ids(
self.tokenizer.tokenize(s[self.content_key]))
if len(tokenized_sentences) == 0:
                print('found empty sentence')
continue
tokens_a = tokenized_sentences
            # max_seq_length - 3 because [CLS] [SEP] [SEP] still need to be added
if len(tokens_a) == 0:
continue
_ = truncate_segments(tokens_a, [], len(tokens_a),
0, self.max_seq_length-3, self.np_rng)
# Build tokens and toketypes.
tokens, tokentypes = create_tokens_and_tokentypes(tokens_a, [],
self.tokenizer.cls_token_id, self.tokenizer.sep_token_id)
# Masking.
max_predictions_per_seq = self.masked_lm_prob * len(tokens)
(tokens, masked_positions, masked_labels, _, _) = create_masked_lm_predictions(
tokens, self.vocab_id_list, self.vocab_id_to_token_dict, self.masked_lm_prob,
self.tokenizer.cls_token_id, self.tokenizer.sep_token_id, self.tokenizer.mask_token_id,
max_predictions_per_seq, self.np_rng,
masking_style='bert',
zh_tokenizer=self.zh_tokenizer)
# Some checks.
num_tokens = len(tokens)
padding_length = self.max_seq_length - num_tokens
assert padding_length >= 0
assert len(tokentypes) == num_tokens
assert len(masked_positions) == len(masked_labels)
# Tokens and token types.
filler = [self.tokenizer.pad_token_id] * padding_length
tokens_np = np.array(tokens + filler, dtype=np.int64)
tokentypes_np = np.array(tokentypes + filler, dtype=np.int64)
# Padding mask.
padding_mask_np = np.array([1] * num_tokens + [0] * padding_length,
dtype=np.int64)
# Lables and loss mask.
labels = [-100] * self.max_seq_length
for i in range(len(masked_positions)):
assert masked_positions[i] < num_tokens
labels[masked_positions[i]] = masked_labels[i]
labels_np = np.array(labels, dtype=np.int64)
model_inputs.append(
{
'input_ids': tokens_np,
'attention_mask': padding_mask_np,
'token_type_ids': tokentypes_np,
'labels': labels_np,
}
)
return default_collate(model_inputs)
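# Minimal usage sketch (hypothetical model path; illustrative only):
# tokenizer = AutoTokenizer.from_pretrained('IDEA-CCNL/Erlangshen-DeBERTa-v2-97M-Chinese')
# collator = DeBERTaV2Collator(tokenizer=tokenizer, max_seq_length=512,
#                              masked_lm_prob=0.15, content_key='text')
# collator.setup()
# batch = collator([{'text': '今天天气不错'}])  # -> input_ids / attention_mask / token_type_ids / labels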
class ErlangshenDeBERTaV2(LightningModule):
@staticmethod
def add_module_specific_args(parent_parser):
parser = parent_parser.add_argument_group('Erlangshen Bert')
parser.add_argument('--masked_lm_prob', type=float, default=0.15)
parser.add_argument('--max_seq_length', type=int, default=512)
parser.add_argument('--sample_content_key', type=str, default='text')
return parent_parser
def __init__(self, args, tokenizer, **kwargs) -> None:
super().__init__()
self.save_hyperparameters(args)
config = DebertaV2Config.from_pretrained(args.model_path)
self.config = config
self.tokenizer = tokenizer
self.model = DebertaV2ForMaskedLM(config)
def setup(self, stage) -> None:
if stage == 'fit':
self.total_steps = get_total_steps(self.trainer, self.hparams)
print('Total steps: {}' .format(self.total_steps))
def configure_optimizers(self):
return configure_optimizers(self)
def forward(self, **batch):
return self.model(**batch)
def detokenize(self, token_ids):
toks = self.tokenizer.convert_ids_to_tokens(token_ids)
return self.tokenizer.convert_tokens_to_string(toks)
def comput_metrix(self, logits, labels):
y_pred = torch.argmax(logits, dim=-1)
y_pred = y_pred.view(size=(-1,))
y_true = labels.view(size=(-1,)).float()
corr = torch.eq(y_pred, y_true)
acc = torch.sum(corr.float())/labels.shape[0]
return acc
def training_step(self, batch, batch_idx):
if self.trainer.global_rank == 0:
global SHOW_DATA
if not SHOW_DATA:
print(self.config)
print(self.model)
SHOW_DATA = True
print('source: {}'.format(batch['input_ids'][0]))
print('target: {}'.format(batch['labels'][0]))
print('source: {}'.format(self.detokenize(batch['input_ids'][0])))
label_idx = batch['labels'][0] != -100
print('target: {}'.format(self.detokenize(
batch['labels'][0][label_idx])))
output = self(**batch)
self.log('train_loss', output.loss, sync_dist=True)
label_idx = batch['labels'] != -100
acc = self.comput_metrix(
output.logits[label_idx].view(-1, output.logits.size(-1)), batch['labels'][label_idx])
self.log('train_acc', acc, sync_dist=True)
return output.loss
def validation_step(self, batch, batch_idx):
output = self(**batch)
self.log('val_loss', output.loss, sync_dist=True)
return output.loss
def on_load_checkpoint(self, checkpoint) -> None:
        # Compatibility with older lightning versions, which reset the step count to 0 when resuming from a ckpt
global_step_offset = checkpoint["global_step"]
if 'global_samples' in checkpoint:
self.consumed_samples = checkpoint['global_samples']
self.trainer.fit_loop.epoch_loop._batches_that_stepped = global_step_offset
if __name__ == '__main__':
args_parser = argparse.ArgumentParser()
args_parser = add_module_args(args_parser)
args_parser = UniversalDataModule.add_data_specific_args(args_parser)
args_parser = Trainer.add_argparse_args(args_parser)
args_parser = ErlangshenDeBERTaV2.add_module_specific_args(args_parser)
args_parser = UniversalCheckpoint.add_argparse_args(args_parser)
args = args_parser.parse_args()
tokenizer = AutoTokenizer.from_pretrained(args.model_path)
collate_fn = DeBERTaV2Collator(
tokenizer=tokenizer,
max_seq_length=args.max_seq_length,
masked_lm_prob=args.masked_lm_prob,
content_key=args.sample_content_key,
)
collate_fn.setup()
data_module = UniversalDataModule(tokenizer=tokenizer, args=args, collate_fn=collate_fn)
print('data load complete')
model = ErlangshenDeBERTaV2(args, tokenizer=tokenizer)
print('model load complete')
lr_monitor = LearningRateMonitor(logging_interval='step')
checkpoint_callback = UniversalCheckpoint(args)
    # For compatibility: if the directory does not exist, drop this argument, otherwise an error is raised
if args.load_ckpt_path is not None and \
not os.path.exists(args.load_ckpt_path):
print('--------warning no checkpoint found--------, remove args')
args.load_ckpt_path = None
trainer = Trainer.from_argparse_args(args,
callbacks=[
lr_monitor,
checkpoint_callback])
trainer.fit(model, data_module, ckpt_path=args.load_ckpt_path)
| 8,886 | 37.97807 | 119 | py |
Fengshenbang-LM | Fengshenbang-LM-main/fengshen/examples/tcbert/example.py | import argparse
from fengshen.pipelines.tcbert import TCBertPipelines
from pytorch_lightning import seed_everything
def main():
seed_everything(123)
total_parser = argparse.ArgumentParser("Topic Classification")
total_parser = TCBertPipelines.piplines_args(total_parser)
args = total_parser.parse_args()
pretrained_model_path = 'IDEA-CCNL/Erlangshen-TCBert-110M-Classification-Chinese'
args.learning_rate = 2e-5
args.max_length = 512
args.max_epochs = 5
args.batchsize = 4
args.train = 'train'
args.default_root_dir = './'
    # args.gpus = 1  # Note: runs on the CPU as-is; uncomment to use a GPU (requires a matching GPU environment)
    args.fixed_lablen = 2  # Note: a fixed label length can be set; since per-sample label lengths differ, pick a moderate value
    train_data = [  # training data
{"content": "真正的放养教育,放的是孩子的思维,养的是孩子的习惯", "label": "故事"},
{"content": "《唐人街探案》捧红了王宝强跟刘昊然,唯独戏份不少的他发展最差", "label": "娱乐"},
{"content": "油价攀升 阿曼经济加速增长", "label": "财经"},
{"content": "日本男篮近期动作频频,中国队的未来劲敌会是他们吗?", "label": "体育"},
{"content": "教育部:坚决防止因撤并乡村小规模学校导致学生上学困难", "label": "教育"},
{"content": "LOL设计最完美的三个英雄,玩家们都很认可!", "label": "电竞"},
{"content": "上联:浅看红楼终是梦,怎么对下联?", "label": "文化"},
{"content": "楼市再出新政!北京部分限房价项目或转为共有产权房", "label": "房产"},
{"content": "企业怎样选云服务器?云服务器哪家比较好?", "label": "科技"},
{"content": "贝纳利的三缸车TRE899K、TRE1130K华丽转身", "label": "汽车"},
{"content": "如何评价:刘姝威的《严惩做空中国股市者》?", "label": "股票"},
{"content": "宁夏邀深圳市民共赴“寻找穿越”之旅", "label": "旅游"},
{"content": "日本自民党又一派系力挺安倍 称会竭尽全力", "label": "国际"},
{"content": "农村养老保险每年交5000,交满15年退休后能每月领多少钱?", "label": "农业"},
{"content": "国产舰载机首次现身,进度超过预期,将率先在滑跃航母测试", "label": "军事"}
]
    dev_data = [  # validation data
{"content": "西游记后传中,灵儿最爱的女人是谁?不是碧游!", "label": "故事"},
{"content": "小李子莱奥纳多有特别的提袋子技能,这些年他还有过哪些神奇的造型?", "label": "娱乐"},
{"content": "现在手上有钱是投资买房还是存钱,为什么?", "label": "财经"},
{"content": "迪卡侬的衣服值得购买吗?", "label": "体育"},
{"content": "黑龙江省旅游委在齐齐哈尔组织举办导游培训班", "label": "教育"},
{"content": "《王者荣耀》中,哪些英雄的大招最“废柴”?", "label": "电竞"},
{"content": "上交演绎马勒《复活》,用音乐带来抚慰和希望", "label": "文化"},
{"content": "All in服务业,58集团在租房、住房市场的全力以赋", "label": "房产"},
{"content": "为什么有的人宁愿选择骁龙660的X21,也不买骁龙845的小米MIX2S?", "label": "科技"},
{"content": "众泰大型SUV来袭,售13.98万,2.0T榨出231马力,汉兰达要危险了", "label": "汽车"},
{"content": "股票放量下趺,大资金出逃谁在接盘?", "label": "股票"},
{"content": "广西博白最大的特色是什么?", "label": "旅游"},
{"content": "特朗普退出《伊朗核协议》,对此你怎么看?", "label": "国际"},
{"content": "卖水果利润怎么样?", "label": "农业"},
{"content": "特种兵都是身材高大的猛男么?别再被电视骗了,超过1米8都不合格", "label": "军事"}
]
    test_data = [  # test data
{"content": "廖凡重出“江湖”再争影帝 亮相戛纳红毯霸气有型"},
{"content": "《绝地求生: 刺激战场》越玩越卡?竟是手机厂商没交“保护费”!"},
{"content": "买涡轮增压还是自然吸气车?今天终于有答案了!"},
]
    # label mapping: maps the raw labels to labels better suited to the prompt
prompt_label = {
"体育":"体育", "军事":"军事", "农业":"农业", "国际":"国际",
"娱乐":"娱乐", "房产":"房产", "故事":"故事", "教育":"教育",
"文化":"文化", "旅游":"旅游", "汽车":"汽车", "电竞":"电竞",
"科技":"科技", "股票":"股票", "财经":"财经"
}
    # different prompts affect model performance
#prompt = "这一句描述{}的内容如下:"
prompt = "下面是一则关于{}的新闻:"
model = TCBertPipelines(args, model_path=pretrained_model_path, nlabels=len(prompt_label))
if args.train:
model.train(train_data, dev_data, prompt, prompt_label)
result = model.predict(test_data, prompt, prompt_label)
for i, line in enumerate(result):
print({"content":test_data[i]["content"], "label":list(prompt_label.keys())[line]})
if __name__ == "__main__":
main()
| 3,693 | 41.45977 | 94 | py |
Fengshenbang-LM | Fengshenbang-LM-main/fengshen/examples/ziya_inference/hf_quantizatin_inference.py | """
This is a basic quantized-inference recipe built on Hugging Face's open-source accelerate framework.
The framework implements int8/int4 quantization as well as CPU or disk offload,
so large models can run on small devices with little memory.
For details see the wiki: http://wiki.team.idea.edu.cn/pages/viewpage.action?pageId=31464125
"""
import time
from transformers import AutoModelForCausalLM, AutoTokenizer
import bitsandbytes as bnb
from bitsandbytes.nn import Linear8bitLt
import torch
# The quantization path is integrated into the from_pretrained method
# device_map must be set when loading a quantized model
# The main quantization flags are load_in_8bit / load_in_4bit (documented on the latest main branch; transformers 4.29.2 has no 4-bit yet)
# More docs: https://huggingface.co/docs/accelerate/usage_guides/big_modeling
def load_model_source(model_path, load_in_8bit=True):
if load_in_8bit:
lm = AutoModelForCausalLM.from_pretrained(model_path, device_map='auto', load_in_8bit=load_in_8bit).eval()
else:
        lm = AutoModelForCausalLM.from_pretrained(model_path, device_map='auto', torch_dtype=torch.float16).eval()
tokenizer = AutoTokenizer.from_pretrained(model_path)
    # Check how much memory the loaded model occupies
    print(f'Model memory footprint: {lm.get_memory_footprint()/1024/1024/1024} GB')
    # Check how the model is sharded across devices
    print('Model device map:\n', lm.hf_device_map)
return lm, tokenizer
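# Hedged sketch of the 4-bit path mentioned above (needs a transformers
# release newer than 4.29.2; the kwargs follow the BitsAndBytesConfig docs
# and should be treated as assumptions if your version differs):
#
#   from transformers import BitsAndBytesConfig
#   bnb_config = BitsAndBytesConfig(load_in_4bit=True,
#                                   bnb_4bit_compute_dtype=torch.float16)
#   lm_4bit = AutoModelForCausalLM.from_pretrained(
#       model_path, device_map='auto', quantization_config=bnb_config)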
def decode_speed_test(lm, tokenizer, batch_size=1, generate_length=100, test_round=5):
    """
    Measure decoding speed.
    """
st = time.time()
text = ['中国的首都是'] * batch_size
input_ids = tokenizer(text, return_tensors='pt').input_ids.to(0)
for _ in range(test_round):
        out = lm.generate(input_ids, max_new_tokens=generate_length)
    time_cost = time.time() - st
    total_token_gen = batch_size * generate_length * test_round
    token_gen_speed = total_token_gen / time_cost
    per_token_time_cost = time_cost / total_token_gen * 1000
    info = f"""
    bs:{batch_size} max_new_tokens:{generate_length} test_round:{test_round}
    total tokens generated: {total_token_gen}
    speed: {token_gen_speed:.2f} token/sec
    token_time_cost: {per_token_time_cost:.2f} ms
"""
print(info)
return out, info
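# Worked example of the arithmetic above (hypothetical timing): batch_size=1,
# generate_length=100, test_round=5 emits 500 tokens in total; if the loop
# takes 25s, speed = 500/25 = 20 token/sec and
# per_token_time_cost = 25/500*1000 = 50 ms.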
def generate(text, max_new_tokens=128, do_sample=True, top_p=0.9, return_n=5):
text = f'<human>:{text.strip()}\n<bot>:'
input_ids = tokenizer(text, return_tensors='pt').input_ids.to(0)
out = lm.generate(input_ids,
max_new_tokens=max_new_tokens,
do_sample=do_sample,
top_p=top_p,
num_return_sequences=return_n)
seq = tokenizer.batch_decode(out)
return out, seq
if __name__ == '__main__':
model_path = '/cognitive_comp/common_data/Huggingface-Models/IDEA-CCNL/Ziya-LLaMA-13B-RLHF-V1'
lm, tokenizer = load_model_source(model_path)
# _, _ = decode_speed_test(lm, tokenizer)
    _, seq = generate('中国的首都是哪里?')
Fengshenbang-LM | Fengshenbang-LM-main/fengshen/examples/qa_t5/finetune_t5_cmrc.py | # -*- encoding: utf-8 -*-
'''
Copyright 2022 The International Digital Economy Academy (IDEA). CCNL team. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
@File : finetune_t5_cmrc.py
@Time : 2022/10/28 19:57
@Author : He Junqing
@Version : 1.0
@Contact : hejunqing@idea.edu.cn
@License : (C)Copyright 2022-2023, CCNL-IDEA
'''
# here put the import lib
import pytorch_lightning as pl
import os
import sys
import time
import torch
import argparse
from collections import Counter
from fengshen.utils.utils import chinese_char_tokenize
from fengshen.data.universal_datamodule import UniversalDataModule
from pytorch_lightning import Trainer, loggers
from pytorch_lightning.callbacks import LearningRateMonitor
from transformers import MT5ForConditionalGeneration, T5Tokenizer, MT5Config
from torchmetrics.text.rouge import ROUGEScore
from nltk.translate.bleu_score import corpus_bleu
torch.cuda.empty_cache()
class QAFinetuneModel(pl.LightningModule):
@staticmethod
def add_model_specific_args(parent_args):
parser = parent_args.add_argument_group("BaseModel")
parser.add_argument("--prediction_res_path", default=None, type=str)
parser.add_argument(
"--decode_strategy",
default="greedy",
choices=["beamsearch", "sampling", "greedy"],
)
return parent_args
def __init__(self, args):
super().__init__()
self.save_hyperparameters(args)
self.formator = args.formator
self.max_target_length = args.max_target_length
self.decode_strategy = args.decode_strategy
self.rouge_metric = ROUGEScore(
rouge_keys=("rougeL", "rouge1", "rouge2"), normalizer=lambda x: x
)
self.loss_func = torch.nn.CrossEntropyLoss(reduction="none")
self.model = MT5ForConditionalGeneration.from_pretrained(
args.pretrained_model_path
)
print("using MT5 model")
if args.tokenizer_type == "t5_tokenizer":
self.tokenizer = T5Tokenizer.from_pretrained(args.pretrained_model_path)
print("vocab_size:", len(self.tokenizer))
# self.tokenizer.add_special_tokens(special_token_dict)
# print('add special tokens to tokenizer,vocab size:',len(self.tokenizer))
else:
print("now only the t5_tokenizer is supported")
self.bleu_val = []
def setup(self, stage=None) -> None:
if stage == "fit":
train_loader = (
self.trainer._data_connector._train_dataloader_source.dataloader()
)
# Calculate total steps
if self.trainer.max_epochs > 0:
world_size = self.trainer.world_size
tb_size = self.hparams.train_batchsize * max(1, world_size)
ab_size = self.trainer.accumulate_grad_batches * float(
self.trainer.max_epochs
)
self.total_steps = (
len(train_loader.dataset) * self.trainer.max_epochs // tb_size
) // ab_size
else:
self.total_steps = (
self.trainer.max_steps // self.trainer.accumulate_grad_batches
)
print("Total steps: {}".format(self.total_steps))
# return super().setup(stage)
def configure_optimizers(self):
from fengshen.models.model_utils import configure_optimizers
return configure_optimizers(self)
def on_save_checkpoint(self, checkpoint) -> None:
# Save the current loop info in the mid of epoch
# if you lightning <= 1.6.0 uncomment the line below
# checkpoint['loops'] = self.trainer.checkpoint_connector._get_loops_state_dict()
if (
self.trainer.global_rank == 0
and self.trainer.global_step % self.hparams.every_n_train_steps == 0
):
self.model.save_pretrained(
os.path.join(
self.trainer.checkpoint_callback.dirpath,
"hf_pretrained_epoch{}_step{}".format(
self.trainer.current_epoch, self.trainer.global_step
),
)
)
def on_load_checkpoint(self, checkpoint) -> None:
global_step_offset = checkpoint["global_step"]
if "global_samples" in checkpoint:
self.consumed_samples = checkpoint["global_samples"]
self.trainer.fit_loop.epoch_loop._batches_that_stepped = global_step_offset
def training_step(self, batch, batch_idx): # todo: change
if self.formator == "t5style":
output = self.model(
input_ids=batch["input_ids"],
labels=batch["labels"],
decoder_input_ids=batch["decoder_input_ids"],
)
else:
output = self.model(
input_ids=batch["input_ids"],
input_token_type=batch["token_types"],
labels=batch["labels"],
decoder_input_ids=batch["decoder_input_ids"],
)
# print(output.logits)
acc = self.comput_metrix(output.logits, batch["labels"])
grad = get_gradient_norm(self.model)
self.log("train_loss", output.loss, sync_dist=True)
self.log("train_acc", acc, sync_dist=True)
self.log("train_grad", grad, sync_dist=True)
return output.loss
def validation_step(self, batch, batch_idx):
output = self.model(
input_ids=batch["input_ids"],
labels=batch["labels"],
)
pred_ids = self.model.generate(
input_ids=batch["input_ids"], max_new_tokens=self.max_target_length
)
acc = self.comput_metrix(output.logits, batch["labels"])
# print(output.logits.shape)
self.log("val_loss", output.loss, sync_dist=True)
self.log("val_acc", acc, sync_dist=True)
batch_labels = torch.where(
batch["labels"] != -100, batch["labels"], self.tokenizer.pad_token_id
)
ppl = torch.exp(output.loss)
self.log("val_ppl", ppl, sync_dist=True)
pred_tokens = self.tokenizer.batch_decode(
            pred_ids, clean_up_tokenization_spaces=True, skip_special_tokens=True
)
label_tokens = self.tokenizer.batch_decode(
            batch_labels, clean_up_tokenization_spaces=True, skip_special_tokens=True
)
pred_sentences = list(map(remove_pad, pred_tokens))
# print(label_tokens)
self.bleu_val.append(compute_bleu(pred_sentences, [[t] for t in label_tokens]))
        candidate = [
            # replace rather than lstrip: lstrip("<extra_id_0>") strips a
            # character set, not the literal prefix
            chinese_char_tokenize(p).replace("<extra_id_0>", "", 1) for p in pred_tokens
        ]
        target = [
            generate_sentence(chinese_char_tokenize(sent)).replace("<extra_id_0>", "", 1)
            for sent in label_tokens
        ]
self.rouge_metric.update(preds=candidate, target=target)
f1 = compute_f1(candidate, label_tokens)
self.log("val_f1", f1, sync_dist=True)
def on_validation_epoch_end(self) -> None:
n = len(self.bleu_val)
avg_bleu = float(sum(self.bleu_val)) / n
print("bleu:", avg_bleu)
self.log("val_bleu", avg_bleu)
self.bleu_val = []
rouge_dict = self.rouge_metric.compute()
# reset the metric after once validation
self.rouge_metric.reset()
for k, v in rouge_dict.items():
self.log("val_{}".format(k), v, sync_dist=True)
if self.trainer._accelerator_connector.cluster_environment.global_rank() == 0:
print("rouge:\n", rouge_dict)
return
def predict_step(self, batch, batch_idx):
num_beams = 1
do_sample = False
top_p = None
if self.decode_strategy == "beamsearch":
num_beams = 10
elif self.decode_strategy == "sampling":
num_beams = 4
top_p = 0.9
do_sample = True
prediction_dic = self.model.generate(
input_ids=batch["input_ids"],
max_new_tokens=self.max_target_length,
num_beams=num_beams,
do_sample=do_sample,
top_p=top_p,
no_repeat_ngram_size=3,
return_dict_in_generate=True,
output_scores=True,
)
output = self.model(
input_ids=batch["input_ids"],
labels=batch["labels"],
)
prediction_ids = prediction_dic["sequences"]
loss_tensor = self.loss_func(output.logits.transpose(1, 2), batch["labels"])
indexes = torch.where(batch["labels"] == self.tokenizer.eos_token_id)[1]
loss = torch.sum(loss_tensor, dim=1) / indexes
return {
"input_ids": batch["input_ids"],
"predict_ids": prediction_ids,
"labels": batch["labels"],
"decoder_inputs": batch["decoder_input_ids"],
"loss": loss,
}
def save_preditions(self, result, args):
with open(args.prediction_res_path, "w", encoding="utf8") as fw:
preditions = []
labels = []
for batch in result:
print(batch.keys())
batch_labels = torch.where(
batch["labels"] != -100,
batch["labels"],
self.tokenizer.pad_token_id,
)
for i in range(len(batch["input_ids"])):
context = self.tokenizer.decode(
batch["input_ids"][i],
skip_special_tokens=True,
                        clean_up_tokenization_spaces=True,
)
pred = self.tokenizer.decode(
batch["predict_ids"][i],
                        clean_up_tokenization_spaces=True,
skip_special_tokens=True,
)
target = generate_sentence(
self.tokenizer.batch_decode(
                            batch_labels[i], clean_up_tokenization_spaces=True
)
)
                    # strip the sentinel as a literal prefix (lstrip would
                    # remove a character set instead)
                    pred = pred.replace("<extra_id_0>", "", 1)
                    target = target.replace("<extra_id_0>", "", 1)
self.rouge_metric.update(
preds=chinese_char_tokenize(pred),
target=chinese_char_tokenize(target),
)
preditions.append(list(pred))
labels.append([list(target)])
fw.write("context:" + "".join(context) + "\n")
fw.write("pred:" + pred + "\n")
fw.write("target" + target + "\n")
fw.write("loss:{:.6f}\n".format(batch["loss"][i].item()))
fw.write("\n")
bleu = compute_bleu(preditions, labels)
fw.write("bleu:{}".format(bleu))
print("finish prediction, saved in {}".format(args.prediction_res_path))
return preditions, labels
def comput_metrix(self, logits, labels):
y_pred = torch.argmax(logits, dim=-1)
y_true = labels.float()
pad_num = torch.sum(torch.eq(labels, -100))
corr = torch.eq(y_pred, y_true)
acc = (torch.sum(corr.float()) - pad_num) / (
y_true.view(size=(-1,)).shape[0] - pad_num
)
return acc
class PredictDataModule(UniversalDataModule):
def predict_dataloader(self):
return self.test_dataloader()
def main():
total_parser = argparse.ArgumentParser("Finetune Dialogue model.")
total_parser.add_argument("--do_eval_only", action="store_true", default=False)
total_parser.add_argument("--pretrained_model_path", default=None, type=str)
total_parser.add_argument("--new_vocab_path", default=None, type=str)
total_parser.add_argument(
"--tokenizer_type",
default="t5_tokenizer",
choices=["t5_tokenizer", "bert_tokenizer"],
)
total_parser.add_argument("--train_split_size", default=0.995, type=int)
total_parser.add_argument("--preprocessing_num_workers", default="10", type=int)
total_parser.add_argument("--ckpt_path", default=None, type=str)
total_parser.add_argument("--use_cache", default=False, type=bool)
total_parser.add_argument(
"--formator", default="dialog", choices=["dialog", "ccqa", "t5style"]
)
sys.path.append("../../../")
from fengshen.utils.universal_checkpoint import UniversalCheckpoint
from qa_dataset import T5StyleDataset, TextGenCollator
total_parser = T5StyleDataset.add_data_specific_args(total_parser)
total_parser = UniversalDataModule.add_data_specific_args(
total_parser
) # TaskDataModel
total_parser = Trainer.add_argparse_args(total_parser)
total_parser = UniversalCheckpoint.add_argparse_args(total_parser)
total_parser = QAFinetuneModel.add_model_specific_args(
total_parser
) # todo: check names
args = total_parser.parse_args()
print("Argument parse success.")
print("superviseT5DataModel load start {}".format(get_time_str()))
config = MT5Config.from_pretrained(args.pretrained_model_path)
collate_fn = TextGenCollator(
config=config,
pad_token_id=config.pad_token_id,
decoder_start_token_id=config.decoder_start_token_id,
formator=args.formator)
if not args.do_eval_only:
datasets = {'train': T5StyleDataset(args.train_file, args, load_data_type=0, data="train"),
'validation': T5StyleDataset(args.val_file, args, load_data_type=0, data="dev")}
model = QAFinetuneModel(args)
print("superviseT5DataModel load end {}".format(get_time_str()))
data_model = UniversalDataModule(
tokenizer=None, args=args, collate_fn=collate_fn, datasets=datasets
)
print('data loaded')
checkpoint_callback = UniversalCheckpoint(args)
lr_monitor = LearningRateMonitor(logging_interval="step")
logger = loggers.TensorBoardLogger(
save_dir=os.path.join(args.default_root_dir, "logs/") # TOCHANGE
)
trainer = Trainer.from_argparse_args(
args, logger=logger, callbacks=[checkpoint_callback, lr_monitor]
)
trainer.fit(model, data_model)
else:
datasets = {'test': T5StyleDataset(args.test_file, args, load_data_type=0, data="test")}
data_model = PredictDataModule(
tokenizer=None, args=args, collate_fn=collate_fn, datasets=datasets
)
tokenizer = T5Tokenizer.from_pretrained(args.pretrained_model_path)
model = QAFinetuneModel(args=args)
trainer = Trainer.from_argparse_args(args)
result = trainer.predict(model, data_model, ckpt_path=args.ckpt_path)
predictions, labels = model.save_preditions(result, args)
sample = result[0] # first_batch
batch_labels = torch.where(
sample["labels"] != -100, sample["labels"], model.tokenizer.pad_token_id
)
for i in range(4):
print(tokenizer.batch_decode(sample["input_ids"][i]))
print(tokenizer.batch_decode(sample["predict_ids"][i]))
print(tokenizer.batch_decode(batch_labels[i]))
def compute_f1(cand, ref):
f1_score = []
for p, t in zip(cand, ref):
p_tokens = p.split()
t_tokens = t.split()
        common = Counter(p_tokens) & Counter(t_tokens)  # token overlap between prediction and target
num_same = sum(common.values())
if len(t_tokens) == 0 or len(p_tokens) == 0:
f1 = int(p == t)
elif num_same == 0:
f1 = 0
else:
precision = 1.0 * num_same / len(p_tokens)
recall = 1.0 * num_same / len(t_tokens)
f1 = (2 * precision * recall) / (precision + recall + 1e-8)
f1_score.append(f1)
f1 = sum(f1_score) / float(len(cand))
return f1
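def _demo_compute_f1():
    # Hedged sanity check, not part of the original pipeline: compute_f1
    # expects whitespace-separated tokens (the chinese_char_tokenize format).
    cand = ["北 京 是 首 都"]
    ref = ["北 京 是 中 国 首 都"]
    # 5 shared chars, |cand|=5, |ref|=7 -> precision=1.0, recall=5/7, f1~0.83
    return compute_f1(cand, ref)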
def generate_sentence(raw_list):
words = []
i = 0
while i < len(raw_list) and raw_list[i] != "</s>":
words.append(raw_list[i])
i += 1
return "".join(words)
def remove_pad(raw_text, ref=False):
if ref:
return [raw_text.lstrip("<pad>")]
else:
return raw_text.lstrip("<pad>")
def compute_bleu(preditions, labels):
score_nltk = corpus_bleu(labels, preditions)
return score_nltk
def get_gradient_norm(model):
total_norm = 0
parameters = [
p for p in model.parameters() if p.grad is not None and p.requires_grad
]
for p in parameters:
param_norm = p.grad.detach().data.norm(2)
total_norm += param_norm.item() ** 2
total_norm = total_norm**0.5
return total_norm
def get_time_str():
return time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
if __name__ == "__main__":
main()
| 17,183 | 37.101996 | 100 | py |
Fengshenbang-LM | Fengshenbang-LM-main/fengshen/examples/qa_t5/qa_dataset.py | # -*- encoding: utf-8 -*-
'''
Copyright 2022 The International Digital Economy Academy (IDEA). CCNL team. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
@File : qa_dataset.py
@Time : 2022/10/28 19:57
@Author : He Junqing
@Version : 1.0
@Contact : hejunqing@idea.edu.cn
@License : (C)Copyright 2022-2023, CCNL-IDEA
'''
# here put the import lib
from dataclasses import dataclass
import numpy as np
import torch
from torch.nn.utils.rnn import pad_sequence
from fengshen.data.t5_dataloader.t5_gen_datasets import DialogDataset
class T5StyleDataset(DialogDataset):
@staticmethod
def add_data_specific_args(parent_args):
parser = parent_args.add_argument_group("Dataset")
parser.add_argument("--max_seq_length", default=512, type=int)
parser.add_argument("--max_knowledge_length", default=128, type=int)
parser.add_argument("--max_target_length", default=128, type=int)
return parent_args
def regular_tokenize(self, sample):
"""
        sample keys: question: str, context: str, answer: list, idx: int, ans_span: list
"""
plain_text = (
"question:"
+ sample["question"]
+ "knowledge:"
+ sample["context"][: self.max_knowledge_length]
)
l_text = len(plain_text)
ctx_len = self.max_seq_length - l_text - 1
if ctx_len > 0 and "history" in sample:
context = "[SEP]".join(sample["history"])
plain_text += "context:" + context
res_prefix = self.tokenizer.encode("answer:", add_special_tokens=False)
# res_prefix.tolist()
l_rp = len(res_prefix)
tokenized = self.tokenizer.encode(
plain_text,
add_special_tokens=False,
truncation=True,
max_length=self.max_seq_length - 2 - l_rp,
)
# tokenized.tolist()
tokenized += res_prefix
# add maskid
mask_id = self.tokenizer.convert_tokens_to_ids("<extra_id_0>")
tokenized.append(mask_id)
tokenized.append(self.eos_token_id)
# print(tokenized)
target_ids = self.tokenizer.encode(
"<extra_id_0>" + sample["answer"][0],
add_special_tokens=True,
truncation=True,
max_length=self.max_target_length,
)
# print(target_ids)
tokenized_sample = {}
tokenized_sample["input_ids"] = np.array(tokenized, dtype=np.int32)
tokenized_sample["attention_mask"] = np.ones(len(tokenized), dtype=np.int8)
tokenized_sample["labels"] = np.array(target_ids, dtype=np.int32)
tokenized_sample["idx"] = sample["idx"]
# print(tokenized_sample)
return tokenized_sample
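    # Illustrative layout of one tokenized sample (token strings, not ids):
    #   input_ids: question: ... knowledge: ... answer: <extra_id_0> </s>
    #   labels:    <extra_id_0> <answer text> </s>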
@dataclass
class TextGenCollator:
'''
'''
    config: object = None
    pad_token_id: int = -100
    decoder_start_token_id: int = 0
formator: str = 't5style'
def setup(self):
pass
def __call__(self, samples):
batch = {
k: [
torch.tensor(samples[i][k], dtype=torch.int64)
for i in range(len(samples))
]
for k in ["input_ids", "attention_mask", "labels"]
}
batch["idx"] = torch.tensor([samples[i]["idx"] for i in range(len(samples))])
# print(batch)
for k, v in batch.items():
if k != "labels" and k != "idx":
batch[k] = pad_sequence(
v, batch_first=True, padding_value=self.pad_token_id
)
elif k == "labels":
batch[k] = pad_sequence(v, batch_first=True, padding_value=-100)
batch["decoder_input_ids"] = torch.tensor(
self.shift_tokens_right(
batch["labels"], self.pad_token_id, self.decoder_start_token_id
),
dtype=torch.long,
)
return batch
def shift_tokens_right(
self, input_ids: np.array, pad_token_id: int, decoder_start_token_id: int
) -> np.ndarray:
"""
Shift input ids one token to the right.
"""
shifted_input_ids = np.zeros_like(input_ids)
shifted_input_ids[:, 1:] = input_ids[:, :-1]
shifted_input_ids[:, 0] = decoder_start_token_id
shifted_input_ids = np.where(
shifted_input_ids == -100, pad_token_id, shifted_input_ids
)
return shifted_input_ids
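    # Worked example for shift_tokens_right (toy ids; decoder_start_token_id=0,
    # pad_token_id=1):
    #   labels:  [[5, 6, -100, -100]]
    #   shifted: [[0, 5, 6, 1]]  (the carried-over -100 is remapped to pad)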
if __name__ == "__main__":
# test
import argparse
total_parser = argparse.ArgumentParser("DATASET parser")
total_parser.add_argument(
"--tokenizer_type",
default="t5_tokenizer",
choices=["bert_tokenizer", "t5_tokenizer"],
)
total_parser.add_argument("--preprocessing_num_workers", default="4", type=int)
total_parser.add_argument(
"--new_vocab_path",
default=None,
type=str,
)
total_parser.add_argument(
"--pretrained_model_path",
default="YOUR DOWNLOAD MODEL PATH",
)
total_parser.add_argument("--train_split_size", default=0.995, type=int)
total_parser.add_argument(
"--formator", default="t5style", choices=["t5style", "squad", "dialog"]
)
    total_parser = T5StyleDataset.add_data_specific_args(total_parser)
args = total_parser.parse_args()
args.train_data_path = "cmrc"
    ds = T5StyleDataset("cmrc", args, load_data_type=0, data="dev")
print(len(ds))
for i in range(10):
print(ds[i])
    # Build the collator directly (toy pad/start ids; the real values come
    # from the model config, as in finetune_t5_cmrc.py)
    collate_fn = TextGenCollator(
        config=None,
        pad_token_id=0,
        decoder_start_token_id=0,
        formator=args.formator,
    )
    from torch.utils.data import DataLoader
    dl = DataLoader(ds, batch_size=4, collate_fn=collate_fn)
    for batch in dl:
        print(batch["input_ids"])
        print(batch["decoder_input_ids"])
        print(batch["labels"])
        break
| 6,086 | 31.37766 | 96 | py |
Fengshenbang-LM | Fengshenbang-LM-main/fengshen/examples/hubert/pretrain_hubert.py | import fengshen.data.hubert.hubert_dataset as datasets
from fengshen.data.universal_datamodule import UniversalDataModule
from transformers import HubertConfig, HubertModel
# from transformers.models.hubert.modeling_hubert import _compute_mask_indices
import argparse
from fairseq.data import Dictionary
from pytorch_lightning import (
LightningModule,
Trainer,
loggers,
)
from pytorch_lightning.callbacks import LearningRateMonitor
import torch
import os
import torch.nn.functional as F
import torch.nn as nn
class LabelEncoder(object):
def __init__(self, dictionary: Dictionary):
self.dictionary = dictionary
def __call__(self, label: str):
return self.dictionary.encode_line(
label,
append_eos=False,
add_if_not_exist=False,
)
class HubertPretrainDataLoader():
def __init__(self, args):
self.cfg = args
self.dictionaries = self.load_dictionaries()
self.load_datasets = {}
    # TODO: switch to a HuggingFace tokenizer
def load_dictionaries(self):
label_dir = self.cfg.data if self.cfg.label_dir is None else self.cfg.label_dir
dictionaries = [
Dictionary.load(f"{label_dir}/dict.{label}.txt")
for label in self.cfg.labels
]
return dictionaries
def get_label_dir(self):
if self.cfg.label_dir is None:
return self.cfg.data
return self.cfg.label_dir
@property
def datasets(self):
return self.load_datasets
def load_dataset(self, split: str, **kwargs):
manifest = f"{self.cfg.data}/{split}.tsv"
dicts = self.dictionaries
pad_list = [dict.pad() for dict in dicts]
eos_list = [dict.eos() for dict in dicts]
procs = [LabelEncoder(dict) for dict in dicts]
paths = [f"{self.get_label_dir()}/{split}.{lb}" for lb in self.cfg.labels]
# hubert v1: pad_audio=True, random_crop=False;
self.load_datasets[split] = datasets.HubertDataset(
manifest,
sample_rate=self.cfg.sample_rate,
label_paths=paths,
label_rates=self.cfg.label_rate,
pad_list=pad_list,
eos_list=eos_list,
label_processors=procs,
max_keep_sample_size=self.cfg.max_keep_size,
min_keep_sample_size=self.cfg.min_sample_size,
max_sample_size=self.cfg.max_sample_size,
pad_audio=self.cfg.pad_audio,
normalize=self.cfg.normalize,
store_labels=False,
random_crop=self.cfg.random_crop,
single_target=self.cfg.single_target,
)
def prepare_data(args):
loader = HubertPretrainDataLoader(args)
loader.load_dataset('train')
loader.load_dataset('valid')
return loader
class HubertLightning(LightningModule):
@staticmethod
def add_module_specific_args(parent_parser):
parser = parent_parser.add_argument_group('HuBert Lightning')
parser.add_argument('--pred_masked_weight', type=float, default=1.0)
parser.add_argument('--logit_temp', type=float, default=1.0)
parser.add_argument('--loss_weights', type=float, nargs='+')
# parser.add_argument('--mask_prob', type=float, default=0.65)
# parser.add_argument('--mask_length', type=int, default=10)
# parser.add_argument('--mask_selection', type=str, default='static',
# choice=["static", "uniform", "normal", "poisson"])
# parser.add_argument('--mask_other', type=float, default=0)
# parser.add_argument('--no_mask_overlap', type=bool, default=False)
# parser.add_argument('--mask_min_space', type=int, default=1)
return parent_parser
    def __init__(self, args, loader, **kwargs) -> None:
super().__init__()
self.save_hyperparameters(args)
config = HubertConfig.from_pretrained(args.model_path)
self.config = config
self.model = HubertModel(config=config)
self.num_classes = [len(d) for d in loader.dictionaries]
self.label_embs_concat = nn.Parameter(
torch.FloatTensor(sum(self.num_classes), self.config.conv_dim[-1] // 2)
)
self.final_proj = nn.Linear(
self.config.hidden_size, self.config.conv_dim[-1] // 2 * len(loader.dictionaries)
)
nn.init.uniform_(self.label_embs_concat)
def setup(self, stage) -> None:
if stage == 'fit':
train_loader = self.trainer._data_connector._train_dataloader_source.dataloader()
# Calculate total steps
if self.trainer.max_epochs > 0:
world_size = self.trainer.world_size
tb_size = self.hparams.train_batchsize * max(1, world_size)
ab_size = self.trainer.accumulate_grad_batches
self.total_steps = (len(train_loader.dataset) *
self.trainer.max_epochs // tb_size) // ab_size
else:
self.total_steps = self.trainer.max_steps // self.trainer.accumulate_grad_batches
print('Total steps: {}' .format(self.total_steps))
def configure_optimizers(self):
from fengshen.models.model_utils import configure_optimizers
return configure_optimizers(self)
def compute_nce(self, x, pos, negs):
neg_is_pos = (pos == negs).all(-1)
pos = pos.unsqueeze(0)
targets = torch.cat([pos, negs], dim=0)
logits = torch.cosine_similarity(x.float(), targets.float(), dim=-1).type_as(x)
logits /= self.hparams.logit_temp
if neg_is_pos.any():
logits[1:][neg_is_pos] = float("-inf")
logits = logits.transpose(0, 1) # (num_x, num_cls+1)
return logits
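    # Shape walk-through for compute_nce (illustrative; S masked frames,
    # D projection dims, Neg negatives):
    #   x (S, D); pos unsqueezed to (1, S, D); negs (Neg, S, D)
    #   targets = cat -> (Neg+1, S, D); cosine similarity over D -> (Neg+1, S)
    #   after transpose logits is (S, Neg+1) with the true class at index 0,
    #   matching the all-zero targets built later in forward()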
def forward(self, **batch):
target_list = batch['target_list']
padding_mask = batch['net_input']['padding_mask']
input_values = batch['net_input']['source']
output = self.model(input_values=input_values,
attention_mask=padding_mask,
target_list=target_list,
mask_time_indices=None,
return_dict=False)
def compute_pred(proj_x, target, label_embs):
# compute logits for the i-th label set
y = torch.index_select(label_embs, 0, target.long())
negs = label_embs.unsqueeze(1).expand(-1, proj_x.size(0), -1)
# proj_x: (S, D)
# y: (S, D)
# negs: (Neg, S, D)
return self.compute_nce(proj_x, y, negs)
label_embs_list = self.label_embs_concat.split(self.num_classes, 0)
x, extra_losses, target_list, mask_indices, padding_mask = output[
0], output[-4], output[-3], output[-2], output[-1]
masked_indices = torch.logical_and(~padding_mask, mask_indices)
proj_x_m = self.final_proj(x[masked_indices])
proj_x_m_list = proj_x_m.chunk(len(target_list), dim=-1)
logp_m_list = [
compute_pred(proj_x_m, t[masked_indices], label_embs_list[i])
for i, (proj_x_m, t) in enumerate(zip(proj_x_m_list, target_list))
]
targ_m_list = [x.new_zeros(x.size(0), dtype=torch.long) for x in logp_m_list]
loss = 0.0
loss_m_list = []
for i, (logp_m, targ_m) in enumerate(zip(logp_m_list, targ_m_list)):
loss_m = F.cross_entropy(logp_m, targ_m)
loss_m_list.append(loss_m)
self.log(f"loss_m_{i}", loss_m.detach().item())
loss += self.hparams.pred_masked_weight * sum(loss_m_list)
loss_weights = self.hparams.loss_weights
if loss_weights is not None:
if torch.is_tensor(extra_losses):
extra_losses = [extra_losses]
names = ['extra']
if len(loss_weights) == 1 and len(extra_losses) != 1:
loss_weights = [loss_weights[0]] * len(extra_losses)
assert len(extra_losses) == len(
loss_weights
), f"{len(extra_losses)}, {len(loss_weights)}"
for p, n, coef in zip(extra_losses, names, loss_weights):
if coef != 0 and p is not None:
p = coef * p.float()
loss += p
self.log(f"loss_{n}", p.item())
return {'loss': loss}
def training_step(self, batch, batch_idx):
output = self(**batch)
self.log('train_loss', output['loss'])
return output
def comput_metrix(self, logits, labels):
y_pred = torch.argmax(logits, dim=-1)
y_pred = y_pred.view(size=(-1,))
y_true = labels.view(size=(-1,)).float()
corr = torch.eq(y_pred, y_true)
acc = torch.sum(corr.float()) / y_true.size()[0]
return acc
def validation_step(self, batch, batch_idx):
output = self(**batch)
# self.log('val_loss', output.loss, sync_dist=True)
# acc = self.comput_metrix(output.logits, batch['labels'])
# self.log('val_acc', acc, sync_dist=True)
return output
def on_save_checkpoint(self, checkpoint) -> None:
# Save the current loop info in the mid of epoch
# if you lightning <= 1.6.0 uncomment the line below
# checkpoint['loops'] = self.trainer.checkpoint_connector._get_loops_state_dict()
if self.trainer.global_rank == 0:
self.model.save_pretrained(os.path.join(
self.trainer.checkpoint_callback.dirpath,
'hf_pretrained_epoch{}_step{}'.format(self.trainer.current_epoch, self.trainer.global_step)))
def on_load_checkpoint(self, checkpoint) -> None:
global_step_offset = checkpoint["global_step"]
if 'global_samples' in checkpoint:
self.consumed_samples = checkpoint['global_samples']
self.trainer.fit_loop.epoch_loop._batches_that_stepped = global_step_offset
if __name__ == '__main__':
args_parser = argparse.ArgumentParser()
from fengshen.utils import UniversalCheckpoint
from fengshen.models.model_utils import add_module_args
args_parser = add_module_args(args_parser)
args_parser = datasets.add_data_specific_args(args_parser)
args_parser = UniversalDataModule.add_data_specific_args(args_parser)
args_parser = Trainer.add_argparse_args(args_parser)
args_parser = HubertLightning.add_module_specific_args(args_parser)
args_parser = UniversalCheckpoint.add_argparse_args(args_parser)
args_parser.add_argument('--ckpt_path', type=str, )
args = args_parser.parse_args()
data_module = UniversalDataModule(args=args, tokenizer=None, collate_fn=None)
    data_loader = prepare_data(args)
data_module.datasets = data_loader.datasets
module = HubertLightning(args, loader=data_loader)
lr_monitor = LearningRateMonitor(logging_interval='step')
logger = loggers.TensorBoardLogger(save_dir=os.path.join(
args.default_root_dir, 'logs/'),
name=os.path.basename(os.path.dirname(args.model_path)))
    checkpoint_callback = UniversalCheckpoint(args)
if args.ckpt_path is not None and \
not os.path.exists(args.ckpt_path):
        print('-------- warning: no checkpoint found, removing ckpt_path --------')
args.ckpt_path = None
trainer = Trainer.from_argparse_args(args,
logger=logger,
callbacks=[
lr_monitor,
checkpoint_callback])
trainer.fit(module, data_module, ckpt_path=args.ckpt_path)
| 11,643 | 39.430556 | 109 | py |
Fengshenbang-LM | Fengshenbang-LM-main/fengshen/examples/pretrain_erlangshen_bert/pretrain_erlangshen.py | from dataclasses import dataclass
from transformers import (
MegatronBertConfig,
MegatronBertForPreTraining,
AutoTokenizer,
)
from pytorch_lightning import (
LightningModule,
Trainer,
)
from pytorch_lightning.callbacks import (
LearningRateMonitor,
)
import argparse
import torch
import os
import numpy as np
import time
from fengshen.data.universal_datamodule import UniversalDataModule
from fengshen.data.data_utils.sop_utils import get_a_and_b_segments
from fengshen.data.data_utils.truncate_utils import truncate_segments
from fengshen.data.data_utils.token_type_utils import create_tokens_and_tokentypes
from fengshen.data.data_utils.mask_utils import create_masked_lm_predictions
from fengshen.models.model_utils import (
add_module_args,
configure_optimizers,
get_total_steps,
)
from fengshen.utils.universal_checkpoint import UniversalCheckpoint
from torch.utils.data._utils.collate import default_collate
SHOW_DATA = False
@dataclass
class ErLangShenCollator:
    '''
    Turns raw inputs into samples, i.e. the final model inputs.
    The main processing logic lives in __call__,
    covering both the MLM mask task and the SOP task.
    '''
    tokenizer: object = None  # the tokenizer
    max_seq_length: int = 512
    masked_lm_prob: float = 0.15
content_key: str = 'text'
    # some preprocessing setup
def setup(self):
from fengshen.data.data_utils.sentence_split import ChineseSentenceSplitter
self.sentence_split = ChineseSentenceSplitter()
self.np_rng = np.random.RandomState(seed=((int(time.time()) % 2**32)))
inv_vocab = {v: k for k, v in self.tokenizer.vocab.items()}
self.vocab_id_list = list(inv_vocab.keys())
self.vocab_id_to_token_dict = inv_vocab
def __call__(self, samples):
        '''
        samples: one sample looks like {"text": "hello world"}
        '''
model_inputs = []
for s in samples:
sentences = self.sentence_split.tokenize(s[self.content_key])
# Divide sample into two segments (A and B).
tokenized_sentences = [self.tokenizer.convert_tokens_to_ids(
self.tokenizer.tokenize(sent)) for sent in sentences]
if len(tokenized_sentences) == 0:
print('find empty sentence')
continue
if len(tokenized_sentences) > 1:
tokens_a, tokens_b, is_next_random = get_a_and_b_segments(tokenized_sentences,
self.np_rng)
else:
tokens_a = tokenized_sentences[0]
tokens_b = []
is_next_random = False
            # max_seq_length - 3 because [CLS] [SEP] [SEP] still need to be appended
if len(tokens_a) == 0:
continue
_ = truncate_segments(tokens_a, tokens_b, len(tokens_a),
len(tokens_b), self.max_seq_length-3, self.np_rng)
# Build tokens and toketypes.
tokens, tokentypes = create_tokens_and_tokentypes(tokens_a, tokens_b,
self.tokenizer.cls_token_id, self.tokenizer.sep_token_id)
# Masking.
max_predictions_per_seq = self.masked_lm_prob * len(tokens)
(tokens, masked_positions, masked_labels, _, _) = create_masked_lm_predictions(
tokens, self.vocab_id_list, self.vocab_id_to_token_dict, self.masked_lm_prob,
self.tokenizer.cls_token_id, self.tokenizer.sep_token_id, self.tokenizer.mask_token_id,
max_predictions_per_seq, self.np_rng,
masking_style='bert')
# Some checks.
num_tokens = len(tokens)
padding_length = self.max_seq_length - num_tokens
assert padding_length >= 0
assert len(tokentypes) == num_tokens
assert len(masked_positions) == len(masked_labels)
# Tokens and token types.
filler = [self.tokenizer.pad_token_id] * padding_length
tokens_np = np.array(tokens + filler, dtype=np.int64)
tokentypes_np = np.array(tokentypes + filler, dtype=np.int64)
# Padding mask.
padding_mask_np = np.array([1] * num_tokens + [0] * padding_length,
dtype=np.int64)
# Lables and loss mask.
labels = [-100] * self.max_seq_length
for i in range(len(masked_positions)):
assert masked_positions[i] < num_tokens
labels[masked_positions[i]] = masked_labels[i]
labels_np = np.array(labels, dtype=np.int64)
model_inputs.append(
{
'input_ids': tokens_np,
'attention_mask': padding_mask_np,
'token_type_ids': tokentypes_np,
'labels': labels_np,
'next_sentence_label': int(is_next_random)
}
)
return default_collate(model_inputs)
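# Hedged sketch of one collated sample (toy layout, not a real vocab):
#   input_ids:           [CLS] a1 a2 [SEP] b1 b2 [SEP] [PAD] ...
#   token_type_ids:      0     0  0  0     1  1  1     0 ...
#   labels:              -100 everywhere except the masked positions
#   next_sentence_label: 1 if segments (a, b) were swapped (SOP), else 0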
class ErLangShenBert(LightningModule):
@staticmethod
def add_module_specific_args(parent_parser):
parser = parent_parser.add_argument_group('Erlangshen Bert')
parser.add_argument('--masked_lm_prob', type=float, default=0.15)
parser.add_argument('--max_seq_length', type=int, default=512)
parser.add_argument('--sample_content_key', type=str, default='text')
return parent_parser
def __init__(self, args, tokenizer, **kwargs) -> None:
super().__init__()
self.save_hyperparameters(args)
config = MegatronBertConfig.from_pretrained(args.model_path)
self.config = config
self.tokenizer = tokenizer
self.model = MegatronBertForPreTraining(config)
def setup(self, stage) -> None:
if stage == 'fit':
self.total_steps = get_total_steps(self.trainer, self.hparams)
print('Total steps: {}' .format(self.total_steps))
def configure_optimizers(self):
return configure_optimizers(self)
def forward(self, **batch):
return self.model(**batch)
def detokenize(self, token_ids):
toks = self.tokenizer.convert_ids_to_tokens(token_ids)
return self.tokenizer.convert_tokens_to_string(toks)
def comput_metrix(self, logits, labels):
y_pred = torch.argmax(logits, dim=-1)
y_pred = y_pred.view(size=(-1,))
y_true = labels.view(size=(-1,)).float()
corr = torch.eq(y_pred, y_true)
acc = torch.sum(corr.float())/labels.shape[0]
return acc
def training_step(self, batch, batch_idx):
if self.trainer.global_rank == 0:
global SHOW_DATA
if not SHOW_DATA:
print(self.config)
print(self.model)
SHOW_DATA = True
print('source: {}'.format(batch['input_ids'][0]))
print('target: {}'.format(batch['labels'][0]))
print('source: {}'.format(self.detokenize(batch['input_ids'][0])))
label_idx = batch['labels'][0] != -100
print('target: {}'.format(self.detokenize(
batch['labels'][0][label_idx])))
output = self(**batch)
self.log('train_loss', output.loss, sync_dist=True)
label_idx = batch['labels'] != -100
acc = self.comput_metrix(
output.prediction_logits[label_idx].view(-1, output.prediction_logits.size(-1)), batch['labels'][label_idx])
self.log('train_acc', acc, sync_dist=True)
return output.loss
def validation_step(self, batch, batch_idx):
output = self(**batch)
self.log('val_loss', output.loss, sync_dist=True)
return output.loss
def on_load_checkpoint(self, checkpoint) -> None:
        # Compatibility with older Lightning versions, where the step count is reset to 0 when resuming from a ckpt
global_step_offset = checkpoint["global_step"]
if 'global_samples' in checkpoint:
self.consumed_samples = checkpoint['global_samples']
self.trainer.fit_loop.epoch_loop._batches_that_stepped = global_step_offset
if __name__ == '__main__':
args_parser = argparse.ArgumentParser()
args_parser = add_module_args(args_parser)
args_parser = UniversalDataModule.add_data_specific_args(args_parser)
args_parser = Trainer.add_argparse_args(args_parser)
args_parser = ErLangShenBert.add_module_specific_args(args_parser)
args_parser = UniversalCheckpoint.add_argparse_args(args_parser)
args = args_parser.parse_args()
tokenizer = AutoTokenizer.from_pretrained(args.model_path)
collate_fn = ErLangShenCollator(
tokenizer=tokenizer,
max_seq_length=args.max_seq_length,
masked_lm_prob=args.masked_lm_prob,
content_key=args.sample_content_key,
)
collate_fn.setup()
data_module = UniversalDataModule(tokenizer=tokenizer, args=args, collate_fn=collate_fn)
print('data load complete')
model = ErLangShenBert(args, tokenizer=tokenizer)
print('model load complete')
lr_monitor = LearningRateMonitor(logging_interval='step')
checkpoint_callback = UniversalCheckpoint(args)
    # Compatibility shim: drop this argument if the directory does not exist, otherwise it raises an error
if args.load_ckpt_path is not None and \
not os.path.exists(args.load_ckpt_path):
        print('-------- warning: no checkpoint found, removing load_ckpt_path --------')
args.load_ckpt_path = None
trainer = Trainer.from_argparse_args(args,
callbacks=[
lr_monitor,
checkpoint_callback])
trainer.fit(model, data_module, ckpt_path=args.load_ckpt_path)
| 9,575 | 39.235294 | 120 | py |
Fengshenbang-LM | Fengshenbang-LM-main/fengshen/examples/wenzhong_qa/finetune_medicalQA.py | from transformers import GPT2LMHeadModel
from data.task_dataloader.medicalQADataset import GPT2QADataModel
from transformers.optimization import get_linear_schedule_with_warmup
from pytorch_lightning import Trainer, loggers
from pytorch_lightning.callbacks import ModelCheckpoint
import pytorch_lightning as pl
import argparse
import torch
import os
import sys
sys.path.insert(0, '/cognitive_comp/wuziwei/codes/fengshen/fengshen')
# sys.path.append('../../')
# sys.path.append('../')
# os.environ["CUDA_VISIBLE_DEVICES"] = '4,5,6,7'
class GPT2FinetuneMedicalQAModelCheckpoint:
@staticmethod
def add_argparse_args(parent_args):
parser = parent_args.add_argument_group('BaseModel')
parser.add_argument('--monitor', default='train_loss', type=str)
parser.add_argument('--mode', default='min', type=str)
parser.add_argument('--dirpath', default='./ckpt/', type=str)
parser.add_argument(
'--filename', default='model-{epoch:02d}-{train_loss:.4f}', type=str)
parser.add_argument('--save_last', action='store_true', default=True)
parser.add_argument('--save_top_k', default=3, type=float)
parser.add_argument('--every_n_train_steps', default=1000, type=float)
parser.add_argument('--save_weights_only', default=True, type=bool)
return parent_args
def __init__(self, args):
self.callbacks = ModelCheckpoint(monitor=args.monitor,
save_top_k=args.save_top_k,
mode=args.mode,
# every_n_train_steps=args.every_n_train_steps,
save_weights_only=args.save_weights_only,
dirpath=args.dirpath,
filename=args.filename,
save_last=args.save_last)
class GPT2FinetuneMedicalQA(pl.LightningModule):
@staticmethod
def add_model_specific_args(parent_args):
parser = parent_args.add_argument_group('BaseModel')
parser.add_argument('--learning_rate', default=1e-4, type=float)
parser.add_argument('--weight_decay', default=0.1, type=float)
parser.add_argument('--warmup', default=0.01, type=float)
return parent_args
def __init__(self, args, num_data):
super().__init__()
self.args = args
self.num_data = num_data
print('num_data:', num_data)
self.model = GPT2LMHeadModel.from_pretrained(
args.pretrained_model_path)
def setup(self, stage) -> None:
if stage == 'fit':
num_gpus = self.trainer.gpus if self.trainer.gpus is not None else 0
self.total_step = int(self.trainer.max_epochs * self.num_data /
(max(1, num_gpus) * self.trainer.accumulate_grad_batches))
print('Total training step:', self.total_step)
def training_step(self, batch, batch_idx):
output = self.model(input_ids=batch['input_ids'],
attention_mask=batch['attention_mask'], labels=batch['labels'])
# output = self.model(input_ids=batch['input_ids'], labels=batch['labels'])
# acc = self.comput_metrix(output.logits, batch['labels'])
self.log('train_loss', output.loss)
return output.loss
def comput_metrix(self, logits, labels):
y_pred = torch.argmax(logits, dim=-1)
y_pred = y_pred.view(size=(-1,))
y_true = labels.view(size=(-1,)).float()
corr = torch.eq(y_pred, y_true)
acc = torch.sum(corr.float())/labels.size()[0]
return acc
def validation_step(self, batch, batch_idx):
output = self.model(input_ids=batch['input_ids'],
attention_mask=batch['attention_mask'], labels=batch['labels'])
# output = self.model(input_ids=batch['input_ids'], labels=batch['labels'])
# acc = self.comput_metrix(output.logits, batch['labels'])
self.log('val_loss', output.loss)
# self.log('val_acc', acc)
def configure_optimizers(self):
no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']
paras = list(
filter(lambda p: p[1].requires_grad, self.named_parameters()))
paras = [{
'params':
[p for n, p in paras if not any(nd in n for nd in no_decay)],
'weight_decay': self.args.weight_decay
}, {
'params': [p for n, p in paras if any(nd in n for nd in no_decay)],
'weight_decay': 0.0
}]
optimizer = torch.optim.AdamW(paras, lr=self.args.learning_rate)
scheduler = get_linear_schedule_with_warmup(
optimizer, int(self.total_step * self.args.warmup),
self.total_step)
return [{
'optimizer': optimizer,
'lr_scheduler': {
'scheduler': scheduler,
'interval': 'step',
'frequency': 1
}
}]
def main():
total_parser = argparse.ArgumentParser("Summary Task")
total_parser.add_argument(
'--do_eval_only', action='store_true', default=False)
total_parser.add_argument(
'--pretrained_model_path', default=None, type=str)
total_parser.add_argument('--output_save_path',
default='./predict.json', type=str)
# * Args for data preprocessing
total_parser = GPT2QADataModel.add_data_specific_args(total_parser)
# * Args for training
total_parser = Trainer.add_argparse_args(total_parser)
total_parser = GPT2FinetuneMedicalQAModelCheckpoint.add_argparse_args(
total_parser)
total_parser = GPT2FinetuneMedicalQA.add_model_specific_args(total_parser)
# * Args for base model
args = total_parser.parse_args()
data_model = GPT2QADataModel(args)
if not args.do_eval_only:
model = GPT2FinetuneMedicalQA(args, len(data_model.train_dataloader()))
checkpoint_callback = GPT2FinetuneMedicalQAModelCheckpoint(
args).callbacks
logger = loggers.TensorBoardLogger(save_dir=os.path.join(
args.default_root_dir, 'log/'), name='MedicalQA-GPT2')
trainer = Trainer.from_argparse_args(args,
logger=logger,
callbacks=[checkpoint_callback]
)
trainer.fit(model, data_model)
# result = trainer.predict(model, data_model)
# with open('test_results.txt', 'wt', encoding='utf-8') as w:
# for line in result:
# w.writelines(line)
model.model.save_pretrained(
'/cognitive_comp/wuziwei/pretrained_model_hf')
else:
print('save to hf.....')
trainer = Trainer.from_argparse_args(args)
model = GPT2FinetuneMedicalQA(
args, len(data_model.predict_dataloader()))
result = trainer.predict(
model, data_model, ckpt_path='/cognitive_comp/wuziwei/task/fs_medical_qa_finetune/ckpt/last.ckpt')
# with open('test_results.txt','wt',encoding='utf-8') as w:
# for line in result:
# w.writelines(line)
model.model.save_pretrained(
'/cognitive_comp/wuziwei/pretrained_model_hf')
if __name__ == '__main__':
main()
| 7,423 | 40.943503 | 110 | py |
Fengshenbang-LM | Fengshenbang-LM-main/fengshen/examples/wenzhong_qa/finetune_wenzhong.py | # sys.path.append('./')
import os
import torch
import argparse
import pytorch_lightning as pl
from pytorch_lightning.callbacks import ModelCheckpoint
from pytorch_lightning import Trainer, loggers
from transformers.optimization import get_linear_schedule_with_warmup
from transformers import GPT2LMHeadModel
from fengshen.data.task_dataloader.medicalQADataset import GPT2QADataModel
class GPT2FinetuneMedicalQAModelCheckpoint:
@staticmethod
def add_argparse_args(parent_args):
parser = parent_args.add_argument_group('BaseModel')
parser.add_argument('--monitor', default='train_loss', type=str)
parser.add_argument('--mode', default='min', type=str)
parser.add_argument('--dirpath', default='./ckpt/', type=str)
parser.add_argument(
'--filename', default='model-{epoch:02d}-{train_loss:.4f}', type=str)
parser.add_argument('--save_last', action='store_true', default=True)
parser.add_argument('--save_top_k', default=3, type=float)
parser.add_argument('--every_n_train_steps', default=100, type=float)
parser.add_argument('--save_weights_only', default=True, type=bool)
return parent_args
def __init__(self, args):
self.callbacks = ModelCheckpoint(monitor=args.monitor,
save_top_k=args.save_top_k,
mode=args.mode,
every_n_train_steps=args.every_n_train_steps,
save_weights_only=args.save_weights_only,
dirpath=args.dirpath,
filename=args.filename,
save_last=args.save_last)
class GPT2FinetuneMedicalQA(pl.LightningModule):
@staticmethod
def add_model_specific_args(parent_args):
parser = parent_args.add_argument_group('BaseModel')
parser.add_argument('--learning_rate', default=1e-4, type=float)
parser.add_argument('--weight_decay', default=0.1, type=float)
parser.add_argument('--warmup', default=0.01, type=float)
return parent_args
def __init__(self, args, num_data):
super().__init__()
self.args = args
self.num_data = num_data
print('num_data:', num_data)
self.model = GPT2LMHeadModel.from_pretrained(args.pretrained_model_path)
def setup(self, stage) -> None:
if stage == 'fit':
num_gpus = self.trainer.gpus if self.trainer.gpus is not None else 0
self.total_step = int(self.trainer.max_epochs * self.num_data
/ (max(1, num_gpus) * self.trainer.accumulate_grad_batches))
print('Total training step:', self.total_step)
def training_step(self, batch, batch_idx):
output = self.model(
input_ids=batch['input_ids'], attention_mask=batch['attention_mask'], labels=batch['labels'])
# output = self.model(input_ids=batch['input_ids'], labels=batch['labels'])
# acc = self.comput_metrix(output.logits, batch['labels'])
self.log('train_loss', output.loss)
return output.loss
def comput_metrix(self, logits, labels):
y_pred = torch.argmax(logits, dim=-1)
y_pred = y_pred.view(size=(-1,))
y_true = labels.view(size=(-1,)).float()
corr = torch.eq(y_pred, y_true)
acc = torch.sum(corr.float()) / labels.size()[0]
return acc
def validation_step(self, batch, batch_idx):
output = self.model(
input_ids=batch['input_ids'], attention_mask=batch['attention_mask'], labels=batch['labels'])
# output = self.model(input_ids=batch['input_ids'], labels=batch['labels'])
# acc = self.comput_metrix(output.logits, batch['labels'])
self.log('val_loss', output.loss)
# self.log('val_acc', acc)
def configure_optimizers(self):
no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']
paras = list(
filter(lambda p: p[1].requires_grad, self.named_parameters()))
paras = [{
'params':
[p for n, p in paras if not any(nd in n for nd in no_decay)],
'weight_decay': self.args.weight_decay
}, {
'params': [p for n, p in paras if any(nd in n for nd in no_decay)],
'weight_decay': 0.0
}]
optimizer = torch.optim.AdamW(paras, lr=self.args.learning_rate)
scheduler = get_linear_schedule_with_warmup(
optimizer, int(self.total_step * self.args.warmup),
self.total_step)
return [{
'optimizer': optimizer,
'lr_scheduler': {
'scheduler': scheduler,
'interval': 'step',
'frequency': 1
}
}]
def main():
total_parser = argparse.ArgumentParser("QA Task")
total_parser.add_argument('--do_eval_only', action='store_true', default=False)
total_parser.add_argument('--pretrained_model_path', default='google/mt5-small', type=str)
total_parser.add_argument('--output_save_path', default='./predict.json', type=str)
# * Args for data preprocessing
total_parser = GPT2QADataModel.add_data_specific_args(total_parser)
# * Args for training
total_parser = Trainer.add_argparse_args(total_parser)
total_parser = GPT2FinetuneMedicalQAModelCheckpoint.add_argparse_args(total_parser)
total_parser = GPT2FinetuneMedicalQA.add_model_specific_args(total_parser)
# * Args for base model
args = total_parser.parse_args()
data_model = GPT2QADataModel(args)
if not args.do_eval_only:
model = GPT2FinetuneMedicalQA(args, len(data_model.train_dataloader()))
checkpoint_callback = GPT2FinetuneMedicalQAModelCheckpoint(args).callbacks
logger = loggers.TensorBoardLogger(save_dir=os.path.join(
args.default_root_dir, 'log/'), name='WenZhong')
trainer = Trainer.from_argparse_args(args,
logger=logger,
callbacks=[checkpoint_callback]
)
trainer.fit(model, data_model)
if __name__ == '__main__':
main()
# test()
'''
# python examples/mt5_summary.py --gpus=1 --test_data=test_public.jsonl
# --default_root_dir=/cognitive_comp/ganruyi/fengshen/mt5_summary/eval
# --do_eval_only
# --resume_from_checkpoint=/cognitive_comp/ganruyi/fengshen/mt5_summary/ckpt/model-epoch=01-train_loss=1.9166.ckpt
# --strategy=ddp
'''
| 6,611 | 41.935065 | 114 | py |
Fengshenbang-LM | Fengshenbang-LM-main/fengshen/examples/finetune_bart_qg/utils.py | # -*- encoding: utf-8 -*-
'''
Copyright 2022 The International Digital Economy Academy (IDEA). CCNL team. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
@File : utils.py
@Time : 2022/10/28 18:27
@Author : Qi Yang
@Version : 1.0
@Contact : yangqi@idea.edu.cn
@License : (C)Copyright 2022-2023, CCNL-IDEA
'''
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import torch
import torch.nn.functional as F
class LabelSmoothingCrossEntropy(torch.nn.Module):
def __init__(self, smoothing=0.1):
super(LabelSmoothingCrossEntropy, self).__init__()
self.smoothing = smoothing
self.ignore_index = -100
def forward(self, x, target):
confidence = 1. - self.smoothing
logprobs = F.log_softmax(x, dim=-1)
targets_ignore = torch.where(target != self.ignore_index, target, 0)
nll_loss = -logprobs.gather(dim=-1, index=targets_ignore.unsqueeze(1))
nll_loss = nll_loss.squeeze(1)
smooth_loss = -logprobs.mean(dim=-1)
loss = confidence * nll_loss + self.smoothing * smooth_loss
return loss.mean()
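# Minimal usage sketch (toy tensors). Note that this implementation remaps
# ignore_index targets to class 0 for the gather but does not mask them out
# of the mean, so -100 positions would still contribute to the loss:
# >>> crit = LabelSmoothingCrossEntropy(smoothing=0.1)
# >>> logits = torch.randn(4, 10)
# >>> target = torch.tensor([1, 2, 3, 4])
# >>> loss = crit(logits, target)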
def truncate_sequence(document: str, max_num_tokens: int, reverse=False):
total_length = len(document)
if total_length <= max_num_tokens:
return document
else:
if reverse:
return document[-1*max_num_tokens:]
else:
return document[:max_num_tokens]
def padding_to_maxlength(ids, max_length, pad_id):
cur_len = len(ids)
len_diff = max_length - len(ids)
return ids + [pad_id] * len_diff, [1] * cur_len + [0] * len_diff
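# e.g. padding_to_maxlength([7, 8], 4, 0) -> ([7, 8, 0, 0], [1, 1, 0, 0])
# (ids are illustrative; the second list is the attention mask)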
def white_space_fix(text):
return "".join(text.split(" "))
def remove_prompt(text):
if ":" in text:
return text.split(":")[1]
return text
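# e.g. remove_prompt("问题:什么是量子计算") -> "什么是量子计算"
# (splits on the full-width colon used by the prompts in this script)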
| 2,208 | 30.112676 | 96 | py |
Fengshenbang-LM | Fengshenbang-LM-main/fengshen/examples/finetune_bart_qg/finetune_bart.py | # -*- encoding: utf-8 -*-
'''
Copyright 2022 The International Digital Economy Academy (IDEA). CCNL team. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
@File : finetune_bart.py
@Time : 2022/10/28 18:23
@Author : Qi Yang
@Version : 1.0
@Contact : yangqi@idea.edu.cn
@License : (C)Copyright 2022-2023, CCNL-IDEA
'''
from fengshen.models.model_utils import configure_optimizers
from fengshen.data.universal_datamodule import UniversalDataModule
from fengshen.utils.universal_checkpoint import UniversalCheckpoint
from fengshen.utils import chinese_char_tokenize
from utils import truncate_sequence, white_space_fix
from utils import LabelSmoothingCrossEntropy
import sys
import os
import torch
import argparse
import pytorch_lightning as pl
from dataclasses import dataclass
from pytorch_lightning import Trainer
from pytorch_lightning.callbacks import LearningRateMonitor
from transformers import BartForConditionalGeneration
from transformers import BertTokenizer, AutoTokenizer
from torchmetrics.text.rouge import ROUGEScore
sys.path.append('../../../')
@dataclass
class QGT5Collator:
@ staticmethod
def add_data_specific_args(parent_args):
# the hyperparameters should be determined according to the max length of context in dataset
        parser = parent_args.add_argument_group('BART QG Collator')
parser.add_argument('--max_seq_length', default=512, type=int)
parser.add_argument('--max_src_length', default=32, type=int)
parser.add_argument('--max_kno_length', default=416, type=int)
parser.add_argument('--max_tgt_length', default=64, type=int)
parser.add_argument('--mask_ans_style',
default='normal',
type=str,
choices=['normal', 'unmask', 'anstoken', 'postag', 'anstoken_multispan', 'postag_multispan', 'normal_multispan'])
return parent_args
def __init__(self, tokenizer, args):
self.args = args
self.tokenizer = tokenizer
self.max_seq_length = args.max_seq_length
self.print_example = True
self.mask_ans_style = args.mask_ans_style
self.do_eval_only = args.do_eval_only
self.tokenizer_type = args.tokenizer_type
def encode(self, x, y):
        if self.tokenizer_type == "bert":
            # BertTokenizer adds [CLS]/[SEP] on its own; leave the text unchanged
            pass
        else:
            # sentence-piece style tokenizer: add explicit BOS/EOS markers
            x = self.tokenizer.bos_token + x + self.tokenizer.eos_token
            y = y + self.tokenizer.eos_token
encoder_input = self.tokenizer.encode_plus(
x,
max_length=self.args.max_kno_length + self.args.max_src_length,
padding="max_length",
truncation=True,
return_tensors='pt'
)
decoder_output = self.tokenizer.encode_plus(
y,
max_length=self.args.max_tgt_length,
padding="max_length",
truncation=True,
return_tensors='pt'
)
return encoder_input, decoder_output
def mask(self, s):
        def replace_span(source, target, sptoken):
            # replaces only the first answer span, read from the sample `s` in
            # the enclosing scope (`target` is unused here)
            ans_bos, ans_eos = s["ans_span"][0]
            return source[:ans_bos] + sptoken + source[ans_eos:]
def replace_all(source, target, sptoken):
return source.replace(target, sptoken)
if 'multispan' in self.mask_ans_style:
fn = replace_all
else:
fn = replace_span
        # unmask: keep the answer in place, e.g. 北京是中国的首都 ("Beijing is the capital of China")
if 'unmask' in self.mask_ans_style:
return s["context"]
        # normal: replace the answer with the mask token, e.g. 北京是 <mask> 的首都
if 'normal' in self.mask_ans_style:
self.anstoken = self.tokenizer.mask_token
masked_context = fn(s["context"], s["answer"][0], self.anstoken)
return masked_context
        # anstoken: replace the answer with an answer token, e.g. 北京是 [ANS] 的首都
if 'anstoken' in self.mask_ans_style:
anstoken_dict = {
"bert": "[ANS]",
"bart": "<ans>"
}
self.anstoken = anstoken_dict[self.tokenizer_type]
masked_context = fn(s["context"], s["answer"][0], self.anstoken)
return masked_context
        # postag: wrap the answer with position tags, e.g. 北京是 <beg> 中国 <end> 的首都
        if 'postag' in self.mask_ans_style:
            # "<end>" matches the special token registered in get_tokenizer below
            begtoken, endtoken = "<beg>", "<end>"
self.anstoken = begtoken + s["answer"][0] + endtoken
masked_context = fn(s["context"], s["answer"][0], self.anstoken)
return masked_context
        raise ValueError(f'unknown mask_ans_style: {self.mask_ans_style}')
def prompt(self, context, answer, question):
        pre_prompt, mid_prompt, post_prompt = "知识:", "回答:", "问题:"  # prompts: knowledge / answer / question
context = truncate_sequence(context, self.args.max_kno_length-len(pre_prompt)-1)
# used in squad-2.0
        # note that src and tgt are reversed in QG
answer = truncate_sequence(answer, self.args.max_src_length - len(mid_prompt)-1)
question = truncate_sequence(question, self.args.max_tgt_length-len(post_prompt)-1)
x_trunc = f'{pre_prompt}{context}{mid_prompt}{answer}'
y_trunc = f'{post_prompt}{question}'
return x_trunc, y_trunc
def __call__(self, samples):
"""
        ans_num = 1: used for Train data, which has a single answer (the first one is taken)
        ans_num > 1: used for Dev data, which may carry multiple answers
Input:
input_ids: input_ids (text + answer)
attn_mask: input attn mask
labels: decoder_ids (question)
"""
input_ids, attn_mask, labels = [], [], []
ans, qes, ctx, ans_spans, idxs, imp = [], [], [], [], [], []
for s in samples:
if self.do_eval_only:
                # keep the original answers for comparison at eval time
ans.append(s["answer"])
qes.append(s["question"])
ctx.append(s["context"])
ans_spans.append(s["ans_span"])
idxs.append(s["idx"])
if "is_impossible" in s:
imp.append(s["is_impossible"])
else:
                    imp.append(False)  # SQuAD 1.0 has no is_impossible field
if not s["is_impossible"]: # have ans and ans_span
context = self.mask(s)
answer = s["answer"][0]
question = s["question"]
else: # no ans and ans_span
context = s["context"]
answer = "无答案"
question = s["question"]
x_trunc, y_trunc = self.prompt(context, answer, question)
encoder_input, decoder_output = self.encode(x_trunc, y_trunc)
input_ids.append(encoder_input["input_ids"])
attn_mask.append(encoder_input["attention_mask"])
labels.append(decoder_output["input_ids"])
labels = torch.cat(labels)
if self.tokenizer_type == "bart":
end_token_index = torch.where(labels == self.tokenizer.eos_token_id)[1]
else:
end_token_index = torch.where(labels == self.tokenizer.sep_token_id)[1]
for idx, end_idx in enumerate(end_token_index):
            labels[idx][end_idx + 1:] = -100  # ignored by the cross-entropy loss
data = {
'input_ids': torch.cat(input_ids),
'attention_mask': torch.cat(attn_mask),
'labels': labels
}
if self.do_eval_only:
data.update({
'answer': ans,
'question': qes,
'context': ctx,
'ans_span': ans_spans,
'idx': idxs,
'is_impossible': imp
})
if self.print_example:
print(x_trunc)
print(y_trunc)
self.print_example = False
return data
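# Batch layout sketch (illustrative, assuming batch size B and the default
# lengths above): input_ids and attention_mask are (B, 448) for the
# knowledge+answer side (max_kno_length + max_src_length), and labels is
# (B, 64) (max_tgt_length), with positions after the end token set to -100 so
# the cross-entropy loss ignores them.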
class BARTFinetuneModel(pl.LightningModule):
@staticmethod
def add_model_specific_args(parent_args):
parser = parent_args.add_argument_group('BaseModel')
parser.add_argument('--model_path', type=str, default='')
parser.add_argument('--learning_rate', default=1e-5, type=float)
parser.add_argument('--min_learning_rate', default=1e-7, type=float)
parser.add_argument('--lr_decay_steps', default=0, type=int)
parser.add_argument('--lr_decay_ratio', default=1.0, type=float)
parser.add_argument('--weight_decay', default=0.1, type=float)
parser.add_argument('--warmup_steps', default=1000, type=int)
parser.add_argument('--warmup_ratio', default=0.01, type=float)
parser.add_argument('--label_smooth', default=0, type=float)
parser.add_argument('--new_token_path', default="./", type=str) # save new token after add special token
parser.add_argument('--adam_beta1', default=0.9, type=float)
parser.add_argument('--adam_beta2', default=0.999, type=float)
parser.add_argument('--adam_epsilon', default=1e-8, type=float)
parser.add_argument('--scheduler_type', default='polynomial', type=str)
return parent_args
def __init__(self, tokenizer, args):
super().__init__()
self.save_hyperparameters(args)
self.model = BartForConditionalGeneration.from_pretrained(args.model_path)
self.tokenizer = tokenizer
# add special token ans
# self.tokenizer.save_vocabulary(self.args.model_path)
new_vocab = args.model_path+"/sp_vocab/"
if not os.path.exists(new_vocab):
os.makedirs(new_vocab)
self.tokenizer.save_pretrained(new_vocab)
self.model.resize_token_embeddings(len(tokenizer))
self.vocab_size = len(tokenizer)
self.rougescore = ROUGEScore(rouge_keys=('rougeL'), normalizer=lambda x: x)
if self.hparams.label_smooth:
self.loss_fct = LabelSmoothingCrossEntropy(smoothing=0.1)
def setup(self, stage) -> None:
if stage == 'fit':
train_loader = self.trainer._data_connector._train_dataloader_source.dataloader()
# Calculate total steps
if self.trainer.max_epochs > 0:
world_size = self.trainer.world_size
tb_size = self.hparams.train_batchsize * max(1, world_size)
ab_size = self.trainer.accumulate_grad_batches * float(self.trainer.max_epochs)
self.total_steps = (len(train_loader.dataset) *
self.trainer.max_epochs // tb_size) // ab_size
else:
self.total_steps = self.trainer.max_steps // self.trainer.accumulate_grad_batches
print('Total steps: {}' .format(self.total_steps))
def configure_optimizers(self):
return configure_optimizers(self)
def training_step(self, batch, batch_idx):
output = self.model(
input_ids=batch['input_ids'],
attention_mask=batch['attention_mask'],
labels=batch['labels'])
loss = output.loss
if self.hparams.label_smooth:
loss = self.loss_fct(output.logits.view(-1, self.vocab_size), batch["labels"].view(-1))
self.log('train_loss', loss, sync_dist=True)
return loss
def validation_step(self, batch, batch_idx):
output = self.model(
input_ids=batch['input_ids'],
attention_mask=batch['attention_mask'],
labels=batch['labels'])
acc = self.compute_acc(output.logits, batch['labels'])
self.log('val_loss', output.loss, sync_dist=True)
self.log('val_acc', acc, sync_dist=True)
self.log('val_ppl', torch.exp(output.loss), sync_dist=True)
cond_output = self.model.generate(
input_ids=batch['input_ids'],
attention_mask=batch['attention_mask'],
do_sample=True,
num_beams=5,
early_stopping=True,
max_length=64,
top_p=0.9,
)
batch_label = torch.where(batch["labels"] != -100, batch["labels"], self.tokenizer.pad_token_id)
pred = self.tokenizer.batch_decode(cond_output, clean_up_tokenization_spaces=True, skip_special_tokens=True)
ques = self.tokenizer.batch_decode(batch_label, clean_up_tokenization_spaces=True, skip_special_tokens=True)
pred = [chinese_char_tokenize(white_space_fix(p)) for p in pred]
ques = [chinese_char_tokenize(white_space_fix(q)) for q in ques]
self.rougescore.update(pred, ques)
return pred
def validation_epoch_end(self, validation_step_outputs):
rouge = self.rougescore.compute()
self.log('val_rouge', rouge["rougeL_fmeasure"], sync_dist=True)
def on_predict_start(self):
self.loss_fct = torch.nn.CrossEntropyLoss(reduction='none')
def predict_step(self, batch, batch_idx):
output = self.model(
input_ids=batch['input_ids'],
attention_mask=batch['attention_mask'],
labels=batch['labels'])
loss_tensor = self.loss_fct(output.logits.transpose(1, 2), batch["labels"])
if self.hparams.tokenizer_type == 'bart':
eos_index = torch.where(batch['labels'] == self.tokenizer.eos_token_id)[1]
elif self.hparams.tokenizer_type == 'bert':
eos_index = torch.where(batch['labels'] == self.tokenizer.sep_token_id)[1]
        # per-sample loss, normalized by sequence length (the end-token index)
        loss = torch.sum(loss_tensor, dim=1) / eos_index
with torch.no_grad():
cond_output = self.model.generate(
input_ids=batch['input_ids'],
attention_mask=batch['attention_mask'],
do_sample=True,
num_beams=5,
max_length=64,
top_p=0.9,
output_scores=True,
return_dict_in_generate=True
)
pred = self.tokenizer.batch_decode(
cond_output.sequences, clean_up_tokenization_spaces=True, skip_special_tokens=True) # ['sequences']
        pred = [white_space_fix(p) for p in pred]  # strip white space from the decoded text
score = cond_output.sequences_scores
return pred, score, loss
def compute_acc(self, logits, labels):
y_pred = torch.argmax(logits, dim=-1)
y_pred = y_pred.view(size=(-1,))
y_true = labels.view(size=(-1,)).float()
corr = torch.eq(y_pred, y_true)
acc = torch.sum(corr.float())/y_true.shape[0]
return acc
def on_save_checkpoint(self, checkpoint) -> None:
if self.trainer._accelerator_connector.cluster_environment.global_rank() == 0:
self.model.save_pretrained(os.path.join(
self.trainer.checkpoint_callback.dirpath,
'hf_pretrained_epoch{}_step{}'.format(checkpoint['epoch'], checkpoint['global_step'])))
def on_load_checkpoint(self, checkpoint) -> None:
global_step_offset = checkpoint["global_step"]
if 'global_samples' in checkpoint:
self.consumed_samples = checkpoint['global_samples']
self.trainer.fit_loop.epoch_loop._batches_that_stepped = global_step_offset
def get_tokenizer(tokenizer_type, pretrained_model_path):
if tokenizer_type == 'bart':
tokenizer = AutoTokenizer.from_pretrained(
pretrained_model_path, use_fast=False, additional_special_tokens=["<ans>", "<beg>", "<end>"])
print(len(tokenizer))
elif tokenizer_type == 'bert':
tokenizer = BertTokenizer.from_pretrained(
pretrained_model_path, use_fast=False, additional_special_tokens=["[ANS]"])
return tokenizer
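# Usage sketch (the path below is a placeholder, not a real checkpoint):
#   tokenizer = get_tokenizer('bart', '/path/to/pretrained-bart')
# The model later calls resize_token_embeddings(len(tokenizer)) so the newly
# added special tokens ("<ans>", "<beg>", "<end>" for BART; "[ANS]" for BERT)
# receive embeddings.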
def main():
total_parser = argparse.ArgumentParser("Finetune BART for QG")
total_parser.add_argument('--do_eval_only', action='store_true', default=False)
total_parser.add_argument('--tokenizer_type', type=str, default="bart", choices=['bart', 'bert'])
total_parser.add_argument('--tensorboard_dir', type=str, default="bart")
total_parser.add_argument('--deepspeed')
total_parser = UniversalDataModule.add_data_specific_args(total_parser)
total_parser = QGT5Collator.add_data_specific_args(total_parser)
total_parser = Trainer.add_argparse_args(total_parser)
total_parser = UniversalCheckpoint.add_argparse_args(total_parser)
total_parser = BARTFinetuneModel.add_model_specific_args(total_parser)
args = total_parser.parse_args()
tokenizer = get_tokenizer(args.tokenizer_type, args.model_path)
collator = QGT5Collator(tokenizer=tokenizer, args=args)
data_model = UniversalDataModule(collate_fn=collator, tokenizer=tokenizer, args=args)
print("Data load complete...")
if args.deepspeed is not None:
os.environ['PL_DEEPSPEED_CONFIG_PATH'] = args.deepspeed
model = BARTFinetuneModel(tokenizer, args)
checkpoint_callback = UniversalCheckpoint(args)
lr_monitor = LearningRateMonitor(logging_interval='step')
trainer = Trainer.from_argparse_args(args,
callbacks=[checkpoint_callback, lr_monitor]
)
if not args.do_eval_only:
trainer.fit(model, data_model)
if __name__ == '__main__':
main()
| 17,301 | 39.237209 | 141 | py |
Fengshenbang-LM | Fengshenbang-LM-main/fengshen/examples/zen1_finetune/fengshen_token_level_ft_task.py | # coding=utf-8
# Copyright 2021 The IDEA Authors. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from fengshen.models.zen1.ngram_utils import ZenNgramDict
from fengshen.models.zen1.modeling import ZenForTokenClassification
from fengshen.metric.metric import SeqEntityScore
from fengshen.models.zen1.tokenization import BertTokenizer
from random import shuffle
from pytorch_lightning.callbacks import LearningRateMonitor
from dataclasses import dataclass
import logging
import math
import numpy as np
import os
import json
import torch
import pytorch_lightning as pl
import argparse
from pytorch_lightning.callbacks import ModelCheckpoint
from torch.utils.data import Dataset, DataLoader
import torch.nn.functional as F
logging.basicConfig(format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',
datefmt='%m/%d/%Y %H:%M:%S',
level=logging.ERROR)
logger = logging.getLogger(__name__)
class InputExample(object):
"""A single training/test example for simple sequence classification."""
def __init__(self, guid, text_a, text_b=None, label=None):
"""Constructs a InputExample.
Args:
guid: Unique id for the example.
text_a: string. The untokenized text of the first sequence. For single
sequence tasks, only this sequence must be specified.
text_b: (Optional) string. The untokenized text of the second sequence.
Only must be specified for sequence pair tasks.
label: (Optional) string. The label of the example. This should be
specified for train and dev examples, but not for test examples.
"""
self.guid = guid
self.text_a = text_a
self.text_b = text_b
self.label = label
class InputFeatures(object):
"""A single set of features of data."""
def __init__(self, input_ids, input_mask, segment_ids, label_id, ngram_ids, ngram_positions, ngram_lengths,
ngram_tuples, ngram_seg_ids, ngram_masks, valid_ids=None, label_mask=None):
self.input_ids = input_ids
self.input_mask = input_mask
self.segment_ids = segment_ids
self.label_id = label_id
self.valid_ids = valid_ids
self.label_mask = label_mask
self.ngram_ids = ngram_ids
self.ngram_positions = ngram_positions
self.ngram_lengths = ngram_lengths
self.ngram_tuples = ngram_tuples
self.ngram_seg_ids = ngram_seg_ids
self.ngram_masks = ngram_masks
def convert_examples_to_features(examples, label_map, max_seq_length, tokenizer, ngram_dict):
"""Loads a data file into a list of `InputBatch`s."""
# label_map = {label: i for i, label in enumerate(label_list, 1)}
features = []
for (ex_index, example) in enumerate(examples):
textlist = example.text_a
labellist = example.label
tokens = []
labels = []
valid = []
label_mask = []
for i, word in enumerate(textlist):
token = tokenizer.tokenize(word)
tokens.extend(token)
label_1 = labellist[i]
for m in range(len(token)):
if m == 0:
labels.append(label_1)
valid.append(1)
label_mask.append(1)
else:
valid.append(0)
if len(tokens) >= max_seq_length - 1:
tokens = tokens[0:(max_seq_length - 2)]
labels = labels[0:(max_seq_length - 2)]
valid = valid[0:(max_seq_length - 2)]
label_mask = label_mask[0:(max_seq_length - 2)]
ntokens = []
segment_ids = []
label_ids = []
ntokens.append("[CLS]")
segment_ids.append(0)
valid.insert(0, 1)
label_mask.insert(0, 1)
label_ids.append(label_map["[CLS]"])
for i, token in enumerate(tokens):
ntokens.append(token)
segment_ids.append(0)
if len(labels) > i:
label_ids.append(label_map[labels[i]])
ntokens.append("[SEP]")
segment_ids.append(0)
valid.append(1)
label_mask.append(1)
label_ids.append(label_map["[SEP]"])
input_ids = tokenizer.convert_tokens_to_ids(ntokens)
input_mask = [1] * len(input_ids)
label_mask = [1] * len(label_ids)
while len(input_ids) < max_seq_length:
input_ids.append(0)
input_mask.append(0)
segment_ids.append(0)
label_ids.append(0)
valid.append(1)
label_mask.append(0)
while len(label_ids) < max_seq_length:
label_ids.append(0)
label_mask.append(0)
assert len(input_ids) == max_seq_length
assert len(input_mask) == max_seq_length
assert len(segment_ids) == max_seq_length
assert len(label_ids) == max_seq_length
assert len(valid) == max_seq_length
assert len(label_mask) == max_seq_length
# ----------- code for ngram BEGIN-----------
ngram_matches = []
        # Scan segments of length 2 to 7 and check whether each one is a known ngram
for p in range(2, 8):
for q in range(0, len(tokens) - p + 1):
character_segment = tokens[q:q + p]
                # q is the starting position of the ngram
                # p is the length of the current ngram
character_segment = tuple(character_segment)
if character_segment in ngram_dict.ngram_to_id_dict:
ngram_index = ngram_dict.ngram_to_id_dict[character_segment]
ngram_matches.append([ngram_index, q, p, character_segment])
shuffle(ngram_matches)
max_ngram_in_seq_proportion = math.ceil((len(tokens) / max_seq_length) * ngram_dict.max_ngram_in_seq)
if len(ngram_matches) > max_ngram_in_seq_proportion:
ngram_matches = ngram_matches[:max_ngram_in_seq_proportion]
ngram_ids = [ngram[0] for ngram in ngram_matches]
ngram_positions = [ngram[1] for ngram in ngram_matches]
ngram_lengths = [ngram[2] for ngram in ngram_matches]
ngram_tuples = [ngram[3] for ngram in ngram_matches]
ngram_seg_ids = [0 if position < (len(tokens) + 2) else 1 for position in ngram_positions]
        ngram_mask_array = np.zeros(ngram_dict.max_ngram_in_seq, dtype=bool)
ngram_mask_array[:len(ngram_ids)] = 1
# record the masked positions
ngram_positions_matrix = np.zeros(shape=(max_seq_length, ngram_dict.max_ngram_in_seq), dtype=np.int32)
for i in range(len(ngram_ids)):
ngram_positions_matrix[ngram_positions[i]:ngram_positions[i] + ngram_lengths[i], i] = 1.0
# Zero-pad up to the max ngram in seq length.
padding = [0] * (ngram_dict.max_ngram_in_seq - len(ngram_ids))
ngram_ids += padding
ngram_lengths += padding
ngram_seg_ids += padding
# ----------- code for ngram END-----------
features.append(
InputFeatures(input_ids=input_ids,
input_mask=input_mask,
segment_ids=segment_ids,
label_id=label_ids,
ngram_ids=ngram_ids,
ngram_positions=ngram_positions_matrix,
ngram_lengths=ngram_lengths,
ngram_tuples=ngram_tuples,
ngram_seg_ids=ngram_seg_ids,
ngram_masks=ngram_mask_array,
valid_ids=valid,
label_mask=label_mask))
return features
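# Illustrative sketch (hypothetical sizes, not used by training): how one ngram
# match [index, start=2, length=3] is encoded in the position matrix built above.
def _demo_ngram_position_matrix():
    matrix = np.zeros(shape=(8, 4), dtype=np.int32)  # (max_seq_length, max_ngram_in_seq)
    matrix[2:2 + 3, 0] = 1  # column 0: the first matched ngram covers tokens 2..4
    return matrix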
class DataProcessor(object):
"""Base class for data converters for sequence classification data sets."""
def get_examples(self, data_path, set_type, quotechar=' '):
"""See base class."""
return self._create_examples(
self._read_tsv(data_path, self.get_quotechar()), set_type)
def _create_examples(self, lines, set_type):
examples = []
for i, (sentence, label) in enumerate(lines):
guid = "%s-%s" % (set_type, i)
text_a = sentence
label = label
examples.append(InputExample(guid=guid, text_a=text_a, label=label))
return examples
def get_labels(self):
"""Gets the list of labels for this data set."""
raise NotImplementedError()
def get_quotechar(self):
return ' '
@classmethod
def _read_tsv(cls, input_file, quotechar=None):
'''
read file
return format :
[ ['EU', 'B-ORG'], ['rejects', 'O'], ['German', 'B-MISC'], ['call', 'O'], ['to', 'O'], ['boycott', 'O'], ['British', 'B-MISC'], ['lamb', 'O'], ['.', 'O'] ]
'''
f = open(input_file)
data = []
sentence = []
label = []
for line in f:
if len(line) == 0 or line.startswith('-DOCSTART') or line[0] == "\n":
if len(sentence) > 0:
data.append((sentence, label))
sentence = []
label = []
continue
splits = line.split(quotechar)
sentence.append(splits[0])
label.append(splits[-1][:-1])
if len(sentence) > 0:
data.append((sentence, label))
sentence = []
label = []
return data
class MSRAProcessor(DataProcessor):
"""Processor for the msra data set."""
def get_labels(self):
return ['B-NR', 'B-NS', 'B-NT', 'E-NR', 'E-NS', 'E-NT', 'M-NR',
'M-NS', 'M-NT', 'O', 'S-NR', 'S-NS', 'S-NT', '[CLS]', '[SEP]']
class OntoNotes4Processor(DataProcessor):
"""Processor for the OntoNotes4 data set."""
def get_labels(self):
return ['B-GPE', 'B-LOC', 'B-ORG', 'B-PER', 'E-GPE', 'E-LOC',
'E-ORG', 'E-PER', 'M-GPE', 'M-LOC', 'M-ORG', 'M-PER', 'O',
'S-GPE', 'S-LOC', 'S-ORG', 'S-PER', '[CLS]', '[SEP]']
class WeiboProcessor(DataProcessor):
"""Processor for the Weibo data set."""
def get_labels(self):
return ['B-GPE.NAM', 'B-GPE.NOM', 'B-LOC.NAM', 'B-LOC.NOM',
'B-ORG.NAM', 'B-ORG.NOM', 'B-PER.NAM', 'B-PER.NOM', 'E-GPE.NAM',
'E-GPE.NOM', 'E-LOC.NAM', 'E-LOC.NOM', 'E-ORG.NAM', 'E-ORG.NOM',
'E-PER.NAM', 'E-PER.NOM', 'M-GPE.NAM', 'M-LOC.NAM', 'M-LOC.NOM',
'M-ORG.NAM', 'M-ORG.NOM', 'M-PER.NAM', 'M-PER.NOM', 'O',
'S-GPE.NAM', 'S-LOC.NOM', 'S-PER.NAM', 'S-PER.NOM', '[CLS]', '[SEP]']
class ResumeProcessor(DataProcessor):
"""Processor for the resume data set."""
def get_labels(self):
return ['B-CONT', 'B-EDU', 'B-LOC', 'B-NAME', 'B-ORG', 'B-PRO',
'B-RACE', 'B-TITLE', 'E-CONT', 'E-EDU', 'E-LOC', 'E-NAME',
'E-ORG', 'E-PRO', 'E-RACE', 'E-TITLE', 'M-CONT', 'M-EDU',
'M-LOC', 'M-NAME', 'M-ORG', 'M-PRO', 'M-RACE', 'M-TITLE',
'O', 'S-NAME', 'S-ORG', 'S-RACE', '[CLS]', '[SEP]']
class CMeEEProcessor(DataProcessor):
"""Processor for the CMeEE data set."""
def get_quotechar(self):
return '\t'
def get_labels(self):
return ['B-临床表现', 'B-医学检验项目', 'B-医疗程序', 'B-医疗设备',
'B-微生物类', 'B-疾病', 'B-科室', 'B-药物', 'B-身体', 'I-临床表现',
'I-医学检验项目', 'I-医疗程序', 'I-医疗设备', 'I-微生物类',
'I-疾病', 'I-科室', 'I-药物', 'I-身体', 'O', '[CLS]', '[SEP]']
class CLUENERProcessor(DataProcessor):
"""Processor for the CLUENER data set."""
def get_quotechar(self):
return '\t'
def get_labels(self):
return ['B-书名', 'B-公司', 'B-地址', 'B-姓名', 'B-政府', 'B-景点',
'B-游戏', 'B-电影', 'B-组织机构', 'B-职位', 'I-书名', 'I-公司',
'I-地址', 'I-姓名', 'I-政府', 'I-景点', 'I-游戏', 'I-电影',
'I-组织机构', 'I-职位', 'O', '[CLS]', '[SEP]']
class TaskDataset(Dataset):
def __init__(self, data_path, processor, mode='train'):
super().__init__()
self.data = self.load_data(data_path, processor, mode)
def __len__(self):
return len(self.data)
def __getitem__(self, index):
return self.data[index]
def load_data(self, data_path, processor, mode):
if mode == "train":
examples = processor.get_examples(data_path, mode)
elif mode == "test":
examples = processor.get_examples(data_path, mode)
elif mode == "dev":
examples = processor.get_examples(data_path, mode)
return examples
@dataclass
class TaskCollator:
args = None
tokenizer = None
ngram_dict = None
label2id = None
def __call__(self, samples):
features = convert_examples_to_features(samples, self.label2id, self.args.max_seq_length, self.tokenizer, self.ngram_dict)
# logger.info(" Num examples = %d", len(samples))
input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long)
input_mask = torch.tensor([f.input_mask for f in features], dtype=torch.long)
segment_ids = torch.tensor([f.segment_ids for f in features], dtype=torch.long)
label_ids = torch.tensor([f.label_id for f in features], dtype=torch.long)
valid_ids = torch.tensor([f.valid_ids for f in features], dtype=torch.long)
ngram_ids = torch.tensor([f.ngram_ids for f in features], dtype=torch.long)
ngram_positions = torch.tensor([f.ngram_positions for f in features], dtype=torch.long)
# ngram_lengths = torch.tensor([f.ngram_lengths for f in features], dtype=torch.long)
# ngram_seg_ids = torch.tensor([f.ngram_seg_ids for f in features], dtype=torch.long)
# ngram_masks = torch.tensor([f.ngram_masks for f in features], dtype=torch.long)
# label_mask = torch.tensor([f.label_mask for f in features], dtype=torch.long)
return {
'input_ids': input_ids,
'ngram_ids': ngram_ids,
'ngram_positions': ngram_positions,
'attention_mask': input_mask,
'token_type_ids': segment_ids,
'labels': label_ids,
'valid_ids': valid_ids,
}
class TaskDataModel(pl.LightningDataModule):
@staticmethod
def add_data_specific_args(parent_args):
parser = parent_args.add_argument_group('TASK NAME DataModel')
parser.add_argument('--data_dir', default='./data', type=str)
parser.add_argument('--num_workers', default=8, type=int)
parser.add_argument('--train_data', default='train.json', type=str)
parser.add_argument('--valid_data', default='dev.json', type=str)
parser.add_argument('--test_data', default='test.json', type=str)
parser.add_argument('--train_batchsize', default=16, type=int)
parser.add_argument('--valid_batchsize', default=32, type=int)
parser.add_argument('--max_seq_length', default=128, type=int)
parser.add_argument('--texta_name', default='text', type=str)
parser.add_argument('--textb_name', default='sentence2', type=str)
parser.add_argument('--label_name', default='label', type=str)
parser.add_argument('--id_name', default='id', type=str)
parser.add_argument('--dataset_name', default=None, type=str)
parser.add_argument('--vocab_file',
type=str, default=None,
help="Vocabulary mapping/file BERT was pretrainined on")
parser.add_argument("--do_lower_case",
action='store_true',
help="Set this flag if you are using an uncased model.")
parser.add_argument('--task_name', default='weibo', type=str)
return parent_args
def __init__(self, args):
super().__init__()
self.train_batchsize = args.train_batchsize
self.valid_batchsize = args.valid_batchsize
self.collator = TaskCollator()
self.collator.args = args
self.collator.tokenizer = BertTokenizer.from_pretrained(args.pretrained_model_path, do_lower_case=args.do_lower_case)
self.collator.ngram_dict = ZenNgramDict.from_pretrained(args.pretrained_model_path, tokenizer=self.collator.tokenizer)
processors = {
'weibo': WeiboProcessor,
'resume': ResumeProcessor,
'msra': MSRAProcessor,
'ontonotes4': OntoNotes4Processor,
'cmeee': CMeEEProcessor,
'cluener': CLUENERProcessor,
}
if args.task_name not in processors:
raise ValueError("Task not found: %s" % (args.task_name))
processor = processors[args.task_name]()
        # build the label-to-id mapping
label_list = processor.get_labels()
label2id = {label: i for i, label in enumerate(label_list, 1)}
label2id["[PAD]"] = 0
self.id2label = {v: k for k, v in label2id.items()}
self.collator.label2id = label2id
if args.dataset_name is None:
self.train_data = TaskDataset(os.path.join(
args.data_dir, args.train_data), processor, mode='train')
self.valid_data = TaskDataset(os.path.join(
args.data_dir, args.valid_data), processor, mode='dev')
self.test_data = TaskDataset(os.path.join(
args.data_dir, args.test_data), processor, mode='test')
else:
import datasets
ds = datasets.load_dataset(args.dataset_name)
self.train_data = ds['train']
self.valid_data = ds['validation']
self.test_data = ds['test']
self.save_hyperparameters(args)
def train_dataloader(self):
return DataLoader(self.train_data, shuffle=True, batch_size=self.train_batchsize, pin_memory=False,
collate_fn=self.collator)
def val_dataloader(self):
return DataLoader(self.valid_data, shuffle=False, batch_size=self.valid_batchsize, pin_memory=False,
collate_fn=self.collator)
def predict_dataloader(self):
return DataLoader(self.test_data, shuffle=False, batch_size=self.valid_batchsize, pin_memory=False,
collate_fn=self.collator)
class LitModel(pl.LightningModule):
@staticmethod
def add_model_specific_args(parent_args):
parser = parent_args.add_argument_group('BaseModel')
parser.add_argument('--markup', default='bios', type=str)
parser.add_argument('--middle_prefix', default='I-', type=str)
return parent_args
def __init__(self, args, id2label):
super().__init__()
# config = ZenConfig(os.path.join(args.pretrained_model_path, 'config.json'))
self.model = ZenForTokenClassification.from_pretrained(args.pretrained_model_path, num_labels=len(id2label))
self.seq_entity_score = SeqEntityScore(id2label, markup=args.markup, middle_prefix=args.middle_prefix)
self.train_seq_entity_score = SeqEntityScore(id2label, markup=args.markup, middle_prefix=args.middle_prefix)
self.id2label = id2label
self.label2id = {v: k for k, v in id2label.items()}
self.save_hyperparameters(args)
def setup(self, stage) -> None:
if stage == 'fit':
train_loader = self.trainer._data_connector._train_dataloader_source.dataloader()
# Calculate total steps
if self.trainer.max_epochs > 0:
world_size = self.trainer.world_size
tb_size = self.hparams.train_batchsize * max(1, world_size)
ab_size = self.trainer.accumulate_grad_batches
self.total_steps = (len(train_loader.dataset) *
self.trainer.max_epochs // tb_size) // ab_size
else:
self.total_steps = self.trainer.max_steps // self.trainer.accumulate_grad_batches
print('Total steps: {}' .format(self.total_steps))
def training_step(self, batch, batch_idx):
outputs = self.model(**batch)
loss, _ = outputs
# logits = outputs.logits
# preds = torch.argmax(F.log_softmax(logits, dim=2), dim=2)
# preds = preds.detach().cpu().numpy()
# labels = batch['labels'].detach().cpu().numpy()
# num_labels = len(self.label2id)
# y_true = []
# y_pred = []
# for i, label in enumerate(labels):
# temp_1 = []
# temp_2 = []
# for j, m in enumerate(label):
# if j == 0:
# continue
# elif labels[i][j] == num_labels - 1:
# y_true.append(temp_1)
# y_pred.append(temp_2)
# break
# else:
# temp_1.append(self.id2label[labels[i][j]])
# temp_2.append(self.id2label[preds[i][j]])
# self.train_seq_entity_score.update(y_true, y_pred)
# result = self.train_seq_entity_score.result()
# self.train_seq_entity_score.reset()
self.log('train_loss', loss)
return loss
def validation_step(self, batch, batch_idx):
outputs = self.model(**batch)
loss, logits = outputs
preds = torch.argmax(F.log_softmax(logits, dim=2), dim=2)
preds = preds.detach().cpu().numpy()
labels = batch['labels'].detach().cpu().numpy()
num_labels = len(self.label2id)
y_true = []
y_pred = []
for i, label in enumerate(labels):
temp_1 = []
temp_2 = []
for j, m in enumerate(label):
if j == 0:
continue
elif labels[i][j] == num_labels - 1:
y_true.append(temp_1)
y_pred.append(temp_2)
break
else:
temp_1.append(self.id2label[labels[i][j]])
temp_2.append(self.id2label[preds[i][j]])
self.seq_entity_score.update(y_true, y_pred)
self.log('val_loss', loss)
def validation_epoch_end(self, outputs):
# compute metric for all process
score_dict, _ = self.seq_entity_score.result()
if self.trainer._accelerator_connector.cluster_environment.global_rank() == 0:
print('score_dict:\n', score_dict)
# reset the metric after once validation
self.seq_entity_score.reset()
for k, v in score_dict.items():
self.log('val_{}'.format(k), v)
def configure_optimizers(self):
from fengshen.models.model_utils import configure_optimizers
return configure_optimizers(self)
class TaskModelCheckpoint:
@staticmethod
def add_argparse_args(parent_args):
parser = parent_args.add_argument_group('BaseModel')
parser.add_argument('--monitor', default='train_loss', type=str)
parser.add_argument('--mode', default='min', type=str)
parser.add_argument('--dirpath', default='./log/', type=str)
parser.add_argument(
'--filename', default='model-{epoch:02d}-{train_loss:.4f}', type=str)
parser.add_argument('--save_top_k', default=3, type=float)
parser.add_argument('--every_n_train_steps', default=100, type=float)
parser.add_argument('--save_weights_only', default=True, type=bool)
return parent_args
def __init__(self, args):
self.callbacks = ModelCheckpoint(monitor=args.monitor,
save_top_k=args.save_top_k,
mode=args.mode,
every_n_train_steps=args.every_n_train_steps,
save_weights_only=args.save_weights_only,
dirpath=args.dirpath,
filename=args.filename)
def save_test(data, args, data_model):
with open(args.output_save_path, 'w', encoding='utf-8') as f:
idx = 0
for i in range(len(data)):
batch = data[i]
for sample in batch:
tmp_result = dict()
label_id = np.argmax(sample.numpy())
tmp_result['id'] = data_model.test_data.data[idx]['id']
tmp_result['label'] = data_model.id2label[label_id]
json_data = json.dumps(tmp_result, ensure_ascii=False)
f.write(json_data+'\n')
idx += 1
print('save the result to '+args.output_save_path)
def main():
total_parser = argparse.ArgumentParser("TASK NAME")
total_parser.add_argument('--pretrained_model_path', default='', type=str)
total_parser.add_argument('--output_save_path',
default='./predict.json', type=str)
# * Args for data preprocessing
total_parser = TaskDataModel.add_data_specific_args(total_parser)
# * Args for training
total_parser = pl.Trainer.add_argparse_args(total_parser)
total_parser = TaskModelCheckpoint.add_argparse_args(total_parser)
# * Args for base model
from fengshen.models.model_utils import add_module_args
total_parser = add_module_args(total_parser)
total_parser = LitModel.add_model_specific_args(total_parser)
args = total_parser.parse_args()
checkpoint_callback = TaskModelCheckpoint(args).callbacks
lr_monitor = LearningRateMonitor(logging_interval='step')
trainer = pl.Trainer.from_argparse_args(args,
callbacks=[checkpoint_callback, lr_monitor]
)
data_model = TaskDataModel(args)
id2label = data_model.id2label
print('id2label:', id2label)
model = LitModel(args, id2label)
trainer.fit(model, data_model)
if __name__ == "__main__":
main()
| 26,317 | 39.614198 | 163 | py |
Fengshenbang-LM | Fengshenbang-LM-main/fengshen/examples/zen1_finetune/fengshen_sequence_level_ft_task.py | # coding=utf-8
# Copyright 2021 The IDEA Authors. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from fengshen.models.zen1.tokenization import BertTokenizer
from fengshen.models.zen1.modeling import ZenForSequenceClassification
from fengshen.models.zen1.ngram_utils import ZenNgramDict
from pytorch_lightning.callbacks import LearningRateMonitor
import csv
from dataclasses import dataclass
import logging
import math
import numpy as np
import os
from tqdm import tqdm
import json
import torch
import pytorch_lightning as pl
from random import shuffle
import argparse
from pytorch_lightning.callbacks import ModelCheckpoint
from torch.utils.data import Dataset, DataLoader
logging.basicConfig(format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',
datefmt='%m/%d/%Y %H:%M:%S',
level=logging.INFO)
logger = logging.getLogger(__name__)
class InputExample(object):
"""A single training/test example for simple sequence classification."""
def __init__(self, guid, text_a, text_b=None, label=None):
"""Constructs a InputExample.
Args:
guid: Unique id for the example.
text_a: string. The untokenized text of the first sequence. For single
sequence tasks, only this sequence must be specified.
text_b: (Optional) string. The untokenized text of the second sequence.
Only must be specified for sequence pair tasks.
label: (Optional) string. The label of the example. This should be
specified for train and dev examples, but not for test examples.
"""
self.guid = guid
self.text_a = text_a
self.text_b = text_b
self.label = label
class InputFeatures(object):
"""A single set of features of data."""
def __init__(self, input_ids, input_mask, segment_ids, label_id, ngram_ids, ngram_positions, ngram_lengths,
ngram_tuples, ngram_seg_ids, ngram_masks):
self.input_ids = input_ids
self.input_mask = input_mask
self.segment_ids = segment_ids
self.label_id = label_id
self.ngram_ids = ngram_ids
self.ngram_positions = ngram_positions
self.ngram_lengths = ngram_lengths
self.ngram_tuples = ngram_tuples
self.ngram_seg_ids = ngram_seg_ids
self.ngram_masks = ngram_masks
class DataProcessor(object):
"""Base class for data converters for sequence classification data sets."""
def get_examples(self, data_path, mode):
"""Gets a collection of `InputExample`s for the train set."""
raise NotImplementedError()
@classmethod
def _read_tsv(cls, input_file, quotechar=None):
"""Reads a tab separated value file."""
with open(input_file, "r") as f:
reader = csv.reader(f, delimiter="\t", quotechar=quotechar)
lines = []
for line in reader:
# if sys.version_info[0] == 2:
# line = list(unicode(cell, 'utf-8') for cell in line)
lines.append(line)
return lines
@classmethod
def _read_json(cls, input_file):
"""Reads a jsonl file."""
with open(input_file, "r", encoding="utf-8") as f:
lines = f.readlines()
samples = []
for line in tqdm(lines):
data = json.loads(line)
samples.append(data)
return samples
class TnewsProcessor(DataProcessor):
"""Processor for the tnews data set (HIT version)."""
def get_train_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_json(os.path.join(data_dir, "train.json")), "train")
def get_examples(self, data_path, mode):
return self._create_examples(
self._read_json(data_path),
set_type=mode
)
def _create_examples(self, lines, set_type):
"""Creates examples for the training and dev sets."""
examples = []
for (i, line) in enumerate(lines):
# if i == 0:
# continue
guid = "%s-%s" % (set_type, i)
# text_a = line[0]
text_a = line['sentence']
label = line['label'] if 'label' in line.keys() else None
examples.append(
InputExample(guid=guid, text_a=text_a, label=label))
return examples
class OcnliProcessor(DataProcessor):
"""Processor for the ocnli or cmnli data set (HIT version)."""
def get_examples(self, data_path, mode):
return self._create_examples(
self._read_json(data_path),
set_type=mode
)
def _create_examples(self, lines, set_type):
"""Creates examples for the training and dev sets."""
examples = []
for (i, line) in enumerate(lines):
# if i == 0:
# continue
guid = "%s-%s" % (set_type, i)
# text_a = line[0]
text_a = line['sentence1']
text_b = line['sentence2']
label = line['label'] if 'label' in line.keys() else None
            # special case: cmnli contains samples whose label is '-'
if label == '-':
label = None
examples.append(
InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
return examples
class IflytekProcessor(DataProcessor):
"""Processor for the iflytek data set (HIT version)."""
def get_examples(self, data_path, mode):
return self._create_examples(
self._read_json(data_path),
set_type=mode
)
def _create_examples(self, lines, set_type):
"""Creates examples for the training and dev sets."""
examples = []
for (i, line) in enumerate(lines):
# if i == 0:
# continue
guid = "%s-%s" % (set_type, i)
# text_a = line[0]
text_a = line['sentence']
label = line['label'] if 'label' in line.keys() else None
examples.append(
InputExample(guid=guid, text_a=text_a, label=label))
return examples
def convert_examples_to_features(examples, label_map, max_seq_length, tokenizer, ngram_dict):
"""Loads a data file into a list of `InputBatch`s."""
features = []
for (ex_index, example) in enumerate(examples):
if ex_index % 10000 == 0:
logger.info("Writing example %d of %d" % (ex_index, len(examples)))
tokens_a = tokenizer.tokenize(example.text_a)
tokens_b = None
if example.text_b:
tokens_b = tokenizer.tokenize(example.text_b)
# Modifies `tokens_a` and `tokens_b` in place so that the total
# length is less than the specified length.
# Account for [CLS], [SEP], [SEP] with "- 3"
_truncate_seq_pair(tokens_a, tokens_b, max_seq_length - 3)
else:
# Account for [CLS] and [SEP] with "- 2"
if len(tokens_a) > max_seq_length - 2:
tokens_a = tokens_a[:(max_seq_length - 2)]
# The convention in BERT is:
# (a) For sequence pairs:
# tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]
# type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1
# (b) For single sequences:
# tokens: [CLS] the dog is hairy . [SEP]
# type_ids: 0 0 0 0 0 0 0
#
# Where "type_ids" are used to indicate whether this is the first
# sequence or the second sequence. The embedding vectors for `type=0` and
# `type=1` were learned during pre-training and are added to the wordpiece
# embedding vector (and position vector). This is not *strictly* necessary
# since the [SEP] token unambiguously separates the sequences, but it makes
# it easier for the model to learn the concept of sequences.
#
# For classification tasks, the first vector (corresponding to [CLS]) is
        # used as the "sentence vector". Note that this only makes sense because
# the entire model is fine-tuned.
tokens = ["[CLS]"] + tokens_a + ["[SEP]"]
segment_ids = [0] * len(tokens)
if tokens_b:
tokens += tokens_b + ["[SEP]"]
segment_ids += [1] * (len(tokens_b) + 1)
input_ids = tokenizer.convert_tokens_to_ids(tokens)
# The mask has 1 for real tokens and 0 for padding tokens. Only real
# tokens are attended to.
input_mask = [1] * len(input_ids)
# Zero-pad up to the sequence length.
padding = [0] * (max_seq_length - len(input_ids))
input_ids += padding
input_mask += padding
segment_ids += padding
assert len(input_ids) == max_seq_length
assert len(input_mask) == max_seq_length
assert len(segment_ids) == max_seq_length
label_id = label_map[example.label]
# ----------- code for ngram BEGIN-----------
ngram_matches = []
        # Scan segments of length 2 to 7 and check whether each one is a known word
for p in range(2, 8):
for q in range(0, len(tokens) - p + 1):
character_segment = tokens[q:q + p]
                # q is the starting position of the word
                # p is the length of the current word
character_segment = tuple(character_segment)
if character_segment in ngram_dict.ngram_to_id_dict:
ngram_index = ngram_dict.ngram_to_id_dict[character_segment]
ngram_matches.append([ngram_index, q, p, character_segment])
shuffle(ngram_matches)
# max_word_in_seq_proportion = max_word_in_seq
max_word_in_seq_proportion = math.ceil((len(tokens) / max_seq_length) * ngram_dict.max_ngram_in_seq)
if len(ngram_matches) > max_word_in_seq_proportion:
ngram_matches = ngram_matches[:max_word_in_seq_proportion]
ngram_ids = [ngram[0] for ngram in ngram_matches]
ngram_positions = [ngram[1] for ngram in ngram_matches]
ngram_lengths = [ngram[2] for ngram in ngram_matches]
ngram_tuples = [ngram[3] for ngram in ngram_matches]
ngram_seg_ids = [0 if position < (len(tokens_a) + 2) else 1 for position in ngram_positions]
        ngram_mask_array = np.zeros(ngram_dict.max_ngram_in_seq, dtype=bool)
ngram_mask_array[:len(ngram_ids)] = 1
# record the masked positions
ngram_positions_matrix = np.zeros(shape=(max_seq_length, ngram_dict.max_ngram_in_seq), dtype=np.int32)
for i in range(len(ngram_ids)):
ngram_positions_matrix[ngram_positions[i]:ngram_positions[i] + ngram_lengths[i], i] = 1.0
# Zero-pad up to the max word in seq length.
padding = [0] * (ngram_dict.max_ngram_in_seq - len(ngram_ids))
ngram_ids += padding
ngram_lengths += padding
ngram_seg_ids += padding
# ----------- code for ngram END-----------
label_id = label_map[example.label] if example.label is not None else 0
features.append(
InputFeatures(input_ids=input_ids,
input_mask=input_mask,
segment_ids=segment_ids,
label_id=label_id,
ngram_ids=ngram_ids,
ngram_positions=ngram_positions_matrix,
ngram_lengths=ngram_lengths,
ngram_tuples=ngram_tuples,
ngram_seg_ids=ngram_seg_ids,
ngram_masks=ngram_mask_array))
return features
def _truncate_seq_pair(tokens_a, tokens_b, max_length):
"""Truncates a sequence pair in place to the maximum length."""
# This is a simple heuristic which will always truncate the longer sequence
# one token at a time. This makes more sense than truncating an equal percent
# of tokens from each, since if one sequence is very short then each token
# that's truncated likely contains more information than a longer sequence.
while True:
total_length = len(tokens_a) + len(tokens_b)
if total_length <= max_length:
break
if len(tokens_a) > len(tokens_b):
tokens_a.pop()
else:
tokens_b.pop()
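# Example (illustrative): with max_length=5, tokens_a = ["a", "b", "c", "d"] and
# tokens_b = ["x", "y"], the longer list is popped first, leaving
# tokens_a = ["a", "b", "c"] and tokens_b = ["x", "y"].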
class TaskDataset(Dataset):
def __init__(self, data_path, processor, mode='train'):
super().__init__()
self.data = self.load_data(data_path, processor, mode)
def __len__(self):
return len(self.data)
def __getitem__(self, index):
return self.data[index]
def load_data(self, data_path, processor, mode):
if mode == "train":
examples = processor.get_examples(data_path, mode)
elif mode == "test":
examples = processor.get_examples(data_path, mode)
elif mode == "dev":
examples = processor.get_examples(data_path, mode)
return examples
@dataclass
class TaskCollator:
args = None
tokenizer = None
ngram_dict = None
label2id = None
def __call__(self, samples):
features = convert_examples_to_features(samples, self.label2id, self.args.max_seq_length, self.tokenizer, self.ngram_dict)
# logger.info(" Num examples = %d", len(samples))
input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long)
input_mask = torch.tensor([f.input_mask for f in features], dtype=torch.long)
segment_ids = torch.tensor([f.segment_ids for f in features], dtype=torch.long)
label_ids = torch.tensor([f.label_id for f in features], dtype=torch.long)
ngram_ids = torch.tensor([f.ngram_ids for f in features], dtype=torch.long)
ngram_positions = torch.tensor([f.ngram_positions for f in features], dtype=torch.long)
# ngram_lengths = torch.tensor([f.ngram_lengths for f in features], dtype=torch.long)
# ngram_seg_ids = torch.tensor([f.ngram_seg_ids for f in features], dtype=torch.long)
# ngram_masks = torch.tensor([f.ngram_masks for f in features], dtype=torch.long)
return {
'input_ids': input_ids,
'input_ngram_ids': ngram_ids,
'ngram_position_matrix': ngram_positions,
'attention_mask': input_mask,
'token_type_ids': segment_ids,
'labels': label_ids,
}
# return default_collate(sample_list)
class TaskDataModel(pl.LightningDataModule):
@staticmethod
def add_data_specific_args(parent_args):
parser = parent_args.add_argument_group('TASK NAME DataModel')
parser.add_argument('--data_dir', default='./data', type=str)
parser.add_argument('--num_workers', default=8, type=int)
parser.add_argument('--train_data', default='train.json', type=str)
parser.add_argument('--valid_data', default='dev.json', type=str)
parser.add_argument('--test_data', default='test.json', type=str)
parser.add_argument('--train_batchsize', default=16, type=int)
parser.add_argument('--valid_batchsize', default=32, type=int)
parser.add_argument('--max_seq_length', default=128, type=int)
parser.add_argument('--texta_name', default='text', type=str)
parser.add_argument('--textb_name', default='sentence2', type=str)
parser.add_argument('--label_name', default='label', type=str)
parser.add_argument('--id_name', default='id', type=str)
parser.add_argument('--dataset_name', default=None, type=str)
parser.add_argument('--vocab_file',
type=str, default=None,
help="Vocabulary mapping/file BERT was pretrainined on")
parser.add_argument("--do_lower_case",
action='store_true',
help="Set this flag if you are using an uncased model.")
parser.add_argument('--task_name', default='tnews', type=str)
return parent_args
def __init__(self, args):
super().__init__()
self.train_batchsize = args.train_batchsize
self.valid_batchsize = args.valid_batchsize
self.collator = TaskCollator()
self.collator.args = args
self.collator.tokenizer = BertTokenizer.from_pretrained(args.pretrained_model_path, do_lower_case=args.do_lower_case)
self.collator.ngram_dict = ZenNgramDict.from_pretrained(args.pretrained_model_path, tokenizer=self.collator.tokenizer)
processors = {
'afqmc': OcnliProcessor,
'tnews': TnewsProcessor,
'ocnli': OcnliProcessor,
'cmnli': OcnliProcessor,
'iflytek': IflytekProcessor,
}
if args.task_name not in processors:
raise ValueError("Task not found: %s" % (args.task_name))
processor = processors[args.task_name]()
if args.dataset_name is None:
self.label2id, self.id2label = self.load_schema(os.path.join(
args.data_dir, args.train_data), args)
self.train_data = TaskDataset(os.path.join(
args.data_dir, args.train_data), processor, mode='train')
self.valid_data = TaskDataset(os.path.join(
args.data_dir, args.valid_data), processor, mode='dev')
self.test_data = TaskDataset(os.path.join(
args.data_dir, args.test_data), processor, mode='test')
self.collator.label2id = self.label2id
else:
import datasets
ds = datasets.load_dataset(args.dataset_name)
self.train_data = ds['train']
self.valid_data = ds['validation']
self.test_data = ds['test']
self.save_hyperparameters(args)
def train_dataloader(self):
return DataLoader(self.train_data, shuffle=True, batch_size=self.train_batchsize, pin_memory=False,
collate_fn=self.collator)
def val_dataloader(self):
return DataLoader(self.valid_data, shuffle=False, batch_size=self.valid_batchsize, pin_memory=False,
collate_fn=self.collator)
def predict_dataloader(self):
return DataLoader(self.test_data, shuffle=False, batch_size=self.valid_batchsize, pin_memory=False,
collate_fn=self.collator)
def load_schema(self, data_path, args):
with open(data_path, 'r', encoding='utf8') as f:
lines = f.readlines()
label_list = []
for line in tqdm(lines):
data = json.loads(line)
            labels = data[args.label_name] if args.label_name in data.keys() else 0
if labels not in label_list:
label_list.append(labels)
label2id, id2label = {}, {}
for i, k in enumerate(label_list):
label2id[k] = i
id2label[i] = k
return label2id, id2label
class LitModel(pl.LightningModule):
@staticmethod
def add_model_specific_args(parent_args):
parser = parent_args.add_argument_group('BaseModel')
parser.add_argument('--num_labels', default=2, type=int)
return parent_args
def __init__(self, args):
super().__init__()
self.model = ZenForSequenceClassification.from_pretrained(args.pretrained_model_path, num_labels=args.num_labels)
self.save_hyperparameters(args)
def setup(self, stage) -> None:
if stage == 'fit':
train_loader = self.trainer._data_connector._train_dataloader_source.dataloader()
# Calculate total steps
if self.trainer.max_epochs > 0:
world_size = self.trainer.world_size
tb_size = self.hparams.train_batchsize * max(1, world_size)
ab_size = self.trainer.accumulate_grad_batches
self.total_steps = (len(train_loader.dataset) *
self.trainer.max_epochs // tb_size) // ab_size
else:
self.total_steps = self.trainer.max_steps // self.trainer.accumulate_grad_batches
print('Total steps: {}' .format(self.total_steps))
def training_step(self, batch, batch_idx):
loss, logits = self.model(**batch)
acc = self.comput_metrix(logits, batch['labels'])
self.log('train_loss', loss)
self.log('train_acc', acc)
return loss
def comput_metrix(self, logits, labels):
y_pred = torch.argmax(logits, dim=-1)
y_pred = y_pred.view(size=(-1,))
y_true = labels.view(size=(-1,)).float()
corr = torch.eq(y_pred, y_true)
acc = torch.sum(corr.float())/labels.size()[0]
return acc
def validation_step(self, batch, batch_idx):
loss, logits = self.model(**batch)
acc = self.comput_metrix(logits, batch['labels'])
self.log('val_loss', loss)
self.log('val_acc', acc)
def predict_step(self, batch, batch_idx):
output = self.model(**batch)
return output.logits
def configure_optimizers(self):
from fengshen.models.model_utils import configure_optimizers
return configure_optimizers(self)
class TaskModelCheckpoint:
@staticmethod
def add_argparse_args(parent_args):
parser = parent_args.add_argument_group('BaseModel')
parser.add_argument('--monitor', default='train_loss', type=str)
parser.add_argument('--mode', default='min', type=str)
parser.add_argument('--dirpath', default='./log/', type=str)
parser.add_argument(
'--filename', default='model-{epoch:02d}-{train_loss:.4f}', type=str)
parser.add_argument('--save_top_k', default=3, type=float)
parser.add_argument('--every_n_train_steps', default=100, type=float)
parser.add_argument('--save_weights_only', default=True, type=bool)
return parent_args
def __init__(self, args):
self.callbacks = ModelCheckpoint(monitor=args.monitor,
save_top_k=args.save_top_k,
mode=args.mode,
every_n_train_steps=args.every_n_train_steps,
save_weights_only=args.save_weights_only,
dirpath=args.dirpath,
filename=args.filename)
def save_test(data, args, data_model):
with open(args.output_save_path, 'w', encoding='utf-8') as f:
idx = 0
for i in range(len(data)):
batch = data[i]
for sample in batch:
tmp_result = dict()
label_id = np.argmax(sample.numpy())
tmp_result['id'] = data_model.test_data.data[idx]['id']
tmp_result['label'] = data_model.id2label[label_id]
json_data = json.dumps(tmp_result, ensure_ascii=False)
f.write(json_data+'\n')
idx += 1
print('save the result to '+args.output_save_path)
def main():
total_parser = argparse.ArgumentParser("TASK NAME")
total_parser.add_argument('--pretrained_model_path', default='', type=str)
total_parser.add_argument('--output_save_path',
default='./predict.json', type=str)
# * Args for data preprocessing
total_parser = TaskDataModel.add_data_specific_args(total_parser)
# * Args for training
total_parser = pl.Trainer.add_argparse_args(total_parser)
total_parser = TaskModelCheckpoint.add_argparse_args(total_parser)
# * Args for base model
from fengshen.models.model_utils import add_module_args
total_parser = add_module_args(total_parser)
total_parser = LitModel.add_model_specific_args(total_parser)
args = total_parser.parse_args()
checkpoint_callback = TaskModelCheckpoint(args).callbacks
lr_monitor = LearningRateMonitor(logging_interval='step')
trainer = pl.Trainer.from_argparse_args(args,
callbacks=[checkpoint_callback, lr_monitor]
)
data_model = TaskDataModel(args)
model = LitModel(args)
trainer.fit(model, data_model)
if __name__ == "__main__":
main()
| 24,857 | 39.684124 | 130 | py |
Fengshenbang-LM | Fengshenbang-LM-main/fengshen/examples/pretrain_taiyi_clip/pretrain.py | from pytorch_lightning import (
LightningModule,
Trainer,
)
from pytorch_lightning.callbacks import (
LearningRateMonitor,
)
from fengshen.models.clip import (
TaiyiCLIPModel,
TaiyiCLIPProcessor,
)
from fengshen.models.model_utils import (
add_module_args,
configure_optimizers,
get_total_steps,
)
import torch
import torch.nn.functional as F
import argparse
import math
from fengshen.data.universal_datamodule import UniversalDataModule
from fengshen.data.taiyi_stable_diffusion_datasets.taiyi_datasets import add_data_args, load_data
from fengshen.utils.universal_checkpoint import UniversalCheckpoint
import os
import numpy as np
from torchvision.transforms import Normalize, Compose, RandomResizedCrop, InterpolationMode, ToTensor
OPENAI_DATASET_MEAN = (0.48145466, 0.4578275, 0.40821073)
OPENAI_DATASET_STD = (0.26862954, 0.26130258, 0.27577711)
class Collator():
def __init__(self, args, processor):
self.processor = processor
self.seq_length = args.seq_length
self.transforms = Compose([
ToTensor(),
RandomResizedCrop(args.resolution, scale=(0.9, 1.0),
interpolation=InterpolationMode.BICUBIC),
Normalize(mean=OPENAI_DATASET_MEAN, std=OPENAI_DATASET_STD),
])
def __call__(self, inputs):
max_length = min(self.seq_length, max([len(i['caption']) for i in inputs]))
images = []
texts = []
labels = []
for i in inputs:
# instance_image = Image.open(i['img_path'])
# instance_image = jpeg4py.JPEG(i['img_path']).decode()
instance_image = np.load(i['npy_path'])
images.append(self.transforms(instance_image))
texts.append(i['caption'])
labels.append(i['labels'] if 'labels' in i else -100)
# images_input = self.processor(images=images, return_tensors="pt")
texts_input = self.processor(text=texts,
max_length=max_length,
padding='max_length',
truncation=True,
return_tensors='pt')
# return images_input, texts_input, labels
return {'pixel_values': torch.stack(images)}, texts_input, labels
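# Batch layout sketch (illustrative): the collator yields
# ({'pixel_values': (B, 3, resolution, resolution)}, tokenized texts with
# (B, max_length) input_ids/attention_mask, labels) -- the triple unpacked by
# training_step and validation_step below.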
class TaiyiCLIP(LightningModule):
@staticmethod
def add_module_specific_args(parent_parser):
parser = parent_parser.add_argument_group('Taiyi CLIP')
parser.add_argument('--loss_type', choices=['local', 'global'], default='local')
        parser.add_argument('--seq_length', default=77, type=int)
parser.add_argument('--gather_with_grad', default=False, action='store_true')
parser.add_argument('--freeze_image_tower', default=False, action='store_true')
return parent_parser
def __init__(self, args, **kwargs) -> None:
super().__init__()
self.save_hyperparameters(args)
self.model = TaiyiCLIPModel.from_pretrained(args.model_path)
self.processor = TaiyiCLIPProcessor.from_pretrained(args.model_path)
self.local_loss = args.loss_type == 'local'
if args.freeze_image_tower:
for param in self.model.vision_model.parameters():
param.requires_grad = False
            for param in self.model.visual_projection.parameters():
                param.requires_grad = False
# cache
self.cache_labels = True
self.prev_num_logits = 0
self.labels = {}
def setup(self, stage) -> None:
if stage == 'fit':
self.total_steps = get_total_steps(self.trainer, self.hparams)
print('Total steps: {}' .format(self.total_steps))
elif stage == 'validate':
self.total_steps = 100
def configure_optimizers(self):
return configure_optimizers(self)
def forward(self, image, text):
assert image is not None
assert text is not None
image_features = self.model.get_image_features(**image)
text_features = self.model.get_text_features(**text)
image_features = image_features / image_features.norm(p=2, dim=-1, keepdim=True)
text_features = text_features / text_features.norm(p=2, dim=-1, keepdim=True)
return image_features, text_features, self.model.logit_scale.exp()
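    # Gather features from every rank for the contrastive loss. With
    # --gather_with_grad the all_gather keeps gradients on the full tensor;
    # otherwise only the re-inserted local shard carries gradient.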
def gather_features(self, features):
if self.trainer.world_size == 1:
return features
all_features = self.all_gather(
features, sync_grads=self.hparams.gather_with_grad)
        if not self.local_loss and not self.hparams.gather_with_grad:
            # For global loss without gather_with_grad, put the local
            # (gradient-carrying) features back into the gathered tensor.
            all_features[self.global_rank] = features
all_features = all_features.view(-1, all_features.shape[-1])
return all_features
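    # 'local' loss: each rank scores its own features against the gathered
    # set (the memory-friendly formulation); 'global' loss: every rank scores
    # the full gathered feature matrices against each other.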
def clip_loss(self, image_features, text_features, logit_scale):
logits_per_image = None
        # If the image tower is frozen and we use local loss, this rank's own
        # text features are enough: image-to-text logits are only needed when
        # the ViT is actually being trained.
if self.hparams.freeze_image_tower and self.local_loss:
all_text_features = None
else:
all_text_features = self.gather_features(
text_features)
all_image_features = self.gather_features(
image_features)
if self.local_loss:
if all_text_features is not None:
logits_per_image = logit_scale * image_features @ all_text_features.T
logits_per_text = logit_scale * text_features @ all_image_features.T
else:
                # With global loss, all_text_features is guaranteed to be non-None.
logits_per_image = logit_scale * all_image_features @ all_text_features.T
logits_per_text = logits_per_image.T
num_logits = logits_per_text.shape[0]
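        # Cache the arange targets per device; with local loss each rank's
        # positives sit on a diagonal offset by rank * num_logits.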
if self.prev_num_logits != num_logits or self.device not in self.labels:
labels = torch.arange(num_logits, device=self.device, dtype=torch.long)
if self.trainer.world_size > 1 and self.local_loss:
labels = labels + num_logits * self.global_rank
if self.cache_labels:
self.labels[self.device] = labels
self.prev_num_logits = num_logits
else:
labels = self.labels[self.device]
total_loss = (
F.cross_entropy(logits_per_image, labels) +
F.cross_entropy(logits_per_text, labels)
) / 2 if logits_per_image is not None else F.cross_entropy(logits_per_text, labels)
return total_loss
def training_step(self, batch):
image, text, _ = batch
image_features, text_features, logit_scale = self(image, text)
total_loss = self.clip_loss(image_features, text_features, logit_scale)
self.log('train_loss', total_loss, sync_dist=False)
return total_loss
def on_train_batch_end(self, outputs, batch, batch_idx: int) -> None:
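        # Clamp the learnable temperature so exp(logit_scale) stays in
        # [1, 100], following the original CLIP training recipe.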
with torch.no_grad():
self.model.logit_scale.clamp_(0, math.log(100))
def get_metrics(self, image_features, text_features, labels, logit_scale):
        # Compute retrieval metrics; supports repeated items (e.g. one image
        # with several captions). The repeat bookkeeping matters for
        # image-to-text, where one image can match several texts; it is not
        # needed for text-to-image, where a text usually has exactly one image.
metrics = {}
logits_per_image = (logit_scale * image_features @ text_features.t()).detach().cpu()
logits_per_text = logits_per_image.t().detach().cpu()
logits = {"image_to_text": logits_per_image, "text_to_image": logits_per_text}
        label2idx = {}  # map each label to the list of indices where it occurs
repeat_id = []
for i, label in enumerate(labels):
if label not in label2idx:
label2idx[label] = [i]
else:
                # This label was seen before: remember the index so its score
                # can be down-weighted when computing text-to-image retrieval.
label2idx[label].append(i)
repeat_id.append(i)
ground_truth = [label2idx[label] for label in labels]
for name, logit in logits.items():
if name == 'text_to_image':
                logit[:, repeat_id] -= 1e8  # suppress duplicate images so they are effectively ignored
r_stat = {1: [], 5: [], 10: []}
# r1_stat, r5_stat, r10_stat = [], [], []
# index of the largest element to the smallest
ranking = torch.argsort(logit, descending=True)
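            # Walk the top-10 ranked candidates per query; a hit at rank j
            # counts toward every R@k with k > j.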
for i, each_query in enumerate(ranking[:, :10]):
for j, q in enumerate(each_query):
found = False
if q in ground_truth[i]:
for k, v in r_stat.items():
if j < k:
found = True
v.append(1)
if found:
break
for k, v in r_stat.items():
metrics[f'{name}_R@{k}'] = sum(v)/len(logit)
return metrics
def validation_step(self, batch, batch_idx):
image, text, label = batch
image_features, text_features, logit_scale = self(image, text)
return image_features, text_features, logit_scale, text['input_ids'].shape[0], label
def validation_epoch_end(self, val_outputs):
all_image_features = []
all_text_features = []
all_labels = []
sample_size = 0
for o in val_outputs:
all_image_features.append(o[0])
all_text_features.append(o[1])
sample_size += o[3]
all_labels += o[4]
if len(all_image_features) == 0 or len(all_text_features) == 0:
return
all_image_features = torch.cat(all_image_features)
all_text_features = torch.cat(all_text_features)
logit_scale = val_outputs[0][2].mean()
logits_per_image = logit_scale * all_image_features @ all_text_features.t()
logits_per_text = logits_per_image.t()
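        # Assumes a strict one-to-one image/text pairing across the whole
        # validation set, so the positive for row i is column i.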
labels = torch.arange(sample_size, device=self.device).long()
total_loss = (F.cross_entropy(logits_per_image, labels)
+ F.cross_entropy(logits_per_text, labels)) / 2
val_metrics = self.get_metrics(
image_features=all_image_features,
text_features=all_text_features,
logit_scale=logit_scale,
labels=all_labels)
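        # Note: cross_entropy already averages over the batch, so this extra
        # division only rescales the logged value.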
loss = total_loss / sample_size
self.log('val_loss', loss, sync_dist=False)
for k, v in val_metrics.items():
self.log(f'val_{k}', v, sync_dist=False)
def on_load_checkpoint(self, checkpoint) -> None:
        # Compatibility with older Lightning versions, which reset the step
        # count to 0 when resuming from a checkpoint.
global_step_offset = checkpoint["global_step"]
if 'global_samples' in checkpoint:
self.consumed_samples = checkpoint['global_samples']
self.trainer.fit_loop.epoch_loop._batches_that_stepped = global_step_offset
def on_save_checkpoint(self, checkpoint) -> None:
        # Export the weights in Hugging Face format alongside each checkpoint.
if self.global_rank == 0:
dir_path = os.path.join(
self.hparams.default_root_dir, f'hf_out_{self.trainer.current_epoch}_{self.trainer.global_step}')
if not os.path.exists(dir_path):
os.mkdir(dir_path)
self.model.save_pretrained(dir_path)
self.processor.save_pretrained(dir_path)
if __name__ == '__main__':
args_parser = argparse.ArgumentParser()
args_parser = add_module_args(args_parser)
args_parser = add_data_args(args_parser)
args_parser = UniversalDataModule.add_data_specific_args(args_parser)
args_parser = Trainer.add_argparse_args(args_parser)
args_parser = TaiyiCLIP.add_module_specific_args(args_parser)
args_parser = UniversalCheckpoint.add_argparse_args(args_parser)
args = args_parser.parse_args()
lr_monitor = LearningRateMonitor(logging_interval='step')
checkpoint_callback = UniversalCheckpoint(args)
trainer = Trainer.from_argparse_args(args,
callbacks=[
lr_monitor,
checkpoint_callback])
model = TaiyiCLIP(args)
processor = model.processor
collate_fn = Collator(args, processor)
datasets = load_data(args, global_rank=trainer.global_rank)
    # Load a single validation set. NOTE: temporary hack to sanity-check the
    # code; it will be removed once validated.
from fengshen.examples.pretrain_taiyi_clip.flickr_datasets import flickr30k_CNA
img_root = '/shared_space/ccnl/mm_data/Flickr30k-CNA/flickr30k/images'
text_annot_path = '/shared_space/ccnl/mm_data/Flickr30k-CNA/test/flickr30k_cn_test.txt'
datasets[args.val_datasets_field] = flickr30k_CNA(img_root, text_annot_path, collate_fn)
    datamodule = UniversalDataModule(
        tokenizer=None, collate_fn=collate_fn, args=args, datasets=datasets)
    trainer.fit(model, datamodule, ckpt_path=args.load_ckpt_path)
| 12,711 | 40.139159 | 113 | py |
Fengshenbang-LM | Fengshenbang-LM-main/fengshen/examples/pretrain_taiyi_clip/test.py | from pytorch_lightning import (
Trainer,
)
from fengshen.models.model_utils import (
add_module_args,
)
import argparse
from fengshen.data.universal_datamodule import UniversalDataModule
from fengshen.utils.universal_checkpoint import UniversalCheckpoint
from fengshen.examples.pretrain_taiyi_clip.pretrain import (
TaiyiCLIP,
Collator,
)
from fengshen.data.fs_datasets import load_dataset
from torch.utils.data import DataLoader
if __name__ == '__main__':
args_parser = argparse.ArgumentParser()
args_parser = add_module_args(args_parser)
args_parser = UniversalDataModule.add_data_specific_args(args_parser)
args_parser = Trainer.add_argparse_args(args_parser)
args_parser = TaiyiCLIP.add_module_specific_args(args_parser)
args_parser = UniversalCheckpoint.add_argparse_args(args_parser)
args = args_parser.parse_args()
checkpoint_callback = UniversalCheckpoint(args)
trainer = Trainer.from_argparse_args(args, callbacks=[
checkpoint_callback
])
model = TaiyiCLIP(args)
processor = model.processor
    collate_fn = Collator(args, processor)  # Collator takes (args, processor)
datasets = load_dataset(args.datasets_name)
dataloader = DataLoader(datasets[args.test_datasets_field],
batch_size=args.test_batchsize, num_workers=2, collate_fn=collate_fn)
trainer.validate(model, dataloaders=dataloader, ckpt_path=args.load_ckpt_path)
| 1,404 | 36.972973 | 97 | py |