repo stringlengths 1 99 | file stringlengths 13 215 | code stringlengths 12 59.2M | file_length int64 12 59.2M | avg_line_length float64 3.82 1.48M | max_line_length int64 12 2.51M | extension_type stringclasses 1
value |
|---|---|---|---|---|---|---|
s2anet | s2anet-master/mmdet/ops/box_iou_rotated_diff/box_iou_rotated_diff.py | """
Differentiable IoU calculation for rotated boxes
Most of the code is adapted from https://github.com/lilanxiao/Rotated_IoU
"""
import torch
from .box_intersection_2d import oriented_box_intersection_2d
def rotated_box_to_poly(rotated_boxes: torch.Tensor):
    """Convert (x_ctr, y_ctr, w, h, angle) boxes to their 4 corner points.

    Args:
        rotated_boxes (Tensor): (x, y, w, h, a) with shape (n, 5)
    Return:
        polys (Tensor): 4 corner points (x, y) of polygons with shape (n, 4, 2)
    """
    x_ctr = rotated_boxes[:, 0]
    y_ctr = rotated_boxes[:, 1]
    # legacy pixel convention inherited from the repo: side lengths minus one
    half_w = (rotated_boxes[:, 2] - 1) / 2.0
    half_h = (rotated_boxes[:, 3] - 1) / 2.0
    cos_a = torch.cos(rotated_boxes[:, 4])
    sin_a = torch.sin(rotated_boxes[:, 4])
    # corner offsets in the box frame, in the original corner order
    offsets = [(half_w, -half_h), (half_w, half_h),
               (-half_w, half_h), (-half_w, -half_h)]
    coords = []
    for dx, dy in offsets:
        # rotate the local offset by the box angle, then shift to the centre
        coords.append(x_ctr + cos_a * dx - sin_a * dy)
        coords.append(y_ctr + sin_a * dx + cos_a * dy)
    polys = torch.stack(coords, dim=-1)
    return polys.reshape(-1, 4, 2)  # to (n, 4, 2)
def box_iou_rotated_differentiable(boxes1: torch.Tensor, boxes2: torch.Tensor, iou_only: bool = True):
    """Calculate IoU between paired rotated boxes (element-wise, not NxM).

    Args:
        boxes1 (torch.Tensor): (n, 5) boxes as (x_ctr, y_ctr, w, h, angle)
        boxes2 (torch.Tensor): (n, 5) boxes as (x_ctr, y_ctr, w, h, angle)
        iou_only: Whether to keep other vars, e.g., polys, unions. Default True to drop these vars.
    Returns:
        If ``iou_only`` is True, only ``iou``; otherwise a tuple in this order:
        iou (torch.Tensor): (n,)
        union (torch.Tensor): (n,) area1 + area2 - inter_area
        polys1 (torch.Tensor): (n, 4, 2)
        polys2 (torch.Tensor): (n, 4, 2)
    """
    # transform to polygons
    polys1 = rotated_box_to_poly(boxes1)
    polys2 = rotated_box_to_poly(boxes2)
    # calculate intersection areas
    inter_area, _ = oriented_box_intersection_2d(polys1, polys2)
    # NOTE(review): areas use raw w*h although rotated_box_to_poly builds
    # corners from (w-1, h-1); slight inconsistency inherited from upstream
    area1 = boxes1[..., 2] * boxes1[..., 3]
    area2 = boxes2[..., 2] * boxes2[..., 3]
    union = area1 + area2 - inter_area
    # no epsilon here: union is assumed strictly positive for valid boxes
    iou = inter_area / union
    if iou_only:
        return iou
    else:
        return iou, union, polys1, polys2,
| 2,207 | 32.454545 | 102 | py |
s2anet | s2anet-master/mmdet/ops/box_iou_rotated_diff/box_intersection_2d.py | '''
torch implementation of 2d oriented box intersection
author: lanxiao li
Modified by csuhan:
Remove the `batch` indice in a tensor.
This setting is more suitable for mmdet.
'''
import torch
from .sort_vertices_cuda import sort_vertices_forward
EPSILON = 1e-8
def get_intersection_points(polys1: torch.Tensor, polys2: torch.Tensor):
    """Find intersection points of rectangles.

    Convention: if two edges are collinear, there is no intersection point.
    Args:
        polys1 (torch.Tensor): n, 4, 2
        polys2 (torch.Tensor): n, 4, 2
    Returns:
        intersectons (torch.Tensor): n, 4, 4, 2
        mask (torch.Tensor): n, 4, 4; bool
    """
    # build edges from corners: each edge holds (x_start, y_start, x_end, y_end)
    line1 = torch.cat([polys1, polys1[..., [1, 2, 3, 0], :]],
                      dim=2)  # n, 4, 4: Box, edge, point
    line2 = torch.cat([polys2, polys2[..., [1, 2, 3, 0], :]], dim=2)
    # duplicate data to pair each edges from the boxes
    # (n, 4, 4) -> (n, 4, 4, 4) : Box, edge1, edge2, point
    line1_ext = line1.unsqueeze(2).repeat([1, 1, 4, 1])
    line2_ext = line2.unsqueeze(1).repeat([1, 4, 1, 1])
    x1 = line1_ext[..., 0]
    y1 = line1_ext[..., 1]
    x2 = line1_ext[..., 2]
    y2 = line1_ext[..., 3]
    x3 = line2_ext[..., 0]
    y3 = line2_ext[..., 1]
    x4 = line2_ext[..., 2]
    y4 = line2_ext[..., 3]
    # math: https://en.wikipedia.org/wiki/Line%E2%80%93line_intersection
    # `num` is the denominator of the standard formula; zero => parallel edges
    num = (x1-x2)*(y3-y4) - (y1-y2)*(x3-x4)
    den_t = (x1-x3)*(y3-y4) - (y1-y3)*(x3-x4)
    # this division produces inf/nan where num == 0; those entries are
    # overwritten with -1 on the next line before they are used
    t = den_t / num
    t[num == .0] = -1.
    mask_t = (t > 0) * (t < 1)  # intersection on line segment 1
    den_u = (x1-x2)*(y1-y3) - (y1-y2)*(x1-x3)
    u = -den_u / num
    u[num == .0] = -1.
    mask_u = (u > 0) * (u < 1)  # intersection on line segment 2
    mask = mask_t * mask_u
    # overwrite with EPSILON. otherwise numerically unstable:
    # gradients through masked-out entries would be nan without it
    t = den_t / (num + EPSILON)
    intersections = torch.stack([x1 + t*(x2-x1), y1 + t*(y2-y1)], dim=-1)
    # zero out invalid points so they carry zero value and zero gradient
    intersections = intersections * mask.float().unsqueeze(-1)
    return intersections, mask
def get_in_box_points(polys1: torch.Tensor, polys2: torch.Tensor):
    """Check which corners of ``polys1`` lie inside ``polys2``.

    Convention: a corner exactly on an edge of the other box is also valid.
    Args:
        polys1 (torch.Tensor): (n, 4, 2)
        polys2 (torch.Tensor): (n, 4, 2)
    Returns:
        c1_in_2: (n, 4) Bool
    """
    # corners A, B, D of poly2: AB and AD span the box as two axes
    corner_a = polys2[..., 0:1, :]  # (n, 1, 2)
    corner_b = polys2[..., 1:2, :]  # (n, 1, 2)
    corner_d = polys2[..., 3:4, :]  # (n, 1, 2)
    ab = corner_b - corner_a        # (n, 1, 2)
    ad = corner_d - corner_a        # (n, 1, 2)
    am = polys1 - corner_a          # (n, 4, 2)
    # project AM onto each axis; the point is inside iff both normalized
    # projections fall within [0, 1] (with a small tolerance)
    proj_ab = torch.sum(ab * am, dim=-1)   # (n, 4)
    proj_ad = torch.sum(ad * am, dim=-1)   # (n, 4)
    sq_ab = torch.sum(ab * ab, dim=-1)     # (n, 1)
    sq_ad = torch.sum(ad * ad, dim=-1)     # (n, 1)
    # NOTE: the ratio form is stable when the two boxes coincide exactly
    # and across different scales of bboxes
    ratio_ab = proj_ab / sq_ab
    ratio_ad = proj_ad / sq_ad
    in_ab = (ratio_ab > -1e-6) * (ratio_ab < 1 + 1e-6)  # (n, 4)
    in_ad = (ratio_ad > -1e-6) * (ratio_ad < 1 + 1e-6)  # (n, 4)
    return in_ab * in_ad
def build_vertices(polys1: torch.Tensor, polys2: torch.Tensor,
                   c1_in_2: torch.Tensor, c2_in_1: torch.Tensor,
                   inters: torch.Tensor, mask_inter: torch.Tensor):
    """Collect candidate vertices of the intersection polygon.

    Args:
        polys1 (torch.Tensor): (n, 4, 2)
        polys2 (torch.Tensor): (n, 4, 2)
        c1_in_2 (torch.Tensor): Bool, (n, 4)
        c2_in_1 (torch.Tensor): Bool, (n, 4)
        inters (torch.Tensor): (n, 4, 4, 2)
        mask_inter (torch.Tensor): (n, 4, 4)
    Returns:
        vertices (torch.Tensor): (n, 24, 2) candidate vertices; only masked
            elements are valid
        mask (torch.Tensor): (n, 24) indicates valid elements in vertices
    """
    # NOTE: invalid `inters` entries are zero with zero gradient (masked by
    # multiplication with 0) -- this is exploited downstream as a trick
    num_boxes = polys1.size(0)
    flat_inters = inters.view([num_boxes, -1, 2])  # (n, 16, 2)
    flat_mask = mask_inter.view([num_boxes, -1])   # (n, 16)
    # candidates = corners of both boxes + all pairwise edge intersections
    vertices = torch.cat([polys1, polys2, flat_inters], dim=1)  # (n, 4+4+16, 2)
    mask = torch.cat([c1_in_2, c2_in_1, flat_mask], dim=1)      # (n, 4+4+16)
    return vertices, mask
def sort_indices(vertices: torch.Tensor, mask: torch.Tensor):
    """Sort valid polygon vertices counter-clockwise via the CUDA kernel.

    Args:
        vertices (torch.Tensor): float (n, 24, 2)
        mask (torch.Tensor): bool (n, 24)
    Returns:
        sorted_index: long (n, 9)
    Note:
        why 9? the polygon has maximal 8 vertices. +1 to duplicate the first element.
        the index should have following structure:
            (A, B, C, ... , A, X, X, X)
        and X indicates the index of arbitary elements in the last 16 (intersections not corners) with
        value 0 and mask False. (cause they have zero value and zero gradient)
    """
    # here we pad dim 0 to be consistent with the `sort_vertices_forward` function
    vertices = vertices.unsqueeze(0)
    mask = mask.unsqueeze(0)
    num_valid = torch.sum(mask.int(), dim=2).int()  # (B, N)
    # centroid of the valid vertices of each candidate polygon
    mean = torch.sum(vertices * mask.float().unsqueeze(-1), dim=2,
                     keepdim=True) / num_valid.unsqueeze(-1).unsqueeze(-1)
    # normalization makes sorting easier (angles measured around the centroid)
    vertices_normalized = vertices - mean
    # CUDA kernel yields the per-polygon order; drop the padded batch dim
    return sort_vertices_forward(vertices_normalized, mask, num_valid).squeeze(0).long()
def calculate_area(idx_sorted: torch.Tensor, vertices: torch.Tensor):
    """Shoelace-formula area of each intersection polygon.

    Args:
        idx_sorted (torch.Tensor): (n, 9) vertex order; the first index is
            repeated at the end so the polygon is closed
        vertices (torch.Tensor): (n, 24, 2)
    return:
        area: (n), area of intersection
        selected: (n, 9, 2), vertices of polygon with zero padding
    """
    gather_idx = idx_sorted.unsqueeze(-1).repeat([1, 1, 2])
    selected = torch.gather(vertices, 1, gather_idx)
    # shoelace formula: sum of cross products of consecutive vertex pairs
    x_curr = selected[..., 0:-1, 0]
    y_curr = selected[..., 0:-1, 1]
    x_next = selected[..., 1:, 0]
    y_next = selected[..., 1:, 1]
    cross = x_curr * y_next - y_curr * x_next
    area = torch.abs(torch.sum(cross, dim=1)) / 2
    return area, selected
def oriented_box_intersection_2d(polys1: torch.Tensor, polys2: torch.Tensor):
    """Intersection area of paired 2d rectangles given as corner points.

    Args:
        polys1 (torch.Tensor): (n, 4, 2)
        polys2 (torch.Tensor): (n, 4, 2)
    Returns:
        area: (n,), area of intersection
        selected: (n, 9, 2), vertices of polygon with zero padding
    """
    # pairwise edge-edge intersection points between the two boxes
    inters, mask_inter = get_intersection_points(polys1, polys2)
    # corners of each box that are contained in the other box
    c12 = get_in_box_points(polys1, polys2)
    c21 = get_in_box_points(polys2, polys1)
    # merge all candidate vertices together with their validity mask
    vertices, mask = build_vertices(
        polys1, polys2, c12, c21, inters, mask_inter)
    # order valid vertices counter-clockwise, then apply the shoelace formula
    sorted_indices = sort_indices(vertices, mask)
    return calculate_area(sorted_indices, vertices)
| 6,963 | 35.460733 | 103 | py |
s2anet | s2anet-master/mmdet/ops/roi_pool/roi_pool.py | import torch
import torch.nn as nn
from torch.autograd import Function
from torch.autograd.function import once_differentiable
from torch.nn.modules.utils import _pair
from . import roi_pool_cuda
class RoIPoolFunction(Function):
    """Autograd wrapper around the CUDA RoI max-pooling kernels."""

    @staticmethod
    def forward(ctx, features, rois, out_size, spatial_scale):
        # CUDA-only implementation
        assert features.is_cuda
        out_h, out_w = _pair(out_size)
        assert isinstance(out_h, int) and isinstance(out_w, int)
        ctx.save_for_backward(rois)
        num_channels = features.size(1)
        num_rois = rois.size(0)
        out_size = (num_rois, num_channels, out_h, out_w)
        output = features.new_zeros(out_size)
        # argmax remembers which input element won each max, for backward
        argmax = features.new_zeros(out_size, dtype=torch.int)
        # the kernel fills `output` and `argmax` in place
        roi_pool_cuda.forward(features, rois, out_h, out_w, spatial_scale,
                              output, argmax)
        ctx.spatial_scale = spatial_scale
        ctx.feature_size = features.size()
        # stored directly on ctx (not save_for_backward) since argmax is an
        # int tensor that needs no gradient tracking
        ctx.argmax = argmax
        return output

    @staticmethod
    @once_differentiable
    def backward(ctx, grad_output):
        assert grad_output.is_cuda
        spatial_scale = ctx.spatial_scale
        feature_size = ctx.feature_size
        argmax = ctx.argmax
        rois = ctx.saved_tensors[0]
        assert feature_size is not None
        grad_input = grad_rois = None
        if ctx.needs_input_grad[0]:
            grad_input = grad_output.new_zeros(feature_size)
            # scatter the gradients back to the argmax positions
            roi_pool_cuda.backward(grad_output.contiguous(), rois, argmax,
                                   spatial_scale, grad_input)
        # one slot per forward argument: features, rois, out_size, spatial_scale
        return grad_input, grad_rois, None, None
roi_pool = RoIPoolFunction.apply
class RoIPool(nn.Module):
def __init__(self, out_size, spatial_scale, use_torchvision=False):
super(RoIPool, self).__init__()
self.out_size = _pair(out_size)
self.spatial_scale = float(spatial_scale)
self.use_torchvision = use_torchvision
def forward(self, features, rois):
if self.use_torchvision:
from torchvision.ops import roi_pool as tv_roi_pool
return tv_roi_pool(features, rois, self.out_size,
self.spatial_scale)
else:
return roi_pool(features, rois, self.out_size, self.spatial_scale)
def __repr__(self):
format_str = self.__class__.__name__
format_str += '(out_size={}, spatial_scale={}'.format(
self.out_size, self.spatial_scale)
format_str += ', use_torchvision={})'.format(self.use_torchvision)
return format_str
| 2,544 | 32.486842 | 78 | py |
s2anet | s2anet-master/mmdet/ops/roi_pool/gradcheck.py | import os.path as osp
import sys
import torch
from torch.autograd import gradcheck
sys.path.append(osp.abspath(osp.join(__file__, '../../')))
from roi_pool import RoIPool # noqa: E402, isort:skip
# random feature map on GPU; gradcheck requires requires_grad on the input
feat = torch.randn(4, 16, 15, 15, requires_grad=True).cuda()
# RoIs: first column is the image index; the remaining four look like
# x1, y1, x2, y2 box coordinates -- TODO confirm against the CUDA kernel
rois = torch.Tensor([[0, 0, 0, 50, 50], [0, 10, 30, 43, 55],
                     [1, 67, 40, 110, 120]]).cuda()
inputs = (feat, rois)
print('Gradcheck for roi pooling...')
# loose eps/atol: exact finite differences are noisy around the pooling argmax
test = gradcheck(RoIPool(4, 1.0 / 8), inputs, eps=1e-5, atol=1e-3)
print(test)
| 513 | 29.235294 | 66 | py |
s2anet | s2anet-master/mmdet/ops/roi_align_rotated/roi_align_rotated.py | # Copyright (c) Facebook, Inc. and its affiliates.
import torch
from torch import nn
from torch.autograd import Function
from torch.autograd.function import once_differentiable
from torch.nn.modules.utils import _pair
from .roi_align_rotated_cuda import roi_align_rotated_forward, roi_align_rotated_backward
class _ROIAlignRotated(Function):
    """Autograd wrapper around the rotated RoIAlign CUDA kernels."""

    @staticmethod
    def forward(ctx, input, roi, out_size, spatial_scale, sampling_ratio):
        # keep everything backward() needs on the context
        ctx.save_for_backward(roi)
        ctx.out_size = out_size
        ctx.spatial_scale = spatial_scale
        ctx.sample_num = sampling_ratio
        ctx.input_shape = input.size()
        output = roi_align_rotated_forward(
            input, roi, spatial_scale, out_size[0], out_size[1], sampling_ratio
        )
        return output

    @staticmethod
    @once_differentiable
    def backward(ctx, grad_output):
        (rois,) = ctx.saved_tensors
        output_size = ctx.out_size
        spatial_scale = ctx.spatial_scale
        sampling_ratio = ctx.sample_num
        # original input shape so the gradient buffer matches the input
        bs, ch, h, w = ctx.input_shape
        grad_input = roi_align_rotated_backward(
            grad_output,
            rois,
            spatial_scale,
            output_size[0],
            output_size[1],
            bs,
            ch,
            h,
            w,
            sampling_ratio,
        )
        # BUGFIX: backward must return exactly one gradient per forward input
        # (input, roi, out_size, spatial_scale, sampling_ratio = 5 inputs);
        # the original returned 6 values, which autograd rejects at runtime
        return grad_input, None, None, None, None
roi_align_rotated = _ROIAlignRotated.apply
class RoIAlignRotated(nn.Module):
def __init__(self, out_size, spatial_scale, sample_num):
"""
Args:
out_size (tuple): h, w
spatial_scale (float): scale the input boxes by this number
sample_num (int): number of inputs samples to take for each output
sample. 0 to take samples densely.
Note:
roi_align_rotated supports continuous coordinate by default:
Given a continuous coordinate c, its two neighboring pixel indices (in our
pixel model) are computed by floor(c - 0.5) and ceil(c - 0.5). For example,
c=1.3 has pixel neighbors with discrete indices [0] and [1] (which are sampled
from the underlying signal at continuous coordinates 0.5 and 1.5).
"""
super(RoIAlignRotated, self).__init__()
self.out_size = _pair(out_size)
self.spatial_scale = spatial_scale
self.sample_num = sample_num
def forward(self, input, rois):
"""
Args:
input: NCHW images
rois: Bx6 boxes. First column is the index into N.
The other 5 columns are (x_ctr, y_ctr, width, height, angle_degrees).
"""
assert rois.dim() == 2 and rois.size(1) == 6
orig_dtype = input.dtype
if orig_dtype == torch.float16:
input = input.float()
rois = rois.float()
return roi_align_rotated(
input, rois, self.out_size, self.spatial_scale, self.sample_num
).to(dtype=orig_dtype)
def __repr__(self):
tmpstr = self.__class__.__name__ + "("
tmpstr += "out_size=" + str(self.out_size[0])
tmpstr += ", spatial_scale=" + str(self.spatial_scale)
tmpstr += ", sample_num=" + str(self.sample_num)
tmpstr += ")"
return tmpstr
| 3,281 | 33.914894 | 90 | py |
s2anet | s2anet-master/mmdet/ops/nms/nms_wrapper.py | import numpy as np
import torch
from . import nms_cpu, nms_cuda
from .soft_nms_cpu import soft_nms_cpu
def nms(dets, iou_thr, device_id=None):
    """Dispatch to either CPU or GPU NMS implementations.

    The input can be either a torch tensor or numpy array. GPU NMS will be used
    if the input is a gpu tensor or device_id is specified, otherwise CPU NMS
    will be used. The returned type will always be the same as inputs.

    Arguments:
        dets (torch.Tensor or np.ndarray): bboxes with scores.
        iou_thr (float): IoU threshold for NMS.
        device_id (int, optional): when `dets` is a numpy array, if `device_id`
            is None, then cpu nms is used, otherwise gpu_nms will be used.

    Returns:
        tuple: kept bboxes and indice, which is always the same data type as
            the input.
    """
    # normalize the input (tensor or numpy array) into a tensor
    is_numpy = isinstance(dets, np.ndarray)
    if isinstance(dets, torch.Tensor):
        dets_th = dets
    elif is_numpy:
        target = 'cpu' if device_id is None else 'cuda:{}'.format(device_id)
        dets_th = torch.from_numpy(dets).to(target)
    else:
        raise TypeError(
            'dets must be either a Tensor or numpy array, but got {}'.format(
                type(dets)))
    # execute cpu or cuda nms; empty input short-circuits both extensions
    if dets_th.shape[0] == 0:
        inds = dets_th.new_zeros(0, dtype=torch.long)
    elif dets_th.is_cuda:
        inds = nms_cuda.nms(dets_th, iou_thr)
    else:
        inds = nms_cpu.nms(dets_th, iou_thr)
    if is_numpy:
        inds = inds.cpu().numpy()
    return dets[inds, :], inds
def soft_nms(dets, iou_thr, method='linear', sigma=0.5, min_score=1e-3):
    """Soft-NMS: decay scores of overlapping boxes via the Cython CPU kernel.

    Arguments:
        dets (torch.Tensor or np.ndarray): bboxes with scores.
        iou_thr (float): IoU threshold that triggers score decay.
        method (str): 'linear' or 'gaussian' decay.
        sigma (float): gaussian decay parameter.
        min_score (float): boxes whose score falls below this are dropped.

    Returns:
        tuple: decayed bboxes and kept indices, same data type as the input.
    """
    is_tensor = isinstance(dets, torch.Tensor)
    if is_tensor:
        # the Cython kernel works on CPU numpy arrays only
        dets_np = dets.detach().cpu().numpy()
    elif isinstance(dets, np.ndarray):
        dets_np = dets
    else:
        raise TypeError(
            'dets must be either a Tensor or numpy array, but got {}'.format(
                type(dets)))
    method_codes = {'linear': 1, 'gaussian': 2}
    if method not in method_codes:
        raise ValueError('Invalid method for SoftNMS: {}'.format(method))
    new_dets, inds = soft_nms_cpu(
        dets_np,
        iou_thr,
        method=method_codes[method],
        sigma=sigma,
        min_score=min_score)
    if not is_tensor:
        return new_dets.astype(np.float32), inds.astype(np.int64)
    # convert back on the same device/dtype as the input tensor
    return dets.new_tensor(new_dets), dets.new_tensor(inds, dtype=torch.long)
| 2,580 | 31.670886 | 79 | py |
taming-transformers | taming-transformers-master/main.py | import argparse, os, sys, datetime, glob, importlib
from omegaconf import OmegaConf
import numpy as np
from PIL import Image
import torch
import torchvision
from torch.utils.data import random_split, DataLoader, Dataset
import pytorch_lightning as pl
from pytorch_lightning import seed_everything
from pytorch_lightning.trainer import Trainer
from pytorch_lightning.callbacks import ModelCheckpoint, Callback, LearningRateMonitor
from pytorch_lightning.utilities import rank_zero_only
from taming.data.utils import custom_collate
def get_obj_from_str(string, reload=False):
    """Resolve a dotted path like ``pkg.mod.Class`` to the named attribute.

    Args:
        string: fully qualified name, e.g. ``"torch.nn.Linear"``.
        reload: if True, re-import the module first (picks up source edits).
    Returns:
        The attribute (class, function, ...) named by ``string``.
    """
    module_path, attr_name = string.rsplit(".", 1)
    if reload:
        importlib.reload(importlib.import_module(module_path))
    target_module = importlib.import_module(module_path, package=None)
    return getattr(target_module, attr_name)
def get_parser(**parser_kwargs):
    """Build the command-line parser for training/eval runs.

    Keyword arguments are forwarded to ``argparse.ArgumentParser``.
    """
    def str2bool(value):
        # accept real bools as well as common textual spellings
        if isinstance(value, bool):
            return value
        if value.lower() in ("yes", "true", "t", "y", "1"):
            return True
        if value.lower() in ("no", "false", "f", "n", "0"):
            return False
        raise argparse.ArgumentTypeError("Boolean value expected.")

    parser = argparse.ArgumentParser(**parser_kwargs)
    # shared option shapes: optional-value string flags and tri-state booleans
    string_opt = dict(type=str, const=True, default="", nargs="?")
    bool_opt = dict(type=str2bool, const=True, default=False, nargs="?")
    parser.add_argument("-n", "--name", help="postfix for logdir", **string_opt)
    parser.add_argument("-r", "--resume",
                        help="resume from logdir or checkpoint in logdir",
                        **string_opt)
    parser.add_argument("-b", "--base", nargs="*", metavar="base_config.yaml",
                        default=list(),
                        help="paths to base configs. Loaded from left-to-right. "
                             "Parameters can be overwritten or added with command-line options of the form `--key value`.")
    parser.add_argument("-t", "--train", help="train", **bool_opt)
    parser.add_argument("--no-test", help="disable test", **bool_opt)
    parser.add_argument("-p", "--project",
                        help="name of new or path to existing project")
    parser.add_argument("-d", "--debug", help="enable post-mortem debugging",
                        **bool_opt)
    parser.add_argument("-s", "--seed", type=int, default=23,
                        help="seed for seed_everything")
    parser.add_argument("-f", "--postfix", type=str, default="",
                        help="post-postfix for default name")
    return parser
def nondefault_trainer_args(opt):
    """Names of trainer options in ``opt`` that differ from Lightning defaults."""
    # parse an empty command line to obtain the trainer's default namespace
    defaults = Trainer.add_argparse_args(argparse.ArgumentParser()).parse_args([])
    changed = (k for k in vars(defaults) if getattr(opt, k) != getattr(defaults, k))
    return sorted(changed)
def instantiate_from_config(config):
    """Instantiate the object described by a ``{"target": ..., "params": ...}`` config.

    Args:
        config: mapping with a dotted import path under ``"target"`` and an
            optional ``"params"`` dict of constructor keyword arguments.
    Returns:
        The constructed object.
    Raises:
        KeyError: if ``config`` has no ``"target"`` entry.
    """
    # idiom fix: `"target" not in config` instead of `not "target" in config`
    if "target" not in config:
        raise KeyError("Expected key `target` to instantiate.")
    return get_obj_from_str(config["target"])(**config.get("params", dict()))
class WrappedDataset(Dataset):
    """Wraps an arbitrary object with __len__ and __getitem__ into a pytorch dataset"""

    def __init__(self, dataset):
        # keep a reference only; the wrapped object stays authoritative
        self.data = dataset

    def __len__(self):
        return len(self.data)

    def __getitem__(self, idx):
        return self.data[idx]
class DataModuleFromConfig(pl.LightningDataModule):
    """LightningDataModule whose datasets are built from `target`/`params` configs.

    Only the splits whose config is provided get a dataloader: the matching
    ``*_dataloader`` hook is attached dynamically, so Lightning only sees the
    splits that actually exist.
    """

    def __init__(self, batch_size, train=None, validation=None, test=None,
                 wrap=False, num_workers=None):
        super().__init__()
        self.batch_size = batch_size
        self.dataset_configs = dict()
        # default worker count scales with the batch size
        self.num_workers = num_workers if num_workers is not None else batch_size*2
        if train is not None:
            self.dataset_configs["train"] = train
            self.train_dataloader = self._train_dataloader
        if validation is not None:
            self.dataset_configs["validation"] = validation
            self.val_dataloader = self._val_dataloader
        if test is not None:
            self.dataset_configs["test"] = test
            self.test_dataloader = self._test_dataloader
        # if True, datasets get wrapped so arbitrary objects with
        # __len__/__getitem__ work as torch Datasets
        self.wrap = wrap

    def prepare_data(self):
        # instantiate once purely for side effects (e.g. downloads);
        # the instances are discarded and rebuilt in setup()
        for data_cfg in self.dataset_configs.values():
            instantiate_from_config(data_cfg)

    def setup(self, stage=None):
        self.datasets = dict(
            (k, instantiate_from_config(self.dataset_configs[k]))
            for k in self.dataset_configs)
        if self.wrap:
            for k in self.datasets:
                self.datasets[k] = WrappedDataset(self.datasets[k])

    def _train_dataloader(self):
        # shuffle only the training split
        return DataLoader(self.datasets["train"], batch_size=self.batch_size,
                          num_workers=self.num_workers, shuffle=True, collate_fn=custom_collate)

    def _val_dataloader(self):
        return DataLoader(self.datasets["validation"],
                          batch_size=self.batch_size,
                          num_workers=self.num_workers, collate_fn=custom_collate)

    def _test_dataloader(self):
        return DataLoader(self.datasets["test"], batch_size=self.batch_size,
                          num_workers=self.num_workers, collate_fn=custom_collate)
class SetupCallback(Callback):
    """Creates log/checkpoint/config dirs and dumps the configs at train start."""

    def __init__(self, resume, now, logdir, ckptdir, cfgdir, config, lightning_config):
        super().__init__()
        self.resume = resume
        self.now = now
        self.logdir = logdir
        self.ckptdir = ckptdir
        self.cfgdir = cfgdir
        self.config = config
        self.lightning_config = lightning_config

    def on_pretrain_routine_start(self, trainer, pl_module):
        # only rank 0 owns the run directory; other ranks clean up after
        # themselves if they accidentally created one
        if trainer.global_rank == 0:
            # Create logdirs and save configs
            os.makedirs(self.logdir, exist_ok=True)
            os.makedirs(self.ckptdir, exist_ok=True)
            os.makedirs(self.cfgdir, exist_ok=True)

            print("Project config")
            print(self.config.pretty())
            OmegaConf.save(self.config,
                           os.path.join(self.cfgdir, "{}-project.yaml".format(self.now)))

            print("Lightning config")
            print(self.lightning_config.pretty())
            OmegaConf.save(OmegaConf.create({"lightning": self.lightning_config}),
                           os.path.join(self.cfgdir, "{}-lightning.yaml".format(self.now)))
        else:
            # ModelCheckpoint callback created log directory --- remove it
            # by moving the stray directory aside under child_runs
            if not self.resume and os.path.exists(self.logdir):
                dst, name = os.path.split(self.logdir)
                dst = os.path.join(dst, "child_runs", name)
                os.makedirs(os.path.split(dst)[0], exist_ok=True)
                try:
                    os.rename(self.logdir, dst)
                except FileNotFoundError:
                    pass
class ImageLogger(Callback):
    """Periodically renders the images returned by ``pl_module.log_images``
    both to disk and to the attached experiment logger."""

    def __init__(self, batch_frequency, max_images, clamp=True, increase_log_steps=True):
        super().__init__()
        self.batch_freq = batch_frequency
        self.max_images = max_images
        # dispatch table: logger class -> logging method
        self.logger_log_images = {
            pl.loggers.WandbLogger: self._wandb,
            pl.loggers.TestTubeLogger: self._testtube,
        }
        # extra power-of-two steps so early training is logged densely
        self.log_steps = [2 ** n for n in range(int(np.log2(self.batch_freq)) + 1)]
        if not increase_log_steps:
            self.log_steps = [self.batch_freq]
        self.clamp = clamp

    @rank_zero_only
    def _wandb(self, pl_module, images, batch_idx, split):
        # NOTE(review): wandb logging is intentionally disabled here;
        # everything after the raise is unreachable dead code (and references
        # an unimported `wandb` name)
        raise ValueError("No way wandb")
        grids = dict()
        for k in images:
            grid = torchvision.utils.make_grid(images[k])
            grids[f"{split}/{k}"] = wandb.Image(grid)
        pl_module.logger.experiment.log(grids)

    @rank_zero_only
    def _testtube(self, pl_module, images, batch_idx, split):
        for k in images:
            grid = torchvision.utils.make_grid(images[k])
            grid = (grid+1.0)/2.0  # -1,1 -> 0,1; c,h,w
            tag = f"{split}/{k}"
            pl_module.logger.experiment.add_image(
                tag, grid,
                global_step=pl_module.global_step)

    @rank_zero_only
    def log_local(self, save_dir, split, images,
                  global_step, current_epoch, batch_idx):
        # write each image grid as a png under <save_dir>/images/<split>/
        root = os.path.join(save_dir, "images", split)
        for k in images:
            grid = torchvision.utils.make_grid(images[k], nrow=4)
            grid = (grid+1.0)/2.0  # -1,1 -> 0,1; c,h,w
            grid = grid.transpose(0,1).transpose(1,2).squeeze(-1)
            grid = grid.numpy()
            grid = (grid*255).astype(np.uint8)
            filename = "{}_gs-{:06}_e-{:06}_b-{:06}.png".format(
                k,
                global_step,
                current_epoch,
                batch_idx)
            path = os.path.join(root, filename)
            os.makedirs(os.path.split(path)[0], exist_ok=True)
            Image.fromarray(grid).save(path)

    def log_img(self, pl_module, batch, batch_idx, split="train"):
        if (self.check_frequency(batch_idx) and  # batch_idx % self.batch_freq == 0
                hasattr(pl_module, "log_images") and
                callable(pl_module.log_images) and
                self.max_images > 0):
            logger = type(pl_module.logger)

            is_train = pl_module.training
            if is_train:
                # switch to eval mode while rendering; restored below
                pl_module.eval()

            with torch.no_grad():
                images = pl_module.log_images(batch, split=split, pl_module=pl_module)

            for k in images:
                # cap the number of logged samples per key
                N = min(images[k].shape[0], self.max_images)
                images[k] = images[k][:N]
                if isinstance(images[k], torch.Tensor):
                    images[k] = images[k].detach().cpu()
                    if self.clamp:
                        images[k] = torch.clamp(images[k], -1., 1.)

            self.log_local(pl_module.logger.save_dir, split, images,
                           pl_module.global_step, pl_module.current_epoch, batch_idx)

            # NOTE(review): global_step is passed where the handlers name the
            # parameter batch_idx; harmless since neither handler uses it
            logger_log_images = self.logger_log_images.get(logger, lambda *args, **kwargs: None)
            logger_log_images(pl_module, images, pl_module.global_step, split)

            if is_train:
                pl_module.train()

    def check_frequency(self, batch_idx):
        # log at every multiple of batch_freq, plus the scheduled early steps
        if (batch_idx % self.batch_freq) == 0 or (batch_idx in self.log_steps):
            try:
                self.log_steps.pop(0)
            except IndexError:
                pass
            return True
        return False

    def on_train_batch_end(self, trainer, pl_module, outputs, batch, batch_idx, dataloader_idx):
        self.log_img(pl_module, batch, batch_idx, split="train")

    def on_validation_batch_end(self, trainer, pl_module, outputs, batch, batch_idx, dataloader_idx):
        self.log_img(pl_module, batch, batch_idx, split="val")
if __name__ == "__main__":
    # custom parser to specify config files, train, test and debug mode,
    # postfix, resume.
    # `--key value` arguments are interpreted as arguments to the trainer.
    # `nested.key=value` arguments are interpreted as config parameters.
    # configs are merged from left-to-right followed by command line parameters.

    # model:
    #   base_learning_rate: float
    #   target: path to lightning module
    #   params:
    #       key: value
    # data:
    #   target: main.DataModuleFromConfig
    #   params:
    #      batch_size: int
    #      wrap: bool
    #      train:
    #          target: path to train dataset
    #          params:
    #              key: value
    #      validation:
    #          target: path to validation dataset
    #          params:
    #              key: value
    #      test:
    #          target: path to test dataset
    #          params:
    #              key: value
    # lightning: (optional, has sane defaults and can be specified on cmdline)
    #   trainer:
    #       additional arguments to trainer
    #   logger:
    #       logger to instantiate
    #   modelcheckpoint:
    #       modelcheckpoint to instantiate
    #   callbacks:
    #       callback1:
    #           target: importpath
    #           params:
    #               key: value

    now = datetime.datetime.now().strftime("%Y-%m-%dT%H-%M-%S")

    # add cwd for convenience and to make classes in this file available when
    # running as `python main.py`
    # (in particular `main.DataModuleFromConfig`)
    sys.path.append(os.getcwd())

    parser = get_parser()
    parser = Trainer.add_argparse_args(parser)

    opt, unknown = parser.parse_known_args()
    if opt.name and opt.resume:
        raise ValueError(
            "-n/--name and -r/--resume cannot be specified both."
            "If you want to resume training in a new log folder, "
            "use -n/--name in combination with --resume_from_checkpoint"
        )
    if opt.resume:
        if not os.path.exists(opt.resume):
            raise ValueError("Cannot find {}".format(opt.resume))
        if os.path.isfile(opt.resume):
            # resuming from a checkpoint file: recover logdir from its path
            paths = opt.resume.split("/")
            idx = len(paths)-paths[::-1].index("logs")+1
            logdir = "/".join(paths[:idx])
            ckpt = opt.resume
        else:
            # resuming from a run directory: use its last checkpoint
            assert os.path.isdir(opt.resume), opt.resume
            logdir = opt.resume.rstrip("/")
            ckpt = os.path.join(logdir, "checkpoints", "last.ckpt")

        opt.resume_from_checkpoint = ckpt
        base_configs = sorted(glob.glob(os.path.join(logdir, "configs/*.yaml")))
        opt.base = base_configs+opt.base
        _tmp = logdir.split("/")
        nowname = _tmp[_tmp.index("logs")+1]
    else:
        if opt.name:
            name = "_"+opt.name
        elif opt.base:
            # derive the run name from the first config file
            cfg_fname = os.path.split(opt.base[0])[-1]
            cfg_name = os.path.splitext(cfg_fname)[0]
            name = "_"+cfg_name
        else:
            name = ""
        nowname = now+name+opt.postfix
        logdir = os.path.join("logs", nowname)

    ckptdir = os.path.join(logdir, "checkpoints")
    cfgdir = os.path.join(logdir, "configs")
    seed_everything(opt.seed)

    try:
        # init and save configs
        configs = [OmegaConf.load(cfg) for cfg in opt.base]
        cli = OmegaConf.from_dotlist(unknown)
        config = OmegaConf.merge(*configs, cli)
        lightning_config = config.pop("lightning", OmegaConf.create())
        # merge trainer cli with config
        trainer_config = lightning_config.get("trainer", OmegaConf.create())
        # default to ddp
        trainer_config["distributed_backend"] = "ddp"
        for k in nondefault_trainer_args(opt):
            trainer_config[k] = getattr(opt, k)
        if not "gpus" in trainer_config:
            del trainer_config["distributed_backend"]
            cpu = True
        else:
            gpuinfo = trainer_config["gpus"]
            print(f"Running on GPUs {gpuinfo}")
            cpu = False
        trainer_opt = argparse.Namespace(**trainer_config)
        lightning_config.trainer = trainer_config

        # model
        model = instantiate_from_config(config.model)

        # trainer and callbacks
        trainer_kwargs = dict()

        # default logger configs
        # NOTE wandb < 0.10.0 interferes with shutdown
        # wandb >= 0.10.0 seems to fix it but still interferes with pudb
        # debugging (wrongly sized pudb ui)
        # thus prefer testtube for now
        default_logger_cfgs = {
            "wandb": {
                "target": "pytorch_lightning.loggers.WandbLogger",
                "params": {
                    "name": nowname,
                    "save_dir": logdir,
                    "offline": opt.debug,
                    "id": nowname,
                }
            },
            "testtube": {
                "target": "pytorch_lightning.loggers.TestTubeLogger",
                "params": {
                    "name": "testtube",
                    "save_dir": logdir,
                }
            },
        }
        default_logger_cfg = default_logger_cfgs["testtube"]
        logger_cfg = lightning_config.logger or OmegaConf.create()
        logger_cfg = OmegaConf.merge(default_logger_cfg, logger_cfg)
        trainer_kwargs["logger"] = instantiate_from_config(logger_cfg)

        # modelcheckpoint - use TrainResult/EvalResult(checkpoint_on=metric) to
        # specify which metric is used to determine best models
        default_modelckpt_cfg = {
            "target": "pytorch_lightning.callbacks.ModelCheckpoint",
            "params": {
                "dirpath": ckptdir,
                "filename": "{epoch:06}",
                "verbose": True,
                "save_last": True,
            }
        }
        if hasattr(model, "monitor"):
            print(f"Monitoring {model.monitor} as checkpoint metric.")
            default_modelckpt_cfg["params"]["monitor"] = model.monitor
            default_modelckpt_cfg["params"]["save_top_k"] = 3

        modelckpt_cfg = lightning_config.modelcheckpoint or OmegaConf.create()
        modelckpt_cfg = OmegaConf.merge(default_modelckpt_cfg, modelckpt_cfg)
        trainer_kwargs["checkpoint_callback"] = instantiate_from_config(modelckpt_cfg)

        # add callback which sets up log directory
        default_callbacks_cfg = {
            "setup_callback": {
                "target": "main.SetupCallback",
                "params": {
                    "resume": opt.resume,
                    "now": now,
                    "logdir": logdir,
                    "ckptdir": ckptdir,
                    "cfgdir": cfgdir,
                    "config": config,
                    "lightning_config": lightning_config,
                }
            },
            "image_logger": {
                "target": "main.ImageLogger",
                "params": {
                    "batch_frequency": 750,
                    "max_images": 4,
                    "clamp": True
                }
            },
            "learning_rate_logger": {
                "target": "main.LearningRateMonitor",
                "params": {
                    "logging_interval": "step",
                    #"log_momentum": True
                }
            },
        }
        callbacks_cfg = lightning_config.callbacks or OmegaConf.create()
        callbacks_cfg = OmegaConf.merge(default_callbacks_cfg, callbacks_cfg)
        trainer_kwargs["callbacks"] = [instantiate_from_config(callbacks_cfg[k]) for k in callbacks_cfg]

        trainer = Trainer.from_argparse_args(trainer_opt, **trainer_kwargs)

        # data
        data = instantiate_from_config(config.data)
        # NOTE according to https://pytorch-lightning.readthedocs.io/en/latest/datamodules.html
        # calling these ourselves should not be necessary but it is.
        # lightning still takes care of proper multiprocessing though
        data.prepare_data()
        data.setup()

        # configure learning rate
        bs, base_lr = config.data.params.batch_size, config.model.base_learning_rate
        if not cpu:
            ngpu = len(lightning_config.trainer.gpus.strip(",").split(','))
        else:
            ngpu = 1
        accumulate_grad_batches = lightning_config.trainer.accumulate_grad_batches or 1
        print(f"accumulate_grad_batches = {accumulate_grad_batches}")
        lightning_config.trainer.accumulate_grad_batches = accumulate_grad_batches
        # linear scaling rule: lr grows with the effective batch size
        model.learning_rate = accumulate_grad_batches * ngpu * bs * base_lr
        print("Setting learning rate to {:.2e} = {} (accumulate_grad_batches) * {} (num_gpus) * {} (batchsize) * {:.2e} (base_lr)".format(
            model.learning_rate, accumulate_grad_batches, ngpu, bs, base_lr))

        # allow checkpointing via USR1
        def melk(*args, **kwargs):
            # run all checkpoint hooks
            if trainer.global_rank == 0:
                print("Summoning checkpoint.")
                ckpt_path = os.path.join(ckptdir, "last.ckpt")
                trainer.save_checkpoint(ckpt_path)

        def divein(*args, **kwargs):
            # drop into the pudb debugger on USR2
            if trainer.global_rank == 0:
                import pudb; pudb.set_trace()

        import signal
        signal.signal(signal.SIGUSR1, melk)
        signal.signal(signal.SIGUSR2, divein)

        # run
        if opt.train:
            try:
                trainer.fit(model, data)
            except Exception:
                # save a final checkpoint before propagating the failure
                melk()
                raise
        if not opt.no_test and not trainer.interrupted:
            trainer.test(model, data)
    except Exception:
        if opt.debug and trainer.global_rank==0:
            try:
                import pudb as debugger
            except ImportError:
                import pdb as debugger
            debugger.post_mortem()
        raise
    finally:
        # move newly created debug project to debug_runs
        if opt.debug and not opt.resume and trainer.global_rank==0:
            dst, name = os.path.split(logdir)
            dst = os.path.join(dst, "debug_runs", name)
            os.makedirs(os.path.split(dst)[0], exist_ok=True)
            os.rename(logdir, dst)
| 21,251 | 35.266212 | 138 | py |
taming-transformers | taming-transformers-master/setup.py | from setuptools import setup, find_packages
# Minimal setuptools configuration so taming-transformers installs as a package.
setup(
    name='taming-transformers',
    version='0.0.1',
    description='Taming Transformers for High-Resolution Image Synthesis',
    packages=find_packages(),
    install_requires=[
        'torch',
        'numpy',
        'tqdm',
    ],
)
| 292 | 19.928571 | 74 | py |
taming-transformers | taming-transformers-master/scripts/extract_segmentation.py | import sys, os
import numpy as np
import scipy
import torch
import torch.nn as nn
from scipy import ndimage
from tqdm import tqdm, trange
from PIL import Image
import torch.hub
import torchvision
import torch.nn.functional as F
# download deeplabv2_resnet101_msc-cocostuff164k-100000.pth from
# https://github.com/kazuto1011/deeplab-pytorch/releases/download/v1.0/deeplabv2_resnet101_msc-cocostuff164k-100000.pth
# and put the path here
CKPT_PATH = "TODO"
rescale = lambda x: (x + 1.) / 2.
def rescale_bgr(x):
    """Map a [-1, 1] tensor to [0, 255] and reverse the channel axis (RGB -> BGR)."""
    scaled = (x + 1) * 127.5
    return torch.flip(scaled, dims=[0])
class COCOStuffSegmenter(nn.Module):
    """DeepLabV2-ResNet101 segmenter pretrained on COCO-Stuff (182 classes).

    Weights are fetched via torch.hub; the checkpoint at CKPT_PATH must be
    downloaded manually (see the note at the top of this file).
    """
    def __init__(self, config):
        super().__init__()
        self.config = config
        self.n_labels = 182
        model = torch.hub.load("kazuto1011/deeplab-pytorch", "deeplabv2_resnet101", n_classes=self.n_labels)
        ckpt_path = CKPT_PATH
        model.load_state_dict(torch.load(ckpt_path))
        self.model = model
        normalize = torchvision.transforms.Normalize(mean=self.mean, std=self.std)
        self.image_transform = torchvision.transforms.Compose([
            torchvision.transforms.Lambda(lambda image: torch.stack(
                [normalize(rescale_bgr(x)) for x in image]))
        ])
    def forward(self, x, upsample=None):
        """Segment a batch of [-1, 1] RGB images; returns per-class logits."""
        x = self._pre_process(x)
        x = self.model(x)
        if upsample is not None:
            # bring the low-resolution logits back to the requested (h, w)
            x = torch.nn.functional.upsample_bilinear(x, size=upsample)
        return x
    def _pre_process(self, x):
        # rescale to [0, 255] BGR and normalize with the training statistics
        x = self.image_transform(x)
        return x
    @property
    def mean(self):
        # bgr
        return [104.008, 116.669, 122.675]
    @property
    def std(self):
        return [1.0, 1.0, 1.0]
    @property
    def input_size(self):
        return [3, 224, 224]
def run_model(img, model):
    """Run `model` on `img` and return per-pixel argmax class labels on the CPU."""
    model = model.eval()
    with torch.no_grad():
        logits = model(img, upsample=(img.shape[2], img.shape[3]))
        labels = torch.argmax(logits, dim=1, keepdim=True)
    return labels.detach().cpu()
def get_input(batch, k):
    """Fetch key `k` from `batch` as a float NCHW tensor (HW input gains a channel)."""
    x = batch[k]
    if len(x.shape) == 3:
        x = x.unsqueeze(-1)
    x = x.permute(0, 3, 1, 2).to(memory_format=torch.contiguous_format)
    return x.float()
def save_segmentation(segmentation, path):
    """Save a 1x1xHxW label map as an 8-bit grayscale png at `path`."""
    # --> class label to uint8, save as png
    os.makedirs(os.path.dirname(path), exist_ok=True)
    assert len(segmentation.shape)==4
    assert segmentation.shape[0]==1
    for seg in segmentation:
        # drop the channel dim; class ids fit into uint8 (182 classes)
        seg = seg.permute(1,2,0).numpy().squeeze().astype(np.uint8)
        seg = Image.fromarray(seg)
        seg.save(path)
def iterate_dataset(dataloader, destpath, model):
    """Segment every image in `dataloader` and write label pngs under `destpath`.

    Failures on individual batches are printed and skipped (best effort).
    """
    os.makedirs(destpath, exist_ok=True)
    num_processed = 0
    for i, batch in tqdm(enumerate(dataloader), desc="Data"):
        try:
            img = get_input(batch, "image")
            img = img.cuda()
            seg = run_model(img, model)
            # mirror the dataset's relative path, swapping the extension for .png
            path = batch["relative_file_path_"][0]
            path = os.path.splitext(path)[0]
            path = os.path.join(destpath, path + ".png")
            save_segmentation(seg, path)
            num_processed += 1
        except Exception as e:
            print(e)
            print("but anyhow..")
    print("Processed {} files. Bye.".format(num_processed))
from taming.data.sflckr import Examples
from torch.utils.data import DataLoader
if __name__ == "__main__":
    # usage: python extract_segmentation.py <destination_folder>
    dest = sys.argv[1]
    batchsize = 1
    print("Running with batch-size {}, saving to {}...".format(batchsize, dest))
    model = COCOStuffSegmenter({}).cuda()
    print("Instantiated model.")
    dataset = Examples()
    dloader = DataLoader(dataset, batch_size=batchsize)
    iterate_dataset(dataloader=dloader, destpath=dest, model=model)
    print("done.")
| 3,753 | 27.656489 | 119 | py |
taming-transformers | taming-transformers-master/scripts/sample_conditional.py | import argparse, os, sys, glob, math, time
import torch
import numpy as np
from omegaconf import OmegaConf
import streamlit as st
from streamlit import caching
from PIL import Image
from main import instantiate_from_config, DataModuleFromConfig
from torch.utils.data import DataLoader
from torch.utils.data.dataloader import default_collate
rescale = lambda x: (x + 1.) / 2.
def bchw_to_st(x):
    """Convert a [-1, 1] BCHW tensor into a [0, 1] BHWC numpy array for display."""
    return (x.detach().cpu().numpy().transpose(0, 2, 3, 1) + 1.) / 2.
def save_img(xstart, fname):
    """Save the first image of a [0, 1] BHWC float batch to `fname` as 8-bit."""
    first = (xstart.clip(0, 1)[0] * 255).astype(np.uint8)
    Image.fromarray(first).save(fname)
def get_interactive_image(resize=False):
    """Let the user upload an image via streamlit; return it as an HWC uint8 array.

    Implicitly returns None while nothing has been uploaded. When `resize` is
    True the image is resized to 256x256.
    """
    image = st.file_uploader("Input", type=["jpg", "JPEG", "png"])
    if image is not None:
        image = Image.open(image)
        if not image.mode == "RGB":
            image = image.convert("RGB")
        image = np.array(image).astype(np.uint8)
        print("upload image shape: {}".format(image.shape))
        img = Image.fromarray(image)
        if resize:
            img = img.resize((256, 256))
        image = np.array(img)
        return image
def single_image_to_torch(x, permute=True):
    """Turn one HWC uint8 image into a 1xCxHxW (or 1xHxWxC) float tensor in [-1, 1]."""
    assert x is not None, "Please provide an image through the upload function"
    arr = np.array(x)
    tensor = torch.FloatTensor(arr / 255. * 2. - 1.)[None, ...]
    return tensor.permute(0, 3, 1, 2) if permute else tensor
def pad_to_M(x, M):
    """Zero-pad the spatial dims of BCHW `x` (bottom/right) up to multiples of M."""
    pad_h = math.ceil(x.shape[2] / M) * M - x.shape[2]
    pad_w = math.ceil(x.shape[3] / M) * M - x.shape[3]
    return torch.nn.functional.pad(x, (0, pad_w, 0, pad_h, 0, 0, 0, 0))
@torch.no_grad()
def run_conditional(model, dsets):
    """Interactive streamlit demo: autoregressively sample an image conditioned
    on a chosen dataset example (segmentation/depth/etc.), with optional
    image-completion mode and mp4 animation of the sampling process.
    """
    # pick a dataset split and a single example via the sidebar
    if len(dsets.datasets) > 1:
        split = st.sidebar.radio("Split", sorted(dsets.datasets.keys()))
        dset = dsets.datasets[split]
    else:
        dset = next(iter(dsets.datasets.values()))
    batch_size = 1
    start_index = st.sidebar.number_input("Example Index (Size: {})".format(len(dset)), value=0,
                                          min_value=0,
                                          max_value=len(dset)-batch_size)
    indices = list(range(start_index, start_index+batch_size))
    example = default_collate([dset[i] for i in indices])
    x = model.get_input("image", example).to(model.device)
    cond_key = model.cond_stage_key
    c = model.get_input(cond_key, example).to(model.device)
    scale_factor = st.sidebar.slider("Scale Factor", min_value=0.5, max_value=4.0, step=0.25, value=1.00)
    if scale_factor != 1.0:
        x = torch.nn.functional.interpolate(x, scale_factor=scale_factor, mode="bicubic")
        c = torch.nn.functional.interpolate(c, scale_factor=scale_factor, mode="bicubic")
    # encode image and conditioning into discrete codebook indices
    quant_z, z_indices = model.encode_to_z(x)
    quant_c, c_indices = model.encode_to_c(c)
    cshape = quant_z.shape
    xrec = model.first_stage_model.decode(quant_z)
    st.write("image: {}".format(x.shape))
    st.image(bchw_to_st(x), clamp=True, output_format="PNG")
    st.write("image reconstruction: {}".format(xrec.shape))
    st.image(bchw_to_st(xrec), clamp=True, output_format="PNG")
    if cond_key == "segmentation":
        # get image from segmentation mask
        num_classes = c.shape[1]
        c = torch.argmax(c, dim=1, keepdim=True)
        c = torch.nn.functional.one_hot(c, num_classes=num_classes)
        c = c.squeeze(1).permute(0, 3, 1, 2).float()
        c = model.cond_stage_model.to_rgb(c)
    st.write(f"{cond_key}: {tuple(c.shape)}")
    st.image(bchw_to_st(c), clamp=True, output_format="PNG")
    idx = z_indices
    # "Image Completion" keeps the top half of the encoded image fixed
    half_sample = st.sidebar.checkbox("Image Completion", value=False)
    if half_sample:
        start = idx.shape[1]//2
    else:
        start = 0
    idx[:,start:] = 0
    idx = idx.reshape(cshape[0],cshape[2],cshape[3])
    start_i = start//cshape[3]
    start_j = start %cshape[3]
    if not half_sample and quant_z.shape == quant_c.shape:
        st.info("Setting idx to c_indices")
        idx = c_indices.clone().reshape(cshape[0],cshape[2],cshape[3])
    cidx = c_indices
    cidx = cidx.reshape(quant_c.shape[0],quant_c.shape[2],quant_c.shape[3])
    xstart = model.decode_to_img(idx[:,:cshape[2],:cshape[3]], cshape)
    st.image(bchw_to_st(xstart), clamp=True, output_format="PNG")
    temperature = st.number_input("Temperature", value=1.0)
    top_k = st.number_input("Top k", value=100)
    sample = st.checkbox("Sample", value=True)
    update_every = st.number_input("Update every", value=75)
    st.text(f"Sampling shape ({cshape[2]},{cshape[3]})")
    animate = st.checkbox("animate")
    if animate:
        import imageio
        outvid = "sampling.mp4"
        writer = imageio.get_writer(outvid, fps=25)
    elapsed_t = st.empty()
    info = st.empty()
    st.text("Sampled")
    if st.button("Sample"):
        output = st.empty()
        start_t = time.time()
        # raster-scan over latent positions; each step conditions the
        # transformer on a 16x16 sliding window centered near (i, j)
        for i in range(start_i,cshape[2]-0):
            if i <= 8:
                local_i = i
            elif cshape[2]-i < 8:
                local_i = 16-(cshape[2]-i)
            else:
                local_i = 8
            for j in range(start_j,cshape[3]-0):
                if j <= 8:
                    local_j = j
                elif cshape[3]-j < 8:
                    local_j = 16-(cshape[3]-j)
                else:
                    local_j = 8
                i_start = i-local_i
                i_end = i_start+16
                j_start = j-local_j
                j_end = j_start+16
                elapsed_t.text(f"Time: {time.time() - start_t} seconds")
                info.text(f"Step: ({i},{j}) | Local: ({local_i},{local_j}) | Crop: ({i_start}:{i_end},{j_start}:{j_end})")
                patch = idx[:,i_start:i_end,j_start:j_end]
                patch = patch.reshape(patch.shape[0],-1)
                cpatch = cidx[:, i_start:i_end, j_start:j_end]
                cpatch = cpatch.reshape(cpatch.shape[0], -1)
                patch = torch.cat((cpatch, patch), dim=1)
                logits,_ = model.transformer(patch[:,:-1])
                logits = logits[:, -256:, :]
                logits = logits.reshape(cshape[0],16,16,-1)
                logits = logits[:,local_i,local_j,:]
                logits = logits/temperature
                if top_k is not None:
                    logits = model.top_k_logits(logits, top_k)
                # apply softmax to convert to probabilities
                probs = torch.nn.functional.softmax(logits, dim=-1)
                # sample from the distribution or take the most likely
                if sample:
                    ix = torch.multinomial(probs, num_samples=1)
                else:
                    _, ix = torch.topk(probs, k=1, dim=-1)
                idx[:,i,j] = ix
                if (i*cshape[3]+j)%update_every==0:
                    xstart = model.decode_to_img(idx[:, :cshape[2], :cshape[3]], cshape,)
                    xstart = bchw_to_st(xstart)
                    output.image(xstart, clamp=True, output_format="PNG")
                    if animate:
                        writer.append_data((xstart[0]*255).clip(0, 255).astype(np.uint8))
        xstart = model.decode_to_img(idx[:,:cshape[2],:cshape[3]], cshape)
        xstart = bchw_to_st(xstart)
        output.image(xstart, clamp=True, output_format="PNG")
        #save_img(xstart, "full_res_sample.png")
        if animate:
            writer.close()
            st.video(outvid)
def get_parser():
    """Build the CLI argument parser for the conditional sampling demo."""
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "-r", "--resume",
        type=str,
        nargs="?",
        help="load from logdir or checkpoint in logdir",
    )
    parser.add_argument(
        "-b", "--base",
        nargs="*",
        metavar="base_config.yaml",
        default=list(),
        help="paths to base configs. Loaded from left-to-right. "
             "Parameters can be overwritten or added with command-line options of the form `--key value`.",
    )
    parser.add_argument(
        "-c", "--config",
        nargs="?",
        metavar="single_config.yaml",
        const=True,
        default="",
        help="path to single config. If specified, base configs will be ignored "
             "(except for the last one if left unspecified).",
    )
    parser.add_argument(
        "--ignore_base_data",
        action="store_true",
        help="Ignore data specification from base configs. Useful if you want "
             "to specify a custom datasets on the command line.",
    )
    return parser
def load_model_from_config(config, sd, gpu=True, eval_mode=True):
    """Instantiate a model from `config`, load state dict `sd`, move/eval it.

    Restore-checkpoint paths inside the config are cleared first so that
    instantiation does not trigger a second (possibly stale) weight load.
    Returns {"model": model}.
    """
    if "ckpt_path" in config.params:
        st.warning("Deleting the restore-ckpt path from the config...")
        config.params.ckpt_path = None
    if "downsample_cond_size" in config.params:
        st.warning("Deleting downsample-cond-size from the config and setting factor=0.5 instead...")
        config.params.downsample_cond_size = -1
        config.params["downsample_cond_factor"] = 0.5
    try:
        if "ckpt_path" in config.params.first_stage_config.params:
            config.params.first_stage_config.params.ckpt_path = None
            st.warning("Deleting the first-stage restore-ckpt path from the config...")
        if "ckpt_path" in config.params.cond_stage_config.params:
            config.params.cond_stage_config.params.ckpt_path = None
            st.warning("Deleting the cond-stage restore-ckpt path from the config...")
    except:
        # NOTE(review): bare except deliberately tolerates configs without
        # nested first/cond stage params, but also hides other errors.
        pass
    model = instantiate_from_config(config)
    if sd is not None:
        missing, unexpected = model.load_state_dict(sd, strict=False)
        st.info(f"Missing Keys in State Dict: {missing}")
        st.info(f"Unexpected Keys in State Dict: {unexpected}")
    if gpu:
        model.cuda()
    if eval_mode:
        model.eval()
    return {"model": model}
def get_data(config):
    """Build the data module described by `config.data`, prepared and set up."""
    datamodule = instantiate_from_config(config.data)
    datamodule.prepare_data()
    datamodule.setup()
    return datamodule
@st.cache(allow_output_mutation=True, suppress_st_warning=True)
def load_model_and_dset(config, ckpt, gpu, eval_mode):
    """Load datasets and model for a config/checkpoint pair (cached by streamlit).

    Returns (dsets, model, global_step); global_step is None without a ckpt.
    """
    # get data
    dsets = get_data(config)  # calls data.config ...
    # now load the specified checkpoint
    if ckpt:
        pl_sd = torch.load(ckpt, map_location="cpu")
        global_step = pl_sd["global_step"]
    else:
        pl_sd = {"state_dict": None}
        global_step = None
    model = load_model_from_config(config.model,
                                   pl_sd["state_dict"],
                                   gpu=gpu,
                                   eval_mode=eval_mode)["model"]
    return dsets, model, global_step
if __name__ == "__main__":
    sys.path.append(os.getcwd())
    parser = get_parser()
    opt, unknown = parser.parse_known_args()
    ckpt = None
    # resolve -r/--resume into a logdir plus a concrete checkpoint path
    if opt.resume:
        if not os.path.exists(opt.resume):
            raise ValueError("Cannot find {}".format(opt.resume))
        if os.path.isfile(opt.resume):
            paths = opt.resume.split("/")
            try:
                idx = len(paths)-paths[::-1].index("logs")+1
            except ValueError:
                idx = -2 # take a guess: path/to/logdir/checkpoints/model.ckpt
            logdir = "/".join(paths[:idx])
            ckpt = opt.resume
        else:
            assert os.path.isdir(opt.resume), opt.resume
            logdir = opt.resume.rstrip("/")
            ckpt = os.path.join(logdir, "checkpoints", "last.ckpt")
        print(f"logdir:{logdir}")
        base_configs = sorted(glob.glob(os.path.join(logdir, "configs/*-project.yaml")))
        opt.base = base_configs+opt.base
    if opt.config:
        if type(opt.config) == str:
            opt.base = [opt.config]
        else:
            opt.base = [opt.base[-1]]
    # merge base configs left-to-right, then command-line dotlist overrides
    configs = [OmegaConf.load(cfg) for cfg in opt.base]
    cli = OmegaConf.from_dotlist(unknown)
    if opt.ignore_base_data:
        for config in configs:
            if hasattr(config, "data"): del config["data"]
    config = OmegaConf.merge(*configs, cli)
    st.sidebar.text(ckpt)
    gs = st.sidebar.empty()
    gs.text(f"Global step: ?")
    st.sidebar.text("Options")
    #gpu = st.sidebar.checkbox("GPU", value=True)
    gpu = True
    #eval_mode = st.sidebar.checkbox("Eval Mode", value=True)
    eval_mode = True
    #show_config = st.sidebar.checkbox("Show Config", value=False)
    show_config = False
    if show_config:
        st.info("Checkpoint: {}".format(ckpt))
        st.json(OmegaConf.to_container(config))
    dsets, model, global_step = load_model_and_dset(config, ckpt, gpu, eval_mode)
    gs.text(f"Global step: {global_step}")
    run_conditional(model, dsets)
| 12,535 | 34.213483 | 122 | py |
taming-transformers | taming-transformers-master/scripts/sample_fast.py | import argparse, os, sys, glob
import torch
import time
import numpy as np
from omegaconf import OmegaConf
from PIL import Image
from tqdm import tqdm, trange
from einops import repeat
from main import instantiate_from_config
from taming.modules.transformer.mingpt import sample_with_past
rescale = lambda x: (x + 1.) / 2.
def chw_to_pillow(x):
    """Convert one [-1, 1] CHW tensor into an 8-bit HWC PIL image."""
    hwc = x.detach().cpu().numpy().transpose(1, 2, 0)
    return Image.fromarray((255 * (hwc + 1.) / 2.).clip(0, 255).astype(np.uint8))
@torch.no_grad()
def sample_classconditional(model, batch_size, class_label, steps=256, temperature=None, top_k=None, callback=None,
                            dim_z=256, h=16, w=16, verbose_time=False, top_p=None):
    """Draw `batch_size` samples of class `class_label` from a class-conditional model.

    Returns a dict with decoded "samples" (images) and the "class_label" tokens.
    """
    log = dict()
    assert type(class_label) == int, f'expecting type int but type is {type(class_label)}'
    qzshape = [batch_size, dim_z, h, w]
    assert not model.be_unconditional, 'Expecting a class-conditional Net2NetTransformer.'
    c_indices = repeat(torch.tensor([class_label]), '1 -> b 1', b=batch_size).to(model.device)  # class token
    t1 = time.time()
    index_sample = sample_with_past(c_indices, model.transformer, steps=steps,
                                    sample_logits=True, top_k=top_k, callback=callback,
                                    temperature=temperature, top_p=top_p)
    if verbose_time:
        sampling_time = time.time() - t1
        print(f"Full sampling takes about {sampling_time:.2f} seconds.")
    x_sample = model.decode_to_img(index_sample, qzshape)
    log["samples"] = x_sample
    log["class_label"] = c_indices
    return log
@torch.no_grad()
def sample_unconditional(model, batch_size, steps=256, temperature=None, top_k=None, top_p=None, callback=None,
                         dim_z=256, h=16, w=16, verbose_time=False):
    """Draw `batch_size` unconditional samples, seeded with the model's sos token.

    Returns a dict with decoded "samples" (images).
    """
    log = dict()
    qzshape = [batch_size, dim_z, h, w]
    assert model.be_unconditional, 'Expecting an unconditional model.'
    c_indices = repeat(torch.tensor([model.sos_token]), '1 -> b 1', b=batch_size).to(model.device)  # sos token
    t1 = time.time()
    index_sample = sample_with_past(c_indices, model.transformer, steps=steps,
                                    sample_logits=True, top_k=top_k, callback=callback,
                                    temperature=temperature, top_p=top_p)
    if verbose_time:
        sampling_time = time.time() - t1
        print(f"Full sampling takes about {sampling_time:.2f} seconds.")
    x_sample = model.decode_to_img(index_sample, qzshape)
    log["samples"] = x_sample
    return log
@torch.no_grad()
def run(logdir, model, batch_size, temperature, top_k, unconditional=True, num_samples=50000,
        given_classes=None, top_p=None):
    """Sample `num_samples` images (per class when conditional) and save under `logdir`."""
    # split the request into full batches plus one (possibly empty) remainder
    batches = [batch_size for _ in range(num_samples//batch_size)] + [num_samples % batch_size]
    if not unconditional:
        assert given_classes is not None
        print("Running in pure class-conditional sampling mode. I will produce "
              f"{num_samples} samples for each of the {len(given_classes)} classes, "
              f"i.e. {num_samples*len(given_classes)} in total.")
        for class_label in tqdm(given_classes, desc="Classes"):
            for n, bs in tqdm(enumerate(batches), desc="Sampling Class"):
                if bs == 0: break
                logs = sample_classconditional(model, batch_size=bs, class_label=class_label,
                                               temperature=temperature, top_k=top_k, top_p=top_p)
                save_from_logs(logs, logdir, base_count=n * batch_size, cond_key=logs["class_label"])
    else:
        print(f"Running in unconditional sampling mode, producing {num_samples} samples.")
        for n, bs in tqdm(enumerate(batches), desc="Sampling"):
            if bs == 0: break
            logs = sample_unconditional(model, batch_size=bs, temperature=temperature, top_k=top_k, top_p=top_p)
            save_from_logs(logs, logdir, base_count=n * batch_size)
def save_from_logs(logs, logdir, base_count, key="samples", cond_key=None):
    """Write each sample in logs[key] as png (per-class subfolders if cond_key given)."""
    xx = logs[key]
    for i, x in enumerate(xx):
        x = chw_to_pillow(x)
        count = base_count + i
        if cond_key is None:
            x.save(os.path.join(logdir, f"{count:06}.png"))
        else:
            condlabel = cond_key[i]
            if type(condlabel) == torch.Tensor: condlabel = condlabel.item()
            os.makedirs(os.path.join(logdir, str(condlabel)), exist_ok=True)
            x.save(os.path.join(logdir, str(condlabel), f"{count:06}.png"))
def get_parser():
    """Build the CLI parser for fast sampling.

    Options cover checkpoint resume, output directory, base configs, sample
    count, batch size and the sampling hyper-parameters (top-k, temperature,
    top-p, class selection).

    Note: a nested `str2bool` helper was removed — no argument used it.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "-r",
        "--resume",
        type=str,
        nargs="?",
        help="load from logdir or checkpoint in logdir",
    )
    parser.add_argument(
        "-o",
        "--outdir",
        type=str,
        nargs="?",
        help="path where the samples will be logged to.",
        default=""
    )
    parser.add_argument(
        "-b",
        "--base",
        nargs="*",
        metavar="base_config.yaml",
        help="paths to base configs. Loaded from left-to-right. "
             "Parameters can be overwritten or added with command-line options of the form `--key value`.",
        default=list(),
    )
    parser.add_argument(
        "-n",
        "--num_samples",
        type=int,
        nargs="?",
        help="num_samples to draw",
        default=50000
    )
    parser.add_argument(
        "--batch_size",
        type=int,
        nargs="?",
        help="the batch size",
        default=25
    )
    parser.add_argument(
        "-k",
        "--top_k",
        type=int,
        nargs="?",
        help="top-k value to sample with",
        default=250,
    )
    parser.add_argument(
        "-t",
        "--temperature",
        type=float,
        nargs="?",
        help="temperature value to sample with",
        default=1.0
    )
    parser.add_argument(
        "-p",
        "--top_p",
        type=float,
        nargs="?",
        help="top-p value to sample with",
        default=1.0
    )
    parser.add_argument(
        "--classes",
        type=str,
        nargs="?",
        help="specify comma-separated classes to sample from. Uses 1000 classes per default.",
        default="imagenet"
    )
    return parser
def load_model_from_config(config, sd, gpu=True, eval_mode=True):
    """Instantiate the model from `config`, optionally loading weights `sd`.

    Returns {"model": model}, moved to GPU / set to eval mode as requested.
    """
    model = instantiate_from_config(config)
    if sd is not None:
        model.load_state_dict(sd)
    if gpu:
        model.cuda()
    if eval_mode:
        model.eval()
    return {"model": model}
def load_model(config, ckpt, gpu, eval_mode):
    """Load the model (weights from `ckpt` if given); returns (model, global_step)."""
    # load the specified checkpoint
    if ckpt:
        pl_sd = torch.load(ckpt, map_location="cpu")
        global_step = pl_sd["global_step"]
        print(f"loaded model from global step {global_step}.")
    else:
        pl_sd = {"state_dict": None}
        global_step = None
    model = load_model_from_config(config.model, pl_sd["state_dict"], gpu=gpu, eval_mode=eval_mode)["model"]
    return model, global_step
if __name__ == "__main__":
    sys.path.append(os.getcwd())
    parser = get_parser()
    opt, unknown = parser.parse_known_args()
    assert opt.resume
    ckpt = None
    # resolve -r/--resume into a logdir plus a concrete checkpoint path
    if not os.path.exists(opt.resume):
        raise ValueError("Cannot find {}".format(opt.resume))
    if os.path.isfile(opt.resume):
        paths = opt.resume.split("/")
        try:
            idx = len(paths)-paths[::-1].index("logs")+1
        except ValueError:
            idx = -2 # take a guess: path/to/logdir/checkpoints/model.ckpt
        logdir = "/".join(paths[:idx])
        ckpt = opt.resume
    else:
        assert os.path.isdir(opt.resume), opt.resume
        logdir = opt.resume.rstrip("/")
        ckpt = os.path.join(logdir, "checkpoints", "last.ckpt")
    base_configs = sorted(glob.glob(os.path.join(logdir, "configs/*-project.yaml")))
    opt.base = base_configs+opt.base
    configs = [OmegaConf.load(cfg) for cfg in opt.base]
    cli = OmegaConf.from_dotlist(unknown)
    config = OmegaConf.merge(*configs, cli)
    model, global_step = load_model(config, ckpt, gpu=True, eval_mode=True)
    if opt.outdir:
        print(f"Switching logdir from '{logdir}' to '{opt.outdir}'")
        logdir = opt.outdir
    # "--classes imagenet" means all 1000 classes, otherwise a comma list
    if opt.classes == "imagenet":
        given_classes = [i for i in range(1000)]
    else:
        cls_str = opt.classes
        assert not cls_str.endswith(","), 'class string should not end with a ","'
        given_classes = [int(c) for c in cls_str.split(",")]
    logdir = os.path.join(logdir, "samples", f"top_k_{opt.top_k}_temp_{opt.temperature:.2f}_top_p_{opt.top_p}",
                          f"{global_step}")
    print(f"Logging to {logdir}")
    os.makedirs(logdir, exist_ok=True)
    run(logdir, model, opt.batch_size, opt.temperature, opt.top_k, unconditional=model.be_unconditional,
        given_classes=given_classes, num_samples=opt.num_samples, top_p=opt.top_p)
    print("done.")
| 9,191 | 34.218391 | 115 | py |
taming-transformers | taming-transformers-master/scripts/make_scene_samples.py | import glob
import os
import sys
from itertools import product
from pathlib import Path
from typing import Literal, List, Optional, Tuple
import numpy as np
import torch
from omegaconf import OmegaConf
from pytorch_lightning import seed_everything
from torch import Tensor
from torchvision.utils import save_image
from tqdm import tqdm
from scripts.make_samples import get_parser, load_model_and_dset
from taming.data.conditional_builder.objects_center_points import ObjectsCenterPointsConditionalBuilder
from taming.data.helper_types import BoundingBox, Annotation
from taming.data.annotated_objects_dataset import AnnotatedObjectsDataset
from taming.models.cond_transformer import Net2NetTransformer
seed_everything(42424242)
device: Literal['cuda', 'cpu'] = 'cuda'
first_stage_factor = 16
trained_on_res = 256
def _helper(coord: int, coord_max: int, coord_window: int) -> (int, int):
assert 0 <= coord < coord_max
coord_desired_center = (coord_window - 1) // 2
return np.clip(coord - coord_desired_center, 0, coord_max - coord_window)
def get_crop_coordinates(x: int, y: int) -> BoundingBox:
    """Return the relative (x0, y0, w, h) crop window centred near latent (x, y)."""
    height, width = desired_z_shape
    x0 = _helper(x, width, first_stage_factor) / width
    y0 = _helper(y, height, first_stage_factor) / height
    return x0, y0, first_stage_factor / width, first_stage_factor / height
def get_z_indices_crop_out(z_indices: Tensor, predict_x: int, predict_y: int) -> Tensor:
    """Gather already-sampled indices above and left of (predict_x, predict_y)."""
    height, width = desired_z_shape
    x0 = _helper(predict_x, width, first_stage_factor)
    y0 = _helper(predict_y, height, first_stage_factor)
    batch = z_indices.shape[0]
    full_rows = z_indices[:, y0:predict_y, x0:x0 + first_stage_factor].reshape((batch, -1))
    partial_row = z_indices[:, predict_y, x0:predict_x]
    return torch.cat((full_rows, partial_row), dim=1)
@torch.no_grad()
def sample(model: Net2NetTransformer, annotations: List[Annotation], dataset: AnnotatedObjectsDataset,
           conditional_builder: ObjectsCenterPointsConditionalBuilder, no_samples: int,
           temperature: float, top_k: int) -> Tensor:
    """Sample `no_samples` images for a scene layout, plus a plot of the layout.

    If the requested resolution exceeds the training resolution, the
    conditional is rebuilt per position with a sliding crop window.
    Returns a (no_samples + 1, C, H, W) tensor (last entry is the layout plot).
    """
    x_max, y_max = desired_z_shape[1], desired_z_shape[0]
    annotations = [a._replace(category_no=dataset.get_category_number(a.category_id)) for a in annotations]
    recompute_conditional = any((desired_resolution[0] > trained_on_res, desired_resolution[1] > trained_on_res))
    if not recompute_conditional:
        # trained resolution: one conditional, sample all positions at once
        crop_coordinates = get_crop_coordinates(0, 0)
        conditional_indices = conditional_builder.build(annotations, crop_coordinates)
        c_indices = conditional_indices.to(device).repeat(no_samples, 1)
        z_indices = torch.zeros((no_samples, 0), device=device).long()
        output_indices = model.sample(z_indices, c_indices, steps=x_max*y_max, temperature=temperature,
                                      sample=True, top_k=top_k)
    else:
        # larger-than-trained resolution: slide a crop and sample one token at a time
        output_indices = torch.zeros((no_samples, y_max, x_max), device=device).long()
        for predict_y, predict_x in tqdm(product(range(y_max), range(x_max)), desc='sampling_image', total=x_max*y_max):
            crop_coordinates = get_crop_coordinates(predict_x, predict_y)
            z_indices = get_z_indices_crop_out(output_indices, predict_x, predict_y)
            conditional_indices = conditional_builder.build(annotations, crop_coordinates)
            c_indices = conditional_indices.to(device).repeat(no_samples, 1)
            new_index = model.sample(z_indices, c_indices, steps=1, temperature=temperature, sample=True, top_k=top_k)
            output_indices[:, predict_y, predict_x] = new_index[:, -1]
    z_shape = (
        no_samples,
        model.first_stage_model.quantize.e_dim,  # codebook embed_dim
        desired_z_shape[0],  # z_height
        desired_z_shape[1]  # z_width
    )
    x_sample = model.decode_to_img(output_indices, z_shape) * 0.5 + 0.5
    x_sample = x_sample.to('cpu')
    plotter = conditional_builder.plot
    figure_size = (x_sample.shape[2], x_sample.shape[3])
    scene_graph = conditional_builder.build(annotations, (0., 0., 1., 1.))
    plot = plotter(scene_graph, dataset.get_textual_label_for_category_no, figure_size)
    return torch.cat((x_sample, plot.unsqueeze(0)))
def get_resolution(resolution_str: str) -> (Tuple[int, int], Tuple[int, int]):
    """Parse 'height,width' into ((z_h, z_w), (pixel_h, pixel_w)).

    Values are clamped up to the training resolution and rounded to
    multiples of the first-stage downsampling factor.
    """
    if resolution_str.count(',') != 1:
        raise ValueError("Give resolution as in 'height,width'")
    raw_h, raw_w = resolution_str.split(',')
    res_h = max(int(raw_h), trained_on_res)
    res_w = max(int(raw_w), trained_on_res)
    z_h = int(round(res_h / first_stage_factor))
    z_w = int(round(res_w / first_stage_factor))
    return (z_h, z_w), (z_h * first_stage_factor, z_w * first_stage_factor)
def add_arg_to_parser(parser):
    """Add scene-sampling options (resolution, conditional type, samples per layout)."""
    parser.add_argument(
        "-R",
        "--resolution",
        type=str,
        default='256,256',
        help=f"give resolution in multiples of {first_stage_factor}, default is '256,256'",
    )
    parser.add_argument(
        "-C",
        "--conditional",
        type=str,
        default='objects_bbox',
        help=f"objects_bbox or objects_center_points",
    )
    parser.add_argument(
        "-N",
        "--n_samples_per_layout",
        type=int,
        default=4,
        help=f"how many samples to generate per layout",
    )
    return parser
if __name__ == "__main__":
    sys.path.append(os.getcwd())
    parser = get_parser()
    parser = add_arg_to_parser(parser)
    opt, unknown = parser.parse_known_args()
    ckpt = None
    # resolve -r/--resume into a logdir plus a concrete checkpoint path
    if opt.resume:
        if not os.path.exists(opt.resume):
            raise ValueError("Cannot find {}".format(opt.resume))
        if os.path.isfile(opt.resume):
            paths = opt.resume.split("/")
            try:
                idx = len(paths)-paths[::-1].index("logs")+1
            except ValueError:
                idx = -2  # take a guess: path/to/logdir/checkpoints/model.ckpt
            logdir = "/".join(paths[:idx])
            ckpt = opt.resume
        else:
            assert os.path.isdir(opt.resume), opt.resume
            logdir = opt.resume.rstrip("/")
            ckpt = os.path.join(logdir, "checkpoints", "last.ckpt")
        print(f"logdir:{logdir}")
        base_configs = sorted(glob.glob(os.path.join(logdir, "configs/*-project.yaml")))
        opt.base = base_configs+opt.base
    if opt.config:
        if type(opt.config) == str:
            opt.base = [opt.config]
        else:
            opt.base = [opt.base[-1]]
    configs = [OmegaConf.load(cfg) for cfg in opt.base]
    cli = OmegaConf.from_dotlist(unknown)
    if opt.ignore_base_data:
        for config in configs:
            if hasattr(config, "data"):
                del config["data"]
    config = OmegaConf.merge(*configs, cli)
    # module-level globals consumed by sample()/get_crop_coordinates()
    desired_z_shape, desired_resolution = get_resolution(opt.resolution)
    conditional = opt.conditional
    print(ckpt)
    gpu = True
    eval_mode = True
    show_config = False
    if show_config:
        print(OmegaConf.to_container(config))
    dsets, model, global_step = load_model_and_dset(config, ckpt, gpu, eval_mode)
    print(f"Global step: {global_step}")
    data_loader = dsets.val_dataloader()
    print(dsets.datasets["validation"].conditional_builders)
    conditional_builder = dsets.datasets["validation"].conditional_builders[conditional]
    outdir = Path(opt.outdir).joinpath(f"{global_step:06}_{opt.top_k}_{opt.temperature}")
    outdir.mkdir(exist_ok=True, parents=True)
    print("Writing samples to ", outdir)
    p_bar_1 = tqdm(enumerate(iter(data_loader)), desc='batch', total=len(data_loader))
    for batch_no, batch in p_bar_1:
        save_img: Optional[Tensor] = None
        for i, annotations in tqdm(enumerate(batch['annotations']), desc='within_batch', total=data_loader.batch_size):
            imgs = sample(model, annotations, dsets.datasets["validation"], conditional_builder,
                          opt.n_samples_per_layout, opt.temperature, opt.top_k)
            save_image(imgs, outdir.joinpath(f'{batch_no:04}_{i:02}.png'), n_row=opt.n_samples_per_layout+1)
| 8,092 | 39.668342 | 120 | py |
taming-transformers | taming-transformers-master/scripts/extract_depth.py | import os
import torch
import numpy as np
from tqdm import trange
from PIL import Image
def get_state(gpu):
    """Load the MiDaS depth model and its default transform from torch.hub.

    Returns {"model": midas, "transform": transform}; model is in eval mode.
    """
    import torch
    midas = torch.hub.load("intel-isl/MiDaS", "MiDaS")
    if gpu:
        midas.cuda()
    midas.eval()
    midas_transforms = torch.hub.load("intel-isl/MiDaS", "transforms")
    transform = midas_transforms.default_transform
    state = {"model": midas,
             "transform": transform}
    return state
def depth_to_rgba(x):
    """Losslessly reinterpret a 2-D float32 depth map as an HxWx4 uint8 array."""
    assert x.dtype == np.float32
    assert len(x.shape) == 2
    raw = x.copy()
    raw.dtype = np.uint8
    return np.ascontiguousarray(raw.reshape(x.shape + (4,)))
def rgba_to_depth(x):
    """Inverse of depth_to_rgba: view an HxWx4 uint8 array as a float32 depth map."""
    assert x.dtype == np.uint8
    assert len(x.shape) == 3 and x.shape[2] == 4
    raw = x.copy()
    raw.dtype = np.float32
    return np.ascontiguousarray(raw.reshape(x.shape[:2]))
def run(x, state):
    """Predict a depth map for image `x` (HWC in [-1, 1]); returns an HxW numpy array."""
    model = state["model"]
    transform = state["transform"]
    hw = x.shape[:2]
    with torch.no_grad():
        prediction = model(transform((x + 1.0) * 127.5).cuda())
        # resize the prediction back to the input resolution
        prediction = torch.nn.functional.interpolate(
            prediction.unsqueeze(1),
            size=hw,
            mode="bicubic",
            align_corners=False,
        ).squeeze()
    output = prediction.cpu().numpy()
    return output
def get_filename(relpath, level=-2):
    """Return (class_folder, file_stem) taken from the tail of `relpath`."""
    # keep the class folder structure and the extension-less filename
    parts = relpath.split(os.sep)[level:]
    stem = parts[-1].split('.')[0]
    return parts[-2], stem
def save_depth(dataset, path, debug=False):
    """Run MiDaS over `dataset` and store each depth map as an RGBA png under `path`.

    Args:
        dataset: indexable dataset yielding dicts with "image" and "relpath".
        path: output root directory (must not exist yet).
        debug: if True, only process the first 10 examples.
    """
    os.makedirs(path)
    # Fix: use the `dataset` parameter instead of the module-level `dset`
    # global, which only exists when this file runs as __main__.
    N = len(dataset)
    if debug:
        N = 10
    state = get_state(gpu=True)
    for idx in trange(N, desc="Data"):
        ex = dataset[idx]
        image, relpath = ex["image"], ex["relpath"]
        folder, filename = get_filename(relpath)
        # prepare an output folder mirroring the class structure
        folderabspath = os.path.join(path, folder)
        os.makedirs(folderabspath, exist_ok=True)
        savepath = os.path.join(folderabspath, filename)
        # run model and serialize the float32 depth losslessly as RGBA png
        xout = run(image, state)
        I = depth_to_rgba(xout)
        Image.fromarray(I).save("{}.png".format(savepath))
if __name__ == "__main__":
    from taming.data.imagenet import ImageNetTrain, ImageNetValidation
    # output root must be prepared by the user (it can get very large)
    out = "data/imagenet_depth"
    if not os.path.exists(out):
        print("Please create a folder or symlink '{}' to extract depth data ".format(out) +
              "(be prepared that the output size will be larger than ImageNet itself).")
        exit(1)
    # go
    dset = ImageNetValidation()
    abspath = os.path.join(out, "val")
    if os.path.exists(abspath):
        print("{} exists - not doing anything.".format(abspath))
    else:
        print("preparing {}".format(abspath))
        save_depth(dset, abspath)
        print("done with validation split")
    dset = ImageNetTrain()
    abspath = os.path.join(out, "train")
    if os.path.exists(abspath):
        print("{} exists - not doing anything.".format(abspath))
    else:
        print("preparing {}".format(abspath))
        save_depth(dset, abspath)
        print("done with train split")
    print("done done.")
| 3,121 | 26.628319 | 91 | py |
taming-transformers | taming-transformers-master/scripts/extract_submodel.py | import torch
import sys
if __name__ == "__main__":
    # Usage: extract_submodel.py <in_ckpt> <out_ckpt> [<submodel>]
    # Pulls the weights of one submodule out of a Lightning checkpoint and
    # saves them as a standalone state dict with the prefix stripped.
    inpath = sys.argv[1]
    outpath = sys.argv[2]
    submodel = "cond_stage_model"
    if len(sys.argv) > 3:
        submodel = sys.argv[3]
    print("Extracting {} from {} to {}.".format(submodel, inpath, outpath))

    sd = torch.load(inpath, map_location="cpu")
    # BUG FIX: filter on the requested ``submodel`` instead of the hard-coded
    # "cond_stage_model" prefix, so the optional third CLI argument actually
    # takes effect.  (Note: the prefix strip drops only the first dotted
    # component — presumably submodels are top-level attributes; verify when
    # passing nested names.)
    new_sd = {"state_dict": dict((k.split(".", 1)[-1], v)
                                 for k, v in sd["state_dict"].items()
                                 if k.startswith(submodel))}
    torch.save(new_sd, outpath)
| 549 | 29.555556 | 75 | py |
taming-transformers | taming-transformers-master/scripts/make_samples.py | import argparse, os, sys, glob, math, time
import torch
import numpy as np
from omegaconf import OmegaConf
from PIL import Image
from main import instantiate_from_config, DataModuleFromConfig
from torch.utils.data import DataLoader
from torch.utils.data.dataloader import default_collate
from tqdm import trange
def save_image(x, path):
    """Write a 3xHxW tensor in [-1, 1] to ``path`` as an 8-bit RGB image."""
    channels, height, width = x.shape
    assert channels == 3
    hwc = x.detach().cpu().numpy().transpose(1, 2, 0)
    pixels = ((hwc + 1.0) * 127.5).clip(0, 255).astype(np.uint8)
    Image.fromarray(pixels).save(path)
@torch.no_grad()
def run_conditional(model, dsets, outdir, top_k, temperature, batch_size=1):
    """Autoregressively sample images from a conditional transformer.

    For each batch of the (first) dataset in ``dsets``, writes three PNGs per
    example under ``outdir``: the input ("originals"), the first-stage
    reconstruction ("reconstructions"), and a sampled image ("samples").
    Sampling runs code-by-code over the latent grid; each position is
    predicted from a 16x16 window of (conditioning + image) codes around it.

    Args:
        model: Net2Net-style model exposing get_input/encode_to_z/encode_to_c/
            decode_to_img and a ``transformer`` attribute.
        dsets: data module with a ``datasets`` dict of splits.
        outdir: output directory (subfolders must exist).
        top_k: restrict sampling to the top-k logits (None disables).
        temperature: softmax temperature for sampling.
        batch_size: examples per batch.
    """
    # pick a single split: alphabetically first if there are several
    if len(dsets.datasets) > 1:
        split = sorted(dsets.datasets.keys())[0]
        dset = dsets.datasets[split]
    else:
        dset = next(iter(dsets.datasets.values()))
    print("Dataset: ", dset.__class__.__name__)
    for start_idx in trange(0,len(dset)-batch_size+1,batch_size):
        indices = list(range(start_idx, start_idx+batch_size))
        example = default_collate([dset[i] for i in indices])
        x = model.get_input("image", example).to(model.device)
        for i in range(x.shape[0]):
            save_image(x[i], os.path.join(outdir, "originals",
                                          "{:06}.png".format(indices[i])))
        cond_key = model.cond_stage_key
        c = model.get_input(cond_key, example).to(model.device)
        scale_factor = 1.0  # unused; kept for reference
        # encode image and conditioning into discrete latent codes
        quant_z, z_indices = model.encode_to_z(x)
        quant_c, c_indices = model.encode_to_c(c)
        cshape = quant_z.shape
        xrec = model.first_stage_model.decode(quant_z)
        for i in range(xrec.shape[0]):
            save_image(xrec[i], os.path.join(outdir, "reconstructions",
                                             "{:06}.png".format(indices[i])))
        if cond_key == "segmentation":
            # get image from segmentation mask
            num_classes = c.shape[1]
            c = torch.argmax(c, dim=1, keepdim=True)
            c = torch.nn.functional.one_hot(c, num_classes=num_classes)
            c = c.squeeze(1).permute(0, 3, 1, 2).float()
            c = model.cond_stage_model.to_rgb(c)
        idx = z_indices
        half_sample = False
        if half_sample:
            start = idx.shape[1]//2
        else:
            start = 0
        # zero out everything from ``start`` on; those codes get sampled below
        idx[:,start:] = 0
        idx = idx.reshape(cshape[0],cshape[2],cshape[3])
        start_i = start//cshape[3]
        start_j = start %cshape[3]
        cidx = c_indices
        cidx = cidx.reshape(quant_c.shape[0],quant_c.shape[2],quant_c.shape[3])
        sample = True
        for i in range(start_i,cshape[2]-0):
            # local_i/local_j: position of the current code inside the 16x16
            # context window (clamped near the grid borders)
            if i <= 8:
                local_i = i
            elif cshape[2]-i < 8:
                local_i = 16-(cshape[2]-i)
            else:
                local_i = 8
            for j in range(start_j,cshape[3]-0):
                if j <= 8:
                    local_j = j
                elif cshape[3]-j < 8:
                    local_j = 16-(cshape[3]-j)
                else:
                    local_j = 8
                i_start = i-local_i
                i_end = i_start+16
                j_start = j-local_j
                j_end = j_start+16
                # crop matching 16x16 patches of image and conditioning codes
                patch = idx[:,i_start:i_end,j_start:j_end]
                patch = patch.reshape(patch.shape[0],-1)
                cpatch = cidx[:, i_start:i_end, j_start:j_end]
                cpatch = cpatch.reshape(cpatch.shape[0], -1)
                patch = torch.cat((cpatch, patch), dim=1)
                # predict the next code; keep only the image-token logits
                logits,_ = model.transformer(patch[:,:-1])
                logits = logits[:, -256:, :]
                logits = logits.reshape(cshape[0],16,16,-1)
                logits = logits[:,local_i,local_j,:]
                logits = logits/temperature
                if top_k is not None:
                    logits = model.top_k_logits(logits, top_k)
                # apply softmax to convert to probabilities
                probs = torch.nn.functional.softmax(logits, dim=-1)
                # sample from the distribution or take the most likely
                if sample:
                    ix = torch.multinomial(probs, num_samples=1)
                else:
                    _, ix = torch.topk(probs, k=1, dim=-1)
                idx[:,i,j] = ix
        xsample = model.decode_to_img(idx[:,:cshape[2],:cshape[3]], cshape)
        for i in range(xsample.shape[0]):
            save_image(xsample[i], os.path.join(outdir, "samples",
                                                "{:06}.png".format(indices[i])))
def get_parser():
    """Build the command-line parser for the sampling script."""
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "-r", "--resume", nargs="?", type=str,
        help="load from logdir or checkpoint in logdir")
    parser.add_argument(
        "-b", "--base", nargs="*", default=list(),
        metavar="base_config.yaml",
        help="paths to base configs. Loaded from left-to-right. "
             "Parameters can be overwritten or added with command-line options of the form `--key value`.")
    parser.add_argument(
        "-c", "--config", nargs="?", const=True, default="",
        metavar="single_config.yaml",
        help="path to single config. If specified, base configs will be ignored "
             "(except for the last one if left unspecified).")
    parser.add_argument(
        "--ignore_base_data", action="store_true",
        help="Ignore data specification from base configs. Useful if you want "
             "to specify a custom datasets on the command line.")
    parser.add_argument(
        "--outdir", type=str, required=True,
        help="Where to write outputs to.")
    parser.add_argument(
        "--top_k", type=int, default=100,
        help="Sample from among top-k predictions.")
    parser.add_argument(
        "--temperature", type=float, default=1.0,
        help="Sampling temperature.")
    return parser
def load_model_from_config(config, sd, gpu=True, eval_mode=True):
    """Instantiate the model described by ``config`` and optionally load ``sd``.

    Any restore-checkpoint paths inside the config are removed first so that
    instantiation does not trigger a (second) checkpoint load; weights are
    applied afterwards via ``load_state_dict(strict=False)``.

    Args:
        config: OmegaConf node with ``params`` describing the model.
        sd: state dict to load, or None to keep the freshly initialized model.
        gpu: move the model to CUDA when True.
        eval_mode: switch the model to eval() mode when True.

    Returns:
        dict with the single key "model".
    """
    if "ckpt_path" in config.params:
        print("Deleting the restore-ckpt path from the config...")
        config.params.ckpt_path = None
    if "downsample_cond_size" in config.params:
        print("Deleting downsample-cond-size from the config and setting factor=0.5 instead...")
        config.params.downsample_cond_size = -1
        config.params["downsample_cond_factor"] = 0.5
    try:
        if "ckpt_path" in config.params.first_stage_config.params:
            config.params.first_stage_config.params.ckpt_path = None
            print("Deleting the first-stage restore-ckpt path from the config...")
        if "ckpt_path" in config.params.cond_stage_config.params:
            config.params.cond_stage_config.params.ckpt_path = None
            print("Deleting the cond-stage restore-ckpt path from the config...")
    except Exception:
        # Best effort: configs without first/cond stage sub-configs simply
        # skip this cleanup.  (Was a bare ``except:``, which also swallowed
        # KeyboardInterrupt/SystemExit.)
        pass
    model = instantiate_from_config(config)
    if sd is not None:
        missing, unexpected = model.load_state_dict(sd, strict=False)
        print(f"Missing Keys in State Dict: {missing}")
        print(f"Unexpected Keys in State Dict: {unexpected}")
    if gpu:
        model.cuda()
    if eval_mode:
        model.eval()
    return {"model": model}
def get_data(config):
    """Instantiate the data module from ``config.data``, prepare and set it up."""
    datamodule = instantiate_from_config(config.data)
    datamodule.prepare_data()
    datamodule.setup()
    return datamodule
def load_model_and_dset(config, ckpt, gpu, eval_mode):
    """Load the datasets and the (optionally checkpointed) model from ``config``.

    Returns (dsets, model, global_step); ``global_step`` is None when no
    checkpoint path is given.
    """
    dsets = get_data(config)  # calls data.config ...
    # now load the specified checkpoint
    if ckpt:
        checkpoint = torch.load(ckpt, map_location="cpu")
        state_dict = checkpoint["state_dict"]
        global_step = checkpoint["global_step"]
    else:
        state_dict = None
        global_step = None
    model = load_model_from_config(
        config.model, state_dict, gpu=gpu, eval_mode=eval_mode)["model"]
    return dsets, model, global_step
if __name__ == "__main__":
    # CLI entry point: resolve logdir/checkpoint, merge configs, load model
    # and data, then sample conditionally over the dataset.
    sys.path.append(os.getcwd())
    parser = get_parser()
    opt, unknown = parser.parse_known_args()
    ckpt = None
    if opt.resume:
        if not os.path.exists(opt.resume):
            raise ValueError("Cannot find {}".format(opt.resume))
        if os.path.isfile(opt.resume):
            # a checkpoint file was given; recover its logdir from the path
            paths = opt.resume.split("/")
            try:
                idx = len(paths)-paths[::-1].index("logs")+1
            except ValueError:
                idx = -2 # take a guess: path/to/logdir/checkpoints/model.ckpt
            logdir = "/".join(paths[:idx])
            ckpt = opt.resume
        else:
            # a logdir was given; use its last checkpoint
            assert os.path.isdir(opt.resume), opt.resume
            logdir = opt.resume.rstrip("/")
            ckpt = os.path.join(logdir, "checkpoints", "last.ckpt")
        print(f"logdir:{logdir}")
        base_configs = sorted(glob.glob(os.path.join(logdir, "configs/*-project.yaml")))
        opt.base = base_configs+opt.base
    if opt.config:
        # -c overrides the base configs (bare -c keeps only the last base)
        if type(opt.config) == str:
            opt.base = [opt.config]
        else:
            opt.base = [opt.base[-1]]
    # merge configs left-to-right, then apply --key value CLI overrides
    configs = [OmegaConf.load(cfg) for cfg in opt.base]
    cli = OmegaConf.from_dotlist(unknown)
    if opt.ignore_base_data:
        for config in configs:
            if hasattr(config, "data"): del config["data"]
    config = OmegaConf.merge(*configs, cli)
    print(ckpt)
    gpu = True
    eval_mode = True
    show_config = False
    if show_config:
        print(OmegaConf.to_container(config))
    dsets, model, global_step = load_model_and_dset(config, ckpt, gpu, eval_mode)
    print(f"Global step: {global_step}")
    # one output folder per (step, top_k, temperature) combination
    outdir = os.path.join(opt.outdir, "{:06}_{}_{}".format(global_step,
                                                           opt.top_k,
                                                           opt.temperature))
    os.makedirs(outdir, exist_ok=True)
    print("Writing samples to ", outdir)
    for k in ["originals", "reconstructions", "samples"]:
        os.makedirs(os.path.join(outdir, k), exist_ok=True)
    run_conditional(model, dsets, outdir, opt.top_k, opt.temperature)
| 10,146 | 33.631399 | 102 | py |
taming-transformers | taming-transformers-master/taming/modules/util.py | import torch
import torch.nn as nn
def count_params(model):
    """Return the total number of parameters (trainable or not) of ``model``."""
    return sum(p.numel() for p in model.parameters())
class ActNorm(nn.Module):
    """Activation normalization with data-dependent initialization.

    On the first training-mode forward pass, ``loc``/``scale`` are set from
    the batch so the per-channel output has zero mean and unit variance;
    afterwards they behave as ordinary learned affine parameters.  Accepts
    (B, C, H, W) or (B, C) inputs; optionally returns the log-determinant.
    """

    def __init__(self, num_features, logdet=False, affine=True,
                 allow_reverse_init=False):
        assert affine
        super().__init__()
        self.logdet = logdet
        self.loc = nn.Parameter(torch.zeros(1, num_features, 1, 1))
        self.scale = nn.Parameter(torch.ones(1, num_features, 1, 1))
        self.allow_reverse_init = allow_reverse_init
        # stays 0 until the data-dependent initialization has run once
        self.register_buffer('initialized', torch.tensor(0, dtype=torch.uint8))

    def initialize(self, input):
        """Set loc/scale from per-channel batch statistics of ``input``."""
        with torch.no_grad():
            # per-channel stats over batch and spatial dims
            flat = input.permute(1, 0, 2, 3).contiguous().view(input.shape[1], -1)
            mean = flat.mean(1).view(1, -1, 1, 1)
            std = flat.std(1).view(1, -1, 1, 1)
            self.loc.data.copy_(-mean)
            self.scale.data.copy_(1 / (std + 1e-6))

    def forward(self, input, reverse=False):
        if reverse:
            return self.reverse(input)
        squeeze = len(input.shape) == 2
        if squeeze:
            input = input[:, :, None, None]
        _, _, height, width = input.shape
        if self.training and self.initialized.item() == 0:
            self.initialize(input)
            self.initialized.fill_(1)
        h = self.scale * (input + self.loc)
        if squeeze:
            h = h.squeeze(-1).squeeze(-1)
        if not self.logdet:
            return h
        # log|det J| of the per-pixel affine map, replicated over the batch
        log_abs = torch.log(torch.abs(self.scale))
        logdet = height * width * torch.sum(log_abs)
        logdet = logdet * torch.ones(input.shape[0]).to(input)
        return h, logdet

    def reverse(self, output):
        if self.training and self.initialized.item() == 0:
            if not self.allow_reverse_init:
                raise RuntimeError(
                    "Initializing ActNorm in reverse direction is "
                    "disabled by default. Use allow_reverse_init=True to enable."
                )
            self.initialize(output)
            self.initialized.fill_(1)
        squeeze = len(output.shape) == 2
        if squeeze:
            output = output[:, :, None, None]
        h = output / self.scale - self.loc
        if squeeze:
            h = h.squeeze(-1).squeeze(-1)
        return h
class AbstractEncoder(nn.Module):
    """Base class for conditioning-stage encoders; subclasses implement encode()."""

    def __init__(self):
        super().__init__()

    def encode(self, *args, **kwargs):
        raise NotImplementedError
class Labelator(AbstractEncoder):
    """Net2Net Interface for Class-Conditional Model"""

    def __init__(self, n_classes, quantize_interface=True):
        super().__init__()
        self.n_classes = n_classes
        self.quantize_interface = quantize_interface

    def encode(self, c):
        """Return labels shaped (B, 1); mimic a VQ encode() tuple if requested."""
        labels = c[:, None]
        if not self.quantize_interface:
            return labels
        return labels, None, [None, None, labels.long()]
class SOSProvider(AbstractEncoder):
    """Provide a constant start-of-sequence token for unconditional training."""

    def __init__(self, sos_token, quantize_interface=True):
        super().__init__()
        self.sos_token = sos_token
        self.quantize_interface = quantize_interface

    def encode(self, x):
        # replicate the sos token once per batch element, on x's device
        tokens = torch.ones(x.shape[0], 1) * self.sos_token
        tokens = tokens.long().to(x.device)
        if not self.quantize_interface:
            return tokens
        return tokens, None, [None, None, tokens]
| 3,847 | 28.374046 | 85 | py |
taming-transformers | taming-transformers-master/taming/modules/vqvae/quantize.py | import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
from torch import einsum
from einops import rearrange
class VectorQuantizer(nn.Module):
    """
    see https://github.com/MishaLaskin/vqvae/blob/d761a999e2267766400dc646d82d3ac3657771d4/models/quantizer.py
    ____________________________________________
    Discretization bottleneck part of the VQ-VAE.
    Inputs:
    - n_e : number of embeddings
    - e_dim : dimension of embedding
    - beta : commitment cost used in loss term, beta * ||z_e(x)-sg[e]||^2
    _____________________________________________
    """
    # NOTE: this class contains a bug regarding beta; see VectorQuantizer2 for
    # a fix and use legacy=False to apply that fix. VectorQuantizer2 can be
    # used wherever VectorQuantizer has been used before and is additionally
    # more efficient.
    def __init__(self, n_e, e_dim, beta):
        super(VectorQuantizer, self).__init__()
        self.n_e = n_e
        self.e_dim = e_dim
        self.beta = beta
        self.embedding = nn.Embedding(self.n_e, self.e_dim)
        # uniform init in [-1/n_e, 1/n_e]
        self.embedding.weight.data.uniform_(-1.0 / self.n_e, 1.0 / self.n_e)
    def forward(self, z):
        """
        Inputs the output of the encoder network z and maps it to a discrete
        one-hot vector that is the index of the closest embedding vector e_j
        z (continuous) -> z_q (discrete)
        z.shape = (batch, channel, height, width)
        quantization pipeline:
            1. get encoder input (B,C,H,W)
            2. flatten input to (B*H*W,C)
        """
        # reshape z -> (batch, height, width, channel) and flatten
        z = z.permute(0, 2, 3, 1).contiguous()
        z_flattened = z.view(-1, self.e_dim)
        # distances from z to embeddings e_j (z - e)^2 = z^2 + e^2 - 2 e * z
        d = torch.sum(z_flattened ** 2, dim=1, keepdim=True) + \
            torch.sum(self.embedding.weight**2, dim=1) - 2 * \
            torch.matmul(z_flattened, self.embedding.weight.t())
        ## could possible replace this here
        # #\start...
        # find closest encodings
        min_encoding_indices = torch.argmin(d, dim=1).unsqueeze(1)
        min_encodings = torch.zeros(
            min_encoding_indices.shape[0], self.n_e).to(z)
        min_encodings.scatter_(1, min_encoding_indices, 1)
        # dtype min encodings: torch.float32
        # min_encodings shape: torch.Size([2048, 512])
        # min_encoding_indices.shape: torch.Size([2048, 1])
        # get quantized latent vectors
        z_q = torch.matmul(min_encodings, self.embedding.weight).view(z.shape)
        #.........\end
        # with:
        # .........\start
        #min_encoding_indices = torch.argmin(d, dim=1)
        #z_q = self.embedding(min_encoding_indices)
        # ......\end......... (TODO)
        # compute loss for embedding
        # (codebook loss + beta-weighted commitment loss; see class NOTE
        # about the beta placement)
        loss = torch.mean((z_q.detach()-z)**2) + self.beta * \
            torch.mean((z_q - z.detach()) ** 2)
        # preserve gradients (straight-through estimator)
        z_q = z + (z_q - z).detach()
        # perplexity
        e_mean = torch.mean(min_encodings, dim=0)
        perplexity = torch.exp(-torch.sum(e_mean * torch.log(e_mean + 1e-10)))
        # reshape back to match original input shape
        z_q = z_q.permute(0, 3, 1, 2).contiguous()
        return z_q, loss, (perplexity, min_encodings, min_encoding_indices)
    def get_codebook_entry(self, indices, shape):
        """Look up codebook vectors for flat ``indices``; ``shape`` is
        (batch, height, width, channel) and the result is returned as
        (batch, channel, height, width)."""
        # shape specifying (batch, height, width, channel)
        # TODO: check for more easy handling with nn.Embedding
        min_encodings = torch.zeros(indices.shape[0], self.n_e).to(indices)
        min_encodings.scatter_(1, indices[:,None], 1)
        # get quantized latent vectors
        z_q = torch.matmul(min_encodings.float(), self.embedding.weight)
        if shape is not None:
            z_q = z_q.view(shape)
            # reshape back to match original input shape
            z_q = z_q.permute(0, 3, 1, 2).contiguous()
        return z_q
class GumbelQuantize(nn.Module):
    """
    credit to @karpathy: https://github.com/karpathy/deep-vector-quantization/blob/main/model.py (thanks!)
    Gumbel Softmax trick quantizer
    Categorical Reparameterization with Gumbel-Softmax, Jang et al. 2016
    https://arxiv.org/abs/1611.01144
    """
    def __init__(self, num_hiddens, embedding_dim, n_embed, straight_through=True,
                 kl_weight=5e-4, temp_init=1.0, use_vqinterface=True,
                 remap=None, unknown_index="random"):
        super().__init__()
        self.embedding_dim = embedding_dim
        self.n_embed = n_embed
        self.straight_through = straight_through
        self.temperature = temp_init
        self.kl_weight = kl_weight
        # 1x1 conv projects features to per-code logits
        self.proj = nn.Conv2d(num_hiddens, n_embed, 1)
        self.embed = nn.Embedding(n_embed, embedding_dim)
        self.use_vqinterface = use_vqinterface
        # optional remapping of codebook indices to a used subset loaded
        # from an .npy file
        self.remap = remap
        if self.remap is not None:
            self.register_buffer("used", torch.tensor(np.load(self.remap)))
            self.re_embed = self.used.shape[0]
            self.unknown_index = unknown_index # "random" or "extra" or integer
            if self.unknown_index == "extra":
                self.unknown_index = self.re_embed
                self.re_embed = self.re_embed+1
            print(f"Remapping {self.n_embed} indices to {self.re_embed} indices. "
                  f"Using {self.unknown_index} for unknown indices.")
        else:
            self.re_embed = n_embed
    def remap_to_used(self, inds):
        """Map full-codebook indices to positions in ``self.used``;
        unmatched indices become ``unknown_index`` (or a random index)."""
        ishape = inds.shape
        assert len(ishape)>1
        inds = inds.reshape(ishape[0],-1)
        used = self.used.to(inds)
        match = (inds[:,:,None]==used[None,None,...]).long()
        new = match.argmax(-1)
        unknown = match.sum(2)<1
        if self.unknown_index == "random":
            new[unknown]=torch.randint(0,self.re_embed,size=new[unknown].shape).to(device=new.device)
        else:
            new[unknown] = self.unknown_index
        return new.reshape(ishape)
    def unmap_to_all(self, inds):
        """Inverse of remap_to_used: compact indices back to codebook ids."""
        ishape = inds.shape
        assert len(ishape)>1
        inds = inds.reshape(ishape[0],-1)
        used = self.used.to(inds)
        if self.re_embed > self.used.shape[0]: # extra token
            inds[inds>=self.used.shape[0]] = 0 # simply set to zero
        back=torch.gather(used[None,:][inds.shape[0]*[0],:], 1, inds)
        return back.reshape(ishape)
    def forward(self, z, temp=None, return_logits=False):
        """Quantize feature map ``z`` via Gumbel-softmax; returns the
        quantized map, a KL-to-uniform-prior loss term and the chosen
        indices (wrapped in a VQ-style tuple if ``use_vqinterface``)."""
        # force hard = True when we are in eval mode, as we must quantize. actually, always true seems to work
        hard = self.straight_through if self.training else True
        temp = self.temperature if temp is None else temp
        logits = self.proj(z)
        if self.remap is not None:
            # continue only with used logits
            full_zeros = torch.zeros_like(logits)
            logits = logits[:,self.used,...]
        soft_one_hot = F.gumbel_softmax(logits, tau=temp, dim=1, hard=hard)
        if self.remap is not None:
            # go back to all entries but unused set to zero
            full_zeros[:,self.used,...] = soft_one_hot
            soft_one_hot = full_zeros
        z_q = einsum('b n h w, n d -> b d h w', soft_one_hot, self.embed.weight)
        # + kl divergence to the prior loss
        qy = F.softmax(logits, dim=1)
        diff = self.kl_weight * torch.sum(qy * torch.log(qy * self.n_embed + 1e-10), dim=1).mean()
        ind = soft_one_hot.argmax(dim=1)
        if self.remap is not None:
            ind = self.remap_to_used(ind)
        if self.use_vqinterface:
            if return_logits:
                return z_q, diff, (None, None, ind), logits
            return z_q, diff, (None, None, ind)
        return z_q, diff, ind
    def get_codebook_entry(self, indices, shape):
        """Decode flat ``indices`` (length b*h*w) into a (b, d, h, w) map
        of codebook vectors; ``shape`` is (b, h, w, c)."""
        b, h, w, c = shape
        assert b*h*w == indices.shape[0]
        indices = rearrange(indices, '(b h w) -> b h w', b=b, h=h, w=w)
        if self.remap is not None:
            indices = self.unmap_to_all(indices)
        one_hot = F.one_hot(indices, num_classes=self.n_embed).permute(0, 3, 1, 2).float()
        z_q = einsum('b n h w, n d -> b d h w', one_hot, self.embed.weight)
        return z_q
class VectorQuantizer2(nn.Module):
    """
    Improved version over VectorQuantizer, can be used as a drop-in replacement. Mostly
    avoids costly matrix multiplications and allows for post-hoc remapping of indices.
    """
    # NOTE: due to a bug the beta term was applied to the wrong term. for
    # backwards compatibility we use the buggy version by default, but you can
    # specify legacy=False to fix it.
    def __init__(self, n_e, e_dim, beta, remap=None, unknown_index="random",
                 sane_index_shape=False, legacy=True):
        super().__init__()
        self.n_e = n_e
        self.e_dim = e_dim
        self.beta = beta
        self.legacy = legacy
        self.embedding = nn.Embedding(self.n_e, self.e_dim)
        # uniform init in [-1/n_e, 1/n_e]
        self.embedding.weight.data.uniform_(-1.0 / self.n_e, 1.0 / self.n_e)
        # optional remapping of indices to a used subset loaded from an .npy file
        self.remap = remap
        if self.remap is not None:
            self.register_buffer("used", torch.tensor(np.load(self.remap)))
            self.re_embed = self.used.shape[0]
            self.unknown_index = unknown_index # "random" or "extra" or integer
            if self.unknown_index == "extra":
                self.unknown_index = self.re_embed
                self.re_embed = self.re_embed+1
            print(f"Remapping {self.n_e} indices to {self.re_embed} indices. "
                  f"Using {self.unknown_index} for unknown indices.")
        else:
            self.re_embed = n_e
        # if True, return indices reshaped to (batch, height, width)
        self.sane_index_shape = sane_index_shape
    def remap_to_used(self, inds):
        """Map full-codebook indices to positions in ``self.used``;
        unmatched indices become ``unknown_index`` (or a random index)."""
        ishape = inds.shape
        assert len(ishape)>1
        inds = inds.reshape(ishape[0],-1)
        used = self.used.to(inds)
        match = (inds[:,:,None]==used[None,None,...]).long()
        new = match.argmax(-1)
        unknown = match.sum(2)<1
        if self.unknown_index == "random":
            new[unknown]=torch.randint(0,self.re_embed,size=new[unknown].shape).to(device=new.device)
        else:
            new[unknown] = self.unknown_index
        return new.reshape(ishape)
    def unmap_to_all(self, inds):
        """Inverse of remap_to_used: compact indices back to codebook ids."""
        ishape = inds.shape
        assert len(ishape)>1
        inds = inds.reshape(ishape[0],-1)
        used = self.used.to(inds)
        if self.re_embed > self.used.shape[0]: # extra token
            inds[inds>=self.used.shape[0]] = 0 # simply set to zero
        back=torch.gather(used[None,:][inds.shape[0]*[0],:], 1, inds)
        return back.reshape(ishape)
    def forward(self, z, temp=None, rescale_logits=False, return_logits=False):
        """Quantize (b, c, h, w) feature map ``z`` to its nearest codebook
        entries; returns (z_q, loss, (perplexity, min_encodings, indices)),
        where perplexity and min_encodings are always None here."""
        assert temp is None or temp==1.0, "Only for interface compatible with Gumbel"
        assert rescale_logits==False, "Only for interface compatible with Gumbel"
        assert return_logits==False, "Only for interface compatible with Gumbel"
        # reshape z -> (batch, height, width, channel) and flatten
        z = rearrange(z, 'b c h w -> b h w c').contiguous()
        z_flattened = z.view(-1, self.e_dim)
        # distances from z to embeddings e_j (z - e)^2 = z^2 + e^2 - 2 e * z
        d = torch.sum(z_flattened ** 2, dim=1, keepdim=True) + \
            torch.sum(self.embedding.weight**2, dim=1) - 2 * \
            torch.einsum('bd,dn->bn', z_flattened, rearrange(self.embedding.weight, 'n d -> d n'))
        min_encoding_indices = torch.argmin(d, dim=1)
        z_q = self.embedding(min_encoding_indices).view(z.shape)
        perplexity = None
        min_encodings = None
        # compute loss for embedding (see class NOTE about legacy beta placement)
        if not self.legacy:
            loss = self.beta * torch.mean((z_q.detach()-z)**2) + \
                   torch.mean((z_q - z.detach()) ** 2)
        else:
            loss = torch.mean((z_q.detach()-z)**2) + self.beta * \
                   torch.mean((z_q - z.detach()) ** 2)
        # preserve gradients (straight-through estimator)
        z_q = z + (z_q - z).detach()
        # reshape back to match original input shape
        z_q = rearrange(z_q, 'b h w c -> b c h w').contiguous()
        if self.remap is not None:
            min_encoding_indices = min_encoding_indices.reshape(z.shape[0],-1) # add batch axis
            min_encoding_indices = self.remap_to_used(min_encoding_indices)
            min_encoding_indices = min_encoding_indices.reshape(-1,1) # flatten
        if self.sane_index_shape:
            min_encoding_indices = min_encoding_indices.reshape(
                z_q.shape[0], z_q.shape[2], z_q.shape[3])
        return z_q, loss, (perplexity, min_encodings, min_encoding_indices)
    def get_codebook_entry(self, indices, shape):
        """Look up codebook vectors for ``indices``; ``shape`` is
        (batch, height, width, channel) and the result is returned as
        (batch, channel, height, width)."""
        # shape specifying (batch, height, width, channel)
        if self.remap is not None:
            indices = indices.reshape(shape[0],-1) # add batch axis
            indices = self.unmap_to_all(indices)
            indices = indices.reshape(-1) # flatten again
        # get quantized latent vectors
        z_q = self.embedding(indices)
        if shape is not None:
            z_q = z_q.view(shape)
            # reshape back to match original input shape
            z_q = z_q.permute(0, 3, 1, 2).contiguous()
        return z_q
class EmbeddingEMA(nn.Module):
    """Codebook whose entries are maintained by exponential moving averages.

    The weights carry no gradients (``requires_grad=False``); instead cluster
    sizes and embedding sums are EMA-updated and the weights recomputed from
    their smoothed ratio.
    """

    def __init__(self, num_tokens, codebook_dim, decay=0.99, eps=1e-5):
        super().__init__()
        self.decay = decay
        self.eps = eps
        initial = torch.randn(num_tokens, codebook_dim)
        self.weight = nn.Parameter(initial, requires_grad=False)
        self.cluster_size = nn.Parameter(torch.zeros(num_tokens), requires_grad=False)
        self.embed_avg = nn.Parameter(initial.clone(), requires_grad=False)
        self.update = True

    def forward(self, embed_id):
        """Plain codebook lookup."""
        return F.embedding(embed_id, self.weight)

    def cluster_size_ema_update(self, new_cluster_size):
        """EMA-update the per-code usage counts."""
        self.cluster_size.data.mul_(self.decay).add_(new_cluster_size, alpha=1 - self.decay)

    def embed_avg_ema_update(self, new_embed_avg):
        """EMA-update the per-code embedding sums."""
        self.embed_avg.data.mul_(self.decay).add_(new_embed_avg, alpha=1 - self.decay)

    def weight_update(self, num_tokens):
        """Recompute weights from Laplace-smoothed cluster sizes."""
        n = self.cluster_size.sum()
        smoothed = (self.cluster_size + self.eps) / (n + num_tokens * self.eps) * n
        # normalize embedding average with smoothed cluster size
        self.weight.data.copy_(self.embed_avg / smoothed.unsqueeze(1))
class EMAVectorQuantizer(nn.Module):
    """Vector quantizer whose codebook is maintained via EMA updates.

    Uses ``EmbeddingEMA`` as the codebook: the beta-scaled commitment loss is
    the only gradient-carrying term, while the codebook itself is updated
    with exponential moving averages during training.
    """
    def __init__(self, n_embed, embedding_dim, beta, decay=0.99, eps=1e-5,
                 remap=None, unknown_index="random"):
        super().__init__()
        # BUG FIX: the original assigned from the undefined names
        # ``codebook_dim`` / ``num_tokens`` (NameError on construction) and
        # never set ``self.n_embed``, which the remap branch below reads.
        self.codebook_dim = embedding_dim
        self.num_tokens = n_embed
        self.n_embed = n_embed
        self.beta = beta
        self.embedding = EmbeddingEMA(self.num_tokens, self.codebook_dim, decay, eps)
        # optional remapping of indices to a used subset loaded from an .npy file
        self.remap = remap
        if self.remap is not None:
            self.register_buffer("used", torch.tensor(np.load(self.remap)))
            self.re_embed = self.used.shape[0]
            self.unknown_index = unknown_index # "random" or "extra" or integer
            if self.unknown_index == "extra":
                self.unknown_index = self.re_embed
                self.re_embed = self.re_embed+1
            print(f"Remapping {self.n_embed} indices to {self.re_embed} indices. "
                  f"Using {self.unknown_index} for unknown indices.")
        else:
            self.re_embed = n_embed
    def remap_to_used(self, inds):
        """Map full-codebook indices to positions in ``self.used``;
        unmatched indices become ``unknown_index`` (or a random index)."""
        ishape = inds.shape
        assert len(ishape)>1
        inds = inds.reshape(ishape[0],-1)
        used = self.used.to(inds)
        match = (inds[:,:,None]==used[None,None,...]).long()
        new = match.argmax(-1)
        unknown = match.sum(2)<1
        if self.unknown_index == "random":
            new[unknown]=torch.randint(0,self.re_embed,size=new[unknown].shape).to(device=new.device)
        else:
            new[unknown] = self.unknown_index
        return new.reshape(ishape)
    def unmap_to_all(self, inds):
        """Inverse of remap_to_used: compact indices back to codebook ids."""
        ishape = inds.shape
        assert len(ishape)>1
        inds = inds.reshape(ishape[0],-1)
        used = self.used.to(inds)
        if self.re_embed > self.used.shape[0]: # extra token
            inds[inds>=self.used.shape[0]] = 0 # simply set to zero
        back=torch.gather(used[None,:][inds.shape[0]*[0],:], 1, inds)
        return back.reshape(ishape)
    def forward(self, z):
        """Quantize (b, c, h, w) feature map ``z``; EMA-updates the codebook
        when training.  Returns (z_q, loss, (perplexity, encodings, indices))."""
        # reshape z -> (batch, height, width, channel) and flatten
        #z, 'b c h w -> b h w c'
        z = rearrange(z, 'b c h w -> b h w c')
        z_flattened = z.reshape(-1, self.codebook_dim)
        # distances from z to embeddings e_j (z - e)^2 = z^2 + e^2 - 2 e * z
        d = z_flattened.pow(2).sum(dim=1, keepdim=True) + \
            self.embedding.weight.pow(2).sum(dim=1) - 2 * \
            torch.einsum('bd,nd->bn', z_flattened, self.embedding.weight) # 'n d -> d n'
        encoding_indices = torch.argmin(d, dim=1)
        z_q = self.embedding(encoding_indices).view(z.shape)
        encodings = F.one_hot(encoding_indices, self.num_tokens).type(z.dtype)
        avg_probs = torch.mean(encodings, dim=0)
        perplexity = torch.exp(-torch.sum(avg_probs * torch.log(avg_probs + 1e-10)))
        if self.training and self.embedding.update:
            #EMA cluster size
            encodings_sum = encodings.sum(0)
            self.embedding.cluster_size_ema_update(encodings_sum)
            #EMA embedding average
            embed_sum = encodings.transpose(0,1) @ z_flattened
            self.embedding.embed_avg_ema_update(embed_sum)
            #normalize embed_avg and update weight
            self.embedding.weight_update(self.num_tokens)
        # compute loss for embedding (commitment loss only; codebook is EMA-updated)
        loss = self.beta * F.mse_loss(z_q.detach(), z)
        # preserve gradients (straight-through estimator)
        z_q = z + (z_q - z).detach()
        # reshape back to match original input shape
        #z_q, 'b h w c -> b c h w'
        z_q = rearrange(z_q, 'b h w c -> b c h w')
        return z_q, loss, (perplexity, encodings, encoding_indices)
| 18,182 | 39.769058 | 110 | py |
taming-transformers | taming-transformers-master/taming/modules/discriminator/model.py | import functools
import torch.nn as nn
from taming.modules.util import ActNorm
def weights_init(m):
    """DCGAN-style init: N(0, 0.02) conv weights, N(1, 0.02)/zero-bias batchnorms."""
    layer_type = m.__class__.__name__
    if 'Conv' in layer_type:
        nn.init.normal_(m.weight.data, 0.0, 0.02)
    elif 'BatchNorm' in layer_type:
        nn.init.normal_(m.weight.data, 1.0, 0.02)
        nn.init.constant_(m.bias.data, 0)
class NLayerDiscriminator(nn.Module):
    """Defines a PatchGAN discriminator as in Pix2Pix
    --> see https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix/blob/master/models/networks.py
    """

    def __init__(self, input_nc=3, ndf=64, n_layers=3, use_actnorm=False):
        """Construct a PatchGAN discriminator.

        Parameters:
            input_nc (int)  -- the number of channels in input images
            ndf (int)       -- the number of filters in the last conv layer
            n_layers (int)  -- the number of conv layers in the discriminator
            use_actnorm     -- use ActNorm instead of BatchNorm
        """
        super(NLayerDiscriminator, self).__init__()
        norm_layer = ActNorm if use_actnorm else nn.BatchNorm2d
        # BatchNorm2d has affine parameters, so its convs need no bias
        if type(norm_layer) == functools.partial:
            use_bias = norm_layer.func != nn.BatchNorm2d
        else:
            use_bias = norm_layer != nn.BatchNorm2d

        kw, padw = 4, 1
        layers = [
            nn.Conv2d(input_nc, ndf, kernel_size=kw, stride=2, padding=padw),
            nn.LeakyReLU(0.2, True),
        ]
        nf_mult = 1
        # gradually increase the number of filters while downsampling
        for n in range(1, n_layers):
            nf_mult_prev, nf_mult = nf_mult, min(2 ** n, 8)
            layers += [
                nn.Conv2d(ndf * nf_mult_prev, ndf * nf_mult, kernel_size=kw,
                          stride=2, padding=padw, bias=use_bias),
                norm_layer(ndf * nf_mult),
                nn.LeakyReLU(0.2, True),
            ]
        nf_mult_prev, nf_mult = nf_mult, min(2 ** n_layers, 8)
        layers += [
            nn.Conv2d(ndf * nf_mult_prev, ndf * nf_mult, kernel_size=kw,
                      stride=1, padding=padw, bias=use_bias),
            norm_layer(ndf * nf_mult),
            nn.LeakyReLU(0.2, True),
        ]
        # output a 1-channel patch-wise prediction map
        layers += [nn.Conv2d(ndf * nf_mult, 1, kernel_size=kw, stride=1, padding=padw)]
        self.main = nn.Sequential(*layers)

    def forward(self, input):
        """Standard forward."""
        return self.main(input)
| 2,550 | 36.514706 | 116 | py |
taming-transformers | taming-transformers-master/taming/modules/misc/coord.py | import torch
class CoordStage(object):
    """Fake first-stage interface that quantizes a single coordinate channel."""

    def __init__(self, n_embed, down_factor):
        self.n_embed = n_embed
        self.down_factor = down_factor

    def eval(self):
        # mimic nn.Module.eval() chaining
        return self

    def encode(self, c):
        """fake vqmodel interface"""
        assert 0.0 <= c.min() and c.max() <= 1.0
        batch, channels, height, width = c.shape
        assert channels == 1
        downsampled = torch.nn.functional.interpolate(
            c, scale_factor=1 / self.down_factor, mode="area")
        scaled = downsampled.clamp(0.0, 1.0) * self.n_embed
        quant = scaled.round()
        indices = quant.to(dtype=torch.long)
        return quant, None, (None, None, indices)

    def decode(self, c):
        upsampled = torch.nn.functional.interpolate(
            c / self.n_embed, scale_factor=self.down_factor, mode="nearest")
        return upsampled
| 904 | 27.28125 | 79 | py |
taming-transformers | taming-transformers-master/taming/modules/diffusionmodules/model.py | # pytorch_diffusion + derived encoder decoder
import math
import torch
import torch.nn as nn
import numpy as np
def get_timestep_embedding(timesteps, embedding_dim):
    """Sinusoidal timestep embeddings, matching DDPM / fairseq / tensor2tensor.

    (Differs slightly from the description in Section 3.5 of
    "Attention Is All You Need".)  Returns a float tensor of shape
    (len(timesteps), embedding_dim); odd dims are zero-padded on the right.
    """
    assert len(timesteps.shape) == 1
    half_dim = embedding_dim // 2
    log_scale = math.log(10000) / (half_dim - 1)
    freqs = torch.exp(torch.arange(half_dim, dtype=torch.float32) * -log_scale)
    freqs = freqs.to(device=timesteps.device)
    angles = timesteps.float()[:, None] * freqs[None, :]
    emb = torch.cat([torch.sin(angles), torch.cos(angles)], dim=1)
    if embedding_dim % 2 == 1:  # zero pad
        emb = torch.nn.functional.pad(emb, (0, 1, 0, 0))
    return emb
def nonlinearity(x):
    """Swish/SiLU activation: x * sigmoid(x)."""
    return x * torch.sigmoid(x)
def Normalize(in_channels):
    """GroupNorm with 32 groups, as used throughout this model."""
    return torch.nn.GroupNorm(
        num_groups=32, num_channels=in_channels, eps=1e-6, affine=True)
class Upsample(nn.Module):
    """2x nearest-neighbor upsampling, optionally followed by a 3x3 conv."""

    def __init__(self, in_channels, with_conv):
        super().__init__()
        self.with_conv = with_conv
        if self.with_conv:
            self.conv = torch.nn.Conv2d(in_channels, in_channels,
                                        kernel_size=3, stride=1, padding=1)

    def forward(self, x):
        upsampled = torch.nn.functional.interpolate(
            x, scale_factor=2.0, mode="nearest")
        if not self.with_conv:
            return upsampled
        return self.conv(upsampled)
class Downsample(nn.Module):
    """2x downsampling via strided 3x3 conv (asymmetric padding) or avg-pool."""

    def __init__(self, in_channels, with_conv):
        super().__init__()
        self.with_conv = with_conv
        if self.with_conv:
            # no asymmetric padding in torch conv, must do it ourselves
            self.conv = torch.nn.Conv2d(in_channels, in_channels,
                                        kernel_size=3, stride=2, padding=0)

    def forward(self, x):
        if not self.with_conv:
            return torch.nn.functional.avg_pool2d(x, kernel_size=2, stride=2)
        # pad right/bottom by one so the stride-2 conv halves the resolution
        padded = torch.nn.functional.pad(x, (0, 1, 0, 1), mode="constant", value=0)
        return self.conv(padded)
class ResnetBlock(nn.Module):
    """Residual block: GroupNorm + swish + conv, with optional timestep
    conditioning added between the two convolutions.

    When in/out channel counts differ, the skip connection is projected with
    either a 3x3 conv (``conv_shortcut=True``) or a 1x1 conv.
    """

    def __init__(self, *, in_channels, out_channels=None, conv_shortcut=False,
                 dropout, temb_channels=512):
        super().__init__()
        self.in_channels = in_channels
        self.out_channels = in_channels if out_channels is None else out_channels
        self.use_conv_shortcut = conv_shortcut

        self.norm1 = Normalize(in_channels)
        self.conv1 = torch.nn.Conv2d(in_channels, self.out_channels,
                                     kernel_size=3, stride=1, padding=1)
        if temb_channels > 0:
            self.temb_proj = torch.nn.Linear(temb_channels, self.out_channels)
        self.norm2 = Normalize(self.out_channels)
        self.dropout = torch.nn.Dropout(dropout)
        self.conv2 = torch.nn.Conv2d(self.out_channels, self.out_channels,
                                     kernel_size=3, stride=1, padding=1)
        if self.in_channels != self.out_channels:
            if self.use_conv_shortcut:
                self.conv_shortcut = torch.nn.Conv2d(
                    in_channels, self.out_channels,
                    kernel_size=3, stride=1, padding=1)
            else:
                self.nin_shortcut = torch.nn.Conv2d(
                    in_channels, self.out_channels,
                    kernel_size=1, stride=1, padding=0)

    def forward(self, x, temb):
        h = self.conv1(nonlinearity(self.norm1(x)))
        if temb is not None:
            # broadcast the projected timestep embedding over space
            h = h + self.temb_proj(nonlinearity(temb))[:, :, None, None]
        h = self.conv2(self.dropout(nonlinearity(self.norm2(h))))
        skip = x
        if self.in_channels != self.out_channels:
            skip = self.conv_shortcut(x) if self.use_conv_shortcut else self.nin_shortcut(x)
        return skip + h
class AttnBlock(nn.Module):
    """Single-head spatial self-attention over the HxW positions of a
    feature map, with a residual connection around it.
    """
    def __init__(self, in_channels):
        super().__init__()
        self.in_channels = in_channels
        self.norm = Normalize(in_channels)
        # 1x1 convs act as per-position linear projections for q/k/v
        self.q = torch.nn.Conv2d(in_channels,
                                 in_channels,
                                 kernel_size=1,
                                 stride=1,
                                 padding=0)
        self.k = torch.nn.Conv2d(in_channels,
                                 in_channels,
                                 kernel_size=1,
                                 stride=1,
                                 padding=0)
        self.v = torch.nn.Conv2d(in_channels,
                                 in_channels,
                                 kernel_size=1,
                                 stride=1,
                                 padding=0)
        self.proj_out = torch.nn.Conv2d(in_channels,
                                        in_channels,
                                        kernel_size=1,
                                        stride=1,
                                        padding=0)
    def forward(self, x):
        h_ = x
        h_ = self.norm(h_)
        q = self.q(h_)
        k = self.k(h_)
        v = self.v(h_)
        # compute attention
        b,c,h,w = q.shape
        q = q.reshape(b,c,h*w)
        q = q.permute(0,2,1)   # b,hw,c
        k = k.reshape(b,c,h*w) # b,c,hw
        w_ = torch.bmm(q,k)     # b,hw,hw    w[b,i,j]=sum_c q[b,i,c]k[b,c,j]
        w_ = w_ * (int(c)**(-0.5))  # scale by 1/sqrt(d) before softmax
        w_ = torch.nn.functional.softmax(w_, dim=2)
        # attend to values
        v = v.reshape(b,c,h*w)
        w_ = w_.permute(0,2,1)   # b,hw,hw (first hw of k, second of q)
        h_ = torch.bmm(v,w_)     # b, c,hw (hw of q) h_[b,c,j] = sum_i v[b,c,i] w_[b,i,j]
        h_ = h_.reshape(b,c,h,w)
        h_ = self.proj_out(h_)
        # residual connection around the whole attention operation
        return x+h_
class Model(nn.Module):
    """Full diffusion-style UNet: conv stem, resnet/attention down path,
    middle block, symmetric up path with skip connections, and a conv head.
    Each ResnetBlock is optionally conditioned on a timestep embedding.
    """
    def __init__(self, *, ch, out_ch, ch_mult=(1,2,4,8), num_res_blocks,
                 attn_resolutions, dropout=0.0, resamp_with_conv=True, in_channels,
                 resolution, use_timestep=True):
        super().__init__()
        self.ch = ch
        # timestep embedding is 4x the base channel width
        self.temb_ch = self.ch*4
        self.num_resolutions = len(ch_mult)
        self.num_res_blocks = num_res_blocks
        self.resolution = resolution
        self.in_channels = in_channels
        self.use_timestep = use_timestep
        if self.use_timestep:
            # timestep embedding
            self.temb = nn.Module()
            self.temb.dense = nn.ModuleList([
                torch.nn.Linear(self.ch,
                                self.temb_ch),
                torch.nn.Linear(self.temb_ch,
                                self.temb_ch),
            ])
        # downsampling
        self.conv_in = torch.nn.Conv2d(in_channels,
                                       self.ch,
                                       kernel_size=3,
                                       stride=1,
                                       padding=1)
        curr_res = resolution
        # per-level input multipliers; prepend 1 for the stem output
        in_ch_mult = (1,)+tuple(ch_mult)
        self.down = nn.ModuleList()
        for i_level in range(self.num_resolutions):
            block = nn.ModuleList()
            attn = nn.ModuleList()
            block_in = ch*in_ch_mult[i_level]
            block_out = ch*ch_mult[i_level]
            for i_block in range(self.num_res_blocks):
                block.append(ResnetBlock(in_channels=block_in,
                                         out_channels=block_out,
                                         temb_channels=self.temb_ch,
                                         dropout=dropout))
                block_in = block_out
                # attention only at the requested spatial resolutions
                if curr_res in attn_resolutions:
                    attn.append(AttnBlock(block_in))
            down = nn.Module()
            down.block = block
            down.attn = attn
            if i_level != self.num_resolutions-1:
                down.downsample = Downsample(block_in, resamp_with_conv)
                curr_res = curr_res // 2
            self.down.append(down)
        # middle
        self.mid = nn.Module()
        self.mid.block_1 = ResnetBlock(in_channels=block_in,
                                       out_channels=block_in,
                                       temb_channels=self.temb_ch,
                                       dropout=dropout)
        self.mid.attn_1 = AttnBlock(block_in)
        self.mid.block_2 = ResnetBlock(in_channels=block_in,
                                       out_channels=block_in,
                                       temb_channels=self.temb_ch,
                                       dropout=dropout)
        # upsampling
        self.up = nn.ModuleList()
        for i_level in reversed(range(self.num_resolutions)):
            block = nn.ModuleList()
            attn = nn.ModuleList()
            block_out = ch*ch_mult[i_level]
            skip_in = ch*ch_mult[i_level]
            # one extra block per level on the way up (num_res_blocks+1)
            for i_block in range(self.num_res_blocks+1):
                if i_block == self.num_res_blocks:
                    skip_in = ch*in_ch_mult[i_level]
                block.append(ResnetBlock(in_channels=block_in+skip_in,
                                         out_channels=block_out,
                                         temb_channels=self.temb_ch,
                                         dropout=dropout))
                block_in = block_out
                if curr_res in attn_resolutions:
                    attn.append(AttnBlock(block_in))
            up = nn.Module()
            up.block = block
            up.attn = attn
            if i_level != 0:
                up.upsample = Upsample(block_in, resamp_with_conv)
                curr_res = curr_res * 2
            self.up.insert(0, up) # prepend to get consistent order
        # end
        self.norm_out = Normalize(block_in)
        self.conv_out = torch.nn.Conv2d(block_in,
                                        out_ch,
                                        kernel_size=3,
                                        stride=1,
                                        padding=1)
    def forward(self, x, t=None):
        """Forward pass; t is the (batch of) timestep(s), required when
        use_timestep is set."""
        #assert x.shape[2] == x.shape[3] == self.resolution
        if self.use_timestep:
            # timestep embedding
            assert t is not None
            temb = get_timestep_embedding(t, self.ch)
            temb = self.temb.dense[0](temb)
            temb = nonlinearity(temb)
            temb = self.temb.dense[1](temb)
        else:
            temb = None
        # downsampling; hs collects every intermediate as the skip stack
        hs = [self.conv_in(x)]
        for i_level in range(self.num_resolutions):
            for i_block in range(self.num_res_blocks):
                h = self.down[i_level].block[i_block](hs[-1], temb)
                if len(self.down[i_level].attn) > 0:
                    h = self.down[i_level].attn[i_block](h)
                hs.append(h)
            if i_level != self.num_resolutions-1:
                hs.append(self.down[i_level].downsample(hs[-1]))
        # middle
        h = hs[-1]
        h = self.mid.block_1(h, temb)
        h = self.mid.attn_1(h)
        h = self.mid.block_2(h, temb)
        # upsampling; skips are popped in reverse order of collection
        for i_level in reversed(range(self.num_resolutions)):
            for i_block in range(self.num_res_blocks+1):
                h = self.up[i_level].block[i_block](
                    torch.cat([h, hs.pop()], dim=1), temb)
                if len(self.up[i_level].attn) > 0:
                    h = self.up[i_level].attn[i_block](h)
            if i_level != 0:
                h = self.up[i_level].upsample(h)
        # end
        h = self.norm_out(h)
        h = nonlinearity(h)
        h = self.conv_out(h)
        return h
class Encoder(nn.Module):
    """Down-path half of the UNet used as a VQ/KL encoder: conv stem,
    resnet/attention levels with downsampling, middle block, and a conv
    head producing z_channels (or 2*z_channels for mean+logvar).
    No timestep conditioning (temb_ch = 0).
    """
    def __init__(self, *, ch, out_ch, ch_mult=(1,2,4,8), num_res_blocks,
                 attn_resolutions, dropout=0.0, resamp_with_conv=True, in_channels,
                 resolution, z_channels, double_z=True, **ignore_kwargs):
        super().__init__()
        self.ch = ch
        self.temb_ch = 0
        self.num_resolutions = len(ch_mult)
        self.num_res_blocks = num_res_blocks
        self.resolution = resolution
        self.in_channels = in_channels
        # downsampling
        self.conv_in = torch.nn.Conv2d(in_channels,
                                       self.ch,
                                       kernel_size=3,
                                       stride=1,
                                       padding=1)
        curr_res = resolution
        # per-level input multipliers; prepend 1 for the stem output
        in_ch_mult = (1,)+tuple(ch_mult)
        self.down = nn.ModuleList()
        for i_level in range(self.num_resolutions):
            block = nn.ModuleList()
            attn = nn.ModuleList()
            block_in = ch*in_ch_mult[i_level]
            block_out = ch*ch_mult[i_level]
            for i_block in range(self.num_res_blocks):
                block.append(ResnetBlock(in_channels=block_in,
                                         out_channels=block_out,
                                         temb_channels=self.temb_ch,
                                         dropout=dropout))
                block_in = block_out
                if curr_res in attn_resolutions:
                    attn.append(AttnBlock(block_in))
            down = nn.Module()
            down.block = block
            down.attn = attn
            if i_level != self.num_resolutions-1:
                down.downsample = Downsample(block_in, resamp_with_conv)
                curr_res = curr_res // 2
            self.down.append(down)
        # middle
        self.mid = nn.Module()
        self.mid.block_1 = ResnetBlock(in_channels=block_in,
                                       out_channels=block_in,
                                       temb_channels=self.temb_ch,
                                       dropout=dropout)
        self.mid.attn_1 = AttnBlock(block_in)
        self.mid.block_2 = ResnetBlock(in_channels=block_in,
                                       out_channels=block_in,
                                       temb_channels=self.temb_ch,
                                       dropout=dropout)
        # end
        self.norm_out = Normalize(block_in)
        # double_z doubles the output width (e.g. mean and logvar of a KL latent)
        self.conv_out = torch.nn.Conv2d(block_in,
                                        2*z_channels if double_z else z_channels,
                                        kernel_size=3,
                                        stride=1,
                                        padding=1)
    def forward(self, x):
        #assert x.shape[2] == x.shape[3] == self.resolution, "{}, {}, {}".format(x.shape[2], x.shape[3], self.resolution)
        # timestep embedding is unused in the encoder
        temb = None
        # downsampling
        hs = [self.conv_in(x)]
        for i_level in range(self.num_resolutions):
            for i_block in range(self.num_res_blocks):
                h = self.down[i_level].block[i_block](hs[-1], temb)
                if len(self.down[i_level].attn) > 0:
                    h = self.down[i_level].attn[i_block](h)
                hs.append(h)
            if i_level != self.num_resolutions-1:
                hs.append(self.down[i_level].downsample(hs[-1]))
        # middle
        h = hs[-1]
        h = self.mid.block_1(h, temb)
        h = self.mid.attn_1(h)
        h = self.mid.block_2(h, temb)
        # end
        h = self.norm_out(h)
        h = nonlinearity(h)
        h = self.conv_out(h)
        return h
class Decoder(nn.Module):
    """Up-path half of the UNet used as a VQ/KL decoder: maps a latent z at
    the lowest resolution back up to an image. No skip connections and no
    timestep conditioning (temb_ch = 0).
    """
    def __init__(self, *, ch, out_ch, ch_mult=(1,2,4,8), num_res_blocks,
                 attn_resolutions, dropout=0.0, resamp_with_conv=True, in_channels,
                 resolution, z_channels, give_pre_end=False, **ignorekwargs):
        super().__init__()
        self.ch = ch
        self.temb_ch = 0
        self.num_resolutions = len(ch_mult)
        self.num_res_blocks = num_res_blocks
        self.resolution = resolution
        self.in_channels = in_channels
        # give_pre_end skips the final norm/act/conv head in forward()
        self.give_pre_end = give_pre_end
        # compute in_ch_mult, block_in and curr_res at lowest res
        in_ch_mult = (1,)+tuple(ch_mult)
        block_in = ch*ch_mult[self.num_resolutions-1]
        curr_res = resolution // 2**(self.num_resolutions-1)
        self.z_shape = (1,z_channels,curr_res,curr_res)
        print("Working with z of shape {} = {} dimensions.".format(
            self.z_shape, np.prod(self.z_shape)))
        # z to block_in
        self.conv_in = torch.nn.Conv2d(z_channels,
                                       block_in,
                                       kernel_size=3,
                                       stride=1,
                                       padding=1)
        # middle
        self.mid = nn.Module()
        self.mid.block_1 = ResnetBlock(in_channels=block_in,
                                       out_channels=block_in,
                                       temb_channels=self.temb_ch,
                                       dropout=dropout)
        self.mid.attn_1 = AttnBlock(block_in)
        self.mid.block_2 = ResnetBlock(in_channels=block_in,
                                       out_channels=block_in,
                                       temb_channels=self.temb_ch,
                                       dropout=dropout)
        # upsampling (built highest-level-first because of reversed())
        self.up = nn.ModuleList()
        for i_level in reversed(range(self.num_resolutions)):
            block = nn.ModuleList()
            attn = nn.ModuleList()
            block_out = ch*ch_mult[i_level]
            # one extra block per level (num_res_blocks+1), as in Model
            for i_block in range(self.num_res_blocks+1):
                block.append(ResnetBlock(in_channels=block_in,
                                         out_channels=block_out,
                                         temb_channels=self.temb_ch,
                                         dropout=dropout))
                block_in = block_out
                if curr_res in attn_resolutions:
                    attn.append(AttnBlock(block_in))
            up = nn.Module()
            up.block = block
            up.attn = attn
            if i_level != 0:
                up.upsample = Upsample(block_in, resamp_with_conv)
                curr_res = curr_res * 2
            self.up.insert(0, up) # prepend to get consistent order
        # end
        self.norm_out = Normalize(block_in)
        self.conv_out = torch.nn.Conv2d(block_in,
                                        out_ch,
                                        kernel_size=3,
                                        stride=1,
                                        padding=1)
    def forward(self, z):
        #assert z.shape[1:] == self.z_shape[1:]
        # remember the latent shape for debugging/inspection
        self.last_z_shape = z.shape
        # timestep embedding is unused in the decoder
        temb = None
        # z to block_in
        h = self.conv_in(z)
        # middle
        h = self.mid.block_1(h, temb)
        h = self.mid.attn_1(h)
        h = self.mid.block_2(h, temb)
        # upsampling
        for i_level in reversed(range(self.num_resolutions)):
            for i_block in range(self.num_res_blocks+1):
                h = self.up[i_level].block[i_block](h, temb)
                if len(self.up[i_level].attn) > 0:
                    h = self.up[i_level].attn[i_block](h)
            if i_level != 0:
                h = self.up[i_level].upsample(h)
        # end
        if self.give_pre_end:
            return h
        h = self.norm_out(h)
        h = nonlinearity(h)
        h = self.conv_out(h)
        return h
class VUNet(nn.Module):
    """UNet conditioned on an auxiliary latent z: the conditioning image x
    is encoded by the down path, z is projected and concatenated at the
    bottleneck, and the up path decodes with skip connections.

    Bug fix: forward() previously read an undefined name ``t`` whenever
    ``use_timestep`` was set (a latent NameError, since use_timestep
    defaults to False). ``t`` is now an explicit, optional third argument,
    which is backward compatible with all existing ``forward(x, z)`` calls.
    """
    def __init__(self, *, ch, out_ch, ch_mult=(1,2,4,8), num_res_blocks,
                 attn_resolutions, dropout=0.0, resamp_with_conv=True,
                 in_channels, c_channels,
                 resolution, z_channels, use_timestep=False, **ignore_kwargs):
        super().__init__()
        self.ch = ch
        # timestep embedding is 4x the base channel width
        self.temb_ch = self.ch*4
        self.num_resolutions = len(ch_mult)
        self.num_res_blocks = num_res_blocks
        self.resolution = resolution
        self.use_timestep = use_timestep
        if self.use_timestep:
            # timestep embedding
            self.temb = nn.Module()
            self.temb.dense = nn.ModuleList([
                torch.nn.Linear(self.ch,
                                self.temb_ch),
                torch.nn.Linear(self.temb_ch,
                                self.temb_ch),
            ])
        # downsampling (operates on the c_channels conditioning input)
        self.conv_in = torch.nn.Conv2d(c_channels,
                                       self.ch,
                                       kernel_size=3,
                                       stride=1,
                                       padding=1)
        curr_res = resolution
        # per-level input multipliers; prepend 1 for the stem output
        in_ch_mult = (1,)+tuple(ch_mult)
        self.down = nn.ModuleList()
        for i_level in range(self.num_resolutions):
            block = nn.ModuleList()
            attn = nn.ModuleList()
            block_in = ch*in_ch_mult[i_level]
            block_out = ch*ch_mult[i_level]
            for i_block in range(self.num_res_blocks):
                block.append(ResnetBlock(in_channels=block_in,
                                         out_channels=block_out,
                                         temb_channels=self.temb_ch,
                                         dropout=dropout))
                block_in = block_out
                if curr_res in attn_resolutions:
                    attn.append(AttnBlock(block_in))
            down = nn.Module()
            down.block = block
            down.attn = attn
            if i_level != self.num_resolutions-1:
                down.downsample = Downsample(block_in, resamp_with_conv)
                curr_res = curr_res // 2
            self.down.append(down)
        # 1x1 projection of the latent z to the bottleneck width
        self.z_in = torch.nn.Conv2d(z_channels,
                                    block_in,
                                    kernel_size=1,
                                    stride=1,
                                    padding=0)
        # middle; block_1 takes 2*block_in because h and z are concatenated
        self.mid = nn.Module()
        self.mid.block_1 = ResnetBlock(in_channels=2*block_in,
                                       out_channels=block_in,
                                       temb_channels=self.temb_ch,
                                       dropout=dropout)
        self.mid.attn_1 = AttnBlock(block_in)
        self.mid.block_2 = ResnetBlock(in_channels=block_in,
                                       out_channels=block_in,
                                       temb_channels=self.temb_ch,
                                       dropout=dropout)
        # upsampling
        self.up = nn.ModuleList()
        for i_level in reversed(range(self.num_resolutions)):
            block = nn.ModuleList()
            attn = nn.ModuleList()
            block_out = ch*ch_mult[i_level]
            skip_in = ch*ch_mult[i_level]
            # one extra block per level on the way up (num_res_blocks+1)
            for i_block in range(self.num_res_blocks+1):
                if i_block == self.num_res_blocks:
                    skip_in = ch*in_ch_mult[i_level]
                block.append(ResnetBlock(in_channels=block_in+skip_in,
                                         out_channels=block_out,
                                         temb_channels=self.temb_ch,
                                         dropout=dropout))
                block_in = block_out
                if curr_res in attn_resolutions:
                    attn.append(AttnBlock(block_in))
            up = nn.Module()
            up.block = block
            up.attn = attn
            if i_level != 0:
                up.upsample = Upsample(block_in, resamp_with_conv)
                curr_res = curr_res * 2
            self.up.insert(0, up) # prepend to get consistent order
        # end
        self.norm_out = Normalize(block_in)
        self.conv_out = torch.nn.Conv2d(block_in,
                                        out_ch,
                                        kernel_size=3,
                                        stride=1,
                                        padding=1)
    def forward(self, x, z, t=None):
        """Decode latent z conditioned on image x; t is the optional
        timestep (required iff use_timestep is set)."""
        #assert x.shape[2] == x.shape[3] == self.resolution
        if self.use_timestep:
            # timestep embedding
            assert t is not None
            temb = get_timestep_embedding(t, self.ch)
            temb = self.temb.dense[0](temb)
            temb = nonlinearity(temb)
            temb = self.temb.dense[1](temb)
        else:
            temb = None
        # downsampling; hs collects every intermediate as the skip stack
        hs = [self.conv_in(x)]
        for i_level in range(self.num_resolutions):
            for i_block in range(self.num_res_blocks):
                h = self.down[i_level].block[i_block](hs[-1], temb)
                if len(self.down[i_level].attn) > 0:
                    h = self.down[i_level].attn[i_block](h)
                hs.append(h)
            if i_level != self.num_resolutions-1:
                hs.append(self.down[i_level].downsample(hs[-1]))
        # middle: fuse the projected latent with the bottleneck features
        h = hs[-1]
        z = self.z_in(z)
        h = torch.cat((h,z),dim=1)
        h = self.mid.block_1(h, temb)
        h = self.mid.attn_1(h)
        h = self.mid.block_2(h, temb)
        # upsampling; skips are popped in reverse order of collection
        for i_level in reversed(range(self.num_resolutions)):
            for i_block in range(self.num_res_blocks+1):
                h = self.up[i_level].block[i_block](
                    torch.cat([h, hs.pop()], dim=1), temb)
                if len(self.up[i_level].attn) > 0:
                    h = self.up[i_level].attn[i_block](h)
            if i_level != 0:
                h = self.up[i_level].upsample(h)
        # end
        h = self.norm_out(h)
        h = nonlinearity(h)
        h = self.conv_out(h)
        return h
class SimpleDecoder(nn.Module):
    """Small fixed-architecture decoder: 1x1 conv, three ResnetBlocks that
    widen then narrow the features, a 1x1 conv back to in_channels, one 2x
    upsample, and a norm/act/conv head to out_channels.
    """
    def __init__(self, in_channels, out_channels, *args, **kwargs):
        super().__init__()
        self.model = nn.ModuleList([nn.Conv2d(in_channels, in_channels, 1),
                                     ResnetBlock(in_channels=in_channels,
                                                 out_channels=2 * in_channels,
                                                 temb_channels=0, dropout=0.0),
                                     ResnetBlock(in_channels=2 * in_channels,
                                                out_channels=4 * in_channels,
                                                temb_channels=0, dropout=0.0),
                                     ResnetBlock(in_channels=4 * in_channels,
                                                out_channels=2 * in_channels,
                                                temb_channels=0, dropout=0.0),
                                     nn.Conv2d(2*in_channels, in_channels, 1),
                                     Upsample(in_channels, with_conv=True)])
        # end
        self.norm_out = Normalize(in_channels)
        self.conv_out = torch.nn.Conv2d(in_channels,
                                        out_channels,
                                        kernel_size=3,
                                        stride=1,
                                        padding=1)
    def forward(self, x):
        for i, layer in enumerate(self.model):
            # indices 1-3 are ResnetBlocks, which take (x, temb); temb unused
            if i in [1,2,3]:
                x = layer(x, None)
            else:
                x = layer(x)
        h = self.norm_out(x)
        h = nonlinearity(h)
        x = self.conv_out(h)
        return x
class UpsampleDecoder(nn.Module):
    """Decoder made of per-level ResnetBlock stacks with a 2x Upsample
    between levels (no attention, no skip connections), ending in a
    norm/act/conv head.
    """
    def __init__(self, in_channels, out_channels, ch, num_res_blocks, resolution,
                 ch_mult=(2,2), dropout=0.0):
        super().__init__()
        # upsampling
        self.temb_ch = 0
        self.num_resolutions = len(ch_mult)
        self.num_res_blocks = num_res_blocks
        block_in = in_channels
        # curr_res tracks the spatial size but is otherwise unused here
        curr_res = resolution // 2 ** (self.num_resolutions - 1)
        self.res_blocks = nn.ModuleList()
        self.upsample_blocks = nn.ModuleList()
        for i_level in range(self.num_resolutions):
            res_block = []
            block_out = ch * ch_mult[i_level]
            # one extra block per level (num_res_blocks + 1)
            for i_block in range(self.num_res_blocks + 1):
                res_block.append(ResnetBlock(in_channels=block_in,
                                             out_channels=block_out,
                                             temb_channels=self.temb_ch,
                                             dropout=dropout))
                block_in = block_out
            self.res_blocks.append(nn.ModuleList(res_block))
            # no upsample after the last level
            if i_level != self.num_resolutions - 1:
                self.upsample_blocks.append(Upsample(block_in, True))
                curr_res = curr_res * 2
        # end
        self.norm_out = Normalize(block_in)
        self.conv_out = torch.nn.Conv2d(block_in,
                                        out_channels,
                                        kernel_size=3,
                                        stride=1,
                                        padding=1)
    def forward(self, x):
        # upsampling; k mirrors i_level and indexes the upsample list
        h = x
        for k, i_level in enumerate(range(self.num_resolutions)):
            for i_block in range(self.num_res_blocks + 1):
                h = self.res_blocks[i_level][i_block](h, None)
            if i_level != self.num_resolutions - 1:
                h = self.upsample_blocks[k](h)
        h = self.norm_out(h)
        h = nonlinearity(h)
        h = self.conv_out(h)
        return h
| 30,221 | 37.895753 | 121 | py |
taming-transformers | taming-transformers-master/taming/modules/transformer/mingpt.py | """
taken from: https://github.com/karpathy/minGPT/
GPT model:
- the initial stem consists of a combination of token encoding and a positional encoding
- the meat of it is a uniform sequence of Transformer blocks
- each Transformer is a sequential combination of a 1-hidden-layer MLP block and a self-attention block
- all blocks feed into a central residual pathway similar to resnets
- the final decoder is a linear projection into a vanilla Softmax classifier
"""
import math
import logging
import torch
import torch.nn as nn
from torch.nn import functional as F
from transformers import top_k_top_p_filtering
logger = logging.getLogger(__name__)
class GPTConfig:
    """Base GPT config; holds parameters common to all GPT versions.

    Any extra keyword arguments are attached verbatim as attributes, so
    callers can carry arbitrary hyperparameters on the config object.
    """
    # dropout probabilities shared by all variants
    embd_pdrop = 0.1
    resid_pdrop = 0.1
    attn_pdrop = 0.1

    def __init__(self, vocab_size, block_size, **kwargs):
        self.vocab_size = vocab_size
        self.block_size = block_size
        for name, value in kwargs.items():
            setattr(self, name, value)
class GPT1Config(GPTConfig):
    """GPT-1 like network, roughly 125M params."""
    # 12 layers x 12 heads x 768-dim embeddings
    n_embd = 768
    n_head = 12
    n_layer = 12
class CausalSelfAttention(nn.Module):
    """
    A vanilla multi-head masked self-attention layer with a projection at the end.
    It is possible to use torch.nn.MultiheadAttention here but I am including an
    explicit implementation here to show that there is nothing too scary here.
    """
    def __init__(self, config):
        super().__init__()
        assert config.n_embd % config.n_head == 0
        # key, query, value projections for all heads
        self.key = nn.Linear(config.n_embd, config.n_embd)
        self.query = nn.Linear(config.n_embd, config.n_embd)
        self.value = nn.Linear(config.n_embd, config.n_embd)
        # regularization
        self.attn_drop = nn.Dropout(config.attn_pdrop)
        self.resid_drop = nn.Dropout(config.resid_pdrop)
        # output projection
        self.proj = nn.Linear(config.n_embd, config.n_embd)
        # causal mask to ensure that attention is only applied to the left in the input sequence
        mask = torch.tril(torch.ones(config.block_size,
                                     config.block_size))
        # an optional prefix of n_unmasked tokens may attend bidirectionally
        if hasattr(config, "n_unmasked"):
            mask[:config.n_unmasked, :config.n_unmasked] = 1
        self.register_buffer("mask", mask.view(1, 1, config.block_size, config.block_size))
        self.n_head = config.n_head
    def forward(self, x, layer_past=None):
        """Attend over x; layer_past is an optional (k, v) cache from
        previous steps, in which case the causal mask is skipped (the new
        query may see the entire cached past)."""
        B, T, C = x.size()
        # calculate query, key, values for all heads in batch and move head forward to be the batch dim
        k = self.key(x).view(B, T, self.n_head, C // self.n_head).transpose(1, 2) # (B, nh, T, hs)
        q = self.query(x).view(B, T, self.n_head, C // self.n_head).transpose(1, 2) # (B, nh, T, hs)
        v = self.value(x).view(B, T, self.n_head, C // self.n_head).transpose(1, 2) # (B, nh, T, hs)
        # present holds only the CURRENT step's k,v (stacked), for caching
        present = torch.stack((k, v))
        if layer_past is not None:
            past_key, past_value = layer_past
            k = torch.cat((past_key, k), dim=-2)
            v = torch.cat((past_value, v), dim=-2)
        # causal self-attention; Self-attend: (B, nh, T, hs) x (B, nh, hs, T) -> (B, nh, T, T)
        att = (q @ k.transpose(-2, -1)) * (1.0 / math.sqrt(k.size(-1)))
        if layer_past is None:
            att = att.masked_fill(self.mask[:,:,:T,:T] == 0, float('-inf'))
        att = F.softmax(att, dim=-1)
        att = self.attn_drop(att)
        y = att @ v # (B, nh, T, T) x (B, nh, T, hs) -> (B, nh, T, hs)
        y = y.transpose(1, 2).contiguous().view(B, T, C) # re-assemble all head outputs side by side
        # output projection
        y = self.resid_drop(self.proj(y))
        return y, present   # TODO: check that this does not break anything
class Block(nn.Module):
    """ an unassuming Transformer block """
    def __init__(self, config):
        super().__init__()
        self.ln1 = nn.LayerNorm(config.n_embd)
        self.ln2 = nn.LayerNorm(config.n_embd)
        self.attn = CausalSelfAttention(config)
        # position-wise feed-forward with 4x expansion
        self.mlp = nn.Sequential(
            nn.Linear(config.n_embd, 4 * config.n_embd),
            nn.GELU(),  # nice
            nn.Linear(4 * config.n_embd, config.n_embd),
            nn.Dropout(config.resid_pdrop),
        )
    def forward(self, x, layer_past=None, return_present=False):
        """Pre-LN transformer block. Returns x, or (x, present k/v cache)
        when a past is supplied or return_present is requested."""
        # TODO: check that training still works
        if return_present: assert not self.training
        # layer past: tuple of length two with B, nh, T, hs
        attn, present = self.attn(self.ln1(x), layer_past=layer_past)
        x = x + attn
        x = x + self.mlp(self.ln2(x))
        if layer_past is not None or return_present:
            return x, present
        return x
class GPT(nn.Module):
    """ the full GPT language model, with a context size of block_size """
    def __init__(self, vocab_size, block_size, n_layer=12, n_head=8, n_embd=256,
                 embd_pdrop=0., resid_pdrop=0., attn_pdrop=0., n_unmasked=0):
        super().__init__()
        config = GPTConfig(vocab_size=vocab_size, block_size=block_size,
                           embd_pdrop=embd_pdrop, resid_pdrop=resid_pdrop, attn_pdrop=attn_pdrop,
                           n_layer=n_layer, n_head=n_head, n_embd=n_embd,
                           n_unmasked=n_unmasked)
        # input embedding stem
        self.tok_emb = nn.Embedding(config.vocab_size, config.n_embd)
        # learned absolute positional embedding
        self.pos_emb = nn.Parameter(torch.zeros(1, config.block_size, config.n_embd))
        self.drop = nn.Dropout(config.embd_pdrop)
        # transformer
        self.blocks = nn.Sequential(*[Block(config) for _ in range(config.n_layer)])
        # decoder head
        self.ln_f = nn.LayerNorm(config.n_embd)
        self.head = nn.Linear(config.n_embd, config.vocab_size, bias=False)
        self.block_size = config.block_size
        self.apply(self._init_weights)
        self.config = config
        logger.info("number of parameters: %e", sum(p.numel() for p in self.parameters()))
    def get_block_size(self):
        """Maximum context length this model can attend over."""
        return self.block_size
    def _init_weights(self, module):
        # GPT-style init: N(0, 0.02) weights, zero biases, unit LayerNorm
        if isinstance(module, (nn.Linear, nn.Embedding)):
            module.weight.data.normal_(mean=0.0, std=0.02)
            if isinstance(module, nn.Linear) and module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, nn.LayerNorm):
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)
    def forward(self, idx, embeddings=None, targets=None):
        """Return (logits, loss); loss is None unless targets are given.
        embeddings, if given, are prepended to the token embeddings."""
        # forward the GPT model
        token_embeddings = self.tok_emb(idx) # each index maps to a (learnable) vector
        if embeddings is not None: # prepend explicit embeddings
            token_embeddings = torch.cat((embeddings, token_embeddings), dim=1)
        t = token_embeddings.shape[1]
        assert t <= self.block_size, "Cannot forward, model block size is exhausted."
        position_embeddings = self.pos_emb[:, :t, :] # each position maps to a (learnable) vector
        x = self.drop(token_embeddings + position_embeddings)
        x = self.blocks(x)
        x = self.ln_f(x)
        logits = self.head(x)
        # if we are given some desired targets also calculate the loss
        loss = None
        if targets is not None:
            loss = F.cross_entropy(logits.view(-1, logits.size(-1)), targets.view(-1))
        return logits, loss
    def forward_with_past(self, idx, embeddings=None, targets=None, past=None, past_length=None):
        """Incremental decoding with a k/v cache: returns (logits, loss,
        presents) where presents stacks each layer's new (k, v)."""
        # inference only
        assert not self.training
        token_embeddings = self.tok_emb(idx)    # each index maps to a (learnable) vector
        if embeddings is not None:              # prepend explicit embeddings
            token_embeddings = torch.cat((embeddings, token_embeddings), dim=1)
        if past is not None:
            assert past_length is not None
            past = torch.cat(past, dim=-2)   # n_layer, 2, b, nh, len_past, dim_head
            past_shape = list(past.shape)
            expected_shape = [self.config.n_layer, 2, idx.shape[0], self.config.n_head, past_length, self.config.n_embd//self.config.n_head]
            assert past_shape == expected_shape, f"{past_shape} =/= {expected_shape}"
            # only the embedding for the current position is needed
            position_embeddings = self.pos_emb[:, past_length, :]  # each position maps to a (learnable) vector
        else:
            position_embeddings = self.pos_emb[:, :token_embeddings.shape[1], :]
        x = self.drop(token_embeddings + position_embeddings)
        presents = []  # accumulate over layers
        for i, block in enumerate(self.blocks):
            x, present = block(x, layer_past=past[i, ...] if past is not None else None, return_present=True)
            presents.append(present)
        x = self.ln_f(x)
        logits = self.head(x)
        # if we are given some desired targets also calculate the loss
        loss = None
        if targets is not None:
            loss = F.cross_entropy(logits.view(-1, logits.size(-1)), targets.view(-1))
        return logits, loss, torch.stack(presents)  # _, _, n_layer, 2, b, nh, 1, dim_head
class DummyGPT(nn.Module):
    """Debugging stand-in for GPT: shifts the input ids by a constant and
    mimics GPT's (logits, loss) return signature with loss=None."""

    def __init__(self, add_value=1):
        super().__init__()
        self.add_value = add_value

    def forward(self, idx):
        shifted = idx + self.add_value
        return shifted, None
class CodeGPT(nn.Module):
    """Takes in semi-embeddings: identical to GPT except the input stem is
    a Linear projection of continuous in_channels-dim inputs instead of a
    token Embedding lookup.

    Bug fix: forward() called ``self.taming_cinln_f`` (a corrupted name
    that exists nowhere on the module and raised AttributeError at
    runtime); it now calls the final LayerNorm ``self.ln_f`` defined in
    __init__, matching GPT.forward.
    """
    def __init__(self, vocab_size, block_size, in_channels, n_layer=12, n_head=8, n_embd=256,
                 embd_pdrop=0., resid_pdrop=0., attn_pdrop=0., n_unmasked=0):
        super().__init__()
        config = GPTConfig(vocab_size=vocab_size, block_size=block_size,
                           embd_pdrop=embd_pdrop, resid_pdrop=resid_pdrop, attn_pdrop=attn_pdrop,
                           n_layer=n_layer, n_head=n_head, n_embd=n_embd,
                           n_unmasked=n_unmasked)
        # input embedding stem: linear projection of continuous inputs
        self.tok_emb = nn.Linear(in_channels, config.n_embd)
        # learned absolute positional embedding
        self.pos_emb = nn.Parameter(torch.zeros(1, config.block_size, config.n_embd))
        self.drop = nn.Dropout(config.embd_pdrop)
        # transformer
        self.blocks = nn.Sequential(*[Block(config) for _ in range(config.n_layer)])
        # decoder head
        self.ln_f = nn.LayerNorm(config.n_embd)
        self.head = nn.Linear(config.n_embd, config.vocab_size, bias=False)
        self.block_size = config.block_size
        self.apply(self._init_weights)
        self.config = config
        logger.info("number of parameters: %e", sum(p.numel() for p in self.parameters()))
    def get_block_size(self):
        """Maximum context length this model can attend over."""
        return self.block_size
    def _init_weights(self, module):
        # GPT-style init: N(0, 0.02) weights, zero biases, unit LayerNorm
        if isinstance(module, (nn.Linear, nn.Embedding)):
            module.weight.data.normal_(mean=0.0, std=0.02)
            if isinstance(module, nn.Linear) and module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, nn.LayerNorm):
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)
    def forward(self, idx, embeddings=None, targets=None):
        """Return (logits, loss); loss is None unless targets are given.
        embeddings, if given, are prepended to the projected inputs."""
        # forward the GPT model
        token_embeddings = self.tok_emb(idx) # each index maps to a (learnable) vector
        if embeddings is not None: # prepend explicit embeddings
            token_embeddings = torch.cat((embeddings, token_embeddings), dim=1)
        t = token_embeddings.shape[1]
        assert t <= self.block_size, "Cannot forward, model block size is exhausted."
        position_embeddings = self.pos_emb[:, :t, :] # each position maps to a (learnable) vector
        x = self.drop(token_embeddings + position_embeddings)
        x = self.blocks(x)
        # fix: was self.taming_cinln_f (nonexistent attribute); the final
        # layer norm registered in __init__ is self.ln_f
        x = self.ln_f(x)
        logits = self.head(x)
        # if we are given some desired targets also calculate the loss
        loss = None
        if targets is not None:
            loss = F.cross_entropy(logits.view(-1, logits.size(-1)), targets.view(-1))
        return logits, loss
#### sampling utils
def top_k_logits(logits, k):
    """Return a copy of `logits` with everything below each row's k-th
    largest value replaced by -inf (the input is left untouched)."""
    topvals, _ = torch.topk(logits, k)
    kth_largest = topvals[:, [-1]]  # per-row threshold, kept 2-D for broadcasting
    masked = logits.clone()
    masked[masked < kth_largest] = -float('Inf')
    return masked
@torch.no_grad()
def sample(model, x, steps, temperature=1.0, sample=False, top_k=None):
    """
    take a conditioning sequence of indices in x (of shape (b,t)) and predict the next token in
    the sequence, feeding the predictions back into the model each time. Clearly the sampling
    has quadratic complexity unlike an RNN that is only linear, and has a finite context window
    of block_size, unlike an RNN that has an infinite context window.
    """
    block_size = model.get_block_size()
    # switch to eval mode (disables dropout); note: not restored afterwards
    model.eval()
    for k in range(steps):
        x_cond = x if x.size(1) <= block_size else x[:, -block_size:]  # crop context if needed
        logits, _ = model(x_cond)
        # pluck the logits at the final step and scale by temperature
        logits = logits[:, -1, :] / temperature
        # optionally crop probabilities to only the top k options
        if top_k is not None:
            logits = top_k_logits(logits, top_k)
        # apply softmax to convert to probabilities
        probs = F.softmax(logits, dim=-1)
        # sample from the distribution or take the most likely
        if sample:
            ix = torch.multinomial(probs, num_samples=1)
        else:
            _, ix = torch.topk(probs, k=1, dim=-1)
        # append to the sequence and continue
        x = torch.cat((x, ix), dim=1)
    return x
@torch.no_grad()
def sample_with_past(x, model, steps, temperature=1., sample_logits=True,
                     top_k=None, top_p=None, callback=None):
    """Autoregressive sampling using the model's k/v cache
    (forward_with_past), so each step only feeds the newest token.
    Returns only the newly generated tokens (conditioning is cut off)."""
    # x is conditioning
    sample = x
    cond_len = x.shape[1]
    past = None
    for n in range(steps):
        if callback is not None:
            callback(n)
        # past_length is the number of positions already cached before this
        # step's input (n-1 generated tokens plus the conditioning)
        logits, _, present = model.forward_with_past(x, past=past, past_length=(n+cond_len-1))
        if past is None:
            past = [present]
        else:
            past.append(present)
        logits = logits[:, -1, :] / temperature
        if top_k is not None:
            logits = top_k_top_p_filtering(logits, top_k=top_k, top_p=top_p)
        probs = F.softmax(logits, dim=-1)
        if not sample_logits:
            _, x = torch.topk(probs, k=1, dim=-1)
        else:
            x = torch.multinomial(probs, num_samples=1)
        # append to the sequence and continue
        sample = torch.cat((sample, x), dim=1)
    # free the cache before returning
    del past
    sample = sample[:, cond_len:]  # cut conditioning off
    return sample
#### clustering utils
class KMeans(nn.Module):
    """Lloyd's k-means over nc-dimensional points (e.g. RGB pixels), stored
    as an nn.Module so the codebook C travels with model checkpoints.
    forward() quantizes pixels to cluster indices, or decodes indices back
    to cluster centers when reverse=True.
    """
    def __init__(self, ncluster=512, nc=3, niter=10):
        super().__init__()
        self.ncluster = ncluster
        self.nc = nc
        self.niter = niter
        # default output shape for reverse decoding (C, H, W)
        self.shape = (3,32,32)
        self.register_buffer("C", torch.zeros(self.ncluster,nc))
        # flag buffer so initialization state persists in checkpoints
        self.register_buffer('initialized', torch.tensor(0, dtype=torch.uint8))
    def is_initialized(self):
        return self.initialized.item() == 1
    @torch.no_grad()
    def initialize(self, x):
        """Fit the codebook with niter rounds of Lloyd's algorithm on the
        (N, nc) data x, re-seeding empty clusters from random points."""
        N, D = x.shape
        assert D == self.nc, D
        c = x[torch.randperm(N)[:self.ncluster]] # init clusters at random
        for i in range(self.niter):
            # assign all pixels to the closest codebook element
            a = ((x[:, None, :] - c[None, :, :])**2).sum(-1).argmin(1)
            # move each codebook element to be the mean of the pixels that assigned to it
            c = torch.stack([x[a==k].mean(0) for k in range(self.ncluster)])
            # re-assign any poorly positioned codebook elements
            nanix = torch.any(torch.isnan(c), dim=1)
            ndead = nanix.sum().item()
            print('done step %d/%d, re-initialized %d dead clusters' % (i+1, self.niter, ndead))
            c[nanix] = x[torch.randperm(N)[:ndead]] # re-init dead clusters
        self.C.copy_(c)
        self.initialized.fill_(1)
    def forward(self, x, reverse=False, shape=None):
        if not reverse:
            # flatten (bs, c, h, w) -> nearest-codebook indices of shape (bs, h*w)
            bs,c,h,w = x.shape
            assert c == self.nc
            x = x.reshape(bs,c,h*w,1)
            C = self.C.permute(1,0)
            C = C.reshape(1,c,1,self.ncluster)
            a = ((x-C)**2).sum(1).argmin(-1) # bs, h*w indices
            return a
        else:
            # decode indices (bs, HW) back to cluster centers, reshaped to `shape`
            bs, HW = x.shape
            """
            c = self.C.reshape( 1, self.nc, 1, self.ncluster)
            c = c[bs*[0],:,:,:]
            c = c[:,:,HW*[0],:]
            x = x.reshape(bs, 1, HW, 1)
            x = x[:,3*[0],:,:]
            x = torch.gather(c, dim=3, index=x)
            """
            x = self.C[x]
            x = x.permute(0,2,1)
            shape = shape if shape is not None else self.shape
            x = x.reshape(bs, *shape)
            return x
| 16,836 | 39.473558 | 140 | py |
taming-transformers | taming-transformers-master/taming/modules/transformer/permuter.py | import torch
import torch.nn as nn
import numpy as np
class AbstractPermuter(nn.Module):
    """Base class for sequence permuters. Subclasses must implement
    forward(x, reverse) to (un)shuffle the sequence dimension of x."""

    def __init__(self, *args, **kwargs):
        super().__init__()

    def forward(self, x, reverse=False):
        # abstract: concrete permuters supply the permutation
        raise NotImplementedError
class Identity(AbstractPermuter):
    """No-op permuter: the sequence is returned unchanged in both
    directions."""

    def __init__(self):
        super().__init__()

    def forward(self, x, reverse=False):
        # the identity permutation is its own inverse
        return x
class Subsample(AbstractPermuter):
    """Hierarchical subsampling permutation: repeatedly splits the HxW grid
    into 2x2 sub-lattices, so coarse-to-fine positions come first.
    Requires H and W to reduce to 1 by halving (powers of two).
    """
    def __init__(self, H, W):
        super().__init__()
        C = 1
        indices = np.arange(H*W).reshape(C,H,W)
        while min(H, W) > 1:
            # pull the 2x2 phase dims to the front, quadrupling the "channel" dim
            indices = indices.reshape(C,H//2,2,W//2,2)
            indices = indices.transpose(0,2,4,1,3)
            indices = indices.reshape(C*4,H//2, W//2)
            H = H//2
            W = W//2
            C = C*4
        assert H == W == 1
        idx = torch.tensor(indices.ravel())
        # NOTE(review): wrapping the index in nn.Parameter inside
        # register_buffer is unusual (buffers are normally plain tensors);
        # kept as-is since state-dict keys depend on it -- confirm intent
        self.register_buffer('forward_shuffle_idx',
                             nn.Parameter(idx, requires_grad=False))
        self.register_buffer('backward_shuffle_idx',
                             nn.Parameter(torch.argsort(idx), requires_grad=False))
    def forward(self, x, reverse=False):
        # apply the permutation (or its inverse) along the sequence dim
        if not reverse:
            return x[:, self.forward_shuffle_idx]
        else:
            return x[:, self.backward_shuffle_idx]
def mortonify(i, j):
    """(i,j) index to linear morton code: the bits of i and j are
    interleaved, with each bit of i placed one position above the
    corresponding bit of j."""
    row = np.uint64(i)
    col = np.uint64(j)
    one = np.uint64(1)
    code = np.uint(0)
    for bit in range(32):
        shift = np.uint64(bit)
        col_bit = (col & (one << shift)) << shift
        row_bit = (row & (one << shift)) << np.uint64(bit + 1)
        code = code | col_bit | row_bit
    return code
class ZCurve(AbstractPermuter):
    """Permutes a flattened HxW sequence into Z-order (Morton) traversal.
    For a power-of-two square grid the morton codes form a permutation of
    0..H*W-1, so the code array itself serves as the inverse shuffle.
    """
    def __init__(self, H, W):
        super().__init__()
        # morton code of each raster-order position (i, j)
        reverseidx = [np.int64(mortonify(i,j)) for i in range(H) for j in range(W)]
        # argsort of the codes maps z-order rank -> raster position
        idx = np.argsort(reverseidx)
        idx = torch.tensor(idx)
        reverseidx = torch.tensor(reverseidx)
        self.register_buffer('forward_shuffle_idx',
                             idx)
        self.register_buffer('backward_shuffle_idx',
                             reverseidx)
    def forward(self, x, reverse=False):
        # apply the permutation (or its inverse) along the sequence dim
        if not reverse:
            return x[:, self.forward_shuffle_idx]
        else:
            return x[:, self.backward_shuffle_idx]
class SpiralOut(AbstractPermuter):
    """Order the positions of a square grid as a spiral starting at the
    center and winding outward; requires H == W."""
    def __init__(self, H, W):
        super().__init__()
        assert H == W
        size = W
        indices = np.arange(size*size).reshape(size,size)
        # start just left of the grid center
        i0 = size//2
        j0 = size//2-1
        i = i0
        j = j0
        idx = [indices[i0, j0]]
        # number of steps taken per leg; grows by one every two legs
        step_mult = 0
        for c in range(1, size//2+1):
            step_mult += 1
            # steps left
            for k in range(step_mult):
                i = i - 1
                j = j
                idx.append(indices[i, j])
            # step down
            for k in range(step_mult):
                i = i
                j = j + 1
                idx.append(indices[i, j])
            step_mult += 1
            if c < size//2:
                # step right
                for k in range(step_mult):
                    i = i + 1
                    j = j
                    idx.append(indices[i, j])
                # step up
                for k in range(step_mult):
                    i = i
                    j = j - 1
                    idx.append(indices[i, j])
            else:
                # end reached: final leg is one step shorter to stay in bounds
                for k in range(step_mult-1):
                    i = i + 1
                    idx.append(indices[i, j])
        assert len(idx) == size*size
        idx = torch.tensor(idx)
        self.register_buffer('forward_shuffle_idx', idx)
        # argsort of a permutation is its inverse
        self.register_buffer('backward_shuffle_idx', torch.argsort(idx))
    def forward(self, x, reverse=False):
        if not reverse:
            return x[:, self.forward_shuffle_idx]
        else:
            return x[:, self.backward_shuffle_idx]
class SpiralIn(AbstractPermuter):
    """Same spiral as SpiralOut but reversed: positions are ordered from
    the outside inward toward the grid center; requires H == W."""
    def __init__(self, H, W):
        super().__init__()
        assert H == W
        size = W
        indices = np.arange(size*size).reshape(size,size)
        # start just left of the grid center (same walk as SpiralOut)
        i0 = size//2
        j0 = size//2-1
        i = i0
        j = j0
        idx = [indices[i0, j0]]
        # number of steps taken per leg; grows by one every two legs
        step_mult = 0
        for c in range(1, size//2+1):
            step_mult += 1
            # steps left
            for k in range(step_mult):
                i = i - 1
                j = j
                idx.append(indices[i, j])
            # step down
            for k in range(step_mult):
                i = i
                j = j + 1
                idx.append(indices[i, j])
            step_mult += 1
            if c < size//2:
                # step right
                for k in range(step_mult):
                    i = i + 1
                    j = j
                    idx.append(indices[i, j])
                # step up
                for k in range(step_mult):
                    i = i
                    j = j - 1
                    idx.append(indices[i, j])
            else:
                # end reached: final leg is one step shorter to stay in bounds
                for k in range(step_mult-1):
                    i = i + 1
                    idx.append(indices[i, j])
        assert len(idx) == size*size
        # reversing the outward spiral yields the inward ordering
        idx = idx[::-1]
        idx = torch.tensor(idx)
        self.register_buffer('forward_shuffle_idx', idx)
        # argsort of a permutation is its inverse
        self.register_buffer('backward_shuffle_idx', torch.argsort(idx))
    def forward(self, x, reverse=False):
        if not reverse:
            return x[:, self.forward_shuffle_idx]
        else:
            return x[:, self.backward_shuffle_idx]
class Random(nn.Module):
    """Fixed pseudo-random permutation of the H*W positions (seed 1).

    NOTE(review): unlike the other permuters this subclasses nn.Module
    directly rather than AbstractPermuter — consider unifying.
    """
    def __init__(self, H, W):
        super().__init__()
        # seeded RandomState makes the shuffle reproducible across runs
        perm = np.random.RandomState(1).permutation(H*W)
        fwd = torch.tensor(perm.ravel())
        self.register_buffer('forward_shuffle_idx', fwd)
        self.register_buffer('backward_shuffle_idx', torch.argsort(fwd))
    def forward(self, x, reverse=False):
        idx = self.forward_shuffle_idx if not reverse else self.backward_shuffle_idx
        return x[:, idx]
class AlternateParsing(AbstractPermuter):
    """Boustrophedon (snake) ordering: odd rows are traversed right-to-left."""
    def __init__(self, H, W):
        super().__init__()
        grid = np.arange(W*H).reshape(H, W)
        # reverse every second row so the scan alternates direction
        grid[1::2, :] = grid[1::2, ::-1]
        flat = grid.flatten()
        assert len(flat) == H*W
        order = torch.tensor(flat)
        self.register_buffer('forward_shuffle_idx', order)
        self.register_buffer('backward_shuffle_idx', torch.argsort(order))
    def forward(self, x, reverse=False):
        idx = self.forward_shuffle_idx if not reverse else self.backward_shuffle_idx
        return x[:, idx]
if __name__ == "__main__":
    # smoke test: snake ordering must round-trip through its inverse
    p0 = AlternateParsing(16, 16)
    print(p0.forward_shuffle_idx)
    print(p0.backward_shuffle_idx)
    x = torch.randint(0, 768, size=(11, 256))
    y = p0(x)
    xre = p0(y, reverse=True)
    assert torch.equal(x, xre)
    # print the spiral indices for a minimal 2x2 grid
    p1 = SpiralOut(2, 2)
    print(p1.forward_shuffle_idx)
    print(p1.backward_shuffle_idx)
| 7,093 | 27.48996 | 83 | py |
taming-transformers | taming-transformers-master/taming/modules/losses/lpips.py | """Stripped version of https://github.com/richzhang/PerceptualSimilarity/tree/master/models"""
import torch
import torch.nn as nn
from torchvision import models
from collections import namedtuple
from taming.util import get_ckpt_path
class LPIPS(nn.Module):
    # Learned perceptual metric: distance between two images measured in
    # frozen VGG16 feature space, with learned per-channel 1x1 weights.
    def __init__(self, use_dropout=True):
        super().__init__()
        self.scaling_layer = ScalingLayer()
        self.chns = [64, 128, 256, 512, 512]  # vgg16 feature channels per slice
        self.net = vgg16(pretrained=True, requires_grad=False)
        # one learned 1x1 conv per feature slice, weighting channel differences
        self.lin0 = NetLinLayer(self.chns[0], use_dropout=use_dropout)
        self.lin1 = NetLinLayer(self.chns[1], use_dropout=use_dropout)
        self.lin2 = NetLinLayer(self.chns[2], use_dropout=use_dropout)
        self.lin3 = NetLinLayer(self.chns[3], use_dropout=use_dropout)
        self.lin4 = NetLinLayer(self.chns[4], use_dropout=use_dropout)
        self.load_from_pretrained()
        # used purely as a loss network, so freeze everything
        for param in self.parameters():
            param.requires_grad = False
    def load_from_pretrained(self, name="vgg_lpips"):
        """Load the pretrained LPIPS linear weights from the local ckpt cache."""
        ckpt = get_ckpt_path(name, "taming/modules/autoencoder/lpips")
        self.load_state_dict(torch.load(ckpt, map_location=torch.device("cpu")), strict=False)
        print("loaded pretrained LPIPS loss from {}".format(ckpt))
    @classmethod
    def from_pretrained(cls, name="vgg_lpips"):
        """Alternate constructor that also loads the pretrained weights."""
        if name != "vgg_lpips":
            raise NotImplementedError
        model = cls()
        ckpt = get_ckpt_path(name)
        model.load_state_dict(torch.load(ckpt, map_location=torch.device("cpu")), strict=False)
        return model
    def forward(self, input, target):
        """Return the LPIPS distance between `input` and `target` images."""
        in0_input, in1_input = (self.scaling_layer(input), self.scaling_layer(target))
        outs0, outs1 = self.net(in0_input), self.net(in1_input)
        feats0, feats1, diffs = {}, {}, {}
        lins = [self.lin0, self.lin1, self.lin2, self.lin3, self.lin4]
        for kk in range(len(self.chns)):
            # unit-normalize each slice's channels, then compare squared diffs
            feats0[kk], feats1[kk] = normalize_tensor(outs0[kk]), normalize_tensor(outs1[kk])
            diffs[kk] = (feats0[kk] - feats1[kk]) ** 2
        # weight per-channel differences and average over spatial dims
        res = [spatial_average(lins[kk].model(diffs[kk]), keepdim=True) for kk in range(len(self.chns))]
        val = res[0]
        for l in range(1, len(self.chns)):
            val += res[l]
        return val
class ScalingLayer(nn.Module):
    """Shift and rescale image channels to the statistics the VGG net expects."""
    def __init__(self):
        super(ScalingLayer, self).__init__()
        shift = torch.Tensor([-.030, -.088, -.188]).reshape(1, 3, 1, 1)
        scale = torch.Tensor([.458, .448, .450]).reshape(1, 3, 1, 1)
        self.register_buffer('shift', shift)
        self.register_buffer('scale', scale)
    def forward(self, inp):
        return (inp - self.shift) / self.scale
class NetLinLayer(nn.Module):
    """ A single linear layer which does a 1x1 conv (optionally preceded by dropout). """
    def __init__(self, chn_in, chn_out=1, use_dropout=False):
        super(NetLinLayer, self).__init__()
        modules = []
        if use_dropout:
            modules.append(nn.Dropout())
        modules.append(nn.Conv2d(chn_in, chn_out, 1, stride=1, padding=0, bias=False))
        self.model = nn.Sequential(*modules)
class vgg16(torch.nn.Module):
    """VGG16 feature extractor split into five stages (relu1_2 ... relu5_3)."""
    def __init__(self, requires_grad=False, pretrained=True):
        super(vgg16, self).__init__()
        features = models.vgg16(pretrained=pretrained).features
        self.N_slices = 5
        # layer-index boundaries of relu1_2, relu2_2, relu3_3, relu4_3, relu5_3
        boundaries = [(0, 4), (4, 9), (9, 16), (16, 23), (23, 30)]
        for num, (lo, hi) in enumerate(boundaries, start=1):
            stage = torch.nn.Sequential()
            for layer_idx in range(lo, hi):
                stage.add_module(str(layer_idx), features[layer_idx])
            setattr(self, "slice%d" % num, stage)
        if not requires_grad:
            # freeze the backbone; it is used as a fixed feature extractor
            for param in self.parameters():
                param.requires_grad = False
    def forward(self, X):
        outputs = []
        h = X
        for num in range(1, self.N_slices + 1):
            h = getattr(self, "slice%d" % num)(h)
            outputs.append(h)
        vgg_outputs = namedtuple("VggOutputs", ['relu1_2', 'relu2_2', 'relu3_3', 'relu4_3', 'relu5_3'])
        return vgg_outputs(*outputs)
def normalize_tensor(x, eps=1e-10):
    """L2-normalize x along dim 1 (channels); eps avoids division by zero."""
    channel_norm = x.pow(2).sum(dim=1, keepdim=True).sqrt()
    return x / (channel_norm + eps)
def spatial_average(x, keepdim=True):
    """Average an NCHW tensor over its spatial dimensions (H, W)."""
    return torch.mean(x, dim=[2, 3], keepdim=keepdim)
| 4,832 | 37.975806 | 104 | py |
taming-transformers | taming-transformers-master/taming/modules/losses/segmentation.py | import torch.nn as nn
import torch.nn.functional as F
class BCELoss(nn.Module):
    """Binary cross-entropy on logits; returns (loss, empty log dict)."""
    def forward(self, prediction, target):
        bce = F.binary_cross_entropy_with_logits(prediction, target)
        return bce, {}
class BCELossWithQuant(nn.Module):
    """BCE-with-logits plus a weighted codebook (quantization) loss term."""
    def __init__(self, codebook_weight=1.):
        super().__init__()
        self.codebook_weight = codebook_weight
    def forward(self, qloss, target, prediction, split):
        bce_loss = F.binary_cross_entropy_with_logits(prediction, target)
        total = bce_loss + self.codebook_weight * qloss
        log = {
            "{}/total_loss".format(split): total.clone().detach().mean(),
            "{}/bce_loss".format(split): bce_loss.detach().mean(),
            "{}/quant_loss".format(split): qloss.detach().mean(),
        }
        return total, log
| 816 | 34.521739 | 82 | py |
taming-transformers | taming-transformers-master/taming/modules/losses/vqperceptual.py | import torch
import torch.nn as nn
import torch.nn.functional as F
from taming.modules.losses.lpips import LPIPS
from taming.modules.discriminator.model import NLayerDiscriminator, weights_init
class DummyLoss(nn.Module):
    """Placeholder loss module with no parameters or behavior."""
    def __init__(self):
        super().__init__()
def adopt_weight(weight, global_step, threshold=0, value=0.):
    """Return `value` until `global_step` reaches `threshold`, then `weight`."""
    return value if global_step < threshold else weight
def hinge_d_loss(logits_real, logits_fake):
    """Hinge GAN loss for the discriminator (mean of the two hinge terms)."""
    real_term = F.relu(1. - logits_real).mean()
    fake_term = F.relu(1. + logits_fake).mean()
    return 0.5 * (real_term + fake_term)
def vanilla_d_loss(logits_real, logits_fake):
    """Non-saturating (vanilla) GAN discriminator loss via softplus."""
    real_term = torch.mean(torch.nn.functional.softplus(-logits_real))
    fake_term = torch.mean(torch.nn.functional.softplus(logits_fake))
    return 0.5 * (real_term + fake_term)
class VQLPIPSWithDiscriminator(nn.Module):
    """VQGAN training loss: pixel + LPIPS reconstruction terms plus a
    codebook loss, combined with an adversarial term.  Returns the
    generator loss for optimizer_idx == 0 and the discriminator loss
    for optimizer_idx == 1, each with a logging dict."""
    def __init__(self, disc_start, codebook_weight=1.0, pixelloss_weight=1.0,
                 disc_num_layers=3, disc_in_channels=3, disc_factor=1.0, disc_weight=1.0,
                 perceptual_weight=1.0, use_actnorm=False, disc_conditional=False,
                 disc_ndf=64, disc_loss="hinge"):
        super().__init__()
        assert disc_loss in ["hinge", "vanilla"]
        self.codebook_weight = codebook_weight
        self.pixel_weight = pixelloss_weight
        # frozen LPIPS network used as a perceptual distance
        self.perceptual_loss = LPIPS().eval()
        self.perceptual_weight = perceptual_weight

        self.discriminator = NLayerDiscriminator(input_nc=disc_in_channels,
                                                 n_layers=disc_num_layers,
                                                 use_actnorm=use_actnorm,
                                                 ndf=disc_ndf
                                                 ).apply(weights_init)
        # global step at which the adversarial term kicks in
        self.discriminator_iter_start = disc_start
        if disc_loss == "hinge":
            self.disc_loss = hinge_d_loss
        elif disc_loss == "vanilla":
            self.disc_loss = vanilla_d_loss
        else:
            raise ValueError(f"Unknown GAN loss '{disc_loss}'.")
        print(f"VQLPIPSWithDiscriminator running with {disc_loss} loss.")
        self.disc_factor = disc_factor
        self.discriminator_weight = disc_weight
        self.disc_conditional = disc_conditional

    def calculate_adaptive_weight(self, nll_loss, g_loss, last_layer=None):
        """Scale the generator loss so its gradient magnitude at the last
        decoder layer matches the reconstruction gradient."""
        if last_layer is not None:
            nll_grads = torch.autograd.grad(nll_loss, last_layer, retain_graph=True)[0]
            g_grads = torch.autograd.grad(g_loss, last_layer, retain_graph=True)[0]
        else:
            nll_grads = torch.autograd.grad(nll_loss, self.last_layer[0], retain_graph=True)[0]
            g_grads = torch.autograd.grad(g_loss, self.last_layer[0], retain_graph=True)[0]

        d_weight = torch.norm(nll_grads) / (torch.norm(g_grads) + 1e-4)
        d_weight = torch.clamp(d_weight, 0.0, 1e4).detach()
        d_weight = d_weight * self.discriminator_weight
        return d_weight

    def forward(self, codebook_loss, inputs, reconstructions, optimizer_idx,
                global_step, last_layer=None, cond=None, split="train"):
        rec_loss = torch.abs(inputs.contiguous() - reconstructions.contiguous())
        if self.perceptual_weight > 0:
            p_loss = self.perceptual_loss(inputs.contiguous(), reconstructions.contiguous())
            rec_loss = rec_loss + self.perceptual_weight * p_loss
        else:
            p_loss = torch.tensor([0.0])

        nll_loss = rec_loss
        #nll_loss = torch.sum(nll_loss) / nll_loss.shape[0]
        nll_loss = torch.mean(nll_loss)

        # now the GAN part
        if optimizer_idx == 0:
            # generator update
            if cond is None:
                assert not self.disc_conditional
                logits_fake = self.discriminator(reconstructions.contiguous())
            else:
                assert self.disc_conditional
                logits_fake = self.discriminator(torch.cat((reconstructions.contiguous(), cond), dim=1))
            g_loss = -torch.mean(logits_fake)

            try:
                d_weight = self.calculate_adaptive_weight(nll_loss, g_loss, last_layer=last_layer)
            except RuntimeError:
                # autograd.grad fails without a graph, e.g. during validation
                assert not self.training
                d_weight = torch.tensor(0.0)

            disc_factor = adopt_weight(self.disc_factor, global_step, threshold=self.discriminator_iter_start)
            loss = nll_loss + d_weight * disc_factor * g_loss + self.codebook_weight * codebook_loss.mean()

            log = {"{}/total_loss".format(split): loss.clone().detach().mean(),
                   "{}/quant_loss".format(split): codebook_loss.detach().mean(),
                   "{}/nll_loss".format(split): nll_loss.detach().mean(),
                   "{}/rec_loss".format(split): rec_loss.detach().mean(),
                   "{}/p_loss".format(split): p_loss.detach().mean(),
                   "{}/d_weight".format(split): d_weight.detach(),
                   "{}/disc_factor".format(split): torch.tensor(disc_factor),
                   "{}/g_loss".format(split): g_loss.detach().mean(),
                   }
            return loss, log

        if optimizer_idx == 1:
            # second pass for discriminator update
            if cond is None:
                logits_real = self.discriminator(inputs.contiguous().detach())
                logits_fake = self.discriminator(reconstructions.contiguous().detach())
            else:
                logits_real = self.discriminator(torch.cat((inputs.contiguous().detach(), cond), dim=1))
                logits_fake = self.discriminator(torch.cat((reconstructions.contiguous().detach(), cond), dim=1))

            disc_factor = adopt_weight(self.disc_factor, global_step, threshold=self.discriminator_iter_start)
            d_loss = disc_factor * self.disc_loss(logits_real, logits_fake)

            log = {"{}/disc_loss".format(split): d_loss.clone().detach().mean(),
                   "{}/logits_real".format(split): logits_real.detach().mean(),
                   "{}/logits_fake".format(split): logits_fake.detach().mean()
                   }
            return d_loss, log
| 6,179 | 44.109489 | 113 | py |
taming-transformers | taming-transformers-master/taming/models/dummy_cond_stage.py | from torch import Tensor
class DummyCondStage:
    """No-op conditioning stage that mimics the encode/decode interface."""
    def __init__(self, conditional_key):
        # key naming the batch entry this stage conditions on
        self.conditional_key = conditional_key
        self.train = None
    def eval(self):
        # chainable, like nn.Module.eval()
        return self
    @staticmethod
    def encode(c: Tensor):
        # mimic the (quant, emb_loss, info) tuple of a real encoder
        info = (None, None, c)
        return c, None, info
    @staticmethod
    def decode(c: Tensor):
        return c
    @staticmethod
    def to_rgb(c: Tensor):
        return c
| 416 | 17.130435 | 46 | py |
taming-transformers | taming-transformers-master/taming/models/vqgan.py | import torch
import torch.nn.functional as F
import pytorch_lightning as pl
from main import instantiate_from_config
from taming.modules.diffusionmodules.model import Encoder, Decoder
from taming.modules.vqvae.quantize import VectorQuantizer2 as VectorQuantizer
from taming.modules.vqvae.quantize import GumbelQuantize
from taming.modules.vqvae.quantize import EMAVectorQuantizer
class VQModel(pl.LightningModule):
    """VQGAN autoencoder: encoder -> vector quantization -> decoder.
    Trained with two alternating optimizers (autoencoder vs discriminator),
    dispatched on `optimizer_idx` inside training_step."""
    def __init__(self,
                 ddconfig,
                 lossconfig,
                 n_embed,
                 embed_dim,
                 ckpt_path=None,
                 ignore_keys=[],
                 image_key="image",
                 colorize_nlabels=None,
                 monitor=None,
                 remap=None,
                 sane_index_shape=False,  # tell vector quantizer to return indices as bhw
                 ):
        super().__init__()
        self.image_key = image_key
        self.encoder = Encoder(**ddconfig)
        self.decoder = Decoder(**ddconfig)
        self.loss = instantiate_from_config(lossconfig)
        self.quantize = VectorQuantizer(n_embed, embed_dim, beta=0.25,
                                        remap=remap, sane_index_shape=sane_index_shape)
        # 1x1 convs mapping between encoder channels and codebook dimension
        self.quant_conv = torch.nn.Conv2d(ddconfig["z_channels"], embed_dim, 1)
        self.post_quant_conv = torch.nn.Conv2d(embed_dim, ddconfig["z_channels"], 1)
        if ckpt_path is not None:
            self.init_from_ckpt(ckpt_path, ignore_keys=ignore_keys)
        self.image_key = image_key  # NOTE(review): redundant, already set above
        if colorize_nlabels is not None:
            assert type(colorize_nlabels)==int
            # random projection used by to_rgb() for >3-channel inputs
            self.register_buffer("colorize", torch.randn(3, colorize_nlabels, 1, 1))
        if monitor is not None:
            self.monitor = monitor

    def init_from_ckpt(self, path, ignore_keys=list()):
        """Load weights from a Lightning checkpoint, skipping ignored prefixes."""
        sd = torch.load(path, map_location="cpu")["state_dict"]
        # snapshot the keys: we delete from sd while scanning
        keys = list(sd.keys())
        for k in keys:
            for ik in ignore_keys:
                if k.startswith(ik):
                    print("Deleting key {} from state_dict.".format(k))
                    del sd[k]
        self.load_state_dict(sd, strict=False)
        print(f"Restored from {path}")

    def encode(self, x):
        """Encode an image to (quantized latents, embedding loss, info)."""
        h = self.encoder(x)
        h = self.quant_conv(h)
        quant, emb_loss, info = self.quantize(h)
        return quant, emb_loss, info

    def decode(self, quant):
        """Decode quantized latents back to image space."""
        quant = self.post_quant_conv(quant)
        dec = self.decoder(quant)
        return dec

    def decode_code(self, code_b):
        """Decode directly from codebook indices."""
        quant_b = self.quantize.embed_code(code_b)
        dec = self.decode(quant_b)
        return dec

    def forward(self, input):
        """Return (reconstruction, codebook loss)."""
        quant, diff, _ = self.encode(input)
        dec = self.decode(quant)
        return dec, diff

    def get_input(self, batch, k):
        """Fetch batch[k] and convert HWC (or HW) float images to NCHW."""
        x = batch[k]
        if len(x.shape) == 3:
            x = x[..., None]
        x = x.permute(0, 3, 1, 2).to(memory_format=torch.contiguous_format)
        return x.float()

    def training_step(self, batch, batch_idx, optimizer_idx):
        x = self.get_input(batch, self.image_key)
        xrec, qloss = self(x)

        if optimizer_idx == 0:
            # autoencode
            aeloss, log_dict_ae = self.loss(qloss, x, xrec, optimizer_idx, self.global_step,
                                            last_layer=self.get_last_layer(), split="train")

            self.log("train/aeloss", aeloss, prog_bar=True, logger=True, on_step=True, on_epoch=True)
            self.log_dict(log_dict_ae, prog_bar=False, logger=True, on_step=True, on_epoch=True)
            return aeloss

        if optimizer_idx == 1:
            # discriminator
            discloss, log_dict_disc = self.loss(qloss, x, xrec, optimizer_idx, self.global_step,
                                            last_layer=self.get_last_layer(), split="train")
            self.log("train/discloss", discloss, prog_bar=True, logger=True, on_step=True, on_epoch=True)
            self.log_dict(log_dict_disc, prog_bar=False, logger=True, on_step=True, on_epoch=True)
            return discloss

    def validation_step(self, batch, batch_idx):
        x = self.get_input(batch, self.image_key)
        xrec, qloss = self(x)
        # evaluate both loss branches for logging (no optimization here)
        aeloss, log_dict_ae = self.loss(qloss, x, xrec, 0, self.global_step,
                                            last_layer=self.get_last_layer(), split="val")

        discloss, log_dict_disc = self.loss(qloss, x, xrec, 1, self.global_step,
                                            last_layer=self.get_last_layer(), split="val")
        rec_loss = log_dict_ae["val/rec_loss"]
        self.log("val/rec_loss", rec_loss,
                   prog_bar=True, logger=True, on_step=True, on_epoch=True, sync_dist=True)
        self.log("val/aeloss", aeloss,
                   prog_bar=True, logger=True, on_step=True, on_epoch=True, sync_dist=True)
        self.log_dict(log_dict_ae)
        self.log_dict(log_dict_disc)
        return self.log_dict

    def configure_optimizers(self):
        """Two Adam optimizers: autoencoder parts and discriminator."""
        lr = self.learning_rate
        opt_ae = torch.optim.Adam(list(self.encoder.parameters())+
                                  list(self.decoder.parameters())+
                                  list(self.quantize.parameters())+
                                  list(self.quant_conv.parameters())+
                                  list(self.post_quant_conv.parameters()),
                                  lr=lr, betas=(0.5, 0.9))
        opt_disc = torch.optim.Adam(self.loss.discriminator.parameters(),
                                    lr=lr, betas=(0.5, 0.9))
        return [opt_ae, opt_disc], []

    def get_last_layer(self):
        # used by the loss to compute the adaptive discriminator weight
        return self.decoder.conv_out.weight

    def log_images(self, batch, **kwargs):
        """Return input and reconstruction tensors for image logging."""
        log = dict()
        x = self.get_input(batch, self.image_key)
        x = x.to(self.device)
        xrec, _ = self(x)
        if x.shape[1] > 3:
            # colorize with random projection
            assert xrec.shape[1] > 3
            x = self.to_rgb(x)
            xrec = self.to_rgb(xrec)
        log["inputs"] = x
        log["reconstructions"] = xrec
        return log

    def to_rgb(self, x):
        """Project a multi-channel segmentation map to 3 channels in [-1, 1]."""
        assert self.image_key == "segmentation"
        if not hasattr(self, "colorize"):
            self.register_buffer("colorize", torch.randn(3, x.shape[1], 1, 1).to(x))
        x = F.conv2d(x, weight=self.colorize)
        x = 2.*(x-x.min())/(x.max()-x.min()) - 1.
        return x
class VQSegmentationModel(VQModel):
    """VQModel specialized for segmentation maps: single optimizer, no
    discriminator branch."""
    def __init__(self, n_labels, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # random projection used by to_rgb() to visualize n_labels channels
        self.register_buffer("colorize", torch.randn(3, n_labels, 1, 1))

    def configure_optimizers(self):
        """Single Adam optimizer over all autoencoder parts."""
        lr = self.learning_rate
        opt_ae = torch.optim.Adam(list(self.encoder.parameters())+
                                  list(self.decoder.parameters())+
                                  list(self.quantize.parameters())+
                                  list(self.quant_conv.parameters())+
                                  list(self.post_quant_conv.parameters()),
                                  lr=lr, betas=(0.5, 0.9))
        return opt_ae

    def training_step(self, batch, batch_idx):
        x = self.get_input(batch, self.image_key)
        xrec, qloss = self(x)
        aeloss, log_dict_ae = self.loss(qloss, x, xrec, split="train")
        self.log_dict(log_dict_ae, prog_bar=False, logger=True, on_step=True, on_epoch=True)
        return aeloss

    def validation_step(self, batch, batch_idx):
        x = self.get_input(batch, self.image_key)
        xrec, qloss = self(x)
        aeloss, log_dict_ae = self.loss(qloss, x, xrec, split="val")
        self.log_dict(log_dict_ae, prog_bar=False, logger=True, on_step=True, on_epoch=True)
        total_loss = log_dict_ae["val/total_loss"]
        self.log("val/total_loss", total_loss,
                 prog_bar=True, logger=True, on_step=True, on_epoch=True, sync_dist=True)
        return aeloss

    @torch.no_grad()
    def log_images(self, batch, **kwargs):
        """Return colorized input/reconstruction pairs for image logging."""
        log = dict()
        x = self.get_input(batch, self.image_key)
        x = x.to(self.device)
        xrec, _ = self(x)
        if x.shape[1] > 3:
            # colorize with random projection
            assert xrec.shape[1] > 3
            # convert logits to indices
            xrec = torch.argmax(xrec, dim=1, keepdim=True)
            xrec = F.one_hot(xrec, num_classes=x.shape[1])
            xrec = xrec.squeeze(1).permute(0, 3, 1, 2).float()
            x = self.to_rgb(x)
            xrec = self.to_rgb(xrec)
        log["inputs"] = x
        log["reconstructions"] = xrec
        return log
class VQNoDiscModel(VQModel):
    """VQModel variant trained without a discriminator (single optimizer)."""

    def __init__(self,
                 ddconfig,
                 lossconfig,
                 n_embed,
                 embed_dim,
                 ckpt_path=None,
                 ignore_keys=[],
                 image_key="image",
                 colorize_nlabels=None
                 ):
        super().__init__(ddconfig=ddconfig, lossconfig=lossconfig, n_embed=n_embed, embed_dim=embed_dim,
                         ckpt_path=ckpt_path, ignore_keys=ignore_keys, image_key=image_key,
                         colorize_nlabels=colorize_nlabels)

    def training_step(self, batch, batch_idx):
        x = self.get_input(batch, self.image_key)
        xrec, qloss = self(x)
        # autoencode
        aeloss, log_dict_ae = self.loss(qloss, x, xrec, self.global_step, split="train")
        # fix: pl.TrainResult was removed in pytorch-lightning >= 1.0;
        # log via self.log/self.log_dict like the other models in this file
        self.log("train/aeloss", aeloss,
                 prog_bar=True, logger=True, on_step=True, on_epoch=True)
        self.log_dict(log_dict_ae, prog_bar=False, logger=True, on_step=True, on_epoch=True)
        return aeloss

    def validation_step(self, batch, batch_idx):
        x = self.get_input(batch, self.image_key)
        xrec, qloss = self(x)
        aeloss, log_dict_ae = self.loss(qloss, x, xrec, self.global_step, split="val")
        rec_loss = log_dict_ae["val/rec_loss"]
        # fix: pl.EvalResult(checkpoint_on=...) was removed; monitor
        # "val/rec_loss" through the ModelCheckpoint callback instead
        self.log("val/rec_loss", rec_loss,
                 prog_bar=True, logger=True, on_step=True, on_epoch=True)
        self.log("val/aeloss", aeloss,
                 prog_bar=True, logger=True, on_step=True, on_epoch=True)
        self.log_dict(log_dict_ae)
        return aeloss

    def configure_optimizers(self):
        """Single Adam optimizer over all autoencoder parts."""
        optimizer = torch.optim.Adam(list(self.encoder.parameters())+
                                     list(self.decoder.parameters())+
                                     list(self.quantize.parameters())+
                                     list(self.quant_conv.parameters())+
                                     list(self.post_quant_conv.parameters()),
                                     lr=self.learning_rate, betas=(0.5, 0.9))
        return optimizer
class GumbelVQ(VQModel):
    """VQModel using Gumbel-softmax quantization with an annealed temperature."""

    def __init__(self,
                 ddconfig,
                 lossconfig,
                 n_embed,
                 embed_dim,
                 temperature_scheduler_config,
                 ckpt_path=None,
                 ignore_keys=[],
                 image_key="image",
                 colorize_nlabels=None,
                 monitor=None,
                 kl_weight=1e-8,
                 remap=None,
                 ):

        z_channels = ddconfig["z_channels"]
        super().__init__(ddconfig,
                         lossconfig,
                         n_embed,
                         embed_dim,
                         ckpt_path=None,
                         ignore_keys=ignore_keys,
                         image_key=image_key,
                         colorize_nlabels=colorize_nlabels,
                         monitor=monitor,
                         )

        self.loss.n_classes = n_embed
        self.vocab_size = n_embed

        # replace the default quantizer with a Gumbel-softmax one
        self.quantize = GumbelQuantize(z_channels, embed_dim,
                                       n_embed=n_embed,
                                       kl_weight=kl_weight, temp_init=1.0,
                                       remap=remap)

        self.temperature_scheduler = instantiate_from_config(temperature_scheduler_config)   # annealing of temp

        # load the checkpoint only after the quantizer swap so weights match
        if ckpt_path is not None:
            self.init_from_ckpt(ckpt_path, ignore_keys=ignore_keys)

    def temperature_scheduling(self):
        """Set the quantizer temperature from the schedule at the current step."""
        self.quantize.temperature = self.temperature_scheduler(self.global_step)

    def encode_to_prequant(self, x):
        """Encode to the pre-quantization feature map."""
        h = self.encoder(x)
        h = self.quant_conv(h)
        return h

    def decode_code(self, code_b):
        # decoding straight from indices is not supported for GumbelQuantize
        raise NotImplementedError

    def training_step(self, batch, batch_idx, optimizer_idx):
        self.temperature_scheduling()
        x = self.get_input(batch, self.image_key)
        xrec, qloss = self(x)

        if optimizer_idx == 0:
            # autoencode
            aeloss, log_dict_ae = self.loss(qloss, x, xrec, optimizer_idx, self.global_step,
                                            last_layer=self.get_last_layer(), split="train")

            self.log_dict(log_dict_ae, prog_bar=False, logger=True, on_step=True, on_epoch=True)
            self.log("temperature", self.quantize.temperature, prog_bar=False, logger=True, on_step=True, on_epoch=True)
            return aeloss

        if optimizer_idx == 1:
            # discriminator
            discloss, log_dict_disc = self.loss(qloss, x, xrec, optimizer_idx, self.global_step,
                                                last_layer=self.get_last_layer(), split="train")
            self.log_dict(log_dict_disc, prog_bar=False, logger=True, on_step=True, on_epoch=True)
            return discloss

    def validation_step(self, batch, batch_idx):
        x = self.get_input(batch, self.image_key)
        # fix: VQModel.forward(input) accepts only the input tensor; the
        # previous call passed return_pred_indices=True, raising a TypeError
        xrec, qloss = self(x)
        aeloss, log_dict_ae = self.loss(qloss, x, xrec, 0, self.global_step,
                                        last_layer=self.get_last_layer(), split="val")

        discloss, log_dict_disc = self.loss(qloss, x, xrec, 1, self.global_step,
                                            last_layer=self.get_last_layer(), split="val")
        rec_loss = log_dict_ae["val/rec_loss"]
        self.log("val/rec_loss", rec_loss,
                 prog_bar=True, logger=True, on_step=False, on_epoch=True, sync_dist=True)
        self.log("val/aeloss", aeloss,
                 prog_bar=True, logger=True, on_step=False, on_epoch=True, sync_dist=True)
        self.log_dict(log_dict_ae)
        self.log_dict(log_dict_disc)
        return self.log_dict

    def log_images(self, batch, **kwargs):
        """Return input and reconstruction tensors for image logging."""
        log = dict()
        x = self.get_input(batch, self.image_key)
        x = x.to(self.device)
        # encode
        h = self.encoder(x)
        h = self.quant_conv(h)
        quant, _, _ = self.quantize(h)
        # decode
        x_rec = self.decode(quant)
        log["inputs"] = x
        log["reconstructions"] = x_rec
        return log
class EMAVQ(VQModel):
    """VQModel whose codebook is updated via exponential moving average
    instead of gradient descent."""
    def __init__(self,
                 ddconfig,
                 lossconfig,
                 n_embed,
                 embed_dim,
                 ckpt_path=None,
                 ignore_keys=[],
                 image_key="image",
                 colorize_nlabels=None,
                 monitor=None,
                 remap=None,
                 sane_index_shape=False,  # tell vector quantizer to return indices as bhw
                 ):
        super().__init__(ddconfig,
                         lossconfig,
                         n_embed,
                         embed_dim,
                         ckpt_path=None,
                         ignore_keys=ignore_keys,
                         image_key=image_key,
                         colorize_nlabels=colorize_nlabels,
                         monitor=monitor,
                         )
        # replace the default quantizer with the EMA-updated variant
        self.quantize = EMAVectorQuantizer(n_embed=n_embed,
                                           embedding_dim=embed_dim,
                                           beta=0.25,
                                           remap=remap)
    def configure_optimizers(self):
        lr = self.learning_rate
        #Remove self.quantize from parameter list since it is updated via EMA
        opt_ae = torch.optim.Adam(list(self.encoder.parameters())+
                                  list(self.decoder.parameters())+
                                  list(self.quant_conv.parameters())+
                                  list(self.post_quant_conv.parameters()),
                                  lr=lr, betas=(0.5, 0.9))
        opt_disc = torch.optim.Adam(self.loss.discriminator.parameters(),
                                    lr=lr, betas=(0.5, 0.9))
        return [opt_ae, opt_disc], [] | 16,760 | 40.487624 | 120 | py |
taming-transformers | taming-transformers-master/taming/models/cond_transformer.py | import os, math
import torch
import torch.nn.functional as F
import pytorch_lightning as pl
from main import instantiate_from_config
from taming.modules.util import SOSProvider
def disabled_train(self, mode=True):
    """Replacement for ``nn.Module.train`` that ignores the requested mode,
    keeping a frozen model permanently in its current train/eval state."""
    return self
class Net2NetTransformer(pl.LightningModule):
    def __init__(self,
                 transformer_config,
                 first_stage_config,
                 cond_stage_config,
                 permuter_config=None,
                 ckpt_path=None,
                 ignore_keys=[],
                 first_stage_key="image",
                 cond_stage_key="depth",
                 downsample_cond_size=-1,
                 pkeep=1.0,
                 sos_token=0,
                 unconditional=False,
                 ):
        """Set up the frozen first/cond stage models, the index permuter,
        and the autoregressive transformer; optionally restore a checkpoint."""
        super().__init__()
        self.be_unconditional = unconditional
        self.sos_token = sos_token
        self.first_stage_key = first_stage_key
        self.cond_stage_key = cond_stage_key
        self.init_first_stage_from_ckpt(first_stage_config)
        self.init_cond_stage_from_ckpt(cond_stage_config)
        if permuter_config is None:
            # default: keep codebook indices in raster order
            permuter_config = {"target": "taming.modules.transformer.permuter.Identity"}
        self.permuter = instantiate_from_config(config=permuter_config)
        self.transformer = instantiate_from_config(config=transformer_config)

        if ckpt_path is not None:
            self.init_from_ckpt(ckpt_path, ignore_keys=ignore_keys)
        self.downsample_cond_size = downsample_cond_size
        # probability of keeping each true index during training (rest randomized)
        self.pkeep = pkeep
def init_from_ckpt(self, path, ignore_keys=list()):
sd = torch.load(path, map_location="cpu")["state_dict"]
for k in sd.keys():
for ik in ignore_keys:
if k.startswith(ik):
self.print("Deleting key {} from state_dict.".format(k))
del sd[k]
self.load_state_dict(sd, strict=False)
print(f"Restored from {path}")
def init_first_stage_from_ckpt(self, config):
model = instantiate_from_config(config)
model = model.eval()
model.train = disabled_train
self.first_stage_model = model
    def init_cond_stage_from_ckpt(self, config):
        """Instantiate the conditioning stage; supports reusing the first
        stage or running unconditionally with a SOS-token provider."""
        if config == "__is_first_stage__":
            print("Using first stage also as cond stage.")
            self.cond_stage_model = self.first_stage_model
        elif config == "__is_unconditional__" or self.be_unconditional:
            print(f"Using no cond stage. Assuming the training is intended to be unconditional. "
                  f"Prepending {self.sos_token} as a sos token.")
            self.be_unconditional = True
            self.cond_stage_key = self.first_stage_key
            self.cond_stage_model = SOSProvider(self.sos_token)
        else:
            model = instantiate_from_config(config)
            model = model.eval()
            # freeze train/eval mode like the first stage
            model.train = disabled_train
            self.cond_stage_model = model
    def forward(self, x, c):
        """Return (logits, target) for next-index prediction of the image
        codes `x` conditioned on `c`."""
        # one step to produce the logits
        _, z_indices = self.encode_to_z(x)
        _, c_indices = self.encode_to_c(c)

        if self.training and self.pkeep < 1.0:
            # randomly replace a (1 - pkeep) fraction of indices with noise
            mask = torch.bernoulli(self.pkeep*torch.ones(z_indices.shape,
                                                         device=z_indices.device))
            mask = mask.round().to(dtype=torch.int64)
            r_indices = torch.randint_like(z_indices, self.transformer.config.vocab_size)
            a_indices = mask*z_indices+(1-mask)*r_indices
        else:
            a_indices = z_indices

        cz_indices = torch.cat((c_indices, a_indices), dim=1)

        # target includes all sequence elements (no need to handle first one
        # differently because we are conditioning)
        target = z_indices
        # make the prediction
        logits, _ = self.transformer(cz_indices[:, :-1])
        # cut off conditioning outputs - output i corresponds to p(z_i | z_{<i}, c)
        logits = logits[:, c_indices.shape[1]-1:]

        return logits, target
def top_k_logits(self, logits, k):
v, ix = torch.topk(logits, k)
out = logits.clone()
out[out < v[..., [-1]]] = -float('Inf')
return out
    @torch.no_grad()
    def sample(self, x, c, steps, temperature=1.0, sample=False, top_k=None,
               callback=lambda k: None):
        """Autoregressively sample `steps` code indices continuing `x`,
        conditioned on `c`; returns only the newly generated part."""
        x = torch.cat((c,x),dim=1)
        block_size = self.transformer.get_block_size()
        assert not self.transformer.training
        if self.pkeep <= 0.0:
            # one pass suffices since input is pure noise anyway
            assert len(x.shape)==2
            noise_shape = (x.shape[0], steps-1)
            #noise = torch.randint(self.transformer.config.vocab_size, noise_shape).to(x)
            noise = c.clone()[:,x.shape[1]-c.shape[1]:-1]
            x = torch.cat((x,noise),dim=1)
            logits, _ = self.transformer(x)
            # take all logits for now and scale by temp
            logits = logits / temperature
            # optionally crop probabilities to only the top k options
            if top_k is not None:
                logits = self.top_k_logits(logits, top_k)
            # apply softmax to convert to probabilities
            probs = F.softmax(logits, dim=-1)
            # sample from the distribution or take the most likely
            if sample:
                # multinomial needs 2D input: flatten batch*seq, then restore
                shape = probs.shape
                probs = probs.reshape(shape[0]*shape[1],shape[2])
                ix = torch.multinomial(probs, num_samples=1)
                probs = probs.reshape(shape[0],shape[1],shape[2])
                ix = ix.reshape(shape[0],shape[1])
            else:
                _, ix = torch.topk(probs, k=1, dim=-1)
            # cut off conditioning
            x = ix[:, c.shape[1]-1:]
        else:
            for k in range(steps):
                callback(k)
                assert x.size(1) <= block_size # make sure model can see conditioning
                x_cond = x if x.size(1) <= block_size else x[:, -block_size:]  # crop context if needed
                logits, _ = self.transformer(x_cond)
                # pluck the logits at the final step and scale by temperature
                logits = logits[:, -1, :] / temperature
                # optionally crop probabilities to only the top k options
                if top_k is not None:
                    logits = self.top_k_logits(logits, top_k)
                # apply softmax to convert to probabilities
                probs = F.softmax(logits, dim=-1)
                # sample from the distribution or take the most likely
                if sample:
                    ix = torch.multinomial(probs, num_samples=1)
                else:
                    _, ix = torch.topk(probs, k=1, dim=-1)
                # append to the sequence and continue
                x = torch.cat((x, ix), dim=1)
            # cut off conditioning
            x = x[:, c.shape[1]:]
        return x
    @torch.no_grad()
    def encode_to_z(self, x):
        """Encode an image to (quantized latents, permuted flat code indices)."""
        quant_z, _, info = self.first_stage_model.encode(x)
        # info[2] holds the codebook indices; flatten to (batch, seq)
        indices = info[2].view(quant_z.shape[0], -1)
        indices = self.permuter(indices)
        return quant_z, indices
    @torch.no_grad()
    def encode_to_c(self, c):
        """Encode the conditioning input to (quantized latents, flat indices)."""
        if self.downsample_cond_size > -1:
            c = F.interpolate(c, size=(self.downsample_cond_size, self.downsample_cond_size))
        quant_c, _, [_,_,indices] = self.cond_stage_model.encode(c)
        if len(indices.shape) > 2:
            # flatten spatial index maps to (batch, seq)
            indices = indices.view(c.shape[0], -1)
        return quant_c, indices
    @torch.no_grad()
    def decode_to_img(self, index, zshape):
        """Map code indices back to image space via the first-stage decoder;
        `zshape` is the (b, c, h, w) shape of the quantized latents."""
        # undo the permuter before looking up codebook entries
        index = self.permuter(index, reverse=True)
        bhwc = (zshape[0],zshape[2],zshape[3],zshape[1])
        quant_z = self.first_stage_model.quantize.get_codebook_entry(
            index.reshape(-1), shape=bhwc)
        x = self.first_stage_model.decode(quant_z)
        return x
    @torch.no_grad()
    def log_images(self, batch, temperature=None, top_k=None, callback=None, lr_interface=False, **kwargs):
        """Build a dict of tensors for image logging.

        Returns inputs, reconstructions, conditioning and three kinds of
        transformer samples: half-prompted ("samples_half"), fully sampled
        from an empty prompt ("samples_nopix") and greedy/deterministic
        ("samples_det").
        """
        log = dict()
        N = 4  # number of examples to visualize
        if lr_interface:
            # NOTE(review): passes diffuse/upsample_factor kwargs to get_xc —
            # confirm the get_xc implementation accepts them.
            x, c = self.get_xc(batch, N, diffuse=False, upsample_factor=8)
        else:
            x, c = self.get_xc(batch, N)
        x = x.to(device=self.device)
        c = c.to(device=self.device)
        quant_z, z_indices = self.encode_to_z(x)
        quant_c, c_indices = self.encode_to_c(c)
        # create a "half"" sample
        # condition on the first half of the true code sequence, sample the rest
        z_start_indices = z_indices[:,:z_indices.shape[1]//2]
        index_sample = self.sample(z_start_indices, c_indices,
                                   steps=z_indices.shape[1]-z_start_indices.shape[1],
                                   temperature=temperature if temperature is not None else 1.0,
                                   sample=True,
                                   top_k=top_k if top_k is not None else 100,
                                   callback=callback if callback is not None else lambda k: None)
        x_sample = self.decode_to_img(index_sample, quant_z.shape)
        # sample
        # empty prompt: generate the full sequence stochastically
        z_start_indices = z_indices[:, :0]
        index_sample = self.sample(z_start_indices, c_indices,
                                   steps=z_indices.shape[1],
                                   temperature=temperature if temperature is not None else 1.0,
                                   sample=True,
                                   top_k=top_k if top_k is not None else 100,
                                   callback=callback if callback is not None else lambda k: None)
        x_sample_nopix = self.decode_to_img(index_sample, quant_z.shape)
        # det sample
        # empty prompt, greedy decoding (sample=False)
        z_start_indices = z_indices[:, :0]
        index_sample = self.sample(z_start_indices, c_indices,
                                   steps=z_indices.shape[1],
                                   sample=False,
                                   callback=callback if callback is not None else lambda k: None)
        x_sample_det = self.decode_to_img(index_sample, quant_z.shape)
        # reconstruction
        x_rec = self.decode_to_img(z_indices, quant_z.shape)
        log["inputs"] = x
        log["reconstructions"] = x_rec
        if self.cond_stage_key in ["objects_bbox", "objects_center_points"]:
            # render object layouts as images via the dataset's plotter
            figure_size = (x_rec.shape[2], x_rec.shape[3])
            dataset = kwargs["pl_module"].trainer.datamodule.datasets["validation"]
            label_for_category_no = dataset.get_textual_label_for_category_no
            plotter = dataset.conditional_builders[self.cond_stage_key].plot
            log["conditioning"] = torch.zeros_like(log["reconstructions"])
            for i in range(quant_c.shape[0]):
                log["conditioning"][i] = plotter(quant_c[i], label_for_category_no, figure_size)
            log["conditioning_rec"] = log["conditioning"]
        elif self.cond_stage_key != "image":
            cond_rec = self.cond_stage_model.decode(quant_c)
            if self.cond_stage_key == "segmentation":
                # get image from segmentation mask
                num_classes = cond_rec.shape[1]
                c = torch.argmax(c, dim=1, keepdim=True)
                c = F.one_hot(c, num_classes=num_classes)
                c = c.squeeze(1).permute(0, 3, 1, 2).float()
                c = self.cond_stage_model.to_rgb(c)
                cond_rec = torch.argmax(cond_rec, dim=1, keepdim=True)
                cond_rec = F.one_hot(cond_rec, num_classes=num_classes)
                cond_rec = cond_rec.squeeze(1).permute(0, 3, 1, 2).float()
                cond_rec = self.cond_stage_model.to_rgb(cond_rec)
            log["conditioning_rec"] = cond_rec
            log["conditioning"] = c
        log["samples_half"] = x_sample
        log["samples_nopix"] = x_sample_nopix
        log["samples_det"] = x_sample_det
        return log
def get_input(self, key, batch):
x = batch[key]
if len(x.shape) == 3:
x = x[..., None]
if len(x.shape) == 4:
x = x.permute(0, 3, 1, 2).to(memory_format=torch.contiguous_format)
if x.dtype == torch.double:
x = x.float()
return x
def get_xc(self, batch, N=None):
x = self.get_input(self.first_stage_key, batch)
c = self.get_input(self.cond_stage_key, batch)
if N is not None:
x = x[:N]
c = c[:N]
return x, c
def shared_step(self, batch, batch_idx):
x, c = self.get_xc(batch)
logits, target = self(x, c)
loss = F.cross_entropy(logits.reshape(-1, logits.size(-1)), target.reshape(-1))
return loss
def training_step(self, batch, batch_idx):
loss = self.shared_step(batch, batch_idx)
self.log("train/loss", loss, prog_bar=True, logger=True, on_step=True, on_epoch=True)
return loss
def validation_step(self, batch, batch_idx):
loss = self.shared_step(batch, batch_idx)
self.log("val/loss", loss, prog_bar=True, logger=True, on_step=True, on_epoch=True)
return loss
    def configure_optimizers(self):
        """
        Following minGPT:
        This long function is unfortunately doing something very simple and is being very defensive:
        We are separating out all parameters of the model into two buckets: those that will experience
        weight decay for regularization and those that won't (biases, and layernorm/embedding weights).
        We are then returning the PyTorch optimizer object.
        """
        # separate out all parameters to those that will and won't experience regularizing weight decay
        decay = set()
        no_decay = set()
        whitelist_weight_modules = (torch.nn.Linear, )
        blacklist_weight_modules = (torch.nn.LayerNorm, torch.nn.Embedding)
        # named_modules() x named_parameters() visits shared parameters several
        # times; the sets deduplicate the full parameter names.
        for mn, m in self.transformer.named_modules():
            for pn, p in m.named_parameters():
                fpn = '%s.%s' % (mn, pn) if mn else pn # full param name
                if pn.endswith('bias'):
                    # all biases will not be decayed
                    no_decay.add(fpn)
                elif pn.endswith('weight') and isinstance(m, whitelist_weight_modules):
                    # weights of whitelist modules will be weight decayed
                    decay.add(fpn)
                elif pn.endswith('weight') and isinstance(m, blacklist_weight_modules):
                    # weights of blacklist modules will NOT be weight decayed
                    no_decay.add(fpn)
        # special case the position embedding parameter in the root GPT module as not decayed
        no_decay.add('pos_emb')
        # validate that we considered every parameter
        param_dict = {pn: p for pn, p in self.transformer.named_parameters()}
        inter_params = decay & no_decay
        union_params = decay | no_decay
        assert len(inter_params) == 0, "parameters %s made it into both decay/no_decay sets!" % (str(inter_params), )
        assert len(param_dict.keys() - union_params) == 0, "parameters %s were not separated into either decay/no_decay set!" \
            % (str(param_dict.keys() - union_params), )
        # create the pytorch optimizer object
        optim_groups = [
            {"params": [param_dict[pn] for pn in sorted(list(decay))], "weight_decay": 0.01},
            {"params": [param_dict[pn] for pn in sorted(list(no_decay))], "weight_decay": 0.0},
        ]
        optimizer = torch.optim.AdamW(optim_groups, lr=self.learning_rate, betas=(0.9, 0.95))
        return optimizer
| 15,648 | 43.331445 | 127 | py |
taming-transformers | taming-transformers-master/taming/data/custom.py | import os
import numpy as np
import albumentations
from torch.utils.data import Dataset
from taming.data.base import ImagePaths, NumpyPaths, ConcatDatasetWithIndex
class CustomBase(Dataset):
    """Thin Dataset wrapper delegating to an indexable `self.data` set by subclasses."""

    def __init__(self, *args, **kwargs):
        super().__init__()
        self.data = None  # subclasses assign the backing dataset here

    def __len__(self):
        return len(self.data)

    def __getitem__(self, i):
        return self.data[i]
class CustomTrain(CustomBase):
    """Training split: image paths listed one per line in a text file."""

    def __init__(self, size, training_images_list_file):
        super().__init__()
        with open(training_images_list_file, "r") as list_file:
            image_paths = list_file.read().splitlines()
        self.data = ImagePaths(paths=image_paths, size=size, random_crop=False)
class CustomTest(CustomBase):
    """Test split: image paths listed one per line in a text file."""

    def __init__(self, size, test_images_list_file):
        super().__init__()
        with open(test_images_list_file, "r") as list_file:
            image_paths = list_file.read().splitlines()
        self.data = ImagePaths(paths=image_paths, size=size, random_crop=False)
| 998 | 24.615385 | 75 | py |
taming-transformers | taming-transformers-master/taming/data/base.py | import bisect
import numpy as np
import albumentations
from PIL import Image
from torch.utils.data import Dataset, ConcatDataset
class ConcatDatasetWithIndex(ConcatDataset):
    """Modified from original pytorch code to return dataset idx"""

    def __getitem__(self, idx):
        if idx < 0:
            if -idx > len(self):
                raise ValueError("absolute value of index should not exceed dataset length")
            idx += len(self)
        # locate the sub-dataset containing idx, then the offset within it
        dataset_idx = bisect.bisect_right(self.cumulative_sizes, idx)
        offset = 0 if dataset_idx == 0 else self.cumulative_sizes[dataset_idx - 1]
        return self.datasets[dataset_idx][idx - offset], dataset_idx
class ImagePaths(Dataset):
    """Dataset over a list of image file paths with optional rescale + crop.

    Yields dicts with the preprocessed "image" plus one entry per label list.
    """

    def __init__(self, paths, size=None, random_crop=False, labels=None):
        self.size = size
        self.random_crop = random_crop
        self.labels = dict() if labels is None else labels
        self.labels["file_path_"] = paths
        self._length = len(paths)

        if self.size is not None and self.size > 0:
            self.rescaler = albumentations.SmallestMaxSize(max_size=self.size)
            crop_cls = (albumentations.RandomCrop if self.random_crop
                        else albumentations.CenterCrop)
            self.cropper = crop_cls(height=self.size, width=self.size)
            self.preprocessor = albumentations.Compose([self.rescaler, self.cropper])
        else:
            # identity transform when no target size is given
            self.preprocessor = lambda **kwargs: kwargs

    def __len__(self):
        return self._length

    def preprocess_image(self, image_path):
        image = Image.open(image_path)
        if image.mode != "RGB":
            image = image.convert("RGB")
        image = np.array(image).astype(np.uint8)
        image = self.preprocessor(image=image)["image"]
        # scale uint8 [0, 255] to float32 [-1, 1]
        return (image / 127.5 - 1.0).astype(np.float32)

    def __getitem__(self, i):
        example = {"image": self.preprocess_image(self.labels["file_path_"][i])}
        for key, values in self.labels.items():
            example[key] = values[i]
        return example
class NumpyPaths(ImagePaths):
    """ImagePaths variant whose files are .npy arrays (e.g. 1 x 3 x 1024 x 1024)."""

    def preprocess_image(self, image_path):
        array = np.load(image_path).squeeze(0)  # -> 3 x 1024 x 1024
        array = np.transpose(array, (1, 2, 0))  # -> H x W x 3
        image = np.array(Image.fromarray(array, mode="RGB")).astype(np.uint8)
        image = self.preprocessor(image=image)["image"]
        # scale uint8 [0, 255] to float32 [-1, 1]
        return (image / 127.5 - 1.0).astype(np.float32)
| 2,609 | 35.760563 | 92 | py |
taming-transformers | taming-transformers-master/taming/data/helper_types.py | from typing import Dict, Tuple, Optional, NamedTuple, Union
from PIL.Image import Image as pil_image
from torch import Tensor
try:
from typing import Literal
except ImportError:
from typing_extensions import Literal
Image = Union[Tensor, pil_image]
BoundingBox = Tuple[float, float, float, float] # x0, y0, w, h
CropMethodType = Literal['none', 'random', 'center', 'random-2d']
SplitType = Literal['train', 'validation', 'test']
class ImageDescription(NamedTuple):
    """Per-image metadata record: identifiers, original size and provenance fields."""
    id: int
    file_name: str
    original_size: Tuple[int, int] # w, h
    url: Optional[str] = None
    license: Optional[int] = None
    coco_url: Optional[str] = None
    date_captured: Optional[str] = None
    flickr_url: Optional[str] = None
    flickr_id: Optional[str] = None
    coco_id: Optional[str] = None
class Category(NamedTuple):
    """An object category: id, optional super-category, and display name."""
    id: str
    super_category: Optional[str]
    name: str
class Annotation(NamedTuple):
    """A single object annotation: bounding box, category and optional flags."""
    area: float
    image_id: str
    bbox: BoundingBox  # (x0, y0, w, h) per the BoundingBox alias above
    category_no: int
    category_id: str
    id: Optional[int] = None
    source: Optional[str] = None
    confidence: Optional[float] = None
    is_group_of: Optional[bool] = None
    is_truncated: Optional[bool] = None
    is_occluded: Optional[bool] = None
    is_depiction: Optional[bool] = None
    is_inside: Optional[bool] = None
    segmentation: Optional[Dict] = None
| 1,350 | 26.02 | 65 | py |
taming-transformers | taming-transformers-master/taming/data/ade20k.py | import os
import numpy as np
import cv2
import albumentations
from PIL import Image
from torch.utils.data import Dataset
from taming.data.sflckr import SegmentationBase # for examples included in repo
class Examples(SegmentationBase):
    """ADE20k example images/segmentations shipped with the repository."""
    def __init__(self, size=256, random_crop=False, interpolation="bicubic"):
        super().__init__(data_csv="data/ade20k_examples.txt",
                         data_root="data/ade20k_images",
                         segmentation_root="data/ade20k_segmentations",
                         size=size, random_crop=random_crop,
                         interpolation=interpolation,
                         n_labels=151, shift_segmentation=False)
# With semantic map and scene label
class ADE20kBase(Dataset):
    """ADE20k dataset with semantic segmentation maps and per-image scene labels.

    Reads image paths from a split file, scene labels from sceneCategories.txt,
    and yields dicts with "image" (float32 in [-1, 1]), one-hot "segmentation"
    and "scene_category".
    """
    def __init__(self, config=None, size=None, random_crop=False, interpolation="bicubic", crop_size=None):
        self.split = self.get_split()
        self.n_labels = 151 # unknown + 150
        self.data_csv = {"train": "data/ade20k_train.txt",
                         "validation": "data/ade20k_test.txt"}[self.split]
        self.data_root = "data/ade20k_root"
        # scene label lookup: "<image stem> <scene>" per line
        with open(os.path.join(self.data_root, "sceneCategories.txt"), "r") as f:
            self.scene_categories = f.read().splitlines()
        self.scene_categories = dict(line.split() for line in self.scene_categories)
        with open(self.data_csv, "r") as f:
            self.image_paths = f.read().splitlines()
        self._length = len(self.image_paths)
        self.labels = {
            "relative_file_path_": [l for l in self.image_paths],
            "file_path_": [os.path.join(self.data_root, "images", l)
                           for l in self.image_paths],
            "relative_segmentation_path_": [l.replace(".jpg", ".png")
                                            for l in self.image_paths],
            "segmentation_path_": [os.path.join(self.data_root, "annotations",
                                                l.replace(".jpg", ".png"))
                                   for l in self.image_paths],
            "scene_category": [self.scene_categories[l.split("/")[1].replace(".jpg", "")]
                               for l in self.image_paths],
        }

        # non-positive size means "no rescaling"
        size = None if size is not None and size<=0 else size
        self.size = size
        if crop_size is None:
            self.crop_size = size if size is not None else None
        else:
            self.crop_size = crop_size
        if self.size is not None:
            self.interpolation = interpolation
            self.interpolation = {
                "nearest": cv2.INTER_NEAREST,
                "bilinear": cv2.INTER_LINEAR,
                "bicubic": cv2.INTER_CUBIC,
                "area": cv2.INTER_AREA,
                "lanczos": cv2.INTER_LANCZOS4}[self.interpolation]
            self.image_rescaler = albumentations.SmallestMaxSize(max_size=self.size,
                                                                 interpolation=self.interpolation)
            # segmentation maps must use nearest-neighbour to keep labels discrete
            self.segmentation_rescaler = albumentations.SmallestMaxSize(max_size=self.size,
                                                                        interpolation=cv2.INTER_NEAREST)

        if crop_size is not None:
            self.center_crop = not random_crop
            if self.center_crop:
                self.cropper = albumentations.CenterCrop(height=self.crop_size, width=self.crop_size)
            else:
                self.cropper = albumentations.RandomCrop(height=self.crop_size, width=self.crop_size)
            self.preprocessor = self.cropper

    def __len__(self):
        return self._length

    def __getitem__(self, i):
        """Return one example: image, one-hot segmentation and label metadata."""
        example = dict((k, self.labels[k][i]) for k in self.labels)
        image = Image.open(example["file_path_"])
        if not image.mode == "RGB":
            image = image.convert("RGB")
        image = np.array(image).astype(np.uint8)
        if self.size is not None:
            image = self.image_rescaler(image=image)["image"]
        segmentation = Image.open(example["segmentation_path_"])
        segmentation = np.array(segmentation).astype(np.uint8)
        if self.size is not None:
            segmentation = self.segmentation_rescaler(image=segmentation)["image"]
        if self.size is not None:
            # crop image and mask together so they stay aligned
            processed = self.preprocessor(image=image, mask=segmentation)
        else:
            processed = {"image": image, "mask": segmentation}
        example["image"] = (processed["image"]/127.5 - 1.0).astype(np.float32)
        segmentation = processed["mask"]
        # one-hot encode the label map: (H, W) -> (H, W, n_labels)
        onehot = np.eye(self.n_labels)[segmentation]
        example["segmentation"] = onehot
        return example
class ADE20kTrain(ADE20kBase):
    """ADE20k training split; random cropping enabled by default."""
    # default to random_crop=True
    def __init__(self, config=None, size=None, random_crop=True, interpolation="bicubic", crop_size=None):
        super().__init__(config=config, size=size, random_crop=random_crop,
                         interpolation=interpolation, crop_size=crop_size)

    def get_split(self):
        return "train"
class ADE20kValidation(ADE20kBase):
    """ADE20k validation split."""
    def get_split(self):
        return "validation"
if __name__ == "__main__":
    # Smoke test: load one validation example and print key types/shapes.
    dset = ADE20kValidation()
    ex = dset[0]
    for k in ["image", "scene_category", "segmentation"]:
        print(type(ex[k]))
        try:
            print(ex[k].shape)
        except AttributeError:
            # entries without .shape (e.g. the scene-category string);
            # previously a bare `except:` which also swallowed KeyboardInterrupt
            print(ex[k])
| 5,378 | 42.032 | 107 | py |
taming-transformers | taming-transformers-master/taming/data/utils.py | import collections
import os
import tarfile
import urllib
import zipfile
from pathlib import Path
import numpy as np
import torch
from taming.data.helper_types import Annotation
from torch._six import string_classes
from torch.utils.data._utils.collate import np_str_obj_array_pattern, default_collate_err_msg_format
from tqdm import tqdm
def unpack(path):
    """Extract a .tar.gz/.tar/.zip archive into its containing directory.

    Raises NotImplementedError for any other extension.
    """
    destination = os.path.split(path)[0]
    # NOTE(review): extractall trusts archive member names; acceptable for the
    # project's own downloads, unsafe for untrusted archives.
    if path.endswith("tar.gz"):
        with tarfile.open(path, "r:gz") as tar:
            tar.extractall(path=destination)
    elif path.endswith("tar"):
        with tarfile.open(path, "r:") as tar:
            tar.extractall(path=destination)
    elif path.endswith("zip"):
        with zipfile.ZipFile(path, "r") as archive:
            archive.extractall(path=destination)
    else:
        raise NotImplementedError(
            "Unknown file extension: {}".format(os.path.splitext(path)[1])
        )
def reporthook(bar):
    """Adapt a tqdm bar to urllib.request.urlretrieve's reporthook protocol."""
    def update_progress(blocks=1, block_size=1, total_size=None):
        # urllib calls this as (block_number, block_size, total_size)
        if total_size is not None:
            bar.total = total_size
        bar.update(blocks * block_size - bar.n)
    return update_progress
def get_root(name):
    """Return the path ``data/<name>``, creating the directory if missing."""
    root = os.path.join("data/", name)
    os.makedirs(root, exist_ok=True)
    return root
def is_prepared(root):
    """True if `root` contains the `.ready` marker written by mark_prepared."""
    return (Path(root) / ".ready").exists()
def mark_prepared(root):
    """Drop a `.ready` marker file in `root` recording completed preparation."""
    (Path(root) / ".ready").touch()
def prompt_download(file_, source, target_dir, content_dir=None):
    """Interactively wait until `file_` (or `content_dir`) exists in `target_dir`.

    Returns the expected target path immediately if it is already present;
    otherwise loops, asking the user to download it manually.
    """
    targetpath = os.path.join(target_dir, file_)
    content_path = None if content_dir is None else os.path.join(target_dir, content_dir)
    while not os.path.exists(targetpath):
        if content_path is not None and os.path.exists(content_path):
            break
        print(
            "Please download '{}' from '{}' to '{}'.".format(file_, source, targetpath)
        )
        if content_dir is not None:
            print("Or place its content into '{}'.".format(content_path))
        input("Press Enter when done...")
    return targetpath
def download_url(file_, url, target_dir):
    """Download `url` to `target_dir/file_` with a tqdm progress bar; return the path."""
    targetpath = os.path.join(target_dir, file_)
    os.makedirs(target_dir, exist_ok=True)
    bar_kwargs = dict(unit="B", unit_scale=True, unit_divisor=1024, miniters=1, desc=file_)
    with tqdm(**bar_kwargs) as bar:
        urllib.request.urlretrieve(url, targetpath, reporthook=reporthook(bar))
    return targetpath
def download_urls(urls, target_dir):
    """Download every (filename -> url) pair into `target_dir`; return filename -> path."""
    return {fname: download_url(fname, url, target_dir)
            for fname, url in urls.items()}
def quadratic_crop(x, bbox, alpha=1.0):
    """Crop a square of side alpha*max(w, h) centered on bbox.

    bbox is xmin, ymin, xmax, ymax; the image is reflect-padded when the
    square extends past its borders.
    """
    im_h, im_w = x.shape[:2]
    box = np.clip(np.array(bbox, dtype=np.float32), 0, max(im_h, im_w))
    cx, cy = 0.5 * (box[0] + box[2]), 0.5 * (box[1] + box[3])
    side = max(int(alpha * max(box[2] - box[0], box[3] - box[1])), 2)
    # how far the square of half-width `side` sticks out of the image
    overhang = -1 * min(cx - side, cy - side, im_w - (cx + side), im_h - (cy + side))
    overhang = int(np.ceil(overhang))
    if overhang > 0:
        pad_spec = [[overhang, overhang], [overhang, overhang]]
        pad_spec += [[0, 0]] * (len(x.shape) - 2)
        x = np.pad(x, pad_spec, "reflect")
        cx, cy = cx + overhang, cy + overhang
    xmin = int(cx - side / 2)
    ymin = int(cy - side / 2)
    return np.array(x[ymin:ymin + side, xmin:xmin + side, ...])
def custom_collate(batch):
    r"""source: pytorch 1.9.0, only one modification to original code

    Recursively collates a batch like torch's default_collate, except that a
    sequence whose elements are Annotation namedtuples is returned as-is
    (see the two lines marked "# added").
    """
    # NOTE(review): string_classes comes from torch._six, which was removed in
    # torch >= 1.13 — confirm the pinned torch version.
    elem = batch[0]
    elem_type = type(elem)
    if isinstance(elem, torch.Tensor):
        out = None
        if torch.utils.data.get_worker_info() is not None:
            # If we're in a background process, concatenate directly into a
            # shared memory tensor to avoid an extra copy
            numel = sum([x.numel() for x in batch])
            storage = elem.storage()._new_shared(numel)
            out = elem.new(storage)
        return torch.stack(batch, 0, out=out)
    elif elem_type.__module__ == 'numpy' and elem_type.__name__ != 'str_' \
            and elem_type.__name__ != 'string_':
        if elem_type.__name__ == 'ndarray' or elem_type.__name__ == 'memmap':
            # array of string classes and object
            if np_str_obj_array_pattern.search(elem.dtype.str) is not None:
                raise TypeError(default_collate_err_msg_format.format(elem.dtype))
            return custom_collate([torch.as_tensor(b) for b in batch])
        elif elem.shape == (): # scalars
            return torch.as_tensor(batch)
    elif isinstance(elem, float):
        return torch.tensor(batch, dtype=torch.float64)
    elif isinstance(elem, int):
        return torch.tensor(batch)
    elif isinstance(elem, string_classes):
        return batch
    elif isinstance(elem, collections.abc.Mapping):
        return {key: custom_collate([d[key] for d in batch]) for key in elem}
    elif isinstance(elem, tuple) and hasattr(elem, '_fields'): # namedtuple
        return elem_type(*(custom_collate(samples) for samples in zip(*batch)))
    # lists of Annotation records are passed through untouched (the single
    # modification relative to upstream default_collate)
    if isinstance(elem, collections.abc.Sequence) and isinstance(elem[0], Annotation): # added
        return batch # added
    elif isinstance(elem, collections.abc.Sequence):
        # check to make sure that the elements in batch have consistent size
        it = iter(batch)
        elem_size = len(next(it))
        if not all(len(elem) == elem_size for elem in it):
            raise RuntimeError('each element in list of batch should be of equal size')
        transposed = zip(*batch)
        return [custom_collate(samples) for samples in transposed]
    raise TypeError(default_collate_err_msg_format.format(elem_type))
| 5,903 | 33.729412 | 100 | py |
taming-transformers | taming-transformers-master/taming/data/faceshq.py | import os
import numpy as np
import albumentations
from torch.utils.data import Dataset
from taming.data.base import ImagePaths, NumpyPaths, ConcatDatasetWithIndex
class FacesBase(Dataset):
    """Base dataset for face collections; optionally restricts examples to `self.keys`."""

    def __init__(self, *args, **kwargs):
        super().__init__()
        self.data = None  # indexable dataset, assigned by subclasses
        self.keys = None  # optional subset of example keys to expose

    def __len__(self):
        return len(self.data)

    def __getitem__(self, i):
        example = self.data[i]
        if self.keys is None:
            return example
        return {k: example[k] for k in self.keys}
class CelebAHQTrain(FacesBase):
    """CelebA-HQ training split, .npy files listed in data/celebahqtrain.txt."""

    def __init__(self, size, keys=None):
        super().__init__()
        root = "data/celebahq"
        with open("data/celebahqtrain.txt", "r") as f:
            relpaths = f.read().splitlines()
        full_paths = [os.path.join(root, relpath) for relpath in relpaths]
        self.data = NumpyPaths(paths=full_paths, size=size, random_crop=False)
        self.keys = keys
class CelebAHQValidation(FacesBase):
    """CelebA-HQ validation split, .npy files listed in data/celebahqvalidation.txt."""

    def __init__(self, size, keys=None):
        super().__init__()
        root = "data/celebahq"
        with open("data/celebahqvalidation.txt", "r") as f:
            relpaths = f.read().splitlines()
        full_paths = [os.path.join(root, relpath) for relpath in relpaths]
        self.data = NumpyPaths(paths=full_paths, size=size, random_crop=False)
        self.keys = keys
class FFHQTrain(FacesBase):
    """FFHQ training split, image files listed in data/ffhqtrain.txt."""

    def __init__(self, size, keys=None):
        super().__init__()
        root = "data/ffhq"
        with open("data/ffhqtrain.txt", "r") as f:
            relpaths = f.read().splitlines()
        full_paths = [os.path.join(root, relpath) for relpath in relpaths]
        self.data = ImagePaths(paths=full_paths, size=size, random_crop=False)
        self.keys = keys
class FFHQValidation(FacesBase):
    """FFHQ validation split, image files listed in data/ffhqvalidation.txt."""

    def __init__(self, size, keys=None):
        super().__init__()
        root = "data/ffhq"
        with open("data/ffhqvalidation.txt", "r") as f:
            relpaths = f.read().splitlines()
        full_paths = [os.path.join(root, relpath) for relpath in relpaths]
        self.data = ImagePaths(paths=full_paths, size=size, random_crop=False)
        self.keys = keys
class FacesHQTrain(Dataset):
    """Concatenated training set: CelebA-HQ (class 0) + FFHQ (class 1)."""

    def __init__(self, size, keys=None, crop_size=None, coord=False):
        celeba = CelebAHQTrain(size=size, keys=keys)
        ffhq = FFHQTrain(size=size, keys=keys)
        self.data = ConcatDatasetWithIndex([celeba, ffhq])
        self.coord = coord
        if crop_size is not None:
            self.cropper = albumentations.RandomCrop(height=crop_size, width=crop_size)
            if self.coord:
                # crop the image and the coordinate grid identically
                self.cropper = albumentations.Compose(
                    [self.cropper], additional_targets={"coord": "image"})

    def __len__(self):
        return len(self.data)

    def __getitem__(self, i):
        example, class_idx = self.data[i]
        if hasattr(self, "cropper"):
            if self.coord:
                h, w, _ = example["image"].shape
                coord = np.arange(h * w).reshape(h, w, 1) / (h * w)
                cropped = self.cropper(image=example["image"], coord=coord)
                example["image"] = cropped["image"]
                example["coord"] = cropped["coord"]
            else:
                example["image"] = self.cropper(image=example["image"])["image"]
        example["class"] = class_idx
        return example
class FacesHQValidation(Dataset):
    """Concatenated validation set: CelebA-HQ (class 0) + FFHQ (class 1)."""

    def __init__(self, size, keys=None, crop_size=None, coord=False):
        celeba = CelebAHQValidation(size=size, keys=keys)
        ffhq = FFHQValidation(size=size, keys=keys)
        self.data = ConcatDatasetWithIndex([celeba, ffhq])
        self.coord = coord
        if crop_size is not None:
            self.cropper = albumentations.CenterCrop(height=crop_size, width=crop_size)
            if self.coord:
                # crop the image and the coordinate grid identically
                self.cropper = albumentations.Compose(
                    [self.cropper], additional_targets={"coord": "image"})

    def __len__(self):
        return len(self.data)

    def __getitem__(self, i):
        example, class_idx = self.data[i]
        if hasattr(self, "cropper"):
            if self.coord:
                h, w, _ = example["image"].shape
                coord = np.arange(h * w).reshape(h, w, 1) / (h * w)
                cropped = self.cropper(image=example["image"], coord=coord)
                example["image"] = cropped["image"]
                example["coord"] = cropped["coord"]
            else:
                example["image"] = self.cropper(image=example["image"])["image"]
        example["class"] = class_idx
        return example
| 4,640 | 33.377778 | 92 | py |
taming-transformers | taming-transformers-master/taming/data/annotated_objects_dataset.py | from pathlib import Path
from typing import Optional, List, Callable, Dict, Any, Union
import warnings
import PIL.Image as pil_image
from torch import Tensor
from torch.utils.data import Dataset
from torchvision import transforms
from taming.data.conditional_builder.objects_bbox import ObjectsBoundingBoxConditionalBuilder
from taming.data.conditional_builder.objects_center_points import ObjectsCenterPointsConditionalBuilder
from taming.data.conditional_builder.utils import load_object_from_string
from taming.data.helper_types import BoundingBox, CropMethodType, Image, Annotation, SplitType
from taming.data.image_transforms import CenterCropReturnCoordinates, RandomCrop1dReturnCoordinates, \
Random2dCropReturnCoordinates, RandomHorizontalFlipReturn, convert_pil_to_tensor
class AnnotatedObjectsDataset(Dataset):
    """Base dataset for images annotated with object bounding boxes.

    Subclasses implement the abstract accessors (get_image_description,
    get_path_structure, get_image_path) and populate annotations,
    image_descriptions, categories and image_ids. Examples are dicts whose
    contents are selected by `keys` and may include the transformed image and
    conditioning token sequences built by the conditional builders.
    """
    def __init__(self, data_path: Union[str, Path], split: SplitType, keys: List[str], target_image_size: int,
                 min_object_area: float, min_objects_per_image: int, max_objects_per_image: int,
                 crop_method: CropMethodType, random_flip: bool, no_tokens: int, use_group_parameter: bool,
                 encode_crop: bool, category_allow_list_target: str = "", category_mapping_target: str = "",
                 no_object_classes: Optional[int] = None):
        self.data_path = data_path
        self.split = split
        self.keys = keys
        self.target_image_size = target_image_size
        self.min_object_area = min_object_area
        self.min_objects_per_image = min_objects_per_image
        self.max_objects_per_image = max_objects_per_image
        self.crop_method = crop_method
        self.random_flip = random_flip
        self.no_tokens = no_tokens
        self.use_group_parameter = use_group_parameter
        self.encode_crop = encode_crop
        # filled in by subclass data loading
        self.annotations = None
        self.image_descriptions = None
        self.categories = None
        self.category_ids = None
        self.category_number = None
        self.image_ids = None
        self.transform_functions: List[Callable] = self.setup_transform(target_image_size, crop_method, random_flip)
        self.paths = self.build_paths(self.data_path)
        self._conditional_builders = None
        self.category_allow_list = None
        if category_allow_list_target:
            allow_list = load_object_from_string(category_allow_list_target)
            self.category_allow_list = {name for name, _ in allow_list}
        self.category_mapping = {}
        if category_mapping_target:
            self.category_mapping = load_object_from_string(category_mapping_target)
        self.no_object_classes = no_object_classes

    def build_paths(self, top_level: Union[str, Path]) -> Dict[str, Path]:
        """Resolve the subclass's path structure below `top_level` and verify it exists."""
        top_level = Path(top_level)
        sub_paths = {name: top_level.joinpath(sub_path) for name, sub_path in self.get_path_structure().items()}
        for path in sub_paths.values():
            if not path.exists():
                raise FileNotFoundError(f'{type(self).__name__} data structure error: [{path}] does not exist.')
        return sub_paths

    @staticmethod
    def load_image_from_disk(path: Path) -> Image:
        """Open an image file and force RGB mode."""
        return pil_image.open(path).convert('RGB')

    @staticmethod
    def setup_transform(target_image_size: int, crop_method: CropMethodType, random_flip: bool):
        """Build the ordered list of image transforms for the given crop method.

        Returns None when crop_method is None. The final Lambda maps uint8
        pixel values into [-1, 1].
        """
        transform_functions = []
        if crop_method == 'none':
            transform_functions.append(transforms.Resize((target_image_size, target_image_size)))
        elif crop_method == 'center':
            transform_functions.extend([
                transforms.Resize(target_image_size),
                CenterCropReturnCoordinates(target_image_size)
            ])
        elif crop_method == 'random-1d':
            transform_functions.extend([
                transforms.Resize(target_image_size),
                RandomCrop1dReturnCoordinates(target_image_size)
            ])
        elif crop_method == 'random-2d':
            transform_functions.extend([
                Random2dCropReturnCoordinates(target_image_size),
                transforms.Resize(target_image_size)
            ])
        elif crop_method is None:
            return None
        else:
            raise ValueError(f'Received invalid crop method [{crop_method}].')
        if random_flip:
            transform_functions.append(RandomHorizontalFlipReturn())
        transform_functions.append(transforms.Lambda(lambda x: x / 127.5 - 1.))
        return transform_functions

    def image_transform(self, x: Tensor) -> (Optional[BoundingBox], Optional[bool], Tensor):
        """Apply all transforms; also return the crop bbox and flip flag (if any)."""
        crop_bbox = None
        flipped = None
        for t in self.transform_functions:
            if isinstance(t, (RandomCrop1dReturnCoordinates, CenterCropReturnCoordinates, Random2dCropReturnCoordinates)):
                crop_bbox, x = t(x)
            elif isinstance(t, RandomHorizontalFlipReturn):
                flipped, x = t(x)
            else:
                x = t(x)
        return crop_bbox, flipped, x

    @property
    def no_classes(self) -> int:
        """Number of object classes (override via no_object_classes if set)."""
        return self.no_object_classes if self.no_object_classes else len(self.categories)

    @property
    def conditional_builders(self) -> ObjectsCenterPointsConditionalBuilder:
        # cannot set this up in init because no_classes is only known after loading data in init of superclass
        if self._conditional_builders is None:
            self._conditional_builders = {
                'objects_center_points': ObjectsCenterPointsConditionalBuilder(
                    self.no_classes,
                    self.max_objects_per_image,
                    self.no_tokens,
                    self.encode_crop,
                    self.use_group_parameter,
                    getattr(self, 'use_additional_parameters', False)
                ),
                'objects_bbox': ObjectsBoundingBoxConditionalBuilder(
                    self.no_classes,
                    self.max_objects_per_image,
                    self.no_tokens,
                    self.encode_crop,
                    self.use_group_parameter,
                    getattr(self, 'use_additional_parameters', False)
                )
            }
        return self._conditional_builders

    def filter_categories(self) -> None:
        """Restrict self.categories by the allow list and the mapping, if configured."""
        if self.category_allow_list:
            self.categories = {id_: cat for id_, cat in self.categories.items() if cat.name in self.category_allow_list}
        if self.category_mapping:
            self.categories = {id_: cat for id_, cat in self.categories.items() if cat.id not in self.category_mapping}

    def setup_category_id_and_number(self) -> None:
        """Build sorted category_ids and the id -> index mapping."""
        self.category_ids = list(self.categories.keys())
        self.category_ids.sort()
        # '/m/01s55n' is forced to the end of the ordering
        if '/m/01s55n' in self.category_ids:
            self.category_ids.remove('/m/01s55n')
            self.category_ids.append('/m/01s55n')
        self.category_number = {category_id: i for i, category_id in enumerate(self.category_ids)}
        if self.category_allow_list is not None and self.category_mapping is None \
                and len(self.category_ids) != len(self.category_allow_list):
            warnings.warn('Unexpected number of categories: Mismatch with category_allow_list. '
                          'Make sure all names in category_allow_list exist.')

    def clean_up_annotations_and_image_descriptions(self) -> None:
        """Drop annotations/descriptions whose image id is no longer in image_ids."""
        image_id_set = set(self.image_ids)
        self.annotations = {k: v for k, v in self.annotations.items() if k in image_id_set}
        self.image_descriptions = {k: v for k, v in self.image_descriptions.items() if k in image_id_set}

    @staticmethod
    def filter_object_number(all_annotations: Dict[str, List[Annotation]], min_object_area: float,
                             min_objects_per_image: int, max_objects_per_image: int) -> Dict[str, List[Annotation]]:
        """Keep images whose count of sufficiently-large objects is within bounds."""
        filtered = {}
        for image_id, annotations in all_annotations.items():
            annotations_with_min_area = [a for a in annotations if a.area > min_object_area]
            if min_objects_per_image <= len(annotations_with_min_area) <= max_objects_per_image:
                filtered[image_id] = annotations_with_min_area
        return filtered

    def __len__(self):
        return len(self.image_ids)

    def __getitem__(self, n: int) -> Dict[str, Any]:
        """Assemble one example; only the entries named in self.keys are returned."""
        image_id = self.get_image_id(n)
        sample = self.get_image_description(image_id)
        sample['annotations'] = self.get_annotation(image_id)
        if 'image' in self.keys:
            sample['image_path'] = str(self.get_image_path(image_id))
            sample['image'] = self.load_image_from_disk(sample['image_path'])
            sample['image'] = convert_pil_to_tensor(sample['image'])
            sample['crop_bbox'], sample['flipped'], sample['image'] = self.image_transform(sample['image'])
            sample['image'] = sample['image'].permute(1, 2, 0)
        for conditional, builder in self.conditional_builders.items():
            if conditional in self.keys:
                sample[conditional] = builder.build(sample['annotations'], sample['crop_bbox'], sample['flipped'])
        if self.keys:
            # only return specified keys
            sample = {key: sample[key] for key in self.keys}
        return sample

    def get_image_id(self, no: int) -> str:
        return self.image_ids[no]

    def get_annotation(self, image_id: str) -> str:
        return self.annotations[image_id]

    def get_textual_label_for_category_id(self, category_id: str) -> str:
        return self.categories[category_id].name

    def get_textual_label_for_category_no(self, category_no: int) -> str:
        return self.categories[self.get_category_id(category_no)].name

    def get_category_number(self, category_id: str) -> int:
        return self.category_number[category_id]

    def get_category_id(self, category_no: int) -> str:
        return self.category_ids[category_no]

    def get_image_description(self, image_id: str) -> Dict[str, Any]:
        # subclass responsibility
        raise NotImplementedError()

    def get_path_structure(self):
        # subclass responsibility
        raise NotImplementedError

    def get_image_path(self, image_id: str) -> Path:
        # subclass responsibility
        raise NotImplementedError
| 10,226 | 45.69863 | 122 | py |
taming-transformers | taming-transformers-master/taming/data/sflckr.py | import os
import numpy as np
import cv2
import albumentations
from PIL import Image
from torch.utils.data import Dataset
class SegmentationBase(Dataset):
    """Paired image / segmentation-mask dataset.

    ``data_csv`` lists image paths relative to ``data_root``; the matching mask
    lives at the same relative path under ``segmentation_root`` with a ``.png``
    extension. Each example carries 'image' (float32 HWC in [-1, 1]) and
    'segmentation' (one-hot over ``n_labels``).
    """
    def __init__(self,
                 data_csv, data_root, segmentation_root,
                 size=None, random_crop=False, interpolation="bicubic",
                 n_labels=182, shift_segmentation=False,
                 ):
        self.n_labels = n_labels
        self.shift_segmentation = shift_segmentation
        self.data_csv = data_csv
        self.data_root = data_root
        self.segmentation_root = segmentation_root
        with open(self.data_csv, "r") as f:
            self.image_paths = f.read().splitlines()
        self._length = len(self.image_paths)
        self.labels = {
            "relative_file_path_": [l for l in self.image_paths],
            "file_path_": [os.path.join(self.data_root, l)
                           for l in self.image_paths],
            "segmentation_path_": [os.path.join(self.segmentation_root, l.replace(".jpg", ".png"))
                                   for l in self.image_paths]
        }
        # a non-positive size disables rescaling/cropping entirely
        size = None if size is not None and size<=0 else size
        self.size = size
        if self.size is not None:
            self.interpolation = interpolation
            self.interpolation = {
                "nearest": cv2.INTER_NEAREST,
                "bilinear": cv2.INTER_LINEAR,
                "bicubic": cv2.INTER_CUBIC,
                "area": cv2.INTER_AREA,
                "lanczos": cv2.INTER_LANCZOS4}[self.interpolation]
            self.image_rescaler = albumentations.SmallestMaxSize(max_size=self.size,
                                                                 interpolation=self.interpolation)
            # masks use nearest-neighbour so label ids are never interpolated
            self.segmentation_rescaler = albumentations.SmallestMaxSize(max_size=self.size,
                                                                        interpolation=cv2.INTER_NEAREST)
            self.center_crop = not random_crop
            if self.center_crop:
                self.cropper = albumentations.CenterCrop(height=self.size, width=self.size)
            else:
                self.cropper = albumentations.RandomCrop(height=self.size, width=self.size)
            self.preprocessor = self.cropper
    def __len__(self):
        return self._length
    def __getitem__(self, i):
        example = dict((k, self.labels[k][i]) for k in self.labels)
        image = Image.open(example["file_path_"])
        if not image.mode == "RGB":
            image = image.convert("RGB")
        image = np.array(image).astype(np.uint8)
        if self.size is not None:
            image = self.image_rescaler(image=image)["image"]
        segmentation = Image.open(example["segmentation_path_"])
        assert segmentation.mode == "L", segmentation.mode
        segmentation = np.array(segmentation).astype(np.uint8)
        if self.shift_segmentation:
            # used to support segmentations containing unlabeled==255 label
            # (uint8 arithmetic wraps 255 -> 0)
            segmentation = segmentation+1
        if self.size is not None:
            segmentation = self.segmentation_rescaler(image=segmentation)["image"]
        if self.size is not None:
            # crop image and mask together so they stay pixel-aligned
            processed = self.preprocessor(image=image,
                                          mask=segmentation
                                          )
        else:
            processed = {"image": image,
                         "mask": segmentation
                         }
        example["image"] = (processed["image"]/127.5 - 1.0).astype(np.float32)
        segmentation = processed["mask"]
        onehot = np.eye(self.n_labels)[segmentation]
        example["segmentation"] = onehot
        return example
class Examples(SegmentationBase):
    """S-FLCKR example pairs bundled with the repository under data/."""
    def __init__(self, size=None, random_crop=False, interpolation="bicubic"):
        super().__init__(
            data_csv="data/sflckr_examples.txt",
            data_root="data/sflckr_images",
            segmentation_root="data/sflckr_segmentations",
            size=size,
            random_crop=random_crop,
            interpolation=interpolation,
        )
| 4,097 | 43.543478 | 104 | py |
taming-transformers | taming-transformers-master/taming/data/imagenet.py | import os, tarfile, glob, shutil
import yaml
import numpy as np
from tqdm import tqdm
from PIL import Image
import albumentations
from omegaconf import OmegaConf
from torch.utils.data import Dataset
from taming.data.base import ImagePaths
from taming.util import download, retrieve
import taming.data.utils as bdu
def give_synsets_from_indices(indices, path_to_yaml="data/imagenet_idx_to_synset.yaml"):
synsets = []
with open(path_to_yaml) as f:
di2s = yaml.load(f)
for idx in indices:
synsets.append(str(di2s[idx]))
print("Using {} different synsets for construction of Restriced Imagenet.".format(len(synsets)))
return synsets
def str_to_indices(string):
    """Expects a string in the format '32-123, 256, 280-321'"""
    assert not string.endswith(","), "provided string '{}' ends with a comma, pls remove it".format(string)
    collected = []
    # each comma-separated part is either a single index or a half-open range
    for part in string.split(","):
        bounds = part.split("-")
        assert len(bounds) > 0
        if len(bounds) == 1:
            collected.append(int(bounds[0]))
        else:
            # note: the upper bound is exclusive, matching range() semantics
            collected.extend(range(int(bounds[0]), int(bounds[1])))
    return sorted(collected)
class ImageNetBase(Dataset):
    """Shared skeleton for the ImageNet train/validation datasets.

    Subclasses implement ``_prepare()`` to download/extract the data and to set
    ``self.root``, ``self.datadir``, ``self.txt_filelist`` and
    ``self.random_crop`` before ``_load()`` builds the ImagePaths backend.
    """
    def __init__(self, config=None):
        self.config = config or OmegaConf.create()
        if not type(self.config)==dict:
            self.config = OmegaConf.to_container(self.config)
        self._prepare()
        self._prepare_synset_to_human()
        self._prepare_idx_to_synset()
        self._load()
    def __len__(self):
        return len(self.data)
    def __getitem__(self, i):
        return self.data[i]
    def _prepare(self):
        # subclass hook; must set root/datadir/txt_filelist/random_crop
        raise NotImplementedError()
    def _filter_relpaths(self, relpaths):
        # file names excluded from the dataset
        ignore = set([
            "n06596364_9591.JPEG",
        ])
        relpaths = [rpath for rpath in relpaths if not rpath.split("/")[-1] in ignore]
        if "sub_indices" in self.config:
            # restrict to the synsets named by the "sub_indices" config string
            indices = str_to_indices(self.config["sub_indices"])
            synsets = give_synsets_from_indices(indices, path_to_yaml=self.idx2syn)  # returns a list of strings
            files = []
            for rpath in relpaths:
                syn = rpath.split("/")[0]
                if syn in synsets:
                    files.append(rpath)
            return files
        else:
            return relpaths
    def _prepare_synset_to_human(self):
        # download the synset -> human-readable-label table (size-checked)
        SIZE = 2655750
        URL = "https://heibox.uni-heidelberg.de/f/9f28e956cd304264bb82/?dl=1"
        self.human_dict = os.path.join(self.root, "synset_human.txt")
        if (not os.path.exists(self.human_dict) or
                not os.path.getsize(self.human_dict)==SIZE):
            download(URL, self.human_dict)
    def _prepare_idx_to_synset(self):
        # download the class-index -> synset yaml used by _filter_relpaths
        URL = "https://heibox.uni-heidelberg.de/f/d835d5b6ceda4d3aa910/?dl=1"
        self.idx2syn = os.path.join(self.root, "index_synset.yaml")
        if (not os.path.exists(self.idx2syn)):
            download(URL, self.idx2syn)
    def _load(self):
        # read the prepared file list, derive label arrays, build ImagePaths
        with open(self.txt_filelist, "r") as f:
            self.relpaths = f.read().splitlines()
            l1 = len(self.relpaths)
            self.relpaths = self._filter_relpaths(self.relpaths)
            print("Removed {} files from filelist during filtering.".format(l1 - len(self.relpaths)))
        # first path component is the synset directory
        self.synsets = [p.split("/")[0] for p in self.relpaths]
        self.abspaths = [os.path.join(self.datadir, p) for p in self.relpaths]
        unique_synsets = np.unique(self.synsets)
        class_dict = dict((synset, i) for i, synset in enumerate(unique_synsets))
        self.class_labels = [class_dict[s] for s in self.synsets]
        with open(self.human_dict, "r") as f:
            human_dict = f.read().splitlines()
            human_dict = dict(line.split(maxsplit=1) for line in human_dict)
        self.human_labels = [human_dict[s] for s in self.synsets]
        labels = {
            "relpath": np.array(self.relpaths),
            "synsets": np.array(self.synsets),
            "class_label": np.array(self.class_labels),
            "human_label": np.array(self.human_labels),
        }
        self.data = ImagePaths(self.abspaths,
                               labels=labels,
                               size=retrieve(self.config, "size", default=0),
                               random_crop=self.random_crop)
class ImageNetTrain(ImageNetBase):
    """ImageNet-2012 training split; downloads the tar via academictorrents on
    first use, extracts it (plus per-synset sub-tars) into the XDG cache and
    writes a sorted file list."""
    NAME = "ILSVRC2012_train"
    URL = "http://www.image-net.org/challenges/LSVRC/2012/"
    AT_HASH = "a306397ccf9c2ead27155983c254227c0fd938e2"
    FILES = [
        "ILSVRC2012_img_train.tar",
    ]
    # expected byte size per entry of FILES, used to detect partial downloads
    SIZES = [
        147897477120,
    ]
    def _prepare(self):
        self.random_crop = retrieve(self.config, "ImageNetTrain/random_crop",
                                    default=True)
        cachedir = os.environ.get("XDG_CACHE_HOME", os.path.expanduser("~/.cache"))
        self.root = os.path.join(cachedir, "autoencoders/data", self.NAME)
        self.datadir = os.path.join(self.root, "data")
        self.txt_filelist = os.path.join(self.root, "filelist.txt")
        self.expected_length = 1281167
        if not bdu.is_prepared(self.root):
            # prep
            print("Preparing dataset {} in {}".format(self.NAME, self.root))
            datadir = self.datadir
            if not os.path.exists(datadir):
                path = os.path.join(self.root, self.FILES[0])
                # (re)download when missing or size mismatches
                if not os.path.exists(path) or not os.path.getsize(path)==self.SIZES[0]:
                    import academictorrents as at
                    atpath = at.get(self.AT_HASH, datastore=self.root)
                    assert atpath == path
                print("Extracting {} to {}".format(path, datadir))
                os.makedirs(datadir, exist_ok=True)
                with tarfile.open(path, "r:") as tar:
                    tar.extractall(path=datadir)
                # the train tar contains one tar per synset
                print("Extracting sub-tars.")
                subpaths = sorted(glob.glob(os.path.join(datadir, "*.tar")))
                for subpath in tqdm(subpaths):
                    subdir = subpath[:-len(".tar")]
                    os.makedirs(subdir, exist_ok=True)
                    with tarfile.open(subpath, "r:") as tar:
                        tar.extractall(path=subdir)
            filelist = glob.glob(os.path.join(datadir, "**", "*.JPEG"))
            filelist = [os.path.relpath(p, start=datadir) for p in filelist]
            filelist = sorted(filelist)
            filelist = "\n".join(filelist)+"\n"
            with open(self.txt_filelist, "w") as f:
                f.write(filelist)
            bdu.mark_prepared(self.root)
class ImageNetValidation(ImageNetBase):
    """ImageNet-2012 validation split; downloads the tar via academictorrents,
    then reorganizes the flat validation images into per-synset folders using
    the validation_synset.txt mapping."""
    NAME = "ILSVRC2012_validation"
    URL = "http://www.image-net.org/challenges/LSVRC/2012/"
    AT_HASH = "5d6d0df7ed81efd49ca99ea4737e0ae5e3a5f2e5"
    VS_URL = "https://heibox.uni-heidelberg.de/f/3e0f6e9c624e45f2bd73/?dl=1"
    FILES = [
        "ILSVRC2012_img_val.tar",
        "validation_synset.txt",
    ]
    # expected byte size per entry of FILES, used to detect partial downloads
    SIZES = [
        6744924160,
        1950000,
    ]
    def _prepare(self):
        # center crops by default for evaluation
        self.random_crop = retrieve(self.config, "ImageNetValidation/random_crop",
                                    default=False)
        cachedir = os.environ.get("XDG_CACHE_HOME", os.path.expanduser("~/.cache"))
        self.root = os.path.join(cachedir, "autoencoders/data", self.NAME)
        self.datadir = os.path.join(self.root, "data")
        self.txt_filelist = os.path.join(self.root, "filelist.txt")
        self.expected_length = 50000
        if not bdu.is_prepared(self.root):
            # prep
            print("Preparing dataset {} in {}".format(self.NAME, self.root))
            datadir = self.datadir
            if not os.path.exists(datadir):
                path = os.path.join(self.root, self.FILES[0])
                if not os.path.exists(path) or not os.path.getsize(path)==self.SIZES[0]:
                    import academictorrents as at
                    atpath = at.get(self.AT_HASH, datastore=self.root)
                    assert atpath == path
                print("Extracting {} to {}".format(path, datadir))
                os.makedirs(datadir, exist_ok=True)
                with tarfile.open(path, "r:") as tar:
                    tar.extractall(path=datadir)
                # image-filename -> synset mapping for the reorganization below
                vspath = os.path.join(self.root, self.FILES[1])
                if not os.path.exists(vspath) or not os.path.getsize(vspath)==self.SIZES[1]:
                    download(self.VS_URL, vspath)
                with open(vspath, "r") as f:
                    synset_dict = f.read().splitlines()
                    synset_dict = dict(line.split() for line in synset_dict)
                print("Reorganizing into synset folders")
                synsets = np.unique(list(synset_dict.values()))
                for s in synsets:
                    os.makedirs(os.path.join(datadir, s), exist_ok=True)
                for k, v in synset_dict.items():
                    src = os.path.join(datadir, k)
                    dst = os.path.join(datadir, v)
                    shutil.move(src, dst)
            filelist = glob.glob(os.path.join(datadir, "**", "*.JPEG"))
            filelist = [os.path.relpath(p, start=datadir) for p in filelist]
            filelist = sorted(filelist)
            filelist = "\n".join(filelist)+"\n"
            with open(self.txt_filelist, "w") as f:
                f.write(filelist)
            bdu.mark_prepared(self.root)
def get_preprocessor(size=None, random_crop=False, additional_targets=None,
                     crop_size=None):
    """Build an albumentations pipeline.

    With a positive ``size``: rescale to smallest side ``size``, crop to
    ``size`` (random or center) and horizontally flip. With only a positive
    ``crop_size``: crop alone. Otherwise return an identity callable.
    """
    if size is not None and size > 0:
        if random_crop:
            crop = albumentations.RandomCrop(height=size, width=size)
        else:
            crop = albumentations.CenterCrop(height=size, width=size)
        steps = [
            albumentations.SmallestMaxSize(max_size=size),
            crop,
            albumentations.HorizontalFlip(),
        ]
        return albumentations.Compose(steps,
                                      additional_targets=additional_targets)
    if crop_size is not None and crop_size > 0:
        if random_crop:
            crop = albumentations.RandomCrop(height=crop_size, width=crop_size)
        else:
            crop = albumentations.CenterCrop(height=crop_size, width=crop_size)
        return albumentations.Compose([crop],
                                      additional_targets=additional_targets)
    # no-op preprocessor: hand back the inputs untouched
    return lambda **kwargs: kwargs
def rgba_to_depth(x):
    """Reinterpret the four uint8 RGBA channels of ``x`` (H, W, 4) as a single
    float32 depth channel and return a contiguous (H, W) array."""
    assert x.dtype == np.uint8
    assert len(x.shape) == 3 and x.shape[2] == 4
    # view the copied bytes as float32 (4 uint8 -> 1 float32 per pixel)
    depth = x.copy().view(np.float32)
    depth = depth.reshape(x.shape[:2])
    return np.ascontiguousarray(depth)
class BaseWithDepth(Dataset):
    """Wraps a base image dataset and attaches a per-image depth map.

    Subclasses provide ``get_base_dset()`` and ``get_depth_path(example)``.
    Depth maps are RGBA pngs whose bytes reinterpret as float32 (see
    rgba_to_depth) and are normalized to [-1, 1].
    """
    DEFAULT_DEPTH_ROOT="data/imagenet_depth"
    def __init__(self, config=None, size=None, random_crop=False,
                 crop_size=None, root=None):
        self.config = config
        self.base_dset = self.get_base_dset()
        self.preprocessor = get_preprocessor(
            size=size,
            crop_size=crop_size,
            random_crop=random_crop,
            additional_targets={"depth": "image"})
        self.crop_size = crop_size
        if self.crop_size is not None:
            # upscaler for images smaller than the crop window
            self.rescaler = albumentations.Compose(
                [albumentations.SmallestMaxSize(max_size = self.crop_size)],
                additional_targets={"depth": "image"})
        if root is not None:
            # shadows the class attribute on this instance only
            self.DEFAULT_DEPTH_ROOT = root
    def __len__(self):
        return len(self.base_dset)
    def preprocess_depth(self, path):
        # decode float32 depth from RGBA bytes, then min-max normalize to [-1, 1]
        rgba = np.array(Image.open(path))
        depth = rgba_to_depth(rgba)
        depth = (depth - depth.min())/max(1e-8, depth.max()-depth.min())
        depth = 2.0*depth-1.0
        return depth
    def __getitem__(self, i):
        e = self.base_dset[i]
        e["depth"] = self.preprocess_depth(self.get_depth_path(e))
        # up if necessary
        h,w,c = e["image"].shape
        if self.crop_size and min(h,w) < self.crop_size:
            # have to upscale to be able to crop - this just uses bilinear
            out = self.rescaler(image=e["image"], depth=e["depth"])
            e["image"] = out["image"]
            e["depth"] = out["depth"]
        # joint transform keeps image and depth aligned
        transformed = self.preprocessor(image=e["image"], depth=e["depth"])
        e["image"] = transformed["image"]
        e["depth"] = transformed["depth"]
        return e
class ImageNetTrainWithDepth(BaseWithDepth):
    """Training split paired with precomputed depth maps stored under
    DEFAULT_DEPTH_ROOT/train."""
    # default to random_crop=True
    def __init__(self, random_crop=True, sub_indices=None, **kwargs):
        self.sub_indices = sub_indices
        super().__init__(random_crop=random_crop, **kwargs)
    def get_base_dset(self):
        if self.sub_indices is not None:
            return ImageNetTrain({"sub_indices": self.sub_indices})
        return ImageNetTrain()
    def get_depth_path(self, e):
        stem, _ = os.path.splitext(e["relpath"])
        return os.path.join(self.DEFAULT_DEPTH_ROOT, "train", stem + ".png")
class ImageNetValidationWithDepth(BaseWithDepth):
    """Validation split paired with precomputed depth maps stored under
    DEFAULT_DEPTH_ROOT/val."""
    def __init__(self, sub_indices=None, **kwargs):
        self.sub_indices = sub_indices
        super().__init__(**kwargs)
    def get_base_dset(self):
        if self.sub_indices is not None:
            return ImageNetValidation({"sub_indices": self.sub_indices})
        return ImageNetValidation()
    def get_depth_path(self, e):
        stem, _ = os.path.splitext(e["relpath"])
        return os.path.join(self.DEFAULT_DEPTH_ROOT, "val", stem + ".png")
class RINTrainWithDepth(ImageNetTrainWithDepth):
    # "Restricted ImageNet" training subset: fixed class-index ranges + depth
    def __init__(self, config=None, size=None, random_crop=True, crop_size=None):
        sub_indices = "30-32, 33-37, 151-268, 281-285, 80-100, 365-382, 389-397, 118-121, 300-319"
        super().__init__(config=config, size=size, random_crop=random_crop,
                         sub_indices=sub_indices, crop_size=crop_size)
class RINValidationWithDepth(ImageNetValidationWithDepth):
    # "Restricted ImageNet" validation subset: same class ranges as training
    def __init__(self, config=None, size=None, random_crop=False, crop_size=None):
        sub_indices = "30-32, 33-37, 151-268, 281-285, 80-100, 365-382, 389-397, 118-121, 300-319"
        super().__init__(config=config, size=size, random_crop=random_crop,
                         sub_indices=sub_indices, crop_size=crop_size)
class DRINExamples(Dataset):
    """Bundled depth+image example pairs; relative paths listed in
    data/drin_examples.txt resolve into data/drin_images and data/drin_depth."""
    def __init__(self):
        self.preprocessor = get_preprocessor(size=256, additional_targets={"depth": "image"})
        with open("data/drin_examples.txt", "r") as f:
            relpaths = f.read().splitlines()
        self.image_paths = [os.path.join("data/drin_images",
                                         relpath) for relpath in relpaths]
        self.depth_paths = [os.path.join("data/drin_depth",
                                         relpath.replace(".JPEG", ".png")) for relpath in relpaths]
    def __len__(self):
        return len(self.image_paths)
    def preprocess_image(self, image_path):
        # load as RGB uint8, apply the shared preprocessor, scale to [-1, 1]
        image = Image.open(image_path)
        if not image.mode == "RGB":
            image = image.convert("RGB")
        image = np.array(image).astype(np.uint8)
        image = self.preprocessor(image=image)["image"]
        image = (image/127.5 - 1.0).astype(np.float32)
        return image
    def preprocess_depth(self, path):
        # RGBA bytes reinterpreted as float32 depth, min-max scaled to [-1, 1]
        rgba = np.array(Image.open(path))
        depth = rgba_to_depth(rgba)
        depth = (depth - depth.min())/max(1e-8, depth.max()-depth.min())
        depth = 2.0*depth-1.0
        return depth
    def __getitem__(self, i):
        e = dict()
        e["image"] = self.preprocess_image(self.image_paths[i])
        e["depth"] = self.preprocess_depth(self.depth_paths[i])
        # joint transform keeps image and depth aligned
        transformed = self.preprocessor(image=e["image"], depth=e["depth"])
        e["image"] = transformed["image"]
        e["depth"] = transformed["depth"]
        return e
def imscale(x, factor, keepshapes=False, keepmode="bicubic"):
    """Downscale a float image ``x`` (HWC, values in [-1, 1]) by integer ``factor``.

    The downscale itself is always bicubic (via uint8/PIL round-trip); with
    ``keepshapes=True`` the result is resized back to the original resolution
    using ``keepmode`` interpolation. ``factor`` None or 1 returns ``x`` as is.
    """
    if factor is None or factor==1:
        return x

    dtype = x.dtype
    assert dtype in [np.float32, np.float64]
    assert x.min() >= -1
    assert x.max() <= 1

    keepmode = {"nearest": Image.NEAREST, "bilinear": Image.BILINEAR,
                "bicubic": Image.BICUBIC}[keepmode]

    # [-1, 1] float -> [0, 255] uint8 -> PIL for the resize
    lr = (x+1.0)*127.5
    lr = lr.clip(0,255).astype(np.uint8)
    lr = Image.fromarray(lr)

    h, w, _ = x.shape
    nh = h//factor
    nw = w//factor
    assert nh > 0 and nw > 0, (nh, nw)

    lr = lr.resize((nw,nh), Image.BICUBIC)
    if keepshapes:
        lr = lr.resize((w,h), keepmode)
    # back to float in [-1, 1] with the original dtype
    lr = np.array(lr)/127.5-1.0
    lr = lr.astype(dtype)

    return lr
class ImageNetScale(Dataset):
    """Super-resolution-style wrapper around an ImageNet split.

    Optionally rescales/crops the base image and, when ``up_factor`` is set,
    adds a downscaled-then-upscaled low-resolution copy as ``example["lr"]``,
    transformed in lockstep with the image.
    """
    def __init__(self, size=None, crop_size=None, random_crop=False,
                 up_factor=None, hr_factor=None, keep_mode="bicubic"):
        self.base = self.get_base()
        self.size = size
        self.crop_size = crop_size if crop_size is not None else self.size
        self.random_crop = random_crop
        self.up_factor = up_factor
        self.hr_factor = hr_factor
        self.keep_mode = keep_mode
        transforms = list()
        if self.size is not None and self.size > 0:
            rescaler = albumentations.SmallestMaxSize(max_size = self.size)
            self.rescaler = rescaler
            transforms.append(rescaler)
        if self.crop_size is not None and self.crop_size > 0:
            if len(transforms) == 0:
                # no size given: still need a rescaler for too-small images
                self.rescaler = albumentations.SmallestMaxSize(max_size = self.crop_size)
            if not self.random_crop:
                cropper = albumentations.CenterCrop(height=self.crop_size,width=self.crop_size)
            else:
                cropper = albumentations.RandomCrop(height=self.crop_size,width=self.crop_size)
            transforms.append(cropper)
        if len(transforms) > 0:
            if self.up_factor is not None:
                # "lr" must receive the same crop as the image
                additional_targets = {"lr": "image"}
            else:
                additional_targets = None
            self.preprocessor = albumentations.Compose(transforms,
                                                       additional_targets=additional_targets)
        else:
            self.preprocessor = lambda **kwargs: kwargs
    def __len__(self):
        return len(self.base)
    def __getitem__(self, i):
        example = self.base[i]
        image = example["image"]
        # adjust resolution
        image = imscale(image, self.hr_factor, keepshapes=False)
        h,w,c = image.shape
        if self.crop_size and min(h,w) < self.crop_size:
            # have to upscale to be able to crop - this just uses bilinear
            image = self.rescaler(image=image)["image"]
        if self.up_factor is None:
            image = self.preprocessor(image=image)["image"]
            example["image"] = image
        else:
            # low-resolution counterpart at the original shape
            lr = imscale(image, self.up_factor, keepshapes=True,
                         keepmode=self.keep_mode)
            out = self.preprocessor(image=image, lr=lr)
            example["image"] = out["image"]
            example["lr"] = out["lr"]
        return example
class ImageNetScaleTrain(ImageNetScale):
    """Training variant of ImageNetScale; crops randomly unless told otherwise."""
    def __init__(self, random_crop=True, **kwargs):
        kwargs["random_crop"] = random_crop
        super().__init__(**kwargs)
    def get_base(self):
        return ImageNetTrain()
class ImageNetScaleValidation(ImageNetScale):
    # validation variant; inherits the default (center-crop) behaviour
    def get_base(self):
        return ImageNetValidation()
from skimage.feature import canny
from skimage.color import rgb2gray
class ImageNetEdges(ImageNetScale):
    """ImageNetScale variant whose low-resolution conditioning is a Canny edge
    map: ``example["lr"]`` is a 3-channel binary edge image in the image's shape."""
    def __init__(self, up_factor=1, **kwargs):
        # Fix: forward the caller's value instead of hard-coding up_factor=1,
        # which silently ignored the parameter. A non-None up_factor keeps the
        # base class registering "lr" as an additional transform target, which
        # __getitem__ below relies on.
        super().__init__(up_factor=up_factor, **kwargs)

    def __getitem__(self, i):
        example = self.base[i]
        image = example["image"]
        h,w,c = image.shape
        if self.crop_size and min(h,w) < self.crop_size:
            # have to upscale to be able to crop - this just uses bilinear
            image = self.rescaler(image=image)["image"]

        # Canny edges of the grayscale image, replicated to three channels so
        # the edge map can go through the same preprocessing as the image
        lr = canny(rgb2gray(image), sigma=2)
        lr = lr.astype(np.float32)
        lr = lr[:,:,None][:,:,[0,0,0]]

        out = self.preprocessor(image=image, lr=lr)
        example["image"] = out["image"]
        example["lr"] = out["lr"]
        return example
class ImageNetEdgesTrain(ImageNetEdges):
    # training split; random crops by default
    def __init__(self, random_crop=True, **kwargs):
        super().__init__(random_crop=random_crop, **kwargs)
    def get_base(self):
        return ImageNetTrain()
class ImageNetEdgesValidation(ImageNetEdges):
    # validation split; inherits the default (center-crop) behaviour
    def get_base(self):
        return ImageNetValidation()
| 20,815 | 36.237925 | 112 | py |
taming-transformers | taming-transformers-master/taming/data/coco.py | import os
import json
import albumentations
import numpy as np
from PIL import Image
from tqdm import tqdm
from torch.utils.data import Dataset
from taming.data.sflckr import SegmentationBase # for examples included in repo
class Examples(SegmentationBase):
    """COCO example pairs bundled with the repository under data/ (183 labels,
    segmentations shifted so 'unlabeled' maps to 0)."""
    def __init__(self, size=256, random_crop=False, interpolation="bicubic"):
        super().__init__(
            data_csv="data/coco_examples.txt",
            data_root="data/coco_images",
            segmentation_root="data/coco_segmentations",
            size=size,
            random_crop=random_crop,
            interpolation=interpolation,
            n_labels=183,
            shift_segmentation=True,
        )
class CocoBase(Dataset):
    """needed for (image, caption, segmentation) pairs"""
    def __init__(self, size=None, dataroot="", datajson="", onehot_segmentation=False, use_stuffthing=False,
                 crop_size=None, force_no_crop=False, given_files=None):
        """
        Args:
            size: target smallest-side length for rescaling.
            dataroot: directory holding the images referenced by the json.
            datajson: path to captions_train2017.json or captions_val2017.json.
            onehot_segmentation: return the mask one-hot encoded instead of RGB
                (only valid together with use_stuffthing).
            use_stuffthing: use the stuff+thing pixelmaps instead of stuff-only.
            crop_size: crop edge length; defaults to ``size``.
            force_no_crop: replace rescale+crop with a plain resize.
            given_files: optional whitelist of segmentation png filenames.
        """
        self.split = self.get_split()
        self.size = size
        if crop_size is None:
            self.crop_size = size
        else:
            self.crop_size = crop_size
        self.onehot = onehot_segmentation  # return segmentation as rgb or one hot
        self.stuffthing = use_stuffthing  # include thing in segmentation
        if self.onehot and not self.stuffthing:
            # Fix: raise NotImplementedError; the previous `raise NotImplemented(...)`
            # raised a TypeError, because NotImplemented is a constant, not callable.
            raise NotImplementedError("One hot mode is only supported for the "
                                      "stuffthings version because labels are stored "
                                      "a bit different.")
        data_json = datajson
        with open(data_json) as json_file:
            self.json_data = json.load(json_file)
            self.img_id_to_captions = dict()
            self.img_id_to_filepath = dict()
            self.img_id_to_segmentation_filepath = dict()
        assert data_json.split("/")[-1] in ["captions_train2017.json",
                                            "captions_val2017.json"]
        # segmentation pngs live under a split-specific prefix
        if self.stuffthing:
            self.segmentation_prefix = (
                "data/cocostuffthings/val2017" if
                data_json.endswith("captions_val2017.json") else
                "data/cocostuffthings/train2017")
        else:
            self.segmentation_prefix = (
                "data/coco/annotations/stuff_val2017_pixelmaps" if
                data_json.endswith("captions_val2017.json") else
                "data/coco/annotations/stuff_train2017_pixelmaps")
        imagedirs = self.json_data["images"]
        self.labels = {"image_ids": list()}
        for imgdir in tqdm(imagedirs, desc="ImgToPath"):
            self.img_id_to_filepath[imgdir["id"]] = os.path.join(dataroot, imgdir["file_name"])
            self.img_id_to_captions[imgdir["id"]] = list()
            pngfilename = imgdir["file_name"].replace("jpg", "png")
            self.img_id_to_segmentation_filepath[imgdir["id"]] = os.path.join(
                self.segmentation_prefix, pngfilename)
            if given_files is not None:
                if pngfilename in given_files:
                    self.labels["image_ids"].append(imgdir["id"])
            else:
                self.labels["image_ids"].append(imgdir["id"])
        capdirs = self.json_data["annotations"]
        for capdir in tqdm(capdirs, desc="ImgToCaptions"):
            # there are in average 5 captions per image
            self.img_id_to_captions[capdir["image_id"]].append(np.array([capdir["caption"]]))
        self.rescaler = albumentations.SmallestMaxSize(max_size=self.size)
        # validation uses deterministic center crops; training crops randomly
        if self.split=="validation":
            self.cropper = albumentations.CenterCrop(height=self.crop_size, width=self.crop_size)
        else:
            self.cropper = albumentations.RandomCrop(height=self.crop_size, width=self.crop_size)
        self.preprocessor = albumentations.Compose(
            [self.rescaler, self.cropper],
            additional_targets={"segmentation": "image"})
        if force_no_crop:
            self.rescaler = albumentations.Resize(height=self.size, width=self.size)
            self.preprocessor = albumentations.Compose(
                [self.rescaler],
                additional_targets={"segmentation": "image"})
    def __len__(self):
        return len(self.labels["image_ids"])
    def preprocess_image(self, image_path, segmentation_path):
        """Load, jointly transform and normalize an (image, segmentation) pair."""
        image = Image.open(image_path)
        if not image.mode == "RGB":
            image = image.convert("RGB")
        image = np.array(image).astype(np.uint8)
        segmentation = Image.open(segmentation_path)
        if not self.onehot and not segmentation.mode == "RGB":
            segmentation = segmentation.convert("RGB")
        segmentation = np.array(segmentation).astype(np.uint8)
        if self.onehot:
            assert self.stuffthing
            # stored in caffe format: unlabeled==255. stuff and thing from
            # 0-181. to be compatible with the labels in
            # https://github.com/nightrome/cocostuff/blob/master/labels.txt
            # we shift stuffthing one to the right and put unlabeled in zero
            # as long as segmentation is uint8 shifting to right handles the
            # latter too
            assert segmentation.dtype == np.uint8
            segmentation = segmentation + 1
        processed = self.preprocessor(image=image, segmentation=segmentation)
        image, segmentation = processed["image"], processed["segmentation"]
        image = (image / 127.5 - 1.0).astype(np.float32)
        if self.onehot:
            assert segmentation.dtype == np.uint8
            # make it one hot
            n_labels = 183
            flatseg = np.ravel(segmentation)
            # Fix: np.bool was removed in NumPy 1.24; the builtin bool is the
            # documented replacement.
            onehot = np.zeros((flatseg.size, n_labels), dtype=bool)
            onehot[np.arange(flatseg.size), flatseg] = True
            onehot = onehot.reshape(segmentation.shape + (n_labels,)).astype(int)
            segmentation = onehot
        else:
            segmentation = (segmentation / 127.5 - 1.0).astype(np.float32)
        return image, segmentation
    def __getitem__(self, i):
        img_path = self.img_id_to_filepath[self.labels["image_ids"][i]]
        seg_path = self.img_id_to_segmentation_filepath[self.labels["image_ids"][i]]
        image, segmentation = self.preprocess_image(img_path, seg_path)
        captions = self.img_id_to_captions[self.labels["image_ids"][i]]
        # randomly draw one of all available captions per image
        caption = captions[np.random.randint(0, len(captions))]
        example = {"image": image,
                   "caption": [str(caption[0])],
                   "segmentation": segmentation,
                   "img_path": img_path,
                   "seg_path": seg_path,
                   "filename_": img_path.split(os.sep)[-1]
                   }
        return example
class CocoImagesAndCaptionsTrain(CocoBase):
    """returns a pair of (image, caption)"""
    def __init__(self, size, onehot_segmentation=False, use_stuffthing=False, crop_size=None, force_no_crop=False):
        super().__init__(
            size=size,
            dataroot="data/coco/train2017",
            datajson="data/coco/annotations/captions_train2017.json",
            onehot_segmentation=onehot_segmentation,
            use_stuffthing=use_stuffthing,
            crop_size=crop_size,
            force_no_crop=force_no_crop,
        )
    def get_split(self):
        return "train"
class CocoImagesAndCaptionsValidation(CocoBase):
    """returns a pair of (image, caption)"""
    def __init__(self, size, onehot_segmentation=False, use_stuffthing=False, crop_size=None, force_no_crop=False,
                 given_files=None):
        super().__init__(
            size=size,
            dataroot="data/coco/val2017",
            datajson="data/coco/annotations/captions_val2017.json",
            onehot_segmentation=onehot_segmentation,
            use_stuffthing=use_stuffthing,
            crop_size=crop_size,
            force_no_crop=force_no_crop,
            given_files=given_files,
        )
    def get_split(self):
        return "validation"
| 8,121 | 44.887006 | 115 | py |
taming-transformers | taming-transformers-master/taming/data/image_transforms.py | import random
import warnings
from typing import Union
import torch
from torch import Tensor
from torchvision.transforms import RandomCrop, functional as F, CenterCrop, RandomHorizontalFlip, PILToTensor
from torchvision.transforms.functional import _get_image_size as get_image_size
from taming.data.helper_types import BoundingBox, Image
# module-level instance so the transform is constructed only once
pil_to_tensor = PILToTensor()
def convert_pil_to_tensor(image: Image) -> Tensor:
    """Convert a PIL image to a tensor via PILToTensor, suppressing the
    non-writable-array UserWarning PyTorch emits for some PIL buffers."""
    with warnings.catch_warnings():
        # to filter PyTorch UserWarning as described here: https://github.com/pytorch/vision/issues/2194
        warnings.simplefilter("ignore")
        return pil_to_tensor(image)
class RandomCrop1dReturnCoordinates(RandomCrop):
    def forward(self, img: Image) -> (BoundingBox, Image):
        """
        Additionally to cropping, returns the relative coordinates of the crop bounding box.
        Args:
            img (PIL Image or Tensor): Image to be cropped.

        Returns:
            Bounding box: x0, y0, w, h (all relative to the padded image size)
            PIL Image or Tensor: Cropped image.

        Based on:
        torchvision.transforms.RandomCrop, torchvision 1.7.0
        """
        if self.padding is not None:
            img = F.pad(img, self.padding, self.fill, self.padding_mode)

        width, height = get_image_size(img)

        # pad the width if needed
        if self.pad_if_needed and width < self.size[1]:
            padding = [self.size[1] - width, 0]
            img = F.pad(img, padding, self.fill, self.padding_mode)
        # pad the height if needed
        if self.pad_if_needed and height < self.size[0]:
            padding = [0, self.size[0] - height]
            img = F.pad(img, padding, self.fill, self.padding_mode)
        i, j, h, w = self.get_params(img, self.size)
        # NOTE(review): bbox is relative to the pre-pad_if_needed width/height;
        # confirm callers never combine pad_if_needed with the returned bbox
        bbox = (j / width, i / height, w / width, h / height)  # x0, y0, w, h
        return bbox, F.crop(img, i, j, h, w)
class Random2dCropReturnCoordinates(torch.nn.Module):
    """
    Additionally to cropping, returns the relative coordinates of the crop bounding box.
    Args:
        img (PIL Image or Tensor): Image to be cropped.

    Returns:
        Bounding box: x0, y0, w, h
        PIL Image or Tensor: Cropped image.

    Based on:
        torchvision.transforms.RandomCrop, torchvision 1.7.0
    """

    def __init__(self, min_size: int):
        super().__init__()
        self.min_size = min_size

    def forward(self, img: Image) -> (BoundingBox, Image):
        width, height = get_image_size(img)
        max_size = min(width, height)
        # square crop with edge length drawn uniformly in [min_size, max_size]
        if max_size <= self.min_size:
            size = max_size
        else:
            size = random.randint(self.min_size, max_size)
        top = random.randint(0, height - size)
        left = random.randint(0, width - size)
        bbox = left / width, top / height, size / width, size / height
        return bbox, F.crop(img, top, left, size, size)
class CenterCropReturnCoordinates(CenterCrop):
    @staticmethod
    def get_bbox_of_center_crop(width: int, height: int) -> BoundingBox:
        # relative bbox of the largest centered square inside a width x height image
        if width > height:
            w = height / width
            h = 1.0
            x0 = 0.5 - w / 2
            y0 = 0.
        else:
            w = 1.0
            h = width / height
            x0 = 0.
            y0 = 0.5 - h / 2
        return x0, y0, w, h

    def forward(self, img: Union[Image, Tensor]) -> (BoundingBox, Union[Image, Tensor]):
        """
        Additionally to cropping, returns the relative coordinates of the crop bounding box.
        Args:
            img (PIL Image or Tensor): Image to be cropped.

        Returns:
            Bounding box: x0, y0, w, h
            PIL Image or Tensor: Cropped image.

        Based on:
            torchvision.transforms.CenterCrop (version 1.7.0)
        """
        width, height = get_image_size(img)
        return self.get_bbox_of_center_crop(width, height), F.center_crop(img, self.size)
class RandomHorizontalFlipReturn(RandomHorizontalFlip):
    def forward(self, img: Image) -> (bool, Image):
        """
        Additionally to flipping, returns a boolean whether it was flipped or not.
        Args:
            img (PIL Image or Tensor): Image to be flipped.

        Returns:
            flipped: whether the image was flipped or not
            PIL Image or Tensor: Randomly flipped image.

        Based on:
            torchvision.transforms.RandomHorizontalFlip (version 1.7.0)
        """
        # flip with probability self.p
        if torch.rand(1) < self.p:
            return True, F.hflip(img)
        return False, img
| 4,511 | 32.924812 | 109 | py |
taming-transformers | taming-transformers-master/taming/data/conditional_builder/objects_center_points.py | import math
import random
import warnings
from itertools import cycle
from typing import List, Optional, Tuple, Callable
from PIL import Image as pil_image, ImageDraw as pil_img_draw, ImageFont
from more_itertools.recipes import grouper
from taming.data.conditional_builder.utils import COLOR_PALETTE, WHITE, GRAY_75, BLACK, FULL_CROP, filter_annotations, \
additional_parameters_string, horizontally_flip_bbox, pad_list, get_circle_size, get_plot_font_size, \
absolute_bbox, rescale_annotations
from taming.data.helper_types import BoundingBox, Annotation
from taming.data.image_transforms import convert_pil_to_tensor
from torch import LongTensor, Tensor
class ObjectsCenterPointsConditionalBuilder:
    def __init__(self, no_object_classes: int, no_max_objects: int, no_tokens: int, encode_crop: bool,
                 use_group_parameter: bool, use_additional_parameters: bool):
        self.no_object_classes = no_object_classes
        self.no_max_objects = no_max_objects
        self.no_tokens = no_tokens
        self.encode_crop = encode_crop
        # coordinates are discretized onto a no_sections x no_sections grid so
        # that a single token can encode an (x, y) pair
        self.no_sections = int(math.sqrt(self.no_tokens))
        self.use_group_parameter = use_group_parameter
        self.use_additional_parameters = use_additional_parameters
    @property
    def none(self) -> int:
        # token id used as "no object" padding (the last token)
        return self.no_tokens - 1

    @property
    def object_descriptor_length(self) -> int:
        # tokens per object: (representation token, coordinate token)
        return 2

    @property
    def embedding_dim(self) -> int:
        # full conditioning length; an encoded crop adds two corner tokens
        extra_length = 2 if self.encode_crop else 0
        return self.no_max_objects * self.object_descriptor_length + extra_length
def tokenize_coordinates(self, x: float, y: float) -> int:
"""
Express 2d coordinates with one number.
Example: assume self.no_tokens = 16, then no_sections = 4:
0 0 0 0
0 0 # 0
0 0 0 0
0 0 0 x
Then the # position corresponds to token 6, the x position to token 15.
@param x: float in [0, 1]
@param y: float in [0, 1]
@return: discrete tokenized coordinate
"""
x_discrete = int(round(x * (self.no_sections - 1)))
y_discrete = int(round(y * (self.no_sections - 1)))
return y_discrete * self.no_sections + x_discrete
def coordinates_from_token(self, token: int) -> (float, float):
x = token % self.no_sections
y = token // self.no_sections
return x / (self.no_sections - 1), y / (self.no_sections - 1)
def bbox_from_token_pair(self, token1: int, token2: int) -> BoundingBox:
x0, y0 = self.coordinates_from_token(token1)
x1, y1 = self.coordinates_from_token(token2)
return x0, y0, x1 - x0, y1 - y0
def token_pair_from_bbox(self, bbox: BoundingBox) -> Tuple[int, int]:
return self.tokenize_coordinates(bbox[0], bbox[1]), \
self.tokenize_coordinates(bbox[0] + bbox[2], bbox[1] + bbox[3])
def inverse_build(self, conditional: LongTensor) \
-> Tuple[List[Tuple[int, Tuple[float, float]]], Optional[BoundingBox]]:
conditional_list = conditional.tolist()
crop_coordinates = None
if self.encode_crop:
crop_coordinates = self.bbox_from_token_pair(conditional_list[-2], conditional_list[-1])
conditional_list = conditional_list[:-2]
table_of_content = grouper(conditional_list, self.object_descriptor_length)
assert conditional.shape[0] == self.embedding_dim
return [
(object_tuple[0], self.coordinates_from_token(object_tuple[1]))
for object_tuple in table_of_content if object_tuple[0] != self.none
], crop_coordinates
def plot(self, conditional: LongTensor, label_for_category_no: Callable[[int], str], figure_size: Tuple[int, int],
line_width: int = 3, font_size: Optional[int] = None) -> Tensor:
plot = pil_image.new('RGB', figure_size, WHITE)
draw = pil_img_draw.Draw(plot)
circle_size = get_circle_size(figure_size)
font = ImageFont.truetype('/usr/share/fonts/truetype/lato/Lato-Regular.ttf',
size=get_plot_font_size(font_size, figure_size))
width, height = plot.size
description, crop_coordinates = self.inverse_build(conditional)
for (representation, (x, y)), color in zip(description, cycle(COLOR_PALETTE)):
x_abs, y_abs = x * width, y * height
ann = self.representation_to_annotation(representation)
label = label_for_category_no(ann.category_no) + ' ' + additional_parameters_string(ann)
ellipse_bbox = [x_abs - circle_size, y_abs - circle_size, x_abs + circle_size, y_abs + circle_size]
draw.ellipse(ellipse_bbox, fill=color, width=0)
draw.text((x_abs, y_abs), label, anchor='md', fill=BLACK, font=font)
if crop_coordinates is not None:
draw.rectangle(absolute_bbox(crop_coordinates, width, height), outline=GRAY_75, width=line_width)
return convert_pil_to_tensor(plot) / 127.5 - 1.
def object_representation(self, annotation: Annotation) -> int:
modifier = 0
if self.use_group_parameter:
modifier |= 1 * (annotation.is_group_of is True)
if self.use_additional_parameters:
modifier |= 2 * (annotation.is_occluded is True)
modifier |= 4 * (annotation.is_depiction is True)
modifier |= 8 * (annotation.is_inside is True)
return annotation.category_no + self.no_object_classes * modifier
def representation_to_annotation(self, representation: int) -> Annotation:
category_no = representation % self.no_object_classes
modifier = representation // self.no_object_classes
# noinspection PyTypeChecker
return Annotation(
area=None, image_id=None, bbox=None, category_id=None, id=None, source=None, confidence=None,
category_no=category_no,
is_group_of=bool((modifier & 1) * self.use_group_parameter),
is_occluded=bool((modifier & 2) * self.use_additional_parameters),
is_depiction=bool((modifier & 4) * self.use_additional_parameters),
is_inside=bool((modifier & 8) * self.use_additional_parameters)
)
def _crop_encoder(self, crop_coordinates: BoundingBox) -> List[int]:
return list(self.token_pair_from_bbox(crop_coordinates))
def _make_object_descriptors(self, annotations: List[Annotation]) -> List[Tuple[int, ...]]:
object_tuples = [
(self.object_representation(a),
self.tokenize_coordinates(a.bbox[0] + a.bbox[2] / 2, a.bbox[1] + a.bbox[3] / 2))
for a in annotations
]
empty_tuple = (self.none, self.none)
object_tuples = pad_list(object_tuples, empty_tuple, self.no_max_objects)
return object_tuples
def build(self, annotations: List, crop_coordinates: Optional[BoundingBox] = None, horizontal_flip: bool = False) \
-> LongTensor:
if len(annotations) == 0:
warnings.warn('Did not receive any annotations.')
if len(annotations) > self.no_max_objects:
warnings.warn('Received more annotations than allowed.')
annotations = annotations[:self.no_max_objects]
if not crop_coordinates:
crop_coordinates = FULL_CROP
random.shuffle(annotations)
annotations = filter_annotations(annotations, crop_coordinates)
if self.encode_crop:
annotations = rescale_annotations(annotations, FULL_CROP, horizontal_flip)
if horizontal_flip:
crop_coordinates = horizontally_flip_bbox(crop_coordinates)
extra = self._crop_encoder(crop_coordinates)
else:
annotations = rescale_annotations(annotations, crop_coordinates, horizontal_flip)
extra = []
object_tuples = self._make_object_descriptors(annotations)
flattened = [token for tuple_ in object_tuples for token in tuple_] + extra
assert len(flattened) == self.embedding_dim
assert all(0 <= value < self.no_tokens for value in flattened)
return LongTensor(flattened)
| 8,165 | 47.319527 | 120 | py |
taming-transformers | taming-transformers-master/taming/data/conditional_builder/objects_bbox.py | from itertools import cycle
from typing import List, Tuple, Callable, Optional
from PIL import Image as pil_image, ImageDraw as pil_img_draw, ImageFont
from more_itertools.recipes import grouper
from taming.data.image_transforms import convert_pil_to_tensor
from torch import LongTensor, Tensor
from taming.data.helper_types import BoundingBox, Annotation
from taming.data.conditional_builder.objects_center_points import ObjectsCenterPointsConditionalBuilder
from taming.data.conditional_builder.utils import COLOR_PALETTE, WHITE, GRAY_75, BLACK, additional_parameters_string, \
pad_list, get_plot_font_size, absolute_bbox
class ObjectsBoundingBoxConditionalBuilder(ObjectsCenterPointsConditionalBuilder):
    """Conditional builder that encodes each object as a (representation, bbox) triple.

    Extends ObjectsCenterPointsConditionalBuilder by storing two bounding-box
    corner tokens per object instead of a single center token.
    """
    @property
    def object_descriptor_length(self) -> int:
        # One token for the object representation plus two corner tokens.
        return 3
    def _make_object_descriptors(self, annotations: List[Annotation]) -> List[Tuple[int, ...]]:
        """Build (representation, corner1, corner2) triples, padded to no_max_objects."""
        object_triples = [
            (self.object_representation(ann), *self.token_pair_from_bbox(ann.bbox))
            for ann in annotations
        ]
        empty_triple = (self.none, self.none, self.none)
        object_triples = pad_list(object_triples, empty_triple, self.no_max_objects)
        return object_triples
    def inverse_build(self, conditional: LongTensor) -> Tuple[List[Tuple[int, BoundingBox]], Optional[BoundingBox]]:
        """Decode a token sequence into (representation, bbox) pairs plus optional crop bbox."""
        conditional_list = conditional.tolist()
        crop_coordinates = None
        if self.encode_crop:
            # The last two tokens encode the crop rectangle.
            crop_coordinates = self.bbox_from_token_pair(conditional_list[-2], conditional_list[-1])
            conditional_list = conditional_list[:-2]
        # Fix: group by object_descriptor_length instead of a hard-coded 3 so
        # decoding stays consistent with _make_object_descriptors (and with the
        # parent class, which already uses the property here).
        object_triples = grouper(conditional_list, self.object_descriptor_length)
        assert conditional.shape[0] == self.embedding_dim
        return [
            (object_triple[0], self.bbox_from_token_pair(object_triple[1], object_triple[2]))
            for object_triple in object_triples if object_triple[0] != self.none
        ], crop_coordinates
    def plot(self, conditional: LongTensor, label_for_category_no: Callable[[int], str], figure_size: Tuple[int, int],
             line_width: int = 3, font_size: Optional[int] = None) -> Tensor:
        """Render the conditioning (boxes + labels) as an image tensor scaled to [-1, 1]."""
        plot = pil_image.new('RGB', figure_size, WHITE)
        draw = pil_img_draw.Draw(plot)
        font = ImageFont.truetype(
            "/usr/share/fonts/truetype/lato/Lato-Regular.ttf",
            size=get_plot_font_size(font_size, figure_size)
        )
        width, height = plot.size
        description, crop_coordinates = self.inverse_build(conditional)
        for (representation, bbox), color in zip(description, cycle(COLOR_PALETTE)):
            annotation = self.representation_to_annotation(representation)
            class_label = label_for_category_no(annotation.category_no) + ' ' + additional_parameters_string(annotation)
            bbox = absolute_bbox(bbox, width, height)
            draw.rectangle(bbox, outline=color, width=line_width)
            draw.text((bbox[0] + line_width, bbox[1] + line_width), class_label, anchor='la', fill=BLACK, font=font)
        if crop_coordinates is not None:
            draw.rectangle(absolute_bbox(crop_coordinates, width, height), outline=GRAY_75, width=line_width)
        return convert_pil_to_tensor(plot) / 127.5 - 1.
| 3,223 | 51.852459 | 120 | py |
ppgn | ppgn-master/settings.py | # Set this to the path to Caffe installation on your system
caffe_root = "/path/to/your/caffe/python"
# Run Caffe on the GPU (recommended for sampling speed); set False for CPU-only.
gpu = True
# -------------------------------------
# The following are hard-coded and hardly change unless we change to use a different generator.
# -------------------------------------
# Generator G
generator_weights = "nets/generator/noiseless/generator.caffemodel"
generator_definition = "nets/generator/noiseless/generator.prototxt"
# input / output layers in the generator prototxt
generator_in_layer = "feat"
generator_out_layer = "deconv0"
# Encoder E
encoder_weights = "nets/caffenet/bvlc_reference_caffenet.caffemodel"
encoder_definition = "nets/caffenet/caffenet.prototxt"
# Text files
# Class labels (synsets) for the classifier and the captioner vocabulary.
synset_file = "misc/synset_words.txt"
vocab_file = "misc/vocabulary.txt"
| 777 | 32.826087 | 95 | py |
ppgn | ppgn-master/sampling_caption.py | #!/usr/bin/env python
'''
Anh Nguyen <anh.ng8@gmail.com>
2016
'''
import os, sys
os.environ['GLOG_minloglevel'] = '2' # suprress Caffe verbose prints
import settings
sys.path.insert(0, settings.caffe_root)
import caffe
import numpy as np
from numpy.linalg import norm
import scipy.misc, scipy.io
import argparse
import util
from sampler import Sampler
if settings.gpu:
    caffe.set_mode_gpu() # sampling on GPU (recommended for speed)
class CaptionConditionalSampler(Sampler):
    """Sampler conditioned on an image caption via an LRCN captioner.

    The condition model is an image feature extractor followed by an LSTM
    language model; gradients of the per-word log-probabilities are
    accumulated over the sentence and backpropagated to the image.
    """
    def __init__ (self, lstm_definition, lstm_weights):
        # LSTM half of the LRCN captioner (shares the binary weights with the
        # convnet feature extractor passed separately to the sampling loop).
        self.lstm = caffe.Net(lstm_definition, lstm_weights, caffe.TEST)
    def forward_backward_from_x_to_condition(self, net, end, image, condition):
        '''
        Forward and backward passes through 'net', the condition model p(y|x), here an image classifier.
        '''
        src = net.blobs['data'] # input image
        dst = net.blobs[end]
        sentence = condition['sentence']
        previous_word = 0
        lstm_layer = "log_prob"
        feature_layer = "image_features"
        # Accumulator for d log p(word | image) / d features over all words.
        grad_sum = np.zeros_like(self.lstm.blobs[feature_layer].data)
        probs = []
        for idx, word in enumerate(sentence):
            if idx > 0:
                previous_word = sentence[idx - 1]
            # preparing lstm feature vectors
            cont = 0 if previous_word == 0 else 1
            cont_input = np.array([cont])
            word_input = np.array([previous_word]) # Previous word == 0 : meaning this is the start of the sentence
            # 1. Get feature descriptors from fc8
            net.forward(data=image, end=end)
            descriptor = net.blobs[end].data
            # 2. Pass this to lstm
            image_features = np.zeros_like(self.lstm.blobs[feature_layer].data)
            image_features[:] = descriptor
            self.lstm.forward(image_features=image_features, cont_sentence=cont_input,
                    input_sentence=word_input, end=lstm_layer)
            # Display the prediction
            probs.append ( self.lstm.blobs["probs"].data[0,idx, word] )
            # Set the objective to the target word's log-prob and backprop it
            # to the image-feature input of the LSTM.
            self.lstm.blobs[lstm_layer].diff[:, :, word] = 1
            diffs = self.lstm.backward(start=lstm_layer, diffs=[feature_layer])
            g_word = diffs[feature_layer] # (1000,)
            grad_sum += g_word # accumulate the gradient from all words
            # reset objective after each step
            self.lstm.blobs[lstm_layer].diff.fill(0.)
        # Average softmax probabilities of all words
        obj_prob = np.mean(probs)
        # Backpropagate the gradient from LSTM to the feature extractor convnet
        dst.diff[...] = grad_sum[0]
        net.backward(start=end)
        g = src.diff.copy()
        dst.diff.fill(0.) # reset objective after each step
        # Info to be printed out in the below 'print_progress' method
        info = { }
        return g, obj_prob, info
    def get_label(self, condition):
        # Captions have no single class label to stamp onto saved images.
        return None
    def print_progress(self, i, info, condition, prob, grad):
        """Print the step, readable sentence, mean word probability and gradient norm."""
        print "step: %04d\t %s [%.2f]\t norm: [%.2f]" % ( i, condition['readable'], prob, norm(grad) )
def main():
parser = argparse.ArgumentParser(description='Process some integers.')
parser.add_argument('--sentence', metavar='w', type=str, default="", nargs='?', help='Sentence to condition on')
parser.add_argument('--n_iters', metavar='iter', type=int, default=10, help='Number of sampling steps per each unit')
parser.add_argument('--threshold', metavar='w', type=float, default=-1.0, nargs='?', help='The probability threshold to decide whether to keep an image')
parser.add_argument('--save_every', metavar='save_iter', type=int, default=1, help='Save a sample every N iterations. 0 to disable saving')
parser.add_argument('--reset_every', metavar='reset_iter', type=int, default=0, help='Reset the code every N iterations')
parser.add_argument('--lr', metavar='lr', type=float, default=2.0, nargs='?', help='Learning rate')
parser.add_argument('--lr_end', metavar='lr', type=float, default=-1.0, nargs='?', help='Ending Learning rate')
parser.add_argument('--epsilon2', metavar='lr', type=float, default=1.0, nargs='?', help='Ending Learning rate')
parser.add_argument('--epsilon1', metavar='lr', type=float, default=1.0, nargs='?', help='Ending Learning rate')
parser.add_argument('--epsilon3', metavar='lr', type=float, default=1.0, nargs='?', help='Ending Learning rate')
parser.add_argument('--seed', metavar='n', type=int, default=0, nargs='?', help='Random seed')
parser.add_argument('--xy', metavar='n', type=int, default=0, nargs='?', help='Spatial position for conv units')
parser.add_argument('--opt_layer', metavar='s', type=str, help='Layer at which we optimize a code')
parser.add_argument('--act_layer', metavar='s', type=str, default="fc8", help='Layer at which we activate a neuron')
parser.add_argument('--init_file', metavar='s', type=str, default="None", help='Init image')
parser.add_argument('--write_labels', action='store_true', default=False, help='Write class labels to images')
parser.add_argument('--output_dir', metavar='b', type=str, default=".", help='Output directory for saving results')
parser.add_argument('--net_weights', metavar='b', type=str, default=settings.encoder_weights, help='Weights of the net being visualized')
parser.add_argument('--net_definition', metavar='b', type=str, default=settings.encoder_definition, help='Definition of the net being visualized')
parser.add_argument('--captioner_definition', metavar='b', type=str, help='Definition of the net being visualized')
args = parser.parse_args()
# Default to constant learning rate
if args.lr_end < 0:
args.lr_end = args.lr
# summary
print "-------------"
print " sentence: %s" % args.sentence
print " n_iters: %s" % args.n_iters
print " reset_every: %s" % args.reset_every
print " save_every: %s" % args.save_every
print " threshold: %s" % args.threshold
print " epsilon1: %s" % args.epsilon1
print " epsilon2: %s" % args.epsilon2
print " epsilon3: %s" % args.epsilon3
print " start learning rate: %s" % args.lr
print " end learning rate: %s" % args.lr_end
print " seed: %s" % args.seed
print " opt_layer: %s" % args.opt_layer
print " act_layer: %s" % args.act_layer
print " init_file: %s" % args.init_file
print "-------------"
print " output dir: %s" % args.output_dir
print " net weights: %s" % args.net_weights
print " net definition: %s" % args.net_definition
print " captioner definition: %s" % args.captioner_definition
print "-------------"
# encoder and generator for images
encoder = caffe.Net(settings.encoder_definition, settings.encoder_weights, caffe.TEST)
generator = caffe.Net(settings.generator_definition, settings.generator_weights, caffe.TEST)
# condition network, here an image classification net
# this LRCN image captioning net has 1 binary weights but 2 definitions: 1 for feature extractor (AlexNet), 1 for LSTM
net = caffe.Net(args.net_definition, args.net_weights, caffe.TEST)
# Fix the seed
np.random.seed(args.seed)
if args.init_file != "None":
start_code, start_image = get_code(encoder=encoder, path=args.init_file, layer=args.opt_layer)
print "Loaded start code: ", start_code.shape
else:
# shape of the code being optimized
shape = generator.blobs[settings.generator_in_layer].data.shape
start_code = np.random.normal(0, 1, shape)
# Split the sentence into words
words = args.sentence.split("_")
sentence = util.convert_words_into_numbers(settings.vocab_file, words)
# Condition here is the sentence
conditions = [ { "sentence": sentence, "readable": args.sentence.replace("_", " ")} ]
# Optimize a code via gradient ascent
sampler = CaptionConditionalSampler(args.captioner_definition, args.net_weights)
output_image, list_samples = sampler.sampling( condition_net=net, image_encoder=encoder, image_generator=generator,
gen_in_layer=settings.generator_in_layer, gen_out_layer=settings.generator_out_layer, start_code=start_code,
n_iters=args.n_iters, lr=args.lr, lr_end=args.lr_end, threshold=args.threshold,
layer=args.act_layer, conditions=conditions,
epsilon1=args.epsilon1, epsilon2=args.epsilon2, epsilon3=args.epsilon3,
output_dir=args.output_dir,
reset_every=args.reset_every, save_every=args.save_every)
# Output image
filename = "%s/%s_%04d_%s_h_%s_%s_%s__%s.jpg" % (
args.output_dir,
args.act_layer,
args.n_iters,
args.lr,
str(args.epsilon1),
str(args.epsilon2),
str(args.epsilon3),
args.seed
)
# Save the final image
util.save_image(output_image, filename)
print "%s/%s" % (os.getcwd(), filename)
# Write labels to images
print "Saving images..."
for p in list_samples:
img, name, label = p
util.save_image(img, name)
if args.write_labels:
util.write_label_to_img(name, label)
if __name__ == '__main__':
main()
| 9,371 | 41.6 | 157 | py |
ppgn | ppgn-master/sampler.py | #!/usr/bin/env python
'''
Anh Nguyen <anh.ng8@gmail.com>
2017
'''
import os, sys
os.environ['GLOG_minloglevel'] = '2' # suprress Caffe verbose prints
import settings
sys.path.insert(0, settings.caffe_root)
import caffe
import numpy as np
from numpy.linalg import norm
import scipy.misc, scipy.io
import util
class Sampler(object):
    """Base class implementing the PPGN sampling update (Eq. 11 in the paper).

    Subclasses supply the condition model p(y|x) by implementing
    forward_backward_from_x_to_condition(), plus get_label() and
    print_progress().
    """
    def backward_from_x_to_h(self, generator, diff, start, end):
        '''
        Backpropagate the gradient from the image (start) back to the latent space (end) of the generator network.
        '''
        dst = generator.blobs[end]
        dst.diff[...] = diff
        generator.backward(start=end)
        g = generator.blobs[start].diff.copy()
        dst.diff.fill(0.) # reset objective after each step
        return g
    def h_autoencoder_grad(self, h, encoder, decoder, gen_out_layer, topleft, inpainting):
        '''
        Compute the gradient of the energy of P(input) wrt input, which is given by decode(encode(input))-input {see Alain & Bengio, 2014}.
        Specifically, we compute E(G(h)) - h.
        Note: this is an "upside down" auto-encoder for h that goes h -> x -> h with G modeling h -> x and E modeling x -> h.
        '''
        generated = encoder.forward(feat=h)
        x = encoder.blobs[gen_out_layer].data.copy() # 256x256
        # Crop from 256x256 to 227x227
        image_size = decoder.blobs['data'].shape # (1, 3, 227, 227)
        cropped_x = x[:,:,topleft[0]:topleft[0]+image_size[2], topleft[1]:topleft[1]+image_size[3]]
        # Mask the image when inpainting
        if inpainting is not None:
            cropped_x = util.apply_mask(img=cropped_x, mask=inpainting['mask'], context=inpainting['image'])
        # Push this 227x227 image through net
        decoder.forward(data=cropped_x)
        code = decoder.blobs['fc6'].data
        g = code - h
        return g
    def sampling( self, condition_net, image_encoder, image_generator,
                    gen_in_layer, gen_out_layer, start_code,
                    n_iters, lr, lr_end, threshold,
                    layer, conditions, #units=None, xy=0,
                    epsilon1=1, epsilon2=1, epsilon3=1e-10,
                    inpainting=None, # in-painting args
                    output_dir=None, reset_every=0, save_every=1):
        """Run the sampling chain; return (last_image, list of saved samples).

        Each iteration combines a prior gradient (epsilon1), a condition
        gradient (epsilon2) and Gaussian noise (epsilon3); epsilon4 (inside
        `inpainting`) optionally matches the un-masked context region.
        """
        # Get the input and output sizes
        image_shape = condition_net.blobs['data'].data.shape
        generator_output_shape = image_generator.blobs[gen_out_layer].data.shape
        encoder_input_shape = image_encoder.blobs['data'].data.shape
        # Calculate the difference between the input image of the condition net
        # and the output image from the generator
        image_size = util.get_image_size(image_shape)
        generator_output_size = util.get_image_size(generator_output_shape)
        encoder_input_size = util.get_image_size(encoder_input_shape)
        # The top left offset to crop the output image to get a 227x227 image
        topleft = util.compute_topleft(image_size, generator_output_size)
        topleft_DAE = util.compute_topleft(encoder_input_size, generator_output_size)
        src = image_generator.blobs[gen_in_layer] # the input feature layer of the generator
        # Make sure the layer size and initial vector size match
        assert src.data.shape == start_code.shape
        # Variables to store the best sample
        last_xx = np.zeros(image_shape) # best image
        last_prob = -sys.maxint # highest probability
        h = start_code.copy()
        condition_idx = 0
        list_samples = []
        i = 0
        while True:
            # Learning rate is linearly interpolated from lr to lr_end.
            step_size = lr + ((lr_end - lr) * i) / n_iters
            condition = conditions[condition_idx] # Select a class
            # 1. Compute the epsilon1 term ---
            # compute gradient d log(p(h)) / dh per DAE results in Alain & Bengio 2014
            d_prior = self.h_autoencoder_grad(h=h, encoder=image_generator, decoder=image_encoder, gen_out_layer=gen_out_layer, topleft=topleft_DAE, inpainting=inpainting)
            # 2. Compute the epsilon2 term ---
            # Push the code through the generator to get an image x
            image_generator.blobs["feat"].data[:] = h
            generated = image_generator.forward()
            x = generated[gen_out_layer].copy() # 256x256
            # Crop from 256x256 to 227x227
            cropped_x = x[:,:,topleft[0]:topleft[0]+image_size[0], topleft[1]:topleft[1]+image_size[1]]
            cropped_x_copy = cropped_x.copy()
            if inpainting is not None:
                cropped_x = util.apply_mask(img=cropped_x, mask=inpainting['mask'], context=inpainting['image'])
            # Forward pass the image x to the condition net up to an unit k at the given layer
            # Backprop the gradient through the condition net to the image layer to get a gradient image
            d_condition_x, prob, info = self.forward_backward_from_x_to_condition(net=condition_net, end=layer, image=cropped_x, condition=condition)
            if inpainting is not None:
                # Mask out the class gradient image
                d_condition_x[:] *= inpainting["mask"]
                # An additional objective for matching the context image
                d_context_x256 = np.zeros_like(x.copy())
                d_context_x256[:,:,topleft[0]:topleft[0]+image_size[0], topleft[1]:topleft[1]+image_size[1]] = (inpainting["image"] - cropped_x_copy) * inpainting["mask_neg"]
                d_context_h = self.backward_from_x_to_h(generator=image_generator, diff=d_context_x256, start=gen_in_layer, end=gen_out_layer)
            # Put the gradient back in the 256x256 format
            d_condition_x256 = np.zeros_like(x)
            d_condition_x256[:,:,topleft[0]:topleft[0]+image_size[0], topleft[1]:topleft[1]+image_size[1]] = d_condition_x.copy()
            # Backpropagate the above gradient all the way to h (through generator)
            # This gradient 'd_condition' is d log(p(y|h)) / dh (the epsilon2 term in Eq. 11 in the paper)
            d_condition = self.backward_from_x_to_h(generator=image_generator, diff=d_condition_x256, start=gen_in_layer, end=gen_out_layer)
            self.print_progress(i, info, condition, prob, d_condition)
            # 3. Compute the epsilon3 term ---
            noise = np.zeros_like(h)
            if epsilon3 > 0:
                noise = np.random.normal(0, epsilon3, h.shape) # Gaussian noise
            # Update h according to Eq.11 in the paper
            d_h = epsilon1 * d_prior + epsilon2 * d_condition + noise
            # Plus the optional epsilon4 for matching the context region when in-painting
            if inpainting is not None:
                d_h += inpainting["epsilon4"] * d_context_h
            # Normalize the step by the mean absolute gradient magnitude.
            h += step_size/np.abs(d_h).mean() * d_h
            h = np.clip(h, a_min=0, a_max=30) # Keep the code within a realistic range
            # Reset the code every N iters (for diversity when running a long sampling chain)
            if reset_every > 0 and i % reset_every == 0 and i > 0:
                h = np.random.normal(0, 1, h.shape)
                # Experimental: For sample diversity, it's a good idea to randomly pick epsilon1 as well
                epsilon1 = np.random.uniform(low=1e-6, high=1e-2)
            # Save every sample
            last_xx = cropped_x.copy()
            last_prob = prob
            # Filter samples based on threshold or every N iterations
            if save_every > 0 and i % save_every == 0 and prob > threshold:
                name = "%s/samples/%05d.jpg" % (output_dir, i)
                label = self.get_label(condition)
                list_samples.append( (last_xx.copy(), name, label) )
            # Stop if grad is 0
            if norm(d_h) == 0:
                print " d_h is 0"
                break
            # Randomly sample a class every N iterations
            if i > 0 and i % n_iters == 0:
                condition_idx += 1
                if condition_idx == len(conditions):
                    break
            i += 1 # Next iter
        # returning the last sample
        print "-------------------------"
        print "Last sample: prob [%s] " % last_prob
        return last_xx, list_samples
| 8,319 | 41.020202 | 174 | py |
ppgn | ppgn-master/sampling_class.py | #!/usr/bin/env python
'''
Anh Nguyen <anh.ng8@gmail.com>
2016
'''
import os, sys
os.environ['GLOG_minloglevel'] = '2' # suprress Caffe verbose prints
import settings
sys.path.insert(0, settings.caffe_root)
import caffe
import numpy as np
from numpy.linalg import norm
import scipy.misc, scipy.io
import argparse
import util
from sampler import Sampler
if settings.gpu:
    caffe.set_mode_gpu() # sampling on GPU
class ClassConditionalSampler(Sampler):
    """Sampler conditioned on a unit of an image classifier (class-conditional PPGN)."""
    def __init__ (self):
        # Load the list of class names
        with open(settings.synset_file, 'r') as synset_file:
            self.class_names = [ line.split(",")[0].split(" ", 1)[1].rstrip('\n') for line in synset_file.readlines()]
        # Hard-coded list of layers that has been tested
        self.fc_layers = ["fc6", "fc7", "fc8", "loss3/classifier", "fc1000", "prob"]
        self.conv_layers = ["conv1", "conv2", "conv3", "conv4", "conv5"]
    def forward_backward_from_x_to_condition(self, net, end, image, condition):
        '''
        Forward and backward passes through 'net', the condition model p(y|x), here an image classifier.
        '''
        unit = condition['unit']
        xy = condition['xy']
        dst = net.blobs[end]
        acts = net.forward(data=image, end=end)
        one_hot = np.zeros_like(dst.data)
        # Get the activations
        if end in self.fc_layers:
            layer_acts = acts[end][0]
        elif end in self.conv_layers:
            # For conv layers, condition on the channel activations at one
            # spatial position (xy, xy).
            layer_acts = acts[end][0, :, xy, xy]
        best_unit = layer_acts.argmax() # highest probability unit
        # Compute the softmax probs by hand because it's handy in case we want to condition on hidden units as well
        exp_acts = np.exp(layer_acts - np.max(layer_acts))
        probs = exp_acts / (1e-10 + np.sum(exp_acts, keepdims=True))
        # The gradient of log of softmax, log(p(y|x)), reduces to:
        softmax_grad = 1 - probs.copy()
        obj_prob = probs.flat[unit]
        # Assign the gradient
        if end in self.fc_layers:
            one_hot.flat[unit] = softmax_grad[unit]
        elif end in self.conv_layers:
            one_hot[:, unit, xy, xy] = softmax_grad[unit]
        else:
            raise Exception("Invalid layer type!")
        dst.diff[:] = one_hot
        # Backpropagate the gradient to the image layer
        diffs = net.backward(start=end, diffs=['data'])
        g = diffs['data'].copy()
        dst.diff.fill(0.) # reset objective after each step
        # Info to be printed out in the below 'print_progress' method
        info = {
            'best_unit': best_unit,
            'best_unit_prob': probs.flat[best_unit]
        }
        return g, obj_prob, info
    def get_label(self, condition):
        """Return the human-readable class name for the conditioned unit."""
        unit = condition['unit']
        return self.class_names[unit]
    def print_progress(self, i, info, condition, prob, grad):
        """Print step, argmax unit, target unit probability and gradient norm."""
        print "step: %04d\t max: %4s [%.2f]\t obj: %4s [%.2f]\t norm: [%.2f]" % ( i, info['best_unit'], info['best_unit_prob'], condition['unit'], prob, norm(grad) )
def get_code(encoder, path, layer, mask=None):
    '''
    Push the given image through an encoder (here, AlexNet) to get a code.

    :param encoder: a pre-loaded caffe.Net used for feature extraction
    :param path: path to the input image on disk
    :param layer: name of the blob whose activations are returned as the code
    :param mask: optional multiplicative mask applied to the preprocessed image
    :return: (features, data) -- the code at `layer` and the preprocessed image
    '''
    # set up the inputs for the net:
    image_size = encoder.blobs['data'].shape[2:] # spatial size, e.g. (227, 227)
    images = np.zeros_like(encoder.blobs["data"].data, dtype='float32')
    in_image = scipy.misc.imread(path)
    in_image = scipy.misc.imresize(in_image, (image_size[0], image_size[1]))
    images[0] = np.transpose(in_image, (2, 0, 1)) # convert to (3, H, W) format
    data = images[:,::-1] # convert from RGB to BGR
    # subtract the ImageNet mean
    image_mean = scipy.io.loadmat('misc/ilsvrc_2012_mean.mat')['image_mean'] # (256, 256, 3)
    topleft = util.compute_topleft(image_size, image_mean.shape[:2])
    image_mean = image_mean[topleft[0]:topleft[0]+image_size[0], topleft[1]:topleft[1]+image_size[1]] # crop the image mean
    data -= np.expand_dims(np.transpose(image_mean, (2,0,1)), 0) # mean is already BGR
    if mask is not None:
        data *= mask
    # Fix: use the encoder passed in by the caller instead of re-loading a new
    # caffe.Net here -- the previous code shadowed the `encoder` argument and
    # spent time/memory loading identical weights a second time.
    # extract the features
    encoder.forward(data=data)
    features = encoder.blobs[layer].data.copy()
    return features, data
def main():
    """Parse CLI arguments and run class-conditional PPGN sampling (optionally in-painting)."""
    parser = argparse.ArgumentParser(description='Process some integers.')
    parser.add_argument('--units', metavar='units', type=str, help='an unit to visualize e.g. [0, 999]')
    parser.add_argument('--n_iters', metavar='iter', type=int, default=10, help='Number of sampling steps per each unit')
    parser.add_argument('--threshold', metavar='w', type=float, default=-1.0, nargs='?', help='The probability threshold to decide whether to keep an image')
    parser.add_argument('--save_every', metavar='save_iter', type=int, default=1, help='Save a sample every N iterations. 0 to disable saving')
    parser.add_argument('--reset_every', metavar='reset_iter', type=int, default=0, help='Reset the code every N iterations')
    parser.add_argument('--lr', metavar='lr', type=float, default=2.0, nargs='?', help='Learning rate')
    parser.add_argument('--lr_end', metavar='lr', type=float, default=-1.0, nargs='?', help='Ending Learning rate')
    parser.add_argument('--epsilon1', metavar='lr', type=float, default=1.0, nargs='?', help='Prior')
    parser.add_argument('--epsilon2', metavar='lr', type=float, default=1.0, nargs='?', help='Condition')
    parser.add_argument('--epsilon3', metavar='lr', type=float, default=1.0, nargs='?', help='Noise')
    parser.add_argument('--epsilon4', metavar='lr', type=float, default=0.0, nargs='?', help='Context')
    parser.add_argument('--seed', metavar='n', type=int, default=0, nargs='?', help='Random seed')
    parser.add_argument('--xy', metavar='n', type=int, default=0, nargs='?', help='Spatial position for conv units')
    parser.add_argument('--opt_layer', metavar='s', type=str, help='Layer at which we optimize a code')
    parser.add_argument('--act_layer', metavar='s', type=str, default="fc8", help='Layer at which we activate a neuron')
    parser.add_argument('--init_file', metavar='s', type=str, default="None", help='Init image')
    parser.add_argument('--write_labels', action='store_true', default=False, help='Write class labels to images')
    parser.add_argument('--output_dir', metavar='b', type=str, default=".", help='Output directory for saving results')
    parser.add_argument('--net_weights', metavar='b', type=str, default=settings.encoder_weights, help='Weights of the net being visualized')
    parser.add_argument('--net_definition', metavar='b', type=str, default=settings.encoder_definition, help='Definition of the net being visualized')
    args = parser.parse_args()
    # Default to constant learning rate
    if args.lr_end < 0:
        args.lr_end = args.lr
    # summary
    print "-------------"
    print " units: %s xy: %s" % (args.units, args.xy)
    print " n_iters: %s" % args.n_iters
    print " reset_every: %s" % args.reset_every
    print " save_every: %s" % args.save_every
    print " threshold: %s" % args.threshold
    print " epsilon1: %s" % args.epsilon1
    print " epsilon2: %s" % args.epsilon2
    print " epsilon3: %s" % args.epsilon3
    print " epsilon4: %s" % args.epsilon4
    print " start learning rate: %s" % args.lr
    print " end learning rate: %s" % args.lr_end
    print " seed: %s" % args.seed
    print " opt_layer: %s" % args.opt_layer
    print " act_layer: %s" % args.act_layer
    print " init_file: %s" % args.init_file
    print "-------------"
    print " output dir: %s" % args.output_dir
    print " net weights: %s" % args.net_weights
    print " net definition: %s" % args.net_definition
    print "-------------"
    # encoder and generator for images
    encoder = caffe.Net(settings.encoder_definition, settings.encoder_weights, caffe.TEST)
    generator = caffe.Net(settings.generator_definition, settings.generator_weights, caffe.TEST)
    # condition network, here an image classification net
    net = caffe.Classifier(args.net_definition, args.net_weights,
                    mean = np.float32([104.0, 117.0, 123.0]), # ImageNet mean
                    channel_swap = (2,1,0)) # the reference model has channels in BGR order instead of RGB
    # Fix the seed
    np.random.seed(args.seed)
    # Sampler for class-conditional generation
    sampler = ClassConditionalSampler()
    inpainting = None
    if args.init_file != "None":
        # Pre-compute masks if we want to perform inpainting
        if args.epsilon4 > 0:
            mask, neg = util.get_mask()
        else:
            neg = None
        # Get the code for the masked image
        start_code, start_image = get_code(encoder=encoder, path=args.init_file, layer=args.opt_layer, mask=neg)
        # Package settings for in-painting experiments
        if args.epsilon4 > 0:
            inpainting = {
                "mask" : mask,
                "mask_neg" : neg,
                "image" : start_image,
                "epsilon4" : args.epsilon4
            }
        print "Loaded init code: ", start_code.shape
    else:
        # shape of the code being optimized
        shape = generator.blobs[settings.generator_in_layer].data.shape
        start_code = np.random.normal(0, 1, shape)
        print ">>", np.min(start_code), np.max(start_code)
    # Separate the dash-separated list of units into numbers
    conditions = [ { "unit": int(u), "xy": args.xy } for u in args.units.split("_") ]
    # Optimize a code via gradient ascent
    output_image, list_samples = sampler.sampling( condition_net=net, image_encoder=encoder, image_generator=generator,
                        gen_in_layer=settings.generator_in_layer, gen_out_layer=settings.generator_out_layer, start_code=start_code,
                        n_iters=args.n_iters, lr=args.lr, lr_end=args.lr_end, threshold=args.threshold,
                        layer=args.act_layer, conditions=conditions,
                        epsilon1=args.epsilon1, epsilon2=args.epsilon2, epsilon3=args.epsilon3,
                        inpainting=inpainting,
                        output_dir=args.output_dir,
                        reset_every=args.reset_every, save_every=args.save_every)
    # Output image
    filename = "%s/%s_%04d_%04d_%s_h_%s_%s_%s_%s__%s.jpg" % (
            args.output_dir,
            args.act_layer,
            conditions[0]["unit"],
            args.n_iters,
            args.lr,
            str(args.epsilon1),
            str(args.epsilon2),
            str(args.epsilon3),
            str(args.epsilon4),
            args.seed
        )
    # When in-painting, show the initial image next to the result.
    if inpainting != None:
        output_image = util.stitch(start_image, output_image)
    # Save the final image
    util.save_image(output_image, filename)
    print "%s/%s" % (os.getcwd(), filename)
    # Write labels to images
    print "Saving images..."
    for p in list_samples:
        img, name, label = p
        util.save_image(img, name)
        if args.write_labels:
            util.write_label_to_img(name, label)
if __name__ == '__main__':
    main()
| 11,251 | 40.216117 | 165 | py |
gcnn-survey-paper | gcnn-survey-paper-master/third_party/gcn/gcn/layers.py | from gcn.inits import *
import tensorflow as tf
# TF1-style command-line flags handle (shared module convention in this repo).
flags = tf.app.flags
FLAGS = flags.FLAGS
# Global registry mapping a layer-name prefix (e.g. "dense") to the number of
# layers created so far; consumed by get_layer_uid() to build unique default
# layer names such as "dense_1", "dense_2", ...
_LAYER_UIDS = {}
def get_layer_uid(layer_name=''):
    """Return the next unique ID for layers named ``layer_name``.

    Counts live in the module-level ``_LAYER_UIDS`` registry and start at 1
    for each distinct name.
    """
    uid = _LAYER_UIDS.get(layer_name, 0) + 1
    _LAYER_UIDS[layer_name] = uid
    return uid
def sparse_dropout(x, keep_prob, noise_shape):
    """Dropout for sparse tensors.

    Each nonzero entry of ``x`` survives with probability ``keep_prob``;
    survivors are rescaled by ``1/keep_prob`` so the expected value is
    unchanged. ``noise_shape`` must match the number of nonzero entries.
    """
    # floor(keep_prob + U[0,1)) is 1 with probability keep_prob, else 0.
    noise = keep_prob + tf.random_uniform(noise_shape)
    keep_mask = tf.cast(tf.floor(noise), dtype=tf.bool)
    retained = tf.sparse_retain(x, keep_mask)
    return retained * (1. / keep_prob)
def dot(x, y, sparse=False):
    """Matrix-multiply ``x @ y``, dispatching on whether ``x`` is sparse."""
    matmul = tf.sparse_tensor_dense_matmul if sparse else tf.matmul
    return matmul(x, y)
class Layer(object):
    """Base layer class. Defines the basic API shared by all layer objects.

    Implementation inspired by keras (http://keras.io).

    # Properties
        name: String, defines the variable scope of the layer.
        logging: Boolean, switches Tensorflow histogram logging on/off

    # Methods
        _call(inputs): Defines the layer's computation graph
            (takes the input tensor, returns the output tensor)
        __call__(inputs): Name-scoped wrapper around _call()
        _log_vars(): Emit a histogram summary for every layer variable
    """
    def __init__(self, **kwargs):
        allowed_kwargs = {'name', 'logging'}
        for kwarg in kwargs:
            assert kwarg in allowed_kwargs, 'Invalid keyword argument: ' + kwarg
        # Default to an auto-generated unique name such as "dense_1".
        name = kwargs.get('name')
        if not name:
            layer = self.__class__.__name__.lower()
            name = '{}_{}'.format(layer, get_layer_uid(layer))
        self.name = name
        self.vars = {}
        self.logging = kwargs.get('logging', False)
        self.sparse_inputs = False
    def _call(self, inputs):
        # Identity mapping; subclasses override this with their own graph.
        return inputs
    def __call__(self, inputs):
        with tf.name_scope(self.name):
            # Sparse inputs cannot be summarized as histograms, so skip them.
            if self.logging and not self.sparse_inputs:
                tf.summary.histogram(self.name + '/inputs', inputs)
            outputs = self._call(inputs)
            if self.logging:
                tf.summary.histogram(self.name + '/outputs', outputs)
            return outputs
    def _log_vars(self):
        for var_name, var in self.vars.items():
            tf.summary.histogram(self.name + '/vars/' + var_name, var)
class Dense(Layer):
    """Dense (fully connected) layer: act(x W + b), with optional dropout.

    Args:
        input_dim: int, number of input features (rows of the weight matrix).
        output_dim: int, number of output features.
        placeholders: dict of TF placeholders; reads 'dropout' (drop
            probability fed at run time) and 'num_features_nonzero' (needed
            for sparse dropout).
        dropout: truthy to enable run-time dropout via the placeholder,
            falsy to disable (fixed 0.).
        sparse_inputs: bool, whether `inputs` is a tf.SparseTensor.
        act: activation function applied to the output.
        bias: bool, whether to add a learned bias vector.
        featureless: stored but unused in this layer (used by
            GraphConvolution below).
    """
    def __init__(self, input_dim, output_dim, placeholders, dropout=0., sparse_inputs=False,
                 act=tf.nn.relu, bias=False, featureless=False, **kwargs):
        super(Dense, self).__init__(**kwargs)
        # When enabled, the drop probability is fed at run time through the
        # 'dropout' placeholder so it can be zeroed out at eval time.
        if dropout:
            self.dropout = placeholders['dropout']
        else:
            self.dropout = 0.
        self.act = act
        self.sparse_inputs = sparse_inputs
        self.featureless = featureless
        self.bias = bias
        # helper variable for sparse dropout (count of nonzero input entries)
        self.num_features_nonzero = placeholders['num_features_nonzero']
        with tf.variable_scope(self.name + '_vars'):
            self.vars['weights'] = glorot([input_dim, output_dim],
                                          name='weights')
            if self.bias:
                self.vars['bias'] = zeros([output_dim], name='bias')
        if self.logging:
            self._log_vars()
    def _call(self, inputs):
        x = inputs
        # dropout; 1-self.dropout is the keep probability expected by the
        # TF1 dropout APIs.
        if self.sparse_inputs:
            x = sparse_dropout(x, 1-self.dropout, self.num_features_nonzero)
        else:
            x = tf.nn.dropout(x, 1-self.dropout)
        # transform: x W (sparse-aware matmul)
        output = dot(x, self.vars['weights'], sparse=self.sparse_inputs)
        # bias
        if self.bias:
            output += self.vars['bias']
        return self.act(output)
class GraphConvolution(Layer):
    """Graph convolution layer: act(sum_i A_i (x W_i) + b).

    One weight matrix is created per support matrix (i.e. per power /
    normalization of the adjacency, fed via placeholders['support']), and the
    per-support results are summed.

    Args:
        input_dim: int, number of input features.
        output_dim: int, number of output features.
        placeholders: dict of TF placeholders; reads 'dropout', 'support'
            (list of sparse adjacency-like matrices) and
            'num_features_nonzero' (for sparse dropout).
        dropout: truthy to enable run-time dropout via the placeholder.
        sparse_inputs: bool, whether `inputs` is a tf.SparseTensor.
        act: activation function applied to the output.
        bias: bool, whether to add a learned bias vector.
        featureless: bool; when True the input features are ignored and the
            weight matrix itself is propagated (identity-feature trick).
    """
    def __init__(self, input_dim, output_dim, placeholders, dropout=0.,
                 sparse_inputs=False, act=tf.nn.relu, bias=False,
                 featureless=False, **kwargs):
        super(GraphConvolution, self).__init__(**kwargs)
        if dropout:
            self.dropout = placeholders['dropout']
        else:
            self.dropout = 0.
        self.act = act
        self.support = placeholders['support']
        self.sparse_inputs = sparse_inputs
        self.featureless = featureless
        self.bias = bias
        # helper variable for sparse dropout (count of nonzero input entries)
        self.num_features_nonzero = placeholders['num_features_nonzero']
        with tf.variable_scope(self.name + '_vars'):
            for i in range(len(self.support)):
                self.vars['weights_' + str(i)] = glorot([input_dim, output_dim],
                                                        name='weights_' + str(i))
            if self.bias:
                self.vars['bias'] = zeros([output_dim], name='bias')
        if self.logging:
            self._log_vars()
    def _call(self, inputs):
        x = inputs
        # dropout; 1-self.dropout is the keep probability (TF1 semantics)
        if self.sparse_inputs:
            x = sparse_dropout(x, 1-self.dropout, self.num_features_nonzero)
        else:
            x = tf.nn.dropout(x, 1-self.dropout)
        # convolve: for each support matrix, transform then propagate
        supports = list()
        for i in range(len(self.support)):
            if not self.featureless:
                pre_sup = dot(x, self.vars['weights_' + str(i)],
                              sparse=self.sparse_inputs)
            else:
                # featureless: skip the feature transform, use W_i directly
                pre_sup = self.vars['weights_' + str(i)]
            support = dot(self.support[i], pre_sup, sparse=True)
            supports.append(support)
        output = tf.add_n(supports)
        # bias
        if self.bias:
            output += self.vars['bias']
        return self.act(output)
| 5,886 | 30.148148 | 92 | py |
STTS | STTS-main/MViT/setup.py | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
from setuptools import find_packages, setup
# Package metadata for the slowfast codebase.
# Fix: "matplotlib" was listed twice in install_requires; deduplicated.
setup(
    name="slowfast",
    version="1.0",
    author="FAIR",
    url="unknown",
    description="SlowFast Video Understanding",
    install_requires=[
        "yacs>=0.1.6",
        "pyyaml>=5.1",
        "av",
        "matplotlib",
        "termcolor>=1.1",
        "simplejson",
        "tqdm",
        "psutil",
        "detectron2",
        "opencv-python",
        "pandas",
        "torchvision>=0.4.2",
        "pillow",
        # NOTE(review): "sklearn" is the deprecated PyPI alias for
        # "scikit-learn"; consider migrating, kept as-is for compatibility.
        "sklearn",
        "tensorboard",
        "fairscale",
    ],
    # moviepy is only needed to export visualization videos to Tensorboard.
    extras_require={"tensorboard_video_visualization": ["moviepy"]},
    packages=find_packages(exclude=("configs", "tests")),
)
STTS | STTS-main/MViT/tools/visualization.py | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
import numpy as np
import pickle
import torch
import tqdm
import slowfast.datasets.utils as data_utils
import slowfast.utils.checkpoint as cu
import slowfast.utils.distributed as du
import slowfast.utils.logging as logging
import slowfast.utils.misc as misc
import slowfast.visualization.tensorboard_vis as tb
from slowfast.datasets import loader
from slowfast.models import build_model
from slowfast.utils.env import pathmgr
from slowfast.visualization.gradcam_utils import GradCAM
from slowfast.visualization.prediction_vis import WrongPredictionVis
from slowfast.visualization.utils import (
GetWeightAndActivation,
process_layer_index_data,
)
from slowfast.visualization.video_visualizer import VideoVisualizer
logger = logging.get_logger(__name__)
def run_visualization(vis_loader, model, cfg, writer=None):
    """
    Run model visualization (weights, activations and model inputs) and visualize
    them on Tensorboard.
    Args:
        vis_loader (loader): video visualization loader.
        model (model): the video model to visualize.
        cfg (CfgNode): configs. Details can be found in
            slowfast/config/defaults.py
        writer (TensorboardWriter, optional): TensorboardWriter object
            to writer Tensorboard log.
    """
    n_devices = cfg.NUM_GPUS * cfg.NUM_SHARDS
    # Multi-device models are wrapped (DDP/DataParallel), which prefixes every
    # parameter/module name with "module.".
    prefix = "module/" if n_devices > 1 else ""
    # Get a list of selected layer names and indexing.
    layer_ls, indexing_dict = process_layer_index_data(
        cfg.TENSORBOARD.MODEL_VIS.LAYER_LIST, layer_name_prefix=prefix
    )
    logger.info("Start Model Visualization.")
    # Register hooks for activations.
    model_vis = GetWeightAndActivation(model, layer_ls)
    if writer is not None and cfg.TENSORBOARD.MODEL_VIS.MODEL_WEIGHTS:
        layer_weights = model_vis.get_weights()
        writer.plot_weights_and_activations(
            layer_weights, tag="Layer Weights/", heat_map=False
        )
    video_vis = VideoVisualizer(
        cfg.MODEL.NUM_CLASSES,
        cfg.TENSORBOARD.CLASS_NAMES_PATH,
        cfg.TENSORBOARD.MODEL_VIS.TOPK_PREDS,
        cfg.TENSORBOARD.MODEL_VIS.COLORMAP,
    )
    # Apply the same "module/" prefixing to the Grad-CAM target layers.
    if n_devices > 1:
        grad_cam_layer_ls = [
            "module/" + layer
            for layer in cfg.TENSORBOARD.MODEL_VIS.GRAD_CAM.LAYER_LIST
        ]
    else:
        grad_cam_layer_ls = cfg.TENSORBOARD.MODEL_VIS.GRAD_CAM.LAYER_LIST
    # `gradcam` only exists when enabled; every later use is guarded by the
    # same cfg flag.
    if cfg.TENSORBOARD.MODEL_VIS.GRAD_CAM.ENABLE:
        gradcam = GradCAM(
            model,
            target_layers=grad_cam_layer_ls,
            data_mean=cfg.DATA.MEAN,
            data_std=cfg.DATA.STD,
            colormap=cfg.TENSORBOARD.MODEL_VIS.GRAD_CAM.COLORMAP,
        )
    logger.info("Finish drawing weights.")
    # Running index of the video across the whole loader (all devices).
    global_idx = -1
    for inputs, labels, _, meta in tqdm.tqdm(vis_loader):
        if cfg.NUM_GPUS:
            # Transfer the data to the current GPU device.
            if isinstance(inputs, (list,)):
                for i in range(len(inputs)):
                    inputs[i] = inputs[i].cuda(non_blocking=True)
            else:
                inputs = inputs.cuda(non_blocking=True)
            labels = labels.cuda()
            for key, val in meta.items():
                if isinstance(val, (list,)):
                    for i in range(len(val)):
                        val[i] = val[i].cuda(non_blocking=True)
                else:
                    meta[key] = val.cuda(non_blocking=True)
        if cfg.DETECTION.ENABLE:
            activations, preds = model_vis.get_activations(
                inputs, meta["boxes"]
            )
        else:
            activations, preds = model_vis.get_activations(inputs)
        if cfg.TENSORBOARD.MODEL_VIS.GRAD_CAM.ENABLE:
            if cfg.TENSORBOARD.MODEL_VIS.GRAD_CAM.USE_TRUE_LABEL:
                inputs, preds = gradcam(inputs, labels=labels)
            else:
                inputs, preds = gradcam(inputs)
        if cfg.NUM_GPUS:
            # Collect per-device results onto the CPU; each of the resulting
            # lists has one entry per device.
            inputs = du.all_gather_unaligned(inputs)
            activations = du.all_gather_unaligned(activations)
            preds = du.all_gather_unaligned(preds)
            if isinstance(inputs[0], list):
                for i in range(len(inputs)):
                    for j in range(len(inputs[0])):
                        inputs[i][j] = inputs[i][j].cpu()
            else:
                inputs = [inp.cpu() for inp in inputs]
            preds = [pred.cpu() for pred in preds]
        else:
            # CPU-only: wrap in 1-element lists to match the gathered layout.
            inputs, activations, preds = [inputs], [activations], [preds]
        boxes = [None] * max(n_devices, 1)
        if cfg.DETECTION.ENABLE and cfg.NUM_GPUS:
            boxes = du.all_gather_unaligned(meta["boxes"])
            boxes = [box.cpu() for box in boxes]
        if writer is not None:
            total_vids = 0
            for i in range(max(n_devices, 1)):
                cur_input = inputs[i]
                cur_activations = activations[i]
                cur_batch_size = cur_input[0].shape[0]
                cur_preds = preds[i]
                cur_boxes = boxes[i]
                for cur_batch_idx in range(cur_batch_size):
                    global_idx += 1
                    total_vids += 1
                    if (
                        cfg.TENSORBOARD.MODEL_VIS.INPUT_VIDEO
                        or cfg.TENSORBOARD.MODEL_VIS.GRAD_CAM.ENABLE
                    ):
                        for path_idx, input_pathway in enumerate(cur_input):
                            # AVA frames may be stored BGR; reorder channels.
                            if cfg.TEST.DATASET == "ava" and cfg.AVA.BGR:
                                video = input_pathway[
                                    cur_batch_idx, [2, 1, 0], ...
                                ]
                            else:
                                video = input_pathway[cur_batch_idx]
                            if not cfg.TENSORBOARD.MODEL_VIS.GRAD_CAM.ENABLE:
                                # Permute to (T, H, W, C) from (C, T, H, W).
                                video = video.permute(1, 2, 3, 0)
                                video = data_utils.revert_tensor_normalize(
                                    video, cfg.DATA.MEAN, cfg.DATA.STD
                                )
                            else:
                                # Grad-CAM already de-normalized; only
                                # permute from (T, C, H, W) to (T, H, W, C).
                                video = video.permute(0, 2, 3, 1)
                            # Drop the leading device/batch column of the box
                            # tensor, keeping the (x1, y1, x2, y2) columns.
                            bboxes = (
                                None if cur_boxes is None else cur_boxes[:, 1:]
                            )
                            cur_prediction = (
                                cur_preds
                                if cfg.DETECTION.ENABLE
                                else cur_preds[cur_batch_idx]
                            )
                            video = video_vis.draw_clip(
                                video, cur_prediction, bboxes=bboxes
                            )
                            # Back to (1, T, C, H, W) as expected by
                            # writer.add_video.
                            video = (
                                torch.from_numpy(np.array(video))
                                .permute(0, 3, 1, 2)
                                .unsqueeze(0)
                            )
                            writer.add_video(
                                video,
                                tag="Input {}/Pathway {}".format(
                                    global_idx, path_idx + 1
                                ),
                            )
                    if cfg.TENSORBOARD.MODEL_VIS.ACTIVATIONS:
                        writer.plot_weights_and_activations(
                            cur_activations,
                            tag="Input {}/Activations: ".format(global_idx),
                            batch_idx=cur_batch_idx,
                            indexing_dict=indexing_dict,
                        )
def perform_wrong_prediction_vis(vis_loader, model, cfg):
    """
    Visualize video inputs with wrong predictions on Tensorboard.
    Args:
        vis_loader (loader): video visualization loader.
        model (model): the video model to visualize.
        cfg (CfgNode): configs. Details can be found in
            slowfast/config/defaults.py
    """
    wrong_prediction_visualizer = WrongPredictionVis(cfg=cfg)
    for batch_idx, (inputs, labels, _, _) in tqdm.tqdm(enumerate(vis_loader)):
        if cfg.NUM_GPUS:
            # Transfer the data to the current GPU device.
            if isinstance(inputs, (list,)):
                for i in range(len(inputs)):
                    inputs[i] = inputs[i].cuda(non_blocking=True)
            else:
                inputs = inputs.cuda(non_blocking=True)
            labels = labels.cuda()
        # Some models modify the original input in place, so keep a clone of
        # the pre-forward tensors for visualization.
        inputs_clone = [inp.clone() for inp in inputs]
        preds = model(inputs)
        if cfg.NUM_GPUS > 1:
            # Gather predictions/labels/inputs from all GPUs.
            preds, labels = du.all_gather([preds, labels])
            if isinstance(inputs_clone, (list,)):
                inputs_clone = du.all_gather(inputs_clone)
            else:
                inputs_clone = du.all_gather([inputs_clone])[0]
        if cfg.NUM_GPUS:
            # Transfer the data to the current CPU device.
            labels = labels.cpu()
            preds = preds.cpu()
            if isinstance(inputs_clone, (list,)):
                for i in range(len(inputs_clone)):
                    inputs_clone[i] = inputs_clone[i].cpu()
            else:
                inputs_clone = inputs_clone.cpu()
        # If using CPU (NUM_GPUS = 0), 1 represent 1 CPU.
        n_devices = max(cfg.NUM_GPUS, 1)
        # NOTE(review): device_idx * batch_idx collapses to 0 for the first
        # batch on every device — looks like a questionable unique-id scheme;
        # confirm against WrongPredictionVis.visualize_vid's expectations.
        for device_idx in range(1, n_devices + 1):
            wrong_prediction_visualizer.visualize_vid(
                video_input=inputs_clone,
                labels=labels,
                preds=preds.detach().clone(),
                batch_idx=device_idx * batch_idx,
            )
    logger.info(
        "Class indices with wrong predictions: {}".format(
            sorted(wrong_prediction_visualizer.wrong_class_prediction)
        )
    )
    # Flush/teardown the visualizer's resources.
    wrong_prediction_visualizer.clean()
def visualize(cfg):
    """
    Perform layer weights and activations visualization on the model.

    No-op unless TENSORBOARD.ENABLE and at least one of MODEL_VIS.ENABLE /
    WRONG_PRED_VIS.ENABLE is set.
    Args:
        cfg (CfgNode): configs. Details can be found in
            slowfast/config/defaults.py
    """
    if cfg.TENSORBOARD.ENABLE and (
        cfg.TENSORBOARD.MODEL_VIS.ENABLE
        or cfg.TENSORBOARD.WRONG_PRED_VIS.ENABLE
    ):
        # Set up environment.
        du.init_distributed_training(cfg)
        # Set random seed from configs.
        np.random.seed(cfg.RNG_SEED)
        torch.manual_seed(cfg.RNG_SEED)
        # Setup logging format.
        logging.setup_logging(cfg.OUTPUT_DIR)
        # Print config.
        logger.info("Model Visualization with config:")
        logger.info(cfg)
        # Build the video model and print model statistics.
        model = build_model(cfg)
        model.eval()
        if du.is_master_proc() and cfg.LOG_MODEL_INFO:
            misc.log_model_info(model, cfg, use_train_input=False)
        cu.load_test_checkpoint(cfg, model)
        # Create video testing loaders.
        vis_loader = loader.construct_loader(cfg, "test")
        if cfg.DETECTION.ENABLE:
            assert cfg.NUM_GPUS == cfg.TEST.BATCH_SIZE or cfg.NUM_GPUS == 0
        # Set up writer for logging to Tensorboard format; only the master
        # process writes, all others get writer=None.
        if du.is_master_proc(cfg.NUM_GPUS * cfg.NUM_SHARDS):
            writer = tb.TensorboardWriter(cfg)
        else:
            writer = None
        if cfg.TENSORBOARD.PREDICTIONS_PATH != "":
            assert not cfg.DETECTION.ENABLE, "Detection is not supported."
            logger.info(
                "Visualizing class-level performance from saved results..."
            )
            if writer is not None:
                # Saved results were pickled by Python 2 tooling, hence the
                # latin1 encoding on load.
                with pathmgr.open(cfg.TENSORBOARD.PREDICTIONS_PATH, "rb") as f:
                    preds, labels = pickle.load(f, encoding="latin1")
                    writer.plot_eval(preds, labels)
        if cfg.TENSORBOARD.MODEL_VIS.ENABLE:
            if cfg.TENSORBOARD.MODEL_VIS.GRAD_CAM.ENABLE:
                assert (
                    not cfg.DETECTION.ENABLE
                ), "Detection task is currently not supported for Grad-CAM visualization."
                # Grad-CAM needs exactly one target layer per pathway.
                if cfg.MODEL.ARCH in cfg.MODEL.SINGLE_PATHWAY_ARCH:
                    assert (
                        len(cfg.TENSORBOARD.MODEL_VIS.GRAD_CAM.LAYER_LIST) == 1
                    ), "The number of chosen CNN layers must be equal to the number of pathway(s), given {} layer(s).".format(
                        len(cfg.TENSORBOARD.MODEL_VIS.GRAD_CAM.LAYER_LIST)
                    )
                elif cfg.MODEL.ARCH in cfg.MODEL.MULTI_PATHWAY_ARCH:
                    assert (
                        len(cfg.TENSORBOARD.MODEL_VIS.GRAD_CAM.LAYER_LIST) == 2
                    ), "The number of chosen CNN layers must be equal to the number of pathway(s), given {} layer(s).".format(
                        len(cfg.TENSORBOARD.MODEL_VIS.GRAD_CAM.LAYER_LIST)
                    )
                else:
                    raise NotImplementedError(
                        "Model arch {} is not in {}".format(
                            cfg.MODEL.ARCH,
                            cfg.MODEL.SINGLE_PATHWAY_ARCH
                            + cfg.MODEL.MULTI_PATHWAY_ARCH,
                        )
                    )
            logger.info(
                "Visualize model analysis for {} iterations".format(
                    len(vis_loader)
                )
            )
            # Run visualization on the model
            run_visualization(vis_loader, model, cfg, writer)
        if cfg.TENSORBOARD.WRONG_PRED_VIS.ENABLE:
            logger.info(
                "Visualize Wrong Predictions for {} iterations".format(
                    len(vis_loader)
                )
            )
            perform_wrong_prediction_vis(vis_loader, model, cfg)
        if writer is not None:
            writer.close()
| 14,000 | 39.465318 | 126 | py |
STTS | STTS-main/MViT/tools/test_net.py | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
"""Multi-view test a video classification model."""
import numpy as np
import os
import pickle
import torch
import torch.nn.functional as F
from einops import rearrange
import slowfast.utils.checkpoint as cu
import slowfast.utils.distributed as du
import slowfast.utils.logging as logging
import slowfast.utils.misc as misc
import slowfast.visualization.tensorboard_vis as tb
from slowfast.datasets import loader
from slowfast.models import build_model
from slowfast.utils.env import pathmgr
from slowfast.utils.meters import AVAMeter, TestMeter
logger = logging.get_logger(__name__)
@torch.no_grad()
def perform_test(test_loader, model, test_meter, cfg, writer=None):
    """
    For classification:
    Perform multi-view testing that uniformly samples N clips from a video along
    its temporal axis. For each clip, it takes 3 crops to cover the spatial
    dimension, followed by averaging the softmax scores across all Nx3 views to
    form a video-level prediction. All video predictions are compared to
    ground-truth labels and the final testing performance is logged.
    For detection:
    Perform fully-convolutional testing on the full frames without crop.
    Args:
        test_loader (loader): video testing loader.
        model (model): the pretrained video model to test.
        test_meter (TestMeter): testing meters to log and ensemble the testing
            results.
        cfg (CfgNode): configs. Details can be found in
            slowfast/config/defaults.py
        writer (TensorboardWriter object, optional): TensorboardWriter object
            to writer Tensorboard log.
    Returns:
        test_meter with finalized metrics.
    """
    # Enable eval mode.
    model.eval()
    test_meter.iter_tic()
    for cur_iter, (inputs, labels, video_idx, meta) in enumerate(test_loader):
        if cfg.NUM_GPUS:
            # Transfer the data to the current GPU device.
            if isinstance(inputs, (list,)):
                for i in range(len(inputs)):
                    inputs[i] = inputs[i].cuda(non_blocking=True)
            else:
                inputs = inputs.cuda(non_blocking=True)
            # Labels, per-video indices and metadata go to GPU as well.
            labels = labels.cuda()
            video_idx = video_idx.cuda()
            for key, val in meta.items():
                if isinstance(val, (list,)):
                    for i in range(len(val)):
                        val[i] = val[i].cuda(non_blocking=True)
                else:
                    meta[key] = val.cuda(non_blocking=True)
        test_meter.data_toc()
        if cfg.DETECTION.ENABLE:
            # Compute the predictions.
            preds = model(inputs, meta["boxes"])
            ori_boxes = meta["ori_boxes"]
            metadata = meta["metadata"]
            preds = preds.detach().cpu() if cfg.NUM_GPUS else preds.detach()
            ori_boxes = (
                ori_boxes.detach().cpu() if cfg.NUM_GPUS else ori_boxes.detach()
            )
            metadata = (
                metadata.detach().cpu() if cfg.NUM_GPUS else metadata.detach()
            )
            if cfg.NUM_GPUS > 1:
                # Concatenate the per-GPU results along the batch dimension.
                preds = torch.cat(du.all_gather_unaligned(preds), dim=0)
                ori_boxes = torch.cat(du.all_gather_unaligned(ori_boxes), dim=0)
                metadata = torch.cat(du.all_gather_unaligned(metadata), dim=0)
            test_meter.iter_toc()
            # Update and log stats.
            test_meter.update_stats(preds, ori_boxes, metadata)
            test_meter.log_iter_stats(None, cur_iter)
        else:
            # Perform the forward pass.
            preds = model(inputs)
            # Gather all the predictions across all the devices to perform ensemble.
            if cfg.NUM_GPUS > 1:
                preds, labels, video_idx = du.all_gather(
                    [preds, labels, video_idx]
                )
            if cfg.NUM_GPUS:
                preds = preds.cpu()
                labels = labels.cpu()
                video_idx = video_idx.cpu()
            test_meter.iter_toc()
            # Update and log stats; video_idx lets the meter accumulate the
            # per-view scores into video-level predictions.
            test_meter.update_stats(
                preds.detach(), labels.detach(), video_idx.detach()
            )
            test_meter.log_iter_stats(cur_iter)
        test_meter.iter_tic()
    # Log epoch stats and print the final testing results.
    if not cfg.DETECTION.ENABLE:
        all_preds = test_meter.video_preds.clone().detach()
        all_labels = test_meter.video_labels
        if cfg.NUM_GPUS:
            all_preds = all_preds.cpu()
            all_labels = all_labels.cpu()
        if writer is not None:
            writer.plot_eval(preds=all_preds, labels=all_labels)
        if cfg.TEST.SAVE_RESULTS_PATH != "":
            save_path = os.path.join(cfg.OUTPUT_DIR, cfg.TEST.SAVE_RESULTS_PATH)
            # Only the root process writes the pickle to avoid clobbering.
            if du.is_root_proc():
                with pathmgr.open(save_path, "wb") as f:
                    pickle.dump([all_preds, all_labels], f)
            logger.info(
                "Successfully saved prediction results to {}".format(save_path)
            )
    test_meter.finalize_metrics()
    return test_meter
def test(cfg):
    """
    Perform multi-view testing on the pretrained video model.
    Args:
        cfg (CfgNode): configs. Details can be found in
            slowfast/config/defaults.py
    """
    # Set up environment.
    du.init_distributed_training(cfg)
    # Set random seed from configs.
    np.random.seed(cfg.RNG_SEED)
    torch.manual_seed(cfg.RNG_SEED)
    # Setup logging format.
    logging.setup_logging(cfg.OUTPUT_DIR)
    # Build the video model and print model statistics.
    model = build_model(cfg)
    if du.is_master_proc() and cfg.LOG_MODEL_INFO:
        misc.log_model_info(model, cfg, use_train_input=False)
    cu.load_test_checkpoint(cfg, model)
    # Create video testing loaders.
    test_loader = loader.construct_loader(cfg, "test")
    logger.info("Testing model for {} iterations".format(len(test_loader)))
    if cfg.DETECTION.ENABLE:
        assert cfg.NUM_GPUS == cfg.TEST.BATCH_SIZE or cfg.NUM_GPUS == 0
        test_meter = AVAMeter(len(test_loader), cfg, mode="test")
    else:
        # The dataset must divide evenly into (views x crops) groups so every
        # video contributes the same number of clips to the ensemble.
        assert (
            test_loader.dataset.num_videos
            % (cfg.TEST.NUM_ENSEMBLE_VIEWS * cfg.TEST.NUM_SPATIAL_CROPS)
            == 0
        )
        # Create meters for multi-view testing.
        test_meter = TestMeter(
            test_loader.dataset.num_videos
            // (cfg.TEST.NUM_ENSEMBLE_VIEWS * cfg.TEST.NUM_SPATIAL_CROPS),
            cfg.TEST.NUM_ENSEMBLE_VIEWS * cfg.TEST.NUM_SPATIAL_CROPS,
            cfg.MODEL.NUM_CLASSES,
            len(test_loader),
            cfg.DATA.MULTI_LABEL,
            cfg.DATA.ENSEMBLE_METHOD,
        )
    # Set up writer for logging to Tensorboard format (master process only).
    if cfg.TENSORBOARD.ENABLE and du.is_master_proc(
        cfg.NUM_GPUS * cfg.NUM_SHARDS
    ):
        writer = tb.TensorboardWriter(cfg)
    else:
        writer = None
    # Perform multi-view test on the entire dataset.
    test_meter = perform_test(test_loader, model, test_meter, cfg, writer)
    if writer is not None:
        writer.close()
| 7,234 | 34.816832 | 84 | py |
STTS | STTS-main/MViT/tools/demo_net.py | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
import numpy as np
import time
import torch
import tqdm
from slowfast.utils import logging
from slowfast.visualization.async_predictor import AsyncDemo, AsyncVis
from slowfast.visualization.ava_demo_precomputed_boxes import (
AVAVisualizerWithPrecomputedBox,
)
from slowfast.visualization.demo_loader import ThreadVideoManager, VideoManager
from slowfast.visualization.predictor import ActionPredictor
from slowfast.visualization.video_visualizer import VideoVisualizer
logger = logging.get_logger(__name__)
def run_demo(cfg, frame_provider):
    """
    Run demo visualization. Generator: yields finished task objects as the
    (possibly asynchronous) predictor completes them.
    Args:
        cfg (CfgNode): configs. Details can be found in
            slowfast/config/defaults.py
        frame_provider (iterator): Python iterator that return task objects that are filled
            with necessary information such as `frames`, `id` and `num_buffer_frames` for the
            prediction and visualization pipeline.
    Yields:
        task objects with predictions/visualizations attached.
    """
    # Set random seed from configs.
    np.random.seed(cfg.RNG_SEED)
    torch.manual_seed(cfg.RNG_SEED)
    # Setup logging format.
    logging.setup_logging(cfg.OUTPUT_DIR)
    # Print config.
    logger.info("Run demo with config:")
    logger.info(cfg)
    # Class names are only available when a label file is configured.
    common_classes = (
        cfg.DEMO.COMMON_CLASS_NAMES
        if len(cfg.DEMO.LABEL_FILE_PATH) != 0
        else None
    )
    video_vis = VideoVisualizer(
        num_classes=cfg.MODEL.NUM_CLASSES,
        class_names_path=cfg.DEMO.LABEL_FILE_PATH,
        top_k=cfg.TENSORBOARD.MODEL_VIS.TOPK_PREDS,
        thres=cfg.DEMO.COMMON_CLASS_THRES,
        lower_thres=cfg.DEMO.UNCOMMON_CLASS_THRES,
        common_class_names=common_classes,
        colormap=cfg.TENSORBOARD.MODEL_VIS.COLORMAP,
        mode=cfg.DEMO.VIS_MODE,
    )
    async_vis = AsyncVis(video_vis, n_workers=cfg.DEMO.NUM_VIS_INSTANCES)
    # Single-GPU runs use the synchronous predictor; multi-GPU runs use the
    # asynchronous multi-process demo predictor.
    if cfg.NUM_GPUS <= 1:
        model = ActionPredictor(cfg=cfg, async_vis=async_vis)
    else:
        model = AsyncDemo(cfg=cfg, async_vis=async_vis)
    seq_len = cfg.DATA.NUM_FRAMES * cfg.DATA.SAMPLING_RATE
    assert (
        cfg.DEMO.BUFFER_SIZE <= seq_len // 2
    ), "Buffer size cannot be greater than half of sequence length."
    # Number of tasks submitted but not yet retrieved from the predictor.
    num_task = 0
    # Start reading frames.
    frame_provider.start()
    for able_to_read, task in frame_provider:
        if not able_to_read:
            break
        if task is None:
            # No complete clip buffered yet; back off briefly.
            time.sleep(0.02)
            continue
        num_task += 1
        model.put(task)
        try:
            # get() raises IndexError while no result is ready yet.
            task = model.get()
            num_task -= 1
            yield task
        except IndexError:
            continue
    # Drain the remaining in-flight tasks after the provider is exhausted.
    while num_task != 0:
        try:
            task = model.get()
            num_task -= 1
            yield task
        except IndexError:
            continue
def demo(cfg):
    """
    Run inference on an input video or stream from webcam.
    Args:
        cfg (CfgNode): configs. Details can be found in
            slowfast/config/defaults.py
    """
    # AVA format-specific visualization with precomputed boxes.
    if cfg.DETECTION.ENABLE and cfg.DEMO.PREDS_BOXES != "":
        precomputed_box_vis = AVAVisualizerWithPrecomputedBox(cfg)
        precomputed_box_vis()
    else:
        start = time.time()
        # Threaded manager decodes frames off the main thread.
        if cfg.DEMO.THREAD_ENABLE:
            frame_provider = ThreadVideoManager(cfg)
        else:
            frame_provider = VideoManager(cfg)
        # Display each finished task as the run_demo generator yields it.
        for task in tqdm.tqdm(run_demo(cfg, frame_provider)):
            frame_provider.display(task)
        frame_provider.join()
        frame_provider.clean()
        logger.info("Finish demo in: {}".format(time.time() - start))
| 3,683 | 29.7 | 93 | py |
STTS | STTS-main/MViT/tools/submit.py | import argparse
import os
from pathlib import Path
import submitit
import torch
from slowfast.utils.misc import launch_job
from slowfast.utils.parser import load_config
from run_net import get_func
def parse_args():
    """Parse command-line options for submitting a training job via submitit.

    Returns:
        argparse.Namespace: parsed options. The trailing positional `opts`
        collects any remaining "KEY VALUE" config overrides
        (see lib/config/defaults.py).
    """
    parser = argparse.ArgumentParser(
        "Submitit for onestage training", add_help=False
    )
    parser.add_argument(
        "--num_gpus",
        help="Number of GPUs",
        default=8,
        type=int,
    )
    parser.add_argument(
        "--num_shards",
        help="Number of Nodes",
        default=1,
        type=int,
    )
    parser.add_argument(
        "--partition", default="learnfair", type=str, help="Partition where to submit"
    )
    parser.add_argument("--timeout", default=60 * 72, type=int, help="Duration of the job")
    parser.add_argument("--cfg", dest="cfg_file", help="Path to the config file",
                        default="configs/test_R50_8GPU.yaml", type=str)
    parser.add_argument(
        "--job_dir", default="", type=str, help="Job dir. Leave empty for automatic."
    )
    # FIX: help text was copy-pasted from --job_dir ("Job dir. ...");
    # this flag actually sets the job *name*.
    parser.add_argument(
        "--name", default="", type=str, help="Job name. Leave empty for automatic."
    )
    parser.add_argument(
        "--resume-from",
        default="",
        type=str,
        help=(
            "Weights to resume from (.*pth file) or a file (last_checkpoint) that contains "
            + "weight file name from the same directory"
        ),
    )
    parser.add_argument("--resume-job", default="", type=str, help="resume training from the job")
    parser.add_argument("--use_volta32", action='store_true', help="Big models? Use this")
    parser.add_argument("--postfix", default="experiment", type=str, help="Postfix of the jobs")
    parser.add_argument("--mail", default="", type=str,
                        help="Email this user when the job finishes if specified")
    parser.add_argument('--comment', default="", type=str,
                        help='Comment to pass to scheduler, e.g. priority message')
    # REMAINDER: everything after the known flags is passed through untouched.
    parser.add_argument(
        "opts",
        help="See lib/config/defaults.py for all options",
        default=None,
        nargs=argparse.REMAINDER,
    )
    return parser.parse_args()
def get_shared_folder() -> Path:
    """Return (creating if needed) the per-user shared experiment folder.

    Raises:
        RuntimeError: if the cluster checkpoint root is not mounted.
    """
    if not Path("/checkpoint/").is_dir():
        raise RuntimeError("No shared folder available")
    user = os.getenv("USER")
    shared = Path(f"/checkpoint/{user}/experiments/TopkMVIT")
    shared.mkdir(exist_ok=True)
    return shared
def launch(shard_id, num_shards, cfg, init_method):
    """Run train and/or test for this shard of a (possibly multi-node) job.

    Args:
        shard_id (int): rank of this node among the shards.
        num_shards (int): total number of nodes.
        cfg: config node; SHARD_ID/NUM_SHARDS are overwritten in place.
        init_method (str): torch.distributed init URL (e.g. "tcp://host:port").
    """
    os.environ["NCCL_MIN_NRINGS"] = "8"
    print ("Pytorch version: ", torch.__version__)
    cfg.SHARD_ID = shard_id
    cfg.NUM_SHARDS = num_shards
    print([
        shard_id, num_shards, cfg
    ])
    # get_func returns the (train, test) entry points for this config.
    train, test = get_func(cfg)
    # Launch job.
    if cfg.TRAIN.ENABLE:
        launch_job(cfg=cfg, init_method=init_method, func=train)
    if cfg.TEST.ENABLE:
        launch_job(cfg=cfg, init_method=init_method, func=test)
class Trainer(object):
    """Picklable callable submitted to submitit.

    __call__ runs on the allocated node; checkpoint() implements submitit's
    requeue-on-preemption protocol.
    """
    def __init__(self, args):
        # Parsed command-line namespace from parse_args().
        self.args = args
    def __call__(self):
        # Bind GLOO/NCCL to the interface that carries the default route.
        socket_name = os.popen("ip r | grep default | awk '{print $5}'").read().strip('\n')
        print("Setting GLOO and NCCL sockets IFNAME to: {}".format(socket_name))
        os.environ["GLOO_SOCKET_IFNAME"] = socket_name
        # NOTE: unclear whether NCCL actually honors this here; kept for safety.
        os.environ["NCCL_SOCKET_IFNAME"] = socket_name
        # Rank-0 host of the SLURM allocation serves as the rendezvous node.
        hostname_first_node = os.popen(
            "scontrol show hostnames $SLURM_JOB_NODELIST"
        ).read().split("\n")[0]
        dist_url = "tcp://{}:12399".format(hostname_first_node)
        print("We will use the following dist url: {}".format(dist_url))
        self._setup_gpu_args()
        results = launch(
            shard_id=self.args.machine_rank,
            num_shards=self.args.num_shards,
            cfg=load_config(self.args),
            init_method=dist_url,
        )
        return results
    def checkpoint(self):
        # Called by submitit on preemption/timeout: resubmit a fresh copy of
        # this trainer, remembering the SLURM job id to resume from.
        import submitit
        job_env = submitit.JobEnvironment()
        slurm_job_id = job_env.job_id
        if self.args.resume_job == "":
            self.args.resume_job = slurm_job_id
        print("Requeuing ", self.args)
        empty_trainer = type(self)(self.args)
        return submitit.helpers.DelayedSubmission(empty_trainer)
    def _setup_gpu_args(self):
        # Resolve the "%j" placeholder in output_dir to the actual job id and
        # record this process's global rank for launch().
        import submitit
        job_env = submitit.JobEnvironment()
        self.args.output_dir = str(Path(str(self.args.output_dir).replace("%j", str(job_env.job_id))))
        print(self.args)
        self.args.machine_rank = job_env.global_rank
        print(f"Process rank: {job_env.global_rank}")
def main():
    """Build the submitit executor from the CLI options and submit the job."""
    args = parse_args()
    # Default job name: "<config-basename>_<postfix>".
    if args.name == "":
        cfg_name = os.path.splitext(os.path.basename(args.cfg_file))[0]
        args.name = '_'.join([cfg_name, args.postfix])
    if args.job_dir == "":
        args.job_dir = get_shared_folder() / "%j"
    # Note that the folder will depend on the job_id, to easily track experiments
    #executor = submitit.AutoExecutor(folder=Path(args.job_dir) / "%j", slurm_max_num_timeout=30)
    executor = submitit.AutoExecutor(folder=args.job_dir, slurm_max_num_timeout=30)
    # cluster setup is defined by environment variables
    num_gpus_per_node = args.num_gpus
    nodes = args.num_shards
    partition = args.partition
    timeout_min = args.timeout
    # Optional SLURM extras, only forwarded when requested.
    kwargs = {}
    if args.use_volta32:
        kwargs['slurm_constraint'] = 'volta32gb,ib4'
    if args.comment:
        kwargs['slurm_comment'] = args.comment
    executor.update_parameters(
        mem_gb=60 * num_gpus_per_node,
        gpus_per_node=num_gpus_per_node,
        tasks_per_node=1,
        cpus_per_task=10 * num_gpus_per_node,
        nodes=nodes,
        timeout_min=timeout_min,  # max is 60 * 72
        slurm_partition=partition,
        slurm_signal_delay_s=120,
        **kwargs
    )
    print(args.name)
    executor.update_parameters(name=args.name)
    args.output_dir = args.job_dir
    trainer = Trainer(args)
    job = executor.submit(trainer)
    print("Submitted job_id:", job.job_id)
if __name__ == "__main__":
    main()
| 6,114 | 30.040609 | 102 | py |
STTS | STTS-main/MViT/tools/train_net.py | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
"""Train a video classification model."""
import numpy as np
import torch
from fvcore.nn.precise_bn import get_bn_modules, update_bn_stats
import copy
import slowfast.models.losses as losses
import slowfast.models.optimizer as optim
import slowfast.utils.checkpoint as cu
import slowfast.utils.distributed as du
import slowfast.utils.logging as logging
import slowfast.utils.metrics as metrics
import slowfast.utils.misc as misc
import slowfast.visualization.tensorboard_vis as tb
from slowfast.datasets import loader
from slowfast.datasets.mixup import MixUp
from slowfast.models import build_model
from slowfast.utils.meters import AVAMeter, EpochTimer, TrainMeter, ValMeter
from slowfast.utils.multigrid import MultigridSchedule
logger = logging.get_logger(__name__)
def train_epoch(
    train_loader,
    model,
    loss_fun,
    optimizer,
    scaler,
    train_meter,
    cur_epoch,
    total_epochs,
    cfg,
    writer=None,
):
    """
    Perform the video training for one epoch.
    Args:
        train_loader (loader): video training loader.
        model (model): the video model to train.
        loss_fun (callable): loss function applied to (preds, labels).
        optimizer (optim): the optimizer to perform optimization on the model's
            parameters.
        scaler (GradScaler): gradient scaler used for mixed-precision training;
            a no-op when cfg.TRAIN.MIXED_PRECISION is False.
        train_meter (TrainMeter): training meters to log the training performance.
        cur_epoch (int): current epoch of training.
        total_epochs (int): total number of training epochs; used to derive the
            global step budget for sigma decay.
        cfg (CfgNode): configs. Details can be found in
            slowfast/config/defaults.py
        writer (TensorboardWriter, optional): TensorboardWriter object
            to writer Tensorboard log.
    """
    # Enable train mode.
    model.train()
    train_meter.iter_tic()
    data_size = len(train_loader)
    # Unwrap DistributedDataParallel (if wrapped) so model-specific hooks
    # like update_sigma can be called directly.
    if hasattr(model, "module"):
        model_noddp = model.module
    else:
        model_noddp = model
    # Global number of optimization steps over the full training run.
    total_steps = data_size * total_epochs
    if cfg.MIXUP.ENABLE:
        mixup_fn = MixUp(
            mixup_alpha=cfg.MIXUP.ALPHA,
            cutmix_alpha=cfg.MIXUP.CUTMIX_ALPHA,
            mix_prob=cfg.MIXUP.PROB,
            switch_prob=cfg.MIXUP.SWITCH_PROB,
            label_smoothing=cfg.MIXUP.LABEL_SMOOTH_VALUE,
            num_classes=cfg.MODEL.NUM_CLASSES,
        )
    for cur_iter, (inputs, labels, _, meta) in enumerate(train_loader):
        # Transfer the data to the current GPU device.
        if cfg.NUM_GPUS:
            if isinstance(inputs, (list,)):
                for i in range(len(inputs)):
                    inputs[i] = inputs[i].cuda(non_blocking=True)
            else:
                inputs = inputs.cuda(non_blocking=True)
            labels = labels.cuda()
            for key, val in meta.items():
                if isinstance(val, (list,)):
                    for i in range(len(val)):
                        val[i] = val[i].cuda(non_blocking=True)
                else:
                    meta[key] = val.cuda(non_blocking=True)
        # Update the learning rate (per-iteration schedule, fractional epoch).
        lr = optim.get_epoch_lr(cur_epoch + float(cur_iter) / data_size, cfg)
        optim.set_lr(optimizer, lr, cfg)
        train_meter.data_toc()
        if cfg.MIXUP.ENABLE:
            # Mixup is applied to the first pathway only; labels become soft.
            samples, labels = mixup_fn(inputs[0], labels)
            inputs[0] = samples
        cur_step = cur_epoch * data_size + cur_iter
        # Decay the model's sigma parameter over the whole run, if supported.
        if hasattr(model_noddp, 'update_sigma') and cfg.MVIT.DECAY_SIGMA:
            model_noddp.update_sigma(cur_step, total_steps)
        # Forward pass (and loss) under autocast for mixed precision.
        with torch.cuda.amp.autocast(enabled=cfg.TRAIN.MIXED_PRECISION):
            if cfg.DETECTION.ENABLE:
                preds = model(inputs, meta["boxes"])
            else:
                preds = model(inputs)
            # Compute the loss.
            loss = loss_fun(preds, labels)
        # Check for NaN loss and abort early if found.
        misc.check_nan_losses(loss)
        # Perform the backward pass.
        optimizer.zero_grad()
        scaler.scale(loss).backward()
        # Unscales the gradients of optimizer's assigned params in-place
        # so that gradient clipping operates on true gradient magnitudes.
        scaler.unscale_(optimizer)
        # Clip gradients if necessary
        if cfg.SOLVER.CLIP_GRAD_VAL:
            torch.nn.utils.clip_grad_value_(
                model.parameters(), cfg.SOLVER.CLIP_GRAD_VAL
            )
        elif cfg.SOLVER.CLIP_GRAD_L2NORM:
            torch.nn.utils.clip_grad_norm_(
                model.parameters(), cfg.SOLVER.CLIP_GRAD_L2NORM
            )
        # Update the parameters.
        scaler.step(optimizer)
        scaler.update()
        if isinstance(preds, (tuple,)):
            preds = preds[0]
        if cfg.MIXUP.ENABLE:
            # Recover hard labels from the mixed soft labels: fold the
            # prediction mass of the second-largest label into the largest
            # so top-k errors can be computed against a single label.
            _top_max_k_vals, top_max_k_inds = torch.topk(
                labels, 2, dim=1, largest=True, sorted=True
            )
            idx_top1 = torch.arange(labels.shape[0]), top_max_k_inds[:, 0]
            idx_top2 = torch.arange(labels.shape[0]), top_max_k_inds[:, 1]
            preds = preds.detach()
            preds[idx_top1] += preds[idx_top2]
            preds[idx_top2] = 0.0
            labels = top_max_k_inds[:, 0]
        if cfg.DETECTION.ENABLE:
            # Detection only tracks the loss (no top-k accuracy).
            if cfg.NUM_GPUS > 1:
                loss = du.all_reduce([loss])[0]
            loss = loss.item()
            # Update and log stats.
            train_meter.update_stats(None, None, None, loss, lr)
            # write to tensorboard format if available.
            if writer is not None:
                writer.add_scalars(
                    {"Train/loss": loss, "Train/lr": lr},
                    global_step=data_size * cur_epoch + cur_iter,
                )
        else:
            top1_err, top5_err = None, None
            if cfg.DATA.MULTI_LABEL:
                # Gather all the predictions across all the devices.
                if cfg.NUM_GPUS > 1:
                    [loss] = du.all_reduce([loss])
                loss = loss.item()
            else:
                # Compute the errors.
                num_topks_correct = metrics.topks_correct(preds, labels, (1, 5))
                top1_err, top5_err = [
                    (1.0 - x / preds.size(0)) * 100.0 for x in num_topks_correct
                ]
                # Gather all the predictions across all the devices.
                if cfg.NUM_GPUS > 1:
                    loss, top1_err, top5_err = du.all_reduce(
                        [loss, top1_err, top5_err]
                    )
                # Copy the stats from GPU to CPU (sync point).
                loss, top1_err, top5_err = (
                    loss.item(),
                    top1_err.item(),
                    top5_err.item(),
                )
            # Update and log stats.
            train_meter.update_stats(
                top1_err,
                top5_err,
                loss,
                lr,
                inputs[0].size(0)
                * max(
                    cfg.NUM_GPUS, 1
                ),  # If running on CPU (cfg.NUM_GPUS == 1), use 1 to represent 1 CPU.
            )
            # write to tensorboard format if available.
            if writer is not None:
                writer.add_scalars(
                    {
                        "Train/loss": loss,
                        "Train/lr": lr,
                        "Train/Top1_err": top1_err,
                        "Train/Top5_err": top5_err,
                    },
                    global_step=data_size * cur_epoch + cur_iter,
                )
        train_meter.iter_toc()  # measure allreduce for this meter
        train_meter.log_iter_stats(cur_epoch, cur_iter)
        train_meter.iter_tic()
    # Log epoch stats.
    train_meter.log_epoch_stats(cur_epoch)
    train_meter.reset()
@torch.no_grad()
def eval_epoch(val_loader, model, val_meter, cur_epoch, cfg, writer=None):
    """
    Evaluate the model on the val set.
    Args:
        val_loader (loader): data loader to provide validation data.
        model (model): model to evaluate the performance.
        val_meter (ValMeter): meter instance to record and calculate the metrics.
        cur_epoch (int): number of the current epoch of training.
        cfg (CfgNode): configs. Details can be found in
            slowfast/config/defaults.py
        writer (TensorboardWriter, optional): TensorboardWriter object
            to writer Tensorboard log.
    """
    # Evaluation mode enabled. The running stats would not be updated.
    model.eval()
    val_meter.iter_tic()
    # Unwrap DistributedDataParallel (if wrapped) so model-specific hooks
    # like set_warm / update_scale_rate can be called directly.
    if hasattr(model, "module"):
        model_noddp = model.module
    else:
        model_noddp = model
    for cur_iter, (inputs, labels, _, meta) in enumerate(val_loader):
        if cfg.NUM_GPUS:
            # Transfer the data to the current GPU device.
            if isinstance(inputs, (list,)):
                for i in range(len(inputs)):
                    inputs[i] = inputs[i].cuda(non_blocking=True)
            else:
                inputs = inputs.cuda(non_blocking=True)
            labels = labels.cuda()
            for key, val in meta.items():
                if isinstance(val, (list,)):
                    for i in range(len(val)):
                        val[i] = val[i].cuda(non_blocking=True)
                else:
                    meta[key] = val.cuda(non_blocking=True)
        val_meter.data_toc()
        # NOTE(review): set_warm appears to toggle a score warm-up mode during
        # early epochs — confirm semantics against the model implementation.
        if hasattr(model_noddp, 'set_warm'):
            if cfg.MVIT.USE_WARMUP and (cur_epoch < cfg.MVIT.SCORE_WARMUP_EPOCH):
                model_noddp.set_warm(flag=True)
            else:
                model_noddp.set_warm(flag=False)
        # Evaluate at full scale rate when curriculum training is enabled.
        if hasattr(model_noddp, 'update_scale_rate') and cfg.MVIT.CURRICULUM:
            model_noddp.update_scale_rate(1.0)
        if cfg.DETECTION.ENABLE:
            # Compute the predictions.
            preds = model(inputs, meta["boxes"])
            ori_boxes = meta["ori_boxes"]
            metadata = meta["metadata"]
            if cfg.NUM_GPUS:
                preds = preds.cpu()
                ori_boxes = ori_boxes.cpu()
                metadata = metadata.cpu()
            # Gather detection outputs from all processes (variable sizes).
            if cfg.NUM_GPUS > 1:
                preds = torch.cat(du.all_gather_unaligned(preds), dim=0)
                ori_boxes = torch.cat(du.all_gather_unaligned(ori_boxes), dim=0)
                metadata = torch.cat(du.all_gather_unaligned(metadata), dim=0)
            val_meter.iter_toc()
            # Update and log stats.
            val_meter.update_stats(preds, ori_boxes, metadata)
        else:
            preds = model(inputs)
            if cfg.DATA.MULTI_LABEL:
                if cfg.NUM_GPUS > 1:
                    preds, labels = du.all_gather([preds, labels])
            else:
                # Compute the errors.
                num_topks_correct = metrics.topks_correct(preds, labels, (1, 5))
                # Combine the errors across the GPUs.
                top1_err, top5_err = [
                    (1.0 - x / preds.size(0)) * 100.0 for x in num_topks_correct
                ]
                if cfg.NUM_GPUS > 1:
                    top1_err, top5_err = du.all_reduce([top1_err, top5_err])
                # Copy the errors from GPU to CPU (sync point).
                top1_err, top5_err = top1_err.item(), top5_err.item()
                val_meter.iter_toc()
                # Update and log stats.
                val_meter.update_stats(
                    top1_err,
                    top5_err,
                    inputs[0].size(0)
                    * max(
                        cfg.NUM_GPUS, 1
                    ),  # If running on CPU (cfg.NUM_GPUS == 1), use 1 to represent 1 CPU.
                )
                # write to tensorboard format if available.
                if writer is not None:
                    writer.add_scalars(
                        {"Val/Top1_err": top1_err, "Val/Top5_err": top5_err},
                        global_step=len(val_loader) * cur_epoch + cur_iter,
                    )
            val_meter.update_predictions(preds, labels)
        val_meter.log_iter_stats(cur_epoch, cur_iter)
        val_meter.iter_tic()
    # Log epoch stats.
    val_meter.log_epoch_stats(cur_epoch)
    # write to tensorboard format if available.
    if writer is not None:
        if cfg.DETECTION.ENABLE:
            writer.add_scalars(
                {"Val/mAP": val_meter.full_map}, global_step=cur_epoch
            )
        else:
            all_preds = [pred.clone().detach() for pred in val_meter.all_preds]
            all_labels = [
                label.clone().detach() for label in val_meter.all_labels
            ]
            if cfg.NUM_GPUS:
                all_preds = [pred.cpu() for pred in all_preds]
                all_labels = [label.cpu() for label in all_labels]
            writer.plot_eval(
                preds=all_preds, labels=all_labels, global_step=cur_epoch
            )
    val_meter.reset()
def calculate_and_update_precise_bn(loader, model, num_iters=200, use_gpu=True):
    """Recompute precise BatchNorm running stats and write them into the model.

    Args:
        loader (loader): data loader providing training batches.
        model (model): model whose BN layers are refreshed.
        num_iters (int): number of batches to feed through the model.
        use_gpu (bool): if True, transfer inputs to the GPU before the
            forward pass.
    """

    def _inputs_on_device():
        # Stream only the input part of each batch, optionally moving each
        # tensor (or each tensor of a multi-pathway list) onto the GPU first.
        for batch in loader:
            inputs = batch[0]
            if use_gpu:
                if isinstance(inputs, (list,)):
                    inputs = [x.cuda(non_blocking=True) for x in inputs]
                else:
                    inputs = inputs.cuda(non_blocking=True)
            yield inputs

    # Run the forward passes and overwrite the BN running stats in-place.
    update_bn_stats(model, _inputs_on_device(), num_iters)
def build_trainer(cfg):
    """Construct the model, optimizer, data loaders, and meters for training.

    Args:
        cfg (CfgNode): configs. Details can be found in
            slowfast/config/defaults.py

    Returns:
        model (nn.Module): training model.
        optimizer (Optimizer): optimizer.
        train_loader (DataLoader): training data loader.
        val_loader (DataLoader): validation data loader.
        precise_bn_loader (DataLoader): training data loader for computing
            precise BN.
        train_meter (TrainMeter): tool for measuring training stats.
        val_meter (ValMeter): tool for measuring validation stats.
    """
    # Build the video model; log its statistics on the master process only.
    model = build_model(cfg)
    if du.is_master_proc() and cfg.LOG_MODEL_INFO:
        misc.log_model_info(model, cfg, use_train_input=True)

    # Optimizer over the freshly built model's parameters.
    optimizer = optim.construct_optimizer(model, cfg)

    # Data loaders: train, val, and a dedicated train loader for precise BN.
    train_loader = loader.construct_loader(cfg, "train")
    val_loader = loader.construct_loader(cfg, "val")
    precise_bn_loader = loader.construct_loader(cfg, "train", is_precise_bn=True)

    # Stat trackers sized to their respective loaders.
    train_meter = TrainMeter(len(train_loader), cfg)
    val_meter = ValMeter(len(val_loader), cfg)

    return (
        model,
        optimizer,
        train_loader,
        val_loader,
        precise_bn_loader,
        train_meter,
        val_meter,
    )
def train(cfg):
    """
    Train a video model for many epochs on train set and evaluate it on val set.
    Args:
        cfg (CfgNode): configs. Details can be found in
            slowfast/config/defaults.py
    """
    # Set up environment.
    du.init_distributed_training(cfg)
    # Set random seed from configs.
    np.random.seed(cfg.RNG_SEED)
    torch.manual_seed(cfg.RNG_SEED)
    # Setup logging format.
    logging.setup_logging(cfg.OUTPUT_DIR)
    # Init multigrid.
    multigrid = None
    if cfg.MULTIGRID.LONG_CYCLE or cfg.MULTIGRID.SHORT_CYCLE:
        multigrid = MultigridSchedule()
        cfg = multigrid.init_multigrid(cfg)
        if cfg.MULTIGRID.LONG_CYCLE:
            cfg, _ = multigrid.update_long_cycle(cfg, cur_epoch=0)
    # Build the video model and print model statistics.
    model = build_model(cfg)
    if du.is_master_proc() and cfg.LOG_MODEL_INFO:
        misc.log_model_info(model, cfg, use_train_input=True)
    # Loss function selected by config, averaged over the batch.
    loss_fun = losses.get_loss_func(cfg.MODEL.LOSS_FUNC)(
        reduction="mean"
    )
    # Construct the optimizer.
    optimizer = optim.construct_optimizer(model, cfg)
    # Create a GradScaler for mixed precision training
    scaler = torch.cuda.amp.GradScaler(enabled=cfg.TRAIN.MIXED_PRECISION)
    # Load a checkpoint to resume training if applicable.
    if not cfg.TRAIN.FINETUNE:
        start_epoch = cu.load_train_checkpoint(cfg, model, optimizer, scaler if cfg.TRAIN.MIXED_PRECISION else None)
    else:
        # Fine-tuning: resume from this run's own checkpoint if one exists,
        # otherwise initialize weights only from the pretrained checkpoint
        # and start from epoch 0.
        if cfg.TRAIN.AUTO_RESUME and cu.has_checkpoint(cfg.OUTPUT_DIR):
            last_checkpoint = cu.get_last_checkpoint(cfg.OUTPUT_DIR)
            checkpoint_epoch = cu.load_checkpoint(last_checkpoint, model, cfg.NUM_GPUS > 1,
                                                  optimizer, scaler if cfg.TRAIN.MIXED_PRECISION else None)
            start_epoch = checkpoint_epoch + 1
        else:
            start_epoch = 0
            cu.load_checkpoint(cfg.TRAIN.CHECKPOINT_FILE_PATH, model)
    # Create the video train and val loaders.
    train_loader = loader.construct_loader(cfg, "train")
    val_loader = loader.construct_loader(cfg, "val")
    precise_bn_loader = (
        loader.construct_loader(cfg, "train", is_precise_bn=True)
        if cfg.BN.USE_PRECISE_STATS
        else None
    )
    # Create meters (AVA meters for detection, generic meters otherwise).
    if cfg.DETECTION.ENABLE:
        train_meter = AVAMeter(len(train_loader), cfg, mode="train")
        val_meter = AVAMeter(len(val_loader), cfg, mode="val")
    else:
        train_meter = TrainMeter(len(train_loader), cfg)
        val_meter = ValMeter(len(val_loader), cfg)
    # set up writer for logging to Tensorboard format (master process only).
    if cfg.TENSORBOARD.ENABLE and du.is_master_proc(
        cfg.NUM_GPUS * cfg.NUM_SHARDS
    ):
        writer = tb.TensorboardWriter(cfg)
    else:
        writer = None
    # Perform the training loop.
    logger.info("Start epoch: {}".format(start_epoch + 1))
    total_epochs = cfg.SOLVER.MAX_EPOCH
    epoch_timer = EpochTimer()
    for cur_epoch in range(start_epoch, cfg.SOLVER.MAX_EPOCH):
        if cfg.MULTIGRID.LONG_CYCLE:
            # A long-cycle change rebuilds model/loaders/meters, then reloads
            # the latest weights so training continues seamlessly.
            cfg, changed = multigrid.update_long_cycle(cfg, cur_epoch)
            if changed:
                (
                    model,
                    optimizer,
                    train_loader,
                    val_loader,
                    precise_bn_loader,
                    train_meter,
                    val_meter,
                ) = build_trainer(cfg)
                # Load checkpoint.
                if cu.has_checkpoint(cfg.OUTPUT_DIR):
                    last_checkpoint = cu.get_last_checkpoint(cfg.OUTPUT_DIR)
                    assert "{:05d}.pyth".format(cur_epoch) in last_checkpoint
                else:
                    last_checkpoint = cfg.TRAIN.CHECKPOINT_FILE_PATH
                logger.info("Load from {}".format(last_checkpoint))
                cu.load_checkpoint(
                    last_checkpoint, model, cfg.NUM_GPUS > 1, optimizer
                )
        # Shuffle the dataset.
        loader.shuffle_dataset(train_loader, cur_epoch)
        # Train for one epoch.
        epoch_timer.epoch_tic()
        train_epoch(
            train_loader,
            model,
            loss_fun,
            optimizer,
            scaler,
            train_meter,
            cur_epoch,
            total_epochs,
            cfg,
            writer,
        )
        epoch_timer.epoch_toc()
        logger.info(
            f"Epoch {cur_epoch} takes {epoch_timer.last_epoch_time():.2f}s. Epochs "
            f"from {start_epoch} to {cur_epoch} take "
            f"{epoch_timer.avg_epoch_time():.2f}s in average and "
            f"{epoch_timer.median_epoch_time():.2f}s in median."
        )
        logger.info(
            f"For epoch {cur_epoch}, each iteraction takes "
            f"{epoch_timer.last_epoch_time()/len(train_loader):.2f}s in average. "
            f"From epoch {start_epoch} to {cur_epoch}, each iteraction takes "
            f"{epoch_timer.avg_epoch_time()/len(train_loader):.2f}s in average."
        )
        # Decide whether this epoch checkpoints and/or evaluates (multigrid
        # schedules may shift these boundaries).
        is_checkp_epoch = cu.is_checkpoint_epoch(
            cfg,
            cur_epoch,
            None if multigrid is None else multigrid.schedule,
        )
        is_eval_epoch = misc.is_eval_epoch(
            cfg, cur_epoch, None if multigrid is None else multigrid.schedule
        )
        # Compute precise BN stats.
        if (
            (is_checkp_epoch or is_eval_epoch)
            and cfg.BN.USE_PRECISE_STATS
            and len(get_bn_modules(model)) > 0
        ):
            calculate_and_update_precise_bn(
                precise_bn_loader,
                model,
                min(cfg.BN.NUM_BATCHES_PRECISE, len(precise_bn_loader)),
                cfg.NUM_GPUS > 0,
            )
        _ = misc.aggregate_sub_bn_stats(model)
        # Save a checkpoint.
        if is_checkp_epoch:
            cu.save_checkpoint(
                cfg.OUTPUT_DIR,
                model,
                optimizer,
                cur_epoch,
                cfg,
                scaler if cfg.TRAIN.MIXED_PRECISION else None,
            )
        # Evaluate the model on validation set.
        if is_eval_epoch:
            eval_epoch(val_loader, model, val_meter, cur_epoch, cfg, writer)
    if writer is not None:
        writer.close()
| 21,443 | 34.562189 | 116 | py |
STTS | STTS-main/MViT/slowfast/config/defaults.py | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
"""Configs."""
from fvcore.common.config import CfgNode
from . import custom_config
# -----------------------------------------------------------------------------
# Config definition
# -----------------------------------------------------------------------------
_C = CfgNode()
# ---------------------------------------------------------------------------- #
# Batch norm options
# ---------------------------------------------------------------------------- #
_C.BN = CfgNode()
# Precise BN stats.
_C.BN.USE_PRECISE_STATS = False
# Number of samples use to compute precise bn.
_C.BN.NUM_BATCHES_PRECISE = 200
# Weight decay value that applies on BN.
_C.BN.WEIGHT_DECAY = 0.0
# Norm type, options include `batchnorm`, `sub_batchnorm`, `sync_batchnorm`
_C.BN.NORM_TYPE = "batchnorm"
# Parameter for SubBatchNorm, where it splits the batch dimension into
# NUM_SPLITS splits, and run BN on each of them separately independently.
_C.BN.NUM_SPLITS = 1
# Parameter for NaiveSyncBatchNorm3d, where the stats across `NUM_SYNC_DEVICES`
# devices will be synchronized.
_C.BN.NUM_SYNC_DEVICES = 1
# ---------------------------------------------------------------------------- #
# Training options.
# ---------------------------------------------------------------------------- #
_C.TRAIN = CfgNode()
# If True Train the model, else skip training.
_C.TRAIN.ENABLE = True
# Dataset.
_C.TRAIN.DATASET = "kinetics"
# Total mini-batch size.
_C.TRAIN.BATCH_SIZE = 64
# Evaluate model on test data every eval period epochs.
_C.TRAIN.EVAL_PERIOD = 10
# Save model checkpoint every checkpoint period epochs.
_C.TRAIN.CHECKPOINT_PERIOD = 10
_C.TRAIN.TRAIN_TOPK_ONLY = False
# Resume training from the latest checkpoint in the output directory.
_C.TRAIN.AUTO_RESUME = True
_C.TRAIN.FINETUNE = True
# Path to the checkpoint to load the initial weight.
_C.TRAIN.CHECKPOINT_FILE_PATH = ""
# Checkpoint types include `caffe2` or `pytorch`.
_C.TRAIN.CHECKPOINT_TYPE = "pytorch"
# If True, perform inflation when loading checkpoint.
_C.TRAIN.CHECKPOINT_INFLATE = False
# If True, reset epochs when loading checkpoint.
_C.TRAIN.CHECKPOINT_EPOCH_RESET = False
# If set, clear all layer names according to the pattern provided.
_C.TRAIN.CHECKPOINT_CLEAR_NAME_PATTERN = () # ("backbone.",)
# If True, use FP16 for activations
_C.TRAIN.MIXED_PRECISION = False
# ---------------------------------------------------------------------------- #
# Augmentation options.
# ---------------------------------------------------------------------------- #
_C.AUG = CfgNode()
# Whether to enable randaug.
_C.AUG.ENABLE = False
# Number of repeated augmentations to used during training.
# If this is greater than 1, then the actual batch size is
# TRAIN.BATCH_SIZE * AUG.NUM_SAMPLE.
_C.AUG.NUM_SAMPLE = 1
# Not used if using randaug.
_C.AUG.COLOR_JITTER = 0.4
# RandAug parameters.
_C.AUG.AA_TYPE = "rand-m9-mstd0.5-inc1"
# Interpolation method.
_C.AUG.INTERPOLATION = "bicubic"
# Probability of random erasing.
_C.AUG.RE_PROB = 0.25
# Random erasing mode.
_C.AUG.RE_MODE = "pixel"
# Random erase count.
_C.AUG.RE_COUNT = 1
# Do not random erase first (clean) augmentation split.
_C.AUG.RE_SPLIT = False
# ---------------------------------------------------------------------------- #
# MipUp options.
# ---------------------------------------------------------------------------- #
_C.MIXUP = CfgNode()
# Whether to use mixup.
_C.MIXUP.ENABLE = False
# Mixup alpha.
_C.MIXUP.ALPHA = 0.8
# Cutmix alpha.
_C.MIXUP.CUTMIX_ALPHA = 1.0
# Probability of performing mixup or cutmix when either/both is enabled.
_C.MIXUP.PROB = 1.0
# Probability of switching to cutmix when both mixup and cutmix enabled.
_C.MIXUP.SWITCH_PROB = 0.5
# Label smoothing.
_C.MIXUP.LABEL_SMOOTH_VALUE = 0.1
# ---------------------------------------------------------------------------- #
# Testing options
# ---------------------------------------------------------------------------- #
_C.TEST = CfgNode()
# If True test the model, else skip the testing.
_C.TEST.ENABLE = True
# Dataset for testing.
_C.TEST.DATASET = "kinetics"
# Total mini-batch size
_C.TEST.BATCH_SIZE = 8
# Path to the checkpoint to load the initial weight.
_C.TEST.CHECKPOINT_FILE_PATH = ""
# Number of clips to sample from a video uniformly for aggregating the
# prediction results.
_C.TEST.NUM_ENSEMBLE_VIEWS = 10
# Number of crops to sample from a frame spatially for aggregating the
# prediction results.
_C.TEST.NUM_SPATIAL_CROPS = 3
_C.TEST.SUBSET = "full"
# Checkpoint types include `caffe2` or `pytorch`.
_C.TEST.CHECKPOINT_TYPE = "pytorch"
# Path to saving prediction results file.
_C.TEST.SAVE_RESULTS_PATH = ""
# -----------------------------------------------------------------------------
# ResNet options
# -----------------------------------------------------------------------------
_C.RESNET = CfgNode()
# Transformation function.
_C.RESNET.TRANS_FUNC = "bottleneck_transform"
# Number of groups (1 for ResNet, and larger than 1 for ResNeXt).
_C.RESNET.NUM_GROUPS = 1
# Width of each group (64 -> ResNet; 4 -> ResNeXt).
_C.RESNET.WIDTH_PER_GROUP = 64
# Apply relu in a inplace manner.
_C.RESNET.INPLACE_RELU = True
# Apply stride to 1x1 conv.
_C.RESNET.STRIDE_1X1 = False
# If true, initialize the gamma of the final BN of each block to zero.
_C.RESNET.ZERO_INIT_FINAL_BN = False
# Number of weight layers.
_C.RESNET.DEPTH = 50
# If the current block has more than NUM_BLOCK_TEMP_KERNEL blocks, use temporal
# kernel of 1 for the rest of the blocks.
_C.RESNET.NUM_BLOCK_TEMP_KERNEL = [[3], [4], [6], [3]]
# Size of stride on different res stages.
_C.RESNET.SPATIAL_STRIDES = [[1], [2], [2], [2]]
# Size of dilation on different res stages.
_C.RESNET.SPATIAL_DILATIONS = [[1], [1], [1], [1]]
# ---------------------------------------------------------------------------- #
# X3D options
# See https://arxiv.org/abs/2004.04730 for details about X3D Networks.
# ---------------------------------------------------------------------------- #
_C.X3D = CfgNode()
# Width expansion factor.
_C.X3D.WIDTH_FACTOR = 1.0
# Depth expansion factor.
_C.X3D.DEPTH_FACTOR = 1.0
# Bottleneck expansion factor for the 3x3x3 conv.
_C.X3D.BOTTLENECK_FACTOR = 1.0 #
# Dimensions of the last linear layer before classificaiton.
_C.X3D.DIM_C5 = 2048
# Dimensions of the first 3x3 conv layer.
_C.X3D.DIM_C1 = 12
# Whether to scale the width of Res2, default is false.
_C.X3D.SCALE_RES2 = False
# Whether to use a BatchNorm (BN) layer before the classifier, default is false.
_C.X3D.BN_LIN5 = False
# Whether to use channelwise (=depthwise) convolution in the center (3x3x3)
# convolution operation of the residual blocks.
_C.X3D.CHANNELWISE_3x3x3 = True
# -----------------------------------------------------------------------------
# Nonlocal options
# -----------------------------------------------------------------------------
_C.NONLOCAL = CfgNode()
# Index of each stage and block to add nonlocal layers.
_C.NONLOCAL.LOCATION = [[[]], [[]], [[]], [[]]]
# Number of group for nonlocal for each stage.
_C.NONLOCAL.GROUP = [[1], [1], [1], [1]]
# Instantiation to use for the non-local layer.
_C.NONLOCAL.INSTANTIATION = "dot_product"
# Size of pooling layers used in Non-Local.
_C.NONLOCAL.POOL = [
# Res2
[[1, 2, 2], [1, 2, 2]],
# Res3
[[1, 2, 2], [1, 2, 2]],
# Res4
[[1, 2, 2], [1, 2, 2]],
# Res5
[[1, 2, 2], [1, 2, 2]],
]
# -----------------------------------------------------------------------------
# Model options
# -----------------------------------------------------------------------------
_C.MODEL = CfgNode()
# Model architecture.
_C.MODEL.ARCH = "slowfast"
# Model name
_C.MODEL.MODEL_NAME = "SlowFast"
# The number of classes to predict for the model.
_C.MODEL.NUM_CLASSES = 400
# Loss function.
_C.MODEL.LOSS_FUNC = "cross_entropy"
# Model architectures that has one single pathway.
_C.MODEL.SINGLE_PATHWAY_ARCH = ["2d", "c2d", "i3d", "slow", "x3d", "mvit"]
# Model architectures that has multiple pathways.
_C.MODEL.MULTI_PATHWAY_ARCH = ["slowfast"]
# Dropout rate before final projection in the backbone.
_C.MODEL.DROPOUT_RATE = 0.5
# Randomly drop rate for Res-blocks, linearly increase from res2 to res5
_C.MODEL.DROPCONNECT_RATE = 0.0
# The std to initialize the fc layer(s).
_C.MODEL.FC_INIT_STD = 0.01
# Activation layer for the output head.
_C.MODEL.HEAD_ACT = "softmax"
# Activation checkpointing enabled or not to save GPU memory.
_C.MODEL.ACT_CHECKPOINT = False
# -----------------------------------------------------------------------------
# MViT options
# -----------------------------------------------------------------------------
_C.MVIT = CfgNode()
# Options include `conv`, `max`.
_C.MVIT.MODE = "conv"
# If True, perform pool before projection in attention.
_C.MVIT.POOL_FIRST = False
# If True, use cls embed in the network, otherwise don't use cls_embed in transformer.
_C.MVIT.CLS_EMBED_ON = True
# Kernel size for patchtification.
_C.MVIT.PATCH_KERNEL = [3, 7, 7]
# Stride size for patchtification.
_C.MVIT.PATCH_STRIDE = [2, 4, 4]
_C.MVIT.DECAY_SIGMA = True
_C.MVIT.SIGMA = 0.05
_C.MVIT.TIME_PRUNING_LOC = None
_C.MVIT.SPACE_PRUNING_LOC = None
_C.MVIT.TIME_SCORE = 'tpool'
_C.MVIT.SPACE_SCORE = 'spatch'
_C.MVIT.TIME_LEFT_RATIO = [0.5]
_C.MVIT.SPACE_LEFT_RATIO = [0.5105]
# Padding size for patchtification.
_C.MVIT.PATCH_PADDING = [2, 4, 4]
# If True, use 2d patch, otherwise use 3d patch.
_C.MVIT.PATCH_2D = False
# Base embedding dimension for the transformer.
_C.MVIT.EMBED_DIM = 96
# Base num of heads for the transformer.
_C.MVIT.NUM_HEADS = 1
# Dimension reduction ratio for the MLP layers.
_C.MVIT.MLP_RATIO = 4.0
# If use, use bias term in attention fc layers.
_C.MVIT.QKV_BIAS = True
# Drop path rate for the transformer.
_C.MVIT.DROPPATH_RATE = 0.1
# Depth of the transformer.
_C.MVIT.DEPTH = 16
# Normalization layer for the transformer. Only layernorm is supported now.
_C.MVIT.NORM = "layernorm"
# Dimension multiplication at layer i. If 2.0 is used, then the next block will increase
# the dimension by 2 times. Format: [depth_i: mul_dim_ratio]
_C.MVIT.DIM_MUL = []
# Head number multiplication at layer i. If 2.0 is used, then the next block will
# increase the number of heads by 2 times. Format: [depth_i: head_mul_ratio]
_C.MVIT.HEAD_MUL = []
# Stride size for the Pool KV at layer i.
# Format: [[i, stride_t_i, stride_h_i, stride_w_i], ...,]
_C.MVIT.POOL_KV_STRIDE = None
# Initial stride size for KV at layer 1. The stride size will be further reduced with
# the raio of MVIT.DIM_MUL. If will overwrite MVIT.POOL_KV_STRIDE if not None.
_C.MVIT.POOL_KV_STRIDE_ADAPTIVE = None
# Stride size for the Pool Q at layer i.
# Format: [[i, stride_t_i, stride_h_i, stride_w_i], ...,]
_C.MVIT.POOL_Q_STRIDE = []
# If not None, overwrite the KV_KERNEL and Q_KERNEL size with POOL_KVQ_KERNEL.
# Otherwise the kernel_size is [s + 1 if s > 1 else s for s in stride_size].
_C.MVIT.POOL_KVQ_KERNEL = None
# If True, perform no decay on positional embedding and cls embedding.
_C.MVIT.ZERO_DECAY_POS_CLS = True
# If True, use norm after stem.
_C.MVIT.NORM_STEM = False
# If True, perform separate positional embedding.
_C.MVIT.SEP_POS_EMBED = False
# Dropout rate for the MViT backbone.
_C.MVIT.DROPOUT_RATE = 0.0
# -----------------------------------------------------------------------------
# SlowFast options
# -----------------------------------------------------------------------------
_C.SLOWFAST = CfgNode()
# Corresponds to the inverse of the channel reduction ratio, $\beta$ between
# the Slow and Fast pathways.
_C.SLOWFAST.BETA_INV = 8
# Corresponds to the frame rate reduction ratio, $\alpha$ between the Slow and
# Fast pathways.
_C.SLOWFAST.ALPHA = 8
# Ratio of channel dimensions between the Slow and Fast pathways.
_C.SLOWFAST.FUSION_CONV_CHANNEL_RATIO = 2
# Kernel dimension used for fusing information from Fast pathway to Slow
# pathway.
_C.SLOWFAST.FUSION_KERNEL_SZ = 5
# -----------------------------------------------------------------------------
# Data options
# -----------------------------------------------------------------------------
_C.DATA = CfgNode()
# The path to the data directory.
_C.DATA.PATH_TO_DATA_DIR = ""
# The separator used between path and label.
_C.DATA.PATH_LABEL_SEPARATOR = ","
# Video path prefix if any.
_C.DATA.PATH_PREFIX = ""
# The number of frames of the input clip.
_C.DATA.NUM_FRAMES = 8
# The video sampling rate of the input clip.
_C.DATA.SAMPLING_RATE = 8
# Eigenvalues for PCA jittering. Note PCA is RGB based.
_C.DATA.TRAIN_PCA_EIGVAL = [0.225, 0.224, 0.229]
# Eigenvectors for PCA jittering.
_C.DATA.TRAIN_PCA_EIGVEC = [
[-0.5675, 0.7192, 0.4009],
[-0.5808, -0.0045, -0.8140],
[-0.5836, -0.6948, 0.4203],
]
# If a imdb have been dumpped to a local file with the following format:
# `{"im_path": im_path, "class": cont_id}`
# then we can skip the construction of imdb and load it from the local file.
_C.DATA.PATH_TO_PRELOAD_IMDB = ""
# The mean value of the video raw pixels across the R G B channels.
_C.DATA.MEAN = [0.45, 0.45, 0.45]
# List of input frame channel dimensions.
_C.DATA.INPUT_CHANNEL_NUM = [3, 3]
# The std value of the video raw pixels across the R G B channels.
_C.DATA.STD = [0.225, 0.225, 0.225]
# The spatial augmentation jitter scales for training.
_C.DATA.TRAIN_JITTER_SCALES = [256, 320]
# The relative scale range of Inception-style area based random resizing augmentation.
# If this is provided, DATA.TRAIN_JITTER_SCALES above is ignored.
_C.DATA.TRAIN_JITTER_SCALES_RELATIVE = []
# The relative aspect ratio range of Inception-style area based random resizing
# augmentation.
_C.DATA.TRAIN_JITTER_ASPECT_RELATIVE = []
# If True, perform stride length uniform temporal sampling.
_C.DATA.USE_OFFSET_SAMPLING = False
# Whether to apply motion shift for augmentation.
_C.DATA.TRAIN_JITTER_MOTION_SHIFT = False
# The spatial crop size for training.
_C.DATA.TRAIN_CROP_SIZE = 224
# The spatial crop size for testing.
_C.DATA.TEST_CROP_SIZE = 256
# Input videos may have different fps; convert them to the target video fps
# before frame sampling.
_C.DATA.TARGET_FPS = 30
# Decoding backend, options include `pyav` or `torchvision`
_C.DATA.DECODING_BACKEND = "pyav"
# if True, sample uniformly in [1 / max_scale, 1 / min_scale] and take a
# reciprocal to get the scale. If False, take a uniform sample from
# [min_scale, max_scale].
_C.DATA.INV_UNIFORM_SAMPLE = False
# If True, perform random horizontal flip on the video frames during training.
_C.DATA.RANDOM_FLIP = True
# If True, calculate the mAP as the metric.
_C.DATA.MULTI_LABEL = False
# Method to perform the ensemble, options include "sum" and "max".
_C.DATA.ENSEMBLE_METHOD = "sum"
# If True, reverse the default input channel order (RGB <-> BGR).
_C.DATA.REVERSE_INPUT_CHANNEL = False
# ---------------------------------------------------------------------------- #
# Optimizer options
# ---------------------------------------------------------------------------- #
_C.SOLVER = CfgNode()
# Base learning rate.
_C.SOLVER.BASE_LR = 0.1
# Learning rate policy (see utils/lr_policy.py for options and examples).
_C.SOLVER.LR_POLICY = "cosine"
# Final learning rate for 'cosine' policy.
_C.SOLVER.COSINE_END_LR = 0.0
# Exponential decay factor.
_C.SOLVER.GAMMA = 0.1
# Learning rate for the backbone (presumably a separate LR group; confirm in optimizer code).
_C.SOLVER.BACKBONE_LR = 0.01
# Step size for 'exp' and 'cos' policies (in epochs).
_C.SOLVER.STEP_SIZE = 1
# Steps for 'steps_' policies (in epochs).
_C.SOLVER.STEPS = []
# Learning rates for 'steps_' policies.
_C.SOLVER.LRS = []
# Maximal number of epochs.
_C.SOLVER.MAX_EPOCH = 300
# Momentum.
_C.SOLVER.MOMENTUM = 0.9
# Momentum dampening.
_C.SOLVER.DAMPENING = 0.0
# Nesterov momentum.
_C.SOLVER.NESTEROV = True
# L2 regularization.
_C.SOLVER.WEIGHT_DECAY = 1e-4
# Start the warm up from SOLVER.BASE_LR * SOLVER.WARMUP_FACTOR.
_C.SOLVER.WARMUP_FACTOR = 0.1
# Gradually warm up the SOLVER.BASE_LR over this number of epochs.
_C.SOLVER.WARMUP_EPOCHS = 0.0
# The start learning rate of the warm up.
_C.SOLVER.WARMUP_START_LR = 0.01
# Optimization method.
_C.SOLVER.OPTIMIZING_METHOD = "sgd"
# Base learning rate is linearly scaled with NUM_SHARDS.
_C.SOLVER.BASE_LR_SCALE_NUM_SHARDS = False
# If True, start from the peak cosine learning rate after warm up.
_C.SOLVER.COSINE_AFTER_WARMUP = False
# If True, perform no weight decay on parameter with one dimension (bias term, etc).
_C.SOLVER.ZERO_WD_1D_PARAM = False
# Clip gradient at this value before optimizer update
_C.SOLVER.CLIP_GRAD_VAL = None
# Clip gradient at this norm before optimizer update
_C.SOLVER.CLIP_GRAD_L2NORM = None
# ---------------------------------------------------------------------------- #
# Misc options
# ---------------------------------------------------------------------------- #
# Number of GPUs to use (applies to both training and testing).
_C.NUM_GPUS = 1
# Number of machine to use for the job.
_C.NUM_SHARDS = 1
# The index of the current machine.
_C.SHARD_ID = 0
# Output basedir.
_C.OUTPUT_DIR = "./tmp"
# Note that non-determinism may still be present due to non-deterministic
# operator implementations in GPU operator libraries.
_C.RNG_SEED = 1
# Log period in iters.
_C.LOG_PERIOD = 10
# If True, log the model info.
_C.LOG_MODEL_INFO = False
# Distributed backend.
_C.DIST_BACKEND = "nccl"
# ---------------------------------------------------------------------------- #
# Benchmark options
# ---------------------------------------------------------------------------- #
_C.BENCHMARK = CfgNode()
# Number of epochs for data loading benchmark.
_C.BENCHMARK.NUM_EPOCHS = 5
# Log period in iters for data loading benchmark.
_C.BENCHMARK.LOG_PERIOD = 100
# If True, shuffle dataloader for epoch during benchmark.
_C.BENCHMARK.SHUFFLE = True
# ---------------------------------------------------------------------------- #
# Common train/test data loader options
# ---------------------------------------------------------------------------- #
_C.DATA_LOADER = CfgNode()
# Number of data loader workers per training process.
_C.DATA_LOADER.NUM_WORKERS = 8
# Load data to pinned host memory.
_C.DATA_LOADER.PIN_MEMORY = True
# Enable multi thread decoding.
_C.DATA_LOADER.ENABLE_MULTI_THREAD_DECODE = False
# ---------------------------------------------------------------------------- #
# Detection options.
# ---------------------------------------------------------------------------- #
_C.DETECTION = CfgNode()
# Whether enable video detection.
_C.DETECTION.ENABLE = False
# Aligned version of RoI. More details can be found at slowfast/models/head_helper.py
_C.DETECTION.ALIGNED = True
# Spatial scale factor.
_C.DETECTION.SPATIAL_SCALE_FACTOR = 16
# RoI transformation resolution.
_C.DETECTION.ROI_XFORM_RESOLUTION = 7
# -----------------------------------------------------------------------------
# AVA Dataset options
# -----------------------------------------------------------------------------
_C.AVA = CfgNode()
# Directory path of frames.
_C.AVA.FRAME_DIR = "/mnt/fair-flash3-east/ava_trainval_frames.img/"
# Directory path for files of frame lists.
_C.AVA.FRAME_LIST_DIR = (
    "/mnt/vol/gfsai-flash3-east/ai-group/users/haoqifan/ava/frame_list/"
)
# Directory path for annotation files.
_C.AVA.ANNOTATION_DIR = (
    "/mnt/vol/gfsai-flash3-east/ai-group/users/haoqifan/ava/frame_list/"
)
# Filenames of training samples list files.
_C.AVA.TRAIN_LISTS = ["train.csv"]
# Filenames of test samples list files.
_C.AVA.TEST_LISTS = ["val.csv"]
# Filenames of box list files for training. Note that we assume files which
# contains predicted boxes will have a suffix "predicted_boxes" in the
# filename.
_C.AVA.TRAIN_GT_BOX_LISTS = ["ava_train_v2.2.csv"]
_C.AVA.TRAIN_PREDICT_BOX_LISTS = []
# Filenames of box list files for test.
_C.AVA.TEST_PREDICT_BOX_LISTS = ["ava_val_predicted_boxes.csv"]
# This option controls the score threshold for the predicted boxes to use.
_C.AVA.DETECTION_SCORE_THRESH = 0.9
# If use BGR as the format of input frames.
_C.AVA.BGR = False
# Training augmentation parameters
# Whether to use color augmentation method.
_C.AVA.TRAIN_USE_COLOR_AUGMENTATION = False
# Whether to only use PCA jitter augmentation when using color augmentation
# method (otherwise combine with color jitter method).
_C.AVA.TRAIN_PCA_JITTER_ONLY = True
# Whether to do horizontal flipping during test.
_C.AVA.TEST_FORCE_FLIP = False
# Whether to use full test set for validation split.
_C.AVA.FULL_TEST_ON_VAL = False
# The name of the file to the ava label map.
_C.AVA.LABEL_MAP_FILE = "ava_action_list_v2.2_for_activitynet_2019.pbtxt"
# The name of the file to the ava exclusion.
_C.AVA.EXCLUSION_FILE = "ava_val_excluded_timestamps_v2.2.csv"
# The name of the file to the ava groundtruth.
_C.AVA.GROUNDTRUTH_FILE = "ava_val_v2.2.csv"
# Backend to process image, includes `pytorch` and `cv2`.
_C.AVA.IMG_PROC_BACKEND = "cv2"
# ---------------------------------------------------------------------------- #
# Multigrid training options
# See https://arxiv.org/abs/1912.00998 for details about multigrid training.
# ---------------------------------------------------------------------------- #
_C.MULTIGRID = CfgNode()
# Multigrid training allows us to train for more epochs with fewer iterations.
# This hyperparameter specifies how many times more epochs to train.
# The default setting in paper trains for 1.5x more epochs than baseline.
_C.MULTIGRID.EPOCH_FACTOR = 1.5
# Enable short cycles.
_C.MULTIGRID.SHORT_CYCLE = False
# Short cycle additional spatial dimensions relative to the default crop size.
_C.MULTIGRID.SHORT_CYCLE_FACTORS = [0.5, 0.5 ** 0.5]
_C.MULTIGRID.LONG_CYCLE = False
# (Temporal, Spatial) dimensions relative to the default shape.
_C.MULTIGRID.LONG_CYCLE_FACTORS = [
    (0.25, 0.5 ** 0.5),
    (0.5, 0.5 ** 0.5),
    (0.5, 1),
    (1, 1),
]
# While a standard BN computes stats across all examples in a GPU,
# for multigrid training we fix the number of clips to compute BN stats on.
# See https://arxiv.org/abs/1912.00998 for details.
_C.MULTIGRID.BN_BASE_SIZE = 8
# Multigrid training epochs are not proportional to actual training time or
# computations, so _C.TRAIN.EVAL_PERIOD leads to too frequent or rare
# evaluation. We use a multigrid-specific rule to determine when to evaluate:
# This hyperparameter defines how many times to evaluate a model per long
# cycle shape.
_C.MULTIGRID.EVAL_FREQ = 3
# No need to specify; Set automatically and used as global variables.
_C.MULTIGRID.LONG_CYCLE_SAMPLING_RATE = 0
_C.MULTIGRID.DEFAULT_B = 0
_C.MULTIGRID.DEFAULT_T = 0
_C.MULTIGRID.DEFAULT_S = 0
# -----------------------------------------------------------------------------
# Tensorboard Visualization Options
# -----------------------------------------------------------------------------
_C.TENSORBOARD = CfgNode()
# Log to summary writer; this will automatically
# log loss, lr and metrics during train/eval.
_C.TENSORBOARD.ENABLE = False
# Provide path to prediction results for visualization.
# This is a pickle file of [prediction_tensor, label_tensor]
_C.TENSORBOARD.PREDICTIONS_PATH = ""
# Path to directory for tensorboard logs.
# Defaults to cfg.OUTPUT_DIR/runs-{cfg.TRAIN.DATASET}.
_C.TENSORBOARD.LOG_DIR = ""
# Path to a json file providing class_name - id mapping
# in the format {"class_name1": id1, "class_name2": id2, ...}.
# This file must be provided to enable plotting confusion matrix
# by a subset or parent categories.
_C.TENSORBOARD.CLASS_NAMES_PATH = ""
# Path to a json file for categories -> classes mapping
# in the format {"parent_class": ["child_class1", "child_class2",...], ...}.
_C.TENSORBOARD.CATEGORIES_PATH = ""
# Config for confusion matrices visualization.
_C.TENSORBOARD.CONFUSION_MATRIX = CfgNode()
# Visualize confusion matrix.
_C.TENSORBOARD.CONFUSION_MATRIX.ENABLE = False
# Figure size of the confusion matrices plotted.
_C.TENSORBOARD.CONFUSION_MATRIX.FIGSIZE = [8, 8]
# Path to a subset of categories to visualize.
# File contains class names separated by newline characters.
_C.TENSORBOARD.CONFUSION_MATRIX.SUBSET_PATH = ""
# Config for histogram visualization.
_C.TENSORBOARD.HISTOGRAM = CfgNode()
# Visualize histograms.
_C.TENSORBOARD.HISTOGRAM.ENABLE = False
# Path to a subset of classes to plot histograms.
# Class names must be separated by newline characters.
_C.TENSORBOARD.HISTOGRAM.SUBSET_PATH = ""
# Visualize top-k most predicted classes on histograms for each
# chosen true label.
_C.TENSORBOARD.HISTOGRAM.TOPK = 10
# Figure size of the histograms plotted.
_C.TENSORBOARD.HISTOGRAM.FIGSIZE = [8, 8]
# Config for layers' weights and activations visualization.
# _C.TENSORBOARD.ENABLE must be True.
_C.TENSORBOARD.MODEL_VIS = CfgNode()
# If False, skip model visualization.
_C.TENSORBOARD.MODEL_VIS.ENABLE = False
# If False, skip visualizing model weights.
_C.TENSORBOARD.MODEL_VIS.MODEL_WEIGHTS = False
# If False, skip visualizing model activations.
_C.TENSORBOARD.MODEL_VIS.ACTIVATIONS = False
# If False, skip visualizing input videos.
_C.TENSORBOARD.MODEL_VIS.INPUT_VIDEO = False
# List of strings containing data about layer names and their indexing to
# visualize weights and activations for. The indexing is meant for
# choosing a subset of activations output by a layer for visualization.
# If indexing is not specified, visualize all activations output by the layer.
# For each string, layer name and indexing is separated by whitespaces.
# e.g.: [layer1 1,2;1,2, layer2, layer3 150,151;3,4]; this means for each array `arr`
# along the batch dimension in `layer1`, we take arr[[1, 2], [1, 2]]
_C.TENSORBOARD.MODEL_VIS.LAYER_LIST = []
# Top-k predictions to plot on videos
_C.TENSORBOARD.MODEL_VIS.TOPK_PREDS = 1
# Colormap for text box and bounding box colors
_C.TENSORBOARD.MODEL_VIS.COLORMAP = "Pastel2"
# Config for visualization video inputs with Grad-CAM.
# _C.TENSORBOARD.ENABLE must be True.
_C.TENSORBOARD.MODEL_VIS.GRAD_CAM = CfgNode()
# Whether to run visualization using Grad-CAM technique.
_C.TENSORBOARD.MODEL_VIS.GRAD_CAM.ENABLE = True
# CNN layers to use for Grad-CAM. The number of layers must be equal to
# number of pathway(s).
_C.TENSORBOARD.MODEL_VIS.GRAD_CAM.LAYER_LIST = []
# If True, visualize Grad-CAM using true labels for each instances.
# If False, use the highest predicted class.
_C.TENSORBOARD.MODEL_VIS.GRAD_CAM.USE_TRUE_LABEL = False
# Colormap for text box and bounding box colors
_C.TENSORBOARD.MODEL_VIS.GRAD_CAM.COLORMAP = "viridis"
# Config for visualization for wrong prediction visualization.
# _C.TENSORBOARD.ENABLE must be True.
_C.TENSORBOARD.WRONG_PRED_VIS = CfgNode()
_C.TENSORBOARD.WRONG_PRED_VIS.ENABLE = False
# Folder tag to organize model eval videos under.
_C.TENSORBOARD.WRONG_PRED_VIS.TAG = "Incorrectly classified videos."
# Subset of labels to visualize. Only wrong predictions with true labels
# within this subset is visualized.
_C.TENSORBOARD.WRONG_PRED_VIS.SUBSET_PATH = ""
_C.USE_MINI = False
# ---------------------------------------------------------------------------- #
# Demo options
# ---------------------------------------------------------------------------- #
_C.DEMO = CfgNode()
# Run model in DEMO mode.
_C.DEMO.ENABLE = False
# Path to a json file providing class_name - id mapping
# in the format {"class_name1": id1, "class_name2": id2, ...}.
_C.DEMO.LABEL_FILE_PATH = ""
# Specify a camera device as input. This will be prioritized
# over input video if set.
# If -1, use input video instead.
_C.DEMO.WEBCAM = -1
# Path to input video for demo.
_C.DEMO.INPUT_VIDEO = ""
# Custom width for reading input video data.
_C.DEMO.DISPLAY_WIDTH = 0
# Custom height for reading input video data.
_C.DEMO.DISPLAY_HEIGHT = 0
# Path to Detectron2 object detection model configuration,
# only used for detection tasks.
_C.DEMO.DETECTRON2_CFG = "COCO-Detection/faster_rcnn_R_50_FPN_3x.yaml"
# Path to Detectron2 object detection model pre-trained weights.
_C.DEMO.DETECTRON2_WEIGHTS = "detectron2://COCO-Detection/faster_rcnn_R_50_FPN_3x/137849458/model_final_280758.pkl"
# Threshold for choosing predicted bounding boxes by Detectron2.
_C.DEMO.DETECTRON2_THRESH = 0.9
# Number of overlapping frames between 2 consecutive clips.
# Increase this number for more frequent action predictions.
# The number of overlapping frames cannot be larger than
# half of the sequence length `cfg.DATA.NUM_FRAMES * cfg.DATA.SAMPLING_RATE`
_C.DEMO.BUFFER_SIZE = 0
# If specified, the visualized outputs will be written to a video file of
# this path. Otherwise, the visualized outputs will be displayed in a window.
_C.DEMO.OUTPUT_FILE = ""
# Frames per second rate for writing to output video file.
# If not set (-1), use fps rate from input file.
_C.DEMO.OUTPUT_FPS = -1
# Input format from demo video reader ("RGB" or "BGR").
_C.DEMO.INPUT_FORMAT = "BGR"
# Draw visualization frames in [keyframe_idx - CLIP_VIS_SIZE, keyframe_idx + CLIP_VIS_SIZE] inclusively.
_C.DEMO.CLIP_VIS_SIZE = 10
# Number of processes to run video visualizer.
_C.DEMO.NUM_VIS_INSTANCES = 2
# Path to pre-computed predicted boxes
_C.DEMO.PREDS_BOXES = ""
# Whether to run in with multi-threaded video reader.
_C.DEMO.THREAD_ENABLE = False
# Take one clip for every `DEMO.NUM_CLIPS_SKIP` + 1 for prediction and visualization.
# This is used for fast demo speed by reducing the prediction/visualization frequency.
# If -1, take the most recent read clip for visualization. This mode is only supported
# if `DEMO.THREAD_ENABLE` is set to True.
_C.DEMO.NUM_CLIPS_SKIP = 0
# Path to ground-truth boxes and labels (optional)
_C.DEMO.GT_BOXES = ""
# The starting second of the video w.r.t bounding boxes file.
_C.DEMO.STARTING_SECOND = 900
# Frames per second of the input video/folder of images.
_C.DEMO.FPS = 30
# Visualize with top-k predictions or predictions above certain threshold(s).
# Option: {"thres", "top-k"}
_C.DEMO.VIS_MODE = "thres"
# Threshold for common class names.
_C.DEMO.COMMON_CLASS_THRES = 0.7
# Threshold for uncommon class names. This will not be
# used if `_C.DEMO.COMMON_CLASS_NAMES` is empty.
_C.DEMO.UNCOMMON_CLASS_THRES = 0.3
# This is chosen based on distribution of examples in
# each classes in AVA dataset.
_C.DEMO.COMMON_CLASS_NAMES = [
    "watch (a person)",
    "talk to (e.g., self, a person, a group)",
    "listen to (a person)",
    "touch (an object)",
    "carry/hold (an object)",
    "walk",
    "sit",
    "lie/sleep",
    "bend/bow (at the waist)",
]
# Slow-motion rate for the visualization. The visualized portions of the
# video will be played `_C.DEMO.SLOWMO` times slower than usual speed.
_C.DEMO.SLOWMO = 1
# Add custom config with default values.
custom_config.add_custom_config(_C)
def assert_and_infer_cfg(cfg):
    """
    Validate config invariants and apply derived settings in place.

    Args:
        cfg (CfgNode): the config to check; mutated when LR scaling by
            NUM_SHARDS is enabled.

    Returns:
        CfgNode: the same (possibly mutated) config object.

    Raises:
        AssertionError: if any invariant is violated.
    """
    valid_ckpt_types = ("pytorch", "caffe2")
    # BN assertions.
    if cfg.BN.USE_PRECISE_STATS:
        assert cfg.BN.NUM_BATCHES_PRECISE >= 0
    # TRAIN/TEST assertions: checkpoint type and batch divisibility by GPUs.
    for phase_cfg in (cfg.TRAIN, cfg.TEST):
        assert phase_cfg.CHECKPOINT_TYPE in valid_ckpt_types
        assert cfg.NUM_GPUS == 0 or phase_cfg.BATCH_SIZE % cfg.NUM_GPUS == 0
    # RESNET assertions.
    groups = cfg.RESNET.NUM_GROUPS
    width = cfg.RESNET.WIDTH_PER_GROUP
    assert groups > 0
    assert width > 0
    assert width % groups == 0
    # Execute LR scaling by num_shards.
    if cfg.SOLVER.BASE_LR_SCALE_NUM_SHARDS:
        scale = cfg.NUM_SHARDS
        cfg.SOLVER.BASE_LR *= scale
        cfg.SOLVER.WARMUP_START_LR *= scale
        cfg.SOLVER.COSINE_END_LR *= scale
    # General assertions.
    assert cfg.SHARD_ID < cfg.NUM_SHARDS
    return cfg
def get_cfg():
    """
    Get a copy of the default config.

    Returns:
        CfgNode: a clone of the global default configuration `_C`, safe to
        mutate without affecting the defaults.
    """
    return _C.clone()
| 31,843 | 31.230769 | 115 | py |
STTS | STTS-main/MViT/slowfast/models/operators.py | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
"""Custom operators."""
import torch
import torch.nn as nn
class Swish(nn.Module):
    """Swish activation function: x * sigmoid(x).

    Thin module wrapper around the memory-efficient autograd implementation
    in `SwishEfficient`.
    """

    def forward(self, x):
        """Apply Swish elementwise."""
        return SwishEfficient.apply(x)
class SwishEfficient(torch.autograd.Function):
    """Swish activation function: x * sigmoid(x).

    Memory-efficient variant: only the input is saved for backward; the
    sigmoid is recomputed there instead of storing the activation output.
    """

    @staticmethod
    def forward(ctx, x):
        """Compute x * sigmoid(x) and stash `x` for the backward pass."""
        result = x * torch.sigmoid(x)
        ctx.save_for_backward(x)
        return result

    @staticmethod
    def backward(ctx, grad_output):
        """Gradient of Swish: sigmoid(x) * (1 + x * (1 - sigmoid(x))).

        Uses `ctx.saved_tensors`; the previous `ctx.saved_variables` accessor
        has been deprecated since PyTorch 0.4 and removed in recent releases.
        """
        (x,) = ctx.saved_tensors
        sigmoid_x = torch.sigmoid(x)
        return grad_output * (sigmoid_x * (1 + x * (1 - sigmoid_x)))
class SE(nn.Module):
    """Squeeze-and-Excitation (SE) block w/ Swish: AvgPool, FC, Swish, FC, Sigmoid."""

    def _round_width(self, width, multiplier, min_width=8, divisor=8):
        """
        Round width of filters based on width multiplier.

        Args:
            width (int): the channel dimensions of the input.
            multiplier (float): the multiplication factor.
            min_width (int): the minimum width after multiplication.
            divisor (int): the new width should be dividable by divisor.
        """
        if not multiplier:
            return width

        scaled = width * multiplier
        floor = min_width or divisor
        # Round to the nearest multiple of `divisor`, clamped from below.
        rounded = max(floor, int(scaled + divisor / 2) // divisor * divisor)
        # Do not shrink by more than 10% relative to the scaled width.
        if rounded < 0.9 * scaled:
            rounded += divisor
        return int(rounded)

    def __init__(self, dim_in, ratio, relu_act=True):
        """
        Args:
            dim_in (int): the channel dimensions of the input.
            ratio (float): the channel reduction ratio for squeeze.
            relu_act (bool): whether to use ReLU activation instead
                of Swish (default).
        """
        super().__init__()
        self.avg_pool = nn.AdaptiveAvgPool3d((1, 1, 1))
        dim_fc = self._round_width(dim_in, ratio)
        # Squeeze to `dim_fc` channels, then excite back to `dim_in` and
        # produce a per-channel gate in (0, 1) via the sigmoid.
        self.fc1 = nn.Conv3d(dim_in, dim_fc, 1, bias=True)
        self.fc1_act = nn.ReLU() if relu_act else Swish()
        self.fc2 = nn.Conv3d(dim_fc, dim_in, 1, bias=True)
        self.fc2_sig = nn.Sigmoid()

    def forward(self, x):
        """Scale the input channel-wise by the squeeze-excitation gate."""
        gate = self.avg_pool(x)
        gate = self.fc1_act(self.fc1(gate))
        gate = self.fc2_sig(self.fc2(gate))
        return x * gate
| 2,552 | 29.759036 | 86 | py |
STTS | STTS-main/MViT/slowfast/models/losses.py | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
"""Loss functions."""
import torch
import torch.nn as nn
import torch.nn.functional as F
from slowfast.models.topk import batched_index_select
class SoftTargetCrossEntropy(nn.Module):
    """
    Cross entropy loss against soft (probability-distribution) targets.
    """

    def __init__(self, reduction="mean"):
        """
        Args:
            reduction (str): specifies reduction to apply to the output. It can be
                "mean" (default) or "none".
        """
        super().__init__()
        self.reduction = reduction

    def forward(self, x, y):
        """Return -sum(y * log_softmax(x)) per sample, reduced per config."""
        per_sample = -(y * F.log_softmax(x, dim=-1)).sum(dim=-1)
        if self.reduction == "none":
            return per_sample
        if self.reduction == "mean":
            return per_sample.mean()
        raise NotImplementedError
class SoftTargetCrossEntropyPruning(nn.Module):
    """
    Soft-target cross entropy plus a token-pruning keep-ratio penalty.

    total = clf_weight * CE(pred, y)
            + ratio_weight * sum_i mean((kept_ratio_i - target_ratio_i)^2)
              / len(pruning_loc)

    where target_ratio_i is the cumulative product of `keep_ratio` up to
    pruning stage i.
    """

    def __init__(self, ratio_weight=2.0, pruning_loc=(0,), keep_ratio=(0.5,), clf_weight=1.0, reduction="mean"):
        """
        Args:
            ratio_weight (float): weight of the keep-ratio penalty.
            pruning_loc (sequence): pruning stage locations; only its length
                is used, to normalize the penalty.
            keep_ratio (sequence): per-stage target keep ratios.
            clf_weight (float): weight of the classification loss.
            reduction (str): specifies reduction to apply to the output. It can be
                "mean" (default) or "none".
        """
        super(SoftTargetCrossEntropyPruning, self).__init__()
        self.reduction = reduction
        self.clf_weight = clf_weight
        # Immutable defaults avoid the shared-mutable-default pitfall.
        self.pruning_loc = pruning_loc
        self.keep_ratio = keep_ratio
        self.ratio_weight = ratio_weight

    def forward(self, x, y):
        """
        Args:
            x (tuple): (pred logits, list of per-stage keep-score tensors
                shaped (batch, tokens)).
            y (Tensor): soft target distribution.
        """
        pred, out_pred_score = x
        cls_loss = torch.sum(-y * F.log_softmax(pred, dim=-1), dim=-1)
        if self.reduction == "mean":
            cls_loss = cls_loss.mean()
        elif self.reduction != "none":
            raise NotImplementedError
        pred_loss = 0.0
        left_ratio = 1.0
        for i, score in enumerate(out_pred_score):
            # Fraction of tokens kept at this stage vs. the cumulative target.
            pos_ratio = score.mean(1)
            left_ratio = left_ratio * self.keep_ratio[i]
            pred_loss = pred_loss + ((pos_ratio - left_ratio) ** 2).mean()
        # NOTE: the debug `print(left_ratio, pos_ratio)` that used to live in
        # this loop has been removed; it spammed stdout every training step.
        loss = self.clf_weight * cls_loss + self.ratio_weight * pred_loss / len(self.pruning_loc)
        return loss
# Registry of supported loss constructors, keyed by the name used in configs.
_LOSSES = {
    "cross_entropy": nn.CrossEntropyLoss,
    "bce": nn.BCELoss,
    "bce_logit": nn.BCEWithLogitsLoss,
    "soft_cross_entropy": SoftTargetCrossEntropy,
    "soft_cross_entropy_pruning": SoftTargetCrossEntropyPruning,
}
def get_loss_func(loss_name):
    """
    Retrieve the loss class registered under the given name.

    Args:
        loss_name (str): key into the `_LOSSES` registry.

    Returns:
        type: the loss-module class (not instantiated).

    Raises:
        NotImplementedError: if `loss_name` is not registered.
    """
    # Membership test directly on the dict; `.keys()` is redundant.
    if loss_name not in _LOSSES:
        raise NotImplementedError("Loss {} is not supported".format(loss_name))
    return _LOSSES[loss_name]
| 2,960 | 28.61 | 110 | py |
STTS | STTS-main/MViT/slowfast/models/batchnorm_helper.py | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
"""BatchNorm (BN) utility functions and custom batch-size BN implementations"""
from functools import partial
import torch
import torch.distributed as dist
import torch.nn as nn
from torch.autograd.function import Function
import slowfast.utils.distributed as du
def get_norm(cfg):
    """
    Resolve the normalization layer selected by the config.

    Args:
        cfg (CfgNode): model building configs, details are in the comments of
            the config file.

    Returns:
        nn.Module class or factory (functools.partial) for the norm layer.

    Raises:
        NotImplementedError: for an unrecognized cfg.BN.NORM_TYPE.
    """
    norm_type = cfg.BN.NORM_TYPE
    if norm_type == "batchnorm":
        return nn.BatchNorm3d
    if norm_type == "sub_batchnorm":
        return partial(SubBatchNorm3d, num_splits=cfg.BN.NUM_SPLITS)
    if norm_type == "sync_batchnorm":
        return partial(
            NaiveSyncBatchNorm3d, num_sync_devices=cfg.BN.NUM_SYNC_DEVICES
        )
    raise NotImplementedError(
        "Norm type {} is not supported".format(norm_type)
    )
class SubBatchNorm3d(nn.Module):
    """
    The standard BN layer computes stats across all examples in a GPU. In some
    cases it is desirable to compute stats across only a subset of examples
    (e.g., in multigrid training https://arxiv.org/abs/1912.00998).
    SubBatchNorm3d splits the batch dimension into N splits, and runs BN on
    each of them separately, so that the stats are computed on each subset of
    examples (1/N of the batch) independently. During evaluation, it
    aggregates the stats from all splits into one BN (`self.bn`).
    """

    def __init__(self, num_splits, **args):
        """
        Args:
            num_splits (int): number of splits.
            args (dict): keyword arguments forwarded to nn.BatchNorm3d;
                must include "num_features".
        """
        super(SubBatchNorm3d, self).__init__()
        self.num_splits = num_splits
        num_features = args["num_features"]
        # Keep only one set of weight and bias (shared across splits); the
        # inner BN layers are created with affine=False so the affine
        # transform is applied once, in forward.
        if args.get("affine", True):
            self.affine = True
            args["affine"] = False
            self.weight = torch.nn.Parameter(torch.ones(num_features))
            self.bias = torch.nn.Parameter(torch.zeros(num_features))
        else:
            self.affine = False
        # `self.bn` holds the aggregated stats and is used at eval time.
        self.bn = nn.BatchNorm3d(**args)
        # `self.split_bn` is used in training; channels are scaled by
        # num_splits because forward folds the splits into the channel dim.
        args["num_features"] = num_features * num_splits
        self.split_bn = nn.BatchNorm3d(**args)

    def _get_aggregated_mean_std(self, means, stds, n):
        """
        Calculate the aggregated mean and stds.
        Args:
            means (tensor): mean values.
            stds (tensor): standard deviations (running variances).
            n (int): number of sets of means and stds.
        """
        mean = means.view(n, -1).sum(0) / n
        # Law of total variance for equal-sized splits: average of the
        # per-split variances plus the variance of the per-split means.
        std = (
            stds.view(n, -1).sum(0) / n
            + ((means.view(n, -1) - mean) ** 2).view(n, -1).sum(0) / n
        )
        return mean.detach(), std.detach()

    def aggregate_stats(self):
        """
        Synchronize running_mean, and running_var. Call this before eval.
        """
        if self.split_bn.track_running_stats:
            # Copy the aggregated per-split stats into the eval-time BN.
            (
                self.bn.running_mean.data,
                self.bn.running_var.data,
            ) = self._get_aggregated_mean_std(
                self.split_bn.running_mean,
                self.split_bn.running_var,
                self.num_splits,
            )

    def forward(self, x):
        if self.training:
            n, c, t, h, w = x.shape
            # Fold splits into channels so split_bn normalizes each split
            # (1/num_splits of the batch) with its own statistics.
            x = x.view(n // self.num_splits, c * self.num_splits, t, h, w)
            x = self.split_bn(x)
            x = x.view(n, c, t, h, w)
        else:
            x = self.bn(x)
        # Apply the single shared affine transform (see __init__).
        if self.affine:
            x = x * self.weight.view((-1, 1, 1, 1))
            x = x + self.bias.view((-1, 1, 1, 1))
        return x
class GroupGather(Function):
    """
    GroupGather performs all gather on each of the local process/ GPU groups.
    """

    @staticmethod
    def forward(ctx, input, num_sync_devices, num_groups):
        """
        Perform forwarding, gathering the stats across different process/ GPU
        group.
        """
        # Stash group geometry for the backward pass.
        ctx.num_sync_devices = num_sync_devices
        ctx.num_groups = num_groups

        # Gather `input` from every process on this machine.
        input_list = [
            torch.zeros_like(input) for k in range(du.get_local_size())
        ]
        dist.all_gather(
            input_list, input, async_op=False, group=du._LOCAL_PROCESS_GROUP
        )

        inputs = torch.stack(input_list, dim=0)
        if num_groups > 1:
            # Keep only the slice of contiguous ranks that belong to this
            # rank's sync group.
            rank = du.get_local_rank()
            group_idx = rank // num_sync_devices
            inputs = inputs[
                group_idx
                * num_sync_devices : (group_idx + 1)
                * num_sync_devices
            ]
        # Sum over the group; the caller divides by the group size to average.
        inputs = torch.sum(inputs, dim=0)
        return inputs

    @staticmethod
    def backward(ctx, grad_output):
        """
        Perform backwarding, gathering the gradients across different process/ GPU
        group.
        """
        # Mirror of forward: gather gradients from all local processes,
        # restrict to this rank's group, then sum.
        grad_output_list = [
            torch.zeros_like(grad_output) for k in range(du.get_local_size())
        ]
        dist.all_gather(
            grad_output_list,
            grad_output,
            async_op=False,
            group=du._LOCAL_PROCESS_GROUP,
        )

        grads = torch.stack(grad_output_list, dim=0)
        if ctx.num_groups > 1:
            rank = du.get_local_rank()
            group_idx = rank // ctx.num_sync_devices
            grads = grads[
                group_idx
                * ctx.num_sync_devices : (group_idx + 1)
                * ctx.num_sync_devices
            ]
        grads = torch.sum(grads, dim=0)
        # Gradients only for `input`; the two int args get None.
        return grads, None, None
class NaiveSyncBatchNorm3d(nn.BatchNorm3d):
    # BatchNorm3d whose training-time statistics are averaged across a group
    # of local devices via GroupGather instead of being per-GPU.

    def __init__(self, num_sync_devices, **args):
        """
        Naive version of Synchronized 3D BatchNorm.
        Args:
            num_sync_devices (int): number of devices to sync; a
                non-positive value means "sync across all local devices".
            args (dict): keyword arguments forwarded to nn.BatchNorm3d.
        """
        self.num_sync_devices = num_sync_devices
        if self.num_sync_devices > 0:
            # Local world size must split evenly into sync groups.
            assert du.get_local_size() % self.num_sync_devices == 0, (
                du.get_local_size(),
                self.num_sync_devices,
            )
            self.num_groups = du.get_local_size() // self.num_sync_devices
        else:
            self.num_sync_devices = du.get_local_size()
            self.num_groups = 1
        super(NaiveSyncBatchNorm3d, self).__init__(**args)

    def forward(self, input):
        # Plain (non-synced) BN when single-device or in eval mode.
        if du.get_local_size() == 1 or not self.training:
            return super().forward(input)

        assert input.shape[0] > 0, "SyncBatchNorm does not support empty inputs"
        C = input.shape[1]
        # Per-device mean and mean-of-squares over (N, T, H, W).
        mean = torch.mean(input, dim=[0, 2, 3, 4])
        meansqr = torch.mean(input * input, dim=[0, 2, 3, 4])

        # One gather for both statistics; divide by group size to average.
        vec = torch.cat([mean, meansqr], dim=0)
        vec = GroupGather.apply(vec, self.num_sync_devices, self.num_groups) * (
            1.0 / self.num_sync_devices
        )

        mean, meansqr = torch.split(vec, C)
        # var = E[x^2] - E[x]^2
        var = meansqr - mean * mean
        # Exponential moving average update of the running statistics.
        self.running_mean += self.momentum * (mean.detach() - self.running_mean)
        self.running_var += self.momentum * (var.detach() - self.running_var)

        invstd = torch.rsqrt(var + self.eps)
        # Fold normalization and the affine transform into one scale/bias.
        scale = self.weight * invstd
        bias = self.bias - mean * scale
        scale = scale.reshape(1, -1, 1, 1, 1)
        bias = bias.reshape(1, -1, 1, 1, 1)
        return input * scale + bias
| 7,424 | 32.90411 | 82 | py |
STTS | STTS-main/MViT/slowfast/models/mvit.py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
"""Video models."""
import math
from functools import partial
import torch
import torch.nn as nn
from torch.nn.init import trunc_normal_
from einops import rearrange
from math import sqrt
import slowfast.utils.weight_init_helper as init_helper
from slowfast.models.attention import MultiScaleBlock
from slowfast.models.batchnorm_helper import get_norm
from slowfast.models.stem_helper import PatchEmbed
from slowfast.models.utils import round_width, validate_checkpoint_wrapper_import
from slowfast.models.topk import PatchNet
from . import head_helper
from .build import MODEL_REGISTRY
try:
from fairscale.nn.checkpoint import checkpoint_wrapper
except ImportError:
checkpoint_wrapper = None
@MODEL_REGISTRY.register()
class MViT(nn.Module):
"""
Multiscale Vision Transformers
Haoqi Fan, Bo Xiong, Karttikeya Mangalam, Yanghao Li, Zhicheng Yan, Jitendra Malik, Christoph Feichtenhofer
https://arxiv.org/abs/2104.11227
"""
def __init__(self, cfg):
super().__init__()
# Get parameters.
assert cfg.DATA.TRAIN_CROP_SIZE == cfg.DATA.TEST_CROP_SIZE
self.cfg = cfg
pool_first = cfg.MVIT.POOL_FIRST
# Prepare input.
spatial_size = cfg.DATA.TRAIN_CROP_SIZE
temporal_size = cfg.DATA.NUM_FRAMES
in_chans = cfg.DATA.INPUT_CHANNEL_NUM[0]
use_2d_patch = cfg.MVIT.PATCH_2D
self.patch_stride = cfg.MVIT.PATCH_STRIDE
if use_2d_patch:
self.patch_stride = [1] + self.patch_stride
# Prepare output.
num_classes = cfg.MODEL.NUM_CLASSES
embed_dim = cfg.MVIT.EMBED_DIM
# Prepare backbone
num_heads = cfg.MVIT.NUM_HEADS
mlp_ratio = cfg.MVIT.MLP_RATIO
qkv_bias = cfg.MVIT.QKV_BIAS
self.drop_rate = cfg.MVIT.DROPOUT_RATE
depth = cfg.MVIT.DEPTH
drop_path_rate = cfg.MVIT.DROPPATH_RATE
mode = cfg.MVIT.MODE
self.cls_embed_on = cfg.MVIT.CLS_EMBED_ON
self.sep_pos_embed = cfg.MVIT.SEP_POS_EMBED
if cfg.MVIT.NORM == "layernorm":
norm_layer = partial(nn.LayerNorm, eps=1e-6)
else:
raise NotImplementedError("Only supports layernorm.")
self.num_classes = num_classes
self.patch_embed = PatchEmbed(
dim_in=in_chans,
dim_out=embed_dim,
kernel=cfg.MVIT.PATCH_KERNEL,
stride=cfg.MVIT.PATCH_STRIDE,
padding=cfg.MVIT.PATCH_PADDING,
conv_2d=use_2d_patch,
)
self.input_dims = [temporal_size, spatial_size, spatial_size]
assert self.input_dims[1] == self.input_dims[2]
self.patch_dims = [
self.input_dims[i] // self.patch_stride[i]
for i in range(len(self.input_dims))
]
num_patches = math.prod(self.patch_dims)
dpr = [
x.item() for x in torch.linspace(0, drop_path_rate, depth)
] # stochastic depth decay rule
if self.cls_embed_on:
self.cls_token = nn.Parameter(torch.zeros(1, 1, embed_dim))
pos_embed_dim = num_patches + 1
else:
pos_embed_dim = num_patches
if self.sep_pos_embed:
self.pos_embed_spatial = nn.Parameter(
torch.zeros(
1, self.patch_dims[1] * self.patch_dims[2], embed_dim
)
)
self.pos_embed_temporal = nn.Parameter(
torch.zeros(1, self.patch_dims[0], embed_dim)
)
if self.cls_embed_on:
self.pos_embed_class = nn.Parameter(
torch.zeros(1, 1, embed_dim)
)
else:
self.pos_embed = nn.Parameter(
torch.zeros(1, pos_embed_dim, embed_dim)
)
if self.drop_rate > 0.0:
self.pos_drop = nn.Dropout(p=self.drop_rate)
self.time_pruning_loc = cfg.MVIT.TIME_PRUNING_LOC
time_left_ratio = cfg.MVIT.TIME_LEFT_RATIO
time_score = cfg.MVIT.TIME_SCORE
self.space_pruning_loc = cfg.MVIT.SPACE_PRUNING_LOC
space_left_ratio = cfg.MVIT.SPACE_LEFT_RATIO
space_score = cfg.MVIT.SPACE_SCORE
self.sigma_max = cfg.MVIT.SIGMA
self.sigma = cfg.MVIT.SIGMA
dim_mul, head_mul = torch.ones(depth + 1), torch.ones(depth + 1)
for i in range(len(cfg.MVIT.DIM_MUL)):
dim_mul[cfg.MVIT.DIM_MUL[i][0]] = cfg.MVIT.DIM_MUL[i][1]
for i in range(len(cfg.MVIT.HEAD_MUL)):
head_mul[cfg.MVIT.HEAD_MUL[i][0]] = cfg.MVIT.HEAD_MUL[i][1]
pool_q = [[] for i in range(cfg.MVIT.DEPTH)]
pool_kv = [[] for i in range(cfg.MVIT.DEPTH)]
stride_q = [[] for i in range(cfg.MVIT.DEPTH)]
stride_kv = [[] for i in range(cfg.MVIT.DEPTH)]
for i in range(len(cfg.MVIT.POOL_Q_STRIDE)):
stride_q[cfg.MVIT.POOL_Q_STRIDE[i][0]] = cfg.MVIT.POOL_Q_STRIDE[i][
1:
]
if cfg.MVIT.POOL_KVQ_KERNEL is not None:
pool_q[cfg.MVIT.POOL_Q_STRIDE[i][0]] = cfg.MVIT.POOL_KVQ_KERNEL
else:
pool_q[cfg.MVIT.POOL_Q_STRIDE[i][0]] = [
s + 1 if s > 1 else s for s in cfg.MVIT.POOL_Q_STRIDE[i][1:]
]
# If POOL_KV_STRIDE_ADAPTIVE is not None, initialize POOL_KV_STRIDE.
if cfg.MVIT.POOL_KV_STRIDE_ADAPTIVE is not None:
_stride_kv = cfg.MVIT.POOL_KV_STRIDE_ADAPTIVE
cfg.MVIT.POOL_KV_STRIDE = []
for i in range(cfg.MVIT.DEPTH):
if len(stride_q[i]) > 0:
_stride_kv = [
max(_stride_kv[d] // stride_q[i][d], 1)
for d in range(len(_stride_kv))
]
cfg.MVIT.POOL_KV_STRIDE.append([i] + _stride_kv)
for i in range(len(cfg.MVIT.POOL_KV_STRIDE)):
stride_kv[cfg.MVIT.POOL_KV_STRIDE[i][0]] = cfg.MVIT.POOL_KV_STRIDE[
i
][1:]
if cfg.MVIT.POOL_KVQ_KERNEL is not None:
pool_kv[
cfg.MVIT.POOL_KV_STRIDE[i][0]
] = cfg.MVIT.POOL_KVQ_KERNEL
else:
pool_kv[cfg.MVIT.POOL_KV_STRIDE[i][0]] = [
s + 1 if s > 1 else s
for s in cfg.MVIT.POOL_KV_STRIDE[i][1:]
]
self.norm_stem = norm_layer(embed_dim) if cfg.MVIT.NORM_STEM else None
self.blocks = nn.ModuleList()
if cfg.MODEL.ACT_CHECKPOINT:
validate_checkpoint_wrapper_import(checkpoint_wrapper)
embedding_temporal_size = temporal_size // 2
embedding_spatial_size = self.patch_dims[1] * self.patch_dims[2]
time_score_predictor = nn.ModuleList()
space_score_predictor = nn.ModuleList()
s_count = 0
t_count = 0
for i in range(depth):
num_heads = round_width(num_heads, head_mul[i])
embed_dim = round_width(embed_dim, dim_mul[i], divisor=num_heads)
dim_out = round_width(
embed_dim,
dim_mul[i + 1],
divisor=round_width(num_heads, head_mul[i + 1]),
)
attention_block = MultiScaleBlock(
dim=embed_dim,
dim_out=dim_out,
num_heads=num_heads,
mlp_ratio=mlp_ratio,
qkv_bias=qkv_bias,
drop_rate=self.drop_rate,
drop_path=dpr[i],
norm_layer=norm_layer,
kernel_q=pool_q[i] if len(pool_q) > i else [],
kernel_kv=pool_kv[i] if len(pool_kv) > i else [],
stride_q=stride_q[i] if len(stride_q) > i else [],
stride_kv=stride_kv[i] if len(stride_kv) > i else [],
mode=mode,
has_cls_embed=self.cls_embed_on,
pool_first=pool_first,
)
if cfg.MODEL.ACT_CHECKPOINT:
attention_block = checkpoint_wrapper(attention_block)
self.blocks.append(attention_block)
if len(stride_q[i]) > 0:
embedding_spatial_size = (int(sqrt(embedding_spatial_size)) // stride_q[i][1]) ** 2
if self.time_pruning_loc is not None and i in self.time_pruning_loc:
left_frames = int(embedding_temporal_size * time_left_ratio[t_count])
t_count += 1
patchnet = PatchNet(score=time_score, k=left_frames, in_channels = embed_dim)
time_score_predictor.append(patchnet)
embedding_temporal_size = left_frames
if self.space_pruning_loc is not None and i in self.space_pruning_loc:
left_patches = int(embedding_spatial_size * space_left_ratio[s_count])
s_count += 1
patchnet = PatchNet(score=space_score, k=left_patches, in_channels = embed_dim)
space_score_predictor.append(patchnet)
embedding_spatial_size = left_patches
if len(time_score_predictor) > 0:
self.time_score_predictor = time_score_predictor
if len(space_score_predictor) > 0:
self.space_score_predictor = space_score_predictor
embed_dim = dim_out
self.norm = norm_layer(embed_dim)
self.head = head_helper.TransformerBasicHead(
embed_dim,
num_classes,
dropout_rate=cfg.MODEL.DROPOUT_RATE,
act_func=cfg.MODEL.HEAD_ACT,
)
if self.sep_pos_embed:
trunc_normal_(self.pos_embed_spatial, std=0.02)
trunc_normal_(self.pos_embed_temporal, std=0.02)
if self.cls_embed_on:
trunc_normal_(self.pos_embed_class, std=0.02)
else:
trunc_normal_(self.pos_embed, std=0.02)
if self.cls_embed_on:
trunc_normal_(self.cls_token, std=0.02)
self.apply(self._init_weights)
def _init_weights(self, m):
if isinstance(m, nn.Linear):
nn.init.trunc_normal_(m.weight, std=0.02)
if isinstance(m, nn.Linear) and m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.LayerNorm):
nn.init.constant_(m.bias, 0)
nn.init.constant_(m.weight, 1.0)
@torch.jit.ignore
def no_weight_decay(self):
if self.cfg.MVIT.ZERO_DECAY_POS_CLS:
if self.sep_pos_embed:
if self.cls_embed_on:
return {
"pos_embed_spatial",
"pos_embed_temporal",
"pos_embed_class",
"cls_token",
}
else:
return {
"pos_embed_spatial",
"pos_embed_temporal",
"pos_embed_class",
}
else:
if self.cls_embed_on:
return {"pos_embed", "cls_token"}
else:
return {"pos_embed"}
else:
return {}
def update_sigma(self, cur_step, total_steps):
process = cur_step / total_steps
sigma_multiplier = 1 - process
self.sigma = self.sigma_max * sigma_multiplier
def forward(self, x):
x = x[0]
x = self.patch_embed(x)
T = self.cfg.DATA.NUM_FRAMES // self.patch_stride[0]
H = self.cfg.DATA.TRAIN_CROP_SIZE // self.patch_stride[1]
W = self.cfg.DATA.TRAIN_CROP_SIZE // self.patch_stride[2]
B, TN, C = x.shape
N = TN // T
if self.cls_embed_on:
cls_tokens = self.cls_token.expand(
B, -1, -1
) # stole cls_tokens impl from Phil Wang, thanks
x = torch.cat((cls_tokens, x), dim=1)
if self.sep_pos_embed:
pos_embed = self.pos_embed_spatial.repeat(
1, self.patch_dims[0], 1
) + torch.repeat_interleave(
self.pos_embed_temporal,
self.patch_dims[1] * self.patch_dims[2],
dim=1,
)
if self.cls_embed_on:
pos_embed = torch.cat([self.pos_embed_class, pos_embed], 1)
x = x + pos_embed
else:
x = x + self.pos_embed
if self.drop_rate:
x = self.pos_drop(x)
if self.norm_stem:
x = self.norm_stem(x)
thw = [T, H, W]
t_count = 0
s_count = 0
for i, blk in enumerate(self.blocks):
if hasattr(self, 'time_score_predictor') and i in self.time_pruning_loc:
if self.cls_embed_on:
cls_tokens, x = x[:, 0:1], x[:,1:]
x = self.time_score_predictor[t_count](x, 'time', N, T, self.sigma)
T = x.size(1) // N
t_count += 1
if self.cls_embed_on:
x = torch.cat((cls_tokens, x), dim=1)
thw = [T, H, W]
if hasattr(self, 'space_score_predictor') and i in self.space_pruning_loc:
if self.cls_embed_on:
cls_tokens, x = x[:, 0:1, :], x[:,1:]
x = self.space_score_predictor[s_count](x, 'space', N, T, self.sigma)
N = x.size(1) // T
H = W = int(math.sqrt(N))
s_count += 1
if self.cls_embed_on:
x = torch.cat((cls_tokens, x), dim=1)
thw = [T, H, W]
x, thw = blk(x, thw)
T, H, W = thw[0], thw[1], thw[1]
N = H * W
x = self.norm(x)
if self.cls_embed_on:
x = x[:, 0]
else:
x = x.mean(1)
x = self.head(x)
return x
| 14,098 | 35.058824 | 111 | py |
STTS | STTS-main/MViT/slowfast/models/ptv_model_builder.py | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
"""Video models using PyTorchVideo model builder."""
from functools import partial
import torch.nn as nn
from detectron2.layers import ROIAlign
from slowfast.models.batchnorm_helper import get_norm
from slowfast.models.video_model_builder import _POOL1, _TEMPORAL_KERNEL_BASIS
from pytorchvideo.models.csn import create_csn
from pytorchvideo.models.head import (
create_res_basic_head,
create_res_roi_pooling_head,
)
from pytorchvideo.models.r2plus1d import (
create_2plus1d_bottleneck_block,
create_r2plus1d,
)
from pytorchvideo.models.resnet import create_bottleneck_block, create_resnet
from pytorchvideo.models.slowfast import create_slowfast
from pytorchvideo.models.x3d import (
Swish,
create_x3d,
create_x3d_bottleneck_block,
)
from pytorchvideo.models.vision_transformers import create_multiscale_vision_transformers
from .build import MODEL_REGISTRY
def get_head_act(act_func):
    """Resolve a head-activation name into the corresponding layer.

    Args:
        act_func (string): activation function to use. 'softmax': applies
            softmax on the output. 'sigmoid': applies sigmoid on the output.
    Returns:
        nn.Module: the activation layer.
    Raises:
        NotImplementedError: for any other activation name.
    """
    if act_func == "sigmoid":
        return nn.Sigmoid()
    if act_func == "softmax":
        return nn.Softmax(dim=1)
    raise NotImplementedError(
        "{} is not supported as a head activation "
        "function.".format(act_func)
    )
@MODEL_REGISTRY.register()
class PTVResNet(nn.Module):
    """
    Single-pathway ResNet video models (c2d / slow / i3d) assembled with the
    PyTorchVideo `create_resnet` builder, with an optional ROI-align
    detection head.
    """
    def __init__(self, cfg):
        """
        The `__init__` method of any subclass should also contain these
        arguments.
        Args:
            cfg (CfgNode): model building configs, details are in the
                comments of the config file.
        """
        super(PTVResNet, self).__init__()
        # Fix: the asserted condition requires STRIDE_1X1 == False, but the
        # original message claimed it "must be True".
        assert (
            cfg.RESNET.STRIDE_1X1 is False
        ), "STRIDE_1X1 must be False for PTVResNet"
        assert (
            cfg.RESNET.TRANS_FUNC == "bottleneck_transform"
        ), f"Unsupported TRANS_FUNC type {cfg.RESNET.TRANS_FUNC} for PTVResNet"
        assert cfg.MODEL.ARCH in [
            "c2d",
            "slow",
            "i3d",
        ], f"Unsupported MODEL.ARCH type {cfg.MODEL.ARCH} for PTVResNet"
        self.detection_mode = cfg.DETECTION.ENABLE
        self._construct_network(cfg)
    def _construct_network(self, cfg):
        """
        Builds a single pathway ResNet model.
        Args:
            cfg (CfgNode): model building configs, details are in the
                comments of the config file.
        """
        # Params from configs.
        norm_module = get_norm(cfg)
        head_act = get_head_act(cfg.MODEL.HEAD_ACT)
        pool_size = _POOL1[cfg.MODEL.ARCH]
        num_groups = cfg.RESNET.NUM_GROUPS
        spatial_dilations = cfg.RESNET.SPATIAL_DILATIONS
        spatial_strides = cfg.RESNET.SPATIAL_STRIDES
        temp_kernel = _TEMPORAL_KERNEL_BASIS[cfg.MODEL.ARCH]
        # Only add a stage-1 pool when pooling actually reduces some dim.
        stage1_pool = pool_size[0][0] != 1 or len(set(pool_size[0])) > 1
        stage_spatial_stride = (
            spatial_strides[0][0],
            spatial_strides[1][0],
            spatial_strides[2][0],
            spatial_strides[3][0],
        )
        if cfg.MODEL.ARCH == "i3d":
            # i3d interleaves temporal kernels inside each stage.
            stage_conv_a_kernel_size = (
                (3, 1, 1),
                [(3, 1, 1), (1, 1, 1)],
                [(3, 1, 1), (1, 1, 1)],
                [(1, 1, 1), (3, 1, 1)],
            )
        else:
            stage_conv_a_kernel_size = (
                (temp_kernel[1][0][0], 1, 1),
                (temp_kernel[2][0][0], 1, 1),
                (temp_kernel[3][0][0], 1, 1),
                (temp_kernel[4][0][0], 1, 1),
            )
        # Head from config
        if cfg.DETECTION.ENABLE:
            self.detection_head = create_res_roi_pooling_head(
                in_features=cfg.RESNET.WIDTH_PER_GROUP * 2 ** (4 + 1),
                out_features=cfg.MODEL.NUM_CLASSES,
                pool=nn.AvgPool3d,
                output_size=(1, 1, 1),
                pool_kernel_size=(
                    cfg.DATA.NUM_FRAMES // pool_size[0][0],
                    1,
                    1,
                ),
                dropout_rate=cfg.MODEL.DROPOUT_RATE,
                activation=None,
                output_with_global_average=False,
                pool_spatial=nn.MaxPool2d,
                resolution=[cfg.DETECTION.ROI_XFORM_RESOLUTION] * 2,
                spatial_scale=1.0 / float(cfg.DETECTION.SPATIAL_SCALE_FACTOR),
                sampling_ratio=0,
                roi=ROIAlign,
            )
        self.model = create_resnet(
            # Input clip configs.
            input_channel=cfg.DATA.INPUT_CHANNEL_NUM[0],
            # Model configs.
            model_depth=cfg.RESNET.DEPTH,
            model_num_class=cfg.MODEL.NUM_CLASSES,
            dropout_rate=cfg.MODEL.DROPOUT_RATE,
            # Normalization configs.
            norm=norm_module,
            # Activation configs.
            activation=partial(nn.ReLU, inplace=cfg.RESNET.INPLACE_RELU),
            # Stem configs.
            stem_dim_out=cfg.RESNET.WIDTH_PER_GROUP,
            stem_conv_kernel_size=(temp_kernel[0][0][0], 7, 7),
            stem_conv_stride=(1, 2, 2),
            stem_pool=nn.MaxPool3d,
            stem_pool_kernel_size=(1, 3, 3),
            stem_pool_stride=(1, 2, 2),
            # Stage configs.
            stage1_pool=nn.MaxPool3d if stage1_pool else None,
            stage1_pool_kernel_size=pool_size[0],
            stage_conv_a_kernel_size=stage_conv_a_kernel_size,
            stage_conv_b_kernel_size=(
                (1, 3, 3),
                (1, 3, 3),
                (1, 3, 3),
                (1, 3, 3),
            ),
            stage_conv_b_num_groups=(
                num_groups,
                num_groups,
                num_groups,
                num_groups,
            ),
            stage_conv_b_dilation=(
                (1, spatial_dilations[0][0], spatial_dilations[0][0]),
                (1, spatial_dilations[1][0], spatial_dilations[1][0]),
                (1, spatial_dilations[2][0], spatial_dilations[2][0]),
                (1, spatial_dilations[3][0], spatial_dilations[3][0]),
            ),
            stage_spatial_h_stride=stage_spatial_stride,
            stage_spatial_w_stride=stage_spatial_stride,
            stage_temporal_stride=(1, 1, 1, 1),
            bottleneck=create_bottleneck_block,
            # Head configs. The classification head is created by the builder
            # only when not in detection mode (the ROI head replaces it).
            head=create_res_basic_head if not self.detection_mode else None,
            head_pool=nn.AvgPool3d,
            head_pool_kernel_size=(
                cfg.DATA.NUM_FRAMES // pool_size[0][0],
                cfg.DATA.TRAIN_CROP_SIZE // 32 // pool_size[0][1],
                cfg.DATA.TRAIN_CROP_SIZE // 32 // pool_size[0][2],
            ),
            head_activation=None,
            head_output_with_global_average=False,
        )
        self.post_act = head_act
    def forward(self, x, bboxes=None):
        # `x` is a list of pathway tensors; this model is single-pathway.
        x = x[0]
        x = self.model(x)
        if self.detection_mode:
            x = self.detection_head(x, bboxes)
            x = self.post_act(x)
        else:
            # Performs fully convolutional inference.
            if not self.training:
                x = self.post_act(x)
                x = x.mean([2, 3, 4])
        x = x.view(x.shape[0], -1)
        return x
@MODEL_REGISTRY.register()
class PTVSlowFast(nn.Module):
    """
    SlowFast models assembled with the PyTorchVideo `create_slowfast`
    builder, with an optional ROI-align detection head.
    """
    def __init__(self, cfg):
        """
        The `__init__` method of any subclass should also contain these
        arguments.
        Args:
            cfg (CfgNode): model building configs, details are in the
                comments of the config file.
        """
        super(PTVSlowFast, self).__init__()
        # Fix: the asserted condition requires STRIDE_1X1 == False, but the
        # original message claimed it "must be True".
        assert (
            cfg.RESNET.STRIDE_1X1 is False
        ), "STRIDE_1X1 must be False for PTVSlowFast"
        assert (
            cfg.RESNET.TRANS_FUNC == "bottleneck_transform"
        ), f"Unsupported TRANS_FUNC type {cfg.RESNET.TRANS_FUNC} for PTVSlowFast"
        self.detection_mode = cfg.DETECTION.ENABLE
        self._construct_network(cfg)
    def _construct_network(self, cfg):
        """
        Builds a SlowFast model.
        Args:
            cfg (CfgNode): model building configs, details are in the
                comments of the config file.
        """
        _MODEL_STAGE_DEPTH = {50: (3, 4, 6, 3), 101: (3, 4, 23, 3)}
        # Params from configs.
        norm_module = get_norm(cfg)
        pool_size = _POOL1[cfg.MODEL.ARCH]
        num_groups = cfg.RESNET.NUM_GROUPS
        width_per_group = cfg.RESNET.WIDTH_PER_GROUP
        spatial_dilations = cfg.RESNET.SPATIAL_DILATIONS
        spatial_strides = cfg.RESNET.SPATIAL_STRIDES
        temp_kernel = _TEMPORAL_KERNEL_BASIS[cfg.MODEL.ARCH]
        num_block_temp_kernel = cfg.RESNET.NUM_BLOCK_TEMP_KERNEL
        stage_depth = _MODEL_STAGE_DEPTH[cfg.RESNET.DEPTH]
        # For each pathway/stage: temporal kernel for the first
        # `num_block_temp_kernel` blocks, degenerate (1, 1, 1) afterwards.
        stage_conv_a_kernel_sizes = [[], []]
        for pathway in range(2):
            for stage in range(4):
                stage_conv_a_kernel_sizes[pathway].append(
                    ((temp_kernel[stage + 1][pathway][0], 1, 1),)
                    * num_block_temp_kernel[stage][pathway]
                    + ((1, 1, 1),)
                    * (
                        stage_depth[stage]
                        - num_block_temp_kernel[stage][pathway]
                    )
                )
        # Head from config
        # Number of stages = 4
        stage_dim_in = cfg.RESNET.WIDTH_PER_GROUP * 2 ** (4 + 1)
        head_in_features = stage_dim_in + stage_dim_in // cfg.SLOWFAST.BETA_INV
        if cfg.DETECTION.ENABLE:
            self.detection_head = create_res_roi_pooling_head(
                in_features=head_in_features,
                out_features=cfg.MODEL.NUM_CLASSES,
                pool=None,
                output_size=(1, 1, 1),
                dropout_rate=cfg.MODEL.DROPOUT_RATE,
                activation=None,
                output_with_global_average=False,
                pool_spatial=nn.MaxPool2d,
                resolution=[cfg.DETECTION.ROI_XFORM_RESOLUTION] * 2,
                spatial_scale=1.0 / float(cfg.DETECTION.SPATIAL_SCALE_FACTOR),
                sampling_ratio=0,
                roi=ROIAlign,
            )
            # Detection pools only temporally; spatial pooling is done by ROI.
            head_pool_kernel_sizes = (
                (
                    cfg.DATA.NUM_FRAMES
                    // cfg.SLOWFAST.ALPHA
                    // pool_size[0][0],
                    1,
                    1,
                ),
                (cfg.DATA.NUM_FRAMES // pool_size[1][0], 1, 1),
            )
        else:
            head_pool_kernel_sizes = (
                (
                    cfg.DATA.NUM_FRAMES
                    // cfg.SLOWFAST.ALPHA
                    // pool_size[0][0],
                    cfg.DATA.TRAIN_CROP_SIZE // 32 // pool_size[0][1],
                    cfg.DATA.TRAIN_CROP_SIZE // 32 // pool_size[0][2],
                ),
                (
                    cfg.DATA.NUM_FRAMES // pool_size[1][0],
                    cfg.DATA.TRAIN_CROP_SIZE // 32 // pool_size[1][1],
                    cfg.DATA.TRAIN_CROP_SIZE // 32 // pool_size[1][2],
                ),
            )
        self.model = create_slowfast(
            # SlowFast configs.
            slowfast_channel_reduction_ratio=cfg.SLOWFAST.BETA_INV,
            slowfast_conv_channel_fusion_ratio=cfg.SLOWFAST.FUSION_CONV_CHANNEL_RATIO,
            slowfast_fusion_conv_kernel_size=(
                cfg.SLOWFAST.FUSION_KERNEL_SZ,
                1,
                1,
            ),
            slowfast_fusion_conv_stride=(cfg.SLOWFAST.ALPHA, 1, 1),
            # Input clip configs.
            input_channels=cfg.DATA.INPUT_CHANNEL_NUM,
            # Model configs.
            model_depth=cfg.RESNET.DEPTH,
            model_num_class=cfg.MODEL.NUM_CLASSES,
            dropout_rate=cfg.MODEL.DROPOUT_RATE,
            # Normalization configs.
            norm=norm_module,
            # Activation configs.
            activation=partial(nn.ReLU, inplace=cfg.RESNET.INPLACE_RELU),
            # Stem configs.
            stem_dim_outs=(
                width_per_group,
                width_per_group // cfg.SLOWFAST.BETA_INV,
            ),
            stem_conv_kernel_sizes=(
                (temp_kernel[0][0][0], 7, 7),
                (temp_kernel[0][1][0], 7, 7),
            ),
            stem_conv_strides=((1, 2, 2), (1, 2, 2)),
            stem_pool=nn.MaxPool3d,
            stem_pool_kernel_sizes=((1, 3, 3), (1, 3, 3)),
            stem_pool_strides=((1, 2, 2), (1, 2, 2)),
            # Stage configs.
            stage_conv_a_kernel_sizes=stage_conv_a_kernel_sizes,
            stage_conv_b_kernel_sizes=(
                ((1, 3, 3), (1, 3, 3), (1, 3, 3), (1, 3, 3)),
                ((1, 3, 3), (1, 3, 3), (1, 3, 3), (1, 3, 3)),
            ),
            stage_conv_b_num_groups=(
                (num_groups, num_groups, num_groups, num_groups),
                (num_groups, num_groups, num_groups, num_groups),
            ),
            stage_conv_b_dilations=(
                (
                    (1, spatial_dilations[0][0], spatial_dilations[0][0]),
                    (1, spatial_dilations[1][0], spatial_dilations[1][0]),
                    (1, spatial_dilations[2][0], spatial_dilations[2][0]),
                    (1, spatial_dilations[3][0], spatial_dilations[3][0]),
                ),
                (
                    (1, spatial_dilations[0][1], spatial_dilations[0][1]),
                    (1, spatial_dilations[1][1], spatial_dilations[1][1]),
                    # Bug fix: stages 3 and 4 previously reused index [1][1];
                    # use the per-stage dilations, mirroring the slow pathway.
                    (1, spatial_dilations[2][1], spatial_dilations[2][1]),
                    (1, spatial_dilations[3][1], spatial_dilations[3][1]),
                ),
            ),
            stage_spatial_strides=(
                (
                    spatial_strides[0][0],
                    spatial_strides[1][0],
                    spatial_strides[2][0],
                    spatial_strides[3][0],
                ),
                (
                    spatial_strides[0][1],
                    spatial_strides[1][1],
                    spatial_strides[2][1],
                    spatial_strides[3][1],
                ),
            ),
            stage_temporal_strides=((1, 1, 1, 1), (1, 1, 1, 1)),
            bottleneck=create_bottleneck_block,
            # Head configs.
            head=create_res_basic_head if not self.detection_mode else None,
            head_pool=nn.AvgPool3d,
            head_pool_kernel_sizes=head_pool_kernel_sizes,
            head_activation=None,
            head_output_with_global_average=False,
        )
        self.post_act = get_head_act(cfg.MODEL.HEAD_ACT)
    def forward(self, x, bboxes=None):
        # `x` is the [slow, fast] pathway list consumed by create_slowfast.
        x = self.model(x)
        if self.detection_mode:
            x = self.detection_head(x, bboxes)
            x = self.post_act(x)
        else:
            # Performs fully convolutional inference.
            if not self.training:
                x = self.post_act(x)
                x = x.mean([2, 3, 4])
        x = x.view(x.shape[0], -1)
        return x
@MODEL_REGISTRY.register()
class PTVX3D(nn.Module):
    """
    X3D models assembled with the PyTorchVideo `create_x3d` builder.
    """
    def __init__(self, cfg):
        """
        The `__init__` method of any subclass should also contain these
        arguments.
        Args:
            cfg (CfgNode): model building configs, details are in the
                comments of the config file.
        """
        super(PTVX3D, self).__init__()
        # Fix: the asserted condition requires STRIDE_1X1 == False, but the
        # original message claimed it "must be True".
        assert (
            cfg.RESNET.STRIDE_1X1 is False
        ), "STRIDE_1X1 must be False for PTVX3D"
        assert (
            cfg.RESNET.TRANS_FUNC == "x3d_transform"
        ), f"Unsupported TRANS_FUNC type {cfg.RESNET.TRANS_FUNC} for PTVX3D"
        assert (
            cfg.DETECTION.ENABLE is False
        ), "Detection model is not supported for PTVX3D yet."
        self._construct_network(cfg)
    def _construct_network(self, cfg):
        """
        Builds a X3D model.
        Args:
            cfg (CfgNode): model building configs, details are in the
                comments of the config file.
        """
        # Params from configs.
        norm_module = get_norm(cfg)
        temp_kernel = _TEMPORAL_KERNEL_BASIS[cfg.MODEL.ARCH]
        self.model = create_x3d(
            # Input clip configs.
            input_channel=cfg.DATA.INPUT_CHANNEL_NUM[0],
            input_clip_length=cfg.DATA.NUM_FRAMES,
            input_crop_size=cfg.DATA.TRAIN_CROP_SIZE,
            # Model configs.
            model_num_class=cfg.MODEL.NUM_CLASSES,
            dropout_rate=cfg.MODEL.DROPOUT_RATE,
            width_factor=cfg.X3D.WIDTH_FACTOR,
            depth_factor=cfg.X3D.DEPTH_FACTOR,
            # Normalization configs.
            norm=norm_module,
            norm_eps=1e-5,
            norm_momentum=0.1,
            # Activation configs.
            activation=partial(nn.ReLU, inplace=cfg.RESNET.INPLACE_RELU),
            # Stem configs.
            stem_dim_in=cfg.X3D.DIM_C1,
            stem_conv_kernel_size=(temp_kernel[0][0][0], 3, 3),
            stem_conv_stride=(1, 2, 2),
            # Stage configs.
            stage_conv_kernel_size=(
                (temp_kernel[1][0][0], 3, 3),
                (temp_kernel[2][0][0], 3, 3),
                (temp_kernel[3][0][0], 3, 3),
                (temp_kernel[4][0][0], 3, 3),
            ),
            stage_spatial_stride=(2, 2, 2, 2),
            stage_temporal_stride=(1, 1, 1, 1),
            bottleneck=create_x3d_bottleneck_block,
            bottleneck_factor=cfg.X3D.BOTTLENECK_FACTOR,
            # Squeeze-excitation ratio used inside the bottleneck blocks.
            se_ratio=0.0625,
            inner_act=Swish,
            # Head configs.
            head_dim_out=cfg.X3D.DIM_C5,
            head_pool_act=partial(nn.ReLU, inplace=cfg.RESNET.INPLACE_RELU),
            head_bn_lin5_on=cfg.X3D.BN_LIN5,
            head_activation=None,
            head_output_with_global_average=False,
        )
        self.post_act = get_head_act(cfg.MODEL.HEAD_ACT)
    def forward(self, x, bboxes=None):
        # `x` is a list of pathway tensors; X3D is single-pathway.
        x = x[0]
        x = self.model(x)
        # Performs fully convolutional inference.
        if not self.training:
            x = self.post_act(x)
            x = x.mean([2, 3, 4])
        x = x.reshape(x.shape[0], -1)
        return x
@MODEL_REGISTRY.register()
class PTVCSN(nn.Module):
    """
    CSN models using PyTorchVideo model builder.
    """
    def __init__(self, cfg):
        """
        The `__init__` method of any subclass should also contain these
        arguments.
        Args:
            cfg (CfgNode): model building configs, details are in the
                comments of the config file.
        """
        super(PTVCSN, self).__init__()
        # Classification only: create_csn has no ROI detection head here.
        assert (
            cfg.DETECTION.ENABLE is False
        ), "Detection model is not supported for PTVCSN yet."
        self._construct_network(cfg)
    def _construct_network(self, cfg):
        """
        Builds a single pathway ResNet model.
        Args:
            cfg (CfgNode): model building configs, details are in the
                comments of the config file.
        """
        # Params from configs.
        norm_module = get_norm(cfg)
        self.model = create_csn(
            # Input clip configs.
            input_channel=cfg.DATA.INPUT_CHANNEL_NUM[0],
            # Model configs.
            model_depth=cfg.RESNET.DEPTH,
            model_num_class=cfg.MODEL.NUM_CLASSES,
            dropout_rate=cfg.MODEL.DROPOUT_RATE,
            # Normalization configs.
            norm=norm_module,
            # Activation configs.
            activation=partial(nn.ReLU, inplace=cfg.RESNET.INPLACE_RELU),
            # Stem configs.
            stem_dim_out=cfg.RESNET.WIDTH_PER_GROUP,
            stem_conv_kernel_size=(3, 7, 7),
            stem_conv_stride=(1, 2, 2),
            stem_pool=nn.MaxPool3d,
            stem_pool_kernel_size=(1, 3, 3),
            stem_pool_stride=(1, 2, 2),
            # Stage configs. Channel separation: the 3x3x3 conv_b uses one
            # channel per group.
            stage_conv_a_kernel_size=(1, 1, 1),
            stage_conv_b_kernel_size=(3, 3, 3),
            stage_conv_b_width_per_group=1,
            stage_spatial_stride=(1, 2, 2, 2),
            stage_temporal_stride=(1, 2, 2, 2),
            bottleneck=create_bottleneck_block,
            # Head configs.
            head_pool=nn.AvgPool3d,
            head_pool_kernel_size=(
                cfg.DATA.NUM_FRAMES // 8,
                cfg.DATA.TRAIN_CROP_SIZE // 32,
                cfg.DATA.TRAIN_CROP_SIZE // 32,
            ),
            head_activation=None,
            head_output_with_global_average=False,
        )
        self.post_act = get_head_act(cfg.MODEL.HEAD_ACT)
    def forward(self, x, bboxes=None):
        # `x` is a list of pathway tensors; CSN is single-pathway.
        x = x[0]
        x = self.model(x)
        # Performs fully convolutional inference.
        if not self.training:
            x = self.post_act(x)
            x = x.mean([2, 3, 4])
        x = x.reshape(x.shape[0], -1)
        return x
@MODEL_REGISTRY.register()
class PTVR2plus1D(nn.Module):
    """
    R(2+1)D models using PyTorchVideo model builder.
    """
    def __init__(self, cfg):
        """
        The `__init__` method of any subclass should also contain these
        arguments.
        Args:
            cfg (CfgNode): model building configs, details are in the
                comments of the config file.
        """
        super(PTVR2plus1D, self).__init__()
        # Classification only: create_r2plus1d has no ROI detection head.
        assert (
            cfg.DETECTION.ENABLE is False
        ), "Detection model is not supported for PTVR2plus1D yet."
        self._construct_network(cfg)
    def _construct_network(self, cfg):
        """
        Builds a single pathway R(2+1)D model.
        Args:
            cfg (CfgNode): model building configs, details are in the
                comments of the config file.
        """
        self.model = create_r2plus1d(
            # Input clip configs.
            input_channel=cfg.DATA.INPUT_CHANNEL_NUM[0],
            # Model configs.
            model_depth=cfg.RESNET.DEPTH,
            model_num_class=cfg.MODEL.NUM_CLASSES,
            dropout_rate=cfg.MODEL.DROPOUT_RATE,
            # Normalization configs.
            norm=get_norm(cfg),
            norm_eps=1e-5,
            norm_momentum=0.1,
            # Activation configs.
            activation=partial(nn.ReLU, inplace=cfg.RESNET.INPLACE_RELU),
            # Stem configs.
            stem_dim_out=cfg.RESNET.WIDTH_PER_GROUP,
            stem_conv_kernel_size=(1, 7, 7),
            stem_conv_stride=(1, 2, 2),
            # Stage configs. The (2+1)D factorization happens inside the
            # bottleneck blocks below.
            stage_conv_a_kernel_size=(
                (1, 1, 1),
                (1, 1, 1),
                (1, 1, 1),
                (1, 1, 1),
            ),
            stage_conv_b_kernel_size=(
                (3, 3, 3),
                (3, 3, 3),
                (3, 3, 3),
                (3, 3, 3),
            ),
            stage_conv_b_num_groups=(1, 1, 1, 1),
            stage_conv_b_dilation=(
                (1, 1, 1),
                (1, 1, 1),
                (1, 1, 1),
                (1, 1, 1),
            ),
            stage_spatial_stride=(2, 2, 2, 2),
            stage_temporal_stride=(1, 1, 2, 2),
            stage_bottleneck=(
                create_2plus1d_bottleneck_block,
                create_2plus1d_bottleneck_block,
                create_2plus1d_bottleneck_block,
                create_2plus1d_bottleneck_block,
            ),
            # Head configs.
            head_pool=nn.AvgPool3d,
            head_pool_kernel_size=(
                cfg.DATA.NUM_FRAMES // 4,
                cfg.DATA.TRAIN_CROP_SIZE // 32,
                cfg.DATA.TRAIN_CROP_SIZE // 32,
            ),
            head_activation=None,
            head_output_with_global_average=False,
        )
        self.post_act = get_head_act(cfg.MODEL.HEAD_ACT)
    def forward(self, x, bboxes=None):
        # `x` is a list of pathway tensors; R(2+1)D is single-pathway.
        x = x[0]
        x = self.model(x)
        # Performs fully convolutional inference.
        if not self.training:
            x = self.post_act(x)
            x = x.mean([2, 3, 4])
        x = x.view(x.shape[0], -1)
        return x
@MODEL_REGISTRY.register()
class PTVMViT(nn.Module):
    """
    MViT models using PyTorchVideo model builder.
    """
    def __init__(self, cfg):
        """
        The `__init__` method of any subclass should also contain these
        arguments.
        Args:
            cfg (CfgNode): model building configs, details are in the
                comments of the config file.
        """
        super(PTVMViT, self).__init__()
        # Classification only: the PTV MViT builder has no detection head.
        assert (
            cfg.DETECTION.ENABLE is False
        ), "Detection model is not supported for PTVMViT yet."
        self._construct_network(cfg)
    def _construct_network(self, cfg):
        """
        Builds a MViT model.
        Args:
            cfg (CfgNode): model building configs, details are in the
                comments of the config file.
        """
        self.model = create_multiscale_vision_transformers(
            spatial_size=cfg.DATA.TRAIN_CROP_SIZE,
            temporal_size=cfg.DATA.NUM_FRAMES,
            cls_embed_on=cfg.MVIT.CLS_EMBED_ON,
            sep_pos_embed=cfg.MVIT.SEP_POS_EMBED,
            depth=cfg.MVIT.DEPTH,
            norm=cfg.MVIT.NORM,
            # Patch embed config.
            input_channels = cfg.DATA.INPUT_CHANNEL_NUM[0],
            patch_embed_dim = cfg.MVIT.EMBED_DIM,
            conv_patch_embed_kernel = cfg.MVIT.PATCH_KERNEL,
            conv_patch_embed_stride = cfg.MVIT.PATCH_STRIDE,
            conv_patch_embed_padding = cfg.MVIT.PATCH_PADDING,
            enable_patch_embed_norm = cfg.MVIT.NORM_STEM,
            use_2d_patch=cfg.MVIT.PATCH_2D,
            # Attention block config.
            num_heads = cfg.MVIT.NUM_HEADS,
            mlp_ratio = cfg.MVIT.MLP_RATIO,
            qkv_bias = cfg.MVIT.QKV_BIAS,
            dropout_rate_block = cfg.MVIT.DROPOUT_RATE,
            droppath_rate_block = cfg.MVIT.DROPPATH_RATE,
            pooling_mode = cfg.MVIT.MODE,
            pool_first = cfg.MVIT.POOL_FIRST,
            embed_dim_mul = cfg.MVIT.DIM_MUL,
            atten_head_mul = cfg.MVIT.HEAD_MUL,
            pool_q_stride_size = cfg.MVIT.POOL_Q_STRIDE,
            pool_kv_stride_size = cfg.MVIT.POOL_KV_STRIDE,
            pool_kv_stride_adaptive = cfg.MVIT.POOL_KV_STRIDE_ADAPTIVE,
            pool_kvq_kernel = cfg.MVIT.POOL_KVQ_KERNEL,
            # Head config.
            head_dropout_rate = cfg.MODEL.DROPOUT_RATE,
            head_num_classes = cfg.MODEL.NUM_CLASSES,
        )
        self.post_act = get_head_act(cfg.MODEL.HEAD_ACT)
    def forward(self, x, bboxes=None):
        # `x` is a list of pathway tensors; MViT is single-pathway.
        x = x[0]
        x = self.model(x)
        # Apply softmax/sigmoid only at inference; training uses raw logits.
        if not self.training:
            x = self.post_act(x)
        return x
| 26,986 | 33.777062 | 89 | py |
STTS | STTS-main/MViT/slowfast/models/topk.py | import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
import einops
from einops import rearrange
from math import sqrt
import time
class PredictorLG(nn.Module):
    """Per-token score predictor.

    Embeds each token, fuses its local features with a global (mean-pooled)
    context vector, and regresses a single scalar score per token.
    """

    def __init__(self, embed_dim=384):
        super().__init__()
        # Shared token embedding.
        self.in_conv = nn.Sequential(
            nn.LayerNorm(embed_dim),
            nn.Linear(embed_dim, embed_dim),
            nn.GELU(),
        )
        # MLP mapping fused local/global features to one score per token.
        self.out_conv = nn.Sequential(
            nn.Linear(embed_dim, embed_dim // 2),
            nn.GELU(),
            nn.Linear(embed_dim // 2, embed_dim // 4),
            nn.GELU(),
            nn.Linear(embed_dim // 4, 1),
        )

    def forward(self, x):
        feats = self.in_conv(x)
        batch, n_tokens, channels = feats.size()
        half = channels // 2
        local_part = feats[:, :, :half]
        global_part = feats[:, :, half:].mean(dim=1, keepdim=True)
        fused = torch.cat(
            [local_part, global_part.expand(batch, n_tokens, half)], dim=-1
        )
        return self.out_conv(fused)
def HardTopK(k, x):
    """Indices of the k largest entries along the last dim, returned in
    ascending index order (shape (..., k))."""
    idx = torch.topk(x, k=k, dim=-1, sorted=False).indices
    return torch.sort(idx, dim=-1).values
class PerturbedTopK(nn.Module):
    """Module wrapper over the differentiable perturbed top-k operator with a
    fixed k and sample count; sigma is supplied per call."""

    def __init__(self, k: int, num_samples: int = 1000):
        super(PerturbedTopK, self).__init__()
        self.k = k
        self.num_samples = num_samples

    def __call__(self, x, sigma):
        return PerturbedTopKFunction.apply(x, self.k, self.num_samples, sigma)
class PerturbedTopKFunction(torch.autograd.Function):
    """Differentiable top-k via Gaussian perturbation.

    Forward returns soft one-hot indicator rows of shape (b, k, d): the
    one-hot top-k selections averaged over `num_samples` noise draws.
    Backward uses a Monte-Carlo estimator of the Jacobian built from the
    same noise (appears to follow the perturbed-optimizers approach used
    for differentiable patch selection — confirm against the reference).
    """
    @staticmethod
    def forward(ctx, x, k: int, num_samples: int = 1000, sigma: float = 0.05):
        b, d = x.shape
        # for Gaussian: noise and gradient are the same.
        noise = torch.normal(mean=0.0, std=1.0, size=(b, num_samples, d)).to(x.device)
        # Score each of the num_samples perturbed copies of x.
        perturbed_x = x[:, None, :] + noise * sigma # b, nS, d
        topk_results = torch.topk(perturbed_x, k=k, dim=-1, sorted=False)
        indices = topk_results.indices # b, nS, k
        indices = torch.sort(indices, dim=-1).values # b, nS, k
        # One-hot the selections, then average over samples -> soft indicators.
        perturbed_output = torch.nn.functional.one_hot(indices, num_classes=d).float()
        indicators = perturbed_output.mean(dim=1) # b, k, d
        # constants for backward
        ctx.k = k
        ctx.num_samples = num_samples
        ctx.sigma = sigma
        # tensors for backward
        ctx.perturbed_output = perturbed_output
        ctx.noise = noise
        return indicators
    @staticmethod
    def backward(ctx, grad_output):
        if grad_output is None:
            return tuple([None] * 5)
        noise_gradient = ctx.noise
        if ctx.sigma <= 1e-20:
            # Degenerate sigma: estimator would divide by ~0; return zeros.
            b, _, k, d = ctx.perturbed_output.size()
            expected_gradient = torch.zeros(b, k, d).to(grad_output.device)
        else:
            # E[one_hot * noise] / (num_samples * sigma): Monte-Carlo Jacobian.
            expected_gradient = (
                torch.einsum("bnkd,bnd->bkd", ctx.perturbed_output, noise_gradient)
                / ctx.num_samples
                / (ctx.sigma)
            )
        grad_input = torch.einsum("bkd,bkd->bd", grad_output, expected_gradient)
        # Only x gets a gradient; surplus trailing Nones beyond the number of
        # forward inputs are tolerated and dropped by autograd.
        return (grad_input,) + tuple([None] * 5)
def batched_index_select(input, dim, index):
    """Per-batch index_select: gather along `dim` of `input` (B, ...) using a
    per-batch index tensor of shape (B, k)."""
    # Unsqueeze the index so it has one axis per input dim (batch stays dim 0).
    for axis in range(1, len(input.shape)):
        if axis != dim:
            index = index.unsqueeze(axis)
    shape = list(input.shape)
    shape[0] = -1
    shape[dim] = -1
    return torch.gather(input, dim, index.expand(shape))
def extract_patches_from_indices(x, indices):
    """Hard selection: pick the rows of x (B, N, C) named by `indices`
    (B, k), returning (B, k, C)."""
    batch_size, _, channels = x.shape
    k = indices.shape[-1]
    chosen = batched_index_select(x, 1, indices)
    return chosen.contiguous().view(batch_size, k, channels)
def extract_patches_from_indicators(x, indicators):
    """Soft selection: weight the rows of x (B, N, C) by top-k indicator
    scores (B, N, k), returning (B, k, C)."""
    indicators = indicators.permute(0, 2, 1)  # b d k -> b k d
    return torch.einsum("b k d, b d c -> b k c", indicators, x)
def min_max_norm(x):
    """Rescale scores to [0, 1] along the last axis; the epsilon keeps the
    division finite when all entries are equal."""
    lo = x.min(dim=-1, keepdim=True).values
    hi = x.max(dim=-1, keepdim=True).values
    return (x - lo) / (hi - lo + 1e-5)
class PatchNet(nn.Module):
    """Token-pruning module: scores frames ('time') or spatial anchors
    ('space') and keeps the top-k, softly (perturbed top-k indicators) during
    training and hard (index selection) at evaluation.

    NOTE(review): `.cuda()` calls below make this module GPU-only as written.
    """
    def __init__(self, score, k, in_channels, stride=None, num_samples=500):
        # score: scoring strategy name ('tpool' for temporal, 'spatch' for
        #        spatial; other values use no learned score network here).
        # k: number of frames (time) to keep; for space, k is the anchor area
        #    and a single anchor of side sqrt(k) is kept.
        # stride: anchor stride for spatial scoring; derived from H if None.
        # num_samples: noise draws for the perturbed top-k estimator.
        super(PatchNet, self).__init__()
        self.k = k
        self.anchor_size = int(sqrt(k))
        self.stride = stride
        self.score = score
        self.in_channels = in_channels
        self.num_samples = num_samples
        if score == 'tpool':
            # Temporal scorer sees [avg-pool, max-pool] concat -> 2x channels.
            self.score_network = PredictorLG(embed_dim=2*in_channels)
        elif score == 'spatch':
            self.score_network = PredictorLG(embed_dim=in_channels)
        self.init = torch.eye(self.k).unsqueeze(0).unsqueeze(-1).cuda()
    def get_indicator(self, scores, k, sigma):
        # Soft (differentiable) top-k: returns (b, d, k) indicator weights.
        indicator = PerturbedTopKFunction.apply(scores, k, self.num_samples, sigma)
        indicator = einops.rearrange(indicator, "b k d -> b d k")
        return indicator
    def get_indices(self, scores, k):
        # Hard top-k indices (ascending order), used at evaluation.
        indices = HardTopK(k, scores)
        return indices
    def generate_random_indices(self, b, n, k):
        # Baseline: k sorted indices sampled uniformly without replacement.
        indices = []
        for _ in range(b):
            indice = np.sort(np.random.choice(n, k, replace=False))
            indices.append(indice)
        indices = np.vstack(indices)
        indices = torch.Tensor(indices).long().cuda()
        return indices
    def generate_uniform_indices(self, b, n, k):
        # Baseline: k evenly spaced indices over [0, n-1].
        indices = torch.linspace(0, n-1, steps=k).long()
        indices = indices.unsqueeze(0).cuda()
        indices = indices.repeat(b, 1)
        return indices
    def forward(self, x, type, N, T, sigma):
        # x: (B, T*N, C) tokens (no cls token); `type` is 'time' or 'space'.
        # NOTE: parameter `type` shadows the builtin; kept for API stability.
        B = x.size(0)
        H = W = int(sqrt(N))
        indicator = None
        indices = None
        if type == 'time':
            if self.score == 'tpool':
                # Score each frame from its pooled token statistics.
                x = rearrange(x, 'b (t n) m -> b t n m', t=T)
                avg = torch.mean(x, dim=2, keepdim=False)
                max_ = torch.max(x, dim=2).values
                x_ = torch.cat((avg, max_), dim=2)
                scores = self.score_network(x_).squeeze(-1)
                scores = min_max_norm(scores)
                if self.training:
                    indicator = self.get_indicator(scores, self.k, sigma)
                else:
                    indices = self.get_indices(scores, self.k)
                # Flatten each frame so top-k selects whole frames.
                x = rearrange(x, 'b t n m -> b t (n m)')
        else:
            # Spatial pruning: slide an anchor window over the token grid and
            # keep the single best-scoring anchor per frame.
            s = self.stride if self.stride is not None else int(max((H - self.anchor_size) // 2, 1))
            if self.score == 'spatch':
                x = rearrange(x, 'b (t n) c -> (b t) n c', t=T)
                scores = self.score_network(x)
                scores = rearrange(scores, '(b t) (h w) c -> (b t) c h w', b=B, h=H)
                # Mean token score inside each anchor window.
                scores = F.unfold(scores, kernel_size=self.anchor_size, stride=s)
                scores = scores.mean(dim=1)
                scores = min_max_norm(scores)
                x = rearrange(x, '(b t) (h w) c -> (b t) c h w', b=B, h=H)
                x = F.unfold(x, kernel_size=self.anchor_size, stride=s).permute(0, 2, 1).contiguous()
                if self.training:
                    indicator = self.get_indicator(scores, 1, sigma)
                else:
                    indices = self.get_indices(scores, 1)
        if self.training:
            # Soft path keeps gradients flowing to the score network.
            if indicator is not None:
                patches = extract_patches_from_indicators(x, indicator)
            elif indices is not None:
                patches = extract_patches_from_indices(x, indices)
            if type == 'time':
                patches = rearrange(patches, 'b k (n c) -> b (k n) c', n = N)
            elif self.score == 'spatch':
                patches = rearrange(patches, '(b t) k (c kh kw) -> b (t k kh kw) c',
                                b=B, c=self.in_channels, kh=self.anchor_size)
            return patches
        else:
            # Eval path: hard gather of the selected frames/anchors.
            patches = extract_patches_from_indices(x, indices)
            if type == 'time':
                patches = rearrange(patches, 'b k (n c) -> b (k n) c', n = N)
            elif self.score == 'spatch':
                patches = rearrange(patches, '(b t) k (c kh kw) -> b (t k kh kw) c',
                                b=B, c=self.in_channels, kh=self.anchor_size)
            return patches
| 8,512 | 32.916335 | 101 | py |
STTS | STTS-main/MViT/slowfast/models/utils.py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
from logging import NOTSET
import slowfast.utils.logging as logging
import numpy as np
import torch.nn.functional as F
from einops import rearrange
logger = logging.get_logger(__name__)
def round_width(width, multiplier, min_width=1, divisor=1, verbose=False):
if not multiplier:
return width
width *= multiplier
min_width = min_width or divisor
if verbose:
logger.info(f"min width {min_width}")
logger.info(f"width {width} divisor {divisor}")
logger.info(f"other {int(width + divisor / 2) // divisor * divisor}")
width_out = max(min_width, int(width + divisor / 2) // divisor * divisor)
if width_out < 0.9 * width:
width_out += divisor
return int(width_out)
def validate_checkpoint_wrapper_import(checkpoint_wrapper):
    """Raise ImportError when fairscale's checkpoint_wrapper failed to
    import (i.e. the given object is None)."""
    if checkpoint_wrapper is None:
        raise ImportError("Please install fairscale.")
| 1,017 | 28.941176 | 77 | py |
STTS | STTS-main/MViT/slowfast/models/nonlocal_helper.py | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
"""Non-local helper"""
import torch
import torch.nn as nn
class Nonlocal(nn.Module):
    """
    Builds Non-local Neural Networks as a generic family of building
    blocks for capturing long-range dependencies. Non-local Network
    computes the response at a position as a weighted sum of the
    features at all positions. This building block can be plugged into
    many computer vision architectures.
    More details in the paper: https://arxiv.org/pdf/1711.07971.pdf
    """
    def __init__(
        self,
        dim,
        dim_inner,
        pool_size=None,
        instantiation="softmax",
        zero_init_final_conv=False,
        zero_init_final_norm=True,
        norm_eps=1e-5,
        norm_momentum=0.1,
        norm_module=nn.BatchNorm3d,
    ):
        """
        Args:
            dim (int): number of dimension for the input.
            dim_inner (int): number of dimension inside of the Non-local block.
            pool_size (list): the kernel size of spatial temporal pooling,
                temporal pool kernel size, spatial pool kernel size, spatial
                pool kernel size in order. By default pool_size is None,
                then there would be no pooling used.
            instantiation (string): supports two different instantiation method:
                "dot_product": normalizing correlation matrix with L2.
                "softmax": normalizing correlation matrix with Softmax.
            zero_init_final_conv (bool): If true, zero initializing the final
                convolution of the Non-local block.
            zero_init_final_norm (bool):
                If true, zero initializing the final batch norm of the Non-local
                block.
            norm_module (nn.Module): nn.Module for the normalization layer. The
                default is nn.BatchNorm3d.
        """
        super(Nonlocal, self).__init__()
        self.dim = dim
        self.dim_inner = dim_inner
        self.pool_size = pool_size
        self.instantiation = instantiation
        # Only pool when at least one kernel dimension is > 1.
        self.use_pool = (
            False
            if pool_size is None
            else any((size > 1 for size in pool_size))
        )
        self.norm_eps = norm_eps
        self.norm_momentum = norm_momentum
        self._construct_nonlocal(
            zero_init_final_conv, zero_init_final_norm, norm_module
        )
    def _construct_nonlocal(
        self, zero_init_final_conv, zero_init_final_norm, norm_module
    ):
        # Three convolution heads: theta, phi, and g.
        self.conv_theta = nn.Conv3d(
            self.dim, self.dim_inner, kernel_size=1, stride=1, padding=0
        )
        self.conv_phi = nn.Conv3d(
            self.dim, self.dim_inner, kernel_size=1, stride=1, padding=0
        )
        self.conv_g = nn.Conv3d(
            self.dim, self.dim_inner, kernel_size=1, stride=1, padding=0
        )
        # Final convolution output.
        self.conv_out = nn.Conv3d(
            self.dim_inner, self.dim, kernel_size=1, stride=1, padding=0
        )
        # Zero initializing the final convolution output.
        # (Flag only; the actual init is presumably applied by an external
        # weight-init helper — confirm against the caller.)
        self.conv_out.zero_init = zero_init_final_conv
        # TODO: change the name to `norm`
        self.bn = norm_module(
            num_features=self.dim,
            eps=self.norm_eps,
            momentum=self.norm_momentum,
        )
        # Zero initializing the final bn.
        self.bn.transform_final_bn = zero_init_final_norm
        # Optional to add the spatial-temporal pooling.
        if self.use_pool:
            self.pool = nn.MaxPool3d(
                kernel_size=self.pool_size,
                stride=self.pool_size,
                padding=[0, 0, 0],
            )
    def forward(self, x):
        """
        Args:
            x (Tensor): input of shape (N, C, T, H, W).
        Returns:
            Tensor: `x` plus the non-local response (same shape as `x`).
        """
        x_identity = x
        N, C, T, H, W = x.size()
        # theta is computed from the UNPOOLED input so the output keeps full
        # spatiotemporal resolution; only phi/g see the pooled tensor.
        theta = self.conv_theta(x)
        # Perform temporal-spatial pooling to reduce the computation.
        if self.use_pool:
            x = self.pool(x)
        phi = self.conv_phi(x)
        g = self.conv_g(x)
        theta = theta.view(N, self.dim_inner, -1)
        phi = phi.view(N, self.dim_inner, -1)
        g = g.view(N, self.dim_inner, -1)
        # (N, C, TxHxW) * (N, C, TxHxW) => (N, TxHxW, TxHxW).
        theta_phi = torch.einsum("nct,ncp->ntp", (theta, phi))
        # For original Non-local paper, there are two main ways to normalize
        # the affinity tensor:
        #   1) Softmax normalization (norm on exp).
        #   2) dot_product normalization.
        if self.instantiation == "softmax":
            # Normalizing the affinity tensor theta_phi before softmax.
            theta_phi = theta_phi * (self.dim_inner ** -0.5)
            theta_phi = nn.functional.softmax(theta_phi, dim=2)
        elif self.instantiation == "dot_product":
            spatial_temporal_dim = theta_phi.shape[2]
            theta_phi = theta_phi / spatial_temporal_dim
        else:
            raise NotImplementedError(
                "Unknown norm type {}".format(self.instantiation)
            )
        # (N, TxHxW, TxHxW) * (N, C, TxHxW) => (N, C, TxHxW).
        theta_phi_g = torch.einsum("ntg,ncg->nct", (theta_phi, g))
        # (N, C, TxHxW) => (N, C, T, H, W).
        theta_phi_g = theta_phi_g.view(N, self.dim_inner, T, H, W)
        p = self.conv_out(theta_phi_g)
        p = self.bn(p)
        return x_identity + p
| 5,418 | 35.369128 | 80 | py |
STTS | STTS-main/MViT/slowfast/models/video_model_builder.py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
"""Video models."""
import math
from functools import partial
import torch
import torch.nn as nn
from torch.nn.init import trunc_normal_
import slowfast.utils.weight_init_helper as init_helper
from slowfast.models.attention import MultiScaleBlock
from slowfast.models.batchnorm_helper import get_norm
from slowfast.models.stem_helper import PatchEmbed
from slowfast.models.utils import round_width, validate_checkpoint_wrapper_import
from . import head_helper, resnet_helper, stem_helper
from .build import MODEL_REGISTRY
try:
from fairscale.nn.checkpoint import checkpoint_wrapper
except ImportError:
checkpoint_wrapper = None
# Number of blocks for different stages given the model depth.
_MODEL_STAGE_DEPTH = {50: (3, 4, 6, 3), 101: (3, 4, 23, 3)}
# Basis of temporal kernel sizes for each of the stage.
_TEMPORAL_KERNEL_BASIS = {
    "2d": [
        [[1]],  # conv1 temporal kernel.
        [[1]],  # res2 temporal kernel.
        [[1]],  # res3 temporal kernel.
        [[1]],  # res4 temporal kernel.
        [[1]],  # res5 temporal kernel.
    ],
    "c2d": [
        [[1]],  # conv1 temporal kernel.
        [[1]],  # res2 temporal kernel.
        [[1]],  # res3 temporal kernel.
        [[1]],  # res4 temporal kernel.
        [[1]],  # res5 temporal kernel.
    ],
    "c2d_nopool": [
        [[1]],  # conv1 temporal kernel.
        [[1]],  # res2 temporal kernel.
        [[1]],  # res3 temporal kernel.
        [[1]],  # res4 temporal kernel.
        [[1]],  # res5 temporal kernel.
    ],
    "i3d": [
        [[5]],  # conv1 temporal kernel.
        [[3]],  # res2 temporal kernel.
        [[3, 1]],  # res3 temporal kernel.
        [[3, 1]],  # res4 temporal kernel.
        [[1, 3]],  # res5 temporal kernel.
    ],
    "i3d_nopool": [
        [[5]],  # conv1 temporal kernel.
        [[3]],  # res2 temporal kernel.
        [[3, 1]],  # res3 temporal kernel.
        [[3, 1]],  # res4 temporal kernel.
        [[1, 3]],  # res5 temporal kernel.
    ],
    "slow": [
        [[1]],  # conv1 temporal kernel.
        [[1]],  # res2 temporal kernel.
        [[1]],  # res3 temporal kernel.
        [[3]],  # res4 temporal kernel.
        [[3]],  # res5 temporal kernel.
    ],
    "slowfast": [
        [[1], [5]],  # conv1 temporal kernel for slow and fast pathway.
        [[1], [3]],  # res2 temporal kernel for slow and fast pathway.
        [[1], [3]],  # res3 temporal kernel for slow and fast pathway.
        [[3], [3]],  # res4 temporal kernel for slow and fast pathway.
        [[3], [3]],  # res5 temporal kernel for slow and fast pathway.
    ],
    "x3d": [
        [[5]],  # conv1 temporal kernels.
        [[3]],  # res2 temporal kernels.
        [[3]],  # res3 temporal kernels.
        [[3]],  # res4 temporal kernels.
        [[3]],  # res5 temporal kernels.
    ],
}
# Pooling kernel/stride (T, H, W) applied between res2 and res3 for each
# architecture; "slowfast" carries one entry per pathway.
_POOL1 = {
    "2d": [[1, 1, 1]],
    "c2d": [[2, 1, 1]],
    "c2d_nopool": [[1, 1, 1]],
    "i3d": [[2, 1, 1]],
    "i3d_nopool": [[1, 1, 1]],
    "slow": [[1, 1, 1]],
    "slowfast": [[1, 1, 1], [1, 1, 1]],
    "x3d": [[1, 1, 1]],
}
class FuseFastToSlow(nn.Module):
    """
    Lateral connection of the SlowFast network: projects the Fast pathway
    features through a strided temporal convolution, normalizes and
    activates them, then concatenates the result onto the Slow pathway
    channels. Returns [fused_slow, fast] in order.
    """
    def __init__(
        self,
        dim_in,
        fusion_conv_channel_ratio,
        fusion_kernel,
        alpha,
        eps=1e-5,
        bn_mmt=0.1,
        inplace_relu=True,
        norm_module=nn.BatchNorm3d,
    ):
        """
        Args:
            dim_in (int): the channel dimension of the Fast-pathway input.
            fusion_conv_channel_ratio (int): channel expansion ratio of the
                fusion convolution (Fast -> Slow).
            fusion_kernel (int): temporal kernel size of the fusion convolution.
            alpha (int): frame-rate ratio between the Fast and Slow pathway;
                used as the temporal stride so the fused tensor matches the
                Slow pathway's temporal length.
            eps (float): epsilon for batch norm.
            bn_mmt (float): batch-norm momentum (PyTorch convention, i.e.
                1 - Caffe2 momentum).
            inplace_relu (bool): if True, compute the ReLU in place.
            norm_module (nn.Module): normalization layer class; defaults to
                nn.BatchNorm3d.
        """
        super(FuseFastToSlow, self).__init__()
        fused_channels = dim_in * fusion_conv_channel_ratio
        self.conv_f2s = nn.Conv3d(
            dim_in,
            fused_channels,
            kernel_size=[fusion_kernel, 1, 1],
            stride=[alpha, 1, 1],
            padding=[fusion_kernel // 2, 0, 0],
            bias=False,
        )
        self.bn = norm_module(
            num_features=fused_channels, eps=eps, momentum=bn_mmt
        )
        self.relu = nn.ReLU(inplace_relu)
    def forward(self, x):
        """Fuse [slow, fast] features; returns [slow||lateral(fast), fast]."""
        slow_path, fast_path = x[0], x[1]
        lateral = self.relu(self.bn(self.conv_f2s(fast_path)))
        fused_slow = torch.cat([slow_path, lateral], 1)
        return [fused_slow, fast_path]
@MODEL_REGISTRY.register()
class SlowFast(nn.Module):
    """
    SlowFast model builder for SlowFast network.
    Christoph Feichtenhofer, Haoqi Fan, Jitendra Malik, and Kaiming He.
    "SlowFast networks for video recognition."
    https://arxiv.org/pdf/1812.03982.pdf
    """
    def __init__(self, cfg):
        """
        The `__init__` method of any subclass should also contain these
        arguments.
        Args:
            cfg (CfgNode): model building configs, details are in the
                comments of the config file.
        """
        super(SlowFast, self).__init__()
        self.norm_module = get_norm(cfg)
        self.enable_detection = cfg.DETECTION.ENABLE
        # Two pathways: Slow (index 0) and Fast (index 1).
        self.num_pathways = 2
        self._construct_network(cfg)
        init_helper.init_weights(
            self, cfg.MODEL.FC_INIT_STD, cfg.RESNET.ZERO_INIT_FINAL_BN
        )
    def _construct_network(self, cfg):
        """
        Builds a SlowFast model. The first pathway is the Slow pathway and the
        second pathway is the Fast pathway.
        Args:
            cfg (CfgNode): model building configs, details are in the
                comments of the config file.
        """
        assert cfg.MODEL.ARCH in _POOL1.keys()
        pool_size = _POOL1[cfg.MODEL.ARCH]
        assert len({len(pool_size), self.num_pathways}) == 1
        assert cfg.RESNET.DEPTH in _MODEL_STAGE_DEPTH.keys()
        (d2, d3, d4, d5) = _MODEL_STAGE_DEPTH[cfg.RESNET.DEPTH]
        num_groups = cfg.RESNET.NUM_GROUPS
        width_per_group = cfg.RESNET.WIDTH_PER_GROUP
        dim_inner = num_groups * width_per_group
        out_dim_ratio = (
            cfg.SLOWFAST.BETA_INV // cfg.SLOWFAST.FUSION_CONV_CHANNEL_RATIO
        )
        temp_kernel = _TEMPORAL_KERNEL_BASIS[cfg.MODEL.ARCH]
        self.s1 = stem_helper.VideoModelStem(
            dim_in=cfg.DATA.INPUT_CHANNEL_NUM,
            dim_out=[width_per_group, width_per_group // cfg.SLOWFAST.BETA_INV],
            kernel=[temp_kernel[0][0] + [7, 7], temp_kernel[0][1] + [7, 7]],
            stride=[[1, 2, 2]] * 2,
            padding=[
                [temp_kernel[0][0][0] // 2, 3, 3],
                [temp_kernel[0][1][0] // 2, 3, 3],
            ],
            norm_module=self.norm_module,
        )
        self.s1_fuse = FuseFastToSlow(
            width_per_group // cfg.SLOWFAST.BETA_INV,
            cfg.SLOWFAST.FUSION_CONV_CHANNEL_RATIO,
            cfg.SLOWFAST.FUSION_KERNEL_SZ,
            cfg.SLOWFAST.ALPHA,
            norm_module=self.norm_module,
        )
        # Slow-pathway input width grows by the fused Fast channels
        # (width // out_dim_ratio) after every fusion block.
        self.s2 = resnet_helper.ResStage(
            dim_in=[
                width_per_group + width_per_group // out_dim_ratio,
                width_per_group // cfg.SLOWFAST.BETA_INV,
            ],
            dim_out=[
                width_per_group * 4,
                width_per_group * 4 // cfg.SLOWFAST.BETA_INV,
            ],
            dim_inner=[dim_inner, dim_inner // cfg.SLOWFAST.BETA_INV],
            temp_kernel_sizes=temp_kernel[1],
            stride=cfg.RESNET.SPATIAL_STRIDES[0],
            num_blocks=[d2] * 2,
            num_groups=[num_groups] * 2,
            num_block_temp_kernel=cfg.RESNET.NUM_BLOCK_TEMP_KERNEL[0],
            nonlocal_inds=cfg.NONLOCAL.LOCATION[0],
            nonlocal_group=cfg.NONLOCAL.GROUP[0],
            nonlocal_pool=cfg.NONLOCAL.POOL[0],
            instantiation=cfg.NONLOCAL.INSTANTIATION,
            trans_func_name=cfg.RESNET.TRANS_FUNC,
            dilation=cfg.RESNET.SPATIAL_DILATIONS[0],
            norm_module=self.norm_module,
        )
        self.s2_fuse = FuseFastToSlow(
            width_per_group * 4 // cfg.SLOWFAST.BETA_INV,
            cfg.SLOWFAST.FUSION_CONV_CHANNEL_RATIO,
            cfg.SLOWFAST.FUSION_KERNEL_SZ,
            cfg.SLOWFAST.ALPHA,
            norm_module=self.norm_module,
        )
        for pathway in range(self.num_pathways):
            pool = nn.MaxPool3d(
                kernel_size=pool_size[pathway],
                stride=pool_size[pathway],
                padding=[0, 0, 0],
            )
            self.add_module("pathway{}_pool".format(pathway), pool)
        self.s3 = resnet_helper.ResStage(
            dim_in=[
                width_per_group * 4 + width_per_group * 4 // out_dim_ratio,
                width_per_group * 4 // cfg.SLOWFAST.BETA_INV,
            ],
            dim_out=[
                width_per_group * 8,
                width_per_group * 8 // cfg.SLOWFAST.BETA_INV,
            ],
            dim_inner=[dim_inner * 2, dim_inner * 2 // cfg.SLOWFAST.BETA_INV],
            temp_kernel_sizes=temp_kernel[2],
            stride=cfg.RESNET.SPATIAL_STRIDES[1],
            num_blocks=[d3] * 2,
            num_groups=[num_groups] * 2,
            num_block_temp_kernel=cfg.RESNET.NUM_BLOCK_TEMP_KERNEL[1],
            nonlocal_inds=cfg.NONLOCAL.LOCATION[1],
            nonlocal_group=cfg.NONLOCAL.GROUP[1],
            nonlocal_pool=cfg.NONLOCAL.POOL[1],
            instantiation=cfg.NONLOCAL.INSTANTIATION,
            trans_func_name=cfg.RESNET.TRANS_FUNC,
            dilation=cfg.RESNET.SPATIAL_DILATIONS[1],
            norm_module=self.norm_module,
        )
        self.s3_fuse = FuseFastToSlow(
            width_per_group * 8 // cfg.SLOWFAST.BETA_INV,
            cfg.SLOWFAST.FUSION_CONV_CHANNEL_RATIO,
            cfg.SLOWFAST.FUSION_KERNEL_SZ,
            cfg.SLOWFAST.ALPHA,
            norm_module=self.norm_module,
        )
        self.s4 = resnet_helper.ResStage(
            dim_in=[
                width_per_group * 8 + width_per_group * 8 // out_dim_ratio,
                width_per_group * 8 // cfg.SLOWFAST.BETA_INV,
            ],
            dim_out=[
                width_per_group * 16,
                width_per_group * 16 // cfg.SLOWFAST.BETA_INV,
            ],
            dim_inner=[dim_inner * 4, dim_inner * 4 // cfg.SLOWFAST.BETA_INV],
            temp_kernel_sizes=temp_kernel[3],
            stride=cfg.RESNET.SPATIAL_STRIDES[2],
            num_blocks=[d4] * 2,
            num_groups=[num_groups] * 2,
            num_block_temp_kernel=cfg.RESNET.NUM_BLOCK_TEMP_KERNEL[2],
            nonlocal_inds=cfg.NONLOCAL.LOCATION[2],
            nonlocal_group=cfg.NONLOCAL.GROUP[2],
            nonlocal_pool=cfg.NONLOCAL.POOL[2],
            instantiation=cfg.NONLOCAL.INSTANTIATION,
            trans_func_name=cfg.RESNET.TRANS_FUNC,
            dilation=cfg.RESNET.SPATIAL_DILATIONS[2],
            norm_module=self.norm_module,
        )
        self.s4_fuse = FuseFastToSlow(
            width_per_group * 16 // cfg.SLOWFAST.BETA_INV,
            cfg.SLOWFAST.FUSION_CONV_CHANNEL_RATIO,
            cfg.SLOWFAST.FUSION_KERNEL_SZ,
            cfg.SLOWFAST.ALPHA,
            norm_module=self.norm_module,
        )
        self.s5 = resnet_helper.ResStage(
            dim_in=[
                width_per_group * 16 + width_per_group * 16 // out_dim_ratio,
                width_per_group * 16 // cfg.SLOWFAST.BETA_INV,
            ],
            dim_out=[
                width_per_group * 32,
                width_per_group * 32 // cfg.SLOWFAST.BETA_INV,
            ],
            dim_inner=[dim_inner * 8, dim_inner * 8 // cfg.SLOWFAST.BETA_INV],
            temp_kernel_sizes=temp_kernel[4],
            stride=cfg.RESNET.SPATIAL_STRIDES[3],
            num_blocks=[d5] * 2,
            num_groups=[num_groups] * 2,
            num_block_temp_kernel=cfg.RESNET.NUM_BLOCK_TEMP_KERNEL[3],
            nonlocal_inds=cfg.NONLOCAL.LOCATION[3],
            nonlocal_group=cfg.NONLOCAL.GROUP[3],
            nonlocal_pool=cfg.NONLOCAL.POOL[3],
            instantiation=cfg.NONLOCAL.INSTANTIATION,
            trans_func_name=cfg.RESNET.TRANS_FUNC,
            dilation=cfg.RESNET.SPATIAL_DILATIONS[3],
            norm_module=self.norm_module,
        )
        if cfg.DETECTION.ENABLE:
            self.head = head_helper.ResNetRoIHead(
                dim_in=[
                    width_per_group * 32,
                    width_per_group * 32 // cfg.SLOWFAST.BETA_INV,
                ],
                num_classes=cfg.MODEL.NUM_CLASSES,
                pool_size=[
                    [
                        cfg.DATA.NUM_FRAMES
                        // cfg.SLOWFAST.ALPHA
                        // pool_size[0][0],
                        1,
                        1,
                    ],
                    [cfg.DATA.NUM_FRAMES // pool_size[1][0], 1, 1],
                ],
                resolution=[[cfg.DETECTION.ROI_XFORM_RESOLUTION] * 2] * 2,
                scale_factor=[cfg.DETECTION.SPATIAL_SCALE_FACTOR] * 2,
                dropout_rate=cfg.MODEL.DROPOUT_RATE,
                act_func=cfg.MODEL.HEAD_ACT,
                aligned=cfg.DETECTION.ALIGNED,
            )
        else:
            self.head = head_helper.ResNetBasicHead(
                dim_in=[
                    width_per_group * 32,
                    width_per_group * 32 // cfg.SLOWFAST.BETA_INV,
                ],
                num_classes=cfg.MODEL.NUM_CLASSES,
                pool_size=[None, None]
                if cfg.MULTIGRID.SHORT_CYCLE
                else [
                    [
                        cfg.DATA.NUM_FRAMES
                        // cfg.SLOWFAST.ALPHA
                        // pool_size[0][0],
                        cfg.DATA.TRAIN_CROP_SIZE // 32 // pool_size[0][1],
                        cfg.DATA.TRAIN_CROP_SIZE // 32 // pool_size[0][2],
                    ],
                    [
                        cfg.DATA.NUM_FRAMES // pool_size[1][0],
                        cfg.DATA.TRAIN_CROP_SIZE // 32 // pool_size[1][1],
                        cfg.DATA.TRAIN_CROP_SIZE // 32 // pool_size[1][2],
                    ],
                ],  # None for AdaptiveAvgPool3d((1, 1, 1))
                dropout_rate=cfg.MODEL.DROPOUT_RATE,
                act_func=cfg.MODEL.HEAD_ACT,
            )
    def forward(self, x, bboxes=None):
        """
        Args:
            x (list[Tensor]): [slow_input, fast_input], one 5-D tensor per
                pathway.
            bboxes (Tensor, optional): RoI boxes, consumed by the RoI head
                only when detection is enabled.
        Returns:
            Tensor: classification (or per-RoI) predictions from the head.
        """
        x = self.s1(x)
        x = self.s1_fuse(x)
        x = self.s2(x)
        x = self.s2_fuse(x)
        for pathway in range(self.num_pathways):
            pool = getattr(self, "pathway{}_pool".format(pathway))
            x[pathway] = pool(x[pathway])
        x = self.s3(x)
        x = self.s3_fuse(x)
        x = self.s4(x)
        x = self.s4_fuse(x)
        x = self.s5(x)
        if self.enable_detection:
            x = self.head(x, bboxes)
        else:
            x = self.head(x)
        return x
@MODEL_REGISTRY.register()
class ResNet(nn.Module):
    """
    ResNet model builder. It builds a ResNet like network backbone without
    lateral connection (C2D, I3D, Slow).
    Christoph Feichtenhofer, Haoqi Fan, Jitendra Malik, and Kaiming He.
    "SlowFast networks for video recognition."
    https://arxiv.org/pdf/1812.03982.pdf
    Xiaolong Wang, Ross Girshick, Abhinav Gupta, and Kaiming He.
    "Non-local neural networks."
    https://arxiv.org/pdf/1711.07971.pdf
    """
    def __init__(self, cfg):
        """
        The `__init__` method of any subclass should also contain these
        arguments.
        Args:
            cfg (CfgNode): model building configs, details are in the
                comments of the config file.
        """
        super(ResNet, self).__init__()
        self.norm_module = get_norm(cfg)
        self.enable_detection = cfg.DETECTION.ENABLE
        self.num_pathways = 1
        self._construct_network(cfg)
        init_helper.init_weights(
            self, cfg.MODEL.FC_INIT_STD, cfg.RESNET.ZERO_INIT_FINAL_BN
        )
    def _construct_network(self, cfg):
        """
        Builds a single pathway ResNet model.
        Args:
            cfg (CfgNode): model building configs, details are in the
                comments of the config file.
        """
        assert cfg.MODEL.ARCH in _POOL1.keys()
        pool_size = _POOL1[cfg.MODEL.ARCH]
        assert len({len(pool_size), self.num_pathways}) == 1
        assert cfg.RESNET.DEPTH in _MODEL_STAGE_DEPTH.keys()
        (d2, d3, d4, d5) = _MODEL_STAGE_DEPTH[cfg.RESNET.DEPTH]
        num_groups = cfg.RESNET.NUM_GROUPS
        width_per_group = cfg.RESNET.WIDTH_PER_GROUP
        dim_inner = num_groups * width_per_group
        temp_kernel = _TEMPORAL_KERNEL_BASIS[cfg.MODEL.ARCH]
        s1 = stem_helper.VideoModelStem(
            dim_in=cfg.DATA.INPUT_CHANNEL_NUM,
            dim_out=[width_per_group],
            kernel=[temp_kernel[0][0] + [7, 7]],
            stride=[[1, 2, 2]],
            padding=[[temp_kernel[0][0][0] // 2, 3, 3]],
            norm_module=self.norm_module,
        )
        s2 = resnet_helper.ResStage(
            dim_in=[width_per_group],
            dim_out=[width_per_group * 4],
            dim_inner=[dim_inner],
            temp_kernel_sizes=temp_kernel[1],
            stride=cfg.RESNET.SPATIAL_STRIDES[0],
            num_blocks=[d2],
            num_groups=[num_groups],
            num_block_temp_kernel=cfg.RESNET.NUM_BLOCK_TEMP_KERNEL[0],
            nonlocal_inds=cfg.NONLOCAL.LOCATION[0],
            nonlocal_group=cfg.NONLOCAL.GROUP[0],
            nonlocal_pool=cfg.NONLOCAL.POOL[0],
            instantiation=cfg.NONLOCAL.INSTANTIATION,
            trans_func_name=cfg.RESNET.TRANS_FUNC,
            stride_1x1=cfg.RESNET.STRIDE_1X1,
            inplace_relu=cfg.RESNET.INPLACE_RELU,
            dilation=cfg.RESNET.SPATIAL_DILATIONS[0],
            norm_module=self.norm_module,
        )
        # Based on profiling data of activation size, s1 and s2 have the activation sizes
        # that are 4X larger than the second largest. Therefore, checkpointing them gives
        # best memory savings. Further tuning is possible for better memory saving and tradeoffs
        # with recomputing FLOPs.
        if cfg.MODEL.ACT_CHECKPOINT:
            validate_checkpoint_wrapper_import(checkpoint_wrapper)
            self.s1 = checkpoint_wrapper(s1)
            self.s2 = checkpoint_wrapper(s2)
        else:
            self.s1 = s1
            self.s2 = s2
        for pathway in range(self.num_pathways):
            pool = nn.MaxPool3d(
                kernel_size=pool_size[pathway],
                stride=pool_size[pathway],
                padding=[0, 0, 0],
            )
            self.add_module("pathway{}_pool".format(pathway), pool)
        self.s3 = resnet_helper.ResStage(
            dim_in=[width_per_group * 4],
            dim_out=[width_per_group * 8],
            dim_inner=[dim_inner * 2],
            temp_kernel_sizes=temp_kernel[2],
            stride=cfg.RESNET.SPATIAL_STRIDES[1],
            num_blocks=[d3],
            num_groups=[num_groups],
            num_block_temp_kernel=cfg.RESNET.NUM_BLOCK_TEMP_KERNEL[1],
            nonlocal_inds=cfg.NONLOCAL.LOCATION[1],
            nonlocal_group=cfg.NONLOCAL.GROUP[1],
            nonlocal_pool=cfg.NONLOCAL.POOL[1],
            instantiation=cfg.NONLOCAL.INSTANTIATION,
            trans_func_name=cfg.RESNET.TRANS_FUNC,
            stride_1x1=cfg.RESNET.STRIDE_1X1,
            inplace_relu=cfg.RESNET.INPLACE_RELU,
            dilation=cfg.RESNET.SPATIAL_DILATIONS[1],
            norm_module=self.norm_module,
        )
        self.s4 = resnet_helper.ResStage(
            dim_in=[width_per_group * 8],
            dim_out=[width_per_group * 16],
            dim_inner=[dim_inner * 4],
            temp_kernel_sizes=temp_kernel[3],
            stride=cfg.RESNET.SPATIAL_STRIDES[2],
            num_blocks=[d4],
            num_groups=[num_groups],
            num_block_temp_kernel=cfg.RESNET.NUM_BLOCK_TEMP_KERNEL[2],
            nonlocal_inds=cfg.NONLOCAL.LOCATION[2],
            nonlocal_group=cfg.NONLOCAL.GROUP[2],
            nonlocal_pool=cfg.NONLOCAL.POOL[2],
            instantiation=cfg.NONLOCAL.INSTANTIATION,
            trans_func_name=cfg.RESNET.TRANS_FUNC,
            stride_1x1=cfg.RESNET.STRIDE_1X1,
            inplace_relu=cfg.RESNET.INPLACE_RELU,
            dilation=cfg.RESNET.SPATIAL_DILATIONS[2],
            norm_module=self.norm_module,
        )
        self.s5 = resnet_helper.ResStage(
            dim_in=[width_per_group * 16],
            dim_out=[width_per_group * 32],
            dim_inner=[dim_inner * 8],
            temp_kernel_sizes=temp_kernel[4],
            stride=cfg.RESNET.SPATIAL_STRIDES[3],
            num_blocks=[d5],
            num_groups=[num_groups],
            num_block_temp_kernel=cfg.RESNET.NUM_BLOCK_TEMP_KERNEL[3],
            nonlocal_inds=cfg.NONLOCAL.LOCATION[3],
            nonlocal_group=cfg.NONLOCAL.GROUP[3],
            nonlocal_pool=cfg.NONLOCAL.POOL[3],
            instantiation=cfg.NONLOCAL.INSTANTIATION,
            trans_func_name=cfg.RESNET.TRANS_FUNC,
            stride_1x1=cfg.RESNET.STRIDE_1X1,
            inplace_relu=cfg.RESNET.INPLACE_RELU,
            dilation=cfg.RESNET.SPATIAL_DILATIONS[3],
            norm_module=self.norm_module,
        )
        if self.enable_detection:
            self.head = head_helper.ResNetRoIHead(
                dim_in=[width_per_group * 32],
                num_classes=cfg.MODEL.NUM_CLASSES,
                pool_size=[[cfg.DATA.NUM_FRAMES // pool_size[0][0], 1, 1]],
                resolution=[[cfg.DETECTION.ROI_XFORM_RESOLUTION] * 2],
                scale_factor=[cfg.DETECTION.SPATIAL_SCALE_FACTOR],
                dropout_rate=cfg.MODEL.DROPOUT_RATE,
                act_func=cfg.MODEL.HEAD_ACT,
                aligned=cfg.DETECTION.ALIGNED,
            )
        else:
            self.head = head_helper.ResNetBasicHead(
                dim_in=[width_per_group * 32],
                num_classes=cfg.MODEL.NUM_CLASSES,
                pool_size=[None, None]
                if cfg.MULTIGRID.SHORT_CYCLE
                else [
                    [
                        cfg.DATA.NUM_FRAMES // pool_size[0][0],
                        cfg.DATA.TRAIN_CROP_SIZE // 32 // pool_size[0][1],
                        cfg.DATA.TRAIN_CROP_SIZE // 32 // pool_size[0][2],
                    ]
                ],  # None for AdaptiveAvgPool3d((1, 1, 1))
                dropout_rate=cfg.MODEL.DROPOUT_RATE,
                act_func=cfg.MODEL.HEAD_ACT,
            )
    def forward(self, x, bboxes=None):
        """
        Args:
            x (list[Tensor]): single-pathway input list with one 5-D tensor.
            bboxes (Tensor, optional): RoI boxes, consumed by the RoI head
                only when detection is enabled.
        Returns:
            Tensor: classification (or per-RoI) predictions from the head.
        """
        x = self.s1(x)
        x = self.s2(x)
        y = []  # Don't modify x list in place due to activation checkpoint.
        for pathway in range(self.num_pathways):
            pool = getattr(self, "pathway{}_pool".format(pathway))
            y.append(pool(x[pathway]))
        x = self.s3(y)
        x = self.s4(x)
        x = self.s5(x)
        if self.enable_detection:
            x = self.head(x, bboxes)
        else:
            x = self.head(x)
        return x
@MODEL_REGISTRY.register()
class X3D(nn.Module):
    """
    X3D model builder. It builds a X3D network backbone, which is a ResNet.
    Christoph Feichtenhofer.
    "X3D: Expanding Architectures for Efficient Video Recognition."
    https://arxiv.org/abs/2004.04730
    """
    def __init__(self, cfg):
        """
        The `__init__` method of any subclass should also contain these
        arguments.
        Args:
            cfg (CfgNode): model building configs, details are in the
                comments of the config file.
        """
        super(X3D, self).__init__()
        self.norm_module = get_norm(cfg)
        self.enable_detection = cfg.DETECTION.ENABLE
        self.num_pathways = 1
        exp_stage = 2.0
        self.dim_c1 = cfg.X3D.DIM_C1
        # Stage widths double at every stage (optionally starting at res2).
        self.dim_res2 = (
            round_width(self.dim_c1, exp_stage, divisor=8)
            if cfg.X3D.SCALE_RES2
            else self.dim_c1
        )
        self.dim_res3 = round_width(self.dim_res2, exp_stage, divisor=8)
        self.dim_res4 = round_width(self.dim_res3, exp_stage, divisor=8)
        self.dim_res5 = round_width(self.dim_res4, exp_stage, divisor=8)
        self.block_basis = [
            # blocks, c, stride
            [1, self.dim_res2, 2],
            [2, self.dim_res3, 2],
            [5, self.dim_res4, 2],
            [3, self.dim_res5, 2],
        ]
        self._construct_network(cfg)
        init_helper.init_weights(
            self, cfg.MODEL.FC_INIT_STD, cfg.RESNET.ZERO_INIT_FINAL_BN
        )
    def _round_repeats(self, repeats, multiplier):
        """Round number of layers based on depth multiplier."""
        if not multiplier:
            return repeats
        return int(math.ceil(multiplier * repeats))
    def _construct_network(self, cfg):
        """
        Builds a single pathway X3D model.
        Args:
            cfg (CfgNode): model building configs, details are in the
                comments of the config file.
        Raises:
            NotImplementedError: if detection is enabled, which X3D does not
                support.
        """
        assert cfg.MODEL.ARCH in _POOL1.keys()
        assert cfg.RESNET.DEPTH in _MODEL_STAGE_DEPTH.keys()
        (d2, d3, d4, d5) = _MODEL_STAGE_DEPTH[cfg.RESNET.DEPTH]
        num_groups = cfg.RESNET.NUM_GROUPS
        width_per_group = cfg.RESNET.WIDTH_PER_GROUP
        dim_inner = num_groups * width_per_group
        w_mul = cfg.X3D.WIDTH_FACTOR
        d_mul = cfg.X3D.DEPTH_FACTOR
        dim_res1 = round_width(self.dim_c1, w_mul)
        temp_kernel = _TEMPORAL_KERNEL_BASIS[cfg.MODEL.ARCH]
        self.s1 = stem_helper.VideoModelStem(
            dim_in=cfg.DATA.INPUT_CHANNEL_NUM,
            dim_out=[dim_res1],
            kernel=[temp_kernel[0][0] + [3, 3]],
            stride=[[1, 2, 2]],
            padding=[[temp_kernel[0][0][0] // 2, 1, 1]],
            norm_module=self.norm_module,
            stem_func_name="x3d_stem",
        )
        # blob_in = s1
        dim_in = dim_res1
        for stage, block in enumerate(self.block_basis):
            dim_out = round_width(block[1], w_mul)
            dim_inner = int(cfg.X3D.BOTTLENECK_FACTOR * dim_out)
            n_rep = self._round_repeats(block[0], d_mul)
            prefix = "s{}".format(
                stage + 2
            )  # start w res2 to follow convention
            s = resnet_helper.ResStage(
                dim_in=[dim_in],
                dim_out=[dim_out],
                dim_inner=[dim_inner],
                temp_kernel_sizes=temp_kernel[1],
                stride=[block[2]],
                num_blocks=[n_rep],
                num_groups=[dim_inner]
                if cfg.X3D.CHANNELWISE_3x3x3
                else [num_groups],
                num_block_temp_kernel=[n_rep],
                nonlocal_inds=cfg.NONLOCAL.LOCATION[0],
                nonlocal_group=cfg.NONLOCAL.GROUP[0],
                nonlocal_pool=cfg.NONLOCAL.POOL[0],
                instantiation=cfg.NONLOCAL.INSTANTIATION,
                trans_func_name=cfg.RESNET.TRANS_FUNC,
                stride_1x1=cfg.RESNET.STRIDE_1X1,
                norm_module=self.norm_module,
                dilation=cfg.RESNET.SPATIAL_DILATIONS[stage],
                drop_connect_rate=cfg.MODEL.DROPCONNECT_RATE
                * (stage + 2)
                / (len(self.block_basis) + 1),
            )
            dim_in = dim_out
            self.add_module(prefix, s)
        if self.enable_detection:
            # Bug fix: the original body was a bare `NotImplementedError`
            # expression, which evaluates the exception class and discards it
            # without raising — silently building a model with NO head when
            # detection was enabled. Fail loudly instead.
            raise NotImplementedError(
                "Detection is not supported for X3D."
            )
        else:
            spat_sz = int(math.ceil(cfg.DATA.TRAIN_CROP_SIZE / 32.0))
            self.head = head_helper.X3DHead(
                dim_in=dim_out,
                dim_inner=dim_inner,
                dim_out=cfg.X3D.DIM_C5,
                num_classes=cfg.MODEL.NUM_CLASSES,
                pool_size=[cfg.DATA.NUM_FRAMES, spat_sz, spat_sz],
                dropout_rate=cfg.MODEL.DROPOUT_RATE,
                act_func=cfg.MODEL.HEAD_ACT,
                bn_lin5_on=cfg.X3D.BN_LIN5,
            )
    def forward(self, x, bboxes=None):
        """
        Run every registered child module in registration order
        (stem, res stages, head).
        Args:
            x (list[Tensor]): single-pathway input list.
            bboxes: unused; kept for interface parity with other builders.
        """
        for module in self.children():
            x = module(x)
        return x
| 28,931 | 36.044814 | 96 | py |
STTS | STTS-main/MViT/slowfast/models/common.py | # Copyright (c) Facebook, Inc. and its affiliates.
import torch
import torch.nn as nn
class Mlp(nn.Module):
    """Two-layer MLP: Linear -> activation (-> dropout) -> Linear (-> dropout).

    Dropout layers are only instantiated when `drop_rate` is positive, so a
    zero-rate MLP carries no `drop` submodule at all.
    """
    def __init__(
        self,
        in_features,
        hidden_features=None,
        out_features=None,
        act_layer=nn.GELU,
        drop_rate=0.0,
    ):
        super().__init__()
        self.drop_rate = drop_rate
        # Missing widths default to the input width.
        hidden_features = hidden_features or in_features
        out_features = out_features or in_features
        self.fc1 = nn.Linear(in_features, hidden_features)
        self.act = act_layer()
        self.fc2 = nn.Linear(hidden_features, out_features)
        if self.drop_rate > 0.0:
            self.drop = nn.Dropout(drop_rate)
    def forward(self, x):
        out = self.act(self.fc1(x))
        if self.drop_rate > 0.0:
            out = self.drop(out)
        out = self.fc2(out)
        if self.drop_rate > 0.0:
            out = self.drop(out)
        return out
class Permute(nn.Module):
    """Module wrapper around `Tensor.permute` with a fixed dimension order."""
    def __init__(self, dims):
        super().__init__()
        # Dimension order applied to every input, e.g. (0, 2, 1).
        self.dims = dims
    def forward(self, x):
        reordered = x.permute(*self.dims)
        return reordered
def drop_path(x, drop_prob: float = 0.0, training: bool = False):
    """
    Stochastic Depth per sample: during training, zero out each sample of a
    residual branch with probability `drop_prob` and rescale the survivors
    by 1 / keep_prob so the expected value is unchanged. Identity at
    evaluation time or when `drop_prob` is 0.
    """
    if not training or drop_prob == 0.0:
        return x
    keep_prob = 1 - drop_prob
    # One Bernoulli draw per sample, broadcast across all remaining dims.
    mask_shape = (x.shape[0],) + (1,) * (x.ndim - 1)
    mask = torch.rand(mask_shape, dtype=x.dtype, device=x.device).add_(
        keep_prob
    )
    mask.floor_()  # binarize: 1 with prob keep_prob, else 0
    return x.div(keep_prob) * mask
class DropPath(nn.Module):
    """Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks)."""
    def __init__(self, drop_prob=None):
        super(DropPath, self).__init__()
        self.drop_prob = drop_prob
    def forward(self, x):
        # Inline of the module-level `drop_path` helper: identity unless
        # training with a non-zero drop probability.
        if self.drop_prob == 0.0 or not self.training:
            return x
        keep_prob = 1 - self.drop_prob
        shape = (x.shape[0],) + (1,) * (x.ndim - 1)
        mask = keep_prob + torch.rand(shape, dtype=x.dtype, device=x.device)
        mask.floor_()
        return x.div(keep_prob) * mask
| 1,900 | 25.774648 | 99 | py |
STTS | STTS-main/MViT/slowfast/models/head_helper.py | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
"""ResNe(X)t Head helper."""
import torch
import torch.nn as nn
from detectron2.layers import ROIAlign
class ResNetRoIHead(nn.Module):
    """
    ResNe(X)t RoI head.
    """
    def __init__(
        self,
        dim_in,
        num_classes,
        pool_size,
        resolution,
        scale_factor,
        dropout_rate=0.0,
        act_func="softmax",
        aligned=True,
    ):
        """
        The `__init__` method of any subclass should also contain these
        arguments.
        ResNetRoIHead takes p pathways as input where p in [1, infty].
        Args:
            dim_in (list): the list of channel dimensions of the p inputs to the
                ResNetHead.
            num_classes (int): the channel dimensions of the p outputs to the
                ResNetHead.
            pool_size (list): the list of kernel sizes of p spatial temporal
                poolings, temporal pool kernel size, spatial pool kernel size,
                spatial pool kernel size in order.
            resolution (list): the list of spatial output size from the ROIAlign.
            scale_factor (list): the list of ratio to the input boxes by this
                number.
            dropout_rate (float): dropout rate. If equal to 0.0, perform no
                dropout.
            act_func (string): activation function to use. 'softmax': applies
                softmax on the output. 'sigmoid': applies sigmoid on the output.
            aligned (bool): if False, use the legacy implementation. If True,
                align the results more perfectly.
        Note:
            Given a continuous coordinate c, its two neighboring pixel indices
            (in our pixel model) are computed by floor (c - 0.5) and ceil
            (c - 0.5). For example, c=1.3 has pixel neighbors with discrete
            indices [0] and [1] (which are sampled from the underlying signal at
            continuous coordinates 0.5 and 1.5). But the original roi_align
            (aligned=False) does not subtract the 0.5 when computing neighboring
            pixel indices and therefore it uses pixels with a slightly incorrect
            alignment (relative to our pixel model) when performing bilinear
            interpolation.
            With `aligned=True`, we first appropriately scale the ROI and then
            shift it by -0.5 prior to calling roi_align. This produces the
            correct neighbors; It makes negligible differences to the model's
            performance if ROIAlign is used together with conv layers.
        """
        super(ResNetRoIHead, self).__init__()
        assert (
            len({len(pool_size), len(dim_in)}) == 1
        ), "pathway dimensions are not consistent."
        self.num_pathways = len(pool_size)
        # Per pathway: temporal avg-pool -> ROIAlign -> spatial max-pool.
        for pathway in range(self.num_pathways):
            temporal_pool = nn.AvgPool3d(
                [pool_size[pathway][0], 1, 1], stride=1
            )
            self.add_module("s{}_tpool".format(pathway), temporal_pool)
            roi_align = ROIAlign(
                resolution[pathway],
                spatial_scale=1.0 / scale_factor[pathway],
                sampling_ratio=0,
                aligned=aligned,
            )
            self.add_module("s{}_roi".format(pathway), roi_align)
            spatial_pool = nn.MaxPool2d(resolution[pathway], stride=1)
            self.add_module("s{}_spool".format(pathway), spatial_pool)
        if dropout_rate > 0.0:
            self.dropout = nn.Dropout(dropout_rate)
        # Perform FC in a fully convolutional manner. The FC layer will be
        # initialized with a different std comparing to convolutional layers.
        self.projection = nn.Linear(sum(dim_in), num_classes, bias=True)
        # Softmax for evaluation and testing.
        if act_func == "softmax":
            self.act = nn.Softmax(dim=1)
        elif act_func == "sigmoid":
            self.act = nn.Sigmoid()
        else:
            raise NotImplementedError(
                "{} is not supported as an activation"
                "function.".format(act_func)
            )
    def forward(self, inputs, bboxes):
        """
        Args:
            inputs (list[Tensor]): one (N, C, T, H, W) tensor per pathway;
                the temporal pool must reduce T to exactly 1 (asserted below).
            bboxes (Tensor): RoI boxes in the layout expected by detectron2's
                ROIAlign — presumably (batch_index, x1, y1, x2, y2) rows;
                confirm against the caller.
        Returns:
            Tensor: (num_boxes, num_classes) activated predictions.
        """
        assert (
            len(inputs) == self.num_pathways
        ), "Input tensor does not contain {} pathway".format(self.num_pathways)
        pool_out = []
        for pathway in range(self.num_pathways):
            t_pool = getattr(self, "s{}_tpool".format(pathway))
            out = t_pool(inputs[pathway])
            assert out.shape[2] == 1
            out = torch.squeeze(out, 2)
            roi_align = getattr(self, "s{}_roi".format(pathway))
            out = roi_align(out, bboxes)
            s_pool = getattr(self, "s{}_spool".format(pathway))
            pool_out.append(s_pool(out))
        # B C H W.
        x = torch.cat(pool_out, 1)
        # Perform dropout.
        if hasattr(self, "dropout"):
            x = self.dropout(x)
        x = x.view(x.shape[0], -1)
        x = self.projection(x)
        x = self.act(x)
        return x
class ResNetBasicHead(nn.Module):
    """
    ResNe(X)t 3D classification head.
    Pools each input pathway, concatenates the pooled features along the
    channel axis, and projects them to class scores with one linear layer
    applied channel-last. Training returns raw logits flattened per sample;
    inference applies the activation and averages over the spatiotemporal
    dimensions (fully convolutional inference).
    """
    def __init__(
        self,
        dim_in,
        num_classes,
        pool_size,
        dropout_rate=0.0,
        act_func="softmax",
    ):
        """
        Args:
            dim_in (list): channel dimensions of the p input pathways.
            num_classes (int): number of output classes.
            pool_size (list): per-pathway (T, H, W) pooling kernel; a None
                entry selects AdaptiveAvgPool3d((1, 1, 1)) instead.
            dropout_rate (float): dropout rate; 0.0 disables dropout.
            act_func (string): output activation, 'softmax' or 'sigmoid'.
        """
        super(ResNetBasicHead, self).__init__()
        assert (
            len({len(pool_size), len(dim_in)}) == 1
        ), "pathway dimensions are not consistent."
        self.num_pathways = len(pool_size)
        for pathway, size in enumerate(pool_size):
            if size is None:
                avg_pool = nn.AdaptiveAvgPool3d((1, 1, 1))
            else:
                avg_pool = nn.AvgPool3d(size, stride=1)
            self.add_module("pathway{}_avgpool".format(pathway), avg_pool)
        if dropout_rate > 0.0:
            self.dropout = nn.Dropout(dropout_rate)
        # FC applied channel-last in a fully convolutional manner; the init
        # helper gives it a different std than the conv layers.
        self.projection = nn.Linear(sum(dim_in), num_classes, bias=True)
        # Activation used at evaluation/testing time only.
        if act_func == "softmax":
            self.act = nn.Softmax(dim=4)
        elif act_func == "sigmoid":
            self.act = nn.Sigmoid()
        else:
            raise NotImplementedError(
                "{} is not supported as an activation"
                "function.".format(act_func)
            )
    def forward(self, inputs):
        assert (
            len(inputs) == self.num_pathways
        ), "Input tensor does not contain {} pathway".format(self.num_pathways)
        pooled = [
            getattr(self, "pathway{}_avgpool".format(pathway))(inputs[pathway])
            for pathway in range(self.num_pathways)
        ]
        # (N, C, T, H, W) -> (N, T, H, W, C) so the linear layer acts on C.
        x = torch.cat(pooled, 1).permute((0, 2, 3, 4, 1))
        if hasattr(self, "dropout"):
            x = self.dropout(x)
        x = self.projection(x)
        # Fully convolutional inference: activate, then average over T/H/W.
        if not self.training:
            x = self.act(x)
            x = x.mean([1, 2, 3])
        return x.view(x.shape[0], -1)
class X3DHead(nn.Module):
    """
    X3D head.
    This layer performs a fully-connected projection during training, when the
    input size is 1x1x1. It performs a convolutional projection during testing
    when the input size is larger than 1x1x1. If the inputs are from multiple
    different pathways, the inputs will be concatenated after pooling.
    """

    def __init__(
        self,
        dim_in,
        dim_inner,
        dim_out,
        num_classes,
        pool_size,
        dropout_rate=0.0,
        act_func="softmax",
        inplace_relu=True,
        eps=1e-5,
        bn_mmt=0.1,
        norm_module=nn.BatchNorm3d,
        bn_lin5_on=False,
    ):
        """
        X3DHead takes a 5-dim feature tensor (BxCxTxHxW) as input.

        Args:
            dim_in (int): channel dimension C of the input.
            dim_inner (int): channel dimension of the 1x1x1 bottleneck conv.
            dim_out (int): channel dimension of the features fed to the
                classifier.
            num_classes (int): number of output classes.
            pool_size (list): single-entry list with the TxHxW pooling kernel;
                None selects adaptive average pooling to 1x1x1.
            dropout_rate (float): dropout rate; 0.0 disables dropout.
            act_func (string): inference-time activation, "softmax" or
                "sigmoid".
            inplace_relu (bool): if True, compute ReLU in place.
            eps (float): epsilon for batch norm.
            bn_mmt (float): momentum for batch norm (PyTorch convention,
                i.e. 1 - Caffe2 momentum).
            norm_module (nn.Module): normalization layer class, by default
                nn.BatchNorm3d.
            bn_lin5_on (bool): if True, normalize the lin_5 features before
                the classifier.
        """
        super(X3DHead, self).__init__()
        # Stash the configuration consumed by _construct_head.
        self.pool_size = pool_size
        self.dropout_rate = dropout_rate
        self.num_classes = num_classes
        self.act_func = act_func
        self.eps = eps
        self.bn_mmt = bn_mmt
        self.inplace_relu = inplace_relu
        self.bn_lin5_on = bn_lin5_on
        self._construct_head(dim_in, dim_inner, dim_out, norm_module)

    def _construct_head(self, dim_in, dim_inner, dim_out, norm_module):
        # 1x1x1 bottleneck conv -> BN -> ReLU ahead of the pooling.
        self.conv_5 = nn.Conv3d(
            dim_in, dim_inner, kernel_size=1, stride=1, padding=0, bias=False
        )
        self.conv_5_bn = norm_module(
            num_features=dim_inner, eps=self.eps, momentum=self.bn_mmt
        )
        self.conv_5_relu = nn.ReLU(self.inplace_relu)
        # Global (or fixed-kernel) spatiotemporal average pooling.
        if self.pool_size is None:
            self.avg_pool = nn.AdaptiveAvgPool3d((1, 1, 1))
        else:
            self.avg_pool = nn.AvgPool3d(self.pool_size, stride=1)
        # 1x1x1 expansion conv, optionally followed by BN.
        self.lin_5 = nn.Conv3d(
            dim_inner, dim_out, kernel_size=1, stride=1, padding=0, bias=False
        )
        if self.bn_lin5_on:
            self.lin_5_bn = norm_module(
                num_features=dim_out, eps=self.eps, momentum=self.bn_mmt
            )
        self.lin_5_relu = nn.ReLU(self.inplace_relu)
        if self.dropout_rate > 0.0:
            self.dropout = nn.Dropout(self.dropout_rate)
        # FC applied in a fully convolutional manner (channels-last Linear);
        # downstream init gives it a different std than the conv layers.
        self.projection = nn.Linear(dim_out, self.num_classes, bias=True)
        # Inference-time activation.
        if self.act_func == "softmax":
            self.act = nn.Softmax(dim=4)
        elif self.act_func == "sigmoid":
            self.act = nn.Sigmoid()
        else:
            # NOTE(review): message reproduced verbatim from the original,
            # including the missing space ("activationfunction").
            raise NotImplementedError(
                "{} is not supported as an activationfunction.".format(
                    self.act_func
                )
            )

    def forward(self, inputs):
        # The X3D head consumes exactly one pathway.
        assert len(inputs) == 1, "Input tensor does not contain 1 pathway"
        x = self.conv_5_relu(self.conv_5_bn(self.conv_5(inputs[0])))
        x = self.lin_5(self.avg_pool(x))
        if self.bn_lin5_on:
            x = self.lin_5_bn(x)
        x = self.lin_5_relu(x)
        # (N, C, T, H, W) -> (N, T, H, W, C) so the Linear acts on channels.
        x = x.permute((0, 2, 3, 4, 1))
        if hasattr(self, "dropout"):
            x = self.dropout(x)
        x = self.projection(x)
        if not self.training:
            # Fully convolutional inference: activate, then average over the
            # remaining spatiotemporal grid.
            x = self.act(x)
            x = x.mean([1, 2, 3])
        return x.view(x.shape[0], -1)
class TransformerBasicHead(nn.Module):
    """
    BasicHead. No pool.
    """

    def __init__(
        self,
        dim_in,
        num_classes,
        dropout_rate=0.0,
        act_func="softmax",
    ):
        """
        Perform linear projection and activation as head for tranformers.
        Args:
            dim_in (int): the channel dimension of the input to the head.
            num_classes (int): the channel dimensions of the output to the head.
            dropout_rate (float): dropout rate. If equal to 0.0, perform no
                dropout.
            act_func (string): activation function to use. 'softmax': applies
                softmax on the output. 'sigmoid': applies sigmoid on the output.
        Raises:
            NotImplementedError: if `act_func` is neither 'softmax' nor
                'sigmoid'.
        """
        super(TransformerBasicHead, self).__init__()
        if dropout_rate > 0.0:
            self.dropout = nn.Dropout(dropout_rate)
        self.projection = nn.Linear(dim_in, num_classes, bias=True)
        # Activation applied only for evaluation and testing.
        if act_func == "softmax":
            self.act = nn.Softmax(dim=1)
        elif act_func == "sigmoid":
            self.act = nn.Sigmoid()
        else:
            # Fix: the implicitly-concatenated literals previously rendered
            # as "...as an activationfunction." (missing space).
            raise NotImplementedError(
                "{} is not supported as an activation "
                "function.".format(act_func)
            )

    def forward(self, x):
        """Project features to class scores; apply activation in eval mode."""
        if hasattr(self, "dropout"):
            x = self.dropout(x)
        x = self.projection(x)
        if not self.training:
            x = self.act(x)
        return x
| 14,978 | 35.623472 | 81 | py |
STTS | STTS-main/MViT/slowfast/models/stem_helper.py | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
"""ResNe(X)t 3D stem helper."""
import torch.nn as nn
def get_stem_func(name):
    """
    Retrieve a stem module class by name.

    Args:
        name (str): one of "x3d_stem" or "basic_stem".
    Returns:
        The stem module class (not an instance).
    Raises:
        AssertionError: if `name` is not a supported stem function.
    """
    trans_funcs = {"x3d_stem": X3DStem, "basic_stem": ResNetBasicStem}
    # Idiom: membership test directly on the dict instead of `.keys()`.
    assert (
        name in trans_funcs
    ), "Transformation function '{}' not supported".format(name)
    return trans_funcs[name]
class VideoModelStem(nn.Module):
    """
    Video 3D stem module. Provides stem operations of Conv, BN, ReLU, MaxPool
    on input data tensor for one or multiple pathways.
    """

    def __init__(
        self,
        dim_in,
        dim_out,
        kernel,
        stride,
        padding,
        inplace_relu=True,
        eps=1e-5,
        bn_mmt=0.1,
        norm_module=nn.BatchNorm3d,
        stem_func_name="basic_stem",
    ):
        """
        Build one stem per pathway. Use list size 1 for single pathway models
        (C2D, I3D, Slow, etc.) and list size 2 for two-pathway models
        (SlowFast).

        Args:
            dim_in (list): per-pathway channel dimensions of the inputs.
            dim_out (list): per-pathway output dimensions of the stem conv.
            kernel (list): per-pathway conv kernel sizes as
                [temporal, height, width].
            stride (list): per-pathway conv strides as
                [temporal, height, width].
            padding (list): per-pathway conv paddings as
                [temporal, height, width].
            inplace_relu (bool): compute ReLU in place when True.
            eps (float): epsilon for batch norm.
            bn_mmt (float): momentum for batch norm (PyTorch convention,
                i.e. 1 - Caffe2 momentum).
            norm_module (nn.Module): normalization layer class, by default
                nn.BatchNorm3d.
            stem_func_name (string): name of the stem function applied to the
                network input.
        """
        super(VideoModelStem, self).__init__()
        lengths = (
            len(dim_in),
            len(dim_out),
            len(kernel),
            len(stride),
            len(padding),
        )
        assert len(set(lengths)) == 1, (
            "Input pathway dimensions are not consistent. {} {} {} {} {}".format(
                *lengths
            )
        )
        self.num_pathways = len(dim_in)
        self.kernel = kernel
        self.stride = stride
        self.padding = padding
        self.inplace_relu = inplace_relu
        self.eps = eps
        self.bn_mmt = bn_mmt
        # Construct the stem layer.
        self._construct_stem(dim_in, dim_out, norm_module, stem_func_name)

    def _construct_stem(self, dim_in, dim_out, norm_module, stem_func_name):
        trans_func = get_stem_func(stem_func_name)
        for pathway, (c_in, c_out) in enumerate(zip(dim_in, dim_out)):
            self.add_module(
                "pathway{}_stem".format(pathway),
                trans_func(
                    c_in,
                    c_out,
                    self.kernel[pathway],
                    self.stride[pathway],
                    self.padding[pathway],
                    self.inplace_relu,
                    self.eps,
                    self.bn_mmt,
                    norm_module,
                ),
            )

    def forward(self, x):
        assert len(x) == self.num_pathways, (
            "Input tensor does not contain {} pathway".format(self.num_pathways)
        )
        # Build a fresh output list; mutating the input list in place would
        # interfere with activation checkpointing.
        return [
            getattr(self, "pathway{}_stem".format(pathway))(x[pathway])
            for pathway in range(self.num_pathways)
        ]
class ResNetBasicStem(nn.Module):
    """
    ResNe(X)t 3D stem module.
    Performs spatiotemporal Convolution, BN, and Relu following by a
    spatiotemporal pooling.
    """

    def __init__(
        self,
        dim_in,
        dim_out,
        kernel,
        stride,
        padding,
        inplace_relu=True,
        eps=1e-5,
        bn_mmt=0.1,
        norm_module=nn.BatchNorm3d,
    ):
        """
        Args:
            dim_in (int): channel dimension of the input (3 for RGB, 2 or 3
                for optical flow).
            dim_out (int): output channel dimension of the stem convolution.
            kernel (list): conv kernel size as [temporal, height, width].
            stride (list): conv stride as [temporal, height, width].
            padding (list): conv padding as [temporal, height, width].
            inplace_relu (bool): compute ReLU in place when True.
            eps (float): epsilon for batch norm.
            bn_mmt (float): momentum for batch norm (PyTorch convention,
                i.e. 1 - Caffe2 momentum).
            norm_module (nn.Module): normalization layer class, by default
                nn.BatchNorm3d.
        """
        super(ResNetBasicStem, self).__init__()
        self.kernel, self.stride, self.padding = kernel, stride, padding
        self.inplace_relu = inplace_relu
        self.eps = eps
        self.bn_mmt = bn_mmt
        self._construct_stem(dim_in, dim_out, norm_module)

    def _construct_stem(self, dim_in, dim_out, norm_module):
        # Conv -> BN -> ReLU, then a fixed 1x3x3 spatial max pool (stride 2).
        self.conv = nn.Conv3d(
            dim_in,
            dim_out,
            self.kernel,
            stride=self.stride,
            padding=self.padding,
            bias=False,
        )
        self.bn = norm_module(
            num_features=dim_out, eps=self.eps, momentum=self.bn_mmt
        )
        self.relu = nn.ReLU(self.inplace_relu)
        self.pool_layer = nn.MaxPool3d(
            kernel_size=[1, 3, 3], stride=[1, 2, 2], padding=[0, 1, 1]
        )

    def forward(self, x):
        return self.pool_layer(self.relu(self.bn(self.conv(x))))
class X3DStem(nn.Module):
    """
    X3D's 3D stem module.
    Performs a spatial convolution followed by a depthwise temporal
    convolution, then BN and ReLU.
    """

    def __init__(
        self,
        dim_in,
        dim_out,
        kernel,
        stride,
        padding,
        inplace_relu=True,
        eps=1e-5,
        bn_mmt=0.1,
        norm_module=nn.BatchNorm3d,
    ):
        """
        Args:
            dim_in (int): channel dimension of the input (3 for RGB, 2 or 3
                for optical flow).
            dim_out (int): output channel dimension of the stem.
            kernel (list): kernel size as [temporal, height, width]; the
                spatial part drives the first conv, the temporal part the
                depthwise conv.
            stride (list): stride as [temporal, height, width].
            padding (list): padding as [temporal, height, width].
            inplace_relu (bool): compute ReLU in place when True.
            eps (float): epsilon for batch norm.
            bn_mmt (float): momentum for batch norm (PyTorch convention,
                i.e. 1 - Caffe2 momentum).
            norm_module (nn.Module): normalization layer class, by default
                nn.BatchNorm3d.
        """
        super(X3DStem, self).__init__()
        self.kernel, self.stride, self.padding = kernel, stride, padding
        self.inplace_relu = inplace_relu
        self.eps = eps
        self.bn_mmt = bn_mmt
        self._construct_stem(dim_in, dim_out, norm_module)

    def _construct_stem(self, dim_in, dim_out, norm_module):
        k_t, k_h, k_w = self.kernel
        s_t, s_h, s_w = self.stride
        p_t, p_h, p_w = self.padding
        # Spatial-only convolution (kernel 1 x H x W).
        self.conv_xy = nn.Conv3d(
            dim_in,
            dim_out,
            kernel_size=(1, k_h, k_w),
            stride=(1, s_h, s_w),
            padding=(0, p_h, p_w),
            bias=False,
        )
        # Depthwise temporal convolution (kernel T x 1 x 1, groups=dim_out).
        self.conv = nn.Conv3d(
            dim_out,
            dim_out,
            kernel_size=(k_t, 1, 1),
            stride=(s_t, 1, 1),
            padding=(p_t, 0, 0),
            bias=False,
            groups=dim_out,
        )
        self.bn = norm_module(
            num_features=dim_out, eps=self.eps, momentum=self.bn_mmt
        )
        self.relu = nn.ReLU(self.inplace_relu)

    def forward(self, x):
        return self.relu(self.bn(self.conv(self.conv_xy(x))))
class PatchEmbed(nn.Module):
    """
    Convolutional patch embedding: projects video (or image) input into a
    sequence of flattened patch tokens.
    """

    def __init__(
        self,
        dim_in=3,
        dim_out=768,
        kernel=(1, 16, 16),
        stride=(1, 4, 4),
        padding=(1, 7, 7),
        conv_2d=False,
    ):
        """
        Args:
            dim_in (int): input channel dimension.
            dim_out (int): embedding dimension of each patch token.
            kernel / stride / padding: parameters of the projection conv.
            conv_2d (bool): use Conv2d (image input) instead of Conv3d
                (video input); the kernel/stride/padding must match.
        """
        super().__init__()
        conv_cls = nn.Conv2d if conv_2d else nn.Conv3d
        self.proj = conv_cls(
            dim_in,
            dim_out,
            kernel_size=kernel,
            stride=stride,
            padding=padding,
        )

    def forward(self, x):
        x = self.proj(x)
        # (B, C, (T,) H, W) -> (B, (T)*H*W, C): flatten the grid and move
        # channels last so each row is one token embedding.
        return x.flatten(2).transpose(1, 2)
| 10,775 | 32.362229 | 102 | py |
STTS | STTS-main/MViT/slowfast/models/resnet_helper.py | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
"""Video models."""
import torch
import torch.nn as nn
from slowfast.models.common import drop_path
from slowfast.models.nonlocal_helper import Nonlocal
from slowfast.models.operators import SE, Swish
def get_trans_func(name):
    """
    Retrieve a residual transformation class by name.

    Args:
        name (str): one of "bottleneck_transform", "basic_transform" or
            "x3d_transform".
    Returns:
        The transformation module class (not an instance).
    Raises:
        AssertionError: if `name` is not a supported transformation.
    """
    trans_funcs = {
        "bottleneck_transform": BottleneckTransform,
        "basic_transform": BasicTransform,
        "x3d_transform": X3DTransform,
    }
    # Idiom: membership test directly on the dict instead of `.keys()`.
    assert (
        name in trans_funcs
    ), "Transformation function '{}' not supported".format(name)
    return trans_funcs[name]
class BasicTransform(nn.Module):
    """
    Basic transformation: Tx3x3, 1x3x3, where T is the size of temporal kernel.
    """

    def __init__(
        self,
        dim_in,
        dim_out,
        temp_kernel_size,
        stride,
        dim_inner=None,
        num_groups=1,
        stride_1x1=None,
        inplace_relu=True,
        eps=1e-5,
        bn_mmt=0.1,
        norm_module=nn.BatchNorm3d,
        block_idx=0,
    ):
        """
        Args:
            dim_in (int): channel dimension of the input.
            dim_out (int): channel dimension of the output.
            temp_kernel_size (int): temporal kernel size of the first conv.
            stride (int): spatial stride of the block.
            dim_inner (None): unused in BasicTransform.
            num_groups (int): unused; always 1 for BasicTransform.
            stride_1x1 (None): unused in BasicTransform.
            inplace_relu (bool): compute ReLU in place when True.
            eps (float): epsilon for batch norm.
            bn_mmt (float): momentum for batch norm (PyTorch convention,
                i.e. 1 - Caffe2 momentum).
            norm_module (nn.Module): normalization layer class, by default
                nn.BatchNorm3d.
            block_idx (int): unused in BasicTransform.
        """
        super(BasicTransform, self).__init__()
        self.temp_kernel_size = temp_kernel_size
        self._inplace_relu = inplace_relu
        self._eps = eps
        self._bn_mmt = bn_mmt
        self._construct(dim_in, dim_out, stride, norm_module)

    def _construct(self, dim_in, dim_out, stride, norm_module):
        t = self.temp_kernel_size
        # First unit: Tx3x3 conv (spatially strided) -> BN -> ReLU.
        self.a = nn.Conv3d(
            dim_in,
            dim_out,
            kernel_size=[t, 3, 3],
            stride=[1, stride, stride],
            padding=[int(t // 2), 1, 1],
            bias=False,
        )
        self.a_bn = norm_module(
            num_features=dim_out, eps=self._eps, momentum=self._bn_mmt
        )
        self.a_relu = nn.ReLU(inplace=self._inplace_relu)
        # Second unit: 1x3x3 conv -> BN. No ReLU here; it follows the
        # residual addition in the enclosing block.
        self.b = nn.Conv3d(
            dim_out,
            dim_out,
            kernel_size=[1, 3, 3],
            stride=[1, 1, 1],
            padding=[0, 1, 1],
            bias=False,
        )
        self.b_bn = norm_module(
            num_features=dim_out, eps=self._eps, momentum=self._bn_mmt
        )
        # Marker flag kept for external code (weight init, presumably).
        self.b_bn.transform_final_bn = True

    def forward(self, x):
        for layer in (self.a, self.a_bn, self.a_relu, self.b, self.b_bn):
            x = layer(x)
        return x
class X3DTransform(nn.Module):
    """
    X3D transformation: 1x1x1, Tx3x3 (channelwise, num_groups=dim_in), 1x1x1,
    augmented with (optional) SE (squeeze-excitation) on the 3x3x3 output.
    T is the temporal kernel size (defaulting to 3)
    """
    def __init__(
        self,
        dim_in,
        dim_out,
        temp_kernel_size,
        stride,
        dim_inner,
        num_groups,
        stride_1x1=False,
        inplace_relu=True,
        eps=1e-5,
        bn_mmt=0.1,
        dilation=1,
        norm_module=nn.BatchNorm3d,
        se_ratio=0.0625,
        swish_inner=True,
        block_idx=0,
    ):
        """
        Args:
            dim_in (int): the channel dimensions of the input.
            dim_out (int): the channel dimension of the output.
            temp_kernel_size (int): the temporal kernel sizes of the middle
                convolution in the bottleneck.
            stride (int): the stride of the bottleneck.
            dim_inner (int): the inner dimension of the block.
            num_groups (int): number of groups for the convolution. num_groups=1
                is for standard ResNet like networks, and num_groups>1 is for
                ResNeXt like networks.
            stride_1x1 (bool): if True, apply stride to 1x1 conv, otherwise
                apply stride to the 3x3 conv.
            inplace_relu (bool): if True, calculate the relu on the original
                input without allocating new memory.
            eps (float): epsilon for batch norm.
            bn_mmt (float): momentum for batch norm. Noted that BN momentum in
                PyTorch = 1 - BN momentum in Caffe2.
            dilation (int): size of dilation.
            norm_module (nn.Module): nn.Module for the normalization layer. The
                default is nn.BatchNorm3d.
            se_ratio (float): if > 0, apply SE to the Tx3x3 conv, with the SE
                channel dimensionality being se_ratio times the Tx3x3 conv dim.
            swish_inner (bool): if True, apply swish to the Tx3x3 conv, otherwise
                apply ReLU to the Tx3x3 conv.
            block_idx (int): index of this block within its stage; SE is only
                attached on alternating blocks (even block_idx).
        """
        super(X3DTransform, self).__init__()
        self.temp_kernel_size = temp_kernel_size
        self._inplace_relu = inplace_relu
        self._eps = eps
        self._bn_mmt = bn_mmt
        self._se_ratio = se_ratio
        self._swish_inner = swish_inner
        self._stride_1x1 = stride_1x1
        self._block_idx = block_idx
        self._construct(
            dim_in,
            dim_out,
            stride,
            dim_inner,
            num_groups,
            dilation,
            norm_module,
        )
    def _construct(
        self,
        dim_in,
        dim_out,
        stride,
        dim_inner,
        num_groups,
        dilation,
        norm_module,
    ):
        # forward() iterates self.children() in registration order, so the
        # order in which modules are created below IS the computation order
        # (including the optional SE module registered right after b_bn).
        (str1x1, str3x3) = (stride, 1) if self._stride_1x1 else (1, stride)
        # 1x1x1, BN, ReLU.
        self.a = nn.Conv3d(
            dim_in,
            dim_inner,
            kernel_size=[1, 1, 1],
            stride=[1, str1x1, str1x1],
            padding=[0, 0, 0],
            bias=False,
        )
        self.a_bn = norm_module(
            num_features=dim_inner, eps=self._eps, momentum=self._bn_mmt
        )
        self.a_relu = nn.ReLU(inplace=self._inplace_relu)
        # Tx3x3, BN, ReLU. Channelwise when num_groups == dim_inner.
        self.b = nn.Conv3d(
            dim_inner,
            dim_inner,
            [self.temp_kernel_size, 3, 3],
            stride=[1, str3x3, str3x3],
            padding=[int(self.temp_kernel_size // 2), dilation, dilation],
            groups=num_groups,
            bias=False,
            dilation=[1, dilation, dilation],
        )
        self.b_bn = norm_module(
            num_features=dim_inner, eps=self._eps, momentum=self._bn_mmt
        )
        # Apply SE attention or not: (block_idx + 1) % 2 is truthy for even
        # block indices, i.e. SE is attached to every other block.
        use_se = True if (self._block_idx + 1) % 2 else False
        if self._se_ratio > 0.0 and use_se:
            self.se = SE(dim_inner, self._se_ratio)
        if self._swish_inner:
            self.b_relu = Swish()
        else:
            self.b_relu = nn.ReLU(inplace=self._inplace_relu)
        # 1x1x1, BN (no ReLU; it follows the residual add in ResBlock).
        self.c = nn.Conv3d(
            dim_inner,
            dim_out,
            kernel_size=[1, 1, 1],
            stride=[1, 1, 1],
            padding=[0, 0, 0],
            bias=False,
        )
        self.c_bn = norm_module(
            num_features=dim_out, eps=self._eps, momentum=self._bn_mmt
        )
        # Marker attribute; read by code outside this file (weight init,
        # presumably) — kept as-is.
        self.c_bn.transform_final_bn = True
    def forward(self, x):
        # Apply every child module in registration order; see _construct().
        for block in self.children():
            x = block(x)
        return x
class BottleneckTransform(nn.Module):
    """
    Bottleneck transformation: Tx1x1, 1x3x3, 1x1x1, where T is the size of
    temporal kernel.
    """

    def __init__(
        self,
        dim_in,
        dim_out,
        temp_kernel_size,
        stride,
        dim_inner,
        num_groups,
        stride_1x1=False,
        inplace_relu=True,
        eps=1e-5,
        bn_mmt=0.1,
        dilation=1,
        norm_module=nn.BatchNorm3d,
        block_idx=0,
    ):
        """
        Args:
            dim_in (int): channel dimension of the input.
            dim_out (int): channel dimension of the output.
            temp_kernel_size (int): temporal kernel size of the first conv.
            stride (int): spatial stride of the bottleneck.
            dim_inner (int): inner channel dimension of the block.
            num_groups (int): 1 for ResNet-style, >1 for ResNeXt-style blocks.
            stride_1x1 (bool): if True, apply stride to the 1x1 conv,
                otherwise to the 3x3 conv.
            inplace_relu (bool): compute ReLU in place when True.
            eps (float): epsilon for batch norm.
            bn_mmt (float): momentum for batch norm (PyTorch convention,
                i.e. 1 - Caffe2 momentum).
            dilation (int): spatial dilation of the 3x3 conv.
            norm_module (nn.Module): normalization layer class, by default
                nn.BatchNorm3d.
            block_idx (int): unused in BottleneckTransform.
        """
        super(BottleneckTransform, self).__init__()
        self.temp_kernel_size = temp_kernel_size
        self._inplace_relu = inplace_relu
        self._eps = eps
        self._bn_mmt = bn_mmt
        self._stride_1x1 = stride_1x1
        self._construct(
            dim_in,
            dim_out,
            stride,
            dim_inner,
            num_groups,
            dilation,
            norm_module,
        )

    def _construct(
        self,
        dim_in,
        dim_out,
        stride,
        dim_inner,
        num_groups,
        dilation,
        norm_module,
    ):
        # The spatial stride lives on the first 1x1 conv or on the 3x3 conv,
        # depending on stride_1x1.
        if self._stride_1x1:
            str1x1, str3x3 = stride, 1
        else:
            str1x1, str3x3 = 1, stride
        t = self.temp_kernel_size
        # Branch2a: Tx1x1 conv -> BN -> ReLU.
        self.a = nn.Conv3d(
            dim_in,
            dim_inner,
            kernel_size=[t, 1, 1],
            stride=[1, str1x1, str1x1],
            padding=[int(t // 2), 0, 0],
            bias=False,
        )
        self.a_bn = norm_module(
            num_features=dim_inner, eps=self._eps, momentum=self._bn_mmt
        )
        self.a_relu = nn.ReLU(inplace=self._inplace_relu)
        # Branch2b: 1x3x3 (possibly grouped / dilated) conv -> BN -> ReLU.
        self.b = nn.Conv3d(
            dim_inner,
            dim_inner,
            [1, 3, 3],
            stride=[1, str3x3, str3x3],
            padding=[0, dilation, dilation],
            groups=num_groups,
            bias=False,
            dilation=[1, dilation, dilation],
        )
        self.b_bn = norm_module(
            num_features=dim_inner, eps=self._eps, momentum=self._bn_mmt
        )
        self.b_relu = nn.ReLU(inplace=self._inplace_relu)
        # Branch2c: 1x1x1 expansion conv -> BN. No ReLU here; it follows the
        # residual addition in the enclosing block.
        self.c = nn.Conv3d(
            dim_inner,
            dim_out,
            kernel_size=[1, 1, 1],
            stride=[1, 1, 1],
            padding=[0, 0, 0],
            bias=False,
        )
        self.c_bn = norm_module(
            num_features=dim_out, eps=self._eps, momentum=self._bn_mmt
        )
        # Marker flag kept for external code (weight init, presumably).
        self.c_bn.transform_final_bn = True

    def forward(self, x):
        # a -> bn -> relu, b -> bn -> relu, c -> bn (no final ReLU here).
        for layer in (
            self.a,
            self.a_bn,
            self.a_relu,
            self.b,
            self.b_bn,
            self.b_relu,
            self.c,
            self.c_bn,
        ):
            x = layer(x)
        return x
class ResBlock(nn.Module):
    """
    Residual block.
    """

    def __init__(
        self,
        dim_in,
        dim_out,
        temp_kernel_size,
        stride,
        trans_func,
        dim_inner,
        num_groups=1,
        stride_1x1=False,
        inplace_relu=True,
        eps=1e-5,
        bn_mmt=0.1,
        dilation=1,
        norm_module=nn.BatchNorm3d,
        block_idx=0,
        drop_connect_rate=0.0,
    ):
        """
        Construct a residual block (He et al., "Deep residual learning for
        image recognition", https://arxiv.org/abs/1512.03385).

        Args:
            dim_in (int): channel dimension of the input.
            dim_out (int): channel dimension of the output.
            temp_kernel_size (int): temporal kernel size of the middle
                convolution in the bottleneck.
            stride (int): spatial stride of the bottleneck.
            trans_func (callable): factory building the residual branch
                (e.g. BottleneckTransform).
            dim_inner (int): inner channel dimension of the block.
            num_groups (int): 1 for ResNet-style, >1 for ResNeXt-style blocks.
            stride_1x1 (bool): if True, apply stride to the 1x1 conv,
                otherwise to the 3x3 conv.
            inplace_relu (bool): compute ReLU in place when True.
            eps (float): epsilon for batch norm.
            bn_mmt (float): momentum for batch norm (PyTorch convention,
                i.e. 1 - Caffe2 momentum).
            dilation (int): spatial dilation of the residual branch.
            norm_module (nn.Module): normalization layer class, by default
                nn.BatchNorm3d.
            block_idx (int): index of this block within its stage.
            drop_connect_rate (float): base rate for stochastic depth;
                0.0 disables it.
        """
        super(ResBlock, self).__init__()
        self._inplace_relu = inplace_relu
        self._eps = eps
        self._bn_mmt = bn_mmt
        self._drop_connect_rate = drop_connect_rate
        self._construct(
            dim_in,
            dim_out,
            temp_kernel_size,
            stride,
            trans_func,
            dim_inner,
            num_groups,
            stride_1x1,
            inplace_relu,
            dilation,
            norm_module,
            block_idx,
        )

    def _construct(
        self,
        dim_in,
        dim_out,
        temp_kernel_size,
        stride,
        trans_func,
        dim_inner,
        num_groups,
        stride_1x1,
        inplace_relu,
        dilation,
        norm_module,
        block_idx,
    ):
        # A projection shortcut is needed whenever the channel count or the
        # spatial resolution changes across the block.
        if dim_in != dim_out or stride != 1:
            self.branch1 = nn.Conv3d(
                dim_in,
                dim_out,
                kernel_size=1,
                stride=[1, stride, stride],
                padding=0,
                bias=False,
                dilation=1,
            )
            self.branch1_bn = norm_module(
                num_features=dim_out, eps=self._eps, momentum=self._bn_mmt
            )
        # Residual branch, built by the supplied transform factory.
        self.branch2 = trans_func(
            dim_in,
            dim_out,
            temp_kernel_size,
            stride,
            dim_inner,
            num_groups,
            stride_1x1=stride_1x1,
            inplace_relu=inplace_relu,
            dilation=dilation,
            norm_module=norm_module,
            block_idx=block_idx,
        )
        self.relu = nn.ReLU(self._inplace_relu)

    def forward(self, x):
        f_x = self.branch2(x)
        # Stochastic depth: randomly drop the residual branch at train time.
        if self.training and self._drop_connect_rate > 0.0:
            f_x = drop_path(f_x, self._drop_connect_rate)
        shortcut = (
            self.branch1_bn(self.branch1(x)) if hasattr(self, "branch1") else x
        )
        return self.relu(shortcut + f_x)
class ResStage(nn.Module):
    """
    Stage of 3D ResNet. It expects to have one or more tensors as input for
    single pathway (C2D, I3D, Slow), and multi-pathway (SlowFast) cases.
    More details can be found here:
    Christoph Feichtenhofer, Haoqi Fan, Jitendra Malik, and Kaiming He.
    "SlowFast networks for video recognition."
    https://arxiv.org/pdf/1812.03982.pdf
    """
    def __init__(
        self,
        dim_in,
        dim_out,
        stride,
        temp_kernel_sizes,
        num_blocks,
        dim_inner,
        num_groups,
        num_block_temp_kernel,
        nonlocal_inds,
        nonlocal_group,
        nonlocal_pool,
        dilation,
        instantiation="softmax",
        trans_func_name="bottleneck_transform",
        stride_1x1=False,
        inplace_relu=True,
        norm_module=nn.BatchNorm3d,
        drop_connect_rate=0.0,
    ):
        """
        The `__init__` method of any subclass should also contain these arguments.
        ResStage builds p streams, where p can be greater or equal to one.
        Args:
            dim_in (list): list of p the channel dimensions of the input.
                Different channel dimensions control the input dimension of
                different pathways.
            dim_out (list): list of p the channel dimensions of the output.
                Different channel dimensions control the input dimension of
                different pathways.
            temp_kernel_sizes (list): list of the p temporal kernel sizes of the
                convolution in the bottleneck. Different temp_kernel_sizes
                control different pathway.
            stride (list): list of the p strides of the bottleneck. Different
                stride control different pathway.
            num_blocks (list): list of p numbers of blocks for each of the
                pathway.
            dim_inner (list): list of the p inner channel dimensions of the
                input. Different channel dimensions control the input dimension
                of different pathways.
            num_groups (list): list of number of p groups for the convolution.
                num_groups=1 is for standard ResNet like networks, and
                num_groups>1 is for ResNeXt like networks.
            num_block_temp_kernel (list): extent the temp_kernel_sizes to
                num_block_temp_kernel blocks, then fill temporal kernel size
                of 1 for the rest of the layers.
            nonlocal_inds (list): If the tuple is empty, no nonlocal layer will
                be added. If the tuple is not empty, add nonlocal layers after
                the index-th block.
            dilation (list): size of dilation for each pathway.
            nonlocal_group (list): list of number of p nonlocal groups. Each
                number controls how to fold temporal dimension to batch
                dimension before applying nonlocal transformation.
                https://github.com/facebookresearch/video-nonlocal-net.
            instantiation (string): different instantiation for nonlocal layer.
                Supports two different instantiation method:
                    "dot_product": normalizing correlation matrix with L2.
                    "softmax": normalizing correlation matrix with Softmax.
            trans_func_name (string): name of the the transformation function apply
                on the network.
            norm_module (nn.Module): nn.Module for the normalization layer. The
                default is nn.BatchNorm3d.
            drop_connect_rate (float): basic rate at which blocks are dropped,
                linearly increases from input to output blocks.
        """
        super(ResStage, self).__init__()
        # Sanity check: cannot ask for more temporal-kernel blocks than the
        # pathway actually has.
        assert all(
            (
                num_block_temp_kernel[i] <= num_blocks[i]
                for i in range(len(temp_kernel_sizes))
            )
        )
        self.num_blocks = num_blocks
        self.nonlocal_group = nonlocal_group
        self._drop_connect_rate = drop_connect_rate
        # Per pathway: repeat the configured temporal kernel sizes across the
        # first num_block_temp_kernel blocks, then use kernel size 1 for the
        # remaining blocks.
        self.temp_kernel_sizes = [
            (temp_kernel_sizes[i] * num_blocks[i])[: num_block_temp_kernel[i]]
            + [1] * (num_blocks[i] - num_block_temp_kernel[i])
            for i in range(len(temp_kernel_sizes))
        ]
        # All per-pathway configuration lists must agree in length.
        assert (
            len(
                {
                    len(dim_in),
                    len(dim_out),
                    len(temp_kernel_sizes),
                    len(stride),
                    len(num_blocks),
                    len(dim_inner),
                    len(num_groups),
                    len(num_block_temp_kernel),
                    len(nonlocal_inds),
                    len(nonlocal_group),
                }
            )
            == 1
        )
        self.num_pathways = len(self.num_blocks)
        self._construct(
            dim_in,
            dim_out,
            stride,
            dim_inner,
            num_groups,
            trans_func_name,
            stride_1x1,
            inplace_relu,
            nonlocal_inds,
            nonlocal_pool,
            instantiation,
            dilation,
            norm_module,
        )
    def _construct(
        self,
        dim_in,
        dim_out,
        stride,
        dim_inner,
        num_groups,
        trans_func_name,
        stride_1x1,
        inplace_relu,
        nonlocal_inds,
        nonlocal_pool,
        instantiation,
        dilation,
        norm_module,
    ):
        """Register `num_blocks[p]` ResBlocks per pathway, with optional
        Nonlocal layers after the blocks listed in `nonlocal_inds[p]`.
        Modules are named "pathway{p}_res{i}" / "pathway{p}_nonlocal{i}" and
        looked up by name in forward()."""
        for pathway in range(self.num_pathways):
            for i in range(self.num_blocks[pathway]):
                # Retrieve the transformation function.
                trans_func = get_trans_func(trans_func_name)
                # Construct the block. Only the first block of a stage changes
                # channels / applies the spatial stride.
                res_block = ResBlock(
                    dim_in[pathway] if i == 0 else dim_out[pathway],
                    dim_out[pathway],
                    self.temp_kernel_sizes[pathway][i],
                    stride[pathway] if i == 0 else 1,
                    trans_func,
                    dim_inner[pathway],
                    num_groups[pathway],
                    stride_1x1=stride_1x1,
                    inplace_relu=inplace_relu,
                    dilation=dilation[pathway],
                    norm_module=norm_module,
                    block_idx=i,
                    drop_connect_rate=self._drop_connect_rate,
                )
                self.add_module("pathway{}_res{}".format(pathway, i), res_block)
                if i in nonlocal_inds[pathway]:
                    nln = Nonlocal(
                        dim_out[pathway],
                        dim_out[pathway] // 2,
                        nonlocal_pool[pathway],
                        instantiation=instantiation,
                        norm_module=norm_module,
                    )
                    self.add_module(
                        "pathway{}_nonlocal{}".format(pathway, i), nln
                    )
    def forward(self, inputs):
        """Run each pathway through its blocks (and optional nonlocal layers)
        independently; returns one output tensor per pathway."""
        output = []
        for pathway in range(self.num_pathways):
            x = inputs[pathway]
            for i in range(self.num_blocks[pathway]):
                m = getattr(self, "pathway{}_res{}".format(pathway, i))
                x = m(x)
                if hasattr(self, "pathway{}_nonlocal{}".format(pathway, i)):
                    nln = getattr(
                        self, "pathway{}_nonlocal{}".format(pathway, i)
                    )
                    b, c, t, h, w = x.shape
                    if self.nonlocal_group[pathway] > 1:
                        # Fold temporal dimension into batch dimension.
                        # (b, c, t, h, w) -> (b*g, c, t//g, h, w) so that the
                        # nonlocal op only attends within a temporal group.
                        x = x.permute(0, 2, 1, 3, 4)
                        x = x.reshape(
                            b * self.nonlocal_group[pathway],
                            t // self.nonlocal_group[pathway],
                            c,
                            h,
                            w,
                        )
                        x = x.permute(0, 2, 1, 3, 4)
                    x = nln(x)
                    if self.nonlocal_group[pathway] > 1:
                        # Fold back to temporal dimension.
                        x = x.permute(0, 2, 1, 3, 4)
                        x = x.reshape(b, t, c, h, w)
                        x = x.permute(0, 2, 1, 3, 4)
            output.append(x)
        return output
| 24,795 | 33.15427 | 83 | py |
STTS | STTS-main/MViT/slowfast/models/attention.py | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
import numpy
import torch
import torch.nn as nn
from einops import rearrange
from slowfast.models.common import DropPath, Mlp
def attention_pool(tensor, pool, thw_shape, has_cls_embed=True, norm=None):
    """Apply a spatiotemporal pooling op to a token sequence.

    Args:
        tensor (Tensor): (B, N, L, C) or (B, L, C), where L = T*H*W
            (+1 when a class token is prepended).
        pool (nn.Module or None): pooling op over (T, H, W); None is a no-op.
        thw_shape (list): current [T, H, W] grid of the tokens.
        has_cls_embed (bool): if True, the first token is a class token that
            bypasses pooling.
        norm (nn.Module or None): optional norm applied after pooling.
    Returns:
        (pooled tensor with the same rank as the input, new [T, H, W] list).
    """
    if pool is None:
        return tensor, thw_shape
    ndim = tensor.ndim
    if ndim == 3:
        # Promote (B, L, C) to (B, 1, L, C) so both layouts share one path.
        tensor = tensor.unsqueeze(1)
    elif ndim != 4:
        raise NotImplementedError(f"Unsupported input dimension {tensor.shape}")
    if has_cls_embed:
        # The class token is not part of the T*H*W grid; set it aside.
        cls_tok, tensor = tensor[:, :, :1, :], tensor[:, :, 1:, :]
    B, N, L, C = tensor.shape
    T, H, W = thw_shape
    # Tokens -> dense (B*N, C, T, H, W) grid for the pooling op.
    grid = tensor.reshape(B * N, T, H, W, C).permute(0, 4, 1, 2, 3).contiguous()
    grid = pool(grid)
    thw_shape = [grid.shape[2], grid.shape[3], grid.shape[4]]
    L_pooled = thw_shape[0] * thw_shape[1] * thw_shape[2]
    # Back to token layout (B, N, L_pooled, C).
    tensor = grid.reshape(B, N, C, L_pooled).transpose(2, 3)
    if has_cls_embed:
        tensor = torch.cat((cls_tok, tensor), dim=2)
    if norm is not None:
        tensor = norm(tensor)
    if ndim == 3:
        tensor = tensor.squeeze(1)
    return tensor, thw_shape
class MultiScaleAttention(nn.Module):
    """Multi-head self-attention with optional space-time pooling of Q/K/V
    (MViT-style).

    Pooling the queries shrinks the output token grid; pooling keys/values
    only reduces the cost of the attention product. Pooling is a depthwise
    3D conv (`mode="conv"`), max pool, or avg pool per head channel.
    """

    def __init__(
        self,
        dim,
        num_heads=8,
        qkv_bias=False,
        drop_rate=0.0,
        kernel_q=(1, 1, 1),
        kernel_kv=(1, 1, 1),
        stride_q=(1, 1, 1),
        stride_kv=(1, 1, 1),
        norm_layer=nn.LayerNorm,
        has_cls_embed=True,
        # Options include `conv`, `avg`, and `max`.
        mode="conv",
        # If True, perform pool before projection. NOTE(review): kept for
        # config compatibility — this forward always projects first.
        pool_first=False,
    ):
        super().__init__()
        self.pool_first = pool_first
        self.drop_rate = drop_rate
        self.num_heads = num_heads
        head_dim = dim // num_heads
        self.scale = head_dim ** -0.5
        self.has_cls_embed = has_cls_embed
        padding_q = [int(q // 2) for q in kernel_q]
        padding_kv = [int(kv // 2) for kv in kernel_kv]
        # Single fused projection for q, k and v. (Removed the leftover
        # commented-out per-tensor Linear layers and the dead
        # `q = k = v = x` assignment that used to sit in forward.)
        self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias)
        self.proj = nn.Linear(dim, dim)
        if drop_rate > 0.0:
            self.proj_drop = nn.Dropout(drop_rate)
        # Skip pooling with kernel and stride size of (1, 1, 1): a no-op.
        if numpy.prod(kernel_q) == 1 and numpy.prod(stride_q) == 1:
            kernel_q = ()
        if numpy.prod(kernel_kv) == 1 and numpy.prod(stride_kv) == 1:
            kernel_kv = ()
        if mode in ("avg", "max"):
            pool_op = nn.MaxPool3d if mode == "max" else nn.AvgPool3d
            self.pool_q = (
                pool_op(kernel_q, stride_q, padding_q, ceil_mode=False)
                if len(kernel_q) > 0
                else None
            )
            self.pool_k = (
                pool_op(kernel_kv, stride_kv, padding_kv, ceil_mode=False)
                if len(kernel_kv) > 0
                else None
            )
            self.pool_v = (
                pool_op(kernel_kv, stride_kv, padding_kv, ceil_mode=False)
                if len(kernel_kv) > 0
                else None
            )
        elif mode == "conv":
            # Depthwise (groups=head_dim) 3D convs act as learned pooling,
            # each followed by a LayerNorm over the head channel.
            self.pool_q = (
                nn.Conv3d(
                    head_dim,
                    head_dim,
                    kernel_q,
                    stride=stride_q,
                    padding=padding_q,
                    groups=head_dim,
                    bias=False,
                )
                if len(kernel_q) > 0
                else None
            )
            self.norm_q = norm_layer(head_dim) if len(kernel_q) > 0 else None
            self.pool_k = (
                nn.Conv3d(
                    head_dim,
                    head_dim,
                    kernel_kv,
                    stride=stride_kv,
                    padding=padding_kv,
                    groups=head_dim,
                    bias=False,
                )
                if len(kernel_kv) > 0
                else None
            )
            self.norm_k = norm_layer(head_dim) if len(kernel_kv) > 0 else None
            self.pool_v = (
                nn.Conv3d(
                    head_dim,
                    head_dim,
                    kernel_kv,
                    stride=stride_kv,
                    padding=padding_kv,
                    groups=head_dim,
                    bias=False,
                )
                if len(kernel_kv) > 0
                else None
            )
            self.norm_v = norm_layer(head_dim) if len(kernel_kv) > 0 else None
        else:
            raise NotImplementedError(f"Unsupported model {mode}")

    def forward(self, x, thw_shape):
        """Attend over `x` of shape (B, N, C) laid out on `thw_shape`
        (plus an optional leading cls token).

        Returns:
            (output tokens (B, N_out, C), output [T, H, W] after query
            pooling).
        """
        B, N, C = x.shape
        qkv = (
            self.qkv(x)
            .reshape(B, N, 3, self.num_heads, C // self.num_heads)
            .permute(2, 0, 3, 1, 4).contiguous()
        )
        q, k, v = qkv[0], qkv[1], qkv[2]
        q, q_shape = attention_pool(
            q,
            self.pool_q,
            thw_shape,
            has_cls_embed=self.has_cls_embed,
            norm=self.norm_q if hasattr(self, "norm_q") else None,
        )
        k, _ = attention_pool(
            k,
            self.pool_k,
            thw_shape,
            has_cls_embed=self.has_cls_embed,
            norm=self.norm_k if hasattr(self, "norm_k") else None,
        )
        v, _ = attention_pool(
            v,
            self.pool_v,
            thw_shape,
            has_cls_embed=self.has_cls_embed,
            norm=self.norm_v if hasattr(self, "norm_v") else None,
        )
        attn = (q @ k.transpose(-2, -1)) * self.scale
        attn = attn.softmax(dim=-1)
        # The token count may have changed if the queries were pooled.
        N = q.shape[2]
        x = (attn @ v).transpose(1, 2).reshape(B, N, C)
        x = self.proj(x)
        if self.drop_rate > 0.0:
            x = self.proj_drop(x)
        return x, q_shape
class MultiScaleBlock(nn.Module):
    """MViT transformer block: pooled multi-scale attention + MLP.

    The query stride (`stride_q`) reduces the space-time token grid; the
    residual branch is max-pooled with a matching kernel so the shapes of
    the two branches line up. When `dim != dim_out`, a linear projection
    lifts the residual to the output width.
    """
    def __init__(
        self,
        dim,
        dim_out,
        num_heads,
        mlp_ratio=4.0,
        qkv_bias=False,
        qk_scale=None,
        drop_rate=0.0,
        drop_path=0.0,
        act_layer=nn.GELU,
        norm_layer=nn.LayerNorm,
        up_rate=None,
        kernel_q=(1, 1, 1),
        kernel_kv=(1, 1, 1),
        stride_q=(1, 1, 1),
        stride_kv=(1, 1, 1),
        mode="conv",
        has_cls_embed=True,
        pool_first=False,
    ):
        # NOTE(review): `qk_scale` is accepted but never used here; the
        # attention scale is derived from head_dim in MultiScaleAttention.
        super().__init__()
        self.dim = dim
        self.dim_out = dim_out
        self.norm1 = norm_layer(dim)
        # Skip-path pooling mirrors the query stride; kernel s + 1 (for
        # s > 1) gives an overlapping pooling window.
        kernel_skip = [s + 1 if s > 1 else s for s in stride_q]
        stride_skip = stride_q
        padding_skip = [int(skip // 2) for skip in kernel_skip]
        self.attn = MultiScaleAttention(
            dim,
            num_heads=num_heads,
            qkv_bias=qkv_bias,
            drop_rate=drop_rate,
            kernel_q=kernel_q,
            kernel_kv=kernel_kv,
            stride_q=stride_q,
            stride_kv=stride_kv,
            norm_layer=nn.LayerNorm,
            has_cls_embed=has_cls_embed,
            mode=mode,
            pool_first=pool_first,
        )
        # Stochastic depth on both residual branches.
        self.drop_path = (
            DropPath(drop_path) if drop_path > 0.0 else nn.Identity()
        )
        self.norm2 = norm_layer(dim)
        mlp_hidden_dim = int(dim * mlp_ratio)
        self.has_cls_embed = has_cls_embed
        # TODO: check the use case for up_rate, and merge the following lines
        if up_rate is not None and up_rate > 1:
            mlp_dim_out = dim * up_rate
        else:
            mlp_dim_out = dim_out
        self.mlp = Mlp(
            in_features=dim,
            hidden_features=mlp_hidden_dim,
            out_features=mlp_dim_out,
            act_layer=act_layer,
            drop_rate=drop_rate,
        )
        if dim != dim_out:
            self.proj = nn.Linear(dim, dim_out)
        self.pool_skip = (
            nn.MaxPool3d(
                kernel_skip, stride_skip, padding_skip, ceil_mode=False
            )
            if len(kernel_skip) > 0
            else None
        )
    def forward(self, x, thw_shape):
        """Run attention + MLP on (B, N, C) tokens laid out on thw_shape.

        Returns the transformed tokens and the new [T, H, W] after query
        pooling.
        """
        x_block, thw_shape_new = self.attn(self.norm1(x), thw_shape)
        # Pool the residual so it matches the (possibly) reduced grid.
        x_res, _ = attention_pool(
            x, self.pool_skip, thw_shape, has_cls_embed=self.has_cls_embed
        )
        x = x_res + self.drop_path(x_block)
        x_norm = self.norm2(x)
        x_mlp = self.mlp(x_norm)
        # Channel expansion: the MLP output already has dim_out channels,
        # so the residual is projected from the normalized activations.
        if self.dim != self.dim_out:
            x = self.proj(x_norm)
        x = x + self.drop_path(x_mlp)
        return x, thw_shape_new
STTS | STTS-main/MViT/slowfast/models/build.py | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
"""Model construction functions."""
import torch
from fvcore.common.registry import Registry
# Global registry mapping model names to constructor callables; model
# classes add themselves via the @MODEL_REGISTRY.register() decorator.
MODEL_REGISTRY = Registry("MODEL")
MODEL_REGISTRY.__doc__ = """
Registry for video model.
The registered object will be called with `obj(cfg)`.
The call should return a `torch.nn.Module` object.
"""
def build_model(cfg, gpu_id=None):
    """
    Builds the video model.
    Args:
        cfg (configs): configs that contains the hyper-parameters to build the
            backbone. Details can be seen in slowfast/config/defaults.py.
        gpu_id (Optional[int]): specify the gpu index to build model.
    Returns:
        torch.nn.Module: the constructed model, moved to GPU (and wrapped in
            DistributedDataParallel when more than one GPU is requested).
    """
    # Sanity-check the GPU request against the actual hardware.
    if torch.cuda.is_available():
        assert (
            cfg.NUM_GPUS <= torch.cuda.device_count()
        ), "Cannot use more GPU devices than available"
    else:
        assert (
            cfg.NUM_GPUS == 0
        ), "Cuda is not available. Please set `NUM_GPUS: 0 for running on CPUs."

    # Look up the registered constructor by name and build the model.
    model = MODEL_REGISTRY.get(cfg.MODEL.MODEL_NAME)(cfg)

    if not cfg.NUM_GPUS:
        # CPU-only run: nothing to place on a device.
        return model

    # Either the explicitly requested GPU or the process's current one.
    device = torch.cuda.current_device() if gpu_id is None else gpu_id
    model = model.cuda(device=device)
    if cfg.NUM_GPUS > 1:
        # Make the model replica operate on the chosen device.
        model = torch.nn.parallel.DistributedDataParallel(
            module=model, device_ids=[device], output_device=device
        )
    return model
| 1,725 | 30.962963 | 80 | py |
STTS | STTS-main/MViT/slowfast/models/optimizer.py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
"""Optimizer."""
import torch
import slowfast.utils.lr_policy as lr_policy
def construct_optimizer(model, cfg):
    """
    Construct an SGD, Adam or AdamW optimizer with per-group weight decay.

    Parameters are split into four groups:
      * ``predictor``       — token-selection predictor params (any module
        whose name contains "predictor"); always trained.
      * ``backbone_bn``     — batchnorm affine params (cfg.BN.WEIGHT_DECAY).
      * ``backbone_nonbn``  — regular weights (cfg.SOLVER.WEIGHT_DECAY).
      * ``backbone_zero``   — 1-d params / biases (and names returned by the
        model's ``no_weight_decay()``), trained without weight decay when
        cfg.SOLVER.ZERO_WD_1D_PARAM is set.

    When cfg.TRAIN.TRAIN_TOPK_ONLY is True, only predictor params are
    optimized. Empty groups are dropped.

    Args:
        model (nn.Module): model whose parameters are to be optimized.
        cfg (config): hyper-parameters (base lr, momentum, weight decay,
            dampening, optimizer choice, etc.).

    Raises:
        NotImplementedError: for an unknown cfg.SOLVER.OPTIMIZING_METHOD.
    """
    train_topk_only = cfg.TRAIN.TRAIN_TOPK_ONLY
    # Batchnorm parameters.
    bn_params = []
    # Non-batchnorm parameters.
    non_bn_params = []
    # Parameters trained with zero weight decay.
    zero_params = []
    # Predictor (token selection) parameters.
    predictor = []
    skip = {}
    if hasattr(model, "no_weight_decay"):
        skip = model.no_weight_decay()
    for name, m in model.named_modules():
        is_bn = isinstance(m, torch.nn.modules.batchnorm._NormBase)
        # recurse=False so each parameter is visited exactly once, at its
        # owning module.
        for p in m.parameters(recurse=False):
            if not p.requires_grad:
                continue
            if 'predictor' in name:
                predictor.append(p)
            elif train_topk_only:
                # Freeze everything except the predictor.
                continue
            elif is_bn:
                bn_params.append(p)
            elif name in skip or (
                (len(p.shape) == 1 or name.endswith(".bias"))
                and cfg.SOLVER.ZERO_WD_1D_PARAM
            ):
                zero_params.append(p)
            else:
                non_bn_params.append(p)
    optim_params = [
        {"params": predictor, "weight_decay": cfg.SOLVER.WEIGHT_DECAY, 'name': 'predictor'},
        {"params": bn_params, "weight_decay": cfg.BN.WEIGHT_DECAY, 'name': 'backbone_bn'},
        {"params": non_bn_params, "weight_decay": cfg.SOLVER.WEIGHT_DECAY, 'name': 'backbone_nonbn'},
        # Fixed group-name typo: was 'bacbone_zero'.
        {"params": zero_params, "weight_decay": 0.0, 'name': 'backbone_zero'},
    ]
    optim_params = [x for x in optim_params if len(x["params"])]
    if cfg.SOLVER.OPTIMIZING_METHOD == "sgd":
        return torch.optim.SGD(
            optim_params,
            lr=cfg.SOLVER.BASE_LR,
            momentum=cfg.SOLVER.MOMENTUM,
            weight_decay=cfg.SOLVER.WEIGHT_DECAY,
            dampening=cfg.SOLVER.DAMPENING,
            nesterov=cfg.SOLVER.NESTEROV,
        )
    elif cfg.SOLVER.OPTIMIZING_METHOD == "adam":
        return torch.optim.Adam(
            optim_params,
            lr=cfg.SOLVER.BASE_LR,
            betas=(0.9, 0.999),
            eps=1e-08,
            weight_decay=cfg.SOLVER.WEIGHT_DECAY,
        )
    elif cfg.SOLVER.OPTIMIZING_METHOD == "adamw":
        return torch.optim.AdamW(
            optim_params,
            lr=cfg.SOLVER.BASE_LR,
            betas=(0.9, 0.999),
            eps=1e-08,
            weight_decay=cfg.SOLVER.WEIGHT_DECAY,
        )
    else:
        raise NotImplementedError(
            "Does not support {} optimizer".format(cfg.SOLVER.OPTIMIZING_METHOD)
        )
def get_epoch_lr(cur_epoch, cfg):
    """
    Retrieves the lr for the given epoch (as specified by the lr policy).
    Args:
        cur_epoch (float): the number of epoch of the current training stage.
        cfg (config): configs defining the lr schedule (policy, base lr,
            warmup, etc.; see slowfast/utils/lr_policy.py).
    Returns:
        The learning rate(s) computed by the configured lr policy.
    """
    return lr_policy.get_lr_at_epoch(cfg, cur_epoch)
def set_lr(optimizer, new_lr, cfg):
    """
    Sets the optimizer lr to the specified value.
    Args:
        optimizer (optim): the optimizer using to optimize the current network.
        new_lr (sequence): learning rates to apply. In finetune mode
            (cfg.TRAIN.FINETUNE), new_lr[0] is used for the 'predictor'
            param group and new_lr[1] for all backbone groups; otherwise
            new_lr[0] is applied to every group.
        cfg (config): config providing the TRAIN.FINETUNE flag.
    """
    if cfg.TRAIN.FINETUNE:
        for param_group in optimizer.param_groups:
            if param_group['name'] == 'predictor':
                param_group['lr'] = new_lr[0]
            else:
                param_group['lr'] = new_lr[1]
    else:
        for param_group in optimizer.param_groups:
            param_group['lr'] = new_lr[0]
| 4,155 | 30.484848 | 101 | py |
STTS | STTS-main/MViT/slowfast/datasets/video_container.py | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
import av
def get_video_container(path_to_vid, multi_thread_decode=False, backend="pyav"):
    """
    Given the path to the video, return the pyav video container.
    Args:
        path_to_vid (str): path to the video.
        multi_thread_decode (bool): if True, perform multi-thread decoding
            (pyav backend only).
        backend (str): decoder backend, options include `pyav` and
            `torchvision`, default is `pyav`.
    Returns:
        container: an opened pyav container, or the raw video bytes for the
            `torchvision` backend.
    Raises:
        NotImplementedError: for an unrecognized backend.
    """
    if backend == "pyav":
        container = av.open(path_to_vid)
        if multi_thread_decode:
            # Enable multiple threads for decoding.
            container.streams.video[0].thread_type = "AUTO"
        return container

    if backend == "torchvision":
        # torchvision decodes from an in-memory byte string later on.
        with open(path_to_vid, "rb") as fp:
            return fp.read()

    raise NotImplementedError("Unknown backend {}".format(backend))
| 1,033 | 33.466667 | 80 | py |
STTS | STTS-main/MViT/slowfast/datasets/transform.py | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
import math
import numpy as np
# import cv2
import random
import torch
import torchvision.transforms.functional as F
from PIL import Image
from torchvision import transforms
from .rand_augment import rand_augment_transform
from .random_erasing import RandomErasing
# Human-readable names for PIL resampling filters (used by __repr__ of the
# transforms defined below).
_pil_interpolation_to_str = {
    Image.NEAREST: "PIL.Image.NEAREST",
    Image.BILINEAR: "PIL.Image.BILINEAR",
    Image.BICUBIC: "PIL.Image.BICUBIC",
    Image.LANCZOS: "PIL.Image.LANCZOS",
    Image.HAMMING: "PIL.Image.HAMMING",
    Image.BOX: "PIL.Image.BOX",
}
# Filters sampled from when interpolation="random" is requested.
_RANDOM_INTERPOLATION = (Image.BILINEAR, Image.BICUBIC)
def _pil_interp(method):
    """Map an interpolation name to its PIL resampling filter.

    Unrecognized names (including "bilinear") fall back to BILINEAR.
    """
    named_filters = {
        "bicubic": Image.BICUBIC,
        "lanczos": Image.LANCZOS,
        "hamming": Image.HAMMING,
    }
    return named_filters.get(method, Image.BILINEAR)
def random_short_side_scale_jitter(
    images, min_size, max_size, boxes=None, inverse_uniform_sampling=False
):
    """
    Randomly rescale frames so their short side lies in [min_size, max_size].
    Args:
        images (tensor): frames of shape
            `num frames` x `channel` x `height` x `width`.
        min_size (int): minimal short-side size.
        max_size (int): maximal short-side size.
        boxes (ndarray): optional boxes (`num boxes` x 4), rescaled with the
            frames.
        inverse_uniform_sampling (bool): if True, sample uniformly in
            [1 / max_scale, 1 / min_scale] and take a reciprocal to get the
            scale; otherwise sample uniformly from [min_scale, max_scale].
    Returns:
        (tensor): rescaled frames.
        (ndarray or None): rescaled boxes.
    """
    if inverse_uniform_sampling:
        target = int(
            round(1.0 / np.random.uniform(1.0 / max_size, 1.0 / min_size))
        )
    else:
        target = int(round(np.random.uniform(min_size, max_size)))

    height, width = images.shape[2], images.shape[3]
    # Already at the requested short side: nothing to do.
    if min(width, height) == target:
        return images, boxes

    new_width, new_height = target, target
    if width < height:
        # Width is the short side; scale height proportionally.
        new_height = int(math.floor((float(height) / width) * target))
        if boxes is not None:
            boxes = boxes * float(new_height) / height
    else:
        new_width = int(math.floor((float(width) / height) * target))
        if boxes is not None:
            boxes = boxes * float(new_width) / width

    scaled = torch.nn.functional.interpolate(
        images,
        size=(new_height, new_width),
        mode="bilinear",
        align_corners=False,
    )
    return scaled, boxes
def crop_boxes(boxes, x_offset, y_offset):
    """
    Translate bounding boxes into a crop's coordinate frame.
    Args:
        boxes (ndarray or None): boxes of shape `num boxes` x 4 as
            (x1, y1, x2, y2).
        x_offset (int): cropping offset in the x axis.
        y_offset (int): cropping offset in the y axis.
    Returns:
        ndarray or None: a shifted copy of the boxes (input not mutated).
    """
    shifted = boxes.copy()
    # Columns 0/2 are x coordinates, 1/3 are y coordinates.
    shifted[:, 0::2] = boxes[:, 0::2] - x_offset
    shifted[:, 1::2] = boxes[:, 1::2] - y_offset
    return shifted
def random_crop(images, size, boxes=None):
    """
    Perform random spatial crop on the given images and corresponding boxes.
    Args:
        images (tensor): images to perform random crop. The dimension is
            `num frames` x `channel` x `height` x `width`.
        size (int): the size of height and width to crop on the image.
        boxes (ndarray or None): optional. Corresponding boxes to images.
            Dimension is `num boxes` x 4.
    Returns:
        cropped (tensor): cropped images with dimension of
            `num frames` x `channel` x `size` x `size`.
        cropped_boxes (ndarray or None): the cropped boxes with dimension of
            `num boxes` x 4.
    """
    if images.shape[2] == size and images.shape[3] == size:
        # Bug fix: previously returned only `images` here, which broke
        # callers that unpack `cropped, cropped_boxes = random_crop(...)`.
        return images, boxes
    height = images.shape[2]
    width = images.shape[3]
    y_offset = 0
    if height > size:
        y_offset = int(np.random.randint(0, height - size))
    x_offset = 0
    if width > size:
        x_offset = int(np.random.randint(0, width - size))
    cropped = images[
        :, :, y_offset : y_offset + size, x_offset : x_offset + size
    ]
    cropped_boxes = (
        crop_boxes(boxes, x_offset, y_offset) if boxes is not None else None
    )
    return cropped, cropped_boxes
def horizontal_flip(prob, images, boxes=None):
    """
    Horizontally flip the images (and boxes) with probability `prob`.
    Args:
        prob (float): probability to flip the images.
        images (tensor): frames shaped `num frames` x `channel` x `height`
            x `width` (or `channel` x `height` x `width`).
        boxes (ndarray or None): optional boxes (`num boxes` x 4).
    Returns:
        images (tensor): possibly flipped frames.
        flipped_boxes (ndarray or None): a copy of the boxes, mirrored when
            the flip was applied.
    """
    flipped_boxes = None if boxes is None else boxes.copy()

    if np.random.uniform() < prob:
        images = images.flip((-1))
        ndim = len(images.shape)
        if ndim == 3:
            width = images.shape[2]
        elif ndim == 4:
            width = images.shape[3]
        else:
            raise NotImplementedError("Dimension does not supported")
        if boxes is not None:
            # Mirror x1/x2 (swapped so x1 <= x2 still holds).
            flipped_boxes[:, [0, 2]] = width - boxes[:, [2, 0]] - 1

    return images, flipped_boxes
def uniform_crop(images, size, spatial_idx, boxes=None, scale_size=None):
    """
    Take one of three deterministic spatial crops from the frames.
    Args:
        images (tensor): frames shaped `num frames` x `channel` x `height`
            x `width` (or `channel` x `height` x `width`).
        size (int): side length of the square crop.
        spatial_idx (int): 0, 1, or 2 for left/top, center, right/bottom
            crop (the varying axis is the longer one).
        boxes (ndarray or None): optional boxes (`num boxes` x 4).
        scale_size (int): optional; resize the short side to this value
            before cropping.
    Returns:
        cropped (tensor): cropped frames of spatial size `size` x `size`.
        cropped_boxes (ndarray or None): boxes shifted into the crop.
    """
    assert spatial_idx in [0, 1, 2]
    ndim = len(images.shape)
    if ndim == 3:
        images = images.unsqueeze(0)
    height, width = images.shape[2], images.shape[3]

    if scale_size is not None:
        # Rescale so the short side equals scale_size.
        if width <= height:
            width, height = scale_size, int(height / width * scale_size)
        else:
            width, height = int(width / height * scale_size), scale_size
        images = torch.nn.functional.interpolate(
            images,
            size=(height, width),
            mode="bilinear",
            align_corners=False,
        )

    # Default to the centered crop; the edge crops override one offset.
    y_offset = int(math.ceil((height - size) / 2))
    x_offset = int(math.ceil((width - size) / 2))
    if height > width:
        y_offset = {0: 0, 2: height - size}.get(spatial_idx, y_offset)
    else:
        x_offset = {0: 0, 2: width - size}.get(spatial_idx, x_offset)

    cropped = images[
        :, :, y_offset : y_offset + size, x_offset : x_offset + size
    ]
    cropped_boxes = (
        crop_boxes(boxes, x_offset, y_offset) if boxes is not None else None
    )
    if ndim == 3:
        cropped = cropped.squeeze(0)
    return cropped, cropped_boxes
def clip_boxes_to_image(boxes, height, width):
    """
    Clip boxes to lie inside an image of the given height and width.
    Args:
        boxes (ndarray): boxes of shape `num boxes` x 4 as (x1, y1, x2, y2).
        height (int): image height.
        width (int): image width.
    Returns:
        ndarray: a clipped copy of the boxes (input not mutated), with x in
            [0, width - 1] and y in [0, height - 1].
    """
    clipped = boxes.copy()
    clipped[:, [0, 2]] = np.clip(boxes[:, [0, 2]], 0.0, width - 1.0)
    clipped[:, [1, 3]] = np.clip(boxes[:, [1, 3]], 0.0, height - 1.0)
    return clipped
def blend(images1, images2, alpha):
    """
    Convex combination of two image stacks: alpha * images1 +
    (1 - alpha) * images2.
    Args:
        images1 (tensor): first operand, `num frames` x `channel` x
            `height` x `width`.
        images2 (tensor): second operand, same shape.
        alpha (float): weight given to images1.
    Returns:
        tensor: the blended images.
    """
    return alpha * images1 + (1.0 - alpha) * images2
def grayscale(images):
    """
    Convert BGR frames to a 3-channel grayscale tensor.
    Args:
        images (tensor): frames of shape `num frames` x `channel` x
            `height` x `width`, with channels in BGR order.
    Returns:
        img_gray (tensor): same shape, every channel replaced by the
            Rec. 601 luma of the input.
    """
    # R -> 0.299, G -> 0.587, B -> 0.114 (channel order is BGR, so index 2
    # is red).
    gray_channel = (
        0.299 * images[:, 2] + 0.587 * images[:, 1] + 0.114 * images[:, 0]
    )
    # Fix: `images.clone()` instead of `torch.tensor(images)` — the latter
    # emits a copy-construct warning and silently detaches from autograd.
    img_gray = images.clone()
    img_gray[:, 0] = gray_channel
    img_gray[:, 1] = gray_channel
    img_gray[:, 2] = gray_channel
    return img_gray
def color_jitter(images, img_brightness=0, img_contrast=0, img_saturation=0):
    """
    Apply the enabled color jitters (brightness/contrast/saturation) in a
    random order. Channels are expected in BGR order.
    Args:
        images (tensor): frames of shape `num frames` x `channel` x
            `height` x `width`.
        img_brightness (float): jitter ratio for brightness (0 disables).
        img_contrast (float): jitter ratio for contrast (0 disables).
        img_saturation (float): jitter ratio for saturation (0 disables).
    Returns:
        tensor: the jittered images (input returned unchanged when all
            ratios are 0).
    """
    # Build the enabled ops in the canonical order, then shuffle.
    ops = []
    if img_brightness != 0:
        ops.append(lambda imgs: brightness_jitter(img_brightness, imgs))
    if img_contrast != 0:
        ops.append(lambda imgs: contrast_jitter(img_contrast, imgs))
    if img_saturation != 0:
        ops.append(lambda imgs: saturation_jitter(img_saturation, imgs))
    if len(ops) > 0:
        for op_idx in np.random.permutation(len(ops)):
            images = ops[op_idx](images)
    return images
def brightness_jitter(var, images):
    """
    Scale image brightness by a random factor alpha in [1 - var, 1 + var].
    Args:
        var (float): jitter ratio for brightness.
        images (tensor): frames of shape `num frames` x `channel` x
            `height` x `width` (BGR).
    Returns:
        tensor: the brightness-jittered images.
    """
    alpha = 1.0 + np.random.uniform(-var, var)
    # Blending toward an all-black image scales intensities by alpha.
    black = torch.zeros(images.shape)
    return images * alpha + black * (1 - alpha)
def contrast_jitter(var, images):
    """
    Jitter image contrast by blending toward the per-clip mean intensity
    with a random weight in [1 - var, 1 + var]. Channels are BGR.
    Args:
        var (float): jitter ratio for contrast.
        images (tensor): frames of shape `num frames` x `channel` x
            `height` x `width`.
    Returns:
        tensor: the contrast-jittered images.
    """
    weight = 1.0 + np.random.uniform(-var, var)
    # Collapse each frame to its mean gray level, then blend.
    mean_img = grayscale(images)
    mean_img[:] = torch.mean(mean_img, dim=(1, 2, 3), keepdim=True)
    return blend(images, mean_img, weight)
def saturation_jitter(var, images):
    """
    Jitter image saturation by blending toward the grayscale version with a
    random weight in [1 - var, 1 + var]. Channels are BGR.
    Args:
        var (float): jitter ratio for saturation.
        images (tensor): frames of shape `num frames` x `channel` x
            `height` x `width`.
    Returns:
        tensor: the saturation-jittered images.
    """
    weight = 1.0 + np.random.uniform(-var, var)
    return blend(images, grayscale(images), weight)
def lighting_jitter(images, alphastd, eigval, eigvec):
    """
    AlexNet-style PCA lighting jitter.
    Args:
        images (tensor): frames of shape `num frames` x `channel` x
            `height` x `width` (or `channel` x `height` x `width`), with
            channels in BGR order.
        alphastd (float): std of the Gaussian the PCA weights are drawn
            from (0 disables the jitter and returns the input unchanged).
        eigval (list): PCA eigenvalues.
        eigvec (list[list]): PCA eigenvectors.
    Returns:
        tensor: the jittered images.
    """
    if alphastd == 0:
        return images

    # Per-call RGB offsets: eigvec @ (alpha * eigval), one scalar per channel.
    alpha = np.random.normal(0, alphastd, size=(1, 3))
    rgb = np.sum(
        np.array(eigvec)
        * np.repeat(alpha, 3, axis=0)
        * np.repeat(np.reshape(eigval, (1, 3)), 3, axis=0),
        axis=1,
    )

    ndim = len(images.shape)
    if ndim == 3:
        channel_dim = 0  # C H W
    elif ndim == 4:
        channel_dim = 1  # T C H W
    else:
        raise NotImplementedError(f"Unsupported dimension {len(images.shape)}")

    out_images = torch.zeros_like(images)
    for idx in range(images.shape[channel_dim]):
        # rgb is in RGB order while channels are BGR, hence the 2 - idx.
        if ndim == 3:
            out_images[idx] = images[idx] + rgb[2 - idx]
        else:
            out_images[:, idx] = images[:, idx] + rgb[2 - idx]
    return out_images
def color_normalization(images, mean, stddev):
    """
    Normalize each channel: (x - mean[c]) / stddev[c].
    Args:
        images (tensor): frames of shape `num frames` x `channel` x
            `height` x `width` (or `channel` x `height` x `width`).
        mean (list): per-channel means.
        stddev (list): per-channel standard deviations.
    Returns:
        tensor: the normalized images.
    """
    ndim = len(images.shape)
    if ndim == 3:
        channel_axis = 0
    elif ndim == 4:
        channel_axis = 1
    else:
        raise NotImplementedError(f"Unsupported dimension {len(images.shape)}")
    assert (
        len(mean) == images.shape[channel_axis]
    ), "channel mean not computed properly"
    assert (
        len(stddev) == images.shape[channel_axis]
    ), "channel stddev not computed properly"

    out_images = torch.zeros_like(images)
    for idx in range(len(mean)):
        if ndim == 3:
            out_images[idx] = (images[idx] - mean[idx]) / stddev[idx]
        else:
            out_images[:, idx] = (images[:, idx] - mean[idx]) / stddev[idx]
    return out_images
def _get_param_spatial_crop(
scale, ratio, height, width, num_repeat=10, log_scale=True, switch_hw=False
):
"""
Given scale, ratio, height and width, return sampled coordinates of the videos.
"""
for _ in range(num_repeat):
area = height * width
target_area = random.uniform(*scale) * area
if log_scale:
log_ratio = (math.log(ratio[0]), math.log(ratio[1]))
aspect_ratio = math.exp(random.uniform(*log_ratio))
else:
aspect_ratio = random.uniform(*ratio)
w = int(round(math.sqrt(target_area * aspect_ratio)))
h = int(round(math.sqrt(target_area / aspect_ratio)))
if np.random.uniform() < 0.5 and switch_hw:
w, h = h, w
if 0 < w <= width and 0 < h <= height:
i = random.randint(0, height - h)
j = random.randint(0, width - w)
return i, j, h, w
# Fallback to central crop
in_ratio = float(width) / float(height)
if in_ratio < min(ratio):
w = width
h = int(round(w / min(ratio)))
elif in_ratio > max(ratio):
h = height
w = int(round(h * max(ratio)))
else: # whole image
w = width
h = height
i = (height - h) // 2
j = (width - w) // 2
return i, j, h, w
def random_resized_crop(
    images,
    target_height,
    target_width,
    scale=(0.8, 1.0),
    ratio=(3.0 / 4.0, 4.0 / 3.0),
):
    """
    Inception-style random resized crop: sample a patch of random area
    (from `scale`) and aspect ratio (from `ratio`), then resize it to the
    target size.
    Args:
        images: frames to crop and resize (`num frames` x `channel` x
            `height` x `width`).
        target_height: desired output height.
        target_width: desired output width.
        scale: area range of the sampled patch.
        ratio: aspect-ratio range of the sampled patch.
    """
    frame_h, frame_w = images.shape[2], images.shape[3]
    top, left, crop_h, crop_w = _get_param_spatial_crop(
        scale, ratio, frame_h, frame_w
    )
    patch = images[:, :, top : top + crop_h, left : left + crop_w]
    return torch.nn.functional.interpolate(
        patch,
        size=(target_height, target_width),
        mode="bilinear",
        align_corners=False,
    )
def random_resized_crop_with_shift(
    images,
    target_height,
    target_width,
    scale=(0.8, 1.0),
    ratio=(3.0 / 4.0, 4.0 / 3.0),
):
    """
    Like random_resized_crop, but samples two crop boxes — one for the
    first and one for the last frame — and linearly interpolates the box
    for every frame in between, producing a smooth pan/zoom.
    Args:
        images: frames in `channel` x `num frames` x `height` x `width`
            layout.
        target_height: desired output height.
        target_width: desired output width.
        scale: area range of the sampled patches.
        ratio: aspect-ratio range of the sampled patches.
    Returns:
        tensor: `channel` x `num frames` x `target_height` x `target_width`.
    """
    c = images.shape[0]
    t = images.shape[1]
    height = images.shape[2]
    width = images.shape[3]
    i, j, h, w = _get_param_spatial_crop(scale, ratio, height, width)
    i_, j_, h_, w_ = _get_param_spatial_crop(scale, ratio, height, width)
    # Per-frame box coordinates, interpolated between the two samples.
    # (Renamed the comprehension variable — the original shadowed `i`.)
    i_s = [int(v) for v in torch.linspace(i, i_, steps=t).tolist()]
    j_s = [int(v) for v in torch.linspace(j, j_, steps=t).tolist()]
    h_s = [int(v) for v in torch.linspace(h, h_, steps=t).tolist()]
    w_s = [int(v) for v in torch.linspace(w, w_, steps=t).tolist()]
    # Fix: allocate with the actual channel count instead of hard-coding 3,
    # so non-RGB inputs work too.
    out = torch.zeros((c, t, target_height, target_width))
    for ind in range(t):
        out[:, ind : ind + 1, :, :] = torch.nn.functional.interpolate(
            images[
                :,
                ind : ind + 1,
                i_s[ind] : i_s[ind] + h_s[ind],
                j_s[ind] : j_s[ind] + w_s[ind],
            ],
            size=(target_height, target_width),
            mode="bilinear",
            align_corners=False,
        )
    return out
def create_random_augment(
    input_size,
    auto_augment=None,
    interpolation="bilinear",
):
    """
    Build the video randaug transform.
    Args:
        input_size: size of the input video (tuple or int).
        auto_augment: randaug spec string, e.g. "rand-m7-n4-mstd0.5-inc1"
            (m is the magnitude and n is the number of ops to apply).
        interpolation: interpolation method name.
    Raises:
        NotImplementedError: if no spec is given or it is not a "rand-..."
            spec.
    """
    img_size = input_size[-2:] if isinstance(input_size, tuple) else input_size

    if auto_augment:
        assert isinstance(auto_augment, str)
        min_dim = min(img_size) if isinstance(img_size, tuple) else img_size
        aa_params = {"translate_const": int(min_dim * 0.45)}
        if interpolation and interpolation != "random":
            aa_params["interpolation"] = _pil_interp(interpolation)
        if auto_augment.startswith("rand"):
            return transforms.Compose(
                [rand_augment_transform(auto_augment, aa_params)]
            )
    raise NotImplementedError
def random_sized_crop_img(
    im,
    size,
    jitter_scale=(0.08, 1.0),
    jitter_aspect=(3.0 / 4.0, 4.0 / 3.0),
    max_iter=10,
):
    """
    Inception-style crop of a single image, resized to `size` x `size`.
    Args:
        im (tensor): image of shape `channel` x `height` x `width`.
        size (int): output side length.
        jitter_scale: area range of the sampled crop.
        jitter_aspect: aspect-ratio range of the sampled crop.
        max_iter: sampling attempts before falling back to a central crop.
    """
    assert (
        len(im.shape) == 3
    ), "Currently only support image for random_sized_crop"
    height, width = im.shape[1:3]
    top, left, crop_h, crop_w = _get_param_spatial_crop(
        scale=jitter_scale,
        ratio=jitter_aspect,
        height=height,
        width=width,
        num_repeat=max_iter,
        log_scale=False,
        switch_hw=True,
    )
    patch = im[:, top : top + crop_h, left : left + crop_w]
    # interpolate expects a batch dim; add and strip it around the resize.
    return torch.nn.functional.interpolate(
        patch.unsqueeze(0),
        size=(size, size),
        mode="bilinear",
        align_corners=False,
    ).squeeze(0)
# The following code are modified based on timm lib, we will replace the following
# contents with dependency from PyTorchVideo.
# https://github.com/facebookresearch/pytorchvideo
class RandomResizedCropAndInterpolation:
    """Crop the given PIL Image to random size and aspect ratio with random interpolation.
    A crop of random size (default: of 0.08 to 1.0) of the original size and a random
    aspect ratio (default: of 3/4 to 4/3) of the original aspect ratio is made. This crop
    is finally resized to given size.
    This is popularly used to train the Inception networks.
    Args:
        size: expected output size of each edge
        scale: range of size of the origin size cropped
        ratio: range of aspect ratio of the origin aspect ratio cropped
        interpolation: Default: PIL.Image.BILINEAR
    """
    def __init__(
        self,
        size,
        scale=(0.08, 1.0),
        ratio=(3.0 / 4.0, 4.0 / 3.0),
        interpolation="bilinear",
    ):
        if isinstance(size, tuple):
            self.size = size
        else:
            self.size = (size, size)
        # NOTE(review): misordered ranges only print a warning here; they
        # are not corrected or rejected.
        if (scale[0] > scale[1]) or (ratio[0] > ratio[1]):
            print("range should be of kind (min, max)")
        # "random" stores a tuple of filters; __call__ picks one per image.
        if interpolation == "random":
            self.interpolation = _RANDOM_INTERPOLATION
        else:
            self.interpolation = _pil_interp(interpolation)
        self.scale = scale
        self.ratio = ratio
    @staticmethod
    def get_params(img, scale, ratio):
        """Get parameters for ``crop`` for a random sized crop.
        Args:
            img (PIL Image): Image to be cropped.
            scale (tuple): range of size of the origin size cropped
            ratio (tuple): range of aspect ratio of the origin aspect ratio cropped
        Returns:
            tuple: params (i, j, h, w) to be passed to ``crop`` for a random
                sized crop.
        """
        # PIL's img.size is (width, height).
        area = img.size[0] * img.size[1]
        # Rejection-sample up to 10 crops; fall back to a central crop.
        for _ in range(10):
            target_area = random.uniform(*scale) * area
            log_ratio = (math.log(ratio[0]), math.log(ratio[1]))
            aspect_ratio = math.exp(random.uniform(*log_ratio))
            w = int(round(math.sqrt(target_area * aspect_ratio)))
            h = int(round(math.sqrt(target_area / aspect_ratio)))
            if w <= img.size[0] and h <= img.size[1]:
                i = random.randint(0, img.size[1] - h)
                j = random.randint(0, img.size[0] - w)
                return i, j, h, w
        # Fallback to central crop
        in_ratio = img.size[0] / img.size[1]
        if in_ratio < min(ratio):
            w = img.size[0]
            h = int(round(w / min(ratio)))
        elif in_ratio > max(ratio):
            h = img.size[1]
            w = int(round(h * max(ratio)))
        else:  # whole image
            w = img.size[0]
            h = img.size[1]
        i = (img.size[1] - h) // 2
        j = (img.size[0] - w) // 2
        return i, j, h, w
    def __call__(self, img):
        """
        Args:
            img (PIL Image): Image to be cropped and resized.
        Returns:
            PIL Image: Randomly cropped and resized image.
        """
        i, j, h, w = self.get_params(img, self.scale, self.ratio)
        if isinstance(self.interpolation, (tuple, list)):
            # interpolation="random": pick a filter per image.
            interpolation = random.choice(self.interpolation)
        else:
            interpolation = self.interpolation
        return F.resized_crop(img, i, j, h, w, self.size, interpolation)
    def __repr__(self):
        if isinstance(self.interpolation, (tuple, list)):
            interpolate_str = " ".join(
                [_pil_interpolation_to_str[x] for x in self.interpolation]
            )
        else:
            interpolate_str = _pil_interpolation_to_str[self.interpolation]
        format_string = self.__class__.__name__ + "(size={0}".format(self.size)
        format_string += ", scale={0}".format(
            tuple(round(s, 4) for s in self.scale)
        )
        format_string += ", ratio={0}".format(
            tuple(round(r, 4) for r in self.ratio)
        )
        format_string += ", interpolation={0})".format(interpolate_str)
        return format_string
def transforms_imagenet_train(
    img_size=224,
    scale=None,
    ratio=None,
    hflip=0.5,
    vflip=0.0,
    color_jitter=0.4,
    auto_augment=None,
    interpolation="random",
    use_prefetcher=False,
    mean=(0.485, 0.456, 0.406),
    std=(0.229, 0.224, 0.225),
    re_prob=0.0,
    re_mode="const",
    re_count=1,
    re_num_splits=0,
    separate=False,
):
    """
    Build the ImageNet-style training augmentation pipeline:
    random-resized crop -> optional flips -> (auto-augment OR color jitter)
    -> ToTensor/Normalize -> optional RandomErasing.

    Args:
        img_size (int or tuple): output crop size; a tuple uses its last two entries.
        scale (tuple or None): crop area range; defaults to (0.08, 1.0).
        ratio (tuple or None): crop aspect-ratio range; defaults to (3/4, 4/3).
        hflip, vflip (float): flip probabilities; 0 disables the flip transform.
        color_jitter (float, tuple or None): jitter magnitude(s); only used
            when ``auto_augment`` is not set.
        auto_augment (str or None): auto-augment spec string; only "rand*"
            policies are implemented here.
        interpolation (str): PIL interpolation name, or "random".
        use_prefetcher (bool): accepted for API compatibility; not used in
            this function body.
        mean, std (tuple): normalization statistics.
        re_prob, re_mode, re_count, re_num_splits: RandomErasing parameters;
            erasing is appended only when ``re_prob`` > 0.
        separate (bool): if True, return the three stages as separate
            Compose objects instead of one fused pipeline.

    If separate==True, the transforms are returned as a tuple of 3 separate transforms
    for use in a mixing dataset that passes
    * all data through the first (primary) transform, called the 'clean' data
    * a portion of the data through the secondary transform
    * normalizes and converts the branches above with the third, final transform
    """
    if isinstance(img_size, tuple):
        img_size = img_size[-2:]
    else:
        img_size = img_size  # no-op; kept for symmetry with the tuple branch
    scale = tuple(scale or (0.08, 1.0))  # default imagenet scale range
    ratio = tuple(
        ratio or (3.0 / 4.0, 4.0 / 3.0)
    )  # default imagenet ratio range
    primary_tfl = [
        RandomResizedCropAndInterpolation(
            img_size, scale=scale, ratio=ratio, interpolation=interpolation
        )
    ]
    if hflip > 0.0:
        primary_tfl += [transforms.RandomHorizontalFlip(p=hflip)]
    if vflip > 0.0:
        primary_tfl += [transforms.RandomVerticalFlip(p=vflip)]
    secondary_tfl = []
    if auto_augment:
        assert isinstance(auto_augment, str)
        if isinstance(img_size, tuple):
            img_size_min = min(img_size)
        else:
            img_size_min = img_size
        aa_params = dict(
            translate_const=int(img_size_min * 0.45),
            img_mean=tuple([min(255, round(255 * x)) for x in mean]),
        )
        # "random" has no single PIL code, so leave the policy's default.
        if interpolation and interpolation != "random":
            aa_params["interpolation"] = _pil_interp(interpolation)
        if auto_augment.startswith("rand"):
            secondary_tfl += [rand_augment_transform(auto_augment, aa_params)]
        elif auto_augment.startswith("augmix"):
            raise NotImplementedError("Augmix not implemented")
        else:
            raise NotImplementedError("Auto aug not implemented")
    elif color_jitter is not None:
        # color jitter is enabled when not using AA
        if isinstance(color_jitter, (list, tuple)):
            # color jitter should be a 3-tuple/list if spec brightness/contrast/saturation
            # or 4 if also augmenting hue
            assert len(color_jitter) in (3, 4)
        else:
            # if it's a scalar, duplicate for brightness, contrast, and saturation, no hue
            color_jitter = (float(color_jitter),) * 3
        secondary_tfl += [transforms.ColorJitter(*color_jitter)]
    final_tfl = []
    final_tfl += [
        transforms.ToTensor(),
        transforms.Normalize(mean=torch.tensor(mean), std=torch.tensor(std)),
    ]
    if re_prob > 0.0:
        final_tfl.append(
            RandomErasing(
                re_prob,
                mode=re_mode,
                max_count=re_count,
                num_splits=re_num_splits,
                device="cpu",
                cube=False,
            )
        )
    if separate:
        return (
            transforms.Compose(primary_tfl),
            transforms.Compose(secondary_tfl),
            transforms.Compose(final_tfl),
        )
    else:
        return transforms.Compose(primary_tfl + secondary_tfl + final_tfl)
| 30,520 | 33.101676 | 90 | py |
STTS | STTS-main/MViT/slowfast/datasets/utils.py | #!/usr/bin/env python3
import logging
import numpy as np
import os
import random
import time
from collections import defaultdict
import cv2
import torch
from torch.utils.data.distributed import DistributedSampler
from slowfast.utils.env import pathmgr
from . import transform as transform
logger = logging.getLogger(__name__)
def retry_load_images(image_paths, retry=10, backend="pytorch"):
    """
    This function is to load images with support of retrying for failed load.
    Args:
        image_paths (list): paths of images needed to be loaded.
        retry (int, optional): maximum time of loading retrying. Defaults to 10.
        backend (str): `pytorch` or `cv2`. With `pytorch`, the decoded images
            are stacked into a single tensor; otherwise a list of BGR ndarrays
            is returned.
    Returns:
        imgs (tensor or list): the loaded images.
    Raises:
        Exception: if any image still fails to decode after `retry` attempts.
    """
    for i in range(retry):
        imgs = []
        for image_path in image_paths:
            with pathmgr.open(image_path, "rb") as f:
                img_str = np.frombuffer(f.read(), np.uint8)
                img = cv2.imdecode(img_str, flags=cv2.IMREAD_COLOR)
                imgs.append(img)
        # cv2.imdecode returns None on failure; only accept a fully decoded batch.
        if all(img is not None for img in imgs):
            if backend == "pytorch":
                imgs = torch.as_tensor(np.stack(imgs))
            return imgs
        else:
            # Logger.warn is deprecated in the stdlib logging module;
            # logger.warning is the supported spelling.
            logger.warning("Reading failed. Will retry.")
            time.sleep(1.0)
        if i == retry - 1:
            raise Exception("Failed to load images {}".format(image_paths))
def get_sequence(center_idx, half_len, sample_rate, num_frames):
    """
    Sample frame indices for the clip centered at `center_idx`.
    Args:
        center_idx (int): center frame idx for current clip
        half_len (int): half of the clip length
        sample_rate (int): sampling rate for sampling frames inside of the clip
        num_frames (int): number of frames in the whole video
    Returns:
        seq (list): indices of sampled frames, clamped to [0, num_frames - 1].
    """
    last_frame = num_frames - 1
    window = range(center_idx - half_len, center_idx + half_len, sample_rate)
    # Clamp out-of-range indices to the first/last valid frame.
    return [min(max(idx, 0), last_frame) for idx in window]
def pack_pathway_output(cfg, frames):
    """
    Prepare output as a list of tensors, one per model pathway.
    Args:
        cfg (CfgNode): config providing MODEL.ARCH, the pathway arch lists,
            and SLOWFAST.ALPHA for multi-pathway models.
        frames (tensor): sampled clip with dimension
            `channel` x `num frames` x `height` x `width`.
    Returns:
        frame_list (list): [frames] for single-pathway models, or
            [slow_pathway, fast_pathway] for multi-pathway models.
    """
    if cfg.DATA.REVERSE_INPUT_CHANNEL:
        # Swap the channel order (e.g. RGB <-> BGR).
        frames = frames[[2, 1, 0], :, :, :]
    if cfg.MODEL.ARCH in cfg.MODEL.SINGLE_PATHWAY_ARCH:
        return [frames]
    if cfg.MODEL.ARCH in cfg.MODEL.MULTI_PATHWAY_ARCH:
        # Fast pathway keeps every frame; the slow pathway temporally
        # subsamples by a factor of SLOWFAST.ALPHA.
        num_frames = frames.shape[1]
        slow_indices = torch.linspace(
            0, num_frames - 1, num_frames // cfg.SLOWFAST.ALPHA
        ).long()
        slow_pathway = torch.index_select(frames, 1, slow_indices)
        return [slow_pathway, frames]
    raise NotImplementedError(
        "Model arch {} is not in {}".format(
            cfg.MODEL.ARCH,
            cfg.MODEL.SINGLE_PATHWAY_ARCH + cfg.MODEL.MULTI_PATHWAY_ARCH,
        )
    )
def spatial_sampling(
    frames,
    spatial_idx=-1,
    min_scale=256,
    max_scale=320,
    crop_size=224,
    random_horizontal_flip=True,
    inverse_uniform_sampling=False,
    aspect_ratio=None,
    scale=None,
    motion_shift=False,
):
    """
    Perform spatial sampling on the given video frames. If spatial_idx is
    -1, perform random scale, random crop, and random flip on the given
    frames. If spatial_idx is 0, 1, or 2, perform spatial uniform sampling
    with the given spatial_idx.
    Args:
        frames (tensor): frames of images sampled from the video. The
            dimension is `num frames` x `height` x `width` x `channel`.
        spatial_idx (int): if -1, perform random spatial sampling. If 0, 1,
            or 2, perform left, center, right crop if width is larger than
            height, and perform top, center, bottom crop if height is larger
            than width.
        min_scale (int): the minimal size of scaling.
        max_scale (int): the maximal size of scaling.
        crop_size (int): the size of height and width used to crop the
            frames.
        random_horizontal_flip (bool): whether to apply a 50% horizontal flip
            in the random-sampling (spatial_idx == -1) path.
        inverse_uniform_sampling (bool): if True, sample uniformly in
            [1 / max_scale, 1 / min_scale] and take a reciprocal to get the
            scale. If False, take a uniform sample from [min_scale,
            max_scale].
        aspect_ratio (list): Aspect ratio range for resizing.
        scale (list): Scale range for resizing.
        motion_shift (bool): Whether to apply motion shift for resizing.
    Returns:
        frames (tensor): spatially sampled frames.
    """
    assert spatial_idx in [-1, 0, 1, 2]
    if spatial_idx == -1:
        # Random (training-style) spatial sampling.
        if aspect_ratio is None and scale is None:
            # Classic jitter: rescale the short side, then take a random crop.
            frames, _ = transform.random_short_side_scale_jitter(
                images=frames,
                min_size=min_scale,
                max_size=max_scale,
                inverse_uniform_sampling=inverse_uniform_sampling,
            )
            frames, _ = transform.random_crop(frames, crop_size)
        else:
            # Inception-style random resized crop, optionally motion-shifted.
            transform_func = (
                transform.random_resized_crop_with_shift
                if motion_shift
                else transform.random_resized_crop
            )
            frames = transform_func(
                images=frames,
                target_height=crop_size,
                target_width=crop_size,
                scale=scale,
                ratio=aspect_ratio,
            )
        if random_horizontal_flip:
            frames, _ = transform.horizontal_flip(0.5, frames)
    else:
        # The testing is deterministic and no jitter should be performed.
        # min_scale, max_scale, and crop_size are expected to be the same.
        assert len({min_scale, max_scale}) == 1
        frames, _ = transform.random_short_side_scale_jitter(
            frames, min_scale, max_scale
        )
        frames, _ = transform.uniform_crop(frames, crop_size, spatial_idx)
    return frames
def as_binary_vector(labels, num_classes):
    """
    Construct a multi-hot label vector from a list of label indices.
    Args:
        labels (list): The input label list (duplicates are ignored).
        num_classes (int): Number of classes of the label vector.
    Returns:
        labels (numpy array): the resulting binary vector.
    """
    vector = np.zeros((num_classes,))
    # Fancy-index assignment sets every unique label position to 1 at once.
    vector[list(set(labels))] = 1.0
    return vector
def aggregate_labels(label_list):
    """
    Flatten a list of label lists into a deduplicated label list.
    Args:
        label_list (list): list of label lists.
    Returns:
        labels (list): the unique labels found across all inner lists.
    """
    return list({label for labels in label_list for label in labels})
def convert_to_video_level_labels(labels):
    """
    Aggregate annotations from all frames of a video to form video-level labels.
    Args:
        labels (list): per-video list of per-frame label lists.
    Returns:
        labels (list): the same (mutated) structure, with every frame's entry
            replaced by the video-level label list.
    """
    for frame_labels in labels:
        video_level = aggregate_labels(frame_labels)
        # Slice-assign so the inner list object is mutated in place,
        # matching the original per-index assignment.
        frame_labels[:] = [video_level] * len(frame_labels)
    return labels
def load_image_lists(frame_list_file, prefix="", return_list=False):
    """
    Load image paths and labels from a "frame list".
    Each line of the frame list contains:
    `original_vido_id video_id frame_id path labels`
    Args:
        frame_list_file (string): path to the frame list.
        prefix (str): the prefix for the path.
        return_list (bool): if True, return a list. If False, return a dict
            keyed by video name.
    Returns:
        image_paths (list or dict): per-video lists of frame paths.
        labels (list or dict): per-video lists of per-frame label lists.
    """
    image_paths = defaultdict(list)
    labels = defaultdict(list)
    with pathmgr.open(frame_list_file, "r") as f:
        # First line is the header and must match the expected schema.
        assert f.readline().startswith("original_vido_id")
        for line in f:
            row = line.split()
            # original_vido_id video_id frame_id path labels
            assert len(row) == 5
            video_name, _, _, rel_path, raw_labels = row
            full_path = (
                rel_path if prefix == "" else os.path.join(prefix, rel_path)
            )
            image_paths[video_name].append(full_path)
            raw_labels = raw_labels.replace('"', "")
            if raw_labels != "":
                labels[video_name].append(
                    [int(x) for x in raw_labels.split(",")]
                )
            else:
                labels[video_name].append([])
    if return_list:
        keys = image_paths.keys()
        return [image_paths[key] for key in keys], [
            labels[key] for key in keys
        ]
    return dict(image_paths), dict(labels)
def tensor_normalize(tensor, mean, std):
    """
    Normalize a given tensor by subtracting the mean and dividing the std.
    Args:
        tensor (tensor): tensor to normalize. uint8 tensors are assumed to
            hold [0, 255] pixel values and are scaled to [0, 1] first.
        mean (tensor, list, or tuple): mean value to subtract.
        std (tensor, list, or tuple): std to divide.
    Returns:
        tensor (tensor): the normalized tensor.
    """
    if tensor.dtype == torch.uint8:
        tensor = tensor.float()
        tensor = tensor / 255.0
    # isinstance (instead of `type(x) == list`) also accepts list subclasses,
    # and tuples are supported as a backward-compatible generalization.
    if isinstance(mean, (list, tuple)):
        mean = torch.tensor(mean)
    if isinstance(std, (list, tuple)):
        std = torch.tensor(std)
    tensor = tensor - mean
    tensor = tensor / std
    return tensor
def get_random_sampling_rate(long_cycle_sampling_rate, sampling_rate):
    """
    When multigrid training uses a fewer number of frames, we randomly
    increase the sampling rate so that some clips cover the original span.
    Returns `sampling_rate` unchanged when no long-cycle rate is active.
    """
    if long_cycle_sampling_rate <= 0:
        return sampling_rate
    assert long_cycle_sampling_rate >= sampling_rate
    return random.randint(sampling_rate, long_cycle_sampling_rate)
def revert_tensor_normalize(tensor, mean, std):
    """
    Revert normalization for a given tensor by multiplying by the std and adding the mean.
    Args:
        tensor (tensor): tensor to revert normalization.
        mean (tensor, list, or tuple): mean value to add.
        std (tensor, list, or tuple): std to multiply.
    Returns:
        tensor (tensor): the de-normalized tensor.
    """
    # isinstance (instead of `type(x) == list`) also accepts list subclasses,
    # and tuples are supported for consistency with tensor_normalize.
    if isinstance(mean, (list, tuple)):
        mean = torch.tensor(mean)
    if isinstance(std, (list, tuple)):
        std = torch.tensor(std)
    tensor = tensor * std
    tensor = tensor + mean
    return tensor
def create_sampler(dataset, shuffle, cfg):
    """
    Create sampler for the given dataset.
    Args:
        dataset (torch.utils.data.Dataset): the given dataset.
        shuffle (bool): set to ``True`` to have the data reshuffled
            at every epoch (unused here; kept for API compatibility).
        cfg (CfgNode): configs. Details can be found in
            slowfast/config/defaults.py
    Returns:
        sampler (Sampler): a DistributedSampler for multi-GPU runs, else None.
    """
    if cfg.NUM_GPUS > 1:
        return DistributedSampler(dataset)
    return None
def loader_worker_init_fn(dataset):
    """
    Create init function passed to pytorch data loader.
    Args:
        dataset (torch.utils.data.Dataset): the given dataset.
    Returns:
        None: no per-worker initialization is needed for this dataset type.
    """
    return None
| 11,739 | 32.638968 | 90 | py |
STTS | STTS-main/MViT/slowfast/datasets/mixup.py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
"""
This implementation is based on
https://github.com/rwightman/pytorch-image-models/blob/master/timm/data/mixup.py,
published under an Apache License 2.0.
COMMENT FROM ORIGINAL:
Mixup and Cutmix
Papers:
mixup: Beyond Empirical Risk Minimization (https://arxiv.org/abs/1710.09412)
CutMix: Regularization Strategy to Train Strong Classifiers with Localizable Features (https://arxiv.org/abs/1905.04899) # NOQA
Code Reference:
CutMix: https://github.com/clovaai/CutMix-PyTorch
Hacked together by / Copyright 2020 Ross Wightman
"""
import numpy as np
import torch
def convert_to_one_hot(targets, num_classes, on_value=1.0, off_value=0.0):
    """
    Convert target class indices to (possibly smoothed) one-hot vectors.
    Args:
        targets (loader): Class labels.
        num_classes (int): Total number of classes.
        on_value (float): Target Value for ground truth class.
        off_value (float): Target Value for other classes. This value is used
            for label smoothing.
    """
    index = targets.long().view(-1, 1)
    one_hot = torch.full(
        (index.size()[0], num_classes), off_value, device=index.device
    )
    # Write `on_value` at each sample's ground-truth column.
    return one_hot.scatter_(1, index, on_value)
def mixup_target(target, num_classes, lam=1.0, smoothing=0.0):
    """
    Build the mixed (and optionally label-smoothed) soft-label targets.
    Args:
        target (loader): Class labels.
        num_classes (int): Total number of classes.
        lam (float): lambda value for mixup/cutmix.
        smoothing (float): Label smoothing value.
    """
    # Standard label smoothing: spread `smoothing` mass across all classes.
    off_value = smoothing / num_classes
    on_value = 1.0 - smoothing + off_value
    primary = convert_to_one_hot(
        target, num_classes, on_value=on_value, off_value=off_value
    )
    # The mixing partner is the batch reversed (flip along dim 0).
    partner = convert_to_one_hot(
        target.flip(0), num_classes, on_value=on_value, off_value=off_value
    )
    return primary * lam + partner * (1.0 - lam)
def rand_bbox(img_shape, lam, margin=0.0, count=None):
    """
    Generate a random box whose area fraction is derived from lambda.
    Args:
        img_shape (tuple): Image shape as tuple
        lam (float): Cutmix lambda value
        margin (float): Percentage of bbox dimension to enforce as margin (reduce amount of box outside image)
        count (int): Number of bbox to generate
    """
    cut_ratio = np.sqrt(1 - lam)
    img_h, img_w = img_shape[-2:]
    cut_h = int(img_h * cut_ratio)
    cut_w = int(img_w * cut_ratio)
    margin_y = int(margin * cut_h)
    margin_x = int(margin * cut_w)
    # Sample the box center first: y then x (matches the original RNG order).
    cy = np.random.randint(0 + margin_y, img_h - margin_y, size=count)
    cx = np.random.randint(0 + margin_x, img_w - margin_x, size=count)
    half_h = cut_h // 2
    half_w = cut_w // 2
    yl = np.clip(cy - half_h, 0, img_h)
    yh = np.clip(cy + half_h, 0, img_h)
    xl = np.clip(cx - half_w, 0, img_w)
    xh = np.clip(cx + half_w, 0, img_w)
    return yl, yh, xl, xh
def get_cutmix_bbox(img_shape, lam, correct_lam=True, count=None):
    """
    Generate the cutmix box and (optionally) the lambda corrected for clipping.
    Args:
        img_shape (tuple): Image shape as tuple
        lam (float): Cutmix lambda value
        correct_lam (bool): Apply lambda correction when cutmix bbox clipped by
            image borders.
        count (int): Number of bbox to generate
    """
    yl, yu, xl, xu = rand_bbox(img_shape, lam, count=count)
    if correct_lam:
        # Recompute lambda from the actual (possibly clipped) box area.
        bbox_area = (yu - yl) * (xu - xl)
        lam = 1.0 - bbox_area / float(img_shape[-2] * img_shape[-1])
    return (yl, yu, xl, xu), lam
class MixUp:
    """
    Apply mixup and/or cutmix for videos at batch level.
    mixup: Beyond Empirical Risk Minimization (https://arxiv.org/abs/1710.09412)
    CutMix: Regularization Strategy to Train Strong Classifiers with Localizable
    Features (https://arxiv.org/abs/1905.04899)
    """
    def __init__(
        self,
        mixup_alpha=1.0,
        cutmix_alpha=0.0,
        mix_prob=1.0,
        switch_prob=0.5,
        correct_lam=True,
        label_smoothing=0.1,
        num_classes=1000,
    ):
        """
        Args:
            mixup_alpha (float): Mixup alpha value.
            cutmix_alpha (float): Cutmix alpha value.
            mix_prob (float): Probability of applying mixup or cutmix.
            switch_prob (float): Probability of switching to cutmix instead of
                mixup when both are active.
            correct_lam (bool): Apply lambda correction when cutmix bbox
                clipped by image borders.
            label_smoothing (float): Apply label smoothing to the mixed target
                tensor. If label_smoothing is not used, set it to 0.
            num_classes (int): Number of classes for target.
        """
        self.mixup_alpha = mixup_alpha
        self.cutmix_alpha = cutmix_alpha
        self.mix_prob = mix_prob
        self.switch_prob = switch_prob
        self.label_smoothing = label_smoothing
        self.num_classes = num_classes
        self.correct_lam = correct_lam
    def _get_mixup_params(self):
        # Decide whether this batch gets mixed, which flavor to use, and the
        # mixing lambda. lam == 1.0 means "no mixing".
        lam = 1.0
        use_cutmix = False
        if np.random.rand() < self.mix_prob:
            if self.mixup_alpha > 0.0 and self.cutmix_alpha > 0.0:
                # Both enabled: randomly switch between cutmix and mixup.
                use_cutmix = np.random.rand() < self.switch_prob
                lam_mix = (
                    np.random.beta(self.cutmix_alpha, self.cutmix_alpha)
                    if use_cutmix
                    else np.random.beta(self.mixup_alpha, self.mixup_alpha)
                )
            elif self.mixup_alpha > 0.0:
                lam_mix = np.random.beta(self.mixup_alpha, self.mixup_alpha)
            elif self.cutmix_alpha > 0.0:
                use_cutmix = True
                lam_mix = np.random.beta(self.cutmix_alpha, self.cutmix_alpha)
            lam = float(lam_mix)
        return lam, use_cutmix
    def _mix_batch(self, x):
        # Mix the batch IN PLACE (each sample with the batch-reversed sample)
        # and return the effective lambda used.
        lam, use_cutmix = self._get_mixup_params()
        if lam == 1.0:
            return 1.0
        if use_cutmix:
            (yl, yh, xl, xh), lam = get_cutmix_bbox(
                x.shape,
                lam,
                correct_lam=self.correct_lam,
            )
            x[..., yl:yh, xl:xh] = x.flip(0)[..., yl:yh, xl:xh]
        else:
            x_flipped = x.flip(0).mul_(1.0 - lam)
            x.mul_(lam).add_(x_flipped)
        return lam
    def __call__(self, x, target):
        # x is mutated in place by _mix_batch; the returned target is the
        # matching soft-label mixture.
        assert len(x) > 1, "Batch size should be greater than 1 for mixup."
        lam = self._mix_batch(x)
        target = mixup_target(
            target, self.num_classes, lam, self.label_smoothing
        )
        return x, target
| 6,659 | 34.052632 | 127 | py |
STTS | STTS-main/MViT/slowfast/datasets/ava_dataset.py | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
import logging
import numpy as np
import torch
from . import ava_helper as ava_helper
from . import cv2_transform as cv2_transform
from . import transform as transform
from . import utils as utils
from .build import DATASET_REGISTRY
logger = logging.getLogger(__name__)
@DATASET_REGISTRY.register()
class Ava(torch.utils.data.Dataset):
    """
    AVA Dataset
    Each item is a clip centered on an annotated keyframe, together with the
    person boxes on that keyframe and their multi-hot action labels.
    """
    def __init__(self, cfg, split):
        """
        Args:
            cfg (CfgNode): model/data configuration.
            split (str): one of "train", "val", "test".
        """
        self.cfg = cfg
        self._split = split
        self._sample_rate = cfg.DATA.SAMPLING_RATE
        self._video_length = cfg.DATA.NUM_FRAMES
        # Number of raw frames spanned by one sampled clip.
        self._seq_len = self._video_length * self._sample_rate
        self._num_classes = cfg.MODEL.NUM_CLASSES
        # Augmentation params.
        self._data_mean = cfg.DATA.MEAN
        self._data_std = cfg.DATA.STD
        self._use_bgr = cfg.AVA.BGR
        self.random_horizontal_flip = cfg.DATA.RANDOM_FLIP
        if self._split == "train":
            self._crop_size = cfg.DATA.TRAIN_CROP_SIZE
            self._jitter_min_scale = cfg.DATA.TRAIN_JITTER_SCALES[0]
            self._jitter_max_scale = cfg.DATA.TRAIN_JITTER_SCALES[1]
            self._use_color_augmentation = cfg.AVA.TRAIN_USE_COLOR_AUGMENTATION
            self._pca_jitter_only = cfg.AVA.TRAIN_PCA_JITTER_ONLY
            self._pca_eigval = cfg.DATA.TRAIN_PCA_EIGVAL
            self._pca_eigvec = cfg.DATA.TRAIN_PCA_EIGVEC
        else:
            self._crop_size = cfg.DATA.TEST_CROP_SIZE
            self._test_force_flip = cfg.AVA.TEST_FORCE_FLIP
        self._load_data(cfg)
    def _load_data(self, cfg):
        """
        Load frame paths and annotations from files
        Args:
            cfg (CfgNode): config
        """
        # Loading frame paths.
        (
            self._image_paths,
            self._video_idx_to_name,
        ) = ava_helper.load_image_lists(cfg, is_train=(self._split == "train"))
        # Loading annotations for boxes and labels.
        boxes_and_labels = ava_helper.load_boxes_and_labels(
            cfg, mode=self._split
        )
        assert len(boxes_and_labels) == len(self._image_paths)
        # Re-key annotations from video name to video index.
        boxes_and_labels = [
            boxes_and_labels[self._video_idx_to_name[i]]
            for i in range(len(self._image_paths))
        ]
        # Get indices of keyframes and corresponding boxes and labels.
        (
            self._keyframe_indices,
            self._keyframe_boxes_and_labels,
        ) = ava_helper.get_keyframe_data(boxes_and_labels)
        # Calculate the number of used boxes.
        self._num_boxes_used = ava_helper.get_num_boxes_used(
            self._keyframe_indices, self._keyframe_boxes_and_labels
        )
        self.print_summary()
    def print_summary(self):
        # Log dataset statistics (videos, frames, keyframes, boxes).
        logger.info("=== AVA dataset summary ===")
        logger.info("Split: {}".format(self._split))
        logger.info("Number of videos: {}".format(len(self._image_paths)))
        total_frames = sum(
            len(video_img_paths) for video_img_paths in self._image_paths
        )
        logger.info("Number of frames: {}".format(total_frames))
        logger.info("Number of key frames: {}".format(len(self)))
        logger.info("Number of boxes: {}.".format(self._num_boxes_used))
    def __len__(self):
        """
        Returns:
            (int): the number of videos in the dataset.
        """
        return self.num_videos
    @property
    def num_videos(self):
        """
        Returns:
            (int): the number of keyframe clips in the dataset.
        """
        return len(self._keyframe_indices)
    def _images_and_boxes_preprocessing_cv2(self, imgs, boxes):
        """
        This function performs preprocessing for the input images and
        corresponding boxes for one clip with opencv as backend.
        Args:
            imgs (tensor): the images.
            boxes (ndarray): the boxes for the current clip.
        Returns:
            imgs (tensor): list of preprocessed images.
            boxes (ndarray): preprocessed boxes.
        """
        height, width, _ = imgs[0].shape
        # Boxes arrive normalized to [0, 1]; scale them to pixel coords.
        boxes[:, [0, 2]] *= width
        boxes[:, [1, 3]] *= height
        boxes = cv2_transform.clip_boxes_to_image(boxes, height, width)
        # `transform.py` is list of np.array. However, for AVA, we only have
        # one np.array.
        boxes = [boxes]
        # The image now is in HWC, BGR format.
        if self._split == "train":  # "train"
            imgs, boxes = cv2_transform.random_short_side_scale_jitter_list(
                imgs,
                min_size=self._jitter_min_scale,
                max_size=self._jitter_max_scale,
                boxes=boxes,
            )
            imgs, boxes = cv2_transform.random_crop_list(
                imgs, self._crop_size, order="HWC", boxes=boxes
            )
            if self.random_horizontal_flip:
                # random flip
                imgs, boxes = cv2_transform.horizontal_flip_list(
                    0.5, imgs, order="HWC", boxes=boxes
                )
        elif self._split == "val":
            # Short side to test_scale. Non-local and STRG uses 256.
            imgs = [cv2_transform.scale(self._crop_size, img) for img in imgs]
            boxes = [
                cv2_transform.scale_boxes(
                    self._crop_size, boxes[0], height, width
                )
            ]
            imgs, boxes = cv2_transform.spatial_shift_crop_list(
                self._crop_size, imgs, 1, boxes=boxes
            )
            if self._test_force_flip:
                imgs, boxes = cv2_transform.horizontal_flip_list(
                    1, imgs, order="HWC", boxes=boxes
                )
        elif self._split == "test":
            # Short side to test_scale. Non-local and STRG uses 256.
            imgs = [cv2_transform.scale(self._crop_size, img) for img in imgs]
            boxes = [
                cv2_transform.scale_boxes(
                    self._crop_size, boxes[0], height, width
                )
            ]
            if self._test_force_flip:
                imgs, boxes = cv2_transform.horizontal_flip_list(
                    1, imgs, order="HWC", boxes=boxes
                )
        else:
            raise NotImplementedError(
                "Unsupported split mode {}".format(self._split)
            )
        # Convert image to CHW keeping BGR order.
        imgs = [cv2_transform.HWC2CHW(img) for img in imgs]
        # Image [0, 255] -> [0, 1].
        imgs = [img / 255.0 for img in imgs]
        imgs = [
            np.ascontiguousarray(
                # img.reshape((3, self._crop_size, self._crop_size))
                img.reshape((3, imgs[0].shape[1], imgs[0].shape[2]))
            ).astype(np.float32)
            for img in imgs
        ]
        # Do color augmentation (after divided by 255.0).
        if self._split == "train" and self._use_color_augmentation:
            if not self._pca_jitter_only:
                imgs = cv2_transform.color_jitter_list(
                    imgs,
                    img_brightness=0.4,
                    img_contrast=0.4,
                    img_saturation=0.4,
                )
            imgs = cv2_transform.lighting_list(
                imgs,
                alphastd=0.1,
                eigval=np.array(self._pca_eigval).astype(np.float32),
                eigvec=np.array(self._pca_eigvec).astype(np.float32),
            )
        # Normalize images by mean and std.
        imgs = [
            cv2_transform.color_normalization(
                img,
                np.array(self._data_mean, dtype=np.float32),
                np.array(self._data_std, dtype=np.float32),
            )
            for img in imgs
        ]
        # Concat list of images to single ndarray.
        imgs = np.concatenate(
            [np.expand_dims(img, axis=1) for img in imgs], axis=1
        )
        if not self._use_bgr:
            # Convert image format from BGR to RGB.
            imgs = imgs[::-1, ...]
        imgs = np.ascontiguousarray(imgs)
        imgs = torch.from_numpy(imgs)
        boxes = cv2_transform.clip_boxes_to_image(
            boxes[0], imgs[0].shape[1], imgs[0].shape[2]
        )
        return imgs, boxes
    def _images_and_boxes_preprocessing(self, imgs, boxes):
        """
        This function performs preprocessing for the input images and
        corresponding boxes for one clip.
        Args:
            imgs (tensor): the images.
            boxes (ndarray): the boxes for the current clip.
        Returns:
            imgs (tensor): list of preprocessed images.
            boxes (ndarray): preprocessed boxes.
        """
        # Image [0, 255] -> [0, 1].
        imgs = imgs.float()
        imgs = imgs / 255.0
        height, width = imgs.shape[2], imgs.shape[3]
        # The format of boxes is [x1, y1, x2, y2]. The input boxes are in the
        # range of [0, 1].
        boxes[:, [0, 2]] *= width
        boxes[:, [1, 3]] *= height
        boxes = transform.clip_boxes_to_image(boxes, height, width)
        if self._split == "train":
            # Train split
            imgs, boxes = transform.random_short_side_scale_jitter(
                imgs,
                min_size=self._jitter_min_scale,
                max_size=self._jitter_max_scale,
                boxes=boxes,
            )
            imgs, boxes = transform.random_crop(
                imgs, self._crop_size, boxes=boxes
            )
            # Random flip.
            imgs, boxes = transform.horizontal_flip(0.5, imgs, boxes=boxes)
        elif self._split == "val":
            # Val split
            # Resize short side to crop_size. Non-local and STRG uses 256.
            imgs, boxes = transform.random_short_side_scale_jitter(
                imgs,
                min_size=self._crop_size,
                max_size=self._crop_size,
                boxes=boxes,
            )
            # Apply center crop for val split
            imgs, boxes = transform.uniform_crop(
                imgs, size=self._crop_size, spatial_idx=1, boxes=boxes
            )
            if self._test_force_flip:
                imgs, boxes = transform.horizontal_flip(1, imgs, boxes=boxes)
        elif self._split == "test":
            # Test split
            # Resize short side to crop_size. Non-local and STRG uses 256.
            imgs, boxes = transform.random_short_side_scale_jitter(
                imgs,
                min_size=self._crop_size,
                max_size=self._crop_size,
                boxes=boxes,
            )
            if self._test_force_flip:
                imgs, boxes = transform.horizontal_flip(1, imgs, boxes=boxes)
        else:
            raise NotImplementedError(
                "{} split not supported yet!".format(self._split)
            )
        # Do color augmentation (after divided by 255.0).
        if self._split == "train" and self._use_color_augmentation:
            if not self._pca_jitter_only:
                imgs = transform.color_jitter(
                    imgs,
                    img_brightness=0.4,
                    img_contrast=0.4,
                    img_saturation=0.4,
                )
            imgs = transform.lighting_jitter(
                imgs,
                alphastd=0.1,
                eigval=np.array(self._pca_eigval).astype(np.float32),
                eigvec=np.array(self._pca_eigvec).astype(np.float32),
            )
        # Normalize images by mean and std.
        imgs = transform.color_normalization(
            imgs,
            np.array(self._data_mean, dtype=np.float32),
            np.array(self._data_std, dtype=np.float32),
        )
        if not self._use_bgr:
            # Convert image format from BGR to RGB.
            # Note that Kinetics pre-training uses RGB!
            imgs = imgs[:, [2, 1, 0], ...]
        boxes = transform.clip_boxes_to_image(
            boxes, self._crop_size, self._crop_size
        )
        return imgs, boxes
    def __getitem__(self, idx):
        """
        Generate corresponding clips, boxes, labels and metadata for given idx.
        Args:
            idx (int): the video index provided by the pytorch sampler.
        Returns:
            frames (tensor): the frames of sampled from the video. The dimension
                is `channel` x `num frames` x `height` x `width`.
            label (ndarray): the label for correspond boxes for the current video.
            idx (int): the video index provided by the pytorch sampler.
            extra_data (dict): a dict containing extra data fields, like "boxes",
                "ori_boxes" and "metadata".
        """
        video_idx, sec_idx, sec, center_idx = self._keyframe_indices[idx]
        # Get the frame idxs for current clip.
        seq = utils.get_sequence(
            center_idx,
            self._seq_len // 2,
            self._sample_rate,
            num_frames=len(self._image_paths[video_idx]),
        )
        clip_label_list = self._keyframe_boxes_and_labels[video_idx][sec_idx]
        assert len(clip_label_list) > 0
        # Get boxes and labels for current clip.
        boxes = []
        labels = []
        for box_labels in clip_label_list:
            boxes.append(box_labels[0])
            labels.append(box_labels[1])
        boxes = np.array(boxes)
        # Score is not used.
        boxes = boxes[:, :4].copy()
        ori_boxes = boxes.copy()
        # Load images of current clip.
        image_paths = [self._image_paths[video_idx][frame] for frame in seq]
        imgs = utils.retry_load_images(
            image_paths, backend=self.cfg.AVA.IMG_PROC_BACKEND
        )
        if self.cfg.AVA.IMG_PROC_BACKEND == "pytorch":
            # T H W C -> T C H W.
            imgs = imgs.permute(0, 3, 1, 2)
            # Preprocess images and boxes.
            imgs, boxes = self._images_and_boxes_preprocessing(
                imgs, boxes=boxes
            )
            # T C H W -> C T H W.
            imgs = imgs.permute(1, 0, 2, 3)
        else:
            # Preprocess images and boxes
            imgs, boxes = self._images_and_boxes_preprocessing_cv2(
                imgs, boxes=boxes
            )
        # Construct label arrays.
        label_arrs = np.zeros((len(labels), self._num_classes), dtype=np.int32)
        for i, box_labels in enumerate(labels):
            # AVA label index starts from 1.
            for label in box_labels:
                if label == -1:
                    continue
                assert label >= 1 and label <= 80
                label_arrs[i][label - 1] = 1
        imgs = utils.pack_pathway_output(self.cfg, imgs)
        metadata = [[video_idx, sec]] * len(boxes)
        extra_data = {
            "boxes": boxes,
            "ori_boxes": ori_boxes,
            "metadata": metadata,
        }
        return imgs, label_arrs, idx, extra_data
| 14,963 | 33.881119 | 82 | py |
STTS | STTS-main/MViT/slowfast/datasets/ptv_datasets.py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
import functools
import os
from typing import Dict
import torch
from torch.utils.data import (
DistributedSampler,
RandomSampler,
SequentialSampler,
)
from torchvision.transforms import Compose, Lambda
from torchvision.transforms._transforms_video import (
NormalizeVideo,
RandomCropVideo,
RandomHorizontalFlipVideo,
)
import slowfast.utils.logging as logging
from pytorchvideo.data import (
Charades,
LabeledVideoDataset,
SSv2,
make_clip_sampler,
)
from pytorchvideo.data.labeled_video_paths import LabeledVideoPaths
from pytorchvideo.transforms import (
ApplyTransformToKey,
RandomShortSideScale,
ShortSideScale,
UniformCropVideo,
UniformTemporalSubsample,
)
from . import utils as utils
from .build import DATASET_REGISTRY
logger = logging.get_logger(__name__)
class PTVDatasetWrapper(torch.utils.data.IterableDataset):
    """
    Adapter that exposes a PyTorchVideo iterable dataset through the
    interface expected by PySlowFast loaders (sampler, len, num_videos).
    """

    def __init__(self, num_videos, clips_per_video, crops_per_clip, dataset):
        """
        Construct the wrapper.
        Args:
            num_videos (int): number of videos in the dataset.
            clips_per_video (int): number of clips sampled per video.
            crops_per_clip (int): number of spatial crops per clip.
            dataset (torch.utils.data.IterableDataset): a PyTorchVideo dataset.
        """
        self._num_videos = num_videos
        self._clips_per_video = clips_per_video
        self._crops_per_clip = crops_per_clip
        self.dataset = dataset

    def __next__(self):
        """Retrieve the next clip from the wrapped dataset."""
        return self.dataset.__next__()

    @property
    def sampler(self):
        """Video sampler of the wrapped PyTorchVideo dataset."""
        return self.dataset.video_sampler

    def __len__(self):
        """Number of clips per replica in the IterableDataset."""
        return len(self.sampler) * self._clips_per_video * self._crops_per_clip

    @property
    def num_videos(self):
        """Total number of clips in the dataset."""
        return self._num_videos * self._clips_per_video * self._crops_per_clip

    def __iter__(self):
        return self
class PackPathway(torch.nn.Module):
    """
    Transform that converts a clip tensor into the per-pathway list of
    tensors expected by the model (one entry per pathway).
    """

    def __init__(self, cfg):
        super().__init__()
        self.cfg = cfg

    def forward(self, x: torch.Tensor):
        """Delegate pathway packing to the shared dataset utility."""
        return utils.pack_pathway_output(self.cfg, x)
class DictToTuple(torch.nn.Module):
    """
    Transform that converts the dict output of a PyTorchVideo dataset into
    the (frames, label, index, extra) tuple used by PySlowFast loaders.
    """

    def __init__(self, num_clips, num_crops):
        super().__init__()
        self._num_clips = num_clips
        self._num_crops = num_crops

    def forward(self, x: Dict[str, torch.Tensor]):
        """Flatten (video, clip, aug) indices into a single global index."""
        per_video = self._num_clips * self._num_crops
        flat_index = (
            x["video_index"] * per_video
            + x["clip_index"] * self._num_crops
            + x["aug_index"]
        )
        return x["video"], x["label"], flat_index, {}
def div255(x):
    """
    Normalize pixel values from the [0, 255] range into [0, 1].
    Args:
        x (Tensor): clip RGB frames with shape
            (channel, time, height, width).
    Returns:
        Tensor: `x` scaled down by a factor of 255.
    """
    return x / 255.0
@DATASET_REGISTRY.register()
def Ptvkinetics(cfg, mode):
    """
    Construct the Kinetics video loader with a given csv file. The format of
    the csv file is:
    ```
    path_to_video_1 label_1
    path_to_video_2 label_2
    ...
    path_to_video_N label_N
    ```
    For `train` and `val` mode, a single clip is randomly sampled from every video
    with random cropping, scaling, and flipping. For `test` mode, multiple clips are
    uniformly sampled from every video with center cropping.
    Args:
        cfg (CfgNode): configs.
        mode (string): Options includes `train`, `val`, or `test` mode.
            For the train and val mode, the data loader will take data
            from the train or val set, and sample one clip per video.
            For the test mode, the data loader will take data from test set,
            and sample multiple clips per video.
    Returns:
        PTVDatasetWrapper: iterable dataset yielding
            (frames, label, clip_index, extra_data) tuples.
    """
    # Only support train, val, and test mode.
    assert mode in [
        "train",
        "val",
        "test",
    ], "Split '{}' not supported".format(mode)
    logger.info("Constructing Ptvkinetics {}...".format(mode))
    # Clip duration in seconds, measured at the target (decode) fps.
    clip_duration = (
        cfg.DATA.NUM_FRAMES * cfg.DATA.SAMPLING_RATE / cfg.DATA.TARGET_FPS
    )
    path_to_file = os.path.join(
        cfg.DATA.PATH_TO_DATA_DIR, "{}.csv".format(mode)
    )
    labeled_video_paths = LabeledVideoPaths.from_path(path_to_file)
    num_videos = len(labeled_video_paths)
    labeled_video_paths.path_prefix = cfg.DATA.PATH_PREFIX
    logger.info(
        "Constructing kinetics dataloader (size: {}) from {}".format(
            num_videos, path_to_file
        )
    )
    if mode in ["train", "val"]:
        # One randomly-placed clip per video with train-time augmentation.
        num_clips = 1
        num_crops = 1
        transform = Compose(
            [
                ApplyTransformToKey(
                    key="video",
                    transform=Compose(
                        [
                            UniformTemporalSubsample(cfg.DATA.NUM_FRAMES),
                            Lambda(div255),
                            NormalizeVideo(cfg.DATA.MEAN, cfg.DATA.STD),
                            RandomShortSideScale(
                                min_size=cfg.DATA.TRAIN_JITTER_SCALES[0],
                                max_size=cfg.DATA.TRAIN_JITTER_SCALES[1],
                            ),
                            RandomCropVideo(cfg.DATA.TRAIN_CROP_SIZE),
                        ]
                        + (
                            [RandomHorizontalFlipVideo(p=0.5)]
                            if cfg.DATA.RANDOM_FLIP
                            else []
                        )
                        + [PackPathway(cfg)]
                    ),
                ),
                DictToTuple(num_clips, num_crops),
            ]
        )
        clip_sampler = make_clip_sampler("random", clip_duration)
        if cfg.NUM_GPUS > 1:
            video_sampler = DistributedSampler
        else:
            video_sampler = (
                RandomSampler if mode == "train" else SequentialSampler
            )
    else:
        # Test mode: multiple deterministic clips/crops per video.
        num_clips = cfg.TEST.NUM_ENSEMBLE_VIEWS
        num_crops = cfg.TEST.NUM_SPATIAL_CROPS
        transform = Compose(
            [
                ApplyTransformToKey(
                    key="video",
                    transform=Compose(
                        [
                            UniformTemporalSubsample(cfg.DATA.NUM_FRAMES),
                            Lambda(div255),
                            NormalizeVideo(cfg.DATA.MEAN, cfg.DATA.STD),
                            # NOTE(review): test-time short-side scale uses the
                            # *train* jitter min scale, matching upstream
                            # PySlowFast; confirm intentional.
                            ShortSideScale(
                                size=cfg.DATA.TRAIN_JITTER_SCALES[0]
                            ),
                        ]
                    ),
                ),
                UniformCropVideo(size=cfg.DATA.TEST_CROP_SIZE),
                ApplyTransformToKey(key="video", transform=PackPathway(cfg)),
                DictToTuple(num_clips, num_crops),
            ]
        )
        clip_sampler = make_clip_sampler(
            "constant_clips_per_video",
            clip_duration,
            num_clips,
            num_crops,
        )
        video_sampler = (
            DistributedSampler if cfg.NUM_GPUS > 1 else SequentialSampler
        )
    return PTVDatasetWrapper(
        num_videos=num_videos,
        clips_per_video=num_clips,
        crops_per_clip=num_crops,
        dataset=LabeledVideoDataset(
            labeled_video_paths=labeled_video_paths,
            clip_sampler=clip_sampler,
            video_sampler=video_sampler,
            transform=transform,
            decode_audio=False,
        ),
    )
def process_charades_label(x, mode, num_classes):
    """
    Process the video label for the Charades dataset and convert it into a
    binary (multi-hot) vector. Training aggregates the clip's frame-level
    labels; val/test use the precomputed video-level label.
    Args:
        x (dict): a video clip including label index.
        mode (string): Options includes `train`, `val`, or `test` mode.
        num_classes (int): Number of classes in the dataset.
    Returns:
        x (dict): video clip with `label` replaced by a binary vector tensor.
    """
    if mode == "train":
        label = utils.aggregate_labels(x["label"])
    else:
        label = x["video_label"]
    x["label"] = torch.as_tensor(utils.as_binary_vector(label, num_classes))
    return x
def rgb2bgr(x):
    """
    Reorder the leading channel dimension of a clip from RGB to BGR.
    Args:
        x (Tensor): clip frames with shape
            (channel, time, height, width).
    Returns:
        Tensor: the same frames with channels reversed to BGR order.
    """
    bgr_order = [2, 1, 0]
    return x[bgr_order, ...]
@DATASET_REGISTRY.register()
def Ptvcharades(cfg, mode):
    """
    Construct PyTorchVideo Charades video loader.
    Load Charades data (frame paths, labels, etc. ) to Charades Dataset object.
    The dataset could be downloaded from the Charades official website
    (https://allenai.org/plato/charades/).
    Please see datasets/DATASET.md for more information about the data format.
    For `train` and `val` mode, a single clip is randomly sampled from every video
    with random cropping, scaling, and flipping. For `test` mode, multiple clips are
    uniformly sampled from every video with center cropping.
    Args:
        cfg (CfgNode): configs.
        mode (string): Options includes `train`, `val`, or `test` mode.
            For the train and val mode, the data loader will take data
            from the train or val set, and sample one clip per video.
            For the test mode, the data loader will take data from test set,
            and sample multiple clips per video.
    Returns:
        PTVDatasetWrapper: iterable dataset yielding
            (frames, label, clip_index, extra_data) tuples.
    """
    # Only support train, val, and test mode.
    assert mode in [
        "train",
        "val",
        "test",
    ], "Split '{}' not supported".format(mode)
    logger.info("Constructing Ptvcharades {}...".format(mode))
    # Clip duration in seconds: the span covered by NUM_FRAMES frames taken
    # every SAMPLING_RATE frames, measured at the target fps.
    clip_duration = (
        (cfg.DATA.NUM_FRAMES - 1) * cfg.DATA.SAMPLING_RATE + 1
    ) / cfg.DATA.TARGET_FPS
    if mode in ["train", "val"]:
        # One randomly-placed clip per video with train-time augmentation.
        num_clips = 1
        num_crops = 1
        transform = Compose(
            [
                ApplyTransformToKey(
                    key="video",
                    transform=Compose(
                        [
                            Lambda(div255),
                            NormalizeVideo(cfg.DATA.MEAN, cfg.DATA.STD),
                            RandomShortSideScale(
                                min_size=cfg.DATA.TRAIN_JITTER_SCALES[0],
                                max_size=cfg.DATA.TRAIN_JITTER_SCALES[1],
                            ),
                            RandomCropVideo(cfg.DATA.TRAIN_CROP_SIZE),
                            Lambda(rgb2bgr),
                        ]
                        + (
                            [RandomHorizontalFlipVideo(p=0.5)]
                            if cfg.DATA.RANDOM_FLIP
                            else []
                        )
                        + [PackPathway(cfg)]
                    ),
                ),
                Lambda(
                    functools.partial(
                        process_charades_label,
                        mode=mode,
                        num_classes=cfg.MODEL.NUM_CLASSES,
                    )
                ),
                DictToTuple(num_clips, num_crops),
            ]
        )
        clip_sampler = make_clip_sampler("random", clip_duration)
        if cfg.NUM_GPUS > 1:
            video_sampler = DistributedSampler
        else:
            video_sampler = (
                RandomSampler if mode == "train" else SequentialSampler
            )
    else:
        # Test mode: multiple deterministic clips/crops per video.
        num_clips = cfg.TEST.NUM_ENSEMBLE_VIEWS
        num_crops = cfg.TEST.NUM_SPATIAL_CROPS
        transform = Compose(
            [
                ApplyTransformToKey(
                    key="video",
                    transform=Compose(
                        [
                            Lambda(div255),
                            NormalizeVideo(cfg.DATA.MEAN, cfg.DATA.STD),
                            ShortSideScale(size=cfg.DATA.TEST_CROP_SIZE),
                        ]
                    ),
                ),
                UniformCropVideo(size=cfg.DATA.TEST_CROP_SIZE),
                Lambda(
                    functools.partial(
                        process_charades_label,
                        mode=mode,
                        num_classes=cfg.MODEL.NUM_CLASSES,
                    )
                ),
                # BGR conversion and pathway packing happen after the label
                # transform so the latter sees the raw dict keys.
                ApplyTransformToKey(
                    key="video",
                    transform=Compose(
                        [Lambda(rgb2bgr), PackPathway(cfg)],
                    ),
                ),
                DictToTuple(num_clips, num_crops),
            ]
        )
        clip_sampler = make_clip_sampler(
            "constant_clips_per_video",
            clip_duration,
            num_clips,
            num_crops,
        )
        video_sampler = (
            DistributedSampler if cfg.NUM_GPUS > 1 else SequentialSampler
        )
    data_path = os.path.join(cfg.DATA.PATH_TO_DATA_DIR, "{}.csv".format(mode))
    dataset = Charades(
        data_path=data_path,
        clip_sampler=clip_sampler,
        video_sampler=video_sampler,
        transform=transform,
        video_path_prefix=cfg.DATA.PATH_PREFIX,
        frames_per_clip=cfg.DATA.NUM_FRAMES,
    )
    logger.info(
        "Constructing charades dataloader (size: {}) from {}".format(
            len(dataset._path_to_videos), data_path
        )
    )
    return PTVDatasetWrapper(
        num_videos=len(dataset._path_to_videos),
        clips_per_video=num_clips,
        crops_per_clip=num_crops,
        dataset=dataset,
    )
@DATASET_REGISTRY.register()
def Ptvssv2(cfg, mode):
    """
    Construct PyTorchVideo Something-Something v2 (SSv2) video loader.
    Load SSv2 data (frame paths, labels, etc. ) to SSv2 Dataset object.
    The dataset could be downloaded from the official website
    (https://20bn.com/datasets/something-something).
    Please see datasets/DATASET.md for more information about the data format.
    For training and validation, a single clip is randomly sampled from every
    video with random cropping and scaling. For testing, multiple clips are
    uniformly sampled from every video with uniform cropping. For uniform cropping,
    we take the left, center, and right crop if the width is larger than height,
    or take top, center, and bottom crop if the height is larger than the width.
    Args:
        cfg (CfgNode): configs.
        mode (string): Options includes `train`, `val`, or `test` mode.
    Returns:
        PTVDatasetWrapper: iterable dataset yielding
            (frames, label, clip_index, extra_data) tuples.
    """
    # Only support train, val, and test mode.
    assert mode in [
        "train",
        "val",
        "test",
    ], "Split '{}' not supported".format(mode)
    # Bug fix: the original message said "Ptvcharades" (copy-paste error).
    logger.info("Constructing Ptvssv2 {}...".format(mode))
    if mode in ["train", "val"]:
        # One clip per video with train-time augmentation.
        num_clips = 1
        num_crops = 1
        transform = Compose(
            [
                ApplyTransformToKey(
                    key="video",
                    transform=Compose(
                        [
                            Lambda(div255),
                            NormalizeVideo(cfg.DATA.MEAN, cfg.DATA.STD),
                            RandomShortSideScale(
                                min_size=cfg.DATA.TRAIN_JITTER_SCALES[0],
                                max_size=cfg.DATA.TRAIN_JITTER_SCALES[1],
                            ),
                            RandomCropVideo(cfg.DATA.TRAIN_CROP_SIZE),
                            Lambda(rgb2bgr),
                        ]
                        + (
                            [RandomHorizontalFlipVideo(p=0.5)]
                            if cfg.DATA.RANDOM_FLIP
                            else []
                        )
                        + [PackPathway(cfg)]
                    ),
                ),
                DictToTuple(num_clips, num_crops),
            ]
        )
        if cfg.NUM_GPUS > 1:
            video_sampler = DistributedSampler
        else:
            video_sampler = (
                RandomSampler if mode == "train" else SequentialSampler
            )
    else:
        assert cfg.TEST.NUM_ENSEMBLE_VIEWS == 1
        num_clips = cfg.TEST.NUM_ENSEMBLE_VIEWS
        num_crops = cfg.TEST.NUM_SPATIAL_CROPS
        transform = Compose(
            [
                ApplyTransformToKey(
                    key="video",
                    transform=Compose(
                        [
                            Lambda(div255),
                            NormalizeVideo(cfg.DATA.MEAN, cfg.DATA.STD),
                            ShortSideScale(size=cfg.DATA.TEST_CROP_SIZE),
                        ]
                    ),
                ),
                UniformCropVideo(size=cfg.DATA.TEST_CROP_SIZE),
                ApplyTransformToKey(
                    key="video",
                    transform=Compose(
                        [Lambda(rgb2bgr), PackPathway(cfg)],
                    ),
                ),
                DictToTuple(num_clips, num_crops),
            ]
        )
        video_sampler = (
            DistributedSampler if cfg.NUM_GPUS > 1 else SequentialSampler
        )
    # The clip sampler is identical in both modes, so build it once here
    # (previously duplicated in both branches). SSv2 always consumes the full
    # video, so the duration argument (1) is arbitrary.
    clip_sampler = make_clip_sampler(
        "constant_clips_per_video",
        1,
        num_clips,
        num_crops,
    )
    label_name_file = os.path.join(
        cfg.DATA.PATH_TO_DATA_DIR, "something-something-v2-labels.json"
    )
    video_label_file = os.path.join(
        cfg.DATA.PATH_TO_DATA_DIR,
        "something-something-v2-{}.json".format(
            "train" if mode == "train" else "validation"
        ),
    )
    data_path = os.path.join(
        cfg.DATA.PATH_TO_DATA_DIR,
        "{}.csv".format("train" if mode == "train" else "val"),
    )
    dataset = SSv2(
        label_name_file=label_name_file,
        video_label_file=video_label_file,
        video_path_label_file=data_path,
        clip_sampler=clip_sampler,
        video_sampler=video_sampler,
        transform=transform,
        video_path_prefix=cfg.DATA.PATH_PREFIX,
        frames_per_clip=cfg.DATA.NUM_FRAMES,
        rand_sample_frames=mode == "train",
    )
    logger.info(
        "Constructing ssv2 dataloader (size: {}) from {}".format(
            len(dataset._path_to_videos), data_path
        )
    )
    return PTVDatasetWrapper(
        num_videos=len(dataset._path_to_videos),
        clips_per_video=num_clips,
        crops_per_clip=num_crops,
        dataset=dataset,
    )
| 19,371 | 31.557983 | 84 | py |
STTS | STTS-main/MViT/slowfast/datasets/charades.py | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
import os
import random
from itertools import chain as chain
import torch
import torch.utils.data
import slowfast.utils.logging as logging
from slowfast.utils.env import pathmgr
from . import utils as utils
from .build import DATASET_REGISTRY
logger = logging.get_logger(__name__)
@DATASET_REGISTRY.register()
class Charades(torch.utils.data.Dataset):
    """
    Charades video loader. Construct the Charades video loader, then sample
    clips from the videos. For training and validation, a single clip is randomly
    sampled from every video with random cropping, scaling, and flipping. For
    testing, multiple clips are uniformly sampled from every video with uniform
    cropping. For uniform cropping, we take the left, center, and right crop if
    the width is larger than height, or take top, center, and bottom crop if the
    height is larger than the width.
    """
    def __init__(self, cfg, mode, num_retries=10):
        """
        Load Charades data (frame paths, labels, etc. ) to a given Dataset object.
        The dataset could be downloaded from the Charades official website
        (https://allenai.org/plato/charades/).
        Please see datasets/DATASET.md for more information about the data format.
        Args:
            cfg (CfgNode): configs.
            mode (string): Options includes `train`, `val`, or `test` mode.
                For the train and val mode, the data loader will take data
                from the train or val set, and sample one clip per video.
                For the test mode, the data loader will take data from test set,
                and sample multiple clips per video.
            num_retries (int): number of retries when loading frames from disk.
        """
        # Only support train, val, and test mode.
        assert mode in [
            "train",
            "val",
            "test",
        ], "Split '{}' not supported for Charades ".format(mode)
        self.mode = mode
        self.cfg = cfg
        self._video_meta = {}
        self._num_retries = num_retries
        # For training or validation mode, one single clip is sampled from every
        # video. For testing, NUM_ENSEMBLE_VIEWS clips are sampled from every
        # video. For every clip, NUM_SPATIAL_CROPS is cropped spatially from
        # the frames.
        if self.mode in ["train", "val"]:
            self._num_clips = 1
        elif self.mode in ["test"]:
            self._num_clips = (
                cfg.TEST.NUM_ENSEMBLE_VIEWS * cfg.TEST.NUM_SPATIAL_CROPS
            )
        logger.info("Constructing Charades {}...".format(mode))
        self._construct_loader()
    def _construct_loader(self):
        """
        Construct the video loader: read the split csv, then replicate each
        video `_num_clips` times so every (video, clip) pair gets its own
        dataset index.
        """
        path_to_file = os.path.join(
            self.cfg.DATA.PATH_TO_DATA_DIR,
            "{}.csv".format("train" if self.mode == "train" else "val"),
        )
        assert pathmgr.exists(path_to_file), "{} dir not found".format(
            path_to_file
        )
        (self._path_to_videos, self._labels) = utils.load_image_lists(
            path_to_file, self.cfg.DATA.PATH_PREFIX, return_list=True
        )
        if self.mode != "train":
            # Form video-level labels from frame level annotations.
            self._labels = utils.convert_to_video_level_labels(self._labels)
        self._path_to_videos = list(
            chain.from_iterable(
                [[x] * self._num_clips for x in self._path_to_videos]
            )
        )
        self._labels = list(
            chain.from_iterable([[x] * self._num_clips for x in self._labels])
        )
        # Concatenated ranges: element i equals i % _num_clips, which is the
        # clip index of the i-th repeated entry. (The list is built from the
        # already-replicated labels, so it is longer than strictly needed, but
        # every lookup below stays in range with the correct value.)
        self._spatial_temporal_idx = list(
            chain.from_iterable(
                [range(self._num_clips) for _ in range(len(self._labels))]
            )
        )
        logger.info(
            "Charades dataloader constructed (size: {}) from {}".format(
                len(self._path_to_videos), path_to_file
            )
        )
    def get_seq_frames(self, index):
        """
        Given the video index, return the list of indexs of sampled frames.
        Args:
            index (int): the video index.
        Returns:
            seq (list): the indexes of sampled frames from the video.
        """
        # -1 means random temporal placement (train/val); otherwise the clip's
        # position within the uniform test-time ensemble.
        temporal_sample_index = (
            -1
            if self.mode in ["train", "val"]
            else self._spatial_temporal_idx[index]
            // self.cfg.TEST.NUM_SPATIAL_CROPS
        )
        num_frames = self.cfg.DATA.NUM_FRAMES
        sampling_rate = utils.get_random_sampling_rate(
            self.cfg.MULTIGRID.LONG_CYCLE_SAMPLING_RATE,
            self.cfg.DATA.SAMPLING_RATE,
        )
        video_length = len(self._path_to_videos[index])
        assert video_length == len(self._labels[index])
        clip_length = (num_frames - 1) * sampling_rate + 1
        if temporal_sample_index == -1:
            if clip_length > video_length:
                # Clip longer than the video: start may be negative; frame
                # indices are clamped into range when `seq` is built below.
                start = random.randint(video_length - clip_length, 0)
            else:
                start = random.randint(0, video_length - clip_length)
        else:
            # Spread NUM_ENSEMBLE_VIEWS clip starts uniformly over the video.
            gap = float(max(video_length - clip_length, 0)) / (
                self.cfg.TEST.NUM_ENSEMBLE_VIEWS - 1
            )
            start = int(round(gap * temporal_sample_index))
        seq = [
            max(min(start + i * sampling_rate, video_length - 1), 0)
            for i in range(num_frames)
        ]
        return seq
    def __getitem__(self, index):
        """
        Given the video index, return the list of frames, label, and video
        index if the video frames can be fetched.
        Args:
            index (int): the video index provided by the pytorch sampler.
        Returns:
            frames (tensor): the frames of sampled from the video. The dimension
                is `channel` x `num frames` x `height` x `width`.
            label (tensor): binary label vector of the current clip.
            index (int): the index of the video.
        """
        short_cycle_idx = None
        # When short cycle is used, input index is a tuple.
        if isinstance(index, tuple):
            index, short_cycle_idx = index
        if self.mode in ["train", "val"]:
            # -1 indicates random sampling.
            spatial_sample_index = -1
            min_scale = self.cfg.DATA.TRAIN_JITTER_SCALES[0]
            max_scale = self.cfg.DATA.TRAIN_JITTER_SCALES[1]
            crop_size = self.cfg.DATA.TRAIN_CROP_SIZE
            if short_cycle_idx in [0, 1]:
                # Multigrid short cycle: shrink the crop for this batch phase.
                crop_size = int(
                    round(
                        self.cfg.MULTIGRID.SHORT_CYCLE_FACTORS[short_cycle_idx]
                        * self.cfg.MULTIGRID.DEFAULT_S
                    )
                )
            if self.cfg.MULTIGRID.DEFAULT_S > 0:
                # Decreasing the scale is equivalent to using a larger "span"
                # in a sampling grid.
                min_scale = int(
                    round(
                        float(min_scale)
                        * crop_size
                        / self.cfg.MULTIGRID.DEFAULT_S
                    )
                )
        elif self.mode in ["test"]:
            # spatial_sample_index is in [0, 1, 2]. Corresponding to left,
            # center, or right if width is larger than height, and top, middle,
            # or bottom if height is larger than width.
            spatial_sample_index = (
                self._spatial_temporal_idx[index]
                % self.cfg.TEST.NUM_SPATIAL_CROPS
            )
            min_scale, max_scale, crop_size = [self.cfg.DATA.TEST_CROP_SIZE] * 3
            # The testing is deterministic and no jitter should be performed.
            # min_scale, max_scale, and crop_size are expect to be the same.
            assert len({min_scale, max_scale, crop_size}) == 1
        else:
            raise NotImplementedError(
                "Does not support {} mode".format(self.mode)
            )
        seq = self.get_seq_frames(index)
        frames = torch.as_tensor(
            utils.retry_load_images(
                [self._path_to_videos[index][frame] for frame in seq],
                self._num_retries,
            )
        )
        # Aggregate frame-level labels over the sampled temporal span into a
        # clip-level multi-hot vector.
        label = utils.aggregate_labels(
            [self._labels[index][i] for i in range(seq[0], seq[-1] + 1)]
        )
        label = torch.as_tensor(
            utils.as_binary_vector(label, self.cfg.MODEL.NUM_CLASSES)
        )
        # Perform color normalization.
        frames = utils.tensor_normalize(
            frames, self.cfg.DATA.MEAN, self.cfg.DATA.STD
        )
        # T H W C -> C T H W.
        frames = frames.permute(3, 0, 1, 2)
        # Perform data augmentation.
        frames = utils.spatial_sampling(
            frames,
            spatial_idx=spatial_sample_index,
            min_scale=min_scale,
            max_scale=max_scale,
            crop_size=crop_size,
            random_horizontal_flip=self.cfg.DATA.RANDOM_FLIP,
            inverse_uniform_sampling=self.cfg.DATA.INV_UNIFORM_SAMPLE,
        )
        frames = utils.pack_pathway_output(self.cfg, frames)
        return frames, label, index, {}
    def __len__(self):
        """
        Returns:
            (int): the number of videos in the dataset.
        """
        return self.num_videos
    @property
    def num_videos(self):
        """
        Returns:
            (int): the number of videos in the dataset.
        """
        return len(self._path_to_videos)
| 9,677 | 36.366795 | 82 | py |
STTS | STTS-main/MViT/slowfast/datasets/multigrid_helper.py | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
"""Helper functions for multigrid training."""
import numpy as np
import torch
from torch.utils.data.sampler import Sampler
# Resolve the integer type used for isinstance checks on batch sizes.
# torch._six was removed from modern torch; plain `int` is used on >= 1.8.
TORCH_MAJOR = int(torch.__version__.split(".")[0])
TORCH_MINOR = int(torch.__version__.split(".")[1])
# Bug fix: compare (major, minor) as a tuple. The original check
# `TORCH_MAJOR >= 1 and TORCH_MINOR >= 8` wrongly fell through to the removed
# torch._six module on versions such as 2.0-2.7 (minor < 8), raising
# ImportError at import time.
if (TORCH_MAJOR, TORCH_MINOR) >= (1, 8):
    _int_classes = int
else:
    from torch._six import int_classes as _int_classes
class ShortCycleBatchSampler(Sampler):
    """
    Batch sampler implementing "short cycle" batch sizes.
    See paper "A Multigrid Method for Efficiently Training Video Models",
    Wu et al., 2019 (https://arxiv.org/abs/1912.00998) for details.
    Batches cycle through three sizes, and every yielded item is an
    (index, cycle_position) pair identifying which phase the batch belongs to.
    """
    def __init__(self, sampler, batch_size, drop_last, cfg):
        if not isinstance(sampler, Sampler):
            raise ValueError(
                "sampler should be an instance of "
                "torch.utils.data.Sampler, but got sampler={}".format(sampler)
            )
        invalid_batch_size = (
            not isinstance(batch_size, _int_classes)
            or isinstance(batch_size, bool)
            or batch_size <= 0
        )
        if invalid_batch_size:
            raise ValueError(
                "batch_size should be a positive integer value, "
                "but got batch_size={}".format(batch_size)
            )
        if not isinstance(drop_last, bool):
            raise ValueError(
                "drop_last should be a boolean value, but got "
                "drop_last={}".format(drop_last)
            )
        self.sampler = sampler
        self.drop_last = drop_last
        # Enlarge the base batch size for the two reduced-resolution phases by
        # the inverse relative crop area, keeping per-batch compute balanced.
        scale_ups = []
        for factor in cfg.MULTIGRID.SHORT_CYCLE_FACTORS:
            rel_size = float(cfg.DATA.TRAIN_CROP_SIZE) / (
                factor * cfg.MULTIGRID.DEFAULT_S
            )
            scale_ups.append(int(round(rel_size ** 2)))
        self.batch_sizes = [
            batch_size * scale_ups[0],
            batch_size * scale_ups[1],
            batch_size,
        ]
    def __iter__(self):
        batches_emitted = 0
        target_size = self.batch_sizes[0]
        current = []
        for index in self.sampler:
            # Tag every item with the cycle phase of the batch it belongs to.
            current.append((index, batches_emitted % 3))
            if len(current) < target_size:
                continue
            yield current
            batches_emitted += 1
            target_size = self.batch_sizes[batches_emitted % 3]
            current = []
        if current and not self.drop_last:
            yield current
    def __len__(self):
        mean_size = sum(self.batch_sizes) / 3.0
        n_batches = len(self.sampler) / mean_size
        if self.drop_last:
            return int(np.floor(n_batches))
        return int(np.ceil(n_batches))
| 2,753 | 30.295455 | 78 | py |
STTS | STTS-main/MViT/slowfast/datasets/decoder.py | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
import math
import numpy as np
import random
import torch
import torchvision.io as io
def temporal_sampling(frames, start_idx, end_idx, num_samples):
    """
    Uniformly sample `num_samples` frames between two frame indices
    (inclusive at both ends).
    Args:
        frames (tensor): a tensor of video frames, dimension is
            `num video frames` x `channel` x `height` x `width`.
        start_idx (int): the index of the start frame.
        end_idx (int): the index of the end frame.
        num_samples (int): number of frames to sample.
    Returns:
        frames (tensor): temporally sampled frames, dimension is
            `num clip frames` x `channel` x `height` x `width`.
    """
    # Evenly spaced (possibly fractional) positions, clamped into the valid
    # frame range, then truncated to integer indices.
    positions = torch.linspace(start_idx, end_idx, num_samples)
    positions = torch.clamp(positions, 0, frames.shape[0] - 1).long()
    return torch.index_select(frames, 0, positions)
def get_start_end_idx(
    video_size, clip_size, clip_idx, num_clips, use_offset=False
):
    """
    Pick the first and last frame index of a clip of size `clip_size` within
    a video of `video_size` frames. With clip_idx == -1 the clip position is
    random; otherwise the video is split uniformly into `num_clips` positions
    and the clip_idx-th one is returned.
    Args:
        video_size (int): number of overall frames.
        clip_size (int): size of the clip to sample from the frames.
        clip_idx (int): if clip_idx is -1, perform random jitter sampling. If
            clip_idx is larger than -1, uniformly split the video to num_clips
            clips, and select the start and end index of the clip_idx-th video
            clip.
        num_clips (int): overall number of clips to uniformly sample from the
            given video for testing.
        use_offset (bool): center-aligned uniform placement instead of the
            simple proportional split.
    Returns:
        start_idx (int): the start frame index.
        end_idx (int): the end frame index.
    """
    slack = max(video_size - clip_size, 0)
    if clip_idx == -1:
        # Random temporal jitter anywhere in the feasible range.
        start_idx = random.uniform(0, slack)
    elif use_offset:
        if num_clips == 1:
            # Take the center clip if num_clips is 1.
            start_idx = math.floor(slack / 2)
        else:
            start_idx = clip_idx * math.floor(slack / (num_clips - 1))
    else:
        start_idx = slack * clip_idx / num_clips
    return start_idx, start_idx + clip_size - 1
def pyav_decode_stream(
    container, start_pts, end_pts, stream, stream_name, buffer_size=0
):
    """
    Decode the frames whose pts fall in [start_pts, end_pts] with PyAV.
    Args:
        container (container): PyAV container.
        start_pts (int): the starting Presentation TimeStamp to fetch the
            video frames.
        end_pts (int): the ending Presentation TimeStamp of the decoded frames.
        stream (stream): PyAV stream.
        stream_name (dict): a dictionary of streams. For example, {"video": 0}
            means video stream at stream index 0.
        buffer_size (int): number of additional frames to decode beyond end_pts.
    Returns:
        result (list): list of frames decoded, in pts order.
        max_pts (int): max Presentation TimeStamp of the video sequence.
    """
    # Stream seeking is imprecise, so back off by a fixed pts margin and let
    # the decode loop skip frames that precede start_pts.
    seek_offset = max(start_pts - 1024, 0)
    container.seek(seek_offset, any_frame=False, backward=True, stream=stream)
    decoded = {}
    frames_past_end = 0
    max_pts = 0
    for frame in container.decode(**stream_name):
        max_pts = max(max_pts, frame.pts)
        if frame.pts < start_pts:
            continue
        decoded[frame.pts] = frame
        if frame.pts > end_pts:
            # Past the window: keep up to buffer_size extra frames, then stop.
            frames_past_end += 1
            if frames_past_end >= buffer_size:
                break
    result = [decoded[pts] for pts in sorted(decoded)]
    return result, max_pts
def torchvision_decode(
    video_handle,
    sampling_rate,
    num_frames,
    clip_idx,
    video_meta,
    num_clips=10,
    target_fps=30,
    modalities=("visual",),
    max_spatial_scale=0,
    use_offset=False,
):
    """
    If video_meta is not empty, perform temporal selective decoding to sample a
    clip from the video with TorchVision decoder. If video_meta is empty, decode
    the entire video and update the video_meta.
    Args:
        video_handle (bytes): raw bytes of the video file.
        sampling_rate (int): frame sampling rate (interval between two sampled
            frames).
        num_frames (int): number of frames to sample.
        clip_idx (int): if clip_idx is -1, perform random temporal
            sampling. If clip_idx is larger than -1, uniformly split the
            video to num_clips clips, and select the clip_idx-th video clip.
        video_meta (dict): a dict contains VideoMetaData. Details can be found
            at `pytorch/vision/torchvision/io/_video_opt.py`.
        num_clips (int): overall number of clips to uniformly sample from the
            given video.
        target_fps (int): the input video may has different fps, convert it to
            the target video fps.
        modalities (tuple): tuple of modalities to decode. Currently only
            support `visual`, planning to support `acoustic` soon.
        max_spatial_scale (int): the maximal resolution of the spatial shorter
            edge size during decoding.
        use_offset (bool): passed through to get_start_end_idx for
            center-aligned uniform clip placement.
    Returns:
        frames (tensor): decoded frames from the video.
        fps (float): the number of frames per second of the video.
        decode_all_video (bool): if True, the entire video was decoded.
    """
    # Convert the bytes to a tensor.
    video_tensor = torch.from_numpy(np.frombuffer(video_handle, dtype=np.uint8))
    decode_all_video = True
    video_start_pts, video_end_pts = 0, -1
    # The video_meta is empty, fetch the meta data from the raw video.
    if len(video_meta) == 0:
        # Tracking the meta info for selective decoding in the future.
        meta = io._probe_video_from_memory(video_tensor)
        # Using the information from video_meta to perform selective decoding.
        video_meta["video_timebase"] = meta.video_timebase
        video_meta["video_numerator"] = meta.video_timebase.numerator
        video_meta["video_denominator"] = meta.video_timebase.denominator
        video_meta["has_video"] = meta.has_video
        video_meta["video_duration"] = meta.video_duration
        video_meta["video_fps"] = meta.video_fps
        # NOTE(review): "audio_timebas" looks like a typo for "audio_timebase";
        # the key is kept as-is since external code may already read it.
        video_meta["audio_timebas"] = meta.audio_timebase
        video_meta["audio_numerator"] = meta.audio_timebase.numerator
        video_meta["audio_denominator"] = meta.audio_timebase.denominator
        video_meta["has_audio"] = meta.has_audio
        video_meta["audio_duration"] = meta.audio_duration
        video_meta["audio_sample_rate"] = meta.audio_sample_rate
    fps = video_meta["video_fps"]

    def _read_frames(pts_range):
        # Single place for the tv decoder invocation (previously duplicated
        # verbatim for the selective and full-video paths).
        return io._read_video_from_memory(
            video_tensor,
            seek_frame_margin=1.0,
            read_video_stream="visual" in modalities,
            video_width=0,
            video_height=0,
            video_min_dimension=max_spatial_scale,
            video_pts_range=pts_range,
            video_timebase_numerator=video_meta["video_numerator"],
            video_timebase_denominator=video_meta["video_denominator"],
        )

    if (
        video_meta["has_video"]
        and video_meta["video_denominator"] > 0
        and video_meta["video_duration"] > 0
    ):
        # try selective decoding.
        decode_all_video = False
        clip_size = sampling_rate * num_frames / target_fps * fps
        start_idx, end_idx = get_start_end_idx(
            fps * video_meta["video_duration"],
            clip_size,
            clip_idx,
            num_clips,
            use_offset=use_offset,
        )
        # Convert frame index to pts.
        pts_per_frame = video_meta["video_denominator"] / fps
        video_start_pts = int(start_idx * pts_per_frame)
        video_end_pts = int(end_idx * pts_per_frame)
    # Decode the raw video with the tv decoder.
    v_frames, _ = _read_frames((video_start_pts, video_end_pts))
    if v_frames.shape == torch.Size([0]):
        # Failed selective decoding: fall back to decoding the whole video.
        decode_all_video = True
        v_frames, _ = _read_frames((0, -1))
    return v_frames, fps, decode_all_video
def pyav_decode(
    container,
    sampling_rate,
    num_frames,
    clip_idx,
    num_clips=10,
    target_fps=30,
    use_offset=False,
):
    """
    Decode a clip from the video with the PyAV decoder, converting from the
    video's native fps to target_fps. If the container header exposes the
    duration, only the sampled clip's pts range is decoded (selective
    decoding); otherwise the entire video is decoded.
    Args:
        container (container): pyav container.
        sampling_rate (int): frame sampling rate (interval between two sampled
            frames.
        num_frames (int): number of frames to sample.
        clip_idx (int): if clip_idx is -1, perform random temporal sampling. If
            clip_idx is larger than -1, uniformly split the video to num_clips
            clips, and select the clip_idx-th video clip.
        num_clips (int): overall number of clips to uniformly sample from the
            given video.
        target_fps (int): the input video may has different fps, convert it to
            the target video fps before frame sampling.
        use_offset (bool): passed through to get_start_end_idx for
            center-aligned uniform clip placement.
    Returns:
        frames (tensor): decoded frames from the video. Return None if the no
            video stream was found.
        fps (float): the number of frames per second of the video.
        decode_all_video (bool): If True, the entire video was decoded.
    """
    # Fetch decoding info from the video header; some videos expose no
    # duration, in which case selective decoding is impossible.
    video_stream = container.streams.video[0]
    fps = float(video_stream.average_rate)
    frames_length = video_stream.frames
    duration = video_stream.duration
    if duration is None:
        # No duration info: decode the entire video.
        decode_all_video = True
        video_start_pts, video_end_pts = 0, math.inf
    else:
        # Selective decoding of only the sampled clip's pts window.
        decode_all_video = False
        start_idx, end_idx = get_start_end_idx(
            frames_length,
            sampling_rate * num_frames / target_fps * fps,
            clip_idx,
            num_clips,
            use_offset=use_offset,
        )
        timebase = duration / frames_length
        video_start_pts = int(start_idx * timebase)
        video_end_pts = int(end_idx * timebase)
    frames = None
    # If video stream was found, fetch video frames from the video.
    if container.streams.video:
        video_frames, max_pts = pyav_decode_stream(
            container,
            video_start_pts,
            video_end_pts,
            video_stream,
            {"video": 0},
        )
        container.close()
        frames = torch.as_tensor(
            np.stack([frame.to_rgb().to_ndarray() for frame in video_frames])
        )
    return frames, fps, decode_all_video
def decode(
    container,
    sampling_rate,
    num_frames,
    clip_idx=-1,
    num_clips=10,
    video_meta=None,
    target_fps=30,
    backend="pyav",
    max_spatial_scale=0,
    use_offset=False,
):
    """
    Decode the video and perform temporal sampling.
    Args:
        container (container): pyav container.
        sampling_rate (int): frame sampling rate (interval between two sampled
            frames).
        num_frames (int): number of frames to sample.
        clip_idx (int): if clip_idx is -1, perform random temporal
            sampling. If clip_idx is larger than -1, uniformly split the
            video to num_clips clips, and select the
            clip_idx-th video clip.
        num_clips (int): overall number of clips to uniformly
            sample from the given video.
        video_meta (dict): a dict contains VideoMetaData. Details can be find
            at `pytorch/vision/torchvision/io/_video_opt.py`.
        target_fps (int): the input video may have different fps, convert it to
            the target video fps before frame sampling.
        backend (str): decoding backend includes `pyav` and `torchvision`. The
            default one is `pyav`.
        max_spatial_scale (int): keep the aspect ratio and resize the frame so
            that shorter edge size is max_spatial_scale. Only used in
            `torchvision` backend.
        use_offset (bool): forwarded to the backend decoder and to
            `get_start_end_idx` to control clip-offset sampling.
    Returns:
        frames (tensor): decoded frames from the video. Returns None if the
            backend failed or no frames could be decoded.
    """
    # Currently support two decoders: 1) PyAV, and 2) TorchVision.
    # Fix: the original assertion message read "Not valied clip_idx".
    assert clip_idx >= -1, "Not valid clip_idx {}".format(clip_idx)
    try:
        if backend == "pyav":
            frames, fps, decode_all_video = pyav_decode(
                container,
                sampling_rate,
                num_frames,
                clip_idx,
                num_clips,
                target_fps,
                use_offset=use_offset,
            )
        elif backend == "torchvision":
            frames, fps, decode_all_video = torchvision_decode(
                container,
                sampling_rate,
                num_frames,
                clip_idx,
                video_meta,
                num_clips,
                target_fps,
                ("visual",),
                max_spatial_scale,
                use_offset=use_offset,
            )
        else:
            raise NotImplementedError(
                "Unknown decoding backend {}".format(backend)
            )
    except Exception as e:
        # Best-effort decoding: the caller treats None as a decode failure
        # and may retry with another video.
        print("Failed to decode by {} with exception: {}".format(backend, e))
        return None
    # Return None if the frames was not decoded successfully.
    if frames is None or frames.size(0) == 0:
        return None
    clip_sz = sampling_rate * num_frames / target_fps * fps
    # If the backend already performed selective decoding (decode_all_video is
    # False), the decoded frames correspond to a single clip, so sample the
    # 0-th of 1 clip here; otherwise select clip_idx out of num_clips.
    start_idx, end_idx = get_start_end_idx(
        frames.shape[0],
        clip_sz,
        clip_idx if decode_all_video else 0,
        num_clips if decode_all_video else 1,
        use_offset=use_offset,
    )
    # Perform temporal sampling from the decoded video.
    frames = temporal_sampling(frames, start_idx, end_idx, num_frames)
    return frames
| 15,165 | 37.787724 | 80 | py |
STTS | STTS-main/MViT/slowfast/datasets/ssv2.py | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
import json
import numpy as np
import os
import random
from itertools import chain as chain
import torch
import torch.utils.data
import slowfast.utils.logging as logging
from slowfast.utils.env import pathmgr
from . import utils as utils
from .build import DATASET_REGISTRY
logger = logging.get_logger(__name__)
@DATASET_REGISTRY.register()
class Ssv2(torch.utils.data.Dataset):
    """
    Something-Something v2 (SSV2) video loader. Construct the SSV2 video loader,
    then sample clips from the videos. For training and validation, a single
    clip is randomly sampled from every video with random cropping, scaling, and
    flipping. For testing, multiple clips are uniformly sampled from every
    video with uniform cropping. For uniform cropping, we take the left, center,
    and right crop if the width is larger than height, or take top, center, and
    bottom crop if the height is larger than the width.
    """
    def __init__(self, cfg, mode, num_retries=10):
        """
        Load Something-Something V2 data (frame paths, labels, etc. ) to a given
        Dataset object. The dataset could be downloaded from Something-Something
        official website (https://20bn.com/datasets/something-something).
        Please see datasets/DATASET.md for more information about the data format.
        Args:
            cfg (CfgNode): configs.
            mode (string): Options includes `train`, `val`, or `test` mode.
                For the train and val mode, the data loader will take data
                from the train or val set, and sample one clip per video.
                For the test mode, the data loader will take data from test set,
                and sample multiple clips per video.
            num_retries (int): number of retries for reading frames from disk.
        """
        # Only support train, val, and test mode.
        assert mode in [
            "train",
            "val",
            "test",
        ], "Split '{}' not supported for Something-Something V2".format(mode)
        self.mode = mode
        self.cfg = cfg
        # Per-video metadata cache (unused by this dataset but kept for parity
        # with the other dataset loaders in this package).
        self._video_meta = {}
        self._num_retries = num_retries
        # For training or validation mode, one single clip is sampled from every
        # video. For testing, NUM_ENSEMBLE_VIEWS clips are sampled from every
        # video. For every clip, NUM_SPATIAL_CROPS is cropped spatially from
        # the frames.
        if self.mode in ["train", "val"]:
            self._num_clips = 1
        elif self.mode in ["test"]:
            self._num_clips = (
                cfg.TEST.NUM_ENSEMBLE_VIEWS * cfg.TEST.NUM_SPATIAL_CROPS
            )
        logger.info("Constructing Something-Something V2 {}...".format(mode))
        self._construct_loader()
    def _construct_loader(self):
        """
        Construct the video loader.

        Reads the SSV2 label-name json and the split json, matches every video
        id against the frame lists loaded from the split csv, and builds the
        parallel lists `_path_to_videos`, `_labels` and `_spatial_temporal_idx`
        consumed by `__getitem__`.
        """
        # Loading label names.
        with pathmgr.open(
            os.path.join(
                self.cfg.DATA.PATH_TO_DATA_DIR,
                "something-something-v2-labels.json",
            ),
            "r",
        ) as f:
            label_dict = json.load(f)
        # Loading labels.
        label_file = os.path.join(
            self.cfg.DATA.PATH_TO_DATA_DIR,
            "something-something-v2-{}.json".format(
                "train" if self.mode == "train" else "validation"
            ),
        )
        with pathmgr.open(label_file, "r") as f:
            label_json = json.load(f)
        self._video_names = []
        self._labels = []
        for video in label_json:
            video_name = video["id"]
            template = video["template"]
            # Templates contain bracketed placeholders (e.g. "[something]");
            # strip the brackets so they match the keys of label_dict.
            template = template.replace("[", "")
            template = template.replace("]", "")
            label = int(label_dict[template])
            self._video_names.append(video_name)
            self._labels.append(label)
        path_to_file = os.path.join(
            self.cfg.DATA.PATH_TO_DATA_DIR,
            "{}.csv".format("train" if self.mode == "train" else "val"),
        )
        assert pathmgr.exists(path_to_file), "{} dir not found".format(
            path_to_file
        )
        # Maps video name -> list of frame image paths (see get_seq_frames,
        # which indexes into one such list).
        self._path_to_videos, _ = utils.load_image_lists(
            path_to_file, self.cfg.DATA.PATH_PREFIX
        )
        assert len(self._path_to_videos) == len(self._video_names), (
            len(self._path_to_videos),
            len(self._video_names),
        )
        # From dict to list.
        new_paths, new_labels = [], []
        for index in range(len(self._video_names)):
            if self._video_names[index] in self._path_to_videos:
                new_paths.append(self._path_to_videos[self._video_names[index]])
                new_labels.append(self._labels[index])
        self._labels = new_labels
        self._path_to_videos = new_paths
        # Extend self when self._num_clips > 1 (during testing).
        self._path_to_videos = list(
            chain.from_iterable(
                [[x] * self._num_clips for x in self._path_to_videos]
            )
        )
        self._labels = list(
            chain.from_iterable([[x] * self._num_clips for x in self._labels])
        )
        # NOTE(review): this list is built from the already-extended
        # _path_to_videos, so it is _num_clips times longer than needed; the
        # first len(_path_to_videos) entries still cycle 0.._num_clips-1 and
        # align with the repetition pattern above, so indexing is consistent.
        self._spatial_temporal_idx = list(
            chain.from_iterable(
                [
                    range(self._num_clips)
                    for _ in range(len(self._path_to_videos))
                ]
            )
        )
        logger.info(
            "Something-Something V2 dataloader constructed "
            " (size: {}) from {}".format(
                len(self._path_to_videos), path_to_file
            )
        )
    def get_seq_frames(self, index):
        """
        Given the video index, return the list of sampled frame indexes.

        The video is split into NUM_FRAMES equal segments; training draws a
        random frame per segment, eval takes the segment midpoint.
        Args:
            index (int): the video index.
        Returns:
            seq (list): the indexes of frames of sampled from the video.
        """
        num_frames = self.cfg.DATA.NUM_FRAMES
        video_length = len(self._path_to_videos[index])
        seg_size = float(video_length - 1) / num_frames
        seq = []
        for i in range(num_frames):
            start = int(np.round(seg_size * i))
            end = int(np.round(seg_size * (i + 1)))
            if self.mode == "train":
                # Temporal jitter: any frame inside the segment.
                seq.append(random.randint(start, end))
            else:
                # Deterministic: segment midpoint.
                seq.append((start + end) // 2)
        return seq
    def __getitem__(self, index):
        """
        Given the video index, return the list of frames, label, and video
        index if the video frames can be fetched.
        Args:
            index (int): the video index provided by the pytorch sampler.
        Returns:
            frames (tensor): the frames of sampled from the video. The dimension
                is `channel` x `num frames` x `height` x `width`.
            label (int): the label of the current video.
            index (int): the index of the video.
        """
        short_cycle_idx = None
        # When short cycle is used, input index is a tupple.
        if isinstance(index, tuple):
            index, short_cycle_idx = index
        if self.mode in ["train", "val"]:
            # -1 indicates random sampling.
            spatial_sample_index = -1
            min_scale = self.cfg.DATA.TRAIN_JITTER_SCALES[0]
            max_scale = self.cfg.DATA.TRAIN_JITTER_SCALES[1]
            crop_size = self.cfg.DATA.TRAIN_CROP_SIZE
            if short_cycle_idx in [0, 1]:
                # Multigrid short cycle: shrink the crop by the configured
                # factor for this cycle stage.
                crop_size = int(
                    round(
                        self.cfg.MULTIGRID.SHORT_CYCLE_FACTORS[short_cycle_idx]
                        * self.cfg.MULTIGRID.DEFAULT_S
                    )
                )
            if self.cfg.MULTIGRID.DEFAULT_S > 0:
                # Decreasing the scale is equivalent to using a larger "span"
                # in a sampling grid.
                min_scale = int(
                    round(
                        float(min_scale)
                        * crop_size
                        / self.cfg.MULTIGRID.DEFAULT_S
                    )
                )
        elif self.mode in ["test"]:
            # spatial_sample_index is in [0, 1, 2]. Corresponding to left,
            # center, or right if width is larger than height, and top, middle,
            # or bottom if height is larger than width.
            spatial_sample_index = (
                self._spatial_temporal_idx[index]
                % self.cfg.TEST.NUM_SPATIAL_CROPS
            )
            min_scale, max_scale, crop_size = [self.cfg.DATA.TEST_CROP_SIZE] * 3
            # The testing is deterministic and no jitter should be performed.
            # min_scale, max_scale, and crop_size are expect to be the same.
            assert len({min_scale, max_scale, crop_size}) == 1
        else:
            raise NotImplementedError(
                "Does not support {} mode".format(self.mode)
            )
        label = self._labels[index]
        seq = self.get_seq_frames(index)
        frames = torch.as_tensor(
            utils.retry_load_images(
                [self._path_to_videos[index][frame] for frame in seq],
                self._num_retries,
            )
        )
        # Perform color normalization.
        frames = utils.tensor_normalize(
            frames, self.cfg.DATA.MEAN, self.cfg.DATA.STD
        )
        # T H W C -> C T H W.
        frames = frames.permute(3, 0, 1, 2)
        # Perform data augmentation.
        frames = utils.spatial_sampling(
            frames,
            spatial_idx=spatial_sample_index,
            min_scale=min_scale,
            max_scale=max_scale,
            crop_size=crop_size,
            random_horizontal_flip=self.cfg.DATA.RANDOM_FLIP,
            inverse_uniform_sampling=self.cfg.DATA.INV_UNIFORM_SAMPLE,
        )
        frames = utils.pack_pathway_output(self.cfg, frames)
        return frames, label, index, {}
    def __len__(self):
        """
        Returns:
            (int): the number of videos in the dataset.
        """
        return self.num_videos
    @property
    def num_videos(self):
        """
        Returns:
            (int): the number of videos in the dataset.
        """
        return len(self._path_to_videos)
| 10,293 | 35.246479 | 82 | py |
STTS | STTS-main/MViT/slowfast/datasets/random_erasing.py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
"""
This implementation is based on
https://github.com/rwightman/pytorch-image-models/blob/master/timm/data/random_erasing.py
pulished under an Apache License 2.0.
COMMENT FROM ORIGINAL:
Originally inspired by impl at https://github.com/zhunzhong07/Random-Erasing, Apache 2.0
Copyright Zhun Zhong & Liang Zheng
Hacked together by / Copyright 2020 Ross Wightman
"""
import math
import random
import torch
def _get_pixels(
per_pixel, rand_color, patch_size, dtype=torch.float32, device="cuda"
):
# NOTE I've seen CUDA illegal memory access errors being caused by the normal_()
# paths, flip the order so normal is run on CPU if this becomes a problem
# Issue has been fixed in master https://github.com/pytorch/pytorch/issues/19508
if per_pixel:
return torch.empty(patch_size, dtype=dtype, device=device).normal_()
elif rand_color:
return torch.empty(
(patch_size[0], 1, 1), dtype=dtype, device=device
).normal_()
else:
return torch.zeros((patch_size[0], 1, 1), dtype=dtype, device=device)
class RandomErasing:
    """Randomly selects a rectangle region in an image and erases its pixels.
    'Random Erasing Data Augmentation' by Zhong et al.
    See https://arxiv.org/pdf/1708.04896.pdf
    This variant of RandomErasing is intended to be applied to either a batch
    or single image tensor after it has been normalized by dataset mean and std.
    Args:
        probability: Probability that the Random Erasing operation will be performed.
        min_area: Minimum percentage of erased area wrt input image area.
        max_area: Maximum percentage of erased area wrt input image area.
        min_aspect: Minimum aspect ratio of erased area.
        mode: pixel color mode, one of 'const', 'rand', or 'pixel'
        'const' - erase block is constant color of 0 for all channels
        'rand' - erase block is same per-channel random (normal) color
        'pixel' - erase block is per-pixel random (normal) color
        max_count: maximum number of erasing blocks per image, area per box is scaled by count.
        per-image count is randomly chosen between 1 and this value.
    """
    def __init__(
        self,
        probability=0.5,
        min_area=0.02,
        max_area=1 / 3,
        min_aspect=0.3,
        max_aspect=None,
        mode="const",
        min_count=1,
        max_count=None,
        num_splits=0,
        device="cuda",
        cube=True,
    ):
        self.probability = probability
        self.min_area = min_area
        self.max_area = max_area
        # Aspect ratio is sampled log-uniformly in [min_aspect, max_aspect];
        # max_aspect defaults to the reciprocal of min_aspect.
        max_aspect = max_aspect or 1 / min_aspect
        self.log_aspect_ratio = (math.log(min_aspect), math.log(max_aspect))
        self.min_count = min_count
        self.max_count = max_count or min_count
        self.num_splits = num_splits
        mode = mode.lower()
        self.rand_color = False
        self.per_pixel = False
        # cube=True erases the same rectangle across all frames of a clip
        # (see _erase_cube), which keeps the augmentation temporally coherent.
        self.cube = cube
        if mode == "rand":
            self.rand_color = True  # per block random normal
        elif mode == "pixel":
            self.per_pixel = True  # per pixel random normal
        else:
            assert not mode or mode == "const"
        self.device = device
    def _erase(self, img, chan, img_h, img_w, dtype):
        # Erase up to `count` rectangles in a single (C, H, W) image in place.
        if random.random() > self.probability:
            return
        area = img_h * img_w
        count = (
            self.min_count
            if self.min_count == self.max_count
            else random.randint(self.min_count, self.max_count)
        )
        for _ in range(count):
            # Rejection-sample a rectangle that fits inside the image;
            # give up after 10 attempts.
            for _ in range(10):
                target_area = (
                    random.uniform(self.min_area, self.max_area) * area / count
                )
                aspect_ratio = math.exp(random.uniform(*self.log_aspect_ratio))
                h = int(round(math.sqrt(target_area * aspect_ratio)))
                w = int(round(math.sqrt(target_area / aspect_ratio)))
                if w < img_w and h < img_h:
                    top = random.randint(0, img_h - h)
                    left = random.randint(0, img_w - w)
                    img[:, top : top + h, left : left + w] = _get_pixels(
                        self.per_pixel,
                        self.rand_color,
                        (chan, h, w),
                        dtype=dtype,
                        device=self.device,
                    )
                    break
    def _erase_cube(
        self,
        img,
        batch_start,
        batch_size,
        chan,
        img_h,
        img_w,
        dtype,
    ):
        # Erase the same rectangle (fresh fill values per slice) across every
        # batch element in [batch_start, batch_size), in place.
        if random.random() > self.probability:
            return
        area = img_h * img_w
        count = (
            self.min_count
            if self.min_count == self.max_count
            else random.randint(self.min_count, self.max_count)
        )
        for _ in range(count):
            # 100 attempts here (vs 10 in _erase) since one rectangle must
            # serve the whole batch.
            for _ in range(100):
                target_area = (
                    random.uniform(self.min_area, self.max_area) * area / count
                )
                aspect_ratio = math.exp(random.uniform(*self.log_aspect_ratio))
                h = int(round(math.sqrt(target_area * aspect_ratio)))
                w = int(round(math.sqrt(target_area / aspect_ratio)))
                if w < img_w and h < img_h:
                    top = random.randint(0, img_h - h)
                    left = random.randint(0, img_w - w)
                    for i in range(batch_start, batch_size):
                        img_instance = img[i]
                        img_instance[
                            :, top : top + h, left : left + w
                        ] = _get_pixels(
                            self.per_pixel,
                            self.rand_color,
                            (chan, h, w),
                            dtype=dtype,
                            device=self.device,
                        )
                    break
    def __call__(self, input):
        # Accepts (C, H, W) or (N, C, H, W); mutates and returns `input`.
        if len(input.size()) == 3:
            self._erase(input, *input.size(), input.dtype)
        else:
            batch_size, chan, img_h, img_w = input.size()
            # skip first slice of batch if num_splits is set (for clean portion of samples)
            batch_start = (
                batch_size // self.num_splits if self.num_splits > 1 else 0
            )
            if self.cube:
                self._erase_cube(
                    input,
                    batch_start,
                    batch_size,
                    chan,
                    img_h,
                    img_w,
                    input.dtype,
                )
            else:
                for i in range(batch_start, batch_size):
                    self._erase(input[i], chan, img_h, img_w, input.dtype)
        return input
| 6,887 | 37.055249 | 95 | py |
STTS | STTS-main/MViT/slowfast/datasets/kinetics.py | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
import os
import random
import torch
import torch.utils.data
import numpy as np
from torchvision import transforms
import slowfast.utils.logging as logging
from slowfast.utils.env import pathmgr
from . import decoder as decoder
from . import utils as utils
from . import video_container as container
from .build import DATASET_REGISTRY
from .random_erasing import RandomErasing
from .transform import create_random_augment
logger = logging.get_logger(__name__)
@DATASET_REGISTRY.register()
class Kinetics(torch.utils.data.Dataset):
    """
    Kinetics video loader. Construct the Kinetics video loader, then sample
    clips from the videos. For training and validation, a single clip is
    randomly sampled from every video with random cropping, scaling, and
    flipping. For testing, multiple clips are uniformly sampled from every
    video with uniform cropping. For uniform cropping, we take the left, center,
    and right crop if the width is larger than height, or take top, center, and
    bottom crop if the height is larger than the width.
    """
    def __init__(self, cfg, mode, num_retries=10):
        """
        Construct the Kinetics video loader with a given csv file. The format of
        the csv file is:
        ```
        path_to_video_1 label_1
        path_to_video_2 label_2
        ...
        path_to_video_N label_N
        ```
        Args:
            cfg (CfgNode): configs.
            mode (string): Options includes `train`, `val`, or `test` mode.
                For the train and val mode, the data loader will take data
                from the train or val set, and sample one clip per video.
                For the test mode, the data loader will take data from test set,
                and sample multiple clips per video.
            num_retries (int): number of retries.
        """
        # Only support train, val, and test mode.
        assert mode in [
            "train",
            "val",
            "test",
        ], "Split '{}' not supported for Kinetics".format(mode)
        self.mode = mode
        self.cfg = cfg
        self._video_meta = {}
        self._num_retries = num_retries
        # For training or validation mode, one single clip is sampled from every
        # video. For testing, NUM_ENSEMBLE_VIEWS clips are sampled from every
        # video. For every clip, NUM_SPATIAL_CROPS is cropped spatially from
        # the frames.
        if self.mode in ["train", "val"]:
            self._num_clips = 1
        elif self.mode in ["test"]:
            self._num_clips = (
                cfg.TEST.NUM_ENSEMBLE_VIEWS * cfg.TEST.NUM_SPATIAL_CROPS
            )
        logger.info("Constructing Kinetics {}...".format(mode))
        self._construct_loader()
        # RandAugment / random-erasing are enabled for training only.
        self.aug = False
        self.rand_erase = False
        self.use_temporal_gradient = False
        self.temporal_gradient_rate = 0.0
        if self.mode == "train" and self.cfg.AUG.ENABLE:
            self.aug = True
            if self.cfg.AUG.RE_PROB > 0:
                self.rand_erase = True
    def _construct_loader(self):
        """
        Construct the video loader.

        Reads the split csv (full / temporal / static subset, or the mini
        variant) and builds the parallel lists `_path_to_videos`, `_labels`
        and `_spatial_temporal_idx`, replicated `_num_clips` times per video.
        """
        if self.cfg.USE_MINI:
            path_to_file = os.path.join(
                self.cfg.DATA.PATH_TO_DATA_DIR, "mini_{}.csv".format(self.mode)
            )
        else:
            if self.cfg.TEST.SUBSET == 'full':
                path_to_file = os.path.join(
                    self.cfg.DATA.PATH_TO_DATA_DIR, "{}.csv".format(self.mode)
                )
            elif self.cfg.TEST.SUBSET == 'temporal':
                path_to_file = os.path.join(
                    self.cfg.DATA.PATH_TO_DATA_DIR, "{}_temporal.csv".format(self.mode)
                )
            else:
                path_to_file = os.path.join(
                    self.cfg.DATA.PATH_TO_DATA_DIR, "{}_static.csv".format(self.mode)
                )
        assert pathmgr.exists(path_to_file), "{} dir not found".format(
            path_to_file
        )
        self._path_to_videos = []
        self._labels = []
        self._spatial_temporal_idx = []
        with pathmgr.open(path_to_file, "r") as f:
            for clip_idx, path_label in enumerate(f.read().splitlines()):
                assert (
                    len(path_label.split(self.cfg.DATA.PATH_LABEL_SEPARATOR))
                    == 2
                )
                path, label = path_label.split(
                    self.cfg.DATA.PATH_LABEL_SEPARATOR
                )
                for idx in range(self._num_clips):
                    self._path_to_videos.append(
                        os.path.join(self.cfg.DATA.PATH_PREFIX, path)
                    )
                    self._labels.append(int(label))
                    self._spatial_temporal_idx.append(idx)
                    self._video_meta[clip_idx * self._num_clips + idx] = {}
        # BUG FIX: the message previously formatted `self._split_idx`, an
        # attribute that is never assigned anywhere in this class, so a failed
        # assert raised AttributeError instead of the intended message.
        assert (
            len(self._path_to_videos) > 0
        ), "Failed to load Kinetics split {} from {}".format(
            self.mode, path_to_file
        )
        logger.info(
            "Constructing kinetics dataloader (size: {}) from {}".format(
                len(self._path_to_videos), path_to_file
            )
        )
    def __getitem__(self, index):
        """
        Given the video index, return the list of frames, label, and video
        index if the video can be fetched and decoded successfully, otherwise
        repeatly find a random video that can be decoded as a replacement.
        Args:
            index (int): the video index provided by the pytorch sampler.
        Returns:
            frames (tensor): the frames of sampled from the video. The dimension
                is `channel` x `num frames` x `height` x `width`.
            label (int): the label of the current video.
            index (int): if the video provided by pytorch sampler can be
                decoded, then return the index of the video. If not, return the
                index of the video replacement that can be decoded.
        """
        short_cycle_idx = None
        # When short cycle is used, input index is a tupple.
        if isinstance(index, tuple):
            index, short_cycle_idx = index
        if self.mode in ["train", "val"]:
            # -1 indicates random sampling.
            temporal_sample_index = -1
            spatial_sample_index = -1
            min_scale = self.cfg.DATA.TRAIN_JITTER_SCALES[0]
            max_scale = self.cfg.DATA.TRAIN_JITTER_SCALES[1]
            crop_size = self.cfg.DATA.TRAIN_CROP_SIZE
            if short_cycle_idx in [0, 1]:
                crop_size = int(
                    round(
                        self.cfg.MULTIGRID.SHORT_CYCLE_FACTORS[short_cycle_idx]
                        * self.cfg.MULTIGRID.DEFAULT_S
                    )
                )
            if self.cfg.MULTIGRID.DEFAULT_S > 0:
                # Decreasing the scale is equivalent to using a larger "span"
                # in a sampling grid.
                min_scale = int(
                    round(
                        float(min_scale)
                        * crop_size
                        / self.cfg.MULTIGRID.DEFAULT_S
                    )
                )
        elif self.mode in ["test"]:
            temporal_sample_index = (
                self._spatial_temporal_idx[index]
                // self.cfg.TEST.NUM_SPATIAL_CROPS
            )
            # spatial_sample_index is in [0, 1, 2]. Corresponding to left,
            # center, or right if width is larger than height, and top, middle,
            # or bottom if height is larger than width.
            spatial_sample_index = (
                (
                    self._spatial_temporal_idx[index]
                    % self.cfg.TEST.NUM_SPATIAL_CROPS
                )
                if self.cfg.TEST.NUM_SPATIAL_CROPS > 1
                else 1
            )
            min_scale, max_scale, crop_size = (
                [self.cfg.DATA.TEST_CROP_SIZE] * 3
                if self.cfg.TEST.NUM_SPATIAL_CROPS > 1
                else [self.cfg.DATA.TRAIN_JITTER_SCALES[0]] * 2
                + [self.cfg.DATA.TEST_CROP_SIZE]
            )
            # The testing is deterministic and no jitter should be performed.
            # min_scale, max_scale, and crop_size are expect to be the same.
            assert len({min_scale, max_scale}) == 1
        else:
            raise NotImplementedError(
                "Does not support {} mode".format(self.mode)
            )
        sampling_rate = utils.get_random_sampling_rate(
            self.cfg.MULTIGRID.LONG_CYCLE_SAMPLING_RATE,
            self.cfg.DATA.SAMPLING_RATE,
        )
        # Try to decode and sample a clip from a video. If the video can not be
        # decoded, repeatly find a random video replacement that can be decoded.
        for i_try in range(self._num_retries):
            video_container = None
            try:
                video_container = container.get_video_container(
                    self._path_to_videos[index],
                    self.cfg.DATA_LOADER.ENABLE_MULTI_THREAD_DECODE,
                    self.cfg.DATA.DECODING_BACKEND,
                )
            except Exception as e:
                logger.info(
                    "Failed to load video from {} with error {}".format(
                        self._path_to_videos[index], e
                    )
                )
            # Select a random video if the current video was not able to access.
            if video_container is None:
                logger.warning(
                    "Failed to meta load video idx {} from {}; trial {}".format(
                        index, self._path_to_videos[index], i_try
                    )
                )
                if self.mode not in ["test"] and i_try > self._num_retries // 2:
                    # let's try another one
                    index = random.randint(0, len(self._path_to_videos) - 1)
                continue
            # Decode video. Meta info is used to perform selective decoding.
            frames = decoder.decode(
                video_container,
                sampling_rate,
                self.cfg.DATA.NUM_FRAMES,
                temporal_sample_index,
                self.cfg.TEST.NUM_ENSEMBLE_VIEWS,
                video_meta=self._video_meta[index],
                target_fps=self.cfg.DATA.TARGET_FPS,
                backend=self.cfg.DATA.DECODING_BACKEND,
                max_spatial_scale=min_scale,
                use_offset=self.cfg.DATA.USE_OFFSET_SAMPLING,
            )
            # If decoding failed (wrong format, video is too short, and etc),
            # select another video.
            if frames is None:
                logger.warning(
                    "Failed to decode video idx {} from {}; trial {}".format(
                        index, self._path_to_videos[index], i_try
                    )
                )
                if self.mode not in ["test"] and i_try > self._num_retries // 2:
                    # let's try another one
                    index = random.randint(0, len(self._path_to_videos) - 1)
                continue
            if self.aug:
                if self.cfg.AUG.NUM_SAMPLE > 1:
                    # Repeated augmentation: return NUM_SAMPLE independently
                    # augmented views of the same clip (collated downstream by
                    # multiple_samples_collate).
                    frame_list = []
                    label_list = []
                    index_list = []
                    for _ in range(self.cfg.AUG.NUM_SAMPLE):
                        new_frames = self._aug_frame(
                            frames,
                            spatial_sample_index,
                            min_scale,
                            max_scale,
                            crop_size,
                        )
                        label = self._labels[index]
                        new_frames = utils.pack_pathway_output(
                            self.cfg, new_frames
                        )
                        frame_list.append(new_frames)
                        label_list.append(label)
                        index_list.append(index)
                    return frame_list, label_list, index_list, {}
                else:
                    frames = self._aug_frame(
                        frames,
                        spatial_sample_index,
                        min_scale,
                        max_scale,
                        crop_size,
                    )
            else:
                frames = utils.tensor_normalize(
                    frames, self.cfg.DATA.MEAN, self.cfg.DATA.STD
                )
                # T H W C -> C T H W.
                frames = frames.permute(3, 0, 1, 2)
                # Perform data augmentation.
                frames = utils.spatial_sampling(
                    frames,
                    spatial_idx=spatial_sample_index,
                    min_scale=min_scale,
                    max_scale=max_scale,
                    crop_size=crop_size,
                    random_horizontal_flip=self.cfg.DATA.RANDOM_FLIP,
                    inverse_uniform_sampling=self.cfg.DATA.INV_UNIFORM_SAMPLE,
                )
            label = self._labels[index]
            frames = utils.pack_pathway_output(self.cfg, frames)
            return frames, label, index, {}
        else:
            raise RuntimeError(
                "Failed to fetch video after {} retries.".format(
                    self._num_retries
                )
            )
    def _aug_frame(
        self,
        frames,
        spatial_sample_index,
        min_scale,
        max_scale,
        crop_size,
    ):
        """
        Apply RandAugment, normalization, spatial sampling and (optionally)
        random erasing to one clip.
        Args:
            frames (tensor): decoded clip in T H W C layout.
            spatial_sample_index (int): -1 for random crop, 0/1/2 for
                deterministic crops.
            min_scale / max_scale (int): jitter scale bounds.
            crop_size (int): output spatial size.
        Returns:
            frames (tensor): augmented clip in C T H W layout.
        """
        aug_transform = create_random_augment(
            input_size=(frames.size(1), frames.size(2)),
            auto_augment=self.cfg.AUG.AA_TYPE,
            interpolation=self.cfg.AUG.INTERPOLATION,
        )
        # T H W C -> T C H W.
        frames = frames.permute(0, 3, 1, 2)
        list_img = self._frame_to_list_img(frames)
        list_img = aug_transform(list_img)
        frames = self._list_img_to_frames(list_img)
        frames = frames.permute(0, 2, 3, 1)
        frames = utils.tensor_normalize(
            frames, self.cfg.DATA.MEAN, self.cfg.DATA.STD
        )
        # T H W C -> C T H W.
        frames = frames.permute(3, 0, 1, 2)
        # Perform data augmentation.
        scl, asp = (
            self.cfg.DATA.TRAIN_JITTER_SCALES_RELATIVE,
            self.cfg.DATA.TRAIN_JITTER_ASPECT_RELATIVE,
        )
        relative_scales = (
            None if (self.mode not in ["train"] or len(scl) == 0) else scl
        )
        relative_aspect = (
            None if (self.mode not in ["train"] or len(asp) == 0) else asp
        )
        frames = utils.spatial_sampling(
            frames,
            spatial_idx=spatial_sample_index,
            min_scale=min_scale,
            max_scale=max_scale,
            crop_size=crop_size,
            random_horizontal_flip=self.cfg.DATA.RANDOM_FLIP,
            inverse_uniform_sampling=self.cfg.DATA.INV_UNIFORM_SAMPLE,
            aspect_ratio=relative_aspect,
            scale=relative_scales,
            motion_shift=self.cfg.DATA.TRAIN_JITTER_MOTION_SHIFT
            if self.mode in ["train"]
            else False,
        )
        if self.rand_erase:
            erase_transform = RandomErasing(
                self.cfg.AUG.RE_PROB,
                mode=self.cfg.AUG.RE_MODE,
                max_count=self.cfg.AUG.RE_COUNT,
                num_splits=self.cfg.AUG.RE_COUNT,
                device="cpu",
            )
            # RandomErasing expects the batch/time axis first: C T H W ->
            # T C H W and back.
            frames = frames.permute(1, 0, 2, 3)
            frames = erase_transform(frames)
            frames = frames.permute(1, 0, 2, 3)
        return frames
    def _frame_to_list_img(self, frames):
        # (T, C, H, W) tensor -> list of T PIL images.
        img_list = [
            transforms.ToPILImage()(frames[i]) for i in range(frames.size(0))
        ]
        return img_list
    def _list_img_to_frames(self, img_list):
        # List of PIL images -> (T, C, H, W) float tensor.
        img_list = [transforms.ToTensor()(np.array(img)) for img in img_list]
        return torch.stack(img_list)
    def __len__(self):
        """
        Returns:
            (int): the number of videos in the dataset.
        """
        return self.num_videos
    @property
    def num_videos(self):
        """
        Returns:
            (int): the number of videos in the dataset.
        """
        return len(self._path_to_videos)
| 16,469 | 37.661972 | 87 | py |
STTS | STTS-main/MViT/slowfast/datasets/loader.py | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
"""Data loader."""
import itertools
import numpy as np
from functools import partial
import torch
from torch.utils.data._utils.collate import default_collate
from torch.utils.data.distributed import DistributedSampler
from torch.utils.data.sampler import RandomSampler
from slowfast.datasets.multigrid_helper import ShortCycleBatchSampler
from . import utils as utils
from .build import build_dataset
def multiple_samples_collate(batch, fold=False):
    """
    Collate function for repeated augmentation. Each instance in the batch
    carries a list of samples; the per-instance lists are flattened so every
    sample contributes to the batch dimension before collation.
    Args:
        batch (tuple or list): data batch to collate.
        fold (bool): if True, wrap the collated inputs in a single-element
            list.
    Returns:
        (tuple): collated data batch.
    """
    inputs_per_item, labels_per_item, idx_per_item, extra_data = zip(*batch)
    flat_inputs = list(itertools.chain.from_iterable(inputs_per_item))
    flat_labels = list(itertools.chain.from_iterable(labels_per_item))
    flat_idx = list(itertools.chain.from_iterable(idx_per_item))
    collated_inputs = default_collate(flat_inputs)
    collated_labels = default_collate(flat_labels)
    collated_idx = default_collate(flat_idx)
    collated_extra = default_collate(extra_data)
    if fold:
        return [collated_inputs], collated_labels, collated_idx, collated_extra
    return collated_inputs, collated_labels, collated_idx, collated_extra
def detection_collate(batch):
    """
    Collate function for detection task. Boxes, labels and metadata from
    different samples are concatenated along the first dimension (with the
    sample index prepended to every box) instead of being stacked into a
    batch dimension.
    Args:
        batch (tuple or list): data batch to collate.
    Returns:
        (tuple): collated detection data batch.
    """
    inputs, labels, video_idx, extra_data = zip(*batch)
    collated_inputs = default_collate(inputs)
    collated_idx = default_collate(video_idx)
    collated_labels = torch.tensor(np.concatenate(labels, axis=0)).float()
    collated_extra_data = {}
    for key in extra_data[0].keys():
        values = [sample[key] for sample in extra_data]
        if key in ("boxes", "ori_boxes"):
            # Prepend the sample index to each box so boxes from different
            # samples can be told apart after concatenation.
            indexed = [
                np.concatenate(
                    [np.full((v.shape[0], 1), float(i)), v], axis=1
                )
                for i, v in enumerate(values)
            ]
            collated_extra_data[key] = torch.tensor(
                np.concatenate(indexed, axis=0)
            ).float()
        elif key == "metadata":
            flat = list(itertools.chain(*values))
            collated_extra_data[key] = torch.tensor(flat).view(-1, 2)
        else:
            collated_extra_data[key] = default_collate(values)
    return collated_inputs, collated_labels, collated_idx, collated_extra_data
def construct_loader(cfg, split, is_precise_bn=False):
    """
    Constructs the data loader for the given dataset.
    Args:
        cfg (CfgNode): configs. Details can be found in
            slowfast/config/defaults.py
        split (str): the split of the data loader. Options include `train`,
            `val`, and `test`.
        is_precise_bn (bool): when True, short-cycle batch sampling is
            disabled even for the train split (presumably used when
            recomputing precise batch-norm statistics — confirm at callers).
    Returns:
        loader (torch.utils.data.DataLoader): the constructed data loader.
    """
    assert split in ["train", "val", "test"]
    # Per-split settings: only training shuffles and drops the last
    # incomplete batch; batch size is divided across GPUs.
    if split in ["train"]:
        dataset_name = cfg.TRAIN.DATASET
        batch_size = int(cfg.TRAIN.BATCH_SIZE / max(1, cfg.NUM_GPUS))
        shuffle = True
        drop_last = True
    elif split in ["val"]:
        dataset_name = cfg.TRAIN.DATASET
        batch_size = int(cfg.TRAIN.BATCH_SIZE / max(1, cfg.NUM_GPUS))
        shuffle = False
        drop_last = False
    elif split in ["test"]:
        dataset_name = cfg.TEST.DATASET
        batch_size = int(cfg.TEST.BATCH_SIZE / max(1, cfg.NUM_GPUS))
        shuffle = False
        drop_last = False
    # Construct the dataset
    dataset = build_dataset(dataset_name, cfg, split)
    if isinstance(dataset, torch.utils.data.IterableDataset):
        # Iterable datasets manage their own sampling; no sampler is passed.
        loader = torch.utils.data.DataLoader(
            dataset,
            batch_size=batch_size,
            num_workers=cfg.DATA_LOADER.NUM_WORKERS,
            pin_memory=cfg.DATA_LOADER.PIN_MEMORY,
            drop_last=drop_last,
            collate_fn=detection_collate if cfg.DETECTION.ENABLE else None,
            worker_init_fn=utils.loader_worker_init_fn(dataset),
        )
    else:
        if (
            cfg.MULTIGRID.SHORT_CYCLE
            and split in ["train"]
            and not is_precise_bn
        ):
            # Multigrid short-cycle training: batch sizes vary per iteration,
            # so a custom batch sampler replaces batch_size/shuffle args.
            # Create a sampler for multi-process training
            sampler = utils.create_sampler(dataset, shuffle, cfg)
            batch_sampler = ShortCycleBatchSampler(
                sampler, batch_size=batch_size, drop_last=drop_last, cfg=cfg
            )
            # Create a loader
            loader = torch.utils.data.DataLoader(
                dataset,
                batch_sampler=batch_sampler,
                num_workers=cfg.DATA_LOADER.NUM_WORKERS,
                pin_memory=cfg.DATA_LOADER.PIN_MEMORY,
                worker_init_fn=utils.loader_worker_init_fn(dataset),
            )
        else:
            # Create a sampler for multi-process training
            sampler = utils.create_sampler(dataset, shuffle, cfg)
            # Create a loader
            # Pick the collate function: detection needs concatenated boxes,
            # repeated augmentation needs per-item sample lists flattened.
            if cfg.DETECTION.ENABLE:
                collate_func = detection_collate
            elif cfg.AUG.NUM_SAMPLE > 1 and split in ["train"]:
                collate_func = partial(
                    multiple_samples_collate, fold="imagenet" in dataset_name
                )
            else:
                collate_func = None
            loader = torch.utils.data.DataLoader(
                dataset,
                batch_size=batch_size,
                # A sampler and shuffle=True are mutually exclusive in
                # DataLoader, so shuffle is suppressed when a sampler exists.
                shuffle=(False if sampler else shuffle),
                sampler=sampler,
                num_workers=cfg.DATA_LOADER.NUM_WORKERS,
                pin_memory=cfg.DATA_LOADER.PIN_MEMORY,
                drop_last=drop_last,
                collate_fn=collate_func,
                worker_init_fn=utils.loader_worker_init_fn(dataset),
            )
    return loader
def shuffle_dataset(loader, cur_epoch):
    """
    Shuffles the data by updating the sampler epoch.
    Args:
        loader (loader): data loader to perform shuffle.
        cur_epoch (int): number of the current epoch.
    """
    # NOTE: `_dataset_kind` is a private attribute of torch DataLoader;
    # iterable-style datasets carry their own sampler on the dataset object.
    if (
        loader._dataset_kind
        == torch.utils.data.dataloader._DatasetKind.Iterable
    ):
        if hasattr(loader.dataset, "sampler"):
            sampler = loader.dataset.sampler
        else:
            raise RuntimeError(
                "Unknown sampler for IterableDataset when shuffling dataset"
            )
    else:
        # Short-cycle loaders wrap the sampler inside a ShortCycleBatchSampler.
        sampler = (
            loader.batch_sampler.sampler
            if isinstance(loader.batch_sampler, ShortCycleBatchSampler)
            else loader.sampler
        )
    assert isinstance(
        sampler, (RandomSampler, DistributedSampler)
    ), "Sampler type '{}' not supported".format(type(sampler))
    # RandomSampler handles shuffling automatically
    if isinstance(sampler, DistributedSampler):
        # DistributedSampler shuffles data based on epoch
        sampler.set_epoch(cur_epoch)
| 7,067 | 34.878173 | 79 | py |
STTS | STTS-main/MViT/slowfast/datasets/imagenet.py | # Copyright (c) Facebook, Inc. and its affiliates.
import json
import numpy as np
import os
import random
import re
import torch
import torch.utils.data
from PIL import Image
from torchvision import transforms as transforms_tv
import slowfast.datasets.transform as transform
import slowfast.utils.logging as logging
# import cv2
from slowfast.utils.env import pathmgr
from .build import DATASET_REGISTRY
from .transform import transforms_imagenet_train
logger = logging.get_logger(__name__)
@DATASET_REGISTRY.register()
class Imagenet(torch.utils.data.Dataset):
    """ImageNet dataset.

    Samples are indexed by an "imdb" (list of {"im_path", "class"} dicts)
    built either by scanning per-class subdirectories or by loading a
    preprocessed JSON file (``DATA.PATH_TO_PRELOAD_IMDB``).
    """
    def __init__(self, cfg, mode, num_retries=10):
        """
        Args:
            cfg (CfgNode): configs. Details can be found in
                slowfast/config/defaults.py
            mode (str): one of "train", "val", or "test".
            num_retries (int): how many times __getitem__ retries with a
                random index when an image fails to load.
        """
        self.num_retries = num_retries
        self.cfg = cfg
        self.mode = mode
        self.data_path = cfg.DATA.PATH_TO_DATA_DIR
        assert mode in [
            "train",
            "val",
            "test",
        ], "Split '{}' not supported for ImageNet".format(mode)
        logger.info("Constructing ImageNet {}...".format(mode))
        # Prefer a pre-built index file when one is configured.
        if cfg.DATA.PATH_TO_PRELOAD_IMDB == "":
            self._construct_imdb()
        else:
            self._load_imdb()
    def _load_imdb(self):
        """Load the image database from a preprocessed ``<mode>.json`` file."""
        split_path = os.path.join(
            self.cfg.DATA.PATH_TO_PRELOAD_IMDB, f"{self.mode}.json"
        )
        with pathmgr.open(split_path, "r") as f:
            data = f.read()
        self._imdb = json.loads(data)
    def _construct_imdb(self):
        """Constructs the imdb by scanning class subdirectories on disk."""
        # Compile the split data path
        split_path = os.path.join(self.data_path, self.mode)
        logger.info("{} data path: {}".format(self.mode, split_path))
        # Images are stored per class in subdirs (format: n<number>)
        split_files = pathmgr.ls(split_path)
        self._class_ids = sorted(
            f for f in split_files if re.match(r"^n[0-9]+$", f)
        )
        # Map ImageNet class ids to contiguous ids
        self._class_id_cont_id = {v: i for i, v in enumerate(self._class_ids)}
        # Construct the image db
        self._imdb = []
        for class_id in self._class_ids:
            cont_id = self._class_id_cont_id[class_id]
            im_dir = os.path.join(split_path, class_id)
            for im_name in pathmgr.ls(im_dir):
                im_path = os.path.join(im_dir, im_name)
                self._imdb.append({"im_path": im_path, "class": cont_id})
        logger.info("Number of images: {}".format(len(self._imdb)))
        logger.info("Number of classes: {}".format(len(self._class_ids)))
    def load_image(self, im_path):
        """Prepares the image for network input with format of CHW RGB float"""
        with pathmgr.open(im_path, "rb") as f:
            with Image.open(f) as im:
                im = im.convert("RGB")
            im = torch.from_numpy(np.array(im).astype(np.float32) / 255.0)
        # H W C to C H W
        im = im.permute([2, 0, 1])
        return im
    def _prepare_im_res(self, im_path):
        # Prepare resnet style augmentation.
        im = self.load_image(im_path)
        # Train and test setups differ
        train_size, test_size = (
            self.cfg.DATA.TRAIN_CROP_SIZE,
            self.cfg.DATA.TEST_CROP_SIZE,
        )
        if self.mode == "train":
            # For training use random_sized_crop, horizontal_flip, augment, lighting
            im = transform.random_sized_crop_img(
                im,
                train_size,
                jitter_scale=self.cfg.DATA.TRAIN_JITTER_SCALES_RELATIVE,
                jitter_aspect=self.cfg.DATA.TRAIN_JITTER_ASPECT_RELATIVE,
            )
            im, _ = transform.horizontal_flip(prob=0.5, images=im)
            # im = transforms.augment(im, cfg.TRAIN.AUGMENT)
            im = transform.lighting_jitter(
                im,
                0.1,
                self.cfg.DATA.TRAIN_PCA_EIGVAL,
                self.cfg.DATA.TRAIN_PCA_EIGVEC,
            )
        else:
            # For testing use scale and center crop
            im, _ = transform.uniform_crop(
                im, test_size, spatial_idx=1, scale_size=train_size
            )
        # For training and testing use color normalization
        im = transform.color_normalization(
            im, self.cfg.DATA.MEAN, self.cfg.DATA.STD
        )
        # Convert HWC/RGB/float to CHW/BGR/float format
        # im = np.ascontiguousarray(im[:, :, ::-1].transpose([2, 0, 1]))
        return im
    def _prepare_im_tf(self, im_path):
        # Prepare the image via torchvision/timm-style transforms (AUG.ENABLE).
        with pathmgr.open(im_path, "rb") as f:
            with Image.open(f) as im:
                im = im.convert("RGB")
        # Convert HWC/BGR/int to HWC/RGB/float format for applying transforms
        train_size, test_size = (
            self.cfg.DATA.TRAIN_CROP_SIZE,
            self.cfg.DATA.TEST_CROP_SIZE,
        )
        if self.mode == "train":
            aug_transform = transforms_imagenet_train(
                img_size=(train_size, train_size),
                color_jitter=self.cfg.AUG.COLOR_JITTER,
                auto_augment=self.cfg.AUG.AA_TYPE,
                interpolation=self.cfg.AUG.INTERPOLATION,
                re_prob=self.cfg.AUG.RE_PROB,
                re_mode=self.cfg.AUG.RE_MODE,
                re_count=self.cfg.AUG.RE_COUNT,
                mean=self.cfg.DATA.MEAN,
                std=self.cfg.DATA.STD,
            )
        else:
            t = []
            size = int((256 / 224) * test_size)
            t.append(
                transforms_tv.Resize(
                    size, interpolation=3
                ),  # to maintain same ratio w.r.t. 224 images
            )
            t.append(transforms_tv.CenterCrop(test_size))
            t.append(transforms_tv.ToTensor())
            t.append(
                transforms_tv.Normalize(self.cfg.DATA.MEAN, self.cfg.DATA.STD)
            )
            aug_transform = transforms_tv.Compose(t)
        im = aug_transform(im)
        return im
    def __load__(self, index):
        """Load and transform one sample; returns None on any failure."""
        try:
            # Load the image
            im_path = self._imdb[index]["im_path"]
            # Prepare the image for training / testing
            if self.cfg.AUG.ENABLE:
                if self.mode == "train" and self.cfg.AUG.NUM_SAMPLE > 1:
                    # Multiple independently-augmented crops of one image.
                    im = []
                    for _ in range(self.cfg.AUG.NUM_SAMPLE):
                        crop = self._prepare_im_tf(im_path)
                        im.append(crop)
                    return im
                else:
                    im = self._prepare_im_tf(im_path)
                    return im
            else:
                im = self._prepare_im_res(im_path)
                return im
        except Exception:
            return None
    def __getitem__(self, index):
        # if the current image is corrupted, load a different image.
        # NOTE(review): if every retry fails, `im` stays None and is returned
        # as-is, which will break downstream collation — confirm intended.
        for _ in range(self.num_retries):
            im = self.__load__(index)
            # Data corrupted, retry with a different image.
            if im is None:
                index = random.randint(0, len(self._imdb) - 1)
            else:
                break
        # Retrieve the label
        label = self._imdb[index]["class"]
        if isinstance(im, list):
            # Multi-crop sample: replicate label, one dummy per crop.
            label = [label for _ in range(len(im))]
            dummy = [torch.Tensor() for _ in range(len(im))]
            return im, label, dummy, {}
        else:
            dummy = torch.Tensor()
            return [im], label, dummy, {}
    def __len__(self):
        return len(self._imdb)
| 7,414 | 35.348039 | 84 | py |
STTS | STTS-main/MViT/slowfast/datasets/rand_augment.py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
"""
This implementation is based on
https://github.com/rwightman/pytorch-image-models/blob/master/timm/data/auto_augment.py
pulished under an Apache License 2.0.
COMMENT FROM ORIGINAL:
AutoAugment, RandAugment, and AugMix for PyTorch
This code implements the searched ImageNet policies with various tweaks and
improvements and does not include any of the search code. AA and RA
Implementation adapted from:
https://github.com/tensorflow/tpu/blob/master/models/official/efficientnet/autoaugment.py
AugMix adapted from:
https://github.com/google-research/augmix
Papers:
AutoAugment: Learning Augmentation Policies from Data
https://arxiv.org/abs/1805.09501
Learning Data Augmentation Strategies for Object Detection
https://arxiv.org/abs/1906.11172
RandAugment: Practical automated data augmentation...
https://arxiv.org/abs/1909.13719
AugMix: A Simple Data Processing Method to Improve Robustness and
Uncertainty https://arxiv.org/abs/1912.02781
Hacked together by / Copyright 2020 Ross Wightman
"""
import math
import numpy as np
import random
import re
import PIL
from PIL import Image, ImageEnhance, ImageOps
# Installed Pillow version as an (major, minor) int tuple; used below to pick
# compatible transform APIs.
_PIL_VER = tuple([int(x) for x in PIL.__version__.split(".")[:2]])
# Default grey fill for pixels exposed by geometric transforms.
_FILL = (128, 128, 128)
# This signifies the max integer that the controller RNN could predict for the
# augmentation scheme.
_MAX_LEVEL = 10.0
# Default hyper-parameters consumed by AugmentOp and the level functions.
_HPARAMS_DEFAULT = {
    "translate_const": 250,
    "img_mean": _FILL,
}
# When no interpolation is given, one of these is chosen at random per call.
_RANDOM_INTERPOLATION = (Image.BILINEAR, Image.BICUBIC)
def _interpolation(kwargs):
    """Pop and return the 'resample' option, picking randomly from sequences."""
    chosen = kwargs.pop("resample", Image.BILINEAR)
    if isinstance(chosen, (list, tuple)):
        chosen = random.choice(chosen)
    return chosen
def _check_args_tf(kwargs):
    """Normalize transform kwargs for the installed Pillow version."""
    # Pillow < 5.0 does not accept a 'fillcolor' argument.
    if _PIL_VER < (5, 0):
        kwargs.pop("fillcolor", None)
    kwargs["resample"] = _interpolation(kwargs)
def shear_x(img, factor, **kwargs):
    """Shear the image horizontally by *factor* with an affine transform."""
    _check_args_tf(kwargs)
    matrix = (1, factor, 0, 0, 1, 0)
    return img.transform(img.size, Image.AFFINE, matrix, **kwargs)
def shear_y(img, factor, **kwargs):
    """Shear the image vertically by *factor* with an affine transform."""
    _check_args_tf(kwargs)
    matrix = (1, 0, 0, factor, 1, 0)
    return img.transform(img.size, Image.AFFINE, matrix, **kwargs)
def translate_x_rel(img, pct, **kwargs):
    """Translate horizontally by a fraction *pct* of the image width."""
    pixels = pct * img.size[0]
    _check_args_tf(kwargs)
    matrix = (1, 0, pixels, 0, 1, 0)
    return img.transform(img.size, Image.AFFINE, matrix, **kwargs)
def translate_y_rel(img, pct, **kwargs):
    """Translate vertically by a fraction *pct* of the image height."""
    pixels = pct * img.size[1]
    _check_args_tf(kwargs)
    matrix = (1, 0, 0, 0, 1, pixels)
    return img.transform(img.size, Image.AFFINE, matrix, **kwargs)
def translate_x_abs(img, pixels, **kwargs):
    """Translate horizontally by an absolute number of *pixels*."""
    _check_args_tf(kwargs)
    matrix = (1, 0, pixels, 0, 1, 0)
    return img.transform(img.size, Image.AFFINE, matrix, **kwargs)
def translate_y_abs(img, pixels, **kwargs):
    """Translate vertically by an absolute number of *pixels*."""
    _check_args_tf(kwargs)
    matrix = (1, 0, 0, 0, 1, pixels)
    return img.transform(img.size, Image.AFFINE, matrix, **kwargs)
def rotate(img, degrees, **kwargs):
    """Rotate the image by *degrees*; implementation depends on Pillow version."""
    _check_args_tf(kwargs)
    if _PIL_VER >= (5, 2):
        # Modern Pillow: rotate() accepts fillcolor/resample kwargs directly.
        return img.rotate(degrees, **kwargs)
    elif _PIL_VER >= (5, 0):
        # Pillow 5.0/5.1: emulate center rotation via an explicit affine
        # matrix passed to transform(), which does accept the kwargs.
        w, h = img.size
        post_trans = (0, 0)
        rotn_center = (w / 2.0, h / 2.0)
        angle = -math.radians(degrees)
        matrix = [
            round(math.cos(angle), 15),
            round(math.sin(angle), 15),
            0.0,
            round(-math.sin(angle), 15),
            round(math.cos(angle), 15),
            0.0,
        ]
        def transform(x, y, matrix):
            # Apply the affine matrix (a, b, c, d, e, f) to point (x, y).
            (a, b, c, d, e, f) = matrix
            return a * x + b * y + c, d * x + e * y + f
        # Offset translation terms so the rotation pivots on the image center.
        matrix[2], matrix[5] = transform(
            -rotn_center[0] - post_trans[0],
            -rotn_center[1] - post_trans[1],
            matrix,
        )
        matrix[2] += rotn_center[0]
        matrix[5] += rotn_center[1]
        return img.transform(img.size, Image.AFFINE, matrix, **kwargs)
    else:
        # Very old Pillow: only the resample option is supported.
        return img.rotate(degrees, resample=kwargs["resample"])
def auto_contrast(img, **__):
    """Maximize image contrast (remap darkest/lightest to black/white)."""
    return ImageOps.autocontrast(img)
def invert(img, **__):
    """Invert (negate) all pixel values."""
    return ImageOps.invert(img)
def equalize(img, **__):
    """Equalize the image histogram."""
    return ImageOps.equalize(img)
def solarize(img, thresh, **__):
    """Invert all pixel values above *thresh*."""
    return ImageOps.solarize(img, thresh)
def solarize_add(img, add, thresh=128, **__):
    """Add *add* (clamped at 255) to pixels below *thresh*; L/RGB modes only."""
    if img.mode not in ("L", "RGB"):
        # Unsupported mode: leave the image untouched.
        return img
    lut = [min(255, i + add) if i < thresh else i for i in range(256)]
    if img.mode == "RGB" and len(lut) == 256:
        # Replicate the lookup table for each of the three channels.
        lut = lut + lut + lut
    return img.point(lut)
def posterize(img, bits_to_keep, **__):
    """Keep only the top *bits_to_keep* bits per channel (no-op for >= 8)."""
    return img if bits_to_keep >= 8 else ImageOps.posterize(img, bits_to_keep)
def contrast(img, factor, **__):
    """Adjust contrast by *factor* (1.0 leaves the image unchanged)."""
    return ImageEnhance.Contrast(img).enhance(factor)
def color(img, factor, **__):
    """Adjust color saturation by *factor* (1.0 leaves the image unchanged)."""
    return ImageEnhance.Color(img).enhance(factor)
def brightness(img, factor, **__):
    """Adjust brightness by *factor* (1.0 leaves the image unchanged)."""
    return ImageEnhance.Brightness(img).enhance(factor)
def sharpness(img, factor, **__):
    """Adjust sharpness by *factor* (1.0 leaves the image unchanged)."""
    return ImageEnhance.Sharpness(img).enhance(factor)
def _randomly_negate(v):
"""With 50% prob, negate the value"""
return -v if random.random() > 0.5 else v
def _rotate_level_to_arg(level, _hparams):
    """Map magnitude to a rotation angle in [-30, 30] degrees."""
    degrees = (level / _MAX_LEVEL) * 30.0
    return (_randomly_negate(degrees),)
def _enhance_level_to_arg(level, _hparams):
    """Map magnitude to an enhancement factor in [0.1, 1.9]."""
    factor = (level / _MAX_LEVEL) * 1.8 + 0.1
    return (factor,)
def _enhance_increasing_level_to_arg(level, _hparams):
    """Map magnitude to a factor in [0.1, 1.9], symmetric around 1.0.

    The 'no change' level is 1.0; higher magnitude moves further from it.
    """
    delta = (level / _MAX_LEVEL) * 0.9
    return (1.0 + _randomly_negate(delta),)
def _shear_level_to_arg(level, _hparams):
    """Map magnitude to a shear factor in [-0.3, 0.3]."""
    factor = (level / _MAX_LEVEL) * 0.3
    return (_randomly_negate(factor),)
def _translate_abs_level_to_arg(level, hparams):
    """Map magnitude to an absolute pixel offset, bounded by translate_const."""
    max_pixels = float(hparams["translate_const"])
    offset = (level / _MAX_LEVEL) * max_pixels
    return (_randomly_negate(offset),)
def _translate_rel_level_to_arg(level, hparams):
    """Map magnitude to a relative offset; default range [-0.45, 0.45]."""
    max_pct = hparams.get("translate_pct", 0.45)
    pct = (level / _MAX_LEVEL) * max_pct
    return (_randomly_negate(pct),)
def _posterize_level_to_arg(level, _hparams):
    """Map magnitude to bits-to-keep in [0, 4] (TF TPU EfficientNet style).

    Severity decreases as level increases fewer bits are dropped at low level.
    """
    bits = int((level / _MAX_LEVEL) * 4)
    return (bits,)
def _posterize_increasing_level_to_arg(level, hparams):
    """Map magnitude to bits-to-keep in [4, 0]; severity increases with level."""
    bits = _posterize_level_to_arg(level, hparams)[0]
    return (4 - bits,)
def _posterize_original_level_to_arg(level, _hparams):
    """Map magnitude to bits-to-keep in [4, 8] (original AutoAugment paper).

    Severity decreases as level increases.
    """
    bits = int((level / _MAX_LEVEL) * 4) + 4
    return (bits,)
def _solarize_level_to_arg(level, _hparams):
    """Map magnitude to a solarize threshold in [0, 256] (severity decreases)."""
    thresh = int((level / _MAX_LEVEL) * 256)
    return (thresh,)
def _solarize_increasing_level_to_arg(level, _hparams):
    """Map magnitude to a solarize threshold in [256, 0] (severity increases)."""
    thresh = _solarize_level_to_arg(level, _hparams)[0]
    return (256 - thresh,)
def _solarize_add_level_to_arg(level, _hparams):
    """Map magnitude to a solarize-add amount in [0, 110]."""
    amount = int((level / _MAX_LEVEL) * 110)
    return (amount,)
# Maps op name -> function converting a magnitude level into the op's
# positional arguments; None means the op takes no magnitude argument.
LEVEL_TO_ARG = {
    "AutoContrast": None,
    "Equalize": None,
    "Invert": None,
    "Rotate": _rotate_level_to_arg,
    # There are several variations of the posterize level scaling in various Tensorflow/Google repositories/papers
    "Posterize": _posterize_level_to_arg,
    "PosterizeIncreasing": _posterize_increasing_level_to_arg,
    "PosterizeOriginal": _posterize_original_level_to_arg,
    "Solarize": _solarize_level_to_arg,
    "SolarizeIncreasing": _solarize_increasing_level_to_arg,
    "SolarizeAdd": _solarize_add_level_to_arg,
    "Color": _enhance_level_to_arg,
    "ColorIncreasing": _enhance_increasing_level_to_arg,
    "Contrast": _enhance_level_to_arg,
    "ContrastIncreasing": _enhance_increasing_level_to_arg,
    "Brightness": _enhance_level_to_arg,
    "BrightnessIncreasing": _enhance_increasing_level_to_arg,
    "Sharpness": _enhance_level_to_arg,
    "SharpnessIncreasing": _enhance_increasing_level_to_arg,
    "ShearX": _shear_level_to_arg,
    "ShearY": _shear_level_to_arg,
    "TranslateX": _translate_abs_level_to_arg,
    "TranslateY": _translate_abs_level_to_arg,
    "TranslateXRel": _translate_rel_level_to_arg,
    "TranslateYRel": _translate_rel_level_to_arg,
}
# Maps op name -> the PIL-based implementation defined above. The
# "*Increasing" variants share implementations and differ only in how the
# magnitude is mapped to arguments (see LEVEL_TO_ARG).
NAME_TO_OP = {
    "AutoContrast": auto_contrast,
    "Equalize": equalize,
    "Invert": invert,
    "Rotate": rotate,
    "Posterize": posterize,
    "PosterizeIncreasing": posterize,
    "PosterizeOriginal": posterize,
    "Solarize": solarize,
    "SolarizeIncreasing": solarize,
    "SolarizeAdd": solarize_add,
    "Color": color,
    "ColorIncreasing": color,
    "Contrast": contrast,
    "ContrastIncreasing": contrast,
    "Brightness": brightness,
    "BrightnessIncreasing": brightness,
    "Sharpness": sharpness,
    "SharpnessIncreasing": sharpness,
    "ShearX": shear_x,
    "ShearY": shear_y,
    "TranslateX": translate_x_abs,
    "TranslateY": translate_y_abs,
    "TranslateXRel": translate_x_rel,
    "TranslateYRel": translate_y_rel,
}
class AugmentOp:
    """
    A single named augmentation applied with probability `prob` at a given
    magnitude. The callable accepts either one PIL image or a list of them
    (e.g. video frames), applying the same sampled op to every element.
    """
    def __init__(self, name, prob=0.5, magnitude=10, hparams=None):
        hparams = hparams or _HPARAMS_DEFAULT
        # Implementation and magnitude->args mapping for this op name.
        self.aug_fn = NAME_TO_OP[name]
        self.level_fn = LEVEL_TO_ARG[name]
        self.prob = prob
        self.magnitude = magnitude
        self.hparams = hparams.copy()
        self.kwargs = {
            "fillcolor": hparams["img_mean"]
            if "img_mean" in hparams
            else _FILL,
            "resample": hparams["interpolation"]
            if "interpolation" in hparams
            else _RANDOM_INTERPOLATION,
        }
        # If magnitude_std is > 0, we introduce some randomness
        # in the usually fixed policy and sample magnitude from a normal distribution
        # with mean `magnitude` and std-dev of `magnitude_std`.
        # NOTE This is my own hack, being tested, not in papers or reference impls.
        self.magnitude_std = self.hparams.get("magnitude_std", 0)
    def __call__(self, img_list):
        # Skip the op entirely with probability (1 - prob).
        if self.prob < 1.0 and random.random() > self.prob:
            return img_list
        magnitude = self.magnitude
        if self.magnitude_std and self.magnitude_std > 0:
            magnitude = random.gauss(magnitude, self.magnitude_std)
        magnitude = min(_MAX_LEVEL, max(0, magnitude))  # clip to valid range
        level_args = (
            self.level_fn(magnitude, self.hparams)
            if self.level_fn is not None
            else ()
        )
        if isinstance(img_list, list):
            # Apply the same sampled op/args to every frame in the list.
            return [
                self.aug_fn(img, *level_args, **self.kwargs) for img in img_list
            ]
        else:
            return self.aug_fn(img_list, *level_args, **self.kwargs)
# Default op pool for RandAugment.
_RAND_TRANSFORMS = [
    "AutoContrast",
    "Equalize",
    "Invert",
    "Rotate",
    "Posterize",
    "Solarize",
    "SolarizeAdd",
    "Color",
    "Contrast",
    "Brightness",
    "Sharpness",
    "ShearX",
    "ShearY",
    "TranslateXRel",
    "TranslateYRel",
]
# Op pool whose severity increases monotonically with magnitude
# (selected via the 'inc' key in the config string).
_RAND_INCREASING_TRANSFORMS = [
    "AutoContrast",
    "Equalize",
    "Invert",
    "Rotate",
    "PosterizeIncreasing",
    "SolarizeIncreasing",
    "SolarizeAdd",
    "ColorIncreasing",
    "ContrastIncreasing",
    "BrightnessIncreasing",
    "SharpnessIncreasing",
    "ShearX",
    "ShearY",
    "TranslateXRel",
    "TranslateYRel",
]
# These experimental weights are based loosely on the relative improvements mentioned in paper.
# They may not result in increased performance, but could likely be tuned to so.
_RAND_CHOICE_WEIGHTS_0 = {
    "Rotate": 0.3,
    "ShearX": 0.2,
    "ShearY": 0.2,
    "TranslateXRel": 0.1,
    "TranslateYRel": 0.1,
    "Color": 0.025,
    "Sharpness": 0.025,
    "AutoContrast": 0.025,
    "Solarize": 0.005,
    "SolarizeAdd": 0.005,
    "Contrast": 0.005,
    "Brightness": 0.005,
    "Equalize": 0.005,
    "Posterize": 0,
    "Invert": 0,
}
def _select_rand_weights(weight_idx=0, transforms=None):
    """Return normalized op-sampling probabilities for *transforms*."""
    transforms = transforms or _RAND_TRANSFORMS
    assert weight_idx == 0  # only one set of weights currently
    weights = [_RAND_CHOICE_WEIGHTS_0[name] for name in transforms]
    # Dividing a Python list by a numpy scalar yields a normalized ndarray.
    return weights / np.sum(weights)
def rand_augment_ops(magnitude=10, hparams=None, transforms=None):
    """Build one AugmentOp (applied with prob 0.5) per transform name."""
    hparams = hparams or _HPARAMS_DEFAULT
    names = transforms or _RAND_TRANSFORMS
    ops = []
    for name in names:
        ops.append(
            AugmentOp(name, prob=0.5, magnitude=magnitude, hparams=hparams)
        )
    return ops
class RandAugment:
    """Apply `num_layers` ops sampled from `ops` to an image, in order."""

    def __init__(self, ops, num_layers=2, choice_weights=None):
        self.ops = ops
        self.num_layers = num_layers
        self.choice_weights = choice_weights

    def __call__(self, img):
        # Sampling is without replacement only when weighted choice is used.
        with_replacement = self.choice_weights is None
        chosen_ops = np.random.choice(
            self.ops,
            self.num_layers,
            replace=with_replacement,
            p=self.choice_weights,
        )
        for op in chosen_ops:
            img = op(img)
        return img
def rand_augment_transform(config_str, hparams):
    """
    RandAugment: Practical automated data augmentation... - https://arxiv.org/abs/1909.13719
    Create a RandAugment transform
    :param config_str: String defining configuration of random augmentation. Consists of multiple sections separated by
    dashes ('-'). The first section defines the specific variant of rand augment (currently only 'rand'). The remaining
    sections, not order sepecific determine
        'm' - integer magnitude of rand augment
        'n' - integer num layers (number of transform ops selected per image)
        'w' - integer probabiliy weight index (index of a set of weights to influence choice of op)
        'mstd' - float std deviation of magnitude noise applied
        'inc' - integer (bool), use augmentations that increase in severity with magnitude (default: 0)
    Ex 'rand-m9-n3-mstd0.5' results in RandAugment with magnitude 9, num_layers 3, magnitude_std 0.5
    'rand-mstd1-w0' results in magnitude_std 1.0, weights 0, default magnitude of 10 and num_layers 2
    :param hparams: Other hparams (kwargs) for the RandAugmentation scheme
    :return: A PyTorch compatible Transform
    :raises NotImplementedError: if the config string contains an unknown key.
    """
    magnitude = _MAX_LEVEL  # default to _MAX_LEVEL for magnitude (currently 10)
    num_layers = 2  # default to 2 ops per image
    weight_idx = None  # default to no probability weights for op choice
    transforms = _RAND_TRANSFORMS
    config = config_str.split("-")
    assert config[0] == "rand"
    config = config[1:]
    for c in config:
        # Split a section like "m9" into key "m" and value "9".
        cs = re.split(r"(\d.*)", c)
        if len(cs) < 2:
            continue
        key, val = cs[:2]
        if key == "mstd":
            # noise param injected via hparams for now
            hparams.setdefault("magnitude_std", float(val))
        elif key == "inc":
            # FIX: was `if bool(val)` — but bool("0") is True, so "inc0"
            # wrongly enabled the increasing transforms. Parse as int.
            if int(val):
                transforms = _RAND_INCREASING_TRANSFORMS
        elif key == "m":
            magnitude = int(val)
        elif key == "n":
            num_layers = int(val)
        elif key == "w":
            weight_idx = int(val)
        else:
            # FIX: was `assert NotImplementedError`, which asserts the class
            # object itself (always truthy) and silently ignored unknown keys.
            raise NotImplementedError(
                "Unknown RandAugment config section: {}".format(key)
            )
    ra_ops = rand_augment_ops(
        magnitude=magnitude, hparams=hparams, transforms=transforms
    )
    choice_weights = (
        None if weight_idx is None else _select_rand_weights(weight_idx)
    )
    return RandAugment(ra_ops, num_layers, choice_weights=choice_weights)
| 16,199 | 29.337079 | 119 | py |
STTS | STTS-main/MViT/slowfast/datasets/build.py | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
from fvcore.common.registry import Registry
# Global registry mapping capitalized dataset names to dataset classes;
# dataset classes register themselves via @DATASET_REGISTRY.register().
DATASET_REGISTRY = Registry("DATASET")
DATASET_REGISTRY.__doc__ = """
Registry for dataset.
The registered object will be called with `obj(cfg, split)`.
The call should return a `torch.utils.data.Dataset` object.
"""
def build_dataset(dataset_name, cfg, split):
    """
    Build a dataset, defined by `dataset_name`.
    Args:
        dataset_name (str): the name of the dataset to be constructed.
        cfg (CfgNode): configs. Details can be found in
            slowfast/config/defaults.py
        split (str): the split of the data loader. Options include `train`,
            `val`, and `test`.
    Returns:
        Dataset: a constructed dataset specified by dataset_name.
    """
    # Registered dataset classes always start with an uppercase letter, while
    # configs may spell the dataset name in lowercase; normalize before lookup.
    registered_name = dataset_name.capitalize()
    dataset_cls = DATASET_REGISTRY.get(registered_name)
    return dataset_cls(cfg, split)
| 1,120 | 34.03125 | 80 | py |
STTS | STTS-main/MViT/slowfast/visualization/async_predictor.py | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
import atexit
import numpy as np
import queue
import torch
import torch.multiprocessing as mp
import slowfast.utils.logging as logging
from slowfast.datasets import cv2_transform
from slowfast.visualization.predictor import Predictor
logger = logging.get_logger(__name__)
class AsycnActionPredictor:
    """
    Multi-process action predictor: one _Predictor worker per GPU pulls tasks
    off a shared queue; `get()` reorders results so they come back in the same
    order the tasks were `put()`.
    """
    class _Predictor(mp.Process):
        def __init__(self, cfg, task_queue, result_queue, gpu_id=None):
            """
            Predict Worker for Detectron2.
            Args:
                cfg (CfgNode): configs. Details can be found in
                    slowfast/config/defaults.py
                task_queue (mp.Queue): a shared queue for incoming task.
                result_queue (mp.Queue): a shared queue for predicted results.
                gpu_id (int): index of the GPU device for the current child process.
            """
            super().__init__()
            self.cfg = cfg
            self.task_queue = task_queue
            self.result_queue = result_queue
            self.gpu_id = gpu_id
            self.device = (
                torch.device("cuda:{}".format(self.gpu_id))
                if self.cfg.NUM_GPUS
                else "cpu"
            )
        def run(self):
            """
            Run prediction asynchronously.
            """
            # Build the video model and print model statistics.
            model = Predictor(self.cfg, gpu_id=self.gpu_id)
            while True:
                task = self.task_queue.get()
                # _StopToken is the shutdown sentinel; exit the worker loop.
                if isinstance(task, _StopToken):
                    break
                task = model(task)
                self.result_queue.put(task)
    def __init__(self, cfg, result_queue=None):
        num_workers = cfg.NUM_GPUS
        self.task_queue = mp.Queue()
        self.result_queue = mp.Queue() if result_queue is None else result_queue
        self.get_idx = -1
        self.put_idx = -1
        self.procs = []
        # Each child sees a single-GPU config; gpu_id selects the device.
        cfg = cfg.clone()
        cfg.defrost()
        cfg.NUM_GPUS = 1
        for gpu_id in range(num_workers):
            self.procs.append(
                AsycnActionPredictor._Predictor(
                    cfg, self.task_queue, self.result_queue, gpu_id
                )
            )
        # Buffer for results that arrived out of order, keyed by task id.
        self.result_data = {}
        for p in self.procs:
            p.start()
        atexit.register(self.shutdown)
    def put(self, task):
        """
        Add the new task to task queue.
        Args:
            task (TaskInfo object): task object that contain
                the necessary information for action prediction. (e.g. frames)
        """
        self.put_idx += 1
        self.task_queue.put(task)
    def get(self):
        """
        Return a task object in the correct order based on task id if
        result(s) is available. Otherwise, raise queue.Empty exception.
        """
        # First check the out-of-order buffer for the next expected id.
        if self.result_data.get(self.get_idx + 1) is not None:
            self.get_idx += 1
            res = self.result_data[self.get_idx]
            del self.result_data[self.get_idx]
            return res
        # Drain the result queue, buffering anything that is out of order.
        while True:
            res = self.result_queue.get(block=False)
            idx = res.id
            if idx == self.get_idx + 1:
                self.get_idx += 1
                return res
            self.result_data[idx] = res
    def __call__(self, task):
        # Synchronous convenience wrapper: enqueue then block for a result.
        self.put(task)
        return self.get()
    def shutdown(self):
        # One stop sentinel per worker process.
        for _ in self.procs:
            self.task_queue.put(_StopToken())
    @property
    def result_available(self):
        """
        How many results are ready to be returned.
        """
        return self.result_queue.qsize() + len(self.result_data)
    @property
    def default_buffer_size(self):
        # Heuristic cap on in-flight tasks: 5 per worker process.
        return len(self.procs) * 5
class AsyncVis:
    """
    Multi-process visualizer: CPU worker processes draw predictions onto
    frames; `get()` returns results in the order recorded in
    `get_indices_ls` (task ids appended by the caller).
    """
    class _VisWorker(mp.Process):
        def __init__(self, video_vis, task_queue, result_queue):
            """
            Visualization Worker for AsyncVis.
            Args:
                video_vis (VideoVisualizer object): object with tools for visualization.
                task_queue (mp.Queue): a shared queue for incoming task for visualization.
                result_queue (mp.Queue): a shared queue for visualized results.
            """
            self.video_vis = video_vis
            self.task_queue = task_queue
            self.result_queue = result_queue
            super().__init__()
        def run(self):
            """
            Run visualization asynchronously.
            """
            while True:
                task = self.task_queue.get()
                # _StopToken is the shutdown sentinel; exit the worker loop.
                if isinstance(task, _StopToken):
                    break
                frames = draw_predictions(task, self.video_vis)
                task.frames = np.array(frames)
                self.result_queue.put(task)
    def __init__(self, video_vis, n_workers=None):
        """
        Args:
            cfg (CfgNode): configs. Details can be found in
                slowfast/config/defaults.py
            n_workers (Optional[int]): number of CPUs for running video visualizer.
                If not given, use all CPUs.
        """
        num_workers = mp.cpu_count() if n_workers is None else n_workers
        self.task_queue = mp.Queue()
        self.result_queue = mp.Queue()
        # Task ids in the order results should be returned (caller-managed).
        self.get_indices_ls = []
        self.procs = []
        # Buffer for results that arrived out of order, keyed by task id.
        self.result_data = {}
        self.put_id = -1
        for _ in range(max(num_workers, 1)):
            self.procs.append(
                AsyncVis._VisWorker(
                    video_vis, self.task_queue, self.result_queue
                )
            )
        for p in self.procs:
            p.start()
        atexit.register(self.shutdown)
    def put(self, task):
        """
        Add the new task to task queue.
        Args:
            task (TaskInfo object): task object that contain
                the necessary information for action prediction. (e.g. frames, boxes, predictions)
        """
        self.put_id += 1
        self.task_queue.put(task)
    def get(self):
        """
        Return visualized frames/clips in the correct order based on task id if
        result(s) is available. Otherwise, raise queue.Empty exception.
        """
        get_idx = self.get_indices_ls[0]
        # First check the out-of-order buffer for the expected id.
        if self.result_data.get(get_idx) is not None:
            res = self.result_data[get_idx]
            del self.result_data[get_idx]
            del self.get_indices_ls[0]
            return res
        # Drain the result queue, buffering anything that is out of order.
        while True:
            res = self.result_queue.get(block=False)
            idx = res.id
            if idx == get_idx:
                del self.get_indices_ls[0]
                return res
            self.result_data[idx] = res
    def __call__(self, task):
        """
        Synchronous convenience wrapper: enqueue the task and block for
        its visualized result.
        """
        self.put(task)
        return self.get()
    def shutdown(self):
        # One stop sentinel per worker process.
        for _ in self.procs:
            self.task_queue.put(_StopToken())
    @property
    def result_available(self):
        # How many results are ready to be returned.
        return self.result_queue.qsize() + len(self.result_data)
    @property
    def default_buffer_size(self):
        # Heuristic cap on in-flight tasks: 5 per worker process.
        return len(self.procs) * 5
class _StopToken:
    """Sentinel placed on a task queue to tell worker processes to exit."""
    pass
class AsyncDemo:
    """
    Asynchronous Action Prediction and Visualization pipeline with AsyncVis.
    """
    def __init__(self, cfg, async_vis):
        """
        Args:
            cfg (CfgNode): configs. Details can be found in
                slowfast/config/defaults.py
            async_vis (AsyncVis object): asynchronous visualizer.
        """
        # Predictor feeds its results directly into the visualizer's queue.
        self.model = AsycnActionPredictor(
            cfg=cfg, result_queue=async_vis.task_queue
        )
        self.async_vis = async_vis
    def put(self, task):
        """
        Put task into task queue for prediction and visualization.
        Args:
            task (TaskInfo object): task object that contain
                the necessary information for action prediction. (e.g. frames)
        """
        # Record the task id so AsyncVis.get() knows the expected order.
        self.async_vis.get_indices_ls.append(task.id)
        self.model.put(task)
    def get(self):
        """
        Get the visualized clips if any.
        Raises:
            IndexError: if no result is available yet.
        """
        try:
            task = self.async_vis.get()
        except (queue.Empty, IndexError):
            raise IndexError("Results are not available yet.")
        return task
def draw_predictions(task, video_vis):
    """
    Draw prediction for the given task.
    Args:
        task (TaskInfo object): task object that contain
            the necessary information for visualization. (e.g. frames, preds)
            All attributes must lie on CPU devices.
        video_vis (VideoVisualizer object): the video visualizer object.
    Returns:
        list: buffer frames (undrawn) followed by the visualized frames.
    """
    boxes = task.bboxes
    frames = task.frames
    preds = task.action_preds
    if boxes is not None:
        img_width = task.img_width
        img_height = task.img_height
        if boxes.device != torch.device("cpu"):
            boxes = boxes.cpu()
        # Map boxes from model crop coordinates back to the original frame.
        boxes = cv2_transform.revert_scaled_boxes(
            task.crop_size, boxes, img_height, img_width
        )
    # Keyframe index relative to the non-buffer portion of the clip.
    keyframe_idx = len(frames) // 2 - task.num_buffer_frames
    draw_range = [
        keyframe_idx - task.clip_vis_size,
        keyframe_idx + task.clip_vis_size,
    ]
    # Leading buffer frames are returned unmodified.
    buffer = frames[: task.num_buffer_frames]
    frames = frames[task.num_buffer_frames :]
    if boxes is not None:
        # Detection task: only draw when there is at least one box.
        if len(boxes) != 0:
            frames = video_vis.draw_clip_range(
                frames,
                preds,
                boxes,
                keyframe_idx=keyframe_idx,
                draw_range=draw_range,
            )
    else:
        frames = video_vis.draw_clip_range(
            frames, preds, keyframe_idx=keyframe_idx, draw_range=draw_range
        )
    del task
    return buffer + frames
| 9,808 | 29.653125 | 98 | py |
STTS | STTS-main/MViT/slowfast/visualization/predictor.py | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
import queue
import cv2
import torch
from detectron2 import model_zoo
from detectron2.config import get_cfg
from detectron2.engine import DefaultPredictor
import slowfast.utils.checkpoint as cu
from slowfast.datasets import cv2_transform
from slowfast.models import build_model
from slowfast.utils import logging
from slowfast.visualization.utils import process_cv2_inputs
logger = logging.get_logger(__name__)
class Predictor:
    """
    Action Predictor for action recognition.
    """
    def __init__(self, cfg, gpu_id=None):
        """
        Args:
            cfg (CfgNode): configs. Details can be found in
                slowfast/config/defaults.py
            gpu_id (Optional[int]): GPU id.
        """
        # NOTE(review): self.gpu_id is only set when cfg.NUM_GPUS is nonzero,
        # yet it is read below when DETECTION.ENABLE — confirm CPU+detection
        # is not a supported configuration.
        if cfg.NUM_GPUS:
            self.gpu_id = (
                torch.cuda.current_device() if gpu_id is None else gpu_id
            )
        # Build the video model and print model statistics.
        self.model = build_model(cfg, gpu_id=gpu_id)
        self.model.eval()
        self.cfg = cfg
        if cfg.DETECTION.ENABLE:
            self.object_detector = Detectron2Predictor(cfg, gpu_id=self.gpu_id)
        logger.info("Start loading model weights.")
        cu.load_test_checkpoint(cfg, self.model)
        logger.info("Finish loading model weights")
    def __call__(self, task):
        """
        Returns the prediction results for the current task.
        Args:
            task (TaskInfo object): task object that contain
                the necessary information for action prediction. (e.g. frames, boxes)
        Returns:
            task (TaskInfo object): the same task info object but filled with
                prediction values (a tensor) and the corresponding boxes for
                action detection task.
        """
        # For detection, first obtain person boxes from the object detector.
        if self.cfg.DETECTION.ENABLE:
            task = self.object_detector(task)
        frames, bboxes = task.frames, task.bboxes
        if bboxes is not None:
            bboxes = cv2_transform.scale_boxes(
                self.cfg.DATA.TEST_CROP_SIZE,
                bboxes,
                task.img_height,
                task.img_width,
            )
        if self.cfg.DEMO.INPUT_FORMAT == "BGR":
            frames = [
                cv2.cvtColor(frame, cv2.COLOR_BGR2RGB) for frame in frames
            ]
        frames = [
            cv2_transform.scale(self.cfg.DATA.TEST_CROP_SIZE, frame)
            for frame in frames
        ]
        inputs = process_cv2_inputs(frames, self.cfg)
        if bboxes is not None:
            # RoI-align expects boxes as (batch_index, x1, y1, x2, y2);
            # prepend a zero batch index column.
            index_pad = torch.full(
                size=(bboxes.shape[0], 1),
                fill_value=float(0),
                device=bboxes.device,
            )
            # Pad frame index for each box.
            bboxes = torch.cat([index_pad, bboxes], axis=1)
        if self.cfg.NUM_GPUS > 0:
            # Transfer the data to the current GPU device.
            if isinstance(inputs, (list,)):
                for i in range(len(inputs)):
                    inputs[i] = inputs[i].cuda(
                        device=torch.device(self.gpu_id), non_blocking=True
                    )
            else:
                inputs = inputs.cuda(
                    device=torch.device(self.gpu_id), non_blocking=True
                )
        # Skip the model when detection is on but no person was found.
        if self.cfg.DETECTION.ENABLE and not bboxes.shape[0]:
            preds = torch.tensor([])
        else:
            preds = self.model(inputs, bboxes)
        if self.cfg.NUM_GPUS:
            preds = preds.cpu()
            if bboxes is not None:
                bboxes = bboxes.detach().cpu()
        preds = preds.detach()
        task.add_action_preds(preds)
        if bboxes is not None:
            # Drop the batch-index column added above before storing.
            task.add_bboxes(bboxes[:, 1:])
        return task
class ActionPredictor:
    """
    Synchronous Action Prediction and Visualization pipeline with AsyncVis.
    """
    def __init__(self, cfg, async_vis=None, gpu_id=None):
        """
        Args:
            cfg (CfgNode): configs. Details can be found in
                slowfast/config/defaults.py
            async_vis (AsyncVis object): asynchronous visualizer.
            gpu_id (Optional[int]): GPU id.
        """
        self.predictor = Predictor(cfg=cfg, gpu_id=gpu_id)
        self.async_vis = async_vis
    def put(self, task):
        """
        Make prediction and put the results in `async_vis` task queue.
        Args:
            task (TaskInfo object): task object that contain
                the necessary information for action prediction. (e.g. frames, boxes)
        """
        # Prediction happens synchronously here; only drawing is async.
        task = self.predictor(task)
        self.async_vis.get_indices_ls.append(task.id)
        self.async_vis.put(task)
    def get(self):
        """
        Get the visualized clips if any.
        Raises:
            IndexError: if no result is available yet.
        """
        try:
            task = self.async_vis.get()
        except (queue.Empty, IndexError):
            raise IndexError("Results are not available yet.")
        return task
class Detectron2Predictor:
    """
    Thin wrapper around a Detectron2 model that returns the predicted person
    bounding boxes for the middle frame of a clip.
    """
    def __init__(self, cfg, gpu_id=None):
        """
        Args:
            cfg (CfgNode): configs. Details can be found in
                slowfast/config/defaults.py
            gpu_id (Optional[int]): GPU id.
        """
        self.cfg = get_cfg()
        self.cfg.merge_from_file(
            model_zoo.get_config_file(cfg.DEMO.DETECTRON2_CFG)
        )
        # Carry over the demo settings into the detectron2 config.
        self.cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = cfg.DEMO.DETECTRON2_THRESH
        self.cfg.MODEL.WEIGHTS = cfg.DEMO.DETECTRON2_WEIGHTS
        self.cfg.INPUT.FORMAT = cfg.DEMO.INPUT_FORMAT
        # Pick a device: the requested GPU, the current GPU, or CPU.
        if cfg.NUM_GPUS > 0:
            if gpu_id is None:
                gpu_id = torch.cuda.current_device()
            self.cfg.MODEL.DEVICE = "cuda:{}".format(gpu_id)
        else:
            self.cfg.MODEL.DEVICE = "cpu"
        logger.info("Initialized Detectron2 Object Detection Model.")
        self.predictor = DefaultPredictor(self.cfg)
    def __call__(self, task):
        """
        Return bounding boxes predictions as a tensor.
        Args:
            task (TaskInfo object): task object that contain
                the necessary information for action prediction. (e.g. frames)
        Returns:
            task (TaskInfo object): the same task info object but filled with
                prediction values (a tensor) and the corresponding boxes for
                action detection task.
        """
        # Detection runs on the middle frame of the clip only.
        middle_frame = task.frames[len(task.frames) // 2]
        instances = self.predictor(middle_frame)["instances"]
        # Keep only boxes whose predicted class is 0 ("person" in COCO).
        person_mask = instances.pred_classes == 0
        task.add_bboxes(instances.pred_boxes.tensor[person_mask])
        return task
| 6,847 | 31.923077 | 85 | py |
STTS | STTS-main/MViT/slowfast/visualization/gradcam_utils.py | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
import matplotlib.pyplot as plt
import torch
import torch.nn.functional as F
import slowfast.datasets.utils as data_utils
from slowfast.visualization.utils import get_layer
class GradCAM:
    """
    GradCAM class helps create localization maps using the Grad-CAM method for input videos
    and overlap the maps over the input videos as heatmaps.
    https://arxiv.org/pdf/1610.02391.pdf
    """
    def __init__(
        self, model, target_layers, data_mean, data_std, colormap="viridis"
    ):
        """
        Args:
            model (model): the model to be used.
            target_layers (list of str(s)): name of convolutional layer to be used to get
                gradients and feature maps from for creating localization maps.
            data_mean (tensor or list): mean value to add to input videos.
            data_std (tensor or list): std to multiply for input videos.
            colormap (Optional[str]): matplotlib colormap used to create heatmap.
                See https://matplotlib.org/3.3.0/tutorials/colors/colormaps.html
        """
        self.model = model
        # Run in eval mode.
        self.model.eval()
        self.target_layers = target_layers
        # Both dicts are populated lazily by the hooks registered below,
        # keyed by layer name.
        self.gradients = {}
        self.activations = {}
        self.colormap = plt.get_cmap(colormap)
        self.data_mean = data_mean
        self.data_std = data_std
        self._register_hooks()
    def _register_single_hook(self, layer_name):
        """
        Register forward and backward hook to a layer, given layer_name,
        to obtain gradients and activations.
        Args:
            layer_name (str): name of the layer.
        """
        def get_gradients(module, grad_input, grad_output):
            # Store the gradient w.r.t. the layer's output.
            self.gradients[layer_name] = grad_output[0].detach()
        def get_activations(module, input, output):
            self.activations[layer_name] = output.clone().detach()
        target_layer = get_layer(self.model, layer_name=layer_name)
        target_layer.register_forward_hook(get_activations)
        # NOTE(review): register_backward_hook is deprecated in newer PyTorch
        # in favor of register_full_backward_hook — confirm target torch version.
        target_layer.register_backward_hook(get_gradients)
    def _register_hooks(self):
        """
        Register hooks to layers in `self.target_layers`.
        """
        for layer_name in self.target_layers:
            self._register_single_hook(layer_name=layer_name)
    def _calculate_localization_map(self, inputs, labels=None):
        """
        Calculate localization map for all inputs with Grad-CAM.
        Args:
            inputs (list of tensor(s)): the input clips.
            labels (Optional[tensor]): labels of the current input clips.
        Returns:
            localization_maps (list of ndarray(s)): the localization map for
                each corresponding input.
            preds (tensor): shape (n_instances, n_class). Model predictions for `inputs`.
        """
        assert len(inputs) == len(
            self.target_layers
        ), "Must register the same number of target layers as the number of input pathways."
        # Clone so the forward/backward pass cannot mutate the caller's tensors.
        input_clone = [inp.clone() for inp in inputs]
        preds = self.model(input_clone)
        if labels is None:
            # Without labels, backprop from the top-scoring class per instance.
            score = torch.max(preds, dim=-1)[0]
        else:
            if labels.ndim == 1:
                labels = labels.unsqueeze(-1)
            score = torch.gather(preds, dim=1, index=labels)
        self.model.zero_grad()
        # Sum over the batch so one backward pass fills all gradient hooks.
        score = torch.sum(score)
        score.backward()
        localization_maps = []
        for i, inp in enumerate(inputs):
            _, _, T, H, W = inp.size()
            gradients = self.gradients[self.target_layers[i]]
            activations = self.activations[self.target_layers[i]]
            B, C, Tg, _, _ = gradients.size()
            # Grad-CAM channel weights: spatially averaged gradients per
            # channel and per (downsampled) time step.
            weights = torch.mean(gradients.view(B, C, Tg, -1), dim=3)
            weights = weights.view(B, C, Tg, 1, 1)
            # Weighted sum of activations over channels, then ReLU to keep
            # only positively contributing regions.
            localization_map = torch.sum(
                weights * activations, dim=1, keepdim=True
            )
            localization_map = F.relu(localization_map)
            # Upsample the map back to the input clip resolution.
            localization_map = F.interpolate(
                localization_map,
                size=(T, H, W),
                mode="trilinear",
                align_corners=False,
            )
            localization_map_min, localization_map_max = (
                torch.min(localization_map.view(B, -1), dim=-1, keepdim=True)[
                    0
                ],
                torch.max(localization_map.view(B, -1), dim=-1, keepdim=True)[
                    0
                ],
            )
            localization_map_min = torch.reshape(
                localization_map_min, shape=(B, 1, 1, 1, 1)
            )
            localization_map_max = torch.reshape(
                localization_map_max, shape=(B, 1, 1, 1, 1)
            )
            # Normalize the localization map.
            # The 1e-6 epsilon guards against division by zero on flat maps.
            localization_map = (localization_map - localization_map_min) / (
                localization_map_max - localization_map_min + 1e-6
            )
            localization_map = localization_map.data
            localization_maps.append(localization_map)
        return localization_maps, preds
    def __call__(self, inputs, labels=None, alpha=0.5):
        """
        Visualize the localization maps on their corresponding inputs as heatmap,
        using Grad-CAM.
        Args:
            inputs (list of tensor(s)): the input clips.
            labels (Optional[tensor]): labels of the current input clips.
            alpha (float): transparency level of the heatmap, in the range [0, 1].
        Returns:
            result_ls (list of tensor(s)): the visualized inputs.
            preds (tensor): shape (n_instances, n_class). Model predictions for `inputs`.
        """
        result_ls = []
        localization_maps, preds = self._calculate_localization_map(
            inputs, labels=labels
        )
        for i, localization_map in enumerate(localization_maps):
            # Convert (B, 1, T, H, W) to (B, T, H, W)
            localization_map = localization_map.squeeze(dim=1)
            if localization_map.device != torch.device("cpu"):
                localization_map = localization_map.cpu()
            heatmap = self.colormap(localization_map)
            # Colormaps return RGBA; drop the alpha channel.
            heatmap = heatmap[:, :, :, :, :3]
            # Permute input from (B, C, T, H, W) to (B, T, H, W, C)
            curr_inp = inputs[i].permute(0, 2, 3, 4, 1)
            if curr_inp.device != torch.device("cpu"):
                curr_inp = curr_inp.cpu()
            # Undo dataset normalization so the blend is in display space.
            curr_inp = data_utils.revert_tensor_normalize(
                curr_inp, self.data_mean, self.data_std
            )
            heatmap = torch.from_numpy(heatmap)
            # Alpha-blend heatmap over the (denormalized) input frames.
            curr_inp = alpha * heatmap + (1 - alpha) * curr_inp
            # Permute inp to (B, T, C, H, W)
            curr_inp = curr_inp.permute(0, 1, 4, 2, 3)
            result_ls.append(curr_inp)
        return result_ls, preds
| 6,958 | 37.877095 | 92 | py |
STTS | STTS-main/MViT/slowfast/visualization/utils.py | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
import itertools
import numpy as np
import matplotlib.pyplot as plt
import torch
from sklearn.metrics import confusion_matrix
import slowfast.utils.logging as logging
from slowfast.datasets.utils import pack_pathway_output, tensor_normalize
logger = logging.get_logger(__name__)
def get_confusion_matrix(preds, labels, num_classes, normalize="true"):
    """
    Compute the confusion matrix for the given predictions and labels.
    Args:
        preds (tensor or lists of tensors): predictions, each of shape
            (n_batch, num_classes). Must be on CPU.
        labels (tensor or lists of tensors): corresponding labels, each of
            shape (n_batch,) or one-hot (n_batch, num_classes). Must be on CPU.
        num_classes (int): number of classes.
        normalize (Optional[str]): {'true', 'pred', 'all'} or None; how to
            normalize the matrix (over rows, columns, or everything).
    Returns:
        ndarray: confusion matrix of shape (num_classes, num_classes).
    """
    # Accept either a single tensor or a list of per-batch tensors.
    preds = torch.cat(preds, dim=0) if isinstance(preds, list) else preds
    labels = torch.cat(labels, dim=0) if isinstance(labels, list) else labels
    # One-hot labels share the prediction tensor's rank; collapse to indices.
    if labels.ndim == preds.ndim:
        labels = torch.argmax(labels, dim=-1)
    pred_ids = torch.argmax(preds, dim=-1).flatten()
    true_ids = labels.flatten()
    return confusion_matrix(
        true_ids, pred_ids, labels=list(range(num_classes)), normalize=normalize
    )
def plot_confusion_matrix(cmtx, num_classes, class_names=None, figsize=None):
    """
    Render a confusion matrix as a labeled, colored matplotlib figure.
    Args:
        cmtx (ndarray): confusion matrix.
        num_classes (int): total number of classes.
        class_names (Optional[list of strs]): class names; defaults to
            stringified class indices when missing or not a list.
        figsize (Optional[float, float]): figure size; None uses the
            matplotlib default.
    Returns:
        figure: the matplotlib figure.
    """
    if class_names is None or type(class_names) != list:
        class_names = [str(i) for i in range(num_classes)]
    fig = plt.figure(figsize=figsize)
    plt.imshow(cmtx, interpolation="nearest", cmap=plt.cm.Blues)
    plt.title("Confusion matrix")
    plt.colorbar()
    ticks = np.arange(len(class_names))
    plt.xticks(ticks, class_names, rotation=45)
    plt.yticks(ticks, class_names)
    # Cells darker than half of the maximum get white text for contrast.
    cutoff = cmtx.max() / 2.0
    for row in range(cmtx.shape[0]):
        for col in range(cmtx.shape[1]):
            value = cmtx[row, col]
            plt.text(
                col,
                row,
                format(value, ".2f") if value != 0 else ".",
                horizontalalignment="center",
                color="white" if value > cutoff else "black",
            )
    plt.tight_layout()
    plt.ylabel("True label")
    plt.xlabel("Predicted label")
    return fig
def plot_topk_histogram(tag, array, k=10, class_names=None, figsize=None):
    """
    Plot a bar chart of the top-k values in `array`.
    Args:
        tag (str): histogram title.
        array (tensor): 1-D tensor to draw the top-k values from.
        k (int): number of top values to plot. Defaults to 10.
        class_names (list of strings, optional): names for the entries of
            `array`; indices are used when not given.
        figsize (Optional[float, float]): figure size; None uses the
            matplotlib default.
    Returns:
        fig (matplotlib figure): the histogram figure.
    """
    scores, indices = torch.topk(array, k)
    fig = plt.Figure(figsize=figsize, facecolor="w", edgecolor="k")
    ax = fig.add_subplot(1, 1, 1)
    if class_names is None:
        tick_labels = [str(i) for i in indices]
    else:
        tick_labels = [class_names[i] for i in indices]
    positions = np.arange(k)
    ax.bar(
        positions,
        scores,
        0.75,
        color="orange",
        tick_label=tick_labels,
        edgecolor="w",
        linewidth=1,
    )
    ax.set_xlabel("Candidates")
    ax.set_xticks(positions)
    ax.set_xticklabels(tick_labels, rotation=-45, ha="center")
    ax.xaxis.set_label_position("bottom")
    ax.xaxis.tick_bottom()
    ax.set_ylabel("Frequency")
    y_ticks = np.linspace(0, 1, num=10)
    ax.set_yticks(y_ticks)
    ax.set_yticklabels([format(t, ".1f") for t in y_ticks], ha="center")
    # Annotate each bar with its numeric value.
    for pos, score in enumerate(scores.numpy()):
        ax.text(
            pos - 0.1,
            score + 0.03,
            format(score, ".2f"),
            color="orange",
            fontweight="bold",
        )
    ax.set_title(tag)
    fig.set_tight_layout(True)
    return fig
class GetWeightAndActivation:
    """
    A class used to get weights and activations from specified layers of a
    PyTorch model by registering forward hooks.
    """
    def __init__(self, model, layers):
        """
        Args:
            model (nn.Module): the model containing layers to obtain weights and activations from.
            layers (list of strings): a list of layer names to obtain weights and activations from.
                Names are hierarchical, separated by /. For example, if a layer follows a path
                "s1" ---> "pathway0_stem" ---> "conv", the layer path is "s1/pathway0_stem/conv".
        """
        self.model = model
        self.hooks = {}
        self.layers_names = layers
        # Hooks should observe inference-time behavior, so switch to eval mode.
        self.model.eval()
        self._register_hooks()
    def _get_layer(self, layer_name):
        """
        Return a layer (nn.Module Object) given a hierarchical layer name, separated by /.
        Args:
            layer_name (str): the name of the layer.
        """
        # Delegate to the module-level `get_layer` helper instead of
        # duplicating the traversal logic here.
        return get_layer(self.model, layer_name=layer_name)
    def _register_single_hook(self, layer_name):
        """
        Register hook to a layer, given layer_name, to obtain activations.
        Args:
            layer_name (str): name of the layer.
        """
        def hook_fn(module, input, output):
            # Detach so stored activations do not keep the autograd graph alive.
            self.hooks[layer_name] = output.clone().detach()
        layer = get_layer(self.model, layer_name)
        layer.register_forward_hook(hook_fn)
    def _register_hooks(self):
        """
        Register hooks to layers in `self.layers_names`.
        """
        for layer_name in self.layers_names:
            self._register_single_hook(layer_name)
    def get_activations(self, input, bboxes=None):
        """
        Obtain all activations from layers that we register hooks for.
        Args:
            input (tensors, list of tensors): the model input.
            bboxes (Optional): bounding-box data that might be required
                by the model (e.g. detection models).
        Returns:
            activation_dict (Python dictionary): a dictionary of the pair
                {layer_name: activation}, where activations are outputs returned
                by the layer.
            preds: model predictions for `input`.
        """
        # Clone so hooks / the model cannot mutate the caller's tensors.
        input_clone = [inp.clone() for inp in input]
        if bboxes is not None:
            preds = self.model(input_clone, bboxes)
        else:
            preds = self.model(input_clone)
        # Snapshot the hook outputs gathered during this forward pass.
        activation_dict = dict(self.hooks)
        return activation_dict, preds
    def get_weights(self):
        """
        Returns weights from registered layers.
        Returns:
            weights (Python dictionary): a dictionary of the pair
                {layer_name: weight}, where weight is the weight tensor.
                Layers without a `weight` attribute are skipped and logged.
        """
        weights = {}
        for layer in self.layers_names:
            cur_layer = get_layer(self.model, layer)
            if hasattr(cur_layer, "weight"):
                weights[layer] = cur_layer.weight.clone().detach()
            else:
                logger.error(
                    "Layer {} does not have weight attribute.".format(layer)
                )
        return weights
def get_indexing(string):
    """
    Parse numpy-like fancy indexing from a string.
    Args:
        string (str): indices to take a subset of an array. Indices within
            one dimension are separated by `,`; dimensions are separated
            by `;`. E.g. for an array `arr` of shape (3,3,3), "1,2;1,2"
            means the sub-array `arr[[1,2], [1,2]]`.
    Returns:
        tuple: the parsed indexing, one list of ints per dimension.
    """
    return tuple(
        [int(token) for token in dim_spec.split(",")]
        for dim_spec in string.strip().split(";")
    )
def process_layer_index_data(layer_ls, layer_name_prefix=""):
    """
    Extract layer names and numpy-like fancy indexing from strings.
    Args:
        layer_ls (list of strs): each string holds a layer name optionally
            followed by whitespace and an indexing spec,
            e.g.: [layer1 1,2;2, layer2, layer3 150;3,4]
        layer_name_prefix (Optional[str]): prefix prepended to each layer name.
    Returns:
        layer_names (list of strings): the (prefixed) layer names.
        indexing (dict): {layer_name: parsed indexing tuple}; an empty tuple
            when no indexing spec was given.
    """
    layer_names = []
    indexing = {}
    for entry in layer_ls:
        parts = entry.split()
        full_name = layer_name_prefix + parts[0]
        layer_names.append(full_name)
        # A second whitespace-separated token, if present, is the indexing spec.
        indexing[full_name] = get_indexing(parts[1]) if len(parts) == 2 else ()
    return layer_names, indexing
def process_cv2_inputs(frames, cfg):
    """
    Normalize and prepare inputs as a list of tensors, one per pathway.
    Args:
        frames (list of array): input images for one clip, values in [0, 255].
        cfg (CfgNode): configs. Details can be found in
            slowfast/config/defaults.py
    """
    # Scale to [0, 1] and apply dataset normalization.
    clip = torch.from_numpy(np.array(frames)).float() / 255
    clip = tensor_normalize(clip, cfg.DATA.MEAN, cfg.DATA.STD)
    # T H W C -> C T H W.
    clip = clip.permute(3, 0, 1, 2)
    # Uniformly subsample cfg.DATA.NUM_FRAMES frames along the time axis.
    frame_ids = torch.linspace(0, clip.shape[1] - 1, cfg.DATA.NUM_FRAMES).long()
    clip = torch.index_select(clip, 1, frame_ids)
    # Split into per-pathway tensors and add the batch dimension.
    pathways = pack_pathway_output(cfg, clip)
    return [pathway.unsqueeze(0) for pathway in pathways]
def get_layer(model, layer_name):
    """
    Resolve a hierarchical layer name (components separated by /) to the
    corresponding submodule of `model`.
    Args:
        model (model): model to get layers from.
        layer_name (str): name of the layer, e.g. "s1/pathway0_stem/conv".
    Returns:
        nn.Module: the submodule reached by walking `layer_name`.
    """
    module = model
    for part in layer_name.split("/"):
        module = module._modules[part]
    return module
class TaskInfo:
    """
    Container shuttling one clip, its metadata, and its prediction results
    through the demo pipeline.
    """
    def __init__(self):
        # Clip payload and identity.
        self.frames = None
        self.id = -1
        # Prediction results.
        self.bboxes = None
        self.action_preds = None
        # Geometry / display metadata (filled in by the pipeline).
        self.num_buffer_frames = 0
        self.img_height = -1
        self.img_width = -1
        self.crop_size = -1
        self.clip_vis_size = -1
    def add_frames(self, idx, frames):
        """
        Attach a clip and its index.
        Args:
            idx (int): the current index of the clip.
            frames (list[ndarray]): list of images in "BGR" format.
        """
        self.frames = frames
        self.id = idx
    def add_bboxes(self, bboxes):
        """
        Attach the bounding boxes belonging to this clip.
        """
        self.bboxes = bboxes
    def add_action_preds(self, preds):
        """
        Attach the model's action predictions for this clip.
        """
        self.action_preds = preds
| 12,593 | 32.494681 | 101 | py |
STTS | STTS-main/MViT/slowfast/visualization/prediction_vis.py | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
import numpy as np
import torch
import slowfast.datasets.utils as data_utils
import slowfast.utils.logging as logging
import slowfast.visualization.tensorboard_vis as tb
from slowfast.utils.misc import get_class_names
from slowfast.visualization.video_visualizer import VideoVisualizer
logger = logging.get_logger(__name__)
class WrongPredictionVis:
    """
    WrongPredictionVis class for visualizing video inputs to Tensorboard
    for instances that the model makes wrong predictions.
    """
    def __init__(self, cfg):
        """
        Args:
            cfg (CfgNode): configs. Details can be found in
                slowfast/config/defaults.py
        """
        self.cfg = cfg
        self.class_names, _, self.subset = get_class_names(
            cfg.TENSORBOARD.CLASS_NAMES_PATH,
            subset_path=cfg.TENSORBOARD.WRONG_PRED_VIS.SUBSET_PATH,
        )
        # Convert the subset to a set for O(1) membership checks below.
        if self.subset is not None:
            self.subset = set(self.subset)
        self.num_class = cfg.MODEL.NUM_CLASSES
        self.video_vis = VideoVisualizer(
            cfg.MODEL.NUM_CLASSES,
            cfg.TENSORBOARD.CLASS_NAMES_PATH,
            1,
            cfg.TENSORBOARD.MODEL_VIS.COLORMAP,
        )
        self.tag = cfg.TENSORBOARD.WRONG_PRED_VIS.TAG
        self.writer = tb.TensorboardWriter(cfg)
        # Accumulates the true classes of all misclassified instances seen so far.
        self.model_incorrect_classes = set()
    def _pick_wrong_preds(self, labels, preds):
        """
        Returns a 1D tensor that contains the indices of instances that have
        wrong predictions, where true labels in in the specified subset.
        Args:
            labels (tensor): tensor of shape (n_instances,) containing class ids.
            preds (tensor): class scores from model, shape (n_intances, n_classes)
        Returns:
            mask (tensor): boolean tensor. `mask[i]` is True if `model` makes a wrong prediction.
        """
        # Start with all instances eligible; drop those outside the subset.
        subset_mask = torch.ones(size=(len(labels),), dtype=torch.bool)
        if self.subset is not None:
            for i, label in enumerate(labels):
                if label not in self.subset:
                    subset_mask[i] = False
        preds_ids = torch.argmax(preds, dim=-1)
        mask = preds_ids != labels
        mask &= subset_mask
        # Record the true class of every misclassified instance.
        # NOTE(review): `labels[i]` is a 0-dim tensor here, so the set stores
        # tensor elements, not plain ints — indexing class_names still works.
        for i, wrong_pred in enumerate(mask):
            if wrong_pred:
                self.model_incorrect_classes.add(labels[i])
        return mask
    def visualize_vid(self, video_input, labels, preds, batch_idx):
        """
        Draw predicted labels on video inputs and visualize all incorrectly classified
        videos in the current batch.
        Args:
            video_input (list of list of tensor(s)): list of videos for all pathways.
            labels (array-like): shape (n_instances,) of true label for each instance.
            preds (tensor): shape (n, instances, n_classes). The predicted scores for all instances.
            tag (Optional[str]): all visualized video will be added under this tag. This is for organization
                purposes in Tensorboard.
            batch_idx (int): batch index of the current videos.
        """
        def add_video(vid, preds, tag, true_class_name):
            """
            Draw predicted label on video and add it to Tensorboard.
            Args:
                vid (array-like): shape (C, T, H, W). Each image in `vid` is a RGB image.
                preds (tensor): shape (n_classes,) or (1, n_classes). The predicted scores
                    for the current `vid`.
                tag (str): tag for `vid` in Tensorboard.
                true_class_name (str): the ground-truth class name of the current `vid` instance.
            """
            # Permute to (T, H, W, C).
            vid = vid.permute(1, 2, 3, 0)
            # Undo dataset normalization so frames render in display space.
            vid = data_utils.revert_tensor_normalize(
                vid.cpu(), self.cfg.DATA.MEAN, self.cfg.DATA.STD
            )
            vid = self.video_vis.draw_clip(vid, preds)
            # Back to (T, C, H, W), then add a leading batch dimension.
            vid = torch.from_numpy(np.array(vid)).permute(0, 3, 1, 2)
            vid = torch.unsqueeze(vid, dim=0)
            self.writer.add_video(
                vid, tag="{}: {}".format(tag, true_class_name)
            )
        mask = self._pick_wrong_preds(labels, preds)
        video_indices = torch.squeeze(mask.nonzero(), dim=-1)
        # Visualize each wrongly classfied video.
        for vid_idx in video_indices:
            # Global index of this video across all batches.
            cur_vid_idx = batch_idx * len(video_input[0]) + vid_idx
            for pathway in range(len(video_input)):
                add_video(
                    video_input[pathway][vid_idx],
                    preds=preds[vid_idx],
                    tag=self.tag
                    + "/Video {}, Pathway {}".format(cur_vid_idx, pathway),
                    true_class_name=self.class_names[labels[vid_idx]],
                )
    @property
    def wrong_class_prediction(self):
        """
        Return class ids that the model predicted incorrectly.
        """
        incorrect_class_names = [
            self.class_names[i] for i in self.model_incorrect_classes
        ]
        # Deduplicate names (different ids can share a name).
        return list(set(incorrect_class_names))
    def clean(self):
        """
        Close Tensorboard writer.
        """
        self.writer.close()
| 5,296 | 37.948529 | 108 | py |
STTS | STTS-main/MViT/slowfast/visualization/ava_demo_precomputed_boxes.py | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
import numpy as np
import os
import cv2
import torch
import tqdm
import slowfast.utils.checkpoint as cu
import slowfast.utils.logging as logging
from slowfast.datasets.ava_helper import parse_bboxes_file
from slowfast.datasets.cv2_transform import scale, scale_boxes
from slowfast.datasets.utils import get_sequence
from slowfast.models import build_model
from slowfast.utils import misc
from slowfast.utils.env import pathmgr
from slowfast.visualization.utils import process_cv2_inputs
from slowfast.visualization.video_visualizer import VideoVisualizer
logger = logging.get_logger(__name__)
class AVAVisualizerWithPrecomputedBox:
    """
    Visualize action predictions for videos or folder of images with precomputed
    and ground-truth boxes in AVA format.
    """
    def __init__(self, cfg):
        """
        Args:
            cfg (CfgNode): configs. Details can be found in
                slowfast/config/defaults.py
        """
        self.source = pathmgr.get_local_path(path=cfg.DEMO.INPUT_VIDEO)
        self.fps = None
        if pathmgr.isdir(self.source):
            # Folder-of-images input: take the fps from the config and build
            # a printf-style frame pattern that cv2.VideoCapture can read.
            self.fps = cfg.DEMO.FPS
            self.video_name = self.source.split("/")[-1]
            self.source = os.path.join(
                self.source, "{}_%06d.jpg".format(self.video_name)
            )
        else:
            # Video-file input: name is the file basename without extension.
            self.video_name = self.source.split("/")[-1]
            self.video_name = self.video_name.split(".")[0]
        self.cfg = cfg
        self.cap = cv2.VideoCapture(self.source)
        if self.fps is None:
            self.fps = self.cap.get(cv2.CAP_PROP_FPS)
        self.total_frames = int(self.cap.get(cv2.CAP_PROP_FRAME_COUNT))
        self.display_width = int(self.cap.get(cv2.CAP_PROP_FRAME_WIDTH))
        self.display_height = int(self.cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
        if not self.cap.isOpened():
            raise IOError("Video {} cannot be opened".format(self.source))
        self.output_file = None
        if cfg.DEMO.OUTPUT_FILE != "":
            self.output_file = self.get_output_file(cfg.DEMO.OUTPUT_FILE)
        self.pred_boxes, self.gt_boxes = load_boxes_labels(
            cfg,
            self.video_name,
            self.fps,
            self.display_width,
            self.display_height,
        )
        # Number of raw frames spanned by one model clip.
        self.seq_length = cfg.DATA.NUM_FRAMES * cfg.DATA.SAMPLING_RATE
        self.no_frames_repeat = cfg.DEMO.SLOWMO
    def get_output_file(self, path):
        """
        Return a video writer object.
        Args:
            path (str): path to the output video file.
        """
        # NOTE(review): the output fps is hard-coded to 30 while self.fps may
        # differ — confirm this is intended.
        return cv2.VideoWriter(
            filename=path,
            fourcc=cv2.VideoWriter_fourcc(*"mp4v"),
            fps=float(30),
            frameSize=(self.display_width, self.display_height),
            isColor=True,
        )
    def get_input_clip(self, keyframe_idx):
        """
        Get input clip from the video/folder of images for a given
        keyframe index.
        Args:
            keyframe_idx (int): index of the current keyframe.
        Returns:
            clip (list of tensors): formatted input clip(s) corresponding to
                the current keyframe.
        """
        seq = get_sequence(
            keyframe_idx,
            self.seq_length // 2,
            self.cfg.DATA.SAMPLING_RATE,
            self.total_frames,
        )
        clip = []
        for frame_idx in seq:
            # Seek to each sampled frame individually.
            self.cap.set(cv2.CAP_PROP_POS_FRAMES, frame_idx)
            was_read, frame = self.cap.read()
            if was_read:
                frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
                frame = scale(self.cfg.DATA.TEST_CROP_SIZE, frame)
                clip.append(frame)
            else:
                # Keep the clip length fixed by repeating the last good frame.
                logger.error(
                    "Unable to read frame. Duplicating previous frame."
                )
                clip.append(clip[-1])
        clip = process_cv2_inputs(clip, self.cfg)
        return clip
    def get_predictions(self):
        """
        Predict and append prediction results to each box in each keyframe in
        `self.pred_boxes` dictionary.
        """
        # Set random seed from configs.
        np.random.seed(self.cfg.RNG_SEED)
        torch.manual_seed(self.cfg.RNG_SEED)
        # Setup logging format.
        logging.setup_logging(self.cfg.OUTPUT_DIR)
        # Print config.
        logger.info("Run demo with config:")
        logger.info(self.cfg)
        assert (
            self.cfg.NUM_GPUS <= 1
        ), "Cannot run demo visualization on multiple GPUs."
        # Build the video model and print model statistics.
        model = build_model(self.cfg)
        model.eval()
        logger.info("Start loading model info")
        misc.log_model_info(model, self.cfg, use_train_input=False)
        logger.info("Start loading model weights")
        cu.load_test_checkpoint(self.cfg, model)
        logger.info("Finish loading model weights")
        logger.info("Start making predictions for precomputed boxes.")
        for keyframe_idx, boxes_and_labels in tqdm.tqdm(
            self.pred_boxes.items()
        ):
            inputs = self.get_input_clip(keyframe_idx)
            boxes = boxes_and_labels[0]
            boxes = torch.from_numpy(np.array(boxes)).float()
            # Rescale boxes from display resolution to the model's crop size.
            box_transformed = scale_boxes(
                self.cfg.DATA.TEST_CROP_SIZE,
                boxes,
                self.display_height,
                self.display_width,
            )
            # Pad frame index for each box.
            box_inputs = torch.cat(
                [
                    torch.full((box_transformed.shape[0], 1), float(0)),
                    box_transformed,
                ],
                axis=1,
            )
            if self.cfg.NUM_GPUS:
                # Transfer the data to the current GPU device.
                if isinstance(inputs, (list,)):
                    for i in range(len(inputs)):
                        inputs[i] = inputs[i].cuda(non_blocking=True)
                else:
                    inputs = inputs.cuda(non_blocking=True)
                box_inputs = box_inputs.cuda()
            preds = model(inputs, box_inputs)
            preds = preds.detach()
            if self.cfg.NUM_GPUS:
                preds = preds.cpu()
            # Replace the placeholder label slot with the model's scores.
            boxes_and_labels[1] = preds
    def draw_video(self):
        """
        Draw predicted and ground-truth (if provided) results on the video/folder of images.
        Write the visualized result to a video output file.
        """
        all_boxes = merge_pred_gt_boxes(self.pred_boxes, self.gt_boxes)
        common_classes = (
            self.cfg.DEMO.COMMON_CLASS_NAMES
            if len(self.cfg.DEMO.LABEL_FILE_PATH) != 0
            else None
        )
        video_vis = VideoVisualizer(
            num_classes=self.cfg.MODEL.NUM_CLASSES,
            class_names_path=self.cfg.DEMO.LABEL_FILE_PATH,
            top_k=self.cfg.TENSORBOARD.MODEL_VIS.TOPK_PREDS,
            thres=self.cfg.DEMO.COMMON_CLASS_THRES,
            lower_thres=self.cfg.DEMO.UNCOMMON_CLASS_THRES,
            common_class_names=common_classes,
            colormap=self.cfg.TENSORBOARD.MODEL_VIS.COLORMAP,
            mode=self.cfg.DEMO.VIS_MODE,
        )
        all_keys = sorted(all_boxes.keys())
        # Draw around the keyframe for 2/10 of the sequence length.
        # This is chosen using heuristics.
        draw_range = [
            self.seq_length // 2 - self.seq_length // 10,
            self.seq_length // 2 + self.seq_length // 10,
        ]
        # Expanded range used after the first box-set repeats (slow-motion)
        # frames within draw_range.
        draw_range_repeat = [
            draw_range[0],
            (draw_range[1] - draw_range[0]) * self.no_frames_repeat
            + draw_range[0],
        ]
        prev_buffer = []
        prev_end_idx = 0
        logger.info("Start Visualization...")
        for keyframe_idx in tqdm.tqdm(all_keys):
            pred_gt_boxes = all_boxes[keyframe_idx]
            # Find the starting index of the clip. If start_idx exceeds the beginning
            # of the video, we only choose valid frame from index 0.
            start_idx = max(0, keyframe_idx - self.seq_length // 2)
            # Number of frames from the start of the current clip and the
            # end of the previous clip.
            dist = start_idx - prev_end_idx
            # If there are unwritten frames in between clips.
            if dist >= 0:
                # Get the frames in between previous clip and current clip.
                frames = self._get_frame_range(prev_end_idx, dist)
                # We keep a buffer of frames for overlapping visualization.
                # Write these to the output file.
                for frame in prev_buffer:
                    frame = cv2.cvtColor(frame, cv2.COLOR_RGB2BGR)
                    self.display(frame)
                # Write them to output file without any visualization
                # since they don't have any corresponding keyframes.
                for frame in frames:
                    self.display(frame)
                prev_buffer = []
                num_new_frames = self.seq_length
            # If there are overlapping frames in between clips.
            elif dist < 0:
                # Flush all ready frames.
                for frame in prev_buffer[:dist]:
                    frame = cv2.cvtColor(frame, cv2.COLOR_RGB2BGR)
                    self.display(frame)
                prev_buffer = prev_buffer[dist:]
                num_new_frames = self.seq_length + dist
            # Obtain new frames for the current clip from the input video file.
            new_frames = self._get_frame_range(
                max(start_idx, prev_end_idx), num_new_frames
            )
            new_frames = [
                cv2.cvtColor(frame, cv2.COLOR_BGR2RGB) for frame in new_frames
            ]
            clip = prev_buffer + new_frames
            # Calculate the end of this clip. This will be `prev_end_idx` for the
            # next iteration.
            prev_end_idx = max(start_idx, prev_end_idx) + len(new_frames)
            # For each precomputed or gt boxes.
            for i, boxes in enumerate(pred_gt_boxes):
                if i == 0:
                    repeat = self.no_frames_repeat
                    current_draw_range = draw_range
                else:
                    repeat = 1
                    current_draw_range = draw_range_repeat
                # Make sure draw range does not fall out of end of clip.
                # NOTE(review): this mutates `draw_range`/`draw_range_repeat`
                # in place, so the clamp persists across keyframe iterations —
                # confirm this is intended.
                current_draw_range[1] = min(
                    current_draw_range[1], len(clip) - 1
                )
                ground_truth = boxes[0]
                bboxes = boxes[1]
                label = boxes[2]
                # Draw predictions.
                clip = video_vis.draw_clip_range(
                    clip,
                    label,
                    bboxes=torch.Tensor(bboxes),
                    ground_truth=ground_truth,
                    draw_range=current_draw_range,
                    repeat_frame=repeat,
                )
            # Store the current clip as buffer.
            prev_buffer = clip
        # Write the remaining buffer to output file.
        for frame in prev_buffer:
            frame = cv2.cvtColor(frame, cv2.COLOR_RGB2BGR)
            self.display(frame)
        # If we still have some remaining frames in the input file,
        # write those to the output file as well.
        if prev_end_idx < self.total_frames:
            dist = self.total_frames - prev_end_idx
            remaining_clip = self._get_frame_range(prev_end_idx, dist)
            for frame in remaining_clip:
                self.display(frame)
    def __call__(self):
        # Run the two phases: inference, then rendering.
        self.get_predictions()
        self.draw_video()
    def display(self, frame):
        """
        Either display a single frame (BGR image) to a window or write to
        an output file if output path is provided.
        """
        if self.output_file is None:
            cv2.imshow("SlowFast", frame)
        else:
            self.output_file.write(frame)
    def _get_keyframe_clip(self, keyframe_idx):
        """
        Return a clip corresponding to a keyframe index for visualization.
        Args:
            keyframe_idx (int): keyframe index.
        """
        # NOTE(review): this helper is not referenced elsewhere in this class.
        start_idx = max(0, keyframe_idx - self.seq_length // 2)
        clip = self._get_frame_range(start_idx, self.seq_length)
        return clip
    def _get_frame_range(self, start_idx, num_frames):
        """
        Return a clip of `num_frames` frames starting from `start_idx`. If not enough frames
        from `start_idx`, return the remaining frames from `start_idx`.
        Args:
            start_idx (int): starting idx.
            num_frames (int): number of frames in the returned clip.
        """
        was_read = True
        assert start_idx < self.total_frames, "Start index out of range."
        self.cap.set(cv2.CAP_PROP_POS_FRAMES, start_idx)
        all_frames = []
        for _ in range(num_frames):
            was_read, frame = self.cap.read()
            if was_read:
                all_frames.append(frame)
            else:
                # Stop early when the capture runs out of frames.
                break
        return all_frames
def merge_pred_gt_boxes(pred_dict, gt_dict=None):
    """
    Combine predicted and (optionally) ground-truth box annotations.
    Args:
        pred_dict (dict): maps `frame_idx` to a list of `boxes` and `labels`.
            Each `box` is a list of 4 box coordinates; `labels[i]` is a list
            of labels for `boxes[i]`.
        gt_dict (Optional[dict]): same layout as `pred_dict` but for
            ground-truth annotations. Note that label is -1 for predicted boxes.
    Returns:
        dict: maps `frame_idx` to a list of `[is_gt, boxes, labels]` entries,
        where `is_gt` flags whether the entry came from `gt_dict`. Predicted
        entries come first for frames that have both kinds.
    """
    merged = {}
    # Predictions are tagged is_gt=False and seed the per-frame lists.
    for frame_idx, item in pred_dict.items():
        merged[frame_idx] = [[False, item[0], item[1]]]
    if gt_dict is not None:
        # Ground truth is tagged is_gt=True and appended after any prediction
        # entry for the same frame (or starts a new list for gt-only frames).
        for frame_idx, item in gt_dict.items():
            merged.setdefault(frame_idx, []).append([True, item[0], item[1]])
    return merged
def load_boxes_labels(cfg, video_name, fps, img_width, img_height):
    """
    Loading boxes and labels from AVA bounding boxes csv files.
    Args:
        cfg (CfgNode): config.
        video_name (str): name of the given video.
        fps (int or float): frames per second of the input video/images folder.
        img_width (int): width of images in input video/images folder.
        img_height (int): height of images in input video/images folder.
    Returns:
        preds_boxes (dict): a dict which maps from `frame_idx` to a list of `boxes`
            and `labels`. Each `box` is a list of 4 box coordinates. `labels[i]` is
            a list of labels for `boxes[i]`. Note that label is -1 for predicted boxes.
        gt_boxes (dict): if cfg.DEMO.GT_BOXES is given, return similar dict as
            all_pred_boxes but for ground-truth boxes.
    """
    starting_second = cfg.DEMO.STARTING_SECOND

    def sec_to_frameidx(sec):
        # Convert an AVA keyframe timestamp (seconds) to a frame index.
        # NOTE(review): the result is a float when `fps` is float, so the
        # returned dicts would then be keyed by floats -- confirm callers
        # look frames up with matching key types.
        return (sec - starting_second) * fps

    def process_bboxes_dict(dictionary):
        """
        Replace all `keyframe_sec` in `dictionary` with `keyframe_idx` and
        merge all [`box_coordinate`, `box_labels`] pairs into
        [`all_boxes_coordinates`, `all_boxes_labels`] for each `keyframe_idx`.
        Args:
            dictionary (dict): a dictionary which maps `frame_sec` to a list of `box`.
                Each `box` is a [`box_coord`, `box_labels`] where `box_coord` is the
                coordinates of box and 'box_labels` are the corresponding
                labels for the box.
        Returns:
            new_dict (dict): a dict which maps from `frame_idx` to a list of `boxes`
                and `labels`. Each `box` in `boxes` is a list of 4 box coordinates. `labels[i]`
                is a list of labels for `boxes[i]`. Note that label is -1 for predicted boxes.
        """
        # Replace all keyframe_sec with keyframe_idx.
        new_dict = {}
        for keyframe_sec, boxes_and_labels in dictionary.items():
            # Ignore keyframes with no boxes
            if len(boxes_and_labels) == 0:
                continue
            keyframe_idx = sec_to_frameidx(keyframe_sec)
            boxes, labels = list(zip(*boxes_and_labels))
            # Shift labels from [1, n_classes] to [0, n_classes - 1].
            labels = [[i - 1 for i in box_label] for box_label in labels]
            boxes = np.array(boxes)
            # Boxes are stored normalized in [0, 1]; scale to pixel coordinates.
            boxes[:, [0, 2]] *= img_width
            boxes[:, [1, 3]] *= img_height
            new_dict[keyframe_idx] = [boxes.tolist(), list(labels)]
        return new_dict

    preds_boxes_path = cfg.DEMO.PREDS_BOXES
    gt_boxes_path = cfg.DEMO.GT_BOXES
    # Parse the predicted-box csv; detections below the score threshold are
    # filtered by parse_bboxes_file.
    preds_boxes, _, _ = parse_bboxes_file(
        ann_filenames=[preds_boxes_path],
        ann_is_gt_box=[False],
        detect_thresh=cfg.AVA.DETECTION_SCORE_THRESH,
        boxes_sample_rate=1,
    )
    preds_boxes = preds_boxes[video_name]
    # An empty GT path means no ground-truth annotations were supplied.
    if gt_boxes_path == "":
        gt_boxes = None
    else:
        gt_boxes, _, _ = parse_bboxes_file(
            ann_filenames=[gt_boxes_path],
            ann_is_gt_box=[True],
            detect_thresh=cfg.AVA.DETECTION_SCORE_THRESH,
            boxes_sample_rate=1,
        )
        gt_boxes = gt_boxes[video_name]
    preds_boxes = process_bboxes_dict(preds_boxes)
    if gt_boxes is not None:
        gt_boxes = process_bboxes_dict(gt_boxes)
    return preds_boxes, gt_boxes
| 17,969 | 37.397436 | 98 | py |
STTS | STTS-main/MViT/slowfast/visualization/tensorboard_vis.py | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
import logging as log
import math
import os
import matplotlib.pyplot as plt
import torch
from torch.utils.tensorboard import SummaryWriter
from torchvision.utils import make_grid
import slowfast.utils.logging as logging
import slowfast.visualization.utils as vis_utils
from slowfast.utils.misc import get_class_names
logger = logging.get_logger(__name__)
log.getLogger("matplotlib").setLevel(log.ERROR)
class TensorboardWriter(object):
    """
    Helper class to log information to Tensorboard.

    Wraps a `torch.utils.tensorboard.SummaryWriter` and adds SlowFast-specific
    plotting helpers (confusion matrices, per-class histograms, and
    weight/activation image grids).
    """

    def __init__(self, cfg):
        """
        Args:
            cfg (CfgNode): configs. Details can be found in
                slowfast/config/defaults.py
        """
        # class_names: list of class names.
        # cm_subset_classes: a list of class ids -- a user-specified subset.
        # parent_map: dictionary where key is the parent class name and
        #     value is a list of ids of its children classes.
        # hist_subset_classes: a list of class ids -- user-specified to plot
        #     histograms.
        (
            self.class_names,
            self.cm_subset_classes,
            self.parent_map,
            self.hist_subset_classes,
        ) = (None, None, None, None)
        self.cfg = cfg
        self.cm_figsize = cfg.TENSORBOARD.CONFUSION_MATRIX.FIGSIZE
        self.hist_figsize = cfg.TENSORBOARD.HISTOGRAM.FIGSIZE

        # Default log dir is derived from the training dataset name.
        if cfg.TENSORBOARD.LOG_DIR == "":
            log_dir = os.path.join(
                cfg.OUTPUT_DIR, "runs-{}".format(cfg.TRAIN.DATASET)
            )
        else:
            log_dir = os.path.join(cfg.OUTPUT_DIR, cfg.TENSORBOARD.LOG_DIR)

        self.writer = SummaryWriter(log_dir=log_dir)
        logger.info(
            "To see logged results in Tensorboard, please launch using the command \
            `tensorboard --port=<port-number> --logdir {}`".format(
                log_dir
            )
        )

        if cfg.TENSORBOARD.CLASS_NAMES_PATH != "":
            # Confusion-matrix plotting needs a class-name mapping; it is not
            # supported for detection (only logged, not enforced).
            if cfg.DETECTION.ENABLE:
                logger.info(
                    "Plotting confusion matrix is currently \
                    not supported for detection."
                )
            (
                self.class_names,
                self.parent_map,
                self.cm_subset_classes,
            ) = get_class_names(
                cfg.TENSORBOARD.CLASS_NAMES_PATH,
                cfg.TENSORBOARD.CATEGORIES_PATH,
                cfg.TENSORBOARD.CONFUSION_MATRIX.SUBSET_PATH,
            )

            if cfg.TENSORBOARD.HISTOGRAM.ENABLE:
                if cfg.DETECTION.ENABLE:
                    logger.info(
                        "Plotting histogram is not currently \
                    supported for detection tasks."
                    )
                if cfg.TENSORBOARD.HISTOGRAM.SUBSET_PATH != "":
                    # Only the histogram subset ids are needed here.
                    _, _, self.hist_subset_classes = get_class_names(
                        cfg.TENSORBOARD.CLASS_NAMES_PATH,
                        None,
                        cfg.TENSORBOARD.HISTOGRAM.SUBSET_PATH,
                    )

    def add_scalars(self, data_dict, global_step=None):
        """
        Add multiple scalars to Tensorboard logs.
        Args:
            data_dict (dict): key is a string specifying the tag of value.
            global_step (Optinal[int]): Global step value to record.
        """
        if self.writer is not None:
            for key, item in data_dict.items():
                self.writer.add_scalar(key, item, global_step)

    def plot_eval(self, preds, labels, global_step=None):
        """
        Plot confusion matrices and histograms for eval/test set.
        Args:
            preds (tensor or list of tensors): list of predictions.
            labels (tensor or list of tensors): list of labels.
            global step (Optional[int]): current step in eval/test.
        """
        # Confusion matrices / histograms are skipped entirely for detection.
        if not self.cfg.DETECTION.ENABLE:
            cmtx = None
            if self.cfg.TENSORBOARD.CONFUSION_MATRIX.ENABLE:
                cmtx = vis_utils.get_confusion_matrix(
                    preds, labels, self.cfg.MODEL.NUM_CLASSES
                )
                # Add full confusion matrix.
                add_confusion_matrix(
                    self.writer,
                    cmtx,
                    self.cfg.MODEL.NUM_CLASSES,
                    global_step=global_step,
                    class_names=self.class_names,
                    figsize=self.cm_figsize,
                )
                # If a list of subset is provided, plot confusion matrix subset.
                if self.cm_subset_classes is not None:
                    add_confusion_matrix(
                        self.writer,
                        cmtx,
                        self.cfg.MODEL.NUM_CLASSES,
                        global_step=global_step,
                        subset_ids=self.cm_subset_classes,
                        class_names=self.class_names,
                        tag="Confusion Matrix Subset",
                        figsize=self.cm_figsize,
                    )
                # If a parent-child classes mapping is provided, plot confusion
                # matrices grouped by parent classes.
                if self.parent_map is not None:
                    # Get list of tags (parent categories names) and their children.
                    for parent_class, children_ls in self.parent_map.items():
                        tag = (
                            "Confusion Matrices Grouped by Parent Classes/"
                            + parent_class
                        )
                        add_confusion_matrix(
                            self.writer,
                            cmtx,
                            self.cfg.MODEL.NUM_CLASSES,
                            global_step=global_step,
                            subset_ids=children_ls,
                            class_names=self.class_names,
                            tag=tag,
                            figsize=self.cm_figsize,
                        )
            if self.cfg.TENSORBOARD.HISTOGRAM.ENABLE:
                # Reuse the matrix computed above when available.
                if cmtx is None:
                    cmtx = vis_utils.get_confusion_matrix(
                        preds, labels, self.cfg.MODEL.NUM_CLASSES
                    )
                plot_hist(
                    self.writer,
                    cmtx,
                    self.cfg.MODEL.NUM_CLASSES,
                    self.cfg.TENSORBOARD.HISTOGRAM.TOPK,
                    global_step=global_step,
                    subset_ids=self.hist_subset_classes,
                    class_names=self.class_names,
                    figsize=self.hist_figsize,
                )

    def add_video(self, vid_tensor, tag="Video Input", global_step=None, fps=4):
        """
        Add input to tensorboard SummaryWriter as a video.
        Args:
            vid_tensor (tensor): shape of (B, T, C, H, W). Values should lie
                [0, 255] for type uint8 or [0, 1] for type float.
            tag (Optional[str]): name of the video.
            global_step(Optional[int]): current step.
            fps (int): frames per second.
        """
        self.writer.add_video(tag, vid_tensor, global_step=global_step, fps=fps)

    def plot_weights_and_activations(
        self,
        weight_activation_dict,
        tag="",
        normalize=False,
        global_step=None,
        batch_idx=None,
        indexing_dict=None,
        heat_map=True,
    ):
        """
        Visualize weights/ activations tensors to Tensorboard.
        Args:
            weight_activation_dict (dict[str, tensor]): a dictionary of the pair {layer_name: tensor},
                where layer_name is a string and tensor is the weights/activations of
                the layer we want to visualize.
            tag (Optional[str]): name of the video.
            normalize (bool): If True, the tensor is normalized. (Default to False)
            global_step(Optional[int]): current step.
            batch_idx (Optional[int]): current batch index to visualize. If None,
                visualize the entire batch.
            indexing_dict (Optional[dict]): a dictionary of the {layer_name: indexing}.
                where indexing is numpy-like fancy indexing.
            heatmap (bool): whether to add heatmap to the weights/ activations.
        """
        for name, array in weight_activation_dict.items():
            if batch_idx is None:
                # Select all items in the batch if batch_idx is not provided.
                # NOTE(review): this rebinds `batch_idx`, so the batch size of
                # the first tensor is reused for every later tensor in the
                # dict -- confirm all entries share the same batch dimension.
                batch_idx = list(range(array.shape[0]))
            if indexing_dict is not None:
                fancy_indexing = indexing_dict[name]
                fancy_indexing = (batch_idx,) + fancy_indexing
                array = array[fancy_indexing]
            else:
                array = array[batch_idx]
            add_ndim_array(
                self.writer,
                array,
                tag + name,
                normalize=normalize,
                global_step=global_step,
                heat_map=heat_map,
            )

    def flush(self):
        # Flush pending events to disk without closing the writer.
        self.writer.flush()

    def close(self):
        # Flush and release the underlying SummaryWriter.
        self.writer.flush()
        self.writer.close()
def add_confusion_matrix(
    writer,
    cmtx,
    num_classes,
    global_step=None,
    subset_ids=None,
    class_names=None,
    tag="Confusion Matrix",
    figsize=None,
):
    """
    Plot a (subset of a) confusion matrix and log the figure to Tensorboard.
    Args:
        writer (SummaryWriter): the SummaryWriter to write the matrix to.
        cmtx (ndarray): confusion matrix.
        num_classes (int): total number of classes.
        global_step (Optional[int]): current step.
        subset_ids (list of ints): a list of label indices to keep.
        class_names (list of strs, optional): a list of all class names.
        tag (str or list of strs): name(s) of the confusion matrix image.
        figsize (Optional[float, float]): the figure size of the confusion matrix.
            If None, default to [6.4, 4.8].
    """
    # An explicitly empty subset means "plot nothing".
    if subset_ids is not None and len(subset_ids) == 0:
        return
    # Fall back to class indices as names when no mapping is given.
    if class_names is None:
        class_names = [str(i) for i in range(num_classes)]
    # Fall back to every class when no subset is given.
    if subset_ids is None:
        subset_ids = list(range(num_classes))
    # Restrict both rows and columns of the matrix to the subset.
    sub_cmtx = cmtx[subset_ids, :][:, subset_ids]
    sub_names = [class_names[j] for j in subset_ids]
    figure = vis_utils.plot_confusion_matrix(
        sub_cmtx,
        num_classes=len(subset_ids),
        class_names=sub_names,
        figsize=figsize,
    )
    # Add the confusion matrix image to writer.
    writer.add_figure(tag=tag, figure=figure, global_step=global_step)
def plot_hist(
    writer,
    cmtx,
    num_classes,
    k=10,
    global_step=None,
    subset_ids=None,
    class_names=None,
    figsize=None,
):
    """
    Given all predictions and all true labels, plot histograms of top-k most
    frequently predicted classes for each true class.
    Args:
        writer (SummaryWriter object): a tensorboard SummaryWriter object.
        cmtx (ndarray): confusion matrix.
        num_classes (int): total number of classes.
        k (int): top k to plot histograms.
        global_step (Optional[int]): current step.
        subset_ids (list of ints, optional): class indices to plot histogram.
        mapping (list of strings): names of all classes.
        figsize (Optional[float, float]): the figure size of the confusion matrix.
            If None, default to [6.4, 4.8].
    """
    # An explicitly empty subset list means "plot nothing"; None means "all".
    if subset_ids is None or len(subset_ids) != 0:
        if subset_ids is None:
            subset_ids = set(range(num_classes))
        else:
            subset_ids = set(subset_ids)
        # If class names are not provided, use their indices as names.
        if class_names is None:
            class_names = list(range(num_classes))
        for i in subset_ids:
            # Row i of the confusion matrix: prediction counts for true class i.
            pred = cmtx[i]
            hist = vis_utils.plot_topk_histogram(
                class_names[i],
                torch.Tensor(pred),
                k,
                class_names,
                figsize=figsize,
            )
            writer.add_figure(
                tag="Top {} predictions by classes/{}".format(
                    k, class_names[i]
                ),
                figure=hist,
                global_step=global_step,
            )
def add_ndim_array(
    writer,
    array,
    name,
    nrow=None,
    normalize=False,
    global_step=None,
    heat_map=True,
):
    """
    Visualize and add tensors of n-dimentionals to a Tensorboard SummaryWriter. Tensors
    will be visualized as a 2D grid image.
    Args:
        writer (SummaryWriter): Tensorboard SummaryWriter.
        array (tensor): tensor to visualize.
        name (str): name of the tensor.
        nrow (Optional[int]): number of 2D filters in each row in the grid image.
        normalize (bool): whether to normalize when we have multiple 2D filters.
            Default to False.
        global_step (Optional[int]): current step.
        heat_map (bool): whether to add heat map to 2D each 2D filters in array.
    """
    # 0-dim (scalar) tensors and missing arrays are silently ignored.
    if array is not None and array.ndim != 0:
        if array.ndim == 1:
            # Wrap the vector into a 2D grid of width `nrow`.
            reshaped_array = array.unsqueeze(0)
            if nrow is None:
                nrow = int(math.sqrt(reshaped_array.size()[1]))
            # NOTE(review): view(-1, nrow) requires the length to be divisible
            # by nrow (default nrow = floor(sqrt(len))) -- confirm callers
            # only pass sizes for which this holds.
            reshaped_array = reshaped_array.view(-1, nrow)
            if heat_map:
                reshaped_array = add_heatmap(reshaped_array)
                writer.add_image(
                    name,
                    reshaped_array,
                    global_step=global_step,
                    dataformats="CHW",
                )
            else:
                writer.add_image(
                    name,
                    reshaped_array,
                    global_step=global_step,
                    dataformats="HW",
                )
        elif array.ndim == 2:
            # Already a 2D image; optionally colorize it.
            reshaped_array = array
            if heat_map:
                heatmap = add_heatmap(reshaped_array)
                writer.add_image(
                    name, heatmap, global_step=global_step, dataformats="CHW"
                )
            else:
                writer.add_image(
                    name,
                    reshaped_array,
                    global_step=global_step,
                    dataformats="HW",
                )
        else:
            # ndim > 2: flatten leading dims into a batch of 2D filters and
            # tile them into one grid image.
            last2_dims = array.size()[-2:]
            reshaped_array = array.view(-1, *last2_dims)
            if heat_map:
                reshaped_array = [
                    add_heatmap(array_2d).unsqueeze(0)
                    for array_2d in reshaped_array
                ]
                reshaped_array = torch.cat(reshaped_array, dim=0)
            else:
                # make_grid expects a channel dimension.
                reshaped_array = reshaped_array.unsqueeze(1)
            if nrow is None:
                nrow = int(math.sqrt(reshaped_array.size()[0]))
            img_grid = make_grid(
                reshaped_array, nrow, padding=1, normalize=normalize
            )
            writer.add_image(name, img_grid, global_step=global_step)
def add_heatmap(tensor):
    """
    Colorize a 2D tensor with the "viridis" colormap.
    Args:
        tensor (tensor): a 2D tensor. Tensor value must be in [0..1] range.
    Returns:
        heatmap (tensor): a 3D (C, H, W) RGB tensor.
    """
    assert tensor.ndim == 2, "Only support 2D tensors."
    # The colormap lookup works on a CPU numpy array.
    arr = tensor.cpu() if tensor.device != torch.device("cpu") else tensor
    colormap = plt.get_cmap("viridis")
    colored = colormap(arr.numpy())
    # Drop the alpha channel, keeping RGB only.
    rgb = colored[:, :, :3]
    # Convert (H, W, C) to (C, H, W) as expected by add_image.
    return torch.Tensor(rgb).permute(2, 0, 1)
| 16,010 | 36.234884 | 102 | py |
STTS | STTS-main/MViT/slowfast/visualization/video_visualizer.py | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
import itertools
import logging as log
import numpy as np
import matplotlib.pyplot as plt
import torch
from detectron2.utils.visualizer import Visualizer
import slowfast.utils.logging as logging
from slowfast.utils.misc import get_class_names
logger = logging.get_logger(__name__)
log.getLogger("matplotlib").setLevel(log.ERROR)
def _create_text_labels(classes, scores, class_names, ground_truth=False):
"""
Create text labels.
Args:
classes (list[int]): a list of class ids for each example.
scores (list[float] or None): list of scores for each example.
class_names (list[str]): a list of class names, ordered by their ids.
ground_truth (bool): whether the labels are ground truth.
Returns:
labels (list[str]): formatted text labels.
"""
try:
labels = [class_names[i] for i in classes]
except IndexError:
logger.error("Class indices get out of range: {}".format(classes))
return None
if ground_truth:
labels = ["[{}] {}".format("GT", label) for label in labels]
elif scores is not None:
assert len(classes) == len(scores)
labels = [
"[{:.2f}] {}".format(s, label) for s, label in zip(scores, labels)
]
return labels
class ImgVisualizer(Visualizer):
    """
    Extension of detectron2's `Visualizer` that can stack multiple text labels
    above/below a bounding box while keeping them inside the image frame.
    """

    def __init__(self, img_rgb, meta, **kwargs):
        """
        See https://github.com/facebookresearch/detectron2/blob/master/detectron2/utils/visualizer.py
        for more details.
        Args:
            img_rgb: a tensor or numpy array of shape (H, W, C), where H and W correspond to
                the height and width of the image respectively. C is the number of
                color channels. The image is required to be in RGB format since that
                is a requirement of the Matplotlib library. The image is also expected
                to be in the range [0, 255].
            meta (MetadataCatalog): image metadata.
                See https://github.com/facebookresearch/detectron2/blob/81d5a87763bfc71a492b5be89b74179bd7492f6b/detectron2/data/catalog.py#L90
        """
        super(ImgVisualizer, self).__init__(img_rgb, meta, **kwargs)

    def draw_text(
        self,
        text,
        position,
        *,
        font_size=None,
        color="w",
        horizontal_alignment="center",
        vertical_alignment="bottom",
        box_facecolor="black",
        alpha=0.5,
    ):
        """
        Draw text at the specified position.
        Args:
            text (str): the text to draw on image.
            position (list of 2 ints): the x,y coordinate to place the text.
            font_size (Optional[int]): font of the text. If not provided, a font size
                proportional to the image width is calculated and used.
            color (str): color of the text. Refer to `matplotlib.colors` for full list
                of formats that are accepted.
            horizontal_alignment (str): see `matplotlib.text.Text`.
            vertical_alignment (str): see `matplotlib.text.Text`.
            box_facecolor (str): color of the box wrapped around the text. Refer to
                `matplotlib.colors` for full list of formats that are accepted.
            alpha (float): transparency level of the box.
        """
        if not font_size:
            font_size = self._default_font_size
        x, y = position
        # Rendered via the matplotlib axes owned by the Visualizer output.
        self.output.ax.text(
            x,
            y,
            text,
            size=font_size * self.output.scale,
            family="monospace",
            bbox={
                "facecolor": box_facecolor,
                "alpha": alpha,
                "pad": 0.7,
                "edgecolor": "none",
            },
            verticalalignment=vertical_alignment,
            horizontalalignment=horizontal_alignment,
            color=color,
            zorder=10,
        )

    def draw_multiple_text(
        self,
        text_ls,
        box_coordinate,
        *,
        top_corner=True,
        font_size=None,
        color="w",
        box_facecolors="black",
        alpha=0.5,
    ):
        """
        Draw a list of text labels for some bounding box on the image.
        Args:
            text_ls (list of strings): a list of text labels.
            box_coordinate (tensor): shape (4,). The (x_left, y_top, x_right, y_bottom)
                coordinates of the box.
            top_corner (bool): If True, draw the text labels at (x_left, y_top) of the box.
                Else, draw labels at (x_left, y_bottom).
            font_size (Optional[int]): font of the text. If not provided, a font size
                proportional to the image width is calculated and used.
            color (str): color of the text. Refer to `matplotlib.colors` for full list
                of formats that are accepted.
            box_facecolors (str): colors of the box wrapped around the text. Refer to
                `matplotlib.colors` for full list of formats that are accepted.
            alpha (float): transparency level of the box.
        """
        if not isinstance(box_facecolors, list):
            box_facecolors = [box_facecolors] * len(text_ls)
        assert len(box_facecolors) == len(
            text_ls
        ), "Number of colors provided is not equal to the number of text labels."
        if not font_size:
            font_size = self._default_font_size
        text_box_width = font_size + font_size // 2
        # If the texts does not fit in the assigned location,
        # we split the text and draw it in another place.
        if top_corner:
            num_text_split = self._align_y_top(
                box_coordinate, len(text_ls), text_box_width
            )
            y_corner = 1
        else:
            num_text_split = len(text_ls) - self._align_y_bottom(
                box_coordinate, len(text_ls), text_box_width
            )
            y_corner = 3

        # Sort (text, color) pairs by text, descending, so labels come out in
        # a deterministic order regardless of input order.
        text_color_sorted = sorted(
            zip(text_ls, box_facecolors), key=lambda x: x[0], reverse=True
        )
        if len(text_color_sorted) != 0:
            text_ls, box_facecolors = zip(*text_color_sorted)
        else:
            text_ls, box_facecolors = [], []
        text_ls, box_facecolors = list(text_ls), list(box_facecolors)
        # First chunk stacks upward from the anchor corner; the remainder
        # stacks downward on the other side of the box.
        self.draw_multiple_text_upward(
            text_ls[:num_text_split][::-1],
            box_coordinate,
            y_corner=y_corner,
            font_size=font_size,
            color=color,
            box_facecolors=box_facecolors[:num_text_split][::-1],
            alpha=alpha,
        )
        self.draw_multiple_text_downward(
            text_ls[num_text_split:],
            box_coordinate,
            y_corner=y_corner,
            font_size=font_size,
            color=color,
            box_facecolors=box_facecolors[num_text_split:],
            alpha=alpha,
        )

    def draw_multiple_text_upward(
        self,
        text_ls,
        box_coordinate,
        *,
        y_corner=1,
        font_size=None,
        color="w",
        box_facecolors="black",
        alpha=0.5,
    ):
        """
        Draw a list of text labels for some bounding box on the image in upward direction.
        The next text label will be on top of the previous one.
        Args:
            text_ls (list of strings): a list of text labels.
            box_coordinate (tensor): shape (4,). The (x_left, y_top, x_right, y_bottom)
                coordinates of the box.
            y_corner (int): Value of either 1 or 3. Indicate the index of the y-coordinate of
                the box to draw labels around.
            font_size (Optional[int]): font of the text. If not provided, a font size
                proportional to the image width is calculated and used.
            color (str): color of the text. Refer to `matplotlib.colors` for full list
                of formats that are accepted.
            box_facecolors (str or list of strs): colors of the box wrapped around the text. Refer to
                `matplotlib.colors` for full list of formats that are accepted.
            alpha (float): transparency level of the box.
        """
        if not isinstance(box_facecolors, list):
            box_facecolors = [box_facecolors] * len(text_ls)
        assert len(box_facecolors) == len(
            text_ls
        ), "Number of colors provided is not equal to the number of text labels."

        assert y_corner in [1, 3], "Y_corner must be either 1 or 3"
        if not font_size:
            font_size = self._default_font_size

        x, horizontal_alignment = self._align_x_coordinate(box_coordinate)
        y = box_coordinate[y_corner].item()
        for i, text in enumerate(text_ls):
            self.draw_text(
                text,
                (x, y),
                font_size=font_size,
                color=color,
                horizontal_alignment=horizontal_alignment,
                vertical_alignment="bottom",
                box_facecolor=box_facecolors[i],
                alpha=alpha,
            )
            # Move up one text-box height for the next label.
            y -= font_size + font_size // 2

    def draw_multiple_text_downward(
        self,
        text_ls,
        box_coordinate,
        *,
        y_corner=1,
        font_size=None,
        color="w",
        box_facecolors="black",
        alpha=0.5,
    ):
        """
        Draw a list of text labels for some bounding box on the image in downward direction.
        The next text label will be below the previous one.
        Args:
            text_ls (list of strings): a list of text labels.
            box_coordinate (tensor): shape (4,). The (x_left, y_top, x_right, y_bottom)
                coordinates of the box.
            y_corner (int): Value of either 1 or 3. Indicate the index of the y-coordinate of
                the box to draw labels around.
            font_size (Optional[int]): font of the text. If not provided, a font size
                proportional to the image width is calculated and used.
            color (str): color of the text. Refer to `matplotlib.colors` for full list
                of formats that are accepted.
            box_facecolors (str): colors of the box wrapped around the text. Refer to
                `matplotlib.colors` for full list of formats that are accepted.
            alpha (float): transparency level of the box.
        """
        if not isinstance(box_facecolors, list):
            box_facecolors = [box_facecolors] * len(text_ls)
        assert len(box_facecolors) == len(
            text_ls
        ), "Number of colors provided is not equal to the number of text labels."

        assert y_corner in [1, 3], "Y_corner must be either 1 or 3"
        if not font_size:
            font_size = self._default_font_size

        x, horizontal_alignment = self._align_x_coordinate(box_coordinate)
        y = box_coordinate[y_corner].item()
        for i, text in enumerate(text_ls):
            self.draw_text(
                text,
                (x, y),
                font_size=font_size,
                color=color,
                horizontal_alignment=horizontal_alignment,
                vertical_alignment="top",
                box_facecolor=box_facecolors[i],
                alpha=alpha,
            )
            # Move down one text-box height for the next label.
            y += font_size + font_size // 2

    def _align_x_coordinate(self, box_coordinate):
        """
        Choose an x-coordinate from the box to make sure the text label
        does not go out of frames. By default, the left x-coordinate is
        chosen and text is aligned left. If the box is too close to the
        right side of the image, then the right x-coordinate is chosen
        instead and the text is aligned right.
        Args:
            box_coordinate (array-like): shape (4,). The (x_left, y_top, x_right, y_bottom)
                coordinates of the box.
        Returns:
            x_coordinate (float): the chosen x-coordinate.
            alignment (str): whether to align left or right.
        """
        # If the x-coordinate is greater than 5/6 of the image width,
        # then we align test to the right of the box. This is
        # chosen by heuristics.
        if box_coordinate[0] > (self.output.width * 5) // 6:
            return box_coordinate[2], "right"

        return box_coordinate[0], "left"

    def _align_y_top(self, box_coordinate, num_text, textbox_width):
        """
        Calculate the number of text labels to plot on top of the box
        without going out of frames.
        Args:
            box_coordinate (array-like): shape (4,). The (x_left, y_top, x_right, y_bottom)
                coordinates of the box.
            num_text (int): the number of text labels to plot.
            textbox_width (float): the width of the box wrapped around text label.
        """
        dist_to_top = box_coordinate[1]
        num_text_top = dist_to_top // textbox_width

        if isinstance(num_text_top, torch.Tensor):
            num_text_top = int(num_text_top.item())

        return min(num_text, num_text_top)

    def _align_y_bottom(self, box_coordinate, num_text, textbox_width):
        """
        Calculate the number of text labels to plot at the bottom of the box
        without going out of frames.
        Args:
            box_coordinate (array-like): shape (4,). The (x_left, y_top, x_right, y_bottom)
                coordinates of the box.
            num_text (int): the number of text labels to plot.
            textbox_width (float): the width of the box wrapped around text label.
        """
        dist_to_bottom = self.output.height - box_coordinate[3]
        num_text_bottom = dist_to_bottom // textbox_width

        if isinstance(num_text_bottom, torch.Tensor):
            num_text_bottom = int(num_text_bottom.item())

        return min(num_text, num_text_bottom)
class VideoVisualizer:
def __init__(
    self,
    num_classes,
    class_names_path,
    top_k=1,
    colormap="rainbow",
    thres=0.7,
    lower_thres=0.3,
    common_class_names=None,
    mode="top-k",
):
    """
    Configure how predictions are selected and colored for visualization.
    Args:
        num_classes (int): total number of classes.
        class_names_path (str): path to json file that maps class names to ids.
            Must be in the format {classname: id}.
        top_k (int): number of top predicted classes to plot.
        colormap (str): the colormap to choose color for class labels from.
            See https://matplotlib.org/tutorials/colors/colormaps.html
        thres (float): threshold for picking predicted classes to visualize.
        lower_thres (Optional[float]): threshold applied to classes NOT listed
            in `common_class_names` (the listed ones keep `thres`).
        common_class_names (Optional[list of str(s)]): common class names that
            keep the `thres` threshold; useful for imbalanced datasets. If
            None, every class uses `thres`.
        mode (str): one of {"top-k", "thres"}; selects how predictions are
            chosen for drawing.
    Raises:
        AssertionError: if `mode` is not a supported selection mode.
    """
    assert mode in ["top-k", "thres"], "Mode {} is not supported.".format(
        mode
    )
    # Selection configuration.
    self.mode = mode
    self.num_classes = num_classes
    self.top_k = top_k
    self.thres = thres
    self.lower_thres = lower_thres
    # Class-id -> name mapping loaded from disk.
    self.class_names, _, _ = get_class_names(class_names_path, None, None)
    if mode == "thres":
        # Build the per-class threshold array for threshold-based selection.
        self._get_thres_array(common_class_names=common_class_names)
    self.color_map = plt.get_cmap(colormap)
def _get_color(self, class_id):
"""
Get color for a class id.
Args:
class_id (int): class id.
"""
return self.color_map(class_id / self.num_classes)[:3]
def draw_one_frame(
    self,
    frame,
    preds,
    bboxes=None,
    alpha=0.5,
    text_alpha=0.7,
    ground_truth=False,
):
    """
    Draw labels and bouding boxes for one image. By default, predicted labels are drawn in
    the top left corner of the image or corresponding bounding boxes. For ground truth labels
    (setting True for ground_truth flag), labels will be drawn in the bottom left corner.
    Args:
        frame (array-like): a tensor or numpy array of shape (H, W, C), where H and W correspond to
            the height and width of the image respectively. C is the number of
            color channels. The image is required to be in RGB format since that
            is a requirement of the Matplotlib library. The image is also expected
            to be in the range [0, 255].
        preds (tensor or list): If ground_truth is False, provide a float tensor of shape (num_boxes, num_classes)
            that contains all of the confidence scores of the model.
            For recognition task, input shape can be (num_classes,). To plot true label (ground_truth is True),
            preds is a list contains int32 of the shape (num_boxes, true_class_ids) or (true_class_ids,).
        bboxes (Optional[tensor]): shape (num_boxes, 4) that contains the coordinates of the bounding boxes.
        alpha (Optional[float]): transparency level of the bounding boxes.
        text_alpha (Optional[float]): transparency level of the box wrapped around text labels.
        ground_truth (bool): whether the prodived bounding boxes are ground-truth.
    Returns:
        The rendered frame image, or None for an unsupported `preds` type.
    """
    if isinstance(preds, torch.Tensor):
        # Promote a per-frame score vector to a one-instance batch.
        if preds.ndim == 1:
            preds = preds.unsqueeze(0)
        n_instances = preds.shape[0]
    elif isinstance(preds, list):
        n_instances = len(preds)
    else:
        logger.error("Unsupported type of prediction input.")
        return

    if ground_truth:
        # Ground truth has class ids but no scores.
        top_scores, top_classes = [None] * n_instances, preds
    elif self.mode == "top-k":
        top_scores, top_classes = torch.topk(preds, k=self.top_k)
        top_scores, top_classes = top_scores.tolist(), top_classes.tolist()
    elif self.mode == "thres":
        top_scores, top_classes = [], []
        for pred in preds:
            # NOTE(review): in "thres" mode self.thres is set up by
            # _get_thres_array (not visible here); presumably a per-class
            # threshold array broadcast against `pred` -- confirm.
            mask = pred >= self.thres
            top_scores.append(pred[mask].tolist())
            top_class = torch.squeeze(torch.nonzero(mask), dim=-1).tolist()
            top_classes.append(top_class)

    # Create labels top k predicted classes with their scores.
    text_labels = []
    for i in range(n_instances):
        text_labels.append(
            _create_text_labels(
                top_classes[i],
                top_scores[i],
                self.class_names,
                ground_truth=ground_truth,
            )
        )
    frame_visualizer = ImgVisualizer(frame, meta=None)
    # Heuristic font size proportional to the frame area, clamped to [5, 9].
    font_size = min(
        max(np.sqrt(frame.shape[0] * frame.shape[1]) // 35, 5), 9
    )
    # Predictions go above the box; ground truth goes below it.
    top_corner = not ground_truth
    if bboxes is not None:
        assert len(preds) == len(
            bboxes
        ), "Encounter {} predictions and {} bounding boxes".format(
            len(preds), len(bboxes)
        )
        for i, box in enumerate(bboxes):
            text = text_labels[i]
            pred_class = top_classes[i]
            colors = [self._get_color(pred) for pred in pred_class]

            # Red dashed boxes for ground truth, green dash-dot for predictions.
            box_color = "r" if ground_truth else "g"
            line_style = "--" if ground_truth else "-."
            frame_visualizer.draw_box(
                box,
                alpha=alpha,
                edge_color=box_color,
                line_style=line_style,
            )
            frame_visualizer.draw_multiple_text(
                text,
                box,
                top_corner=top_corner,
                font_size=font_size,
                box_facecolors=colors,
                alpha=text_alpha,
            )
    else:
        # No boxes (recognition task): anchor the labels to the whole frame.
        text = text_labels[0]
        pred_class = top_classes[0]
        colors = [self._get_color(pred) for pred in pred_class]
        frame_visualizer.draw_multiple_text(
            text,
            torch.Tensor([0, 5, frame.shape[1], frame.shape[0] - 5]),
            top_corner=top_corner,
            font_size=font_size,
            box_facecolors=colors,
            alpha=text_alpha,
        )

    return frame_visualizer.output.get_image()
def draw_clip_range(
self,
frames,
preds,
bboxes=None,
text_alpha=0.5,
ground_truth=False,
keyframe_idx=None,
draw_range=None,
repeat_frame=1,
):
"""
Draw predicted labels or ground truth classes to clip. Draw bouding boxes to clip
if bboxes is provided. Boxes will gradually fade in and out the clip, centered around
the clip's central frame, within the provided `draw_range`.
Args:
frames (array-like): video data in the shape (T, H, W, C).
preds (tensor): a tensor of shape (num_boxes, num_classes) that contains all of the confidence scores
of the model. For recognition task or for ground_truth labels, input shape can be (num_classes,).
bboxes (Optional[tensor]): shape (num_boxes, 4) that contains the coordinates of the bounding boxes.
text_alpha (float): transparency label of the box wrapped around text labels.
ground_truth (bool): whether the prodived bounding boxes are ground-truth.
keyframe_idx (int): the index of keyframe in the clip.
draw_range (Optional[list[ints]): only draw frames in range [start_idx, end_idx] inclusively in the clip.
If None, draw on the entire clip.
repeat_frame (int): repeat each frame in draw_range for `repeat_frame` time for slow-motion effect.
"""
if draw_range is None:
draw_range = [0, len(frames) - 1]
if draw_range is not None:
draw_range[0] = max(0, draw_range[0])
left_frames = frames[: draw_range[0]]
right_frames = frames[draw_range[1] + 1 :]
draw_frames = frames[draw_range[0] : draw_range[1] + 1]
if keyframe_idx is None:
keyframe_idx = len(frames) // 2
img_ls = (
list(left_frames)
+ self.draw_clip(
draw_frames,
preds,
bboxes=bboxes,
text_alpha=text_alpha,
ground_truth=ground_truth,
keyframe_idx=keyframe_idx - draw_range[0],
repeat_frame=repeat_frame,
)
+ list(right_frames)
)
return img_ls
def draw_clip(
self,
frames,
preds,
bboxes=None,
text_alpha=0.5,
ground_truth=False,
keyframe_idx=None,
repeat_frame=1,
):
"""
Draw predicted labels or ground truth classes to clip. Draw bouding boxes to clip
if bboxes is provided. Boxes will gradually fade in and out the clip, centered around
the clip's central frame.
Args:
frames (array-like): video data in the shape (T, H, W, C).
preds (tensor): a tensor of shape (num_boxes, num_classes) that contains all of the confidence scores
of the model. For recognition task or for ground_truth labels, input shape can be (num_classes,).
bboxes (Optional[tensor]): shape (num_boxes, 4) that contains the coordinates of the bounding boxes.
text_alpha (float): transparency label of the box wrapped around text labels.
ground_truth (bool): whether the prodived bounding boxes are ground-truth.
keyframe_idx (int): the index of keyframe in the clip.
repeat_frame (int): repeat each frame in draw_range for `repeat_frame` time for slow-motion effect.
"""
assert repeat_frame >= 1, "`repeat_frame` must be a positive integer."
repeated_seq = range(0, len(frames))
repeated_seq = list(
itertools.chain.from_iterable(
itertools.repeat(x, repeat_frame) for x in repeated_seq
)
)
frames, adjusted = self._adjust_frames_type(frames)
if keyframe_idx is None:
half_left = len(repeated_seq) // 2
half_right = (len(repeated_seq) + 1) // 2
else:
mid = int((keyframe_idx / len(frames)) * len(repeated_seq))
half_left = mid
half_right = len(repeated_seq) - mid
alpha_ls = np.concatenate(
[
np.linspace(0, 1, num=half_left),
np.linspace(1, 0, num=half_right),
]
)
text_alpha = text_alpha
frames = frames[repeated_seq]
img_ls = []
for alpha, frame in zip(alpha_ls, frames):
draw_img = self.draw_one_frame(
frame,
preds,
bboxes,
alpha=alpha,
text_alpha=text_alpha,
ground_truth=ground_truth,
)
if adjusted:
draw_img = draw_img.astype("float32") / 255
img_ls.append(draw_img)
return img_ls
def _adjust_frames_type(self, frames):
"""
Modify video data to have dtype of uint8 and values range in [0, 255].
Args:
frames (array-like): 4D array of shape (T, H, W, C).
Returns:
frames (list of frames): list of frames in range [0, 1].
adjusted (bool): whether the original frames need adjusted.
"""
assert (
frames is not None and len(frames) != 0
), "Frames does not contain any values"
frames = np.array(frames)
assert np.array(frames).ndim == 4, "Frames must have 4 dimensions"
adjusted = False
if frames.dtype in [np.float32, np.float64]:
frames *= 255
frames = frames.astype(np.uint8)
adjusted = True
return frames, adjusted
def _get_thres_array(self, common_class_names=None):
"""
Compute a thresholds array for all classes based on `self.thes` and `self.lower_thres`.
Args:
common_class_names (Optional[list of strs]): a list of common class names.
"""
common_class_ids = []
if common_class_names is not None:
common_classes = set(common_class_names)
for i, name in enumerate(self.class_names):
if name in common_classes:
common_class_ids.append(i)
else:
common_class_ids = list(range(self.num_classes))
thres_array = np.full(
shape=(self.num_classes,), fill_value=self.lower_thres
)
thres_array[common_class_ids] = self.thres
self.thres = torch.from_numpy(thres_array)
| 27,400 | 39.414454 | 143 | py |
STTS | STTS-main/MViT/slowfast/utils/c2_model_loading.py | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
"""Caffe2 to PyTorch checkpoint name converting utility."""
import re
def get_name_convert_func():
    """
    Get the function to convert Caffe2 layer names to PyTorch layer names.
    Returns:
        (func): function to convert parameter name from Caffe2 format to
            PyTorch format.
    """
    # Ordered rewrite rules. Each (pattern, replacement) is applied in turn
    # to the running result of the previous substitutions, so later rules
    # clean up suffixes produced by earlier ones.
    rules = [
        # --- non-local blocks -----------------------------------------
        # 'nonlocal_conv3_1_theta_w' -> 's3.pathway0_nonlocal3.conv_g.weight'
        [
            r"^nonlocal_conv([0-9]+)_([0-9]+)_(.*)",
            r"s\1.pathway0_nonlocal\2_\3",
        ],
        # 'theta'/'g'/'phi'/'out' -> 'conv_theta' etc.
        [r"^(.*)_nonlocal([0-9]+)_(theta)(.*)", r"\1_nonlocal\2.conv_\3\4"],
        [r"^(.*)_nonlocal([0-9]+)_(g)(.*)", r"\1_nonlocal\2.conv_\3\4"],
        [r"^(.*)_nonlocal([0-9]+)_(phi)(.*)", r"\1_nonlocal\2.conv_\3\4"],
        [r"^(.*)_nonlocal([0-9]+)_(out)(.*)", r"\1_nonlocal\2.conv_\3\4"],
        # 'nonlocal_conv4_5_bn_s' -> 's4.pathway0_nonlocal3.bn.weight'
        [r"^(.*)_nonlocal([0-9]+)_(bn)_(.*)", r"\1_nonlocal\2.\3.\4"],
        # --- SlowFast fusion layers -----------------------------------
        # 't_pool1_subsample_bn' -> 's1_fuse.bn.…'
        [r"^t_pool1_subsample_bn_(.*)", r"s1_fuse.bn.\1"],
        # 't_pool1_subsample' -> 's1_fuse.conv_f2s'
        [r"^t_pool1_subsample_(.*)", r"s1_fuse.conv_f2s.\1"],
        # 't_res4_5_branch2c_bn_subsample_bn_rm' -> 's4_fuse.bn.rm'
        [
            r"^t_res([0-9]+)_([0-9]+)_branch2c_bn_subsample_bn_(.*)",
            r"s\1_fuse.bn.\3",
        ],
        [
            r"^t_res([0-9]+)_([0-9]+)_branch2c_bn_subsample_(.*)",
            r"s\1_fuse.conv_f2s.\3",
        ],
        # --- slow pathway (pathway 0) ---------------------------------
        # 'res4_4_branch2c_bn_b' -> 's4.pathway0_res4.branch2.c_bn_b'
        [
            r"^res([0-9]+)_([0-9]+)_branch([0-9]+)([a-z])_(.*)",
            r"s\1.pathway0_res\2.branch\3.\4_\5",
        ],
        # 'res_conv1_bn_' -> 's1.pathway0_stem.bn.'
        [r"^res_conv1_bn_(.*)", r"s1.pathway0_stem.bn.\1"],
        # 'conv1_xy_w_momentum' -> 's1.pathway0_stem.conv_xy…'
        [r"^conv1_xy(.*)", r"s1.pathway0_stem.conv_xy\1"],
        # 'conv1_w_momentum' -> 's1.pathway0_stem.conv.'
        [r"^conv1_(.*)", r"s1.pathway0_stem.conv.\1"],
        # 'res4_0_branch1_w' -> 's4.pathway0_res0.branch1_w'
        [
            r"^res([0-9]+)_([0-9]+)_branch([0-9]+)_(.*)",
            r"s\1.pathway0_res\2.branch\3_\4",
        ],
        [r"^res_conv1_(.*)", r"s1.pathway0_stem.conv.\1"],
        # --- fast pathway (pathway 1, 't_' prefix) --------------------
        [
            r"^t_res([0-9]+)_([0-9]+)_branch([0-9]+)([a-z])_(.*)",
            r"s\1.pathway1_res\2.branch\3.\4_\5",
        ],
        [r"^t_res_conv1_bn_(.*)", r"s1.pathway1_stem.bn.\1"],
        [r"^t_conv1_(.*)", r"s1.pathway1_stem.conv.\1"],
        [
            r"^t_res([0-9]+)_([0-9]+)_branch([0-9]+)_(.*)",
            r"s\1.pathway1_res\2.branch\3_\4",
        ],
        [r"^t_res_conv1_(.*)", r"s1.pathway1_stem.conv.\1"],
        # --- head and generic suffixes --------------------------------
        # pred_ -> head.projection.
        [r"pred_(.*)", r"head.projection.\1"],
        # '.b_bn_fc' -> '.se.fc'
        [r"(.*)b_bn_fc(.*)", r"\1se.fc\2"],
        [r"conv_5(.*)", r"head.conv_5\1"],
        [r"lin_5(.*)", r"head.lin_5\1"],
        # BN stat suffixes: b/s/rm/riv -> bias/weight/running_mean/running_var
        [r"(.*)bn.b\Z", r"\1bn.bias"],
        [r"(.*)bn.s\Z", r"\1bn.weight"],
        [r"(.*)bn.rm\Z", r"\1bn.running_mean"],
        [r"(.*)bn.riv\Z", r"\1bn.running_var"],
        # generic '_b' -> '.bias', '_w' -> '.weight'
        [r"(.*)[\._]b\Z", r"\1.bias"],
        [r"(.*)[\._]w\Z", r"\1.weight"],
    ]
    # Compile once; the converter is typically called for every blob.
    compiled = [(re.compile(pattern), repl) for pattern, repl in rules]

    def convert_caffe2_name_to_pytorch(caffe2_layer_name):
        """
        Convert the caffe2_layer_name to pytorch format by applying the
        ordered list of regular-expression substitutions.
        Args:
            caffe2_layer_name (str): caffe2 layer name.
        Returns:
            (str): pytorch layer name.
        """
        name = caffe2_layer_name
        for pattern, repl in compiled:
            name = pattern.sub(repl, name)
        return name

    return convert_caffe2_name_to_pytorch
| 5,005 | 40.371901 | 80 | py |
STTS | STTS-main/MViT/slowfast/utils/parser.py | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
"""Argument parser functions."""
import argparse
import sys
import slowfast.utils.checkpoint as cu
from slowfast.config.defaults import get_cfg
def parse_args():
    """
    Parse the following arguments for a default parser for PySlowFast users.
    Args:
        shard_id (int): shard id for the current machine. Starts from 0 to
            num_shards - 1. If single machine is used, then set shard id to 0.
        num_shards (int): number of shards using by the job.
        init_method (str): initialization method to launch the job with multiple
            devices. Options includes TCP or shared file-system for
            initialization. details can be find in
            https://pytorch.org/docs/stable/distributed.html#tcp-initialization
        cfg (str): path to the config file.
        opts (argument): provide addtional options from the command line, it
            overwrites the config loaded from file.
    """
    arg_parser = argparse.ArgumentParser(
        description="Provide SlowFast video training and testing pipeline."
    )
    arg_parser.add_argument(
        "--shard_id",
        type=int,
        default=0,
        help="The shard id of current node, Starts from 0 to num_shards - 1",
    )
    arg_parser.add_argument(
        "--num_shards",
        type=int,
        default=1,
        help="Number of shards using by the job",
    )
    arg_parser.add_argument(
        "--init_method",
        type=str,
        default="tcp://localhost:9999",
        help="Initialization method, includes TCP or shared file-system",
    )
    arg_parser.add_argument(
        "--cfg",
        dest="cfg_file",
        type=str,
        default="configs/Kinetics/SLOWFAST_4x16_R50.yaml",
        help="Path to the config file",
    )
    # Everything after the named options is forwarded verbatim as config
    # overrides (key value pairs consumed later by cfg.merge_from_list).
    arg_parser.add_argument(
        "opts",
        nargs=argparse.REMAINDER,
        default=None,
        help="See slowfast/config/defaults.py for all options",
    )
    # With no CLI arguments at all, show the usage text before parsing.
    if len(sys.argv) == 1:
        arg_parser.print_help()
    return arg_parser.parse_args()
def load_config(args):
    """
    Given the arguemnts, load and initialize the configs.
    Args:
        args (argument): arguments includes `shard_id`, `num_shards`,
            `init_method`, `cfg_file`, and `opts`.
    Returns:
        cfg (CfgNode): the fully merged config.
    """
    cfg = get_cfg()
    # Merge the config file first, then CLI `opts`, so the command line wins.
    if args.cfg_file is not None:
        cfg.merge_from_file(args.cfg_file)
    if args.opts is not None:
        cfg.merge_from_list(args.opts)
    # Mirror selected CLI arguments into the config when present on `args`.
    if hasattr(args, "num_shards") and hasattr(args, "shard_id"):
        cfg.NUM_SHARDS = args.num_shards
        cfg.SHARD_ID = args.shard_id
    if hasattr(args, "rng_seed"):
        cfg.RNG_SEED = args.rng_seed
    if hasattr(args, "output_dir"):
        cfg.OUTPUT_DIR = args.output_dir
    # Make sure the checkpoint directory exists up front.
    cu.make_checkpoint_dir(cfg.OUTPUT_DIR)
    return cfg
| 3,021 | 30.810526 | 80 | py |
STTS | STTS-main/MViT/slowfast/utils/checkpoint.py | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
"""Functions that handle saving and loading of checkpoints."""
import copy
import numpy as np
import os
import pickle
from collections import OrderedDict
import torch
import slowfast.utils.distributed as du
import slowfast.utils.logging as logging
from slowfast.utils.c2_model_loading import get_name_convert_func
from slowfast.utils.env import checkpoint_pathmgr as pathmgr
logger = logging.get_logger(__name__)
def make_checkpoint_dir(path_to_job):
    """
    Creates the checkpoint directory (if not present already).
    Args:
        path_to_job (string): the path to the folder of the current job.
    Returns:
        (string): path of the checkpoint directory.
    """
    ckpt_dir = os.path.join(path_to_job, "checkpoints")
    # Only the master process creates the directory; creation failures
    # (e.g. races with concurrent jobs) are deliberately ignored.
    if du.is_master_proc() and not pathmgr.exists(ckpt_dir):
        try:
            pathmgr.mkdirs(ckpt_dir)
        except Exception:
            pass
    return ckpt_dir
def get_checkpoint_dir(path_to_job):
    """
    Get path for storing checkpoints.
    Args:
        path_to_job (string): the path to the folder of the current job.
    Returns:
        (string): the "checkpoints" directory under the job folder.
    """
    return os.path.join(path_to_job, "checkpoints")
def get_path_to_checkpoint(path_to_job, epoch):
    """
    Get the full path to a checkpoint file.
    Args:
        path_to_job (string): the path to the folder of the current job.
        epoch (int): the number of epoch for the checkpoint.
    Returns:
        (string): full path of the checkpoint file for `epoch`.
    """
    # Zero-pad the epoch so lexicographic sort equals numeric epoch order.
    filename = "checkpoint_epoch_{:05d}.pyth".format(epoch)
    return os.path.join(path_to_job, "checkpoints", filename)
def get_last_checkpoint(path_to_job):
    """
    Get the last checkpoint from the checkpointing folder.
    Args:
        path_to_job (string): the path to the folder of the current job.
    Returns:
        (string): path to the most recent checkpoint file.
    """
    ckpt_dir = os.path.join(path_to_job, "checkpoints")
    files = pathmgr.ls(ckpt_dir) if pathmgr.exists(ckpt_dir) else []
    checkpoints = sorted(f for f in files if "checkpoint" in f)
    assert len(checkpoints), "No checkpoints found in '{}'.".format(ckpt_dir)
    # Names are zero-padded by epoch, so the lexicographic max is the latest.
    return os.path.join(ckpt_dir, checkpoints[-1])
def has_checkpoint(path_to_job):
    """
    Determines if the given directory contains a checkpoint.
    Args:
        path_to_job (string): the path to the folder of the current job.
    Returns:
        (bool): True if at least one checkpoint file exists.
    """
    ckpt_dir = os.path.join(path_to_job, "checkpoints")
    if not pathmgr.exists(ckpt_dir):
        return False
    return any("checkpoint" in f for f in pathmgr.ls(ckpt_dir))
def is_checkpoint_epoch(cfg, cur_epoch, multigrid_schedule=None):
    """
    Determine if a checkpoint should be saved on current epoch.
    Args:
        cfg (CfgNode): configs to save.
        cur_epoch (int): current number of epoch of the model.
        multigrid_schedule (List): schedule for multigrid training.
    Returns:
        (bool): True if a checkpoint should be written after `cur_epoch`.
    """
    # Always checkpoint the final epoch.
    if cur_epoch + 1 == cfg.SOLVER.MAX_EPOCH:
        return True
    if multigrid_schedule is not None:
        stage_start = 0
        for stage in multigrid_schedule:
            stage_end = stage[-1]
            if cur_epoch >= stage_end:
                stage_start = stage_end
                continue
            # Inside this stage: checkpoint every `period` epochs, counting
            # backwards from the stage's last epoch.
            period = max(
                (stage_end - stage_start) // cfg.MULTIGRID.EVAL_FREQ + 1, 1
            )
            return (stage_end - 1 - cur_epoch) % period == 0
    return (cur_epoch + 1) % cfg.TRAIN.CHECKPOINT_PERIOD == 0
def save_checkpoint(path_to_job, model, optimizer, epoch, cfg, scaler=None):
    """
    Save a checkpoint.
    Args:
        path_to_job (string): the path to the folder of the current job.
        model (model): model to save the weight to the checkpoint.
        optimizer (optim): optimizer to save the historical state.
        epoch (int): current number of epoch of the model.
        cfg (CfgNode): configs to save.
        scaler (GradScaler): the mixed precision scale.
    Returns:
        (string): path of the written checkpoint; None on non-master
            processes.
    """
    # Only the master process writes checkpoints.
    if not du.is_master_proc(cfg.NUM_GPUS * cfg.NUM_SHARDS):
        return
    pathmgr.mkdirs(get_checkpoint_dir(path_to_job))
    # Unwrap the DDP module when training on multiple GPUs, then convert
    # Sub-BN parameters back to plain BN for a portable checkpoint.
    raw_sd = (
        model.module.state_dict() if cfg.NUM_GPUS > 1 else model.state_dict()
    )
    checkpoint = {
        "epoch": epoch,
        "model_state": sub_to_normal_bn(raw_sd),
        "optimizer_state": optimizer.state_dict(),
        "cfg": cfg.dump(),
    }
    if scaler is not None:
        checkpoint["scaler_state"] = scaler.state_dict()
    path_to_checkpoint = get_path_to_checkpoint(path_to_job, epoch + 1)
    with pathmgr.open(path_to_checkpoint, "wb") as f:
        torch.save(checkpoint, f)
    return path_to_checkpoint
def inflate_weight(state_dict_2d, state_dict_3d):
    """
    Inflate 2D model weights in state_dict_2d to the 3D model weights in
    state_dict_3d. The details can be found in:
    Joao Carreira, and Andrew Zisserman.
    "Quo vadis, action recognition? a new model and the kinetics dataset."
    Args:
        state_dict_2d (OrderedDict): a dict of parameters from a 2D model.
        state_dict_3d (OrderedDict): a dict of parameters from a 3D model.
    Returns:
        state_dict_inflated (OrderedDict): a dict of inflated parameters.
    """
    inflated = OrderedDict()
    for name, w2d in state_dict_2d.items():
        assert name in state_dict_3d.keys()
        w3d = state_dict_3d[name]
        if len(w2d.shape) == 4 and len(w3d.shape) == 5:
            # Replicate the 2D kernel along time and rescale so the inflated
            # conv produces the same output as the 2D conv on a static clip.
            logger.info(
                "Inflate {}: {} -> {}: {}".format(name, w2d.shape, name, w3d.shape)
            )
            # Spatial and (out, in) channel dims must agree.
            assert w2d.shape[-2:] == w3d.shape[-2:]
            assert w2d.shape[:2] == w3d.shape[:2]
            w3d = (
                w2d.unsqueeze(2).repeat(1, 1, w3d.shape[2], 1, 1)
                / w3d.shape[2]
            )
        elif w2d.shape == w3d.shape:
            w3d = w2d
        else:
            logger.info(
                "Unexpected {}: {} -|> {}: {}".format(
                    name, w2d.shape, name, w3d.shape
                )
            )
        inflated[name] = w3d.clone()
    return inflated
def load_checkpoint(
    path_to_checkpoint,
    model,
    data_parallel=True,
    optimizer=None,
    scaler=None,
    inflation=False,
    convert_from_caffe2=False,
    epoch_reset=False,
    clear_name_pattern=(),
):
    """
    Load the checkpoint from the given file. If inflation is True, inflate the
    2D Conv weights from the checkpoint to 3D Conv.
    Args:
        path_to_checkpoint (string): path to the checkpoint to load.
        model (model): model to load the weights from the checkpoint.
        data_parallel (bool): if true, model is wrapped by
            torch.nn.parallel.DistributedDataParallel.
        optimizer (optim): optimizer to load the historical state.
        scaler (GradScaler): GradScaler to load the mixed precision scale.
        inflation (bool): if True, inflate the weights from the checkpoint.
        convert_from_caffe2 (bool): if True, load the model from caffe2 and
            convert it to pytorch.
        epoch_reset (bool): if True, reset #train iterations from the checkpoint.
        clear_name_pattern (string): if given, this (sub)string will be cleared
            from a layer name if it can be matched.
    Returns:
        (int): the number of training epoch of the checkpoint, or -1 when
            no epoch is recorded / the epoch is reset.
    """
    assert pathmgr.exists(
        path_to_checkpoint
    ), "Checkpoint '{}' not found".format(path_to_checkpoint)
    logger.info("Loading network weights from {}.".format(path_to_checkpoint))
    # Account for the DDP wrapper in the multi-gpu setting.
    ms = model.module if data_parallel else model
    if convert_from_caffe2:
        # Caffe2 checkpoints are pickled dicts of raw numpy blobs keyed by
        # caffe2 layer names; convert each name and reshape to the model.
        with pathmgr.open(path_to_checkpoint, "rb") as f:
            caffe2_checkpoint = pickle.load(f, encoding="latin1")
        state_dict = OrderedDict()
        name_convert_func = get_name_convert_func()
        for key in caffe2_checkpoint["blobs"].keys():
            converted_key = name_convert_func(key)
            converted_key = c2_normal_to_sub_bn(converted_key, ms.state_dict())
            if converted_key in ms.state_dict():
                c2_blob_shape = caffe2_checkpoint["blobs"][key].shape
                model_blob_shape = ms.state_dict()[converted_key].shape
                # expand shape dims if they differ (eg for converting linear to conv params)
                if len(c2_blob_shape) < len(model_blob_shape):
                    c2_blob_shape += (1,) * (
                        len(model_blob_shape) - len(c2_blob_shape)
                    )
                    caffe2_checkpoint["blobs"][key] = np.reshape(
                        caffe2_checkpoint["blobs"][key], c2_blob_shape
                    )
                # Load BN stats to Sub-BN: tile 1-D stats whose length divides
                # the model's (the Sub-BN splits concatenate per-split stats).
                if (
                    len(model_blob_shape) == 1
                    and len(c2_blob_shape) == 1
                    and model_blob_shape[0] > c2_blob_shape[0]
                    and model_blob_shape[0] % c2_blob_shape[0] == 0
                ):
                    caffe2_checkpoint["blobs"][key] = np.concatenate(
                        [caffe2_checkpoint["blobs"][key]]
                        * (model_blob_shape[0] // c2_blob_shape[0])
                    )
                    c2_blob_shape = caffe2_checkpoint["blobs"][key].shape
                if c2_blob_shape == tuple(model_blob_shape):
                    state_dict[converted_key] = torch.tensor(
                        caffe2_checkpoint["blobs"][key]
                    ).clone()
                    logger.info(
                        "{}: {} => {}: {}".format(
                            key,
                            c2_blob_shape,
                            converted_key,
                            tuple(model_blob_shape),
                        )
                    )
                else:
                    logger.warn(
                        "!! {}: {} does not match {}: {}".format(
                            key,
                            c2_blob_shape,
                            converted_key,
                            tuple(model_blob_shape),
                        )
                    )
            else:
                # Optimizer-state blobs (momentum/lr/iter) are expected to be
                # unconvertible; only warn for genuine parameter blobs.
                if not any(
                    prefix in key for prefix in ["momentum", "lr", "model_iter"]
                ):
                    logger.warn(
                        "!! {}: can not be converted, got {}".format(
                            key, converted_key
                        )
                    )
        diff = set(ms.state_dict()) - set(state_dict)
        diff = {d for d in diff if "num_batches_tracked" not in d}
        if len(diff) > 0:
            logger.warn("Not loaded {}".format(diff))
        ms.load_state_dict(state_dict, strict=False)
        epoch = -1
    else:
        # Load the checkpoint on CPU to avoid GPU mem spike.
        with pathmgr.open(path_to_checkpoint, "rb") as f:
            checkpoint = torch.load(f, map_location="cpu")
        #print(checkpoint["model_state"].keys())
        model_state_dict_3d = (
            model.module.state_dict() if data_parallel else model.state_dict()
        )
        # Convert plain-BN stats in the checkpoint to Sub-BN layout if needed.
        checkpoint["model_state"] = normal_to_sub_bn(
            checkpoint["model_state"], model_state_dict_3d
        )
        if inflation:
            # Try to inflate the model.
            inflated_model_dict = inflate_weight(
                checkpoint["model_state"], model_state_dict_3d
            )
            ms.load_state_dict(inflated_model_dict, strict=False)
        else:
            if clear_name_pattern:
                # Strip each given substring from every checkpoint layer name.
                for item in clear_name_pattern:
                    model_state_dict_new = OrderedDict()
                    for k in checkpoint["model_state"]:
                        if item in k:
                            k_re = k.replace(item, "")
                            model_state_dict_new[k_re] = checkpoint[
                                "model_state"
                            ][k]
                            logger.info("renaming: {} -> {}".format(k, k_re))
                        else:
                            model_state_dict_new[k] = checkpoint["model_state"][
                                k
                            ]
                    checkpoint["model_state"] = model_state_dict_new
            pre_train_dict = checkpoint["model_state"]
            model_dict = ms.state_dict()
            # Match pre-trained weights that have same shape as current model.
            pre_train_dict_match = {
                k: v
                for k, v in pre_train_dict.items()
                if k in model_dict and v.size() == model_dict[k].size()
            }
            # Weights that do not have match from the pre-trained model.
            not_load_layers = [
                k
                for k in model_dict.keys()
                if k not in pre_train_dict_match.keys()
            ]
            # Log weights that are not loaded with the pre-trained weights.
            if not_load_layers:
                for k in not_load_layers:
                    logger.info("Network weights {} not loaded.".format(k))
            # Load pre-trained weights.
            ms.load_state_dict(pre_train_dict_match, strict=False)
            epoch = -1

            # Load the optimizer state (commonly not done when fine-tuning)
        if "epoch" in checkpoint.keys() and not epoch_reset:
            epoch = checkpoint["epoch"]
            if optimizer:
                optimizer.load_state_dict(checkpoint["optimizer_state"])
            if scaler:
                scaler.load_state_dict(checkpoint["scaler_state"])
        else:
            epoch = -1
    return epoch
def sub_to_normal_bn(sd):
    """
    Convert the Sub-BN paprameters to normal BN parameters in a state dict.
    There are two copies of BN layers in a Sub-BN implementation: `bn.bn` and
    `bn.split_bn`. `bn.split_bn` is used during training and
    "compute_precise_bn". Before saving or evaluation, its stats are copied to
    `bn.bn`. We rename `bn.bn` to `bn` and store it to be consistent with normal
    BN layers.
    Args:
        sd (OrderedDict): a dict of parameters whitch might contain Sub-BN
            parameters.
    Returns:
        new_sd (OrderedDict): a dict with Sub-BN parameters reshaped to
            normal parameters.
    """
    converted = copy.deepcopy(sd)
    # Suffix renames that pull the inner `bn.bn`/`split_bn` stats out.
    renames = [
        ("bn.bn.running_mean", "bn.running_mean"),
        ("bn.bn.running_var", "bn.running_var"),
        ("bn.split_bn.num_batches_tracked", "bn.num_batches_tracked"),
    ]
    drop_markers = ["bn.bn.", ".split_bn."]
    for key in sd:
        for suffix, replacement in renames:
            if key.endswith(suffix):
                new_key = key.split(suffix)[0] + replacement
                converted[new_key] = converted.pop(key)
        # Remove any Sub-BN-only entries that were not renamed above.
        for marker in drop_markers:
            if marker in key and key in converted:
                del converted[key]
    # Sub-BN stores affine params as (C, 1, 1, 1); flatten back to (C,).
    for key in converted:
        if key.endswith("bn.weight") or key.endswith("bn.bias"):
            if len(converted[key].size()) == 4:
                assert all(d == 1 for d in converted[key].size()[1:])
                converted[key] = converted[key][:, 0, 0, 0]
    return converted
def c2_normal_to_sub_bn(key, model_keys):
    """
    Convert a BN parameter name to its Sub-BN variant when only the Sub-BN
    variant exists in the target model.
    Args:
        key (string): source parameter name.
        model_keys (iterable of strings): parameter names of the target model.
    Returns:
        (string): `key`, or its `bn.split_bn` variant when only that variant
            is present in `model_keys`.
    """
    if "bn.running_" in key:
        if key in model_keys:
            return key
        new_key = key.replace("bn.running_", "bn.split_bn.running_")
        if new_key in model_keys:
            return new_key
        # Bug fix: the original implicitly returned None when neither
        # variant was present; fall through to returning the original key.
    return key
def normal_to_sub_bn(checkpoint_sd, model_sd):
    """
    Convert BN parameters to Sub-BN parameters if model contains Sub-BNs.
    Args:
        checkpoint_sd (OrderedDict): source dict of parameters.
        model_sd (OrderedDict): target dict of parameters.
    Returns:
        new_sd (OrderedDict): converted dict of parameters (modified in place).
    """
    # Mirror plain-BN stats from the checkpoint into both Sub-BN copies.
    for key in model_sd:
        if key in checkpoint_sd:
            continue
        if "bn.split_bn." in key:
            source_key = key.replace("bn.split_bn.", "bn.")
            mirror_key = key.replace("bn.split_bn.", "bn.bn.")
            checkpoint_sd[key] = checkpoint_sd.pop(source_key)
            checkpoint_sd[mirror_key] = checkpoint_sd[key]
    # Tile 1-D stats whose length divides the model's (Sub-BN concatenates
    # the per-split statistics along the channel dim).
    for key in model_sd:
        if key not in checkpoint_sd:
            continue
        model_shape = model_sd[key].shape
        ckpt_shape = checkpoint_sd[key].shape
        if (
            len(model_shape) == 1
            and len(ckpt_shape) == 1
            and model_shape[0] > ckpt_shape[0]
            and model_shape[0] % ckpt_shape[0] == 0
        ):
            before_shape = checkpoint_sd[key].shape
            checkpoint_sd[key] = torch.cat(
                [checkpoint_sd[key]] * (model_shape[0] // ckpt_shape[0])
            )
            logger.info(
                "{} {} -> {}".format(
                    key, before_shape, checkpoint_sd[key].shape
                )
            )
    return checkpoint_sd
def load_test_checkpoint(cfg, model):
    """
    Loading checkpoint logic for testing.
    Preference order: TEST.CHECKPOINT_FILE_PATH, then the newest checkpoint
    in OUTPUT_DIR, then TRAIN.CHECKPOINT_FILE_PATH.
    """
    if cfg.TEST.CHECKPOINT_FILE_PATH != "":
        # An explicit test checkpoint takes priority over everything else.
        load_checkpoint(
            cfg.TEST.CHECKPOINT_FILE_PATH,
            model,
            cfg.NUM_GPUS > 1,
            None,
            inflation=False,
            convert_from_caffe2=cfg.TEST.CHECKPOINT_TYPE == "caffe2",
        )
    elif has_checkpoint(cfg.OUTPUT_DIR):
        load_checkpoint(
            get_last_checkpoint(cfg.OUTPUT_DIR), model, cfg.NUM_GPUS > 1
        )
    elif cfg.TRAIN.CHECKPOINT_FILE_PATH != "":
        # Fall back to the training checkpoint when no test checkpoint and no
        # checkpoint folder entry exist.
        load_checkpoint(
            cfg.TRAIN.CHECKPOINT_FILE_PATH,
            model,
            cfg.NUM_GPUS > 1,
            None,
            inflation=False,
            convert_from_caffe2=cfg.TRAIN.CHECKPOINT_TYPE == "caffe2",
        )
    else:
        logger.info(
            "Unknown way of loading checkpoint. Using with random initialization, only for debugging."
        )
def load_train_checkpoint(cfg, model, optimizer, scaler=None):
    """
    Loading checkpoint logic for training.
    Returns:
        (int): the epoch to start training from (0 when nothing is loaded).
    """
    if cfg.TRAIN.AUTO_RESUME and has_checkpoint(cfg.OUTPUT_DIR):
        # Resume from the newest checkpoint written to OUTPUT_DIR.
        last_checkpoint = get_last_checkpoint(cfg.OUTPUT_DIR)
        logger.info("Load from last checkpoint, {}.".format(last_checkpoint))
        resume_epoch = load_checkpoint(
            last_checkpoint, model, cfg.NUM_GPUS > 1, optimizer, scaler=scaler
        )
        return resume_epoch + 1
    if cfg.TRAIN.CHECKPOINT_FILE_PATH != "":
        # Otherwise initialize from an explicitly-provided checkpoint file.
        logger.info("Load from given checkpoint file.")
        resume_epoch = load_checkpoint(
            cfg.TRAIN.CHECKPOINT_FILE_PATH,
            model,
            cfg.NUM_GPUS > 1,
            optimizer,
            scaler=scaler,
            inflation=cfg.TRAIN.CHECKPOINT_INFLATE,
            convert_from_caffe2=cfg.TRAIN.CHECKPOINT_TYPE == "caffe2",
            epoch_reset=cfg.TRAIN.CHECKPOINT_EPOCH_RESET,
            clear_name_pattern=cfg.TRAIN.CHECKPOINT_CLEAR_NAME_PATTERN,
        )
        return resume_epoch + 1
    return 0
| 19,869 | 36.775665 | 102 | py |
STTS | STTS-main/MViT/slowfast/utils/benchmark.py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
"""
Functions for benchmarks.
"""
import numpy as np
import pprint
import torch
import tqdm
from fvcore.common.timer import Timer
import slowfast.utils.logging as logging
import slowfast.utils.misc as misc
from slowfast.datasets import loader
from slowfast.utils.env import setup_environment
logger = logging.get_logger(__name__)
def benchmark_data_loading(cfg):
    """
    Benchmark the speed of data loading in PySlowFast.
    Args:
        cfg (CfgNode): configs. Details can be found in
            slowfast/config/defaults.py
    """
    # Set up environment.
    setup_environment()
    # Set random seed from configs.
    np.random.seed(cfg.RNG_SEED)
    torch.manual_seed(cfg.RNG_SEED)
    # Setup logging format.
    logging.setup_logging(cfg.OUTPUT_DIR)
    # Print config.
    logger.info("Benchmark data loading with config:")
    logger.info(pprint.pformat(cfg))

    # Time the construction of the train loader itself first.
    timer = Timer()
    dataloader = loader.construct_loader(cfg, "train")
    logger.info(
        "Initialize loader using {:.2f} seconds.".format(timer.seconds())
    )
    # Total batch size across different machines.
    batch_size = cfg.TRAIN.BATCH_SIZE * cfg.NUM_SHARDS
    log_period = cfg.BENCHMARK.LOG_PERIOD
    epoch_times = []
    # Test for a few epochs.
    for cur_epoch in range(cfg.BENCHMARK.NUM_EPOCHS):
        # `timer` measures one log window; `timer_epoch` the whole epoch.
        timer = Timer()
        timer_epoch = Timer()
        iter_times = []
        if cfg.BENCHMARK.SHUFFLE:
            loader.shuffle_dataset(dataloader, cur_epoch)
        for cur_iter, _ in enumerate(tqdm.tqdm(dataloader)):
            # Every `log_period` iterations, record the window time and RAM.
            if cur_iter > 0 and cur_iter % log_period == 0:
                iter_times.append(timer.seconds())
                ram_usage, ram_total = misc.cpu_mem_usage()
                logger.info(
                    "Epoch {}: {} iters ({} videos) in {:.2f} seconds. "
                    "RAM Usage: {:.2f}/{:.2f} GB.".format(
                        cur_epoch,
                        log_period,
                        log_period * batch_size,
                        iter_times[-1],
                        ram_usage,
                        ram_total,
                    )
                )
                # Restart the window timer after each report.
                timer.reset()
        epoch_times.append(timer_epoch.seconds())
        ram_usage, ram_total = misc.cpu_mem_usage()
        logger.info(
            "Epoch {}: in total {} iters ({} videos) in {:.2f} seconds. "
            "RAM Usage: {:.2f}/{:.2f} GB.".format(
                cur_epoch,
                len(dataloader),
                len(dataloader) * batch_size,
                epoch_times[-1],
                ram_usage,
                ram_total,
            )
        )
        logger.info(
            "Epoch {}: on average every {} iters ({} videos) take {:.2f}/{:.2f} "
            "(avg/std) seconds.".format(
                cur_epoch,
                log_period,
                log_period * batch_size,
                np.mean(iter_times),
                np.std(iter_times),
            )
        )
    # Summary across all benchmarked epochs.
    logger.info(
        "On average every epoch ({} videos) takes {:.2f}/{:.2f} "
        "(avg/std) seconds.".format(
            len(dataloader) * batch_size,
            np.mean(epoch_times),
            np.std(epoch_times),
        )
    )
| 3,290 | 30.644231 | 81 | py |
STTS | STTS-main/MViT/slowfast/utils/misc.py | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
import json
import logging
import math
import numpy as np
import os
from datetime import datetime
import psutil
import torch
from fvcore.nn.activation_count import activation_count
from fvcore.nn.flop_count import flop_count
from matplotlib import pyplot as plt
from torch import nn
import slowfast.utils.logging as logging
import slowfast.utils.multiprocessing as mpu
from slowfast.datasets.utils import pack_pathway_output
from slowfast.models.batchnorm_helper import SubBatchNorm3d
from slowfast.utils.env import pathmgr
logger = logging.get_logger(__name__)
def check_nan_losses(loss):
    """
    Determine whether the loss is NaN (not a number).
    Args:
        loss (loss): loss to check whether is NaN.
    Raises:
        RuntimeError: when the loss value is NaN.
    """
    if not math.isnan(loss):
        return
    raise RuntimeError("ERROR: Got NaN losses {}".format(datetime.now()))
def params_count(model, ignore_bn=False):
    """
    Compute the number of parameters.
    Args:
        model (model): model to count the number of parameters.
        ignore_bn (bool): if True, parameters of nn.BatchNorm3d modules are
            excluded from the count.
    Returns:
        (int): total number of parameters.
    """
    if ignore_bn:
        total = 0
        for module in model.modules():
            if isinstance(module, nn.BatchNorm3d):
                continue
            # Count only this module's direct parameters; children are
            # visited by model.modules() themselves.
            total += sum(p.numel() for p in module.parameters(recurse=False))
        return total
    return np.sum([p.numel() for p in model.parameters()]).item()
def gpu_mem_usage():
    """
    Compute the GPU memory usage for the current device (GB).
    Returns:
        (float): peak allocated GPU memory in gigabytes; 0 when CUDA is
            unavailable.
    """
    if not torch.cuda.is_available():
        return 0 / 1024 ** 3
    return torch.cuda.max_memory_allocated() / 1024 ** 3
def cpu_mem_usage():
    """
    Compute the system memory (RAM) usage for the current device (GB).
    Returns:
        usage (float): used memory (GB).
        total (float): total memory (GB).
    """
    gib = 1024 ** 3
    stats = psutil.virtual_memory()
    usage = (stats.total - stats.available) / gib
    total = stats.total / gib
    return usage, total
def _get_model_analysis_input(cfg, use_train_input):
    """
    Return a dummy input for model analysis with batch size 1. The input is
    used for analyzing the model (counting flops and activations etc.).
    Args:
        cfg (CfgNode): configs. Details can be found in
            slowfast/config/defaults.py
        use_train_input (bool): if True, return the input for training.
            Otherwise, return the input for testing.
    Returns:
        inputs: the input for model analysis.
    """
    rgb_dimension = 3
    # Pick crop size and dataset family for the requested phase.
    if use_train_input:
        crop_size = cfg.DATA.TRAIN_CROP_SIZE
        is_image_dataset = cfg.TRAIN.DATASET in ["imagenet", "imagenetprefetch"]
    else:
        crop_size = cfg.DATA.TEST_CROP_SIZE
        is_image_dataset = cfg.TEST.DATASET in ["imagenet", "imagenetprefetch"]
    if is_image_dataset:
        # Image datasets take a single frame: (C, H, W).
        input_tensors = torch.rand(rgb_dimension, crop_size, crop_size)
    else:
        # Video datasets take a clip: (C, T, H, W).
        input_tensors = torch.rand(
            rgb_dimension, cfg.DATA.NUM_FRAMES, crop_size, crop_size
        )
    model_inputs = pack_pathway_output(cfg, input_tensors)
    for i in range(len(model_inputs)):
        # Add the batch dimension and move to GPU when configured.
        model_inputs[i] = model_inputs[i].unsqueeze(0)
        if cfg.NUM_GPUS:
            model_inputs[i] = model_inputs[i].cuda(non_blocking=True)
    # If detection is enabled, count flops for one proposal.
    if cfg.DETECTION.ENABLE:
        bbox = torch.tensor([[0, 0, 1.0, 0, 1.0]])
        if cfg.NUM_GPUS:
            bbox = bbox.cuda()
        return (model_inputs, bbox)
    return (model_inputs,)
def get_model_stats(model, cfg, mode, use_train_input):
    """
    Compute statistics for the current model given the config.
    Args:
        model (model): model to perform analysis.
        cfg (CfgNode): configs. Details can be found in
            slowfast/config/defaults.py
        mode (str): Options include `flop` or `activation`. Compute either
            flop (gflops) or activation count (mega).
        use_train_input (bool): if True, compute statistics for training.
            Otherwise, compute statistics for testing.
    Returns:
        float: the total count of the given model.
    """
    assert mode in [
        "flop",
        "activation",
    ], "'{}' not supported for model analysis".format(mode)
    stats_fun = flop_count if mode == "flop" else activation_count
    # Run the analysis in eval mode (avoids getting stuck with sync
    # batchnorm), then restore the previous training/eval state.
    was_training = model.training
    model.eval()
    inputs = _get_model_analysis_input(cfg, use_train_input)
    count_dict, *_ = stats_fun(model, inputs)
    model.train(was_training)
    return sum(count_dict.values())
def log_model_info(model, cfg, use_train_input=True):
    """
    Log info, includes number of parameters, gpu usage, gflops and
    activation count. The model info is computed when the model is in
    validation mode.
    Args:
        model (model): model to log the info.
        cfg (CfgNode): configs. Details can be found in
            slowfast/config/defaults.py
        use_train_input (bool): if True, log info for training. Otherwise,
            log info for testing.
    """
    logger.info("Model:\n{}".format(model))
    logger.info("Params: {:,}".format(params_count(model)))
    # Fix: gpu_mem_usage() returns gigabytes, so label the value GB
    # (the previous message said "MB").
    logger.info("Mem: {:,} GB".format(gpu_mem_usage()))
    logger.info(
        "Flops: {:,} G".format(
            get_model_stats(model, cfg, "flop", use_train_input)
        )
    )
    logger.info(
        "Activations: {:,} M".format(
            get_model_stats(model, cfg, "activation", use_train_input)
        )
    )
    logger.info("nvidia-smi")
    os.system("nvidia-smi")
def is_eval_epoch(cfg, cur_epoch, multigrid_schedule):
    """
    Decide whether the model should be evaluated at the current epoch.
    Args:
        cfg (CfgNode): configs. Details can be found in
            slowfast/config/defaults.py
        cur_epoch (int): current epoch.
        multigrid_schedule (List): schedule for multigrid training.
    """
    # Always evaluate on the very last epoch.
    if cur_epoch + 1 == cfg.SOLVER.MAX_EPOCH:
        return True
    if multigrid_schedule is not None:
        prev_epoch = 0
        for stage in multigrid_schedule:
            stage_end = stage[-1]
            if cur_epoch < stage_end:
                # Spread EVAL_FREQ evaluations across this stage, aligned
                # so the last epoch of the stage is always evaluated.
                period = max(
                    (stage_end - prev_epoch) // cfg.MULTIGRID.EVAL_FREQ + 1, 1
                )
                return (stage_end - 1 - cur_epoch) % period == 0
            prev_epoch = stage_end
    return (cur_epoch + 1) % cfg.TRAIN.EVAL_PERIOD == 0
def plot_input(tensor, bboxes=(), texts=(), path="./tmp_vis.png"):
    """
    Plot the input tensor with the optional bounding boxes/texts and save
    the figure to disk.
    Args:
        tensor (tensor): a tensor with shape of `NxCxHxW`.
        bboxes (tuple): bounding boxes with format of [[x, y, h, w]].
        texts (tuple): a tuple of string to plot.
        path (str): path to the image to save to.
    """
    # Normalize values to [0, 1] for display.
    tensor = tensor.float()
    tensor = tensor - tensor.min()
    tensor = tensor / tensor.max()
    num_imgs = tensor.shape[0]
    fig, axes = plt.subplots(nrows=1, ncols=num_imgs, figsize=(50, 20))
    for idx in range(num_imgs):
        axes[idx].axis("off")
        axes[idx].imshow(tensor[idx].permute(1, 2, 0))
        if bboxes is not None and len(bboxes) > idx:
            # Draw each box as four green line segments.
            for box in bboxes[idx]:
                x1, y1, x2, y2 = box
                axes[idx].vlines(x1, y1, y2, colors="g", linestyles="solid")
                axes[idx].vlines(x2, y1, y2, colors="g", linestyles="solid")
                axes[idx].hlines(y1, x1, x2, colors="g", linestyles="solid")
                axes[idx].hlines(y2, x1, x2, colors="g", linestyles="solid")
        if texts is not None and len(texts) > idx:
            axes[idx].text(0, 0, texts[idx])
    fig.savefig(path)
def frozen_bn_stats(model):
    """
    Put every BatchNorm3d layer of the model into eval mode so that its
    running statistics are frozen.
    Args:
        model (model): model whose bn layers are set to eval mode.
    """
    for module in model.modules():
        if isinstance(module, nn.BatchNorm3d):
            module.eval()
def aggregate_sub_bn_stats(module):
    """
    Recursively find all SubBN modules and aggregate their sub-BN stats.
    Args:
        module (nn.Module)
    Returns:
        count (int): number of SubBN modules found.
    """
    total = 0
    for child in module.children():
        if isinstance(child, SubBatchNorm3d):
            child.aggregate_stats()
            total += 1
        else:
            # Recurse into composite children.
            total += aggregate_sub_bn_stats(child)
    return total
def launch_job(cfg, init_method, func, daemon=False):
    """
    Run 'func' on one or more GPUs, specified in cfg.
    Args:
        cfg (CfgNode): configs. Details can be found in
            slowfast/config/defaults.py
        init_method (str): initialization method to launch the job with
            multiple devices.
        func (function): job to run on GPU(s).
        daemon (bool): The spawned processes' daemon flag. If set to True,
            daemonic processes will be created.
    """
    if cfg.NUM_GPUS <= 1:
        # Single device: run directly in the current process.
        func(cfg=cfg)
        return
    torch.multiprocessing.spawn(
        mpu.run,
        nprocs=cfg.NUM_GPUS,
        args=(
            cfg.NUM_GPUS,
            func,
            init_method,
            cfg.SHARD_ID,
            cfg.NUM_SHARDS,
            cfg.DIST_BACKEND,
            cfg,
        ),
        daemon=daemon,
    )
def get_class_names(path, parent_path=None, subset_path=None):
    """
    Read json file with entries {classname: index} and return an array of
    class names in order. If parent_path is provided, load and map all
    children to their ids.
    Args:
        path (str): path to class ids json file.
            File must be in the format {"class1": id1, "class2": id2, ...}
        parent_path (Optional[str]): path to parent-child json file.
            File must be in the format
            {"parent1": ["child1", "child2", ...], ...}
        subset_path (Optional[str]): path to text file containing a subset
            of class names, separated by newline characters.
    Returns:
        class_names (list of strs): list of class names.
        class_parents (dict): a dictionary where key is the name of the
            parent class and value is a list of ids of the children classes.
        subset_ids (list of ints): list of ids of the classes provided in
            the subset file.
    """
    try:
        with pathmgr.open(path, "r") as f:
            class2idx = json.load(f)
    except Exception as err:
        print("Fail to load file from {} with error {}".format(path, err))
        return
    # Invert the mapping: index -> class name.
    class_names = [None] * (max(class2idx.values()) + 1)
    for name, idx in class2idx.items():
        class_names[idx] = name
    class_parent = None
    if parent_path is not None and parent_path != "":
        try:
            with pathmgr.open(parent_path, "r") as f:
                d_parent = json.load(f)
        except EnvironmentError as err:
            print(
                "Fail to load file from {} with error {}".format(
                    parent_path, err
                )
            )
            return
        # Map each parent to the ids of its known children; unknown
        # children are silently skipped.
        class_parent = {
            parent: [
                class2idx[c] for c in children if class2idx.get(c) is not None
            ]
            for parent, children in d_parent.items()
        }
    subset_ids = None
    if subset_path is not None and subset_path != "":
        try:
            with pathmgr.open(subset_path, "r") as f:
                subset = f.read().split("\n")
            subset_ids = [
                class2idx[name]
                for name in subset
                if class2idx.get(name) is not None
            ]
        except EnvironmentError as err:
            print(
                "Fail to load file from {} with error {}".format(
                    subset_path, err
                )
            )
            return
    return class_names, class_parent, subset_ids
| 12,587 | 31.78125 | 84 | py |
STTS | STTS-main/MViT/slowfast/utils/distributed.py | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
"""Distributed helpers."""
import functools
import logging
import pickle
import torch
import torch.distributed as dist
# Process group covering only the processes on the local machine; populated
# by init_distributed_training() when running multi-GPU training.
_LOCAL_PROCESS_GROUP = None
def all_gather(tensors):
    """
    All gathers the provided tensors from all processes across machines.
    Args:
        tensors (list): tensors to perform all gather across all processes
            in all machines.
    Returns:
        (list): for each input tensor, the per-rank results concatenated
            along dim 0.
    """
    world_size = dist.get_world_size()
    gathered = []
    for tensor in tensors:
        # One placeholder per rank; all_gather fills them in-place.
        placeholders = [torch.ones_like(tensor) for _ in range(world_size)]
        dist.all_gather(placeholders, tensor, async_op=False)
        gathered.append(placeholders)
    return [torch.cat(group, dim=0) for group in gathered]
def all_reduce(tensors, average=True):
    """
    All reduce the provided tensors from all processes across machines.
    Args:
        tensors (list): tensors to perform all reduce across all processes
            in all machines.
        average (bool): if True, scale the reduced tensors by the overall
            number of processes across all machines.
    """
    for tensor in tensors:
        dist.all_reduce(tensor, async_op=False)
    if average:
        scale = 1.0 / dist.get_world_size()
        for tensor in tensors:
            tensor.mul_(scale)
    return tensors
def init_process_group(
    local_rank,
    local_world_size,
    shard_id,
    num_shards,
    init_method,
    dist_backend="nccl",
):
    """
    Initializes the default process group.
    Args:
        local_rank (int): the rank on the current local machine.
        local_world_size (int): the world size (number of processes
            running) on the current local machine.
        shard_id (int): the shard index (machine rank) of the current
            machine.
        num_shards (int): number of shards for distributed training.
        init_method (string): how to initialize the group across
            processes, either via a shared file system ("file://...") or
            a tcp address ("tcp://...").
        dist_backend (string): backend to use for distributed training.
            Options include gloo, mpi and nccl; details at
            https://pytorch.org/docs/stable/distributed.html
    """
    # Pin this process to its GPU.
    torch.cuda.set_device(local_rank)
    # Global rank and world size span all shards (machines).
    global_rank = shard_id * local_world_size + local_rank
    global_world_size = num_shards * local_world_size
    dist.init_process_group(
        backend=dist_backend,
        init_method=init_method,
        world_size=global_world_size,
        rank=global_rank,
    )
def is_master_proc(num_gpus=8):
    """
    Determines if the current process is the master process.
    Without an initialized process group every process counts as master.
    """
    if not torch.distributed.is_initialized():
        return True
    return dist.get_rank() % num_gpus == 0
def is_root_proc():
    """
    Determines if the current process is the root process.
    Without an initialized process group every process counts as root.
    """
    if not torch.distributed.is_initialized():
        return True
    return dist.get_rank() == 0
def get_world_size():
    """
    Return the number of processes in the default group, or 1 when
    distributed training is unavailable or not initialized.
    """
    if dist.is_available() and dist.is_initialized():
        return dist.get_world_size()
    return 1
def get_rank():
    """
    Return the global rank of the current process, or 0 when distributed
    training is unavailable or not initialized.
    """
    if dist.is_available() and dist.is_initialized():
        return dist.get_rank()
    return 0
def synchronize():
    """
    Barrier across all processes when using distributed training.
    A no-op when distributed training is unavailable, uninitialized, or
    running with a single process.
    """
    if not dist.is_available() or not dist.is_initialized():
        return
    if dist.get_world_size() == 1:
        return
    dist.barrier()
@functools.lru_cache()
def _get_global_gloo_group():
    """
    Return a process group based on gloo backend, containing all the
    ranks. The result is cached.
    Returns:
        (group): pytorch dist group.
    """
    # Reuse WORLD when its backend is already CPU-capable; only nccl
    # needs a dedicated gloo group.
    if dist.get_backend() != "nccl":
        return dist.group.WORLD
    return dist.new_group(backend="gloo")
def _serialize_to_tensor(data, group):
    """
    Serialize arbitrary picklable data into a ByteTensor. Note that only
    the `gloo` and `nccl` backends are supported.
    Args:
        data (data): data to be serialized.
        group (group): pytorch dist group.
    Returns:
        tensor (ByteTensor): the serialized data.
    """
    backend = dist.get_backend(group)
    assert backend in ["gloo", "nccl"]
    # nccl needs the payload on GPU; gloo works on CPU.
    device = torch.device("cuda" if backend == "nccl" else "cpu")
    buffer = pickle.dumps(data)
    if len(buffer) > 1024 ** 3:
        logger = logging.getLogger(__name__)
        logger.warning(
            "Rank {} trying to all-gather {:.2f} GB of data on device {}".format(
                get_rank(), len(buffer) / (1024 ** 3), device
            )
        )
    storage = torch.ByteStorage.from_buffer(buffer)
    return torch.ByteTensor(storage).to(device=device)
def _pad_to_largest_tensor(tensor, group):
    """
    Zero-pad the local tensor so every rank ends up with the same
    (maximum) size, as required by torch's all_gather.
    Args:
        tensor (tensor): tensor to pad.
        group (group): pytorch dist group.
    Returns:
        list[int]: size of the tensor on each rank.
        Tensor: padded tensor that has the max size.
    """
    num_ranks = dist.get_world_size(group=group)
    assert (
        num_ranks >= 1
    ), "comm.gather/all_gather must be called from ranks within the given group!"
    local_size = torch.tensor(
        [tensor.numel()], dtype=torch.int64, device=tensor.device
    )
    # Exchange per-rank sizes first.
    sizes = [
        torch.zeros([1], dtype=torch.int64, device=tensor.device)
        for _ in range(num_ranks)
    ]
    dist.all_gather(sizes, local_size, group=group)
    sizes = [int(size.item()) for size in sizes]
    max_size = max(sizes)
    # Pad with zero bytes up to the largest tensor across ranks.
    if local_size != max_size:
        filler = torch.zeros(
            (max_size - local_size,), dtype=torch.uint8, device=tensor.device
        )
        tensor = torch.cat((tensor, filler), dim=0)
    return sizes, tensor
def all_gather_unaligned(data, group=None):
    """
    Run all_gather on arbitrary picklable data (not necessarily tensors).
    Args:
        data: any picklable object.
        group: a torch process group. By default, will use a group which
            contains all ranks on gloo backend.
    Returns:
        list[data]: list of data gathered from each rank.
    """
    if get_world_size() == 1:
        return [data]
    if group is None:
        group = _get_global_gloo_group()
    if dist.get_world_size(group) == 1:
        return [data]
    tensor = _serialize_to_tensor(data, group)
    size_list, tensor = _pad_to_largest_tensor(tensor, group)
    max_size = max(size_list)
    # Gather the padded byte tensors from every rank.
    buffers = [
        torch.empty((max_size,), dtype=torch.uint8, device=tensor.device)
        for _ in size_list
    ]
    dist.all_gather(buffers, tensor, group=group)
    # Strip padding and unpickle each rank's payload.
    return [
        pickle.loads(buf.cpu().numpy().tobytes()[:size])
        for size, buf in zip(size_list, buffers)
    ]
def init_distributed_training(cfg):
    """
    Initialize variables needed for distributed training.
    Creates one process group per machine and stores the group of the
    current machine in _LOCAL_PROCESS_GROUP. A no-op for a single GPU.
    """
    if cfg.NUM_GPUS <= 1:
        return
    num_gpus_per_machine = cfg.NUM_GPUS
    num_machines = dist.get_world_size() // num_gpus_per_machine
    for machine_idx in range(num_machines):
        machine_ranks = list(
            range(
                machine_idx * num_gpus_per_machine,
                (machine_idx + 1) * num_gpus_per_machine,
            )
        )
        pg = dist.new_group(machine_ranks)
        if machine_idx == cfg.SHARD_ID:
            global _LOCAL_PROCESS_GROUP
            _LOCAL_PROCESS_GROUP = pg
def get_local_size() -> int:
    """
    Returns:
        The size of the per-machine process group, i.e. the number of
        processes per machine. Falls back to 1 when distributed training
        is unavailable or not initialized.
    """
    if not (dist.is_available() and dist.is_initialized()):
        return 1
    return dist.get_world_size(group=_LOCAL_PROCESS_GROUP)
def get_local_rank() -> int:
    """
    Returns:
        The rank of the current process within the local (per-machine)
        process group. Falls back to 0 when distributed training is
        unavailable or not initialized.
    """
    if not (dist.is_available() and dist.is_initialized()):
        return 0
    assert _LOCAL_PROCESS_GROUP is not None
    return dist.get_rank(group=_LOCAL_PROCESS_GROUP)
| 8,879 | 27.645161 | 85 | py |
STTS | STTS-main/MViT/slowfast/utils/metrics.py | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
"""Functions for computing metrics."""
import torch
def topks_correct(preds, labels, ks):
    """
    Given the predictions, labels, and a list of top-k values, compute the
    number of correct predictions for each top-k value.
    Args:
        preds (array): array of predictions. Dimension is batchsize
            N x ClassNum.
        labels (array): array of labels. Dimension is batchsize N.
        ks (list): list of top-k values. For example, ks = [1, 5]
            corresponds to top-1 and top-5.
    Returns:
        topks_correct (list): list of numbers, where the `i`-th entry
            corresponds to the number of top-`ks[i]` correct predictions.
    """
    assert preds.size(0) == labels.size(
        0
    ), "Batch dim of predictions and labels must match"
    # Indices of the max(ks) highest-scoring classes per sample, sorted
    # by score and transposed to (max_k, batch_size).
    _, top_inds = torch.topk(preds, max(ks), dim=1, largest=True, sorted=True)
    top_inds = top_inds.t()
    # Broadcast labels to (max_k, batch_size); (i, j) is True iff the
    # i-th-ranked prediction for sample j is correct.
    hits = top_inds.eq(labels.view(1, -1).expand_as(top_inds))
    # For each k, count hits within the first k rows.
    return [hits[:k, :].float().sum() for k in ks]
def topk_errors(preds, labels, ks):
    """
    Computes the top-k error (in percent) for each k.
    Args:
        preds (array): array of predictions. Dimension is N.
        labels (array): array of labels. Dimension is N.
        ks (list): list of ks to calculate the top-k errors for.
    """
    batch_size = preds.size(0)
    correct = topks_correct(preds, labels, ks)
    return [(1.0 - c / batch_size) * 100.0 for c in correct]
def topk_accuracies(preds, labels, ks):
    """
    Computes the top-k accuracy (in percent) for each k.
    Args:
        preds (array): array of predictions. Dimension is N.
        labels (array): array of labels. Dimension is N.
        ks (list): list of ks to calculate the top-k accuracies for.
    """
    batch_size = preds.size(0)
    correct = topks_correct(preds, labels, ks)
    return [(c / batch_size) * 100.0 for c in correct]
| 2,381 | 35.646154 | 76 | py |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.