id int64 0 190k | prompt stringlengths 21 13.4M | docstring stringlengths 1 12k ⌀ |
|---|---|---|
22,319 | import numpy as np
import os
import cv2
import torch
import tqdm
import slowfast.utils.checkpoint as cu
import slowfast.utils.logging as logging
from slowfast.datasets.ava_helper import parse_bboxes_file
from slowfast.datasets.cv2_transform import scale, scale_boxes
from slowfast.datasets.utils import get_sequence
from slowfast.models import build_model
from slowfast.utils import misc
from slowfast.utils.env import pathmgr
from slowfast.visualization.utils import process_cv2_inputs
from slowfast.visualization.video_visualizer import VideoVisualizer
def parse_bboxes_file(
    ann_filenames, ann_is_gt_box, detect_thresh, boxes_sample_rate=1
):
    """
    Parse AVA bounding boxes files.
    Args:
        ann_filenames (list of str(s)): a list of AVA bounding boxes annotation files.
        ann_is_gt_box (list of bools): a list of boolean to indicate whether the corresponding
            ann_file is ground-truth. `ann_is_gt_box[i]` correspond to `ann_filenames[i]`.
        detect_thresh (float): threshold for accepting predicted boxes, range [0, 1].
        boxes_sample_rate (int): sample rate for test bounding boxes. Get 1 every `boxes_sample_rate`.
    Returns:
        all_boxes (dict): maps `video_name` -> `frame_sec` -> list of
            [box_coords, labels] pairs, where box_coords is [x1, y1, x2, y2]
            normalized to [0, 1] and labels is a list of int class ids
            (-1 when the label column is empty).
        count (int): number of annotations with a valid (!= -1) label.
        unique_box_count (int): number of distinct (video, sec, box) entries.
    """
    all_boxes = {}
    count = 0
    unique_box_count = 0
    for filename, is_gt_box in zip(ann_filenames, ann_is_gt_box):
        with pathmgr.open(filename, "r") as f:
            for line in f:
                row = line.strip().split(",")
                # When we use predicted boxes to train/eval, we need to
                # ignore the boxes whose scores are below the threshold.
                if not is_gt_box:
                    # Predicted-box rows carry a confidence score in column 7.
                    score = float(row[7])
                    if score < detect_thresh:
                        continue
                video_name, frame_sec = row[0], int(row[1])
                if frame_sec % boxes_sample_rate != 0:
                    continue
                # Box with format [x1, y1, x2, y2] with a range of [0, 1] as float.
                box_key = ",".join(row[2:6])
                box = list(map(float, row[2:6]))
                # An empty label column means "no action label" for this box.
                label = -1 if row[6] == "" else int(row[6])
                if video_name not in all_boxes:
                    all_boxes[video_name] = {}
                    # Pre-create an entry for every valid AVA keyframe second.
                    # NOTE(review): AVA_VALID_FRAMES is a module-level constant
                    # defined elsewhere in this module — confirm it is in scope.
                    for sec in AVA_VALID_FRAMES:
                        all_boxes[video_name][sec] = {}
                if box_key not in all_boxes[video_name][frame_sec]:
                    all_boxes[video_name][frame_sec][box_key] = [box, []]
                    unique_box_count += 1
                # The same box may appear once per action label; accumulate.
                all_boxes[video_name][frame_sec][box_key][1].append(label)
                if label != -1:
                    count += 1
    for video_name in all_boxes.keys():
        for frame_sec in all_boxes[video_name].keys():
            # Save in format of a list of [box_i, box_i_labels].
            all_boxes[video_name][frame_sec] = list(
                all_boxes[video_name][frame_sec].values()
            )
    return all_boxes, count, unique_box_count
The provided code snippet includes necessary dependencies for implementing the `load_boxes_labels` function. Write a Python function `def load_boxes_labels(cfg, video_name, fps, img_width, img_height)` to solve the following problem:
Loading boxes and labels from AVA bounding boxes csv files. Args: cfg (CfgNode): config. video_name (str): name of the given video. fps (int or float): frames per second of the input video/images folder. img_width (int): width of images in input video/images folder. img_height (int): height of images in input video/images folder. Returns: preds_boxes (dict): a dict which maps from `frame_idx` to a list of `boxes` and `labels`. Each `box` is a list of 4 box coordinates. `labels[i]` is a list of labels for `boxes[i]`. Note that label is -1 for predicted boxes. gt_boxes (dict): if cfg.DEMO.GT_BOXES is given, return similar dict as all_pred_boxes but for ground-truth boxes.
Here is the function:
def load_boxes_labels(cfg, video_name, fps, img_width, img_height):
    """
    Loading boxes and labels from AVA bounding boxes csv files.
    Args:
        cfg (CfgNode): config.
        video_name (str): name of the given video.
        fps (int or float): frames per second of the input video/images folder.
        img_width (int): width of images in input video/images folder.
        img_height (int): height of images in input video/images folder.
    Returns:
        preds_boxes (dict): a dict which maps from `frame_idx` to a list of `boxes`
            and `labels`. Each `box` is a list of 4 box coordinates. `labels[i]` is
            a list of labels for `boxes[i]`. Note that label is -1 for predicted boxes.
        gt_boxes (dict): if cfg.DEMO.GT_BOXES is given, return similar dict as
            all_pred_boxes but for ground-truth boxes.
    """
    starting_second = cfg.DEMO.STARTING_SECOND
    def sec_to_frameidx(sec):
        # Convert an absolute keyframe second into a 0-based frame index
        # relative to the demo's starting second.
        return (sec - starting_second) * fps
    def process_bboxes_dict(dictionary):
        """
        Replace all `keyframe_sec` in `dictionary` with `keyframe_idx` and
        merge all [`box_coordinate`, `box_labels`] pairs into
        [`all_boxes_coordinates`, `all_boxes_labels`] for each `keyframe_idx`.
        Args:
            dictionary (dict): a dictionary which maps `frame_sec` to a list of `box`.
                Each `box` is a [`box_coord`, `box_labels`] where `box_coord` is the
                coordinates of box and 'box_labels` are the corresponding
                labels for the box.
        Returns:
            new_dict (dict): a dict which maps from `frame_idx` to a list of `boxes`
                and `labels`. Each `box` in `boxes` is a list of 4 box coordinates. `labels[i]`
                is a list of labels for `boxes[i]`. Note that label is -1 for predicted boxes.
        """
        # Replace all keyframe_sec with keyframe_idx.
        new_dict = {}
        for keyframe_sec, boxes_and_labels in dictionary.items():
            # Ignore keyframes with no boxes
            if len(boxes_and_labels) == 0:
                continue
            keyframe_idx = sec_to_frameidx(keyframe_sec)
            # Transpose [[box, labels], ...] into (boxes, labels) tuples.
            boxes, labels = list(zip(*boxes_and_labels))
            # Shift labels from [1, n_classes] to [0, n_classes - 1].
            labels = [[i - 1 for i in box_label] for box_label in labels]
            boxes = np.array(boxes)
            # Scale normalized [0, 1] coordinates to pixel coordinates.
            boxes[:, [0, 2]] *= img_width
            boxes[:, [1, 3]] *= img_height
            new_dict[keyframe_idx] = [boxes.tolist(), list(labels)]
        return new_dict
    preds_boxes_path = cfg.DEMO.PREDS_BOXES
    gt_boxes_path = cfg.DEMO.GT_BOXES
    preds_boxes, _, _ = parse_bboxes_file(
        ann_filenames=[preds_boxes_path],
        ann_is_gt_box=[False],
        detect_thresh=cfg.AVA.DETECTION_SCORE_THRESH,
        boxes_sample_rate=1,
    )
    # Keep only the entries for the requested video.
    preds_boxes = preds_boxes[video_name]
    if gt_boxes_path == "":
        # No ground-truth file configured; return None for gt_boxes.
        gt_boxes = None
    else:
        gt_boxes, _, _ = parse_bboxes_file(
            ann_filenames=[gt_boxes_path],
            ann_is_gt_box=[True],
            detect_thresh=cfg.AVA.DETECTION_SCORE_THRESH,
            boxes_sample_rate=1,
        )
        gt_boxes = gt_boxes[video_name]
    preds_boxes = process_bboxes_dict(preds_boxes)
    if gt_boxes is not None:
        gt_boxes = process_bboxes_dict(gt_boxes)
    return preds_boxes, gt_boxes
22,320 | import itertools
import logging as log
import numpy as np
import matplotlib.pyplot as plt
import torch
from detectron2.utils.visualizer import Visualizer
import slowfast.utils.logging as logging
from slowfast.utils.misc import get_class_names
logger = logging.get_logger(__name__)
The provided code snippet includes necessary dependencies for implementing the `_create_text_labels` function. Write a Python function `def _create_text_labels(classes, scores, class_names, ground_truth=False)` to solve the following problem:
Create text labels. Args: classes (list[int]): a list of class ids for each example. scores (list[float] or None): list of scores for each example. class_names (list[str]): a list of class names, ordered by their ids. ground_truth (bool): whether the labels are ground truth. Returns: labels (list[str]): formatted text labels.
Here is the function:
def _create_text_labels(classes, scores, class_names, ground_truth=False):
"""
Create text labels.
Args:
classes (list[int]): a list of class ids for each example.
scores (list[float] or None): list of scores for each example.
class_names (list[str]): a list of class names, ordered by their ids.
ground_truth (bool): whether the labels are ground truth.
Returns:
labels (list[str]): formatted text labels.
"""
try:
labels = [class_names[i] for i in classes]
except IndexError:
logger.error("Class indices get out of range: {}".format(classes))
return None
if ground_truth:
labels = ["[{}] {}".format("GT", label) for label in labels]
elif scores is not None:
assert len(classes) == len(scores)
labels = [
"[{:.2f}] {}".format(s, label) for s, label in zip(scores, labels)
]
return labels | Create text labels. Args: classes (list[int]): a list of class ids for each example. scores (list[float] or None): list of scores for each example. class_names (list[str]): a list of class names, ordered by their ids. ground_truth (bool): whether the labels are ground truth. Returns: labels (list[str]): formatted text labels. |
22,321 | import math
from fvcore.common.config import CfgNode
from . import custom_config
def assert_and_infer_cfg(cfg):
    """
    Check config invariants and apply inferred adjustments in place.
    Args:
        cfg (CfgNode): the config to validate; SOLVER learning rates are
            scaled by NUM_SHARDS when BASE_LR_SCALE_NUM_SHARDS is set.
    Returns:
        cfg (CfgNode): the same (possibly mutated) config object.
    """
    # BN assertions.
    if cfg.BN.USE_PRECISE_STATS:
        assert cfg.BN.NUM_BATCHES_PRECISE >= 0
    # TRAIN assertions.
    assert cfg.TRAIN.CHECKPOINT_TYPE in ("pytorch", "caffe2")
    assert cfg.NUM_GPUS == 0 or cfg.TRAIN.BATCH_SIZE % cfg.NUM_GPUS == 0
    # TEST assertions.
    assert cfg.TEST.CHECKPOINT_TYPE in ("pytorch", "caffe2")
    assert cfg.NUM_GPUS == 0 or cfg.TEST.BATCH_SIZE % cfg.NUM_GPUS == 0
    # RESNET assertions.
    num_groups = cfg.RESNET.NUM_GROUPS
    width_per_group = cfg.RESNET.WIDTH_PER_GROUP
    assert num_groups > 0
    assert width_per_group > 0
    assert width_per_group % num_groups == 0
    # Scale learning rates by the number of shards when requested.
    if cfg.SOLVER.BASE_LR_SCALE_NUM_SHARDS:
        num_shards = cfg.NUM_SHARDS
        cfg.SOLVER.BASE_LR *= num_shards
        cfg.SOLVER.WARMUP_START_LR *= num_shards
        cfg.SOLVER.COSINE_END_LR *= num_shards
    # General assertions.
    assert cfg.SHARD_ID < cfg.NUM_SHARDS
    return cfg
22,322 |
def add_custom_config(_C):
    """
    Hook for registering project-specific config options.
    Args:
        _C (CfgNode): the root config node to extend in place.
    """
    # Add your own customized configs.
    pass
22,323 | import logging
import os
from collections import defaultdict
from slowfast.utils.env import pathmgr
logger = logging.getLogger(__name__)
pathmgr = PathManagerFactory.get(key="pyslowfast")
The provided code snippet includes necessary dependencies for implementing the `load_image_lists` function. Write a Python function `def load_image_lists(cfg, is_train)` to solve the following problem:
Loading image paths from corresponding files. Args: cfg (CfgNode): config. is_train (bool): if it is training dataset or not. Returns: image_paths (list[list]): a list of items. Each item (also a list) corresponds to one video and contains the paths of images for this video. video_idx_to_name (list): a list which stores video names.
Here is the function:
def load_image_lists(cfg, is_train):
    """
    Load per-video image paths from the configured frame-list files.
    Args:
        cfg (CfgNode): config.
        is_train (bool): if it is training dataset or not.
    Returns:
        image_paths (list[list]): one inner list per video, holding the full
            paths of that video's frames.
        video_idx_to_name (list): video names, indexed like `image_paths`.
    """
    frame_list_names = (
        cfg.AVA.TRAIN_LISTS if is_train else cfg.AVA.TEST_LISTS
    )
    list_filenames = [
        os.path.join(cfg.AVA.FRAME_LIST_DIR, name)
        for name in frame_list_names
    ]
    image_paths = defaultdict(list)
    video_name_to_idx = {}
    video_idx_to_name = []
    for list_filename in list_filenames:
        with pathmgr.open(list_filename, "r") as f:
            # Skip the header line.
            f.readline()
            for line in f:
                row = line.split()
                # Expected row format:
                # original_vido_id video_id frame_id path labels.
                assert len(row) == 5
                video_name = row[0]
                if video_name not in video_name_to_idx:
                    # First time this video appears: assign the next index.
                    video_name_to_idx[video_name] = len(video_idx_to_name)
                    video_idx_to_name.append(video_name)
                data_key = video_name_to_idx[video_name]
                image_paths[data_key].append(
                    os.path.join(cfg.AVA.FRAME_DIR, row[3])
                )
    # Flatten the index-keyed dict into a list ordered by video index.
    image_paths = [image_paths[idx] for idx in range(len(image_paths))]
    logger.info(
        "Finished loading image paths from: %s" % ", ".join(list_filenames)
    )
    return image_paths, video_idx_to_name
22,324 | import logging
import os
from collections import defaultdict
from slowfast.utils.env import pathmgr
logger = logging.getLogger(__name__)
def parse_bboxes_file(
    ann_filenames, ann_is_gt_box, detect_thresh, boxes_sample_rate=1
):
    """
    Parse AVA bounding boxes files.
    Args:
        ann_filenames (list of str(s)): a list of AVA bounding boxes annotation files.
        ann_is_gt_box (list of bools): a list of boolean to indicate whether the corresponding
            ann_file is ground-truth. `ann_is_gt_box[i]` correspond to `ann_filenames[i]`.
        detect_thresh (float): threshold for accepting predicted boxes, range [0, 1].
        boxes_sample_rate (int): sample rate for test bounding boxes. Get 1 every `boxes_sample_rate`.
    Returns:
        all_boxes (dict): maps `video_name` -> `frame_sec` -> list of
            [box_coords, labels] pairs, where box_coords is [x1, y1, x2, y2]
            normalized to [0, 1] and labels is a list of int class ids
            (-1 when the label column is empty).
        count (int): number of annotations with a valid (!= -1) label.
        unique_box_count (int): number of distinct (video, sec, box) entries.
    """
    all_boxes = {}
    count = 0
    unique_box_count = 0
    for filename, is_gt_box in zip(ann_filenames, ann_is_gt_box):
        with pathmgr.open(filename, "r") as f:
            for line in f:
                row = line.strip().split(",")
                # When we use predicted boxes to train/eval, we need to
                # ignore the boxes whose scores are below the threshold.
                if not is_gt_box:
                    # Predicted-box rows carry a confidence score in column 7.
                    score = float(row[7])
                    if score < detect_thresh:
                        continue
                video_name, frame_sec = row[0], int(row[1])
                if frame_sec % boxes_sample_rate != 0:
                    continue
                # Box with format [x1, y1, x2, y2] with a range of [0, 1] as float.
                box_key = ",".join(row[2:6])
                box = list(map(float, row[2:6]))
                # An empty label column means "no action label" for this box.
                label = -1 if row[6] == "" else int(row[6])
                if video_name not in all_boxes:
                    all_boxes[video_name] = {}
                    # Pre-create an entry for every valid AVA keyframe second
                    # (AVA_VALID_FRAMES is defined at module level, L365).
                    for sec in AVA_VALID_FRAMES:
                        all_boxes[video_name][sec] = {}
                if box_key not in all_boxes[video_name][frame_sec]:
                    all_boxes[video_name][frame_sec][box_key] = [box, []]
                    unique_box_count += 1
                # The same box may appear once per action label; accumulate.
                all_boxes[video_name][frame_sec][box_key][1].append(label)
                if label != -1:
                    count += 1
    for video_name in all_boxes.keys():
        for frame_sec in all_boxes[video_name].keys():
            # Save in format of a list of [box_i, box_i_labels].
            all_boxes[video_name][frame_sec] = list(
                all_boxes[video_name][frame_sec].values()
            )
    return all_boxes, count, unique_box_count
The provided code snippet includes necessary dependencies for implementing the `load_boxes_and_labels` function. Write a Python function `def load_boxes_and_labels(cfg, mode)` to solve the following problem:
Loading boxes and labels from csv files. Args: cfg (CfgNode): config. mode (str): 'train', 'val', or 'test' mode. Returns: all_boxes (dict): a dict which maps from `video_name` and `frame_sec` to a list of `box`. Each `box` is a [`box_coord`, `box_labels`] where `box_coord` is the coordinates of box and 'box_labels` are the corresponding labels for the box.
Here is the function:
def load_boxes_and_labels(cfg, mode):
    """
    Load AVA boxes and labels from the csv annotation files for `mode`.
    Args:
        cfg (CfgNode): config.
        mode (str): 'train', 'val', or 'test' mode.
    Returns:
        all_boxes (dict): maps `video_name` and `frame_sec` to a list of
            [`box_coord`, `box_labels`] entries, where `box_coord` holds the
            box coordinates and `box_labels` the labels attached to that box.
    """
    is_train = mode == "train"
    # Ground-truth lists are only used for training.
    gt_lists = cfg.AVA.TRAIN_GT_BOX_LISTS if is_train else []
    pred_lists = (
        cfg.AVA.TRAIN_PREDICT_BOX_LISTS
        if is_train
        else cfg.AVA.TEST_PREDICT_BOX_LISTS
    )
    ann_filenames = [
        os.path.join(cfg.AVA.ANNOTATION_DIR, name)
        for name in gt_lists + pred_lists
    ]
    ann_is_gt_box = [True] * len(gt_lists) + [False] * len(pred_lists)
    detect_thresh = cfg.AVA.DETECTION_SCORE_THRESH
    # Keep only every 4th second on val unless FULL_TEST_ON_VAL is set.
    boxes_sample_rate = (
        4 if mode == "val" and not cfg.AVA.FULL_TEST_ON_VAL else 1
    )
    all_boxes, count, unique_box_count = parse_bboxes_file(
        ann_filenames=ann_filenames,
        ann_is_gt_box=ann_is_gt_box,
        detect_thresh=detect_thresh,
        boxes_sample_rate=boxes_sample_rate,
    )
    logger.info(
        "Finished loading annotations from: %s" % ", ".join(ann_filenames)
    )
    logger.info("Detection threshold: {}".format(detect_thresh))
    logger.info("Number of unique boxes: %d" % unique_box_count)
    logger.info("Number of annotations: %d" % count)
    return all_boxes
22,325 | import logging
import os
from collections import defaultdict
from slowfast.utils.env import pathmgr
logger = logging.getLogger(__name__)
FPS = 30
AVA_VALID_FRAMES = range(902, 1799)
The provided code snippet includes necessary dependencies for implementing the `get_keyframe_data` function. Write a Python function `def get_keyframe_data(boxes_and_labels)` to solve the following problem:
Getting keyframe indices, boxes and labels in the dataset. Args: boxes_and_labels (list[dict]): a list which maps from video_idx to a dict. Each dict `frame_sec` to a list of boxes and corresponding labels. Returns: keyframe_indices (list): a list of indices of the keyframes. keyframe_boxes_and_labels (list[list[list]]): a list of list which maps from video_idx and sec_idx to a list of boxes and corresponding labels.
Here is the function:
def get_keyframe_data(boxes_and_labels):
    """
    Collect keyframe indices plus their boxes and labels for the dataset.
    Args:
        boxes_and_labels (list[dict]): a list which maps from video_idx to a
            dict mapping `frame_sec` to a list of boxes and labels.
    Returns:
        keyframe_indices (list): (video_idx, sec_idx, sec, frame_idx) tuples,
            one per keyframe that has at least one box.
        keyframe_boxes_and_labels (list[list[list]]): boxes and labels indexed
            by video_idx and sec_idx.
    """
    def sec_to_frame(sec):
        """
        Convert a time index (in seconds) to a frame index.
        Second 900 maps to frame 0; each later second advances by FPS frames.
        """
        return (sec - 900) * FPS
    keyframe_indices = []
    keyframe_boxes_and_labels = []
    count = 0
    for video_idx, sec_to_boxes in enumerate(boxes_and_labels):
        per_video = []
        keyframe_boxes_and_labels.append(per_video)
        sec_idx = 0
        for sec in sec_to_boxes.keys():
            # Drop seconds outside the valid AVA keyframe range.
            if sec not in AVA_VALID_FRAMES:
                continue
            boxes = sec_to_boxes[sec]
            if len(boxes) > 0:
                keyframe_indices.append(
                    (video_idx, sec_idx, sec, sec_to_frame(sec))
                )
                per_video.append(boxes)
                sec_idx += 1
                count += 1
    logger.info("%d keyframes used." % count)
    return keyframe_indices, keyframe_boxes_and_labels
22,326 | import logging
import os
from collections import defaultdict
from slowfast.utils.env import pathmgr
The provided code snippet includes necessary dependencies for implementing the `get_num_boxes_used` function. Write a Python function `def get_num_boxes_used(keyframe_indices, keyframe_boxes_and_labels)` to solve the following problem:
Get total number of used boxes. Args: keyframe_indices (list): a list of indices of the keyframes. keyframe_boxes_and_labels (list[list[list]]): a list of list which maps from video_idx and sec_idx to a list of boxes and corresponding labels. Returns: count (int): total number of used boxes.
Here is the function:
def get_num_boxes_used(keyframe_indices, keyframe_boxes_and_labels):
    """
    Get total number of used boxes.
    Args:
        keyframe_indices (list): a list of indices of the keyframes.
        keyframe_boxes_and_labels (list[list[list]]): a list of list which maps from
            video_idx and sec_idx to a list of boxes and corresponding labels.
    Returns:
        count (int): total number of used boxes.
    """
    # Sum the number of boxes at every referenced (video_idx, sec_idx).
    return sum(
        len(keyframe_boxes_and_labels[video_idx][sec_idx])
        for video_idx, sec_idx, _, _ in keyframe_indices
    )
22,327 | import logging
import numpy as np
import os
import random
import time
from collections import defaultdict
import cv2
import torch
from torch.utils.data.distributed import DistributedSampler
from torchvision import transforms
from slowfast.utils.env import pathmgr
from . import transform as transform
from .random_erasing import RandomErasing
from .transform import create_random_augment
logger = logging.getLogger(__name__)
pathmgr = PathManagerFactory.get(key="pyslowfast")
The provided code snippet includes necessary dependencies for implementing the `retry_load_images` function. Write a Python function `def retry_load_images(image_paths, retry=10, backend="pytorch")` to solve the following problem:
This function is to load images with support of retrying for failed load. Args: image_paths (list): paths of images needed to be loaded. retry (int, optional): maximum time of loading retrying. Defaults to 10. backend (str): `pytorch` or `cv2`. Returns: imgs (list): list of loaded images.
Here is the function:
def retry_load_images(image_paths, retry=10, backend="pytorch"):
    """
    This function is to load images with support of retrying for failed load.
    Args:
        image_paths (list): paths of images needed to be loaded.
        retry (int, optional): maximum time of loading retrying. Defaults to 10.
        backend (str): `pytorch` or `cv2`.
    Returns:
        imgs (list or tensor): the loaded images. Stacked into a uint8 tensor
            when `backend` is "pytorch", otherwise a list of decoded arrays.
    Raises:
        Exception: if any image still fails to decode after `retry` attempts.
    """
    for _ in range(retry):
        imgs = []
        for image_path in image_paths:
            with pathmgr.open(image_path, "rb") as f:
                img_str = np.frombuffer(f.read(), np.uint8)
                img = cv2.imdecode(img_str, flags=cv2.IMREAD_COLOR)
            imgs.append(img)
        # cv2.imdecode returns None on decode failure; only succeed when
        # every image decoded.
        if all(img is not None for img in imgs):
            if backend == "pytorch":
                imgs = torch.as_tensor(np.stack(imgs))
            return imgs
        # Fix: `Logger.warn` is a deprecated alias of `Logger.warning`.
        logger.warning("Reading failed. Will retry.")
        time.sleep(1.0)
    # Fix: always raise once retries are exhausted. The original guarded the
    # raise with `i == retry - 1` inside the loop, so `retry <= 0` silently
    # returned None instead of failing loudly.
    raise Exception("Failed to load images {}".format(image_paths))
22,328 | import logging
import numpy as np
import os
import random
import time
from collections import defaultdict
import cv2
import torch
from torch.utils.data.distributed import DistributedSampler
from torchvision import transforms
from slowfast.utils.env import pathmgr
from . import transform as transform
from .random_erasing import RandomErasing
from .transform import create_random_augment
The provided code snippet includes necessary dependencies for implementing the `get_sequence` function. Write a Python function `def get_sequence(center_idx, half_len, sample_rate, num_frames)` to solve the following problem:
Sample frames among the corresponding clip. Args: center_idx (int): center frame idx for current clip half_len (int): half of the clip length sample_rate (int): sampling rate for sampling frames inside of the clip num_frames (int): number of expected sampled frames Returns: seq (list): list of indexes of sampled frames in this clip.
Here is the function:
def get_sequence(center_idx, half_len, sample_rate, num_frames):
    """
    Sample frames among the corresponding clip.
    Args:
        center_idx (int): center frame idx for current clip
        half_len (int): half of the clip length
        sample_rate (int): sampling rate for sampling frames inside of the clip
        num_frames (int): number of expected sampled frames
    Returns:
        seq (list): list of indexes of sampled frames in this clip.
    """
    # Clamp every candidate index into the valid range [0, num_frames - 1].
    return [
        min(max(frame_idx, 0), num_frames - 1)
        for frame_idx in range(
            center_idx - half_len, center_idx + half_len, sample_rate
        )
    ]
22,329 | import logging
import numpy as np
import os
import random
import time
from collections import defaultdict
import cv2
import torch
from torch.utils.data.distributed import DistributedSampler
from torchvision import transforms
from slowfast.utils.env import pathmgr
from . import transform as transform
from .random_erasing import RandomErasing
from .transform import create_random_augment
def aggregate_labels(label_list):
    """
    Join a list of label lists into the deduplicated union.
    Args:
        label_list (list): list of per-frame label lists.
    Returns:
        (list): the unique labels across all inner lists.
    """
    unique_labels = set()
    for frame_labels in label_list:
        unique_labels.update(frame_labels)
    return list(unique_labels)
The provided code snippet includes necessary dependencies for implementing the `convert_to_video_level_labels` function. Write a Python function `def convert_to_video_level_labels(labels)` to solve the following problem:
Aggregate annotations from all frames of a video to form video-level labels. Args: labels (list): The input label list. Returns: labels (list): Same as input, but with each label replaced by a video-level one.
Here is the function:
def convert_to_video_level_labels(labels):
    """
    Replace every frame-level label list with its video-level union.
    Args:
        labels (list): per-video list of per-frame label lists; mutated
            in place.
    Returns:
        labels (list): the same list, where each frame's labels have been
            replaced by the aggregated labels of its whole video.
    """
    for video_labels in labels:
        merged = aggregate_labels(video_labels)
        # Every frame of the video shares the same aggregated label list.
        for frame_idx in range(len(video_labels)):
            video_labels[frame_idx] = merged
    return labels
22,330 | import logging
import numpy as np
import os
import random
import time
from collections import defaultdict
import cv2
import torch
from torch.utils.data.distributed import DistributedSampler
from torchvision import transforms
from slowfast.utils.env import pathmgr
from . import transform as transform
from .random_erasing import RandomErasing
from .transform import create_random_augment
pathmgr = PathManagerFactory.get(key="pyslowfast")
The provided code snippet includes necessary dependencies for implementing the `load_image_lists` function. Write a Python function `def load_image_lists(frame_list_file, prefix="", return_list=False)` to solve the following problem:
Load image paths and labels from a "frame list". Each line of the frame list contains: `original_vido_id video_id frame_id path labels` Args: frame_list_file (string): path to the frame list. prefix (str): the prefix for the path. return_list (bool): if True, return a list. If False, return a dict. Returns: image_paths (list or dict): list of list containing path to each frame. If return_list is False, then return in a dict form. labels (list or dict): list of list containing label of each frame. If return_list is False, then return in a dict form.
Here is the function:
def load_image_lists(frame_list_file, prefix="", return_list=False):
    """
    Load image paths and labels from a "frame list".
    Each line of the frame list contains:
    `original_vido_id video_id frame_id path labels`
    Args:
        frame_list_file (string): path to the frame list.
        prefix (str): the prefix for the path.
        return_list (bool): if True, return lists; if False, return dicts.
    Returns:
        image_paths (list or dict): per-video lists of frame paths, either as
            a list of lists or keyed by video name.
        labels (list or dict): per-video lists of per-frame label lists, in
            the same form as `image_paths`.
    """
    image_paths = defaultdict(list)
    labels = defaultdict(list)
    with pathmgr.open(frame_list_file, "r") as f:
        # The first line is a header; sanity-check its leading column name.
        assert f.readline().startswith("original_vido_id")
        for line in f:
            row = line.split()
            # original_vido_id video_id frame_id path labels
            assert len(row) == 5
            video_name = row[0]
            path = row[3] if prefix == "" else os.path.join(prefix, row[3])
            image_paths[video_name].append(path)
            # Labels are a quoted comma-separated list; empty means no label.
            frame_labels = row[-1].replace('"', "")
            if frame_labels == "":
                labels[video_name].append([])
            else:
                labels[video_name].append(
                    [int(label) for label in frame_labels.split(",")]
                )
    if return_list:
        keys = image_paths.keys()
        return [image_paths[key] for key in keys], [
            labels[key] for key in keys
        ]
    return dict(image_paths), dict(labels)
22,331 | import logging
import numpy as np
import os
import random
import time
from collections import defaultdict
import cv2
import torch
from torch.utils.data.distributed import DistributedSampler
from torchvision import transforms
from slowfast.utils.env import pathmgr
from . import transform as transform
from .random_erasing import RandomErasing
from .transform import create_random_augment
The provided code snippet includes necessary dependencies for implementing the `get_random_sampling_rate` function. Write a Python function `def get_random_sampling_rate(long_cycle_sampling_rate, sampling_rate)` to solve the following problem:
When multigrid training uses a fewer number of frames, we randomly increase the sampling rate so that some clips cover the original span.
Here is the function:
def get_random_sampling_rate(long_cycle_sampling_rate, sampling_rate):
    """
    When multigrid training uses a fewer number of frames, randomly increase
    the sampling rate so that some clips still cover the original span.
    Args:
        long_cycle_sampling_rate (int): upper bound for the sampled rate;
            values <= 0 disable the jitter.
        sampling_rate (int): the base sampling rate.
    Returns:
        (int): the (possibly jittered) sampling rate.
    """
    if long_cycle_sampling_rate <= 0:
        return sampling_rate
    assert long_cycle_sampling_rate >= sampling_rate
    return random.randint(sampling_rate, long_cycle_sampling_rate)
22,332 | import logging
import numpy as np
import os
import random
import time
from collections import defaultdict
import cv2
import torch
from torch.utils.data.distributed import DistributedSampler
from torchvision import transforms
from slowfast.utils.env import pathmgr
from . import transform as transform
from .random_erasing import RandomErasing
from .transform import create_random_augment
def spatial_sampling(
    frames,
    spatial_idx=-1,
    min_scale=256,
    max_scale=320,
    crop_size=224,
    random_horizontal_flip=True,
    inverse_uniform_sampling=False,
    aspect_ratio=None,
    scale=None,
    motion_shift=False,
):
    """
    Perform spatial sampling on the given video frames. If spatial_idx is
    -1, perform random scale, random crop, and random flip on the given
    frames. If spatial_idx is 0, 1, or 2, perform spatial uniform sampling
    with the given spatial_idx.
    Args:
        frames (tensor): frames of images sampled from the video. The
            dimension is `num frames` x `height` x `width` x `channel`.
        spatial_idx (int): if -1, perform random spatial sampling. If 0, 1,
            or 2, perform left, center, right crop if width is larger than
            height, and perform top, center, bottom crop if height is larger
            than width.
        min_scale (int): the minimal size of scaling.
        max_scale (int): the maximal size of scaling.
        crop_size (int): the size of height and width used to crop the
            frames.
        random_horizontal_flip (bool): if True (and spatial_idx is -1), flip
            the frames horizontally with probability 0.5.
        inverse_uniform_sampling (bool): if True, sample uniformly in
            [1 / max_scale, 1 / min_scale] and take a reciprocal to get the
            scale. If False, take a uniform sample from [min_scale,
            max_scale].
        aspect_ratio (list): Aspect ratio range for resizing.
        scale (list): Scale range for resizing.
        motion_shift (bool): Whether to apply motion shift for resizing.
    Returns:
        frames (tensor): spatially sampled frames.
    """
    assert spatial_idx in [-1, 0, 1, 2]
    if spatial_idx == -1:
        if aspect_ratio is None and scale is None:
            # Classic pipeline: jitter the short side into
            # [min_scale, max_scale], then take a random crop.
            frames, _ = transform.random_short_side_scale_jitter(
                images=frames,
                min_size=min_scale,
                max_size=max_scale,
                inverse_uniform_sampling=inverse_uniform_sampling,
            )
            frames, _ = transform.random_crop(frames, crop_size)
        else:
            # Inception-style random resized crop; the "with_shift" variant
            # additionally moves the crop window over time.
            transform_func = (
                transform.random_resized_crop_with_shift
                if motion_shift
                else transform.random_resized_crop
            )
            frames = transform_func(
                images=frames,
                target_height=crop_size,
                target_width=crop_size,
                scale=scale,
                ratio=aspect_ratio,
            )
        if random_horizontal_flip:
            frames, _ = transform.horizontal_flip(0.5, frames)
    else:
        # The testing is deterministic and no jitter should be performed.
        # min_scale, max_scale, and crop_size are expected to be the same.
        assert len({min_scale, max_scale}) == 1
        frames, _ = transform.random_short_side_scale_jitter(
            frames, min_scale, max_scale
        )
        frames, _ = transform.uniform_crop(frames, crop_size, spatial_idx)
    return frames
def tensor_normalize(tensor, mean, std, func=None):
    """
    Normalize a given tensor by subtracting the mean and dividing the std.
    uint8 inputs are first converted to float values in [0, 1].
    Args:
        tensor (tensor): tensor to normalize.
        mean (tensor or list or float): mean value(s) to subtract.
        std (tensor or list or float): std value(s) to divide by.
        func (callable, optional): transform applied to the tensor after the
            uint8 conversion but before the mean/std normalization.
    Returns:
        tensor (tensor): the normalized tensor.
    """
    if tensor.dtype == torch.uint8:
        # Convert raw pixel values to floats in [0, 1].
        tensor = tensor.float()
        tensor = tensor / 255.0
    # isinstance instead of `type(x) == list` (idiomatic type check).
    if isinstance(mean, list):
        mean = torch.tensor(mean)
    if isinstance(std, list):
        std = torch.tensor(std)
    if func is not None:
        tensor = func(tensor)
    tensor = tensor - mean
    tensor = tensor / std
    return tensor
def _frame_to_list_img(frames):
    """Convert a frame-major tensor into a list of per-frame PIL images."""
    to_pil = transforms.ToPILImage()
    return [to_pil(frame) for frame in frames]
def _list_img_to_frames(img_list):
    """Convert a list of PIL images back into a single stacked tensor."""
    to_tensor = transforms.ToTensor()
    return torch.stack([to_tensor(img) for img in img_list])
class RandomErasing:
    """Randomly selects a rectangle region in an image and erases its pixels.
    'Random Erasing Data Augmentation' by Zhong et al.
    See https://arxiv.org/pdf/1708.04896.pdf
    This variant of RandomErasing is intended to be applied to either a batch
    or single image tensor after it has been normalized by dataset mean and std.
    Args:
        probability: Probability that the Random Erasing operation will be performed.
        min_area: Minimum percentage of erased area wrt input image area.
        max_area: Maximum percentage of erased area wrt input image area.
        min_aspect: Minimum aspect ratio of erased area.
        mode: pixel color mode, one of 'const', 'rand', or 'pixel'
            'const' - erase block is constant color of 0 for all channels
            'rand' - erase block is same per-channel random (normal) color
            'pixel' - erase block is per-pixel random (normal) color
        max_count: maximum number of erasing blocks per image, area per box is scaled by count.
            per-image count is randomly chosen between 1 and this value.
        num_splits: if > 1, the first `batch_size // num_splits` samples of a
            batch are left untouched (a "clean" portion of the batch).
        device: device passed to the random fill generator.
        cube: if True, erase the same region across all images of the batch
            (used to erase a consistent cube across the frames of a clip).
    """

    # NOTE(review): relies on module-level `math` and `_get_pixels`, which are
    # not visible in this excerpt — confirm they are imported/defined.
    def __init__(
        self,
        probability=0.5,
        min_area=0.02,
        max_area=1 / 3,
        min_aspect=0.3,
        max_aspect=None,
        mode="const",
        min_count=1,
        max_count=None,
        num_splits=0,
        device="cuda",
        cube=True,
    ):
        self.probability = probability
        self.min_area = min_area
        self.max_area = max_area
        # Default to an aspect-ratio range symmetric around 1.
        max_aspect = max_aspect or 1 / min_aspect
        self.log_aspect_ratio = (math.log(min_aspect), math.log(max_aspect))
        self.min_count = min_count
        self.max_count = max_count or min_count
        self.num_splits = num_splits
        mode = mode.lower()
        self.rand_color = False
        self.per_pixel = False
        self.cube = cube
        if mode == "rand":
            self.rand_color = True  # per block random normal
        elif mode == "pixel":
            self.per_pixel = True  # per pixel random normal
        else:
            assert not mode or mode == "const"
        self.device = device

    def _erase(self, img, chan, img_h, img_w, dtype):
        # Erase up to `count` random rectangles in one (chan, img_h, img_w) image.
        if random.random() > self.probability:
            return
        area = img_h * img_w
        count = (
            self.min_count
            if self.min_count == self.max_count
            else random.randint(self.min_count, self.max_count)
        )
        for _ in range(count):
            # Rejection-sample (up to 10 tries) a box that fits in the image.
            for _ in range(10):
                target_area = (
                    random.uniform(self.min_area, self.max_area) * area / count
                )
                aspect_ratio = math.exp(random.uniform(*self.log_aspect_ratio))
                h = int(round(math.sqrt(target_area * aspect_ratio)))
                w = int(round(math.sqrt(target_area / aspect_ratio)))
                if w < img_w and h < img_h:
                    top = random.randint(0, img_h - h)
                    left = random.randint(0, img_w - w)
                    img[:, top : top + h, left : left + w] = _get_pixels(
                        self.per_pixel,
                        self.rand_color,
                        (chan, h, w),
                        dtype=dtype,
                        device=self.device,
                    )
                    break

    def _erase_cube(
        self,
        img,
        batch_start,
        batch_size,
        chan,
        img_h,
        img_w,
        dtype,
    ):
        # Same as _erase, but applies the SAME box to every image in
        # img[batch_start:batch_size] (a consistent "cube" across the batch).
        if random.random() > self.probability:
            return
        area = img_h * img_w
        count = (
            self.min_count
            if self.min_count == self.max_count
            else random.randint(self.min_count, self.max_count)
        )
        for _ in range(count):
            # More tries (100) than _erase, since one box must fit all images.
            for _ in range(100):
                target_area = (
                    random.uniform(self.min_area, self.max_area) * area / count
                )
                aspect_ratio = math.exp(random.uniform(*self.log_aspect_ratio))
                h = int(round(math.sqrt(target_area * aspect_ratio)))
                w = int(round(math.sqrt(target_area / aspect_ratio)))
                if w < img_w and h < img_h:
                    top = random.randint(0, img_h - h)
                    left = random.randint(0, img_w - w)
                    for i in range(batch_start, batch_size):
                        img_instance = img[i]
                        img_instance[
                            :, top : top + h, left : left + w
                        ] = _get_pixels(
                            self.per_pixel,
                            self.rand_color,
                            (chan, h, w),
                            dtype=dtype,
                            device=self.device,
                        )
                    break

    def __call__(self, input):
        # Accepts a single (C, H, W) image or an (N, C, H, W) batch; erases
        # in place and returns the (mutated) input.
        if len(input.size()) == 3:
            self._erase(input, *input.size(), input.dtype)
        else:
            batch_size, chan, img_h, img_w = input.size()
            # skip first slice of batch if num_splits is set (for clean portion of samples)
            batch_start = (
                batch_size // self.num_splits if self.num_splits > 1 else 0
            )
            if self.cube:
                self._erase_cube(
                    input,
                    batch_start,
                    batch_size,
                    chan,
                    img_h,
                    img_w,
                    input.dtype,
                )
            else:
                for i in range(batch_start, batch_size):
                    self._erase(input[i], chan, img_h, img_w, input.dtype)
        return input
def create_random_augment(
    input_size,
    auto_augment=None,
    interpolation="bilinear",
):
    """
    Build the video RandAugment transform.
    Args:
        input_size: size of the input video, an int or a tuple whose last two
            entries are (height, width).
        auto_augment: RandAugment config string, e.g. "rand-m7-n4-mstd0.5-inc1"
            (m is the magnitude and n is the number of operations to apply).
        interpolation: interpolation method name, or "random".
    Raises:
        NotImplementedError: if `auto_augment` is empty or not a "rand-*" config.
    """
    img_size = input_size[-2:] if isinstance(input_size, tuple) else input_size
    if auto_augment:
        assert isinstance(auto_augment, str)
        img_size_min = (
            min(img_size) if isinstance(img_size, tuple) else img_size
        )
        aa_params = {"translate_const": int(img_size_min * 0.45)}
        if interpolation and interpolation != "random":
            aa_params["interpolation"] = _pil_interp(interpolation)
        if auto_augment.startswith("rand"):
            return transforms.Compose(
                [rand_augment_transform(auto_augment, aa_params)]
            )
    raise NotImplementedError
The provided code snippet includes necessary dependencies for implementing the `aug_frame` function. Write a Python function `def aug_frame( cfg, mode, rand_erase, frames, spatial_sample_index, min_scale, max_scale, crop_size, )` to solve the following problem:
Perform augmentations on the given video frames, including random augmentation, normalization, spatial sampling and optional random erasing. Args: cfg (CfgNode): configs. mode (string): Options include `train`, `val`, or `test` mode. rand_erase (bool): if performing random erasing. frames (tensor): frames of images sampled from the video. The dimension is `num frames` x `height` x `width` x `channel`. spatial_sample_index (int): if -1, perform random spatial sampling. If 0, 1, or 2, perform left, center, right crop if width is larger than height, and perform top, center, bottom crop if height is larger than width. min_scale (int): the minimal size of scaling. max_scale (int): the maximal size of scaling. crop_size (int): the size of height and width used to crop the frames. Returns: frames (tensor): spatially sampled frames.
Here is the function:
def aug_frame(
    cfg,
    mode,
    rand_erase,
    frames,
    spatial_sample_index,
    min_scale,
    max_scale,
    crop_size,
):
    """
    Perform augmentations on the given video frames, including
    random augmentation, normalization, spatial sampling and optional random
    erasing.
    Args:
        cfg (CfgNode): configs.
        mode (string): Options include `train`, `val`, or `test` mode.
        rand_erase (bool): if performing random erasing.
        frames (tensor): frames of images sampled from the video. The
            dimension is `num frames` x `height` x `width` x `channel`.
        spatial_sample_index (int): if -1, perform random spatial sampling.
            If 0, 1, or 2, perform left, center, right crop if width is larger
            than height, and perform top, center, bottom crop if height is
            larger than width.
        min_scale (int): the minimal size of scaling.
        max_scale (int): the maximal size of scaling.
        crop_size (int): the size of height and width used to crop the
            frames.
    Returns:
        frames (tensor): spatially sampled frames.
    """
    if cfg.AUG.AA_TYPE:
        # RandAugment operates on PIL images, so round-trip the clip through
        # a list of per-frame PIL images.
        aug_transform = create_random_augment(
            input_size=(frames.size(1), frames.size(2)),
            auto_augment=cfg.AUG.AA_TYPE,
            interpolation=cfg.AUG.INTERPOLATION,
        )
        # T H W C -> T C H W.
        frames = frames.permute(0, 3, 1, 2)
        list_img = _frame_to_list_img(frames)
        list_img = aug_transform(list_img)
        frames = _list_img_to_frames(list_img)
        frames = frames.permute(0, 2, 3, 1)
    frames = tensor_normalize(frames, cfg.DATA.MEAN, cfg.DATA.STD)
    # T H W C -> C T H W.
    frames = frames.permute(3, 0, 1, 2)
    # Perform data augmentation.
    scl, asp = (
        cfg.DATA.TRAIN_JITTER_SCALES_RELATIVE,
        cfg.DATA.TRAIN_JITTER_ASPECT_RELATIVE,
    )
    # Relative scale/aspect-ratio jitter only applies during training.
    relative_scales = None if (mode not in ["train"] or len(scl) == 0) else scl
    relative_aspect = None if (mode not in ["train"] or len(asp) == 0) else asp
    frames = spatial_sampling(
        frames,
        spatial_idx=spatial_sample_index,
        min_scale=min_scale,
        max_scale=max_scale,
        crop_size=crop_size,
        random_horizontal_flip=cfg.DATA.RANDOM_FLIP,
        inverse_uniform_sampling=cfg.DATA.INV_UNIFORM_SAMPLE,
        aspect_ratio=relative_aspect,
        scale=relative_scales,
        motion_shift=cfg.DATA.TRAIN_JITTER_MOTION_SHIFT
        if mode in ["train"]
        else False,
    )
    if rand_erase:
        # Run RandomErasing in (T, C, H, W) layout so the batch dimension is
        # time; with cube mode the same region is erased across all frames.
        erase_transform = RandomErasing(
            cfg.AUG.RE_PROB,
            mode=cfg.AUG.RE_MODE,
            max_count=cfg.AUG.RE_COUNT,
            num_splits=cfg.AUG.RE_COUNT,
            device="cpu",
        )
        frames = frames.permute(1, 0, 2, 3)
        frames = erase_transform(frames)
        frames = frames.permute(1, 0, 2, 3)
    return frames
22,333 | import math
import numpy as np
import random
import re
import PIL
from PIL import Image, ImageEnhance, ImageOps
def _check_args_tf(kwargs):
    """Normalize transform kwargs for the installed Pillow version."""
    # Pillow < 5.0 does not support the `fillcolor` kwarg.
    if _PIL_VER < (5, 0):
        kwargs.pop("fillcolor", None)
    kwargs["resample"] = _interpolation(kwargs)
def shear_x(img, factor, **kwargs):
    """Shear `img` horizontally by `factor` via an affine transform."""
    _check_args_tf(kwargs)
    matrix = (1, factor, 0, 0, 1, 0)
    return img.transform(img.size, Image.AFFINE, matrix, **kwargs)
22,334 | import math
import numpy as np
import random
import re
import PIL
from PIL import Image, ImageEnhance, ImageOps
def _check_args_tf(kwargs):
    """Normalize transform kwargs for the installed Pillow version."""
    # Pillow < 5.0 does not support the `fillcolor` kwarg.
    if _PIL_VER < (5, 0):
        kwargs.pop("fillcolor", None)
    kwargs["resample"] = _interpolation(kwargs)
def shear_y(img, factor, **kwargs):
    """Shear `img` vertically by `factor` via an affine transform."""
    _check_args_tf(kwargs)
    matrix = (1, 0, 0, factor, 1, 0)
    return img.transform(img.size, Image.AFFINE, matrix, **kwargs)
22,335 | import math
import numpy as np
import random
import re
import PIL
from PIL import Image, ImageEnhance, ImageOps
def _check_args_tf(kwargs):
    """Normalize transform kwargs for the installed Pillow version."""
    # Pillow < 5.0 does not support the `fillcolor` kwarg.
    if _PIL_VER < (5, 0):
        kwargs.pop("fillcolor", None)
    kwargs["resample"] = _interpolation(kwargs)
def translate_x_rel(img, pct, **kwargs):
    """Translate `img` horizontally by `pct` of its width."""
    pixels = pct * img.size[0]
    _check_args_tf(kwargs)
    matrix = (1, 0, pixels, 0, 1, 0)
    return img.transform(img.size, Image.AFFINE, matrix, **kwargs)
22,336 | import math
import numpy as np
import random
import re
import PIL
from PIL import Image, ImageEnhance, ImageOps
def _check_args_tf(kwargs):
    """Normalize transform kwargs for the installed Pillow version."""
    # Pillow < 5.0 does not support the `fillcolor` kwarg.
    if _PIL_VER < (5, 0):
        kwargs.pop("fillcolor", None)
    kwargs["resample"] = _interpolation(kwargs)
def translate_y_rel(img, pct, **kwargs):
    """Translate `img` vertically by `pct` of its height."""
    pixels = pct * img.size[1]
    _check_args_tf(kwargs)
    matrix = (1, 0, 0, 0, 1, pixels)
    return img.transform(img.size, Image.AFFINE, matrix, **kwargs)
22,337 | import math
import numpy as np
import random
import re
import PIL
from PIL import Image, ImageEnhance, ImageOps
def _check_args_tf(kwargs):
def translate_x_abs(img, pixels, **kwargs):
    """Translate `img` horizontally by an absolute number of `pixels`."""
    _check_args_tf(kwargs)
    matrix = (1, 0, pixels, 0, 1, 0)
    return img.transform(img.size, Image.AFFINE, matrix, **kwargs)
22,338 | import math
import numpy as np
import random
import re
import PIL
from PIL import Image, ImageEnhance, ImageOps
def _check_args_tf(kwargs):
    """Normalize transform kwargs for the installed Pillow version."""
    # Pillow < 5.0 does not support the `fillcolor` kwarg.
    if _PIL_VER < (5, 0):
        kwargs.pop("fillcolor", None)
    kwargs["resample"] = _interpolation(kwargs)
def translate_y_abs(img, pixels, **kwargs):
    """Translate `img` vertically by an absolute number of `pixels`."""
    _check_args_tf(kwargs)
    matrix = (1, 0, 0, 0, 1, pixels)
    return img.transform(img.size, Image.AFFINE, matrix, **kwargs)
22,339 | import math
import numpy as np
import random
import re
import PIL
from PIL import Image, ImageEnhance, ImageOps
_PIL_VER = tuple([int(x) for x in PIL.__version__.split(".")[:2]])
def _check_args_tf(kwargs):
    """Normalize transform kwargs for the installed Pillow version."""
    # Pillow < 5.0 does not support the `fillcolor` kwarg.
    if _PIL_VER < (5, 0):
        kwargs.pop("fillcolor", None)
    kwargs["resample"] = _interpolation(kwargs)
def rotate(img, degrees, **kwargs):
    """Rotate `img` by `degrees`, dispatching on the installed Pillow version."""
    _check_args_tf(kwargs)
    if _PIL_VER >= (5, 2):
        return img.rotate(degrees, **kwargs)
    elif _PIL_VER >= (5, 0):
        # Pillow >= 5.0 but < 5.2: build the center-rotation affine matrix
        # by hand and apply it with img.transform.
        w, h = img.size
        post_trans = (0, 0)
        rotn_center = (w / 2.0, h / 2.0)
        angle = -math.radians(degrees)
        matrix = [
            round(math.cos(angle), 15),
            round(math.sin(angle), 15),
            0.0,
            round(-math.sin(angle), 15),
            round(math.cos(angle), 15),
            0.0,
        ]
        def transform(x, y, matrix):
            # Apply the affine matrix (a, b, c, d, e, f) to point (x, y).
            (a, b, c, d, e, f) = matrix
            return a * x + b * y + c, d * x + e * y + f
        # Shift the rotation center to the origin, rotate, then shift back.
        matrix[2], matrix[5] = transform(
            -rotn_center[0] - post_trans[0],
            -rotn_center[1] - post_trans[1],
            matrix,
        )
        matrix[2] += rotn_center[0]
        matrix[5] += rotn_center[1]
        return img.transform(img.size, Image.AFFINE, matrix, **kwargs)
    else:
        # Very old Pillow: only the resample argument is forwarded.
        return img.rotate(degrees, resample=kwargs["resample"])
22,340 | import math
import numpy as np
import random
import re
import PIL
from PIL import Image, ImageEnhance, ImageOps
def auto_contrast(img, **__):
    """Maximize image contrast; extra kwargs are ignored."""
    return ImageOps.autocontrast(img)
22,341 | import math
import numpy as np
import random
import re
import PIL
from PIL import Image, ImageEnhance, ImageOps
def invert(img, **__):
    """Invert (negate) the image; extra kwargs are ignored."""
    return ImageOps.invert(img)
22,342 | import math
import numpy as np
import random
import re
import PIL
from PIL import Image, ImageEnhance, ImageOps
def equalize(img, **__):
    """Equalize the image histogram; extra kwargs are ignored."""
    return ImageOps.equalize(img)
22,343 | import math
import numpy as np
import random
import re
import PIL
from PIL import Image, ImageEnhance, ImageOps
def solarize(img, thresh, **__):
    """Invert all pixel values above `thresh`; extra kwargs are ignored."""
    return ImageOps.solarize(img, thresh)
22,344 | import math
import numpy as np
import random
import re
import PIL
from PIL import Image, ImageEnhance, ImageOps
def solarize_add(img, add, thresh=128, **__):
    """
    Add `add` to every pixel value below `thresh` (clamped at 255).
    Only 'L' and 'RGB' images are modified; others are returned unchanged.
    """
    if img.mode not in ("L", "RGB"):
        return img
    lut = [min(255, i + add) if i < thresh else i for i in range(256)]
    if img.mode == "RGB" and len(lut) == 256:
        # point() needs one 256-entry table per band for RGB.
        lut = lut * 3
    return img.point(lut)
22,345 | import math
import numpy as np
import random
import re
import PIL
from PIL import Image, ImageEnhance, ImageOps
def posterize(img, bits_to_keep, **__):
    """Reduce `img` to `bits_to_keep` bits per channel (no-op for >= 8)."""
    return img if bits_to_keep >= 8 else ImageOps.posterize(img, bits_to_keep)
22,346 | import math
import numpy as np
import random
import re
import PIL
from PIL import Image, ImageEnhance, ImageOps
def contrast(img, factor, **__):
    """Adjust image contrast by `factor` (1.0 = unchanged)."""
    enhancer = ImageEnhance.Contrast(img)
    return enhancer.enhance(factor)
22,347 | import math
import numpy as np
import random
import re
import PIL
from PIL import Image, ImageEnhance, ImageOps
def color(img, factor, **__):
    """Adjust image color balance by `factor` (1.0 = unchanged)."""
    enhancer = ImageEnhance.Color(img)
    return enhancer.enhance(factor)
22,348 | import math
import numpy as np
import random
import re
import PIL
from PIL import Image, ImageEnhance, ImageOps
def brightness(img, factor, **__):
    """Adjust image brightness by `factor` (1.0 = unchanged)."""
    enhancer = ImageEnhance.Brightness(img)
    return enhancer.enhance(factor)
22,349 | import math
import numpy as np
import random
import re
import PIL
from PIL import Image, ImageEnhance, ImageOps
def sharpness(img, factor, **__):
    """Adjust image sharpness by `factor` (1.0 = unchanged)."""
    enhancer = ImageEnhance.Sharpness(img)
    return enhancer.enhance(factor)
22,350 | import math
import numpy as np
import random
import re
import PIL
from PIL import Image, ImageEnhance, ImageOps
_MAX_LEVEL = 10.0
def _randomly_negate(v):
def _rotate_level_to_arg(level, _hparams):
    # Map level in [0, _MAX_LEVEL] to degrees in [-30, 30] (random sign).
    degrees = (level / _MAX_LEVEL) * 30.0
    return (_randomly_negate(degrees),)
22,351 | import math
import numpy as np
import random
import re
import PIL
from PIL import Image, ImageEnhance, ImageOps
_MAX_LEVEL = 10.0
def _enhance_level_to_arg(level, _hparams):
# range [0.1, 1.9]
return ((level / _MAX_LEVEL) * 1.8 + 0.1,) | null |
22,352 | import math
import numpy as np
import random
import re
import PIL
from PIL import Image, ImageEnhance, ImageOps
_MAX_LEVEL = 10.0
def _randomly_negate(v):
"""With 50% prob, negate the value"""
return -v if random.random() > 0.5 else v
def _enhance_increasing_level_to_arg(level, _hparams):
    # 1.0 is "no change"; higher levels push further from 1.0 in a random
    # direction, covering [0.1, 1.9].
    delta = (level / _MAX_LEVEL) * 0.9
    return (1.0 + _randomly_negate(delta),)
22,353 | import math
import numpy as np
import random
import re
import PIL
from PIL import Image, ImageEnhance, ImageOps
_MAX_LEVEL = 10.0
def _randomly_negate(v):
"""With 50% prob, negate the value"""
return -v if random.random() > 0.5 else v
def _shear_level_to_arg(level, _hparams):
    # Map level in [0, _MAX_LEVEL] to a shear factor in [-0.3, 0.3].
    factor = (level / _MAX_LEVEL) * 0.3
    return (_randomly_negate(factor),)
22,354 | import math
import numpy as np
import random
import re
import PIL
from PIL import Image, ImageEnhance, ImageOps
_MAX_LEVEL = 10.0
def _randomly_negate(v):
def _translate_abs_level_to_arg(level, hparams):
    # Map level to a pixel offset in [-translate_const, translate_const].
    max_pixels = float(hparams["translate_const"])
    offset = (level / _MAX_LEVEL) * max_pixels
    return (_randomly_negate(offset),)
22,355 | import math
import numpy as np
import random
import re
import PIL
from PIL import Image, ImageEnhance, ImageOps
_MAX_LEVEL = 10.0
def _randomly_negate(v):
def _translate_rel_level_to_arg(level, hparams):
    # Map level to a relative offset; default range [-0.45, 0.45].
    max_pct = hparams.get("translate_pct", 0.45)
    offset = (level / _MAX_LEVEL) * max_pct
    return (_randomly_negate(offset),)
22,356 | import math
import numpy as np
import random
import re
import PIL
from PIL import Image, ImageEnhance, ImageOps
def _posterize_level_to_arg(level, _hparams):
    # As per the Tensorflow TPU EfficientNet impl: keep 0 up to 4 MSBs of
    # the image, so augmentation severity decreases as the level increases.
    kept_bits = int((level / _MAX_LEVEL) * 4)
    return (kept_bits,)
def _posterize_increasing_level_to_arg(level, hparams):
    # Inverted mapping (TF models research / UDA): keep 4 down to 0 MSBs,
    # so augmentation severity increases with the level.
    return (4 - _posterize_level_to_arg(level, hparams)[0],)
22,357 | import math
import numpy as np
import random
import re
import PIL
from PIL import Image, ImageEnhance, ImageOps
_MAX_LEVEL = 10.0
def _posterize_original_level_to_arg(level, _hparams):
# As per original AutoAugment paper description
# range [4, 8], 'keep 4 up to 8 MSB of image'
# intensity/severity of augmentation decreases with level
return (int((level / _MAX_LEVEL) * 4) + 4,) | null |
22,358 | import math
import numpy as np
import random
import re
import PIL
from PIL import Image, ImageEnhance, ImageOps
def _solarize_level_to_arg(level, _hparams):
    # Threshold in [0, 256]; augmentation severity decreases with level.
    thresh = int((level / _MAX_LEVEL) * 256)
    return (thresh,)
def _solarize_increasing_level_to_arg(level, _hparams):
    # Inverted threshold in [256, 0]; severity increases with level.
    return (256 - _solarize_level_to_arg(level, _hparams)[0],)
22,359 | import math
import numpy as np
import random
import re
import PIL
from PIL import Image, ImageEnhance, ImageOps
_MAX_LEVEL = 10.0
def _solarize_add_level_to_arg(level, _hparams):
# range [0, 110]
return (int((level / _MAX_LEVEL) * 110),) | null |
22,360 | import av
The provided code snippet includes necessary dependencies for implementing the `get_video_container` function. Write a Python function `def get_video_container(path_to_vid, multi_thread_decode=False, backend="pyav")` to solve the following problem:
Given the path to the video, return the pyav video container. Args: path_to_vid (str): path to the video. multi_thread_decode (bool): if True, perform multi-thread decoding. backend (str): decoder backend, options include `pyav` and `torchvision`, default is `pyav`. Returns: container (container): video container.
Here is the function:
def get_video_container(path_to_vid, multi_thread_decode=False, backend="pyav"):
    """
    Open the video at `path_to_vid` and return a decoding container.
    Args:
        path_to_vid (str): path to the video.
        multi_thread_decode (bool): if True, enable multi-threaded decoding
            (effective only for the `pyav` backend).
        backend (str): decoder backend, `pyav` (default) or `torchvision`.
    Returns:
        container: an av container for `pyav`; the raw video bytes for
            `torchvision`.
    Raises:
        NotImplementedError: if `backend` is unknown.
    """
    if backend == "pyav":
        container = av.open(path_to_vid)
        if multi_thread_decode:
            # Enable multiple threads for decoding.
            container.streams.video[0].thread_type = "AUTO"
        return container
    if backend == "torchvision":
        with open(path_to_vid, "rb") as fp:
            return fp.read()
    raise NotImplementedError("Unknown backend {}".format(backend))
22,361 | import numpy as np
import torch
def convert_to_one_hot(targets, num_classes, on_value=1.0, off_value=0.0):
    """
    Convert class indices to one-hot (optionally label-smoothed) vectors.
    Args:
        targets (tensor): class labels; flattened to shape (N, 1).
        num_classes (int): total number of classes.
        on_value (float): value placed at the target class.
        off_value (float): value for all other classes (used for label
            smoothing).
    """
    idx = targets.long().view(-1, 1)
    one_hot = torch.full(
        (idx.size()[0], num_classes), off_value, device=idx.device
    )
    return one_hot.scatter_(1, idx, on_value)
The provided code snippet includes necessary dependencies for implementing the `mixup_target` function. Write a Python function `def mixup_target(target, num_classes, lam=1.0, smoothing=0.0)` to solve the following problem:
This function converts target class indices to one-hot vectors, given the number of classes. Args: targets (loader): Class labels. num_classes (int): Total number of classes. lam (float): lambda value for mixup/cutmix. smoothing (float): Label smoothing value.
Here is the function:
def mixup_target(target, num_classes, lam=1.0, smoothing=0.0):
    """
    Build the mixup soft target: a lam-weighted blend of the (smoothed)
    one-hot targets of the batch and of the batch flipped along dim 0.
    Args:
        target (tensor): class labels.
        num_classes (int): total number of classes.
        lam (float): lambda value for mixup/cutmix.
        smoothing (float): label smoothing value.
    """
    off_value = smoothing / num_classes
    on_value = 1.0 - smoothing + off_value
    one_hot = convert_to_one_hot(
        target,
        num_classes,
        on_value=on_value,
        off_value=off_value,
    )
    flipped = convert_to_one_hot(
        target.flip(0),
        num_classes,
        on_value=on_value,
        off_value=off_value,
    )
    return one_hot * lam + flipped * (1.0 - lam)
22,362 | import numpy as np
import torch
def rand_bbox(img_shape, lam, margin=0.0, count=None):
    """
    Generate a random box whose area fraction matches the cutmix lambda.
    Args:
        img_shape (tuple): image shape; the last two entries are (H, W).
        lam (float): cutmix lambda value.
        margin (float): fraction of the cut size kept away from the border.
        count (int): number of boxes to generate (None -> scalar coords).
    Returns:
        yl, yh, xl, xh: box coordinates, clipped to the image bounds.
    """
    cut_ratio = np.sqrt(1 - lam)
    img_h, img_w = img_shape[-2:]
    cut_h = int(img_h * cut_ratio)
    cut_w = int(img_w * cut_ratio)
    margin_y = int(margin * cut_h)
    margin_x = int(margin * cut_w)
    # Draw the box center, then clip the half-extents to the image.
    cy = np.random.randint(margin_y, img_h - margin_y, size=count)
    cx = np.random.randint(margin_x, img_w - margin_x, size=count)
    yl = np.clip(cy - cut_h // 2, 0, img_h)
    yh = np.clip(cy + cut_h // 2, 0, img_h)
    xl = np.clip(cx - cut_w // 2, 0, img_w)
    xh = np.clip(cx + cut_w // 2, 0, img_w)
    return yl, yh, xl, xh
The provided code snippet includes necessary dependencies for implementing the `get_cutmix_bbox` function. Write a Python function `def get_cutmix_bbox(img_shape, lam, correct_lam=True, count=None)` to solve the following problem:
Generates the box coordinates for cutmix. Args: img_shape (tuple): Image shape as tuple lam (float): Cutmix lambda value correct_lam (bool): Apply lambda correction when cutmix bbox clipped by image borders. count (int): Number of bbox to generate
Here is the function:
def get_cutmix_bbox(img_shape, lam, correct_lam=True, count=None):
    """
    Generate cutmix box coordinates, optionally correcting lambda for
    clipping at the image borders.
    Args:
        img_shape (tuple): image shape; the last two entries are (H, W).
        lam (float): cutmix lambda value.
        correct_lam (bool): recompute lambda from the clipped box area.
        count (int): number of boxes to generate.
    """
    (yl, yu, xl, xu) = rand_bbox(img_shape, lam, count=count)
    if correct_lam:
        box_area = (yu - yl) * (xu - xl)
        lam = 1.0 - box_area / float(img_shape[-2] * img_shape[-1])
    return (yl, yu, xl, xu), lam
22,363 | import math
import numpy as np
import cv2
The provided code snippet includes necessary dependencies for implementing the `clip_boxes_to_image` function. Write a Python function `def clip_boxes_to_image(boxes, height, width)` to solve the following problem:
Clip the boxes with the height and width of the image size. Args: boxes (ndarray): bounding boxes to perform crop. The dimension is `num boxes` x 4. height (int): the height of the image. width (int): the width of the image. Returns: boxes (ndarray): cropped bounding boxes.
Here is the function:
def clip_boxes_to_image(boxes, height, width):
    """
    Clip box coordinates (in place) to lie inside the image.
    Args:
        boxes (ndarray): `num boxes` x 4 array of [x1, y1, x2, y2].
        height (int): the height of the image.
        width (int): the width of the image.
    Returns:
        boxes (ndarray): the clipped boxes.
    """
    # x coordinates live in columns 0 and 2, y coordinates in 1 and 3.
    boxes[:, [0, 2]] = np.clip(boxes[:, [0, 2]], 0.0, width - 1.0)
    boxes[:, [1, 3]] = np.clip(boxes[:, [1, 3]], 0.0, height - 1.0)
    return boxes
22,364 | import math
import numpy as np
import cv2
The provided code snippet includes necessary dependencies for implementing the `random_short_side_scale_jitter_list` function. Write a Python function `def random_short_side_scale_jitter_list(images, min_size, max_size, boxes=None)` to solve the following problem:
Perform a spatial short scale jittering on the given images and corresponding boxes. Args: images (list): list of images to perform scale jitter. Dimension is `height` x `width` x `channel`. min_size (int): the minimal size to scale the frames. max_size (int): the maximal size to scale the frames. boxes (list): optional. Corresponding boxes to images. Dimension is `num boxes` x 4. Returns: (list): the list of scaled images with dimension of `new height` x `new width` x `channel`. (ndarray or None): the scaled boxes with dimension of `num boxes` x 4.
Here is the function:
def random_short_side_scale_jitter_list(images, min_size, max_size, boxes=None):
    """
    Randomly rescale a list of images (and optional boxes) so that the short
    side lands in [min_size, max_size]; the target size is sampled uniformly
    over the inverse scale.
    Args:
        images (list): list of images (ndarray, `height` x `width` x
            `channel`) to jitter.
        min_size (int): the minimal size to scale the frames.
        max_size (int): the maximal size to scale the frames.
        boxes (list): optional list of boxes (`num boxes` x 4) to rescale
            along with the images.
    Returns:
        (list): scaled images (`new height` x `new width` x `channel`).
        (list or None): boxes rescaled by the same factor.
    """
    size = int(round(1.0 / np.random.uniform(1.0 / max_size, 1.0 / min_size)))
    height, width = images[0].shape[0], images[0].shape[1]
    # Nothing to do when the short side already matches the target size.
    if (width <= height and width == size) or (
        height <= width and height == size
    ):
        return images, boxes
    if width < height:
        new_width = size
        new_height = int(math.floor((float(height) / width) * size))
        scale_factor = float(new_height) / height
    else:
        new_height = size
        new_width = int(math.floor((float(width) / height) * size))
        scale_factor = float(new_width) / width
    if boxes is not None:
        boxes = [proposal * scale_factor for proposal in boxes]
    resized = [
        cv2.resize(
            image, (new_width, new_height), interpolation=cv2.INTER_LINEAR
        ).astype(np.float32)
        for image in images
    ]
    return resized, boxes
22,365 | import math
import numpy as np
import cv2
The provided code snippet includes necessary dependencies for implementing the `scale_boxes` function. Write a Python function `def scale_boxes(size, boxes, height, width)` to solve the following problem:
Scale the short side of the box to size. Args: size (int): size to scale the image. boxes (ndarray): bounding boxes to perform scale on. The dimension is `num boxes` x 4. height (int): the height of the image. width (int): the width of the image. Returns: boxes (ndarray): scaled bounding boxes.
Here is the function:
def scale_boxes(size, boxes, height, width):
    """
    Rescale boxes so they match an image whose short side is resized to
    `size`.
    Args:
        size (int): target short-side size of the image.
        boxes (ndarray): boxes to rescale, `num boxes` x 4.
        height (int): the height of the image.
        width (int): the width of the image.
    Returns:
        boxes (ndarray): scaled bounding boxes.
    """
    # Short side already at the target size: no rescale needed.
    if (width <= height and width == size) or (
        height <= width and height == size
    ):
        return boxes
    if width < height:
        # Width is the short side; the height grows proportionally.
        scaled_height = int(math.floor((float(height) / width) * size))
        boxes *= float(scaled_height) / height
    else:
        scaled_width = int(math.floor((float(width) / height) * size))
        boxes *= float(scaled_width) / width
    return boxes
22,366 | import math
import numpy as np
import cv2
def flip_boxes(boxes, im_width):
    """
    Mirror boxes horizontally inside an image of width `im_width`.
    Args:
        boxes (array): boxes to flip, `num boxes` x 4 in (x1, y1, x2, y2).
        im_width (int): width of the image.
    Returns:
        (array): the horizontally flipped boxes.
    """
    flipped = boxes.copy()
    # x1 and x2 trade places after mirroring around the vertical axis.
    flipped[:, 0::4] = im_width - boxes[:, 2::4] - 1
    flipped[:, 2::4] = im_width - boxes[:, 0::4] - 1
    return flipped
The provided code snippet includes necessary dependencies for implementing the `horizontal_flip_list` function. Write a Python function `def horizontal_flip_list(prob, images, order="CHW", boxes=None)` to solve the following problem:
Horizontally flip the list of images and optional boxes. Args: prob (float): probability to flip. images (list): list of images to flip. Dimension is `height` x `width` x `channel` or `channel` x `height` x `width`. order (str): order of the `height`, `channel` and `width`. boxes (list): optional. Corresponding boxes to images. Dimension is `num boxes` x 4. Returns: (list): the flipped images. (list): optional. Corresponding boxes to images. Dimension is `num boxes` x 4.
Here is the function:
def horizontal_flip_list(prob, images, order="CHW", boxes=None):
    """
    With probability `prob`, horizontally flip every image (and any boxes).
    Args:
        prob (float): probability to flip.
        images (list): images with layout `channel` x `height` x `width`
            ("CHW") or `height` x `width` x `channel` ("HWC").
        order (str): layout of each image, "CHW" or "HWC".
        boxes (list): optional. Corresponding boxes to images, each
            `num boxes` x 4.
    Returns:
        (list): the (possibly flipped) images.
        (list or None): the (possibly flipped) boxes.
    """
    # NOTE(review): shape[1] is the width only for HWC layout; for CHW it is
    # the height, so flipping boxes for CHW inputs uses the wrong extent —
    # confirm callers only pass boxes with HWC images.
    _, width, _ = images[0].shape
    if np.random.uniform() >= prob:
        return images, boxes
    if boxes is not None:
        boxes = [flip_boxes(box, width) for box in boxes]
    if order == "CHW":
        flipped = []
        for img in images:
            chw = np.asarray(img).swapaxes(2, 0)
            flipped.append(chw[::-1].swapaxes(0, 2))
        return flipped, boxes
    if order == "HWC":
        return [cv2.flip(img, 1) for img in images], boxes
    # Unknown order: images pass through untouched (boxes stay flipped),
    # matching the original control flow.
    return images, boxes
22,367 | import math
import numpy as np
import cv2
The provided code snippet includes necessary dependencies for implementing the `spatial_shift_crop_list` function. Write a Python function `def spatial_shift_crop_list(size, images, spatial_shift_pos, boxes=None)` to solve the following problem:
Perform left, center, or right crop of the given list of images. Args: size (int): size to crop. images (list): list of images to crop. Dimension is `height` x `width` x `channel`. spatial_shift_pos (int): option includes 0 (left), 1 (middle), and 2 (right) crop. boxes (list): optional. Corresponding boxes to images. Dimension is `num boxes` x 4. Returns: cropped (ndarray): the cropped list of images with dimension of `height` x `width` x `channel`. boxes (list): optional. Corresponding boxes to images. Dimension is `num boxes` x 4.
Here is the function:
def spatial_shift_crop_list(size, images, spatial_shift_pos, boxes=None):
    """
    Crop a `size` x `size` window from every image at the left/top (0),
    center (1), or right/bottom (2) position along the longer side.
    Args:
        size (int): size to crop.
        images (list): HWC images to crop, all with the same shape.
        spatial_shift_pos (int): 0 (left), 1 (middle), or 2 (right) crop.
        boxes (list): optional. Corresponding boxes to images, each
            `num boxes` x 4; shifted in-place into the crop frame.
    Returns:
        cropped (list): the cropped images.
        boxes (list or None): boxes shifted into the crop frame, if given.
    """
    assert spatial_shift_pos in [0, 1, 2]
    height, width = images[0].shape[0], images[0].shape[1]
    # Default to a center crop along both axes.
    y_offset = int(math.ceil((height - size) / 2))
    x_offset = int(math.ceil((width - size) / 2))
    # Shift along whichever axis has slack (the longer side).
    if height > width:
        y_offset = {0: 0, 2: height - size}.get(spatial_shift_pos, y_offset)
    else:
        x_offset = {0: 0, 2: width - size}.get(spatial_shift_pos, x_offset)
    cropped = [
        img[y_offset : y_offset + size, x_offset : x_offset + size, :]
        for img in images
    ]
    assert cropped[0].shape[0] == size, "Image height not cropped properly"
    assert cropped[0].shape[1] == size, "Image width not cropped properly"
    if boxes is not None:
        for box in boxes:
            box[:, [0, 2]] -= x_offset
            box[:, [1, 3]] -= y_offset
    return cropped, boxes
22,368 | import math
import numpy as np
import cv2
The provided code snippet includes necessary dependencies for implementing the `CHW2HWC` function. Write a Python function `def CHW2HWC(image)` to solve the following problem:
Transpose the dimension from `channel` x `height` x `width` to `height` x `width` x `channel`. Args: image (array): image to transpose. Returns (array): transposed image.
Here is the function:
def CHW2HWC(image):
    """
    Convert an image from `channel` x `height` x `width` layout to
    `height` x `width` x `channel` layout.
    Args:
        image (array): image to transpose.
    Returns
        (array): the transposed image (a view, not a copy).
    """
    return image.transpose(1, 2, 0)
22,369 | import math
import numpy as np
import cv2
The provided code snippet includes necessary dependencies for implementing the `HWC2CHW` function. Write a Python function `def HWC2CHW(image)` to solve the following problem:
Transpose the dimension from `height` x `width` x `channel` to `channel` x `height` x `width`. Args: image (array): image to transpose. Returns (array): transposed image.
Here is the function:
def HWC2CHW(image):
    """
    Convert an image from `height` x `width` x `channel` layout to
    `channel` x `height` x `width` layout.
    Args:
        image (array): image to transpose.
    Returns
        (array): the transposed image (a view, not a copy).
    """
    return image.transpose(2, 0, 1)
22,370 | import math
import numpy as np
import cv2
def saturation_list(var, images):
    """
    Jitter the color saturation of every image by one shared random factor.
    Args:
        var (float): variance; the blend factor is 1 + U(-var, var).
        images (list): images to adjust.
    Returns:
        (list): saturation-jittered images.
    """
    factor = 1.0 + np.random.uniform(-var, var)
    # Blending toward a grayscale copy moves saturation up or down.
    return [blend(image, grayscale(image), factor) for image in images]
def brightness_list(var, images):
    """
    Jitter the brightness of every image by one shared random factor.
    Args:
        var (float): variance; the blend factor is 1 + U(-var, var).
        images (list): images to adjust.
    Returns:
        (list): brightness-jittered images.
    """
    factor = 1.0 + np.random.uniform(-var, var)
    # Blending toward black scales the pixel intensities.
    return [
        blend(image, np.zeros(image.shape, dtype=image.dtype), factor)
        for image in images
    ]
def contrast_list(var, images):
    """
    Jitter the color contrast of every image by one shared random factor.
    Args:
        var (float): variance; the blend factor is 1 + U(-var, var).
        images (list): images to adjust.
    Returns:
        (list): contrast-jittered images.
    """
    factor = 1.0 + np.random.uniform(-var, var)
    out_images = []
    for image in images:
        gray = grayscale(image)
        # Fill with the mean of channel 0 — presumably the grayscale output
        # is identical across channels; verify against `grayscale`.
        gray.fill(np.mean(gray[0]))
        out_images.append(blend(image, gray, factor))
    return out_images
The provided code snippet includes necessary dependencies for implementing the `color_jitter_list` function. Write a Python function `def color_jitter_list( images, img_brightness=0, img_contrast=0, img_saturation=0 )` to solve the following problem:
Perform color jitter on the list of images. Args: images (list): list of images to perform color jitter. img_brightness (float): jitter ratio for brightness. img_contrast (float): jitter ratio for contrast. img_saturation (float): jitter ratio for saturation. Returns: images (list): the jittered list of images.
Here is the function:
def color_jitter_list(
    images, img_brightness=0, img_contrast=0, img_saturation=0
):
    """
    Apply the enabled color jitters (brightness/contrast/saturation) to the
    images, in a random order.
    Args:
        images (list): images to jitter.
        img_brightness (float): brightness jitter ratio (0 disables).
        img_contrast (float): contrast jitter ratio (0 disables).
        img_saturation (float): saturation jitter ratio (0 disables).
    Returns:
        images (list): the jittered images.
    """
    # Collect only the transforms that are enabled.
    transforms = []
    if img_brightness != 0:
        transforms.append(lambda imgs: brightness_list(img_brightness, imgs))
    if img_contrast != 0:
        transforms.append(lambda imgs: contrast_list(img_contrast, imgs))
    if img_saturation != 0:
        transforms.append(lambda imgs: saturation_list(img_saturation, imgs))
    if len(transforms) > 0:
        # Shuffle the application order each call.
        order = np.random.permutation(np.arange(len(transforms)))
        for idx in order:
            images = transforms[idx](images)
    return images
22,371 | import math
import numpy as np
import cv2
The provided code snippet includes necessary dependencies for implementing the `lighting_list` function. Write a Python function `def lighting_list(imgs, alphastd, eigval, eigvec, alpha=None)` to solve the following problem:
Perform AlexNet-style PCA jitter on the given list of images. Args: images (list): list of images to perform lighting jitter. alphastd (float): jitter ratio for PCA jitter. eigval (list): eigenvalues for PCA jitter. eigvec (list[list]): eigenvectors for PCA jitter. Returns: out_images (list): the list of jittered images.
Here is the function:
def lighting_list(imgs, alphastd, eigval, eigvec, alpha=None):
    """
    Perform AlexNet-style PCA lighting jitter on the given list of images.
    One jitter sample is shared by all images and applied in-place per
    channel.
    Args:
        imgs (list): images to jitter, each laid out `channel` x `height` x
            `width` with 3 channels.
        alphastd (float): standard deviation of the PCA jitter; 0 disables.
        eigval (list): eigenvalues for PCA jitter.
        eigvec (list[list]): eigenvectors for PCA jitter.
        alpha (ndarray): optional pre-drawn 1 x 3 jitter sample; when None
            (the default) a fresh sample is drawn.
    Returns:
        out_images (list): the list of jittered images.
    """
    if alphastd == 0:
        return imgs
    # Bug fix: a caller-supplied `alpha` used to be silently overwritten by
    # a fresh random draw; honor it when provided.
    if alpha is None:
        alpha = np.random.normal(0, alphastd, size=(1, 3))
    eig_vec = np.array(eigvec)
    eig_val = np.reshape(eigval, (1, 3))
    # Per-channel shift from the jittered principal components.
    rgb = np.sum(
        eig_vec * np.repeat(alpha, 3, axis=0) * np.repeat(eig_val, 3, axis=0),
        axis=1,
    )
    out_images = []
    for img in imgs:
        # NOTE(review): channels are indexed in reverse (rgb[2 - idx]) —
        # presumably BGR input order; confirm against the data pipeline.
        for idx in range(img.shape[0]):
            img[idx] = img[idx] + rgb[2 - idx]
        out_images.append(img)
    return out_images
22,372 | import math
import numpy as np
import cv2
The provided code snippet includes necessary dependencies for implementing the `color_normalization` function. Write a Python function `def color_normalization(image, mean, stddev)` to solve the following problem:
Perform color normalization on the image with the given mean and stddev. Args: image (array): image to perform color normalization. mean (float): mean value to subtract. stddev (float): stddev to devide.
Here is the function:
def color_normalization(image, mean, stddev):
    """
    Normalize each channel of a CHW image: subtract the channel mean, then
    divide by the channel stddev. Operates in-place on `image`.
    Args:
        image (array): CHW image to normalize.
        mean (float): per-channel means to subtract.
        stddev (float): per-channel stddevs to divide by.
    """
    # Input image should be in CHW format.
    assert len(mean) == image.shape[0], "channel mean not computed properly"
    assert len(stddev) == image.shape[0], "channel stddev not computed properly"
    for channel, (m, s) in enumerate(zip(mean, stddev)):
        image[channel] = image[channel] - m
        image[channel] = image[channel] / s
    return image
22,373 | import math
import numpy as np
import cv2
The provided code snippet includes necessary dependencies for implementing the `horizontal_flip` function. Write a Python function `def horizontal_flip(prob, image, order="CHW")` to solve the following problem:
Horizontally flip the image. Args: prob (float): probability to flip. image (array): image to pad. order (str): order of the `height`, `channel` and `width`. Returns: img (array): flipped image.
Here is the function:
def horizontal_flip(prob, image, order="CHW"):
    """
    With probability `prob`, flip the image horizontally (along width).
    Args:
        prob (float): probability to flip.
        image (array): image to flip.
        order (str): layout of `image`, "CHW" or "HWC".
    Returns:
        img (array): the (possibly flipped) image.
    """
    assert order in ["CHW", "HWC"], "order {} is not supported".format(order)
    if np.random.uniform() >= prob:
        return image
    # Width is the last axis for CHW and the middle axis for HWC.
    if order == "CHW":
        return image[:, :, ::-1]
    return image[:, ::-1, :]
22,374 | import math
import numpy as np
import cv2
def pad_image(image, pad_size, order="CHW"):
    """
    Zero-pad the spatial dimensions of the image by `pad_size` on each side.
    Args:
        image (array): image to pad.
        pad_size (int): number of pixels to pad on every spatial border.
        order (str): layout of `image`, "CHW" or "HWC".
    Returns:
        img (array): padded image.
    Raises:
        ValueError: if `order` is neither "CHW" nor "HWC" (previously this
            path raised an opaque UnboundLocalError).
    """
    spatial_pad = (pad_size, pad_size)
    if order == "CHW":
        pad_width = ((0, 0), spatial_pad, spatial_pad)
    elif order == "HWC":
        pad_width = (spatial_pad, spatial_pad, (0, 0))
    else:
        raise ValueError("order {} is not supported".format(order))
    return np.pad(image, pad_width, mode="constant")
def crop_boxes(boxes, x_offset, y_offset):
    """
    Shift boxes into the coordinate frame of a crop starting at
    (`x_offset`, `y_offset`). Operates in-place on `boxes`.
    Args:
        boxes (array): boxes to shift, `num boxes` x 4 in (x1, y1, x2, y2).
        x_offset (int): offset on x.
        y_offset (int): offset on y.
    """
    for cols, offset in (([0, 2], x_offset), ([1, 3], y_offset)):
        boxes[:, cols] = boxes[:, cols] - offset
    return boxes
The provided code snippet includes necessary dependencies for implementing the `random_crop_list` function. Write a Python function `def random_crop_list(images, size, pad_size=0, order="CHW", boxes=None)` to solve the following problem:
Perform random crop on a list of images. Args: images (list): list of images to perform random crop. size (int): size to crop. pad_size (int): padding size. order (str): order of the `height`, `channel` and `width`. boxes (list): optional. Corresponding boxes to images. Dimension is `num boxes` x 4. Returns: cropped (ndarray): the cropped list of images with dimension of `height` x `width` x `channel`. boxes (list): optional. Corresponding boxes to images. Dimension is `num boxes` x 4.
Here is the function:
def random_crop_list(images, size, pad_size=0, order="CHW", boxes=None):
    """
    Take one random `size` x `size` crop, shared across all images (and
    boxes), optionally zero-padding the spatial borders first.
    Args:
        images (list): images to crop, all with the same shape.
        size (int): crop side length.
        pad_size (int): padding added to every spatial border before cropping.
        order (str): image layout, "CHW" or "HWC".
        boxes (list): optional. Corresponding boxes to images, each
            `num boxes` x 4.
    Returns:
        cropped (list): the cropped images.
        boxes (list or None): boxes shifted into the crop frame, if given.
    """
    if pad_size > 0:
        images = [
            pad_image(pad_size=pad_size, image=image, order=order)
            for image in images
        ]

    def _rand_offset(extent):
        # Random crop start along one axis; 0 when there is no slack.
        return int(np.random.randint(0, extent - size)) if extent > size else 0

    if order == "CHW":
        height, width = images[0].shape[1], images[0].shape[2]
        if height == size and width == size:
            return images, boxes
        y_offset = _rand_offset(height)
        x_offset = _rand_offset(width)
        cropped = [
            image[:, y_offset : y_offset + size, x_offset : x_offset + size]
            for image in images
        ]
        assert cropped[0].shape[1] == size, "Image not cropped properly"
        assert cropped[0].shape[2] == size, "Image not cropped properly"
    elif order == "HWC":
        height, width = images[0].shape[0], images[0].shape[1]
        if height == size and width == size:
            return images, boxes
        y_offset = _rand_offset(height)
        x_offset = _rand_offset(width)
        cropped = [
            image[y_offset : y_offset + size, x_offset : x_offset + size, :]
            for image in images
        ]
        assert cropped[0].shape[0] == size, "Image not cropped properly"
        assert cropped[0].shape[1] == size, "Image not cropped properly"
    if boxes is not None:
        boxes = [crop_boxes(proposal, x_offset, y_offset) for proposal in boxes]
    return cropped, boxes
22,375 | import math
import numpy as np
import cv2
def scale(size, image):
    """
    Resize the image so its short side equals `size`, keeping aspect ratio.
    Args:
        size (int): target short-side length.
        image (array): image to resize, `height` x `width` x `channel`.
    Returns:
        (ndarray): the resized image (float32), or the input unchanged when
            the short side already equals `size`.
    """
    height, width = image.shape[0], image.shape[1]
    # Short side already at the target size: return the input untouched.
    if (width <= height and width == size) or (
        height <= width and height == size
    ):
        return image
    if width < height:
        # Width is the short side; scale height by the same ratio.
        new_width = size
        new_height = int(math.floor((float(height) / width) * size))
    else:
        new_height = size
        new_width = int(math.floor((float(width) / height) * size))
    resized = cv2.resize(
        image, (new_width, new_height), interpolation=cv2.INTER_LINEAR
    )
    return resized.astype(np.float32)
The provided code snippet includes necessary dependencies for implementing the `random_scale_jitter` function. Write a Python function `def random_scale_jitter(image, min_size, max_size)` to solve the following problem:
Perform ResNet style random scale jittering: randomly select the scale from [1/max_size, 1/min_size]. Args: image (array): image to perform random scale. min_size (int): min size to scale. max_size (int) max size to scale. Returns: image (array): scaled image.
Here is the function:
def random_scale_jitter(image, min_size, max_size):
    """
    ResNet-style scale jitter: pick a short-side size uniformly over inverse
    scales in [1/max_size, 1/min_size] and rescale the image to it.
    Args:
        image (array): image to rescale.
        min_size (int): min size to scale.
        max_size (int): max size to scale.
    Returns:
        image (array): the rescaled image.
    """
    target = int(
        round(1.0 / np.random.uniform(1.0 / max_size, 1.0 / min_size))
    )
    return scale(target, image)
22,376 | import math
import numpy as np
import cv2
def scale(size, image):
    """
    Resize the short side of the image to `size`, preserving aspect ratio.
    Args:
        size (int): target short-side length.
        image (array): image to resize, `height` x `width` x `channel`.
    Returns:
        (ndarray): the resized image (float32), or the input unchanged when
            the short side already equals `size`.
    """
    height, width = image.shape[0], image.shape[1]
    # No-op when the short side already matches the target.
    if (width <= height and width == size) or (
        height <= width and height == size
    ):
        return image
    if width < height:
        # Width is the short side; the long side scales proportionally.
        target_w = size
        target_h = int(math.floor((float(height) / width) * size))
    else:
        target_h = size
        target_w = int(math.floor((float(width) / height) * size))
    out = cv2.resize(
        image, (target_w, target_h), interpolation=cv2.INTER_LINEAR
    )
    return out.astype(np.float32)
The provided code snippet includes necessary dependencies for implementing the `random_scale_jitter_list` function. Write a Python function `def random_scale_jitter_list(images, min_size, max_size)` to solve the following problem:
Perform ResNet style random scale jittering on a list of image: randomly select the scale from [1/max_size, 1/min_size]. Note that all the image will share the same scale. Args: images (list): list of images to perform random scale. min_size (int): min size to scale. max_size (int) max size to scale. Returns: images (list): list of scaled image.
Here is the function:
def random_scale_jitter_list(images, min_size, max_size):
    """
    ResNet-style scale jitter over a list of images: one short-side size is
    sampled uniformly over inverse scales in [1/max_size, 1/min_size] and
    shared by every image.
    Args:
        images (list): images to rescale.
        min_size (int): min size to scale.
        max_size (int): max size to scale.
    Returns:
        images (list): the rescaled images.
    """
    target = int(
        round(1.0 / np.random.uniform(1.0 / max_size, 1.0 / min_size))
    )
    return [scale(target, image) for image in images]
22,377 | import math
import numpy as np
import cv2
def scale(size, image):
    """
    Resize the image so that its shorter side becomes `size`, keeping the
    aspect ratio.
    Args:
        size (int): target short-side length.
        image (array): image to resize, `height` x `width` x `channel`.
    Returns:
        (ndarray): the resized image (float32), or the input unchanged when
            the short side already equals `size`.
    """
    h, w = image.shape[0], image.shape[1]
    # Short side already at the target: nothing to do.
    if (w <= h and w == size) or (h <= w and h == size):
        return image
    if w < h:
        # Width is the short side.
        out_w = size
        out_h = int(math.floor((float(h) / w) * size))
    else:
        out_h = size
        out_w = int(math.floor((float(w) / h) * size))
    img = cv2.resize(image, (out_w, out_h), interpolation=cv2.INTER_LINEAR)
    return img.astype(np.float32)
def center_crop(size, image):
    """
    Crop a `size` x `size` window from the center of an HWC image.
    Args:
        size (int): size of the cropped height and width.
        image (array): the image to perform center crop on.
    Returns:
        (array): the center-cropped image.
    """
    height, width = image.shape[0], image.shape[1]
    top = int(math.ceil((height - size) / 2))
    left = int(math.ceil((width - size) / 2))
    cropped = image[top : top + size, left : left + size, :]
    assert cropped.shape[0] == size, "Image height not cropped properly"
    assert cropped.shape[1] == size, "Image width not cropped properly"
    return cropped
The provided code snippet includes necessary dependencies for implementing the `random_sized_crop` function. Write a Python function `def random_sized_crop(image, size, area_frac=0.08)` to solve the following problem:
Perform random sized cropping on the given image. Random crop with size 8% - 100% image area and aspect ratio in [3/4, 4/3]. Args: image (array): image to crop. size (int): size to crop. area_frac (float): area of fraction. Returns: (array): cropped image.
Here is the function:
def random_sized_crop(image, size, area_frac=0.08):
    """
    Inception-style random crop: sample a region covering `area_frac`-100%
    of the image area with aspect ratio in [3/4, 4/3], then resize it to
    `size` x `size`. Falls back to a scaled center crop after 10 failed
    attempts.
    Args:
        image (array): HWC image to crop.
        size (int): output side length.
        area_frac (float): minimum fraction of the image area to keep.
    Returns:
        (array): the cropped and resized image (float32).
    """
    height, width = image.shape[0], image.shape[1]
    area = height * width
    for _ in range(10):
        target_area = np.random.uniform(area_frac, 1.0) * area
        aspect_ratio = np.random.uniform(3.0 / 4.0, 4.0 / 3.0)
        w = int(round(math.sqrt(float(target_area) * aspect_ratio)))
        h = int(round(math.sqrt(float(target_area) / aspect_ratio)))
        # Randomly swap so both orientations are covered.
        if np.random.uniform() < 0.5:
            w, h = h, w
        if h > height or w > width:
            continue
        y_offset = 0 if height == h else int(np.random.randint(0, height - h))
        x_offset = 0 if width == w else int(np.random.randint(0, width - w))
        cropped = image[y_offset : y_offset + h, x_offset : x_offset + w, :]
        assert (
            cropped.shape[0] == h and cropped.shape[1] == w
        ), "Wrong crop size"
        resized = cv2.resize(
            cropped, (size, size), interpolation=cv2.INTER_LINEAR
        )
        return resized.astype(np.float32)
    # Could not find a valid random crop: fall back deterministically.
    return center_crop(size, scale(size, image))
22,378 | import math
import numpy as np
import cv2
The provided code snippet includes necessary dependencies for implementing the `lighting` function. Write a Python function `def lighting(img, alphastd, eigval, eigvec)` to solve the following problem:
Perform AlexNet-style PCA jitter on the given image. Args: image (array): list of images to perform lighting jitter. alphastd (float): jitter ratio for PCA jitter. eigval (array): eigenvalues for PCA jitter. eigvec (list): eigenvectors for PCA jitter. Returns: img (tensor): the jittered image.
Here is the function:
def lighting(img, alphastd, eigval, eigvec):
    """
    Apply AlexNet-style PCA lighting jitter to a single CHW image, in-place
    per channel.
    Args:
        img (array): 3-channel CHW image to jitter.
        alphastd (float): standard deviation of the jitter; 0 disables.
        eigval (array): eigenvalues for PCA jitter.
        eigvec (list): eigenvectors for PCA jitter.
    Returns:
        img (tensor): the jittered image.
    """
    if alphastd == 0:
        return img
    # One jitter sample shared by all pixels.
    alpha = np.random.normal(0, alphastd, size=(1, 3))
    basis = np.array(eigvec)
    values = np.reshape(eigval, (1, 3))
    shift = (
        basis * np.repeat(alpha, 3, axis=0) * np.repeat(values, 3, axis=0)
    ).sum(axis=1)
    # NOTE(review): channels are indexed in reverse (shift[2 - idx]) —
    # presumably BGR input order; confirm against the data pipeline.
    for idx in range(img.shape[0]):
        img[idx] = img[idx] + shift[2 - idx]
    return img
22,379 | import math
import numpy as np
import cv2
def scale(size, image):
    """
    Scale the image so its short side equals `size`, preserving the aspect
    ratio.
    Args:
        size (int): target short-side length.
        image (array): image to resize, `height` x `width` x `channel`.
    Returns:
        (ndarray): the resized image (float32), or the input unchanged when
            the short side already equals `size`.
    """
    height, width = image.shape[:2]
    short_side = min(height, width)
    # Short side already at the target: nothing to do.
    if short_side == size:
        return image
    if width < height:
        # Width is the short side; height scales by the same ratio.
        dst_w = size
        dst_h = int(math.floor((float(height) / width) * size))
    else:
        dst_h = size
        dst_w = int(math.floor((float(width) / height) * size))
    result = cv2.resize(
        image, (dst_w, dst_h), interpolation=cv2.INTER_LINEAR
    )
    return result.astype(np.float32)
def center_crop(size, image):
    """
    Take a centered `size` x `size` crop from an HWC image.
    Args:
        size (int): size of the cropped height and width.
        image (array): the image to perform center crop on.
    Returns:
        (array): the center-cropped image.
    """
    h, w = image.shape[0], image.shape[1]
    y0 = int(math.ceil((h - size) / 2))
    x0 = int(math.ceil((w - size) / 2))
    window = image[y0 : y0 + size, x0 : x0 + size, :]
    assert window.shape[0] == size, "Image height not cropped properly"
    assert window.shape[1] == size, "Image width not cropped properly"
    return window
The provided code snippet includes necessary dependencies for implementing the `random_sized_crop_list` function. Write a Python function `def random_sized_crop_list(images, size, crop_area_fraction=0.08)` to solve the following problem:
Perform random sized cropping on the given list of images. Random crop with size 8% - 100% image area and aspect ratio in [3/4, 4/3]. Args: images (list): image to crop. size (int): size to crop. area_frac (float): area of fraction. Returns: (list): list of cropped image.
Here is the function:
def random_sized_crop_list(images, size, crop_area_fraction=0.08):
    """
    Perform Inception-style random sized cropping, applied identically to
    every image in the list: sample a crop covering `crop_area_fraction` to
    100% of the image area with aspect ratio in [3/4, 4/3], then resize it
    to `size` x `size`.
    Args:
        images (list): images to crop, each `height` x `width` x `channel`.
        size (int): output height and width.
        crop_area_fraction (float): minimum fraction of the area to keep.
    Returns:
        (list): list of cropped-and-resized float32 images.
    """
    for _attempt in range(10):
        full_h = images[0].shape[0]
        full_w = images[0].shape[1]
        target_area = np.random.uniform(crop_area_fraction, 1.0) * (
            full_h * full_w
        )
        aspect = np.random.uniform(3.0 / 4.0, 4.0 / 3.0)
        crop_w = int(round(math.sqrt(float(target_area) * aspect)))
        crop_h = int(round(math.sqrt(float(target_area) / aspect)))
        # Randomly swap width/height so both orientations are covered.
        if np.random.uniform() < 0.5:
            crop_w, crop_h = crop_h, crop_w
        if crop_h > full_h or crop_w > full_w:
            continue
        top = 0 if full_h == crop_h else np.random.randint(0, full_h - crop_h)
        left = 0 if full_w == crop_w else np.random.randint(0, full_w - crop_w)
        top = int(top)
        left = int(left)
        cropped_images = []
        for img in images:
            patch = img[top : top + crop_h, left : left + crop_w, :]
            assert (
                patch.shape[0] == crop_h and patch.shape[1] == crop_w
            ), "Wrong crop size"
            patch = cv2.resize(
                patch, (size, size), interpolation=cv2.INTER_LINEAR
            )
            cropped_images.append(patch.astype(np.float32))
        return cropped_images
    # Fallback after 10 failed attempts: short-side scale then center crop.
    return [center_crop(size, scale(size, image)) for image in images]
22,380 | import math
import numpy as np
import cv2
def saturation(var, image):
    """
    Jitter the color saturation of `image` by blending it with its
    grayscale version using a random alpha in [1 - var, 1 + var].
    Args:
        var (float): jitter magnitude.
        image (array): image to perform color saturation on.
    Returns:
        (array): the saturation-jittered image.
    """
    gray = grayscale(image)
    blend_ratio = 1.0 + np.random.uniform(-var, var)
    return blend(image, gray, blend_ratio)
def brightness(var, image):
    """
    Jitter the brightness of `image` by blending it with an all-zero
    (black) image using a random alpha in [1 - var, 1 + var].
    Args:
        var (float): jitter magnitude.
        image (array): image to perform color brightness on.
    Returns:
        (array): the brightness-jittered image.
    """
    black = np.zeros(image.shape).astype(image.dtype)
    blend_ratio = 1.0 + np.random.uniform(-var, var)
    return blend(image, black, blend_ratio)
def contrast(var, image):
    """
    Jitter the contrast of `image` by blending it with a constant image
    filled with a mean gray value, using a random alpha in [1 - var, 1 + var].
    Args:
        var (float): jitter magnitude.
        image (array): image to perform color contrast on.
    Returns:
        (array): the contrast-jittered image.
    """
    gray = grayscale(image)
    # NOTE(review): the fill value is the mean of gray[0] (the first slice
    # along axis 0) only, not of the whole grayscale image — this mirrors
    # the original code; confirm whether full-image mean was intended.
    gray.fill(np.mean(gray[0]))
    blend_ratio = 1.0 + np.random.uniform(-var, var)
    return blend(image, gray, blend_ratio)
The provided code snippet includes necessary dependencies for implementing the `color_jitter` function. Write a Python function `def color_jitter(image, img_brightness=0, img_contrast=0, img_saturation=0)` to solve the following problem:
Perform color jitter on the given image. Args: image (array): image to perform color jitter. img_brightness (float): jitter ratio for brightness. img_contrast (float): jitter ratio for contrast. img_saturation (float): jitter ratio for saturation. Returns: image (array): the jittered image.
Here is the function:
def color_jitter(image, img_brightness=0, img_contrast=0, img_saturation=0):
    """
    Apply brightness/contrast/saturation jitter to `image` in a random
    order. Only the transforms whose ratio is non-zero are applied.
    Args:
        image (array): image to perform color jitter on.
        img_brightness (float): jitter ratio for brightness.
        img_contrast (float): jitter ratio for contrast.
        img_saturation (float): jitter ratio for saturation.
    Returns:
        image (array): the jittered image.
    """
    ops = []
    if img_brightness != 0:
        ops.append(("brightness", img_brightness))
    if img_contrast != 0:
        ops.append(("contrast", img_contrast))
    if img_saturation != 0:
        ops.append(("saturation", img_saturation))
    if ops:
        dispatch = {
            "brightness": brightness,
            "contrast": contrast,
            "saturation": saturation,
        }
        # Single permutation draw keeps the RNG stream identical to before.
        for op_idx in np.random.permutation(np.arange(len(ops))):
            name, ratio = ops[op_idx]
            image = dispatch[name](ratio, image)
    return image
22,381 | import math
import random
import torch
def _get_pixels(
per_pixel, rand_color, patch_size, dtype=torch.float32, device="cuda"
):
# NOTE I've seen CUDA illegal memory access errors being caused by the normal_()
# paths, flip the order so normal is run on CPU if this becomes a problem
# Issue has been fixed in master https://github.com/pytorch/pytorch/issues/19508
if per_pixel:
return torch.empty(patch_size, dtype=dtype, device=device).normal_()
elif rand_color:
return torch.empty(
(patch_size[0], 1, 1), dtype=dtype, device=device
).normal_()
else:
return torch.zeros((patch_size[0], 1, 1), dtype=dtype, device=device) | null |
22,382 | import logging
import math
import numpy as np
import random
import torch
import torchvision as tv
import torchvision.transforms.functional as F
from PIL import Image, ImageFilter
from scipy.ndimage import gaussian_filter
from torchvision import transforms
from .rand_augment import rand_augment_transform
from .random_erasing import RandomErasing
The provided code snippet includes necessary dependencies for implementing the `clip_boxes_to_image` function. Write a Python function `def clip_boxes_to_image(boxes, height, width)` to solve the following problem:
Clip an array of boxes to an image with the given height and width. Args: boxes (ndarray): bounding boxes to perform clipping. Dimension is `num boxes` x 4. height (int): given image height. width (int): given image width. Returns: clipped_boxes (ndarray): the clipped boxes with dimension of `num boxes` x 4.
Here is the function:
def clip_boxes_to_image(boxes, height, width):
    """
    Clip an array of boxes to an image with the given height and width.
    Args:
        boxes (ndarray): bounding boxes [x1, y1, x2, y2] to perform clipping.
            Dimension is `num boxes` x 4.
        height (int): given image height.
        width (int): given image width.
    Returns:
        clipped_boxes (ndarray): a new array (the input is not modified)
            with the clipped boxes, dimension `num boxes` x 4.
    """
    clipped_boxes = boxes.copy()
    # x coordinates are clamped to [0, width - 1], y to [0, height - 1];
    # np.clip replaces the previous nested minimum/maximum calls.
    clipped_boxes[:, [0, 2]] = np.clip(boxes[:, [0, 2]], 0.0, width - 1.0)
    clipped_boxes[:, [1, 3]] = np.clip(boxes[:, [1, 3]], 0.0, height - 1.0)
    return clipped_boxes
22,383 | import logging
import math
import numpy as np
import random
import torch
import torchvision as tv
import torchvision.transforms.functional as F
from PIL import Image, ImageFilter
from scipy.ndimage import gaussian_filter
from torchvision import transforms
from .rand_augment import rand_augment_transform
from .random_erasing import RandomErasing
The provided code snippet includes necessary dependencies for implementing the `lighting_jitter` function. Write a Python function `def lighting_jitter(images, alphastd, eigval, eigvec)` to solve the following problem:
Perform AlexNet-style PCA jitter on the given images. Args: images (tensor): images to perform lighting jitter. Dimension is `num frames` x `channel` x `height` x `width`. alphastd (float): jitter ratio for PCA jitter. eigval (list): eigenvalues for PCA jitter. eigvec (list[list]): eigenvectors for PCA jitter. Returns: out_images (tensor): the jittered images, the dimension is `num frames` x `channel` x `height` x `width`.
Here is the function:
def lighting_jitter(images, alphastd, eigval, eigvec):
    """
    Perform AlexNet-style PCA lighting jitter on the given images.
    Args:
        images (tensor): images to jitter, either `channel` x `height` x
            `width` or `num frames` x `channel` x `height` x `width`.
        alphastd (float): jitter ratio; 0 returns the input unchanged.
        eigval (list): eigenvalues for PCA jitter.
        eigvec (list[list]): eigenvectors for PCA jitter.
    Returns:
        out_images (tensor): the jittered images, same shape as the input.
    """
    if alphastd == 0:
        return images
    # Sample one coefficient per principal component, then project the
    # perturbation back into pixel space.
    alpha = np.random.normal(0, alphastd, size=(1, 3))
    eig_vec = np.array(eigvec)
    eig_val = np.reshape(eigval, (1, 3))
    rgb = np.sum(
        eig_vec * np.repeat(alpha, 3, axis=0) * np.repeat(eig_val, 3, axis=0),
        axis=1,
    )
    # Channel axis: 0 for C H W input, 1 for T C H W input.
    channel_dim = {3: 0, 4: 1}.get(len(images.shape))
    if channel_dim is None:
        raise NotImplementedError(f"Unsupported dimension {len(images.shape)}")
    out_images = torch.zeros_like(images)
    for ch in range(images.shape[channel_dim]):
        # The reversed index (2 - ch) mirrors the original code; presumably
        # it maps RGB-ordered jitter onto BGR-ordered channels — confirm.
        if channel_dim == 0:
            out_images[ch] = images[ch] + rgb[2 - ch]
        else:
            out_images[:, ch] = images[:, ch] + rgb[2 - ch]
    return out_images
22,384 | import logging
import math
import numpy as np
import random
import torch
import torchvision as tv
import torchvision.transforms.functional as F
from PIL import Image, ImageFilter
from scipy.ndimage import gaussian_filter
from torchvision import transforms
from .rand_augment import rand_augment_transform
from .random_erasing import RandomErasing
The provided code snippet includes necessary dependencies for implementing the `color_normalization` function. Write a Python function `def color_normalization(images, mean, stddev)` to solve the following problem:
Perform color normalization on the given images. Args: images (tensor): images to perform color normalization. Dimension is `num frames` x `channel` x `height` x `width`. mean (list): mean values for normalization. stddev (list): standard deviations for normalization. Returns: out_images (tensor): the normalized images, the dimension is `num frames` x `channel` x `height` x `width`.
Here is the function:
def color_normalization(images, mean, stddev):
    """
    Perform per-channel color normalization: out = (in - mean) / stddev.
    Args:
        images (tensor): images to normalize, either `channel` x `height` x
            `width` or `num frames` x `channel` x `height` x `width`.
        mean (list): per-channel mean values.
        stddev (list): per-channel standard deviations.
    Returns:
        out_images (tensor): the normalized images, same shape as the input.
    """
    ndim = len(images.shape)
    if ndim == 3:
        channel_count = images.shape[0]
    elif ndim == 4:
        channel_count = images.shape[1]
    else:
        raise NotImplementedError(f"Unsupported dimension {len(images.shape)}")
    assert len(mean) == channel_count, "channel mean not computed properly"
    assert len(stddev) == channel_count, "channel stddev not computed properly"
    out_images = torch.zeros_like(images)
    for ch in range(len(mean)):
        if ndim == 3:
            out_images[ch] = (images[ch] - mean[ch]) / stddev[ch]
        else:
            out_images[:, ch] = (images[:, ch] - mean[ch]) / stddev[ch]
    return out_images
22,385 | import logging
import math
import numpy as np
import random
import torch
import torchvision as tv
import torchvision.transforms.functional as F
from PIL import Image, ImageFilter
from scipy.ndimage import gaussian_filter
from torchvision import transforms
from .rand_augment import rand_augment_transform
from .random_erasing import RandomErasing
def _get_param_spatial_crop(
scale, ratio, height, width, num_repeat=10, log_scale=True, switch_hw=False
):
"""
Given scale, ratio, height and width, return sampled coordinates of the videos.
"""
for _ in range(num_repeat):
area = height * width
target_area = random.uniform(*scale) * area
if log_scale:
log_ratio = (math.log(ratio[0]), math.log(ratio[1]))
aspect_ratio = math.exp(random.uniform(*log_ratio))
else:
aspect_ratio = random.uniform(*ratio)
w = int(round(math.sqrt(target_area * aspect_ratio)))
h = int(round(math.sqrt(target_area / aspect_ratio)))
if np.random.uniform() < 0.5 and switch_hw:
w, h = h, w
if 0 < w <= width and 0 < h <= height:
i = random.randint(0, height - h)
j = random.randint(0, width - w)
return i, j, h, w
# Fallback to central crop
in_ratio = float(width) / float(height)
if in_ratio < min(ratio):
w = width
h = int(round(w / min(ratio)))
elif in_ratio > max(ratio):
h = height
w = int(round(h * max(ratio)))
else: # whole image
w = width
h = height
i = (height - h) // 2
j = (width - w) // 2
return i, j, h, w
The provided code snippet includes necessary dependencies for implementing the `random_sized_crop_img` function. Write a Python function `def random_sized_crop_img( im, size, jitter_scale=(0.08, 1.0), jitter_aspect=(3.0 / 4.0, 4.0 / 3.0), max_iter=10, )` to solve the following problem:
Performs Inception-style cropping (used for training).
Here is the function:
def random_sized_crop_img(
    im,
    size,
    jitter_scale=(0.08, 1.0),
    jitter_aspect=(3.0 / 4.0, 4.0 / 3.0),
    max_iter=10,
):
    """
    Performs Inception-style cropping (used for training).
    Args:
        im (tensor): a single image, `channel` x `height` x `width`.
        size (int): output height and width.
        jitter_scale (tuple): area-fraction range to sample from.
        jitter_aspect (tuple): aspect-ratio range to sample from.
        max_iter (int): sampling attempts before the center-crop fallback.
    Returns:
        (tensor): cropped image bilinearly resized to `size` x `size`.
    """
    assert (
        len(im.shape) == 3
    ), "Currently only support image for random_sized_crop"
    height, width = im.shape[1:3]
    top, left, crop_h, crop_w = _get_param_spatial_crop(
        scale=jitter_scale,
        ratio=jitter_aspect,
        height=height,
        width=width,
        num_repeat=max_iter,
        log_scale=False,
        switch_hw=True,
    )
    patch = im[:, top : top + crop_h, left : left + crop_w]
    resized = torch.nn.functional.interpolate(
        patch.unsqueeze(0),
        size=(size, size),
        mode="bilinear",
        align_corners=False,
    )
    return resized.squeeze(0)
22,386 | import logging
import math
import random
import re
import numpy as np
import torch
import torchvision as tv
import torchvision.transforms.functional as F
from PIL import Image, ImageFilter
from scipy.ndimage import gaussian_filter
from torchvision import transforms
from .rand_augment import rand_augment_transform
from .random_erasing import RandomErasing
def _pil_interp(method):
    """
    Map an interpolation-method name to the corresponding PIL resampling
    constant. Unrecognized names fall back to bilinear.
    """
    if method == "lanczos":
        return Image.LANCZOS
    if method == "hamming":
        return Image.HAMMING
    if method == "bicubic":
        return Image.BICUBIC
    return Image.BILINEAR
class RandomResizedCropAndInterpolation:
    """Crop the given PIL Image to random size and aspect ratio with random interpolation.
    A crop of random size (default: of 0.08 to 1.0) of the original size and a random
    aspect ratio (default: of 3/4 to 4/3) of the original aspect ratio is made. This crop
    is finally resized to given size.
    This is popularly used to train the Inception networks.
    Args:
        size: expected output size of each edge
        scale: range of size of the origin size cropped
        ratio: range of aspect ratio of the origin aspect ratio cropped
        interpolation: Default: PIL.Image.BILINEAR
    """
    def __init__(
        self,
        size,
        scale=(0.08, 1.0),
        ratio=(3.0 / 4.0, 4.0 / 3.0),
        interpolation="bilinear",
    ):
        # Normalize `size` to an (h, w) tuple.
        if isinstance(size, tuple):
            self.size = size
        else:
            self.size = (size, size)
        if (scale[0] > scale[1]) or (ratio[0] > ratio[1]):
            print("range should be of kind (min, max)")
        if interpolation == "random":
            # A random PIL interpolation is chosen per call in __call__.
            self.interpolation = _RANDOM_INTERPOLATION
        else:
            self.interpolation = _pil_interp(interpolation)
        self.scale = scale
        self.ratio = ratio
    @staticmethod
    def get_params(img, scale, ratio):
        """Get parameters for ``crop`` for a random sized crop.
        Args:
            img (PIL Image): Image to be cropped.
            scale (tuple): range of size of the origin size cropped
            ratio (tuple): range of aspect ratio of the origin aspect ratio cropped
        Returns:
            tuple: params (i, j, h, w) to be passed to ``crop`` for a random
            sized crop.
        """
        # BUG FIX: this must be a @staticmethod — __call__ invokes
        # self.get_params(img, self.scale, self.ratio); without the
        # decorator the bound call passed `self` as `img` (4 args to a
        # 3-parameter function) and raised TypeError on every use.
        area = img.size[0] * img.size[1]
        for _ in range(10):
            target_area = random.uniform(*scale) * area
            log_ratio = (math.log(ratio[0]), math.log(ratio[1]))
            aspect_ratio = math.exp(random.uniform(*log_ratio))
            w = int(round(math.sqrt(target_area * aspect_ratio)))
            h = int(round(math.sqrt(target_area / aspect_ratio)))
            if w <= img.size[0] and h <= img.size[1]:
                i = random.randint(0, img.size[1] - h)
                j = random.randint(0, img.size[0] - w)
                return i, j, h, w
        # Fallback to central crop
        in_ratio = img.size[0] / img.size[1]
        if in_ratio < min(ratio):
            w = img.size[0]
            h = int(round(w / min(ratio)))
        elif in_ratio > max(ratio):
            h = img.size[1]
            w = int(round(h * max(ratio)))
        else:  # whole image
            w = img.size[0]
            h = img.size[1]
        i = (img.size[1] - h) // 2
        j = (img.size[0] - w) // 2
        return i, j, h, w
    def __call__(self, img):
        """
        Args:
            img (PIL Image): Image to be cropped and resized.
        Returns:
            PIL Image: Randomly cropped and resized image.
        """
        i, j, h, w = self.get_params(img, self.scale, self.ratio)
        if isinstance(self.interpolation, (tuple, list)):
            interpolation = random.choice(self.interpolation)
        else:
            interpolation = self.interpolation
        return F.resized_crop(img, i, j, h, w, self.size, interpolation)
    def __repr__(self):
        if isinstance(self.interpolation, (tuple, list)):
            interpolate_str = " ".join(
                [_pil_interpolation_to_str[x] for x in self.interpolation]
            )
        else:
            interpolate_str = _pil_interpolation_to_str[self.interpolation]
        format_string = self.__class__.__name__ + "(size={0}".format(self.size)
        format_string += ", scale={0}".format(
            tuple(round(s, 4) for s in self.scale)
        )
        format_string += ", ratio={0}".format(
            tuple(round(r, 4) for r in self.ratio)
        )
        format_string += ", interpolation={0})".format(interpolate_str)
        return format_string
def rand_augment_transform(config_str, hparams):
    """
    RandAugment: Practical automated data augmentation... - https://arxiv.org/abs/1909.13719
    Create a RandAugment transform
    :param config_str: String defining configuration of random augmentation. Consists of multiple sections separated by
    dashes ('-'). The first section defines the specific variant of rand augment (currently only 'rand'). The remaining
    sections, not order specific, determine
    'm' - integer magnitude of rand augment
    'n' - integer num layers (number of transform ops selected per image)
    'w' - integer probability weight index (index of a set of weights to influence choice of op)
    'mstd' - float std deviation of magnitude noise applied
    'inc' - integer (bool), use augmentations that increase in severity with magnitude (default: 0)
    Ex 'rand-m9-n3-mstd0.5' results in RandAugment with magnitude 9, num_layers 3, magnitude_std 0.5
    'rand-mstd1-w0' results in magnitude_std 1.0, weights 0, default magnitude of 10 and num_layers 2
    :param hparams: Other hparams (kwargs) for the RandAugmentation scheme
    :return: A PyTorch compatible Transform
    :raises NotImplementedError: if a config section uses an unknown key.
    """
    magnitude = _MAX_LEVEL  # default to _MAX_LEVEL for magnitude (currently 10)
    num_layers = 2  # default to 2 ops per image
    weight_idx = None  # default to no probability weights for op choice
    transforms = _RAND_TRANSFORMS
    config = config_str.split("-")
    assert config[0] == "rand"
    config = config[1:]
    for c in config:
        # Split each section into an alphabetic key and its numeric value.
        cs = re.split(r"(\d.*)", c)
        if len(cs) < 2:
            continue
        key, val = cs[:2]
        if key == "mstd":
            # noise param injected via hparams for now
            hparams.setdefault("magnitude_std", float(val))
        elif key == "inc":
            # NOTE(review): bool(val) is truthy for ANY non-empty string,
            # including "0" — confirm whether 'inc0' should disable this.
            if bool(val):
                transforms = _RAND_INCREASING_TRANSFORMS
        elif key == "m":
            magnitude = int(val)
        elif key == "n":
            num_layers = int(val)
        elif key == "w":
            weight_idx = int(val)
        else:
            # BUG FIX: was `assert NotImplementedError`, which asserts the
            # (always-truthy) exception class and silently ignored unknown
            # keys; actually raise instead.
            raise NotImplementedError(
                f"Unknown RandAugment config key: {key}"
            )
    ra_ops = rand_augment_ops(
        magnitude=magnitude, hparams=hparams, transforms=transforms
    )
    choice_weights = (
        None if weight_idx is None else _select_rand_weights(weight_idx)
    )
    return RandAugment(ra_ops, num_layers, choice_weights=choice_weights)
class RandomErasing:
    """Randomly selects a rectangle region in an image and erases its pixels.
    'Random Erasing Data Augmentation' by Zhong et al.
    See https://arxiv.org/pdf/1708.04896.pdf
    This variant of RandomErasing is intended to be applied to either a batch
    or single image tensor after it has been normalized by dataset mean and std.
    Args:
        probability: Probability that the Random Erasing operation will be performed.
        min_area: Minimum percentage of erased area wrt input image area.
        max_area: Maximum percentage of erased area wrt input image area.
        min_aspect: Minimum aspect ratio of erased area.
        mode: pixel color mode, one of 'const', 'rand', or 'pixel'
        'const' - erase block is constant color of 0 for all channels
        'rand' - erase block is same per-channel random (normal) color
        'pixel' - erase block is per-pixel random (normal) color
        max_count: maximum number of erasing blocks per image, area per box is scaled by count.
        per-image count is randomly chosen between 1 and this value.
    """
    def __init__(
        self,
        probability=0.5,
        min_area=0.02,
        max_area=1 / 3,
        min_aspect=0.3,
        max_aspect=None,
        mode="const",
        min_count=1,
        max_count=None,
        num_splits=0,
        device="cuda",
        cube=True,
    ):
        """Store the erase configuration; see the class docstring for args.
        `cube=True` erases the SAME region across all batch items (see
        _erase_cube); `num_splits > 1` leaves the first batch slice clean.
        """
        self.probability = probability
        self.min_area = min_area
        self.max_area = max_area
        # Symmetric default: max_aspect = 1 / min_aspect.
        max_aspect = max_aspect or 1 / min_aspect
        # Aspect ratios are sampled log-uniformly between these bounds.
        self.log_aspect_ratio = (math.log(min_aspect), math.log(max_aspect))
        self.min_count = min_count
        self.max_count = max_count or min_count
        self.num_splits = num_splits
        mode = mode.lower()
        self.rand_color = False
        self.per_pixel = False
        self.cube = cube
        if mode == "rand":
            self.rand_color = True  # per block random normal
        elif mode == "pixel":
            self.per_pixel = True  # per pixel random normal
        else:
            assert not mode or mode == "const"
        self.device = device
    def _erase(self, img, chan, img_h, img_w, dtype):
        """Erase up to `count` random rectangles, in place, in a single
        (chan, img_h, img_w) image tensor. May be skipped entirely with
        probability 1 - self.probability.
        """
        if random.random() > self.probability:
            return
        area = img_h * img_w
        count = (
            self.min_count
            if self.min_count == self.max_count
            else random.randint(self.min_count, self.max_count)
        )
        for _ in range(count):
            # Rejection-sample a rectangle that fits inside the image;
            # give up silently after 10 tries.
            for _ in range(10):
                target_area = (
                    random.uniform(self.min_area, self.max_area) * area / count
                )
                aspect_ratio = math.exp(random.uniform(*self.log_aspect_ratio))
                h = int(round(math.sqrt(target_area * aspect_ratio)))
                w = int(round(math.sqrt(target_area / aspect_ratio)))
                if w < img_w and h < img_h:
                    top = random.randint(0, img_h - h)
                    left = random.randint(0, img_w - w)
                    img[:, top : top + h, left : left + w] = _get_pixels(
                        self.per_pixel,
                        self.rand_color,
                        (chan, h, w),
                        dtype=dtype,
                        device=self.device,
                    )
                    break
    def _erase_cube(
        self,
        img,
        batch_start,
        batch_size,
        chan,
        img_h,
        img_w,
        dtype,
    ):
        """Erase the same random rectangle(s), in place, in every batch item
        from `batch_start` onward, so the erased region is consistent across
        the batch. Fill values are still sampled per item.
        """
        if random.random() > self.probability:
            return
        area = img_h * img_w
        count = (
            self.min_count
            if self.min_count == self.max_count
            else random.randint(self.min_count, self.max_count)
        )
        for _ in range(count):
            # More tries than _erase (100 vs 10) — each success covers the
            # whole batch.
            for _ in range(100):
                target_area = (
                    random.uniform(self.min_area, self.max_area) * area / count
                )
                aspect_ratio = math.exp(random.uniform(*self.log_aspect_ratio))
                h = int(round(math.sqrt(target_area * aspect_ratio)))
                w = int(round(math.sqrt(target_area / aspect_ratio)))
                if w < img_w and h < img_h:
                    top = random.randint(0, img_h - h)
                    left = random.randint(0, img_w - w)
                    for i in range(batch_start, batch_size):
                        img_instance = img[i]
                        img_instance[
                            :, top : top + h, left : left + w
                        ] = _get_pixels(
                            self.per_pixel,
                            self.rand_color,
                            (chan, h, w),
                            dtype=dtype,
                            device=self.device,
                        )
                    break
    def __call__(self, input):
        """Apply random erasing in place to `input` — either a single image
        (C, H, W) or a batch (B, C, H, W) — and return it.
        """
        if len(input.size()) == 3:
            self._erase(input, *input.size(), input.dtype)
        else:
            batch_size, chan, img_h, img_w = input.size()
            # skip first slice of batch if num_splits is set (for clean portion of samples)
            batch_start = (
                batch_size // self.num_splits if self.num_splits > 1 else 0
            )
            if self.cube:
                self._erase_cube(
                    input,
                    batch_start,
                    batch_size,
                    chan,
                    img_h,
                    img_w,
                    input.dtype,
                )
            else:
                for i in range(batch_start, batch_size):
                    self._erase(input[i], chan, img_h, img_w, input.dtype)
        return input
The provided code snippet includes necessary dependencies for implementing the `transforms_imagenet_train` function. Write a Python function `def transforms_imagenet_train( img_size=224, scale=None, ratio=None, hflip=0.5, vflip=0.0, color_jitter=0.4, auto_augment=None, interpolation="random", use_prefetcher=False, mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225), re_prob=0.0, re_mode="const", re_count=1, re_num_splits=0, separate=False, )` to solve the following problem:
If separate==True, the transforms are returned as a tuple of 3 separate transforms for use in a mixing dataset that passes * all data through the first (primary) transform, called the 'clean' data * a portion of the data through the secondary transform * normalizes and converts the branches above with the third, final transform
Here is the function:
def transforms_imagenet_train(
    img_size=224,
    scale=None,
    ratio=None,
    hflip=0.5,
    vflip=0.0,
    color_jitter=0.4,
    auto_augment=None,
    interpolation="random",
    use_prefetcher=False,
    mean=(0.485, 0.456, 0.406),
    std=(0.229, 0.224, 0.225),
    re_prob=0.0,
    re_mode="const",
    re_count=1,
    re_num_splits=0,
    separate=False,
):
    """
    Build the ImageNet training transform pipeline: random resized crop ->
    optional flips -> (RandAugment OR color jitter) -> ToTensor/Normalize ->
    optional RandomErasing.

    If separate==True, the transforms are returned as a tuple of 3 separate transforms
    for use in a mixing dataset that passes
    * all data through the first (primary) transform, called the 'clean' data
    * a portion of the data through the secondary transform
    * normalizes and converts the branches above with the third, final transform

    Args:
        img_size (int or tuple): output crop size.
        scale (tuple): area-fraction range for the random resized crop.
        ratio (tuple): aspect-ratio range for the random resized crop.
        hflip (float): horizontal flip probability.
        vflip (float): vertical flip probability.
        color_jitter (float/tuple/None): jitter strengths; ignored when
            auto_augment is set.
        auto_augment (str): auto-augment config string; only 'rand*'
            (RandAugment) is implemented here.
        interpolation (str): PIL interpolation name, or 'random'.
        use_prefetcher (bool): unused in this function; kept for interface
            parity with callers.
        mean (tuple): normalization mean.
        std (tuple): normalization std.
        re_prob/re_mode/re_count/re_num_splits: RandomErasing parameters;
            erasing is only added when re_prob > 0.
        separate (bool): see above.
    Returns:
        A torchvision Compose, or a 3-tuple of Composes when separate=True.
    """
    if isinstance(img_size, tuple):
        img_size = img_size[-2:]
    else:
        # no-op: scalar sizes pass through unchanged
        img_size = img_size
    scale = tuple(scale or (0.08, 1.0))  # default imagenet scale range
    ratio = tuple(
        ratio or (3.0 / 4.0, 4.0 / 3.0)
    )  # default imagenet ratio range
    primary_tfl = [
        RandomResizedCropAndInterpolation(
            img_size, scale=scale, ratio=ratio, interpolation=interpolation
        )
    ]
    if hflip > 0.0:
        primary_tfl += [transforms.RandomHorizontalFlip(p=hflip)]
    if vflip > 0.0:
        primary_tfl += [transforms.RandomVerticalFlip(p=vflip)]
    secondary_tfl = []
    if auto_augment:
        assert isinstance(auto_augment, str)
        if isinstance(img_size, tuple):
            img_size_min = min(img_size)
        else:
            img_size_min = img_size
        # RandAugment hyper-params derived from the crop size and mean
        # (fill color for geometric ops, in 0-255 space).
        aa_params = dict(
            translate_const=int(img_size_min * 0.45),
            img_mean=tuple([min(255, round(255 * x)) for x in mean]),
        )
        if interpolation and interpolation != "random":
            aa_params["interpolation"] = _pil_interp(interpolation)
        if auto_augment.startswith("rand"):
            secondary_tfl += [rand_augment_transform(auto_augment, aa_params)]
        elif auto_augment.startswith("augmix"):
            raise NotImplementedError("Augmix not implemented")
        else:
            raise NotImplementedError("Auto aug not implemented")
    elif color_jitter is not None:
        # color jitter is enabled when not using AA
        if isinstance(color_jitter, (list, tuple)):
            # color jitter should be a 3-tuple/list if spec brightness/contrast/saturation
            # or 4 if also augmenting hue
            assert len(color_jitter) in (3, 4)
        else:
            # if it's a scalar, duplicate for brightness, contrast, and saturation, no hue
            color_jitter = (float(color_jitter),) * 3
        secondary_tfl += [transforms.ColorJitter(*color_jitter)]
    final_tfl = []
    final_tfl += [
        transforms.ToTensor(),
        transforms.Normalize(mean=torch.tensor(mean), std=torch.tensor(std)),
    ]
    if re_prob > 0.0:
        # RandomErasing operates on normalized tensors, so it must come
        # after ToTensor/Normalize; runs on CPU here.
        final_tfl.append(
            RandomErasing(
                re_prob,
                mode=re_mode,
                max_count=re_count,
                num_splits=re_num_splits,
                device="cpu",
                cube=False,
            )
        )
    if separate:
        return (
            transforms.Compose(primary_tfl),
            transforms.Compose(secondary_tfl),
            transforms.Compose(final_tfl),
        )
    else:
        return transforms.Compose(primary_tfl + secondary_tfl + final_tfl)
22,387 | import logging
import math
import numpy as np
import random
import torch
import torchvision as tv
import torchvision.transforms.functional as F
from PIL import Image, ImageFilter
from scipy.ndimage import gaussian_filter
from torchvision import transforms
from .rand_augment import rand_augment_transform
from .random_erasing import RandomErasing
def color_jitter(images, img_brightness=0, img_contrast=0, img_saturation=0):
    """
    Apply brightness/contrast/saturation jitter to a clip of frames in a
    random order. The channels of images should be in order BGR. Only the
    transforms with a non-zero ratio are applied.
    Args:
        images (tensor): frames to jitter, `num frames` x `channel` x
            `height` x `width`.
        img_brightness (float): jitter ratio for brightness.
        img_contrast (float): jitter ratio for contrast.
        img_saturation (float): jitter ratio for saturation.
    Returns:
        images (tensor): the jittered frames, same shape as the input.
    """
    ops = []
    if img_brightness != 0:
        ops.append(("brightness", img_brightness))
    if img_contrast != 0:
        ops.append(("contrast", img_contrast))
    if img_saturation != 0:
        ops.append(("saturation", img_saturation))
    if ops:
        dispatch = {
            "brightness": brightness_jitter,
            "contrast": contrast_jitter,
            "saturation": saturation_jitter,
        }
        # Single permutation draw keeps the RNG stream identical to before.
        for op_idx in np.random.permutation(np.arange(len(ops))):
            name, ratio = ops[op_idx]
            images = dispatch[name](ratio, images)
    return images
class GaussianBlur:
    """Gaussian blur augmentation in SimCLR https://arxiv.org/abs/2002.05709"""
    def __init__(self, sigma=None):
        """
        Args:
            sigma (list): either [min, max] to sample the blur radius
                uniformly per call, or [value] for a fixed radius.
                Defaults to [0.1, 2.0].
        """
        # BUG FIX: the previous default `sigma=[0.1, 2.0]` was a mutable
        # default argument shared across all instances.
        self.sigma = [0.1, 2.0] if sigma is None else sigma
    def __call__(self, x):
        """Blur the given PIL image with a (possibly random) Gaussian radius."""
        if len(self.sigma) == 2:
            radius = random.uniform(self.sigma[0], self.sigma[1])
        elif len(self.sigma) == 1:
            radius = self.sigma[0]
        # NOTE(review): any other sigma length leaves `radius` unbound and
        # raises NameError below — mirrors the original behavior.
        x = x.filter(ImageFilter.GaussianBlur(radius=radius))
        return x
def color_jitter_video_ssl(
    frames,
    bri_con_sat=[0.4] * 3,
    hue=0.1,
    p_convert_gray=0.0,
    moco_v2_aug=False,
    gaussan_sigma_min=[0.0, 0.1],
    gaussan_sigma_max=[0.0, 2.0],
):
    """
    SSL-style color jittering applied consistently across a video clip.
    Args:
        frames (tensor): clip of shape T x H x W x C.
        bri_con_sat (list): brightness/contrast/saturation jitter strengths.
        hue (float): hue jitter strength.
        p_convert_gray (float): probability of converting to grayscale.
        moco_v2_aug (bool): if True, use the MoCo-v2 recipe (randomly
            applied color jitter plus Gaussian blur); otherwise jitter is
            always applied.
        gaussan_sigma_min (list): unused in this function; kept for
            interface parity.
        gaussan_sigma_max (list): unused in this function; kept for
            interface parity.
    Returns:
        (tensor): jittered clip, T x H x W x C.
    """
    # T H W C -> C T H W.
    clip = frames.permute(3, 0, 1, 2)
    # Named `jitter_fn` to avoid shadowing the module-level color_jitter().
    if moco_v2_aug:
        jitter_fn = tv.transforms.Compose(
            [
                tv.transforms.ToPILImage(),
                tv.transforms.RandomApply(
                    [
                        tv.transforms.ColorJitter(
                            bri_con_sat[0], bri_con_sat[1], bri_con_sat[2], hue
                        )
                    ],
                    p=0.8,
                ),
                tv.transforms.RandomGrayscale(p=p_convert_gray),
                tv.transforms.RandomApply([GaussianBlur([0.1, 2.0])], p=0.5),
                tv.transforms.ToTensor(),
            ]
        )
    else:
        jitter_fn = tv.transforms.Compose(
            [
                tv.transforms.ToPILImage(),
                tv.transforms.RandomGrayscale(p=p_convert_gray),
                tv.transforms.ColorJitter(
                    bri_con_sat[0], bri_con_sat[1], bri_con_sat[2], hue
                ),
                tv.transforms.ToTensor(),
            ]
        )
    c, t, h, w = clip.shape
    # Fold time into height so a single 2-D transform covers the whole clip
    # with identical parameters for every frame.
    flat = clip.view(c, t * h, w)
    flat = jitter_fn(flat)
    clip = flat.view(c, t, h, w)
    # C T H W -> T H W C.
    return clip.permute(1, 2, 3, 0)
22,388 | import functools
import os
from typing import Dict
import torch
from torch.utils.data import (
DistributedSampler,
RandomSampler,
SequentialSampler,
)
from torchvision.transforms import Compose, Lambda
from torchvision.transforms._transforms_video import (
NormalizeVideo,
RandomCropVideo,
RandomHorizontalFlipVideo,
)
import slowfast.utils.logging as logging
from pytorchvideo.data import (
Charades,
LabeledVideoDataset,
SSv2,
make_clip_sampler,
)
from pytorchvideo.data.labeled_video_paths import LabeledVideoPaths
from pytorchvideo.transforms import (
ApplyTransformToKey,
RandomShortSideScale,
ShortSideScale,
UniformCropVideo,
UniformTemporalSubsample,
)
from . import utils as utils
from .build import DATASET_REGISTRY
logger = logging.get_logger(__name__)
class PTVDatasetWrapper(torch.utils.data.IterableDataset):
    """
    Wrapper for PyTorchVideo datasets.
    """
    def __init__(self, num_videos, clips_per_video, crops_per_clip, dataset):
        """
        Construct the dataset.
        Args:
            num_videos (int): number of videos in the dataset.
            clips_per_video (int): number of clips per video in the dataset.
            crops_per_clip (int): number of spatial crops taken per clip.
            dataset (torch.utils.data.IterableDataset): a PyTorchVideo dataset.
        """
        self._clips_per_video = clips_per_video
        self._crops_per_clip = crops_per_clip
        self._num_videos = num_videos
        self.dataset = dataset
    def __next__(self):
        """
        Retrieves the next clip from the wrapped dataset.
        """
        return self.dataset.__next__()
    @property
    def sampler(self):
        """
        Returns:
            (torch.utils.data.Sampler): video sampler for the dataset.
        """
        # BUG FIX: must be a @property — __len__ below evaluates
        # `len(self.sampler)`, which fails on a bound method.
        return self.dataset.video_sampler
    def __len__(self):
        """
        Returns:
            (int): the number of clips per replica in the IterableDataset.
        """
        return len(self.sampler) * self._clips_per_video * self._crops_per_clip
    @property
    def num_videos(self):
        """
        Returns:
            (int): the number of clips in total in the dataset.
        """
        # BUG FIX: @property restored to match the sibling `sampler`
        # accessor and upstream usage as an attribute.
        return self._num_videos * self._clips_per_video * self._crops_per_clip
    def __iter__(self):
        return self
class PackPathway(torch.nn.Module):
    """
    Transform that splits a clip tensor into a list of per-pathway tensors,
    one entry per pathway of the configured model.
    """

    def __init__(self, cfg):
        super().__init__()
        self.cfg = cfg

    def forward(self, x: torch.Tensor):
        # Delegate to the shared dataset utility so pathway packing stays
        # consistent across loaders.
        packed = utils.pack_pathway_output(self.cfg, x)
        return packed
class DictToTuple(torch.nn.Module):
    """
    Transform converting a PyTorchVideo clip dict into the
    (frames, label, index, extra) tuple format used by PySlowFast loaders.
    """

    def __init__(self, num_clips, num_crops):
        """
        Args:
            num_clips (int): clips sampled per video.
            num_crops (int): spatial crops taken per clip.
        """
        super().__init__()
        self._num_clips = num_clips
        self._num_crops = num_crops

    def forward(self, x: Dict[str, torch.Tensor]):
        # Flatten (video, clip, crop) indices into one global clip index.
        per_video = self._num_clips * self._num_crops
        flat_index = (
            x["video_index"] * per_video
            + x["clip_index"] * self._num_crops
            + x["aug_index"]
        )
        return x["video"], x["label"], flat_index, {}
def div255(x):
    """
    Normalize clip frames from the [0, 255] byte range into [0, 1].

    Args:
        x (Tensor): clip RGB frames of shape (channel, time, height, width).

    Returns:
        Tensor: the input divided by 255.
    """
    scaled = x / 255.0
    return scaled
The provided code snippet includes the necessary dependencies for implementing the `Ptvkinetics` function. Write a Python function `def Ptvkinetics(cfg, mode)` to solve the following problem:
Construct the Kinetics video loader with a given csv file. The format of the csv file is: ``` path_to_video_1 label_1 path_to_video_2 label_2 ... path_to_video_N label_N ``` For `train` and `val` mode, a single clip is randomly sampled from every video with random cropping, scaling, and flipping. For `test` mode, multiple clips are uniformly sampled from every video with center cropping. Args: cfg (CfgNode): configs. mode (string): Options include `train`, `val`, or `test` mode. For the train and val mode, the data loader will take data from the train or val set, and sample one clip per video. For the test mode, the data loader will take data from the test set, and sample multiple clips per video.
Here is the function:
def Ptvkinetics(cfg, mode):
    """
    Construct the Kinetics video loader with a given csv file. The format of
    the csv file is:
    ```
    path_to_video_1 label_1
    path_to_video_2 label_2
    ...
    path_to_video_N label_N
    ```
    For `train` and `val` mode, a single clip is randomly sampled from every video
    with random cropping, scaling, and flipping. For `test` mode, multiple clips are
    uniformly sampled from every video with center cropping.
    Args:
        cfg (CfgNode): configs.
        mode (string): Options includes `train`, `val`, or `test` mode.
            For the train and val mode, the data loader will take data
            from the train or val set, and sample one clip per video.
            For the test mode, the data loader will take data from test set,
            and sample multiple clips per video.
    """
    # Only support train, val, and test mode.
    assert mode in [
        "train",
        "val",
        "test",
    ], "Split '{}' not supported".format(mode)
    logger.info("Constructing Ptvkinetics {}...".format(mode))
    # Clip length in seconds, assuming the source video runs at TARGET_FPS.
    clip_duration = (
        cfg.DATA.NUM_FRAMES * cfg.DATA.SAMPLING_RATE / cfg.DATA.TARGET_FPS
    )
    path_to_file = os.path.join(
        cfg.DATA.PATH_TO_DATA_DIR, "{}.csv".format(mode)
    )
    labeled_video_paths = LabeledVideoPaths.from_path(path_to_file)
    num_videos = len(labeled_video_paths)
    labeled_video_paths.path_prefix = cfg.DATA.PATH_PREFIX
    logger.info(
        "Constructing kinetics dataloader (size: {}) from {}".format(
            num_videos, path_to_file
        )
    )
    if mode in ["train", "val"]:
        # Train/val: one randomly located clip per video, with random
        # short-side scale jitter, random crop, and optional horizontal flip.
        num_clips = 1
        num_crops = 1
        transform = Compose(
            [
                ApplyTransformToKey(
                    key="video",
                    transform=Compose(
                        [
                            UniformTemporalSubsample(cfg.DATA.NUM_FRAMES),
                            Lambda(div255),
                            NormalizeVideo(cfg.DATA.MEAN, cfg.DATA.STD),
                            RandomShortSideScale(
                                min_size=cfg.DATA.TRAIN_JITTER_SCALES[0],
                                max_size=cfg.DATA.TRAIN_JITTER_SCALES[1],
                            ),
                            RandomCropVideo(cfg.DATA.TRAIN_CROP_SIZE),
                        ]
                        + (
                            [RandomHorizontalFlipVideo(p=0.5)]
                            if cfg.DATA.RANDOM_FLIP
                            else []
                        )
                        + [PackPathway(cfg)]
                    ),
                ),
                DictToTuple(num_clips, num_crops),
            ]
        )
        clip_sampler = make_clip_sampler("random", clip_duration)
        if cfg.NUM_GPUS > 1:
            video_sampler = DistributedSampler
        else:
            video_sampler = (
                RandomSampler if mode == "train" else SequentialSampler
            )
    else:
        # Test: NUM_ENSEMBLE_VIEWS clips per video and NUM_SPATIAL_CROPS
        # uniform spatial crops per clip, sampled deterministically.
        num_clips = cfg.TEST.NUM_ENSEMBLE_VIEWS
        num_crops = cfg.TEST.NUM_SPATIAL_CROPS
        transform = Compose(
            [
                ApplyTransformToKey(
                    key="video",
                    transform=Compose(
                        [
                            UniformTemporalSubsample(cfg.DATA.NUM_FRAMES),
                            Lambda(div255),
                            NormalizeVideo(cfg.DATA.MEAN, cfg.DATA.STD),
                            # NOTE(review): scales to the train jitter minimum
                            # (not TEST_CROP_SIZE) before uniform cropping —
                            # confirm this matches the intended test-time
                            # resolution.
                            ShortSideScale(
                                size=cfg.DATA.TRAIN_JITTER_SCALES[0]
                            ),
                        ]
                    ),
                ),
                UniformCropVideo(size=cfg.DATA.TEST_CROP_SIZE),
                ApplyTransformToKey(key="video", transform=PackPathway(cfg)),
                DictToTuple(num_clips, num_crops),
            ]
        )
        clip_sampler = make_clip_sampler(
            "constant_clips_per_video",
            clip_duration,
            num_clips,
            num_crops,
        )
        video_sampler = (
            DistributedSampler if cfg.NUM_GPUS > 1 else SequentialSampler
        )
    return PTVDatasetWrapper(
        num_videos=num_videos,
        clips_per_video=num_clips,
        crops_per_clip=num_crops,
        dataset=LabeledVideoDataset(
            labeled_video_paths=labeled_video_paths,
            clip_sampler=clip_sampler,
            video_sampler=video_sampler,
            transform=transform,
            decode_audio=False,
            decoder=cfg.DATA.DECODING_BACKEND,
        ),
    )
22,389 | import functools
import os
from typing import Dict
import torch
from torch.utils.data import (
DistributedSampler,
RandomSampler,
SequentialSampler,
)
from torchvision.transforms import Compose, Lambda
from torchvision.transforms._transforms_video import (
NormalizeVideo,
RandomCropVideo,
RandomHorizontalFlipVideo,
)
import slowfast.utils.logging as logging
from pytorchvideo.data import (
Charades,
LabeledVideoDataset,
SSv2,
make_clip_sampler,
)
from pytorchvideo.data.labeled_video_paths import LabeledVideoPaths
from pytorchvideo.transforms import (
ApplyTransformToKey,
RandomShortSideScale,
ShortSideScale,
UniformCropVideo,
UniformTemporalSubsample,
)
from . import utils as utils
from .build import DATASET_REGISTRY
logger = logging.get_logger(__name__)
class PTVDatasetWrapper(torch.utils.data.IterableDataset):
    """
    Wrapper around a PyTorchVideo IterableDataset that exposes the
    clip/crop bookkeeping PySlowFast's loaders expect.
    """

    def __init__(self, num_videos, clips_per_video, crops_per_clip, dataset):
        """
        Construct the dataset wrapper.
        Args:
            num_videos (int): number of videos in the dataset.
            clips_per_video (int): number of clips sampled per video.
            crops_per_clip (int): number of spatial crops taken per clip.
            dataset (torch.utils.data.IterableDataset): a PyTorchVideo dataset.
        """
        self._clips_per_video = clips_per_video
        self._crops_per_clip = crops_per_clip
        self._num_videos = num_videos
        self.dataset = dataset

    def __next__(self):
        """
        Retrieves the next clip from the wrapped dataset.
        """
        return self.dataset.__next__()

    def sampler(self):
        """
        Returns:
            (torch.utils.data.Sampler): video sampler of the wrapped dataset.
        """
        return self.dataset.video_sampler

    def __len__(self):
        """
        Returns:
            (int): the number of clips per replica in the IterableDataset.
        """
        # Fix: `sampler` is a plain method here, so it must be called before
        # taking len(); `len(self.sampler)` raises TypeError on a bound method.
        return (
            len(self.sampler()) * self._clips_per_video * self._crops_per_clip
        )

    def num_videos(self):
        """
        Returns:
            (int): the total number of clips in the dataset.
        """
        return self._num_videos * self._clips_per_video * self._crops_per_clip

    def __iter__(self):
        return self
class PackPathway(torch.nn.Module):
    """
    Transform that splits a clip tensor into a list of per-pathway tensors,
    one entry per pathway of the configured model.
    """

    def __init__(self, cfg):
        super().__init__()
        self.cfg = cfg

    def forward(self, x: torch.Tensor):
        # Delegate to the shared dataset utility so pathway packing stays
        # consistent across loaders.
        packed = utils.pack_pathway_output(self.cfg, x)
        return packed
class DictToTuple(torch.nn.Module):
    """
    Transform converting a PyTorchVideo clip dict into the
    (frames, label, index, extra) tuple format used by PySlowFast loaders.
    """

    def __init__(self, num_clips, num_crops):
        """
        Args:
            num_clips (int): clips sampled per video.
            num_crops (int): spatial crops taken per clip.
        """
        super().__init__()
        self._num_clips = num_clips
        self._num_crops = num_crops

    def forward(self, x: Dict[str, torch.Tensor]):
        # Flatten (video, clip, crop) indices into one global clip index.
        per_video = self._num_clips * self._num_crops
        flat_index = (
            x["video_index"] * per_video
            + x["clip_index"] * self._num_crops
            + x["aug_index"]
        )
        return x["video"], x["label"], flat_index, {}
def div255(x):
    """
    Normalize clip frames from the [0, 255] byte range into [0, 1].

    Args:
        x (Tensor): clip RGB frames of shape (channel, time, height, width).

    Returns:
        Tensor: the input divided by 255.
    """
    scaled = x / 255.0
    return scaled
def process_charades_label(x, mode, num_classes):
    """
    Attach a multi-hot label vector to a Charades clip dict.

    Training uses the aggregated video-level labels; validation/testing use
    the clip-level label. The chosen label list is then converted into a
    binary vector of length `num_classes`.

    Args:
        x (dict): a video clip dict including label information.
        mode (string): `train`, `val`, or `test`.
        num_classes (int): number of classes in the dataset.

    Returns:
        x (dict): the clip dict with `label` replaced by a binary vector.
    """
    if mode == "train":
        label = utils.aggregate_labels(x["label"])
    else:
        label = x["video_label"]
    x["label"] = torch.as_tensor(utils.as_binary_vector(label, num_classes))
    return x
def rgb2bgr(x):
    """
    Reverse the channel order of a clip from RGB to BGR.

    Args:
        x (Tensor): clip frames of shape (channel, time, height, width).

    Returns:
        Tensor: the same frames with channels reordered as B, G, R.
    """
    bgr_order = [2, 1, 0]
    return x[bgr_order, ...]
The provided code snippet includes the necessary dependencies for implementing the `Ptvcharades` function. Write a Python function `def Ptvcharades(cfg, mode)` to solve the following problem:
Construct the PyTorchVideo Charades video loader. Load Charades data (frame paths, labels, etc.) into a Charades Dataset object. The dataset can be downloaded from the Charades official website (https://allenai.org/plato/charades/). Please see datasets/DATASET.md for more information about the data format. For `train` and `val` mode, a single clip is randomly sampled from every video with random cropping, scaling, and flipping. For `test` mode, multiple clips are uniformly sampled from every video with center cropping. Args: cfg (CfgNode): configs. mode (string): Options include `train`, `val`, or `test` mode. For the train and val mode, the data loader will take data from the train or val set, and sample one clip per video. For the test mode, the data loader will take data from the test set, and sample multiple clips per video.
Here is the function:
def Ptvcharades(cfg, mode):
    """
    Construct PyTorchVideo Charades video loader.
    Load Charades data (frame paths, labels, etc. ) to Charades Dataset object.
    The dataset could be downloaded from the Charades official website
    (https://allenai.org/plato/charades/).
    Please see datasets/DATASET.md for more information about the data format.
    For `train` and `val` mode, a single clip is randomly sampled from every video
    with random cropping, scaling, and flipping. For `test` mode, multiple clips are
    uniformly sampled from every video with center cropping.
    Args:
        cfg (CfgNode): configs.
        mode (string): Options includes `train`, `val`, or `test` mode.
            For the train and val mode, the data loader will take data
            from the train or val set, and sample one clip per video.
            For the test mode, the data loader will take data from test set,
            and sample multiple clips per video.
    """
    # Only support train, val, and test mode.
    assert mode in [
        "train",
        "val",
        "test",
    ], "Split '{}' not supported".format(mode)
    logger.info("Constructing Ptvcharades {}...".format(mode))
    # Clip duration in seconds: (NUM_FRAMES - 1) strides plus the final
    # frame, converted at TARGET_FPS.
    clip_duration = (
        (cfg.DATA.NUM_FRAMES - 1) * cfg.DATA.SAMPLING_RATE + 1
    ) / cfg.DATA.TARGET_FPS
    if mode in ["train", "val"]:
        # Train/val: one random clip per video with scale/crop jitter and
        # optional horizontal flip; frames are converted to BGR before
        # pathway packing.
        num_clips = 1
        num_crops = 1
        transform = Compose(
            [
                ApplyTransformToKey(
                    key="video",
                    transform=Compose(
                        [
                            Lambda(div255),
                            NormalizeVideo(cfg.DATA.MEAN, cfg.DATA.STD),
                            RandomShortSideScale(
                                min_size=cfg.DATA.TRAIN_JITTER_SCALES[0],
                                max_size=cfg.DATA.TRAIN_JITTER_SCALES[1],
                            ),
                            RandomCropVideo(cfg.DATA.TRAIN_CROP_SIZE),
                            Lambda(rgb2bgr),
                        ]
                        + (
                            [RandomHorizontalFlipVideo(p=0.5)]
                            if cfg.DATA.RANDOM_FLIP
                            else []
                        )
                        + [PackPathway(cfg)]
                    ),
                ),
                # Replace the clip label with a multi-hot vector (video-level
                # labels in train mode, clip-level otherwise).
                Lambda(
                    functools.partial(
                        process_charades_label,
                        mode=mode,
                        num_classes=cfg.MODEL.NUM_CLASSES,
                    )
                ),
                DictToTuple(num_clips, num_crops),
            ]
        )
        clip_sampler = make_clip_sampler("random", clip_duration)
        if cfg.NUM_GPUS > 1:
            video_sampler = DistributedSampler
        else:
            video_sampler = (
                RandomSampler if mode == "train" else SequentialSampler
            )
    else:
        # Test: NUM_ENSEMBLE_VIEWS clips per video with NUM_SPATIAL_CROPS
        # uniform spatial crops per clip.
        num_clips = cfg.TEST.NUM_ENSEMBLE_VIEWS
        num_crops = cfg.TEST.NUM_SPATIAL_CROPS
        transform = Compose(
            [
                ApplyTransformToKey(
                    key="video",
                    transform=Compose(
                        [
                            Lambda(div255),
                            NormalizeVideo(cfg.DATA.MEAN, cfg.DATA.STD),
                            ShortSideScale(size=cfg.DATA.TEST_CROP_SIZE),
                        ]
                    ),
                ),
                UniformCropVideo(size=cfg.DATA.TEST_CROP_SIZE),
                Lambda(
                    functools.partial(
                        process_charades_label,
                        mode=mode,
                        num_classes=cfg.MODEL.NUM_CLASSES,
                    )
                ),
                # BGR conversion and pathway packing happen after cropping.
                ApplyTransformToKey(
                    key="video",
                    transform=Compose(
                        [Lambda(rgb2bgr), PackPathway(cfg)],
                    ),
                ),
                DictToTuple(num_clips, num_crops),
            ]
        )
        clip_sampler = make_clip_sampler(
            "constant_clips_per_video",
            clip_duration,
            num_clips,
            num_crops,
        )
        video_sampler = (
            DistributedSampler if cfg.NUM_GPUS > 1 else SequentialSampler
        )
    data_path = os.path.join(cfg.DATA.PATH_TO_DATA_DIR, "{}.csv".format(mode))
    dataset = Charades(
        data_path=data_path,
        clip_sampler=clip_sampler,
        video_sampler=video_sampler,
        transform=transform,
        video_path_prefix=cfg.DATA.PATH_PREFIX,
        frames_per_clip=cfg.DATA.NUM_FRAMES,
    )
    logger.info(
        "Constructing charades dataloader (size: {}) from {}".format(
            len(dataset._path_to_videos), data_path
        )
    )
    return PTVDatasetWrapper(
        num_videos=len(dataset._path_to_videos),
        clips_per_video=num_clips,
        crops_per_clip=num_crops,
        dataset=dataset,
    )
22,390 | import functools
import os
from typing import Dict
import torch
from torch.utils.data import (
DistributedSampler,
RandomSampler,
SequentialSampler,
)
from torchvision.transforms import Compose, Lambda
from torchvision.transforms._transforms_video import (
NormalizeVideo,
RandomCropVideo,
RandomHorizontalFlipVideo,
)
import slowfast.utils.logging as logging
from pytorchvideo.data import (
Charades,
LabeledVideoDataset,
SSv2,
make_clip_sampler,
)
from pytorchvideo.data.labeled_video_paths import LabeledVideoPaths
from pytorchvideo.transforms import (
ApplyTransformToKey,
RandomShortSideScale,
ShortSideScale,
UniformCropVideo,
UniformTemporalSubsample,
)
from . import utils as utils
from .build import DATASET_REGISTRY
logger = logging.get_logger(__name__)
class PTVDatasetWrapper(torch.utils.data.IterableDataset):
    """
    Wrapper around a PyTorchVideo IterableDataset that exposes the
    clip/crop bookkeeping PySlowFast's loaders expect.
    """

    def __init__(self, num_videos, clips_per_video, crops_per_clip, dataset):
        """
        Construct the dataset wrapper.
        Args:
            num_videos (int): number of videos in the dataset.
            clips_per_video (int): number of clips sampled per video.
            crops_per_clip (int): number of spatial crops taken per clip.
            dataset (torch.utils.data.IterableDataset): a PyTorchVideo dataset.
        """
        self._clips_per_video = clips_per_video
        self._crops_per_clip = crops_per_clip
        self._num_videos = num_videos
        self.dataset = dataset

    def __next__(self):
        """
        Retrieves the next clip from the wrapped dataset.
        """
        return self.dataset.__next__()

    def sampler(self):
        """
        Returns:
            (torch.utils.data.Sampler): video sampler of the wrapped dataset.
        """
        return self.dataset.video_sampler

    def __len__(self):
        """
        Returns:
            (int): the number of clips per replica in the IterableDataset.
        """
        # Fix: `sampler` is a plain method here, so it must be called before
        # taking len(); `len(self.sampler)` raises TypeError on a bound method.
        return (
            len(self.sampler()) * self._clips_per_video * self._crops_per_clip
        )

    def num_videos(self):
        """
        Returns:
            (int): the total number of clips in the dataset.
        """
        return self._num_videos * self._clips_per_video * self._crops_per_clip

    def __iter__(self):
        return self
class PackPathway(torch.nn.Module):
    """
    Transform that splits a clip tensor into a list of per-pathway tensors,
    one entry per pathway of the configured model.
    """

    def __init__(self, cfg):
        super().__init__()
        self.cfg = cfg

    def forward(self, x: torch.Tensor):
        # Delegate to the shared dataset utility so pathway packing stays
        # consistent across loaders.
        packed = utils.pack_pathway_output(self.cfg, x)
        return packed
class DictToTuple(torch.nn.Module):
    """
    Transform converting a PyTorchVideo clip dict into the
    (frames, label, index, extra) tuple format used by PySlowFast loaders.
    """

    def __init__(self, num_clips, num_crops):
        """
        Args:
            num_clips (int): clips sampled per video.
            num_crops (int): spatial crops taken per clip.
        """
        super().__init__()
        self._num_clips = num_clips
        self._num_crops = num_crops

    def forward(self, x: Dict[str, torch.Tensor]):
        # Flatten (video, clip, crop) indices into one global clip index.
        per_video = self._num_clips * self._num_crops
        flat_index = (
            x["video_index"] * per_video
            + x["clip_index"] * self._num_crops
            + x["aug_index"]
        )
        return x["video"], x["label"], flat_index, {}
def div255(x):
    """
    Normalize clip frames from the [0, 255] byte range into [0, 1].

    Args:
        x (Tensor): clip RGB frames of shape (channel, time, height, width).

    Returns:
        Tensor: the input divided by 255.
    """
    scaled = x / 255.0
    return scaled
def rgb2bgr(x):
    """
    Reverse the channel order of a clip from RGB to BGR.

    Args:
        x (Tensor): clip frames of shape (channel, time, height, width).

    Returns:
        Tensor: the same frames with channels reordered as B, G, R.
    """
    bgr_order = [2, 1, 0]
    return x[bgr_order, ...]
The provided code snippet includes the necessary dependencies for implementing the `Ptvssv2` function. Write a Python function `def Ptvssv2(cfg, mode)` to solve the following problem:
Construct the PyTorchVideo Something-Something v2 (SSv2) video loader. Load SSv2 data (frame paths, labels, etc.) into an SSv2 Dataset object. The dataset can be downloaded from the Something-Something official website (https://20bn.com/datasets/something-something). Please see datasets/DATASET.md for more information about the data format. For training and validation, a single clip is randomly sampled from every video with random cropping and scaling. For testing, multiple clips are uniformly sampled from every video with uniform cropping. For uniform cropping, we take the left, center, and right crop if the width is larger than the height, or the top, center, and bottom crop if the height is larger than the width. Args: cfg (CfgNode): configs. mode (string): Options include `train`, `val`, or `test` mode.
Here is the function:
def Ptvssv2(cfg, mode):
    """
    Construct PyTorchVideo Something-Something v2 (SSv2) video loader.
    Load SSv2 data (frame paths, labels, etc.) to an SSv2 Dataset object.
    The dataset could be downloaded from the Something-Something official
    website (https://20bn.com/datasets/something-something).
    Please see datasets/DATASET.md for more information about the data format.
    For training and validation, a single clip is randomly sampled from every
    video with random cropping and scaling. For testing, multiple clips are
    uniformly sampled from every video with uniform cropping. For uniform cropping,
    we take the left, center, and right crop if the width is larger than height,
    or take top, center, and bottom crop if the height is larger than the width.
    Args:
        cfg (CfgNode): configs.
        mode (string): Options include `train`, `val`, or `test` mode.
    """
    # Only support train, val, and test mode.
    assert mode in [
        "train",
        "val",
        "test",
    ], "Split '{}' not supported".format(mode)
    # Fix: this log message previously said "Ptvcharades" — a copy-paste slip
    # from the Charades loader that made logs misleading.
    logger.info("Constructing Ptvssv2 {}...".format(mode))
    if mode in ["train", "val"]:
        # Train/val: one clip per video with scale/crop jitter and optional
        # horizontal flip; frames are converted to BGR before pathway packing.
        num_clips = 1
        num_crops = 1
        transform = Compose(
            [
                ApplyTransformToKey(
                    key="video",
                    transform=Compose(
                        [
                            Lambda(div255),
                            NormalizeVideo(cfg.DATA.MEAN, cfg.DATA.STD),
                            RandomShortSideScale(
                                min_size=cfg.DATA.TRAIN_JITTER_SCALES[0],
                                max_size=cfg.DATA.TRAIN_JITTER_SCALES[1],
                            ),
                            RandomCropVideo(cfg.DATA.TRAIN_CROP_SIZE),
                            Lambda(rgb2bgr),
                        ]
                        + (
                            [RandomHorizontalFlipVideo(p=0.5)]
                            if cfg.DATA.RANDOM_FLIP
                            else []
                        )
                        + [PackPathway(cfg)]
                    ),
                ),
                DictToTuple(num_clips, num_crops),
            ]
        )
        clip_sampler = make_clip_sampler(
            "constant_clips_per_video",
            1,  # Put arbitrary duration as ssv2 always needs full video clip.
            num_clips,
            num_crops,
        )
        if cfg.NUM_GPUS > 1:
            video_sampler = DistributedSampler
        else:
            video_sampler = (
                RandomSampler if mode == "train" else SequentialSampler
            )
    else:
        # Test: a single temporal view with NUM_SPATIAL_CROPS uniform crops.
        assert cfg.TEST.NUM_ENSEMBLE_VIEWS == 1
        num_clips = cfg.TEST.NUM_ENSEMBLE_VIEWS
        num_crops = cfg.TEST.NUM_SPATIAL_CROPS
        transform = Compose(
            [
                ApplyTransformToKey(
                    key="video",
                    transform=Compose(
                        [
                            Lambda(div255),
                            NormalizeVideo(cfg.DATA.MEAN, cfg.DATA.STD),
                            ShortSideScale(size=cfg.DATA.TEST_CROP_SIZE),
                        ]
                    ),
                ),
                UniformCropVideo(size=cfg.DATA.TEST_CROP_SIZE),
                ApplyTransformToKey(
                    key="video",
                    transform=Compose(
                        [Lambda(rgb2bgr), PackPathway(cfg)],
                    ),
                ),
                DictToTuple(num_clips, num_crops),
            ]
        )
        clip_sampler = make_clip_sampler(
            "constant_clips_per_video",
            1,  # Put arbitrary duration as ssv2 always needs full video clip.
            num_clips,
            num_crops,
        )
        video_sampler = (
            DistributedSampler if cfg.NUM_GPUS > 1 else SequentialSampler
        )
    label_name_file = os.path.join(
        cfg.DATA.PATH_TO_DATA_DIR, "something-something-v2-labels.json"
    )
    # Note: both "val" and "test" modes read the validation annotations
    # (validation json and val.csv); there is no held-out labeled test split.
    video_label_file = os.path.join(
        cfg.DATA.PATH_TO_DATA_DIR,
        "something-something-v2-{}.json".format(
            "train" if mode == "train" else "validation"
        ),
    )
    data_path = os.path.join(
        cfg.DATA.PATH_TO_DATA_DIR,
        "{}.csv".format("train" if mode == "train" else "val"),
    )
    dataset = SSv2(
        label_name_file=label_name_file,
        video_label_file=video_label_file,
        video_path_label_file=data_path,
        clip_sampler=clip_sampler,
        video_sampler=video_sampler,
        transform=transform,
        video_path_prefix=cfg.DATA.PATH_PREFIX,
        frames_per_clip=cfg.DATA.NUM_FRAMES,
        rand_sample_frames=mode == "train",
    )
    logger.info(
        "Constructing ssv2 dataloader (size: {}) from {}".format(
            len(dataset._path_to_videos), data_path
        )
    )
    return PTVDatasetWrapper(
        num_videos=len(dataset._path_to_videos),
        clips_per_video=num_clips,
        crops_per_clip=num_crops,
        dataset=dataset,
    )
22,391 | import torch
import torch.nn as nn
class ResNetBasicStem(nn.Module):
    """
    ResNe(X)t 3D stem: one spatiotemporal convolution followed by batch
    norm, ReLU, and spatial max pooling.
    """

    def __init__(
        self,
        dim_in,
        dim_out,
        kernel,
        stride,
        padding,
        inplace_relu=True,
        eps=1e-5,
        bn_mmt=0.1,
        norm_module=nn.BatchNorm3d,
    ):
        """
        Args:
            dim_in (int): input channel dimension (3 for RGB, 2 or 3 for
                optical flow).
            dim_out (int): output channel dimension of the stem convolution.
            kernel (list): [temporal, height, width] kernel sizes.
            stride (list): [temporal, height, width] strides.
            padding (list): [temporal, height, width] paddings.
            inplace_relu (bool): compute the ReLU in place.
            eps (float): batch-norm epsilon.
            bn_mmt (float): batch-norm momentum (PyTorch momentum =
                1 - Caffe2 momentum).
            norm_module (nn.Module): normalization layer class; defaults to
                nn.BatchNorm3d.
        """
        super(ResNetBasicStem, self).__init__()
        self.kernel = kernel
        self.stride = stride
        self.padding = padding
        self.inplace_relu = inplace_relu
        self.eps = eps
        self.bn_mmt = bn_mmt
        self._construct_stem(dim_in, dim_out, norm_module)

    def _construct_stem(self, dim_in, dim_out, norm_module):
        # Submodule names (conv/bn/relu/pool_layer) are kept stable so
        # checkpoints keyed on them continue to load.
        self.conv = nn.Conv3d(
            dim_in,
            dim_out,
            self.kernel,
            stride=self.stride,
            padding=self.padding,
            bias=False,
        )
        self.bn = norm_module(
            num_features=dim_out, eps=self.eps, momentum=self.bn_mmt
        )
        self.relu = nn.ReLU(self.inplace_relu)
        # Pool only spatially: temporal kernel/stride of 1.
        self.pool_layer = nn.MaxPool3d(
            kernel_size=[1, 3, 3], stride=[1, 2, 2], padding=[0, 1, 1]
        )

    def forward(self, x):
        for layer in (self.conv, self.bn, self.relu, self.pool_layer):
            x = layer(x)
        return x
class X3DStem(nn.Module):
    """
    X3D 3D stem: a spatial convolution followed by a depthwise temporal
    convolution, then batch norm and ReLU (no pooling).
    """

    def __init__(
        self,
        dim_in,
        dim_out,
        kernel,
        stride,
        padding,
        inplace_relu=True,
        eps=1e-5,
        bn_mmt=0.1,
        norm_module=nn.BatchNorm3d,
    ):
        """
        Args:
            dim_in (int): input channel dimension (3 for RGB, 2 or 3 for
                optical flow).
            dim_out (int): output channel dimension of the stem.
            kernel (list): [temporal, height, width] kernel sizes.
            stride (list): [temporal, height, width] strides.
            padding (list): [temporal, height, width] paddings.
            inplace_relu (bool): compute the ReLU in place.
            eps (float): batch-norm epsilon.
            bn_mmt (float): batch-norm momentum (PyTorch momentum =
                1 - Caffe2 momentum).
            norm_module (nn.Module): normalization layer class; defaults to
                nn.BatchNorm3d.
        """
        super(X3DStem, self).__init__()
        self.kernel = kernel
        self.stride = stride
        self.padding = padding
        self.inplace_relu = inplace_relu
        self.eps = eps
        self.bn_mmt = bn_mmt
        self._construct_stem(dim_in, dim_out, norm_module)

    def _construct_stem(self, dim_in, dim_out, norm_module):
        # Submodule names (conv_xy/conv/bn/relu) are kept stable so
        # checkpoints keyed on them continue to load.
        # Spatial-only convolution (temporal extent of 1).
        self.conv_xy = nn.Conv3d(
            dim_in,
            dim_out,
            kernel_size=(1, self.kernel[1], self.kernel[2]),
            stride=(1, self.stride[1], self.stride[2]),
            padding=(0, self.padding[1], self.padding[2]),
            bias=False,
        )
        # Depthwise temporal convolution (groups == channels).
        self.conv = nn.Conv3d(
            dim_out,
            dim_out,
            kernel_size=(self.kernel[0], 1, 1),
            stride=(self.stride[0], 1, 1),
            padding=(self.padding[0], 0, 0),
            bias=False,
            groups=dim_out,
        )
        self.bn = norm_module(
            num_features=dim_out, eps=self.eps, momentum=self.bn_mmt
        )
        self.relu = nn.ReLU(self.inplace_relu)

    def forward(self, x):
        for layer in (self.conv_xy, self.conv, self.bn, self.relu):
            x = layer(x)
        return x
The provided code snippet includes necessary dependencies for implementing the `get_stem_func` function. Write a Python function `def get_stem_func(name)` to solve the following problem:
Retrieves the stem module by name.
Here is the function:
def get_stem_func(name):
    """
    Retrieve a stem module class by name.

    Args:
        name (str): either "x3d_stem" or "basic_stem".

    Returns:
        (type): the requested stem class (X3DStem or ResNetBasicStem).
    """
    stem_funcs = {"x3d_stem": X3DStem, "basic_stem": ResNetBasicStem}
    # Fix: the error message previously said "Transformation function",
    # copied from the trans-func lookup; this is a stem lookup.
    assert (
        name in stem_funcs.keys()
    ), "Stem function '{}' not supported".format(name)
    return stem_funcs[name]
22,392 | import numpy as np
import torch
import slowfast.utils.logging as logging
logger = logging.get_logger(__name__)
def round_width(width, multiplier, min_width=1, divisor=1, verbose=False):
    """
    Scale `width` by `multiplier` and round to the nearest multiple of
    `divisor`, clamped below by `min_width` (or `divisor` if `min_width`
    is falsy) and never dropping more than 10% below the scaled width.
    """
    if not multiplier:
        # A zero/None multiplier disables rounding entirely.
        return width

    scaled = width * multiplier
    floor = min_width or divisor
    rounded = int(scaled + divisor / 2) // divisor * divisor
    if verbose:
        logger.info(f"min width {floor}")
        logger.info(f"width {scaled} divisor {divisor}")
        logger.info(f"other {rounded}")
    result = max(floor, rounded)
    # If rounding lost more than 10% of the width, bump to the next multiple.
    if result < 0.9 * scaled:
        result += divisor
    return int(result)
22,393 | import numpy as np
import torch
import slowfast.utils.logging as logging
The provided code snippet includes necessary dependencies for implementing the `validate_checkpoint_wrapper_import` function. Write a Python function `def validate_checkpoint_wrapper_import(checkpoint_wrapper)` to solve the following problem:
Check if checkpoint_wrapper is imported.
Here is the function:
def validate_checkpoint_wrapper_import(checkpoint_wrapper):
    """
    Ensure fairscale's checkpoint_wrapper was successfully imported.

    Raises:
        ImportError: if `checkpoint_wrapper` is None (fairscale missing).
    """
    if checkpoint_wrapper is not None:
        return
    raise ImportError("Please install fairscale.")
22,394 | import numpy as np
import torch
import slowfast.utils.logging as logging
The provided code snippet includes necessary dependencies for implementing the `get_gkern` function. Write a Python function `def get_gkern(kernlen, std)` to solve the following problem:
Returns a 2D Gaussian kernel array.
Here is the function:
def get_gkern(kernlen, std):
    """Return a normalized 2D Gaussian kernel of shape (kernlen, kernlen)."""

    def _gaussian_1d(length, sigma):
        # Centered sample positions, scaled by sigma.
        n = torch.arange(0, length).float()
        n -= n.mean()
        n /= sigma
        return torch.exp(-0.5 * n**2)

    g1 = _gaussian_1d(kernlen, std)
    g2 = torch.outer(g1, g1)
    # Normalize so the kernel sums to 1.
    return g2 / g2.sum()
22,395 | import numpy as np
import torch
import slowfast.utils.logging as logging
def get_2d_sincos_pos_embed_from_grid(embed_dim, grid):
    """
    Build 2D sin-cos positional embeddings by concatenating independent 1D
    embeddings of the grid's two axes.

    Args:
        embed_dim (int): output embedding dimension (must be even); half is
            spent on each axis.
        grid: array of shape (2, ...) holding the H and W coordinate grids.

    Returns:
        ndarray of shape (H*W, embed_dim).
    """
    assert embed_dim % 2 == 0
    half = embed_dim // 2
    emb_h = get_1d_sincos_pos_embed_from_grid(half, grid[0])  # (H*W, D/2)
    emb_w = get_1d_sincos_pos_embed_from_grid(half, grid[1])  # (H*W, D/2)
    return np.concatenate([emb_h, emb_w], axis=1)  # (H*W, D)
def get_1d_sincos_pos_embed_from_grid(embed_dim, pos):
    """
    Build 1D sin-cos positional embeddings.

    embed_dim: output dimension for each position (must be even)
    pos: a list of positions to be encoded: size (M,)
    out: (M, D) with the first half sine and second half cosine features
    """
    assert embed_dim % 2 == 0
    # Inverse frequencies: geometric progression down from 1 to ~1/10000.
    freqs = np.arange(embed_dim // 2, dtype=float)
    freqs /= embed_dim / 2.0
    freqs = 1.0 / 10000**freqs  # (D/2,)

    angles = np.einsum("m,d->md", pos.reshape(-1), freqs)  # (M, D/2)
    return np.concatenate([np.sin(angles), np.cos(angles)], axis=1)  # (M, D)
The provided code snippet includes necessary dependencies for implementing the `get_3d_sincos_pos_embed` function. Write a Python function `def get_3d_sincos_pos_embed(embed_dim, grid_size, t_size, cls_token=False)` to solve the following problem:
grid_size: int of the grid height and width t_size: int of the temporal size return: pos_embed: [t_size*grid_size*grid_size, embed_dim] or [1+t_size*grid_size*grid_size, embed_dim] (w/ or w/o cls_token)
Here is the function:
def get_3d_sincos_pos_embed(embed_dim, grid_size, t_size, cls_token=False):
    """Build a separable 3D (space-time) sin-cos positional embedding.

    3/4 of the channels encode the spatial (H, W) position and 1/4 encode
    the temporal position; both encodings are tiled over every (t, h, w)
    location and concatenated channel-wise.

    Args:
        embed_dim (int): total embedding dimension; must be divisible by 4.
        grid_size (int): the grid height and width.
        t_size (int): the temporal size.
        cls_token (bool): if True, prepend an all-zero row for the class token.

    Returns:
        np.ndarray: [t_size*grid_size*grid_size, embed_dim] or
        [1+t_size*grid_size*grid_size, embed_dim] (w/ or w/o cls_token).
    """
    assert embed_dim % 4 == 0
    spatial_dim = embed_dim // 4 * 3
    temporal_dim = embed_dim // 4
    # Spatial part: 2D sin-cos embedding over the (H, W) grid (w goes first).
    coords = np.arange(grid_size, dtype=np.float32)
    grid = np.stack(np.meshgrid(coords, coords), axis=0)
    grid = grid.reshape([2, 1, grid_size, grid_size])
    pos_spatial = get_2d_sincos_pos_embed_from_grid(spatial_dim, grid)
    # Temporal part: 1D sin-cos embedding over the frame index.
    pos_temporal = get_1d_sincos_pos_embed_from_grid(
        temporal_dim, np.arange(t_size, dtype=np.float32)
    )
    # Broadcast both parts to every (t, h*w) location, [T, H, W] order.
    pos_temporal = np.repeat(
        pos_temporal[:, np.newaxis, :], grid_size**2, axis=1
    )  # [T, H*W, D // 4]
    pos_spatial = np.repeat(
        pos_spatial[np.newaxis, :, :], t_size, axis=0
    )  # [T, H*W, D // 4 * 3]
    pos_embed = np.concatenate([pos_temporal, pos_spatial], axis=-1)
    pos_embed = pos_embed.reshape([-1, embed_dim])  # [T*H*W, D]
    if cls_token:
        pos_embed = np.concatenate(
            [np.zeros([1, embed_dim]), pos_embed], axis=0
        )
    return pos_embed
22,396 | import numpy as np
import torch
import slowfast.utils.logging as logging
def get_2d_sincos_pos_embed_from_grid(embed_dim, grid):
    """Compose a 2D sin-cos positional embedding from per-axis encodings.

    Args:
        embed_dim (int): total embedding dimension; must be even.
        grid: pair of coordinate arrays (``grid[0]``, ``grid[1]``).

    Returns:
        np.ndarray of shape (H*W, embed_dim).
    """
    assert embed_dim % 2 == 0
    per_axis_dim = embed_dim // 2
    # Each grid axis gets half of the channels; concatenate per position.
    axis_embeds = [
        get_1d_sincos_pos_embed_from_grid(per_axis_dim, axis_coords)
        for axis_coords in (grid[0], grid[1])
    ]
    return np.concatenate(axis_embeds, axis=1)  # (H*W, D)
The provided code snippet includes necessary dependencies for implementing the `get_2d_sincos_pos_embed` function. Write a Python function `def get_2d_sincos_pos_embed(embed_dim, grid_size, cls_token=False)` to solve the following problem:
grid_size: int of the grid height and width return: pos_embed: [grid_size*grid_size, embed_dim] or [1+grid_size*grid_size, embed_dim] (w/ or w/o cls_token)
Here is the function:
def get_2d_sincos_pos_embed(embed_dim, grid_size, cls_token=False):
    """Build a 2D sin-cos positional embedding for a square patch grid.

    Args:
        embed_dim (int): embedding dimension per position.
        grid_size (int): the grid height and width.
        cls_token (bool): if True, prepend an all-zero row for the class token.

    Returns:
        np.ndarray: [grid_size*grid_size, embed_dim] or
        [1+grid_size*grid_size, embed_dim] (w/ or w/o cls_token).
    """
    coords = np.arange(grid_size, dtype=np.float32)
    # meshgrid with w first, matching the row-major patch ordering.
    grid = np.stack(np.meshgrid(coords, coords), axis=0)
    grid = grid.reshape([2, 1, grid_size, grid_size])
    pos_embed = get_2d_sincos_pos_embed_from_grid(embed_dim, grid)
    if cls_token:
        pos_embed = np.concatenate(
            [np.zeros([1, embed_dim]), pos_embed], axis=0
        )
    return pos_embed
22,397 | import numpy as np
import torch
import slowfast.utils.logging as logging
def interpolate_pos_embed(model, checkpoint_model):
    """Resize a checkpoint's 2D position embedding to match ``model``'s grid.

    Mutates ``checkpoint_model["pos_embed"]`` in place; a no-op when the key
    is absent or when the checkpoint grid already matches. Extra tokens
    (cls/dist) are kept unchanged; only the patch-position tokens are
    bicubically interpolated. Both grids are assumed square.

    Args:
        model: module exposing ``patch_embed.num_patches`` and ``pos_embed``.
        checkpoint_model (dict): checkpoint state dict to fix up.
    """
    if "pos_embed" not in checkpoint_model:
        return
    ckpt_pos = checkpoint_model["pos_embed"]
    embed_dim = ckpt_pos.shape[-1]
    num_patches = model.patch_embed.num_patches
    num_extra = model.pos_embed.shape[-2] - num_patches
    # Side lengths of the old (checkpoint) and new (model) square grids.
    old_side = int((ckpt_pos.shape[-2] - num_extra) ** 0.5)
    new_side = int(num_patches**0.5)
    if old_side == new_side:
        return
    print(
        "Position interpolate from %dx%d to %dx%d"
        % (old_side, old_side, new_side, new_side)
    )
    # class_token and dist_token bypass interpolation.
    extra_tokens = ckpt_pos[:, :num_extra]
    patch_tokens = ckpt_pos[:, num_extra:]
    # (1, L, C) -> (1, C, H, W) for spatial interpolation.
    patch_tokens = patch_tokens.reshape(
        -1, old_side, old_side, embed_dim
    ).permute(0, 3, 1, 2)
    patch_tokens = torch.nn.functional.interpolate(
        patch_tokens,
        size=(new_side, new_side),
        mode="bicubic",
        align_corners=False,
    )
    patch_tokens = patch_tokens.permute(0, 2, 3, 1).flatten(1, 2)
    checkpoint_model["pos_embed"] = torch.cat(
        (extra_tokens, patch_tokens), dim=1
    )
22,398 | import numpy as np
import torch
import slowfast.utils.logging as logging
def calc_mvit_feature_geometry(cfg):
    """Compute per-block feature-map sizes and strides for an MViT backbone.

    Starts from the patchified input resolution and applies every configured
    query-pooling stride to each block at or after its start index.

    Args:
        cfg (CfgNode): config with DATA.NUM_FRAMES, DATA.TRAIN_CROP_SIZE and
            the MVIT.* geometry options.

    Returns:
        (list, list): ``feat_size[i]`` = [T, H, W] output size of block i,
        ``feat_stride[i]`` = cumulative [T, H, W] stride of block i.
    """
    patch_stride = cfg.MVIT.PATCH_STRIDE
    # A 2D patch stride (len <= 2) means no temporal patchification.
    has_temporal = len(patch_stride) > 2
    t_stride = patch_stride[0] if has_temporal else 1
    base_size = [
        cfg.DATA.NUM_FRAMES // t_stride if has_temporal else 1,
        cfg.DATA.TRAIN_CROP_SIZE // patch_stride[-2],
        cfg.DATA.TRAIN_CROP_SIZE // patch_stride[-1],
    ]
    base_stride = [t_stride, patch_stride[-2], patch_stride[-1]]
    feat_size = [list(base_size) for _ in range(cfg.MVIT.DEPTH)]
    feat_stride = [list(base_stride) for _ in range(cfg.MVIT.DEPTH)]
    # Each pooling entry [start_idx, sT, sH, sW] shrinks every block whose
    # index is >= start_idx by that stride.
    for pool in cfg.MVIT.POOL_Q_STRIDE:
        for blk in range(cfg.MVIT.DEPTH):
            if blk < pool[0]:
                continue
            for axis in range(len(feat_size[blk])):
                feat_size[blk][axis] = feat_size[blk][axis] // pool[axis + 1]
                feat_stride[blk][axis] = feat_stride[blk][axis] * pool[axis + 1]
    return feat_size, feat_stride
22,399 | import numpy
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.init import trunc_normal_
from slowfast.models.common import DropPath, Mlp
def attention_pool(tensor, pool, thw_shape, has_cls_embed=True, norm=None):
    """Apply a spatiotemporal pooling op to a flattened token tensor.

    The token axis is unflattened to (T, H, W) so ``pool`` (an nn.Module over
    5D NCTHW input) can be applied, then re-flattened. An optional class
    token bypasses pooling and is re-attached in front.

    Args:
        tensor (torch.Tensor): (B, N, L, C) or (B, L, C) tokens, where
            L == T*H*W (+1 if ``has_cls_embed``).
        pool (nn.Module or None): pooling op; None makes this a no-op.
        thw_shape (list): [T, H, W] of the token grid before pooling.
        has_cls_embed (bool): whether index 0 of the token axis is a class
            token that should skip pooling.
        norm (nn.Module or None): optional normalization applied after
            pooling (and cls re-attachment).

    Returns:
        (torch.Tensor, list): pooled tokens (same rank as the input) and the
        new [T, H, W] shape.
    """
    if pool is None:
        return tensor, thw_shape
    squeezed_head_dim = tensor.ndim == 3
    if squeezed_head_dim:
        # Insert a singleton head axis so both layouts share one code path.
        tensor = tensor.unsqueeze(1)
    elif tensor.ndim != 4:
        raise NotImplementedError(f"Unsupported input dimension {tensor.shape}")
    cls_tok = None
    if has_cls_embed:
        cls_tok = tensor[:, :, :1, :]
        tensor = tensor[:, :, 1:, :]
    B, N, _, C = tensor.shape
    T, H, W = thw_shape
    # (B, N, L, C) -> (B*N, C, T, H, W) for the 3D pooling op.
    tensor = (
        tensor.reshape(B * N, T, H, W, C).permute(0, 4, 1, 2, 3).contiguous()
    )
    tensor = pool(tensor)
    thw_shape = list(tensor.shape[2:5])
    L_pooled = thw_shape[0] * thw_shape[1] * thw_shape[2]
    # Flatten back to (B, N, L_pooled, C).
    tensor = tensor.reshape(B, N, C, L_pooled).transpose(2, 3)
    if cls_tok is not None:
        tensor = torch.cat((cls_tok, tensor), dim=2)
    if norm is not None:
        tensor = norm(tensor)
    if squeezed_head_dim:
        tensor = tensor.squeeze(1)
    return tensor, thw_shape
22,400 | import numpy
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.init import trunc_normal_
from slowfast.models.common import DropPath, Mlp
def get_rel_pos(rel_pos, d):
    """Return the relative-position table resized to ``d`` entries.

    Args:
        rel_pos (torch.Tensor): (L, C) table of relative-position embeddings.
        d (int): desired number of relative positions.

    Returns:
        torch.Tensor: (d, C) table; the input itself when no resize is
        needed. NOTE(review): for a non-int ``d`` the function falls through
        and returns None — callers appear to always pass ints.
    """
    if not isinstance(d, int):
        return None
    if rel_pos.shape[0] == d:
        return rel_pos
    # Linearly resample the table along the position axis.
    resized = F.interpolate(
        rel_pos.reshape(1, rel_pos.shape[0], -1).permute(0, 2, 1),
        size=d,
        mode="linear",
    )
    return resized.reshape(-1, d).permute(1, 0)
The provided code snippet includes necessary dependencies for implementing the `cal_rel_pos_spatial` function. Write a Python function `def cal_rel_pos_spatial( attn, q, k, has_cls_embed, q_shape, k_shape, rel_pos_h, rel_pos_w )` to solve the following problem:
Decomposed Spatial Relative Positional Embeddings.
Here is the function:
def cal_rel_pos_spatial(
    attn, q, k, has_cls_embed, q_shape, k_shape, rel_pos_h, rel_pos_w
):
    """
    Decomposed Spatial Relative Positional Embeddings.

    Adds a (height, width)-decomposed relative positional bias to the
    attention logits in place and returns ``attn``.

    Args:
        attn (torch.Tensor): attention logits, (B, num_heads, q_N, k_N).
        q (torch.Tensor): queries, (B, num_heads, q_N, dim).
        k: unused here beyond the signature (only ``k_shape`` is consumed).
        has_cls_embed (bool): if True, token index 0 is the class token and
            receives no positional bias.
        q_shape, k_shape: (T, H, W) token-grid shapes of queries / keys.
        rel_pos_h, rel_pos_w (torch.Tensor): learned relative-position tables
            for the height and width axes.

    Returns:
        torch.Tensor: ``attn`` with the spatial bias added (modified in place).
    """
    sp_idx = 1 if has_cls_embed else 0
    q_t, q_h, q_w = q_shape
    k_t, k_h, k_w = k_shape
    # Number of possible relative offsets per axis.
    dh = int(2 * max(q_h, k_h) - 1)
    dw = int(2 * max(q_w, k_w) - 1)
    # Scale up rel pos if shapes for q and k are different.
    q_h_ratio = max(k_h / q_h, 1.0)
    k_h_ratio = max(q_h / k_h, 1.0)
    dist_h = (
        torch.arange(q_h)[:, None] * q_h_ratio
        - torch.arange(k_h)[None, :] * k_h_ratio
    )
    # Shift so the smallest offset indexes entry 0 of the table.
    dist_h += (k_h - 1) * k_h_ratio
    q_w_ratio = max(k_w / q_w, 1.0)
    k_w_ratio = max(q_w / k_w, 1.0)
    dist_w = (
        torch.arange(q_w)[:, None] * q_w_ratio
        - torch.arange(k_w)[None, :] * k_w_ratio
    )
    dist_w += (k_w - 1) * k_w_ratio
    # Interpolate rel pos if needed.
    rel_pos_h = get_rel_pos(rel_pos_h, dh)
    rel_pos_w = get_rel_pos(rel_pos_w, dw)
    # Gather per-(query, key) embeddings from the offset tables.
    Rh = rel_pos_h[dist_h.long()]
    Rw = rel_pos_w[dist_w.long()]
    B, n_head, q_N, dim = q.shape
    r_q = q[:, :, sp_idx:].reshape(B, n_head, q_t, q_h, q_w, dim)
    # Project queries onto the per-axis relative embeddings.
    rel_h_q = torch.einsum(
        "bythwc,hkc->bythwk", r_q, Rh
    )  # [B, H, q_t, qh, qw, k_h]
    rel_w_q = torch.einsum(
        "bythwc,wkc->bythwk", r_q, Rw
    )  # [B, H, q_t, qh, qw, k_w]
    # Broadcast-add both axis biases over the unflattened key grid, skipping
    # the class-token row/column.
    attn[:, :, sp_idx:, sp_idx:] = (
        attn[:, :, sp_idx:, sp_idx:].view(B, -1, q_t, q_h, q_w, k_t, k_h, k_w)
        + rel_h_q[:, :, :, :, :, None, :, None]
        + rel_w_q[:, :, :, :, :, None, None, :]
    ).view(B, -1, q_t * q_h * q_w, k_t * k_h * k_w)
    return attn | Decomposed Spatial Relative Positional Embeddings.
22,401 | import numpy
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.init import trunc_normal_
from slowfast.models.common import DropPath, Mlp
def get_rel_pos(rel_pos, d):
    """Resize a relative-position table to ``d`` entries if necessary.

    Args:
        rel_pos (torch.Tensor): (L, C) relative-position embedding table.
        d (int): desired number of relative positions.

    Returns:
        torch.Tensor or None: (d, C) table (the input itself when sizes
        already match); None when ``d`` is not an int, mirroring the
        original fall-through behavior.
    """
    if isinstance(d, int):
        ori_d = rel_pos.shape[0]
        if ori_d != d:
            # Treat the table as a 1D signal per channel and resample it.
            flat = rel_pos.reshape(1, ori_d, -1).permute(0, 2, 1)
            flat = F.interpolate(flat, size=d, mode="linear")
            rel_pos = flat.reshape(-1, d).permute(1, 0)
        return rel_pos
The provided code snippet includes necessary dependencies for implementing the `cal_rel_pos_temporal` function. Write a Python function `def cal_rel_pos_temporal(attn, q, has_cls_embed, q_shape, k_shape, rel_pos_t)` to solve the following problem:
Temporal Relative Positional Embeddings.
Here is the function:
def cal_rel_pos_temporal(attn, q, has_cls_embed, q_shape, k_shape, rel_pos_t):
    """
    Temporal Relative Positional Embeddings.

    Adds a relative positional bias over the temporal axis to the attention
    logits in place and returns ``attn``.

    Args:
        attn (torch.Tensor): attention logits, (B, num_heads, q_N, k_N).
        q (torch.Tensor): queries, (B, num_heads, q_N, dim).
        has_cls_embed (bool): if True, token index 0 is the class token and
            receives no positional bias.
        q_shape, k_shape: (T, H, W) token-grid shapes of queries / keys.
        rel_pos_t (torch.Tensor): learned temporal relative-position table.

    Returns:
        torch.Tensor: ``attn`` with the temporal bias added (modified in place).
    """
    sp_idx = 1 if has_cls_embed else 0
    q_t, q_h, q_w = q_shape
    k_t, k_h, k_w = k_shape
    # Number of possible temporal offsets.
    dt = int(2 * max(q_t, k_t) - 1)
    # Interpolate rel pos if needed.
    rel_pos_t = get_rel_pos(rel_pos_t, dt)
    # Scale up rel pos if shapes for q and k are different.
    q_t_ratio = max(k_t / q_t, 1.0)
    k_t_ratio = max(q_t / k_t, 1.0)
    dist_t = (
        torch.arange(q_t)[:, None] * q_t_ratio
        - torch.arange(k_t)[None, :] * k_t_ratio
    )
    # Shift so the smallest offset indexes entry 0 of the table.
    dist_t += (k_t - 1) * k_t_ratio
    Rt = rel_pos_t[dist_t.long()]
    B, n_head, q_N, dim = q.shape
    r_q = q[:, :, sp_idx:].reshape(B, n_head, q_t, q_h, q_w, dim)
    # [B, H, q_t, q_h, q_w, dim] -> [q_t, B, H, q_h, q_w, dim] -> [q_t, B*H*q_h*q_w, dim]
    r_q = r_q.permute(2, 0, 1, 3, 4, 5).reshape(
        q_t, B * n_head * q_h * q_w, dim
    )
    # [q_t, B*H*q_h*q_w, dim] * [q_t, dim, k_t] = [q_t, B*H*q_h*q_w, k_t] -> [B*H*q_h*q_w, q_t, k_t]
    rel = torch.matmul(r_q, Rt.transpose(1, 2)).transpose(0, 1)
    # [B*H*q_h*q_w, q_t, k_t] -> [B, H, q_t, q_h, q_w, k_t]
    rel = rel.view(B, n_head, q_h, q_w, q_t, k_t).permute(0, 1, 4, 2, 3, 5)
    # Broadcast-add the temporal bias over the key's spatial axes, skipping
    # the class-token row/column.
    attn[:, :, sp_idx:, sp_idx:] = (
        attn[:, :, sp_idx:, sp_idx:].view(B, -1, q_t, q_h, q_w, k_t, k_h, k_w)
        + rel[:, :, :, :, :, :, None, None]
    ).view(B, -1, q_t * q_h * q_w, k_t * k_h * k_w)
    return attn | Temporal Relative Positional Embeddings.
22,402 | from functools import partial
import torch.nn as nn
from detectron2.layers import ROIAlign
from slowfast.models.batchnorm_helper import get_norm
from slowfast.models.video_model_builder import _POOL1, _TEMPORAL_KERNEL_BASIS
from pytorchvideo.models.csn import create_csn
from pytorchvideo.models.head import (
create_res_basic_head,
create_res_roi_pooling_head,
)
from pytorchvideo.models.r2plus1d import (
create_2plus1d_bottleneck_block,
create_r2plus1d,
)
from pytorchvideo.models.resnet import create_bottleneck_block, create_resnet
from pytorchvideo.models.slowfast import create_slowfast
from pytorchvideo.models.vision_transformers import (
create_multiscale_vision_transformers,
)
from pytorchvideo.models.x3d import (
Swish,
create_x3d,
create_x3d_bottleneck_block,
)
from .build import MODEL_REGISTRY
The provided code snippet includes necessary dependencies for implementing the `get_head_act` function. Write a Python function `def get_head_act(act_func)` to solve the following problem:
Return the actual head activation function given the activation function name. Args: act_func (string): activation function to use. 'softmax': applies softmax on the output. 'sigmoid': applies sigmoid on the output. Returns: nn.Module: the activation layer.
Here is the function:
def get_head_act(act_func):
    """
    Return the actual head activation function given the activation function
    name.
    Args:
        act_func (string): activation function to use. 'softmax': applies
            softmax on the output (over dim 1). 'sigmoid': applies sigmoid on
            the output.
    Returns:
        nn.Module: the activation layer.
    Raises:
        NotImplementedError: for any unsupported activation name.
    """
    if act_func == "sigmoid":
        return nn.Sigmoid()
    if act_func == "softmax":
        return nn.Softmax(dim=1)
    raise NotImplementedError(
        "{} is not supported as a head activation "
        "function.".format(act_func)
    )
22,403 | from functools import partial
import torch
import torch.nn as nn
from pytorchvideo.layers.batch_norm import (
NaiveSyncBatchNorm1d,
NaiveSyncBatchNorm3d,
)
class SubBatchNorm3d(nn.Module):
    """
    The standard BN layer computes stats across all examples in a GPU. In some
    cases it is desirable to compute stats across only a subset of examples
    (e.g., in multigrid training https://arxiv.org/abs/1912.00998).
    SubBatchNorm3d splits the batch dimension into N splits, and run BN on
    each of them separately (so that the stats are computed on each subset of
    examples (1/N of batch) independently. During evaluation, it aggregates
    the stats from all splits into one BN.
    """
    def __init__(self, num_splits, **args):
        """
        Args:
            num_splits (int): number of splits.
            args (list): other arguments passed through to nn.BatchNorm3d
                (must include "num_features").
        """
        super(SubBatchNorm3d, self).__init__()
        self.num_splits = num_splits
        num_features = args["num_features"]
        # Keep only one set of weight and bias; the inner BN layers are
        # created affine-free and the shared affine is applied in forward().
        if args.get("affine", True):
            self.affine = True
            args["affine"] = False
            self.weight = torch.nn.Parameter(torch.ones(num_features))
            self.bias = torch.nn.Parameter(torch.zeros(num_features))
        else:
            self.affine = False
        # `bn` holds the aggregated stats used at eval time; `split_bn`
        # stacks the splits into the channel dim so one BN op computes
        # independent per-split stats at train time.
        self.bn = nn.BatchNorm3d(**args)
        args["num_features"] = num_features * num_splits
        self.split_bn = nn.BatchNorm3d(**args)
    def _get_aggregated_mean_std(self, means, stds, n):
        """
        Calculate the aggregated mean and stds.
        Args:
            means (tensor): mean values.
            stds (tensor): standard deviations (running variances).
            n (int): number of sets of means and stds.
        """
        mean = means.view(n, -1).sum(0) / n
        # Law of total variance: mean of per-split variances plus the
        # variance of the per-split means.
        std = (
            stds.view(n, -1).sum(0) / n
            + ((means.view(n, -1) - mean) ** 2).view(n, -1).sum(0) / n
        )
        return mean.detach(), std.detach()
    def aggregate_stats(self):
        """
        Synchronize running_mean, and running_var. Call this before eval.
        """
        if self.split_bn.track_running_stats:
            (
                self.bn.running_mean.data,
                self.bn.running_var.data,
            ) = self._get_aggregated_mean_std(
                self.split_bn.running_mean,
                self.split_bn.running_var,
                self.num_splits,
            )
    def forward(self, x):
        # Train: fold the split dimension into channels so split_bn computes
        # independent stats per split; eval: use the aggregated stats in bn.
        if self.training:
            n, c, t, h, w = x.shape
            x = x.view(n // self.num_splits, c * self.num_splits, t, h, w)
            x = self.split_bn(x)
            x = x.view(n, c, t, h, w)
        else:
            x = self.bn(x)
        # Apply the single shared affine transform outside the BN ops.
        if self.affine:
            x = x * self.weight.view((-1, 1, 1, 1))
            x = x + self.bias.view((-1, 1, 1, 1))
        return x
The provided code snippet includes necessary dependencies for implementing the `get_norm` function. Write a Python function `def get_norm(cfg)` to solve the following problem:
Args: cfg (CfgNode): model building configs, details are in the comments of the config file. Returns: nn.Module: the normalization layer.
Here is the function:
def get_norm(cfg):
    """
    Select the 3D normalization layer specified by ``cfg.BN.NORM_TYPE``.
    Args:
        cfg (CfgNode): model building configs, details are in the comments of
            the config file.
    Returns:
        nn.Module class (or partial factory) for the normalization layer.
    Raises:
        NotImplementedError: for an unrecognized norm type.
    """
    norm_type = cfg.BN.NORM_TYPE
    # Plain BN; "sync_batchnorm_apex" is converted to apex SyncBN later.
    if norm_type in ("batchnorm", "sync_batchnorm_apex"):
        return nn.BatchNorm3d
    # Sub-batch BN: stats computed over 1/NUM_SPLITS of the batch.
    if norm_type == "sub_batchnorm":
        return partial(SubBatchNorm3d, num_splits=cfg.BN.NUM_SPLITS)
    # Cross-GPU synchronized BN.
    if norm_type == "sync_batchnorm":
        return partial(
            NaiveSyncBatchNorm3d,
            num_sync_devices=cfg.BN.NUM_SYNC_DEVICES,
            global_sync=cfg.BN.GLOBAL_SYNC,
        )
    raise NotImplementedError(
        "Norm type {} is not supported".format(norm_type)
    )
22,404 | import math
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import slowfast.models.losses as losses
import slowfast.utils.distributed as du
import slowfast.utils.logging as logging
from slowfast.models.video_model_builder import X3D, MViT, ResNet, SlowFast
from .build import MODEL_REGISTRY
def l2_loss(x, y):
    """Squared L2 distance between L2-normalized vectors, via inner product.

    For unit-norm inputs, ||x - y||^2 = 2 - 2 * <x, y>; this returns that
    value with the inner product taken over the last dimension.

    Args:
        x, y (torch.Tensor): tensors of matching shape.

    Returns:
        torch.Tensor: loss with the last dimension reduced away.
    """
    similarity = (x * y).sum(dim=-1)
    return 2 - 2 * similarity
22,405 | import torch
import torch.nn as nn
The provided code snippet includes necessary dependencies for implementing the `drop_path` function. Write a Python function `def drop_path(x, drop_prob: float = 0.0, training: bool = False)` to solve the following problem:
Stochastic Depth per sample.
Here is the function:
def drop_path(x, drop_prob: float = 0.0, training: bool = False):
    """
    Stochastic Depth per sample: randomly zero whole samples and rescale
    the survivors by 1 / keep_prob so the expectation is unchanged.

    Args:
        x (torch.Tensor): input of shape (batch, ...).
        drop_prob (float): probability of dropping each sample.
        training (bool): dropping is applied only in training mode.

    Returns:
        torch.Tensor: ``x`` itself when inactive, otherwise the masked and
        rescaled tensor.
    """
    if not training or drop_prob == 0.0:
        return x
    keep_prob = 1 - drop_prob
    # One Bernoulli draw per sample, broadcast across all remaining dims
    # (works with diff dim tensors, not just 2D ConvNets).
    mask_shape = (x.shape[0],) + (1,) * (x.ndim - 1)
    mask = torch.floor(
        keep_prob + torch.rand(mask_shape, dtype=x.dtype, device=x.device)
    )
    return x.div(keep_prob) * mask
22,406 | import torch
import torch.nn as nn
from slowfast.models.common import drop_path
from slowfast.models.nonlocal_helper import Nonlocal
from slowfast.models.operators import SE, Swish
class BasicTransform(nn.Module):
    """
    Basic transformation: Tx3x3, 1x3x3, where T is the size of temporal kernel.
    """
    def __init__(
        self,
        dim_in,
        dim_out,
        temp_kernel_size,
        stride,
        dim_inner=None,
        num_groups=1,
        stride_1x1=None,
        inplace_relu=True,
        eps=1e-5,
        bn_mmt=0.1,
        dilation=1,
        norm_module=nn.BatchNorm3d,
        block_idx=0,
    ):
        """
        Args:
            dim_in (int): the channel dimensions of the input.
            dim_out (int): the channel dimension of the output.
            temp_kernel_size (int): the temporal kernel sizes of the first
                convolution in the basic block.
            stride (int): the stride of the bottleneck.
            dim_inner (None): the inner dimension would not be used in
                BasicTransform.
            num_groups (int): number of groups for the convolution. Number of
                group is always 1 for BasicTransform.
            stride_1x1 (None): stride_1x1 will not be used in BasicTransform.
            inplace_relu (bool): if True, calculate the relu on the original
                input without allocating new memory.
            eps (float): epsilon for batch norm.
            bn_mmt (float): momentum for batch norm. Noted that BN momentum in
                PyTorch = 1 - BN momentum in Caffe2.
            dilation (int): size of dilation for the 1x3x3 conv.
            norm_module (nn.Module): nn.Module for the normalization layer. The
                default is nn.BatchNorm3d.
            block_idx (int): unused here; kept for signature parity with the
                other transform classes.
        """
        super(BasicTransform, self).__init__()
        self.temp_kernel_size = temp_kernel_size
        self._inplace_relu = inplace_relu
        self._eps = eps
        self._bn_mmt = bn_mmt
        self._construct(dim_in, dim_out, stride, dilation, norm_module)
    def _construct(self, dim_in, dim_out, stride, dilation, norm_module):
        # Tx3x3, BN, ReLU.
        self.a = nn.Conv3d(
            dim_in,
            dim_out,
            kernel_size=[self.temp_kernel_size, 3, 3],
            stride=[1, stride, stride],
            padding=[int(self.temp_kernel_size // 2), 1, 1],
            bias=False,
        )
        self.a_bn = norm_module(
            num_features=dim_out, eps=self._eps, momentum=self._bn_mmt
        )
        self.a_relu = nn.ReLU(inplace=self._inplace_relu)
        # 1x3x3, BN.
        self.b = nn.Conv3d(
            dim_out,
            dim_out,
            kernel_size=[1, 3, 3],
            stride=[1, 1, 1],
            padding=[0, dilation, dilation],
            dilation=[1, dilation, dilation],
            bias=False,
        )
        # NOTE(review): marker flags presumably consumed by weight-init /
        # transfer code elsewhere in the project — confirm.
        self.b.final_conv = True
        self.b_bn = norm_module(
            num_features=dim_out, eps=self._eps, momentum=self._bn_mmt
        )
        self.b_bn.transform_final_bn = True
    def forward(self, x):
        # conv -> BN -> ReLU, then conv -> BN (no final ReLU here; the
        # residual add/activation happens in the enclosing block).
        x = self.a(x)
        x = self.a_bn(x)
        x = self.a_relu(x)
        x = self.b(x)
        x = self.b_bn(x)
        return x
class X3DTransform(nn.Module):
    """
    X3D transformation: 1x1x1, Tx3x3 (channelwise, num_groups=dim_in), 1x1x1,
    augmented with (optional) SE (squeeze-excitation) on the 3x3x3 output.
    T is the temporal kernel size (defaulting to 3)
    """
    def __init__(
        self,
        dim_in,
        dim_out,
        temp_kernel_size,
        stride,
        dim_inner,
        num_groups,
        stride_1x1=False,
        inplace_relu=True,
        eps=1e-5,
        bn_mmt=0.1,
        dilation=1,
        norm_module=nn.BatchNorm3d,
        se_ratio=0.0625,
        swish_inner=True,
        block_idx=0,
    ):
        """
        Args:
            dim_in (int): the channel dimensions of the input.
            dim_out (int): the channel dimension of the output.
            temp_kernel_size (int): the temporal kernel sizes of the middle
                convolution in the bottleneck.
            stride (int): the stride of the bottleneck.
            dim_inner (int): the inner dimension of the block.
            num_groups (int): number of groups for the convolution. num_groups=1
                is for standard ResNet like networks, and num_groups>1 is for
                ResNeXt like networks.
            stride_1x1 (bool): if True, apply stride to 1x1 conv, otherwise
                apply stride to the 3x3 conv.
            inplace_relu (bool): if True, calculate the relu on the original
                input without allocating new memory.
            eps (float): epsilon for batch norm.
            bn_mmt (float): momentum for batch norm. Noted that BN momentum in
                PyTorch = 1 - BN momentum in Caffe2.
            dilation (int): size of dilation.
            norm_module (nn.Module): nn.Module for the normalization layer. The
                default is nn.BatchNorm3d.
            se_ratio (float): if > 0, apply SE to the Tx3x3 conv, with the SE
                channel dimensionality being se_ratio times the Tx3x3 conv dim.
            swish_inner (bool): if True, apply swish to the Tx3x3 conv, otherwise
                apply ReLU to the Tx3x3 conv.
            block_idx (int): index of this block within its stage; controls
                which blocks get SE (every other one).
        """
        super(X3DTransform, self).__init__()
        self.temp_kernel_size = temp_kernel_size
        self._inplace_relu = inplace_relu
        self._eps = eps
        self._bn_mmt = bn_mmt
        self._se_ratio = se_ratio
        self._swish_inner = swish_inner
        self._stride_1x1 = stride_1x1
        self._block_idx = block_idx
        self._construct(
            dim_in,
            dim_out,
            stride,
            dim_inner,
            num_groups,
            dilation,
            norm_module,
        )
    def _construct(
        self,
        dim_in,
        dim_out,
        stride,
        dim_inner,
        num_groups,
        dilation,
        norm_module,
    ):
        # Place the stride on either the 1x1 or the 3x3 conv, never both.
        (str1x1, str3x3) = (stride, 1) if self._stride_1x1 else (1, stride)
        # 1x1x1, BN, ReLU.
        self.a = nn.Conv3d(
            dim_in,
            dim_inner,
            kernel_size=[1, 1, 1],
            stride=[1, str1x1, str1x1],
            padding=[0, 0, 0],
            bias=False,
        )
        self.a_bn = norm_module(
            num_features=dim_inner, eps=self._eps, momentum=self._bn_mmt
        )
        self.a_relu = nn.ReLU(inplace=self._inplace_relu)
        # Tx3x3, BN, ReLU.
        self.b = nn.Conv3d(
            dim_inner,
            dim_inner,
            [self.temp_kernel_size, 3, 3],
            stride=[1, str3x3, str3x3],
            padding=[int(self.temp_kernel_size // 2), dilation, dilation],
            groups=num_groups,
            bias=False,
            dilation=[1, dilation, dilation],
        )
        self.b_bn = norm_module(
            num_features=dim_inner, eps=self._eps, momentum=self._bn_mmt
        )
        # Apply SE attention or not: every other block (even block_idx).
        use_se = True if (self._block_idx + 1) % 2 else False
        if self._se_ratio > 0.0 and use_se:
            self.se = SE(dim_inner, self._se_ratio)
        if self._swish_inner:
            self.b_relu = Swish()
        else:
            self.b_relu = nn.ReLU(inplace=self._inplace_relu)
        # 1x1x1, BN.
        self.c = nn.Conv3d(
            dim_inner,
            dim_out,
            kernel_size=[1, 1, 1],
            stride=[1, 1, 1],
            padding=[0, 0, 0],
            bias=False,
        )
        self.c_bn = norm_module(
            num_features=dim_out, eps=self._eps, momentum=self._bn_mmt
        )
        # NOTE(review): flag presumably consumed by BN weight-init code
        # elsewhere in the project — confirm.
        self.c_bn.transform_final_bn = True
    def forward(self, x):
        # Runs all submodules sequentially in registration order (a, a_bn,
        # a_relu, b, b_bn, [se], b_relu, c, c_bn), so the optional SE module
        # is applied right after b_bn when present.
        for block in self.children():
            x = block(x)
        return x
class BottleneckTransform(nn.Module):
    """
    Bottleneck transformation: Tx1x1, 1x3x3, 1x1x1, where T is the size of
    temporal kernel.
    """
    def __init__(
        self,
        dim_in,
        dim_out,
        temp_kernel_size,
        stride,
        dim_inner,
        num_groups,
        stride_1x1=False,
        inplace_relu=True,
        eps=1e-5,
        bn_mmt=0.1,
        dilation=1,
        norm_module=nn.BatchNorm3d,
        block_idx=0,
    ):
        """
        Args:
            dim_in (int): the channel dimensions of the input.
            dim_out (int): the channel dimension of the output.
            temp_kernel_size (int): the temporal kernel sizes of the first
                convolution in the bottleneck.
            stride (int): the stride of the bottleneck.
            dim_inner (int): the inner dimension of the block.
            num_groups (int): number of groups for the convolution. num_groups=1
                is for standard ResNet like networks, and num_groups>1 is for
                ResNeXt like networks.
            stride_1x1 (bool): if True, apply stride to 1x1 conv, otherwise
                apply stride to the 3x3 conv.
            inplace_relu (bool): if True, calculate the relu on the original
                input without allocating new memory.
            eps (float): epsilon for batch norm.
            bn_mmt (float): momentum for batch norm. Noted that BN momentum in
                PyTorch = 1 - BN momentum in Caffe2.
            dilation (int): size of dilation.
            norm_module (nn.Module): nn.Module for the normalization layer. The
                default is nn.BatchNorm3d.
            block_idx (int): unused here; kept for signature parity with the
                other transform classes.
        """
        super(BottleneckTransform, self).__init__()
        self.temp_kernel_size = temp_kernel_size
        self._inplace_relu = inplace_relu
        self._eps = eps
        self._bn_mmt = bn_mmt
        self._stride_1x1 = stride_1x1
        self._construct(
            dim_in,
            dim_out,
            stride,
            dim_inner,
            num_groups,
            dilation,
            norm_module,
        )
    def _construct(
        self,
        dim_in,
        dim_out,
        stride,
        dim_inner,
        num_groups,
        dilation,
        norm_module,
    ):
        # Place the stride on either the 1x1 or the 3x3 conv, never both.
        (str1x1, str3x3) = (stride, 1) if self._stride_1x1 else (1, stride)
        # Tx1x1, BN, ReLU.
        self.a = nn.Conv3d(
            dim_in,
            dim_inner,
            kernel_size=[self.temp_kernel_size, 1, 1],
            stride=[1, str1x1, str1x1],
            padding=[int(self.temp_kernel_size // 2), 0, 0],
            bias=False,
        )
        self.a_bn = norm_module(
            num_features=dim_inner, eps=self._eps, momentum=self._bn_mmt
        )
        self.a_relu = nn.ReLU(inplace=self._inplace_relu)
        # 1x3x3, BN, ReLU.
        self.b = nn.Conv3d(
            dim_inner,
            dim_inner,
            [1, 3, 3],
            stride=[1, str3x3, str3x3],
            padding=[0, dilation, dilation],
            groups=num_groups,
            bias=False,
            dilation=[1, dilation, dilation],
        )
        self.b_bn = norm_module(
            num_features=dim_inner, eps=self._eps, momentum=self._bn_mmt
        )
        self.b_relu = nn.ReLU(inplace=self._inplace_relu)
        # 1x1x1, BN.
        self.c = nn.Conv3d(
            dim_inner,
            dim_out,
            kernel_size=[1, 1, 1],
            stride=[1, 1, 1],
            padding=[0, 0, 0],
            bias=False,
        )
        # NOTE(review): marker flags presumably consumed by weight-init /
        # transfer code elsewhere in the project — confirm.
        self.c.final_conv = True
        self.c_bn = norm_module(
            num_features=dim_out, eps=self._eps, momentum=self._bn_mmt
        )
        self.c_bn.transform_final_bn = True
    def forward(self, x):
        # Explicitly forward every layer.
        # Branch2a.
        x = self.a(x)
        x = self.a_bn(x)
        x = self.a_relu(x)
        # Branch2b.
        x = self.b(x)
        x = self.b_bn(x)
        x = self.b_relu(x)
        # Branch2c
        x = self.c(x)
        x = self.c_bn(x)
        return x
The provided code snippet includes necessary dependencies for implementing the `get_trans_func` function. Write a Python function `def get_trans_func(name)` to solve the following problem:
Retrieves the transformation module by name.
Here is the function:
def get_trans_func(name):
    """
    Retrieves the transformation module by name.
    Args:
        name (str): one of "bottleneck_transform", "basic_transform" or
            "x3d_transform".
    Returns:
        type: the transformation nn.Module class.
    """
    registry = {
        "basic_transform": BasicTransform,
        "bottleneck_transform": BottleneckTransform,
        "x3d_transform": X3DTransform,
    }
    assert (
        name in registry
    ), "Transformation function '{}' not supported".format(name)
    return registry[name]
22,407 | import torch
from fvcore.common.registry import Registry
from torch.distributed.algorithms.ddp_comm_hooks import (
default as comm_hooks_default,
)
import slowfast.utils.logging as logging
# Module-level logger for model-building diagnostics.
logger = logging.get_logger(__name__)
# Global registry of video-model classes; entries are instantiated as obj(cfg).
MODEL_REGISTRY = Registry("MODEL")
MODEL_REGISTRY.__doc__ = """
Registry for video model.
The registered object will be called with `obj(cfg)`.
The call should return a `torch.nn.Module` object.
"""
The provided code snippet includes necessary dependencies for implementing the `build_model` function. Write a Python function `def build_model(cfg, gpu_id=None)` to solve the following problem:
Builds the video model. Args: cfg (configs): configs that contains the hyper-parameters to build the backbone. Details can be seen in slowfast/config/defaults.py. gpu_id (Optional[int]): specify the gpu index to build model.
Here is the function:
def build_model(cfg, gpu_id=None):
    """
    Builds the video model.
    Args:
        cfg (configs): configs that contains the hyper-parameters to build the
        backbone. Details can be seen in slowfast/config/defaults.py.
        gpu_id (Optional[int]): specify the gpu index to build model.
    Returns:
        torch.nn.Module: the constructed model, moved to GPU and wrapped in
        DistributedDataParallel when configured.
    """
    if torch.cuda.is_available():
        assert (
            cfg.NUM_GPUS <= torch.cuda.device_count()
        ), "Cannot use more GPU devices than available"
    else:
        assert (
            cfg.NUM_GPUS == 0
        ), "Cuda is not available. Please set `NUM_GPUS: 0 for running on CPUs."
    # Construct the model from the registry by its configured name.
    name = cfg.MODEL.MODEL_NAME
    model = MODEL_REGISTRY.get(name)(cfg)
    if cfg.BN.NORM_TYPE == "sync_batchnorm_apex":
        try:
            import apex
        except ImportError:
            # Fixed typo in the user-facing message ("pelase" -> "please").
            raise ImportError("APEX is required for this model, please install")
        logger.info("Converting BN layers to Apex SyncBN")
        process_group = apex.parallel.create_syncbn_process_group(
            group_size=cfg.BN.NUM_SYNC_DEVICES
        )
        model = apex.parallel.convert_syncbn_model(
            model, process_group=process_group
        )
    if cfg.NUM_GPUS:
        if gpu_id is None:
            # Determine the GPU used by the current process
            cur_device = torch.cuda.current_device()
        else:
            cur_device = gpu_id
        # Transfer the model to the current GPU device
        model = model.cuda(device=cur_device)
    # Use multi-process data parallel model in the multi-gpu setting
    if cfg.NUM_GPUS > 1:
        # Make model replica operate on the current device
        model = torch.nn.parallel.DistributedDataParallel(
            module=model,
            device_ids=[cur_device],
            output_device=cur_device,
            # These model variants have branches that may not receive
            # gradients on every step, so unused-parameter detection is on.
            find_unused_parameters=True
            if cfg.MODEL.DETACH_FINAL_FC
            or cfg.MODEL.MODEL_NAME == "ContrastiveModel"
            else False,
        )
        if cfg.MODEL.FP16_ALLREDUCE:
            # Compress gradients to fp16 during all-reduce to cut comm cost.
            model.register_comm_hook(
                state=None, hook=comm_hooks_default.fp16_compress_hook
            )
    return model
22,408 | import atexit
import builtins
import decimal
import functools
import logging
import os
import sys
import simplejson
import slowfast.utils.distributed as du
from slowfast.utils.env import pathmgr
def get_logger(name):
    """
    Retrieve the logger with the specified name or, if name is None, return a
    logger which is the root logger of the hierarchy.
    Args:
        name (string): name of the logger.
    Returns:
        logging.Logger: the requested logger instance.
    """
    # Thin wrapper kept so call sites depend on this module, not `logging`.
    return logging.getLogger(name)
# Shared path manager for local and remote (URI) file access.
# NOTE(review): `PathManagerFactory` is not imported in this snippet —
# presumably from iopath; verify against the original module.
pathmgr = PathManagerFactory.get(key="pyslowfast")
The provided code snippet includes necessary dependencies for implementing the `log_json_stats` function. Write a Python function `def log_json_stats(stats, output_dir=None)` to solve the following problem:
Logs json stats. Args: stats (dict): a dictionary of statistical information to log.
Here is the function:
def log_json_stats(stats, output_dir=None):
    """
    Logs json stats.

    Floats are rounded to 5 decimal places via ``decimal.Decimal`` so the
    serialized output is stable; keys are sorted for the same reason.
    Args:
        stats (dict): a dictionary of statistical information to log.
        output_dir (str, optional): when given (and running on the master
            process), the line is also appended to
            ``<output_dir>/json_stats.log``; write failures are logged and
            swallowed.
    """
    rounded = {}
    for key, value in stats.items():
        if isinstance(value, float):
            value = decimal.Decimal("{:.5f}".format(value))
        rounded[key] = value
    json_stats = simplejson.dumps(rounded, sort_keys=True, use_decimal=True)
    logger = get_logger(__name__)
    logger.info("json_stats: {:s}".format(json_stats))
    # Only the master process appends to the on-disk log.
    if du.is_master_proc(du.get_world_size()) and output_dir:
        filename = os.path.join(output_dir, "json_stats.log")
        try:
            # Explicitly buffer writes for remote (URI) destinations.
            buffering = 1024 if "://" in filename else -1
            with pathmgr.open(filename, "a", buffering=buffering) as f:
                f.write("json_stats: {:s}\n".format(json_stats))
        except Exception:
            logger.info(
                "Failed to write to json_stats.log: {}".format(json_stats)
            )
22,409 | import argparse
import sys
import slowfast.utils.checkpoint as cu
from slowfast.config.defaults import get_cfg
The provided code snippet includes necessary dependencies for implementing the `parse_args` function. Write a Python function `def parse_args()` to solve the following problem:
Parse the following arguments for a default parser for PySlowFast users. Args: shard_id (int): shard id for the current machine. Starts from 0 to num_shards - 1. If single machine is used, then set shard id to 0. num_shards (int): number of shards using by the job. init_method (str): initialization method to launch the job with multiple devices. Options include TCP or shared file-system for initialization. Details can be found in https://pytorch.org/docs/stable/distributed.html#tcp-initialization cfg (str): path to the config file. opts (argument): provide additional options from the command line, it overwrites the config loaded from file.
Here is the function:
def parse_args():
    """
    Parse the default command-line arguments for PySlowFast users.

    Recognized arguments:
        --shard_id (int): shard id of the current machine, in
            [0, num_shards - 1]; 0 for a single machine.
        --num_shards (int): total number of shards used by the job.
        --init_method (str): initialization method for multi-device jobs
            (TCP or shared file-system); see
            https://pytorch.org/docs/stable/distributed.html#tcp-initialization
        --cfg (one or more str): path(s) to the config file(s).
        --opts: additional options that override values loaded from file.

    Returns:
        argparse.Namespace: the parsed arguments. If no arguments were
        supplied on the command line, the help text is printed first.
    """
    parser = argparse.ArgumentParser(
        description="Provide SlowFast video training and testing pipeline."
    )
    parser.add_argument(
        "--shard_id",
        type=int,
        default=0,
        help="The shard id of current node, Starts from 0 to num_shards - 1",
    )
    parser.add_argument(
        "--num_shards",
        type=int,
        default=1,
        help="Number of shards using by the job",
    )
    parser.add_argument(
        "--init_method",
        type=str,
        default="tcp://localhost:9999",
        help="Initialization method, includes TCP or shared file-system",
    )
    parser.add_argument(
        "--cfg",
        dest="cfg_files",
        nargs="+",
        default=["configs/Kinetics/SLOWFAST_4x16_R50.yaml"],
        help="Path to the config files",
    )
    parser.add_argument(
        "--opts",
        nargs=argparse.REMAINDER,
        default=None,
        help="See slowfast/config/defaults.py for all options",
    )
    # With no CLI arguments at all, show usage first; parsing still succeeds
    # because every argument has a default value.
    if len(sys.argv) == 1:
        parser.print_help()
    return parser.parse_args()
22,410 | import argparse
import sys
import slowfast.utils.checkpoint as cu
from slowfast.config.defaults import get_cfg
def get_cfg():
    """
    Get a copy of the default config.

    Returns a clone of the module-level default config node `_C`
    (defined elsewhere in this module), so callers can merge files and
    command-line opts into it without mutating the shared defaults.
    """
    return _C.clone()
The provided code snippet includes necessary dependencies for implementing the `load_config` function. Write a Python function `def load_config(args, path_to_config=None)` to solve the following problem:
Given the arguments, load and initialize the configs. Args: args (argument): arguments include `shard_id`, `num_shards`, `init_method`, `cfg_file`, and `opts`.
Here is the function:
def load_config(args, path_to_config=None):
    """
    Build the config for a run from defaults, an optional file, and CLI opts.

    Args:
        args (argument): parsed arguments; may carry `opts`, `num_shards`,
            `shard_id`, `rng_seed`, and `output_dir`.
        path_to_config (str, optional): YAML config file merged on top of the
            defaults.

    Returns:
        The fully merged config node.
    """
    cfg = get_cfg()
    # Precedence: defaults < config file < command-line opts.
    if path_to_config is not None:
        cfg.merge_from_file(path_to_config)
    if args.opts is not None:
        cfg.merge_from_list(args.opts)
    # Mirror selected CLI arguments into the config when they are present.
    if hasattr(args, "num_shards") and hasattr(args, "shard_id"):
        cfg.NUM_SHARDS = args.num_shards
        cfg.SHARD_ID = args.shard_id
    for attr_name, cfg_key in (
        ("rng_seed", "RNG_SEED"),
        ("output_dir", "OUTPUT_DIR"),
    ):
        if hasattr(args, attr_name):
            setattr(cfg, cfg_key, getattr(args, attr_name))
    # Make sure the checkpoint directory exists before the job starts.
    cu.make_checkpoint_dir(cfg.OUTPUT_DIR)
    return cfg
22,411 | import numpy as np
import slowfast.utils.logging as logging
logger = logging.get_logger(__name__)
The provided code snippet includes necessary dependencies for implementing the `print_schedule` function. Write a Python function `def print_schedule(schedule)` to solve the following problem:
Log schedule.
Here is the function:
def print_schedule(schedule):
    """
    Log the multigrid long-cycle schedule, one row per entry.

    Args:
        schedule (list): entries whose first three elements are
            (long cycle index, base shape, epochs).
    """
    logger.info("Long cycle index\tBase shape\tEpochs")
    for row in schedule:
        logger.info("{}\t{}\t{}".format(*row[:3]))
22,412 | import numpy as np
import slowfast.utils.logging as logging
The provided code snippet includes necessary dependencies for implementing the `get_current_long_cycle_shape` function. Write a Python function `def get_current_long_cycle_shape(schedule, epoch)` to solve the following problem:
Given a schedule and epoch index, return the long cycle base shape. Args: schedule (configs): configs that contains training and multigrid specific hyperparameters. Details can be seen in slowfast/config/defaults.py. cur_epoch (int): current epoch index. Returns: shapes (list): A list describing the base shape in a long cycle: [batch size relative to default, number of frames, spatial dimension].
Here is the function:
def get_current_long_cycle_shape(schedule, epoch):
    """
    Look up the long-cycle base shape that is active at a given epoch.

    Args:
        schedule (list): entries whose second element is the base shape
            [batch size relative to default, number of frames, spatial
            dimension] and whose last element is the end epoch of that shape.
        epoch (int): current epoch index.

    Returns:
        The base shape of the first schedule entry whose end epoch is still
        ahead of `epoch`, or the final entry's shape once the schedule has
        been exhausted.
    """
    return next(
        (entry[1] for entry in schedule if epoch < entry[-1]),
        schedule[-1][1],
    )
22,413 | from __future__ import absolute_import, division, print_function, unicode_literals
import csv
import logging
import pprint
import time
from collections import defaultdict
import numpy as np
import slowfast.utils.distributed as du
from ava_evaluation import object_detection_evaluation, standard_fields
from slowfast.utils.env import pathmgr
def read_csv(csv_file, class_whitelist=None, load_score=False):
    """Loads boxes and class labels from a CSV file in the AVA format.

    CSV file format described at https://research.google.com/ava/download.html.

    Args:
        csv_file: Path to a CSV file (opened via ``pathmgr``).
        class_whitelist: If provided, boxes corresponding to (integer) class labels
            not in this set are skipped.
        load_score: If True, read the per-box score from column 8; otherwise
            every box gets a score of 1.0.

    Returns:
        boxes: A dictionary mapping each unique image key (string) to a list of
            boxes, given as coordinates [y1, x1, y2, x2].
        labels: A dictionary mapping each unique image key (string) to a list of
            integer class labels, matching the corresponding box in `boxes`.
        scores: A dictionary mapping each unique image key (string) to a list of
            score values, matching the corresponding label in `labels`. If
            scores are not provided in the csv, then they will default to 1.0.
    """
    boxes = defaultdict(list)
    labels = defaultdict(list)
    scores = defaultdict(list)
    with pathmgr.open(csv_file, "r") as f:
        reader = csv.reader(f)
        for row in reader:
            # Bug fix: `row` is a list, so "str" + row raised TypeError
            # instead of showing the offending row in the assertion message.
            assert len(row) in [7, 8], "Wrong number of columns: " + str(row)
            image_key = make_image_key(row[0], row[1])
            x1, y1, x2, y2 = [float(n) for n in row[2:6]]
            action_id = int(row[6])
            if class_whitelist and action_id not in class_whitelist:
                continue
            score = 1.0
            if load_score:
                score = float(row[7])
            # AVA evaluation expects [y1, x1, y2, x2] box ordering.
            boxes[image_key].append([y1, x1, y2, x2])
            labels[image_key].append(action_id)
            scores[image_key].append(score)
    return boxes, labels, scores
def read_exclusions(exclusions_file):
    """Reads a CSV file of excluded timestamps.

    Args:
        exclusions_file: Path to a csv of video-id,timestamp rows (opened via
            ``pathmgr``), or a falsy value for no exclusions.

    Returns:
        A set of strings containing excluded image keys, e.g. "aaaaaaaaaaa,0904",
        or an empty set if exclusions file is None.
    """
    excluded = set()
    if exclusions_file:
        with pathmgr.open(exclusions_file, "r") as f:
            reader = csv.reader(f)
            for row in reader:
                # Bug fix: `row` is a list, so "str" + row raised TypeError
                # instead of showing the offending row in the message.
                assert len(row) == 2, "Expected only 2 columns, got: " + str(
                    row
                )
                excluded.add(make_image_key(row[0], row[1]))
    return excluded
def read_labelmap(labelmap_file):
    """Parse an AVA label map file into category entries and the set of ids.

    Args:
        labelmap_file: path to a text label map (opened via ``pathmgr``).

    Returns:
        labelmap: list of {"id": int, "name": str} dicts, one per label.
        class_ids: set of all integer class ids seen.
    """
    labelmap = []
    class_ids = set()
    current_name = ""
    current_id = ""
    with pathmgr.open(labelmap_file, "r") as f:
        for line in f:
            if line.startswith("  name:"):
                # The name is the text between the first pair of quotes.
                current_name = line.split('"')[1]
            elif line.startswith(("  id:", "  label_id:")):
                current_id = int(line.strip().split(" ")[-1])
                labelmap.append({"id": current_id, "name": current_name})
                class_ids.add(current_id)
    return labelmap, class_ids
def run_evaluation(
    categories, groundtruth, detections, excluded_keys, verbose=True
):
    """AVA evaluation main logic.

    Args:
        categories: list of category dicts for the Pascal evaluator.
        groundtruth: (boxes, labels, scores) dicts keyed by image key.
        detections: (boxes, labels, scores) dicts keyed by image key.
        excluded_keys: image keys skipped on both the GT and detection side.
        verbose: unused; kept for interface compatibility.

    Returns:
        dict of Pascal-style metrics (e.g. "PascalBoxes_Precision/mAP@0.5IOU").
    """
    pascal_evaluator = object_detection_evaluation.PascalDetectionEvaluator(
        categories
    )
    # Register all ground-truth boxes first; detections added below are
    # matched against the GT already registered in the evaluator.
    boxes, labels, _ = groundtruth
    gt_keys = []
    pred_keys = []
    for image_key in boxes:
        if image_key in excluded_keys:
            logging.info(
                (
                    "Found excluded timestamp in ground truth: %s. "
                    "It will be ignored."
                ),
                image_key,
            )
            continue
        pascal_evaluator.add_single_ground_truth_image_info(
            image_key,
            {
                standard_fields.InputDataFields.groundtruth_boxes: np.array(
                    boxes[image_key], dtype=float
                ),
                standard_fields.InputDataFields.groundtruth_classes: np.array(
                    labels[image_key], dtype=int
                ),
                # No "difficult" annotations in AVA; mark all as easy.
                standard_fields.InputDataFields.groundtruth_difficult: np.zeros(
                    len(boxes[image_key]), dtype=bool
                ),
            },
        )
        gt_keys.append(image_key)
    # Rebind to the detection-side dicts (scores are used this time).
    boxes, labels, scores = detections
    for image_key in boxes:
        if image_key in excluded_keys:
            logging.info(
                (
                    "Found excluded timestamp in detections: %s. "
                    "It will be ignored."
                ),
                image_key,
            )
            continue
        pascal_evaluator.add_single_detected_image_info(
            image_key,
            {
                standard_fields.DetectionResultFields.detection_boxes: np.array(
                    boxes[image_key], dtype=float
                ),
                standard_fields.DetectionResultFields.detection_classes: np.array(
                    labels[image_key], dtype=int
                ),
                standard_fields.DetectionResultFields.detection_scores: np.array(
                    scores[image_key], dtype=float
                ),
            },
        )
        pred_keys.append(image_key)
    metrics = pascal_evaluator.evaluate()
    # Only the master process prints, to avoid duplicated output under DDP.
    if du.is_master_proc():
        pprint.pprint(metrics, indent=2)
    return metrics
The provided code snippet includes necessary dependencies for implementing the `evaluate_ava_from_files` function. Write a Python function `def evaluate_ava_from_files(labelmap, groundtruth, detections, exclusions)` to solve the following problem:
Run AVA evaluation given annotation/prediction files.
Here is the function:
def evaluate_ava_from_files(labelmap, groundtruth, detections, exclusions):
    """Run AVA evaluation given annotation/prediction files.

    Args:
        labelmap: path to the label map file.
        groundtruth: path to the ground-truth CSV (no scores).
        detections: path to the predictions CSV (with scores).
        exclusions: path to the excluded-timestamps CSV.
    """
    categories, class_whitelist = read_labelmap(labelmap)
    excluded_keys = read_exclusions(exclusions)
    gt_data = read_csv(groundtruth, class_whitelist, load_score=False)
    pred_data = read_csv(detections, class_whitelist, load_score=True)
    run_evaluation(categories, gt_data, pred_data, excluded_keys)
22,414 | from __future__ import absolute_import, division, print_function, unicode_literals
import csv
import logging
import pprint
import time
from collections import defaultdict
import numpy as np
import slowfast.utils.distributed as du
from ava_evaluation import object_detection_evaluation, standard_fields
from slowfast.utils.env import pathmgr
logger = logging.getLogger(__name__)
def run_evaluation(
    categories, groundtruth, detections, excluded_keys, verbose=True
):
    """AVA evaluation main logic.

    Args:
        categories: list of category dicts for the Pascal evaluator.
        groundtruth: (boxes, labels, scores) dicts keyed by image key.
        detections: (boxes, labels, scores) dicts keyed by image key.
        excluded_keys: image keys skipped on both the GT and detection side.
        verbose: unused; kept for interface compatibility.

    Returns:
        dict of Pascal-style metrics (e.g. "PascalBoxes_Precision/mAP@0.5IOU").
    """
    pascal_evaluator = object_detection_evaluation.PascalDetectionEvaluator(
        categories
    )
    # Register all ground-truth boxes first; detections added below are
    # matched against the GT already registered in the evaluator.
    boxes, labels, _ = groundtruth
    gt_keys = []
    pred_keys = []
    for image_key in boxes:
        if image_key in excluded_keys:
            logging.info(
                (
                    "Found excluded timestamp in ground truth: %s. "
                    "It will be ignored."
                ),
                image_key,
            )
            continue
        pascal_evaluator.add_single_ground_truth_image_info(
            image_key,
            {
                standard_fields.InputDataFields.groundtruth_boxes: np.array(
                    boxes[image_key], dtype=float
                ),
                standard_fields.InputDataFields.groundtruth_classes: np.array(
                    labels[image_key], dtype=int
                ),
                # No "difficult" annotations in AVA; mark all as easy.
                standard_fields.InputDataFields.groundtruth_difficult: np.zeros(
                    len(boxes[image_key]), dtype=bool
                ),
            },
        )
        gt_keys.append(image_key)
    # Rebind to the detection-side dicts (scores are used this time).
    boxes, labels, scores = detections
    for image_key in boxes:
        if image_key in excluded_keys:
            logging.info(
                (
                    "Found excluded timestamp in detections: %s. "
                    "It will be ignored."
                ),
                image_key,
            )
            continue
        pascal_evaluator.add_single_detected_image_info(
            image_key,
            {
                standard_fields.DetectionResultFields.detection_boxes: np.array(
                    boxes[image_key], dtype=float
                ),
                standard_fields.DetectionResultFields.detection_classes: np.array(
                    labels[image_key], dtype=int
                ),
                standard_fields.DetectionResultFields.detection_scores: np.array(
                    scores[image_key], dtype=float
                ),
            },
        )
        pred_keys.append(image_key)
    metrics = pascal_evaluator.evaluate()
    # Only the master process prints, to avoid duplicated output under DDP.
    if du.is_master_proc():
        pprint.pprint(metrics, indent=2)
    return metrics
def get_ava_eval_data(
    scores,
    boxes,
    metadata,
    class_whitelist,
    verbose=False,
    video_idx_to_name=None,
):
    """
    Convert our data format into the data format used in official AVA
    evaluation.

    Args:
        scores (ndarray): shape (num_boxes, num_classes) per-class scores.
        boxes (ndarray): shape (num_boxes, 5); column 0 is the batch index,
            columns 1-4 are [x1, y1, x2, y2].
        metadata (array-like): per-box (video_idx, sec) pairs.
        class_whitelist (set): accepted 1-based class ids.
        verbose (bool): unused; kept for interface compatibility.
        video_idx_to_name (list): maps video index to video name.

    Returns:
        (out_boxes, out_labels, out_scores): dicts keyed by
        "<video>,<sec:04d>", each holding parallel per-prediction lists;
        boxes are reordered to [y1, x1, y2, x2].
    """
    out_scores = defaultdict(list)
    out_labels = defaultdict(list)
    out_boxes = defaultdict(list)
    # Fix: dropped the write-only `count` accumulator (dead code).
    for i in range(scores.shape[0]):
        video_idx = int(np.round(metadata[i][0]))
        sec = int(np.round(metadata[i][1]))
        video = video_idx_to_name[video_idx]
        key = video + "," + "%04d" % (sec)
        # boxes[i] is [batch_idx, x1, y1, x2, y2]; AVA wants [y1, x1, y2, x2],
        # so swap the coordinate pairs while keeping the batch index in front.
        batch_box = boxes[i].tolist()
        batch_box = [batch_box[j] for j in [0, 2, 1, 4, 3]]
        one_scores = scores[i].tolist()
        for cls_idx, score in enumerate(one_scores):
            # Class ids in the whitelist are 1-based.
            if cls_idx + 1 in class_whitelist:
                out_scores[key].append(score)
                out_labels[key].append(cls_idx + 1)
                out_boxes[key].append(batch_box[1:])
    return out_boxes, out_labels, out_scores
def write_results(detections, filename):
    """Write prediction results into official formats.

    Args:
        detections: (boxes, labels, scores) dicts keyed by image key; boxes
            are stored as [y1, x1, y2, x2].
        filename: destination CSV path (opened via ``pathmgr``).
    """
    start = time.time()
    all_boxes, all_labels, all_scores = detections
    with pathmgr.open(filename, "w") as out:
        for key in all_boxes.keys():
            rows = zip(all_boxes[key], all_labels[key], all_scores[key])
            for box, label, score in rows:
                # AVA CSV column order is x1,y1,x2,y2, so swap pairs back.
                out.write(
                    "%s,%.03f,%.03f,%.03f,%.03f,%d,%.04f\n"
                    % (key, box[1], box[0], box[3], box[2], label, score)
                )
    logger.info("AVA results wrote to %s" % filename)
    logger.info("\ttook %d seconds." % (time.time() - start))
The provided code snippet includes necessary dependencies for implementing the `evaluate_ava` function. Write a Python function `def evaluate_ava( preds, original_boxes, metadata, excluded_keys, class_whitelist, categories, groundtruth=None, video_idx_to_name=None, name="latest", )` to solve the following problem:
Run AVA evaluation given numpy arrays.
Here is the function:
def evaluate_ava(
    preds,
    original_boxes,
    metadata,
    excluded_keys,
    class_whitelist,
    categories,
    groundtruth=None,
    video_idx_to_name=None,
    name="latest",
):
    """Run AVA evaluation given numpy arrays.

    Args:
        preds (ndarray): (num_boxes, num_classes) predicted scores.
        original_boxes (ndarray): (num_boxes, 5) boxes; column 0 is the
            batch index.
        metadata (array-like): per-box (video_idx, sec) pairs.
        excluded_keys (set): image keys skipped during evaluation.
        class_whitelist (set): accepted 1-based class ids.
        categories (list): category dicts for the Pascal evaluator.
        groundtruth: (boxes, labels, scores) dicts keyed by image key.
            NOTE(review): defaults to None but is dereferenced
            unconditionally below — callers must always pass it.
        video_idx_to_name (list): maps video index to video name.
        name (str): suffix used for the dumped CSV file names.

    Returns:
        float: the "PascalBoxes_Precision/mAP@0.5IOU" metric.
    """
    eval_start = time.time()
    detections = get_ava_eval_data(
        preds,
        original_boxes,
        metadata,
        class_whitelist,
        video_idx_to_name=video_idx_to_name,
    )
    logger.info("Evaluating with %d unique GT frames." % len(groundtruth[0]))
    logger.info(
        "Evaluating with %d unique detection frames" % len(detections[0])
    )
    # Dump both sides to CSV so results can be inspected / re-scored offline.
    write_results(detections, "detections_%s.csv" % name)
    write_results(groundtruth, "groundtruth_%s.csv" % name)
    results = run_evaluation(categories, groundtruth, detections, excluded_keys)
    logger.info("AVA eval done in %f seconds." % (time.time() - eval_start))
    return results["PascalBoxes_Precision/mAP@0.5IOU"]
22,415 | import copy
import math
import numpy as np
import os
import pickle
from collections import OrderedDict
import torch
import slowfast.utils.distributed as du
import slowfast.utils.logging as logging
from slowfast.utils.c2_model_loading import get_name_convert_func
from slowfast.utils.env import checkpoint_pathmgr as pathmgr
logger = logging.get_logger(__name__)
def get_last_checkpoint(path_to_job, task):
    """
    Return the path of the most recent checkpoint in the job folder.

    Args:
        path_to_job (string): the path to the folder of the current job.
        task (string): if non-empty, only files containing
            "<task>_checkpoint" are considered; otherwise files starting
            with "checkpoint".

    Returns:
        str or None: full path of the newest checkpoint, or None if absent.
    """
    checkpoint_dir = get_checkpoint_dir(path_to_job)
    entries = pathmgr.ls(checkpoint_dir) if pathmgr.exists(checkpoint_dir) else []
    if task != "":
        pattern = "{}_checkpoint".format(task)
        candidates = [f for f in entries if pattern in f]
    else:
        candidates = [f for f in entries if f.startswith("checkpoint")]
    if not candidates:
        return None
    # Checkpoint names embed the epoch, so the lexicographic max is the latest.
    return os.path.join(checkpoint_dir, max(candidates))
def has_checkpoint(path_to_job):
    """
    Return True if the job folder contains at least one checkpoint file.

    Args:
        path_to_job (string): the path to the folder of the current job.
    """
    checkpoint_dir = get_checkpoint_dir(path_to_job)
    if not pathmgr.exists(checkpoint_dir):
        return False
    return any("checkpoint" in f for f in pathmgr.ls(checkpoint_dir))
def load_checkpoint(
    path_to_checkpoint,
    model,
    data_parallel=True,
    optimizer=None,
    scaler=None,
    inflation=False,
    convert_from_caffe2=False,
    epoch_reset=False,
    clear_name_pattern=(),
    image_init=False,
):
    """
    Load the checkpoint from the given file. If inflation is True, inflate the
    2D Conv weights from the checkpoint to 3D Conv.
    Args:
        path_to_checkpoint (string): path to the checkpoint to load.
        model (model): model to load the weights from the checkpoint.
        data_parallel (bool): if true, model is wrapped by
            torch.nn.parallel.DistributedDataParallel.
        optimizer (optim): optimizer to load the historical state.
        scaler (GradScaler): GradScaler to load the mixed precision scale.
        inflation (bool): if True, inflate the weights from the checkpoint.
        convert_from_caffe2 (bool): if True, load the model from caffe2 and
            convert it to pytorch.
        epoch_reset (bool): if True, reset #train iterations from the checkpoint.
        clear_name_pattern (string): if given, this (sub)string will be cleared
            from a layer name if it can be matched.
        image_init (bool): if True, perform surgery on image-pretrained
            weights (positional embeddings, patch embedding, pooling
            projections) so they fit this model's layout.
    Returns:
        (int): the number of training epoch of the checkpoint.
    """
    logger.info("Loading network weights from {}.".format(path_to_checkpoint))
    # Account for the DDP wrapper in the multi-gpu setting.
    ms = model.module if data_parallel else model
    if convert_from_caffe2:
        # Caffe2 pickle checkpoint: translate blob names/shapes to PyTorch.
        with pathmgr.open(path_to_checkpoint, "rb") as f:
            caffe2_checkpoint = pickle.load(f, encoding="latin1")
        state_dict = OrderedDict()
        name_convert_func = get_name_convert_func()
        for key in caffe2_checkpoint["blobs"].keys():
            converted_key = name_convert_func(key)
            converted_key = c2_normal_to_sub_bn(converted_key, ms.state_dict())
            if converted_key in ms.state_dict():
                c2_blob_shape = caffe2_checkpoint["blobs"][key].shape
                model_blob_shape = ms.state_dict()[converted_key].shape
                # expand shape dims if they differ (eg for converting linear to conv params)
                if len(c2_blob_shape) < len(model_blob_shape):
                    c2_blob_shape += (1,) * (
                        len(model_blob_shape) - len(c2_blob_shape)
                    )
                    caffe2_checkpoint["blobs"][key] = np.reshape(
                        caffe2_checkpoint["blobs"][key], c2_blob_shape
                    )
                # Load BN stats to Sub-BN: tile 1-D stats when the model's
                # (split) BN dimension is an exact multiple of the blob's.
                if (
                    len(model_blob_shape) == 1
                    and len(c2_blob_shape) == 1
                    and model_blob_shape[0] > c2_blob_shape[0]
                    and model_blob_shape[0] % c2_blob_shape[0] == 0
                ):
                    caffe2_checkpoint["blobs"][key] = np.concatenate(
                        [caffe2_checkpoint["blobs"][key]]
                        * (model_blob_shape[0] // c2_blob_shape[0])
                    )
                    c2_blob_shape = caffe2_checkpoint["blobs"][key].shape
                if c2_blob_shape == tuple(model_blob_shape):
                    state_dict[converted_key] = torch.tensor(
                        caffe2_checkpoint["blobs"][key]
                    ).clone()
                    logger.info(
                        "{}: {} => {}: {}".format(
                            key,
                            c2_blob_shape,
                            converted_key,
                            tuple(model_blob_shape),
                        )
                    )
                else:
                    logger.warn(
                        "!! {}: {} does not match {}: {}".format(
                            key,
                            c2_blob_shape,
                            converted_key,
                            tuple(model_blob_shape),
                        )
                    )
            else:
                # Solver-state blobs (momentum/lr/iter) are expected to be
                # unconvertible; only warn about genuine weight blobs.
                if not any(
                    prefix in key for prefix in ["momentum", "lr", "model_iter"]
                ):
                    logger.warn(
                        "!! {}: can not be converted, got {}".format(
                            key, converted_key
                        )
                    )
        diff = set(ms.state_dict()) - set(state_dict)
        diff = {d for d in diff if "num_batches_tracked" not in d}
        if len(diff) > 0:
            logger.warn("Not loaded {}".format(diff))
        ms.load_state_dict(state_dict, strict=False)
        epoch = -1
    else:
        # Load the checkpoint on CPU to avoid GPU mem spike.
        with pathmgr.open(path_to_checkpoint, "rb") as f:
            checkpoint = torch.load(f, map_location="cpu")
        model_state_dict_3d = (
            model.module.state_dict() if data_parallel else model.state_dict()
        )
        checkpoint["model_state"] = normal_to_sub_bn(
            checkpoint["model_state"], model_state_dict_3d
        )
        if inflation:
            # Try to inflate the model.
            inflated_model_dict = inflate_weight(
                checkpoint["model_state"], model_state_dict_3d
            )
            ms.load_state_dict(inflated_model_dict, strict=False)
        else:
            if clear_name_pattern:
                for item in clear_name_pattern:
                    model_state_dict_new = OrderedDict()
                    for k in checkpoint["model_state"]:
                        if item in k:
                            k_re = k.replace(
                                item, "", 1
                            )  # only replace first occurrence of pattern
                            model_state_dict_new[k_re] = checkpoint[
                                "model_state"
                            ][k]
                            logger.info("renaming: {} -> {}".format(k, k_re))
                        else:
                            model_state_dict_new[k] = checkpoint["model_state"][
                                k
                            ]
                    checkpoint["model_state"] = model_state_dict_new
            pre_train_dict = checkpoint["model_state"]
            model_dict = ms.state_dict()
            if image_init:
                # Surgery to adapt image-pretrained (2D) weights to this
                # model's layout (split/merged pos embeds, 2D<->3D kernels).
                if (
                    "pos_embed" in pre_train_dict.keys()
                    and "pos_embed_xy" in model_dict.keys()
                ):
                    print(
                        pre_train_dict["pos_embed"].shape,
                        model_dict["pos_embed_xy"].shape,
                        model_dict["pos_embed_class"].shape,
                    )
                    # Split a joint [cls + patches] pos embed into the
                    # model's separate class / spatial embeddings.
                    if (
                        pre_train_dict["pos_embed"].shape[1]
                        == model_dict["pos_embed_xy"].shape[1] + 1
                    ):
                        pre_train_dict["pos_embed_xy"] = pre_train_dict[
                            "pos_embed"
                        ][:, 1:]
                        pre_train_dict["pos_embed_class"] = pre_train_dict[
                            "pos_embed"
                        ][:, :1]
                if (
                    "patch_embed.proj.weight" in pre_train_dict.keys()
                    and "patch_embed.proj.weight" in model_dict.keys()
                ):
                    print(
                        pre_train_dict["patch_embed.proj.weight"].shape,
                        model_dict["patch_embed.proj.weight"].shape,
                    )
                    if (
                        len(pre_train_dict["patch_embed.proj.weight"].shape)
                        == 4
                        and len(model_dict["patch_embed.proj.weight"].shape)
                        == 5
                    ):  # img->video
                        t = model_dict["patch_embed.proj.weight"].shape[2]
                        pre_train_dict[
                            "patch_embed.proj.weight"
                        ] = pre_train_dict["patch_embed.proj.weight"][
                            :, :, None, :, :
                        ].repeat(
                            1, 1, t, 1, 1
                        )
                        logger.info(
                            f"inflate patch_embed.proj.weight to {pre_train_dict['patch_embed.proj.weight'].shape}"
                        )
                    elif (
                        len(pre_train_dict["patch_embed.proj.weight"].shape)
                        == 5
                        and len(model_dict["patch_embed.proj.weight"].shape)
                        == 4
                    ):  # video->img
                        orig_shape = pre_train_dict[
                            "patch_embed.proj.weight"
                        ].shape
                        # pre_train_dict["patch_embed.proj.weight"] = pre_train_dict["patch_embed.proj.weight"][:, :, orig_shape[2]//2, :, :] # take center
                        pre_train_dict[
                            "patch_embed.proj.weight"
                        ] = pre_train_dict["patch_embed.proj.weight"].sum(
                            2
                        )  # collapse temporal dim (sum over time)
                        logger.info(
                            f"deflate patch_embed.proj.weight from {orig_shape} to {pre_train_dict['patch_embed.proj.weight'].shape}"
                        )
                if (
                    "pos_embed_spatial" in pre_train_dict.keys()
                    and "pos_embed" in model_dict.keys()
                ):
                    pos_embds = pre_train_dict["pos_embed_spatial"]
                    # Merge separate class + spatial embeddings into the
                    # model's single joint pos embed, if sizes require it.
                    if (
                        "pos_embed_class" in pre_train_dict.keys()
                        and pos_embds.shape
                        != model_dict["pos_embed"].shape
                    ):
                        pos_embds = torch.cat(
                            [
                                pre_train_dict["pos_embed_class"],
                                pos_embds,
                            ],
                            1,
                        )
                        pre_train_dict.pop("pos_embed_class")
                    if pos_embds.shape == model_dict["pos_embed"].shape:
                        pre_train_dict["pos_embed"] = pos_embds
                        pre_train_dict.pop("pos_embed_spatial")
                        logger.info(
                            f"successful surgery of pos embed w/ shape {pos_embds.shape} "
                        )
                    else:
                        logger.info(
                            f"UNSUCCESSFUL surgery of pos embed w/ shape {pos_embds.shape} "
                        )
                qkv = [
                    "attn.pool_k.weight",
                    "attn.pool_q.weight",
                    "attn.pool_v.weight",
                ]
                # NOTE(review): the two loops below assume every matching
                # key also exists in model_dict — confirm for new models.
                for k in pre_train_dict.keys():
                    if (
                        any([x in k for x in qkv])
                        and pre_train_dict[k].shape != model_dict[k].shape
                    ):
                        # print(pre_train_dict[k].shape, model_dict[k].shape)
                        logger.info(
                            f"inflate {k} from {pre_train_dict[k].shape} to {model_dict[k].shape}"
                        )
                        # Repeat the 2D pooling kernel along the temporal dim.
                        t = model_dict[k].shape[2]
                        pre_train_dict[k] = pre_train_dict[k].repeat(
                            1, 1, t, 1, 1
                        )
                for k in pre_train_dict.keys():
                    if (
                        "rel_pos" in k
                        and pre_train_dict[k].shape != model_dict[k].shape
                    ):
                        # print(pre_train_dict[k].shape, model_dict[k].shape)
                        logger.info(
                            f"interpolating {k} from {pre_train_dict[k].shape} to {model_dict[k].shape}"
                        )
                        # Linearly resample relative-position tables to the
                        # model's table length.
                        new_pos_embed = torch.nn.functional.interpolate(
                            pre_train_dict[k]
                            .reshape(1, pre_train_dict[k].shape[0], -1)
                            .permute(0, 2, 1),
                            size=model_dict[k].shape[0],
                            mode="linear",
                        )
                        new_pos_embed = (
                            new_pos_embed.reshape(-1, model_dict[k].shape[0])
                            .permute(1, 0)
                            .squeeze()
                        )
                        pre_train_dict[k] = new_pos_embed
            # Match pre-trained weights that have same shape as current model.
            pre_train_dict_match = {}
            not_used_layers = []
            for k, v in pre_train_dict.items():
                if k in model_dict:
                    if v.size() == model_dict[k].size():
                        pre_train_dict_match[k] = v
                    else:
                        # Shape mismatch: try known resampling strategies
                        # before giving up on the weight.
                        if "attn.rel_pos" in k:
                            v_shape = v.shape
                            v = v.t().unsqueeze(0)
                            v = torch.nn.functional.interpolate(
                                v,
                                size=model_dict[k].size()[0],
                                mode="linear",
                            )
                            v = v[0].t()
                            pre_train_dict_match[k] = v
                            logger.info(
                                "{} reshaped from {} to {}".format(
                                    k, v_shape, v.shape
                                )
                            )
                        elif "pos_embed_temporal" in k:
                            v_shape = v.shape
                            v = torch.nn.functional.interpolate(
                                v.permute(0, 2, 1),
                                size=model_dict[k].shape[1],
                                mode="linear",
                            )
                            pre_train_dict_match[k] = v.permute(0, 2, 1)
                            logger.info(
                                "{} reshaped from {} to {}".format(
                                    k, v_shape, pre_train_dict_match[k].shape
                                )
                            )
                        elif "pos_embed_spatial" in k:
                            v_shape = v.shape
                            # Bicubic 2D resize between square patch grids.
                            pretrain_size = int(math.sqrt(v_shape[1]))
                            model_size = int(math.sqrt(model_dict[k].shape[1]))
                            assert pretrain_size * pretrain_size == v_shape[1]
                            assert (
                                model_size * model_size
                                == model_dict[k].shape[1]
                            )
                            v = torch.nn.functional.interpolate(
                                v.reshape(
                                    1, pretrain_size, pretrain_size, -1
                                ).permute(0, 3, 1, 2),
                                size=(model_size, model_size),
                                mode="bicubic",
                            )
                            pre_train_dict_match[k] = v.reshape(
                                1, -1, model_size * model_size
                            ).permute(0, 2, 1)
                            logger.info(
                                "{} reshaped from {} to {}".format(
                                    k, v_shape, pre_train_dict_match[k].shape
                                )
                            )
                        else:
                            not_used_layers.append(k)
                else:
                    not_used_layers.append(k)
            # Weights that do not have match from the pre-trained model.
            not_load_layers = [
                k
                for k in model_dict.keys()
                if k not in pre_train_dict_match.keys()
            ]
            # Log weights that are not loaded with the pre-trained weights.
            if not_load_layers:
                for k in not_load_layers:
                    logger.info("Network weights {} not loaded.".format(k))
            if not_used_layers:
                for k in not_used_layers:
                    logger.info("Network weights {} not used.".format(k))
            # Load pre-trained weights.
            missing_keys, unexpected_keys = ms.load_state_dict(
                pre_train_dict_match, strict=False
            )
            print("missing keys: {}".format(missing_keys))
            print("unexpected keys: {}".format(unexpected_keys))
            epoch = -1
        # Load the optimizer state (commonly not done when fine-tuning)
        if "epoch" in checkpoint.keys() and not epoch_reset:
            epoch = checkpoint["epoch"]
            if optimizer:
                optimizer.load_state_dict(checkpoint["optimizer_state"])
            if scaler:
                scaler.load_state_dict(checkpoint["scaler_state"])
        else:
            epoch = -1
    return epoch
The provided code snippet includes necessary dependencies for implementing the `load_train_checkpoint` function. Write a Python function `def load_train_checkpoint(cfg, model, optimizer, scaler=None)` to solve the following problem:
Loading checkpoint logic for training.
Here is the function:
def load_train_checkpoint(cfg, model, optimizer, scaler=None):
    """
    Resolve which checkpoint (if any) to load before training starts.

    Prefers auto-resume from the latest checkpoint in cfg.OUTPUT_DIR; falls
    back to cfg.TRAIN.CHECKPOINT_FILE_PATH; otherwise starts from scratch.

    Args:
        cfg (CfgNode): the full training config.
        model (nn.Module): model to restore weights into.
        optimizer (Optimizer): optimizer whose state may be restored.
        scaler (GradScaler, optional): AMP scaler whose state may be restored.

    Returns:
        int: the epoch index to start training from.
    """
    if cfg.TRAIN.AUTO_RESUME and has_checkpoint(cfg.OUTPUT_DIR):
        last_checkpoint = get_last_checkpoint(cfg.OUTPUT_DIR, cfg.TASK)
        logger.info("Load from last checkpoint, {}.".format(last_checkpoint))
        checkpoint_epoch = load_checkpoint(
            last_checkpoint,
            model,
            cfg.NUM_GPUS > 1,
            optimizer,
            scaler=scaler,
            clear_name_pattern=cfg.TRAIN.CHECKPOINT_CLEAR_NAME_PATTERN,
        )
        return checkpoint_epoch + 1
    if cfg.TRAIN.CHECKPOINT_FILE_PATH != "":
        logger.info("Load from given checkpoint file.")
        checkpoint_epoch = load_checkpoint(
            cfg.TRAIN.CHECKPOINT_FILE_PATH,
            model,
            cfg.NUM_GPUS > 1,
            optimizer,
            scaler=scaler,
            inflation=cfg.TRAIN.CHECKPOINT_INFLATE,
            convert_from_caffe2=cfg.TRAIN.CHECKPOINT_TYPE == "caffe2",
            epoch_reset=cfg.TRAIN.CHECKPOINT_EPOCH_RESET,
            clear_name_pattern=cfg.TRAIN.CHECKPOINT_CLEAR_NAME_PATTERN,
            image_init=cfg.TRAIN.CHECKPOINT_IN_INIT,
        )
        return checkpoint_epoch + 1
    # No checkpoint available: train from scratch.
    return 0
22,416 | import itertools
import torch
The provided code snippet includes necessary dependencies for implementing the `compute_and_update_bn_stats` function. Write a Python function `def compute_and_update_bn_stats(model, data_loader, num_batches=200)` to solve the following problem:
Compute and update the batch norm stats to make them more precise. During training both the bn stats and the weights change after every iteration, so the bn stats cannot precisely reflect the latest state of the current model. Here the bn stats are recomputed without changing the weights, to make the running mean and running var more precise. Args: model (model): the model used to compute and update the bn stats. data_loader (dataloader): dataloader used to provide inputs. num_batches (int): number of iterations used to compute the stats.
Here is the function:
def compute_and_update_bn_stats(model, data_loader, num_batches=200):
    """
    Compute and update the batch norm stats to make it more precise. During
    training both bn stats and the weight are changing after every iteration,
    so the bn can not precisely reflect the latest stats of the current model.
    Here the bn stats is recomputed without change of weights, to make the
    running mean and running var more precise.
    Args:
        model (model): the model using to compute and update the bn stats.
        data_loader (dataloader): dataloader using to provide inputs.
        num_batches (int): running iterations using to compute the stats.

    NOTE(review): inputs are moved to GPU, so CUDA must be available; the
    model should be in train mode so BN layers update their running stats —
    confirm both at the call site.
    """
    # Prepares all the bn layers.
    bn_layers = [
        m
        for m in model.modules()
        if any(
            (
                isinstance(m, bn_type)
                for bn_type in (
                    torch.nn.BatchNorm1d,
                    torch.nn.BatchNorm2d,
                    torch.nn.BatchNorm3d,
                )
            )
        )
    ]
    # In order to make the running stats only reflect the current batch, the
    # momentum is disabled.
    # bn.running_mean = (1 - momentum) * bn.running_mean + momentum * batch_mean
    # Setting the momentum to 1.0 to compute the stats without momentum.
    momentum_actual = [bn.momentum for bn in bn_layers]
    for bn in bn_layers:
        bn.momentum = 1.0
    # Calculates the running iterations for precise stats computation.
    running_mean = [torch.zeros_like(bn.running_mean) for bn in bn_layers]
    running_square_mean = [torch.zeros_like(bn.running_var) for bn in bn_layers]
    for ind, (inputs, _, _) in enumerate(
        itertools.islice(data_loader, num_batches)
    ):
        # Forwards the model to update the bn stats.
        if isinstance(inputs, (list,)):
            for i in range(len(inputs)):
                inputs[i] = inputs[i].float().cuda(non_blocking=True)
        else:
            inputs = inputs.cuda(non_blocking=True)
        model(inputs)
        for i, bn in enumerate(bn_layers):
            # Accumulates the bn stats with an incremental (running) mean:
            # mean_k = mean_{k-1} + (x_k - mean_{k-1}) / k.
            running_mean[i] += (bn.running_mean - running_mean[i]) / (ind + 1)
            # $E(x^2) = Var(x) + E(x)^2$.
            cur_square_mean = bn.running_var + bn.running_mean**2
            running_square_mean[i] += (
                cur_square_mean - running_square_mean[i]
            ) / (ind + 1)
    for i, bn in enumerate(bn_layers):
        bn.running_mean = running_mean[i]
        # Var(x) = $E(x^2) - E(x)^2$.
        bn.running_var = running_square_mean[i] - bn.running_mean**2
        # Sets the precise bn stats.
        bn.momentum = momentum_actual[i]
22,417 | import json
import logging
import math
import numpy as np
import os
from datetime import datetime
import psutil
import torch
import torchvision.io as io
from fvcore.nn.activation_count import activation_count
from fvcore.nn.flop_count import flop_count
from matplotlib import pyplot as plt
from torch import nn
from torchvision.utils import make_grid
import slowfast.utils.logging as logging
import slowfast.utils.multiprocessing as mpu
from slowfast.datasets.utils import pack_pathway_output
from slowfast.models.batchnorm_helper import SubBatchNorm3d
from slowfast.utils.env import pathmgr
The provided code snippet includes necessary dependencies for implementing the `plot_input` function. Write a Python function `def plot_input(tensor, bboxes=(), texts=(), path="./tmp_vis.png")` to solve the following problem:
Plot the input tensor with the optional bounding boxes and save it to disk. Args: tensor (tensor): a tensor with shape of `NxCxHxW`. bboxes (tuple): bounding boxes with format of [[x1, y1, x2, y2]]. texts (tuple): a tuple of strings to plot. path (str): path to the image to save to.
Here is the function:
def plot_input(tensor, bboxes=(), texts=(), path="./tmp_vis.png"):
"""
Plot the input tensor with the optional bounding box and save it to disk.
Args:
tensor (tensor): a tensor with shape of `NxCxHxW`.
bboxes (tuple): bounding boxes with format of [[x, y, h, w]].
texts (tuple): a tuple of string to plot.
path (str): path to the image to save to.
"""
tensor = tensor.float()
tensor = tensor - tensor.min()
tensor = tensor / tensor.max()
f, ax = plt.subplots(nrows=1, ncols=tensor.shape[0], figsize=(50, 20))
for i in range(tensor.shape[0]):
ax[i].axis("off")
ax[i].imshow(tensor[i].permute(1, 2, 0))
# ax[1][0].axis('off')
if bboxes is not None and len(bboxes) > i:
for box in bboxes[i]:
x1, y1, x2, y2 = box
ax[i].vlines(x1, y1, y2, colors="g", linestyles="solid")
ax[i].vlines(x2, y1, y2, colors="g", linestyles="solid")
ax[i].hlines(y1, x1, x2, colors="g", linestyles="solid")
ax[i].hlines(y2, x1, x2, colors="g", linestyles="solid")
if texts is not None and len(texts) > i:
ax[i].text(0, 0, texts[i])
f.savefig(path) | Plot the input tensor with the optional bounding box and save it to disk. Args: tensor (tensor): a tensor with shape of `NxCxHxW`. bboxes (tuple): bounding boxes with format of [[x, y, h, w]]. texts (tuple): a tuple of string to plot. path (str): path to the image to save to. |
22,418 | import json
import logging
import math
import numpy as np
import os
from datetime import datetime
import psutil
import torch
import torchvision.io as io
from fvcore.nn.activation_count import activation_count
from fvcore.nn.flop_count import flop_count
from matplotlib import pyplot as plt
from torch import nn
from torchvision.utils import make_grid
import slowfast.utils.logging as logging
import slowfast.utils.multiprocessing as mpu
from slowfast.datasets.utils import pack_pathway_output
from slowfast.models.batchnorm_helper import SubBatchNorm3d
from slowfast.utils.env import pathmgr
def convert_normalized_images(tensor):
    """
    Undo the dataset normalization (std=0.225, mean=0.45) and clamp the
    result into the displayable [0, 1] range.
    Args:
        tensor (tensor): a normalized image tensor.
    Returns:
        tensor: de-normalized tensor with values clamped to [0, 1].
    """
    denormed = tensor * 0.225 + 0.45
    return denormed.clamp(min=0.0, max=1.0)
# NOTE(review): `PathManagerFactory` is not imported in the visible import
# block above, and this assignment shadows the `pathmgr` imported from
# `slowfast.utils.env` on the line `from slowfast.utils.env import pathmgr`.
# Confirm where `PathManagerFactory` comes from (presumably
# `iopath.common.file_io`) and whether the shadowing is intentional.
pathmgr = PathManagerFactory.get(key="pyslowfast")
The provided code snippet includes necessary dependencies for implementing the `plot_input_normed` function. Write a Python function `def plot_input_normed( tensor, bboxes=(), texts=(), path="./tmp_vis.png", folder_path="", make_grids=False, output_video=False, )` to solve the following problem:
Plot the input tensor with the optional bounding boxes and save it to disk as an image grid or a video. Args: tensor (tensor): a tensor with shape of `NxCxHxW` (or `NxTxCxHxW` for clips). bboxes (tuple): bounding boxes with format of [[x1, y1, x2, y2]]. texts (tuple): a tuple of strings to plot. path (str): path to the image or video file to save to. folder_path (str): optional folder to create before saving. make_grids (bool): if True, tile the frames into a single grid image. output_video (bool): if True, write the frames out as a video file.
Here is the function:
def plot_input_normed(
tensor,
bboxes=(),
texts=(),
path="./tmp_vis.png",
folder_path="",
make_grids=False,
output_video=False,
):
"""
Plot the input tensor with the optional bounding box and save it to disk.
Args:
tensor (tensor): a tensor with shape of `NxCxHxW`.
bboxes (tuple): bounding boxes with format of [[x, y, h, w]].
texts (tuple): a tuple of string to plot.
path (str): path to the image to save to.
"""
tensor = tensor.float()
try:
os.mkdir(folder_path)
except Exception as e:
pass
tensor = convert_normalized_images(tensor)
if output_video:
# assert make_grids, "video needs to have make_grids on"
assert tensor.ndim == 5
sz = tensor.shape
if make_grids:
vid = tensor.reshape([sz[0], sz[1] * sz[2], sz[3], sz[4]])
vid = make_grid(vid, padding=8, pad_value=1.0, nrow=sz[0])
vid = vid.reshape([sz[1], sz[2], vid.shape[1], vid.shape[2]])
else:
vid = tensor.reshape([sz[0] * sz[1], sz[2], sz[3], sz[4]])
vid = vid.permute([0, 2, 3, 1])
vid *= 255.0
vid = vid.to(torch.uint8)
fps = 30.0 * vid.shape[0] / 64.0
io.video.write_video(path, vid, fps, video_codec="libx264")
elif make_grids:
if tensor.ndim > 4 and tensor.shape[0] == 1:
tensor = tensor.squeeze()
nrow = 1
elif tensor.ndim == 5:
nrow = tensor.shape[1]
tensor = tensor.reshape(
shape=(-1, tensor.shape[2], tensor.shape[3], tensor.shape[4])
)
vis2 = (
make_grid(tensor, padding=8, pad_value=1.0, nrow=nrow)
.permute(1, 2, 0)
.cpu()
.numpy()
)
plt.imsave(fname=path, arr=vis2, format="png")
else:
f, ax = plt.subplots(
nrows=tensor.shape[0],
ncols=tensor.shape[1],
figsize=(10 * tensor.shape[1], 10 * tensor.shape[0]),
)
if tensor.shape[0] == 1:
for i in range(tensor.shape[1]):
ax[i].axis("off")
ax[i].imshow(tensor[0][i].permute(1, 2, 0))
# ax[1][0].axis('off')
if bboxes is not None and len(bboxes) > i:
for box in bboxes[i]:
x1, y1, x2, y2 = box
ax[i].vlines(x1, y1, y2, colors="g", linestyles="solid")
ax[i].vlines(x2, y1, y2, colors="g", linestyles="solid")
ax[i].hlines(y1, x1, x2, colors="g", linestyles="solid")
ax[i].hlines(y2, x1, x2, colors="g", linestyles="solid")
if texts is not None and len(texts) > i:
ax[i].text(0, 0, texts[i])
else:
for i in range(tensor.shape[0]):
for j in range(tensor.shape[1]):
ax[i][j].axis("off")
ax[i][j].imshow(tensor[i][j].permute(1, 2, 0))
# ax[1][0].axis('off')
if bboxes is not None and len(bboxes) > i:
for box in bboxes[i]:
x1, y1, x2, y2 = box
ax[i].vlines(
x1, y1, y2, colors="g", linestyles="solid"
)
ax[i].vlines(
x2, y1, y2, colors="g", linestyles="solid"
)
ax[i].hlines(
y1, x1, x2, colors="g", linestyles="solid"
)
ax[i].hlines(
y2, x1, x2, colors="g", linestyles="solid"
)
if texts is not None and len(texts) > i:
ax[i].text(0, 0, texts[i])
print(f"{path}")
f.tight_layout(pad=0.0)
with pathmgr.open(path, "wb") as h:
f.savefig(h) | Plot the input tensor with the optional bounding box and save it to disk. Args: tensor (tensor): a tensor with shape of `NxCxHxW`. bboxes (tuple): bounding boxes with format of [[x, y, h, w]]. texts (tuple): a tuple of string to plot. path (str): path to the image to save to. |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.