id int64 0 190k | prompt stringlengths 21 13.4M | docstring stringlengths 1 12k ⌀ |
|---|---|---|
15,175 | import random
import torch
import torch.nn.functional as F
from damo.augmentations.box_level_augs.gaussian_maps import _merge_gaussian
def blend(image1, image2, factor):
    """Blend image1 and image2 using 'factor'.

    Factor can be above 0.0.  0.0 means only image1 is used, 1.0 means
    only image2.  Values strictly between 0 and 1 interpolate linearly;
    values outside that range extrapolate the difference, and the
    result is then clamped to [0, 1].
    """
    # Short-circuit the exact endpoints so the original tensors are
    # returned unchanged (no arithmetic, no copy).
    if factor == 0.0:
        return image1
    if factor == 1.0:
        return image2
    mixed = image1 + factor * (image2 - image1)
    if 0.0 < factor < 1.0:
        # Pure interpolation can never leave the input range.
        return mixed
    # Extrapolation may overshoot; clamp back into [0, 1].
    return torch.clamp(mixed, 0.0, 1.0)
The provided code snippet includes necessary dependencies for implementing the `brightness` function. Write a Python function `def brightness(image, factor)` to solve the following problem:
Equivalent of PIL Brightness.
Here is the function:
def brightness(image, factor):
    """Equivalent of PIL Brightness.

    Blends the image toward an all-black "degenerate" image: factor 1.0
    returns the input unchanged, 0.0 returns black, >1.0 brightens.
    """
    # zeros_like preserves device and dtype of `image`; the previous
    # torch.zeros(image.shape) always allocated a CPU float32 tensor
    # and failed when `image` lived on the GPU.
    degenerate = torch.zeros_like(image)
    return blend(degenerate, image, factor)
15,176 | import random
import torch
import torch.nn.functional as F
from damo.augmentations.box_level_augs.gaussian_maps import _merge_gaussian
def blend(image1, image2, factor):
    """Blend image1 and image2 using 'factor'.
    Factor can be above 0.0. A value of 0.0 means only image1 is used.
    A value of 1.0 means only image2 is used. A value between 0.0 and
    1.0 means we linearly interpolate the pixel values between the two
    images. A value greater than 1.0 "extrapolates" the difference
    between the two pixel values, and we clip the results to values
    between 0 and 1.0.
    """
    # Exact endpoints: return the input tensors themselves (no copy).
    if factor == 0.0:
        return image1
    if factor == 1.0:
        return image2
    difference = image2 - image1
    scaled = factor * difference
    # Do addition in float.
    temp = image1 + scaled
    # Interpolate
    if factor > 0.0 and factor < 1.0:
        # Interpolation means we always stay within 0 and 255.
        return temp
    # Extrapolate:
    #
    # We need to clip and then cast.
    return torch.clamp(temp, 0.0, 1.0)
The provided code snippet includes necessary dependencies for implementing the `sharpness` function. Write a Python function `def sharpness(image, factor)` to solve the following problem:
Implements Sharpness function from PIL using TF ops.
Here is the function:
def sharpness(image, factor):
    """Implements Sharpness function from PIL using torch ops.

    Args:
        image: CHW float tensor (assumed in [0, 1] — TODO confirm).
        factor: blend factor; 1.0 returns the input, 0.0 the smoothed
            "degenerate" image, >1.0 sharpens.
    """
    if image.shape[0] == 0 or image.shape[1] == 0:
        return image
    channels = image.shape[0]
    # 3x3 smoothing kernel (PIL ImageEnhance.Sharpness uses this
    # [[1,1,1],[1,5,1],[1,1,1]]/13 filter).  Build it on the image's
    # device so CUDA inputs work.
    kernel = torch.tensor(
        [[1., 1., 1.], [1., 5., 1.], [1., 1., 1.]],
        device=image.device).reshape(1, 1, 3, 3) / 13.0
    # One depthwise kernel copy per channel; the original hard-coded 3
    # and broke for non-RGB (e.g. grayscale) inputs despite having
    # computed `channels` above.
    kernel = kernel.repeat((channels, 1, 1, 1))
    image_newaxis = image.unsqueeze(0)
    # Reflect-pad so the convolution output keeps the spatial size.
    image_pad = F.pad(image_newaxis, (1, 1, 1, 1), mode='reflect')
    degenerate = F.conv2d(image_pad, weight=kernel,
                          groups=channels).squeeze(0)
    return blend(degenerate, image, factor)
15,177 | import random
import torch
import torch.nn.functional as F
from damo.augmentations.box_level_augs.gaussian_maps import _merge_gaussian
The provided code snippet includes necessary dependencies for implementing the `equalize` function. Write a Python function `def equalize(image)` to solve the following problem:
Implements Equalize function from PIL using PyTorch ops based on: https://github.com/tensorflow/tpu/blob/master/models/official/efficientnet/ autoaugment.py#L352
Here is the function:
def equalize(image):
    """Implements Equalize function from PIL using PyTorch ops based on:
    https://github.com/tensorflow/tpu/blob/master/models/official/efficientnet/
    autoaugment.py#L352

    Expects a 3xHxW float image in [0, 1]; returns a new tensor in the
    same range with each channel histogram-equalized independently.
    """
    # Work in 0..255 like the reference implementation (new tensor; the
    # input is not mutated).
    image = image * 255

    def scale_channel(im, c):
        """Scale the data in the channel to implement equalize."""
        im = im[c, :, :]
        # Compute the histogram of the image channel.
        histo = torch.histc(im, bins=256, min=0, max=255)  # .type(torch.int32)
        # For the purposes of computing the step, filter out the nonzeros.
        nonzero_histo = torch.reshape(histo[histo != 0], [-1])
        step = (torch.sum(nonzero_histo) - nonzero_histo[-1]) // 255

        def build_lut(histo, step):
            # Compute the cumulative sum, shifting by step // 2
            # and then normalization by step.
            lut = (torch.cumsum(histo, 0) + (step // 2)) // step
            # Shift lut, prepending with 0.  new_zeros keeps the lut's
            # device/dtype; plain torch.zeros(1) broke for CUDA inputs.
            lut = torch.cat([histo.new_zeros(1), lut[:-1]])
            # Clip the counts to be in range. This is done
            # in the C code for image.point.
            return torch.clamp(lut, 0, 255)

        # If step is zero, return the original image. Otherwise, build
        # lut from the full histogram and step and then index from it.
        if step == 0:
            result = im
        else:
            # can't index using 2d index. Have to flatten and then reshape
            result = torch.gather(build_lut(histo, step), 0,
                                  im.flatten().long())
            result = result.reshape_as(im)
        return result  # .type(torch.uint8)

    # Assumes RGB for now. Scales each channel independently
    # and then stacks the result.
    s1 = scale_channel(image, 0)
    s2 = scale_channel(image, 1)
    s3 = scale_channel(image, 2)
    image = torch.stack([s1, s2, s3], 0) / 255.0
    return image
15,178 | import random
import torch
import torch.nn.functional as F
from damo.augmentations.box_level_augs.gaussian_maps import _merge_gaussian
def autocontrast(image):
    """Stretch each channel of a 3xHxW image to span [0, 1].

    Equivalent of PIL AutoContrast: the per-channel minimum maps to 0
    and the maximum to 1; flat channels are returned unchanged.
    """
    def _stretch(channel):
        """Scale one 2D channel using the autocontrast rule."""
        low = torch.min(channel)
        high = torch.max(channel)
        if high <= low:
            # Degenerate (constant) channel: nothing to stretch.
            return channel
        gain = 1.0 / (high - low)
        rescaled = channel * gain + (-low * gain)
        return torch.clamp(rescaled, 0.0, 1.0)

    # Assumes RGB: each channel is rescaled independently, then the
    # three results are re-stacked along dim 0.
    channels = [_stretch(image[c, :, :]) for c in range(3)]
    return torch.stack(channels, 0)
15,179 | import random
import torch
import torch.nn.functional as F
from damo.augmentations.box_level_augs.gaussian_maps import _merge_gaussian
The provided code snippet includes necessary dependencies for implementing the `posterize` function. Write a Python function `def posterize(image, bits)` to solve the following problem:
Equivalent of PIL Posterize.
Here is the function:
def posterize(image, bits):
    """Equivalent of PIL Posterize for float images in [0, 1].

    Args:
        image: float tensor with values in [0, 1]; NOT modified.
        bits: number of low-order bits to zero out of each 8-bit value.
            NOTE(review): PIL's posterize KEEPS `bits` bits (shift =
            8 - bits); here the shift IS `bits`, as the original
            inline comment hints — preserved to keep caller behavior.

    Returns:
        New float tensor in [0, 1] with quantized values.
    """
    # Scale to 0..255 on a fresh tensor; the original `image *= 255`
    # mutated the caller's tensor in place.
    scaled = (image * 255).long()
    shift = bits  # 8 - bits
    quantized = (scaled >> shift) << shift
    return quantized.float() / 255.0
15,180 | import random
import torch
import torch.nn.functional as F
from damo.augmentations.box_level_augs.gaussian_maps import _merge_gaussian
def _merge_gaussian(img, img_aug, boxes, scale_ratios, scale_splits):
    """Blend `img` and `img_aug` using per-box gaussian weight maps.

    Where the gaussian weight is 1 the augmented pixel is used; where
    it is 0 the original pixel is kept.
    """
    weights = _gaussian_map(img, boxes, scale_splits, scale_ratios)
    weights = weights.clamp(min=0, max=1.0)
    return img * (1 - weights) + img_aug * weights
def _color_aug_func(img, img_aug, target, scale_ratios_splits,
                    box_sample_probs):
    """Apply a color augmentation only inside randomly sampled boxes.

    Each box in `target.bbox` is kept with its per-box probability from
    `box_sample_probs`; the augmented image is then merged back into
    the original under gaussian masks over the sampled boxes.
    """
    scale_ratios, scale_splits = scale_ratios_splits
    sampled_boxes = []
    for idx, box in enumerate(target.bbox):
        if random.random() < box_sample_probs[idx]:
            sampled_boxes.append(box)
    return _merge_gaussian(img, img_aug, sampled_boxes, scale_ratios,
                           scale_splits)
15,181 | import torch
from .bounding_box import BoxList
The provided code snippet includes necessary dependencies for implementing the `remove_small_boxes` function. Write a Python function `def remove_small_boxes(boxlist, min_size)` to solve the following problem:
Only keep boxes with both sides >= min_size Arguments: boxlist (Boxlist) min_size (int)
Here is the function:
def remove_small_boxes(boxlist, min_size):
    """
    Only keep boxes with both sides >= min_size
    Arguments:
        boxlist (Boxlist)
        min_size (int)
    """
    # Convert to xywh so width/height are directly available.
    bbox_xywh = boxlist.convert('xywh').bbox
    widths = bbox_xywh[:, 2]
    heights = bbox_xywh[:, 3]
    big_enough = (widths >= min_size) & (heights >= min_size)
    keep = big_enough.nonzero().squeeze(1)
    return boxlist[keep]
15,182 | import torch
from .bounding_box import BoxList
def _cat(tensors, dim=0):
"""
Efficient version of torch.cat that avoids a copy if there is only
a single element in a list
"""
assert isinstance(tensors, (list, tuple))
if len(tensors) == 1:
return tensors[0]
return torch.cat(tensors, dim)
class BoxList(object):
    """
    This class represents a set of bounding boxes.
    The bounding boxes are represented as a Nx4 Tensor.
    In order to uniquely determine the bounding boxes with respect
    to an image, we also store the corresponding image dimensions.
    They can contain extra information that is specific to each bounding box,
    such as labels.
    """
    def __init__(self, bbox, image_size, mode='xyxy'):
        # Preserve the device of an incoming tensor; any other input
        # (list/ndarray) lands on CPU.
        device = bbox.device if isinstance(
            bbox, torch.Tensor) else torch.device('cpu')
        bbox = torch.as_tensor(bbox, dtype=torch.float32, device=device)
        if bbox.ndimension() != 2:
            raise ValueError('bbox should have 2 dimensions, got {}'.format(
                bbox.ndimension()))
        if bbox.size(-1) != 4:
            raise ValueError('last dimension of bbox should have a '
                             'size of 4, got {}'.format(bbox.size(-1)))
        if mode not in ('xyxy', 'xywh'):
            raise ValueError("mode should be 'xyxy' or 'xywh'")
        self.bbox = bbox
        self.size = image_size  # (image_width, image_height)
        self.mode = mode
        self.extra_fields = {}

    def add_field(self, field, field_data):
        """Attach per-box data (e.g. labels, scores) under `field`."""
        self.extra_fields[field] = field_data

    def get_field(self, field):
        """Return the data stored under `field` (KeyError if absent)."""
        return self.extra_fields[field]

    def has_field(self, field):
        return field in self.extra_fields

    def fields(self):
        """List all extra-field names."""
        return list(self.extra_fields.keys())

    def _copy_extra_fields(self, bbox):
        # Shallow-copy all extra fields from another BoxList.
        for k, v in bbox.extra_fields.items():
            self.extra_fields[k] = v

    def convert(self, mode):
        """Return a BoxList in the requested coordinate mode
        ('xyxy' or 'xywh'); returns self if already in that mode."""
        if mode not in ('xyxy', 'xywh'):
            raise ValueError("mode should be 'xyxy' or 'xywh'")
        if mode == self.mode:
            return self
        # we only have two modes, so don't need to check
        # self.mode
        xmin, ymin, xmax, ymax = self._split_into_xyxy()
        if mode == 'xyxy':
            bbox = torch.cat((xmin, ymin, xmax, ymax), dim=-1)
            bbox = BoxList(bbox, self.size, mode=mode)
        else:
            TO_REMOVE = 0
            bbox = torch.cat(
                (xmin, ymin, xmax - xmin + TO_REMOVE, ymax - ymin + TO_REMOVE),
                dim=-1)
            bbox = BoxList(bbox, self.size, mode=mode)
        bbox._copy_extra_fields(self)
        return bbox

    def _split_into_xyxy(self):
        """Return (xmin, ymin, xmax, ymax), each of shape Nx1,
        regardless of the current mode."""
        if self.mode == 'xyxy':
            xmin, ymin, xmax, ymax = self.bbox.split(1, dim=-1)
            return xmin, ymin, xmax, ymax
        elif self.mode == 'xywh':
            TO_REMOVE = 0
            xmin, ymin, w, h = self.bbox.split(1, dim=-1)
            return (
                xmin,
                ymin,
                # Clamp so negative widths/heights cannot produce
                # xmax < xmin.
                xmin + (w - TO_REMOVE).clamp(min=0),
                ymin + (h - TO_REMOVE).clamp(min=0),
            )
        else:
            raise RuntimeError('Should not be here')

    def resize(self, size, *args, **kwargs):
        """
        Returns a resized copy of this bounding box
        :param size: The requested size in pixels, as a 2-tuple:
            (width, height).
        """
        ratios = tuple(
            float(s) / float(s_orig) for s, s_orig in zip(size, self.size))
        if ratios[0] == ratios[1]:
            # Isotropic scaling: a single multiply suffices.
            ratio = ratios[0]
            scaled_box = self.bbox * ratio
            bbox = BoxList(scaled_box, size, mode=self.mode)
            for k, v in self.extra_fields.items():
                # Non-tensor fields (e.g. masks) know how to resize
                # themselves; tensors are copied through unchanged.
                if not isinstance(v, torch.Tensor):
                    v = v.resize(size, *args, **kwargs)
                bbox.add_field(k, v)
            return bbox
        # Anisotropic scaling: scale x and y coordinates separately.
        ratio_width, ratio_height = ratios
        xmin, ymin, xmax, ymax = self._split_into_xyxy()
        scaled_xmin = xmin * ratio_width
        scaled_xmax = xmax * ratio_width
        scaled_ymin = ymin * ratio_height
        scaled_ymax = ymax * ratio_height
        scaled_box = torch.cat(
            (scaled_xmin, scaled_ymin, scaled_xmax, scaled_ymax), dim=-1)
        bbox = BoxList(scaled_box, size, mode='xyxy')
        for k, v in self.extra_fields.items():
            if not isinstance(v, torch.Tensor):
                v = v.resize(size, *args, **kwargs)
            bbox.add_field(k, v)
        return bbox.convert(self.mode)

    def transpose(self, method):
        """
        Transpose bounding box (flip or rotate in 90 degree steps)
        :param method: One of :py:attr:`PIL.Image.FLIP_LEFT_RIGHT`,
        :py:attr:`PIL.Image.FLIP_TOP_BOTTOM`, :py:attr:`PIL.Image.ROTATE_90`,
        :py:attr:`PIL.Image.ROTATE_180`, :py:attr:`PIL.Image.ROTATE_270`,
        :py:attr:`PIL.Image.TRANSPOSE` or :py:attr:`PIL.Image.TRANSVERSE`.
        """
        # NOTE(review): FLIP_LEFT_RIGHT / FLIP_TOP_BOTTOM are not
        # defined in this snippet — presumably module-level constants
        # (PIL transpose codes); confirm they are imported in the
        # full module.
        if method not in (FLIP_LEFT_RIGHT, FLIP_TOP_BOTTOM):
            raise NotImplementedError(
                'Only FLIP_LEFT_RIGHT and FLIP_TOP_BOTTOM implemented')
        image_width, image_height = self.size
        xmin, ymin, xmax, ymax = self._split_into_xyxy()
        if method == FLIP_LEFT_RIGHT:
            TO_REMOVE = 0
            # Mirror x coordinates; min/max swap roles.
            transposed_xmin = image_width - xmax - TO_REMOVE
            transposed_xmax = image_width - xmin - TO_REMOVE
            transposed_ymin = ymin
            transposed_ymax = ymax
        elif method == FLIP_TOP_BOTTOM:
            transposed_xmin = xmin
            transposed_xmax = xmax
            transposed_ymin = image_height - ymax
            transposed_ymax = image_height - ymin
        transposed_boxes = torch.cat((transposed_xmin, transposed_ymin,
                                      transposed_xmax, transposed_ymax),
                                     dim=-1)
        bbox = BoxList(transposed_boxes, self.size, mode='xyxy')
        for k, v in self.extra_fields.items():
            if not isinstance(v, torch.Tensor):
                v = v.transpose(method)
            bbox.add_field(k, v)
        return bbox.convert(self.mode)

    def crop(self, box):
        """
        Cropss a rectangular region from this bounding box. The box is a
        4-tuple defining the left, upper, right, and lower pixel
        coordinate.
        """
        xmin, ymin, xmax, ymax = self._split_into_xyxy()
        w, h = box[2] - box[0], box[3] - box[1]
        # Shift into the crop's coordinate frame and clamp to its extent.
        cropped_xmin = (xmin - box[0]).clamp(min=0, max=w)
        cropped_ymin = (ymin - box[1]).clamp(min=0, max=h)
        cropped_xmax = (xmax - box[0]).clamp(min=0, max=w)
        cropped_ymax = (ymax - box[1]).clamp(min=0, max=h)
        cropped_box = torch.cat(
            (cropped_xmin, cropped_ymin, cropped_xmax, cropped_ymax), dim=-1)
        bbox = BoxList(cropped_box, (w, h), mode='xyxy')
        for k, v in self.extra_fields.items():
            if not isinstance(v, torch.Tensor):
                v = v.crop(box)
            bbox.add_field(k, v)
        return bbox.convert(self.mode)

    # Tensor-like methods
    def to(self, device):
        """Return a copy with the box tensor (and any movable fields)
        on `device`."""
        bbox = BoxList(self.bbox.to(device), self.size, self.mode)
        for k, v in self.extra_fields.items():
            if hasattr(v, 'to'):
                v = v.to(device)
            bbox.add_field(k, v)
        return bbox

    def __getitem__(self, item):
        """Index/slice boxes and all extra fields in lockstep."""
        bbox = BoxList(self.bbox[item], self.size, self.mode)
        for k, v in self.extra_fields.items():
            bbox.add_field(k, v[item])
        return bbox

    def __len__(self):
        return self.bbox.shape[0]

    def clip_to_image(self, remove_empty=True):
        """Clamp box coordinates into the image bounds in place; when
        remove_empty, also drop boxes with non-positive area."""
        TO_REMOVE = 0
        self.bbox[:, 0].clamp_(min=0, max=self.size[0] - TO_REMOVE)
        self.bbox[:, 1].clamp_(min=0, max=self.size[1] - TO_REMOVE)
        self.bbox[:, 2].clamp_(min=0, max=self.size[0] - TO_REMOVE)
        self.bbox[:, 3].clamp_(min=0, max=self.size[1] - TO_REMOVE)
        if remove_empty:
            box = self.bbox
            keep = (box[:, 3] > box[:, 1]) & (box[:, 2] > box[:, 0])
            return self[keep]
        return self

    def area(self):
        """Per-box area as a length-N tensor (mode-aware)."""
        box = self.bbox
        if self.mode == 'xyxy':
            TO_REMOVE = 0
            area = (box[:, 2] - box[:, 0] +
                    TO_REMOVE) * (box[:, 3] - box[:, 1] + TO_REMOVE)
        elif self.mode == 'xywh':
            area = box[:, 2] * box[:, 3]
        else:
            raise RuntimeError('Should not be here')
        return area

    def copy_with_fields(self, fields, skip_missing=False):
        """Copy this BoxList keeping only the named extra fields;
        missing fields raise KeyError unless skip_missing."""
        bbox = BoxList(self.bbox, self.size, self.mode)
        if not isinstance(fields, (list, tuple)):
            fields = [fields]
        for field in fields:
            if self.has_field(field):
                bbox.add_field(field, self.get_field(field))
            elif not skip_missing:
                raise KeyError("Field '{}' not found in {}".format(
                    field, self))
        return bbox

    def __repr__(self):
        s = self.__class__.__name__ + '('
        s += 'num_boxes={}, '.format(len(self))
        s += 'image_width={}, '.format(self.size[0])
        s += 'image_height={}, '.format(self.size[1])
        s += 'mode={})'.format(self.mode)
        return s
The provided code snippet includes necessary dependencies for implementing the `cat_boxlist` function. Write a Python function `def cat_boxlist(bboxes)` to solve the following problem:
Concatenates a list of BoxList (having the same image size) into a single BoxList Arguments: bboxes (list[BoxList])
Here is the function:
def cat_boxlist(bboxes):
    """
    Concatenates a list of BoxList (having the same image size) into a
    single BoxList
    Arguments:
        bboxes (list[BoxList])
    """
    assert isinstance(bboxes, (list, tuple))
    assert all(isinstance(b, BoxList) for b in bboxes)
    first = bboxes[0]
    size = first.size
    mode = first.mode
    fields = set(first.fields())
    # Every BoxList must agree on image size, coordinate mode and the
    # set of extra fields, otherwise concatenation is ill-defined.
    assert all(b.size == size for b in bboxes)
    assert all(b.mode == mode for b in bboxes)
    assert all(set(b.fields()) == fields for b in bboxes)
    merged = BoxList(_cat([b.bbox for b in bboxes], dim=0), size, mode)
    for field in fields:
        merged.add_field(field,
                         _cat([b.get_field(field) for b in bboxes], dim=0))
    return merged
15,183 | import os
import torch
from loguru import logger
from tqdm import tqdm
from damo.dataset.datasets.evaluation import evaluate
from damo.utils import all_gather, get_world_size, is_main_process, synchronize
from damo.utils.timer import Timer, get_time_str
def compute_on_dataset(model, data_loader, device, timer=None, tta=False):
    """Run `model` in eval mode over every batch of `data_loader`.

    Args:
        model: detector; called as model(images) and expected to return
            one prediction (or None) per image.
        data_loader: yields (images, targets, image_ids) batches.
        device: device the images are moved to before the forward pass.
        timer: optional Timer; tic/toc wraps only the forward pass.
        tta: unused here — presumably handled by the caller/model;
            TODO confirm.

    Returns:
        dict mapping image_id -> prediction (moved to CPU, may be None).
    """
    model.eval()
    results_dict = {}
    cpu_device = torch.device('cpu')
    for _, batch in enumerate(tqdm(data_loader)):
        images, targets, image_ids = batch
        with torch.no_grad():
            if timer:
                timer.tic()
            output = model(images.to(device))
            if timer:
                # torch.cuda.synchronize() # consume much time
                timer.toc()
            # Move predictions off the GPU so results can be gathered
            # and the GPU memory freed; None entries are passed through.
            output = [o.to(cpu_device) if o is not None else o for o in output]
        results_dict.update(
            {img_id: result
             for img_id, result in zip(image_ids, output)})
    return results_dict
def _accumulate_predictions_from_multiple_gpus(predictions_per_gpu,
                                               multi_gpu_infer):
    """Gather per-GPU prediction dicts and merge them into one ordered list.

    Args:
        predictions_per_gpu: dict image_id -> prediction from this process.
        multi_gpu_infer: when True, all_gather collects the dicts from
            every process.

    Returns:
        On the main process: list of predictions ordered by image id.
        On other processes: None.
    """
    if multi_gpu_infer:
        all_predictions = all_gather(predictions_per_gpu)
    else:
        all_predictions = [predictions_per_gpu]
    if not is_main_process():
        return
    # merge the list of dicts
    predictions = {}
    for p in all_predictions:
        predictions.update(p)
    # convert a dict where the key is the index in a list
    image_ids = list(sorted(predictions.keys()))
    # Image ids are expected to be 0..N-1; warn (not fail) on gaps.
    if len(image_ids) != image_ids[-1] + 1:
        logger.warning(
            'Number of images that were gathered from multiple processes is'
            'not a contiguous set. Some images might be missing from the'
            'evaluation')
    # convert to a list
    predictions = [predictions[i] for i in image_ids]
    return predictions
def evaluate(dataset, predictions, output_folder, **kwargs):
    """evaluate dataset using different methods based on dataset type.
    Args:
        dataset: Dataset object
        predictions(list[BoxList]): each item in the list represents the
            prediction results for one image.
        output_folder: output folder, to save evaluation files or results.
        **kwargs: other args.
    Returns:
        evaluation result
    """
    args = dict(dataset=dataset,
                predictions=predictions,
                output_folder=output_folder,
                **kwargs)
    # NOTE(review): `datasets` and `coco_evaluation` are not among the
    # imports visible in this snippet — presumably provided by the
    # evaluation package; confirm in the full module.
    if isinstance(dataset, datasets.COCODataset):
        return coco_evaluation(**args)
    else:
        # Only COCO-style datasets are supported here.
        dataset_name = dataset.__class__.__name__
        raise NotImplementedError(
            'Unsupported dataset type {}.'.format(dataset_name))
class Timer(object):
    """Accumulating wall-clock timer with tic/toc semantics.

    NOTE(review): this class needs `import time` and `import datetime`
    at module level — confirm they are present in the full file.
    """
    def __init__(self):
        self.reset()

    @property
    def average_time(self):
        """Mean seconds per call, 0.0 before any call.

        Made a property (as in upstream maskrcnn-benchmark): the rest
        of the class reads it as an attribute — without @property,
        toc(average=True) returned the bound method itself and
        avg_time_str crashed passing a method to timedelta.
        """
        return self.total_time / self.calls if self.calls > 0 else 0.0

    def tic(self):
        # using time.time instead of time.clock because time time.clock
        # does not normalize for multithreading
        self.start_time = time.time()

    def toc(self, average=True):
        """Stop the current interval; return the running average (or
        this interval's duration when average=False)."""
        self.add(time.time() - self.start_time)
        if average:
            return self.average_time
        else:
            return self.diff

    def add(self, time_diff):
        """Record an externally measured interval."""
        self.diff = time_diff
        self.total_time += self.diff
        self.calls += 1

    def reset(self):
        self.total_time = 0.0
        self.calls = 0
        self.start_time = 0.0
        self.diff = 0.0

    def avg_time_str(self):
        """Average time formatted as H:MM:SS(.ffffff)."""
        time_str = str(datetime.timedelta(seconds=self.average_time))
        return time_str
def get_time_str(time_diff):
    """Format a duration in seconds as H:MM:SS(.ffffff)."""
    return str(datetime.timedelta(seconds=time_diff))
def inference(
    model,
    data_loader,
    dataset_name,
    iou_types=('bbox', ),
    box_only=False,
    device='cuda',
    expected_results=(),
    expected_results_sigma_tol=4,
    output_folder=None,
    multi_gpu_infer=True,
):
    """Run (optionally distributed) inference and evaluate the results.

    Computes predictions over `data_loader`, gathers them across
    processes, logs timing, optionally saves them to
    `output_folder/predictions.pth`, then calls `evaluate`.

    Returns:
        The evaluation result on the main process; None on workers.
    """
    # convert to a torch.device for efficiency
    device = torch.device(device)
    num_devices = get_world_size()
    dataset = data_loader.dataset
    logger.info('Start evaluation on {} dataset({} images).'.format(
        dataset_name, len(dataset)))
    # Two timers: total wall clock vs. pure forward-pass time.
    total_timer = Timer()
    inference_timer = Timer()
    total_timer.tic()
    predictions = compute_on_dataset(model, data_loader, device,
                                     inference_timer)
    # wait for all processes to complete before measuring the time
    if multi_gpu_infer:
        synchronize()
    total_time = total_timer.toc()
    total_time_str = get_time_str(total_time)
    logger.info(
        'Total run time: {} ({} s / img per device, on {} devices)'.format(
            total_time_str, total_time * num_devices / len(dataset),
            num_devices))
    total_infer_time = get_time_str(inference_timer.total_time)
    logger.info(
        'Model inference time: {} ({} s / img per device, on {} devices)'.
        format(
            total_infer_time,
            inference_timer.total_time * num_devices / len(dataset),
            num_devices,
        ))
    predictions = _accumulate_predictions_from_multiple_gpus(
        predictions, multi_gpu_infer)
    # Only the main process holds the gathered predictions and runs
    # evaluation; workers return early with None.
    if not is_main_process():
        return
    if output_folder:
        torch.save(predictions, os.path.join(output_folder, 'predictions.pth'))
    extra_args = dict(
        box_only=box_only,
        iou_types=iou_types,
        expected_results=expected_results,
        expected_results_sigma_tol=expected_results_sigma_tol,
    )
    return evaluate(dataset=dataset,
                    predictions=predictions,
                    output_folder=output_folder,
                    **extra_args)
15,184 | import datetime
import math
import os
import random
import time
from copy import deepcopy
import numpy as np
import torch
import torch.nn as nn
from loguru import logger
from torch.nn.parallel import DistributedDataParallel as DDP
from damo.apis.detector_inference import inference
from damo.base_models.losses.distill_loss import FeatureLoss
from damo.dataset import build_dataloader, build_dataset
from damo.detectors.detector import build_ddp_model, build_local_model
from damo.utils import (MeterBuffer, get_model_info, get_rank, gpu_mem_usage,
save_checkpoint, setup_logger, synchronize)
from torch.nn import GroupNorm, LayerNorm
from torch.nn.modules.batchnorm import _BatchNorm
def mkdir(path):
    """Create directory `path` (and missing parents) if needed.

    Uses exist_ok=True so the operation is atomic and idempotent; the
    previous check-then-create pattern could raise FileExistsError when
    two workers raced to create the same directory.
    """
    os.makedirs(path, exist_ok=True)
15,185 | import datetime
import math
import os
import random
import time
from copy import deepcopy
import numpy as np
import torch
import torch.nn as nn
from loguru import logger
from torch.nn.parallel import DistributedDataParallel as DDP
from damo.apis.detector_inference import inference
from damo.base_models.losses.distill_loss import FeatureLoss
from damo.dataset import build_dataloader, build_dataset
from damo.detectors.detector import build_ddp_model, build_local_model
from damo.utils import (MeterBuffer, get_model_info, get_rank, gpu_mem_usage,
save_checkpoint, setup_logger, synchronize)
from torch.nn import GroupNorm, LayerNorm
from torch.nn.modules.batchnorm import _BatchNorm
def set_seed(seed):
    """Seed the python, numpy and torch RNGs for reproducible runs."""
    # The three generators are independent; seeding order is irrelevant.
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
15,186 | import functools
import torch
import torch.nn as nn
import torch.nn.functional as F
from ..core.bbox_calculator import bbox_overlaps
def weight_reduce_loss(loss, weight=None, reduction='mean', avg_factor=None):
    """Apply element-wise weight and reduce loss.
    Args:
        loss (Tensor): Element-wise loss.
        weight (Tensor): Element-wise weights.
        reduction (str): Same as built-in losses of PyTorch.
        avg_factor (float): Avarage factor when computing the mean of losses.
    Returns:
        Tensor: Processed loss values.
    """
    # Optional element-wise weighting.
    if weight is not None:
        loss = loss * weight
    # Without an explicit averaging factor, fall back to the plain
    # reduction helper.
    if avg_factor is None:
        return reduce_loss(loss, reduction)
    # With avg_factor, only 'mean' (divide by the factor) and 'none'
    # (pass through) are meaningful.
    if reduction == 'mean':
        return loss.sum() / avg_factor
    if reduction == 'none':
        return loss
    raise ValueError('avg_factor can not be used with reduction="sum"')
The provided code snippet includes necessary dependencies for implementing the `weighted_loss` function. Write a Python function `def weighted_loss(loss_func)` to solve the following problem:
Create a weighted version of a given loss function. To use this decorator, the loss function must have the signature like `loss_func(pred, target, **kwargs)`. The function only needs to compute element-wise loss without any reduction. This decorator will add weight and reduction arguments to the function. The decorated function will have the signature like `loss_func(pred, target, weight=None, reduction='mean', avg_factor=None, **kwargs)`. :Example: >>> import torch >>> @weighted_loss >>> def l1_loss(pred, target): >>> return (pred - target).abs() >>> pred = torch.Tensor([0, 2, 3]) >>> target = torch.Tensor([1, 1, 1]) >>> weight = torch.Tensor([1, 0, 1]) >>> l1_loss(pred, target) tensor(1.3333) >>> l1_loss(pred, target, weight) tensor(1.) >>> l1_loss(pred, target, reduction='none') tensor([1., 1., 2.]) >>> l1_loss(pred, target, weight, avg_factor=2) tensor(1.5000)
Here is the function:
def weighted_loss(loss_func):
    """Create a weighted version of a given loss function.
    To use this decorator, the loss function must have the signature like
    `loss_func(pred, target, **kwargs)`. The function only needs to compute
    element-wise loss without any reduction. This decorator will add weight
    and reduction arguments to the function. The decorated function will have
    the signature like `loss_func(pred, target, weight=None, reduction='mean',
    avg_factor=None, **kwargs)`.
    :Example:
    >>> import torch
    >>> @weighted_loss
    >>> def l1_loss(pred, target):
    >>>     return (pred - target).abs()
    >>> pred = torch.Tensor([0, 2, 3])
    >>> target = torch.Tensor([1, 1, 1])
    >>> weight = torch.Tensor([1, 0, 1])
    >>> l1_loss(pred, target)
    tensor(1.3333)
    >>> l1_loss(pred, target, weight)
    tensor(1.)
    >>> l1_loss(pred, target, reduction='none')
    tensor([1., 1., 2.])
    >>> l1_loss(pred, target, weight, avg_factor=2)
    tensor(1.5000)
    """
    @functools.wraps(loss_func)
    def wrapper(pred,
                target,
                weight=None,
                reduction='mean',
                avg_factor=None,
                **kwargs):
        # Compute the raw element-wise loss, then delegate weighting
        # and reduction to the shared helper.
        elementwise = loss_func(pred, target, **kwargs)
        return weight_reduce_loss(elementwise, weight, reduction, avg_factor)

    return wrapper
15,187 | import functools
import torch
import torch.nn as nn
import torch.nn.functional as F
from ..core.bbox_calculator import bbox_overlaps
def bbox_overlaps(bboxes1, bboxes2, mode='iou', is_aligned=False, eps=1e-6):
    """Calculate overlap between two set of bboxes.
    If ``is_aligned `` is ``False``, then calculate the overlaps between each
    bbox of bboxes1 and bboxes2, otherwise the overlaps between each aligned
    pair of bboxes1 and bboxes2.
    Args:
        bboxes1 (Tensor): shape (B, m, 4) in <x1, y1, x2, y2> format or empty.
        bboxes2 (Tensor): shape (B, n, 4) in <x1, y1, x2, y2> format or empty.
            B indicates the batch dim, in shape (B1, B2, ..., Bn).
            If ``is_aligned `` is ``True``, then m and n must be equal.
        mode (str): "iou" (intersection over union) or "iof" (intersection over
            foreground).
        is_aligned (bool, optional): If True, then m and n must be equal.
            Default False.
        eps (float, optional): A value added to the denominator for numerical
            stability. Default 1e-6.
    Returns:
        Tensor: shape (m, n) if ``is_aligned `` is False else shape (m,)
    Example:
        >>> bboxes1 = torch.FloatTensor([
        >>>     [0, 0, 10, 10],
        >>>     [10, 10, 20, 20],
        >>>     [32, 32, 38, 42],
        >>> ])
        >>> bboxes2 = torch.FloatTensor([
        >>>     [0, 0, 10, 20],
        >>>     [0, 10, 10, 19],
        >>>     [10, 10, 20, 20],
        >>> ])
        >>> overlaps = bbox_overlaps(bboxes1, bboxes2)
        >>> assert overlaps.shape == (3, 3)
        >>> overlaps = bbox_overlaps(bboxes1, bboxes2, is_aligned=True)
        >>> assert overlaps.shape == (3, )
    Example:
        >>> empty = torch.empty(0, 4)
        >>> nonempty = torch.FloatTensor([[0, 0, 10, 9]])
        >>> assert tuple(bbox_overlaps(empty, nonempty).shape) == (0, 1)
        >>> assert tuple(bbox_overlaps(nonempty, empty).shape) == (1, 0)
        >>> assert tuple(bbox_overlaps(empty, empty).shape) == (0, 0)
    """
    assert mode in ['iou', 'iof', 'giou'], f'Unsupported mode {mode}'
    # Either the boxes are empty or the length of boxes's last dimenstion is 4
    assert (bboxes1.size(-1) == 4 or bboxes1.size(0) == 0)
    assert (bboxes2.size(-1) == 4 or bboxes2.size(0) == 0)
    # Batch dim must be the same
    # Batch dim: (B1, B2, ... Bn)
    assert bboxes1.shape[:-2] == bboxes2.shape[:-2]
    batch_shape = bboxes1.shape[:-2]
    rows = bboxes1.size(-2)
    cols = bboxes2.size(-2)
    if is_aligned:
        assert rows == cols
    # Early exit for empty inputs: return an empty tensor of the
    # correct output shape.
    if rows * cols == 0:
        if is_aligned:
            return bboxes1.new(batch_shape + (rows, ))
        else:
            return bboxes1.new(batch_shape + (rows, cols))
    area1 = (bboxes1[..., 2] - bboxes1[..., 0]) * (bboxes1[..., 3] -
                                                   bboxes1[..., 1])
    area2 = (bboxes2[..., 2] - bboxes2[..., 0]) * (bboxes2[..., 3] -
                                                   bboxes2[..., 1])
    if is_aligned:
        # Pairwise (row i with row i): no broadcasting needed.
        lt = torch.max(bboxes1[..., :2], bboxes2[..., :2])  # [B, rows, 2]
        rb = torch.min(bboxes1[..., 2:], bboxes2[..., 2:])  # [B, rows, 2]
        wh = (rb - lt).clamp(min=0)  # [B, rows, 2]
        overlap = wh[..., 0] * wh[..., 1]
        if mode in ['iou', 'giou']:
            union = area1 + area2 - overlap
        else:
            # 'iof': normalize by the foreground (bboxes1) area only.
            union = area1
        if mode == 'giou':
            enclosed_lt = torch.min(bboxes1[..., :2], bboxes2[..., :2])
            enclosed_rb = torch.max(bboxes1[..., 2:], bboxes2[..., 2:])
    else:
        # All-pairs: broadcast rows of bboxes1 against cols of bboxes2.
        lt = torch.max(bboxes1[..., :, None, :2],
                       bboxes2[..., None, :, :2])  # [B, rows, cols, 2]
        rb = torch.min(bboxes1[..., :, None, 2:],
                       bboxes2[..., None, :, 2:])  # [B, rows, cols, 2]
        wh = (rb - lt).clamp(min=0)  # [B, rows, cols, 2]
        overlap = wh[..., 0] * wh[..., 1]
        if mode in ['iou', 'giou']:
            union = area1[..., None] + area2[..., None, :] - overlap
        else:
            union = area1[..., None]
        if mode == 'giou':
            enclosed_lt = torch.min(bboxes1[..., :, None, :2],
                                    bboxes2[..., None, :, :2])
            enclosed_rb = torch.max(bboxes1[..., :, None, 2:],
                                    bboxes2[..., None, :, 2:])
    # Guard against division by zero.
    eps = union.new_tensor([eps])
    union = torch.max(union, eps)
    ious = overlap / union
    if mode in ['iou', 'iof']:
        return ious
    # calculate gious
    enclose_wh = (enclosed_rb - enclosed_lt).clamp(min=0)
    enclose_area = enclose_wh[..., 0] * enclose_wh[..., 1]
    enclose_area = torch.max(enclose_area, eps)
    gious = ious - (enclose_area - union) / enclose_area
    return gious
The provided code snippet includes necessary dependencies for implementing the `giou_loss` function. Write a Python function `def giou_loss(pred, target, eps=1e-7)` to solve the following problem:
r"""`Generalized Intersection over Union: A Metric and A Loss for Bounding Box Regression <https://arxiv.org/abs/1902.09630>`_. Args: pred (torch.Tensor): Predicted bboxes of format (x1, y1, x2, y2), shape (n, 4). target (torch.Tensor): Corresponding gt bboxes, shape (n, 4). eps (float): Eps to avoid log(0). Return: Tensor: Loss tensor.
Here is the function:
def giou_loss(pred, target, eps=1e-7):
    r"""`Generalized Intersection over Union: A Metric and A Loss for Bounding
    Box Regression <https://arxiv.org/abs/1902.09630>`_.
    Args:
        pred (torch.Tensor): Predicted bboxes of format (x1, y1, x2, y2),
            shape (n, 4).
        target (torch.Tensor): Corresponding gt bboxes, shape (n, 4).
        eps (float): Eps to avoid log(0).
    Return:
        Tensor: Loss tensor.
    """
    # GIoU is in (-1, 1]; the loss is its distance from a perfect match.
    return 1 - bbox_overlaps(pred, target, mode='giou', is_aligned=True,
                             eps=eps)
15,188 | import functools
import torch
import torch.nn as nn
import torch.nn.functional as F
from ..core.bbox_calculator import bbox_overlaps
The provided code snippet includes necessary dependencies for implementing the `distribution_focal_loss` function. Write a Python function `def distribution_focal_loss(pred, label)` to solve the following problem:
r"""Distribution Focal Loss (DFL) is from `Generalized Focal Loss: Learning Qualified and Distributed Bounding Boxes for Dense Object Detection <https://arxiv.org/abs/2006.04388>`_. Args: pred (torch.Tensor): Predicted general distribution of bounding boxes (before softmax) with shape (N, n+1), n is the max value of the integral set `{0, ..., n}` in paper. label (torch.Tensor): Target distance label for bounding boxes with shape (N,). Returns: torch.Tensor: Loss tensor with shape (N,).
Here is the function:
def distribution_focal_loss(pred, label):
r"""Distribution Focal Loss (DFL) is from `Generalized Focal Loss: Learning
Qualified and Distributed Bounding Boxes for Dense Object Detection
<https://arxiv.org/abs/2006.04388>`_.
Args:
pred (torch.Tensor): Predicted general distribution of bounding boxes
(before softmax) with shape (N, n+1), n is the max value of the
integral set `{0, ..., n}` in paper.
label (torch.Tensor): Target distance label for bounding boxes with
shape (N,).
Returns:
torch.Tensor: Loss tensor with shape (N,).
"""
dis_left = label.long()
dis_right = dis_left + 1
weight_left = dis_right.float() - label
weight_right = label - dis_left.float()
loss = F.cross_entropy(pred, dis_left, reduction='none') * weight_left \
+ F.cross_entropy(pred, dis_right, reduction='none') * weight_right
return loss | r"""Distribution Focal Loss (DFL) is from `Generalized Focal Loss: Learning Qualified and Distributed Bounding Boxes for Dense Object Detection <https://arxiv.org/abs/2006.04388>`_. Args: pred (torch.Tensor): Predicted general distribution of bounding boxes (before softmax) with shape (N, n+1), n is the max value of the integral set `{0, ..., n}` in paper. label (torch.Tensor): Target distance label for bounding boxes with shape (N,). Returns: torch.Tensor: Loss tensor with shape (N,). |
15,189 | import functools
import torch
import torch.nn as nn
import torch.nn.functional as F
from ..core.bbox_calculator import bbox_overlaps
The provided code snippet includes necessary dependencies for implementing the `quality_focal_loss` function. Write a Python function `def quality_focal_loss(pred, target, beta=2.0, use_sigmoid=True)` to solve the following problem:
r"""Quality Focal Loss (QFL) is from `Generalized Focal Loss: Learning Qualified and Distributed Bounding Boxes for Dense Object Detection <https://arxiv.org/abs/2006.04388>`_. Args: pred (torch.Tensor): Predicted joint representation of classification and quality (IoU) estimation with shape (N, C), C is the number of classes. target (tuple([torch.Tensor])): Target category label with shape (N,) and target quality label with shape (N,). beta (float): The beta parameter for calculating the modulating factor. Defaults to 2.0. Returns: torch.Tensor: Loss tensor with shape (N,).
Here is the function:
def quality_focal_loss(pred, target, beta=2.0, use_sigmoid=True):
r"""Quality Focal Loss (QFL) is from `Generalized Focal Loss: Learning
Qualified and Distributed Bounding Boxes for Dense Object Detection
<https://arxiv.org/abs/2006.04388>`_.
Args:
pred (torch.Tensor): Predicted joint representation of classification
and quality (IoU) estimation with shape (N, C), C is the number of
classes.
target (tuple([torch.Tensor])): Target category label with shape (N,)
and target quality label with shape (N,).
beta (float): The beta parameter for calculating the modulating factor.
Defaults to 2.0.
Returns:
torch.Tensor: Loss tensor with shape (N,).
"""
assert len(target) == 2, """target for QFL must be a tuple of two elements,
including category label and quality label, respectively"""
# label denotes the category id, score denotes the quality score
label, score = target
if use_sigmoid:
func = F.binary_cross_entropy_with_logits
else:
func = F.binary_cross_entropy
# negatives are supervised by 0 quality score
pred_sigmoid = pred.sigmoid() if use_sigmoid else pred
scale_factor = pred_sigmoid # 8400, 81
zerolabel = scale_factor.new_zeros(pred.shape)
loss = func(pred, zerolabel, reduction='none') * scale_factor.pow(beta)
bg_class_ind = pred.size(1)
pos = ((label >= 0) &
(label < bg_class_ind)).nonzero(as_tuple=False).squeeze(1)
pos_label = label[pos].long()
# positives are supervised by bbox quality (IoU) score
scale_factor = score[pos] - pred_sigmoid[pos, pos_label]
loss[pos,
pos_label] = func(pred[pos, pos_label], score[pos],
reduction='none') * scale_factor.abs().pow(beta)
loss = loss.sum(dim=1, keepdim=False)
return loss | r"""Quality Focal Loss (QFL) is from `Generalized Focal Loss: Learning Qualified and Distributed Bounding Boxes for Dense Object Detection <https://arxiv.org/abs/2006.04388>`_. Args: pred (torch.Tensor): Predicted joint representation of classification and quality (IoU) estimation with shape (N, C), C is the number of classes. target (tuple([torch.Tensor])): Target category label with shape (N,) and target quality label with shape (N,). beta (float): The beta parameter for calculating the modulating factor. Defaults to 2.0. Returns: torch.Tensor: Loss tensor with shape (N,). |
15,190 | import torch
import torch.nn as nn
from ..core.ops import Focus, RepConv, SPPBottleneck, get_activation
class TinyNAS(nn.Module):
def __init__(self,
structure_info=None,
out_indices=[2, 4, 5],
with_spp=False,
use_focus=False,
act='silu',
reparam=False):
super(TinyNAS, self).__init__()
self.out_indices = out_indices
self.block_list = nn.ModuleList()
for idx, block_info in enumerate(structure_info):
the_block_class = block_info['class']
if the_block_class == 'ConvKXBNRELU':
if use_focus:
the_block = Focus(block_info['in'],
block_info['out'],
block_info['k'],
act=act)
else:
the_block = ConvKXBNRELU(block_info['in'],
block_info['out'],
block_info['k'],
block_info['s'],
act=act)
self.block_list.append(the_block)
elif the_block_class == 'SuperResConvK1KX':
spp = with_spp if idx == len(structure_info) - 1 else False
the_block = SuperResStem(block_info['in'],
block_info['out'],
block_info['btn'],
block_info['k'],
block_info['s'],
block_info['L'],
spp,
act=act,
reparam=reparam,
block_type='k1kx')
self.block_list.append(the_block)
elif the_block_class == 'SuperResConvKXKX':
spp = with_spp if idx == len(structure_info) - 1 else False
the_block = SuperResStem(block_info['in'],
block_info['out'],
block_info['btn'],
block_info['k'],
block_info['s'],
block_info['L'],
spp,
act=act,
reparam=reparam,
block_type='kxkx')
self.block_list.append(the_block)
else:
raise NotImplementedError
def init_weights(self, pretrain=None):
pass
def forward(self, x):
output = x
stage_feature_list = []
for idx, block in enumerate(self.block_list):
output = block(output)
if idx in self.out_indices:
stage_feature_list.append(output)
return stage_feature_list
def load_tinynas_net(backbone_cfg):
# load masternet model to path
import ast
struct_str = ''.join([x.strip() for x in backbone_cfg.net_structure_str])
struct_info = ast.literal_eval(struct_str)
for layer in struct_info:
if 'nbitsA' in layer:
del layer['nbitsA']
if 'nbitsW' in layer:
del layer['nbitsW']
model = TinyNAS(structure_info=struct_info,
out_indices=backbone_cfg.out_indices,
with_spp=backbone_cfg.with_spp,
use_focus=backbone_cfg.use_focus,
act=backbone_cfg.act,
reparam=backbone_cfg.reparam)
return model | null |
15,191 | import torch
import torch.nn as nn
import torch.nn.functional as F
import math
from ..core.ops import Focus, RepConv, SPPBottleneck, get_activation, DepthwiseConv
from damo.utils import make_divisible
def depthwise_conv(i, o, kernel_size, stride=1, padding=0, bias=False):
return nn.Conv2d(i, o, kernel_size, stride, padding, bias=bias, groups=i) | null |
15,192 | import torch
import torch.nn as nn
import torch.nn.functional as F
import math
from ..core.ops import Focus, RepConv, SPPBottleneck, get_activation, DepthwiseConv
from damo.utils import make_divisible
def channel_shuffle(x, groups):
batchsize, num_channels, height, width = x.data.size()
channels_per_group = num_channels // groups
x = x.view(batchsize, groups, channels_per_group, height, width)
x = torch.transpose(x, 1, 2).contiguous()
x = x.view(batchsize, -1, height, width)
return x | null |
15,193 | import torch
import torch.nn as nn
import torch.nn.functional as F
import math
from ..core.ops import Focus, RepConv, SPPBottleneck, get_activation, DepthwiseConv
from damo.utils import make_divisible
class TinyNAS(nn.Module):
def __init__(self,
structure_info=None,
out_indices=[2, 4, 5],
with_spp=False,
use_focus=False,
act='silu',
reparam=False,
depthwise=False,
use_se=False,):
super(TinyNAS, self).__init__()
self.out_indices = out_indices
self.block_list = nn.ModuleList()
for idx, block_info in enumerate(structure_info):
the_block_class = block_info['class']
if the_block_class == 'ConvKXBNRELU':
if use_focus:
the_block = Focus(block_info['in'],
block_info['out'],
block_info['k'],
act=act)
else:
the_block = ConvKXBNRELU(3,
block_info['out'],
block_info['k'],
2,
act=act)
self.block_list.append(the_block)
elif the_block_class == 'SuperResConvK1KX':
spp = with_spp if idx == len(structure_info) - 1 else False
the_block = SuperResStem(block_info['in'],
block_info['out'],
block_info['btn'],
block_info['k'],
block_info['s'],
block_info['L'],
spp,
act=act,
reparam=reparam,
block_type='k1kx',
depthwise=depthwise,
use_se=use_se,
block_pos=idx)
self.block_list.append(the_block)
elif the_block_class == 'SuperResConvKXKX':
spp = with_spp if idx == len(structure_info) - 1 else False
the_block = SuperResStem(block_info['in'],
block_info['out'],
block_info['btn'],
block_info['k'],
block_info['s'],
block_info['L'],
spp,
act=act,
reparam=reparam,
block_type='kxkx',
depthwise=depthwise,
use_se=use_se)
self.block_list.append(the_block)
else:
raise NotImplementedError
def init_weights(self, pretrain=None):
for name, m in self.named_modules():
if isinstance(m, nn.Conv2d):
if "first" in name:
nn.init.normal_(m.weight, 0, 0.01)
else:
nn.init.normal_(m.weight, 0, 1.0 / m.weight.shape[1])
if m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.BatchNorm2d):
nn.init.constant_(m.weight, 1)
if m.bias is not None:
nn.init.constant_(m.bias, 0.0001)
nn.init.constant_(m.running_mean, 0)
def forward(self, x):
output = x
stage_feature_list = []
for idx, block in enumerate(self.block_list):
output = block(output)
if idx in self.out_indices:
stage_feature_list.append(output)
return stage_feature_list
def load_tinynas_net(backbone_cfg):
# load masternet model to path
import ast
struct_str = ''.join([x.strip() for x in backbone_cfg.net_structure_str])
struct_info = ast.literal_eval(struct_str)
for layer in struct_info:
if 'nbitsA' in layer:
del layer['nbitsA']
if 'nbitsW' in layer:
del layer['nbitsW']
model = TinyNAS(structure_info=struct_info,
out_indices=backbone_cfg.out_indices,
with_spp=backbone_cfg.with_spp,
use_focus=backbone_cfg.use_focus,
act=backbone_cfg.act,
reparam=backbone_cfg.reparam,
depthwise=backbone_cfg.depthwise,
use_se=backbone_cfg.use_se,)
return model | null |
15,194 | import torch
import torch.nn as nn
from ..core.ops import Focus, RepConv, SPPBottleneck, get_activation
class TinyNAS(nn.Module):
def __init__(self,
structure_info=None,
out_indices=[2, 3, 4],
with_spp=False,
use_focus=False,
act='silu',
reparam=False):
super(TinyNAS, self).__init__()
self.out_indices = out_indices
self.block_list = nn.ModuleList()
self.stride_list = []
for idx, block_info in enumerate(structure_info):
the_block_class = block_info['class']
if the_block_class == 'ConvKXBNRELU':
if use_focus and idx == 0:
the_block = Focus(block_info['in'],
block_info['out'],
block_info['k'],
act=act)
else:
the_block = ConvKXBNRELU(block_info['in'],
block_info['out'],
block_info['k'],
block_info['s'],
act=act)
elif the_block_class == 'SuperResConvK1KX':
the_block = CSPStem(block_info['in'],
block_info['out'],
block_info['btn'],
block_info['s'],
block_info['k'],
block_info['L'],
act=act,
reparam=reparam,
block_type='k1kx')
elif the_block_class == 'SuperResConvKXKX':
the_block = CSPStem(block_info['in'],
block_info['out'],
block_info['btn'],
block_info['s'],
block_info['k'],
block_info['L'],
act=act,
reparam=reparam,
block_type='kxkx')
else:
raise NotImplementedError
self.block_list.append(the_block)
self.csp_stage = nn.ModuleList()
self.csp_stage.append(self.block_list[0])
self.csp_stage.append(CSPWrapper(self.block_list[1]))
self.csp_stage.append(CSPWrapper(self.block_list[2]))
self.csp_stage.append(
CSPWrapper((self.block_list[3], self.block_list[4])))
self.csp_stage.append(CSPWrapper(self.block_list[5],
with_spp=with_spp))
del self.block_list
def init_weights(self, pretrain=None):
pass
def forward(self, x):
output = x
stage_feature_list = []
for idx, block in enumerate(self.csp_stage):
output = block(output)
if idx in self.out_indices:
stage_feature_list.append(output)
return stage_feature_list
def load_tinynas_net(backbone_cfg):
# load masternet model to path
import ast
struct_str = ''.join([x.strip() for x in backbone_cfg.net_structure_str])
struct_info = ast.literal_eval(struct_str)
for layer in struct_info:
if 'nbitsA' in layer:
del layer['nbitsA']
if 'nbitsW' in layer:
del layer['nbitsW']
model = TinyNAS(structure_info=struct_info,
out_indices=backbone_cfg.out_indices,
with_spp=backbone_cfg.with_spp,
use_focus=backbone_cfg.use_focus,
act=backbone_cfg.act,
reparam=backbone_cfg.reparam)
return model | null |
15,195 | from functools import partial
import torch
import torch.distributed as dist
import torch.nn as nn
The provided code snippet includes necessary dependencies for implementing the `multi_apply` function. Write a Python function `def multi_apply(func, *args, **kwargs)` to solve the following problem:
Apply function to a list of arguments. Note: This function applies the ``func`` to multiple inputs and map the multiple outputs of the ``func`` into different list. Each list contains the same type of outputs corresponding to different inputs. Args: func (Function): A function that will be applied to a list of arguments Returns: tuple(list): A tuple containing multiple list, each list contains \ a kind of returned results by the function
Here is the function:
def multi_apply(func, *args, **kwargs):
"""Apply function to a list of arguments.
Note:
This function applies the ``func`` to multiple inputs and
map the multiple outputs of the ``func`` into different
list. Each list contains the same type of outputs corresponding
to different inputs.
Args:
func (Function): A function that will be applied to a list of
arguments
Returns:
tuple(list): A tuple containing multiple list, each list contains \
a kind of returned results by the function
"""
pfunc = partial(func, **kwargs) if kwargs else func
map_results = map(pfunc, *args)
return tuple(map(list, zip(*map_results))) | Apply function to a list of arguments. Note: This function applies the ``func`` to multiple inputs and map the multiple outputs of the ``func`` into different list. Each list contains the same type of outputs corresponding to different inputs. Args: func (Function): A function that will be applied to a list of arguments Returns: tuple(list): A tuple containing multiple list, each list contains \ a kind of returned results by the function |
15,196 | from functools import partial
import torch
import torch.distributed as dist
import torch.nn as nn
The provided code snippet includes necessary dependencies for implementing the `unmap` function. Write a Python function `def unmap(data, count, inds, fill=0)` to solve the following problem:
Unmap a subset of item (data) back to the original set of items (of size count)
Here is the function:
def unmap(data, count, inds, fill=0):
"""Unmap a subset of item (data) back to the original set of items (of size
count)"""
if data.dim() == 1:
ret = data.new_full((count, ), fill)
ret[inds.type(torch.bool)] = data
else:
new_size = (count, ) + data.size()[1:]
ret = data.new_full(new_size, fill)
ret[inds.type(torch.bool), :] = data
return ret | Unmap a subset of item (data) back to the original set of items (of size count) |
15,197 | from functools import partial
import torch
import torch.distributed as dist
import torch.nn as nn
The provided code snippet includes necessary dependencies for implementing the `reduce_mean` function. Write a Python function `def reduce_mean(tensor)` to solve the following problem:
Obtain the mean of tensor on different GPUs.
Here is the function:
def reduce_mean(tensor):
""""Obtain the mean of tensor on different GPUs."""
if not (dist.is_available() and dist.is_initialized()):
return tensor
tensor = tensor.clone()
dist.all_reduce(tensor.div_(dist.get_world_size()), op=dist.ReduceOp.SUM)
return tensor | Obtain the mean of tensor on different GPUs. |
15,198 | from functools import partial
import torch
import torch.distributed as dist
import torch.nn as nn
The provided code snippet includes necessary dependencies for implementing the `images_to_levels` function. Write a Python function `def images_to_levels(target, num_levels)` to solve the following problem:
Convert targets by image to targets by feature level. [target_img0, target_img1] -> [target_level0, target_level1, ...]
Here is the function:
def images_to_levels(target, num_levels):
"""Convert targets by image to targets by feature level.
[target_img0, target_img1] -> [target_level0, target_level1, ...]
"""
target = torch.stack(target, 0)
level_targets = []
start = 0
for n in num_levels:
end = start + n
# level_targets.append(target[:, start:end].squeeze(0))
level_targets.append(target[:, start:end])
start = end
return level_targets | Convert targets by image to targets by feature level. [target_img0, target_img1] -> [target_level0, target_level1, ...] |
15,199 | import numpy as np
import torch
import math
import torch.nn as nn
import torch.nn.functional as F
from .weight_init import kaiming_init, constant_init
from damo.utils import make_divisible
class SiLU(nn.Module):
def forward(x):
class Swish(nn.Module):
def __init__(self, inplace=True):
def forward(self, x):
def get_activation(name='silu', inplace=True):
if name is None:
return nn.Identity()
if isinstance(name, str):
if name == 'silu':
module = nn.SiLU(inplace=inplace)
elif name == 'relu':
module = nn.ReLU(inplace=inplace)
elif name == 'lrelu':
module = nn.LeakyReLU(0.1, inplace=inplace)
elif name == 'swish':
module = Swish(inplace=inplace)
elif name == 'hardsigmoid':
module = nn.Hardsigmoid(inplace=inplace)
elif name == 'identity':
module = nn.Identity()
else:
raise AttributeError('Unsupported act type: {}'.format(name))
return module
elif isinstance(name, nn.Module):
return name
else:
raise AttributeError('Unsupported act type: {}'.format(name)) | null |
15,200 | import numpy as np
import torch
import math
import torch.nn as nn
import torch.nn.functional as F
from .weight_init import kaiming_init, constant_init
from damo.utils import make_divisible
def get_norm(name, out_channels):
if name == 'bn':
module = nn.BatchNorm2d(out_channels)
elif name == 'gn':
module = nn.GroupNorm(out_channels)
else:
raise NotImplementedError
return module | null |
15,201 | import numpy as np
import torch
import math
import torch.nn as nn
import torch.nn.functional as F
from .weight_init import kaiming_init, constant_init
from damo.utils import make_divisible
def depthwise_conv(i, o, kernel_size, stride=1, padding=0, bias=False):
return nn.Conv2d(i, o, kernel_size, stride, padding, bias=bias, groups=i) | null |
15,202 | import numpy as np
import torch
import math
import torch.nn as nn
import torch.nn.functional as F
from .weight_init import kaiming_init, constant_init
from damo.utils import make_divisible
The provided code snippet includes necessary dependencies for implementing the `conv_bn` function. Write a Python function `def conv_bn(in_channels, out_channels, kernel_size, stride, padding, groups=1)` to solve the following problem:
Basic cell for rep-style block, including conv and bn
Here is the function:
def conv_bn(in_channels, out_channels, kernel_size, stride, padding, groups=1):
'''Basic cell for rep-style block, including conv and bn'''
result = nn.Sequential()
result.add_module(
'conv',
nn.Conv2d(in_channels=in_channels,
out_channels=out_channels,
kernel_size=kernel_size,
stride=stride,
padding=padding,
groups=groups,
bias=False))
result.add_module('bn', nn.BatchNorm2d(num_features=out_channels))
return result | Basic cell for rep-style block, including conv and bn |
15,203 | import torch
def batched_nms(boxes, scores, idxs, nms_cfg, class_agnostic=False):
"""Performs non-maximum suppression in a batched fashion.
Modified from https://github.com/pytorch/vision/blob
/505cd6957711af790211896d32b40291bea1bc21/torchvision/ops/boxes.py#L39.
In order to perform NMS independently per class, we add an offset to all
the boxes. The offset is dependent only on the class idx, and is large
enough so that boxes from different classes do not overlap.
Arguments:
boxes (torch.Tensor): boxes in shape (N, 4).
scores (torch.Tensor): scores in shape (N, ).
idxs (torch.Tensor): each index value correspond to a bbox cluster,
and NMS will not be applied between elements of different idxs,
shape (N, ).
nms_cfg (dict): specify nms type and other parameters like iou_thr.
Possible keys includes the following.
- iou_thr (float): IoU threshold used for NMS.
- split_thr (float): threshold number of boxes. In some cases the
number of boxes is large (e.g., 200k). To avoid OOM during
training, the users could set `split_thr` to a small value.
If the number of boxes is greater than the threshold, it will
perform NMS on each group of boxes separately and sequentially.
Defaults to 10000.
class_agnostic (bool): if true, nms is class agnostic,
i.e. IoU thresholding happens over all boxes,
regardless of the predicted class.
Returns:
tuple: kept dets and indice.
"""
nms_cfg_ = nms_cfg.copy()
class_agnostic = nms_cfg_.pop('class_agnostic', class_agnostic)
if class_agnostic:
boxes_for_nms = boxes
else:
max_coordinate = boxes.max()
offsets = idxs.to(boxes) * (max_coordinate + torch.tensor(1).to(boxes))
boxes_for_nms = boxes + offsets[:, None]
nms_type = nms_cfg_.pop('type', 'nms')
nms_op = eval(nms_type)
split_thr = nms_cfg_.pop('split_thr', 10000)
# Won't split to multiple nms nodes when exporting to onnx
if boxes_for_nms.shape[0] < split_thr or torch.onnx.is_in_onnx_export():
dets, keep = nms_op(boxes_for_nms, scores, **nms_cfg_)
boxes = boxes[keep]
# -1 indexing works abnormal in TensorRT
# This assumes `dets` has 5 dimensions where
# the last dimension is score.
# TODO: more elegant way to handle the dimension issue.
# Some type of nms would reweight the score, such as SoftNMS
scores = dets[:, 4]
else:
max_num = nms_cfg_.pop('max_num', -1)
total_mask = scores.new_zeros(scores.size(), dtype=torch.bool)
# Some type of nms would reweight the score, such as SoftNMS
scores_after_nms = scores.new_zeros(scores.size())
for id in torch.unique(idxs):
mask = (idxs == id).nonzero(as_tuple=False).view(-1)
dets, keep = nms_op(boxes_for_nms[mask], scores[mask], **nms_cfg_)
total_mask[mask[keep]] = True
scores_after_nms[mask[keep]] = dets[:, -1]
keep = total_mask.nonzero(as_tuple=False).view(-1)
scores, inds = scores_after_nms[keep].sort(descending=True)
keep = keep[inds]
boxes = boxes[keep]
if max_num > 0:
keep = keep[:max_num]
boxes = boxes[:max_num]
scores = scores[:max_num]
return torch.cat([boxes, scores[:, None]], -1), keep
The provided code snippet includes necessary dependencies for implementing the `multiclass_nms` function. Write a Python function `def multiclass_nms(multi_bboxes, multi_scores, score_thr, nms_cfg, max_num=-1, score_factors=None)` to solve the following problem:
NMS for multi-class bboxes. Args: multi_bboxes (Tensor): shape (n, #class*4) or (n, 4) multi_scores (Tensor): shape (n, #class), where the last column contains scores of the background class, but this will be ignored. score_thr (float): bbox threshold, bboxes with scores lower than it will not be considered. nms_thr (float): NMS IoU threshold max_num (int): if there are more than max_num bboxes after NMS, only top max_num will be kept. score_factors (Tensor): The factors multiplied to scores before applying NMS Returns: tuple: (bboxes, labels), tensors of shape (k, 5) and (k, 1). Labels \ are 0-based.
Here is the function:
def multiclass_nms(multi_bboxes,
multi_scores,
score_thr,
nms_cfg,
max_num=-1,
score_factors=None):
"""NMS for multi-class bboxes.
Args:
multi_bboxes (Tensor): shape (n, #class*4) or (n, 4)
multi_scores (Tensor): shape (n, #class), where the last column
contains scores of the background class, but this will be ignored.
score_thr (float): bbox threshold, bboxes with scores lower than it
will not be considered.
nms_thr (float): NMS IoU threshold
max_num (int): if there are more than max_num bboxes after NMS,
only top max_num will be kept.
score_factors (Tensor): The factors multiplied to scores before
applying NMS
Returns:
tuple: (bboxes, labels), tensors of shape (k, 5) and (k, 1). Labels \
are 0-based.
"""
num_classes = multi_scores.size(1) - 1
# exclude background category
if multi_bboxes.shape[1] > 4:
bboxes = multi_bboxes.view(multi_scores.size(0), -1, 4)
else:
bboxes = multi_bboxes[:, None].expand(multi_scores.size(0),
num_classes, 4)
scores = multi_scores[:, :-1]
# filter out boxes with low scores
valid_mask = scores > score_thr
# We use masked_select for ONNX exporting purpose,
# which is equivalent to bboxes = bboxes[valid_mask]
# (TODO): as ONNX does not support repeat now,
# we have to use this ugly code
bboxes = torch.masked_select(
bboxes,
torch.stack((valid_mask, valid_mask, valid_mask, valid_mask),
-1)).view(-1, 4)
if score_factors is not None:
scores = scores * score_factors[:, None]
scores = torch.masked_select(scores, valid_mask)
labels = valid_mask.nonzero(as_tuple=False)[:, 1]
if bboxes.numel() == 0:
bboxes = multi_bboxes.new_zeros((0, 5))
labels = multi_bboxes.new_zeros((0, ), dtype=torch.long)
if torch.onnx.is_in_onnx_export():
raise RuntimeError('[ONNX Error] Can not record NMS '
'as it has not been executed this time')
return bboxes, labels
dets, keep = batched_nms(bboxes, scores, labels, nms_cfg)
if max_num > 0:
dets = dets[:max_num]
keep = keep[:max_num]
return dets, labels[keep] | NMS for multi-class bboxes. Args: multi_bboxes (Tensor): shape (n, #class*4) or (n, 4) multi_scores (Tensor): shape (n, #class), where the last column contains scores of the background class, but this will be ignored. score_thr (float): bbox threshold, bboxes with scores lower than it will not be considered. nms_thr (float): NMS IoU threshold max_num (int): if there are more than max_num bboxes after NMS, only top max_num will be kept. score_factors (Tensor): The factors multiplied to scores before applying NMS Returns: tuple: (bboxes, labels), tensors of shape (k, 5) and (k, 1). Labels \ are 0-based. |
15,204 | import torch
def bbox_overlaps(bboxes1, bboxes2, mode='iou', is_aligned=False, eps=1e-6):
"""Calculate overlap between two set of bboxes.
If ``is_aligned `` is ``False``, then calculate the overlaps between each
bbox of bboxes1 and bboxes2, otherwise the overlaps between each aligned
pair of bboxes1 and bboxes2.
Args:
bboxes1 (Tensor): shape (B, m, 4) in <x1, y1, x2, y2> format or empty.
bboxes2 (Tensor): shape (B, n, 4) in <x1, y1, x2, y2> format or empty.
B indicates the batch dim, in shape (B1, B2, ..., Bn).
If ``is_aligned `` is ``True``, then m and n must be equal.
mode (str): "iou" (intersection over union) or "iof" (intersection over
foreground).
is_aligned (bool, optional): If True, then m and n must be equal.
Default False.
eps (float, optional): A value added to the denominator for numerical
stability. Default 1e-6.
Returns:
Tensor: shape (m, n) if ``is_aligned `` is False else shape (m,)
Example:
>>> bboxes1 = torch.FloatTensor([
>>> [0, 0, 10, 10],
>>> [10, 10, 20, 20],
>>> [32, 32, 38, 42],
>>> ])
>>> bboxes2 = torch.FloatTensor([
>>> [0, 0, 10, 20],
>>> [0, 10, 10, 19],
>>> [10, 10, 20, 20],
>>> ])
>>> overlaps = bbox_overlaps(bboxes1, bboxes2)
>>> assert overlaps.shape == (3, 3)
>>> overlaps = bbox_overlaps(bboxes1, bboxes2, is_aligned=True)
>>> assert overlaps.shape == (3, )
Example:
>>> empty = torch.empty(0, 4)
>>> nonempty = torch.FloatTensor([[0, 0, 10, 9]])
>>> assert tuple(bbox_overlaps(empty, nonempty).shape) == (0, 1)
>>> assert tuple(bbox_overlaps(nonempty, empty).shape) == (1, 0)
>>> assert tuple(bbox_overlaps(empty, empty).shape) == (0, 0)
"""
assert mode in ['iou', 'iof', 'giou'], f'Unsupported mode {mode}'
# Either the boxes are empty or the length of boxes's last dimenstion is 4
assert (bboxes1.size(-1) == 4 or bboxes1.size(0) == 0)
assert (bboxes2.size(-1) == 4 or bboxes2.size(0) == 0)
# Batch dim must be the same
# Batch dim: (B1, B2, ... Bn)
assert bboxes1.shape[:-2] == bboxes2.shape[:-2]
batch_shape = bboxes1.shape[:-2]
rows = bboxes1.size(-2)
cols = bboxes2.size(-2)
if is_aligned:
assert rows == cols
if rows * cols == 0:
if is_aligned:
return bboxes1.new(batch_shape + (rows, ))
else:
return bboxes1.new(batch_shape + (rows, cols))
area1 = (bboxes1[..., 2] - bboxes1[..., 0]) * (bboxes1[..., 3] -
bboxes1[..., 1])
area2 = (bboxes2[..., 2] - bboxes2[..., 0]) * (bboxes2[..., 3] -
bboxes2[..., 1])
if is_aligned:
lt = torch.max(bboxes1[..., :2], bboxes2[..., :2]) # [B, rows, 2]
rb = torch.min(bboxes1[..., 2:], bboxes2[..., 2:]) # [B, rows, 2]
wh = (rb - lt).clamp(min=0) # [B, rows, 2]
overlap = wh[..., 0] * wh[..., 1]
if mode in ['iou', 'giou']:
union = area1 + area2 - overlap
else:
union = area1
if mode == 'giou':
enclosed_lt = torch.min(bboxes1[..., :2], bboxes2[..., :2])
enclosed_rb = torch.max(bboxes1[..., 2:], bboxes2[..., 2:])
else:
lt = torch.max(bboxes1[..., :, None, :2],
bboxes2[..., None, :, :2]) # [B, rows, cols, 2]
rb = torch.min(bboxes1[..., :, None, 2:],
bboxes2[..., None, :, 2:]) # [B, rows, cols, 2]
wh = (rb - lt).clamp(min=0) # [B, rows, cols, 2]
overlap = wh[..., 0] * wh[..., 1]
if mode in ['iou', 'giou']:
union = area1[..., None] + area2[..., None, :] - overlap
else:
union = area1[..., None]
if mode == 'giou':
enclosed_lt = torch.min(bboxes1[..., :, None, :2],
bboxes2[..., None, :, :2])
enclosed_rb = torch.max(bboxes1[..., :, None, 2:],
bboxes2[..., None, :, 2:])
eps = union.new_tensor([eps])
union = torch.max(union, eps)
ious = overlap / union
if mode in ['iou', 'iof']:
return ious
# calculate gious
enclose_wh = (enclosed_rb - enclosed_lt).clamp(min=0)
enclose_area = enclose_wh[..., 0] * enclose_wh[..., 1]
enclose_area = torch.max(enclose_area, eps)
gious = ious - (enclose_area - union) / enclose_area
return gious
The provided code snippet includes necessary dependencies for implementing the `fast_nms` function. Write a Python function `def fast_nms(multi_bboxes, multi_scores, multi_coeffs, score_thr, iou_thr, top_k, max_num=-1)` to solve the following problem:
Fast NMS in `YOLACT <https://arxiv.org/abs/1904.02689>`_. Fast NMS allows already-removed detections to suppress other detections so that every instance can be decided to be kept or discarded in parallel, which is not possible in traditional NMS. This relaxation allows us to implement Fast NMS entirely in standard GPU-accelerated matrix operations. Args: multi_bboxes (Tensor): shape (n, #class*4) or (n, 4) multi_scores (Tensor): shape (n, #class+1), where the last column contains scores of the background class, but this will be ignored. multi_coeffs (Tensor): shape (n, #class*coeffs_dim). score_thr (float): bbox threshold, bboxes with scores lower than it will not be considered. iou_thr (float): IoU threshold to be considered as conflicted. top_k (int): if there are more than top_k bboxes before NMS, only top top_k will be kept. max_num (int): if there are more than max_num bboxes after NMS, only top max_num will be kept. If -1, keep all the bboxes. Default: -1. Returns: tuple: (bboxes, labels, coefficients), tensors of shape (k, 5), (k, 1), and (k, coeffs_dim). Labels are 0-based.
Here is the function:
def fast_nms(multi_bboxes,
             multi_scores,
             multi_coeffs,
             score_thr,
             iou_thr,
             top_k,
             max_num=-1):
    """Fast NMS as introduced by YOLACT (https://arxiv.org/abs/1904.02689).

    Unlike sequential NMS, already-suppressed boxes are allowed to suppress
    others, so the whole keep/discard decision reduces to matrix operations.

    Args:
        multi_bboxes (Tensor): (n, #class*4) or (n, 4) candidate boxes.
        multi_scores (Tensor): (n, #class+1) scores; the last column is the
            background class and is dropped.
        multi_coeffs (Tensor): (n, #class*coeffs_dim) mask coefficients.
        score_thr (float): boxes scoring below this are discarded.
        iou_thr (float): IoU above which a box is considered suppressed.
        top_k (int): keep at most this many boxes per class before NMS.
        max_num (int): cap on detections after NMS; -1 keeps all.

    Returns:
        tuple: (dets (k, 5), labels (k,), coeffs (k, coeffs_dim)); labels
        are 0-based.
    """
    cls_scores = multi_scores[:, :-1].t()  # [#class, n], background dropped
    cls_scores, order = cls_scores.sort(1, descending=True)
    order = order[:, :top_k].contiguous()
    cls_scores = cls_scores[:, :top_k]  # [#class, topk]
    n_cls, n_det = order.size()
    # Gather the per-class candidate boxes / coefficients in score order.
    cand_boxes = multi_bboxes[order.view(-1), :].view(n_cls, n_det, 4)
    cand_coeffs = multi_coeffs[order.view(-1), :].view(n_cls, n_det, -1)
    # Pairwise IoU per class; keep only the upper triangle so each box is
    # compared against higher-scoring boxes only.
    pairwise_iou = bbox_overlaps(cand_boxes, cand_boxes)  # [#class, topk, topk]
    pairwise_iou.triu_(diagonal=1)
    max_iou, _ = pairwise_iou.max(dim=1)
    # A box survives when no higher-scoring box overlaps it too much.
    keep = max_iou <= iou_thr
    # Second thresholding introduces 0.2 mAP gain at negligible time cost.
    keep *= cls_scores > score_thr
    # Attach the class index to every kept detection.
    labels = torch.arange(n_cls,
                          device=cand_boxes.device)[:, None].expand_as(keep)
    labels = labels[keep]
    cand_boxes = cand_boxes[keep]
    cand_coeffs = cand_coeffs[keep]
    cls_scores = cls_scores[keep]
    # Rank the survivors globally and optionally cap their number.
    cls_scores, order = cls_scores.sort(0, descending=True)
    if max_num > 0:
        order = order[:max_num]
        cls_scores = cls_scores[:max_num]
    labels = labels[order]
    cand_boxes = cand_boxes[order]
    cand_coeffs = cand_coeffs[order]
    dets = torch.cat([cand_boxes, cls_scores[:, None]], dim=1)
    return dets, labels, cand_coeffs
15,205 | import numpy as np
import torch.nn as nn
def normal_init(module, mean=0, std=1, bias=0):
    """Fill ``module.weight`` from N(mean, std) and set ``module.bias`` to a
    constant; parameters that are absent or ``None`` are left untouched."""
    weight = getattr(module, 'weight', None)
    if weight is not None:
        nn.init.normal_(weight, mean, std)
    bias_param = getattr(module, 'bias', None)
    if bias_param is not None:
        nn.init.constant_(bias_param, bias)
15,206 | import numpy as np
import torch.nn as nn
def constant_init(module, val, bias=0):
    """Set ``module.weight`` to ``val`` and ``module.bias`` to ``bias``;
    parameters that are absent or ``None`` are skipped."""
    for name, value in (("weight", val), ("bias", bias)):
        param = getattr(module, name, None)
        if param is not None:
            nn.init.constant_(param, value)
15,207 | import numpy as np
import torch.nn as nn
def kaiming_init(
    module, a=0, mode="fan_out", nonlinearity="relu", bias=0, distribution="normal"
):
    """Kaiming-initialize ``module.weight`` and set ``module.bias`` constant.

    ``distribution`` chooses between ``kaiming_uniform_`` and
    ``kaiming_normal_``; anything else raises an AssertionError.
    """
    assert distribution in ["uniform", "normal"]
    init_fn = (nn.init.kaiming_uniform_
               if distribution == "uniform" else nn.init.kaiming_normal_)
    init_fn(module.weight, a=a, mode=mode, nonlinearity=nonlinearity)
    if getattr(module, "bias", None) is not None:
        nn.init.constant_(module.bias, bias)
15,208 | import numpy as np
import torch.nn as nn
The provided code snippet includes necessary dependencies for implementing the `bias_init_with_prob` function. Write a Python function `def bias_init_with_prob(prior_prob)` to solve the following problem:
initialize conv/fc bias value according to a given probability value.
Here is the function:
def bias_init_with_prob(prior_prob):
    """Return the bias b such that sigmoid(b) == ``prior_prob``.

    Used to initialize classification logits so the network starts out
    predicting foreground with probability ``prior_prob`` (focal-loss-style
    initialization).
    """
    # logit of the prior: -log((1 - p) / p)
    return float(-np.log((1 - prior_prob) / prior_prob))
15,209 | import torch
import torch.nn as nn
import torch.nn.functional as F
from damo.utils import postprocess
from ..core.ops import ConvBNAct
from ..core.ota_assigner import AlignOTAAssigner
from ..core.utils import Scale, multi_apply, reduce_mean
from ..core.weight_init import bias_init_with_prob, normal_init
from ..losses.gfocal_loss import (DistributionFocalLoss, GIoULoss,
QualityFocalLoss)
from loguru import logger
The provided code snippet includes necessary dependencies for implementing the `distance2bbox` function. Write a Python function `def distance2bbox(points, distance, max_shape=None)` to solve the following problem:
Decode distance prediction to bounding box.
Here is the function:
def distance2bbox(points, distance, max_shape=None):
    """Decode (left, top, right, bottom) distances from ``points`` into
    xyxy boxes.

    Args:
        points: (..., 2) anchor points (x, y).
        distance: (..., 4) offsets to the four box sides.
        max_shape: optional (H, W); when given, coordinates are clamped to
            the image.

    Returns:
        (..., 4) boxes stacked as [x1, y1, x2, y2].
    """
    px, py = points[..., 0], points[..., 1]
    x1 = px - distance[..., 0]
    y1 = py - distance[..., 1]
    x2 = px + distance[..., 2]
    y2 = py + distance[..., 3]
    if max_shape is not None:
        h, w = max_shape[0], max_shape[1]
        x1 = x1.clamp(min=0, max=w)
        y1 = y1.clamp(min=0, max=h)
        x2 = x2.clamp(min=0, max=w)
        y2 = y2.clamp(min=0, max=h)
    return torch.stack([x1, y1, x2, y2], -1)
15,210 | import torch
import torch.nn as nn
import torch.nn.functional as F
from damo.utils import postprocess
from ..core.ops import ConvBNAct
from ..core.ota_assigner import AlignOTAAssigner
from ..core.utils import Scale, multi_apply, reduce_mean
from ..core.weight_init import bias_init_with_prob, normal_init
from ..losses.gfocal_loss import (DistributionFocalLoss, GIoULoss,
QualityFocalLoss)
from loguru import logger
The provided code snippet includes necessary dependencies for implementing the `bbox2distance` function. Write a Python function `def bbox2distance(points, bbox, max_dis=None, eps=0.1)` to solve the following problem:
Decode bounding box based on distances.
Here is the function:
def bbox2distance(points, bbox, max_dis=None, eps=0.1):
    """Encode xyxy boxes as (left, top, right, bottom) distances from
    ``points``; distances are optionally clamped to [0, max_dis - eps] so
    they stay strictly inside the regression range.
    """
    sides = [
        points[:, 0] - bbox[:, 0],  # left
        points[:, 1] - bbox[:, 1],  # top
        bbox[:, 2] - points[:, 0],  # right
        bbox[:, 3] - points[:, 1],  # bottom
    ]
    if max_dis is not None:
        sides = [s.clamp(min=0, max=max_dis - eps) for s in sides]
    return torch.stack(sides, -1)
15,211 | import torch
from damo.dataset.transforms import transforms as T
from damo.structures.bounding_box import BoxList
from damo.structures.image_list import to_image_list
from damo.utils.boxes import filter_results
def im_detect_bbox(model, images, target_scale, target_max_size, device,
config):
def im_detect_bbox_hflip(model, images, target_scale, target_max_size, device,
config):
def im_detect_bbox_scale(model,
images,
target_scale,
target_max_size,
device,
config,
hflip=False):
class BoxList(object):
def __init__(self, bbox, image_size, mode='xyxy'):
def add_field(self, field, field_data):
def get_field(self, field):
def has_field(self, field):
def fields(self):
def _copy_extra_fields(self, bbox):
def convert(self, mode):
def _split_into_xyxy(self):
def resize(self, size, *args, **kwargs):
def transpose(self, method):
def crop(self, box):
def to(self, device):
def __getitem__(self, item):
def __len__(self):
def clip_to_image(self, remove_empty=True):
def area(self):
def copy_with_fields(self, fields, skip_missing=False):
def __repr__(self):
def filter_results(boxlist, num_classes, nms_thre):
def im_detect_bbox_aug(model, images, device, config):
    """Test-time-augmented detection: run the model on the identity
    transform, optional horizontal flip, and extra scales (optionally
    flipped too), merge all resulting BoxLists per image, then apply
    class-wise NMS via ``filter_results``.

    Returns one filtered BoxList per input image.
    """
    # Collect detections computed under different transformations
    boxlists_ts = []
    for _ in range(len(images)):
        boxlists_ts.append([])
    def add_preds_t(boxlists_t):
        # Accumulate one transform's per-image BoxLists, rescaling every
        # later result to the size of the first (identity) result.
        for i, boxlist_t in enumerate(boxlists_t):
            if len(boxlists_ts[i]) == 0:
                # The first one is identity transform,
                # no need to resize the boxlist
                boxlists_ts[i].append(boxlist_t)
            else:
                # Resize the boxlist as the first one
                boxlists_ts[i].append(boxlist_t.resize(boxlists_ts[i][0].size))
    # Compute detections for the original image (identity transform)
    boxlists_i = im_detect_bbox(model, images, config.testing.input_min_size,
                                config.testing.input_max_size, device, config)
    add_preds_t(boxlists_i)
    # Perform detection on the horizontally flipped image
    if config.testing.augmentation.hflip:
        boxlists_hf = im_detect_bbox_hflip(model, images,
                                           config.testing.input_min_size,
                                           config.testing.input_max_size,
                                           device, config)
        add_preds_t(boxlists_hf)
    # Compute detections at different scales
    for scale in config.testing.augmentation.scales:
        max_size = config.testing.augmentation.scales_max_size
        boxlists_scl = im_detect_bbox_scale(model, images, scale, max_size,
                                            device, config)
        add_preds_t(boxlists_scl)
        if config.testing.augmentation.scales_hflip:
            boxlists_scl_hf = im_detect_bbox_scale(model,
                                                   images,
                                                   scale,
                                                   max_size,
                                                   device,
                                                   config,
                                                   hflip=True)
            add_preds_t(boxlists_scl_hf)
    # Merge boxlists detected by different bbox aug params
    boxlists = []
    for i, boxlist_ts in enumerate(boxlists_ts):
        bbox = torch.cat([boxlist_t.bbox for boxlist_t in boxlist_ts])
        scores = torch.cat(
            [boxlist_t.get_field('scores') for boxlist_t in boxlist_ts])
        labels = torch.cat(
            [boxlist_t.get_field('labels') for boxlist_t in boxlist_ts])
        boxlist = BoxList(bbox, boxlist_ts[0].size, boxlist_ts[0].mode)
        boxlist.add_field('scores', scores)
        boxlist.add_field('labels', labels)
        boxlists.append(boxlist)
    # Apply NMS and limit the final detections
    results = []
    for boxlist in boxlists:
        results.append(
            filter_results(boxlist, config.model.head.num_classes,
                           config.testing.augmentation.nms_thres))
    return results | null
15,212 | from damo.augmentations.scale_aware_aug import SA_Aug
from . import transforms as T
class SA_Aug(object):
    """Scale-Aware box-level augmentation (SADA).

    Decodes the flat ``sada_cfg.autoaug_params`` vector into box-level
    color/geometric sub-policies plus per-scale ratios, and applies them
    through ``Box_augs``; the augmentation strength is scheduled over
    ``max_iters`` training iterations.
    """
    def __init__(self, iters_per_epoch, start_epoch, total_epochs,
                 no_aug_epochs, batch_size, num_gpus, num_workers, sada_cfg):
        autoaug_list = sada_cfg.autoaug_params
        num_policies = sada_cfg.num_subpolicies
        scale_splits = sada_cfg.scale_splits
        box_prob = sada_cfg.box_prob
        # NOTE(review): true division -- this can be a float and is later
        # used with // in __call__; presumably batch_size is always
        # divisible by num_gpus. TODO confirm.
        self.batch_size = batch_size / num_gpus
        self.num_workers = num_workers
        # Augmentation is scheduled only up to the no-augmentation tail.
        self.max_iters = (total_epochs - no_aug_epochs) * iters_per_epoch
        # Resume the iteration counter when training restarts mid-run.
        self.count = start_epoch * iters_per_epoch
        if self.num_workers == 0:
            self.num_workers += 1
        # The first 4 entries of autoaug_params are skipped here; the rest
        # encode the sub-policies (6 numbers each) and the scale ratios.
        box_aug_list = autoaug_list[4:]
        color_aug_types = list(color_aug_func.keys())
        geometric_aug_types = list(geometric_aug_func.keys())
        policies = []
        for i in range(num_policies):
            _start_pos = i * 6
            # Each sub-policy = one (type, magnitude, extra) color op plus
            # one geometric op; the type index wraps via modulo.
            sub_policy = [
                (
                    color_aug_types[box_aug_list[_start_pos + 0] %
                                    len(color_aug_types)],
                    box_aug_list[_start_pos + 1] * 0.1,
                    box_aug_list[_start_pos + 2],
                ),  # box_color policy
                (geometric_aug_types[box_aug_list[_start_pos + 3] %
                                     len(geometric_aug_types)],
                 box_aug_list[_start_pos + 4] * 0.1,
                 box_aug_list[_start_pos + 5])
            ]  # box_geometric policy
            policies.append(sub_policy)
        _start_pos = num_policies * 6
        # Remaining 6 numbers: per-scale-split area ratios and probabilities.
        scale_ratios = {
            'area': [
                box_aug_list[_start_pos + 0], box_aug_list[_start_pos + 1],
                box_aug_list[_start_pos + 2]
            ],
            'prob': [
                box_aug_list[_start_pos + 3], box_aug_list[_start_pos + 4],
                box_aug_list[_start_pos + 5]
            ]
        }
        box_augs_dict = {'policies': policies, 'scale_ratios': scale_ratios}
        self.box_augs = Box_augs(box_augs_dict=box_augs_dict,
                                 max_iters=self.max_iters,
                                 scale_splits=scale_splits,
                                 box_prob=box_prob)
    def __call__(self, tensor, target):
        """Augment one (image tensor, target) pair; inputs are deep-copied
        so the originals are never mutated."""
        # Approximate global iteration from this worker's sample count.
        iteration = self.count // self.batch_size * self.num_workers
        tensor = copy.deepcopy(tensor)
        target = copy.deepcopy(target)
        tensor, target = self.box_augs(tensor, target, iteration=iteration)
        self.count += 1
        return tensor, target
def build_transforms(start_epoch,
                     total_epochs,
                     no_aug_epochs,
                     iters_per_epoch,
                     num_workers,
                     batch_size,
                     num_gpus,
                     image_max_range=(640, 640),
                     flip_prob=0.5,
                     image_mean=[0, 0, 0],
                     image_std=[1., 1., 1.],
                     autoaug_dict=None,
                     keep_ratio=True):
    """Assemble the data-transform pipeline.

    The base pipeline is resize -> random horizontal flip -> to-tensor ->
    normalize; when ``autoaug_dict`` is provided, scale-aware box-level
    augmentation (SA_Aug) is appended at the end.
    """
    pipeline = [
        T.Resize(image_max_range, keep_ratio=keep_ratio),
        T.RandomHorizontalFlip(flip_prob),
        T.ToTensor(),
        T.Normalize(mean=image_mean, std=image_std),
    ]
    if autoaug_dict is not None:
        pipeline.append(
            SA_Aug(iters_per_epoch, start_epoch, total_epochs, no_aug_epochs,
                   batch_size, num_gpus, num_workers, autoaug_dict))
    return T.Compose(pipeline)
15,213 | import math
import random
import cv2
import numpy as np
import torch
from damo.structures.bounding_box import BoxList
from damo.utils import adjust_box_anns, get_rank
def xyn2xy(x, scale_w, scale_h, padw=0, padh=0):
    """Scale normalized (n, 2) points to pixel coordinates and add padding.

    Works on both torch tensors and numpy arrays; the input is copied, not
    modified in place.
    """
    out = x.clone() if isinstance(x, torch.Tensor) else np.copy(x)
    out[:, 0] = scale_w * x[:, 0] + padw  # x coordinate
    out[:, 1] = scale_h * x[:, 1] + padh  # y coordinate
    return out
15,214 | import math
import random
import cv2
import numpy as np
import torch
from damo.structures.bounding_box import BoxList
from damo.utils import adjust_box_anns, get_rank
def resample_segments(segments, n=1000):
    """Linearly up-sample each (m, 2) polygon in ``segments`` to ``n``
    points; the list is updated in place and also returned."""
    for idx, seg in enumerate(segments):
        t = np.linspace(0, len(seg) - 1, n)
        src = np.arange(len(seg))
        # Interpolate x and y independently along the polygon parameter.
        cols = [np.interp(t, src, seg[:, axis]) for axis in range(2)]
        segments[idx] = np.concatenate(cols).reshape(2, -1).T  # segment xy
    return segments
def segment2box(segment, width=640, height=640):
    """Convert one (m, 2) segment to an xyxy box over its in-image points.

    Points outside [0, width] x [0, height] are dropped before taking the
    min/max. Returns a (1, 4) zero box when no point lies inside the image.
    """
    x, y = segment.T  # segment xy
    inside = (x >= 0) & (y >= 0) & (x <= width) & (y <= height)
    x, y = x[inside], y[inside]
    # Use len(x) (any surviving point), not any(x): a segment lying exactly
    # on the x == 0 border has all-zero x values and would otherwise be
    # wrongly collapsed to the zero box.
    return np.array([x.min(), y.min(), x.max(),
                     y.max()]) if len(x) else np.zeros((1, 4))  # xyxy
def box_candidates(box1,
                   box2,
                   wh_thr=2,
                   ar_thr=20,
                   area_thr=0.1,
                   eps=1e-16):  # box1(4,n), box2(4,n)
    """Boolean mask of post-augmentation boxes (box2) that remain valid
    relative to their pre-augmentation counterparts (box1): wide and tall
    enough, not overly elongated, and keeping enough of the original area.
    """
    w1, h1 = box1[2] - box1[0], box1[3] - box1[1]
    w2, h2 = box2[2] - box2[0], box2[3] - box2[1]
    aspect = np.maximum(w2 / (h2 + eps), h2 / (w2 + eps))  # aspect ratio
    big_enough = (w2 > wh_thr) & (h2 > wh_thr)
    area_kept = w2 * h2 / (w1 * h1 + eps) > area_thr
    return big_enough & area_kept & (aspect < ar_thr)  # candidates
def get_transform_matrix(img_shape, new_shape, degrees, scale, shear,
                         translate):
    """Build a random 3x3 affine matrix mapping an ``img_shape`` image into
    a ``new_shape`` canvas, composed of centering, rotation+scale, shear and
    translation; returns (M, scale_factor).

    NOTE(review): the scale factor comes from ``get_aug_params(scale,
    center=1.0)`` defined elsewhere in this file -- presumably a random
    jitter around 1.0; confirm against its definition.
    """
    new_height, new_width = new_shape
    # Center: move the image center to the origin before rotating/shearing.
    C = np.eye(3)
    C[0, 2] = -img_shape[1] / 2  # x translation (pixels)
    C[1, 2] = -img_shape[0] / 2  # y translation (pixels)
    # Rotation and Scale
    R = np.eye(3)
    a = random.uniform(-degrees, degrees)
    s = get_aug_params(scale, center=1.0)
    R[:2] = cv2.getRotationMatrix2D(angle=a, center=(0, 0), scale=s)
    # Shear
    S = np.eye(3)
    S[0, 1] = math.tan(random.uniform(-shear, shear) * math.pi /
                       180)  # x shear (deg)
    S[1, 0] = math.tan(random.uniform(-shear, shear) * math.pi /
                       180)  # y shear (deg)
    # Translation: place the (centered) content randomly around the middle
    # of the new canvas.
    T = np.eye(3)
    T[0, 2] = random.uniform(
        0.5 - translate, 0.5 + translate) * new_width  # x translation (pixels)
    T[1, 2] = random.uniform(0.5 - translate, 0.5 +
                             translate) * new_height  # y translation (pixels)
    # Combined transform matrix
    M = T @ S @ R @ C  # order of operations (right to left) is IMPORTANT
    return M, s
def random_affine(
    img,
    targets=(),
    segments=None,
    target_size=(640, 640),
    degrees=10,
    translate=0.1,
    scales=0.1,
    shear=10,
):
    """Apply a random affine warp (rotation/scale/shear/translation) to
    ``img`` and its box ``targets``.

    Boxes are recomputed either from the warped box corners or, when one
    segment per target is available, from the warped polygons; boxes that
    degenerate under the warp are filtered out by ``box_candidates``.
    Returns the warped image and the surviving targets.
    """
    M, scale = get_transform_matrix(img.shape[:2], target_size, degrees,
                                    scales, shear, translate)
    if (M != np.eye(3)).any():  # image changed
        img = cv2.warpAffine(img,
                             M[:2],
                             dsize=target_size,
                             borderValue=(114, 114, 114))
    # Transform label coordinates
    n = len(targets)
    # Corner-based path when segments are missing or don't match targets.
    if (n and len(segments)==0) or (len(segments) != len(targets)):
        new = np.zeros((n, 4))
        xy = np.ones((n * 4, 3))
        xy[:, :2] = targets[:, [0, 1, 2, 3, 0, 3, 2, 1]].reshape(
            n * 4, 2)  # x1y1, x2y2, x1y2, x2y1
        xy = xy @ M.T  # transform
        xy = xy[:, :2].reshape(n, 8)  # perspective rescale or affine
        # create new boxes
        x = xy[:, [0, 2, 4, 6]]
        y = xy[:, [1, 3, 5, 7]]
        new = np.concatenate(
            (x.min(1), y.min(1), x.max(1), y.max(1))).reshape(4, n).T
        # clip
        new[:, [0, 2]] = new[:, [0, 2]].clip(0, target_size[0])
        new[:, [1, 3]] = new[:, [1, 3]].clip(0, target_size[1])
    else:
        # Segment-based path: warp densified polygons, then box them.
        segments = resample_segments(segments)  # upsample
        new = np.zeros((len(targets), 4))
        assert len(segments) <= len(targets)
        for i, segment in enumerate(segments):
            xy = np.ones((len(segment), 3))
            xy[:, :2] = segment
            xy = xy @ M.T  # transform
            xy = xy[:, :2]  # perspective rescale or affine
            # clip
            new[i] = segment2box(xy, target_size[0], target_size[1])
    # filter candidates
    i = box_candidates(box1=targets[:, 0:4].T * scale,
                       box2=new.T,
                       area_thr=0.1)
    targets = targets[i]
    targets[:, 0:4] = new[i]
    return img, targets | null
15,215 | import math
import random
import cv2
import numpy as np
import torch
from damo.structures.bounding_box import BoxList
from damo.utils import adjust_box_anns, get_rank
def get_mosaic_coordinate(mosaic_image, mosaic_index, xc, yc, w, h, input_h,
                          input_w):
    """Compute the paste region in the 2x-sized mosaic canvas and the
    matching crop region in the source image.

    ``mosaic_index`` selects the quadrant around the mosaic center
    (xc, yc): 0 = top-left, 1 = top-right, 2 = bottom-left,
    3 = bottom-right. ``mosaic_image`` is unused and kept only for
    interface compatibility with callers.

    Returns ((x1, y1, x2, y2) on the canvas, (x1, y1, x2, y2) in the
    source image).
    """
    if mosaic_index == 0:  # top-left quadrant
        x1, y1, x2, y2 = max(xc - w, 0), max(yc - h, 0), xc, yc
        crop = w - (x2 - x1), h - (y2 - y1), w, h
    elif mosaic_index == 1:  # top-right quadrant
        x1, y1, x2, y2 = xc, max(yc - h, 0), min(xc + w, input_w * 2), yc
        crop = 0, h - (y2 - y1), min(w, x2 - x1), h
    elif mosaic_index == 2:  # bottom-left quadrant
        x1, y1, x2, y2 = max(xc - w, 0), yc, xc, min(input_h * 2, yc + h)
        crop = w - (x2 - x1), 0, w, min(y2 - y1, h)
    elif mosaic_index == 3:  # bottom-right quadrant
        x1, y1, x2, y2 = (xc, yc, min(xc + w, input_w * 2),
                          min(input_h * 2, yc + h))
        crop = 0, 0, min(w, x2 - x1), min(y2 - y1, h)
    return (x1, y1, x2, y2), crop
15,216 | import os
import tempfile
from collections import OrderedDict
import torch
from loguru import logger
from damo.structures.bounding_box import BoxList
from damo.structures.boxlist_ops import boxlist_iou
def prepare_for_coco_detection(predictions, dataset):
    """Convert per-image BoxList predictions into COCO-format result dicts
    (image_id / category_id / xywh bbox / score) ready for ``loadRes``."""
    # assert isinstance(dataset, COCODataset)
    coco_results = []
    for image_id, prediction in enumerate(predictions):
        original_id = dataset.id_to_img_map[image_id]
        if len(prediction) == 0:
            continue
        img_info = dataset.get_img_info(image_id)
        image_width = img_info['width']
        image_height = img_info['height']
        # Map boxes back to the original image resolution and COCO's
        # xywh convention.
        prediction = prediction.resize((image_width, image_height))
        prediction = prediction.convert('xywh')
        boxes = prediction.bbox.tolist()
        scores = prediction.get_field('scores').tolist()
        labels = prediction.get_field('labels').tolist()
        # contiguous label -> class name -> original COCO category id
        mapped_labels = [
            dataset.ori_class2id[dataset.contiguous_id2class[i]] for i in labels
        ]
        coco_results.extend([{
            'image_id': original_id,
            'category_id': mapped_labels[k],
            'bbox': box,
            'score': scores[k],
        } for k, box in enumerate(boxes)])
    return coco_results
def evaluate_box_proposals(predictions,
                           dataset,
                           thresholds=None,
                           area='all',
                           limit=None):
    """Evaluate detection proposal recall metrics. This function is a much
    faster alternative to the official COCO API recall evaluation code.
    However, it produces slightly different results.

    Non-crowd ground-truth boxes are filtered by the requested ``area``
    range; per image, proposals (optionally capped at ``limit``) are
    greedily matched to gt boxes by IoU, and recall is reported over the
    ``thresholds`` (default 0.5:0.05:0.95) together with the average
    recall 'ar'.
    """
    # Record max overlap value for each gt box
    # Return vector of overlap values
    areas = {
        'all': 0,
        'small': 1,
        'medium': 2,
        'large': 3,
        '96-128': 4,
        '128-256': 5,
        '256-512': 6,
        '512-inf': 7,
    }
    area_ranges = [
        [0**2, 1e5**2],  # all
        [0**2, 32**2],  # small
        [32**2, 96**2],  # medium
        [96**2, 1e5**2],  # large
        [96**2, 128**2],  # 96-128
        [128**2, 256**2],  # 128-256
        [256**2, 512**2],  # 256-512
        [512**2, 1e5**2],
    ]  # 512-inf
    assert area in areas, 'Unknown area range: {}'.format(area)
    area_range = area_ranges[areas[area]]
    gt_overlaps = []
    num_pos = 0
    for image_id, prediction in enumerate(predictions):
        original_id = dataset.id_to_img_map[image_id]
        img_info = dataset.get_img_info(image_id)
        image_width = img_info['width']
        image_height = img_info['height']
        prediction = prediction.resize((image_width, image_height))
        # prediction = prediction.resize((image_height, image_width))
        # sort predictions in descending order
        # TODO maybe remove this and make it explicit in the documentation
        inds = prediction.get_field('objectness').sort(descending=True)[1]
        prediction = prediction[inds]
        ann_ids = dataset.coco.getAnnIds(imgIds=original_id)
        anno = dataset.coco.loadAnns(ann_ids)
        # Crowd annotations are excluded from recall computation.
        gt_boxes = [obj['bbox'] for obj in anno if obj['iscrowd'] == 0]
        gt_boxes = torch.as_tensor(gt_boxes).reshape(
            -1, 4)  # guard against no boxes
        gt_boxes = BoxList(gt_boxes, (image_width, image_height),
                           mode='xywh').convert('xyxy')
        gt_areas = torch.as_tensor(
            [obj['area'] for obj in anno if obj['iscrowd'] == 0])
        if len(gt_boxes) == 0:
            continue
        valid_gt_inds = (gt_areas >= area_range[0]) & (gt_areas <=
                                                       area_range[1])
        gt_boxes = gt_boxes[valid_gt_inds]
        num_pos += len(gt_boxes)
        if len(gt_boxes) == 0:
            continue
        if len(prediction) == 0:
            continue
        if limit is not None and len(prediction) > limit:
            prediction = prediction[:limit]
        overlaps = boxlist_iou(prediction, gt_boxes)
        _gt_overlaps = torch.zeros(len(gt_boxes))
        # Greedy one-to-one matching: repeatedly take the best remaining
        # (proposal, gt) pair and retire both.
        for j in range(min(len(prediction), len(gt_boxes))):
            # find which proposal box maximally covers each gt box
            # and get the iou amount of coverage for each gt box
            max_overlaps, argmax_overlaps = overlaps.max(dim=0)
            # find which gt box is 'best' covered (i.e. 'best' = most iou)
            gt_ovr, gt_ind = max_overlaps.max(dim=0)
            assert gt_ovr >= 0
            # find the proposal box that covers the best covered gt box
            box_ind = argmax_overlaps[gt_ind]
            # record the iou coverage of this gt box
            _gt_overlaps[j] = overlaps[box_ind, gt_ind]
            assert _gt_overlaps[j] == gt_ovr
            # mark the proposal box and the gt box as used
            overlaps[box_ind, :] = -1
            overlaps[:, gt_ind] = -1
        # append recorded iou coverage level
        gt_overlaps.append(_gt_overlaps)
    gt_overlaps = torch.cat(gt_overlaps, dim=0)
    gt_overlaps, _ = torch.sort(gt_overlaps)
    if thresholds is None:
        step = 0.05
        thresholds = torch.arange(0.5, 0.95 + 1e-5, step, dtype=torch.float32)
    recalls = torch.zeros_like(thresholds)
    # compute recall for each iou threshold
    for i, t in enumerate(thresholds):
        recalls[i] = (gt_overlaps >= t).float().sum() / float(num_pos)
    # ar = 2 * np.trapz(recalls, thresholds)
    ar = recalls.mean()
    return {
        'ar': ar,
        'recalls': recalls,
        'thresholds': thresholds,
        'gt_overlaps': gt_overlaps,
        'num_pos': num_pos,
    }
def evaluate_predictions_on_coco(coco_gt,
                                 coco_results,
                                 json_result_file,
                                 iou_type='bbox'):
    """Dump ``coco_results`` to ``json_result_file`` and run the official
    COCOeval protocol (evaluate / accumulate / summarize) against
    ``coco_gt``; returns the COCOeval object."""
    import json
    with open(json_result_file, 'w') as f:
        json.dump(coco_results, f)
    from pycocotools.coco import COCO
    from pycocotools.cocoeval import COCOeval
    # loadRes cannot handle an empty result list; fall back to an empty COCO.
    coco_dt = coco_gt.loadRes(
        str(json_result_file)) if coco_results else COCO()
    # coco_dt = coco_gt.loadRes(coco_results)
    coco_eval = COCOeval(coco_gt, coco_dt, iou_type)
    coco_eval.evaluate()
    coco_eval.accumulate()
    coco_eval.summarize()
    # compute_thresholds_for_classes(coco_eval)
    return coco_eval
class COCOResults(object):
    """Ordered container for the summary metrics of one or more COCO
    evaluation types (bbox / segm / keypoints / box_proposal)."""
    # Metric names in the exact order COCOeval.stats reports them, so
    # update() can assign stats values by position.
    METRICS = {
        'bbox': ['AP', 'AP50', 'AP75', 'APs', 'APm', 'APl'],
        'segm': ['AP', 'AP50', 'AP75', 'APs', 'APm', 'APl'],
        'box_proposal': [
            'AR@100',
            'ARs@100',
            'ARm@100',
            'ARl@100',
            'AR@1000',
            'ARs@1000',
            'ARm@1000',
            'ARl@1000',
        ],
        'keypoints': ['AP', 'AP50', 'AP75', 'APm', 'APl'],
    }
    def __init__(self, *iou_types):
        """Initialize every requested metric to the sentinel value -1."""
        allowed_types = ('box_proposal', 'bbox', 'segm', 'keypoints')
        assert all(iou_type in allowed_types for iou_type in iou_types)
        results = OrderedDict()
        for iou_type in iou_types:
            results[iou_type] = OrderedDict([
                (metric, -1) for metric in COCOResults.METRICS[iou_type]
            ])
        self.results = results
    def update(self, coco_eval):
        """Copy the summary stats of a finished COCOeval run into
        ``self.results`` for its iou type."""
        if coco_eval is None:
            return
        from pycocotools.cocoeval import COCOeval
        assert isinstance(coco_eval, COCOeval)
        s = coco_eval.stats
        iou_type = coco_eval.params.iouType
        res = self.results[iou_type]
        metrics = COCOResults.METRICS[iou_type]
        for idx, metric in enumerate(metrics):
            res[metric] = s[idx]
    def __repr__(self):
        # TODO make it pretty
        return repr(self.results)
def check_expected_results(results, expected_results, sigma_tol):
    """Log PASS/FAIL for each expected (task, metric, (mean, std)) triple,
    accepting actual values strictly inside mean +/- sigma_tol * std."""
    if not expected_results:
        return
    for task, metric, (mean, std) in expected_results:
        actual_val = results.results[task][metric]
        lo = mean - sigma_tol * std
        hi = mean + sigma_tol * std
        ok = lo < actual_val < hi
        msg = ('{} > {} sanity check (actual vs. expected): '
               '{:.3f} vs. mean={:.4f}, std={:.4}, range=({:.4f}, {:.4f})'
               ).format(task, metric, actual_val, mean, std, lo, hi)
        if ok:
            logger.info('PASS: ' + msg)
        else:
            logger.error('FAIL: ' + msg)
def do_coco_evaluation(
    dataset,
    predictions,
    box_only,
    output_folder,
    iou_types,
    expected_results,
    expected_results_sigma_tol,
):
    """Run COCO-style evaluation over ``predictions``.

    With ``box_only`` set, only proposal recall (AR at several area ranges
    and limits) is computed and saved; otherwise predictions are converted
    to COCO format and scored with COCOeval for each requested iou type.
    Results are optionally persisted under ``output_folder`` and checked
    against ``expected_results``.
    """
    if box_only:
        logger.info('Evaluating bbox proposals')
        areas = {'all': '', 'small': 's', 'medium': 'm', 'large': 'l'}
        res = COCOResults('box_proposal')
        for limit in [100, 1000]:
            for area, suffix in areas.items():
                stats = evaluate_box_proposals(predictions,
                                               dataset,
                                               area=area,
                                               limit=limit)
                key = 'AR{}@{:d}'.format(suffix, limit)
                res.results['box_proposal'][key] = stats['ar'].item()
        logger.info(res)
        check_expected_results(res, expected_results,
                               expected_results_sigma_tol)
        if output_folder:
            torch.save(res, os.path.join(output_folder, 'box_proposals.pth'))
        return
    logger.info('Preparing results for COCO format')
    coco_results = {}
    if 'bbox' in iou_types:
        logger.info('Preparing bbox results')
        coco_results['bbox'] = prepare_for_coco_detection(predictions, dataset)
    results = COCOResults(*iou_types)
    logger.info('Evaluating predictions')
    for iou_type in iou_types:
        # Without an output folder, write the result json to a temp file
        # that is cleaned up automatically.
        with tempfile.NamedTemporaryFile() as f:
            file_path = f.name
            if output_folder:
                file_path = os.path.join(output_folder, iou_type + '.json')
            res = evaluate_predictions_on_coco(dataset.coco,
                                               coco_results[iou_type],
                                               file_path, iou_type)
            results.update(res)
    logger.info(results)
    check_expected_results(results, expected_results,
                           expected_results_sigma_tol)
    if output_folder:
        torch.save(results, os.path.join(output_folder, 'coco_results.pth'))
    return results, coco_results | null
15,217 | import os
import tempfile
from collections import OrderedDict
import torch
from loguru import logger
from damo.structures.bounding_box import BoxList
from damo.structures.boxlist_ops import boxlist_iou
The provided code snippet includes necessary dependencies for implementing the `compute_thresholds_for_classes` function. Write a Python function `def compute_thresholds_for_classes(coco_eval)` to solve the following problem:
The function is used to compute the thresholds corresponding to best f-measure. The resulting thresholds are used in fcos_demo.py.
Here is the function:
def compute_thresholds_for_classes(coco_eval):
    '''
    The function is used to compute the thresholds corresponding to best
    f-measure. The resulting thresholds are used in fcos_demo.py.
    '''
    import numpy as np
    # precision/scores have dims [TxRxKxAxM]; select IoU=0.5 (index 0),
    # area range 'all' (index 0) and the largest max-dets setting (-1).
    precision = coco_eval.eval['precision'][0, :, :, 0, -1]
    scores = coco_eval.eval['scores'][0, :, :, 0, -1]
    recall = np.linspace(0, 1, num=precision.shape[0])[:, None]
    # Harmonic mean of precision and recall, guarded against division by 0.
    f_measure = (2 * precision * recall) / (np.maximum(precision + recall,
                                                       1e-6))
    max_f_measure = f_measure.max(axis=0)
    best_rows = f_measure.argmax(axis=0)
    # Score threshold at the recall level achieving the best f-measure.
    scores = scores[best_rows, range(len(best_rows))]
    print('Maximum f-measures for classes:')
    print(list(max_f_measure))
    print('Score thresholds for classes (used in demos for visualization):')
    print(list(scores))
15,218 | import bisect
import copy
import math
import torch.utils.data
from damo.utils import get_world_size
from . import datasets as D
from .collate_batch import BatchCollator
from .datasets import MosaicWrapper
from .samplers import DistributedSampler, IterationBasedBatchSampler
from .transforms import build_transforms
def build_dataset(cfg, ann_files, is_train=True, mosaic_mixup=None):
    """Instantiate the datasets named in ``ann_files`` via the config's
    dataset catalog, optionally wrapping each in MosaicWrapper for
    mosaic/mixup training augmentation.

    Returns a list of dataset objects (one per name).
    """
    if not isinstance(ann_files, (list, tuple)):
        raise RuntimeError(
            'datasets should be a list of strings, got {}'.format(ann_files))
    datasets = []
    for dataset_name in ann_files:
        # read data from config first
        data = cfg.get_data(dataset_name)
        factory = getattr(D, data['factory'])
        args = data['args']
        # Transforms are attached later by the dataloader builder.
        args['transforms'] = None
        args['class_names'] = cfg.dataset.class_names
        # make dataset from factory
        dataset = factory(**args)
        # mosaic wrapped
        if is_train and mosaic_mixup is not None:
            dataset = MosaicWrapper(dataset=dataset,
                                    img_size=mosaic_mixup.mosaic_size,
                                    mosaic_prob=mosaic_mixup.mosaic_prob,
                                    mixup_prob=mosaic_mixup.mixup_prob,
                                    transforms=None,
                                    degrees=mosaic_mixup.degrees,
                                    translate=mosaic_mixup.translate,
                                    shear=mosaic_mixup.shear,
                                    mosaic_scale=mosaic_mixup.mosaic_scale,
                                    mixup_scale=mosaic_mixup.mixup_scale,
                                    keep_ratio=mosaic_mixup.keep_ratio)
        datasets.append(dataset)
    return datasets | null
15,219 | import bisect
import copy
import math
import torch.utils.data
from damo.utils import get_world_size
from . import datasets as D
from .collate_batch import BatchCollator
from .datasets import MosaicWrapper
from .samplers import DistributedSampler, IterationBasedBatchSampler
from .transforms import build_transforms
def _quantize(x, bins):
bins = copy.copy(bins)
bins = sorted(bins)
quantized = list(map(lambda y: bisect.bisect_right(bins, y), x))
return quantized | null |
15,220 | import bisect
import copy
import math
import torch.utils.data
from damo.utils import get_world_size
from . import datasets as D
from .collate_batch import BatchCollator
from .datasets import MosaicWrapper
from .samplers import DistributedSampler, IterationBasedBatchSampler
from .transforms import build_transforms
def _compute_aspect_ratios(dataset):
aspect_ratios = []
for i in range(len(dataset)):
img_info = dataset.get_img_info(i)
aspect_ratio = float(img_info['height']) / float(img_info['width'])
aspect_ratios.append(aspect_ratio)
return aspect_ratios | null |
15,221 | import bisect
import copy
import math
import torch.utils.data
from damo.utils import get_world_size
from . import datasets as D
from .collate_batch import BatchCollator
from .datasets import MosaicWrapper
from .samplers import DistributedSampler, IterationBasedBatchSampler
from .transforms import build_transforms
def make_data_sampler(dataset, shuffle):
    # Thin wrapper so every loader uses the same sampler type; presumably
    # DistributedSampler degrades gracefully when world size is 1 -- TODO
    # confirm against damo.dataset.samplers.
    return DistributedSampler(dataset, shuffle=shuffle)
def make_batch_sampler(dataset,
                       sampler,
                       images_per_batch,
                       num_iters=None,
                       start_iter=0,
                       mosaic_warpper=False):
    """Group ``sampler`` indices into batches of ``images_per_batch``.

    When ``num_iters`` is given, the batch sampler is wrapped so it yields
    exactly that many iterations (resuming at ``start_iter``), with mosaic
    augmentation toggled by ``mosaic_warpper``. ``dataset`` is unused and
    kept for interface compatibility.
    """
    batches = torch.utils.data.sampler.BatchSampler(sampler,
                                                    images_per_batch,
                                                    drop_last=False)
    if num_iters is None:
        return batches
    return IterationBasedBatchSampler(batches, num_iters, start_iter,
                                      enable_mosaic=mosaic_warpper)
class BatchCollator(object):
    """
    From a list of samples from the dataset,
    returns the batched images and targets.
    This should be passed to the DataLoader
    """
    def __init__(self, size_divisible=0):
        # Pad batched images so H and W are multiples of this value
        # (0 disables padding); consumed by to_image_list.
        self.size_divisible = size_divisible
    def __call__(self, batch):
        # batch is a list of (image, target, img_id) triples.
        transposed_batch = list(zip(*batch))
        images = to_image_list(transposed_batch[0], self.size_divisible)
        targets = transposed_batch[1]
        img_ids = transposed_batch[2]
        return images, targets, img_ids
def build_dataloader(datasets,
                     augment,
                     batch_size=128,
                     start_epoch=None,
                     total_epochs=None,
                     no_aug_epochs=0,
                     is_train=True,
                     num_workers=8,
                     size_div=32):
    """Build torch DataLoader(s) over *datasets* with DAMO-YOLO transforms.

    Args:
        datasets: list of dataset objects; training supports exactly one.
        augment: config node with a ``transform`` entry (and optionally
            ``mosaic_mixup``) — presumably an EasyDict; confirm in config.
        batch_size: global batch size across all GPUs.
        start_epoch/total_epochs: used to derive the iteration schedule.
        no_aug_epochs: trailing epochs with augmentation disabled.
        is_train: training mode (shuffled, iteration-based sampler).
        num_workers: DataLoader worker processes.
        size_div: padding divisor passed to BatchCollator.

    Returns:
        A single DataLoader when ``is_train`` else a list of DataLoaders.
    """
    num_gpus = get_world_size()
    # The global batch is split evenly; an uneven split is a config error.
    assert (
        batch_size % num_gpus == 0
    ), 'training_imgs_per_batch ({}) must be divisible by the number ' \
        'of GPUs ({}) used.'.format(batch_size, num_gpus)
    images_per_gpu = batch_size // num_gpus
    if is_train:
        iters_per_epoch = math.ceil(len(datasets[0]) / batch_size)
        shuffle = True
        num_iters = total_epochs * iters_per_epoch
        start_iter = start_epoch * iters_per_epoch
    else:
        iters_per_epoch = math.ceil(len(datasets[0]) / batch_size)
        shuffle = False
        # num_iters=None makes make_batch_sampler yield a plain epoch-based
        # batch sampler instead of an IterationBasedBatchSampler.
        num_iters = None
        start_iter = 0
    transforms = augment.transform
    enable_mosaic_mixup = 'mosaic_mixup' in augment
    transforms = build_transforms(start_epoch, total_epochs, no_aug_epochs,
                                  iters_per_epoch, num_workers, batch_size,
                                  num_gpus, **transforms)
    # Attach the built transform pipeline to every dataset (and to the
    # wrapped inner dataset when a wrapper such as MosaicWrapper is used).
    for dataset in datasets:
        dataset._transforms = transforms
        if hasattr(dataset, '_dataset'):
            dataset._dataset._transforms = transforms
    data_loaders = []
    for dataset in datasets:
        sampler = make_data_sampler(dataset, shuffle)
        batch_sampler = make_batch_sampler(dataset, sampler, images_per_gpu,
                                           num_iters, start_iter,
                                           enable_mosaic_mixup)
        collator = BatchCollator(size_div)
        data_loader = torch.utils.data.DataLoader(
            dataset,
            num_workers=num_workers,
            batch_sampler=batch_sampler,
            collate_fn=collator,
        )
        data_loaders.append(data_loader)
    if is_train:
        assert len(
            data_loaders) == 1, 'multi-training set is not supported yet!'
        return data_loaders[0]
    return data_loaders
15,222 | import ast
import importlib
import os
import pprint
import sys
from abc import ABCMeta
from os.path import dirname, join
from easydict import EasyDict as easydict
from tabulate import tabulate
from .augmentations import test_aug, train_aug
from .paths_catalog import DatasetCatalog
def get_config_by_file(config_file):
    """Import *config_file* as a module and instantiate its ``Config`` class.

    Side effect: appends the file's directory to ``sys.path`` so the
    import machinery can locate it.
    """
    try:
        sys.path.append(os.path.dirname(config_file))
        # Module name is the file stem; note '.'-containing stems would be
        # truncated at the first dot.
        current_config = importlib.import_module(
            os.path.basename(config_file).split('.')[0])
        exp = current_config.Config()
    except Exception:
        # Broad catch: any import/instantiation failure is reported as a
        # missing Config class, which can mask unrelated errors.
        raise ImportError(
            "{} doesn't contains class named 'Config'".format(config_file))
    return exp
The provided code snippet includes necessary dependencies for implementing the `parse_config` function. Write a Python function `def parse_config(config_file)` to solve the following problem:
get config object by file. Args: config_file (str): file path of config.
Here is the function:
def parse_config(config_file):
    """
    get config object by file.
    Args:
        config_file (str): file path of config.
    """
    assert (config_file is not None), 'plz provide config file'
    # The assert above guarantees config_file is non-None here.
    return get_config_by_file(config_file)
15,223 | import torch
import torch.nn as nn
from loguru import logger
from torch.nn.parallel import DistributedDataParallel as DDP
from damo.base_models.backbones import build_backbone
from damo.base_models.heads import build_head
from damo.base_models.necks import build_neck
from damo.structures.image_list import to_image_list
class Detector(nn.Module):
def __init__(self, config):
def init_bn(self, M):
def init_model(self):
def load_pretrain_detector(self, pretrain_model):
def forward(self, x, targets=None, tea=False, stu=False):
def build_local_model(config, device):
model = Detector(config)
model.init_model()
model.to(device)
return model | null |
15,224 | import torch
import torch.nn as nn
from loguru import logger
from torch.nn.parallel import DistributedDataParallel as DDP
from damo.base_models.backbones import build_backbone
from damo.base_models.heads import build_head
from damo.base_models.necks import build_neck
from damo.structures.image_list import to_image_list
def build_ddp_model(model, local_rank):
model = DDP(model,
device_ids=[local_rank],
output_device=local_rank,
broadcast_buffers=False,
find_unused_parameters=True)
return model | null |
15,225 | import cv2
import numpy as np
def debug_input_vis(imgs, targets, ids, train_loader):
std = np.array([1.0, 1.0, 1.0]).reshape(3, 1, 1)
mean = np.array([0.0, 0.0, 0.0]).reshape(3, 1, 1)
n, c, h, w = imgs.shape
for i in range(n):
img = imgs[i, :, :, :].cpu()
bboxs = targets[i].bbox.cpu().numpy()
cls = targets[i].get_field('labels').cpu().numpy()
if True:
# if self.config.training_mosaic:
img_id = train_loader.dataset._dataset.id_to_img_map[ids[i]]
else:
img_id = train_loader.dataset.id_to_img_map[ids[i]]
img = np.clip(
(img.numpy() * std + mean).transpose(1, 2,
0).copy().astype(np.uint8), 0,
255)
for bbox, obj_cls in zip(bboxs, cls):
x1, y1, x2, y2 = map(int, bbox)
cv2.rectangle(img,
pt1=(x1, y1),
pt2=(x2, y2),
color=(0, 0, 255),
thickness=2)
cv2.putText(img, f'{obj_cls}', (x1, y1), cv2.FONT_HERSHEY_SIMPLEX,
1.0, (0, 0, 255))
cv2.imwrite(f'visimgs/vis_{img_id}.jpg', img) | null |
15,226 | import cv2
import numpy as np
_COLORS = np.array([
0.000, 0.447, 0.741, 0.850, 0.325, 0.098, 0.929, 0.694, 0.125, 0.494,
0.184, 0.556, 0.466, 0.674, 0.188, 0.301, 0.745, 0.933, 0.635, 0.078,
0.184, 0.300, 0.300, 0.300, 0.600, 0.600, 0.600, 1.000, 0.000, 0.000,
1.000, 0.500, 0.000, 0.749, 0.749, 0.000, 0.000, 1.000, 0.000, 0.000,
0.000, 1.000, 0.667, 0.000, 1.000, 0.333, 0.333, 0.000, 0.333, 0.667,
0.000, 0.333, 1.000, 0.000, 0.667, 0.333, 0.000, 0.667, 0.667, 0.000,
0.667, 1.000, 0.000, 1.000, 0.333, 0.000, 1.000, 0.667, 0.000, 1.000,
1.000, 0.000, 0.000, 0.333, 0.500, 0.000, 0.667, 0.500, 0.000, 1.000,
0.500, 0.333, 0.000, 0.500, 0.333, 0.333, 0.500, 0.333, 0.667, 0.500,
0.333, 1.000, 0.500, 0.667, 0.000, 0.500, 0.667, 0.333, 0.500, 0.667,
0.667, 0.500, 0.667, 1.000, 0.500, 1.000, 0.000, 0.500, 1.000, 0.333,
0.500, 1.000, 0.667, 0.500, 1.000, 1.000, 0.500, 0.000, 0.333, 1.000,
0.000, 0.667, 1.000, 0.000, 1.000, 1.000, 0.333, 0.000, 1.000, 0.333,
0.333, 1.000, 0.333, 0.667, 1.000, 0.333, 1.000, 1.000, 0.667, 0.000,
1.000, 0.667, 0.333, 1.000, 0.667, 0.667, 1.000, 0.667, 1.000, 1.000,
1.000, 0.000, 1.000, 1.000, 0.333, 1.000, 1.000, 0.667, 1.000, 0.333,
0.000, 0.000, 0.500, 0.000, 0.000, 0.667, 0.000, 0.000, 0.833, 0.000,
0.000, 1.000, 0.000, 0.000, 0.000, 0.167, 0.000, 0.000, 0.333, 0.000,
0.000, 0.500, 0.000, 0.000, 0.667, 0.000, 0.000, 0.833, 0.000, 0.000,
1.000, 0.000, 0.000, 0.000, 0.167, 0.000, 0.000, 0.333, 0.000, 0.000,
0.500, 0.000, 0.000, 0.667, 0.000, 0.000, 0.833, 0.000, 0.000, 1.000,
0.000, 0.000, 0.000, 0.143, 0.143, 0.143, 0.286, 0.286, 0.286, 0.429,
0.429, 0.429, 0.571, 0.571, 0.571, 0.714, 0.714, 0.714, 0.857, 0.857,
0.857, 0.000, 0.447, 0.741, 0.314, 0.717, 0.741, 0.50, 0.5, 0
]).astype(np.float32).reshape(-1, 3)
def vis(img, boxes, scores, cls_ids, conf=0.5, class_names=None):
for i in range(len(boxes)):
box = boxes[i]
cls_id = int(cls_ids[i])
score = scores[i]
if score < conf:
continue
x0 = int(box[0])
y0 = int(box[1])
x1 = int(box[2])
y1 = int(box[3])
color = (_COLORS[cls_id] * 255).astype(np.uint8).tolist()
text = '{}:{:.1f}%'.format(class_names[cls_id], score * 100)
txt_color = (0, 0, 0) if np.mean(_COLORS[cls_id]) > 0.5 else (255, 255,
255)
font = cv2.FONT_HERSHEY_SIMPLEX
txt_size = cv2.getTextSize(text, font, 0.4, 1)[0]
cv2.rectangle(img, (x0, y0), (x1, y1), color, 2)
txt_bk_color = (_COLORS[cls_id] * 255 * 0.7).astype(np.uint8).tolist()
cv2.rectangle(img, (x0, y0 + 1),
(x0 + txt_size[0] + 1, y0 + int(1.5 * txt_size[1])),
txt_bk_color, -1)
cv2.putText(img,
text, (x0, y0 + txt_size[1]),
font,
0.4,
txt_color,
thickness=1)
return img | null |
15,227 | import os
import shutil
import torch
from loguru import logger
def load_ckpt(model, ckpt):
    """Copy compatible weights from *ckpt* into *model*.

    Checkpoint entries that are missing or whose shapes disagree with the
    model are skipped with a warning; the remainder is loaded with
    ``strict=False`` so partially matching checkpoints still work.

    Returns the (mutated) model for convenience.
    """
    compatible = {}
    for name, param in model.state_dict().items():
        if name not in ckpt:
            logger.warning('{} is not in the ckpt. \
                Please double check and see if this is desired.'.format(name))
            continue
        candidate = ckpt[name]
        if param.shape != candidate.shape:
            logger.warning('Shape of {} in checkpoint is {}, \
                while shape of {} in model is {}.'.format(
                name, candidate.shape, name, param.shape))
            continue
        compatible[name] = candidate
    model.load_state_dict(compatible, strict=False)
    return model
15,228 | import os
import shutil
import torch
from loguru import logger
def save_checkpoint(state, is_best, save_dir, model_name=''):
    """Serialize *state* to ``<save_dir>/<model_name>_ckpt.pth``.

    When *is_best* is true the file is additionally copied to
    ``best_ckpt.pth`` in the same directory. The directory is created on
    demand.
    """
    os.makedirs(save_dir, exist_ok=True)
    ckpt_path = os.path.join(save_dir, model_name + '_ckpt.pth')
    torch.save(state, ckpt_path)
    if is_best:
        shutil.copyfile(ckpt_path, os.path.join(save_dir, 'best_ckpt.pth'))
15,229 | import os
import numpy as np
from damo.dataset.transforms import transforms as T
from damo.structures.image_list import to_image_list
def mkdir(path):
    """Create *path* (including missing parents); no-op if it exists."""
    os.makedirs(path, exist_ok=True)
15,230 | import os
import numpy as np
from damo.dataset.transforms import transforms as T
from damo.structures.image_list import to_image_list
def nms(boxes, scores, nms_thr):
    """Single class NMS implemented in Numpy.

    *boxes* is (N, 4) in x1,y1,x2,y2 order; coordinates are treated as
    inclusive pixel indices (the +1 in width/height). Returns the list of
    kept indices, highest score first.
    """
    x1, y1, x2, y2 = boxes[:, 0], boxes[:, 1], boxes[:, 2], boxes[:, 3]
    areas = (x2 - x1 + 1) * (y2 - y1 + 1)
    order = scores.argsort()[::-1]

    keep = []
    while order.size > 0:
        best = order[0]
        keep.append(best)
        rest = order[1:]
        # Intersection of the best box with every remaining candidate.
        inter_w = np.maximum(
            0.0, np.minimum(x2[best], x2[rest]) - np.maximum(x1[best], x1[rest]) + 1)
        inter_h = np.maximum(
            0.0, np.minimum(y2[best], y2[rest]) - np.maximum(y1[best], y1[rest]) + 1)
        inter = inter_w * inter_h
        iou = inter / (areas[best] + areas[rest] - inter)
        # Keep only candidates whose overlap with `best` is small enough.
        order = rest[np.where(iou <= nms_thr)[0]]
    return keep
The provided code snippet includes necessary dependencies for implementing the `multiclass_nms` function. Write a Python function `def multiclass_nms(boxes, scores, nms_thr, score_thr)` to solve the following problem:
Multiclass NMS implemented in Numpy
Here is the function:
def multiclass_nms(boxes, scores, nms_thr, score_thr):
    """Multiclass NMS implemented in Numpy"""
    detections = []
    for class_id in range(scores.shape[1]):
        class_scores = scores[:, class_id]
        # Drop low-confidence candidates before running per-class NMS.
        mask = class_scores > score_thr
        if mask.sum() == 0:
            continue
        kept_scores = class_scores[mask]
        kept_boxes = boxes[mask]
        keep = nms(kept_boxes, kept_scores, nms_thr)
        if len(keep) > 0:
            labels = np.ones((len(keep), 1)) * class_id
            # Rows are [x1, y1, x2, y2, score, class_id].
            detections.append(
                np.concatenate(
                    [kept_boxes[keep], kept_scores[keep, None], labels], 1))
    if not detections:
        return None
    return np.concatenate(detections, 0)
15,231 | import os
import numpy as np
from damo.dataset.transforms import transforms as T
from damo.structures.image_list import to_image_list
def demo_postprocess(outputs, img_size, p6=False):
    """Decode raw YOLOX-style head outputs to absolute-pixel predictions.

    *outputs* has shape (batch, cells, >=4) where the cells run over the
    feature-map grids of each stride in order. Centers (cols 0:1) are
    offset by their grid cell and scaled by the stride; sizes (cols 2:3)
    are exponentiated and scaled. The array is modified in place and also
    returned.
    """
    strides = [8, 16, 32, 64] if p6 else [8, 16, 32]

    grid_parts = []
    stride_parts = []
    for stride in strides:
        hsize = img_size[0] // stride
        wsize = img_size[1] // stride
        xv, yv = np.meshgrid(np.arange(wsize), np.arange(hsize))
        grid = np.stack((xv, yv), 2).reshape(1, -1, 2)
        grid_parts.append(grid)
        stride_parts.append(np.full((*grid.shape[:2], 1), stride))

    grids = np.concatenate(grid_parts, 1)
    expanded_strides = np.concatenate(stride_parts, 1)

    outputs[..., :2] = (outputs[..., :2] + grids) * expanded_strides
    outputs[..., 2:4] = np.exp(outputs[..., 2:4]) * expanded_strides
    return outputs
15,232 | import os
import numpy as np
from damo.dataset.transforms import transforms as T
from damo.structures.image_list import to_image_list
def to_image_list(tensors, size_divisible=0, max_size=None):
    """
    tensors can be an ImageList, a torch.Tensor or
    an iterable of Tensors. It can't be a numpy array.
    When tensors is an iterable of Tensors, it pads
    the Tensors with zeros so that they have the same
    shape
    """
    # NOTE(review): a single Tensor with size_divisible > 0 is wrapped in a
    # list so it goes through the padding branch below.
    if isinstance(tensors, torch.Tensor) and size_divisible > 0:
        tensors = [tensors]

    if isinstance(tensors, ImageList):
        return tensors
    elif isinstance(tensors, torch.Tensor):
        # single tensor shape can be inferred
        if tensors.dim() == 3:
            tensors = tensors[None]
        assert tensors.dim() == 4
        image_sizes = [tensor.shape[-2:] for tensor in tensors]
        return ImageList(tensors, image_sizes, image_sizes)
    elif isinstance(tensors, (tuple, list)):
        # Per-dimension maximum across the batch; assumes all tensors are
        # 3-D (C, H, W) — TODO confirm for callers passing tuples.
        if max_size is None:
            max_size = tuple(
                max(s) for s in zip(*[img.shape for img in tensors]))

        # TODO Ideally, just remove this and let me model handle arbitrary
        # input sizs
        if size_divisible > 0:
            import math

            stride = size_divisible
            max_size = list(max_size)
            # Round H and W up to the next multiple of `stride`.
            max_size[1] = int(math.ceil(max_size[1] / stride) * stride)
            max_size[2] = int(math.ceil(max_size[2] / stride) * stride)
            max_size = tuple(max_size)

        batch_shape = (len(tensors), ) + max_size
        # Zero-padded batch buffer; each image is copied into its top-left
        # corner.
        batched_imgs = tensors[0].new(*batch_shape).zero_()  # + 114
        for img, pad_img in zip(tensors, batched_imgs):
            pad_img[:img.shape[0], :img.shape[1], :img.shape[2]].copy_(img)

        image_sizes = [im.shape[-2:] for im in tensors]
        pad_sizes = [batched_imgs.shape[-2:] for im in batched_imgs]

        return ImageList(batched_imgs, image_sizes, pad_sizes)
    else:
        raise TypeError('Unsupported type for to_image_list: {}'.format(
            type(tensors)))


def transform_img(origin_img, size_divisibility, image_max_range, flip_prob,
                  image_mean, image_std, keep_ratio, infer_size=None):
    # Inference-time preprocessing: resize, (optional) flip, to-tensor,
    # normalize, then batch/pad via to_image_list.
    transform = [
        T.Resize(image_max_range, target_size=infer_size, keep_ratio=keep_ratio),
        T.RandomHorizontalFlip(flip_prob),
        T.ToTensor(),
        T.Normalize(mean=image_mean, std=image_std),
    ]
    transform = T.Compose(transform)
    img, _ = transform(origin_img)
    img = to_image_list(img, size_divisibility)
    return img
15,233 | import functools
import os
import pickle
import time
from contextlib import contextmanager
import numpy as np
import torch
from loguru import logger
from torch import distributed as dist
def get_num_devices():
    """Number of visible CUDA devices.

    Prefers the CUDA_VISIBLE_DEVICES environment variable; otherwise
    falls back to counting the lines printed by ``nvidia-smi -L``.
    """
    visible = os.getenv('CUDA_VISIBLE_DEVICES', None)
    if visible is not None:
        return len(visible.split(','))
    listing = os.popen('nvidia-smi -L').read().strip().split('\n')
    return len(listing)
15,234 | import functools
import os
import pickle
import time
from contextlib import contextmanager
import numpy as np
import torch
from loguru import logger
from torch import distributed as dist
The provided code snippet includes necessary dependencies for implementing the `wait_for_the_master` function. Write a Python function `def wait_for_the_master(local_rank: int)` to solve the following problem:
Make all processes waiting for the master to do some task.
Here is the function:
@contextmanager
def wait_for_the_master(local_rank: int):
    """
    Make all processes waiting for the master to do some task.
    """
    # BUGFIX: this generator was missing the @contextmanager decorator, so
    # `with wait_for_the_master(r):` failed — calling it only produced a
    # generator object instead of entering the block.
    #
    # Non-master ranks block here until the master reaches its barrier
    # below, i.e. their `with` body runs only after the master is done.
    if local_rank > 0:
        dist.barrier()
    yield
    if local_rank == 0:
        # Master releases the waiting ranks; no-op when torch.distributed
        # is unavailable or the process group is not initialized.
        if not dist.is_available():
            return
        if not dist.is_initialized():
            return
        else:
            dist.barrier()
15,235 | import functools
import os
import pickle
import time
from contextlib import contextmanager
import numpy as np
import torch
from loguru import logger
from torch import distributed as dist
_LOCAL_PROCESS_GROUP = None
def get_rank() -> int:
if not dist.is_available():
return 0
if not dist.is_initialized():
return 0
return dist.get_rank()
The provided code snippet includes necessary dependencies for implementing the `get_local_rank` function. Write a Python function `def get_local_rank() -> int` to solve the following problem:
Returns: The rank of the current process within the local (per-machine) process group.
Here is the function:
def get_local_rank() -> int:
    """
    Returns:
        The rank of the current process within the
        local (per-machine) process group.
    """
    # Outside an initialized process group everything is rank 0.
    if not dist.is_available() or not dist.is_initialized():
        return 0
    assert _LOCAL_PROCESS_GROUP is not None
    return dist.get_rank(group=_LOCAL_PROCESS_GROUP)
15,236 | import functools
import os
import pickle
import time
from contextlib import contextmanager
import numpy as np
import torch
from loguru import logger
from torch import distributed as dist
_LOCAL_PROCESS_GROUP = None
def get_world_size() -> int:
if not dist.is_available():
return 1
if not dist.is_initialized():
return 1
return dist.get_world_size()
The provided code snippet includes necessary dependencies for implementing the `get_local_size` function. Write a Python function `def get_local_size() -> int` to solve the following problem:
Returns: The size of the per-machine process group, i.e. the number of processes per machine.
Here is the function:
def get_local_size() -> int:
    """
    Returns:
        The size of the per-machine process group,
        i.e. the number of processes per machine.
    """
    # A single, non-distributed process counts as a group of one.
    if not dist.is_available() or not dist.is_initialized():
        return 1
    return dist.get_world_size(group=_LOCAL_PROCESS_GROUP)
15,237 | import functools
import os
import pickle
import time
from contextlib import contextmanager
import numpy as np
import torch
from loguru import logger
from torch import distributed as dist
def get_rank() -> int:
def is_main_process() -> bool:
return get_rank() == 0 | null |
15,238 | import functools
import os
import pickle
import time
from contextlib import contextmanager
import numpy as np
import torch
from loguru import logger
from torch import distributed as dist
def get_world_size() -> int:
if not dist.is_available():
return 1
if not dist.is_initialized():
return 1
return dist.get_world_size()
def get_rank() -> int:
if not dist.is_available():
return 0
if not dist.is_initialized():
return 0
return dist.get_rank()
def _get_global_gloo_group():
"""
Return a process group based on gloo backend, containing all the ranks
The result is cached.
"""
if dist.get_backend() == 'nccl':
return dist.new_group(backend='gloo')
else:
return dist.group.WORLD
def _serialize_to_tensor(data, group):
backend = dist.get_backend(group)
assert backend in ['gloo', 'nccl']
device = torch.device('cpu' if backend == 'gloo' else 'cuda')
buffer = pickle.dumps(data)
if len(buffer) > 1024**3:
logger.warning(
'Rank {} trying to all-gather {:.2f} GB of data on device {}'.
format(get_rank(),
len(buffer) / (1024**3), device))
storage = torch.ByteStorage.from_buffer(buffer)
tensor = torch.ByteTensor(storage).to(device=device)
return tensor
def _pad_to_largest_tensor(tensor, group):
"""
Returns:
list[int]: size of the tensor, on each rank
Tensor: padded tensor that has the max size
"""
world_size = dist.get_world_size(group=group)
assert (
world_size >= 1
), 'comm.gather/all_gather must be called from ranks within the group!'
local_size = torch.tensor([tensor.numel()],
dtype=torch.int64,
device=tensor.device)
size_list = [
torch.zeros([1], dtype=torch.int64, device=tensor.device)
for _ in range(world_size)
]
dist.all_gather(size_list, local_size, group=group)
size_list = [int(size.item()) for size in size_list]
max_size = max(size_list)
# we pad the tensor because torch all_gather does not support
# gathering tensors of different shapes
if local_size != max_size:
padding = torch.zeros((max_size - local_size, ),
dtype=torch.uint8,
device=tensor.device)
tensor = torch.cat((tensor, padding), dim=0)
return size_list, tensor
The provided code snippet includes necessary dependencies for implementing the `gather` function. Write a Python function `def gather(data, dst=0, group=None)` to solve the following problem:
Run gather on arbitrary picklable data (not necessarily tensors). Args: data: any picklable object dst (int): destination rank group: a torch process group. By default, will use a group which contains all ranks on gloo backend. Returns: list[data]: on dst, a list of data gathered from each rank. Otherwise, an empty list.
Here is the function:
def gather(data, dst=0, group=None):
    """
    Run gather on arbitrary picklable data (not necessarily tensors).
    Args:
        data: any picklable object
        dst (int): destination rank
        group: a torch process group. By default, will use a group which
            contains all ranks on gloo backend.
    Returns:
        list[data]: on dst, a list of data gathered from each rank. Otherwise,
            an empty list.
    """
    # Single-process fast path: nothing to communicate.
    if get_world_size() == 1:
        return [data]
    if group is None:
        group = _get_global_gloo_group()
    if dist.get_world_size(group=group) == 1:
        return [data]
    rank = dist.get_rank(group=group)

    # Pickle `data` into a byte tensor, then pad all ranks' tensors to a
    # common length (dist.gather cannot handle ragged shapes).
    tensor = _serialize_to_tensor(data, group)
    size_list, tensor = _pad_to_largest_tensor(tensor, group)

    # receiving Tensor from all ranks
    if rank == dst:
        max_size = max(size_list)
        tensor_list = [
            torch.empty((max_size, ), dtype=torch.uint8, device=tensor.device)
            for _ in size_list
        ]
        dist.gather(tensor, tensor_list, dst=dst, group=group)

        data_list = []
        # Strip each rank's padding before unpickling.
        for size, tensor in zip(size_list, tensor_list):
            buffer = tensor.cpu().numpy().tobytes()[:size]
            data_list.append(pickle.loads(buffer))
        return data_list
    else:
        # Non-destination ranks only send; they receive nothing.
        dist.gather(tensor, [], dst=dst, group=group)
        return []
15,239 | import functools
import os
import pickle
import time
from contextlib import contextmanager
import numpy as np
import torch
from loguru import logger
from torch import distributed as dist
def all_gather(data, group=None):
"""
Run all_gather on arbitrary picklable data (not necessarily tensors).
Args:
data: any picklable object
group: a torch process group. By default, will use a group which
contains all ranks on gloo backend.
Returns:
list[data]: list of data gathered from each rank
"""
if get_world_size() == 1:
return [data]
if group is None:
group = _get_global_gloo_group()
if dist.get_world_size(group) == 1:
return [data]
tensor = _serialize_to_tensor(data, group)
size_list, tensor = _pad_to_largest_tensor(tensor, group)
max_size = max(size_list)
# receiving Tensor from all ranks
tensor_list = [
torch.empty((max_size, ), dtype=torch.uint8, device=tensor.device)
for _ in size_list
]
dist.all_gather(tensor_list, tensor, group=group)
data_list = []
for size, tensor in zip(size_list, tensor_list):
buffer = tensor.cpu().numpy().tobytes()[:size]
data_list.append(pickle.loads(buffer))
return data_list
The provided code snippet includes necessary dependencies for implementing the `shared_random_seed` function. Write a Python function `def shared_random_seed()` to solve the following problem:
Returns: int: a random number that is the same across all workers. If workers need a shared RNG, they can use this shared seed to create one. All workers must call this function, otherwise it will deadlock.
Here is the function:
def shared_random_seed():
"""
Returns:
int: a random number that is the same across all workers.
If workers need a shared RNG, they can use this shared seed to
create one.
All workers must call this function, otherwise it will deadlock.
"""
ints = np.random.randint(2**31)
all_ints = all_gather(ints)
return all_ints[0] | Returns: int: a random number that is the same across all workers. If workers need a shared RNG, they can use this shared seed to create one. All workers must call this function, otherwise it will deadlock. |
15,240 | import functools
import os
import pickle
import time
from contextlib import contextmanager
import numpy as np
import torch
from loguru import logger
from torch import distributed as dist
def synchronize():
    """
    Helper function to synchronize (barrier)
    among all processes when using distributed training
    """
    # Nothing to do outside an initialized multi-process group.
    if not dist.is_available() or not dist.is_initialized():
        return
    if dist.get_world_size() == 1:
        return
    dist.barrier()
The provided code snippet includes necessary dependencies for implementing the `time_synchronized` function. Write a Python function `def time_synchronized()` to solve the following problem:
pytorch-accurate time
Here is the function:
def time_synchronized():
    """pytorch-accurate time: flush pending CUDA work before reading the clock."""
    cuda_ready = torch.cuda.is_available()
    if cuda_ready:
        torch.cuda.synchronize()
    return time.time()
15,241 | import time
from copy import deepcopy
import torch
import torch.nn as nn
from thop import profile
def make_divisible(v, divisor=8, min_value=None):
if min_value is None:
min_value = divisor
new_v = max(min_value, int(v + divisor / 2) // divisor * divisor)
if new_v < 0.9 * v:
new_v += divisor
return new_v | null |
15,242 | import time
from copy import deepcopy
import torch
import torch.nn as nn
from thop import profile
def get_latency(model, inp, iters=500, warmup=2):
    """Average per-call latency of ``model(inp)``.

    The clock is restarted during the first *warmup* iterations so only
    the steady-state calls are timed; CUDA is synchronized after each call
    when available. Returns ``(last_output, seconds_per_call)``.
    """
    tic = time.time()
    out = None
    for step in range(iters):
        out = model(inp)
        if torch.cuda.is_available():
            torch.cuda.synchronize()
        if step <= warmup:
            tic = time.time()
    return out, (time.time() - tic) / (iters - warmup)
def get_model_info(model, tsize):
stride = 640
model = model.eval()
backbone = model.backbone
neck = model.neck
head = model.head
h, w = tsize
img = torch.randn((1, 3, stride, stride),
device=next(model.parameters()).device)
bf, bp = profile(deepcopy(backbone), inputs=(img, ), verbose=False)
bo, bl = get_latency(backbone, img, iters=10)
nf, np = profile(deepcopy(neck), inputs=(bo, ), verbose=False)
no, nl = get_latency(neck, bo, iters=10)
hf, hp = profile(deepcopy(head), inputs=(no, ), verbose=False)
ho, hl = get_latency(head, no, iters=10)
_, total_latency = get_latency(model, img)
total_flops = 0
total_params = 0
info = ''
for name, flops, params, latency in zip(('backbone', 'neck', 'head'),
(bf, nf, hf), (bp, np, hp),
(bl, nl, hl)):
params /= 1e6
flops /= 1e9
flops *= tsize[0] * tsize[1] / stride / stride * 2 # Gflops
total_flops += flops
total_params += params
info += f"{name}'s params(M): {params:.2f}, " + \
f'flops(G): {flops:.2f}, latency(ms): {latency*1000:.3f}\n'
info += f'total latency(ms): {total_latency*1000:.3f}, ' + \
f'total flops(G): {total_flops:.2f}, ' + f'total params(M): {total_params:.2f}\n'
return info | null |
15,243 | import time
from copy import deepcopy
import torch
import torch.nn as nn
from thop import profile
def fuse_conv_and_bn(conv, bn):
    """Fold a BatchNorm2d into the preceding Conv2d for inference.

    Returns a new, gradient-free Conv2d (with bias) whose output equals
    ``bn(conv(x))`` using the BN running statistics.
    https://tehnokv.com/posts/fusing-batchnorm-and-conv/
    """
    fused = (nn.Conv2d(
        conv.in_channels,
        conv.out_channels,
        kernel_size=conv.kernel_size,
        stride=conv.stride,
        padding=conv.padding,
        groups=conv.groups,
        bias=True,
    ).requires_grad_(False).to(conv.weight.device))

    # Fold the BN scale (gamma / sqrt(var + eps)) into the conv weights.
    scale = torch.diag(bn.weight.div(torch.sqrt(bn.eps + bn.running_var)))
    flat_weight = conv.weight.clone().view(conv.out_channels, -1)
    fused.weight.copy_(torch.mm(scale, flat_weight).view(fused.weight.shape))

    # Fold the BN shift into the bias; the conv may have had no bias.
    conv_bias = (torch.zeros(conv.weight.size(0), device=conv.weight.device)
                 if conv.bias is None else conv.bias)
    bn_shift = bn.bias - bn.weight.mul(bn.running_mean).div(
        torch.sqrt(bn.running_var + bn.eps))
    fused.bias.copy_(
        torch.mm(scale, conv_bias.reshape(-1, 1)).reshape(-1) + bn_shift)
    return fused
class ConvBNAct(nn.Module):
"""A Conv2d -> Batchnorm -> silu/leaky relu block"""
def __init__(
self,
in_channels,
out_channels,
ksize,
stride=1,
groups=1,
bias=False,
act='silu',
norm='bn',
reparam=False,
):
super().__init__()
# same padding
pad = (ksize - 1) // 2
self.conv = nn.Conv2d(
in_channels,
out_channels,
kernel_size=ksize,
stride=stride,
padding=pad,
groups=groups,
bias=bias,
)
if norm is not None:
self.bn = get_norm(norm, out_channels)
if act is not None:
self.act = get_activation(act, inplace=True)
self.with_norm = norm is not None
self.with_act = act is not None
def forward(self, x):
x = self.conv(x)
if self.with_norm:
x = self.bn(x)
if self.with_act:
x = self.act(x)
return x
def fuseforward(self, x):
return self.act(self.conv(x))
class ConvKXBN(nn.Module):
    """KxK convolution followed by BatchNorm (no activation).

    Padding is (k - 1) // 2, so spatial size is preserved at stride 1.
    The attribute names ``conv1``/``bn1`` are part of the interface:
    ``fuse_model`` folds bn1 into conv1 and swaps forward for fuseforward.
    """

    def __init__(self, in_c, out_c, kernel_size, stride):
        super(ConvKXBN, self).__init__()
        self.conv1 = nn.Conv2d(in_c,
                               out_c,
                               kernel_size,
                               stride, (kernel_size - 1) // 2,
                               groups=1,
                               bias=False)
        self.bn1 = nn.BatchNorm2d(out_c)

    def forward(self, x):
        return self.bn1(self.conv1(x))

    def fuseforward(self, x):
        # Used after fusion: bn1 has been folded into conv1.
        return self.conv1(x)
def fuse_model(model):
from damo.base_models.core.ops import ConvBNAct
from damo.base_models.backbones.tinynas_res import ConvKXBN
for m in model.modules():
if type(m) is ConvBNAct and hasattr(m, 'bn'):
m.conv = fuse_conv_and_bn(m.conv, m.bn) # update conv
delattr(m, 'bn') # remove batchnorm
m.forward = m.fuseforward # update forward
elif type(m) is ConvKXBN and hasattr(m, 'bn1'):
m.conv1 = fuse_conv_and_bn(m.conv1, m.bn1) # update conv
delattr(m, 'bn1') # remove batchnorm
m.forward = m.fuseforward # update forward
return model | null |
15,244 | import time
from copy import deepcopy
import torch
import torch.nn as nn
from thop import profile
The provided code snippet includes necessary dependencies for implementing the `replace_module` function. Write a Python function `def replace_module(module, replaced_module_type, new_module_type, replace_func=None)` to solve the following problem:
Replace given type in module to a new type. mostly used in deploy. Args: module (nn.Module): model to apply replace operation. replaced_module_type (Type): module type to be replaced. new_module_type (Type) replace_func (function): python function to describe replace logic. Defalut value None. Returns: model (nn.Module): module that already been replaced.
Here is the function:
def replace_module(module,
                   replaced_module_type,
                   new_module_type,
                   replace_func=None):
    """
    Replace given type in module to a new type. mostly used in deploy.
    Args:
        module (nn.Module): model to apply replace operation.
        replaced_module_type (Type): module type to be replaced.
        new_module_type (Type)
        replace_func (function): python function to describe replace logic.
            Defalut value None.
    Returns:
        model (nn.Module): module that already been replaced.
    """
    def default_replace_func(replaced_module_type, new_module_type):
        # Default policy: instantiate the new type with no arguments.
        return new_module_type()

    if replace_func is None:
        replace_func = default_replace_func

    model = module
    if isinstance(module, replaced_module_type):
        model = replace_func(replaced_module_type, new_module_type)
    else:  # recurrsively replace
        # BUGFIX: forward `replace_func` into the recursion — it used to be
        # dropped, so a custom factory only applied at the top level.
        for name, child in module.named_children():
            new_child = replace_module(child, replaced_module_type,
                                       new_module_type, replace_func)
            if new_child is not child:  # child is already replaced
                model.add_module(name, new_child)
    return model
15,245 | import functools
import os
from collections import defaultdict, deque
import numpy as np
import torch
def get_total_and_free_memory_in_Mb(cuda_device):
devices_info_str = os.popen(
'nvidia-smi --query-gpu=memory.total,memory.used \
--format=csv,nounits,noheader')
devices_info = devices_info_str.read().strip().split('\n')
total, used = devices_info[int(cuda_device)].split(',')
return int(total), int(used) | null |
15,246 | import functools
import os
from collections import defaultdict, deque
import numpy as np
import torch
The provided code snippet includes necessary dependencies for implementing the `gpu_mem_usage` function. Write a Python function `def gpu_mem_usage()` to solve the following problem:
Compute the GPU memory usage for the current device (MB).
Here is the function:
def gpu_mem_usage():
"""
Compute the GPU memory usage for the current device (MB).
"""
mem_usage_bytes = torch.cuda.max_memory_allocated()
return mem_usage_bytes / (1024 * 1024) | Compute the GPU memory usage for the current device (MB). |
15,247 | import torch
import sys
if sys.version_info[0] == 3 and sys.version_info[1] >= 7:
import importlib
import importlib.util
import sys
else:
import imp
def import_file(module_name, file_path, make_importable=False):
    """Load the Python source at *file_path* as a module named *module_name*.

    When *make_importable* is true the module is also registered in
    ``sys.modules`` so later ``import module_name`` statements find it.
    """
    spec = importlib.util.spec_from_file_location(module_name, file_path)
    loaded = importlib.util.module_from_spec(spec)
    spec.loader.exec_module(loaded)
    if make_importable:
        sys.modules[module_name] = loaded
    return loaded
15,248 | import torch
import sys
def import_file(module_name, file_path, make_importable=None):
module = imp.load_source(module_name, file_path)
return module | null |
15,249 | import inspect
import os
import sys
import datetime
from loguru import logger
The provided code snippet includes necessary dependencies for implementing the `get_caller_name` function. Write a Python function `def get_caller_name(depth=0)` to solve the following problem:
Args: depth (int): Depth of caller conext, use 0 for caller depth. Default value: 0. Returns: str: module name of the caller
Here is the function:
def get_caller_name(depth=0):
"""
Args:
depth (int): Depth of caller conext, use 0 for caller depth.
Default value: 0.
Returns:
str: module name of the caller
"""
# the following logic is a little bit faster than inspect.stack() logic
frame = inspect.currentframe().f_back
for _ in range(depth):
frame = frame.f_back
return frame.f_globals['__name__'] | Args: depth (int): Depth of caller conext, use 0 for caller depth. Default value: 0. Returns: str: module name of the caller |
15,250 | import inspect
import os
import sys
import datetime
from loguru import logger
def redirect_sys_output(log_level='INFO'):
redirect_logger = StreamToLoguru(log_level)
sys.stderr = redirect_logger
sys.stdout = redirect_logger
The provided code snippet includes necessary dependencies for implementing the `setup_logger` function. Write a Python function `def setup_logger(save_dir, distributed_rank=0, mode='a')` to solve the following problem:
setup logger for training and testing. Args: save_dir(str): location to save log file distributed_rank(int): device rank when multi-gpu environment mode(str): log file write mode, `append` or `override`. default is `a`. Return: logger instance.
Here is the function:
def setup_logger(save_dir, distributed_rank=0, mode='a'):
"""setup logger for training and testing.
Args:
save_dir(str): location to save log file
distributed_rank(int): device rank when multi-gpu environment
mode(str): log file write mode, `append` or `override`. default is `a`.
Return:
logger instance.
"""
loguru_format = (
'<green>{time:YYYY-MM-DD HH:mm:ss}</green> | '
'<level>{level: <8}</level> | '
'<cyan>{name}</cyan>:<cyan>{line}</cyan> - <level>{message}</level>')
logger.remove()
# only keep logger in rank0 process
if distributed_rank == 0:
filename = datetime.datetime.now().strftime('%Y-%m-%d-%H%M')
save_file = os.path.join(save_dir, filename)
logger.add(
sys.stderr,
format=loguru_format,
level='INFO',
enqueue=True,
)
logger.add(save_file)
# redirect stdout/stderr to loguru
redirect_sys_output('INFO') | setup logger for training and testing. Args: save_dir(str): location to save log file distributed_rank(int): device rank when multi-gpu environment mode(str): log file write mode, `append` or `override`. default is `a`. Return: logger instance. |
15,251 | import numpy as np
import torch
import torchvision
from damo.structures.bounding_box import BoxList
The provided code snippet includes necessary dependencies for implementing the `filter_box` function. Write a Python function `def filter_box(output, scale_range)` to solve the following problem:
output: (N, 5+class) shape
Here is the function:
def filter_box(output, scale_range):
"""
output: (N, 5+class) shape
"""
min_scale, max_scale = scale_range
w = output[:, 2] - output[:, 0]
h = output[:, 3] - output[:, 1]
keep = (w * h > min_scale * min_scale) & (w * h < max_scale * max_scale)
return output[keep] | output: (N, 5+class) shape |
15,252 | import numpy as np
import torch
import torchvision
from damo.structures.bounding_box import BoxList
def multiclass_nms(multi_bboxes,
multi_scores,
score_thr,
iou_thr,
max_num=100,
score_factors=None):
"""NMS for multi-class bboxes.
Args:
multi_bboxes (Tensor): shape (n, #class*4) or (n, 4)
multi_scores (Tensor): shape (n, #class), where the last column
contains scores of the background class, but this will be ignored.
score_thr (float): bbox threshold, bboxes with scores lower than it
will not be considered.
nms_thr (float): NMS IoU threshold
max_num (int): if there are more than max_num bboxes after NMS,
only top max_num will be kept.
score_factors (Tensor): The factors multiplied to scores before
applying NMS
Returns:
tuple: (bboxes, labels), tensors of shape (k, 5) and (k, 1). Labels \
are 0-based.
"""
num_classes = multi_scores.size(1)
# exclude background category
if multi_bboxes.shape[1] > 4:
bboxes = multi_bboxes.view(multi_scores.size(0), -1, 4)
else:
bboxes = multi_bboxes[:, None].expand(multi_scores.size(0),
num_classes, 4)
scores = multi_scores
# filter out boxes with low scores
valid_mask = scores > score_thr # 1000 * 80 bool
# We use masked_select for ONNX exporting purpose,
# which is equivalent to bboxes = bboxes[valid_mask]
# (TODO): as ONNX does not support repeat now,
# we have to use this ugly code
# bboxes -> 1000, 4
bboxes = torch.masked_select(
bboxes,
torch.stack((valid_mask, valid_mask, valid_mask, valid_mask),
-1)).view(-1, 4) # mask-> 1000*80*4, 80000*4
if score_factors is not None:
scores = scores * score_factors[:, None]
scores = torch.masked_select(scores, valid_mask)
labels = valid_mask.nonzero(as_tuple=False)[:, 1]
if bboxes.numel() == 0:
bboxes = multi_bboxes.new_zeros((0, 5))
labels = multi_bboxes.new_zeros((0, ), dtype=torch.long)
scores = multi_bboxes.new_zeros((0, ))
return bboxes, scores, labels
keep = torchvision.ops.batched_nms(bboxes, scores, labels, iou_thr)
if max_num > 0:
keep = keep[:max_num]
return bboxes[keep], scores[keep], labels[keep]
class BoxList(object):
"""
This class represents a set of bounding boxes.
The bounding boxes are represented as a Nx4 Tensor.
In order to uniquely determine the bounding boxes with respect
to an image, we also store the corresponding image dimensions.
They can contain extra information that is specific to each bounding box,
such as labels.
"""
def __init__(self, bbox, image_size, mode='xyxy'):
device = bbox.device if isinstance(
bbox, torch.Tensor) else torch.device('cpu')
bbox = torch.as_tensor(bbox, dtype=torch.float32, device=device)
if bbox.ndimension() != 2:
raise ValueError('bbox should have 2 dimensions, got {}'.format(
bbox.ndimension()))
if bbox.size(-1) != 4:
raise ValueError('last dimension of bbox should have a '
'size of 4, got {}'.format(bbox.size(-1)))
if mode not in ('xyxy', 'xywh'):
raise ValueError("mode should be 'xyxy' or 'xywh'")
self.bbox = bbox
self.size = image_size # (image_width, image_height)
self.mode = mode
self.extra_fields = {}
def add_field(self, field, field_data):
self.extra_fields[field] = field_data
def get_field(self, field):
return self.extra_fields[field]
def has_field(self, field):
return field in self.extra_fields
def fields(self):
return list(self.extra_fields.keys())
def _copy_extra_fields(self, bbox):
for k, v in bbox.extra_fields.items():
self.extra_fields[k] = v
def convert(self, mode):
if mode not in ('xyxy', 'xywh'):
raise ValueError("mode should be 'xyxy' or 'xywh'")
if mode == self.mode:
return self
# we only have two modes, so don't need to check
# self.mode
xmin, ymin, xmax, ymax = self._split_into_xyxy()
if mode == 'xyxy':
bbox = torch.cat((xmin, ymin, xmax, ymax), dim=-1)
bbox = BoxList(bbox, self.size, mode=mode)
else:
TO_REMOVE = 0
bbox = torch.cat(
(xmin, ymin, xmax - xmin + TO_REMOVE, ymax - ymin + TO_REMOVE),
dim=-1)
bbox = BoxList(bbox, self.size, mode=mode)
bbox._copy_extra_fields(self)
return bbox
def _split_into_xyxy(self):
if self.mode == 'xyxy':
xmin, ymin, xmax, ymax = self.bbox.split(1, dim=-1)
return xmin, ymin, xmax, ymax
elif self.mode == 'xywh':
TO_REMOVE = 0
xmin, ymin, w, h = self.bbox.split(1, dim=-1)
return (
xmin,
ymin,
xmin + (w - TO_REMOVE).clamp(min=0),
ymin + (h - TO_REMOVE).clamp(min=0),
)
else:
raise RuntimeError('Should not be here')
def resize(self, size, *args, **kwargs):
"""
Returns a resized copy of this bounding box
:param size: The requested size in pixels, as a 2-tuple:
(width, height).
"""
ratios = tuple(
float(s) / float(s_orig) for s, s_orig in zip(size, self.size))
if ratios[0] == ratios[1]:
ratio = ratios[0]
scaled_box = self.bbox * ratio
bbox = BoxList(scaled_box, size, mode=self.mode)
for k, v in self.extra_fields.items():
if not isinstance(v, torch.Tensor):
v = v.resize(size, *args, **kwargs)
bbox.add_field(k, v)
return bbox
ratio_width, ratio_height = ratios
xmin, ymin, xmax, ymax = self._split_into_xyxy()
scaled_xmin = xmin * ratio_width
scaled_xmax = xmax * ratio_width
scaled_ymin = ymin * ratio_height
scaled_ymax = ymax * ratio_height
scaled_box = torch.cat(
(scaled_xmin, scaled_ymin, scaled_xmax, scaled_ymax), dim=-1)
bbox = BoxList(scaled_box, size, mode='xyxy')
for k, v in self.extra_fields.items():
if not isinstance(v, torch.Tensor):
v = v.resize(size, *args, **kwargs)
bbox.add_field(k, v)
return bbox.convert(self.mode)
def transpose(self, method):
"""
Transpose bounding box (flip or rotate in 90 degree steps)
:param method: One of :py:attr:`PIL.Image.FLIP_LEFT_RIGHT`,
:py:attr:`PIL.Image.FLIP_TOP_BOTTOM`, :py:attr:`PIL.Image.ROTATE_90`,
:py:attr:`PIL.Image.ROTATE_180`, :py:attr:`PIL.Image.ROTATE_270`,
:py:attr:`PIL.Image.TRANSPOSE` or :py:attr:`PIL.Image.TRANSVERSE`.
"""
if method not in (FLIP_LEFT_RIGHT, FLIP_TOP_BOTTOM):
raise NotImplementedError(
'Only FLIP_LEFT_RIGHT and FLIP_TOP_BOTTOM implemented')
image_width, image_height = self.size
xmin, ymin, xmax, ymax = self._split_into_xyxy()
if method == FLIP_LEFT_RIGHT:
TO_REMOVE = 0
transposed_xmin = image_width - xmax - TO_REMOVE
transposed_xmax = image_width - xmin - TO_REMOVE
transposed_ymin = ymin
transposed_ymax = ymax
elif method == FLIP_TOP_BOTTOM:
transposed_xmin = xmin
transposed_xmax = xmax
transposed_ymin = image_height - ymax
transposed_ymax = image_height - ymin
transposed_boxes = torch.cat((transposed_xmin, transposed_ymin,
transposed_xmax, transposed_ymax),
dim=-1)
bbox = BoxList(transposed_boxes, self.size, mode='xyxy')
for k, v in self.extra_fields.items():
if not isinstance(v, torch.Tensor):
v = v.transpose(method)
bbox.add_field(k, v)
return bbox.convert(self.mode)
def crop(self, box):
"""
Cropss a rectangular region from this bounding box. The box is a
4-tuple defining the left, upper, right, and lower pixel
coordinate.
"""
xmin, ymin, xmax, ymax = self._split_into_xyxy()
w, h = box[2] - box[0], box[3] - box[1]
cropped_xmin = (xmin - box[0]).clamp(min=0, max=w)
cropped_ymin = (ymin - box[1]).clamp(min=0, max=h)
cropped_xmax = (xmax - box[0]).clamp(min=0, max=w)
cropped_ymax = (ymax - box[1]).clamp(min=0, max=h)
cropped_box = torch.cat(
(cropped_xmin, cropped_ymin, cropped_xmax, cropped_ymax), dim=-1)
bbox = BoxList(cropped_box, (w, h), mode='xyxy')
for k, v in self.extra_fields.items():
if not isinstance(v, torch.Tensor):
v = v.crop(box)
bbox.add_field(k, v)
return bbox.convert(self.mode)
# Tensor-like methods
def to(self, device):
bbox = BoxList(self.bbox.to(device), self.size, self.mode)
for k, v in self.extra_fields.items():
if hasattr(v, 'to'):
v = v.to(device)
bbox.add_field(k, v)
return bbox
def __getitem__(self, item):
bbox = BoxList(self.bbox[item], self.size, self.mode)
for k, v in self.extra_fields.items():
bbox.add_field(k, v[item])
return bbox
def __len__(self):
return self.bbox.shape[0]
def clip_to_image(self, remove_empty=True):
TO_REMOVE = 0
self.bbox[:, 0].clamp_(min=0, max=self.size[0] - TO_REMOVE)
self.bbox[:, 1].clamp_(min=0, max=self.size[1] - TO_REMOVE)
self.bbox[:, 2].clamp_(min=0, max=self.size[0] - TO_REMOVE)
self.bbox[:, 3].clamp_(min=0, max=self.size[1] - TO_REMOVE)
if remove_empty:
box = self.bbox
keep = (box[:, 3] > box[:, 1]) & (box[:, 2] > box[:, 0])
return self[keep]
return self
def area(self):
box = self.bbox
if self.mode == 'xyxy':
TO_REMOVE = 0
area = (box[:, 2] - box[:, 0] +
TO_REMOVE) * (box[:, 3] - box[:, 1] + TO_REMOVE)
elif self.mode == 'xywh':
area = box[:, 2] * box[:, 3]
else:
raise RuntimeError('Should not be here')
return area
def copy_with_fields(self, fields, skip_missing=False):
bbox = BoxList(self.bbox, self.size, self.mode)
if not isinstance(fields, (list, tuple)):
fields = [fields]
for field in fields:
if self.has_field(field):
bbox.add_field(field, self.get_field(field))
elif not skip_missing:
raise KeyError("Field '{}' not found in {}".format(
field, self))
return bbox
def __repr__(self):
s = self.__class__.__name__ + '('
s += 'num_boxes={}, '.format(len(self))
s += 'image_width={}, '.format(self.size[0])
s += 'image_height={}, '.format(self.size[1])
s += 'mode={})'.format(self.mode)
return s
def postprocess(cls_scores,
bbox_preds,
num_classes,
conf_thre=0.7,
nms_thre=0.45,
imgs=None):
batch_size = bbox_preds.size(0)
output = [None for _ in range(batch_size)]
for i in range(batch_size):
# If none are remaining => process next image
if not bbox_preds[i].size(0):
continue
detections, scores, labels = multiclass_nms(bbox_preds[i],
cls_scores[i], conf_thre,
nms_thre, 500)
detections = torch.cat((detections, torch.ones_like(
scores[:, None]), scores[:, None], labels[:, None]),
dim=1)
if output[i] is None:
output[i] = detections
else:
output[i] = torch.cat((output[i], detections))
# transfer to BoxList
for i in range(len(output)):
res = output[i]
if res is None or imgs is None:
boxlist = BoxList(torch.zeros(0, 4), (0, 0), mode='xyxy')
boxlist.add_field('objectness', 0)
boxlist.add_field('scores', 0)
boxlist.add_field('labels', -1)
else:
img_h, img_w = imgs.image_sizes[i]
boxlist = BoxList(res[:, :4], (img_w, img_h), mode='xyxy')
boxlist.add_field('objectness', res[:, 4])
boxlist.add_field('scores', res[:, 5])
boxlist.add_field('labels', res[:, 6])
output[i] = boxlist
return output | null |
15,253 | import numpy as np
import torch
import torchvision
from damo.structures.bounding_box import BoxList
def bboxes_iou(bboxes_a, bboxes_b, xyxy=True):
if bboxes_a.shape[1] != 4 or bboxes_b.shape[1] != 4:
raise IndexError
if xyxy:
tl = torch.max(bboxes_a[:, None, :2], bboxes_b[:, :2])
br = torch.min(bboxes_a[:, None, 2:], bboxes_b[:, 2:])
area_a = torch.prod(bboxes_a[:, 2:] - bboxes_a[:, :2], 1)
area_b = torch.prod(bboxes_b[:, 2:] - bboxes_b[:, :2], 1)
else:
tl = torch.max(
(bboxes_a[:, None, :2] - bboxes_a[:, None, 2:] / 2),
(bboxes_b[:, :2] - bboxes_b[:, 2:] / 2),
)
br = torch.min(
(bboxes_a[:, None, :2] + bboxes_a[:, None, 2:] / 2),
(bboxes_b[:, :2] + bboxes_b[:, 2:] / 2),
)
area_a = torch.prod(bboxes_a[:, 2:], 1)
area_b = torch.prod(bboxes_b[:, 2:], 1)
en = (tl < br).type(tl.type()).prod(dim=2)
area_i = torch.prod(br - tl, 2) * en # * ((tl < br).all())
return area_i / (area_a[:, None] + area_b - area_i) | null |
15,254 | import numpy as np
import torch
import torchvision
from damo.structures.bounding_box import BoxList
The provided code snippet includes necessary dependencies for implementing the `matrix_iou` function. Write a Python function `def matrix_iou(a, b)` to solve the following problem:
return iou of a and b, numpy version for data augenmentation
Here is the function:
def matrix_iou(a, b):
"""
return iou of a and b, numpy version for data augenmentation
"""
lt = np.maximum(a[:, np.newaxis, :2], b[:, :2])
rb = np.minimum(a[:, np.newaxis, 2:], b[:, 2:])
area_i = np.prod(rb - lt, axis=2) * (lt < rb).all(axis=2)
area_a = np.prod(a[:, 2:] - a[:, :2], axis=1)
area_b = np.prod(b[:, 2:] - b[:, :2], axis=1)
return area_i / (area_a[:, np.newaxis] + area_b - area_i + 1e-12) | return iou of a and b, numpy version for data augenmentation |
15,255 | import numpy as np
import torch
import torchvision
from damo.structures.bounding_box import BoxList
def adjust_box_anns(bbox, scale_ratio, padw, padh, w_max, h_max):
bbox[:, 0::2] = np.clip(bbox[:, 0::2] * scale_ratio + padw, 0, w_max)
bbox[:, 1::2] = np.clip(bbox[:, 1::2] * scale_ratio + padh, 0, h_max)
return bbox | null |
15,256 | import numpy as np
import torch
import torchvision
from damo.structures.bounding_box import BoxList
def xyxy2xywh(bboxes):
bboxes[:, 2] = bboxes[:, 2] - bboxes[:, 0]
bboxes[:, 3] = bboxes[:, 3] - bboxes[:, 1]
return bboxes | null |
15,257 | import numpy as np
import torch
import torchvision
from damo.structures.bounding_box import BoxList
def xyxy2cxcywh(bboxes):
bboxes[:, 2] = bboxes[:, 2] - bboxes[:, 0]
bboxes[:, 3] = bboxes[:, 3] - bboxes[:, 1]
bboxes[:, 0] = bboxes[:, 0] + bboxes[:, 2] * 0.5
bboxes[:, 1] = bboxes[:, 1] + bboxes[:, 3] * 0.5
return bboxes | null |
15,258 | import argparse
import copy
import torch
from loguru import logger
from damo.apis import Trainer
from damo.config.base import parse_config
from damo.utils import synchronize
The provided code snippet includes necessary dependencies for implementing the `make_parser` function. Write a Python function `def make_parser()` to solve the following problem:
Create a parser with some common arguments used by users. Returns: argparse.ArgumentParser
Here is the function:
def make_parser():
"""
Create a parser with some common arguments used by users.
Returns:
argparse.ArgumentParser
"""
parser = argparse.ArgumentParser('Damo-Yolo train parser')
parser.add_argument(
'-f',
'--config_file',
default=None,
type=str,
help='plz input your config file',
)
parser.add_argument('--local_rank', type=int, default=0)
parser.add_argument('--tea_config', type=str, default=None)
parser.add_argument('--tea_ckpt', type=str, default=None)
parser.add_argument(
'opts',
help='Modify config options using the command-line',
default=None,
nargs=argparse.REMAINDER,
)
return parser | Create a parser with some common arguments used by users. Returns: argparse.ArgumentParser |
15,259 | import os
import tensorrt as trt
import pycuda.driver as cuda
import pycuda.autoinit
import numpy as np
import cv2
import glob
import ctypes
import logging
def letterbox(im, new_shape=(640, 640), color=(114, 114, 114), auto=True, scaleup=False, stride=32, return_int=False):
# Resize and pad image while meeting stride-multiple constraints
shape = im.shape[:2] # current shape [height, width]
if isinstance(new_shape, int):
new_shape = (new_shape, new_shape)
# Scale ratio (new / old)
r = min(new_shape[0] / shape[0], new_shape[1] / shape[1])
if not scaleup: # only scale down, do not scale up (for better val mAP)
r = min(r, 1.0)
# Compute padding
new_unpad = int(round(shape[1] * r)), int(round(shape[0] * r))
dw, dh = new_shape[1] - new_unpad[0], new_shape[0] - new_unpad[1] # wh padding
if auto: # minimum rectangle
dw, dh = np.mod(dw, stride), np.mod(dh, stride) # wh padding
dw /= 2 # divide padding into 2 sides
dh /= 2
if shape[::-1] != new_unpad: # resize
im = cv2.resize(im, new_unpad, interpolation=cv2.INTER_LINEAR)
top, bottom = int(round(dh - 0.1)), int(round(dh + 0.1))
left, right = int(round(dw - 0.1)), int(round(dw + 0.1))
im = cv2.copyMakeBorder(im, top, bottom, left, right, cv2.BORDER_CONSTANT, value=color) # add border
if not return_int:
return im, r, (dw, dh)
else:
return im, r, (left, top)
The provided code snippet includes necessary dependencies for implementing the `precess_image` function. Write a Python function `def precess_image(img_src, img_size, stride)` to solve the following problem:
Process image before image inference.
Here is the function:
def precess_image(img_src, img_size, stride):
'''Process image before image inference.'''
image = letterbox(img_src, img_size, auto=False, return_int=True)[0]
# Convert
image = image.transpose((2, 0, 1))[::-1] # HWC to CHW, BGR to RGB
image = np.ascontiguousarray(image).astype(np.float32)
return image | Process image before image inference. |
15,260 | import argparse
import os
import torch
from loguru import logger
import tensorrt as trt
from damo.apis.detector_inference_trt import inference
from damo.config.base import parse_config
from damo.dataset import build_dataloader, build_dataset
from damo.utils import setup_logger, synchronize
def make_parser():
parser = argparse.ArgumentParser('damo trt engine eval')
parser.add_argument(
'-f',
'--config_file',
default=None,
type=str,
help='pls input your config file',
)
parser.add_argument('-t',
'--trt',
default=None,
type=str,
help='trt for eval')
parser.add_argument('--conf', default=None, type=float, help='test conf')
parser.add_argument('--nms',
default=None,
type=float,
help='test nms threshold')
parser.add_argument('--batch_size',
type=int,
default=None,
help='inference image batch nums')
parser.add_argument('--img_size',
type=int,
default='640',
help='inference image shape')
parser.add_argument(
'--end2end',
action='store_true',
help='trt inference with nms',
)
parser.add_argument(
'opts',
help='Modify config options using the command-line',
default=None,
nargs=argparse.REMAINDER,
)
return parser | null |
15,261 | import argparse
import os
import torch
from loguru import logger
import tensorrt as trt
from damo.apis.detector_inference_trt import inference
from damo.config.base import parse_config
from damo.dataset import build_dataloader, build_dataset
from damo.utils import setup_logger, synchronize
def mkdir(path):
if not os.path.exists(path):
os.makedirs(path)
def inference(
config,
context,
data_loader,
dataset_name,
iou_types=('bbox', ),
box_only=False,
device='cuda',
expected_results=(),
expected_results_sigma_tol=4,
output_folder=None,
end2end=False,
):
# convert to a torch.device for efficiency
device = torch.device(device)
dataset = data_loader.dataset
logger.info('Start evaluation on {} dataset({} images).'.format(
dataset_name, len(dataset)))
total_timer = Timer()
inference_timer = Timer()
total_timer.tic()
predictions = compute_on_dataset(config, context, data_loader, device,
inference_timer, end2end)
# convert to a list
image_ids = list(sorted(predictions.keys()))
predictions = [predictions[i] for i in image_ids]
if output_folder:
torch.save(predictions, os.path.join(output_folder, 'predictions.pth'))
extra_args = dict(
box_only=box_only,
iou_types=iou_types,
expected_results=expected_results,
expected_results_sigma_tol=expected_results_sigma_tol,
)
return evaluate(dataset=dataset,
predictions=predictions,
output_folder=output_folder,
**extra_args)
def trt_inference(config,
trt_name,
img_size,
batch_size=None,
conf=None,
nms=None,
end2end=False):
# dist init
os.environ['MASTER_ADDR'] = 'localhost'
os.environ['MASTER_PORT'] = '5678'
os.environ['WORLD_SIZE'] = '1'
torch.distributed.init_process_group(backend='nccl',
init_method='env://',
rank=0)
synchronize()
file_name = os.path.join(config.miscs.output_dir, config.miscs.exp_name)
os.makedirs(file_name, exist_ok=True)
setup_logger(file_name,
distributed_rank=0,
mode='a')
if conf is not None:
config.model.head.nms_conf_thre = conf
if nms is not None:
config.model.head.nms_iou_thre = nms
if batch_size is not None:
config.test.batch_size = batch_size
# set logs
loggert = trt.Logger(trt.Logger.INFO)
trt.init_libnvinfer_plugins(loggert, '')
# initialize
t = open(trt_name, 'rb')
runtime = trt.Runtime(loggert)
model = runtime.deserialize_cuda_engine(t.read())
context = model.create_execution_context()
# start evaluate
output_folders = [None] * len(config.dataset.val_ann)
if config.miscs.output_dir:
for idx, dataset_name in enumerate(config.dataset.val_ann):
output_folder = os.path.join(config.miscs.output_dir, 'inference',
dataset_name)
mkdir(output_folder)
output_folders[idx] = output_folder
val_dataset = build_dataset(config, config.dataset.val_ann, is_train=False)
val_loader = build_dataloader(val_dataset,
config.test.augment,
batch_size=config.test.batch_size,
num_workers=config.miscs.num_workers,
is_train=False,
size_div=img_size)
for output_folder, dataset_name, data_loader_val in zip(
output_folders, config.dataset.val_ann, val_loader):
inference(
config,
context,
data_loader_val,
dataset_name,
iou_types=('bbox', ),
box_only=False,
output_folder=output_folder,
end2end=end2end,
) | null |
15,262 | import os
import torch
import torch.nn as nn
import copy
from pytorch_quantization import nn as quant_nn
from pytorch_quantization import tensor_quant
from pytorch_quantization import calib
from pytorch_quantization.tensor_quant import QuantDescriptor
from damo.dataset import build_dataloader, build_dataset
def collect_stats(model, data_loader, batch_number, device='cuda'):
"""
code mainly from https://github.com/NVIDIA/TensorRT/blob/99a11a5fcdd1f184739bb20a8c4a473262c8ecc8/tools/pytorch-quantization/examples/torchvision/classification_flow.py
Feed data to the network and collect statistic
"""
# Enable calibrators
for name, module in model.named_modules():
if isinstance(module, quant_nn.TensorQuantizer):
if module._calibrator is not None:
module.disable_quant()
module.enable_calib()
else:
module.disable()
for i, data_tuple in enumerate(data_loader):
images, targets, image_ids = data_tuple
images = images.to(device)
output = model(images)
if i + 1 >= batch_number:
break
# Disable calibrators
for name, module in model.named_modules():
if isinstance(module, quant_nn.TensorQuantizer):
if module._calibrator is not None:
module.enable_quant()
module.disable_calib()
else:
module.enable()
def compute_amax(model, **kwargs):
"""
code mainly from https://github.com/NVIDIA/TensorRT/blob/99a11a5fcdd1f184739bb20a8c4a473262c8ecc8/tools/pytorch-quantization/examples/torchvision/classification_flow.py
Load calib result
"""
for name, module in model.named_modules():
if isinstance(module, quant_nn.TensorQuantizer):
print(F"{name:40}: {module}")
if module._calibrator is not None:
if isinstance(module._calibrator, calib.MaxCalibrator):
module.load_calib_amax()
else:
module.load_calib_amax(**kwargs)
def quant_model_init(ori_model, device):
ptq_model = copy.deepcopy(ori_model)
ptq_model.eval()
ptq_model.to(device)
quant_conv_desc_weight = tensor_quant.QUANT_DESC_8BIT_CONV2D_WEIGHT_PER_CHANNEL
quant_conv_desc_input = QuantDescriptor(num_bits=8, calib_method='histogram')
quant_convtrans_desc_weight = tensor_quant.QUANT_DESC_8BIT_CONVTRANSPOSE2D_WEIGHT_PER_CHANNEL
quant_convtrans_desc_input = QuantDescriptor(num_bits=8, calib_method='histogram')
for k, m in ptq_model.named_modules():
if 'proj_conv' in k:
print("Layer {} won't be quantized".format(k))
continue
if isinstance(m, nn.Conv2d):
quant_conv = quant_nn.QuantConv2d(m.in_channels,
m.out_channels,
m.kernel_size,
m.stride,
m.padding,
quant_desc_input = quant_conv_desc_input,
quant_desc_weight = quant_conv_desc_weight)
quant_conv.weight.data.copy_(m.weight.detach())
if m.bias is not None:
quant_conv.bias.data.copy_(m.bias.detach())
else:
quant_conv.bias = None
set_module(ptq_model, k, quant_conv)
elif isinstance(m, nn.ConvTranspose2d):
quant_convtrans = quant_nn.QuantConvTranspose2d(m.in_channels,
m.out_channels,
m.kernel_size,
m.stride,
m.padding,
quant_desc_input = quant_convtrans_desc_input,
quant_desc_weight = quant_convtrans_desc_weight)
quant_convtrans.weight.data.copy_(m.weight.detach())
if m.bias is not None:
quant_convtrans.bias.data.copy_(m.bias.detach())
else:
quant_convtrans.bias = None
set_module(ptq_model, k, quant_convtrans)
elif isinstance(m, nn.MaxPool2d):
kernel_size = m.kernel_size
stride = m.stride
padding = m.padding
dilation = m.dilation
ceil_mode = m.ceil_mode
quant_maxpool2d = quant_nn.QuantMaxPool2d(m.kernel_size,
m.stride,
m.padding,
m.dilation,
m.ceil_mode,
quant_desc_input = quant_conv_desc_input)
set_module(ptq_model, k, quant_maxpool2d)
else:
continue
return ptq_model.to(device)
def post_train_quant(ori_model, calib_data_loader, calib_img_number, device):
ptq_model = quant_model_init(ori_model, device)
with torch.no_grad():
collect_stats(ptq_model, calib_data_loader, calib_img_number, device)
compute_amax(ptq_model, method='entropy')
return ptq_model | null |
15,263 | import os
import torch
import torch.nn as nn
import copy
from pytorch_quantization import nn as quant_nn
from pytorch_quantization import tensor_quant
from pytorch_quantization import calib
from pytorch_quantization.tensor_quant import QuantDescriptor
from damo.dataset import build_dataloader, build_dataset
def quant_model_init(ori_model, device):
ptq_model = copy.deepcopy(ori_model)
ptq_model.eval()
ptq_model.to(device)
quant_conv_desc_weight = tensor_quant.QUANT_DESC_8BIT_CONV2D_WEIGHT_PER_CHANNEL
quant_conv_desc_input = QuantDescriptor(num_bits=8, calib_method='histogram')
quant_convtrans_desc_weight = tensor_quant.QUANT_DESC_8BIT_CONVTRANSPOSE2D_WEIGHT_PER_CHANNEL
quant_convtrans_desc_input = QuantDescriptor(num_bits=8, calib_method='histogram')
for k, m in ptq_model.named_modules():
if 'proj_conv' in k:
print("Layer {} won't be quantized".format(k))
continue
if isinstance(m, nn.Conv2d):
quant_conv = quant_nn.QuantConv2d(m.in_channels,
m.out_channels,
m.kernel_size,
m.stride,
m.padding,
quant_desc_input = quant_conv_desc_input,
quant_desc_weight = quant_conv_desc_weight)
quant_conv.weight.data.copy_(m.weight.detach())
if m.bias is not None:
quant_conv.bias.data.copy_(m.bias.detach())
else:
quant_conv.bias = None
set_module(ptq_model, k, quant_conv)
elif isinstance(m, nn.ConvTranspose2d):
quant_convtrans = quant_nn.QuantConvTranspose2d(m.in_channels,
m.out_channels,
m.kernel_size,
m.stride,
m.padding,
quant_desc_input = quant_convtrans_desc_input,
quant_desc_weight = quant_convtrans_desc_weight)
quant_convtrans.weight.data.copy_(m.weight.detach())
if m.bias is not None:
quant_convtrans.bias.data.copy_(m.bias.detach())
else:
quant_convtrans.bias = None
set_module(ptq_model, k, quant_convtrans)
elif isinstance(m, nn.MaxPool2d):
kernel_size = m.kernel_size
stride = m.stride
padding = m.padding
dilation = m.dilation
ceil_mode = m.ceil_mode
quant_maxpool2d = quant_nn.QuantMaxPool2d(m.kernel_size,
m.stride,
m.padding,
m.dilation,
m.ceil_mode,
quant_desc_input = quant_conv_desc_input)
set_module(ptq_model, k, quant_maxpool2d)
else:
continue
return ptq_model.to(device)
def load_quanted_model(model, calib_weights_path, device):
ptq_model = quant_model_init(model, device)
ptq_model.load_state_dict(torch.load(calib_weights_path)['model'].state_dict())
return ptq_model | null |
15,264 | import os
import torch
import torch.nn as nn
import copy
from pytorch_quantization import nn as quant_nn
from pytorch_quantization import tensor_quant
from pytorch_quantization import calib
from pytorch_quantization.tensor_quant import QuantDescriptor
from damo.dataset import build_dataloader, build_dataset
def module_quant_disable(ptq_model, k):
verified_module = get_module(ptq_model, k)
if hasattr(verified_module, '_input_quantizer'):
verified_module._input_quantizer.disable()
if hasattr(verified_module, '_weight_quantizer'):
verified_module._weight_quantizer.disable()
def quantable_op_check(k, ops_to_quant):
if ops_to_quant is None:
return True
if k in ops_to_quant:
return True
else:
return False
def execute_partial_quant(ptq_model, ops_to_quant=None):
for k, m in ptq_model.named_modules():
if quantable_op_check(k, ops_to_quant):
continue
# enable full-precision
if isinstance(m, quant_nn.QuantConv2d) or \
isinstance(m, quant_nn.QuantConvTranspose2d) or \
isinstance(m, quant_nn.QuantMaxPool2d):
module_quant_disable(ptq_model, k) | null |
15,265 | import os
import torch
import torch.nn as nn
import copy
from pytorch_quantization import nn as quant_nn
from pytorch_quantization import tensor_quant
from pytorch_quantization import calib
from pytorch_quantization.tensor_quant import QuantDescriptor
from damo.dataset import build_dataloader, build_dataset
def init_calib_data_loader(config):
# init dataloader
os.environ['MASTER_ADDR'] = 'localhost'
os.environ['MASTER_PORT'] = '5678'
os.environ['WORLD_SIZE'] = '1'
torch.distributed.init_process_group(backend='nccl',
init_method='env://',
rank=0)
val_dataset = build_dataset(config, config.dataset.val_ann, is_train=False)
val_loader = build_dataloader(val_dataset,
config.test.augment,
batch_size=config.test.batch_size,
num_workers=config.miscs.num_workers,
is_train=False,
size_div=32)
return val_loader[0] | null |
15,266 | import os
import argparse
import sys
import onnx
import torch
from loguru import logger
from torch import nn
from damo.base_models.core.end2end import End2End
from damo.base_models.core.ops import RepConv, SiLU
from damo.config.base import parse_config
from damo.detectors.detector import build_local_model
from damo.utils.model_utils import get_model_info, replace_module
from tools.trt_eval import trt_inference
from tools.partial_quantization.utils import post_train_quant, load_quanted_model, execute_partial_quant, init_calib_data_loader
from pytorch_quantization import nn as quant_nn
def mkdir(path):
    """Create directory *path* (including parents) if it does not exist.

    Uses ``exist_ok=True`` instead of a check-then-create pair, which was
    racy: another process could create the directory between the
    ``os.path.exists`` test and ``os.makedirs``, raising FileExistsError.
    """
    os.makedirs(path, exist_ok=True)
15,267 | import os
import argparse
import sys
import onnx
import torch
from loguru import logger
from torch import nn
from damo.base_models.core.end2end import End2End
from damo.base_models.core.ops import RepConv, SiLU
from damo.config.base import parse_config
from damo.detectors.detector import build_local_model
from damo.utils.model_utils import get_model_info, replace_module
from tools.trt_eval import trt_inference
from tools.partial_quantization.utils import post_train_quant, load_quanted_model, execute_partial_quant, init_calib_data_loader
from pytorch_quantization import nn as quant_nn
def make_parser():
    """Build the CLI parser for the DAMO-YOLO partial-quantization converter.

    Returns:
        argparse.ArgumentParser: parser covering export mode, model config,
        calibration inputs, ONNX options and NMS thresholds.
    """
    parser = argparse.ArgumentParser('damo converter deployment toolbox')
    # mode part
    parser.add_argument('--mode',
                        default='onnx',
                        type=str,
                        help='onnx, trt_16 or trt_32')
    # model part
    parser.add_argument(
        '-f',
        '--config_file',
        default=None,
        type=str,
        help='expriment description file',
    )
    parser.add_argument('-c',
                        '--ckpt',
                        default=None,
                        type=str,
                        help='ckpt path')
    parser.add_argument('--trt',
                        action='store_true',
                        help='whether convert onnx into tensorrt')
    parser.add_argument(
        '--trt_type', type=str, default='fp32',
        help='one type of int8, fp16, fp32')
    parser.add_argument('--batch_size',
                        type=int,
                        default=None,
                        help='inference image batch nums')
    # Fix: default was the string '640'; an int default matches type=int and
    # avoids relying on argparse's implicit string-default conversion.
    parser.add_argument('--img_size',
                        type=int,
                        default=640,
                        help='inference image shape')
    # onnx part
    parser.add_argument('--input',
                        default='images',
                        type=str,
                        help='input node name of onnx model')
    parser.add_argument('--output',
                        default='output',
                        type=str,
                        help='output node name of onnx model')
    parser.add_argument('-o',
                        '--opset',
                        default=11,
                        type=int,
                        help='onnx opset version')
    parser.add_argument('--calib_weights',
                        type=str,
                        default=None,
                        help='calib weights')
    parser.add_argument('--model_type',
                        type=str,
                        default=None,
                        help='quant model type(tiny, small, medium)')
    parser.add_argument('--sensitivity_file',
                        type=str,
                        default=None,
                        help='sensitivity file')
    parser.add_argument('--end2end',
                        action='store_true',
                        help='export end2end onnx')
    parser.add_argument('--ort',
                        action='store_true',
                        help='export onnx for onnxruntime')
    parser.add_argument('--trt_eval',
                        action='store_true',
                        help='trt evaluation')
    parser.add_argument('--iou-thres',
                        type=float,
                        default=0.65,
                        help='iou threshold for NMS')
    parser.add_argument('--conf-thres',
                        type=float,
                        default=0.05,
                        help='conf threshold for NMS')
    parser.add_argument('--device',
                        default='0',
                        help='cuda device, i.e. 0 or 0,1,2,3 or cpu')
    # Trailing free-form config overrides (cfg KEY VALUE ...).
    parser.add_argument(
        'opts',
        help='Modify config options using the command-line',
        default=None,
        nargs=argparse.REMAINDER,
    )
    return parser
15,268 | import os
import argparse
import sys
import onnx
import torch
from loguru import logger
from torch import nn
from damo.base_models.core.end2end import End2End
from damo.base_models.core.ops import RepConv, SiLU
from damo.config.base import parse_config
from damo.detectors.detector import build_local_model
from damo.utils.model_utils import get_model_info, replace_module
from tools.trt_eval import trt_inference
from tools.partial_quantization.utils import post_train_quant, load_quanted_model, execute_partial_quant, init_calib_data_loader
from pytorch_quantization import nn as quant_nn
def trt_export(onnx_path, batch_size, inference_h, inference_w):
    """Build and serialize a TensorRT engine from an ONNX file.

    Args:
        onnx_path: path to the ONNX model; the engine is written beside it
            with a ``_bs{batch_size}.trt`` suffix.
        batch_size: max batch size baked into the engine.
        inference_h, inference_w: kept for interface compatibility but
            currently unused (shapes come from the ONNX graph).

    Returns:
        str: path of the serialized engine file.

    Raises:
        AssertionError: when the engine build fails.
    """
    import tensorrt as trt
    import traceback  # bug fix: used in the failure path below but was never imported

    TRT_LOGGER = trt.Logger()
    engine_path = onnx_path.replace('.onnx', f'_bs{batch_size}.trt')
    EXPLICIT_BATCH = 1 << (int)(
        trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH)
    with trt.Builder(TRT_LOGGER) as builder, \
            builder.create_network(EXPLICIT_BATCH) as network, \
            trt.OnnxParser(network, TRT_LOGGER) as parser:
        logger.info('Loading ONNX file from path {}...'.format(onnx_path))
        with open(onnx_path, 'rb') as model:
            logger.info('Beginning ONNX file parsing')
            if not parser.parse(model.read()):
                logger.info('ERROR: Failed to parse the ONNX file.')
                for error in range(parser.num_errors):
                    logger.info(parser.get_error(error))
        builder.max_batch_size = batch_size
        logger.info('Building an engine. This would take a while...')
        config = builder.create_builder_config()
        config.max_workspace_size = 2 << 30
        # NOTE(review): INT8 is enabled without an int8 calibrator here —
        # confirm this is intentional (layers without scales fall back to FP16).
        config.flags |= 1 << int(trt.BuilderFlag.INT8)
        config.flags |= 1 << int(trt.BuilderFlag.FP16)
        engine = builder.build_engine(network, config)
        try:
            assert engine
        except AssertionError:
            _, _, tb = sys.exc_info()
            traceback.print_tb(tb)
            tb_info = traceback.extract_tb(tb)
            _, line, _, text = tb_info[-1]
            raise AssertionError(
                "Parsing failed on line {} in statement {}".format(line, text)
            )
        logger.info('generated trt engine named {}'.format(engine_path))
        with open(engine_path, 'wb') as f:
            f.write(engine.serialize())
        return engine_path
15,269 | import argparse
import os
import cv2
import numpy as np
import torch
from loguru import logger
from PIL import Image
from damo.base_models.core.ops import RepConv
from damo.config.base import parse_config
from damo.detectors.detector import build_local_model
from damo.utils import get_model_info, vis, postprocess
from damo.utils.demo_utils import transform_img
from damo.structures.image_list import ImageList
from damo.structures.bounding_box import BoxList
def _str2bool(value):
    """Parse a command-line boolean ('true'/'false', '1'/'0', 'yes'/'no')."""
    if isinstance(value, bool):
        return value
    lowered = value.lower()
    if lowered in ('true', '1', 'yes', 'y'):
        return True
    if lowered in ('false', '0', 'no', 'n'):
        return False
    raise argparse.ArgumentTypeError('boolean value expected, got %r' % (value,))


def make_parser():
    """Build the CLI parser for the DAMO-YOLO demo script.

    Returns:
        argparse.ArgumentParser: parser for input source, model/engine
        selection, inference sizes and visualization options.
    """
    parser = argparse.ArgumentParser('DAMO-YOLO Demo')
    parser.add_argument('input_type',
                        default='image',
                        help="input type, support [image, video, camera]")
    parser.add_argument('-f',
                        '--config_file',
                        default=None,
                        type=str,
                        help='pls input your config file',)
    parser.add_argument('-p',
                        '--path',
                        default='./assets/dog.jpg',
                        type=str,
                        help='path to image or video')
    parser.add_argument('--camid',
                        type=int,
                        default=0,
                        help='camera id, necessary when input_type is camera')
    parser.add_argument('--engine',
                        default=None,
                        type=str,
                        help='engine for inference')
    parser.add_argument('--device',
                        default='cuda',
                        type=str,
                        help='device used to inference')
    parser.add_argument('--output_dir',
                        default='./demo',
                        type=str,
                        help='where to save inference results')
    parser.add_argument('--conf',
                        default=0.6,
                        type=float,
                        help='conf of visualization')
    parser.add_argument('--infer_size',
                        nargs='+',
                        type=int,
                        help='test img size')
    parser.add_argument('--end2end',
                        action='store_true',
                        help='trt engine with nms')
    # Bug fix: type=bool treated ANY non-empty string — including 'False' —
    # as True. _str2bool parses the text properly; the default stays True.
    parser.add_argument('--save_result',
                        default=True,
                        type=_str2bool,
                        help='whether save visualization results')
    return parser
15,270 | import argparse
import os
import torch
from loguru import logger
from damo.base_models.core.ops import RepConv
from damo.apis.detector_inference import inference
from damo.config.base import parse_config
from damo.dataset import build_dataloader, build_dataset
from damo.detectors.detector import build_ddp_model, build_local_model
from damo.utils import fuse_model, get_model_info, setup_logger, synchronize
def mkdir(path):
    """Create directory *path* (including parents) if it does not exist.

    ``exist_ok=True`` replaces the racy exists-then-makedirs pair, which
    could raise FileExistsError if another process created the directory
    between the two calls.
    """
    os.makedirs(path, exist_ok=True)
15,271 | import argparse
import os
import torch
from loguru import logger
from damo.base_models.core.ops import RepConv
from damo.apis.detector_inference import inference
from damo.config.base import parse_config
from damo.dataset import build_dataloader, build_dataset
from damo.detectors.detector import build_ddp_model, build_local_model
from damo.utils import fuse_model, get_model_info, setup_logger, synchronize
def make_parser():
    """Build the CLI parser for DAMO-YOLO COCO evaluation.

    Returns an argparse.ArgumentParser covering distributed rank, model
    config/checkpoint, test-time thresholds and free-form config overrides.
    """
    parser = argparse.ArgumentParser('damo eval')
    # distributed
    parser.add_argument('--local_rank', type=int, default=0)
    parser.add_argument(
        '-f',
        '--config_file',
        default=None,
        type=str,
        help='pls input your config file',
    )
    parser.add_argument('-c',
                        '--ckpt',
                        default=None,
                        type=str,
                        help='ckpt for eval')
    parser.add_argument('--conf', default=None, type=float, help='test conf')
    parser.add_argument('--nms',
                        default=None,
                        type=float,
                        help='test nms threshold')
    parser.add_argument('--tsize',
                        default=None,
                        type=int,
                        help='test img size')
    parser.add_argument('--seed', default=None, type=int, help='eval seed')
    parser.add_argument(
        '--fuse',
        dest='fuse',
        default=False,
        action='store_true',
        help='Fuse conv and bn for testing.',
    )
    parser.add_argument(
        '--test',
        dest='test',
        default=False,
        action='store_true',
        help='Evaluating on test-dev set.',
    )  # TODO
    # Trailing free-form config overrides (cfg KEY VALUE ...).
    parser.add_argument(
        'opts',
        help='Modify config options using the command-line',
        default=None,
        nargs=argparse.REMAINDER,
    )
    return parser
15,272 | import argparse
import sys
import onnx
import torch
from loguru import logger
from torch import nn
from damo.base_models.core.end2end import End2End
from damo.base_models.core.ops import RepConv, SiLU
from damo.config.base import parse_config
from damo.detectors.detector import build_local_model
from damo.utils.model_utils import get_model_info, replace_module
def make_parser():
    """Build the CLI parser for the DAMO-YOLO ONNX/TensorRT converter.

    Returns:
        argparse.ArgumentParser: parser covering export mode, model config,
        ONNX/TensorRT options, preprocessing and NMS thresholds.
    """
    parser = argparse.ArgumentParser('damo converter deployment toolbox')
    # mode part
    parser.add_argument('--mode',
                        default='onnx',
                        type=str,
                        help='onnx, trt_16 or trt_32')
    # model part
    parser.add_argument(
        '-f',
        '--config_file',
        default=None,
        type=str,
        help='expriment description file',
    )
    parser.add_argument(
        '--benchmark',
        action='store_true',
        help='if true, export without postprocess'
    )
    parser.add_argument('-c',
                        '--ckpt',
                        default=None,
                        type=str,
                        help='ckpt path')
    parser.add_argument('--trt',
                        action='store_true',
                        help='whether convert onnx into tensorrt')
    parser.add_argument(
        '--trt_type', type=str, default='fp32',
        help='one type of int8, fp16, fp32')
    parser.add_argument('--batch_size',
                        type=int,
                        default=None,
                        help='inference image batch nums')
    # Fix: default was the string '640'; an int default matches type=int and
    # avoids relying on argparse's implicit string-default conversion.
    parser.add_argument('--img_size',
                        type=int,
                        default=640,
                        help='inference image shape')
    # onnx part
    parser.add_argument('--input',
                        default='images',
                        type=str,
                        help='input node name of onnx model')
    parser.add_argument('--output',
                        default='output',
                        type=str,
                        help='output node name of onnx model')
    parser.add_argument('-o',
                        '--opset',
                        default=11,
                        type=int,
                        help='onnx opset version')
    parser.add_argument('--end2end',
                        action='store_true',
                        help='export end2end onnx')
    parser.add_argument('--ort',
                        action='store_true',
                        help='export onnx for onnxruntime')
    parser.add_argument('--trt_eval',
                        action='store_true',
                        help='trt evaluation')
    parser.add_argument('--with-preprocess',
                        action='store_true',
                        help='export bgr2rgb and normalize')
    parser.add_argument('--topk-all',
                        type=int,
                        default=100,
                        help='topk objects for every images')
    parser.add_argument('--iou-thres',
                        type=float,
                        default=0.65,
                        help='iou threshold for NMS')
    parser.add_argument('--conf-thres',
                        type=float,
                        default=0.05,
                        help='conf threshold for NMS')
    parser.add_argument('--device',
                        default='0',
                        help='cuda device, i.e. 0 or 0,1,2,3 or cpu')
    # Trailing free-form config overrides (cfg KEY VALUE ...).
    parser.add_argument(
        'opts',
        help='Modify config options using the command-line',
        default=None,
        nargs=argparse.REMAINDER,
    )
    return parser
15,273 | import argparse
import sys
import onnx
import torch
from loguru import logger
from torch import nn
from damo.base_models.core.end2end import End2End
from damo.base_models.core.ops import RepConv, SiLU
from damo.config.base import parse_config
from damo.detectors.detector import build_local_model
from damo.utils.model_utils import get_model_info, replace_module
class Calibrator(trt.IInt8EntropyCalibrator2):
    """TensorRT INT8 entropy calibrator fed by a batch *stream* object.

    The stream must expose ``calibration_data`` (a numpy batch buffer),
    ``batch_size``, ``reset()`` and ``next_batch()`` — matching the
    DataLoader class defined alongside this calibrator.
    """
    def __init__(self, stream, cache_file=""):
        trt.IInt8EntropyCalibrator2.__init__(self)
        self.stream = stream
        # Device buffer sized to hold one full calibration batch.
        self.d_input = cuda.mem_alloc(self.stream.calibration_data.nbytes)
        self.cache_file = cache_file
        stream.reset()
    def get_batch_size(self):
        # Batch size reported to TensorRT; must match what the stream serves.
        return self.stream.batch_size
    def get_batch(self, names):
        # Debug dump of the binding names TensorRT requests each batch.
        print("######################")
        print(names)
        print("######################")
        batch = self.stream.next_batch()
        if not batch.size:
            # Empty array signals exhaustion -> None stops calibration.
            return None
        # Copy the host batch into the device buffer and hand TRT its address.
        cuda.memcpy_htod(self.d_input, batch)
        return [int(self.d_input)]
    def read_calibration_cache(self):
        # If there is a cache, use it instead of calibrating again. Otherwise, implicitly return None.
        if os.path.exists(self.cache_file):
            with open(self.cache_file, "rb") as f:
                logger.info("Using calibration cache to save time: {:}".format(self.cache_file))
                return f.read()
    def write_calibration_cache(self, cache):
        # Persist TensorRT's calibration table so later runs can skip calibration.
        with open(self.cache_file, "wb") as f:
            logger.info("Caching calibration data for future use: {:}".format(self.cache_file))
            f.write(cache)
class DataLoader:
    """Calibration image feeder yielding fixed-size float32 NCHW batches.

    Reads ``*.jpg`` files from *calib_img_dir* and serves *batch_num*
    batches of *batch_size* images preprocessed to (input_h, input_w).
    """
    def __init__(self, batch_size, batch_num, calib_img_dir, input_w, input_h):
        self.index = 0
        self.length = batch_num
        self.batch_size = batch_size
        self.input_h = input_h
        self.input_w = input_w
        # self.img_list = [i.strip() for i in open('calib.txt').readlines()]
        self.img_list = glob.glob(os.path.join(calib_img_dir, "*.jpg"))
        assert len(self.img_list) > self.batch_size * self.length, \
            '{} must contains more than '.format(calib_img_dir) + str(self.batch_size * self.length) + ' images to calib'
        print('found all {} images to calib.'.format(len(self.img_list)))
        # Reusable buffer holding one full batch (N, C, H, W) of float32.
        self.calibration_data = np.zeros((self.batch_size, 3, input_h, input_w), dtype=np.float32)
    def reset(self):
        """Rewind so calibration can be run again from the first batch."""
        self.index = 0
    def next_batch(self):
        """Return the next batch, or an empty array after *length* batches."""
        if self.index < self.length:
            for i in range(self.batch_size):
                assert os.path.exists(self.img_list[i + self.index * self.batch_size]), 'not found!!'
                img = cv2.imread(self.img_list[i + self.index * self.batch_size])
                # precess_image presumably resizes/pads to input_h with a
                # stride of 32 and normalizes — TODO confirm in its definition.
                img = precess_image(img, self.input_h, 32)
                self.calibration_data[i] = img
            self.index += 1
            return np.ascontiguousarray(self.calibration_data, dtype=np.float32)
        else:
            return np.array([])
    def __len__(self):
        # Number of batches served, not number of images.
        return self.length
def trt_export(onnx_path, batch_size, inference_h, inference_w, trt_mode, calib_loader=None, calib_cache='./damoyolo_calibration.cache'):
    """Build and serialize a TensorRT engine (fp32/fp16/int8) from ONNX.

    Args:
        onnx_path: path to the ONNX model; the engine is written beside it
            with a ``_{trt_mode}_bs{batch_size}.trt`` suffix.
        batch_size: max batch size baked into the engine.
        inference_h, inference_w: kept for interface compatibility but
            currently unused (shapes come from the ONNX graph).
        trt_mode: one of 'fp32', 'fp16', 'int8'.
        calib_loader: int8 calibration stream; note it is overwritten below
            when trt_mode == 'int8'.
        calib_cache: path of the INT8 calibration cache file.

    Returns:
        str: path of the serialized engine file.

    Raises:
        AssertionError: when fp16 is unsupported or the engine build fails.
    """
    import tensorrt as trt
    import traceback  # bug fix: used in the failure path below but was never imported

    trt_version = int(trt.__version__[0])
    if trt_mode == 'int8':
        # NOTE(review): this re-imports DataLoader/Calibrator from a separate
        # `calibrator` module and unconditionally replaces any caller-supplied
        # calib_loader — confirm that is intentional.
        from calibrator import DataLoader, Calibrator
        calib_loader = DataLoader(1, 999, 'datasets/coco/val2017', 640, 640)
    TRT_LOGGER = trt.Logger()
    engine_path = onnx_path.replace('.onnx', f'_{trt_mode}_bs{batch_size}.trt')
    EXPLICIT_BATCH = 1 << (int)(
        trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH)
    logger.info(f'trt_{trt_mode} converting ...')
    with trt.Builder(TRT_LOGGER) as builder, \
            builder.create_network(EXPLICIT_BATCH) as network, \
            trt.OnnxParser(network, TRT_LOGGER) as parser:
        logger.info('Loading ONNX file from path {}...'.format(onnx_path))
        with open(onnx_path, 'rb') as model:
            logger.info('Beginning ONNX file parsing')
            if not parser.parse(model.read()):
                logger.info('ERROR: Failed to parse the ONNX file.')
                for error in range(parser.num_errors):
                    logger.info(parser.get_error(error))
        # builder.max_workspace_size = 1 << 30
        builder.max_batch_size = batch_size
        logger.info('Building an engine. This would take a while...')
        config = builder.create_builder_config()
        config.max_workspace_size = 2 << 30
        if trt_mode == 'fp16':
            assert (builder.platform_has_fast_fp16 == True), 'not support fp16'
            # builder.fp16_mode = True
            config.flags |= 1 << int(trt.BuilderFlag.FP16)
        if trt_mode == 'int8':
            # FP16 is also enabled so non-quantized layers can fall back to it.
            config.flags |= 1 << int(trt.BuilderFlag.INT8)
            config.flags |= 1 << int(trt.BuilderFlag.FP16)
            if calib_loader is not None:
                config.int8_calibrator = Calibrator(calib_loader, calib_cache)
                logger.info('Int8 calibation is enabled.')
        if trt_version >= 8:
            config.set_tactic_sources(1 << int(trt.TacticSource.CUBLAS))
        engine = builder.build_engine(network, config)
        try:
            assert engine
        except AssertionError:
            _, _, tb = sys.exc_info()
            traceback.print_tb(tb)
            tb_info = traceback.extract_tb(tb)
            _, line, _, text = tb_info[-1]
            raise AssertionError(
                "Parsing failed on line {} in statement {}".format(line, text)
            )
        logger.info('generated trt engine named {}'.format(engine_path))
        with open(engine_path, 'wb') as f:
            f.write(engine.serialize())
        return engine_path
15,274 | import logging
import math
from . import pulse_counter
from . import force_move
import toolhead
import copy
class Ercf:
    """Klipper extras module for the ERCF multi-material unit — outline only.

    NOTE(review): every method body is missing from this extract; the
    one-line docstrings below are inserted purely so the skeleton parses.
    The real implementations live in the full source file.
    """
    def __init__(self, config):
        """Stub — body not present in this extract."""
    def handle_connect(self):
        """Stub — body not present in this extract."""
    def get_status(self, eventtime):
        """Stub — body not present in this extract."""
    def _sample_stats(self, values):
        """Stub — body not present in this extract."""
    def _gear_stepper_move_wait(self, dist, wait=True, speed=None, accel=None):
        """Stub — body not present in this extract."""
    def _selector_stepper_move_wait(self, dist, home=0, wait=True,
                                    speed=80., accel=1800):
        """Stub — body not present in this extract."""
    def cmd_ERCF_CALIBRATE_ENCODER(self, gcmd):
        """Stub — body not present in this extract."""
    def cmd_ERCF_HOME_EXTRUDER(self, gcmd):
        """Stub — body not present in this extract."""
    def cmd_ERCF_RESET_ENCODER_COUNTS(self, gcmd):
        """Stub — body not present in this extract."""
    def cmd_ERCF_BUZZ_GEAR_MOTOR(self, gcmd):
        """Stub — body not present in this extract."""
    def cmd_ERCF_LOAD(self, gcmd):
        """Stub — body not present in this extract."""
    def cmd_ERCF_UNLOAD(self, gcmd):
        """Stub — body not present in this extract."""
    def cmd_ERCF_SET_STEPS(self, gcmd):
        """Stub — body not present in this extract."""
    def cmd_ERCF_GET_SELECTOR_POS(self, gcmd):
        """Stub — body not present in this extract."""
    def cmd_ERCF_MOVE_SELECTOR(self, gcmd):
        """Stub — body not present in this extract."""
    def cmd_ERCF_ENDLESSSPOOL_UNLOAD(self, gcmd):
        """Stub — body not present in this extract."""
    def cmd_ERCF_FINALIZE_LOAD(self, gcmd):
        """Stub — body not present in this extract."""
def load_config(config):
    """Klipper extras entry point — presumably called by the host to
    instantiate the module for this config section; TODO confirm."""
    return Ercf(config)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.