| id | prompt | docstring |
|---|---|---|
155,170 | import math
import random
import cv2
import numpy as np
from ..augmentations import box_candidates
from ..general import resample_segments, segment2box
The provided code snippet includes necessary dependencies for implementing the `mixup` function. Write a Python function `def mixup(im, labels, segments, im2, labels2, segments2)` to solve the following problem:
Applies MixUp augmentation blending two images, labels, and segments with a random ratio. See https://arxiv.org/pdf/1710.09412.pdf
Here is the function:
def mixup(im, labels, segments, im2, labels2, segments2):
"""
Applies MixUp augmentation blending two images, labels, and segments with a random ratio.
See https://arxiv.org/pdf/1710.09412.pdf
"""
r = np.random.beta(32.0, 32.0) # mixup ratio, alpha=beta=32.0
im = (im * r + im2 * (1 - r)).astype(np.uint8)
labels = np.concatenate((labels, labels2), 0)
segments = np.concatenate((segments, segments2), 0)
return im, labels, segments | Applies MixUp augmentation blending two images, labels, and segments with a random ratio. See https://arxiv.org/pdf/1710.09412.pdf |
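A minimal usage sketch for this segment-aware `mixup` variant, with synthetic inputs; the (cls, xyxy) label layout and the (n, 1000, 2) resampled-segment shape are assumptions based on the surrounding YOLOv5 code:
im1 = np.random.randint(0, 255, (640, 640, 3), dtype=np.uint8)
im2 = np.random.randint(0, 255, (640, 640, 3), dtype=np.uint8)
labels1 = np.array([[0, 10, 10, 100, 100]], dtype=np.float32)  # one box per image: (cls, x1, y1, x2, y2)
labels2 = np.array([[1, 50, 50, 200, 200]], dtype=np.float32)
segments1 = np.zeros((1, 1000, 2), dtype=np.float32)  # one resampled polygon per box
segments2 = np.zeros((1, 1000, 2), dtype=np.float32)
im, labels, segments = mixup(im1, labels1, segments1, im2, labels2, segments2)
print(im.shape, labels.shape, segments.shape)  # (640, 640, 3) (2, 5) (2, 1000, 2)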
155,171 | import math
import random
import cv2
import numpy as np
from ..augmentations import box_candidates
from ..general import resample_segments, segment2box
def box_candidates(box1, box2, wh_thr=2, ar_thr=100, area_thr=0.1, eps=1e-16):
"""
Filters bounding box candidates by minimum width-height threshold `wh_thr` (pixels), aspect ratio threshold
`ar_thr`, and area ratio threshold `area_thr`.
box1(4,n) is before augmentation, box2(4,n) is after augmentation.
"""
w1, h1 = box1[2] - box1[0], box1[3] - box1[1]
w2, h2 = box2[2] - box2[0], box2[3] - box2[1]
ar = np.maximum(w2 / (h2 + eps), h2 / (w2 + eps)) # aspect ratio
return (w2 > wh_thr) & (h2 > wh_thr) & (w2 * h2 / (w1 * h1 + eps) > area_thr) & (ar < ar_thr) # candidates
def segment2box(segment, width=640, height=640):
"""Convert 1 segment label to 1 box label, applying inside-image constraint, i.e. (xy1, xy2, ...) to (xyxy)."""
x, y = segment.T # segment xy
inside = (x >= 0) & (y >= 0) & (x <= width) & (y <= height)
x, y = x[inside], y[inside]
return np.array([x.min(), y.min(), x.max(), y.max()]) if any(x) else np.zeros((1, 4)) # xyxy
def resample_segments(segments, n=1000):
"""Resamples an (n,2) segment to a fixed number of points for consistent representation."""
for i, s in enumerate(segments):
s = np.concatenate((s, s[0:1, :]), axis=0)
x = np.linspace(0, len(s) - 1, n)
xp = np.arange(len(s))
segments[i] = np.concatenate([np.interp(x, xp, s[:, i]) for i in range(2)]).reshape(2, -1).T # segment xy
return segments
def random_perspective(
im, targets=(), segments=(), degrees=10, translate=0.1, scale=0.1, shear=10, perspective=0.0, border=(0, 0)
):
# torchvision.transforms.RandomAffine(degrees=(-10, 10), translate=(.1, .1), scale=(.9, 1.1), shear=(-10, 10))
# targets = [cls, xyxy]
height = im.shape[0] + border[0] * 2 # shape(h,w,c)
width = im.shape[1] + border[1] * 2
# Center
C = np.eye(3)
C[0, 2] = -im.shape[1] / 2 # x translation (pixels)
C[1, 2] = -im.shape[0] / 2 # y translation (pixels)
# Perspective
P = np.eye(3)
P[2, 0] = random.uniform(-perspective, perspective) # x perspective (about y)
P[2, 1] = random.uniform(-perspective, perspective) # y perspective (about x)
# Rotation and Scale
R = np.eye(3)
a = random.uniform(-degrees, degrees)
# a += random.choice([-180, -90, 0, 90]) # add 90deg rotations to small rotations
s = random.uniform(1 - scale, 1 + scale)
# s = 2 ** random.uniform(-scale, scale)
R[:2] = cv2.getRotationMatrix2D(angle=a, center=(0, 0), scale=s)
# Shear
S = np.eye(3)
S[0, 1] = math.tan(random.uniform(-shear, shear) * math.pi / 180) # x shear (deg)
S[1, 0] = math.tan(random.uniform(-shear, shear) * math.pi / 180) # y shear (deg)
# Translation
T = np.eye(3)
T[0, 2] = random.uniform(0.5 - translate, 0.5 + translate) * width # x translation (pixels)
T[1, 2] = random.uniform(0.5 - translate, 0.5 + translate) * height # y translation (pixels)
# Combined rotation matrix
M = T @ S @ R @ P @ C # order of operations (right to left) is IMPORTANT
if (border[0] != 0) or (border[1] != 0) or (M != np.eye(3)).any(): # image changed
if perspective:
im = cv2.warpPerspective(im, M, dsize=(width, height), borderValue=(114, 114, 114))
else: # affine
im = cv2.warpAffine(im, M[:2], dsize=(width, height), borderValue=(114, 114, 114))
# Visualize
# import matplotlib.pyplot as plt
# ax = plt.subplots(1, 2, figsize=(12, 6))[1].ravel()
# ax[0].imshow(im[:, :, ::-1]) # base
# ax[1].imshow(im2[:, :, ::-1]) # warped
# Transform label coordinates
n = len(targets)
new_segments = []
if n:
new = np.zeros((n, 4))
segments = resample_segments(segments) # upsample
for i, segment in enumerate(segments):
xy = np.ones((len(segment), 3))
xy[:, :2] = segment
xy = xy @ M.T # transform
xy = xy[:, :2] / xy[:, 2:3] if perspective else xy[:, :2] # perspective rescale or affine
# clip
new[i] = segment2box(xy, width, height)
new_segments.append(xy)
# filter candidates
i = box_candidates(box1=targets[:, 1:5].T * s, box2=new.T, area_thr=0.01)
targets = targets[i]
targets[:, 1:5] = new[i]
new_segments = np.array(new_segments)[i]
return im, targets, new_segments | null |
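For reference, a hypothetical call to this segment-aware `random_perspective` with one square polygon; the (cls, x1, y1, x2, y2) pixel-coordinate label layout is assumed from the `targets[:, 1:5]` slicing above:
im = np.full((640, 640, 3), 114, dtype=np.uint8)
targets = np.array([[0, 100, 100, 300, 300]], dtype=np.float32)  # (cls, x1, y1, x2, y2)
segments = [np.array([[100, 100], [300, 100], [300, 300], [100, 300]], dtype=np.float32)]
im_aug, targets_aug, segments_aug = random_perspective(im, targets, segments, degrees=5, translate=0.05, scale=0.1)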
155,172 | import os
import random
import cv2
import numpy as np
import torch
from torch.utils.data import DataLoader, distributed
from ..augmentations import augment_hsv, copy_paste, letterbox
from ..dataloaders import InfiniteDataLoader, LoadImagesAndLabels, SmartDistributedSampler, seed_worker
from ..general import LOGGER, xyn2xy, xywhn2xyxy, xyxy2xywhn
from ..torch_utils import torch_distributed_zero_first
from .augmentations import mixup, random_perspective
def polygon2mask(img_size, polygons, color=1, downsample_ratio=1):
"""
Args:
img_size (tuple): The image size.
polygons (np.ndarray): [N, M], N is the number of polygons,
M is the number of coordinate values per polygon (divisible by 2).
"""
mask = np.zeros(img_size, dtype=np.uint8)
polygons = np.asarray(polygons)
polygons = polygons.astype(np.int32)
shape = polygons.shape
polygons = polygons.reshape(shape[0], -1, 2)
cv2.fillPoly(mask, polygons, color=color)
nh, nw = (img_size[0] // downsample_ratio, img_size[1] // downsample_ratio)
# NOTE: doing fillPoly first and then resizing keeps the loss
# calculation consistent with the mask-ratio=1 case.
mask = cv2.resize(mask, (nw, nh))
return mask
The provided code snippet includes necessary dependencies for implementing the `polygons2masks` function. Write a Python function `def polygons2masks(img_size, polygons, color, downsample_ratio=1)` to solve the following problem:
Args: img_size (tuple): The image size. polygons (list[np.ndarray]): each polygon is [N, M], N is the number of polygons, M is the number of coordinate values per polygon (divisible by 2).
Here is the function:
def polygons2masks(img_size, polygons, color, downsample_ratio=1):
"""
Args:
img_size (tuple): The image size.
polygons (list[np.ndarray]): each polygon is [N, M],
N is the number of polygons,
M is the number of coordinate values per polygon (divisible by 2).
"""
masks = []
for si in range(len(polygons)):
mask = polygon2mask(img_size, [polygons[si].reshape(-1)], color, downsample_ratio)
masks.append(mask)
return np.array(masks) | Args: img_size (tuple): The image size. polygons (list[np.ndarray]): each polygon is [N, M], N is the number of polygons, M is the number of coordinate values per polygon (divisible by 2). |
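A small illustrative call (hypothetical triangles), rasterising two polygons into per-instance masks at a 4x downsample:
polys = [np.array([[10, 10], [200, 10], [10, 200]], dtype=np.float32),
np.array([[300, 300], [500, 300], [300, 500]], dtype=np.float32)]
masks = polygons2masks((640, 640), polys, color=1, downsample_ratio=4)
print(masks.shape)  # (2, 160, 160), one binary mask per polygon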
155,173 | import os
import random
import cv2
import numpy as np
import torch
from torch.utils.data import DataLoader, distributed
from ..augmentations import augment_hsv, copy_paste, letterbox
from ..dataloaders import InfiniteDataLoader, LoadImagesAndLabels, SmartDistributedSampler, seed_worker
from ..general import LOGGER, xyn2xy, xywhn2xyxy, xyxy2xywhn
from ..torch_utils import torch_distributed_zero_first
from .augmentations import mixup, random_perspective
def polygon2mask(img_size, polygons, color=1, downsample_ratio=1):
"""
Args:
img_size (tuple): The image size.
polygons (np.ndarray): [N, M], N is the number of polygons,
M is the number of coordinate values per polygon (divisible by 2).
"""
mask = np.zeros(img_size, dtype=np.uint8)
polygons = np.asarray(polygons)
polygons = polygons.astype(np.int32)
shape = polygons.shape
polygons = polygons.reshape(shape[0], -1, 2)
cv2.fillPoly(mask, polygons, color=color)
nh, nw = (img_size[0] // downsample_ratio, img_size[1] // downsample_ratio)
# NOTE: doing fillPoly first and then resizing keeps the loss
# calculation consistent with the mask-ratio=1 case.
mask = cv2.resize(mask, (nw, nh))
return mask
The provided code snippet includes necessary dependencies for implementing the `polygons2masks_overlap` function. Write a Python function `def polygons2masks_overlap(img_size, segments, downsample_ratio=1)` to solve the following problem:
Return a (640, 640) overlap mask.
Here is the function:
def polygons2masks_overlap(img_size, segments, downsample_ratio=1):
"""Return a (640, 640) overlap mask."""
masks = np.zeros(
(img_size[0] // downsample_ratio, img_size[1] // downsample_ratio),
dtype=np.int32 if len(segments) > 255 else np.uint8,
)
areas = []
ms = []
for si in range(len(segments)):
mask = polygon2mask(
img_size,
[segments[si].reshape(-1)],
downsample_ratio=downsample_ratio,
color=1,
)
ms.append(mask)
areas.append(mask.sum())
areas = np.asarray(areas)
index = np.argsort(-areas)
ms = np.array(ms)[index]
for i in range(len(segments)):
mask = ms[i] * (i + 1)
masks = masks + mask
masks = np.clip(masks, a_min=0, a_max=i + 1)
return masks, index | Return a (640, 640) overlap mask. |
155,174 | import cv2
import numpy as np
import torch
import torch.nn.functional as F
def crop_mask(masks, boxes):
"""
"Crop" predicted masks by zeroing out everything not in the predicted bbox. Vectorized by Chong (thanks Chong).
Args:
- masks should be a size [n, h, w] tensor of masks
- boxes should be a size [n, 4] tensor of bbox coords in relative point form
"""
n, h, w = masks.shape
x1, y1, x2, y2 = torch.chunk(boxes[:, :, None], 4, 1) # each chunk has shape (n,1,1)
r = torch.arange(w, device=masks.device, dtype=x1.dtype)[None, None, :] # x coordinates, shape (1,1,w)
c = torch.arange(h, device=masks.device, dtype=x1.dtype)[None, :, None] # y coordinates, shape (1,h,1)
return masks * ((r >= x1) * (r < x2) * (c >= y1) * (c < y2))
The provided code snippet includes necessary dependencies for implementing the `process_mask_upsample` function. Write a Python function `def process_mask_upsample(protos, masks_in, bboxes, shape)` to solve the following problem:
Crop after upsample. protos: [mask_dim, mask_h, mask_w] masks_in: [n, mask_dim], n is number of masks after nms bboxes: [n, 4], n is number of masks after nms shape: input_image_size, (h, w) return: h, w, n
Here is the function:
def process_mask_upsample(protos, masks_in, bboxes, shape):
"""
Crop after upsample.
protos: [mask_dim, mask_h, mask_w]
masks_in: [n, mask_dim], n is number of masks after nms
bboxes: [n, 4], n is number of masks after nms
shape: input_image_size, (h, w)
return: h, w, n
"""
c, mh, mw = protos.shape # CHW
masks = (masks_in @ protos.float().view(c, -1)).sigmoid().view(-1, mh, mw)
masks = F.interpolate(masks[None], shape, mode="bilinear", align_corners=False)[0] # CHW
masks = crop_mask(masks, bboxes) # CHW
return masks.gt_(0.5) | Crop after upsample. protos: [mask_dim, mask_h, mask_w] masks_in: [n, mask_dim], n is number of masks after nms bboxes: [n, 4], n is number of masks after nms shape: input_image_size, (h, w) return: h, w, n |
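A shape-level sketch with random tensors; the 32 prototypes at 160x160 and the 5 post-NMS detections are assumed values, not fixed by the function:
protos = torch.randn(32, 160, 160)  # [mask_dim, mask_h, mask_w]
masks_in = torch.randn(5, 32)  # [n, mask_dim]
bboxes = torch.tensor([[0.0, 0.0, 320.0, 320.0]] * 5)  # [n, 4] in input-image pixels
masks = process_mask_upsample(protos, masks_in, bboxes, shape=(640, 640))
print(masks.shape)  # torch.Size([5, 640, 640])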
155,176 | import logging
import subprocess
import urllib
from pathlib import Path
import requests
import torch
The provided code snippet includes necessary dependencies for implementing the `is_url` function. Write a Python function `def is_url(url, check=True)` to solve the following problem:
Determines if a string is a URL and optionally checks its existence online, returning a boolean.
Here is the function:
def is_url(url, check=True):
"""Determines if a string is a URL and optionally checks its existence online, returning a boolean."""
try:
url = str(url)
result = urllib.parse.urlparse(url)
assert all([result.scheme, result.netloc]) # check if is url
return (urllib.request.urlopen(url).getcode() == 200) if check else True # check if exists online
except (AssertionError, urllib.request.HTTPError):
return False | Determines if a string is a URL and optionally checks its existence online, returning a boolean. |
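Example calls; the second skips the online check, so it only validates the URL format:
print(is_url("https://ultralytics.com/images/bus.jpg"))  # True if reachable
print(is_url("https://example.com/missing.jpg", check=False))  # True (format only)
print(is_url("not a url"))  # False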
155,177 | import logging
import subprocess
import urllib
from pathlib import Path
import requests
import torch
The provided code snippet includes necessary dependencies for implementing the `url_getsize` function. Write a Python function `def url_getsize(url="https://ultralytics.com/images/bus.jpg")` to solve the following problem:
Returns the size in bytes of a downloadable file at a given URL; defaults to -1 if not found.
Here is the function:
def url_getsize(url="https://ultralytics.com/images/bus.jpg"):
"""Returns the size in bytes of a downloadable file at a given URL; defaults to -1 if not found."""
response = requests.head(url, allow_redirects=True)
return int(response.headers.get("content-length", -1)) | Returns the size in bytes of a downloadable file at a given URL; defaults to -1 if not found. |
155,178 | import math
import os
import platform
import subprocess
import time
import warnings
from contextlib import contextmanager
from copy import deepcopy
from pathlib import Path
import torch
import torch.distributed as dist
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.parallel import DistributedDataParallel as DDP
from utils.general import LOGGER, check_version, colorstr, file_date, git_describe
def check_version(current="0.0.0", minimum="0.0.0", name="version ", pinned=False, hard=False, verbose=False):
"""Checks if the current version meets the minimum required version, exits or warns based on parameters."""
current, minimum = (pkg.parse_version(x) for x in (current, minimum))
result = (current == minimum) if pinned else (current >= minimum) # bool
s = f"WARNING ⚠️ {name}{minimum} is required by YOLOv5, but {name}{current} is currently installed" # string
if hard:
assert result, emojis(s) # assert min requirements met
if verbose and not result:
LOGGER.warning(s)
return result
The provided code snippet includes necessary dependencies for implementing the `smart_inference_mode` function. Write a Python function `def smart_inference_mode(torch_1_9=check_version(torch.__version__, "1.9.0"))` to solve the following problem:
Applies torch.inference_mode() if torch>=1.9.0, else torch.no_grad() as a decorator for functions.
Here is the function:
def smart_inference_mode(torch_1_9=check_version(torch.__version__, "1.9.0")):
"""Applies torch.inference_mode() if torch>=1.9.0, else torch.no_grad() as a decorator for functions."""
def decorate(fn):
return (torch.inference_mode if torch_1_9 else torch.no_grad)()(fn)
return decorate | Applies torch.inference_mode() if torch>=1.9.0, else torch.no_grad() as a decorator for functions. |
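Typical use is as a decorator factory, e.g.:
@smart_inference_mode()
def run(model, x):
    return model(x)  # executed under torch.inference_mode() on torch>=1.9, else torch.no_grad()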
155,179 | import math
import os
import platform
import subprocess
import time
import warnings
from contextlib import contextmanager
from copy import deepcopy
from pathlib import Path
import torch
import torch.distributed as dist
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.parallel import DistributedDataParallel as DDP
from utils.general import LOGGER, check_version, colorstr, file_date, git_describe
The provided code snippet includes necessary dependencies for implementing the `initialize_weights` function. Write a Python function `def initialize_weights(model)` to solve the following problem:
Initializes weights of Conv2d, BatchNorm2d, and activations (Hardswish, LeakyReLU, ReLU, ReLU6, SiLU) in the model.
Here is the function:
def initialize_weights(model):
"""Initializes weights of Conv2d, BatchNorm2d, and activations (Hardswish, LeakyReLU, ReLU, ReLU6, SiLU) in the
model.
"""
for m in model.modules():
t = type(m)
if t is nn.Conv2d:
pass # nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
elif t is nn.BatchNorm2d:
m.eps = 1e-3
m.momentum = 0.03
elif t in [nn.Hardswish, nn.LeakyReLU, nn.ReLU, nn.ReLU6, nn.SiLU]:
m.inplace = True | Initializes weights of Conv2d, BatchNorm2d, and activations (Hardswish, LeakyReLU, ReLU, ReLU6, SiLU) in the model. |
155,180 | import math
import os
import platform
import subprocess
import time
import warnings
from contextlib import contextmanager
from copy import deepcopy
from pathlib import Path
import torch
import torch.distributed as dist
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.parallel import DistributedDataParallel as DDP
from utils.general import LOGGER, check_version, colorstr, file_date, git_describe
The provided code snippet includes necessary dependencies for implementing the `find_modules` function. Write a Python function `def find_modules(model, mclass=nn.Conv2d)` to solve the following problem:
Finds and returns list of layer indices in `model.module_list` matching the specified `mclass`.
Here is the function:
def find_modules(model, mclass=nn.Conv2d):
"""Finds and returns list of layer indices in `model.module_list` matching the specified `mclass`."""
return [i for i, m in enumerate(model.module_list) if isinstance(m, mclass)] | Finds and returns list of layer indices in `model.module_list` matching the specified `mclass`. |
155,181 | import math
import os
import platform
import subprocess
import time
import warnings
from contextlib import contextmanager
from copy import deepcopy
from pathlib import Path
import torch
import torch.distributed as dist
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.parallel import DistributedDataParallel as DDP
from utils.general import LOGGER, check_version, colorstr, file_date, git_describe
def sparsity(model):
"""Calculates and returns the global sparsity of a model as the ratio of zero-valued parameters to total
parameters.
"""
a, b = 0, 0
for p in model.parameters():
a += p.numel()
b += (p == 0).sum()
return b / a
LOGGER = logging.getLogger(LOGGING_NAME)
The provided code snippet includes necessary dependencies for implementing the `prune` function. Write a Python function `def prune(model, amount=0.3)` to solve the following problem:
Prunes Conv2d layers in a model to a specified sparsity using L1 unstructured pruning.
Here is the function:
def prune(model, amount=0.3):
"""Prunes Conv2d layers in a model to a specified sparsity using L1 unstructured pruning."""
import torch.nn.utils.prune as prune
for name, m in model.named_modules():
if isinstance(m, nn.Conv2d):
prune.l1_unstructured(m, name="weight", amount=amount) # prune
prune.remove(m, "weight") # make permanent
LOGGER.info(f"Model pruned to {sparsity(model):.3g} global sparsity") | Prunes Conv2d layers in a model to a specified sparsity using L1 unstructured pruning. |
155,182 | import math
import os
import platform
import subprocess
import time
import warnings
from contextlib import contextmanager
from copy import deepcopy
from pathlib import Path
import torch
import torch.distributed as dist
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.parallel import DistributedDataParallel as DDP
from utils.general import LOGGER, check_version, colorstr, file_date, git_describe
The provided code snippet includes necessary dependencies for implementing the `fuse_conv_and_bn` function. Write a Python function `def fuse_conv_and_bn(conv, bn)` to solve the following problem:
Fuses Conv2d and BatchNorm2d layers into a single Conv2d layer. See https://tehnokv.com/posts/fusing-batchnorm-and-conv/.
Here is the function:
def fuse_conv_and_bn(conv, bn):
"""
Fuses Conv2d and BatchNorm2d layers into a single Conv2d layer.
See https://tehnokv.com/posts/fusing-batchnorm-and-conv/.
"""
fusedconv = (
nn.Conv2d(
conv.in_channels,
conv.out_channels,
kernel_size=conv.kernel_size,
stride=conv.stride,
padding=conv.padding,
dilation=conv.dilation,
groups=conv.groups,
bias=True,
)
.requires_grad_(False)
.to(conv.weight.device)
)
# Prepare filters
w_conv = conv.weight.clone().view(conv.out_channels, -1)
w_bn = torch.diag(bn.weight.div(torch.sqrt(bn.eps + bn.running_var)))
fusedconv.weight.copy_(torch.mm(w_bn, w_conv).view(fusedconv.weight.shape))
# Prepare spatial bias
b_conv = torch.zeros(conv.weight.size(0), device=conv.weight.device) if conv.bias is None else conv.bias
b_bn = bn.bias - bn.weight.mul(bn.running_mean).div(torch.sqrt(bn.running_var + bn.eps))
fusedconv.bias.copy_(torch.mm(w_bn, b_conv.reshape(-1, 1)).reshape(-1) + b_bn)
return fusedconv | Fuses Conv2d and BatchNorm2d layers into a single Conv2d layer. See https://tehnokv.com/posts/fusing-batchnorm-and-conv/. |
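A quick hypothetical check that the fused layer reproduces the Conv2d + BatchNorm2d pair; eval mode is assumed since fusion uses the BN running statistics:
conv = nn.Conv2d(3, 8, 3, bias=False)
bn = nn.BatchNorm2d(8).eval()
fused = fuse_conv_and_bn(conv, bn)
x = torch.randn(1, 3, 32, 32)
with torch.no_grad():
    print(torch.allclose(bn(conv(x)), fused(x), atol=1e-5))  # True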
155,183 | import math
import os
import platform
import subprocess
import time
import warnings
from contextlib import contextmanager
from copy import deepcopy
from pathlib import Path
import torch
import torch.distributed as dist
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.parallel import DistributedDataParallel as DDP
from utils.general import LOGGER, check_version, colorstr, file_date, git_describe
The provided code snippet includes necessary dependencies for implementing the `scale_img` function. Write a Python function `def scale_img(img, ratio=1.0, same_shape=False, gs=32)` to solve the following problem:
Scales an image tensor `img` of shape (bs,3,y,x) by `ratio`, optionally maintaining the original shape, padded to multiples of `gs`.
Here is the function:
def scale_img(img, ratio=1.0, same_shape=False, gs=32): # img(16,3,256,416)
"""Scales an image tensor `img` of shape (bs,3,y,x) by `ratio`, optionally maintaining the original shape, padded to
multiples of `gs`.
"""
if ratio == 1.0:
return img
h, w = img.shape[2:]
s = (int(h * ratio), int(w * ratio)) # new size
img = F.interpolate(img, size=s, mode="bilinear", align_corners=False) # resize
if not same_shape: # pad/crop img
h, w = (math.ceil(x * ratio / gs) * gs for x in (h, w))
return F.pad(img, [0, w - s[1], 0, h - s[0]], value=0.447) # value = imagenet mean | Scales an image tensor `img` of shape (bs,3,y,x) by `ratio`, optionally maintaining the original shape, padded to multiples of `gs`. |
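Example: scaling a (2, 3, 256, 416) batch by 0.5 and padding back up to multiples of gs=32:
x = torch.zeros(2, 3, 256, 416)
y = scale_img(x, ratio=0.5)
print(y.shape)  # torch.Size([2, 3, 128, 224]); width 208 is padded up to 224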
155,184 | import math
import os
import platform
import subprocess
import time
import warnings
from contextlib import contextmanager
from copy import deepcopy
from pathlib import Path
import torch
import torch.distributed as dist
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.parallel import DistributedDataParallel as DDP
from utils.general import LOGGER, check_version, colorstr, file_date, git_describe
The provided code snippet includes necessary dependencies for implementing the `copy_attr` function. Write a Python function `def copy_attr(a, b, include=(), exclude=())` to solve the following problem:
Copies attributes from object b to a, optionally filtering with include and exclude lists.
Here is the function:
def copy_attr(a, b, include=(), exclude=()):
"""Copies attributes from object b to a, optionally filtering with include and exclude lists."""
for k, v in b.__dict__.items():
if (len(include) and k not in include) or k.startswith("_") or k in exclude:
continue
else:
setattr(a, k, v) | Copies attributes from object b to a, optionally filtering with include and exclude lists. |
155,185 | import math
import os
import platform
import subprocess
import time
import warnings
from contextlib import contextmanager
from copy import deepcopy
from pathlib import Path
import torch
import torch.distributed as dist
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.parallel import DistributedDataParallel as DDP
from utils.general import LOGGER, check_version, colorstr, file_date, git_describe
def check_version(current="0.0.0", minimum="0.0.0", name="version ", pinned=False, hard=False, verbose=False):
"""Checks if the current version meets the minimum required version, exits or warns based on parameters."""
current, minimum = (pkg.parse_version(x) for x in (current, minimum))
result = (current == minimum) if pinned else (current >= minimum) # bool
s = f"WARNING ⚠️ {name}{minimum} is required by YOLOv5, but {name}{current} is currently installed" # string
if hard:
assert result, emojis(s) # assert min requirements met
if verbose and not result:
LOGGER.warning(s)
return result
The provided code snippet includes necessary dependencies for implementing the `smart_hub_load` function. Write a Python function `def smart_hub_load(repo="ultralytics/yolov5", model="yolov5s", **kwargs)` to solve the following problem:
YOLOv5 torch.hub.load() wrapper with smart error handling, adjusting torch arguments for compatibility.
Here is the function:
def smart_hub_load(repo="ultralytics/yolov5", model="yolov5s", **kwargs):
"""YOLOv5 torch.hub.load() wrapper with smart error handling, adjusting torch arguments for compatibility."""
if check_version(torch.__version__, "1.9.1"):
kwargs["skip_validation"] = True # validation causes GitHub API rate limit errors
if check_version(torch.__version__, "1.12.0"):
kwargs["trust_repo"] = True # argument required starting in torch 0.12
try:
return torch.hub.load(repo, model, **kwargs)
except Exception:
return torch.hub.load(repo, model, force_reload=True, **kwargs) | YOLOv5 torch.hub.load() wrapper with smart error handling, adjusting torch arguments for compatibility. |
155,186 | import math
import random
import cv2
import numpy as np
import torch
import torchvision.transforms as T
import torchvision.transforms.functional as TF
from utils.general import LOGGER, check_version, colorstr, resample_segments, segment2box, xywhn2xyxy
from utils.metrics import bbox_ioa
IMAGENET_MEAN = 0.485, 0.456, 0.406
IMAGENET_STD = 0.229, 0.224, 0.225
The provided code snippet includes necessary dependencies for implementing the `normalize` function. Write a Python function `def normalize(x, mean=IMAGENET_MEAN, std=IMAGENET_STD, inplace=False)` to solve the following problem:
Applies ImageNet normalization to RGB images in BCHW format, modifying them in-place if specified. Example: y = (x - mean) / std
Here is the function:
def normalize(x, mean=IMAGENET_MEAN, std=IMAGENET_STD, inplace=False):
"""
Applies ImageNet normalization to RGB images in BCHW format, modifying them in-place if specified.
Example: y = (x - mean) / std
"""
return TF.normalize(x, mean, std, inplace=inplace) | Applies ImageNet normalization to RGB images in BCHW format, modifying them in-place if specified. Example: y = (x - mean) / std |
155,187 | import math
import random
import cv2
import numpy as np
import torch
import torchvision.transforms as T
import torchvision.transforms.functional as TF
from utils.general import LOGGER, check_version, colorstr, resample_segments, segment2box, xywhn2xyxy
from utils.metrics import bbox_ioa
The provided code snippet includes necessary dependencies for implementing the `augment_hsv` function. Write a Python function `def augment_hsv(im, hgain=0.5, sgain=0.5, vgain=0.5)` to solve the following problem:
Applies HSV color-space augmentation to an image with random gains for hue, saturation, and value.
Here is the function:
def augment_hsv(im, hgain=0.5, sgain=0.5, vgain=0.5):
"""Applies HSV color-space augmentation to an image with random gains for hue, saturation, and value."""
if hgain or sgain or vgain:
r = np.random.uniform(-1, 1, 3) * [hgain, sgain, vgain] + 1 # random gains
hue, sat, val = cv2.split(cv2.cvtColor(im, cv2.COLOR_BGR2HSV))
dtype = im.dtype # uint8
x = np.arange(0, 256, dtype=r.dtype)
lut_hue = ((x * r[0]) % 180).astype(dtype)
lut_sat = np.clip(x * r[1], 0, 255).astype(dtype)
lut_val = np.clip(x * r[2], 0, 255).astype(dtype)
im_hsv = cv2.merge((cv2.LUT(hue, lut_hue), cv2.LUT(sat, lut_sat), cv2.LUT(val, lut_val)))
cv2.cvtColor(im_hsv, cv2.COLOR_HSV2BGR, dst=im) # no return needed | Applies HSV color-space augmentation to an image with random gains for hue, saturation, and value. |
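A hypothetical in-place call on a BGR uint8 image; the gains shown are typical YOLOv5 hyperparameter values rather than this function's defaults:
im = np.random.randint(0, 255, (480, 640, 3), dtype=np.uint8)  # BGR image
augment_hsv(im, hgain=0.015, sgain=0.7, vgain=0.4)  # modifies im in place, no return value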
155,188 | import math
import random
import cv2
import numpy as np
import torch
import torchvision.transforms as T
import torchvision.transforms.functional as TF
from utils.general import LOGGER, check_version, colorstr, resample_segments, segment2box, xywhn2xyxy
from utils.metrics import bbox_ioa
The provided code snippet includes necessary dependencies for implementing the `hist_equalize` function. Write a Python function `def hist_equalize(im, clahe=True, bgr=False)` to solve the following problem:
Equalizes image histogram, with optional CLAHE, for BGR or RGB image with shape (n,m,3) and range 0-255.
Here is the function:
def hist_equalize(im, clahe=True, bgr=False):
"""Equalizes image histogram, with optional CLAHE, for BGR or RGB image with shape (n,m,3) and range 0-255."""
yuv = cv2.cvtColor(im, cv2.COLOR_BGR2YUV if bgr else cv2.COLOR_RGB2YUV)
if clahe:
c = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8, 8))
yuv[:, :, 0] = c.apply(yuv[:, :, 0])
else:
yuv[:, :, 0] = cv2.equalizeHist(yuv[:, :, 0]) # equalize Y channel histogram
return cv2.cvtColor(yuv, cv2.COLOR_YUV2BGR if bgr else cv2.COLOR_YUV2RGB) # convert YUV image to RGB | Equalizes image histogram, with optional CLAHE, for BGR or RGB image with shape (n,m,3) and range 0-255. |
155,189 | import math
import random
import cv2
import numpy as np
import torch
import torchvision.transforms as T
import torchvision.transforms.functional as TF
from utils.general import LOGGER, check_version, colorstr, resample_segments, segment2box, xywhn2xyxy
from utils.metrics import bbox_ioa
The provided code snippet includes necessary dependencies for implementing the `replicate` function. Write a Python function `def replicate(im, labels)` to solve the following problem:
Replicates half of the smallest object labels in an image for data augmentation. Returns augmented image and labels.
Here is the function:
def replicate(im, labels):
"""
Replicates half of the smallest object labels in an image for data augmentation.
Returns augmented image and labels.
"""
h, w = im.shape[:2]
boxes = labels[:, 1:].astype(int)
x1, y1, x2, y2 = boxes.T
s = ((x2 - x1) + (y2 - y1)) / 2 # side length (pixels)
for i in s.argsort()[: round(s.size * 0.5)]: # smallest indices
x1b, y1b, x2b, y2b = boxes[i]
bh, bw = y2b - y1b, x2b - x1b
yc, xc = int(random.uniform(0, h - bh)), int(random.uniform(0, w - bw)) # offset x, y
x1a, y1a, x2a, y2a = [xc, yc, xc + bw, yc + bh]
im[y1a:y2a, x1a:x2a] = im[y1b:y2b, x1b:x2b] # im4[ymin:ymax, xmin:xmax]
labels = np.append(labels, [[labels[i, 0], x1a, y1a, x2a, y2a]], axis=0)
return im, labels | Replicates half of the smallest object labels in an image for data augmentation. Returns augmented image and labels. |
155,190 | import math
import random
import cv2
import numpy as np
import torch
import torchvision.transforms as T
import torchvision.transforms.functional as TF
from utils.general import LOGGER, check_version, colorstr, resample_segments, segment2box, xywhn2xyxy
from utils.metrics import bbox_ioa
The provided code snippet includes necessary dependencies for implementing the `letterbox` function. Write a Python function `def letterbox(im, new_shape=(640, 640), color=(114, 114, 114), auto=True, scaleFill=False, scaleup=True, stride=32)` to solve the following problem:
Resizes and pads image to new_shape with stride-multiple constraints, returns resized image, ratio, padding.
Here is the function:
def letterbox(im, new_shape=(640, 640), color=(114, 114, 114), auto=True, scaleFill=False, scaleup=True, stride=32):
"""Resizes and pads image to new_shape with stride-multiple constraints, returns resized image, ratio, padding."""
shape = im.shape[:2] # current shape [height, width]
if isinstance(new_shape, int):
new_shape = (new_shape, new_shape)
# Scale ratio (new / old)
r = min(new_shape[0] / shape[0], new_shape[1] / shape[1])
if not scaleup: # only scale down, do not scale up (for better val mAP)
r = min(r, 1.0)
# Compute padding
ratio = r, r # width, height ratios
new_unpad = int(round(shape[1] * r)), int(round(shape[0] * r))
dw, dh = new_shape[1] - new_unpad[0], new_shape[0] - new_unpad[1] # wh padding
if auto: # minimum rectangle
dw, dh = np.mod(dw, stride), np.mod(dh, stride) # wh padding
elif scaleFill: # stretch
dw, dh = 0.0, 0.0
new_unpad = (new_shape[1], new_shape[0])
ratio = new_shape[1] / shape[1], new_shape[0] / shape[0] # width, height ratios
dw /= 2 # divide padding into 2 sides
dh /= 2
if shape[::-1] != new_unpad: # resize
im = cv2.resize(im, new_unpad, interpolation=cv2.INTER_LINEAR)
top, bottom = int(round(dh - 0.1)), int(round(dh + 0.1))
left, right = int(round(dw - 0.1)), int(round(dw + 0.1))
im = cv2.copyMakeBorder(im, top, bottom, left, right, cv2.BORDER_CONSTANT, value=color) # add border
return im, ratio, (dw, dh) | Resizes and pads image to new_shape with stride-multiple constraints, returns resized image, ratio, padding. |
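For example, a 1080p frame letterboxed to a 640-based, stride-32 rectangle:
im0 = np.zeros((1080, 1920, 3), dtype=np.uint8)
im, ratio, (dw, dh) = letterbox(im0, new_shape=640, auto=True, stride=32)
print(im.shape, ratio, (dw, dh))  # (384, 640, 3) (0.333..., 0.333...) (0.0, 12.0)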
155,191 | import math
import random
import cv2
import numpy as np
import torch
import torchvision.transforms as T
import torchvision.transforms.functional as TF
from utils.general import LOGGER, check_version, colorstr, resample_segments, segment2box, xywhn2xyxy
from utils.metrics import bbox_ioa
def box_candidates(box1, box2, wh_thr=2, ar_thr=100, area_thr=0.1, eps=1e-16):
"""
Filters bounding box candidates by minimum width-height threshold `wh_thr` (pixels), aspect ratio threshold
`ar_thr`, and area ratio threshold `area_thr`.
box1(4,n) is before augmentation, box2(4,n) is after augmentation.
"""
w1, h1 = box1[2] - box1[0], box1[3] - box1[1]
w2, h2 = box2[2] - box2[0], box2[3] - box2[1]
ar = np.maximum(w2 / (h2 + eps), h2 / (w2 + eps)) # aspect ratio
return (w2 > wh_thr) & (h2 > wh_thr) & (w2 * h2 / (w1 * h1 + eps) > area_thr) & (ar < ar_thr) # candidates
def segment2box(segment, width=640, height=640):
"""Convert 1 segment label to 1 box label, applying inside-image constraint, i.e. (xy1, xy2, ...) to (xyxy)."""
x, y = segment.T # segment xy
inside = (x >= 0) & (y >= 0) & (x <= width) & (y <= height)
x, y = x[inside], y[inside]
return np.array([x.min(), y.min(), x.max(), y.max()]) if any(x) else np.zeros((1, 4)) # xyxy
def resample_segments(segments, n=1000):
"""Resamples an (n,2) segment to a fixed number of points for consistent representation."""
for i, s in enumerate(segments):
s = np.concatenate((s, s[0:1, :]), axis=0)
x = np.linspace(0, len(s) - 1, n)
xp = np.arange(len(s))
segments[i] = np.concatenate([np.interp(x, xp, s[:, i]) for i in range(2)]).reshape(2, -1).T # segment xy
return segments
def random_perspective(
im, targets=(), segments=(), degrees=10, translate=0.1, scale=0.1, shear=10, perspective=0.0, border=(0, 0)
):
# torchvision.transforms.RandomAffine(degrees=(-10, 10), translate=(0.1, 0.1), scale=(0.9, 1.1), shear=(-10, 10))
# targets = [cls, xyxy]
height = im.shape[0] + border[0] * 2 # shape(h,w,c)
width = im.shape[1] + border[1] * 2
# Center
C = np.eye(3)
C[0, 2] = -im.shape[1] / 2 # x translation (pixels)
C[1, 2] = -im.shape[0] / 2 # y translation (pixels)
# Perspective
P = np.eye(3)
P[2, 0] = random.uniform(-perspective, perspective) # x perspective (about y)
P[2, 1] = random.uniform(-perspective, perspective) # y perspective (about x)
# Rotation and Scale
R = np.eye(3)
a = random.uniform(-degrees, degrees)
# a += random.choice([-180, -90, 0, 90]) # add 90deg rotations to small rotations
s = random.uniform(1 - scale, 1 + scale)
# s = 2 ** random.uniform(-scale, scale)
R[:2] = cv2.getRotationMatrix2D(angle=a, center=(0, 0), scale=s)
# Shear
S = np.eye(3)
S[0, 1] = math.tan(random.uniform(-shear, shear) * math.pi / 180) # x shear (deg)
S[1, 0] = math.tan(random.uniform(-shear, shear) * math.pi / 180) # y shear (deg)
# Translation
T = np.eye(3)
T[0, 2] = random.uniform(0.5 - translate, 0.5 + translate) * width # x translation (pixels)
T[1, 2] = random.uniform(0.5 - translate, 0.5 + translate) * height # y translation (pixels)
# Combined rotation matrix
M = T @ S @ R @ P @ C # order of operations (right to left) is IMPORTANT
if (border[0] != 0) or (border[1] != 0) or (M != np.eye(3)).any(): # image changed
if perspective:
im = cv2.warpPerspective(im, M, dsize=(width, height), borderValue=(114, 114, 114))
else: # affine
im = cv2.warpAffine(im, M[:2], dsize=(width, height), borderValue=(114, 114, 114))
# Visualize
# import matplotlib.pyplot as plt
# ax = plt.subplots(1, 2, figsize=(12, 6))[1].ravel()
# ax[0].imshow(im[:, :, ::-1]) # base
# ax[1].imshow(im2[:, :, ::-1]) # warped
# Transform label coordinates
n = len(targets)
if n:
use_segments = any(x.any() for x in segments) and len(segments) == n
new = np.zeros((n, 4))
if use_segments: # warp segments
segments = resample_segments(segments) # upsample
for i, segment in enumerate(segments):
xy = np.ones((len(segment), 3))
xy[:, :2] = segment
xy = xy @ M.T # transform
xy = xy[:, :2] / xy[:, 2:3] if perspective else xy[:, :2] # perspective rescale or affine
# clip
new[i] = segment2box(xy, width, height)
else: # warp boxes
xy = np.ones((n * 4, 3))
xy[:, :2] = targets[:, [1, 2, 3, 4, 1, 4, 3, 2]].reshape(n * 4, 2) # x1y1, x2y2, x1y2, x2y1
xy = xy @ M.T # transform
xy = (xy[:, :2] / xy[:, 2:3] if perspective else xy[:, :2]).reshape(n, 8) # perspective rescale or affine
# create new boxes
x = xy[:, [0, 2, 4, 6]]
y = xy[:, [1, 3, 5, 7]]
new = np.concatenate((x.min(1), y.min(1), x.max(1), y.max(1))).reshape(4, n).T
# clip
new[:, [0, 2]] = new[:, [0, 2]].clip(0, width)
new[:, [1, 3]] = new[:, [1, 3]].clip(0, height)
# filter candidates
i = box_candidates(box1=targets[:, 1:5].T * s, box2=new.T, area_thr=0.01 if use_segments else 0.10)
targets = targets[i]
targets[:, 1:5] = new[i]
return im, targets | null |
155,192 | import math
import random
import cv2
import numpy as np
import torch
import torchvision.transforms as T
import torchvision.transforms.functional as TF
from utils.general import LOGGER, check_version, colorstr, resample_segments, segment2box, xywhn2xyxy
from utils.metrics import bbox_ioa
def bbox_ioa(box1, box2, eps=1e-7):
"""
Returns the intersection over box2 area given box1, box2.
Boxes are x1y1x2y2
box1: np.array of shape(4)
box2: np.array of shape(nx4)
returns: np.array of shape(n)
"""
# Get the coordinates of bounding boxes
b1_x1, b1_y1, b1_x2, b1_y2 = box1
b2_x1, b2_y1, b2_x2, b2_y2 = box2.T
# Intersection area
inter_area = (np.minimum(b1_x2, b2_x2) - np.maximum(b1_x1, b2_x1)).clip(0) * (
np.minimum(b1_y2, b2_y2) - np.maximum(b1_y1, b2_y1)
).clip(0)
# box2 area
box2_area = (b2_x2 - b2_x1) * (b2_y2 - b2_y1) + eps
# Intersection over box2 area
return inter_area / box2_area
The provided code snippet includes necessary dependencies for implementing the `copy_paste` function. Write a Python function `def copy_paste(im, labels, segments, p=0.5)` to solve the following problem:
Applies Copy-Paste augmentation by flipping and merging segments and labels on an image. Details at https://arxiv.org/abs/2012.07177.
Here is the function:
def copy_paste(im, labels, segments, p=0.5):
"""
Applies Copy-Paste augmentation by flipping and merging segments and labels on an image.
Details at https://arxiv.org/abs/2012.07177.
"""
n = len(segments)
if p and n:
h, w, c = im.shape # height, width, channels
im_new = np.zeros(im.shape, np.uint8)
for j in random.sample(range(n), k=round(p * n)):
l, s = labels[j], segments[j]
box = w - l[3], l[2], w - l[1], l[4]
ioa = bbox_ioa(box, labels[:, 1:5]) # intersection over area
if (ioa < 0.30).all(): # allow 30% obscuration of existing labels
labels = np.concatenate((labels, [[l[0], *box]]), 0)
segments.append(np.concatenate((w - s[:, 0:1], s[:, 1:2]), 1))
cv2.drawContours(im_new, [segments[j].astype(np.int32)], -1, (1, 1, 1), cv2.FILLED)
result = cv2.flip(im, 1) # augment segments (flip left-right)
i = cv2.flip(im_new, 1).astype(bool)
im[i] = result[i] # cv2.imwrite('debug.jpg', im) # debug
return im, labels, segments | Applies Copy-Paste augmentation by flipping and merging segments and labels on an image. Details at https://arxiv.org/abs/2012.07177. |
155,193 | import math
import random
import cv2
import numpy as np
import torch
import torchvision.transforms as T
import torchvision.transforms.functional as TF
from utils.general import LOGGER, check_version, colorstr, resample_segments, segment2box, xywhn2xyxy
from utils.metrics import bbox_ioa
def xywhn2xyxy(x, w=640, h=640, padw=0, padh=0):
"""Convert nx4 boxes from [x, y, w, h] normalized to [x1, y1, x2, y2] where xy1=top-left, xy2=bottom-right."""
y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x)
y[..., 0] = w * (x[..., 0] - x[..., 2] / 2) + padw # top left x
y[..., 1] = h * (x[..., 1] - x[..., 3] / 2) + padh # top left y
y[..., 2] = w * (x[..., 0] + x[..., 2] / 2) + padw # bottom right x
y[..., 3] = h * (x[..., 1] + x[..., 3] / 2) + padh # bottom right y
return y
def bbox_ioa(box1, box2, eps=1e-7):
"""
Returns the intersection over box2 area given box1, box2.
Boxes are x1y1x2y2
box1: np.array of shape(4)
box2: np.array of shape(nx4)
returns: np.array of shape(n)
"""
# Get the coordinates of bounding boxes
b1_x1, b1_y1, b1_x2, b1_y2 = box1
b2_x1, b2_y1, b2_x2, b2_y2 = box2.T
# Intersection area
inter_area = (np.minimum(b1_x2, b2_x2) - np.maximum(b1_x1, b2_x1)).clip(0) * (
np.minimum(b1_y2, b2_y2) - np.maximum(b1_y1, b2_y1)
).clip(0)
# box2 area
box2_area = (b2_x2 - b2_x1) * (b2_y2 - b2_y1) + eps
# Intersection over box2 area
return inter_area / box2_area
The provided code snippet includes necessary dependencies for implementing the `cutout` function. Write a Python function `def cutout(im, labels, p=0.5)` to solve the following problem:
Applies cutout augmentation to an image with optional label adjustment, using random masks of varying sizes. Details at https://arxiv.org/abs/1708.04552.
Here is the function:
def cutout(im, labels, p=0.5):
"""
Applies cutout augmentation to an image with optional label adjustment, using random masks of varying sizes.
Details at https://arxiv.org/abs/1708.04552.
"""
if random.random() < p:
h, w = im.shape[:2]
scales = [0.5] * 1 + [0.25] * 2 + [0.125] * 4 + [0.0625] * 8 + [0.03125] * 16 # image size fraction
for s in scales:
mask_h = random.randint(1, int(h * s)) # create random masks
mask_w = random.randint(1, int(w * s))
# box
xmin = max(0, random.randint(0, w) - mask_w // 2)
ymin = max(0, random.randint(0, h) - mask_h // 2)
xmax = min(w, xmin + mask_w)
ymax = min(h, ymin + mask_h)
# apply random color mask
im[ymin:ymax, xmin:xmax] = [random.randint(64, 191) for _ in range(3)]
# return unobscured labels
if len(labels) and s > 0.03:
box = np.array([xmin, ymin, xmax, ymax], dtype=np.float32)
ioa = bbox_ioa(box, xywhn2xyxy(labels[:, 1:5], w, h)) # intersection over area
labels = labels[ioa < 0.60] # remove >60% obscured labels
return labels | Applies cutout augmentation to an image with optional label adjustment, using random masks of varying sizes. Details at https://arxiv.org/abs/1708.04552. |
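A hypothetical call; note that labels here are normalized (cls, xywh), since `cutout` converts them with `xywhn2xyxy` before measuring overlap:
im = np.random.randint(0, 255, (640, 640, 3), dtype=np.uint8)
labels = np.array([[0, 0.5, 0.5, 0.3, 0.3]], dtype=np.float32)  # (cls, x, y, w, h) normalized
labels = cutout(im, labels, p=1.0)  # im is modified in place; heavily obscured labels are dropped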
155,194 | import math
import random
import cv2
import numpy as np
import torch
import torchvision.transforms as T
import torchvision.transforms.functional as TF
from utils.general import LOGGER, check_version, colorstr, resample_segments, segment2box, xywhn2xyxy
from utils.metrics import bbox_ioa
The provided code snippet includes necessary dependencies for implementing the `mixup` function. Write a Python function `def mixup(im, labels, im2, labels2)` to solve the following problem:
Applies MixUp augmentation by blending images and labels. See https://arxiv.org/pdf/1710.09412.pdf for details.
Here is the function:
def mixup(im, labels, im2, labels2):
"""
Applies MixUp augmentation by blending images and labels.
See https://arxiv.org/pdf/1710.09412.pdf for details.
"""
r = np.random.beta(32.0, 32.0) # mixup ratio, alpha=beta=32.0
im = (im * r + im2 * (1 - r)).astype(np.uint8)
labels = np.concatenate((labels, labels2), 0)
return im, labels | Applies MixUp augmentation by blending images and labels. See https://arxiv.org/pdf/1710.09412.pdf for details. |
155,195 | import math
import random
import cv2
import numpy as np
import torch
import torchvision.transforms as T
import torchvision.transforms.functional as TF
from utils.general import LOGGER, check_version, colorstr, resample_segments, segment2box, xywhn2xyxy
from utils.metrics import bbox_ioa
IMAGENET_MEAN = 0.485, 0.456, 0.406
IMAGENET_STD = 0.229, 0.224, 0.225
class CenterCrop:
def __init__(self, size=640):
def __call__(self, im):
LOGGER = logging.getLogger(LOGGING_NAME)
def check_version(current="0.0.0", minimum="0.0.0", name="version ", pinned=False, hard=False, verbose=False):
def colorstr(*input):
def classify_albumentations(
augment=True,
size=224,
scale=(0.08, 1.0),
ratio=(0.75, 1.0 / 0.75), # 0.75, 1.33
hflip=0.5,
vflip=0.0,
jitter=0.4,
mean=IMAGENET_MEAN,
std=IMAGENET_STD,
auto_aug=False,
):
# YOLOv5 classification Albumentations (optional, only used if package is installed)
prefix = colorstr("albumentations: ")
try:
import albumentations as A
from albumentations.pytorch import ToTensorV2
check_version(A.__version__, "1.0.3", hard=True) # version requirement
if augment: # Resize and crop
T = [A.RandomResizedCrop(height=size, width=size, scale=scale, ratio=ratio)]
if auto_aug:
# TODO: implement AugMix, AutoAug & RandAug in albumentation
LOGGER.info(f"{prefix}auto augmentations are currently not supported")
else:
if hflip > 0:
T += [A.HorizontalFlip(p=hflip)]
if vflip > 0:
T += [A.VerticalFlip(p=vflip)]
if jitter > 0:
color_jitter = (float(jitter),) * 3 # repeat value for brightness, contrast, saturation; hue fixed at 0
T += [A.ColorJitter(*color_jitter, 0)]
else: # Use fixed crop for eval set (reproducibility)
T = [A.SmallestMaxSize(max_size=size), A.CenterCrop(height=size, width=size)]
T += [A.Normalize(mean=mean, std=std), ToTensorV2()] # Normalize and convert to Tensor
LOGGER.info(prefix + ", ".join(f"{x}".replace("always_apply=False, ", "") for x in T if x.p))
return A.Compose(T)
except ImportError: # package not installed, skip
LOGGER.warning(f"{prefix}⚠️ not found, install with `pip install albumentations` (recommended)")
except Exception as e:
LOGGER.info(f"{prefix}{e}") | null |
155,196 | import torch
import torch.nn as nn
from utils.metrics import bbox_iou
from utils.torch_utils import de_parallel
The provided code snippet includes necessary dependencies for implementing the `smooth_BCE` function. Write a Python function `def smooth_BCE(eps=0.1)` to solve the following problem:
Returns label smoothing BCE targets for reducing overfitting; pos: `1.0 - 0.5*eps`, neg: `0.5*eps`. For details see https://github.com/ultralytics/yolov3/issues/238#issuecomment-598028441
Here is the function:
def smooth_BCE(eps=0.1):
"""Returns label smoothing BCE targets for reducing overfitting; pos: `1.0 - 0.5*eps`, neg: `0.5*eps`. For details see https://github.com/ultralytics/yolov3/issues/238#issuecomment-598028441"""
return 1.0 - 0.5 * eps, 0.5 * eps | Returns label smoothing BCE targets for reducing overfitting; pos: `1.0 - 0.5*eps`, neg: `0.5*eps`. For details see https://github.com/ultralytics/yolov3/issues/238#issuecomment-598028441 |
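Typical use, e.g. building smoothed class targets for a BCE-based classification loss (shapes here are illustrative):
cp, cn = smooth_BCE(eps=0.1)  # cp = 0.95 for positive targets, cn = 0.05 for negative targets
t = torch.full((8, 80), cn)  # hypothetical class-target tensor initialised to the negative value
t[torch.arange(8), torch.randint(0, 80, (8,))] = cp  # positives get the smoothed positive value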
155,197 | import contextlib
import math
import os
from copy import copy
from pathlib import Path
import cv2
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sn
import torch
from PIL import Image, ImageDraw
from scipy.ndimage.filters import gaussian_filter1d
from ultralytics.utils.plotting import Annotator
from utils import TryExcept, threaded
from utils.general import LOGGER, clip_boxes, increment_path, xywh2xyxy, xyxy2xywh
from utils.metrics import fitness
LOGGER = logging.getLogger(LOGGING_NAME)
The provided code snippet includes necessary dependencies for implementing the `feature_visualization` function. Write a Python function `def feature_visualization(x, module_type, stage, n=32, save_dir=Path("runs/detect/exp"))` to solve the following problem:
x: Features to be visualized module_type: Module type stage: Module stage within model n: Maximum number of feature maps to plot save_dir: Directory to save results
Here is the function:
def feature_visualization(x, module_type, stage, n=32, save_dir=Path("runs/detect/exp")):
"""
x: Features to be visualized
module_type: Module type
stage: Module stage within model
n: Maximum number of feature maps to plot
save_dir: Directory to save results
"""
if ("Detect" not in module_type) and (
"Segment" not in module_type
): # 'Detect' for Object Detect task,'Segment' for Segment task
batch, channels, height, width = x.shape # batch, channels, height, width
if height > 1 and width > 1:
f = save_dir / f"stage{stage}_{module_type.split('.')[-1]}_features.png" # filename
blocks = torch.chunk(x[0].cpu(), channels, dim=0) # select batch index 0, block by channels
n = min(n, channels) # number of plots
fig, ax = plt.subplots(math.ceil(n / 8), 8, tight_layout=True) # n/8 rows x 8 cols
ax = ax.ravel()
plt.subplots_adjust(wspace=0.05, hspace=0.05)
for i in range(n):
ax[i].imshow(blocks[i].squeeze()) # cmap='gray'
ax[i].axis("off")
LOGGER.info(f"Saving {f}... ({n}/{channels})")
plt.savefig(f, dpi=300, bbox_inches="tight")
plt.close()
np.save(str(f.with_suffix(".npy")), x[0].cpu().numpy()) # npy save | x: Features to be visualized module_type: Module type stage: Module stage within model n: Maximum number of feature maps to plot save_dir: Directory to save results |
155,198 | import contextlib
import math
import os
from copy import copy
from pathlib import Path
import cv2
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sn
import torch
from PIL import Image, ImageDraw
from scipy.ndimage.filters import gaussian_filter1d
from ultralytics.utils.plotting import Annotator
from utils import TryExcept, threaded
from utils.general import LOGGER, clip_boxes, increment_path, xywh2xyxy, xyxy2xywh
from utils.metrics import fitness
The provided code snippet includes necessary dependencies for implementing the `butter_lowpass_filtfilt` function. Write a Python function `def butter_lowpass_filtfilt(data, cutoff=1500, fs=50000, order=5)` to solve the following problem:
Applies a low-pass Butterworth filter to `data` with specified `cutoff`, `fs`, and `order`.
Here is the function:
def butter_lowpass_filtfilt(data, cutoff=1500, fs=50000, order=5):
"""Applies a low-pass Butterworth filter to `data` with specified `cutoff`, `fs`, and `order`."""
from scipy.signal import butter, filtfilt
# https://stackoverflow.com/questions/28536191/how-to-filter-smooth-with-scipy-numpy
def butter_lowpass(cutoff, fs, order):
nyq = 0.5 * fs
normal_cutoff = cutoff / nyq
return butter(order, normal_cutoff, btype="low", analog=False)
b, a = butter_lowpass(cutoff, fs, order=order)
return filtfilt(b, a, data) # forward-backward filter | Applies a low-pass Butterworth filter to `data` with specified `cutoff`, `fs`, and `order`. |
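A small hypothetical smoothing example on a noisy 1-D curve (requires scipy):
t = np.linspace(0, 1, 50000)
noisy = np.sin(2 * np.pi * 5 * t) + 0.3 * np.random.randn(t.size)
smooth = butter_lowpass_filtfilt(noisy, cutoff=1500, fs=50000, order=5)  # same length, zero phase shift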
155,199 | import contextlib
import math
import os
from copy import copy
from pathlib import Path
import cv2
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sn
import torch
from PIL import Image, ImageDraw
from scipy.ndimage.filters import gaussian_filter1d
from ultralytics.utils.plotting import Annotator
from utils import TryExcept, threaded
from utils.general import LOGGER, clip_boxes, increment_path, xywh2xyxy, xyxy2xywh
from utils.metrics import fitness
The provided code snippet includes necessary dependencies for implementing the `plot_lr_scheduler` function. Write a Python function `def plot_lr_scheduler(optimizer, scheduler, epochs=300, save_dir="")` to solve the following problem:
Plots learning rate schedule for given optimizer and scheduler, saving plot to `save_dir`.
Here is the function:
def plot_lr_scheduler(optimizer, scheduler, epochs=300, save_dir=""):
"""Plots learning rate schedule for given optimizer and scheduler, saving plot to `save_dir`."""
optimizer, scheduler = copy(optimizer), copy(scheduler) # do not modify originals
y = []
for _ in range(epochs):
scheduler.step()
y.append(optimizer.param_groups[0]["lr"])
plt.plot(y, ".-", label="LR")
plt.xlabel("epoch")
plt.ylabel("LR")
plt.grid()
plt.xlim(0, epochs)
plt.ylim(0)
plt.savefig(Path(save_dir) / "LR.png", dpi=200)
plt.close() | Plots learning rate schedule for given optimizer and scheduler, saving plot to `save_dir`. |
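A hypothetical call visualising a cosine schedule; any torch optimizer/scheduler pair would do:
import torch.nn as nn
m = nn.Linear(10, 1)
opt = torch.optim.SGD(m.parameters(), lr=0.01)
sch = torch.optim.lr_scheduler.CosineAnnealingLR(opt, T_max=100)
plot_lr_scheduler(opt, sch, epochs=100, save_dir=".")  # writes ./LR.png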
155,200 | import contextlib
import math
import os
from copy import copy
from pathlib import Path
import cv2
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sn
import torch
from PIL import Image, ImageDraw
from scipy.ndimage.filters import gaussian_filter1d
from ultralytics.utils.plotting import Annotator
from utils import TryExcept, threaded
from utils.general import LOGGER, clip_boxes, increment_path, xywh2xyxy, xyxy2xywh
from utils.metrics import fitness
def hist2d(x, y, n=100):
"""
Generates a logarithmic 2D histogram, useful for visualizing label or evolution distributions.
Used in labels.png and evolve.png.
"""
xedges, yedges = np.linspace(x.min(), x.max(), n), np.linspace(y.min(), y.max(), n)
hist, xedges, yedges = np.histogram2d(x, y, (xedges, yedges))
xidx = np.clip(np.digitize(x, xedges) - 1, 0, hist.shape[0] - 1)
yidx = np.clip(np.digitize(y, yedges) - 1, 0, hist.shape[1] - 1)
return np.log(hist[xidx, yidx])
def xyxy2xywh(x):
"""Convert nx4 boxes from [x1, y1, x2, y2] to [x, y, w, h] where xy1=top-left, xy2=bottom-right."""
y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x)
y[..., 0] = (x[..., 0] + x[..., 2]) / 2 # x center
y[..., 1] = (x[..., 1] + x[..., 3]) / 2 # y center
y[..., 2] = x[..., 2] - x[..., 0] # width
y[..., 3] = x[..., 3] - x[..., 1] # height
return y
The provided code snippet includes necessary dependencies for implementing the `plot_val_txt` function. Write a Python function `def plot_val_txt()` to solve the following problem:
Plots 2D and 1D histograms of bounding box centers from 'val.txt' using matplotlib, saving as 'hist2d.png' and 'hist1d.png'. Example: from utils.plots import *; plot_val_txt()
Here is the function:
def plot_val_txt():
"""
Plots 2D and 1D histograms of bounding box centers from 'val.txt' using matplotlib, saving as 'hist2d.png' and
'hist1d.png'.
Example: from utils.plots import *; plot_val_txt()
"""
x = np.loadtxt("val.txt", dtype=np.float32)
box = xyxy2xywh(x[:, :4])
cx, cy = box[:, 0], box[:, 1]
fig, ax = plt.subplots(1, 1, figsize=(6, 6), tight_layout=True)
ax.hist2d(cx, cy, bins=600, cmax=10, cmin=0)
ax.set_aspect("equal")
plt.savefig("hist2d.png", dpi=300)
fig, ax = plt.subplots(1, 2, figsize=(12, 6), tight_layout=True)
ax[0].hist(cx, bins=600)
ax[1].hist(cy, bins=600)
plt.savefig("hist1d.png", dpi=200) | Plots 2D and 1D histograms of bounding box centers from 'val.txt' using matplotlib, saving as 'hist2d.png' and 'hist1d.png'. Example: from utils.plots import *; plot_val() |
155,201 | import contextlib
import math
import os
from copy import copy
from pathlib import Path
import cv2
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sn
import torch
from PIL import Image, ImageDraw
from scipy.ndimage.filters import gaussian_filter1d
from ultralytics.utils.plotting import Annotator
from utils import TryExcept, threaded
from utils.general import LOGGER, clip_boxes, increment_path, xywh2xyxy, xyxy2xywh
from utils.metrics import fitness
The provided code snippet includes necessary dependencies for implementing the `plot_targets_txt` function. Write a Python function `def plot_targets_txt()` to solve the following problem:
Plots histograms of object detection targets from 'targets.txt', saving the figure as 'targets.jpg'. Example: from utils.plots import *; plot_targets_txt()
Here is the function:
def plot_targets_txt():
"""
Plots histograms of object detection targets from 'targets.txt', saving the figure as 'targets.jpg'.
Example: from utils.plots import *; plot_targets_txt()
"""
x = np.loadtxt("targets.txt", dtype=np.float32).T
s = ["x targets", "y targets", "width targets", "height targets"]
fig, ax = plt.subplots(2, 2, figsize=(8, 8), tight_layout=True)
ax = ax.ravel()
for i in range(4):
ax[i].hist(x[i], bins=100, label=f"{x[i].mean():.3g} +/- {x[i].std():.3g}")
ax[i].legend()
ax[i].set_title(s[i])
plt.savefig("targets.jpg", dpi=200) | Plots histograms of object detection targets from 'targets.txt', saving the figure as 'targets.jpg'. Example: from utils.plots import *; plot_targets_txt() |
155,202 | import contextlib
import math
import os
from copy import copy
from pathlib import Path
import cv2
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sn
import torch
from PIL import Image, ImageDraw
from scipy.ndimage.filters import gaussian_filter1d
from ultralytics.utils.plotting import Annotator
from utils import TryExcept, threaded
from utils.general import LOGGER, clip_boxes, increment_path, xywh2xyxy, xyxy2xywh
from utils.metrics import fitness
The provided code snippet includes necessary dependencies for implementing the `plot_val_study` function. Write a Python function `def plot_val_study(file="", dir="", x=None)` to solve the following problem:
Plots validation study results from 'study*.txt' files in a directory or a specific file, comparing model performance and speed. Example: from utils.plots import *; plot_val_study()
Here is the function:
def plot_val_study(file="", dir="", x=None):
"""
Plots validation study results from 'study*.txt' files in a directory or a specific file, comparing model
performance and speed.
Example: from utils.plots import *; plot_val_study()
"""
save_dir = Path(file).parent if file else Path(dir)
plot2 = False # plot additional results
if plot2:
ax = plt.subplots(2, 4, figsize=(10, 6), tight_layout=True)[1].ravel()
fig2, ax2 = plt.subplots(1, 1, figsize=(8, 4), tight_layout=True)
# for f in [save_dir / f'study_coco_{x}.txt' for x in ['yolov5n6', 'yolov5s6', 'yolov5m6', 'yolov5l6', 'yolov5x6']]:
for f in sorted(save_dir.glob("study*.txt")):
y = np.loadtxt(f, dtype=np.float32, usecols=[0, 1, 2, 3, 7, 8, 9], ndmin=2).T
x = np.arange(y.shape[1]) if x is None else np.array(x)
if plot2:
s = ["P", "R", "mAP@.5", "mAP@.5:.95", "t_preprocess (ms/img)", "t_inference (ms/img)", "t_NMS (ms/img)"]
for i in range(7):
ax[i].plot(x, y[i], ".-", linewidth=2, markersize=8)
ax[i].set_title(s[i])
j = y[3].argmax() + 1
ax2.plot(
y[5, 1:j],
y[3, 1:j] * 1e2,
".-",
linewidth=2,
markersize=8,
label=f.stem.replace("study_coco_", "").replace("yolo", "YOLO"),
)
ax2.plot(
1e3 / np.array([209, 140, 97, 58, 35, 18]),
[34.6, 40.5, 43.0, 47.5, 49.7, 51.5],
"k.-",
linewidth=2,
markersize=8,
alpha=0.25,
label="EfficientDet",
)
ax2.grid(alpha=0.2)
ax2.set_yticks(np.arange(20, 60, 5))
ax2.set_xlim(0, 57)
ax2.set_ylim(25, 55)
ax2.set_xlabel("GPU Speed (ms/img)")
ax2.set_ylabel("COCO AP val")
ax2.legend(loc="lower right")
f = save_dir / "study.png"
print(f"Saving {f}...")
plt.savefig(f, dpi=300) | Plots validation study results from 'study*.txt' files in a directory or a specific file, comparing model performance and speed. Example: from utils.plots import *; plot_val_study() |
155,203 | import contextlib
import math
import os
from copy import copy
from pathlib import Path
import cv2
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sn
import torch
from PIL import Image, ImageDraw
from scipy.ndimage.filters import gaussian_filter1d
from ultralytics.utils.plotting import Annotator
from utils import TryExcept, threaded
from utils.general import LOGGER, clip_boxes, increment_path, xywh2xyxy, xyxy2xywh
from utils.metrics import fitness
matplotlib.rc("font", **{"size": 11})
matplotlib.use("Agg")
def hist2d(x, y, n=100):
"""
Generates a logarithmic 2D histogram, useful for visualizing label or evolution distributions.
Used in labels.png and evolve.png.
"""
xedges, yedges = np.linspace(x.min(), x.max(), n), np.linspace(y.min(), y.max(), n)
hist, xedges, yedges = np.histogram2d(x, y, (xedges, yedges))
xidx = np.clip(np.digitize(x, xedges) - 1, 0, hist.shape[0] - 1)
yidx = np.clip(np.digitize(y, yedges) - 1, 0, hist.shape[1] - 1)
return np.log(hist[xidx, yidx])
def fitness(x):
"""Calculates fitness of a model using weighted sum of metrics P, R, mAP@0.5, mAP@0.5:0.95."""
w = [0.0, 0.0, 0.1, 0.9] # weights for [P, R, mAP@0.5, mAP@0.5:0.95]
return (x[:, :4] * w).sum(1)
The provided code snippet includes necessary dependencies for implementing the `plot_evolve` function. Write a Python function `def plot_evolve(evolve_csv="path/to/evolve.csv")` to solve the following problem:
Plots hyperparameter evolution results from a given CSV, saving the plot and displaying best results. Example: from utils.plots import *; plot_evolve()
Here is the function:
def plot_evolve(evolve_csv="path/to/evolve.csv"):
"""
Plots hyperparameter evolution results from a given CSV, saving the plot and displaying best results.
Example: from utils.plots import *; plot_evolve()
"""
evolve_csv = Path(evolve_csv)
data = pd.read_csv(evolve_csv)
keys = [x.strip() for x in data.columns]
x = data.values
f = fitness(x)
j = np.argmax(f) # max fitness index
plt.figure(figsize=(10, 12), tight_layout=True)
matplotlib.rc("font", **{"size": 8})
print(f"Best results from row {j} of {evolve_csv}:")
for i, k in enumerate(keys[7:]):
v = x[:, 7 + i]
mu = v[j] # best single result
plt.subplot(6, 5, i + 1)
plt.scatter(v, f, c=hist2d(v, f, 20), cmap="viridis", alpha=0.8, edgecolors="none")
plt.plot(mu, f.max(), "k+", markersize=15)
plt.title(f"{k} = {mu:.3g}", fontdict={"size": 9})
if i % 5 != 0:
plt.yticks([])
print(f"{k:>15}: {mu:.3g}")
f = evolve_csv.with_suffix(".png") # filename
plt.savefig(f, dpi=200)
plt.close()
print(f"Saved {f}") | Plots hyperparameter evolution results from a given CSV, saving the plot and displaying best results. Example: from utils.plots import *; plot_evolve() |
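A brief usage sketch for plot_evolve (the CSV path below is illustrative). Note how the function indexes columns: fitness() reads the first four metric columns and keys[7:] are plotted as hyperparameters, so the CSV is expected to carry the metric/loss columns first (seven of them), followed by one column per hyperparameter.
plot_evolve("runs/evolve/exp/evolve.csv") # saves evolve.png beside the CSV and prints the best row's values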
155,204 | import contextlib
import math
import os
from copy import copy
from pathlib import Path
import cv2
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sn
import torch
from PIL import Image, ImageDraw
from scipy.ndimage.filters import gaussian_filter1d
from ultralytics.utils.plotting import Annotator
from utils import TryExcept, threaded
from utils.general import LOGGER, clip_boxes, increment_path, xywh2xyxy, xyxy2xywh
from utils.metrics import fitness
LOGGER = logging.getLogger(LOGGING_NAME)
The provided code snippet includes necessary dependencies for implementing the `plot_results` function. Write a Python function `def plot_results(file="path/to/results.csv", dir="")` to solve the following problem:
Plots training results from a 'results.csv' file; accepts file path and directory as arguments. Example: from utils.plots import *; plot_results('path/to/results.csv')
Here is the function:
def plot_results(file="path/to/results.csv", dir=""):
"""
Plots training results from a 'results.csv' file; accepts file path and directory as arguments.
Example: from utils.plots import *; plot_results('path/to/results.csv')
"""
save_dir = Path(file).parent if file else Path(dir)
fig, ax = plt.subplots(2, 5, figsize=(12, 6), tight_layout=True)
ax = ax.ravel()
files = list(save_dir.glob("results*.csv"))
assert len(files), f"No results.csv files found in {save_dir.resolve()}, nothing to plot."
for f in files:
try:
data = pd.read_csv(f)
s = [x.strip() for x in data.columns]
x = data.values[:, 0]
for i, j in enumerate([1, 2, 3, 4, 5, 8, 9, 10, 6, 7]):
y = data.values[:, j].astype("float")
# y[y == 0] = np.nan # don't show zero values
ax[i].plot(x, y, marker=".", label=f.stem, linewidth=2, markersize=8) # actual results
ax[i].plot(x, gaussian_filter1d(y, sigma=3), ":", label="smooth", linewidth=2) # smoothing line
ax[i].set_title(s[j], fontsize=12)
# if j in [8, 9, 10]: # share train and val loss y axes
# ax[i].get_shared_y_axes().join(ax[i], ax[i - 5])
except Exception as e:
LOGGER.info(f"Warning: Plotting error for {f}: {e}")
ax[1].legend()
fig.savefig(save_dir / "results.png", dpi=200)
plt.close() | Plots training results from a 'results.csv' file; accepts file path and directory as arguments. Example: from utils.plots import *; plot_results('path/to/results.csv') |
155,205 | import contextlib
import math
import os
from copy import copy
from pathlib import Path
import cv2
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sn
import torch
from PIL import Image, ImageDraw
from scipy.ndimage.filters import gaussian_filter1d
from ultralytics.utils.plotting import Annotator
from utils import TryExcept, threaded
from utils.general import LOGGER, clip_boxes, increment_path, xywh2xyxy, xyxy2xywh
from utils.metrics import fitness
The provided code snippet includes necessary dependencies for implementing the `profile_idetection` function. Write a Python function `def profile_idetection(start=0, stop=0, labels=(), save_dir="")` to solve the following problem:
Plots per-image iDetection logs, comparing metrics like storage and performance over time. Example: from utils.plots import *; profile_idetection()
Here is the function:
def profile_idetection(start=0, stop=0, labels=(), save_dir=""):
"""
Plots per-image iDetection logs, comparing metrics like storage and performance over time.
Example: from utils.plots import *; profile_idetection()
"""
ax = plt.subplots(2, 4, figsize=(12, 6), tight_layout=True)[1].ravel()
s = ["Images", "Free Storage (GB)", "RAM Usage (GB)", "Battery", "dt_raw (ms)", "dt_smooth (ms)", "real-world FPS"]
files = list(Path(save_dir).glob("frames*.txt"))
for fi, f in enumerate(files):
try:
results = np.loadtxt(f, ndmin=2).T[:, 90:-30] # clip first and last frames
n = results.shape[1] # number of frames
x = np.arange(start, min(stop, n) if stop else n)
results = results[:, x]
t = results[0] - results[0].min() # set t0=0s
results[0] = x
for i, a in enumerate(ax):
if i < len(results):
label = labels[fi] if len(labels) else f.stem.replace("frames_", "")
a.plot(t, results[i], marker=".", label=label, linewidth=1, markersize=5)
a.set_title(s[i])
a.set_xlabel("time (s)")
# if fi == len(files) - 1:
# a.set_ylim(bottom=0)
for side in ["top", "right"]:
a.spines[side].set_visible(False)
else:
a.remove()
except Exception as e:
print(f"Warning: Plotting error for {f}; {e}")
ax[1].legend()
plt.savefig(Path(save_dir) / "idetection_profile.png", dpi=200) | Plots per-image iDetection logs, comparing metrics like storage and performance over time. Example: from utils.plots import *; profile_idetection() |
155,206 | import contextlib
import math
import os
from copy import copy
from pathlib import Path
import cv2
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sn
import torch
from PIL import Image, ImageDraw
from scipy.ndimage.filters import gaussian_filter1d
from ultralytics.utils.plotting import Annotator
from utils import TryExcept, threaded
from utils.general import LOGGER, clip_boxes, increment_path, xywh2xyxy, xyxy2xywh
from utils.metrics import fitness
def xyxy2xywh(x):
"""Convert nx4 boxes from [x1, y1, x2, y2] to [x, y, w, h] where xy1=top-left, xy2=bottom-right."""
y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x)
y[..., 0] = (x[..., 0] + x[..., 2]) / 2 # x center
y[..., 1] = (x[..., 1] + x[..., 3]) / 2 # y center
y[..., 2] = x[..., 2] - x[..., 0] # width
y[..., 3] = x[..., 3] - x[..., 1] # height
return y
def xywh2xyxy(x):
"""Convert nx4 boxes from [x, y, w, h] to [x1, y1, x2, y2] where xy1=top-left, xy2=bottom-right."""
y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x)
y[..., 0] = x[..., 0] - x[..., 2] / 2 # top left x
y[..., 1] = x[..., 1] - x[..., 3] / 2 # top left y
y[..., 2] = x[..., 0] + x[..., 2] / 2 # bottom right x
y[..., 3] = x[..., 1] + x[..., 3] / 2 # bottom right y
return y
def clip_boxes(boxes, shape):
"""Clips bounding box coordinates (xyxy) to fit within the specified image shape (height, width)."""
if isinstance(boxes, torch.Tensor): # faster individually
boxes[..., 0].clamp_(0, shape[1]) # x1
boxes[..., 1].clamp_(0, shape[0]) # y1
boxes[..., 2].clamp_(0, shape[1]) # x2
boxes[..., 3].clamp_(0, shape[0]) # y2
else: # np.array (faster grouped)
boxes[..., [0, 2]] = boxes[..., [0, 2]].clip(0, shape[1]) # x1, x2
boxes[..., [1, 3]] = boxes[..., [1, 3]].clip(0, shape[0]) # y1, y2
def increment_path(path, exist_ok=False, sep="", mkdir=False):
"""
Generates an incremented file or directory path if it exists, with optional mkdir; args: path, exist_ok=False,
sep="", mkdir=False.
Example: runs/exp --> runs/exp{sep}2, runs/exp{sep}3, ... etc
"""
path = Path(path) # os-agnostic
if path.exists() and not exist_ok:
path, suffix = (path.with_suffix(""), path.suffix) if path.is_file() else (path, "")
# Method 1
for n in range(2, 9999):
p = f"{path}{sep}{n}{suffix}" # increment path
if not os.path.exists(p): #
break
path = Path(p)
# Method 2 (deprecated)
# dirs = glob.glob(f"{path}{sep}*") # similar paths
# matches = [re.search(rf"{path.stem}{sep}(\d+)", d) for d in dirs]
# i = [int(m.groups()[0]) for m in matches if m] # indices
# n = max(i) + 1 if i else 2 # increment number
# path = Path(f"{path}{sep}{n}{suffix}") # increment path
if mkdir:
path.mkdir(parents=True, exist_ok=True) # make directory
return path
The provided code snippet includes necessary dependencies for implementing the `save_one_box` function. Write a Python function `def save_one_box(xyxy, im, file=Path("im.jpg"), gain=1.02, pad=10, square=False, BGR=False, save=True)` to solve the following problem:
Crops and saves an image from bounding box `xyxy`, applied with `gain` and `pad`, optionally squares and adjusts for BGR.
Here is the function:
def save_one_box(xyxy, im, file=Path("im.jpg"), gain=1.02, pad=10, square=False, BGR=False, save=True):
"""Crops and saves an image from bounding box `xyxy`, applied with `gain` and `pad`, optionally squares and adjusts
for BGR.
"""
xyxy = torch.tensor(xyxy).view(-1, 4)
b = xyxy2xywh(xyxy) # boxes
if square:
b[:, 2:] = b[:, 2:].max(1)[0].unsqueeze(1) # attempt rectangle to square
b[:, 2:] = b[:, 2:] * gain + pad # box wh * gain + pad
xyxy = xywh2xyxy(b).long()
clip_boxes(xyxy, im.shape)
crop = im[int(xyxy[0, 1]) : int(xyxy[0, 3]), int(xyxy[0, 0]) : int(xyxy[0, 2]), :: (1 if BGR else -1)]
if save:
file.parent.mkdir(parents=True, exist_ok=True) # make directory
f = str(increment_path(file).with_suffix(".jpg"))
# cv2.imwrite(f, crop) # save BGR, https://github.com/ultralytics/yolov5/issues/7007 chroma subsampling issue
Image.fromarray(crop[..., ::-1]).save(f, quality=95, subsampling=0) # save RGB
return crop | Crops and saves an image from bounding box `xyxy`, applied with `gain` and `pad`, optionally squares and adjusts for BGR. |
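A minimal usage sketch for save_one_box; the image file and box coordinates below are illustrative assumptions.
import cv2
from pathlib import Path
im = cv2.imread("bus.jpg") # BGR image loaded with OpenCV (assumed to exist on disk)
xyxy = [50, 100, 250, 400] # example box in pixels: x1, y1, x2, y2
crop = save_one_box(xyxy, im, file=Path("crops/example.jpg"), gain=1.02, pad=10, save=True)
print(crop.shape) # the clipped crop; with BGR=False it is written as a JPEG under crops/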
155,207 | import contextlib
import glob
import hashlib
import json
import math
import os
import random
import shutil
import time
from itertools import repeat
from multiprocessing.pool import Pool, ThreadPool
from pathlib import Path
from threading import Thread
from urllib.parse import urlparse
import numpy as np
import psutil
import torch
import torch.nn.functional as F
import torchvision
import yaml
from PIL import ExifTags, Image, ImageOps
from torch.utils.data import DataLoader, Dataset, dataloader, distributed
from tqdm import tqdm
from utils.augmentations import (
Albumentations,
augment_hsv,
classify_albumentations,
classify_transforms,
copy_paste,
letterbox,
mixup,
random_perspective,
)
from utils.general import (
DATASETS_DIR,
LOGGER,
NUM_THREADS,
TQDM_BAR_FORMAT,
check_dataset,
check_requirements,
check_yaml,
clean_str,
cv2,
is_colab,
is_kaggle,
segments2boxes,
unzip_file,
xyn2xy,
xywh2xyxy,
xywhn2xyxy,
xyxy2xywhn,
)
from utils.torch_utils import torch_distributed_zero_first
The provided code snippet includes necessary dependencies for implementing the `get_hash` function. Write a Python function `def get_hash(paths)` to solve the following problem:
Generates a single SHA256 hash for a list of file or directory paths by combining their sizes and paths.
Here is the function:
def get_hash(paths):
"""Generates a single SHA256 hash for a list of file or directory paths by combining their sizes and paths."""
size = sum(os.path.getsize(p) for p in paths if os.path.exists(p)) # sizes
h = hashlib.sha256(str(size).encode()) # hash sizes
h.update("".join(paths).encode()) # hash paths
return h.hexdigest() # return hash | Generates a single SHA256 hash for a list of file or directory paths by combining their sizes and paths. |
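A small usage sketch; the paths are hypothetical. In the dataloader this hash typically keys the label cache, so it changes whenever any listed file's size or path changes.
paths = ["images/train/0001.jpg", "labels/train/0001.txt"] # hypothetical image/label pair
print(get_hash(paths)) # 64-character SHA256 hex digest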
155,208 | import contextlib
import glob
import hashlib
import json
import math
import os
import random
import shutil
import time
from itertools import repeat
from multiprocessing.pool import Pool, ThreadPool
from pathlib import Path
from threading import Thread
from urllib.parse import urlparse
import numpy as np
import psutil
import torch
import torch.nn.functional as F
import torchvision
import yaml
from PIL import ExifTags, Image, ImageOps
from torch.utils.data import DataLoader, Dataset, dataloader, distributed
from tqdm import tqdm
from utils.augmentations import (
Albumentations,
augment_hsv,
classify_albumentations,
classify_transforms,
copy_paste,
letterbox,
mixup,
random_perspective,
)
from utils.general import (
DATASETS_DIR,
LOGGER,
NUM_THREADS,
TQDM_BAR_FORMAT,
check_dataset,
check_requirements,
check_yaml,
clean_str,
cv2,
is_colab,
is_kaggle,
segments2boxes,
unzip_file,
xyn2xy,
xywh2xyxy,
xywhn2xyxy,
xyxy2xywhn,
)
from utils.torch_utils import torch_distributed_zero_first
DATASETS_DIR = Path(os.getenv("YOLOv5_DATASETS_DIR", ROOT.parent / "datasets"))
The provided code snippet includes necessary dependencies for implementing the `flatten_recursive` function. Write a Python function `def flatten_recursive(path=DATASETS_DIR / "coco128")` to solve the following problem:
Flattens a directory by copying all files from subdirectories to a new top-level directory, preserving filenames.
Here is the function:
def flatten_recursive(path=DATASETS_DIR / "coco128"):
"""Flattens a directory by copying all files from subdirectories to a new top-level directory, preserving
filenames.
"""
new_path = Path(f"{str(path)}_flat")
if os.path.exists(new_path):
shutil.rmtree(new_path) # delete output folder
os.makedirs(new_path) # make new output folder
for file in tqdm(glob.glob(f"{str(Path(path))}/**/*.*", recursive=True)):
shutil.copyfile(file, new_path / Path(file).name) | Flattens a directory by copying all files from subdirectories to a new top-level directory, preserving filenames. |
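A usage sketch, assuming the default coco128 dataset directory exists under DATASETS_DIR. Because only the filename is kept, files with identical names in different subdirectories overwrite each other in the flat copy.
flatten_recursive(DATASETS_DIR / "coco128") # copies every file into a new sibling directory coco128_flat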
155,209 | import contextlib
import glob
import hashlib
import json
import math
import os
import random
import shutil
import time
from itertools import repeat
from multiprocessing.pool import Pool, ThreadPool
from pathlib import Path
from threading import Thread
from urllib.parse import urlparse
import numpy as np
import psutil
import torch
import torch.nn.functional as F
import torchvision
import yaml
from PIL import ExifTags, Image, ImageOps
from torch.utils.data import DataLoader, Dataset, dataloader, distributed
from tqdm import tqdm
from utils.augmentations import (
Albumentations,
augment_hsv,
classify_albumentations,
classify_transforms,
copy_paste,
letterbox,
mixup,
random_perspective,
)
from utils.general import (
DATASETS_DIR,
LOGGER,
NUM_THREADS,
TQDM_BAR_FORMAT,
check_dataset,
check_requirements,
check_yaml,
clean_str,
cv2,
is_colab,
is_kaggle,
segments2boxes,
unzip_file,
xyn2xy,
xywh2xyxy,
xywhn2xyxy,
xyxy2xywhn,
)
from utils.torch_utils import torch_distributed_zero_first
IMG_FORMATS = "bmp", "dng", "jpeg", "jpg", "mpo", "png", "tif", "tiff", "webp", "pfm"
def img2label_paths(img_paths):
"""Generates label file paths from corresponding image file paths by replacing `/images/` with `/labels/` and
extension with `.txt`.
"""
sa, sb = f"{os.sep}images{os.sep}", f"{os.sep}labels{os.sep}" # /images/, /labels/ substrings
return [sb.join(x.rsplit(sa, 1)).rsplit(".", 1)[0] + ".txt" for x in img_paths]
import cv2
DATASETS_DIR = Path(os.getenv("YOLOv5_DATASETS_DIR", ROOT.parent / "datasets"))
cv2.setNumThreads(0)
def xywh2xyxy(x):
"""Convert nx4 boxes from [x, y, w, h] to [x1, y1, x2, y2] where xy1=top-left, xy2=bottom-right."""
y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x)
y[..., 0] = x[..., 0] - x[..., 2] / 2 # top left x
y[..., 1] = x[..., 1] - x[..., 3] / 2 # top left y
y[..., 2] = x[..., 0] + x[..., 2] / 2 # bottom right x
y[..., 3] = x[..., 1] + x[..., 3] / 2 # bottom right y
return y
The provided code snippet includes necessary dependencies for implementing the `extract_boxes` function. Write a Python function `def extract_boxes(path=DATASETS_DIR / "coco128")` to solve the following problem:
Converts a detection dataset to a classification dataset, creating a directory for each class and extracting bounding boxes. Example: from utils.dataloaders import *; extract_boxes()
Here is the function:
def extract_boxes(path=DATASETS_DIR / "coco128"):
"""
Converts a detection dataset to a classification dataset, creating a directory for each class and extracting
bounding boxes.
Example: from utils.dataloaders import *; extract_boxes()
"""
path = Path(path) # images dir
shutil.rmtree(path / "classification") if (path / "classification").is_dir() else None # remove existing
files = list(path.rglob("*.*"))
n = len(files) # number of files
for im_file in tqdm(files, total=n):
if im_file.suffix[1:] in IMG_FORMATS:
# image
im = cv2.imread(str(im_file))[..., ::-1] # BGR to RGB
h, w = im.shape[:2]
# labels
lb_file = Path(img2label_paths([str(im_file)])[0])
if Path(lb_file).exists():
with open(lb_file) as f:
lb = np.array([x.split() for x in f.read().strip().splitlines()], dtype=np.float32) # labels
for j, x in enumerate(lb):
c = int(x[0]) # class
f = (path / "classification") / f"{c}" / f"{path.stem}_{im_file.stem}_{j}.jpg" # new filename
if not f.parent.is_dir():
f.parent.mkdir(parents=True)
b = x[1:] * [w, h, w, h] # box
# b[2:] = b[2:].max() # rectangle to square
b[2:] = b[2:] * 1.2 + 3 # pad
b = xywh2xyxy(b.reshape(-1, 4)).ravel().astype(int)
b[[0, 2]] = np.clip(b[[0, 2]], 0, w) # clip boxes outside of image
b[[1, 3]] = np.clip(b[[1, 3]], 0, h)
assert cv2.imwrite(str(f), im[b[1] : b[3], b[0] : b[2]]), f"box failure in {f}" | Converts a detection dataset to a classification dataset, creating a directory for each class and extracting bounding boxes. Example: from utils.dataloaders import *; extract_boxes() |
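A usage sketch, assuming coco128 images and YOLO-format labels exist under DATASETS_DIR. The output layout classification/<class_id>/*.jpg matches the root/<class>/image structure expected by folder-based classification loaders.
extract_boxes(DATASETS_DIR / "coco128") # writes padded per-object crops to coco128/classification/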
155,210 | import contextlib
import glob
import hashlib
import json
import math
import os
import random
import shutil
import time
from itertools import repeat
from multiprocessing.pool import Pool, ThreadPool
from pathlib import Path
from threading import Thread
from urllib.parse import urlparse
import numpy as np
import psutil
import torch
import torch.nn.functional as F
import torchvision
import yaml
from PIL import ExifTags, Image, ImageOps
from torch.utils.data import DataLoader, Dataset, dataloader, distributed
from tqdm import tqdm
from utils.augmentations import (
Albumentations,
augment_hsv,
classify_albumentations,
classify_transforms,
copy_paste,
letterbox,
mixup,
random_perspective,
)
from utils.general import (
DATASETS_DIR,
LOGGER,
NUM_THREADS,
TQDM_BAR_FORMAT,
check_dataset,
check_requirements,
check_yaml,
clean_str,
cv2,
is_colab,
is_kaggle,
segments2boxes,
unzip_file,
xyn2xy,
xywh2xyxy,
xywhn2xyxy,
xyxy2xywhn,
)
from utils.torch_utils import torch_distributed_zero_first
IMG_FORMATS = "bmp", "dng", "jpeg", "jpg", "mpo", "png", "tif", "tiff", "webp", "pfm"
def img2label_paths(img_paths):
"""Generates label file paths from corresponding image file paths by replacing `/images/` with `/labels/` and
extension with `.txt`.
"""
sa, sb = f"{os.sep}images{os.sep}", f"{os.sep}labels{os.sep}" # /images/, /labels/ substrings
return [sb.join(x.rsplit(sa, 1)).rsplit(".", 1)[0] + ".txt" for x in img_paths]
DATASETS_DIR = Path(os.getenv("YOLOv5_DATASETS_DIR", ROOT.parent / "datasets"))
The provided code snippet includes necessary dependencies for implementing the `autosplit` function. Write a Python function `def autosplit(path=DATASETS_DIR / "coco128/images", weights=(0.9, 0.1, 0.0), annotated_only=False)` to solve the following problem:
Autosplit a dataset into train/val/test splits and save path/autosplit_*.txt files Usage: from utils.dataloaders import *; autosplit() Arguments path: Path to images directory weights: Train, val, test weights (list, tuple) annotated_only: Only use images with an annotated txt file
Here is the function:
def autosplit(path=DATASETS_DIR / "coco128/images", weights=(0.9, 0.1, 0.0), annotated_only=False):
"""Autosplit a dataset into train/val/test splits and save path/autosplit_*.txt files
Usage: from utils.dataloaders import *; autosplit()
Arguments
path: Path to images directory
weights: Train, val, test weights (list, tuple)
annotated_only: Only use images with an annotated txt file
"""
path = Path(path) # images dir
files = sorted(x for x in path.rglob("*.*") if x.suffix[1:].lower() in IMG_FORMATS) # image files only
n = len(files) # number of files
random.seed(0) # for reproducibility
indices = random.choices([0, 1, 2], weights=weights, k=n) # assign each image to a split
txt = ["autosplit_train.txt", "autosplit_val.txt", "autosplit_test.txt"] # 3 txt files
for x in txt:
if (path.parent / x).exists():
(path.parent / x).unlink() # remove existing
print(f"Autosplitting images from {path}" + ", using *.txt labeled images only" * annotated_only)
for i, img in tqdm(zip(indices, files), total=n):
if not annotated_only or Path(img2label_paths([str(img)])[0]).exists(): # check label
with open(path.parent / txt[i], "a") as f:
f.write(f"./{img.relative_to(path.parent).as_posix()}" + "\n") # add image to txt file | Autosplit a dataset into train/val/test splits and save path/autosplit_*.txt files Usage: from utils.dataloaders import *; autosplit() Arguments path: Path to images directory weights: Train, val, test weights (list, tuple) annotated_only: Only use images with an annotated txt file |
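A usage sketch with illustrative split weights; annotated_only=True keeps only images that have a label .txt file.
autosplit(path=DATASETS_DIR / "coco128/images", weights=(0.8, 0.1, 0.1), annotated_only=True)
# writes autosplit_train.txt, autosplit_val.txt and autosplit_test.txt next to the images directory,
# each listing relative image paths such as ./images/train2017/000000000009.jpg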
155,211 | import contextlib
import glob
import hashlib
import json
import math
import os
import random
import shutil
import time
from itertools import repeat
from multiprocessing.pool import Pool, ThreadPool
from pathlib import Path
from threading import Thread
from urllib.parse import urlparse
import numpy as np
import psutil
import torch
import torch.nn.functional as F
import torchvision
import yaml
from PIL import ExifTags, Image, ImageOps
from torch.utils.data import DataLoader, Dataset, dataloader, distributed
from tqdm import tqdm
from utils.augmentations import (
Albumentations,
augment_hsv,
classify_albumentations,
classify_transforms,
copy_paste,
letterbox,
mixup,
random_perspective,
)
from utils.general import (
DATASETS_DIR,
LOGGER,
NUM_THREADS,
TQDM_BAR_FORMAT,
check_dataset,
check_requirements,
check_yaml,
clean_str,
cv2,
is_colab,
is_kaggle,
segments2boxes,
unzip_file,
xyn2xy,
xywh2xyxy,
xywhn2xyxy,
xyxy2xywhn,
)
from utils.torch_utils import torch_distributed_zero_first
IMG_FORMATS = "bmp", "dng", "jpeg", "jpg", "mpo", "png", "tif", "tiff", "webp", "pfm"
for orientation in ExifTags.TAGS.keys(): # look up the EXIF Orientation tag key used by exif_size() below
    if ExifTags.TAGS[orientation] == "Orientation":
        break
def exif_size(img):
"""Returns corrected PIL image size (width, height) considering EXIF orientation."""
s = img.size # (width, height)
with contextlib.suppress(Exception):
rotation = dict(img._getexif().items())[orientation]
if rotation in [6, 8]: # rotation 270 or 90
s = (s[1], s[0])
return s
def exif_transpose(image):
"""
Transpose a PIL image accordingly if it has an EXIF Orientation tag.
Inplace version of https://github.com/python-pillow/Pillow/blob/master/src/PIL/ImageOps.py exif_transpose()
:param image: The image to transpose.
:return: An image.
"""
exif = image.getexif()
orientation = exif.get(0x0112, 1) # default 1
if orientation > 1:
method = {
2: Image.FLIP_LEFT_RIGHT,
3: Image.ROTATE_180,
4: Image.FLIP_TOP_BOTTOM,
5: Image.TRANSPOSE,
6: Image.ROTATE_270,
7: Image.TRANSVERSE,
8: Image.ROTATE_90,
}.get(orientation)
if method is not None:
image = image.transpose(method)
del exif[0x0112]
image.info["exif"] = exif.tobytes()
return image
def segments2boxes(segments):
"""Convert segment labels to box labels, i.e. (cls, xy1, xy2, ...) to (cls, xywh)."""
boxes = []
for s in segments:
x, y = s.T # segment xy
boxes.append([x.min(), y.min(), x.max(), y.max()]) # cls, xyxy
return xyxy2xywh(np.array(boxes)) # cls, xywh
The provided code snippet includes necessary dependencies for implementing the `verify_image_label` function. Write a Python function `def verify_image_label(args)` to solve the following problem:
Verifies a single image-label pair, ensuring image format, size, and legal label values.
Here is the function:
def verify_image_label(args):
"""Verifies a single image-label pair, ensuring image format, size, and legal label values."""
im_file, lb_file, prefix = args
nm, nf, ne, nc, msg, segments = 0, 0, 0, 0, "", [] # number (missing, found, empty, corrupt), message, segments
try:
# verify images
im = Image.open(im_file)
im.verify() # PIL verify
shape = exif_size(im) # image size
assert (shape[0] > 9) & (shape[1] > 9), f"image size {shape} <10 pixels"
assert im.format.lower() in IMG_FORMATS, f"invalid image format {im.format}"
if im.format.lower() in ("jpg", "jpeg"):
with open(im_file, "rb") as f:
f.seek(-2, 2)
if f.read() != b"\xff\xd9": # corrupt JPEG
ImageOps.exif_transpose(Image.open(im_file)).save(im_file, "JPEG", subsampling=0, quality=100)
msg = f"{prefix}WARNING ⚠️ {im_file}: corrupt JPEG restored and saved"
# verify labels
if os.path.isfile(lb_file):
nf = 1 # label found
with open(lb_file) as f:
lb = [x.split() for x in f.read().strip().splitlines() if len(x)]
if any(len(x) > 6 for x in lb): # is segment
classes = np.array([x[0] for x in lb], dtype=np.float32)
segments = [np.array(x[1:], dtype=np.float32).reshape(-1, 2) for x in lb] # (cls, xy1...)
lb = np.concatenate((classes.reshape(-1, 1), segments2boxes(segments)), 1) # (cls, xywh)
lb = np.array(lb, dtype=np.float32)
nl = len(lb)
if nl:
assert lb.shape[1] == 5, f"labels require 5 columns, {lb.shape[1]} columns detected"
assert (lb >= 0).all(), f"negative label values {lb[lb < 0]}"
assert (lb[:, 1:] <= 1).all(), f"non-normalized or out of bounds coordinates {lb[:, 1:][lb[:, 1:] > 1]}"
_, i = np.unique(lb, axis=0, return_index=True)
if len(i) < nl: # duplicate row check
lb = lb[i] # remove duplicates
if segments:
segments = [segments[x] for x in i]
msg = f"{prefix}WARNING ⚠️ {im_file}: {nl - len(i)} duplicate labels removed"
else:
ne = 1 # label empty
lb = np.zeros((0, 5), dtype=np.float32)
else:
nm = 1 # label missing
lb = np.zeros((0, 5), dtype=np.float32)
return im_file, lb, shape, segments, nm, nf, ne, nc, msg
except Exception as e:
nc = 1
msg = f"{prefix}WARNING ⚠️ {im_file}: ignoring corrupt image/label: {e}"
return [None, None, None, None, nm, nf, ne, nc, msg] | Verifies a single image-label pair, ensuring image format, size, and legal label values. |
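A sketch of driving the verifier over image/label pairs with a thread pool, as the dataset cache builder does; the example image path and prefix are illustrative.
from multiprocessing.pool import ThreadPool
im_files = ["datasets/coco128/images/train2017/000000000009.jpg"] # assumed to exist
lb_files = img2label_paths(im_files)
with ThreadPool(4) as pool:
    args = zip(im_files, lb_files, ["train: "] * len(im_files))
    for im_file, lb, shape, segments, nm, nf, ne, nc, msg in pool.imap(verify_image_label, args):
        if msg:
            print(msg) # warnings for corrupt images or bad/duplicate labels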
155,212 | import glob
import re
from pathlib import Path
import matplotlib.image as mpimg
import matplotlib.pyplot as plt
import numpy as np
import yaml
from ultralytics.utils.plotting import Annotator, colors
from clearml import Dataset # required by construct_dataset below (Dataset.get / get_local_copy)
The provided code snippet includes necessary dependencies for implementing the `construct_dataset` function. Write a Python function `def construct_dataset(clearml_info_string)` to solve the following problem:
Load in a clearml dataset and fill the internal data_dict with its contents.
Here is the function:
def construct_dataset(clearml_info_string):
"""Load in a clearml dataset and fill the internal data_dict with its contents."""
dataset_id = clearml_info_string.replace("clearml://", "")
dataset = Dataset.get(dataset_id=dataset_id)
dataset_root_path = Path(dataset.get_local_copy())
# We'll search for the yaml file definition in the dataset
yaml_filenames = list(glob.glob(str(dataset_root_path / "*.yaml")) + glob.glob(str(dataset_root_path / "*.yml")))
if len(yaml_filenames) > 1:
raise ValueError(
"More than one yaml file was found in the dataset root, cannot determine which one contains "
"the dataset definition this way."
)
elif not yaml_filenames:
raise ValueError(
"No yaml definition found in dataset root path, check that there is a correct yaml file "
"inside the dataset root path."
)
with open(yaml_filenames[0]) as f:
dataset_definition = yaml.safe_load(f)
assert set(
dataset_definition.keys()
).issuperset(
{"train", "test", "val", "nc", "names"}
), "The right keys were not found in the yaml file, make sure it at least has the following keys: ('train', 'test', 'val', 'nc', 'names')"
data_dict = {}
data_dict["train"] = (
str((dataset_root_path / dataset_definition["train"]).resolve()) if dataset_definition["train"] else None
)
data_dict["test"] = (
str((dataset_root_path / dataset_definition["test"]).resolve()) if dataset_definition["test"] else None
)
data_dict["val"] = (
str((dataset_root_path / dataset_definition["val"]).resolve()) if dataset_definition["val"] else None
)
data_dict["nc"] = dataset_definition["nc"]
data_dict["names"] = dataset_definition["names"]
return data_dict | Load in a clearml dataset and fill the internal data_dict with its contents. |
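A usage sketch; the dataset ID is a placeholder, and the ClearML dataset root is assumed to contain exactly one YAML file defining train/test/val/nc/names.
data_dict = construct_dataset("clearml://<your_dataset_id>")
print(data_dict["nc"], data_dict["names"])
print(data_dict["train"]) # absolute path to the train split inside the local dataset copy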
155,213 | import argparse
import json
import logging
import os
import sys
from pathlib import Path
import comet_ml
ROOT = FILE.parents[3]
from train import train
from utils.callbacks import Callbacks
from utils.general import increment_path
from utils.torch_utils import select_device
The provided code snippet includes necessary dependencies for implementing the `get_args` function. Write a Python function `def get_args(known=False)` to solve the following problem:
Parses command-line arguments for YOLOv5 training, supporting configuration of weights, data paths, hyperparameters, and more.
Here is the function:
def get_args(known=False):
"""Parses command-line arguments for YOLOv5 training, supporting configuration of weights, data paths,
hyperparameters, and more.
"""
parser = argparse.ArgumentParser()
parser.add_argument("--weights", type=str, default=ROOT / "yolov5s.pt", help="initial weights path")
parser.add_argument("--cfg", type=str, default="", help="model.yaml path")
parser.add_argument("--data", type=str, default=ROOT / "data/coco128.yaml", help="dataset.yaml path")
parser.add_argument("--hyp", type=str, default=ROOT / "data/hyps/hyp.scratch-low.yaml", help="hyperparameters path")
parser.add_argument("--epochs", type=int, default=300, help="total training epochs")
parser.add_argument("--batch-size", type=int, default=16, help="total batch size for all GPUs, -1 for autobatch")
parser.add_argument("--imgsz", "--img", "--img-size", type=int, default=640, help="train, val image size (pixels)")
parser.add_argument("--rect", action="store_true", help="rectangular training")
parser.add_argument("--resume", nargs="?", const=True, default=False, help="resume most recent training")
parser.add_argument("--nosave", action="store_true", help="only save final checkpoint")
parser.add_argument("--noval", action="store_true", help="only validate final epoch")
parser.add_argument("--noautoanchor", action="store_true", help="disable AutoAnchor")
parser.add_argument("--noplots", action="store_true", help="save no plot files")
parser.add_argument("--evolve", type=int, nargs="?", const=300, help="evolve hyperparameters for x generations")
parser.add_argument("--bucket", type=str, default="", help="gsutil bucket")
parser.add_argument("--cache", type=str, nargs="?", const="ram", help='--cache images in "ram" (default) or "disk"')
parser.add_argument("--image-weights", action="store_true", help="use weighted image selection for training")
parser.add_argument("--device", default="", help="cuda device, i.e. 0 or 0,1,2,3 or cpu")
parser.add_argument("--multi-scale", action="store_true", help="vary img-size +/- 50%%")
parser.add_argument("--single-cls", action="store_true", help="train multi-class data as single-class")
parser.add_argument("--optimizer", type=str, choices=["SGD", "Adam", "AdamW"], default="SGD", help="optimizer")
parser.add_argument("--sync-bn", action="store_true", help="use SyncBatchNorm, only available in DDP mode")
parser.add_argument("--workers", type=int, default=8, help="max dataloader workers (per RANK in DDP mode)")
parser.add_argument("--project", default=ROOT / "runs/train", help="save to project/name")
parser.add_argument("--name", default="exp", help="save to project/name")
parser.add_argument("--exist-ok", action="store_true", help="existing project/name ok, do not increment")
parser.add_argument("--quad", action="store_true", help="quad dataloader")
parser.add_argument("--cos-lr", action="store_true", help="cosine LR scheduler")
parser.add_argument("--label-smoothing", type=float, default=0.0, help="Label smoothing epsilon")
parser.add_argument("--patience", type=int, default=100, help="EarlyStopping patience (epochs without improvement)")
parser.add_argument("--freeze", nargs="+", type=int, default=[0], help="Freeze layers: backbone=10, first3=0 1 2")
parser.add_argument("--save-period", type=int, default=-1, help="Save checkpoint every x epochs (disabled if < 1)")
parser.add_argument("--seed", type=int, default=0, help="Global training seed")
parser.add_argument("--local_rank", type=int, default=-1, help="Automatic DDP Multi-GPU argument, do not modify")
# Weights & Biases arguments
parser.add_argument("--entity", default=None, help="W&B: Entity")
parser.add_argument("--upload_dataset", nargs="?", const=True, default=False, help='W&B: Upload data, "val" option')
parser.add_argument("--bbox_interval", type=int, default=-1, help="W&B: Set bounding-box image logging interval")
parser.add_argument("--artifact_alias", type=str, default="latest", help="W&B: Version of dataset artifact to use")
# Comet Arguments
parser.add_argument("--comet_optimizer_config", type=str, help="Comet: Path to a Comet Optimizer Config File.")
parser.add_argument("--comet_optimizer_id", type=str, help="Comet: ID of the Comet Optimizer sweep.")
parser.add_argument("--comet_optimizer_objective", type=str, help="Comet: Set to 'minimize' or 'maximize'.")
parser.add_argument("--comet_optimizer_metric", type=str, help="Comet: Metric to Optimize.")
parser.add_argument(
"--comet_optimizer_workers",
type=int,
default=1,
help="Comet: Number of Parallel Workers to use with the Comet Optimizer.",
)
return parser.parse_known_args()[0] if known else parser.parse_args() | Parses command-line arguments for YOLOv5 training, supporting configuration of weights, data paths, hyperparameters, and more. |
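A minimal sketch of calling the parser programmatically; known=True tolerates extra flags injected by sweep runners, ROOT is assumed to resolve to the repository root as in the snippet above, and the argv values are illustrative overrides.
import sys
sys.argv = ["train.py", "--epochs", "5", "--batch-size", "8", "--device", "cpu"] # simulate CLI arguments
opt = get_args(known=True)
print(opt.epochs, opt.batch_size, opt.imgsz) # 5 8 640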
155,214 | import argparse
import json
import logging
import os
import sys
from pathlib import Path
import comet_ml
from train import train
from utils.callbacks import Callbacks
from utils.general import increment_path
from utils.torch_utils import select_device
def train(hyp, opt, device, callbacks):
"""
Trains YOLOv5 model with given hyperparameters, options, and device, managing datasets, model architecture, loss
computation, and optimizer steps.
`hyp` argument is path/to/hyp.yaml or hyp dictionary.
"""
save_dir, epochs, batch_size, weights, single_cls, evolve, data, cfg, resume, noval, nosave, workers, freeze = (
Path(opt.save_dir),
opt.epochs,
opt.batch_size,
opt.weights,
opt.single_cls,
opt.evolve,
opt.data,
opt.cfg,
opt.resume,
opt.noval,
opt.nosave,
opt.workers,
opt.freeze,
)
callbacks.run("on_pretrain_routine_start")
# Directories
w = save_dir / "weights" # weights dir
(w.parent if evolve else w).mkdir(parents=True, exist_ok=True) # make dir
last, best = w / "last.pt", w / "best.pt"
# Hyperparameters
if isinstance(hyp, str):
with open(hyp, errors="ignore") as f:
hyp = yaml.safe_load(f) # load hyps dict
LOGGER.info(colorstr("hyperparameters: ") + ", ".join(f"{k}={v}" for k, v in hyp.items()))
opt.hyp = hyp.copy() # for saving hyps to checkpoints
# Save run settings
if not evolve:
yaml_save(save_dir / "hyp.yaml", hyp)
yaml_save(save_dir / "opt.yaml", vars(opt))
# Loggers
data_dict = None
if RANK in {-1, 0}:
include_loggers = list(LOGGERS)
if getattr(opt, "ndjson_console", False):
include_loggers.append("ndjson_console")
if getattr(opt, "ndjson_file", False):
include_loggers.append("ndjson_file")
loggers = Loggers(
save_dir=save_dir,
weights=weights,
opt=opt,
hyp=hyp,
logger=LOGGER,
include=tuple(include_loggers),
)
# Register actions
for k in methods(loggers):
callbacks.register_action(k, callback=getattr(loggers, k))
# Process custom dataset artifact link
data_dict = loggers.remote_dataset
if resume: # If resuming runs from remote artifact
weights, epochs, hyp, batch_size = opt.weights, opt.epochs, opt.hyp, opt.batch_size
# Config
plots = not evolve and not opt.noplots # create plots
cuda = device.type != "cpu"
init_seeds(opt.seed + 1 + RANK, deterministic=True)
with torch_distributed_zero_first(LOCAL_RANK):
data_dict = data_dict or check_dataset(data) # check if None
train_path, val_path = data_dict["train"], data_dict["val"]
nc = 1 if single_cls else int(data_dict["nc"]) # number of classes
names = {0: "item"} if single_cls and len(data_dict["names"]) != 1 else data_dict["names"] # class names
is_coco = isinstance(val_path, str) and val_path.endswith("coco/val2017.txt") # COCO dataset
# Model
check_suffix(weights, ".pt") # check weights
pretrained = weights.endswith(".pt")
if pretrained:
with torch_distributed_zero_first(LOCAL_RANK):
weights = attempt_download(weights) # download if not found locally
ckpt = torch.load(weights, map_location="cpu") # load checkpoint to CPU to avoid CUDA memory leak
model = Model(cfg or ckpt["model"].yaml, ch=3, nc=nc, anchors=hyp.get("anchors")).to(device) # create
exclude = ["anchor"] if (cfg or hyp.get("anchors")) and not resume else [] # exclude keys
csd = ckpt["model"].float().state_dict() # checkpoint state_dict as FP32
csd = intersect_dicts(csd, model.state_dict(), exclude=exclude) # intersect
model.load_state_dict(csd, strict=False) # load
LOGGER.info(f"Transferred {len(csd)}/{len(model.state_dict())} items from {weights}") # report
else:
model = Model(cfg, ch=3, nc=nc, anchors=hyp.get("anchors")).to(device) # create
amp = check_amp(model) # check AMP
# Freeze
freeze = [f"model.{x}." for x in (freeze if len(freeze) > 1 else range(freeze[0]))] # layers to freeze
for k, v in model.named_parameters():
v.requires_grad = True # train all layers
# v.register_hook(lambda x: torch.nan_to_num(x)) # NaN to 0 (commented for erratic training results)
if any(x in k for x in freeze):
LOGGER.info(f"freezing {k}")
v.requires_grad = False
# Image size
gs = max(int(model.stride.max()), 32) # grid size (max stride)
imgsz = check_img_size(opt.imgsz, gs, floor=gs * 2) # verify imgsz is gs-multiple
# Batch size
if RANK == -1 and batch_size == -1: # single-GPU only, estimate best batch size
batch_size = check_train_batch_size(model, imgsz, amp)
loggers.on_params_update({"batch_size": batch_size})
# Optimizer
nbs = 64 # nominal batch size
accumulate = max(round(nbs / batch_size), 1) # accumulate loss before optimizing
hyp["weight_decay"] *= batch_size * accumulate / nbs # scale weight_decay
optimizer = smart_optimizer(model, opt.optimizer, hyp["lr0"], hyp["momentum"], hyp["weight_decay"])
# Scheduler
if opt.cos_lr:
lf = one_cycle(1, hyp["lrf"], epochs) # cosine 1->hyp['lrf']
else:
lf = lambda x: (1 - x / epochs) * (1.0 - hyp["lrf"]) + hyp["lrf"] # linear
scheduler = lr_scheduler.LambdaLR(optimizer, lr_lambda=lf) # plot_lr_scheduler(optimizer, scheduler, epochs)
# EMA
ema = ModelEMA(model) if RANK in {-1, 0} else None
# Resume
best_fitness, start_epoch = 0.0, 0
if pretrained:
if resume:
best_fitness, start_epoch, epochs = smart_resume(ckpt, optimizer, ema, weights, epochs, resume)
del ckpt, csd
# DP mode
if cuda and RANK == -1 and torch.cuda.device_count() > 1:
LOGGER.warning(
"WARNING ⚠️ DP not recommended, use torch.distributed.run for best DDP Multi-GPU results.\n"
"See Multi-GPU Tutorial at https://docs.ultralytics.com/yolov5/tutorials/multi_gpu_training to get started."
)
model = torch.nn.DataParallel(model)
# SyncBatchNorm
if opt.sync_bn and cuda and RANK != -1:
model = torch.nn.SyncBatchNorm.convert_sync_batchnorm(model).to(device)
LOGGER.info("Using SyncBatchNorm()")
# Trainloader
train_loader, dataset = create_dataloader(
train_path,
imgsz,
batch_size // WORLD_SIZE,
gs,
single_cls,
hyp=hyp,
augment=True,
cache=None if opt.cache == "val" else opt.cache,
rect=opt.rect,
rank=LOCAL_RANK,
workers=workers,
image_weights=opt.image_weights,
quad=opt.quad,
prefix=colorstr("train: "),
shuffle=True,
seed=opt.seed,
)
labels = np.concatenate(dataset.labels, 0)
mlc = int(labels[:, 0].max()) # max label class
assert mlc < nc, f"Label class {mlc} exceeds nc={nc} in {data}. Possible class labels are 0-{nc - 1}"
# Process 0
if RANK in {-1, 0}:
val_loader = create_dataloader(
val_path,
imgsz,
batch_size // WORLD_SIZE * 2,
gs,
single_cls,
hyp=hyp,
cache=None if noval else opt.cache,
rect=True,
rank=-1,
workers=workers * 2,
pad=0.5,
prefix=colorstr("val: "),
)[0]
if not resume:
if not opt.noautoanchor:
check_anchors(dataset, model=model, thr=hyp["anchor_t"], imgsz=imgsz) # run AutoAnchor
model.half().float() # pre-reduce anchor precision
callbacks.run("on_pretrain_routine_end", labels, names)
# DDP mode
if cuda and RANK != -1:
model = smart_DDP(model)
# Model attributes
nl = de_parallel(model).model[-1].nl # number of detection layers (to scale hyps)
hyp["box"] *= 3 / nl # scale to layers
hyp["cls"] *= nc / 80 * 3 / nl # scale to classes and layers
hyp["obj"] *= (imgsz / 640) ** 2 * 3 / nl # scale to image size and layers
hyp["label_smoothing"] = opt.label_smoothing
model.nc = nc # attach number of classes to model
model.hyp = hyp # attach hyperparameters to model
model.class_weights = labels_to_class_weights(dataset.labels, nc).to(device) * nc # attach class weights
model.names = names
# Start training
t0 = time.time()
nb = len(train_loader) # number of batches
nw = max(round(hyp["warmup_epochs"] * nb), 100) # number of warmup iterations, max(3 epochs, 100 iterations)
# nw = min(nw, (epochs - start_epoch) / 2 * nb) # limit warmup to < 1/2 of training
last_opt_step = -1
maps = np.zeros(nc) # mAP per class
results = (0, 0, 0, 0, 0, 0, 0) # P, R, mAP@.5, mAP@.5-.95, val_loss(box, obj, cls)
scheduler.last_epoch = start_epoch - 1 # do not move
scaler = torch.cuda.amp.GradScaler(enabled=amp)
stopper, stop = EarlyStopping(patience=opt.patience), False
compute_loss = ComputeLoss(model) # init loss class
callbacks.run("on_train_start")
LOGGER.info(
f'Image sizes {imgsz} train, {imgsz} val\n'
f'Using {train_loader.num_workers * WORLD_SIZE} dataloader workers\n'
f"Logging results to {colorstr('bold', save_dir)}\n"
f'Starting training for {epochs} epochs...'
)
for epoch in range(start_epoch, epochs): # epoch ------------------------------------------------------------------
callbacks.run("on_train_epoch_start")
model.train()
# Update image weights (optional, single-GPU only)
if opt.image_weights:
cw = model.class_weights.cpu().numpy() * (1 - maps) ** 2 / nc # class weights
iw = labels_to_image_weights(dataset.labels, nc=nc, class_weights=cw) # image weights
dataset.indices = random.choices(range(dataset.n), weights=iw, k=dataset.n) # rand weighted idx
# Update mosaic border (optional)
# b = int(random.uniform(0.25 * imgsz, 0.75 * imgsz + gs) // gs * gs)
# dataset.mosaic_border = [b - imgsz, -b] # height, width borders
mloss = torch.zeros(3, device=device) # mean losses
if RANK != -1:
train_loader.sampler.set_epoch(epoch)
pbar = enumerate(train_loader)
LOGGER.info(("\n" + "%11s" * 7) % ("Epoch", "GPU_mem", "box_loss", "obj_loss", "cls_loss", "Instances", "Size"))
if RANK in {-1, 0}:
pbar = tqdm(pbar, total=nb, bar_format=TQDM_BAR_FORMAT) # progress bar
optimizer.zero_grad()
for i, (imgs, targets, paths, _) in pbar: # batch -------------------------------------------------------------
callbacks.run("on_train_batch_start")
ni = i + nb * epoch # number integrated batches (since train start)
imgs = imgs.to(device, non_blocking=True).float() / 255 # uint8 to float32, 0-255 to 0.0-1.0
# Warmup
if ni <= nw:
xi = [0, nw] # x interp
# compute_loss.gr = np.interp(ni, xi, [0.0, 1.0]) # iou loss ratio (obj_loss = 1.0 or iou)
accumulate = max(1, np.interp(ni, xi, [1, nbs / batch_size]).round())
for j, x in enumerate(optimizer.param_groups):
# bias lr falls from 0.1 to lr0, all other lrs rise from 0.0 to lr0
x["lr"] = np.interp(ni, xi, [hyp["warmup_bias_lr"] if j == 0 else 0.0, x["initial_lr"] * lf(epoch)])
if "momentum" in x:
x["momentum"] = np.interp(ni, xi, [hyp["warmup_momentum"], hyp["momentum"]])
# Multi-scale
if opt.multi_scale:
sz = random.randrange(int(imgsz * 0.5), int(imgsz * 1.5) + gs) // gs * gs # size
sf = sz / max(imgs.shape[2:]) # scale factor
if sf != 1:
ns = [math.ceil(x * sf / gs) * gs for x in imgs.shape[2:]] # new shape (stretched to gs-multiple)
imgs = nn.functional.interpolate(imgs, size=ns, mode="bilinear", align_corners=False)
# Forward
with torch.cuda.amp.autocast(amp):
pred = model(imgs) # forward
loss, loss_items = compute_loss(pred, targets.to(device)) # loss scaled by batch_size
if RANK != -1:
loss *= WORLD_SIZE # gradient averaged between devices in DDP mode
if opt.quad:
loss *= 4.0
# Backward
scaler.scale(loss).backward()
# Optimize - https://pytorch.org/docs/master/notes/amp_examples.html
if ni - last_opt_step >= accumulate:
scaler.unscale_(optimizer) # unscale gradients
torch.nn.utils.clip_grad_norm_(model.parameters(), max_norm=10.0) # clip gradients
scaler.step(optimizer) # optimizer.step
scaler.update()
optimizer.zero_grad()
if ema:
ema.update(model)
last_opt_step = ni
# Log
if RANK in {-1, 0}:
mloss = (mloss * i + loss_items) / (i + 1) # update mean losses
mem = f"{torch.cuda.memory_reserved() / 1E9 if torch.cuda.is_available() else 0:.3g}G" # (GB)
pbar.set_description(
("%11s" * 2 + "%11.4g" * 5)
% (f"{epoch}/{epochs - 1}", mem, *mloss, targets.shape[0], imgs.shape[-1])
)
callbacks.run("on_train_batch_end", model, ni, imgs, targets, paths, list(mloss))
if callbacks.stop_training:
return
# end batch ------------------------------------------------------------------------------------------------
# Scheduler
lr = [x["lr"] for x in optimizer.param_groups] # for loggers
scheduler.step()
if RANK in {-1, 0}:
# mAP
callbacks.run("on_train_epoch_end", epoch=epoch)
ema.update_attr(model, include=["yaml", "nc", "hyp", "names", "stride", "class_weights"])
final_epoch = (epoch + 1 == epochs) or stopper.possible_stop
if not noval or final_epoch: # Calculate mAP
results, maps, _ = validate.run(
data_dict,
batch_size=batch_size // WORLD_SIZE * 2,
imgsz=imgsz,
half=amp,
model=ema.ema,
single_cls=single_cls,
dataloader=val_loader,
save_dir=save_dir,
plots=False,
callbacks=callbacks,
compute_loss=compute_loss,
)
# Update best mAP
fi = fitness(np.array(results).reshape(1, -1)) # weighted combination of [P, R, mAP@.5, mAP@.5-.95]
stop = stopper(epoch=epoch, fitness=fi) # early stop check
if fi > best_fitness:
best_fitness = fi
log_vals = list(mloss) + list(results) + lr
callbacks.run("on_fit_epoch_end", log_vals, epoch, best_fitness, fi)
# Save model
if (not nosave) or (final_epoch and not evolve): # if save
ckpt = {
"epoch": epoch,
"best_fitness": best_fitness,
"model": deepcopy(de_parallel(model)).half(),
"ema": deepcopy(ema.ema).half(),
"updates": ema.updates,
"optimizer": optimizer.state_dict(),
"opt": vars(opt),
"git": GIT_INFO, # {remote, branch, commit} if a git repo
"date": datetime.now().isoformat(),
}
# Save last, best and delete
torch.save(ckpt, last)
if best_fitness == fi:
torch.save(ckpt, best)
if opt.save_period > 0 and epoch % opt.save_period == 0:
torch.save(ckpt, w / f"epoch{epoch}.pt")
del ckpt
callbacks.run("on_model_save", last, epoch, final_epoch, best_fitness, fi)
# EarlyStopping
if RANK != -1: # if DDP training
broadcast_list = [stop if RANK == 0 else None]
dist.broadcast_object_list(broadcast_list, 0) # broadcast 'stop' to all ranks
if RANK != 0:
stop = broadcast_list[0]
if stop:
break # must break all DDP ranks
# end epoch ----------------------------------------------------------------------------------------------------
# end training -----------------------------------------------------------------------------------------------------
if RANK in {-1, 0}:
LOGGER.info(f"\n{epoch - start_epoch + 1} epochs completed in {(time.time() - t0) / 3600:.3f} hours.")
for f in last, best:
if f.exists():
strip_optimizer(f) # strip optimizers
if f is best:
LOGGER.info(f"\nValidating {f}...")
results, _, _ = validate.run(
data_dict,
batch_size=batch_size // WORLD_SIZE * 2,
imgsz=imgsz,
model=attempt_load(f, device).half(),
iou_thres=0.65 if is_coco else 0.60, # best pycocotools at iou 0.65
single_cls=single_cls,
dataloader=val_loader,
save_dir=save_dir,
save_json=is_coco,
verbose=True,
plots=plots,
callbacks=callbacks,
compute_loss=compute_loss,
) # val best model with plots
if is_coco:
callbacks.run("on_fit_epoch_end", list(mloss) + list(results) + lr, epoch, best_fitness, fi)
callbacks.run("on_train_end", last, best, epoch, results)
torch.cuda.empty_cache()
return results
class Callbacks:
"""Handles all registered callbacks for YOLOv5 Hooks."""
def __init__(self):
"""Initializes a Callbacks object to manage registered YOLOv5 training event hooks."""
self._callbacks = {
"on_pretrain_routine_start": [],
"on_pretrain_routine_end": [],
"on_train_start": [],
"on_train_epoch_start": [],
"on_train_batch_start": [],
"optimizer_step": [],
"on_before_zero_grad": [],
"on_train_batch_end": [],
"on_train_epoch_end": [],
"on_val_start": [],
"on_val_batch_start": [],
"on_val_image_end": [],
"on_val_batch_end": [],
"on_val_end": [],
"on_fit_epoch_end": [], # fit = train + val
"on_model_save": [],
"on_train_end": [],
"on_params_update": [],
"teardown": [],
}
self.stop_training = False # set True to interrupt training
def register_action(self, hook, name="", callback=None):
"""
Register a new action to a callback hook.
Args:
hook: The callback hook name to register the action to
name: The name of the action for later reference
callback: The callback to fire
"""
assert hook in self._callbacks, f"hook '{hook}' not found in callbacks {self._callbacks}"
assert callable(callback), f"callback '{callback}' is not callable"
self._callbacks[hook].append({"name": name, "callback": callback})
def get_registered_actions(self, hook=None):
"""
Returns all the registered actions by callback hook.
Args:
hook: The name of the hook to check, defaults to all
"""
return self._callbacks[hook] if hook else self._callbacks
def run(self, hook, *args, thread=False, **kwargs):
"""
Loop through the registered actions and fire all callbacks on main thread.
Args:
hook: The name of the hook to check, defaults to all
args: Arguments to receive from YOLOv5
thread: (boolean) Run callbacks in daemon thread
kwargs: Keyword Arguments to receive from YOLOv5
"""
assert hook in self._callbacks, f"hook '{hook}' not found in callbacks {self._callbacks}"
for logger in self._callbacks[hook]:
if thread:
threading.Thread(target=logger["callback"], args=args, kwargs=kwargs, daemon=True).start()
else:
logger["callback"](*args, **kwargs)
def increment_path(path, exist_ok=False, sep="", mkdir=False):
"""
Generates an incremented file or directory path if it exists, with optional mkdir; args: path, exist_ok=False,
sep="", mkdir=False.
Example: runs/exp --> runs/exp{sep}2, runs/exp{sep}3, ... etc
"""
path = Path(path) # os-agnostic
if path.exists() and not exist_ok:
path, suffix = (path.with_suffix(""), path.suffix) if path.is_file() else (path, "")
# Method 1
for n in range(2, 9999):
p = f"{path}{sep}{n}{suffix}" # increment path
if not os.path.exists(p): #
break
path = Path(p)
# Method 2 (deprecated)
# dirs = glob.glob(f"{path}{sep}*") # similar paths
# matches = [re.search(rf"{path.stem}{sep}(\d+)", d) for d in dirs]
# i = [int(m.groups()[0]) for m in matches if m] # indices
# n = max(i) + 1 if i else 2 # increment number
# path = Path(f"{path}{sep}{n}{suffix}") # increment path
if mkdir:
path.mkdir(parents=True, exist_ok=True) # make directory
return path
def select_device(device="", batch_size=0, newline=True):
"""Selects computing device (CPU, CUDA GPU, MPS) for YOLOv5 model deployment, logging device info."""
s = f"YOLOv5 🚀 {git_describe() or file_date()} Python-{platform.python_version()} torch-{torch.__version__} "
device = str(device).strip().lower().replace("cuda:", "").replace("none", "") # to string, 'cuda:0' to '0'
cpu = device == "cpu"
mps = device == "mps" # Apple Metal Performance Shaders (MPS)
if cpu or mps:
os.environ["CUDA_VISIBLE_DEVICES"] = "-1" # force torch.cuda.is_available() = False
elif device: # non-cpu device requested
os.environ["CUDA_VISIBLE_DEVICES"] = device # set environment variable - must be before assert is_available()
assert torch.cuda.is_available() and torch.cuda.device_count() >= len(
device.replace(",", "")
), f"Invalid CUDA '--device {device}' requested, use '--device cpu' or pass valid CUDA device(s)"
if not cpu and not mps and torch.cuda.is_available(): # prefer GPU if available
devices = device.split(",") if device else "0" # range(torch.cuda.device_count()) # i.e. 0,1,6,7
n = len(devices) # device count
if n > 1 and batch_size > 0: # check batch_size is divisible by device_count
assert batch_size % n == 0, f"batch-size {batch_size} not multiple of GPU count {n}"
space = " " * (len(s) + 1)
for i, d in enumerate(devices):
p = torch.cuda.get_device_properties(i)
s += f"{'' if i == 0 else space}CUDA:{d} ({p.name}, {p.total_memory / (1 << 20):.0f}MiB)\n" # bytes to MB
arg = "cuda:0"
elif mps and getattr(torch, "has_mps", False) and torch.backends.mps.is_available(): # prefer MPS if available
s += "MPS\n"
arg = "mps"
else: # revert to CPU
s += "CPU\n"
arg = "cpu"
if not newline:
s = s.rstrip()
LOGGER.info(s)
return torch.device(arg)
The provided code snippet includes necessary dependencies for implementing the `run` function. Write a Python function `def run(parameters, opt)` to solve the following problem:
Executes YOLOv5 training with given hyperparameters and options, setting up device and training directories.
Here is the function:
def run(parameters, opt):
"""Executes YOLOv5 training with given hyperparameters and options, setting up device and training directories."""
hyp_dict = {k: v for k, v in parameters.items() if k not in ["epochs", "batch_size"]}
opt.save_dir = str(increment_path(Path(opt.project) / opt.name, exist_ok=opt.exist_ok or opt.evolve))
opt.batch_size = parameters.get("batch_size")
opt.epochs = parameters.get("epochs")
device = select_device(opt.device, batch_size=opt.batch_size)
train(hyp_dict, opt, device, callbacks=Callbacks()) | Executes YOLOv5 training with given hyperparameters and options, setting up device and training directories. |
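As a hedged illustration of how `run` handles a sweep's `parameters` dict, the sketch below separates hyperparameters from `epochs`/`batch_size` exactly as the function does; the hyperparameter keys shown (`lr0`, `momentum`, `weight_decay`) are assumptions for the example, not the full YOLOv5 hyperparameter schema.
# Illustrative sweep parameters; only "epochs" and "batch_size" are treated specially by run().
parameters = {"lr0": 0.01, "momentum": 0.937, "weight_decay": 0.0005, "epochs": 50, "batch_size": 16}
hyp_dict = {k: v for k, v in parameters.items() if k not in ["epochs", "batch_size"]}
print(hyp_dict)                                                 # {'lr0': 0.01, 'momentum': 0.937, 'weight_decay': 0.0005}
print(parameters.get("epochs"), parameters.get("batch_size"))   # 50 16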
155,215 | import logging
import os
from urllib.parse import urlparse
try:
import comet_ml
except ImportError:
comet_ml = None
import yaml
COMET_PREFIX = "comet://"
def download_model_checkpoint(opt, experiment):
"""Downloads YOLOv5 model checkpoint from Comet ML experiment, updating `opt.weights` with download path."""
model_dir = f"{opt.project}/{experiment.name}"
os.makedirs(model_dir, exist_ok=True)
model_name = COMET_MODEL_NAME
model_asset_list = experiment.get_model_asset_list(model_name)
if len(model_asset_list) == 0:
logger.error(f"COMET ERROR: No checkpoints found for model name : {model_name}")
return
model_asset_list = sorted(
model_asset_list,
key=lambda x: x["step"],
reverse=True,
)
logged_checkpoint_map = {asset["fileName"]: asset["assetId"] for asset in model_asset_list}
resource_url = urlparse(opt.weights)
checkpoint_filename = resource_url.query
if checkpoint_filename:
asset_id = logged_checkpoint_map.get(checkpoint_filename)
else:
asset_id = logged_checkpoint_map.get(COMET_DEFAULT_CHECKPOINT_FILENAME)
checkpoint_filename = COMET_DEFAULT_CHECKPOINT_FILENAME
if asset_id is None:
logger.error(f"COMET ERROR: Checkpoint {checkpoint_filename} not found in the given Experiment")
return
try:
logger.info(f"COMET INFO: Downloading checkpoint {checkpoint_filename}")
asset_filename = checkpoint_filename
model_binary = experiment.get_asset(asset_id, return_type="binary", stream=False)
model_download_path = f"{model_dir}/{asset_filename}"
with open(model_download_path, "wb") as f:
f.write(model_binary)
opt.weights = model_download_path
except Exception as e:
logger.warning("COMET WARNING: Unable to download checkpoint from Comet")
logger.exception(e)
try:
import comet_ml
# Project Configuration
config = comet_ml.config.get_config()
COMET_PROJECT_NAME = config.get_string(os.getenv("COMET_PROJECT_NAME"), "comet.project_name", default="yolov5")
except ImportError:
comet_ml = None
COMET_PROJECT_NAME = None
The provided code snippet includes necessary dependencies for implementing the `check_comet_weights` function. Write a Python function `def check_comet_weights(opt)` to solve the following problem:
Downloads model weights from Comet and updates the weights path to point to saved weights location. Args: opt (argparse.Namespace): Command Line arguments passed to YOLOv5 training script Returns: None/bool: Return True if weights are successfully downloaded else return None
Here is the function:
def check_comet_weights(opt):
"""
Downloads model weights from Comet and updates the weights path to point to saved weights location.
Args:
opt (argparse.Namespace): Command Line arguments passed
to YOLOv5 training script
Returns:
None/bool: Return True if weights are successfully downloaded
else return None
"""
if comet_ml is None:
return
if isinstance(opt.weights, str) and opt.weights.startswith(COMET_PREFIX):
api = comet_ml.API()
resource = urlparse(opt.weights)
experiment_path = f"{resource.netloc}{resource.path}"
experiment = api.get(experiment_path)
download_model_checkpoint(opt, experiment)
return True
return None | Downloads model weights from Comet and updates the weights path to point to saved weights location. Args: opt (argparse.Namespace): Command Line arguments passed to YOLOv5 training script Returns: None/bool: Return True if weights are successfully downloaded else return None |
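For clarity, a minimal sketch of how a `comet://` weights string is taken apart by `urlparse` inside `check_comet_weights`/`download_model_checkpoint`: the netloc plus path identify the experiment, and the query (if any) names the checkpoint file. The workspace, project, and experiment id below are made up.
from urllib.parse import urlparse

weights = "comet://my-workspace/yolov5/abc123def456?best.pt"   # hypothetical comet:// weights string
resource = urlparse(weights)
experiment_path = f"{resource.netloc}{resource.path}"          # 'my-workspace/yolov5/abc123def456'
checkpoint_filename = resource.query                           # 'best.pt'; an empty query falls back to the default checkpoint
print(experiment_path, checkpoint_filename)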
155,216 | import logging
import os
from urllib.parse import urlparse
try:
import comet_ml
except ImportError:
comet_ml = None
import yaml
COMET_PREFIX = "comet://"
def download_model_checkpoint(opt, experiment):
"""Downloads YOLOv5 model checkpoint from Comet ML experiment, updating `opt.weights` with download path."""
model_dir = f"{opt.project}/{experiment.name}"
os.makedirs(model_dir, exist_ok=True)
model_name = COMET_MODEL_NAME
model_asset_list = experiment.get_model_asset_list(model_name)
if len(model_asset_list) == 0:
logger.error(f"COMET ERROR: No checkpoints found for model name : {model_name}")
return
model_asset_list = sorted(
model_asset_list,
key=lambda x: x["step"],
reverse=True,
)
logged_checkpoint_map = {asset["fileName"]: asset["assetId"] for asset in model_asset_list}
resource_url = urlparse(opt.weights)
checkpoint_filename = resource_url.query
if checkpoint_filename:
asset_id = logged_checkpoint_map.get(checkpoint_filename)
else:
asset_id = logged_checkpoint_map.get(COMET_DEFAULT_CHECKPOINT_FILENAME)
checkpoint_filename = COMET_DEFAULT_CHECKPOINT_FILENAME
if asset_id is None:
logger.error(f"COMET ERROR: Checkpoint {checkpoint_filename} not found in the given Experiment")
return
try:
logger.info(f"COMET INFO: Downloading checkpoint {checkpoint_filename}")
asset_filename = checkpoint_filename
model_binary = experiment.get_asset(asset_id, return_type="binary", stream=False)
model_download_path = f"{model_dir}/{asset_filename}"
with open(model_download_path, "wb") as f:
f.write(model_binary)
opt.weights = model_download_path
except Exception as e:
logger.warning("COMET WARNING: Unable to download checkpoint from Comet")
logger.exception(e)
def set_opt_parameters(opt, experiment):
"""
Update the opts Namespace with parameters from Comet's ExistingExperiment when resuming a run.
Args:
opt (argparse.Namespace): Namespace of command line options
experiment (comet_ml.APIExperiment): Comet API Experiment object
"""
asset_list = experiment.get_asset_list()
resume_string = opt.resume
for asset in asset_list:
if asset["fileName"] == "opt.yaml":
asset_id = asset["assetId"]
asset_binary = experiment.get_asset(asset_id, return_type="binary", stream=False)
opt_dict = yaml.safe_load(asset_binary)
for key, value in opt_dict.items():
setattr(opt, key, value)
opt.resume = resume_string
# Save hyperparameters to YAML file
# Necessary to pass checks in training script
save_dir = f"{opt.project}/{experiment.name}"
os.makedirs(save_dir, exist_ok=True)
hyp_yaml_path = f"{save_dir}/hyp.yaml"
with open(hyp_yaml_path, "w") as f:
yaml.dump(opt.hyp, f)
opt.hyp = hyp_yaml_path
try:
import comet_ml
# Project Configuration
config = comet_ml.config.get_config()
COMET_PROJECT_NAME = config.get_string(os.getenv("COMET_PROJECT_NAME"), "comet.project_name", default="yolov5")
except ImportError:
comet_ml = None
COMET_PROJECT_NAME = None
The provided code snippet includes necessary dependencies for implementing the `check_comet_resume` function. Write a Python function `def check_comet_resume(opt)` to solve the following problem:
Restores run parameters to their original state based on the model checkpoint and logged Experiment parameters. Args: opt (argparse.Namespace): Command Line arguments passed to YOLOv5 training script Returns: None/bool: Return True if the run is restored successfully else return None
Here is the function:
def check_comet_resume(opt):
"""
    Restores run parameters to their original state based on the model checkpoint and logged Experiment parameters.
Args:
opt (argparse.Namespace): Command Line arguments passed
to YOLOv5 training script
Returns:
None/bool: Return True if the run is restored successfully
else return None
"""
if comet_ml is None:
return
if isinstance(opt.resume, str) and opt.resume.startswith(COMET_PREFIX):
api = comet_ml.API()
resource = urlparse(opt.resume)
experiment_path = f"{resource.netloc}{resource.path}"
experiment = api.get(experiment_path)
set_opt_parameters(opt, experiment)
download_model_checkpoint(opt, experiment)
return True
    return None | Restores run parameters to their original state based on the model checkpoint and logged Experiment parameters. Args: opt (argparse.Namespace): Command Line arguments passed to YOLOv5 training script Returns: None/bool: Return True if the run is restored successfully else return None |
155,217 | import logging
import os
import sys
from contextlib import contextmanager
from pathlib import Path
from utils.general import LOGGER, colorstr
The provided code snippet includes necessary dependencies for implementing the `all_logging_disabled` function. Write a Python function `def all_logging_disabled(highest_level=logging.CRITICAL)` to solve the following problem:
source - https://gist.github.com/simon-weber/7853144 A context manager that will prevent any logging messages triggered during the body from being processed. :param highest_level: the maximum logging level in use. This would only need to be changed if a custom level greater than CRITICAL is defined.
Here is the function:
@contextmanager
def all_logging_disabled(highest_level=logging.CRITICAL):
"""source - https://gist.github.com/simon-weber/7853144
A context manager that will prevent any logging messages triggered during the body from being processed.
:param highest_level: the maximum logging level in use.
This would only need to be changed if a custom level greater than CRITICAL is defined.
"""
previous_level = logging.root.manager.disable
logging.disable(highest_level)
try:
yield
finally:
logging.disable(previous_level) | source - https://gist.github.com/simon-weber/7853144 A context manager that will prevent any logging messages triggered during the body from being processed. :param highest_level: the maximum logging level in use. This would only need to be changed if a custom level greater than CRITICAL is defined. |
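A short, self-contained usage sketch of the pattern above: records at or below `highest_level` are swallowed inside the `with` block, and the previous disable level is restored afterwards. The helper name `_suppress_logging` is illustrative and simply mirrors `all_logging_disabled`.
import logging
from contextlib import contextmanager

@contextmanager
def _suppress_logging(highest_level=logging.CRITICAL):
    previous_level = logging.root.manager.disable
    logging.disable(highest_level)
    try:
        yield
    finally:
        logging.disable(previous_level)

logging.basicConfig(level=logging.INFO)
with _suppress_logging():
    logging.getLogger(__name__).warning("suppressed inside the context manager")
logging.getLogger(__name__).warning("emitted after the previous disable level is restored")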
155,218 | import argparse
import io
import torch
from flask import Flask, request
from PIL import Image
models = {}
The provided code snippet includes necessary dependencies for implementing the `predict` function. Write a Python function `def predict(model)` to solve the following problem:
Predict and return object detections in JSON format given an image and model name via a Flask REST API POST request.
Here is the function:
def predict(model):
"""Predict and return object detections in JSON format given an image and model name via a Flask REST API POST
request.
"""
if request.method != "POST":
return
if request.files.get("image"):
# Method 1
# with request.files["image"] as f:
# im = Image.open(io.BytesIO(f.read()))
# Method 2
im_file = request.files["image"]
im_bytes = im_file.read()
im = Image.open(io.BytesIO(im_bytes))
if model in models:
results = models[model](im, size=640) # reduce size=320 for faster inference
return results.pandas().xyxy[0].to_json(orient="records") | Predict and return object detections in JSON format given an image and model name via a Flask REST API POST request. |
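A hedged client-side sketch of calling such a Flask endpoint with `requests`; the route, port, and image filename are assumptions, so adjust them to however the app actually registers `predict` (e.g. a route carrying the model name).
import requests

url = "http://localhost:5000/v1/object-detection/yolov5s"   # hypothetical route and port
with open("image.jpg", "rb") as f:                          # placeholder image path
    response = requests.post(url, files={"image": f})
print(response.json())  # list of detection records (coordinates, confidence, class, name)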
155,219 | import contextlib
import glob
import inspect
import logging
import logging.config
import math
import os
import platform
import random
import re
import signal
import subprocess
import sys
import time
import urllib
from copy import deepcopy
from datetime import datetime
from itertools import repeat
from multiprocessing.pool import ThreadPool
from pathlib import Path
from subprocess import check_output
from tarfile import is_tarfile
from typing import Optional
from zipfile import ZipFile, is_zipfile
import cv2
import numpy as np
import pandas as pd
import pkg_resources as pkg
import torch
import torchvision
import yaml
from ultralytics.utils.checks import check_requirements
from utils import TryExcept, emojis
from utils.downloads import curl_download, gsutil_getsize
from utils.metrics import box_iou, fitness
The provided code snippet includes necessary dependencies for implementing the `is_chinese` function. Write a Python function `def is_chinese(s="人工智能")` to solve the following problem:
Determines if a string `s` contains any Chinese characters; returns `True` if so, otherwise `False`.
Here is the function:
def is_chinese(s="人工智能"):
"""Determines if a string `s` contains any Chinese characters; returns `True` if so, otherwise `False`."""
return bool(re.search("[\u4e00-\u9fff]", str(s))) | Determines if a string `s` contains any Chinese characters; returns `True` if so, otherwise `False`. |
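A tiny self-contained sketch of the same check: any character in the CJK Unified Ideographs range U+4E00..U+9FFF triggers a match.
import re

def _has_chinese(s):
    # Same regex as is_chinese above
    return bool(re.search("[\u4e00-\u9fff]", str(s)))

print(_has_chinese("人工智能"))         # True
print(_has_chinese("yolov5"))           # False
print(_has_chinese("model-模型.yaml"))  # True - a single Chinese character is enough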
155,220 | import contextlib
import glob
import inspect
import logging
import logging.config
import math
import os
import platform
import random
import re
import signal
import subprocess
import sys
import time
import urllib
from copy import deepcopy
from datetime import datetime
from itertools import repeat
from multiprocessing.pool import ThreadPool
from pathlib import Path
from subprocess import check_output
from tarfile import is_tarfile
from typing import Optional
from zipfile import ZipFile, is_zipfile
import cv2
import numpy as np
import pandas as pd
import pkg_resources as pkg
import torch
import torchvision
import yaml
from ultralytics.utils.checks import check_requirements
from utils import TryExcept, emojis
from utils.downloads import curl_download, gsutil_getsize
from utils.metrics import box_iou, fitness
The provided code snippet includes necessary dependencies for implementing the `is_colab` function. Write a Python function `def is_colab()` to solve the following problem:
Checks if the current environment is a Google Colab instance; returns `True` for Colab, otherwise `False`.
Here is the function:
def is_colab():
"""Checks if the current environment is a Google Colab instance; returns `True` for Colab, otherwise `False`."""
return "google.colab" in sys.modules | Checks if the current environment is a Google Colab instance; returns `True` for Colab, otherwise `False`. |
155,221 | import contextlib
import glob
import inspect
import logging
import logging.config
import math
import os
import platform
import random
import re
import signal
import subprocess
import sys
import time
import urllib
from copy import deepcopy
from datetime import datetime
from itertools import repeat
from multiprocessing.pool import ThreadPool
from pathlib import Path
from subprocess import check_output
from tarfile import is_tarfile
from typing import Optional
from zipfile import ZipFile, is_zipfile
import cv2
import numpy as np
import pandas as pd
import pkg_resources as pkg
import torch
import torchvision
import yaml
from ultralytics.utils.checks import check_requirements
from utils import TryExcept, emojis
from utils.downloads import curl_download, gsutil_getsize
from utils.metrics import box_iou, fitness
os.environ["NUMEXPR_MAX_THREADS"] = str(NUM_THREADS)
os.environ["OMP_NUM_THREADS"] = "1" if platform.system() == "darwin" else str(NUM_THREADS)
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "2"
The provided code snippet includes necessary dependencies for implementing the `is_kaggle` function. Write a Python function `def is_kaggle()` to solve the following problem:
Checks if the current environment is a Kaggle Notebook by validating environment variables.
Here is the function:
def is_kaggle():
"""Checks if the current environment is a Kaggle Notebook by validating environment variables."""
return os.environ.get("PWD") == "/kaggle/working" and os.environ.get("KAGGLE_URL_BASE") == "https://www.kaggle.com" | Checks if the current environment is a Kaggle Notebook by validating environment variables. |
155,222 | import contextlib
import glob
import inspect
import logging
import logging.config
import math
import os
import platform
import random
import re
import signal
import subprocess
import sys
import time
import urllib
from copy import deepcopy
from datetime import datetime
from itertools import repeat
from multiprocessing.pool import ThreadPool
from pathlib import Path
from subprocess import check_output
from tarfile import is_tarfile
from typing import Optional
from zipfile import ZipFile, is_zipfile
import cv2
import numpy as np
import pandas as pd
import pkg_resources as pkg
import torch
import torchvision
import yaml
from ultralytics.utils.checks import check_requirements
from utils import TryExcept, emojis
from utils.downloads import curl_download, gsutil_getsize
from utils.metrics import box_iou, fitness
os.environ["NUMEXPR_MAX_THREADS"] = str(NUM_THREADS)
os.environ["OMP_NUM_THREADS"] = "1" if platform.system() == "darwin" else str(NUM_THREADS)
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "2"
LOGGING_NAME = "yolov5"
The provided code snippet includes necessary dependencies for implementing the `set_logging` function. Write a Python function `def set_logging(name=LOGGING_NAME, verbose=True)` to solve the following problem:
Configures logging with specified verbosity; `name` sets the logger's name, `verbose` controls logging level.
Here is the function:
def set_logging(name=LOGGING_NAME, verbose=True):
"""Configures logging with specified verbosity; `name` sets the logger's name, `verbose` controls logging level."""
rank = int(os.getenv("RANK", -1)) # rank in world for Multi-GPU trainings
level = logging.INFO if verbose and rank in {-1, 0} else logging.ERROR
logging.config.dictConfig(
{
"version": 1,
"disable_existing_loggers": False,
"formatters": {name: {"format": "%(message)s"}},
"handlers": {
name: {
"class": "logging.StreamHandler",
"formatter": name,
"level": level,
}
},
"loggers": {
name: {
"level": level,
"handlers": [name],
"propagate": False,
}
},
}
) | Configures logging with specified verbosity; `name` sets the logger's name, `verbose` controls logging level. |
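A minimal self-contained sketch of the dictConfig pattern used by `set_logging`: one named logger with a single `StreamHandler`, message-only formatting, and propagation disabled so records are emitted exactly once.
import logging
import logging.config

name, level = "yolov5", logging.INFO
logging.config.dictConfig({
    "version": 1,
    "disable_existing_loggers": False,
    "formatters": {name: {"format": "%(message)s"}},
    "handlers": {name: {"class": "logging.StreamHandler", "formatter": name, "level": level}},
    "loggers": {name: {"level": level, "handlers": [name], "propagate": False}},
})
logging.getLogger(name).info("configured: emitted once, message-only format")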
155,223 | import contextlib
import glob
import inspect
import logging
import logging.config
import math
import os
import platform
import random
import re
import signal
import subprocess
import sys
import time
import urllib
from copy import deepcopy
from datetime import datetime
from itertools import repeat
from multiprocessing.pool import ThreadPool
from pathlib import Path
from subprocess import check_output
from tarfile import is_tarfile
from typing import Optional
from zipfile import ZipFile, is_zipfile
import cv2
import numpy as np
import pandas as pd
import pkg_resources as pkg
import torch
import torchvision
import yaml
from ultralytics.utils.checks import check_requirements
from utils import TryExcept, emojis
from utils.downloads import curl_download, gsutil_getsize
from utils.metrics import box_iou, fitness
os.environ["NUMEXPR_MAX_THREADS"] = str(NUM_THREADS)
os.environ["OMP_NUM_THREADS"] = "1" if platform.system() == "darwin" else str(NUM_THREADS)
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "2"
def is_writeable(dir, test=False):
"""Checks if a directory is writable, optionally testing by creating a temporary file if `test=True`."""
if not test:
return os.access(dir, os.W_OK) # possible issues on Windows
file = Path(dir) / "tmp.txt"
try:
with open(file, "w"): # open file with write permissions
pass
file.unlink() # remove file
return True
except OSError:
return False
if platform.system() == "Windows":
for fn in LOGGER.info, LOGGER.warning:
setattr(LOGGER, fn.__name__, lambda x: fn(emojis(x))) # emoji safe logging
if Path(inspect.stack()[0].filename).parent.parent.as_posix() in inspect.stack()[-1].filename:
cv2.imread, cv2.imwrite, cv2.imshow = imread, imwrite, imshow # redefine
The provided code snippet includes necessary dependencies for implementing the `user_config_dir` function. Write a Python function `def user_config_dir(dir="Ultralytics", env_var="YOLOV5_CONFIG_DIR")` to solve the following problem:
Returns user configuration directory path, preferring environment variable `YOLOV5_CONFIG_DIR` if set, else OS-specific.
Here is the function:
def user_config_dir(dir="Ultralytics", env_var="YOLOV5_CONFIG_DIR"):
"""Returns user configuration directory path, preferring environment variable `YOLOV5_CONFIG_DIR` if set, else OS-
specific.
"""
env = os.getenv(env_var)
if env:
path = Path(env) # use environment variable
else:
cfg = {"Windows": "AppData/Roaming", "Linux": ".config", "Darwin": "Library/Application Support"} # 3 OS dirs
path = Path.home() / cfg.get(platform.system(), "") # OS-specific config dir
path = (path if is_writeable(path) else Path("/tmp")) / dir # GCP and AWS lambda fix, only /tmp is writeable
path.mkdir(exist_ok=True) # make if required
    return path | Returns user configuration directory path, preferring environment variable `YOLOV5_CONFIG_DIR` if set, else OS-specific. |
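A short usage sketch of the precedence: when the `YOLOV5_CONFIG_DIR` environment variable is set it wins outright; otherwise the OS-specific home-directory default is used (falling back to `/tmp` when that is not writeable). The demo path below is illustrative only.
import os
from pathlib import Path

os.environ["YOLOV5_CONFIG_DIR"] = "/tmp/yolov5_config_demo"             # demo override, not a real setting
env = os.getenv("YOLOV5_CONFIG_DIR")
path = Path(env) if env else Path.home() / ".config" / "Ultralytics"    # Linux-style fallback shown
path.mkdir(parents=True, exist_ok=True)
print(path)   # /tmp/yolov5_config_demo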
155,224 | import contextlib
import glob
import inspect
import logging
import logging.config
import math
import os
import platform
import random
import re
import signal
import subprocess
import sys
import time
import urllib
from copy import deepcopy
from datetime import datetime
from itertools import repeat
from multiprocessing.pool import ThreadPool
from pathlib import Path
from subprocess import check_output
from tarfile import is_tarfile
from typing import Optional
from zipfile import ZipFile, is_zipfile
import cv2
import numpy as np
import pandas as pd
import pkg_resources as pkg
import torch
import torchvision
import yaml
from ultralytics.utils.checks import check_requirements
from utils import TryExcept, emojis
from utils.downloads import curl_download, gsutil_getsize
from utils.metrics import box_iou, fitness
os.environ["NUMEXPR_MAX_THREADS"] = str(NUM_THREADS)
os.environ["OMP_NUM_THREADS"] = "1" if platform.system() == "darwin" else str(NUM_THREADS)
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "2"
The provided code snippet includes necessary dependencies for implementing the `get_latest_run` function. Write a Python function `def get_latest_run(search_dir=".")` to solve the following problem:
Returns the path to the most recent 'last.pt' file in /runs to resume from, searches in `search_dir`.
Here is the function:
def get_latest_run(search_dir="."):
"""Returns the path to the most recent 'last.pt' file in /runs to resume from, searches in `search_dir`."""
last_list = glob.glob(f"{search_dir}/**/last*.pt", recursive=True)
return max(last_list, key=os.path.getctime) if last_list else "" | Returns the path to the most recent 'last.pt' file in /runs to resume from, searches in `search_dir`. |
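A self-contained sketch of the recursive-glob plus newest-by-ctime pattern used above, run against a throwaway directory tree; the experiment names are made up.
import glob
import os
import tempfile
import time
from pathlib import Path

root = Path(tempfile.mkdtemp())
(root / "exp1" / "weights").mkdir(parents=True)
(root / "exp2" / "weights").mkdir(parents=True)
(root / "exp1" / "weights" / "last.pt").touch()
time.sleep(0.01)
(root / "exp2" / "weights" / "last.pt").touch()                 # most recent checkpoint
last_list = glob.glob(f"{root}/**/last*.pt", recursive=True)
print(max(last_list, key=os.path.getctime))                     # .../exp2/weights/last.pt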
155,225 | import contextlib
import glob
import inspect
import logging
import logging.config
import math
import os
import platform
import random
import re
import signal
import subprocess
import sys
import time
import urllib
from copy import deepcopy
from datetime import datetime
from itertools import repeat
from multiprocessing.pool import ThreadPool
from pathlib import Path
from subprocess import check_output
from tarfile import is_tarfile
from typing import Optional
from zipfile import ZipFile, is_zipfile
import cv2
import numpy as np
import pandas as pd
import pkg_resources as pkg
import torch
import torchvision
import yaml
from ultralytics.utils.checks import check_requirements
from utils import TryExcept, emojis
from utils.downloads import curl_download, gsutil_getsize
from utils.metrics import box_iou, fitness
if Path(inspect.stack()[0].filename).parent.parent.as_posix() in inspect.stack()[-1].filename:
cv2.imread, cv2.imwrite, cv2.imshow = imread, imwrite, imshow # redefine
The provided code snippet includes necessary dependencies for implementing the `file_age` function. Write a Python function `def file_age(path=__file__)` to solve the following problem:
Calculates and returns the age of a file in days based on its last modification time.
Here is the function:
def file_age(path=__file__):
"""Calculates and returns the age of a file in days based on its last modification time."""
dt = datetime.now() - datetime.fromtimestamp(Path(path).stat().st_mtime) # delta
return dt.days # + dt.seconds / 86400 # fractional days | Calculates and returns the age of a file in days based on its last modification time. |
155,226 | import contextlib
import glob
import inspect
import logging
import logging.config
import math
import os
import platform
import random
import re
import signal
import subprocess
import sys
import time
import urllib
from copy import deepcopy
from datetime import datetime
from itertools import repeat
from multiprocessing.pool import ThreadPool
from pathlib import Path
from subprocess import check_output
from tarfile import is_tarfile
from typing import Optional
from zipfile import ZipFile, is_zipfile
import cv2
import numpy as np
import pandas as pd
import pkg_resources as pkg
import torch
import torchvision
import yaml
from ultralytics.utils.checks import check_requirements
from utils import TryExcept, emojis
from utils.downloads import curl_download, gsutil_getsize
from utils.metrics import box_iou, fitness
LOGGER = logging.getLogger(LOGGING_NAME)
def check_online():
"""Checks internet connectivity by attempting to create a connection to "1.1.1.1" on port 443, retries once if the
first attempt fails.
"""
import socket
def run_once():
# Check once
try:
socket.create_connection(("1.1.1.1", 443), 5) # check host accessibility
return True
except OSError:
return False
return run_once() or run_once() # check twice to increase robustness to intermittent connectivity issues
def colorstr(*input):
"""
Colors a string using ANSI escape codes, e.g., colorstr('blue', 'hello world').
See https://en.wikipedia.org/wiki/ANSI_escape_code.
"""
*args, string = input if len(input) > 1 else ("blue", "bold", input[0]) # color arguments, string
colors = {
"black": "\033[30m", # basic colors
"red": "\033[31m",
"green": "\033[32m",
"yellow": "\033[33m",
"blue": "\033[34m",
"magenta": "\033[35m",
"cyan": "\033[36m",
"white": "\033[37m",
"bright_black": "\033[90m", # bright colors
"bright_red": "\033[91m",
"bright_green": "\033[92m",
"bright_yellow": "\033[93m",
"bright_blue": "\033[94m",
"bright_magenta": "\033[95m",
"bright_cyan": "\033[96m",
"bright_white": "\033[97m",
"end": "\033[0m", # misc
"bold": "\033[1m",
"underline": "\033[4m",
}
return "".join(colors[x] for x in args) + f"{string}" + colors["end"]
if Path(inspect.stack()[0].filename).parent.parent.as_posix() in inspect.stack()[-1].filename:
cv2.imread, cv2.imwrite, cv2.imshow = imread, imwrite, imshow # redefine
The provided code snippet includes necessary dependencies for implementing the `check_git_status` function. Write a Python function `def check_git_status(repo="ultralytics/yolov5", branch="master")` to solve the following problem:
Checks if YOLOv5 code is up-to-date with the repository, advising 'git pull' if behind; errors return informative messages.
Here is the function:
def check_git_status(repo="ultralytics/yolov5", branch="master"):
"""Checks if YOLOv5 code is up-to-date with the repository, advising 'git pull' if behind; errors return informative
messages.
"""
url = f"https://github.com/{repo}"
msg = f", for updates see {url}"
s = colorstr("github: ") # string
assert Path(".git").exists(), s + "skipping check (not a git repository)" + msg
assert check_online(), s + "skipping check (offline)" + msg
splits = re.split(pattern=r"\s", string=check_output("git remote -v", shell=True).decode())
matches = [repo in s for s in splits]
if any(matches):
remote = splits[matches.index(True) - 1]
else:
remote = "ultralytics"
check_output(f"git remote add {remote} {url}", shell=True)
check_output(f"git fetch {remote}", shell=True, timeout=5) # git fetch
local_branch = check_output("git rev-parse --abbrev-ref HEAD", shell=True).decode().strip() # checked out
n = int(check_output(f"git rev-list {local_branch}..{remote}/{branch} --count", shell=True)) # commits behind
if n > 0:
pull = "git pull" if remote == "origin" else f"git pull {remote} {branch}"
s += f"⚠️ YOLOv5 is out of date by {n} commit{'s' * (n > 1)}. Use '{pull}' or 'git clone {url}' to update."
else:
s += f"up to date with {url} ✅"
LOGGER.info(s) | Checks if YOLOv5 code is up-to-date with the repository, advising 'git pull' if behind; errors return informative messages. |
155,227 | import contextlib
import glob
import inspect
import logging
import logging.config
import math
import os
import platform
import random
import re
import signal
import subprocess
import sys
import time
import urllib
from copy import deepcopy
from datetime import datetime
from itertools import repeat
from multiprocessing.pool import ThreadPool
from pathlib import Path
from subprocess import check_output
from tarfile import is_tarfile
from typing import Optional
from zipfile import ZipFile, is_zipfile
import cv2
import numpy as np
import pandas as pd
import pkg_resources as pkg
import torch
import torchvision
import yaml
from ultralytics.utils.checks import check_requirements
from utils import TryExcept, emojis
from utils.downloads import curl_download, gsutil_getsize
from utils.metrics import box_iou, fitness
The provided code snippet includes necessary dependencies for implementing the `check_git_info` function. Write a Python function `def check_git_info(path=".")` to solve the following problem:
Checks YOLOv5 git info, returning a dict with remote URL, branch name, and commit hash.
Here is the function:
def check_git_info(path="."):
"""Checks YOLOv5 git info, returning a dict with remote URL, branch name, and commit hash."""
check_requirements("gitpython")
import git
try:
repo = git.Repo(path)
remote = repo.remotes.origin.url.replace(".git", "") # i.e. 'https://github.com/ultralytics/yolov5'
commit = repo.head.commit.hexsha # i.e. '3134699c73af83aac2a481435550b968d5792c0d'
try:
branch = repo.active_branch.name # i.e. 'main'
except TypeError: # not on any branch
branch = None # i.e. 'detached HEAD' state
return {"remote": remote, "branch": branch, "commit": commit}
except git.exc.InvalidGitRepositoryError: # path is not a git dir
return {"remote": None, "branch": None, "commit": None} | Checks YOLOv5 git info, returning a dict with remote URL, branch name, and commit hash. |
155,228 | import contextlib
import glob
import inspect
import logging
import logging.config
import math
import os
import platform
import random
import re
import signal
import subprocess
import sys
import time
import urllib
from copy import deepcopy
from datetime import datetime
from itertools import repeat
from multiprocessing.pool import ThreadPool
from pathlib import Path
from subprocess import check_output
from tarfile import is_tarfile
from typing import Optional
from zipfile import ZipFile, is_zipfile
import cv2
import numpy as np
import pandas as pd
import pkg_resources as pkg
import torch
import torchvision
import yaml
from ultralytics.utils.checks import check_requirements
from utils import TryExcept, emojis
from utils.downloads import curl_download, gsutil_getsize
from utils.metrics import box_iou, fitness
if platform.system() == "Windows":
for fn in LOGGER.info, LOGGER.warning:
setattr(LOGGER, fn.__name__, lambda x: fn(emojis(x))) # emoji safe logging
def check_version(current="0.0.0", minimum="0.0.0", name="version ", pinned=False, hard=False, verbose=False):
"""Checks if the current version meets the minimum required version, exits or warns based on parameters."""
current, minimum = (pkg.parse_version(x) for x in (current, minimum))
result = (current == minimum) if pinned else (current >= minimum) # bool
s = f"WARNING ⚠️ {name}{minimum} is required by YOLOv5, but {name}{current} is currently installed" # string
if hard:
assert result, emojis(s) # assert min requirements met
if verbose and not result:
LOGGER.warning(s)
return result
The provided code snippet includes necessary dependencies for implementing the `check_python` function. Write a Python function `def check_python(minimum="3.8.0")` to solve the following problem:
Checks if current Python version meets the minimum required version, exits if not.
Here is the function:
def check_python(minimum="3.8.0"):
"""Checks if current Python version meets the minimum required version, exits if not."""
check_version(platform.python_version(), minimum, name="Python ", hard=True) | Checks if current Python version meets the minimum required version, exits if not. |
155,229 | import contextlib
import glob
import inspect
import logging
import logging.config
import math
import os
import platform
import random
import re
import signal
import subprocess
import sys
import time
import urllib
from copy import deepcopy
from datetime import datetime
from itertools import repeat
from multiprocessing.pool import ThreadPool
from pathlib import Path
from subprocess import check_output
from tarfile import is_tarfile
from typing import Optional
from zipfile import ZipFile, is_zipfile
import cv2
import numpy as np
import pandas as pd
import pkg_resources as pkg
import torch
import torchvision
import yaml
from ultralytics.utils.checks import check_requirements
from utils import TryExcept, emojis
from utils.downloads import curl_download, gsutil_getsize
from utils.metrics import box_iou, fitness
The provided code snippet includes necessary dependencies for implementing the `clean_str` function. Write a Python function `def clean_str(s)` to solve the following problem:
Cleans a string by replacing special characters with underscore, e.g., `clean_str('#example!')` returns '_example_'.
Here is the function:
def clean_str(s):
"""Cleans a string by replacing special characters with underscore, e.g., `clean_str('#example!')` returns
'_example_'.
"""
return re.sub(pattern="[|@#!¡·$€%&()=?¿^*;:,¨´><+]", repl="_", string=s) | Cleans a string by replacing special characters with underscore, e.g., `clean_str('#example!')` returns '_example_'. |
155,230 | import contextlib
import glob
import inspect
import logging
import logging.config
import math
import os
import platform
import random
import re
import signal
import subprocess
import sys
import time
import urllib
from copy import deepcopy
from datetime import datetime
from itertools import repeat
from multiprocessing.pool import ThreadPool
from pathlib import Path
from subprocess import check_output
from tarfile import is_tarfile
from typing import Optional
from zipfile import ZipFile, is_zipfile
import cv2
import numpy as np
import pandas as pd
import pkg_resources as pkg
import torch
import torchvision
import yaml
from ultralytics.utils.checks import check_requirements
from utils import TryExcept, emojis
from utils.downloads import curl_download, gsutil_getsize
from utils.metrics import box_iou, fitness
torch.set_printoptions(linewidth=320, precision=5, profile="long")
np.set_printoptions(linewidth=320, formatter={"float_kind": "{:11.5g}".format})
def clip_boxes(boxes, shape):
"""Clips bounding box coordinates (xyxy) to fit within the specified image shape (height, width)."""
if isinstance(boxes, torch.Tensor): # faster individually
boxes[..., 0].clamp_(0, shape[1]) # x1
boxes[..., 1].clamp_(0, shape[0]) # y1
boxes[..., 2].clamp_(0, shape[1]) # x2
boxes[..., 3].clamp_(0, shape[0]) # y2
else: # np.array (faster grouped)
boxes[..., [0, 2]] = boxes[..., [0, 2]].clip(0, shape[1]) # x1, x2
boxes[..., [1, 3]] = boxes[..., [1, 3]].clip(0, shape[0]) # y1, y2
The provided code snippet includes necessary dependencies for implementing the `xyxy2xywhn` function. Write a Python function `def xyxy2xywhn(x, w=640, h=640, clip=False, eps=0.0)` to solve the following problem:
Convert nx4 boxes from [x1, y1, x2, y2] to [x, y, w, h] normalized where xy1=top-left, xy2=bottom-right.
Here is the function:
def xyxy2xywhn(x, w=640, h=640, clip=False, eps=0.0):
"""Convert nx4 boxes from [x1, y1, x2, y2] to [x, y, w, h] normalized where xy1=top-left, xy2=bottom-right."""
if clip:
clip_boxes(x, (h - eps, w - eps)) # warning: inplace clip
y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x)
y[..., 0] = ((x[..., 0] + x[..., 2]) / 2) / w # x center
y[..., 1] = ((x[..., 1] + x[..., 3]) / 2) / h # y center
y[..., 2] = (x[..., 2] - x[..., 0]) / w # width
y[..., 3] = (x[..., 3] - x[..., 1]) / h # height
return y | Convert nx4 boxes from [x1, y1, x2, y2] to [x, y, w, h] normalized where xy1=top-left, xy2=bottom-right. |
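A worked example of the conversion: one xyxy box centered in a 640x640 image becomes a normalized xywh box, using the same arithmetic as `xyxy2xywhn`.
import numpy as np

box_xyxy = np.array([[160.0, 160.0, 480.0, 480.0]])   # x1, y1, x2, y2
w = h = 640
y = box_xyxy.copy()
y[..., 0] = ((box_xyxy[..., 0] + box_xyxy[..., 2]) / 2) / w   # x center -> 0.5
y[..., 1] = ((box_xyxy[..., 1] + box_xyxy[..., 3]) / 2) / h   # y center -> 0.5
y[..., 2] = (box_xyxy[..., 2] - box_xyxy[..., 0]) / w         # width    -> 0.5
y[..., 3] = (box_xyxy[..., 3] - box_xyxy[..., 1]) / h         # height   -> 0.5
print(y)   # [[0.5 0.5 0.5 0.5]]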
155,231 | import contextlib
import glob
import inspect
import logging
import logging.config
import math
import os
import platform
import random
import re
import signal
import subprocess
import sys
import time
import urllib
from copy import deepcopy
from datetime import datetime
from itertools import repeat
from multiprocessing.pool import ThreadPool
from pathlib import Path
from subprocess import check_output
from tarfile import is_tarfile
from typing import Optional
from zipfile import ZipFile, is_zipfile
import cv2
import numpy as np
import pandas as pd
import pkg_resources as pkg
import torch
import torchvision
import yaml
from ultralytics.utils.checks import check_requirements
from utils import TryExcept, emojis
from utils.downloads import curl_download, gsutil_getsize
from utils.metrics import box_iou, fitness
torch.set_printoptions(linewidth=320, precision=5, profile="long")
np.set_printoptions(linewidth=320, formatter={"float_kind": "{:11.5g}".format})
The provided code snippet includes necessary dependencies for implementing the `xyn2xy` function. Write a Python function `def xyn2xy(x, w=640, h=640, padw=0, padh=0)` to solve the following problem:
Convert normalized segments into pixel segments, shape (n,2).
Here is the function:
def xyn2xy(x, w=640, h=640, padw=0, padh=0):
"""Convert normalized segments into pixel segments, shape (n,2)."""
y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x)
y[..., 0] = w * x[..., 0] + padw # top left x
y[..., 1] = h * x[..., 1] + padh # top left y
return y | Convert normalized segments into pixel segments, shape (n,2). |
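A worked example of the same mapping: normalized segment points scaled back to pixel coordinates with letterbox padding applied; the padding values are arbitrary for illustration.
import numpy as np

seg_norm = np.array([[0.5, 0.25], [0.75, 0.5]])   # (n, 2) normalized xy
w, h, padw, padh = 640, 640, 16, 8
y = seg_norm.copy()
y[..., 0] = w * seg_norm[..., 0] + padw   # x: 0.5*640+16 = 336, 0.75*640+16 = 496
y[..., 1] = h * seg_norm[..., 1] + padh   # y: 0.25*640+8 = 168, 0.5*640+8 = 328
print(y)   # [[336. 168.] [496. 328.]]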
155,232 | import contextlib
import glob
import inspect
import logging
import logging.config
import math
import os
import platform
import random
import re
import signal
import subprocess
import sys
import time
import urllib
from copy import deepcopy
from datetime import datetime
from itertools import repeat
from multiprocessing.pool import ThreadPool
from pathlib import Path
from subprocess import check_output
from tarfile import is_tarfile
from typing import Optional
from zipfile import ZipFile, is_zipfile
import cv2
import numpy as np
import pandas as pd
import pkg_resources as pkg
import torch
import torchvision
import yaml
from ultralytics.utils.checks import check_requirements
from utils import TryExcept, emojis
from utils.downloads import curl_download, gsutil_getsize
from utils.metrics import box_iou, fitness
np.set_printoptions(linewidth=320, formatter={"float_kind": "{:11.5g}".format})
pd.options.display.max_columns = 10
LOGGER = logging.getLogger(LOGGING_NAME)
def colorstr(*input):
"""
Colors a string using ANSI escape codes, e.g., colorstr('blue', 'hello world').
See https://en.wikipedia.org/wiki/ANSI_escape_code.
"""
*args, string = input if len(input) > 1 else ("blue", "bold", input[0]) # color arguments, string
colors = {
"black": "\033[30m", # basic colors
"red": "\033[31m",
"green": "\033[32m",
"yellow": "\033[33m",
"blue": "\033[34m",
"magenta": "\033[35m",
"cyan": "\033[36m",
"white": "\033[37m",
"bright_black": "\033[90m", # bright colors
"bright_red": "\033[91m",
"bright_green": "\033[92m",
"bright_yellow": "\033[93m",
"bright_blue": "\033[94m",
"bright_magenta": "\033[95m",
"bright_cyan": "\033[96m",
"bright_white": "\033[97m",
"end": "\033[0m", # misc
"bold": "\033[1m",
"underline": "\033[4m",
}
return "".join(colors[x] for x in args) + f"{string}" + colors["end"]
def gsutil_getsize(url=""):
"""
Returns the size in bytes of a file at a Google Cloud Storage URL using `gsutil du`.
Returns 0 if the command fails or output is empty.
"""
output = subprocess.check_output(["gsutil", "du", url], shell=True, encoding="utf-8")
return int(output.split()[0]) if output else 0
def fitness(x):
"""Calculates fitness of a model using weighted sum of metrics P, R, mAP@0.5, mAP@0.5:0.95."""
w = [0.0, 0.0, 0.1, 0.9] # weights for [P, R, mAP@0.5, mAP@0.5:0.95]
return (x[:, :4] * w).sum(1)
The provided code snippet includes necessary dependencies for implementing the `print_mutation` function. Write a Python function `def print_mutation(keys, results, hyp, save_dir, bucket, prefix=colorstr("evolve: "))` to solve the following problem:
Logs evolution results and saves to CSV and YAML in `save_dir`, optionally syncs with `bucket`.
Here is the function:
def print_mutation(keys, results, hyp, save_dir, bucket, prefix=colorstr("evolve: ")):
"""Logs evolution results and saves to CSV and YAML in `save_dir`, optionally syncs with `bucket`."""
evolve_csv = save_dir / "evolve.csv"
evolve_yaml = save_dir / "hyp_evolve.yaml"
keys = tuple(keys) + tuple(hyp.keys()) # [results + hyps]
keys = tuple(x.strip() for x in keys)
vals = results + tuple(hyp.values())
n = len(keys)
# Download (optional)
if bucket:
url = f"gs://{bucket}/evolve.csv"
if gsutil_getsize(url) > (evolve_csv.stat().st_size if evolve_csv.exists() else 0):
subprocess.run(["gsutil", "cp", f"{url}", f"{save_dir}"]) # download evolve.csv if larger than local
# Log to evolve.csv
s = "" if evolve_csv.exists() else (("%20s," * n % keys).rstrip(",") + "\n") # add header
with open(evolve_csv, "a") as f:
f.write(s + ("%20.5g," * n % vals).rstrip(",") + "\n")
# Save yaml
with open(evolve_yaml, "w") as f:
data = pd.read_csv(evolve_csv, skipinitialspace=True)
data = data.rename(columns=lambda x: x.strip()) # strip keys
i = np.argmax(fitness(data.values[:, :4])) #
generations = len(data)
f.write(
"# YOLOv5 Hyperparameter Evolution Results\n"
+ f"# Best generation: {i}\n"
+ f"# Last generation: {generations - 1}\n"
+ "# "
+ ", ".join(f"{x.strip():>20s}" for x in keys[:7])
+ "\n"
+ "# "
+ ", ".join(f"{x:>20.5g}" for x in data.values[i, :7])
+ "\n\n"
)
yaml.safe_dump(data.loc[i][7:].to_dict(), f, sort_keys=False)
# Print to screen
LOGGER.info(
prefix
+ f"{generations} generations finished, current result:\n"
+ prefix
+ ", ".join(f"{x.strip():>20s}" for x in keys)
+ "\n"
+ prefix
+ ", ".join(f"{x:20.5g}" for x in vals)
+ "\n\n"
)
if bucket:
subprocess.run(["gsutil", "cp", f"{evolve_csv}", f"{evolve_yaml}", f"gs://{bucket}"]) # upload | Logs evolution results and saves to CSV and YAML in `save_dir`, optionally syncs with `bucket`. |
155,233 | import contextlib
import glob
import inspect
import logging
import logging.config
import math
import os
import platform
import random
import re
import signal
import subprocess
import sys
import time
import urllib
from copy import deepcopy
from datetime import datetime
from itertools import repeat
from multiprocessing.pool import ThreadPool
from pathlib import Path
from subprocess import check_output
from tarfile import is_tarfile
from typing import Optional
from zipfile import ZipFile, is_zipfile
import cv2
import numpy as np
import pandas as pd
import pkg_resources as pkg
import torch
import torchvision
import yaml
from ultralytics.utils.checks import check_requirements
from utils import TryExcept, emojis
from utils.downloads import curl_download, gsutil_getsize
from utils.metrics import box_iou, fitness
torch.set_printoptions(linewidth=320, precision=5, profile="long")
np.set_printoptions(linewidth=320, formatter={"float_kind": "{:11.5g}".format})
cv2.setNumThreads(0)
def xyxy2xywh(x):
"""Convert nx4 boxes from [x1, y1, x2, y2] to [x, y, w, h] where xy1=top-left, xy2=bottom-right."""
y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x)
y[..., 0] = (x[..., 0] + x[..., 2]) / 2 # x center
y[..., 1] = (x[..., 1] + x[..., 3]) / 2 # y center
y[..., 2] = x[..., 2] - x[..., 0] # width
y[..., 3] = x[..., 3] - x[..., 1] # height
return y
def xywh2xyxy(x):
"""Convert nx4 boxes from [x, y, w, h] to [x1, y1, x2, y2] where xy1=top-left, xy2=bottom-right."""
y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x)
y[..., 0] = x[..., 0] - x[..., 2] / 2 # top left x
y[..., 1] = x[..., 1] - x[..., 3] / 2 # top left y
y[..., 2] = x[..., 0] + x[..., 2] / 2 # bottom right x
y[..., 3] = x[..., 1] + x[..., 3] / 2 # bottom right y
return y
def scale_boxes(img1_shape, boxes, img0_shape, ratio_pad=None):
"""Rescales (xyxy) bounding boxes from img1_shape to img0_shape, optionally using provided `ratio_pad`."""
if ratio_pad is None: # calculate from img0_shape
gain = min(img1_shape[0] / img0_shape[0], img1_shape[1] / img0_shape[1]) # gain = old / new
pad = (img1_shape[1] - img0_shape[1] * gain) / 2, (img1_shape[0] - img0_shape[0] * gain) / 2 # wh padding
else:
gain = ratio_pad[0][0]
pad = ratio_pad[1]
boxes[..., [0, 2]] -= pad[0] # x padding
boxes[..., [1, 3]] -= pad[1] # y padding
boxes[..., :4] /= gain
clip_boxes(boxes, img0_shape)
return boxes
The provided code snippet includes necessary dependencies for implementing the `apply_classifier` function. Write a Python function `def apply_classifier(x, model, img, im0)` to solve the following problem:
Applies second-stage classifier to YOLO outputs, filtering detections by class match.
Here is the function:
def apply_classifier(x, model, img, im0):
"""Applies second-stage classifier to YOLO outputs, filtering detections by class match."""
# Example model = torchvision.models.__dict__['efficientnet_b0'](pretrained=True).to(device).eval()
im0 = [im0] if isinstance(im0, np.ndarray) else im0
for i, d in enumerate(x): # per image
if d is not None and len(d):
d = d.clone()
# Reshape and pad cutouts
b = xyxy2xywh(d[:, :4]) # boxes
b[:, 2:] = b[:, 2:].max(1)[0].unsqueeze(1) # rectangle to square
b[:, 2:] = b[:, 2:] * 1.3 + 30 # pad
d[:, :4] = xywh2xyxy(b).long()
# Rescale boxes from img_size to im0 size
scale_boxes(img.shape[2:], d[:, :4], im0[i].shape)
# Classes
pred_cls1 = d[:, 5].long()
ims = []
for a in d:
cutout = im0[i][int(a[1]) : int(a[3]), int(a[0]) : int(a[2])]
im = cv2.resize(cutout, (224, 224)) # BGR
im = im[:, :, ::-1].transpose(2, 0, 1) # BGR to RGB, to 3x416x416
im = np.ascontiguousarray(im, dtype=np.float32) # uint8 to float32
im /= 255 # 0 - 255 to 0.0 - 1.0
ims.append(im)
pred_cls2 = model(torch.Tensor(ims).to(d.device)).argmax(1) # classifier prediction
x[i] = x[i][pred_cls1 == pred_cls2] # retain matching class detections
return x | Applies second-stage classifier to YOLO outputs, filtering detections by class match. |
155,234 | import math
import warnings
from pathlib import Path
import matplotlib.pyplot as plt
import numpy as np
import torch
from utils import TryExcept, threaded
The provided code snippet includes necessary dependencies for implementing the `bbox_iou` function. Write a Python function `def bbox_iou(box1, box2, xywh=True, GIoU=False, DIoU=False, CIoU=False, eps=1e-7)` to solve the following problem:
Calculates IoU, GIoU, DIoU, or CIoU between two boxes, supporting xywh/xyxy formats. Input shapes are box1(1,4) to box2(n,4).
Here is the function:
def bbox_iou(box1, box2, xywh=True, GIoU=False, DIoU=False, CIoU=False, eps=1e-7):
"""
Calculates IoU, GIoU, DIoU, or CIoU between two boxes, supporting xywh/xyxy formats.
Input shapes are box1(1,4) to box2(n,4).
"""
# Get the coordinates of bounding boxes
if xywh: # transform from xywh to xyxy
(x1, y1, w1, h1), (x2, y2, w2, h2) = box1.chunk(4, -1), box2.chunk(4, -1)
w1_, h1_, w2_, h2_ = w1 / 2, h1 / 2, w2 / 2, h2 / 2
b1_x1, b1_x2, b1_y1, b1_y2 = x1 - w1_, x1 + w1_, y1 - h1_, y1 + h1_
b2_x1, b2_x2, b2_y1, b2_y2 = x2 - w2_, x2 + w2_, y2 - h2_, y2 + h2_
else: # x1, y1, x2, y2 = box1
b1_x1, b1_y1, b1_x2, b1_y2 = box1.chunk(4, -1)
b2_x1, b2_y1, b2_x2, b2_y2 = box2.chunk(4, -1)
w1, h1 = b1_x2 - b1_x1, (b1_y2 - b1_y1).clamp(eps)
w2, h2 = b2_x2 - b2_x1, (b2_y2 - b2_y1).clamp(eps)
# Intersection area
inter = (b1_x2.minimum(b2_x2) - b1_x1.maximum(b2_x1)).clamp(0) * (
b1_y2.minimum(b2_y2) - b1_y1.maximum(b2_y1)
).clamp(0)
# Union Area
union = w1 * h1 + w2 * h2 - inter + eps
# IoU
iou = inter / union
if CIoU or DIoU or GIoU:
cw = b1_x2.maximum(b2_x2) - b1_x1.minimum(b2_x1) # convex (smallest enclosing box) width
ch = b1_y2.maximum(b2_y2) - b1_y1.minimum(b2_y1) # convex height
if CIoU or DIoU: # Distance or Complete IoU https://arxiv.org/abs/1911.08287v1
c2 = cw**2 + ch**2 + eps # convex diagonal squared
rho2 = ((b2_x1 + b2_x2 - b1_x1 - b1_x2) ** 2 + (b2_y1 + b2_y2 - b1_y1 - b1_y2) ** 2) / 4 # center dist ** 2
if CIoU: # https://github.com/Zzh-tju/DIoU-SSD-pytorch/blob/master/utils/box/box_utils.py#L47
v = (4 / math.pi**2) * (torch.atan(w2 / h2) - torch.atan(w1 / h1)).pow(2)
with torch.no_grad():
alpha = v / (v - iou + (1 + eps))
return iou - (rho2 / c2 + v * alpha) # CIoU
return iou - rho2 / c2 # DIoU
c_area = cw * ch + eps # convex area
return iou - (c_area - union) / c_area # GIoU https://arxiv.org/pdf/1902.09630.pdf
return iou # IoU | Calculates IoU, GIoU, DIoU, or CIoU between two boxes, supporting xywh/xyxy formats. Input shapes are box1(1,4) to box2(n,4). |
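A worked plain-IoU example in xyxy format (the `xywh=False` path): two 10x10 boxes overlapping in a 5x5 patch give IoU = 25 / 175 ≈ 0.143.
import torch

b1 = torch.tensor([[0.0, 0.0, 10.0, 10.0]])
b2 = torch.tensor([[5.0, 5.0, 15.0, 15.0]])
inter_w = (torch.min(b1[:, 2], b2[:, 2]) - torch.max(b1[:, 0], b2[:, 0])).clamp(0)   # 5
inter_h = (torch.min(b1[:, 3], b2[:, 3]) - torch.max(b1[:, 1], b2[:, 1])).clamp(0)   # 5
inter = inter_w * inter_h                                                            # 25
union = 10 * 10 + 10 * 10 - inter                                                    # 175
print((inter / union).item())   # ~0.1429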
155,235 | import math
import warnings
from pathlib import Path
import matplotlib.pyplot as plt
import numpy as np
import torch
from utils import TryExcept, threaded
The provided code snippet includes necessary dependencies for implementing the `wh_iou` function. Write a Python function `def wh_iou(wh1, wh2, eps=1e-7)` to solve the following problem:
Calculates the Intersection over Union (IoU) for two sets of widths and heights; `wh1` and `wh2` should be nx2 and mx2 tensors.
Here is the function:
def wh_iou(wh1, wh2, eps=1e-7):
"""Calculates the Intersection over Union (IoU) for two sets of widths and heights; `wh1` and `wh2` should be nx2
and mx2 tensors.
"""
wh1 = wh1[:, None] # [N,1,2]
wh2 = wh2[None] # [1,M,2]
inter = torch.min(wh1, wh2).prod(2) # [N,M]
return inter / (wh1.prod(2) + wh2.prod(2) - inter + eps) # iou = inter / (area1 + area2 - inter) | Calculates the Intersection over Union (IoU) for two sets of widths and heights; `wh1` and `wh2` should be nx2 and mx2 tensors. |
155,236 | import torch
from collections import OrderedDict
from argparse import ArgumentParser
from transformers import AutoModelForCausalLM, AutoTokenizer
from accelerate import dispatch_model
from accelerate.utils import get_balanced_memory, infer_auto_device_map
def parse_args():
parser = ArgumentParser()
parser.add_argument("--multi-gpu", default=False, action="store_true", help="Whether to use multi gpu.")
parser.add_argument("--device-map", default=None, help="the device map for accelerate.")
parser.add_argument("--delta", default=None, type=str, help="The delta path.")
args = parser.parse_args()
return args | null |
155,237 | import torch
from collections import OrderedDict
from argparse import ArgumentParser
from transformers import AutoModelForCausalLM, AutoTokenizer
from accelerate import dispatch_model
from accelerate.utils import get_balanced_memory, infer_auto_device_map
def load_delta(delta_path):
delta_dict = torch.load(delta_path)
delta_with_prefix = OrderedDict()
for k, v in delta_dict.items():
# CpmBeeModel -> CpmBeeForCasualLM
if k.startswith("encoder.") or k.startswith("input_embedding.") or k.startswith("position_bias."):
delta_with_prefix["cpmbee."+k] = v
else:
delta_with_prefix[k] = v
del delta_dict
return delta_with_prefix | null |
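A tiny self-contained sketch of the key remapping performed by `load_delta`: encoder, embedding, and position-bias keys gain the `cpmbee.` prefix while everything else passes through unchanged. The state-dict keys and dummy values below are illustrative.
from collections import OrderedDict

delta_dict = {"encoder.layers.0.weight": 0, "input_embedding.weight": 1, "lm_head.weight": 2}  # dummy values
remapped = OrderedDict()
for k, v in delta_dict.items():
    if k.startswith(("encoder.", "input_embedding.", "position_bias.")):
        remapped["cpmbee." + k] = v
    else:
        remapped[k] = v
print(list(remapped))   # ['cpmbee.encoder.layers.0.weight', 'cpmbee.input_embedding.weight', 'lm_head.weight']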
155,238 | import json
import time
from typing import Any, Dict, List, Union
import torch
import bmtrain as bmt
import os
from cpm_live.arguments import get_args
from cpm_live.models import CPMBee, CPMBeeConfig
from cpm_live.tokenizers import CPMBeeTokenizer
from cpm_live.utils import allgather_objects, LogManager
from cpm_live.training_tasks.bee import MixedDataset
def get_tokenizer(args):
tokenizer = CPMBeeTokenizer()
return tokenizer
def get_model(args):
config = CPMBeeConfig.from_json_file(args.model_config)
model = CPMBee(config)
if args.load is not None:
bmt.load(model, args.load)
else:
bmt.init_parameters(model)
return model
def get_optimizer(args, model):
optimizer = bmt.optim.AdamOffloadOptimizer(
model.parameters(), weight_decay=args.weight_decay
)
if args.load is not None:
if os.path.exists(os.path.join(args.save, args.save_name + (".rank-%d.opt" % 0))):
# optimizer state exists
states = torch.load(
os.path.join(args.save, args.save_name + (".rank-%d.opt" % bmt.rank()))
)
optimizer.load_state_dict(states)
return optimizer
def get_learning_rate_scheduler(args, optimizer):
if args.lr_decay_iters is None:
args.lr_decay_iters = args.train_iters
lr_scheduler = bmt.lr_scheduler.Noam(
optimizer,
start_lr=args.lr,
warmup_iter=args.warmup_iters,
end_iter=args.lr_decay_iters,
num_iter=args.start_step,
)
return lr_scheduler
def setup_model_and_optimizer(args):
model = get_model(args)
tokenizer = get_tokenizer(args)
bmt.synchronize()
optimizer = get_optimizer(args, model)
lr_scheduler = get_learning_rate_scheduler(args, optimizer)
bmt.synchronize()
optim_manager = bmt.optim.OptimManager(
loss_scale=args.loss_scale,
loss_scale_factor=2,
loss_scale_steps=512,
)
optim_manager.add_optimizer(optimizer, lr_scheduler)
return tokenizer, model, optimizer, lr_scheduler, optim_manager | null |
155,239 | import json
import time
from typing import Any, Dict, List, Union
import torch
import bmtrain as bmt
import os
from cpm_live.arguments import get_args
from cpm_live.models import CPMBee, CPMBeeConfig
from cpm_live.tokenizers import CPMBeeTokenizer
from cpm_live.utils import allgather_objects, LogManager
from cpm_live.training_tasks.bee import MixedDataset
def pretrain(
args,
tokenizer: CPMBeeTokenizer,
model: CPMBee,
optimizer: bmt.optim.AdamOffloadOptimizer,
lr_scheduler: bmt.lr_scheduler.WarmupLRScheduler,
optim_manager: bmt.optim.OptimManager,
):
average_time = bmt.utils.AverageRecorder()
loss_func = bmt.loss.FusedCrossEntropy(ignore_index=-100)
start_step = args.start_step
lsd = LossSpikeDetector("debug/spile.%d.log" % bmt.rank())
if args.tensorboard is not None and bmt.rank() == 0:
from torch.utils.tensorboard import SummaryWriter
import distutils.version # noqa: F401
if not os.path.exists(args.tensorboard):
os.makedirs(args.tensorboard)
writer = SummaryWriter(log_dir=args.tensorboard)
if args.log_dir is not None and bmt.rank() == 0:
log_mgr = LogManager(args.log_dir)
global_token_pass = 0.0
global_world_size = bmt.world_size()
dataloader = MixedDataset(
args.dataset, args.batch_size, args.max_length, tokenizer, max_depth=8
)
if os.path.exists(os.path.join(args.save, args.save_name + ("-%d.data.pt" % start_step))):
# load dataset states if exists
dataset_states = torch.load(
os.path.join(args.save, args.save_name + ("-%d.data.pt" % start_step))
)
missing = dataloader.load_state_dict(dataset_states)
if len(missing) > 0:
bmt.print_rank("Missing keys when loading dataset states: ", missing)
dataloader.start()
try:
for iteration, data in enumerate(dataloader):
iteration = iteration + start_step + 1
assert data["inputs"].shape[0] == args.batch_size
input_ids = torch.from_numpy(data["inputs"]).cuda().to(torch.int32)
input_ids_sub = torch.from_numpy(data["inputs_sub"]).cuda().to(torch.int32)
input_length = torch.from_numpy(data["length"]).cuda().to(torch.int32)
input_context = torch.from_numpy(data["context"]).cuda().bool()
input_sample_ids = torch.from_numpy(data["sample_ids"]).cuda().to(torch.int32)
input_num_segments = torch.from_numpy(data["num_segments"]).cuda().to(torch.int32)
input_segment_ids = torch.from_numpy(data["segment_ids"]).cuda().to(torch.int32)
input_segment_rel_offset = (
torch.from_numpy(data["segment_rel_offset"]).cuda().to(torch.int32)
)
input_segment_rel = torch.from_numpy(data["segment_rel"]).cuda().to(torch.int32)
input_span = torch.from_numpy(data["spans"]).cuda().to(torch.int32)
targets = torch.from_numpy(data["target"]).cuda().to(torch.int32)
ext_table_ids = torch.from_numpy(data["ext_ids"]).cuda().to(torch.int32)
ext_table_sub = torch.from_numpy(data["ext_sub"]).cuda().to(torch.int32)
task_ids = torch.from_numpy(data["task_ids"]).cuda().to(torch.int32)
task_names = data["task_names"]
lsd.update_data(data["raw_data"])
# ===========
optim_manager.zero_grad()
# torch.cuda.empty_cache()
mem_usage = {}
tim_usage = {}
mem_usage, tim_usage = add_mem_time("init", mem_usage, tim_usage)
# ===========
logits, _ = model(
input_ids,
input_ids_sub,
input_length,
input_context,
input_sample_ids,
input_num_segments,
input_segment_ids,
input_segment_rel_offset,
input_segment_rel,
input_span,
ext_table_ids,
ext_table_sub,
)
loss = loss_func(logits.view(-1, logits.size(-1)), targets.view(-1))
global_loss = bmt.sum_loss(loss).item()
mem_usage, tim_usage = add_mem_time("forward", mem_usage, tim_usage)
# ===========
optim_manager.backward(loss)
mem_usage, tim_usage = add_mem_time("backward", mem_usage, tim_usage)
# ===========
current_stream = torch.cuda.current_stream()
# some reduce ops of distributed parameter were launched on load stream
current_stream.wait_stream(bmt.config['load_stream'])
grad_norm = optim_manager.clip_grad_norm(optimizer.param_groups, max_norm=1.0)
optim_manager.step()
mem_usage, tim_usage = add_mem_time("optim", mem_usage, tim_usage)
# ==========
iteration_time = tim_usage["optim"] - tim_usage["init"]
average_time.record(iteration_time)
with torch.no_grad():
task_num = len(task_names)
targets_tmp = targets.expand(task_num, -1, -1)
task = torch.arange(task_num, dtype=torch.int32, device="cuda")[:, None, None]
targets_tmp = torch.where(
task_ids == task,
targets_tmp,
torch.scalar_tensor(-100, dtype=torch.int32, device="cuda"),
)
task_loss_map: Dict[str, float] = {}
for i in range(task_num):
task_loss = loss_func(
logits.view(-1, logits.size(-1)), targets_tmp[i, :].view(-1)
)
# global_task_loss = float(bmt.sum_loss(task_loss).item())
task_loss_map[task_names[i]] = task_loss.item()
gatherd_task_loss_map: List[Dict[str, float]] = allgather_objects(task_loss_map)
global_task_loss_map: Dict[str, Union[List[float], float]] = {}
for local_task_loss_map in gatherd_task_loss_map:
for task_name, task_loss in local_task_loss_map.items():
if task_name not in global_task_loss_map:
global_task_loss_map[task_name] = []
global_task_loss_map[task_name].append(task_loss)
task_loss_map = {}
for task_name in sorted(list(global_task_loss_map.keys())):
avg_loss = sum(global_task_loss_map[task_name]) / len(
global_task_loss_map[task_name]
)
task_loss_map[task_name] = avg_loss
local_total_rate = torch.Tensor([input_length.float().mean() / args.max_length]).cuda()
local_total_rate = bmt.sum_loss(local_total_rate).item()
global_token_pass += (
global_world_size * local_total_rate * args.max_length * args.batch_size
)
avg_time = average_time.value
lsd.update_loss(iteration, task_loss_map)
train_info = {
"time": tim_usage["init"],
"iteration": iteration,
"loss": global_loss,
"lr": lr_scheduler.current_lr,
"lr_scale": int(optim_manager.loss_scale),
"time_usage": tim_usage,
"mem_usage": mem_usage,
"avg_time": avg_time,
"token_max": local_total_rate,
"token_pass": global_token_pass,
"throughout": args.max_length * args.batch_size * local_total_rate / avg_time,
"grad_norm": grad_norm.item(),
"mask_max": ((targets >= 0).sum(-1).float().mean() / args.max_length).item(),
"num_gpus": global_world_size,
"task_loss": task_loss_map,
}
bmt.print_rank(
(
"| Iter: {:6d} | loss: {:.4f} | lr: {:.4e}, scale: {:10.4f} | time: {:.4f} |"
+ " token/max: {:.4f} | mask/max: {:.4f} | grad_norm: {:.4f}"
).format(
iteration,
global_loss,
lr_scheduler.current_lr,
int(optim_manager.loss_scale),
avg_time,
input_length.float().mean() / args.max_length,
(targets >= 0).sum(-1).float().mean() / args.max_length,
grad_norm,
)
)
bmt.print_rank(
"| "
+ " | ".join(
[
"{} loss: {:.4f}".format(task_name, loss)
for task_name, loss in task_loss_map.items()
]
)
)
if iteration % args.inspect_iters == 0:
model_inspect = bmt.inspect.inspect_model(model, "*")
bmt.print_rank(bmt.inspect.format_summary(model_inspect))
train_info["model_inspect"] = model_inspect
# write log here
if args.log_dir is not None and bmt.rank() == 0:
log_mgr.write(**train_info)
if args.tensorboard is not None and bmt.rank() == 0:
writer.add_scalar("Loss/train", global_loss, iteration)
writer.add_scalar("Optimizer/lr", lr_scheduler.current_lr, iteration)
writer.add_scalar("Optimizer/scale", optim_manager.loss_scale, iteration)
writer.add_scalar("Optimizer/grad_norm", grad_norm.item(), iteration)
for task_name, loss in task_loss_map.items():
writer.add_scalar("Loss/train/{}".format(task_name), loss, iteration)
if args.save is not None and iteration % args.save_iters == 0:
bmt.save(model, os.path.join(args.save, args.save_name + ("-%d.pt" % iteration)))
torch.save(
optimizer.state_dict(),
os.path.join(args.save, args.save_name + (".rank-%d.opt" % bmt.rank())),
)
all_states = dataloader.state_dict()
if bmt.rank() == 0:
# rank 0 writes the dataloader state
torch.save(
all_states,
os.path.join(args.save, args.save_name + ("-%d.data.pt" % iteration)),
)
del all_states
finally:
dataloader.close()
bmt.save(model, os.path.join(args.save, args.save_name + ".pt"))
def get_args(pretrain: bool = False, finetune: bool = False):
parser = argparse.ArgumentParser()
parser = add_model_config_args(parser)
parser = add_training_args(parser)
if pretrain:
parser = add_pretrain_args(parser)
if finetune:
parser = add_finetune_args(parser)
args = parser.parse_args()
return args
def initialize():
os.environ["MASTER_PORT"] = str(int(os.environ["MASTER_PORT"]) + 2333)
args = get_args(pretrain=True)
bmt.init_distributed(seed=args.seed)
if args.save is not None:
os.makedirs(args.save, exist_ok=True)
return args | null |
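Note: the pretraining loop above calls an add_mem_time helper that is not defined in this snippet. A minimal sketch of what it must do, inferred from how tim_usage and mem_usage are consumed (iteration time is tim_usage["optim"] - tim_usage["init"]); the actual cpm_live implementation may differ.

import time
import torch

def add_mem_time(name, mem_usage, tim_usage):
    # Record the GPU memory high-water mark (in GiB) and the wall-clock time under `name`.
    # Assumption: synchronize so the timestamp brackets all kernels launched so far.
    torch.cuda.synchronize()
    mem_usage[name] = torch.cuda.max_memory_allocated() / (1 << 30)
    tim_usage[name] = time.time()
    return mem_usage, tim_usage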
155,240 | from argparse import ArgumentParser
from cpm_live.generation.bee import CPMBeeBeamSearch
from cpm_live.models import CPMBeeTorch, CPMBeeConfig
from cpm_live.tokenizers import CPMBeeTokenizer
from opendelta import LoraModel
import torch
def parse_args():
parser = ArgumentParser()
parser.add_argument("--use-bminf", default=False, action="store_true", help="Whether to use BMInf")
parser.add_argument("--memory-limit", type=int, default=20, help="GPU Memory limit, in GB")
parser.add_argument("--delta", default=None, type=str, help="The path to lora.")
parser.add_argument("--device", default="cuda:0", type=str, help="The target device.")
args = parser.parse_args()
return args | null |
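For context, a hedged sketch of how these flags could drive text generation. The CPMBeeTorch / CPMBeeBeamSearch / bminf calls below are assumptions based on typical usage of these classes, not a verified API, and the checkpoint and config paths are illustrative.

args = parse_args()
config = CPMBeeConfig.from_json_file("cpm-bee.json")        # illustrative path
tokenizer = CPMBeeTokenizer()
model = CPMBeeTorch(config=config)                           # base checkpoint loading omitted
if args.delta is not None:
    # attach LoRA modules first, then load the delta checkpoint on top of the backbone
    LoraModel(backbone_model=model, modified_modules=["project_q", "project_v"])
    model.load_state_dict(torch.load(args.delta), strict=False)
model.to(args.device)
if args.use_bminf:
    import bminf
    # assumed bminf entry point; memory limit given in bytes
    model = bminf.wrapper(model, memory_limit=args.memory_limit << 30)
beam_search = CPMBeeBeamSearch(model=model, tokenizer=tokenizer)               # assumed constructor
print(beam_search.generate([{"input": "北京是中国的", "<ans>": ""}], max_length=100))  # assumed call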
155,241 | import os
from cpm_live.dataset import build_dataset, shuffle_dataset
import shutil
from tqdm import tqdm
import json
import argparse
def get_args():
parser = argparse.ArgumentParser()
parser.add_argument("--input", type=str, help="raw dataset path", required=True)
parser.add_argument("--output_path", type=str, help="output dataset path", required=True)
parser.add_argument("--output_name", type=str, help="output dataset name", required=True)
args = parser.parse_args()
return args | null |
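A hedged sketch of the build script these arguments feed, using the build_dataset writer documented later in these snippets. It assumes the raw input is one JSON object per line and that a reformat_data hook (see the next entry) adapts each record; both assumptions are illustrative.

def main():
    args = get_args()
    os.makedirs(args.output_path, exist_ok=True)
    with build_dataset(args.output_path, args.output_name) as writer:
        with open(args.input, "r", encoding="utf-8") as fin:
            for line in tqdm(fin, desc="building"):
                record = json.loads(line)          # assumes JSON-lines input
                writer.write(reformat_data(record))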
155,242 | import os
from cpm_live.dataset import build_dataset, shuffle_dataset
import shutil
from tqdm import tqdm
import json
import argparse
The provided code snippet includes necessary dependencies for implementing the `reformat_data` function. Write a Python function `def reformat_data(data)` to solve the following problem:
set your data format
Here is the function:
def reformat_data(data):
"""set your data format"""
return data | set your data format |
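As a concrete illustration, a hypothetical reformat_data that maps a raw record into the nested-dict shape consumed by convert_data_to_id further below; "<ans>" is the key that marks the span to predict, while the raw field names here are made up.

def reformat_data(data):
    """Hypothetical mapping from a raw record to a CPM-Bee style nested dict."""
    return {
        "input": data["source_text"],   # assumed raw field name
        "<ans>": data["target_text"],   # assumed raw field name; "<ans>" is the prediction slot
    }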
155,243 | import time
from typing import Dict, List, Union
import torch
import bmtrain as bmt
import os
from opendelta import LoraModel
from cpm_live.arguments import get_args
from cpm_live.models import CPMBee, CPMBeeConfig
from cpm_live.tokenizers import CPMBeeTokenizer
from cpm_live.utils import allgather_objects
from cpm_live.training_tasks.bee import FinetuneDataset
def get_tokenizer(args):
tokenizer = CPMBeeTokenizer()
return tokenizer
def get_model(args):
config = CPMBeeConfig.from_json_file(args.model_config)
model = CPMBee(config)
model.config = config
if args.load is not None:
bmt.load(model, args.load)
else:
bmt.init_parameters(model)
# insert LoRA
if args.use_delta:
delta_model = LoraModel(
backbone_model=model, modified_modules=["project_q", "project_v"], backend="bmt"
)
delta_model.freeze_module(exclude=["deltas"], set_state_dict=True)
delta_model.log()
return model
def get_optimizer(args, model):
optimizer = bmt.optim.AdamOffloadOptimizer(
model.parameters(), weight_decay=args.weight_decay
)
return optimizer
def get_learning_rate_scheduler(args, optimizer):
if args.lr_decay_iters is None:
args.lr_decay_iters = args.train_iters
lr_scheduler = bmt.lr_scheduler.Noam(
optimizer,
start_lr=args.lr,
warmup_iter=args.warmup_iters,
end_iter=args.lr_decay_iters,
num_iter=args.start_step,
)
return lr_scheduler
def setup_model_and_optimizer(args):
model = get_model(args)
tokenizer = get_tokenizer(args)
bmt.synchronize()
optimizer = get_optimizer(args, model)
lr_scheduler = get_learning_rate_scheduler(args, optimizer)
bmt.synchronize()
optim_manager = bmt.optim.OptimManager(
loss_scale=args.loss_scale,
loss_scale_factor=2,
loss_scale_steps=512,
)
optim_manager.add_optimizer(optimizer, lr_scheduler)
return tokenizer, model, optimizer, lr_scheduler, optim_manager | null |
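A short wiring sketch showing how the helpers above are typically combined before training; initialize() stands for the argument and bmtrain setup defined elsewhere in these snippets.

args = initialize()
tokenizer, model, optimizer, lr_scheduler, optim_manager = setup_model_and_optimizer(args)
bmt.print_rank(
    "setup done, {} parameters".format(sum(p.numel() for p in model.parameters()))
)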
155,244 | import time
from typing import Dict, List, Union
import torch
import bmtrain as bmt
import os
from opendelta import LoraModel
from cpm_live.arguments import get_args
from cpm_live.models import CPMBee, CPMBeeConfig
from cpm_live.tokenizers import CPMBeeTokenizer
from cpm_live.utils import allgather_objects
from cpm_live.training_tasks.bee import FinetuneDataset
def finetune(
args,
tokenizer: CPMBeeTokenizer,
model: CPMBee,
optimizer: bmt.optim.AdamOffloadOptimizer,
lr_scheduler: bmt.lr_scheduler.WarmupLRScheduler,
optim_manager: bmt.optim.OptimManager,
):
average_time = bmt.utils.AverageRecorder()
if model.config.dtype == torch.half:
loss_func = bmt.loss.FusedCrossEntropy(ignore_index=-100)
else:
loss_func = torch.nn.CrossEntropyLoss(ignore_index=-100)
if args.tensorboard is not None and bmt.rank() == 0:
from torch.utils.tensorboard import SummaryWriter
import distutils.version # noqa: F401
if not os.path.exists(args.tensorboard):
os.makedirs(args.tensorboard)
writer = SummaryWriter(log_dir=args.tensorboard)
best_eval_loss, eval_loss_increase = 1e9, 0
global_token_pass = 0.0
global_steps = 0
global_world_size = bmt.world_size()
dataloader = FinetuneDataset(
args.dataset,
args.batch_size,
args.max_length,
tokenizer,
max_depth=8,
task_name=args.task_name,
drop_last=args.drop_last,
)
for epoch in range(args.epoch):
epoch = epoch + 1
last_data = None
for iteration, data in enumerate(dataloader):
iteration = iteration + 1
global_steps = global_steps + 1
skip_this_batch = False
if data is None:
if last_data is None:
raise RuntimeError(
"Dataset is too small, please use a smaller batch size or sequence length!"
)
data = last_data # use last data
skip_this_batch = True
else:
last_data = data
input_ids = torch.from_numpy(data["inputs"]).cuda().to(torch.int32)
input_ids_sub = torch.from_numpy(data["inputs_sub"]).cuda().to(torch.int32)
input_length = torch.from_numpy(data["length"]).cuda().to(torch.int32)
input_context = torch.from_numpy(data["context"]).cuda().bool()
input_sample_ids = torch.from_numpy(data["sample_ids"]).cuda().to(torch.int32)
input_num_segments = torch.from_numpy(data["num_segments"]).cuda().to(torch.int32)
input_segment_ids = torch.from_numpy(data["segment_ids"]).cuda().to(torch.int32)
input_segment_rel_offset = (
torch.from_numpy(data["segment_rel_offset"]).cuda().to(torch.int32)
)
input_segment_rel = torch.from_numpy(data["segment_rel"]).cuda().to(torch.int32)
input_span = torch.from_numpy(data["spans"]).cuda().to(torch.int32)
targets = torch.from_numpy(data["target"]).cuda().to(torch.int32)
ext_table_ids = torch.from_numpy(data["ext_ids"]).cuda().to(torch.int32)
ext_table_sub = torch.from_numpy(data["ext_sub"]).cuda().to(torch.int32)
task_ids = torch.from_numpy(data["task_ids"]).cuda().to(torch.int32)
task_names = data["task_names"]
# ===========
optim_manager.zero_grad()
mem_usage = {}
tim_usage = {}
mem_usage, tim_usage = add_mem_time("init", mem_usage, tim_usage)
# ===========
logits, _ = model(
input_ids,
input_ids_sub,
input_length,
input_context,
input_sample_ids,
input_num_segments,
input_segment_ids,
input_segment_rel_offset,
input_segment_rel,
input_span,
ext_table_ids,
ext_table_sub,
)
loss = loss_func(logits.view(-1, logits.size(-1)), targets.long().view(-1))
if skip_this_batch:
loss = loss * 0
mem_usage, tim_usage = add_mem_time("forward", mem_usage, tim_usage)
# ===========
optim_manager.backward(loss)
mem_usage, tim_usage = add_mem_time("backward", mem_usage, tim_usage)
# ===========
grad_norm = optim_manager.clip_grad_norm(optimizer.param_groups, max_norm=1.0)
optim_manager.step()
mem_usage, tim_usage = add_mem_time("optim", mem_usage, tim_usage)
# ==========
iteration_time = tim_usage["optim"] - tim_usage["init"]
average_time.record(iteration_time)
with torch.no_grad():
task_num = len(task_names)
targets_tmp = targets.expand(task_num, -1, -1)
task = torch.arange(task_num, dtype=torch.int32, device="cuda")[:, None, None]
targets_tmp = torch.where(
task_ids == task,
targets_tmp,
torch.scalar_tensor(-100, dtype=torch.int32, device="cuda"),
)
task_loss_map: Dict[str, float] = {}
if not skip_this_batch:
for i in range(task_num):
task_loss = loss_func(
logits.view(-1, logits.size(-1)), targets_tmp[i, :].long().view(-1)
)
task_loss_map[task_names[i]] = task_loss.item()
gatherd_task_loss_map: List[Dict[str, float]] = allgather_objects(task_loss_map)
global_task_loss_map: Dict[str, Union[List[float], float]] = {}
for local_task_loss_map in gatherd_task_loss_map:
for task_name, task_loss in local_task_loss_map.items():
if task_name not in global_task_loss_map:
global_task_loss_map[task_name] = []
global_task_loss_map[task_name].append(task_loss)
task_loss_map = {}
for task_name in sorted(list(global_task_loss_map.keys())):
avg_loss = sum(global_task_loss_map[task_name]) / len(
global_task_loss_map[task_name]
)
task_loss_map[task_name] = avg_loss
local_total_rate = torch.Tensor([input_length.float().mean() / args.max_length]).cuda()
local_total_rate = bmt.sum_loss(local_total_rate).item()
global_token_pass += (
global_world_size * local_total_rate * args.max_length * args.batch_size
)
avg_time = average_time.value
train_info = {
"time": tim_usage["init"],
"epoch": epoch,
"iteration": iteration,
"loss": task_loss_map[args.task_name],
"lr": lr_scheduler.current_lr,
"lr_scale": int(optim_manager.loss_scale),
"time_usage": tim_usage,
"mem_usage": mem_usage,
"avg_time": avg_time,
"token_max": local_total_rate,
"token_pass": global_token_pass,
"throughout": args.max_length * args.batch_size * local_total_rate / avg_time,
"grad_norm": grad_norm.item(),
"mask_max": ((targets >= 0).sum(-1).float().mean() / args.max_length).item(),
"num_gpus": global_world_size,
"task_loss": task_loss_map,
}
bmt.print_rank(
(
"| Epoch: {:3d} | Iter: {:6d} | loss: {:.4f} "
+ "| lr: {:.4e}, scale: {:10.4f} | time: {:.4f} |"
+ " token/max: {:.4f} | mask/max: {:.4f} | grad_norm: {:.4f}"
).format(
epoch,
iteration,
task_loss_map[args.task_name],
lr_scheduler.current_lr,
int(optim_manager.loss_scale),
avg_time,
input_length.float().mean() / args.max_length,
(targets >= 0).sum(-1).float().mean() / args.max_length,
grad_norm,
)
)
bmt.print_rank(
"| "
+ " | ".join(
[
"{} loss: {:.4f}".format(task_name, loss)
for task_name, loss in task_loss_map.items()
]
)
)
if iteration % args.inspect_iters == 0:
model_inspect = bmt.inspect.inspect_model(model, "*")
bmt.print_rank(bmt.inspect.format_summary(model_inspect))
train_info["model_inspect"] = model_inspect
# write log here
if args.tensorboard is not None and bmt.rank() == 0:
writer.add_scalar("Loss/train", task_loss_map[args.task_name], global_steps)
for task_name, loss in task_loss_map.items():
writer.add_scalar("Loss/train/{}".format(task_name), loss, global_steps)
# evaluation
if global_steps % args.eval_interval == 0:
eval_loss = evaluation(model, args, tokenizer, loss_func)
if args.tensorboard is not None and bmt.rank() == 0:
writer.add_scalar("Loss/eval", eval_loss, global_steps)
if eval_loss < best_eval_loss:
best_eval_loss = eval_loss
eval_loss_increase = 0
if args.save is not None:
if not args.use_delta:
bmt.save(model, os.path.join(args.save, args.save_name + "-best.pt"))
else:
state_dict = model.state_dict()
if bmt.rank() == 0:
torch.save(state_dict, os.path.join(args.save, args.save_name + "-delta-best.pt"))
else:
eval_loss_increase += 1
bmt.print_rank(
"| Eval loss: {:.4f} | Increase: {:2d}".format(eval_loss, eval_loss_increase)
)
if eval_loss_increase == args.early_stop_patience:
bmt.print_rank(
"Eval loss has increased {:d} times, the finetune loop early stopped."
.format(eval_loss_increase)
)
return
# end of finetune
def get_args(pretrain: bool = False, finetune: bool = False):
parser = argparse.ArgumentParser()
parser = add_model_config_args(parser)
parser = add_training_args(parser)
if pretrain:
parser = add_pretrain_args(parser)
if finetune:
parser = add_finetune_args(parser)
args = parser.parse_args()
return args
def initialize():
args = get_args(finetune=True)
bmt.init_distributed(seed=args.seed)
if args.save is not None:
os.makedirs(args.save, exist_ok=True)
return args | null |
155,245 | import torch
import torch.nn.functional as F
def top_k_top_p_filtering(logits, top_k=0, top_p=0.0, filter_value=-float("inf")):
# This function has been mostly taken from huggingface conversational ai code at
# https://medium.com/huggingface/how-to-build-a-state-of-the-art-conversational-ai-with-transfer-learning-2d818ac26313
if top_k > 0:
# Remove all tokens with a probability less than the last token of the top-k
indices_to_remove = logits < torch.topk(logits, top_k)[0][..., -1, None]
logits[indices_to_remove] = filter_value
batch_size = logits.size()[0]
if top_p > 0.0:
logits = logits.view(batch_size, -1).contiguous()
for index in range(len(logits)):
sorted_logits, sorted_indices = torch.sort(logits[index].view(-1), descending=True)
cumulative_probs = torch.cumsum(F.softmax(sorted_logits, dim=-1), dim=-1)
# Remove tokens with cumulative probability above the threshold
sorted_indices_to_remove = cumulative_probs > top_p
# Shift the indices to the right to keep also the first token above the threshold
sorted_indices_to_remove[..., 1:] = sorted_indices_to_remove[..., :-1].clone()
sorted_indices_to_remove[..., 0] = 0
indices_to_remove = sorted_indices[sorted_indices_to_remove]
logits[index][indices_to_remove] = filter_value
logits = logits.view(batch_size, -1).contiguous()
return logits | null |
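A toy usage of the filter above: keep the top-k / nucleus portion of the distribution, then sample from what is left (masked entries get probability zero after the softmax).

import torch
import torch.nn.functional as F

logits = torch.tensor([[2.0, 1.0, 0.5, -1.0, -3.0]])
filtered = top_k_top_p_filtering(logits.clone(), top_k=3, top_p=0.9)
probs = F.softmax(filtered, dim=-1)               # tokens 3 and 4 now have probability 0
next_token = torch.multinomial(probs, num_samples=1)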
155,246 | import torch
import torch.nn.functional as F
def apply_repetition_penalty(
logits,
batch_size,
num_beams,
prev_output_tokens,
repetition_penalty,
start_idx=None,
end_idx=None,
window_size=None,
):
# only conduct repetition penalty for the output
assert repetition_penalty >= 1, "repetition penalty coefficient should >= 1"
# repetition penalty (from CTRL paper https://arxiv.org/abs/1909.05858)
for i in range(batch_size * num_beams):
if start_idx is None or end_idx is None:
output_tokens = prev_output_tokens[i].tolist()
else:
if end_idx >= start_idx:
if window_size:
output_tokens = prev_output_tokens[i][
max(start_idx, end_idx + 1 - window_size) : end_idx + 1
].tolist()
else:
output_tokens = prev_output_tokens[i][start_idx : end_idx + 1].tolist()
else:
output_tokens = []
for previous_token in set(output_tokens):
# if score < 0 then repetition penalty has to
# be multiplied to reduce the previous token probability
if logits[i, previous_token] < 0:
logits[i, previous_token] *= repetition_penalty
else:
logits[i, previous_token] /= repetition_penalty | null |
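A toy usage: the helper edits the logits in place, dividing (or multiplying, for negative scores) the scores of tokens that already appeared in the generated prefix.

import torch

logits = torch.tensor([[1.5, -0.5, 0.2, 0.0]])     # batch_size * num_beams == 1
prev_output_tokens = torch.tensor([[0, 2]])        # tokens 0 and 2 were generated earlier
apply_repetition_penalty(
    logits, batch_size=1, num_beams=1,
    prev_output_tokens=prev_output_tokens, repetition_penalty=1.2,
)
# logits is now [[1.25, -0.5, 0.1667, 0.0]]: positive scores of repeated tokens were divided by 1.2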
155,247 | import torch
import bmtrain as bmt
def rms_layernorm(hidden: torch.Tensor, weight: torch.Tensor, eps: float):
old_dtype = hidden.dtype
variance = hidden.to(torch.float32).pow(2).mean(dim=-1, keepdim=True)
hidden = (hidden * torch.rsqrt(variance + eps)).to(old_dtype)
return hidden * weight | null |
155,248 | import torch
def rms_layernorm(hidden: torch.Tensor, weight: torch.Tensor, eps: float):
old_dtype = hidden.dtype
variance = hidden.to(torch.float32).pow(2).mean(dim=-1, keepdim=True)
hidden = (hidden * torch.rsqrt(variance + eps)).to(old_dtype)
return hidden * weight | null |
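A small numerical check of the RMSNorm above: each vector is rescaled so its root-mean-square is 1 before the elementwise weight is applied.

import torch

x = torch.tensor([[3.0, 4.0]])      # RMS = sqrt((9 + 16) / 2) ~= 3.5355
w = torch.ones(2)
out = rms_layernorm(x, w, eps=1e-6)
print(out)                          # ~[[0.8485, 1.1314]], whose RMS is ~1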
155,249 | import os
import struct
from typing import List, Optional
from .distributed_dataset import (
SimpleDataset,
build_dataset,
_read_info_list,
_write_info_list,
_random_string,
_DEFAULT_BLOCK_SIZE,
FileInfo,
)
from .serializer import RawSerializer
import random
import shutil
try:
from tqdm import tqdm
support_tqdm = True
except ModuleNotFoundError:
support_tqdm = False
_DEFAULT_SHUFFLE_BUCKET_SIZE = 1 << 30
def _random_string():
return "".join(random.choices(string.ascii_uppercase + string.digits, k=8))
_DEFAULT_BLOCK_SIZE = 16 << 20
class SimpleDataset(DistributedDataset):
def __init__(
self,
path: str,
serializer: Optional[Serializer] = None,
shuffle: bool = True,
) -> None:
super().__init__(
path,
0,
1,
serializer=serializer,
max_repeat_times=1,
shuffle=shuffle,
)
def __iter__(self):
while True:
try:
data = self.read()
except EOFError:
self._repeat_times = 0
break
yield data
def __len__(self):
return self._nlines
def build_dataset(
path: str,
dbname: str,
block_size: int = _DEFAULT_BLOCK_SIZE,
serializer: Optional[Serializer] = None,
):
"""Open the dataset in write mode and returns a writer.
Args:
path (str): Path to dataset.
dbname (str): The name of the file to which the data will be written. The `dbname` needs to be unique in this `dataset`.
block_size (int): Size of each block in bytes. All files in the same dataset should have the same block size. Default: 16MB
Example:
>>> with build_dataset("/path/to/dataset", "data_part_1") as writer:
>>> for i in range(10):
>>> writer.write( { "anything you want" } )
""" # noqa: E501
return DatasetBuilder(path, dbname, block_size=block_size, serializer=serializer)
class RawSerializer(Serializer):
def __init__(self) -> None:
pass
def serialize(self, obj) -> bytes:
return obj
def deserialize(self, data: bytes):
return data
The provided code snippet includes necessary dependencies for implementing the `shuffle_dataset` function. Write a Python function `def shuffle_dataset( path_src: str, path_tgt: str, block_size: int = _DEFAULT_BLOCK_SIZE, bucket_size: int = _DEFAULT_SHUFFLE_BUCKET_SIZE, progress_bar: bool = False, output_name: Optional[str] = None, )` to solve the following problem:
Shuffle one distributed dataset, write results to another dataset. Args: path_src (str): path to source dataset path_tgt (str): path to write results block_size (int): dataset block size (default: 16MB) bucket_size (int): shuffle algorithm bucket size (default: 1GB) progress_bar (bool): show progress bar Example: >>> shuffle_dataset("/path/to/source", "/path/to/output")
Here is the function:
def shuffle_dataset(
path_src: str,
path_tgt: str,
block_size: int = _DEFAULT_BLOCK_SIZE,
bucket_size: int = _DEFAULT_SHUFFLE_BUCKET_SIZE,
progress_bar: bool = False,
output_name: Optional[str] = None,
):
"""Shuffle one distributed datataset, write results to another dataset.
Args:
path_str (str): path to source dataset
path_tgt (str): path to write results
block_size (int): dataset block size (default: 16MB)
bucket_size (int): shuffle algorithm bucket size (default: 1GB)
progress_bar (bool): show progress bar
Example:
>>> shuffle_dataset("/path/to/source", "/path/to/output")
"""
if progress_bar and not support_tqdm:
raise RuntimeError("Requires `tqdm` to enable progress bar.")
ds = SimpleDataset(path_src, serializer=RawSerializer())
num_buckets = (ds.nbytes + bucket_size - 1) // bucket_size
tmp_files = [os.path.join(path_src, ".tmp.%s" % _random_string()) for _ in range(num_buckets)]
try:
# Step 1: write to bucket randomly
f_tmp = [open(fname, "wb") for fname in tmp_files]
try:
iterator = ds
if progress_bar:
iterator = tqdm(ds, desc="Shuffle step 1/2")
for data in iterator:
bucket_id = int(random.random() * num_buckets)
len_data = len(data)
f_tmp[bucket_id].write(struct.pack("I", len_data) + data)
finally:
# close all files
for fp in f_tmp:
if not fp.closed:
fp.close()
f_tmp = []
# Step 2: shuffle inside bucket
if output_name is None:
output_name = "%s.shuffle" % _random_string()
with build_dataset(
path_tgt,
output_name,
block_size=block_size,
serializer=RawSerializer(),
) as writer:
iterator = tmp_files
if progress_bar:
iterator = tqdm(tmp_files, desc="Shuffle step 2/2")
for fname in iterator:
fp = open(fname, "rb")
data_in_bucket = []
while True:
try:
raw_data = fp.read(4)
if len(raw_data) == 0:
# EOF
break
len_data = struct.unpack("I", raw_data)[0]
data_in_bucket.append(fp.read(len_data))
except EOFError:
break
random.shuffle(data_in_bucket)
for data in data_in_bucket:
writer.write(data)
fp.close()
os.unlink(fname)
finally:
# cleanup
for fname in tmp_files:
if os.path.exists(fname):
os.unlink(fname) | Shuffle one distributed dataset, write results to another dataset. Args: path_src (str): path to source dataset path_tgt (str): path to write results block_size (int): dataset block size (default: 16MB) bucket_size (int): shuffle algorithm bucket size (default: 1GB) progress_bar (bool): show progress bar Example: >>> shuffle_dataset("/path/to/source", "/path/to/output")
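A hedged usage sketch for the two-pass shuffle above (pass 1 scatters records into random on-disk buckets, pass 2 shuffles each bucket in memory and writes the result); paths and the output name are illustrative.

shuffle_dataset(
    "/path/to/source",        # directory containing meta.bin and its data blocks
    "/path/to/output",
    bucket_size=512 << 20,    # smaller buckets lower peak memory at the cost of more files
    progress_bar=True,        # requires tqdm
    output_name="part_0_shuffled",
)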
155,250 | import os
import struct
from typing import List, Optional
from .distributed_dataset import (
SimpleDataset,
build_dataset,
_read_info_list,
_write_info_list,
_random_string,
_DEFAULT_BLOCK_SIZE,
FileInfo,
)
from .serializer import RawSerializer
import random
import shutil
class FileInfo:
def __init__(
self,
file_name: str = "",
block_begin: int = 0,
block_end: int = 0,
nbytes: int = 0,
nlines: int = 0,
mask: bool = False,
block_size: int = _DEFAULT_BLOCK_SIZE,
) -> None:
self.file_name = file_name
self.block_begin = block_begin
self.block_end = block_end
self.nbytes = nbytes
self.nlines = nlines
self.mask = mask
self.block_size = block_size
def state_dict(self):
return {
"file_name": self.file_name,
"block_begin": self.block_begin,
"block_end": self.block_end,
"nbytes": self.nbytes,
"nlines": self.nlines,
"mask": self.mask,
"block_size": self.block_size,
}
def load_state_dict(self, d):
self.file_name = d["file_name"]
self.block_begin = d["block_begin"]
self.block_end = d["block_end"]
self.nbytes = d["nbytes"]
self.nlines = d["nlines"]
self.mask = d["mask"]
self.block_size = d["block_size"]
def dumps(self) -> str:
return json.dumps(self.state_dict())
def loads(self, data: str) -> "FileInfo":
self.load_state_dict(json.loads(data))
return self
def dump(self, fp: io.TextIOWrapper) -> "FileInfo":
fp.write(self.dumps())
return self
def load(self, fp: io.TextIOWrapper) -> "FileInfo":
self.loads(fp.read())
return self
def _read_info_list(meta_path: str) -> List[FileInfo]:
info: List[FileInfo] = []
while True:
try:
with open(meta_path, "r", encoding="utf-8") as f:
for line in f.readlines():
line = line.strip()
if len(line) > 0:
info.append(FileInfo().loads(line))
return info
except Exception as e:
print("Error: reading info list in _read_info_list!,meta_path={path}, err={err}".
format(path=meta_path, err=str(e)))
time.sleep(10)
def _write_info_list(meta_path: str, info: List[FileInfo]):
base_path = os.path.dirname(meta_path)
random_fname = os.path.join(base_path, ".meta.bin.%s" % _random_string())
while True:
try:
with open(random_fname, "w", encoding="utf-8") as f:
for v in info:
f.write(v.dumps() + "\n")
os.rename(random_fname, meta_path)
return
except Exception:
print("Error: writing info list!")
time.sleep(10)
The provided code snippet includes necessary dependencies for implementing the `compact_dataset` function. Write a Python function `def compact_dataset(path: str)` to solve the following problem:
Compact the dataset, removing blocks whose files were deleted. **Note** This may affect the existing dataset state dict. Args: path (str): path to dataset Example: >>> compact_dataset("/path/to/dataset")
Here is the function:
def compact_dataset(path: str):
"""Compact the dataset, removes blocks which the files were deleted.
**Note** This may affect the existing dataset state dict.
Args:
path (str): path to dataset
Example:
>>> compact_dataset("/path/to/dataset")
"""
meta_path = os.path.join(path, "meta.bin")
info: List[FileInfo] = []
if os.path.exists(meta_path):
info = _read_info_list(meta_path)
else:
raise ValueError("Dataset not exists")
nw_info: List[FileInfo] = []
curr_block = 0
for v in info:
if not os.path.exists(v.file_name):
# file is deleted
pass
else:
num_file_block = v.block_end - v.block_begin
nw_info.append(
FileInfo(
v.file_name,
curr_block,
curr_block + num_file_block,
v.nbytes,
v.nlines,
v.mask,
v.block_size,
)
)
curr_block += num_file_block
_write_info_list(meta_path, nw_info) | Compact the dataset, removing blocks whose files were deleted. **Note** This may affect the existing dataset state dict. Args: path (str): path to dataset Example: >>> compact_dataset("/path/to/dataset")
155,251 | import os
import struct
from typing import List, Optional
from .distributed_dataset import (
SimpleDataset,
build_dataset,
_read_info_list,
_write_info_list,
_random_string,
_DEFAULT_BLOCK_SIZE,
FileInfo,
)
from .serializer import RawSerializer
import random
import shutil
class FileInfo:
def __init__(
self,
file_name: str = "",
block_begin: int = 0,
block_end: int = 0,
nbytes: int = 0,
nlines: int = 0,
mask: bool = False,
block_size: int = _DEFAULT_BLOCK_SIZE,
) -> None:
self.file_name = file_name
self.block_begin = block_begin
self.block_end = block_end
self.nbytes = nbytes
self.nlines = nlines
self.mask = mask
self.block_size = block_size
def state_dict(self):
return {
"file_name": self.file_name,
"block_begin": self.block_begin,
"block_end": self.block_end,
"nbytes": self.nbytes,
"nlines": self.nlines,
"mask": self.mask,
"block_size": self.block_size,
}
def load_state_dict(self, d):
self.file_name = d["file_name"]
self.block_begin = d["block_begin"]
self.block_end = d["block_end"]
self.nbytes = d["nbytes"]
self.nlines = d["nlines"]
self.mask = d["mask"]
self.block_size = d["block_size"]
def dumps(self) -> str:
return json.dumps(self.state_dict())
def loads(self, data: str) -> "FileInfo":
self.load_state_dict(json.loads(data))
return self
def dump(self, fp: io.TextIOWrapper) -> "FileInfo":
fp.write(self.dumps())
return self
def load(self, fp: io.TextIOWrapper) -> "FileInfo":
self.loads(fp.read())
return self
def _read_info_list(meta_path: str) -> List[FileInfo]:
info: List[FileInfo] = []
while True:
try:
with open(meta_path, "r", encoding="utf-8") as f:
for line in f.readlines():
line = line.strip()
if len(line) > 0:
info.append(FileInfo().loads(line))
return info
except Exception as e:
print("Error: reading info list in _read_info_list!,meta_path={path}, err={err}".
format(path=meta_path, err=str(e)))
time.sleep(10)
def _write_info_list(meta_path: str, info: List[FileInfo]):
base_path = os.path.dirname(meta_path)
random_fname = os.path.join(base_path, ".meta.bin.%s" % _random_string())
while True:
try:
with open(random_fname, "w", encoding="utf-8") as f:
for v in info:
f.write(v.dumps() + "\n")
os.rename(random_fname, meta_path)
return
except Exception:
print("Error: writing info list!")
time.sleep(10)
The provided code snippet includes necessary dependencies for implementing the `mask_dataset` function. Write a Python function `def mask_dataset(path: str, dbname: str, mask: bool = True)` to solve the following problem:
Mask one file in the dataset. Blocks in masked files won't be read later. Args: path (str): path to dataset dbname (str): file name in this dataset which you want to mask mask (bool): True for mask, False for unmask Example: >>> mask_dataset("/path/to/dataset", "data_part_1", mask=True)
Here is the function:
def mask_dataset(path: str, dbname: str, mask: bool = True):
"""Mask one file in dataset. Blocks in masked datasets won't be read later.
Args:
path (str): path to dataset
dbname (str): file name in this dataset which you want to mask
mask (bool): True for mask, False for unmask
Example:
>>> mask_dataset("/path/to/dataset", "data_part_1", mask=True)
"""
meta_path = os.path.join(path, "meta.bin")
info: List[FileInfo] = []
if os.path.exists(meta_path):
info = _read_info_list(meta_path)
else:
raise ValueError("Dataset not exists")
for v in info:
if v.file_name == dbname:
v.mask = mask
_write_info_list(meta_path, info) | Mask one file in the dataset. Blocks in masked files won't be read later. Args: path (str): path to dataset dbname (str): file name in this dataset which you want to mask mask (bool): True for mask, False for unmask Example: >>> mask_dataset("/path/to/dataset", "data_part_1", mask=True)
155,252 | import os
import struct
from typing import List, Optional
from .distributed_dataset import (
SimpleDataset,
build_dataset,
_read_info_list,
_write_info_list,
_random_string,
_DEFAULT_BLOCK_SIZE,
FileInfo,
)
from .serializer import RawSerializer
import random
import shutil
class FileInfo:
def __init__(
self,
file_name: str = "",
block_begin: int = 0,
block_end: int = 0,
nbytes: int = 0,
nlines: int = 0,
mask: bool = False,
block_size: int = _DEFAULT_BLOCK_SIZE,
) -> None:
self.file_name = file_name
self.block_begin = block_begin
self.block_end = block_end
self.nbytes = nbytes
self.nlines = nlines
self.mask = mask
self.block_size = block_size
def state_dict(self):
return {
"file_name": self.file_name,
"block_begin": self.block_begin,
"block_end": self.block_end,
"nbytes": self.nbytes,
"nlines": self.nlines,
"mask": self.mask,
"block_size": self.block_size,
}
def load_state_dict(self, d):
self.file_name = d["file_name"]
self.block_begin = d["block_begin"]
self.block_end = d["block_end"]
self.nbytes = d["nbytes"]
self.nlines = d["nlines"]
self.mask = d["mask"]
self.block_size = d["block_size"]
def dumps(self) -> str:
return json.dumps(self.state_dict())
def loads(self, data: str) -> "FileInfo":
self.load_state_dict(json.loads(data))
return self
def dump(self, fp: io.TextIOWrapper) -> "FileInfo":
fp.write(self.dumps())
return self
def load(self, fp: io.TextIOWrapper) -> "FileInfo":
self.loads(fp.read())
return self
def _read_info_list(meta_path: str) -> List[FileInfo]:
info: List[FileInfo] = []
while True:
try:
with open(meta_path, "r", encoding="utf-8") as f:
for line in f.readlines():
line = line.strip()
if len(line) > 0:
info.append(FileInfo().loads(line))
return info
except Exception as e:
print("Error: reading info list in _read_info_list!,meta_path={path}, err={err}".
format(path=meta_path, err=str(e)))
time.sleep(10)
def _write_info_list(meta_path: str, info: List[FileInfo]):
base_path = os.path.dirname(meta_path)
random_fname = os.path.join(base_path, ".meta.bin.%s" % _random_string())
while True:
try:
with open(random_fname, "w", encoding="utf-8") as f:
for v in info:
f.write(v.dumps() + "\n")
os.rename(random_fname, meta_path)
return
except Exception:
print("Error: writing info list!")
time.sleep(10)
def merge_dataset(dst: str, src: str):
meta_path_src = os.path.join(src, "meta.bin")
meta_path_dst = os.path.join(dst, "meta.bin")
info_src: List[FileInfo] = []
if os.path.exists(meta_path_src):
info_src = _read_info_list(meta_path_src)
else:
raise ValueError("Dataset not exists")
info_dst: List[FileInfo] = []
if os.path.exists(meta_path_dst):
info_dst = _read_info_list(meta_path_dst)
else:
raise ValueError("Dataset not exists")
curr_block = 0
nw_info: List[FileInfo] = []
for v in info_dst:
num_file_block = v.block_end - v.block_begin
nw_info.append(
FileInfo(
v.file_name,
curr_block,
curr_block + num_file_block,
v.nbytes,
v.nlines,
v.mask,
v.block_size,
)
)
curr_block += num_file_block
for v in info_src:
num_file_block = v.block_end - v.block_begin
dst_db_name = os.path.join(dst, v.file_name)
nw_fname = v.file_name
if os.path.exists(dst_db_name):
idx = 0
while os.path.exists(dst_db_name + "_{}".format(idx)):
idx += 1
dst_db_name = dst_db_name + "_{}".format(idx)
nw_fname = nw_fname + "_{}".format(idx)
shutil.copy(os.path.join(src, v.file_name), dst_db_name)
nw_info.append(
FileInfo(
nw_fname,
curr_block,
curr_block + num_file_block,
v.nbytes,
v.nlines,
v.mask,
v.block_size,
)
)
curr_block += num_file_block
_write_info_list(meta_path_dst, nw_info) | null |
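merge_dataset has no docstring above; a hedged usage sketch: it copies every data file from src into dst, renumbers the copied blocks after dst's existing ones, and suffixes colliding file names with _0, _1, ... Paths are illustrative.

merge_dataset("/path/to/dst_dataset", "/path/to/src_dataset")
combined = SimpleDataset("/path/to/dst_dataset", shuffle=False)
print(len(combined), "records after merging")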
155,253 | import io
import os
import struct
from typing import List, Optional, Set
import torch
import bisect
import bmtrain as bmt
import json
from .serializer import Serializer, PickleSerializer
import random
import string
import time
def _filtered_range(
begin: int, end: int, rank: int, world_size: int, filter_set: Optional[Set[int]] = None
):
begin = begin + (rank + (world_size - (begin % world_size))) % world_size
if filter_set is not None:
return [i for i in range(begin, end, world_size) if i in filter_set]
else:
return [i for i in range(begin, end, world_size)] | null |
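A worked example of the rank filtering above: begin is first advanced to the smallest index >= begin that is congruent to rank modulo world_size, then every world_size-th index is taken (optionally intersected with filter_set).

print(_filtered_range(3, 20, rank=1, world_size=4))                       # [5, 9, 13, 17]
print(_filtered_range(3, 20, rank=1, world_size=4, filter_set={5, 13}))   # [5, 13]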
155,254 | import pkg_resources
import io
from typing import IO, Dict, List, Optional, Tuple
The provided code snippet includes necessary dependencies for implementing the `load_vocab` function. Write a Python function `def load_vocab(fp: IO[bytes]) -> Dict[str, int]` to solve the following problem:
Loads a vocabulary file into a dictionary.
Here is the function:
def load_vocab(fp: IO[bytes]) -> Dict[str, int]:
"""Loads a vocabulary file into a dictionary."""
vocab: Dict[str, int] = {}
reader = io.TextIOWrapper(fp, encoding="utf-8")
for token in reader.readlines():
if token[-1] == "\n":
token = token[:-1]
if len(token) == 0:
continue
vocab[token] = len(vocab)
return vocab | Loads a vocabulary file into a dictionary. |
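A toy usage of load_vocab: ids are assigned in file order and blank lines are skipped.

import io

fake_vocab_file = io.BytesIO("<pad>\n<unk>\n\nhello\n".encode("utf-8"))
print(load_vocab(fake_vocab_file))    # {'<pad>': 0, '<unk>': 1, 'hello': 2}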
155,255 | import jieba
import pkg_resources
import io
from typing import IO
The provided code snippet includes necessary dependencies for implementing the `load_vocab` function. Write a Python function `def load_vocab(fp: IO[bytes])` to solve the following problem:
Loads a vocabulary file into a dictionary.
Here is the function:
def load_vocab(fp: IO[bytes]):
"""Loads a vocabulary file into a dictionary."""
vocab = {}
reader = io.TextIOWrapper(fp, encoding="utf-8")
for token in reader.readlines():
token = token.strip()
if len(token) == 0:
continue
vocab[token] = len(vocab)
return vocab | Loads a vocabulary file into a dictionary. |
155,256 | from collections import OrderedDict
import multiprocessing
import os
from queue import Empty
from typing import Any, Callable, Dict, List, Optional, Set, Tuple, Union
from typing_extensions import TypedDict
from ...dataset import DistributedDataset
from ...tokenizers import CPMBeeTokenizer
from ...utils.config import load_dataset_config
import numpy as np
import time
from numpy.typing import NDArray
import torch
import bmtrain as bmt
import importlib.machinery
import importlib.util
import types
import random
CPMBeeInputType = Union[str, Dict[str, "CPMBeeInputType"]]
class _DictTree(TypedDict):
value: str
children: List["_DictTree"]
depth: int
segment_id: int
need_predict: bool
class _PrevExtTableStates(TypedDict):
ext_table: Dict[int, str]
token_id_table: Dict[str, Dict[int, int]]
def rel_to_bucket(n_up: int, n_down: int, max_depth: int = 8):
ret = n_up * max_depth + n_down
if ret == 0:
return ret
else:
# bucket 1 is reserved for incontext samples
return ret + 1
def convert_data_to_id(
tokenizer: CPMBeeTokenizer,
data: Any,
prev_ext_states: Optional[_PrevExtTableStates] = None,
shuffle_answer: bool = True,
max_depth: int = 8,
):
root: _DictTree = {
"value": "<root>",
"children": [],
"depth": 0,
"segment_id": 0,
"need_predict": False,
}
segments = [root]
def _build_dict_tree(data: CPMBeeInputType, depth: int, need_predict: bool) -> List[_DictTree]:
if isinstance(data, dict):
ret_list: List[_DictTree] = []
curr_items = list(data.items())
if need_predict and shuffle_answer:
access_idx = np.arange(len(curr_items))
np.random.shuffle(access_idx)
curr_items = [curr_items[idx] for idx in access_idx]
for k, v in curr_items:
child_info: _DictTree = {
"value": k,
"children": [],
"depth": depth,
"segment_id": len(segments),
"need_predict": False, # only leaves are contexts
}
segments.append(child_info)
child_info["children"] = _build_dict_tree(
v, depth + 1, need_predict or (depth == 1 and k == "<ans>")
) # elements in <root>.<ans>
ret_list.append(child_info)
return ret_list
else:
assert isinstance(data, str), "Invalid data {}".format(data)
ret: _DictTree = {
"value": data,
"children": [],
"depth": depth,
"segment_id": len(segments),
"need_predict": need_predict,
}
segments.append(ret)
return [ret]
root["children"] = _build_dict_tree(data, 1, False)
num_segments = len(segments)
segment_rel = np.zeros((num_segments * num_segments,), dtype=np.int32)
def _build_segment_rel(node: _DictTree) -> List[Tuple[int, int]]:
ret: List[Tuple[int, int]] = [(node["segment_id"], node["depth"])]
for child in node["children"]:
sub = _build_segment_rel(child)
for seg_id_1, depth_1 in sub:
for seg_id_2, depth_2 in ret:
n_up = min(depth_1 - node["depth"], max_depth - 1)
n_down = min(depth_2 - node["depth"], max_depth - 1)
segment_rel[seg_id_1 * num_segments + seg_id_2] = rel_to_bucket(
n_up, n_down, max_depth=max_depth
)
segment_rel[seg_id_2 * num_segments + seg_id_1] = rel_to_bucket(
n_down, n_up, max_depth=max_depth
)
ret.extend(sub)
return ret
_build_segment_rel(root)
input_ids: List[int] = []
input_id_subs: List[int] = []
segment_bound: List[Tuple[int, int]] = []
ext_table: Dict[int, str] = {}
token_id_table: Dict[str, Dict[int, int]] = {}
if prev_ext_states is not None:
ext_table = prev_ext_states["ext_table"]
token_id_table = prev_ext_states["token_id_table"]
for seg in segments:
tokens, ext_table = tokenizer.encode(seg["value"], ext_table)
token_id_subs = []
reid_token_ids = []
for idx in tokens:
if idx in ext_table:
# unk or special token
token = ext_table[idx]
if token.startswith("<") and token.endswith(">"):
# special token
if "_" in token:
token_name = token[1:-1].split("_", maxsplit=1)[0]
else:
token_name = token[1:-1]
token_name = "<{}>".format(token_name)
else:
token_name = "<unk>"
if token_name not in token_id_table:
token_id_table[token_name] = {}
if idx not in token_id_table[token_name]:
token_id_table[token_name][idx] = len(token_id_table[token_name])
if token_name not in tokenizer.encoder:
raise ValueError("Invalid token {}".format(token))
reid_token_ids.append(tokenizer.encoder[token_name])
token_id_subs.append(token_id_table[token_name][idx])
else:
reid_token_ids.append(idx)
token_id_subs.append(0)
tokens = [tokenizer.bos_id] + reid_token_ids
token_id_subs = [0] + token_id_subs
if not seg["need_predict"]:
tokens = tokens + [tokenizer.eos_id]
token_id_subs = token_id_subs + [0]
else:
# no eos
pass
begin = len(input_ids)
input_ids.extend(tokens)
input_id_subs.extend(token_id_subs)
end = len(input_ids)
segment_bound.append((begin, end))
ids = np.array(input_ids, dtype=np.int32)
id_subs = np.array(input_id_subs, dtype=np.int32)
segs = np.zeros((ids.shape[0],), dtype=np.int32)
context = np.zeros((ids.shape[0],), dtype=np.int8)
for i, (begin, end) in enumerate(segment_bound):
if not segments[i]["need_predict"]:
context[begin:end] = 1
segs[begin:end] = i
curr_ext_table_states: _PrevExtTableStates = {
"ext_table": ext_table,
"token_id_table": token_id_table,
}
return ids, id_subs, context, segs, segment_rel, num_segments, curr_ext_table_states | null |
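Two small checks that may help when reading convert_data_to_id: the bucket id for a pair of segments encodes how far each one sits above their common ancestor (bucket 1 is skipped because it is reserved for in-context samples), and the encoder consumes nested dicts whose "<ans>" subtree marks what must be predicted. The keys in the commented sample are illustrative.

assert rel_to_bucket(0, 0) == 0                # a segment related to itself
assert rel_to_bucket(2, 1) == 2 * 8 + 1 + 1    # == 18 with the default max_depth=8

# example = {"input": "今天天气真好。", "<ans>": ""}
# ids, id_subs, context, segs, rel, n_seg, ext_states = convert_data_to_id(tokenizer, example)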
155,257 | from collections import OrderedDict
import multiprocessing
import os
from queue import Empty
from typing import Any, Callable, Dict, List, Optional, Set, Tuple, Union
from typing_extensions import TypedDict
from ...dataset import DistributedDataset
from ...tokenizers import CPMBeeTokenizer
from ...utils.config import load_dataset_config
import numpy as np
import time
from numpy.typing import NDArray
import torch
import bmtrain as bmt
import importlib.machinery
import importlib.util
import types
import random
class _MixedDatasetConfig(TypedDict):
weight: float
path: str
transforms: Union[List[Dict[str, Any]], str]
task_name: str
dataset_name: str
incontext_weight: List[float]
lines: int
dataset: DistributedDataset
def _dataset_identity(c: _MixedDatasetConfig):
return "{}.{}".format(c["task_name"], c["dataset_name"])
class _MixedDatasetBatchPacker:
def __init__(
self,
batch_size: int,
max_length: int,
tokenizer: CPMBeeTokenizer,
max_depth: int = 16,
) -> None:
self._batch_size = batch_size
self._max_length = max_length
self._max_depth = max_depth
self.tokenizer = tokenizer
self._transform_func_table: Dict[str, _TransformFuncDict] = {}
self._inputs: List[NDArray[np.int32]] = []
self._inputs_sub: List[NDArray[np.int32]] = []
self._context: List[NDArray[np.int8]] = []
self._sample_ids: List[NDArray[np.int32]] = []
self._segments: List[NDArray[np.int32]] = []
self._num_segments: List[NDArray[np.int32]] = []
self._segment_rel_offset: List[NDArray[np.int32]] = []
self._segment_rel: List[NDArray[np.int32]] = []
self._spans: List[List[int]] = []
self._task_ids: List[List[str]] = []
self._raw_data: List[List[Any]] = []
def __len__(self):
return len(self._inputs)
def apply_transform(
self,
data: CPMBeeInputType,
transform: Union[Dict[str, Any], Callable[[CPMBeeInputType], CPMBeeInputType], None],
) -> CPMBeeInputType:
if transform is None:
return data
if not isinstance(transform, dict):
# transform function
return transform(data)
mapping_list: List[Tuple[str, str]] = []
def _walk_transform_dict(data: Union[Dict[str, Any], str], prefix: str = ""):
if isinstance(data, dict):
for k, v in data.items():
if len(prefix) > 0:
_walk_transform_dict(v, prefix + "." + k)
else:
_walk_transform_dict(v, k)
else:
assert isinstance(data, str), "Invalid transform {}".format(data)
mapping_list.append((prefix, data))
_walk_transform_dict(transform)
expanded_mapping_list: List[Tuple[str, Any]] = []
def _expand_mapping(
data: CPMBeeInputType, stars: List[str], path: List[str], target: List[str]
):
if len(path) == 0:
num_stars = 0
for it in target:
if it == "*":
num_stars += 1
if num_stars != len(stars):
raise ValueError("Invalid transform {}".format(".".join(target)))
nw_tgt = []
num_stars = 0
for it in target:
if it == "*":
nw_tgt.append(stars[num_stars])
num_stars += 1
else:
nw_tgt.append(it)
expanded_mapping_list.append((".".join(nw_tgt), data))
else:
if not isinstance(data, dict):
raise ValueError("Invalid data {}".format(data))
if path[0] == "*":
for k, v in data.items():
_expand_mapping(v, stars + [k], path[1:], target)
else:
_expand_mapping(data[path[0]], stars, path[1:], target)
# expand mapping list
for tgt, src in mapping_list:
if src.startswith("$"):
# copy from src
_expand_mapping(data, [], src[1:].split("."), tgt.split("."))
else:
if "*" in tgt:
raise ValueError("Constant value is not allowed to have `*` in prefix")
expanded_mapping_list.append((tgt, src))
ret = {}
for tgt, val in expanded_mapping_list:
tgt = tgt.split(".")
cur = ret
while len(tgt) > 1:
cur = cur[tgt[0]]
tgt = tgt[1:]
cur[tgt[0]] = val
return ret
def data_to_id(
self,
data: Any,
prev_ext_states: Optional[_PrevExtTableStates] = None,
shuffle_answer: bool = True,
):
return convert_data_to_id(
self.tokenizer, data, prev_ext_states, shuffle_answer, self._max_depth
)
def _ensure_transform_function(
self, module_name: str, transform_script_path: str
) -> _TransformFunction:
module_name = "cpm_live.transforms.{}".format(module_name)
if transform_script_path not in self._transform_func_table:
loader = importlib.machinery.SourceFileLoader(module_name, transform_script_path)
spec = importlib.util.spec_from_loader(loader.name, loader)
if spec is None:
raise RuntimeError("spec is none! {}".format(module_name))
mod = importlib.util.module_from_spec(spec)
self._transform_func_table[transform_script_path] = {
"loader": loader,
"module": mod,
"last_m": 0,
}
transform_script_info = self._transform_func_table[transform_script_path]
curr_m_time = float(
transform_script_info["loader"].path_stats(transform_script_path)["mtime"]
)
if curr_m_time > transform_script_info["last_m"]:
transform_script_info["last_m"] = curr_m_time
transform_script_info["loader"].exec_module(transform_script_info["module"])
transform_func = getattr(transform_script_info["module"], "transform", None)
if transform_func is None:
def _empty_transform_func(data: CPMBeeInputType, num_sample: int, r: random.Random):
raise NotImplementedError(
"Transform func for dataset {} not implemented".format(module_name)
)
return _empty_transform_func
else:
return transform_func
def build_instance(self, config: _MixedDatasetConfig):
_sample_weight = np.array(config["incontext_weight"], dtype=np.float32)
_sample_weight = _sample_weight / _sample_weight.sum()
num_incontext = np.random.choice(_sample_weight.shape[0], p=_sample_weight)
ds = config["dataset"]
transforms = config["transforms"]
if isinstance(transforms, str):
while True:
try:
if not os.path.exists(transforms):
raise RuntimeError(
"transform script file {} not exists".format(transforms)
)
# load transform script
transform_func = self._ensure_transform_function(
_dataset_identity(config), transforms
)
seed = random.random()
break
except Exception as e:
print(e)
time.sleep(10)
def _transform(data: CPMBeeInputType):
r = random.Random(seed)
return transform_func(data, num_incontext, r)
transform = _transform
elif len(transforms) == 0:
transform = None
else:
transform = transforms[np.random.choice(len(transforms))]
raw_data = {}
while True:
inp = ds.read()
inp = self.apply_transform(inp, transform)
(
input_ids,
input_id_subs,
context,
segment_ids,
segment_rel,
n_segments,
table_states,
) = self.data_to_id(inp)
if input_ids.shape[0] > self._max_length:
# too long
continue
input_ids = input_ids[: self._max_length]
context = context[: self._max_length]
segment_ids = segment_ids[: self._max_length]
raw_data["input"] = inp
raw_data["samples"] = []
break
sample_ids = np.zeros(input_ids.shape, dtype=np.int32)
segment_rel_offset = np.zeros(input_ids.shape, dtype=np.int32)
num_segments = np.full(input_ids.shape, n_segments, dtype=np.int32)
for i in range(num_incontext):
if input_ids.shape[0] >= self._max_length:
# early break
break
sample = ds.read()
sample = self.apply_transform(sample, transform)
(
sample_input_ids,
sample_id_subs,
_,
sample_segments,
sample_rel,
n_segments,
table_states,
) = self.data_to_id(sample, table_states)
if input_ids.shape[0] + sample_input_ids.shape[0] > self._max_length:
# too long, break
break
raw_data["samples"].append(sample)
input_ids = np.concatenate([input_ids, sample_input_ids], axis=0)
input_id_subs = np.concatenate([input_id_subs, sample_id_subs], axis=0)
context = np.concatenate(
[context, np.ones(sample_input_ids.shape, dtype=np.int8)], axis=0
)
segment_ids = np.concatenate([segment_ids, sample_segments], axis=0)
segment_rel_offset = np.concatenate(
[
segment_rel_offset,
np.full(sample_input_ids.shape, segment_rel.shape[0], dtype=np.int32),
],
axis=0,
)
segment_rel = np.concatenate([segment_rel, sample_rel], axis=0)
sample_ids = np.concatenate(
[sample_ids, np.full(sample_input_ids.shape, i + 1, dtype=np.int32)], axis=0
)
num_segments = np.concatenate(
[num_segments, np.full(sample_input_ids.shape, n_segments, dtype=np.int32)], axis=0
)
return (
input_ids,
input_id_subs,
context,
segment_ids,
segment_rel_offset,
segment_rel,
sample_ids,
num_segments,
raw_data,
)
def pack_batch(self, force: bool = False) -> CPMBeeBatch:
# pack batch
if len(self._inputs) < self._batch_size:
if not force:
raise RuntimeError("Batch insufficient")
batch_size = len(self._inputs)
else:
batch_size = self._batch_size
inputs = np.zeros((batch_size, self._max_length), dtype=np.int32)
inputs_sub = np.zeros((batch_size, self._max_length), dtype=np.int32)
context = np.zeros((batch_size, self._max_length), dtype=np.int8)
sample_ids = np.zeros((batch_size, self._max_length), dtype=np.int32)
segments = np.zeros((batch_size, self._max_length), dtype=np.int32)
num_segments = np.zeros((batch_size, self._max_length), dtype=np.int32)
segment_rel_offset = np.zeros((batch_size, self._max_length), dtype=np.int32)
tgt = np.full((batch_size, self._max_length), -100, dtype=np.int32)
max_rel = 0
for i in range(batch_size):
max_rel = max(max_rel, self._segment_rel[i].shape[0])
segment_rel = np.zeros((batch_size, max_rel), dtype=np.int32)
spans = np.zeros((batch_size, self._max_length), dtype=np.int32)
length = np.zeros((batch_size,), dtype=np.int32)
task_ids = np.zeros((batch_size, self._max_length), dtype=np.int32)
all_task_names: Set[str] = set()
for i in range(batch_size):
for task_name in self._task_ids[i]:
all_task_names.add(task_name)
task_names: List[str] = list(all_task_names)
task_name_to_id = {name: i for i, name in enumerate(task_names)}
batch_ext_table_map: Dict[Tuple[int, int], int] = {}
batch_ext_table_ids: List[int] = []
batch_ext_table_sub: List[int] = []
raw_data_list: List[Any] = []
for i in range(batch_size):
instance_length = self._inputs[i].shape[0]
rel_size = self._segment_rel[i].shape[0]
inputs[i, :instance_length] = self._inputs[i]
inputs_sub[i, :instance_length] = self._inputs_sub[i]
context[i, :instance_length] = self._context[i]
sample_ids[i, :instance_length] = self._sample_ids[i]
segments[i, :instance_length] = self._segments[i]
num_segments[i, :instance_length] = self._num_segments[i]
segment_rel_offset[i, :instance_length] = self._segment_rel_offset[i]
segment_rel[i, :rel_size] = self._segment_rel[i]
span_begin = 0
for span_id, (span_end, task_name) in enumerate(zip(self._spans[i], self._task_ids[i])):
spans[i, span_begin:span_end] = span_id
task_ids[i, span_begin:span_end] = task_name_to_id[task_name]
span_begin = span_end
length[i] = instance_length
raw_data_list.extend(self._raw_data[i])
for j in range(instance_length):
idx, idx_sub = self._inputs[i][j], self._inputs_sub[i][j]
tgt_idx = idx
if idx_sub > 0:
# need to be in ext table
if (idx, idx_sub) not in batch_ext_table_map:
batch_ext_table_map[(idx, idx_sub)] = len(batch_ext_table_map)
batch_ext_table_ids.append(idx)
batch_ext_table_sub.append(idx_sub)
tgt_idx = batch_ext_table_map[(idx, idx_sub)] + self.tokenizer.vocab_size
if j > 1 and context[i, j - 1] == 0:
if idx != self.tokenizer.bos_id:
tgt[i, j - 1] = tgt_idx
else:
tgt[i, j - 1] = self.tokenizer.eos_id
if context[i, instance_length - 1] == 0:
tgt[i, instance_length - 1] = self.tokenizer.eos_id
if len(batch_ext_table_map) == 0:
# placeholder
batch_ext_table_ids.append(0)
batch_ext_table_sub.append(1)
self._inputs = self._inputs[batch_size:]
self._inputs_sub = self._inputs_sub[batch_size:]
self._context = self._context[batch_size:]
self._sample_ids = self._sample_ids[batch_size:]
self._segments = self._segments[batch_size:]
self._num_segments = self._num_segments[batch_size:]
self._segment_rel_offset = self._segment_rel_offset[batch_size:]
self._segment_rel = self._segment_rel[batch_size:]
self._spans = self._spans[batch_size:]
self._task_ids = self._task_ids[batch_size:]
self._raw_data = self._raw_data[batch_size:]
return {
"inputs": inputs,
"inputs_sub": inputs_sub,
"length": length,
"context": context > 0,
"sample_ids": sample_ids,
"num_segments": num_segments,
"segment_ids": segments,
"segment_rel_offset": segment_rel_offset,
"segment_rel": segment_rel,
"spans": spans,
"target": tgt,
"ext_ids": np.array(batch_ext_table_ids, dtype=np.int32),
"ext_sub": np.array(batch_ext_table_sub, dtype=np.int32),
"task_ids": task_ids,
"task_names": task_names,
"raw_data": raw_data_list,
}
def add_data(self, config: _MixedDatasetConfig) -> Optional[CPMBeeBatch]:
(
input_ids,
input_id_subs,
context,
segment_ids,
segment_rel_offset,
segment_rel,
sample_ids,
num_segments,
raw_data,
) = self.build_instance(config)
# add to batch
best_fit: Union[None, int] = None
best_fit_space: Union[None, int] = None
for i in range(len(self._inputs)):
space = self._max_length - self._inputs[i].shape[0]
if input_ids.shape[0] <= space:
if best_fit_space is None:
best_fit = i
best_fit_space = space
elif best_fit_space > space:
best_fit = i
best_fit_space = space
if best_fit is None:
# add a new instance
self._inputs.append(input_ids)
self._inputs_sub.append(input_id_subs)
self._context.append(context)
self._sample_ids.append(sample_ids)
self._segments.append(segment_ids)
self._num_segments.append(num_segments)
self._segment_rel_offset.append(segment_rel_offset)
self._segment_rel.append(segment_rel)
self._spans.append([input_ids.shape[0]])
self._task_ids.append([config["task_name"]])
self._raw_data.append([raw_data])
else:
# add to existing instance
self._inputs[best_fit] = np.concatenate([self._inputs[best_fit], input_ids], axis=0)
self._inputs_sub[best_fit] = np.concatenate(
[self._inputs_sub[best_fit], input_id_subs], axis=0
)
self._context[best_fit] = np.concatenate([self._context[best_fit], context], axis=0)
self._sample_ids[best_fit] = np.concatenate(
[self._sample_ids[best_fit], sample_ids], axis=0
)
self._segments[best_fit] = np.concatenate(
[self._segments[best_fit], segment_ids], axis=0
)
self._num_segments[best_fit] = np.concatenate(
[self._num_segments[best_fit], num_segments], axis=0
)
self._segment_rel_offset[best_fit] = np.concatenate(
[
self._segment_rel_offset[best_fit],
segment_rel_offset + self._segment_rel[best_fit].shape[0],
],
axis=0,
)
self._segment_rel[best_fit] = np.concatenate(
[self._segment_rel[best_fit], segment_rel], axis=0
)
self._spans[best_fit].append(self._inputs[best_fit].shape[0])
self._task_ids[best_fit].append(config["task_name"])
self._raw_data[best_fit].append(raw_data)
if len(self._inputs) > self._batch_size:
return self.pack_batch()
else:
# not ready
return None
class _MixedDatasetConfigMananger:
def __init__(self, config_path: str) -> None:
self._config_path: str = config_path
self._config: Union[List[_MixedDatasetConfig], None] = None
self._last_m = 0
def changed(self):
while True:
try:
m_time = os.stat(self._config_path).st_mtime
if m_time > self._last_m:
# try to load new config
try:
self._config = load_dataset_config(self._config_path)
except Exception as e:
# failed to load config
print(
"Error: load new config in changed, "
"self._config_path={path}, err={err}"
.format(path=self._config_path, err=str(e))
)
return False
# new config loaded
self._last_m = m_time
return True
return False
except Exception as e:
print("Error: reading info list in _MixedDatasetConfigMananger.changed!, "
"self._config_path={path}, err={err}"
.format(path=self._config_path, err=str(e)))
time.sleep(30)
def get_config(self) -> List[_MixedDatasetConfig]:
if self._config is None:
if not self.changed():
raise RuntimeError("Failed to load config")
if self._config is None:
raise RuntimeError("Failed to load config")
return self._config
def _mixed_dataset_process(
config_path: str,
q_cmd: multiprocessing.Queue,
q_cmd_out: multiprocessing.Queue,
q_data: multiprocessing.Queue,
rank: int,
world_size: int,
packer: _MixedDatasetBatchPacker,
):
# ignore SIGINT
import signal
signal.signal(signal.SIGINT, signal.SIG_IGN)
config_base_path = os.path.dirname(os.path.abspath(config_path))
def _convert_to_abs_path(transform_path: str):
if transform_path.startswith("/"):
return transform_path
else:
return os.path.join(config_base_path, transform_path)
def _build_sample_weights(config: List[_MixedDatasetConfig]):
if len(config) == 0:
return np.array([], dtype=np.float32)
weights = [c["weight"] * c["lines"] for c in config]
weights = np.array(weights, dtype=np.float32)
sm_weight = weights.sum()
if sm_weight > 0:
weights = weights / sm_weight
return weights
else:
raise RuntimeError("Empty datasets")
cfg_mgr = _MixedDatasetConfigMananger(config_path)
config = cfg_mgr.get_config()
for c in config:
ds = DistributedDataset(
_convert_to_abs_path(c["path"]),
rank,
world_size,
)
c["lines"] = ds._nlines
c["dataset"] = ds
if "weight" not in c:
c["weight"] = 1.0
if "transforms" not in c:
c["transforms"] = []
elif isinstance(c["transforms"], str):
c["transforms"] = _convert_to_abs_path(c["transforms"])
if "incontext_weight" not in c:
c["incontext_weight"] = [1.0]
weights = _build_sample_weights(config)
should_stop = False
should_start = False
while not should_stop:
# update config first
if cfg_mgr.changed():
path_ds_map: Dict[str, _MixedDatasetConfig] = {}
nw_path_set: Set[str] = set()
# load new config
nw_config = cfg_mgr.get_config()
# build path -> dataset map
for c in config:
path_ds_map[_dataset_identity(c)] = c
# add new datasets
for c in nw_config:
if _dataset_identity(c) in path_ds_map:
# update values only
if "weight" in c:
path_ds_map[_dataset_identity(c)]["weight"] = c["weight"]
                    if "transforms" in c:
if isinstance(c["transforms"], str):
path_ds_map[_dataset_identity(c)]["transforms"] = _convert_to_abs_path(
c["transforms"]
)
else:
path_ds_map[_dataset_identity(c)]["transforms"] = c["transforms"]
if "incontext_weight" in c:
path_ds_map[_dataset_identity(c)]["incontext_weight"] = c[
"incontext_weight"
]
else:
# new dataset
ds = DistributedDataset(
_convert_to_abs_path(c["path"]),
rank,
world_size,
)
c["lines"] = ds._nlines
c["dataset"] = ds
if "weight" not in c:
c["weight"] = 1.0
if "transforms" not in c:
c["transforms"] = []
elif isinstance(c["transforms"], str):
c["transforms"] = _convert_to_abs_path(c["transforms"])
if "incontext_weight" not in c:
c["incontext_weight"] = [1.0]
path_ds_map[_dataset_identity(c)] = c
nw_path_set.add(_dataset_identity(c))
# remove unused datasets
for c in config:
if _dataset_identity(c) not in nw_path_set:
del path_ds_map[_dataset_identity(c)]
config: List[_MixedDatasetConfig] = []
for c in nw_config:
config.append(path_ds_map[_dataset_identity(c)])
del path_ds_map
del nw_path_set
del nw_config
weights = _build_sample_weights(config)
# get cmds
while True:
try:
cmd = q_cmd.get_nowait()
except Empty:
break
if cmd == "stop":
should_stop = True
q_cmd_out.put(True)
break
elif cmd == "state_dict":
ret = OrderedDict()
for c in config:
ds_name = _dataset_identity(c)
ret[ds_name] = c["dataset"]._state_dict()
q_cmd_out.put(ret)
elif cmd == "load_state_dict":
state_dict = q_cmd.get()
missing = []
for c in config:
ds_name = _dataset_identity(c)
if ds_name in state_dict:
c["dataset"].load_state_dict(state_dict[ds_name], strict=False)
else:
# new dataset
missing.append(ds_name)
q_cmd_out.put(missing)
elif cmd == "start":
should_start = True
q_cmd_out.put(True)
else:
raise RuntimeError("Unknown command: {}".format(cmd))
if should_stop:
break
if not should_start:
# wait for start cmd
time.sleep(1)
continue
if len(config) == 0:
# no dataset available
time.sleep(1)
continue
if q_data.full():
# queue full
time.sleep(1)
continue
# sample a dataset
ds_id: int = 0
while True:
ds_id = np.random.choice(weights.shape[0], p=weights)
if config[ds_id]["dataset"]._nlines != config[ds_id]["lines"]:
# dataset size changed
for c in config:
c["lines"] = c["dataset"]._nlines
weights = _build_sample_weights(config)
continue
else:
break
batch = packer.add_data(config[ds_id])
if batch is not None:
            # new batch coming
q_data.put(batch)
# clean queue
while True:
try:
q_data.get_nowait()
except Empty:
break | null |
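The packing branch above uses a best-fit heuristic: each new instance goes into the packed sequence whose remaining capacity is smallest but still sufficient, and a fresh sequence is opened only when nothing fits. A minimal standalone sketch of that selection rule; the name pack_best_fit and the plain-list representation are illustrative and not part of the original code:
def pack_best_fit(lengths, max_length):
    # bins holds one list of item lengths per packed sequence
    bins = []
    for n in lengths:
        best, best_space = None, None
        for i, b in enumerate(bins):
            space = max_length - sum(b)
            # fits, and leaves less slack than the current best candidate
            if n <= space and (best_space is None or space < best_space):
                best, best_space = i, space
        if best is None:
            bins.append([n])      # nothing fits: open a new sequence
        else:
            bins[best].append(n)  # append to the tightest-fitting sequence
    return bins
# pack_best_fit([900, 300, 700, 200], 1024) -> [[900], [300, 700], [200]]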
155,258 | import os
import time
import functools
import torch
import bmtrain as bmt
import json
from cpm_live.models import CPMBee
from .log import logger
from typing import List, Optional
def rename_if_exists(file_path):
def rename_if_exists_decorator(func):
@functools.wraps(func)
def wrapper(file_path, *args, **kwargs):
rename_if_exists(file_path)
return func(file_path, *args, **kwargs)
return wrapper | null |
155,259 | import os
import time
import functools
import torch
import bmtrain as bmt
import json
from cpm_live.models import CPMBee
from .log import logger
from typing import List, Optional
def bmt_save(file_path: str, model: CPMBee, export_files: Optional[List[str]] = None):
bmt.save(model, file_path)
if export_files is not None:
export_files.append(file_path) | null |
155,260 | import os
import time
import functools
import torch
import bmtrain as bmt
import json
from cpm_live.models import CPMBee
from .log import logger
from typing import List, Optional
def torch_save(file_path: str, obj: object, export_files: Optional[List[str]] = None):
torch.save(obj, file_path)
if export_files is not None:
export_files.append(file_path) | null |
155,261 | import os
import time
import functools
import torch
import bmtrain as bmt
import json
from cpm_live.models import CPMBee
from .log import logger
from typing import List, Optional
def json_save(file_path: str, obj: object, export_files: Optional[List[str]] = None):
with open(file_path, "w") as data_f:
json.dump(obj, data_f)
if export_files is not None:
export_files.append(file_path) | null |
155,262 | import torch
def pad(orig_items, key, padding_value=0, padding_side="left"):
items = []
if isinstance(orig_items[0][key], list):
assert isinstance(orig_items[0][key][0], torch.Tensor)
for it in orig_items:
for tr in it[key]:
items.append({key: tr})
else:
assert isinstance(orig_items[0][key], torch.Tensor)
items = orig_items
batch_size = len(items)
shape = items[0][key].shape
dim = len(shape)
assert dim <= 3
max_length = max(item[key].shape[-1] for item in items)
min_length = min(item[key].shape[-1] for item in items)
dtype = items[0][key].dtype
if dim == 1:
return torch.cat([item[key] for item in items], dim=0)
elif dim == 2:
if max_length == min_length:
return torch.cat([item[key] for item in items], dim=0)
tensor = torch.zeros((batch_size, max_length), dtype=dtype) + padding_value
else:
tensor = torch.zeros((batch_size, max_length, shape[-1]), dtype=dtype) + padding_value
for i, item in enumerate(items):
if dim == 2:
if padding_side == "left":
tensor[i, -len(item[key][0]) :] = item[key][0].clone()
else:
tensor[i, : len(item[key][0])] = item[key][0].clone()
elif dim == 3:
if padding_side == "left":
tensor[i, -len(item[key][0]) :, :] = item[key][0].clone()
else:
tensor[i, : len(item[key][0]), :] = item[key][0].clone()
return tensor | null |
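A short usage sketch for the pad function above, assuming 2-D input_ids tensors of unequal length; the key name and values are illustrative only:
import torch
batch = [
    {"input_ids": torch.tensor([[1, 2, 3]])},
    {"input_ids": torch.tensor([[4, 5]])},
]
padded = pad(batch, "input_ids", padding_value=0, padding_side="left")
# left-padded to the longest sequence in the batch:
# tensor([[1, 2, 3],
#         [0, 4, 5]])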
155,263 | import bmtrain as bmt
import pickle
import torch
def allgather_objects(obj):
if bmt.world_size() == 1:
return [obj]
with torch.no_grad():
data_bytes: bytes = pickle.dumps(obj)
data_length: int = len(data_bytes)
gpu_data_length = torch.tensor([data_length], device="cuda", dtype=torch.long)
gathered_length = bmt.distributed.all_gather(gpu_data_length).view(-1).cpu()
max_data_length = gathered_length.max().item()
gpu_data_bytes = torch.zeros(max_data_length, dtype=torch.uint8, device="cuda")
byte_storage = torch.ByteStorage.from_buffer(data_bytes)
gpu_data_bytes[:data_length] = torch.ByteTensor(byte_storage)
gathered_data = bmt.distributed.all_gather(gpu_data_bytes).cpu()
ret = []
for i in range(gathered_data.size(0)):
data_bytes = gathered_data[i, : gathered_length[i].item()].numpy().tobytes()
ret.append(pickle.loads(data_bytes))
return ret | null |
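A hedged usage sketch for allgather_objects; it assumes bmt.init_distributed() has already been called and that every rank passes in a picklable object:
import bmtrain as bmt
# bmt.init_distributed() is assumed to have run once at startup
local_stats = {"rank": bmt.rank(), "loss": 0.5}
all_stats = allgather_objects(local_stats)
# all_stats is a list with one entry per rank, in rank order,
# e.g. [{"rank": 0, "loss": 0.5}, {"rank": 1, "loss": 0.47}, ...]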
155,264 | import json
import os
import copy
from typing import Any, Dict, Union
from .log import logger
logger = _get_logger()
def load_dataset_config(dataset_path: str):
cfg = json.load(open(dataset_path, "r", encoding="utf-8"))
platform_config_path = os.getenv("PLATFORM_CONFIG_PATH")
if platform_config_path is None:
logger.info(
"no platform_config_path. Directly load dataset_path({dataset_path})"
.format(dataset_path=dataset_path)
)
return cfg
path_dict = json.load(open(platform_config_path, "r", encoding="utf-8"))["dataset_map"]
logger.info(
"load dataset_path({dataset_path}) with platform_config_path({platform_config_path})"
.format(dataset_path=dataset_path, platform_config_path=platform_config_path)
)
for dataset in cfg:
dataset["path"] = os.path.join(path_dict[dataset["dataset_name"]], dataset["path"])
dataset["transforms"] = os.path.join(
path_dict[dataset["dataset_name"]], dataset["transforms"]
)
return cfg | null |
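For reference, a plausible shape for the JSON file that load_dataset_config reads, based on the keys consumed elsewhere in this code (path, transforms, weight, incontext_weight, task_name, plus dataset_name when PLATFORM_CONFIG_PATH is set); the concrete names and values below are illustrative only:
import json
example_config = [
    {
        "dataset_name": "wiki_zh",          # hypothetical dataset
        "task_name": "lm_wiki_zh",
        "path": "datasets/wiki_zh",
        "transforms": "transforms/wiki_zh.py",
        "weight": 1.0,
        "incontext_weight": [1.0],
    }
]
with open("datasets.json", "w", encoding="utf-8") as f:
    json.dump(example_config, f, ensure_ascii=False, indent=4)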
155,265 | import torch
class OpGradientShrink(torch.autograd.Function):
    @staticmethod
    def forward(ctx, x: torch.Tensor, alpha: float):
        ctx.alpha = alpha
        return x
    @staticmethod
    def backward(ctx, grad_output):
        return grad_output * ctx.alpha, None
def gradient_shrink(x: torch.Tensor, alpha: float = 0.1):
return OpGradientShrink.apply(x, alpha) | null |
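A brief usage sketch for gradient_shrink: the forward value passes through unchanged while the gradient flowing back through it is scaled by alpha (the tensor here is illustrative):
import torch
x = torch.ones(3, requires_grad=True)
y = gradient_shrink(x, alpha=0.1).sum()
y.backward()
print(x.grad)  # tensor([0.1000, 0.1000, 0.1000]) -- gradient shrunk by alpha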
155,266 | import os
import sys
from typing import Any, Dict, Optional, Tuple, Union
import datetime
import json
import logging
import bmtrain as bmt
def _get_logger():
    log = logging.getLogger(__name__)
log.setLevel(logging.INFO)
console_handle = logging.StreamHandler(sys.stdout)
node_name = os.getenv("NODE_NAME", str(bmt.rank()))
console_handle.setFormatter(
logging.Formatter(
'[%(levelname)s][%(asctime)s][{}][%(filename)s:%(lineno)d:%(process)d] - %(message)s'
.format(node_name),
datefmt='%Y-%m-%d %H:%M:%S'
)
)
log.addHandler(console_handle)
return log | null |
155,267 | import os
import sys
import argparse
import json
import re
from tqdm import tqdm
from cpm_live.dataset import build_dataset
def get_args():
parser = argparse.ArgumentParser()
parser.add_argument("--input-path", type=str, help="raw data path", required=True)
parser.add_argument("--output-path", type=str, help="output dataset path", required=True)
parser.add_argument("--output-name", type=str, help="output dataset name", required=True)
parser.add_argument("--data-type", type=str, help="raw data type can be 'json' or 'txt'", required=True)
parser.add_argument("--min-length", type=int, default=100, help="the min length of a final example")
parser.add_argument("--max-length", type=int, default=2000, help="the max length of a final example")
parser.add_argument("--max-depth", type=int, default=1000, help="the max recursion depth of segmenting data")
args = parser.parse_args()
return args | null |
155,268 | import os
import sys
import argparse
import json
import re
from tqdm import tqdm
from cpm_live.dataset import build_dataset
def split_sent(data_, depth, args, seg):
if len(data_) < args.min_length:
return []
    if len(data_) > args.max_length and depth < args.max_depth:
if '\n' not in data_.strip():
return [{"text":data_}]
mid = int(len(data_)/2)
while mid > 0 and (data_[mid - 1] not in seg):
mid -= 1
if mid == 0:
mid = int(len(data_)/2)
            while mid < len(data_) and (data_[mid - 1] not in seg):
                mid += 1
ret = []
ret.extend(split_sent(data_[:mid], depth + 1, args, seg))
ret.extend(split_sent(data_[mid:], depth + 1, args, seg))
return ret
else:
return [{"text": data_}] | null |
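A small usage sketch for split_sent, using an argparse Namespace in place of the parsed CLI args and a hypothetical set of delimiter characters (the seg value actually used is not shown in this snippet):
from argparse import Namespace
args = Namespace(min_length=10, max_length=50, max_depth=1000)
seg = {"\n", "。", "！", "？"}  # hypothetical split characters
long_text = "\n".join(["这是一条用来演示切分逻辑的较长句子。"] * 12)
chunks = split_sent(long_text, 0, args, seg)
# chunks is a list of {"text": ...} pieces; over-long passages are split near
# their midpoint at a delimiter, and pieces shorter than min_length are dropped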
155,269 | import os
import sys
import argparse
import json
import re
from tqdm import tqdm
from cpm_live.dataset import build_dataset
def pre_process(line):
line = line.strip().replace("<n>", "\n")
line = line.strip().replace("\\r\\n", "\n")
line = line.strip().replace("\\r", "\n")
line = line.strip().replace("\\n", "\n")
    line = re.sub(r'\n\s+\n', '\n\n', line.strip())
return line | null |
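A quick illustration of what pre_process normalizes; the raw line is made up and contains both the <n> placeholder and literal backslash-n sequences:
raw = "第一段<n>第二段\\n\\n   \\n第三段"
print(pre_process(raw))
# 第一段
# 第二段
#
# 第三段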
155,270 | from django.shortcuts import render
from django.views.decorators.csrf import csrf_exempt
from rest_framework import generics, viewsets
from rest_framework.decorators import action
from rest_framework.views import APIView
from rest_framework.response import Response
from ai_model_manager.models import AIModel, AIModelSetting, Prompt
from .serializers import (AIModelSerializer, AIModelSettingSerializer,
PromptSerializer)
class AIModel(models.Model):
id = models.UUIDField(primary_key=True, default=uuid4, editable=False)
path = models.FilePathField(path="models")
name = models.CharField(max_length=255)
source = models.URLField(blank=True, null=True)
author = models.CharField(max_length=255, blank=True, null=True)
model_format = models.ForeignKey(
"AIModelFormat", on_delete=models.CASCADE, blank=True, null=True
)
settings = models.ForeignKey(
"AIModelSetting", on_delete=models.CASCADE, blank=True, null=True
)
prompt = models.ForeignKey(
"Prompt", on_delete=models.CASCADE, blank=True, null=True
)
version = models.CharField(max_length=255, blank=True, null=True)
is_configured = models.BooleanField(default=True)
is_broken = models.BooleanField(default=False)
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
if not self.settings:
self.settings = AIModelSetting.objects.create()
self.settings.save()
if not self.prompt:
self.prompt = Prompt.objects.create()
self.prompt.save()
def list_all():
return AIModel.objects.all()
def model_size(self):
try:
size = str(os.path.getsize(self.path) / (1024 * 1024 * 1024))
except FileNotFoundError:
self.is_configured = False
self.save()
size = "-1.0"
return size[: size.find(".") + 3] + " GB"
def add_models_from_dir(dir_path):
model_formats = AIModelFormat.objects.values_list("extension", flat=True)
model_formats = list(model_formats)
new_models = []
for root, dirs, files in os.walk(dir_path):
for file in files:
file_path = os.path.join(root, file)
file_ext = os.path.splitext(file)[1]
if file_ext in model_formats:
file_name = os.path.splitext(file)[0]
model_format = AIModelFormat.objects.get(extension=file_ext)
if not AIModel.objects.filter(path=file_path).exists():
obj = AIModel.objects.create(
path=file_path,
name=file_name,
model_format=model_format,
)
new_models.append(obj)
_ = [obj.save() for obj in new_models]
return [obj.path for obj in new_models]
def ui_header(self):
"""Header with model information
Returns:
UI with model information
"""
head_bar = easy_content_expander(
vexpand=False,
# bgcolor="#445566",
bgcolor="#20354a",
content=ft.Row(
alignment=ft.MainAxisAlignment.SPACE_BETWEEN,
controls=[
ft.Column(
alignment=ft.MainAxisAlignment.START,
horizontal_alignment=ft.CrossAxisAlignment.START,
controls=[
ft.TextField(
# expand=True,
width=700,
value=self.name,
content_padding=0,
text_size=30,
border_color=ft.colors.TRANSPARENT,
on_change=lambda x: save_helper(
self, "name", x.control.value
),
),
ft.Text(
f"Path: {self.path if self.path else 'Unknown' }"
),
ft.Text(f"Size: {self.model_size} "),
ft.Text(
f"Source: {self.source if self.source else 'Unknown' }"
),
ft.Text(
f"Publisher: {self.author if self.author else 'Unknown' }"
),
],
),
ft.Column(
alignment=ft.MainAxisAlignment.CENTER,
horizontal_alignment=ft.CrossAxisAlignment.CENTER,
controls=[
ft.Icon(
name=ft.icons.CHECK_CIRCLE
if self.is_configured
else ft.icons.WARNING_SHARP,
color=ft.colors.GREEN_400
if self.is_configured
else ft.colors.YELLOW_400,
size=80,
),
ft.Text("Ready" if self.is_configured else "Not Ready"),
],
),
],
),
)
return head_bar
def ui_list_repr(self, refresh_call, model_settings):
list_unit = ft.Container(
bgcolor="#232343",
content=ft.Row(
controls=[
ft.Container(
expand=True,
content=ft.Row(
controls=[
ft.Container(
content=ft.Icon(
name=ft.icons.CHECK_CIRCLE
if self.is_configured
else ft.icons.WARNING_SHARP,
color=ft.colors.GREEN_400
if self.is_configured
else ft.colors.YELLOW_400,
),
),
ft.Container(
expand=True,
content=ft.Text(self.name),
),
ft.Container(
expand=True,
alignment=ft.alignment.center,
content=ft.Text(self.model_size),
),
ft.Container(
expand=True,
alignment=ft.alignment.center,
content=ft.Text(self.model_format),
),
]
),
),
ft.Container(
# bgcolor=get_random_color(),
content=ft.Row(
controls=[
# ft.IconButton(icon=ft.icons.EDIT, icon_color="blue"),
ft.IconButton(
icon=ft.icons.DELETE,
icon_color=ft.colors.RED_900,
on_click=lambda x: [self.delete(), refresh_call()],
),
ft.IconButton(
icon=ft.icons.SETTINGS,
icon_color=ft.colors.ORANGE_400,
on_click=model_settings(self),
),
]
),
border_radius=20,
),
]
),
margin=0,
padding=20,
)
return list_unit
def add_models_from_dir(request):
dir_path = request.query_params.get("dir_path")
res = AIModel.add_models_from_dir(dir_path)
return Response(res) | null |