id int64 0 190k | prompt stringlengths 21 13.4M | docstring stringlengths 1 12k ⌀ |
|---|---|---|
160,663 | import io
import math
import os
import pickle
from copy import deepcopy
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
import numpy as np
from augly import utils
from augly.image import utils as imutils
from augly.image.utils.bboxes import spatial_bbox_helper
from PIL import Image, ImageDraw, ImageEnhance, ImageFilter, ImageFont
def overlay_image(
    image: Union[str, Image.Image],
    overlay: Union[str, Image.Image],
    output_path: Optional[str] = None,
    opacity: float = 1.0,
    overlay_size: float = 1.0,
    x_pos: float = 0.4,
    y_pos: float = 0.4,
    max_visible_opacity: float = 0.75,
    metadata: Optional[List[Dict[str, Any]]] = None,
    bboxes: Optional[List[Tuple]] = None,
    bbox_format: Optional[str] = None,
) -> Image.Image:
    """
    Overlays an image onto another image at position (width * x_pos, height * y_pos)

    @param image: the path to an image or a variable of type PIL.Image.Image
        to be augmented

    @param overlay: the path to an image or a variable of type PIL.Image.Image
        that will be overlaid

    @param output_path: the path in which the resulting image will be stored.
        If None, the resulting PIL Image will still be returned

    @param opacity: the lower the opacity, the more transparent the overlaid image

    @param overlay_size: size of the overlaid image is overlay_size * height
        of the original image

    @param x_pos: position of overlaid image relative to the image width

    @param y_pos: position of overlaid image relative to the image height

    @param max_visible_opacity: if bboxes are passed in, this param will be used as the
        maximum opacity value through which the src image will still be considered
        visible; see the function `overlay_image_bboxes_helper` in `utils/bboxes.py` for
        more details about how this is used. If bboxes are not passed in this is not used

    @param metadata: if set to be a list, metadata about the function execution
        including its name, the source & dest width, height, etc. will be appended
        to the inputted list. If set to None, no metadata will be appended or returned

    @param bboxes: a list of bounding boxes can be passed in here if desired. If
        provided, this list will be modified in place such that each bounding box is
        transformed according to this function

    @param bbox_format: signifies what bounding box format was used in `bboxes`. Must
        specify `bbox_format` if `bboxes` is provided. Supported bbox_format values are
        "pascal_voc", "pascal_voc_norm", "coco", and "yolo"

    @returns: the augmented PIL Image
    """
    assert 0.0 <= opacity <= 1.0, "Opacity must be a value in the range [0, 1]"
    assert 0.0 <= overlay_size <= 1.0, "Image size must be a value in the range [0, 1]"
    assert 0.0 <= x_pos <= 1.0, "x_pos must be a value in the range [0, 1]"
    assert 0.0 <= y_pos <= 1.0, "y_pos must be a value in the range [0, 1]"

    image = imutils.validate_and_load_image(image)
    # NOTE: locals() is snapshotted here to record the call arguments, so no new
    # local names should be introduced before this call
    func_kwargs = imutils.get_func_kwargs(metadata, locals())
    src_mode = image.mode

    overlay = imutils.validate_and_load_image(overlay)
    im_width, im_height = image.size
    overlay_width, overlay_height = overlay.size

    # Scale the overlay to overlay_size * image height, preserving aspect ratio;
    # max(1, ...) guards against a zero-height overlay when overlay_size is tiny
    new_height = max(1, int(im_height * overlay_size))
    new_width = int(overlay_width * new_height / overlay_height)
    overlay = overlay.resize((new_width, new_height))

    try:
        # Combine the overlay's own alpha channel with the requested opacity
        mask = overlay.convert("RGBA").getchannel("A")
        mask = Image.fromarray((np.array(mask) * opacity).astype(np.uint8))
    except ValueError:
        # Overlay has no alpha channel; fall back to a uniform opacity mask
        mask = Image.new(mode="L", size=overlay.size, color=int(opacity * 255))

    x = int(im_width * x_pos)
    y = int(im_height * y_pos)

    aug_image = image.convert(mode="RGBA")
    aug_image.paste(im=overlay, box=(x, y), mask=mask)

    imutils.get_metadata(
        metadata=metadata,
        function_name="overlay_image",
        aug_image=aug_image,
        **func_kwargs,
    )

    return imutils.ret_and_save_image(aug_image, output_path, src_mode)
def resize(
    image: Union[str, Image.Image],
    output_path: Optional[str] = None,
    width: Optional[int] = None,
    height: Optional[int] = None,
    resample: Any = Image.BILINEAR,
    metadata: Optional[List[Dict[str, Any]]] = None,
    bboxes: Optional[List[Tuple]] = None,
    bbox_format: Optional[str] = None,
) -> Image.Image:
    """
    Resizes an image

    @param image: the path to an image or a variable of type PIL.Image.Image
        to be augmented

    @param output_path: the path in which the resulting image will be stored.
        If None, the resulting PIL Image will still be returned

    @param width: the desired width the image should be resized to have. If
        None, the original image width will be used

    @param height: the desired height the image should be resized to have. If
        None, the original image height will be used

    @param resample: a resampling filter. This can be one of PIL.Image.NEAREST,
        PIL.Image.BOX, PIL.Image.BILINEAR, PIL.Image.HAMMING, PIL.Image.BICUBIC, or
        PIL.Image.LANCZOS

    @param metadata: if set to be a list, metadata about the function execution
        including its name, the source & dest width, height, etc. will be appended
        to the inputted list. If set to None, no metadata will be appended or returned

    @param bboxes: a list of bounding boxes can be passed in here if desired. If
        provided, this list will be modified in place such that each bounding box is
        transformed according to this function

    @param bbox_format: signifies what bounding box format was used in `bboxes`. Must
        specify `bbox_format` if `bboxes` is provided. Supported bbox_format values are
        "pascal_voc", "pascal_voc_norm", "coco", and "yolo"

    @returns: the augmented PIL Image
    """
    # isinstance is the idiomatic type check (type(x) == int rejects int subclasses)
    assert width is None or isinstance(width, int), "Width must be an integer"
    assert height is None or isinstance(height, int), "Height must be an integer"

    image = imutils.validate_and_load_image(image)
    # NOTE: locals() is snapshotted here to record the call arguments
    func_kwargs = imutils.get_func_kwargs(metadata, locals())
    src_mode = image.mode

    im_w, im_h = image.size
    # Fall back to the original dimension for any axis left as None
    aug_image = image.resize((width or im_w, height or im_h), resample)

    imutils.get_metadata(
        metadata=metadata,
        function_name="resize",
        aug_image=aug_image,
        **func_kwargs,
    )

    return imutils.ret_and_save_image(aug_image, output_path, src_mode)
The provided code snippet includes necessary dependencies for implementing the `overlay_onto_background_image` function. Write a Python function `def overlay_onto_background_image( image: Union[str, Image.Image], background_image: Union[str, Image.Image], output_path: Optional[str] = None, opacity: float = 1.0, overlay_size: float = 1.0, x_pos: float = 0.4, y_pos: float = 0.4, scale_bg: bool = False, metadata: Optional[List[Dict[str, Any]]] = None, bboxes: Optional[List[Tuple]] = None, bbox_format: Optional[str] = None, ) -> Image.Image` to solve the following problem:
Overlays the image onto a given background image at position (width * x_pos, height * y_pos) @param image: the path to an image or a variable of type PIL.Image.Image to be augmented @param background_image: the path to an image or a variable of type PIL.Image.Image onto which the source image will be overlaid @param output_path: the path in which the resulting image will be stored. If None, the resulting PIL Image will still be returned @param opacity: the lower the opacity, the more transparent the overlaid image @param overlay_size: size of the overlaid image is overlay_size * height of the background image @param x_pos: position of overlaid image relative to the background image width with respect to the x-axis @param y_pos: position of overlaid image relative to the background image height with respect to the y-axis @param scale_bg: if True, the background image will be scaled up or down so that overlay_size is respected; if False, the source image will be scaled instead @param metadata: if set to be a list, metadata about the function execution including its name, the source & dest width, height, etc. will be appended to the inputted list. If set to None, no metadata will be appended or returned @param bboxes: a list of bounding boxes can be passed in here if desired. If provided, this list will be modified in place such that each bounding box is transformed according to this function @param bbox_format: signifies what bounding box format was used in `bboxes`. Must specify `bbox_format` if `bboxes` is provided. Supported bbox_format values are "pascal_voc", "pascal_voc_norm", "coco", and "yolo" @returns: the augmented PIL Image
Here is the function:
def overlay_onto_background_image(
    image: Union[str, Image.Image],
    background_image: Union[str, Image.Image],
    output_path: Optional[str] = None,
    opacity: float = 1.0,
    overlay_size: float = 1.0,
    x_pos: float = 0.4,
    y_pos: float = 0.4,
    scale_bg: bool = False,
    metadata: Optional[List[Dict[str, Any]]] = None,
    bboxes: Optional[List[Tuple]] = None,
    bbox_format: Optional[str] = None,
) -> Image.Image:
    """
    Pastes the source image on top of a background image at the location
    (width * x_pos, height * y_pos) of the background

    @param image: the path to an image or a variable of type PIL.Image.Image
        to be augmented

    @param background_image: the path to an image or a variable of type
        PIL.Image.Image onto which the source image will be overlaid

    @param output_path: the path in which the resulting image will be stored.
        If None, the resulting PIL Image will still be returned

    @param opacity: the lower the opacity, the more transparent the overlaid image

    @param overlay_size: the overlaid image is sized to overlay_size * height
        of the background image

    @param x_pos: horizontal position of the overlaid image, as a fraction of
        the background image width

    @param y_pos: vertical position of the overlaid image, as a fraction of
        the background image height

    @param scale_bg: if True, the background image will be scaled up or down so that
        overlay_size is respected; if False, the source image will be scaled instead

    @param metadata: if set to be a list, metadata about the function execution
        including its name, the source & dest width, height, etc. will be appended
        to the inputted list. If set to None, no metadata will be appended or returned

    @param bboxes: a list of bounding boxes can be passed in here if desired. If
        provided, this list will be modified in place such that each bounding box is
        transformed according to this function

    @param bbox_format: signifies what bounding box format was used in `bboxes`. Must
        specify `bbox_format` if `bboxes` is provided. Supported bbox_format values are
        "pascal_voc", "pascal_voc_norm", "coco", and "yolo"

    @returns: the augmented PIL Image
    """
    assert 0.0 <= overlay_size <= 1.0, "Image size must be a value in the range [0, 1]"

    image = imutils.validate_and_load_image(image)
    func_kwargs = imutils.get_func_kwargs(metadata, locals())
    src_mode = image.mode

    bg = background_image
    if scale_bg:
        # Resize the background (rather than the source) so that the source
        # ends up covering overlay_size of the background's height
        bg_w = math.floor(image.width / overlay_size)
        bg_h = math.floor(image.height / overlay_size)
        bg = resize(bg, width=bg_w, height=bg_h)

    # Delegate the actual compositing to overlay_image, with the source as overlay
    aug_image = overlay_image(
        bg,
        overlay=image,
        output_path=output_path,
        opacity=opacity,
        overlay_size=overlay_size,
        x_pos=x_pos,
        y_pos=y_pos,
    )

    imutils.get_metadata(
        metadata=metadata,
        function_name="overlay_onto_background_image",
        aug_image=aug_image,
        **func_kwargs,
    )

    return imutils.ret_and_save_image(aug_image, output_path, src_mode)
160,664 | import io
import math
import os
import pickle
from copy import deepcopy
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
import numpy as np
from augly import utils
from augly.image import utils as imutils
from augly.image.utils.bboxes import spatial_bbox_helper
from PIL import Image, ImageDraw, ImageEnhance, ImageFilter, ImageFont
def crop(
    image: Union[str, Image.Image],
    output_path: Optional[str] = None,
    x1: float = 0.25,
    y1: float = 0.25,
    x2: float = 0.75,
    y2: float = 0.75,
    metadata: Optional[List[Dict[str, Any]]] = None,
    bboxes: Optional[List[Tuple]] = None,
    bbox_format: Optional[str] = None,
) -> Image.Image:
    """
    Crops the image

    @param image: the path to an image or a variable of type PIL.Image.Image
        to be augmented

    @param output_path: the path in which the resulting image will be stored.
        If None, the resulting PIL Image will still be returned

    @param x1: position of the left edge of cropped image relative to the width of
        the original image; must be a float value between 0 and 1

    @param y1: position of the top edge of cropped image relative to the height of
        the original image; must be a float value between 0 and 1

    @param x2: position of the right edge of cropped image relative to the width of
        the original image; must be a float value between 0 and 1

    @param y2: position of the bottom edge of cropped image relative to the height of
        the original image; must be a float value between 0 and 1

    @param metadata: if set to be a list, metadata about the function execution
        including its name, the source & dest width, height, etc. will be appended
        to the inputted list. If set to None, no metadata will be appended or returned

    @param bboxes: a list of bounding boxes can be passed in here if desired. If
        provided, this list will be modified in place such that each bounding box is
        transformed according to this function

    @param bbox_format: signifies what bounding box format was used in `bboxes`. Must
        specify `bbox_format` if `bboxes` is provided. Supported bbox_format values are
        "pascal_voc", "pascal_voc_norm", "coco", and "yolo"

    @returns: the augmented PIL Image
    """
    assert 0 <= x1 <= 1.0, "x1 must be a value in the range [0, 1]"
    assert 0 <= y1 <= 1.0, "y1 must be a value in the range [0, 1]"
    assert x1 < x2 <= 1.0, "x2 must be a value in the range [x1, 1]"
    assert y1 < y2 <= 1.0, "y2 must be a value in the range [y1, 1]"

    image = imutils.validate_and_load_image(image)
    # NOTE: locals() is snapshotted here to record the call arguments
    func_kwargs = imutils.get_func_kwargs(metadata, locals())
    src_mode = image.mode

    width, height = image.size
    # Convert the fractional crop coordinates to absolute pixel coordinates
    left, right = int(width * x1), int(width * x2)
    top, bottom = int(height * y1), int(height * y2)
    aug_image = image.crop((left, top, right, bottom))

    imutils.get_metadata(
        metadata=metadata,
        function_name="crop",
        aug_image=aug_image,
        **func_kwargs,
    )

    return imutils.ret_and_save_image(aug_image, output_path, src_mode)
def scale(
    image: Union[str, Image.Image],
    output_path: Optional[str] = None,
    factor: float = 0.5,
    interpolation: Optional[int] = None,
    metadata: Optional[List[Dict[str, Any]]] = None,
    bboxes: Optional[List[Tuple]] = None,
    bbox_format: Optional[str] = None,
) -> Image.Image:
    """
    Alters the resolution of an image

    @param image: the path to an image or a variable of type PIL.Image.Image
        to be augmented

    @param output_path: the path in which the resulting image will be stored.
        If None, the resulting PIL Image will still be returned

    @param factor: the ratio by which the image should be downscaled or upscaled

    @param interpolation: interpolation method. This can be one of PIL.Image.NEAREST,
        PIL.Image.BOX, PIL.Image.BILINEAR, PIL.Image.HAMMING, PIL.Image.BICUBIC or
        PIL.Image.LANCZOS. If None, a filter is chosen automatically based on whether
        the image is being shrunk or enlarged

    @param metadata: if set to be a list, metadata about the function execution
        including its name, the source & dest width, height, etc. will be appended
        to the inputted list. If set to None, no metadata will be appended or returned

    @param bboxes: a list of bounding boxes can be passed in here if desired. If
        provided, this list will be modified in place such that each bounding box is
        transformed according to this function

    @param bbox_format: signifies what bounding box format was used in `bboxes`. Must
        specify `bbox_format` if `bboxes` is provided. Supported bbox_format values are
        "pascal_voc", "pascal_voc_norm", "coco", and "yolo"

    @returns: the augmented PIL Image
    """
    assert factor > 0, "Expected 'factor' to be a positive number"
    assert interpolation in [
        Image.NEAREST,
        Image.BOX,
        Image.BILINEAR,
        Image.HAMMING,
        Image.BICUBIC,
        Image.LANCZOS,
        None,
    ], "Invalid interpolation specified"

    image = imutils.validate_and_load_image(image)
    # NOTE: locals() is snapshotted here to record the call arguments
    func_kwargs = imutils.get_func_kwargs(metadata, locals())
    src_mode = image.mode

    if interpolation is None:
        # LANCZOS gives better quality when shrinking; BILINEAR is cheaper for enlarging
        interpolation = Image.LANCZOS if factor < 1 else Image.BILINEAR

    width, height = image.size
    scaled_width = int(width * factor)
    scaled_height = int(height * factor)

    # pyre-fixme[6]: Expected `Union[typing_extensions.Literal[0],
    #  typing_extensions.Literal[1], typing_extensions.Literal[2],
    #  typing_extensions.Literal[3], typing_extensions.Literal[4],
    #  typing_extensions.Literal[5], None]` for 2nd param but got `int`.
    aug_image = image.resize((scaled_width, scaled_height), resample=interpolation)

    imutils.get_metadata(
        metadata=metadata,
        function_name="scale",
        aug_image=aug_image,
        **func_kwargs,
    )

    return imutils.ret_and_save_image(aug_image, output_path, src_mode)
The provided code snippet includes necessary dependencies for implementing the `overlay_onto_screenshot` function. Write a Python function `def overlay_onto_screenshot( image: Union[str, Image.Image], output_path: Optional[str] = None, template_filepath: str = utils.TEMPLATE_PATH, template_bboxes_filepath: str = utils.BBOXES_PATH, max_image_size_pixels: Optional[int] = None, crop_src_to_fit: bool = False, resize_src_to_match_template: bool = True, metadata: Optional[List[Dict[str, Any]]] = None, bboxes: Optional[List[Tuple]] = None, bbox_format: Optional[str] = None, ) -> Image.Image` to solve the following problem:
Overlay the image onto a screenshot template so it looks like it was screenshotted on Instagram @param image: the path to an image or a variable of type PIL.Image.Image to be augmented @param output_path: the path in which the resulting image will be stored. If None, the resulting PIL Image will still be returned @param template_filepath: iopath uri to the screenshot template @param template_bboxes_filepath: iopath uri to the file containing the bounding box for each template @param max_image_size_pixels: if provided, the template image and/or src image will be scaled down to avoid an output image with an area greater than this size (in pixels) @param crop_src_to_fit: if True, the src image will be cropped if necessary to fit into the template image if the aspect ratios are different. If False, the src image will instead be resized if needed @param resize_src_to_match_template: if True, the src image will be resized if it is too big or small in both dimensions to better match the template image. If False, the template image will be resized to match the src image instead. It can be useful to set this to True if the src image is very large so that the augmented image isn't huge, but instead is the same size as the template image @param metadata: if set to be a list, metadata about the function execution including its name, the source & dest width, height, etc. will be appended to the inputted list. If set to None, no metadata will be appended or returned @param bboxes: a list of bounding boxes can be passed in here if desired. If provided, this list will be modified in place such that each bounding box is transformed according to this function @param bbox_format: signifies what bounding box format was used in `bboxes`. Must specify `bbox_format` if `bboxes` is provided. Supported bbox_format values are "pascal_voc", "pascal_voc_norm", "coco", and "yolo" @returns: the augmented PIL Image
Here is the function:
def overlay_onto_screenshot(
    image: Union[str, Image.Image],
    output_path: Optional[str] = None,
    template_filepath: str = utils.TEMPLATE_PATH,
    template_bboxes_filepath: str = utils.BBOXES_PATH,
    max_image_size_pixels: Optional[int] = None,
    crop_src_to_fit: bool = False,
    resize_src_to_match_template: bool = True,
    metadata: Optional[List[Dict[str, Any]]] = None,
    bboxes: Optional[List[Tuple]] = None,
    bbox_format: Optional[str] = None,
) -> Image.Image:
    """
    Pastes the image into a screenshot template so the result looks like it
    was screenshotted on Instagram

    @param image: the path to an image or a variable of type PIL.Image.Image
        to be augmented

    @param output_path: the path in which the resulting image will be stored.
        If None, the resulting PIL Image will still be returned

    @param template_filepath: iopath uri to the screenshot template

    @param template_bboxes_filepath: iopath uri to the file containing the
        bounding box for each template

    @param max_image_size_pixels: if provided, the template and/or src image
        are scaled down so the output area does not exceed this many pixels

    @param crop_src_to_fit: if True, the src image is cropped as needed to fit the
        template's bounding box when aspect ratios differ; if False, it is resized
        instead

    @param resize_src_to_match_template: if True, the src image is rescaled to fit
        the template; if False, the template is rescaled to match the src image.
        Setting this to True keeps the output the same size as the template, which
        is useful when the src image is very large

    @param metadata: if set to be a list, metadata about the function execution
        including its name, the source & dest width, height, etc. will be appended
        to the inputted list. If set to None, no metadata will be appended or returned

    @param bboxes: a list of bounding boxes can be passed in here if desired. If
        provided, this list will be modified in place such that each bounding box is
        transformed according to this function

    @param bbox_format: signifies what bounding box format was used in `bboxes`. Must
        specify `bbox_format` if `bboxes` is provided. Supported bbox_format values are
        "pascal_voc", "pascal_voc_norm", "coco", and "yolo"

    @returns: the augmented PIL Image
    """
    image = imutils.validate_and_load_image(image)
    func_kwargs = imutils.get_func_kwargs(metadata, locals())
    src_mode = image.mode

    template, bbox = imutils.get_template_and_bbox(
        template_filepath, template_bboxes_filepath
    )

    if resize_src_to_match_template:
        # Shrink/grow the src so it fits inside the template's bounding box
        box_w, box_h = bbox[2] - bbox[0], bbox[3] - bbox[1]
        fit_factor = min(box_w / image.width, box_h / image.height)
        image = scale(image, factor=fit_factor)
    else:
        # Rescale the template (and its bbox) to match the src image instead
        template, bbox = imutils.scale_template_image(
            image.size[0],
            image.size[1],
            template,
            bbox,
            max_image_size_pixels,
            crop_src_to_fit,
        )

    # Recompute from bbox: the else-branch above may have rescaled it
    box_w, box_h = bbox[2] - bbox[0], bbox[3] - bbox[1]
    fitted_src = imutils.resize_and_pad_to_given_size(
        image, box_w, box_h, crop=crop_src_to_fit
    )
    template.paste(fitted_src, box=bbox)

    imutils.get_metadata(
        metadata=metadata,
        function_name="overlay_onto_screenshot",
        aug_image=template,
        **func_kwargs,
    )

    return imutils.ret_and_save_image(template, output_path, src_mode)
160,665 | import io
import math
import os
import pickle
from copy import deepcopy
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
import numpy as np
from augly import utils
from augly.image import utils as imutils
from augly.image.utils.bboxes import spatial_bbox_helper
from PIL import Image, ImageDraw, ImageEnhance, ImageFilter, ImageFont
The provided code snippet includes necessary dependencies for implementing the `overlay_stripes` function. Write a Python function `def overlay_stripes( image: Union[str, Image.Image], output_path: Optional[str] = None, line_width: float = 0.5, line_color: Tuple[int, int, int] = utils.WHITE_RGB_COLOR, line_angle: float = 0, line_density: float = 0.5, line_type: Optional[str] = "solid", line_opacity: float = 1.0, metadata: Optional[List[Dict[str, Any]]] = None, bboxes: Optional[List[Tuple]] = None, bbox_format: Optional[str] = None, ) -> Image.Image` to solve the following problem:
Overlay stripe pattern onto the image (by default, white horizontal stripes are overlaid) @param image: the path to an image or a variable of type PIL.Image.Image to be augmented @param output_path: the path in which the resulting image will be stored. If None, the resulting PIL Image will still be returned @param line_width: the width of individual stripes as a float value ranging from 0 to 1. Defaults to 0.5 @param line_color: color of the overlaid stripes in RGB values @param line_angle: the angle of the stripes in degrees, ranging from -360° to 360°. Defaults to 0° or horizontal stripes @param line_density: controls the distance between stripes represented as a float value ranging from 0 to 1, with 1 indicating more densely spaced stripes. Defaults to 0.5 @param line_type: the type of stripes. Current options include: dotted, dashed, and solid. Defaults to solid @param line_opacity: the opacity of the stripes, ranging from 0 to 1 with 1 being opaque. Defaults to 1.0 @param metadata: if set to be a list, metadata about the function execution including its name, the source & dest width, height, etc. will be appended to the inputted list. If set to None, no metadata will be appended or returned @param bboxes: a list of bounding boxes can be passed in here if desired. If provided, this list will be modified in place such that each bounding box is transformed according to this function @param bbox_format: signifies what bounding box format was used in `bboxes`. Must specify `bbox_format` if `bboxes` is provided. Supported bbox_format values are "pascal_voc", "pascal_voc_norm", "coco", and "yolo" @returns: the augmented PIL Image
Here is the function:
def overlay_stripes(
    image: Union[str, Image.Image],
    output_path: Optional[str] = None,
    line_width: float = 0.5,
    line_color: Tuple[int, int, int] = utils.WHITE_RGB_COLOR,
    line_angle: float = 0,
    line_density: float = 0.5,
    line_type: Optional[str] = "solid",
    line_opacity: float = 1.0,
    metadata: Optional[List[Dict[str, Any]]] = None,
    bboxes: Optional[List[Tuple]] = None,
    bbox_format: Optional[str] = None,
) -> Image.Image:
    """
    Overlay stripe pattern onto the image (by default, white horizontal
    stripes are overlaid)

    @param image: the path to an image or a variable of type PIL.Image.Image
        to be augmented

    @param output_path: the path in which the resulting image will be stored.
        If None, the resulting PIL Image will still be returned

    @param line_width: the width of individual stripes as a float value ranging
        from 0 to 1. Defaults to 0.5

    @param line_color: color of the overlaid stripes in RGB values

    @param line_angle: the angle of the stripes in degrees, ranging from
        -360° to 360°. Defaults to 0° or horizontal stripes

    @param line_density: controls the distance between stripes represented as a
        float value ranging from 0 to 1, with 1 indicating more densely spaced
        stripes. Defaults to 0.5

    @param line_type: the type of stripes. Current options include: dotted,
        dashed, and solid. Defaults to solid

    @param line_opacity: the opacity of the stripes, ranging from 0 to 1 with
        1 being opaque. Defaults to 1.0

    @param metadata: if set to be a list, metadata about the function execution
        including its name, the source & dest width, height, etc. will be appended
        to the inputted list. If set to None, no metadata will be appended or returned

    @param bboxes: a list of bounding boxes can be passed in here if desired. If
        provided, this list will be modified in place such that each bounding box is
        transformed according to this function

    @param bbox_format: signifies what bounding box format was used in `bboxes`. Must
        specify `bbox_format` if `bboxes` is provided. Supported bbox_format values are
        "pascal_voc", "pascal_voc_norm", "coco", and "yolo"

    @returns: the augmented PIL Image
    """
    assert (
        0.0 <= line_width <= 1.0
    ), "Line width must be a value in the range [0.0, 1.0]"
    # Fixed message: the valid range is [-360.0, 360.0], matching the check
    assert (
        -360.0 <= line_angle <= 360.0
    ), "Line angle must be a degree in the range [-360.0, 360.0]"
    assert (
        0.0 <= line_density <= 1.0
    ), "Line density must be a value in the range [0.0, 1.0]"
    assert (
        0.0 <= line_opacity <= 1.0
    ), "Line opacity must be a value in the range [0.0, 1.0]"
    assert line_type in utils.SUPPORTED_LINE_TYPES, "Stripe type not supported"
    utils.validate_rgb_color(line_color)

    image = imutils.validate_and_load_image(image)
    # NOTE: locals() is snapshotted here to record the call arguments
    func_kwargs = imutils.get_func_kwargs(metadata, locals())
    src_mode = image.mode

    width, height = image.size
    binary_mask = imutils.compute_stripe_mask(
        src_w=width,
        src_h=height,
        line_width=line_width,
        line_angle=line_angle,
        line_density=line_density,
    )

    if line_type == "dotted":
        # To create dotted effect, multiply mask by stripes in perpendicular direction
        perpendicular_mask = imutils.compute_stripe_mask(
            src_w=width,
            src_h=height,
            line_width=line_width,
            line_angle=line_angle + 90,
            line_density=line_density,
        )
        binary_mask *= perpendicular_mask
    elif line_type == "dashed":
        # To create dashed effect, multiply mask by stripes with a larger line
        # width in perpendicular direction
        perpendicular_mask = imutils.compute_stripe_mask(
            src_w=width,
            src_h=height,
            line_width=0.7,
            line_angle=line_angle + 90,
            line_density=line_density,
        )
        binary_mask *= perpendicular_mask

    # Convert the 0/1 mask into an 8-bit alpha mask scaled by line_opacity
    mask = Image.fromarray(np.uint8(binary_mask * line_opacity * 255))

    foreground = Image.new("RGB", image.size, line_color)
    aug_image = image.copy()  # to avoid modifying the input image
    aug_image.paste(foreground, (0, 0), mask=mask)

    imutils.get_metadata(
        metadata=metadata,
        function_name="overlay_stripes",
        aug_image=aug_image,
        **func_kwargs,
    )

    return imutils.ret_and_save_image(aug_image, output_path, src_mode)
160,666 | import io
import math
import os
import pickle
from copy import deepcopy
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
import numpy as np
from augly import utils
from augly.image import utils as imutils
from augly.image.utils.bboxes import spatial_bbox_helper
from PIL import Image, ImageDraw, ImageEnhance, ImageFilter, ImageFont
The provided code snippet includes necessary dependencies for implementing the `overlay_text` function. Write a Python function `def overlay_text( image: Union[str, Image.Image], output_path: Optional[str] = None, text: List[Union[int, List[int]]] = utils.DEFAULT_TEXT_INDICES, font_file: str = utils.FONT_PATH, font_size: float = 0.15, opacity: float = 1.0, color: Tuple[int, int, int] = utils.RED_RGB_COLOR, x_pos: float = 0.0, y_pos: float = 0.5, metadata: Optional[List[Dict[str, Any]]] = None, bboxes: Optional[List[Tuple]] = None, bbox_format: Optional[str] = None, ) -> Image.Image` to solve the following problem:
Overlay text onto the image (by default, text is randomly overlaid) @param image: the path to an image or a variable of type PIL.Image.Image to be augmented @param output_path: the path in which the resulting image will be stored. If None, the resulting PIL Image will still be returned @param text: indices (into the file) of the characters to be overlaid. Each line of text is represented as a list of int indices; if a list of lists is supplied, multiple lines of text will be overlaid @param font_file: iopath uri to the .ttf font file @param font_size: size of the overlaid characters, calculated as font_size * min(height, width) of the original image @param opacity: the lower the opacity, the more transparent the overlaid text @param color: color of the overlaid text in RGB values @param x_pos: position of the overlaid text relative to the image width @param y_pos: position of the overlaid text relative to the image height @param metadata: if set to be a list, metadata about the function execution including its name, the source & dest width, height, etc. will be appended to the inputted list. If set to None, no metadata will be appended or returned @param bboxes: a list of bounding boxes can be passed in here if desired. If provided, this list will be modified in place such that each bounding box is transformed according to this function @param bbox_format: signifies what bounding box format was used in `bboxes`. Must specify `bbox_format` if `bboxes` is provided. Supported bbox_format values are "pascal_voc", "pascal_voc_norm", "coco", and "yolo" @returns: the augmented PIL Image
Here is the function:
def overlay_text(
    image: Union[str, Image.Image],
    output_path: Optional[str] = None,
    text: List[Union[int, List[int]]] = utils.DEFAULT_TEXT_INDICES,
    font_file: str = utils.FONT_PATH,
    font_size: float = 0.15,
    opacity: float = 1.0,
    color: Tuple[int, int, int] = utils.RED_RGB_COLOR,
    x_pos: float = 0.0,
    y_pos: float = 0.5,
    metadata: Optional[List[Dict[str, Any]]] = None,
    bboxes: Optional[List[Tuple]] = None,
    bbox_format: Optional[str] = None,
) -> Image.Image:
    """
    Overlay text onto the image (by default, text is randomly overlaid)

    @param image: the path to an image or a variable of type PIL.Image.Image
        to be augmented
    @param output_path: the path in which the resulting image will be stored.
        If None, the resulting PIL Image will still be returned
    @param text: indices (into the file) of the characters to be overlaid. Each line of
        text is represented as a list of int indices; if a list of lists is supplied,
        multiple lines of text will be overlaid
    @param font_file: iopath uri to the .ttf font file
    @param font_size: size of the overlaid characters, calculated as
        font_size * min(height, width) of the original image
    @param opacity: the lower the opacity, the more transparent the overlaid text
    @param color: color of the overlaid text in RGB values
    @param x_pos: position of the overlaid text relative to the image width
    @param y_pos: position of the overlaid text relative to the image height
    @param metadata: if set to be a list, metadata about the function execution
        including its name, the source & dest width, height, etc. will be appended
        to the inputted list. If set to None, no metadata will be appended or returned
    @param bboxes: a list of bounding boxes can be passed in here if desired. If
        provided, this list will be modified in place such that each bounding box is
        transformed according to this function
    @param bbox_format: signifies what bounding box format was used in `bboxes`. Must
        specify `bbox_format` if `bboxes` is provided. Supported bbox_format values are
        "pascal_voc", "pascal_voc_norm", "coco", and "yolo"
    @returns: the augmented PIL Image
    """
    assert 0.0 <= opacity <= 1.0, "Opacity must be a value in the range [0.0, 1.0]"
    assert 0.0 <= font_size <= 1.0, "Font size must be a value in the range [0.0, 1.0]"
    assert 0.0 <= x_pos <= 1.0, "x_pos must be a value in the range [0.0, 1.0]"
    assert 0.0 <= y_pos <= 1.0, "y_pos must be a value in the range [0.0, 1.0]"
    utils.validate_rgb_color(color)
    image = imutils.validate_and_load_image(image)
    # Snapshot the arguments via locals() before any new locals are introduced.
    func_kwargs = imutils.get_func_kwargs(metadata, locals())
    src_mode = image.mode
    # Normalize input: a flat list of ints is treated as a single line of text.
    text_lists = text if all(isinstance(t, list) for t in text) else [text]
    assert all(isinstance(t, list) for t in text_lists) and all(
        all(isinstance(t, int) for t in text_l)  # pyre-ignore text_l is a List[int]
        for text_l in text_lists
    ), "Text must be a list of ints or a list of list of ints for multiple lines"
    # RGBA so the text can be drawn with an alpha channel (opacity below).
    image = image.convert("RGBA")
    width, height = image.size
    local_font_path = utils.pathmgr.get_local_path(font_file)
    # From here on, font_size is an absolute pixel size, not a fraction.
    font_size = int(min(width, height) * font_size)
    font = ImageFont.truetype(local_font_path, font_size)
    # The font ships with a companion .pkl listing the codepoints it supports.
    # NOTE(review): pickle.load assumes this companion file is trusted.
    pkl_file = os.path.splitext(font_file)[0] + ".pkl"
    local_pkl_path = utils.pathmgr.get_local_path(pkl_file)
    with open(local_pkl_path, "rb") as f:
        chars = pickle.load(f)
    try:
        text_strs = [
            # pyre-fixme[16]: Item `int` of `Union[List[int], List[Union[List[int],
            # int]], int]` has no attribute `__iter__`.
            # Indices wrap via modulo, so any non-negative int maps to some char.
            "".join([chr(chars[c % len(chars)]) for c in t])
            for t in text_lists
        ]
    except Exception:
        # Any lookup failure (e.g. a non-int index) is surfaced uniformly.
        raise IndexError("Invalid text indices specified")
    draw = ImageDraw.Draw(image)
    for i, text_str in enumerate(text_strs):
        draw.text(
            # Successive lines are offset vertically by font size plus a 5px gap.
            xy=(x_pos * width, y_pos * height + i * (font_size + 5)),
            text=text_str,
            fill=(color[0], color[1], color[2], round(opacity * 255)),
            # pyre-fixme[6]: Expected `Optional[ImageFont._Font]` for 4th param but got
            # `FreeTypeFont`.
            font=font,
        )
    imutils.get_metadata(
        metadata=metadata,
        function_name="overlay_text",
        aug_image=image,
        **func_kwargs,
    )
return imutils.ret_and_save_image(image, output_path, src_mode) | Overlay text onto the image (by default, text is randomly overlaid) @param image: the path to an image or a variable of type PIL.Image.Image to be augmented @param output_path: the path in which the resulting image will be stored. If None, the resulting PIL Image will still be returned @param text: indices (into the file) of the characters to be overlaid. Each line of text is represented as a list of int indices; if a list of lists is supplied, multiple lines of text will be overlaid @param font_file: iopath uri to the .ttf font file @param font_size: size of the overlaid characters, calculated as font_size * min(height, width) of the original image @param opacity: the lower the opacity, the more transparent the overlaid text @param color: color of the overlaid text in RGB values @param x_pos: position of the overlaid text relative to the image width @param y_pos: position of the overlaid text relative to the image height @param metadata: if set to be a list, metadata about the function execution including its name, the source & dest width, height, etc. will be appended to the inputted list. If set to None, no metadata will be appended or returned @param bboxes: a list of bounding boxes can be passed in here if desired. If provided, this list will be modified in place such that each bounding box is transformed according to this function @param bbox_format: signifies what bounding box format was used in `bboxes`. Must specify `bbox_format` if `bboxes` is provided. Supported bbox_format values are "pascal_voc", "pascal_voc_norm", "coco", and "yolo" @returns: the augmented PIL Image |
160,667 | import io
import math
import os
import pickle
from copy import deepcopy
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
import numpy as np
from augly import utils
from augly.image import utils as imutils
from augly.image.utils.bboxes import spatial_bbox_helper
from PIL import Image, ImageDraw, ImageEnhance, ImageFilter, ImageFont
def pad(
    image: Union[str, Image.Image],
    output_path: Optional[str] = None,
    w_factor: float = 0.25,
    h_factor: float = 0.25,
    color: Tuple[int, int, int] = utils.DEFAULT_COLOR,
    metadata: Optional[List[Dict[str, Any]]] = None,
    bboxes: Optional[List[Tuple]] = None,
    bbox_format: Optional[str] = None,
) -> Image.Image:
    """
    Pads the image with a solid-color border on all four sides

    @param image: the path to an image or a variable of type PIL.Image.Image
        to be augmented
    @param output_path: the path in which the resulting image will be stored.
        If None, the resulting PIL Image will still be returned
    @param w_factor: width of the left and right borders, each as a fraction
        of the source image width
    @param h_factor: height of the top and bottom borders, each as a fraction
        of the source image height
    @param color: color of the padded border in RGB values
    @param metadata: if set to be a list, metadata about the function execution
        including its name, the source & dest width, height, etc. will be appended
        to the inputted list. If set to None, no metadata will be appended or returned
    @param bboxes: a list of bounding boxes can be passed in here if desired. If
        provided, this list will be modified in place such that each bounding box is
        transformed according to this function
    @param bbox_format: signifies what bounding box format was used in `bboxes`. Must
        specify `bbox_format` if `bboxes` is provided. Supported bbox_format values are
        "pascal_voc", "pascal_voc_norm", "coco", and "yolo"
    @returns: the augmented PIL Image
    """
    assert w_factor >= 0, "w_factor cannot be a negative number"
    assert h_factor >= 0, "h_factor cannot be a negative number"
    utils.validate_rgb_color(color)
    image = imutils.validate_and_load_image(image)
    func_kwargs = imutils.get_func_kwargs(metadata, locals())
    src_mode = image.mode
    src_w, src_h = image.size
    # The same padding is applied to the left/right and to the top/bottom edges.
    pad_w = int(w_factor * src_w)
    pad_h = int(h_factor * src_h)
    canvas = Image.new(
        mode="RGB",
        size=(src_w + 2 * pad_w, src_h + 2 * pad_h),
        color=color,
    )
    canvas.paste(image, box=(pad_w, pad_h))
    imutils.get_metadata(
        metadata=metadata,
        function_name="pad",
        aug_image=canvas,
        **func_kwargs,
    )
    return imutils.ret_and_save_image(canvas, output_path, src_mode)
The provided code snippet includes necessary dependencies for implementing the `pad_square` function. Write a Python function `def pad_square( image: Union[str, Image.Image], output_path: Optional[str] = None, color: Tuple[int, int, int] = utils.DEFAULT_COLOR, metadata: Optional[List[Dict[str, Any]]] = None, bboxes: Optional[List[Tuple]] = None, bbox_format: Optional[str] = None, ) -> Image.Image` to solve the following problem:
Pads the shorter edge of the image such that it is now square-shaped @param image: the path to an image or a variable of type PIL.Image.Image to be augmented @param output_path: the path in which the resulting image will be stored. If None, the resulting PIL Image will still be returned @param color: color of the padded border in RGB values @param metadata: if set to be a list, metadata about the function execution including its name, the source & dest width, height, etc. will be appended to the inputted list. If set to None, no metadata will be appended or returned @param bboxes: a list of bounding boxes can be passed in here if desired. If provided, this list will be modified in place such that each bounding box is transformed according to this function @param bbox_format: signifies what bounding box format was used in `bboxes`. Must specify `bbox_format` if `bboxes` is provided. Supported bbox_format values are "pascal_voc", "pascal_voc_norm", "coco", and "yolo" @returns: the augmented PIL Image
Here is the function:
def pad_square(
    image: Union[str, Image.Image],
    output_path: Optional[str] = None,
    color: Tuple[int, int, int] = utils.DEFAULT_COLOR,
    metadata: Optional[List[Dict[str, Any]]] = None,
    bboxes: Optional[List[Tuple]] = None,
    bbox_format: Optional[str] = None,
) -> Image.Image:
    """
    Pads the shorter edge of the image such that it is now square-shaped
    @param image: the path to an image or a variable of type PIL.Image.Image
        to be augmented
    @param output_path: the path in which the resulting image will be stored.
        If None, the resulting PIL Image will still be returned
    @param color: color of the padded border in RGB values
    @param metadata: if set to be a list, metadata about the function execution
        including its name, the source & dest width, height, etc. will be appended
        to the inputted list. If set to None, no metadata will be appended or returned
    @param bboxes: a list of bounding boxes can be passed in here if desired. If
        provided, this list will be modified in place such that each bounding box is
        transformed according to this function
    @param bbox_format: signifies what bounding box format was used in `bboxes`. Must
        specify `bbox_format` if `bboxes` is provided. Supported bbox_format values are
        "pascal_voc", "pascal_voc_norm", "coco", and "yolo"
    @returns: the augmented PIL Image
    """
    utils.validate_rgb_color(color)
    image = imutils.validate_and_load_image(image)
    func_kwargs = imutils.get_func_kwargs(metadata, locals())
    width, height = image.size
    # Only the shorter side is padded; pad() applies int(factor * side) pixels
    # to each of the two opposite edges, i.e. roughly half the difference each.
    if width < height:
        h_factor = 0
        dw = height - width
        w_factor = dw / (2 * width)
    else:
        w_factor = 0
        dh = width - height
        h_factor = dh / (2 * height)
    # NOTE(review): pad() truncates int(factor * side), so an odd width/height
    # difference can leave the result one pixel short of a perfect square.
    # bboxes/bbox_format are not forwarded to pad(); presumably they are
    # transformed via get_metadata() below through func_kwargs — verify.
    aug_image = pad(image, output_path, w_factor, h_factor, color)
    imutils.get_metadata(
        metadata=metadata,
        function_name="pad_square",
        aug_image=aug_image,
        **func_kwargs,
    )
return aug_image | Pads the shorter edge of the image such that it is now square-shaped @param image: the path to an image or a variable of type PIL.Image.Image to be augmented @param output_path: the path in which the resulting image will be stored. If None, the resulting PIL Image will still be returned @param color: color of the padded border in RGB values @param metadata: if set to be a list, metadata about the function execution including its name, the source & dest width, height, etc. will be appended to the inputted list. If set to None, no metadata will be appended or returned @param bboxes: a list of bounding boxes can be passed in here if desired. If provided, this list will be modified in place such that each bounding box is transformed according to this function @param bbox_format: signifies what bounding box format was used in `bboxes`. Must specify `bbox_format` if `bboxes` is provided. Supported bbox_format values are "pascal_voc", "pascal_voc_norm", "coco", and "yolo" @returns: the augmented PIL Image |
160,668 | import io
import math
import os
import pickle
from copy import deepcopy
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
import numpy as np
from augly import utils
from augly.image import utils as imutils
from augly.image.utils.bboxes import spatial_bbox_helper
from PIL import Image, ImageDraw, ImageEnhance, ImageFilter, ImageFont
def crop(
    image: Union[str, Image.Image],
    output_path: Optional[str] = None,
    x1: float = 0.25,
    y1: float = 0.25,
    x2: float = 0.75,
    y2: float = 0.75,
    metadata: Optional[List[Dict[str, Any]]] = None,
    bboxes: Optional[List[Tuple]] = None,
    bbox_format: Optional[str] = None,
) -> Image.Image:
    """
    Crops the image to the rectangle spanned by the relative coordinates
    (x1, y1) and (x2, y2)

    @param image: the path to an image or a variable of type PIL.Image.Image
        to be augmented
    @param output_path: the path in which the resulting image will be stored.
        If None, the resulting PIL Image will still be returned
    @param x1: left edge of the crop as a fraction of the width of
        the original image; must be a float value between 0 and 1
    @param y1: top edge of the crop as a fraction of the height of
        the original image; must be a float value between 0 and 1
    @param x2: right edge of the crop as a fraction of the width of
        the original image; must be a float value between x1 and 1
    @param y2: bottom edge of the crop as a fraction of the height of
        the original image; must be a float value between y1 and 1
    @param metadata: if set to be a list, metadata about the function execution
        including its name, the source & dest width, height, etc. will be appended
        to the inputted list. If set to None, no metadata will be appended or returned
    @param bboxes: a list of bounding boxes can be passed in here if desired. If
        provided, this list will be modified in place such that each bounding box is
        transformed according to this function
    @param bbox_format: signifies what bounding box format was used in `bboxes`. Must
        specify `bbox_format` if `bboxes` is provided. Supported bbox_format values are
        "pascal_voc", "pascal_voc_norm", "coco", and "yolo"
    @returns: the augmented PIL Image
    """
    assert 0 <= x1 <= 1.0, "x1 must be a value in the range [0, 1]"
    assert 0 <= y1 <= 1.0, "y1 must be a value in the range [0, 1]"
    assert x1 < x2 <= 1.0, "x2 must be a value in the range [x1, 1]"
    assert y1 < y2 <= 1.0, "y2 must be a value in the range [y1, 1]"
    image = imutils.validate_and_load_image(image)
    func_kwargs = imutils.get_func_kwargs(metadata, locals())
    src_mode = image.mode
    im_w, im_h = image.size
    # Convert the fractional coordinates into an absolute pixel box.
    crop_box = (int(im_w * x1), int(im_h * y1), int(im_w * x2), int(im_h * y2))
    aug_image = image.crop(crop_box)
    imutils.get_metadata(
        metadata=metadata,
        function_name="crop",
        aug_image=aug_image,
        **func_kwargs,
    )
    return imutils.ret_and_save_image(aug_image, output_path, src_mode)
The provided code snippet includes necessary dependencies for implementing the `perspective_transform` function. Write a Python function `def perspective_transform( image: Union[str, Image.Image], output_path: Optional[str] = None, sigma: float = 50.0, dx: float = 0.0, dy: float = 0.0, seed: Optional[int] = 42, crop_out_black_border: bool = False, metadata: Optional[List[Dict[str, Any]]] = None, bboxes: Optional[List[Tuple]] = None, bbox_format: Optional[str] = None, ) -> Image.Image` to solve the following problem:
Apply a perspective transform to the image so it looks like it was taken as a photo from another device (e.g. taking a picture from your phone of a picture on a computer). @param image: the path to an image or a variable of type PIL.Image.Image to be augmented @param output_path: the path in which the resulting image will be stored. If None, the resulting PIL Image will still be returned @param sigma: the standard deviation of the distribution of destination coordinates. the larger the sigma value, the more intense the transform @param dx: change in x for the perspective transform; instead of providing `sigma` you can provide a scalar value to be precise @param dy: change in y for the perspective transform; instead of providing `sigma` you can provide a scalar value to be precise @param seed: if provided, this will set the random seed to ensure consistency between runs @param crop_out_black_border: if True, will crop out the black border resulting from the perspective transform by cropping to the largest center rectangle with no black @param metadata: if set to be a list, metadata about the function execution including its name, the source & dest width, height, etc. will be appended to the inputted list. If set to None, no metadata will be appended or returned @param bboxes: a list of bounding boxes can be passed in here if desired. If provided, this list will be modified in place such that each bounding box is transformed according to this function @param bbox_format: signifies what bounding box format was used in `bboxes`. Must specify `bbox_format` if `bboxes` is provided. Supported bbox_format values are "pascal_voc", "pascal_voc_norm", "coco", and "yolo" @returns: the augmented PIL Image
Here is the function:
def perspective_transform(
    image: Union[str, Image.Image],
    output_path: Optional[str] = None,
    sigma: float = 50.0,
    dx: float = 0.0,
    dy: float = 0.0,
    seed: Optional[int] = 42,
    crop_out_black_border: bool = False,
    metadata: Optional[List[Dict[str, Any]]] = None,
    bboxes: Optional[List[Tuple]] = None,
    bbox_format: Optional[str] = None,
) -> Image.Image:
    """
    Apply a perspective transform to the image so it looks like it was taken
    as a photo from another device (e.g. taking a picture from your phone of a
    picture on a computer)

    @param image: the path to an image or a variable of type PIL.Image.Image
        to be augmented
    @param output_path: the path in which the resulting image will be stored.
        If None, the resulting PIL Image will still be returned
    @param sigma: the standard deviation of the distribution of destination
        coordinates. the larger the sigma value, the more intense the transform
    @param dx: change in x for the perspective transform; instead of providing
        `sigma` you can provide a scalar value to be precise
    @param dy: change in y for the perspective transform; instead of providing
        `sigma` you can provide a scalar value to be precise
    @param seed: if provided, this will set the random seed to ensure consistency
        between runs
    @param crop_out_black_border: if True, will crop out the black border resulting
        from the perspective transform by cropping to the largest center rectangle
        with no black
    @param metadata: if set to be a list, metadata about the function execution
        including its name, the source & dest width, height, etc. will be appended
        to the inputted list. If set to None, no metadata will be appended or returned
    @param bboxes: a list of bounding boxes can be passed in here if desired. If
        provided, this list will be modified in place such that each bounding box is
        transformed according to this function
    @param bbox_format: signifies what bounding box format was used in `bboxes`. Must
        specify `bbox_format` if `bboxes` is provided. Supported bbox_format values are
        "pascal_voc", "pascal_voc_norm", "coco", and "yolo"
    @returns: the augmented PIL Image
    """
    assert sigma >= 0, "Expected 'sigma' to be nonnegative"
    assert isinstance(dx, (int, float)), "Expected 'dx' to be a number"
    assert isinstance(dy, (int, float)), "Expected 'dy' to be a number"
    assert seed is None or isinstance(
        seed, int
    ), "Expected 'seed' to be an integer or set to None"
    image = imutils.validate_and_load_image(image)
    func_kwargs = imutils.get_func_kwargs(metadata, locals())
    src_mode = image.mode
    rng = np.random.RandomState(seed) if seed is not None else np.random
    width, height = image.size
    # Source corners in clockwise order starting from the top-left.
    src_coords = [(0, 0), (width, 0), (width, height), (0, height)]
    dst_coords = []
    for corner_x, corner_y in src_coords:
        # The x jitter is drawn before the y jitter for each corner, so the
        # sequence of RNG draws (and the result for a fixed seed) stays stable.
        jittered_x = rng.normal(corner_x, sigma) + dx
        jittered_y = rng.normal(corner_y, sigma) + dy
        dst_coords.append((jittered_x, jittered_y))
    coeffs = imutils.compute_transform_coeffs(src_coords, dst_coords)
    aug_image = image.transform(
        (width, height), Image.PERSPECTIVE, coeffs, Image.BICUBIC
    )
    if crop_out_black_border:
        top_left, top_right, bottom_right, bottom_left = dst_coords
        # Largest axis-aligned rectangle guaranteed to exclude the black border.
        left_bound = max(0, top_left[0], bottom_left[0])
        right_bound = min(width, top_right[0], bottom_right[0])
        top_bound = max(0, top_left[1], top_right[1])
        bottom_bound = min(height, bottom_left[1], bottom_right[1])
        if left_bound >= right_bound or top_bound >= bottom_bound:
            raise Exception(
                "Cannot crop out black border of a perspective transform this intense"
            )
        aug_image = aug_image.crop(
            (left_bound, top_bound, right_bound, bottom_bound)
        )
    imutils.get_metadata(
        metadata=metadata,
        function_name="perspective_transform",
        aug_image=aug_image,
        **func_kwargs,
    )
return imutils.ret_and_save_image(aug_image, output_path, src_mode) | Apply a perspective transform to the image so it looks like it was taken as a photo from another device (e.g. taking a picture from your phone of a picture on a computer). @param image: the path to an image or a variable of type PIL.Image.Image to be augmented @param output_path: the path in which the resulting image will be stored. If None, the resulting PIL Image will still be returned @param sigma: the standard deviation of the distribution of destination coordinates. the larger the sigma value, the more intense the transform @param dx: change in x for the perspective transform; instead of providing `sigma` you can provide a scalar value to be precise @param dy: change in y for the perspective transform; instead of providing `sigma` you can provide a scalar value to be precise @param seed: if provided, this will set the random seed to ensure consistency between runs @param crop_out_black_border: if True, will crop out the black border resulting from the perspective transform by cropping to the largest center rectangle with no black @param metadata: if set to be a list, metadata about the function execution including its name, the source & dest width, height, etc. will be appended to the inputted list. If set to None, no metadata will be appended or returned @param bboxes: a list of bounding boxes can be passed in here if desired. If provided, this list will be modified in place such that each bounding box is transformed according to this function @param bbox_format: signifies what bounding box format was used in `bboxes`. Must specify `bbox_format` if `bboxes` is provided. Supported bbox_format values are "pascal_voc", "pascal_voc_norm", "coco", and "yolo" @returns: the augmented PIL Image |
160,669 | import io
import math
import os
import pickle
from copy import deepcopy
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
import numpy as np
from augly import utils
from augly.image import utils as imutils
from augly.image.utils.bboxes import spatial_bbox_helper
from PIL import Image, ImageDraw, ImageEnhance, ImageFilter, ImageFont
def resize(
    image: Union[str, Image.Image],
    output_path: Optional[str] = None,
    width: Optional[int] = None,
    height: Optional[int] = None,
    resample: Any = Image.BILINEAR,
    metadata: Optional[List[Dict[str, Any]]] = None,
    bboxes: Optional[List[Tuple]] = None,
    bbox_format: Optional[str] = None,
) -> Image.Image:
    """
    Resizes an image

    @param image: the path to an image or a variable of type PIL.Image.Image
        to be augmented
    @param output_path: the path in which the resulting image will be stored.
        If None, the resulting PIL Image will still be returned
    @param width: the desired width of the resized image; if None, the original
        image width will be used
    @param height: the desired height of the resized image; if None, the original
        image height will be used
    @param resample: the resampling filter to use, e.g. PIL.Image.NEAREST,
        PIL.Image.BOX, PIL.Image.BILINEAR, PIL.Image.HAMMING, PIL.Image.BICUBIC, or
        PIL.Image.LANCZOS
    @param metadata: if set to be a list, metadata about the function execution
        including its name, the source & dest width, height, etc. will be appended
        to the inputted list. If set to None, no metadata will be appended or returned
    @param bboxes: a list of bounding boxes can be passed in here if desired. If
        provided, this list will be modified in place such that each bounding box is
        transformed according to this function
    @param bbox_format: signifies what bounding box format was used in `bboxes`. Must
        specify `bbox_format` if `bboxes` is provided. Supported bbox_format values are
        "pascal_voc", "pascal_voc_norm", "coco", and "yolo"
    @returns: the augmented PIL Image
    """
    # isinstance is the idiomatic type check; `type(x) == int` is flagged by
    # linters (E721) and needlessly rejects int subclasses.
    assert width is None or isinstance(width, int), "Width must be an integer"
    assert height is None or isinstance(height, int), "Height must be an integer"
    image = imutils.validate_and_load_image(image)
    func_kwargs = imutils.get_func_kwargs(metadata, locals())
    src_mode = image.mode
    im_w, im_h = image.size
    # Falsy dimensions (None or 0) fall back to the source dimensions.
    aug_image = image.resize((width or im_w, height or im_h), resample)
    imutils.get_metadata(
        metadata=metadata,
        function_name="resize",
        aug_image=aug_image,
        **func_kwargs,
    )
    return imutils.ret_and_save_image(aug_image, output_path, src_mode)
The provided code snippet includes necessary dependencies for implementing the `pixelization` function. Write a Python function `def pixelization( image: Union[str, Image.Image], output_path: Optional[str] = None, ratio: float = 1.0, metadata: Optional[List[Dict[str, Any]]] = None, bboxes: Optional[List[Tuple]] = None, bbox_format: Optional[str] = None, ) -> Image.Image` to solve the following problem:
Pixelizes an image @param image: the path to an image or a variable of type PIL.Image.Image to be augmented @param output_path: the path in which the resulting image will be stored. If None, the resulting PIL Image will still be returned @param ratio: smaller values result in a more pixelated image, 1.0 indicates no change, and any value above one doesn't have a noticeable effect @param metadata: if set to be a list, metadata about the function execution including its name, the source & dest width, height, etc. will be appended to the inputted list. If set to None, no metadata will be appended or returned @param bboxes: a list of bounding boxes can be passed in here if desired. If provided, this list will be modified in place such that each bounding box is transformed according to this function @param bbox_format: signifies what bounding box format was used in `bboxes`. Must specify `bbox_format` if `bboxes` is provided. Supported bbox_format values are "pascal_voc", "pascal_voc_norm", "coco", and "yolo" @returns: the augmented PIL Image
Here is the function:
def pixelization(
    image: Union[str, Image.Image],
    output_path: Optional[str] = None,
    ratio: float = 1.0,
    metadata: Optional[List[Dict[str, Any]]] = None,
    bboxes: Optional[List[Tuple]] = None,
    bbox_format: Optional[str] = None,
) -> Image.Image:
    """
    Pixelizes an image

    @param image: the path to an image or a variable of type PIL.Image.Image
        to be augmented
    @param output_path: the path in which the resulting image will be stored.
        If None, the resulting PIL Image will still be returned
    @param ratio: smaller values result in a more pixelated image, 1.0 indicates
        no change, and any value above one doesn't have a noticeable effect
    @param metadata: if set to be a list, metadata about the function execution
        including its name, the source & dest width, height, etc. will be appended
        to the inputted list. If set to None, no metadata will be appended or returned
    @param bboxes: a list of bounding boxes can be passed in here if desired. If
        provided, this list will be modified in place such that each bounding box is
        transformed according to this function
    @param bbox_format: signifies what bounding box format was used in `bboxes`. Must
        specify `bbox_format` if `bboxes` is provided. Supported bbox_format values are
        "pascal_voc", "pascal_voc_norm", "coco", and "yolo"
    @returns: the augmented PIL Image
    """
    assert ratio > 0, "Expected 'ratio' to be a positive number"
    image = imutils.validate_and_load_image(image)
    func_kwargs = imutils.get_func_kwargs(metadata, locals())
    src_mode = image.mode
    width, height = image.size
    # Downscale then upscale back to the original size to produce the blocky
    # pixelation effect. Clamp the intermediate size to >= 1px so a very small
    # ratio on a small image cannot request a zero-sized resize, which PIL
    # rejects with a ValueError.
    small_w = max(1, int(width * ratio))
    small_h = max(1, int(height * ratio))
    aug_image = image.resize((small_w, small_h))
    aug_image = aug_image.resize((width, height))
    imutils.get_metadata(
        metadata=metadata,
        function_name="pixelization",
        aug_image=aug_image,
        **func_kwargs,
    )
return imutils.ret_and_save_image(aug_image, output_path, src_mode) | Pixelizes an image @param image: the path to an image or a variable of type PIL.Image.Image to be augmented @param output_path: the path in which the resulting image will be stored. If None, the resulting PIL Image will still be returned @param ratio: smaller values result in a more pixelated image, 1.0 indicates no change, and any value above one doesn't have a noticeable effect @param metadata: if set to be a list, metadata about the function execution including its name, the source & dest width, height, etc. will be appended to the inputted list. If set to None, no metadata will be appended or returned @param bboxes: a list of bounding boxes can be passed in here if desired. If provided, this list will be modified in place such that each bounding box is transformed according to this function @param bbox_format: signifies what bounding box format was used in `bboxes`. Must specify `bbox_format` if `bboxes` is provided. Supported bbox_format values are "pascal_voc", "pascal_voc_norm", "coco", and "yolo" @returns: the augmented PIL Image |
160,670 | import io
import math
import os
import pickle
from copy import deepcopy
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
import numpy as np
from augly import utils
from augly.image import utils as imutils
from augly.image.utils.bboxes import spatial_bbox_helper
from PIL import Image, ImageDraw, ImageEnhance, ImageFilter, ImageFont
The provided code snippet includes necessary dependencies for implementing the `random_noise` function. Write a Python function `def random_noise( image: Union[str, Image.Image], output_path: Optional[str] = None, mean: float = 0.0, var: float = 0.01, seed: int = 42, metadata: Optional[List[Dict[str, Any]]] = None, bboxes: Optional[List[Tuple]] = None, bbox_format: Optional[str] = None, ) -> Image.Image` to solve the following problem:
Adds random noise to the image @param image: the path to an image or a variable of type PIL.Image.Image to be augmented @param output_path: the path in which the resulting image will be stored. If None, the resulting PIL Image will still be returned @param mean: mean of the gaussian noise added @param var: variance of the gaussian noise added @param seed: if provided, this will set the random seed before generating noise @param metadata: if set to be a list, metadata about the function execution including its name, the source & dest width, height, etc. will be appended to the inputted list. If set to None, no metadata will be appended or returned @param bboxes: a list of bounding boxes can be passed in here if desired. If provided, this list will be modified in place such that each bounding box is transformed according to this function @param bbox_format: signifies what bounding box format was used in `bboxes`. Must specify `bbox_format` if `bboxes` is provided. Supported bbox_format values are "pascal_voc", "pascal_voc_norm", "coco", and "yolo" @returns: the augmented PIL Image
Here is the function:
def random_noise(
    image: Union[str, Image.Image],
    output_path: Optional[str] = None,
    mean: float = 0.0,
    var: float = 0.01,
    seed: int = 42,
    metadata: Optional[List[Dict[str, Any]]] = None,
    bboxes: Optional[List[Tuple]] = None,
    bbox_format: Optional[str] = None,
) -> Image.Image:
    """
    Adds gaussian random noise to the image

    @param image: the path to an image or a variable of type PIL.Image.Image
        to be augmented

    @param output_path: the path in which the resulting image will be stored.
        If None, the resulting PIL Image will still be returned

    @param mean: mean of the gaussian noise added

    @param var: variance of the gaussian noise added

    @param seed: if provided, this will set the random seed before generating noise

    @param metadata: if set to be a list, metadata about the function execution
        including its name, the source & dest width, height, etc. will be appended
        to the inputted list. If set to None, no metadata will be appended or returned

    @param bboxes: a list of bounding boxes can be passed in here if desired. If
        provided, this list will be modified in place such that each bounding box
        is transformed according to this function

    @param bbox_format: signifies what bounding box format was used in `bboxes`.
        Must specify `bbox_format` if `bboxes` is provided. Supported bbox_format
        values are "pascal_voc", "pascal_voc_norm", "coco", and "yolo"

    @returns: the augmented PIL Image
    """
    assert type(mean) in (float, int), "Mean must be an integer or a float"
    assert type(var) in (float, int), "Variance must be an integer or a float"
    assert type(seed) == int, "Seed must be an integer"

    image = imutils.validate_and_load_image(image)

    func_kwargs = imutils.get_func_kwargs(metadata, locals())
    src_mode = image.mode

    if seed is not None:
        np.random.seed(seed=seed)

    # Work in float on a [0, 1] scale so mean/var are independent of the
    # 8-bit pixel range
    pixels = np.asarray(image).astype(np.float32) / 255.0

    # Signed pixel data would need a lower clip bound of -1; data coming from
    # a uint8 image is non-negative, so this is normally 0
    low_clip = -1.0 if pixels.min() < 0 else 0.0

    noise = np.random.normal(mean, var**0.5, pixels.shape)
    noisy = np.clip(pixels + noise, low_clip, 1.0) * 255.0
    aug_image = Image.fromarray(np.uint8(noisy))

    imutils.get_metadata(
        metadata=metadata,
        function_name="random_noise",
        aug_image=aug_image,
        **func_kwargs,
    )

    return imutils.ret_and_save_image(aug_image, output_path, src_mode)
160,671 | import io
import math
import os
import pickle
from copy import deepcopy
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
import numpy as np
from augly import utils
from augly.image import utils as imutils
from augly.image.utils.bboxes import spatial_bbox_helper
from PIL import Image, ImageDraw, ImageEnhance, ImageFilter, ImageFont
def crop(
    image: Union[str, Image.Image],
    output_path: Optional[str] = None,
    x1: float = 0.25,
    y1: float = 0.25,
    x2: float = 0.75,
    y2: float = 0.75,
    metadata: Optional[List[Dict[str, Any]]] = None,
    bboxes: Optional[List[Tuple]] = None,
    bbox_format: Optional[str] = None,
) -> Image.Image:
    """
    Crops the image

    @param image: the path to an image or a variable of type PIL.Image.Image
        to be augmented

    @param output_path: the path in which the resulting image will be stored.
        If None, the resulting PIL Image will still be returned

    @param x1: position of the left edge of the crop relative to the width of
        the original image; must be a float value between 0 and 1

    @param y1: position of the top edge of the crop relative to the height of
        the original image; must be a float value between 0 and 1

    @param x2: position of the right edge of the crop relative to the width of
        the original image; must be a float value between 0 and 1

    @param y2: position of the bottom edge of the crop relative to the height of
        the original image; must be a float value between 0 and 1

    @param metadata: if set to be a list, metadata about the function execution
        including its name, the source & dest width, height, etc. will be appended
        to the inputted list. If set to None, no metadata will be appended or returned

    @param bboxes: a list of bounding boxes can be passed in here if desired. If
        provided, this list will be modified in place such that each bounding box
        is transformed according to this function

    @param bbox_format: signifies what bounding box format was used in `bboxes`.
        Must specify `bbox_format` if `bboxes` is provided. Supported bbox_format
        values are "pascal_voc", "pascal_voc_norm", "coco", and "yolo"

    @returns: the augmented PIL Image
    """
    assert 0 <= x1 <= 1.0, "x1 must be a value in the range [0, 1]"
    assert 0 <= y1 <= 1.0, "y1 must be a value in the range [0, 1]"
    assert x1 < x2 <= 1.0, "x2 must be a value in the range [x1, 1]"
    assert y1 < y2 <= 1.0, "y2 must be a value in the range [y1, 1]"

    image = imutils.validate_and_load_image(image)

    func_kwargs = imutils.get_func_kwargs(metadata, locals())
    src_mode = image.mode

    width, height = image.size
    # Convert the fractional coordinates into an absolute pixel crop box
    crop_box = (
        int(width * x1),
        int(height * y1),
        int(width * x2),
        int(height * y2),
    )
    aug_image = image.crop(crop_box)

    imutils.get_metadata(
        metadata=metadata,
        function_name="crop",
        aug_image=aug_image,
        **func_kwargs,
    )

    return imutils.ret_and_save_image(aug_image, output_path, src_mode)
The provided code snippet includes necessary dependencies for implementing the `rotate` function. Write a Python function `def rotate( image: Union[str, Image.Image], output_path: Optional[str] = None, degrees: float = 15.0, metadata: Optional[List[Dict[str, Any]]] = None, bboxes: Optional[List[Tuple]] = None, bbox_format: Optional[str] = None, ) -> Image.Image` to solve the following problem:
Rotates the image @param image: the path to an image or a variable of type PIL.Image.Image to be augmented @param output_path: the path in which the resulting image will be stored. If None, the resulting PIL Image will still be returned @param degrees: the amount of degrees that the original image will be rotated counter clockwise @param metadata: if set to be a list, metadata about the function execution including its name, the source & dest width, height, etc. will be appended to the inputted list. If set to None, no metadata will be appended or returned @param bboxes: a list of bounding boxes can be passed in here if desired. If provided, this list will be modified in place such that each bounding box is transformed according to this function @param bbox_format: signifies what bounding box format was used in `bboxes`. Must specify `bbox_format` if `bboxes` is provided. Supported bbox_format values are "pascal_voc", "pascal_voc_norm", "coco", and "yolo" @returns: the augmented PIL Image
Here is the function:
def rotate(
    image: Union[str, Image.Image],
    output_path: Optional[str] = None,
    degrees: float = 15.0,
    metadata: Optional[List[Dict[str, Any]]] = None,
    bboxes: Optional[List[Tuple]] = None,
    bbox_format: Optional[str] = None,
) -> Image.Image:
    """
    Rotates the image

    @param image: the path to an image or a variable of type PIL.Image.Image
        to be augmented

    @param output_path: the path in which the resulting image will be stored.
        If None, the resulting PIL Image will still be returned

    @param degrees: the amount of degrees that the original image will be rotated
        counter clockwise

    @param metadata: if set to be a list, metadata about the function execution
        including its name, the source & dest width, height, etc. will be appended
        to the inputted list. If set to None, no metadata will be appended or returned

    @param bboxes: a list of bounding boxes can be passed in here if desired. If
        provided, this list will be modified in place such that each bounding box
        is transformed according to this function

    @param bbox_format: signifies what bounding box format was used in `bboxes`.
        Must specify `bbox_format` if `bboxes` is provided. Supported bbox_format
        values are "pascal_voc", "pascal_voc_norm", "coco", and "yolo"

    @returns: the augmented PIL Image
    """
    assert type(degrees) in (float, int), "Degrees must be an integer or a float"

    image = imutils.validate_and_load_image(image)

    func_kwargs = imutils.get_func_kwargs(metadata, locals())
    src_mode = image.mode

    # Rotate with expand=True so no corner is clipped, then crop down to the
    # largest axis-aligned rectangle fully contained in the rotated content
    expanded = image.rotate(degrees, expand=True)
    crop_w, crop_h = imutils.rotated_rect_with_max_area(
        image.width, image.height, degrees
    )
    cx, cy = expanded.width / 2, expanded.height / 2
    aug_image = expanded.crop(
        (
            int(cx - crop_w / 2),
            int(cy - crop_h / 2),
            int(cx + crop_w / 2),
            int(cy + crop_h / 2),
        )
    )

    imutils.get_metadata(
        metadata=metadata,
        function_name="rotate",
        aug_image=aug_image,
        **func_kwargs,
    )

    return imutils.ret_and_save_image(aug_image, output_path, src_mode)
160,672 | import io
import math
import os
import pickle
from copy import deepcopy
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
import numpy as np
from augly import utils
from augly.image import utils as imutils
from augly.image.utils.bboxes import spatial_bbox_helper
from PIL import Image, ImageDraw, ImageEnhance, ImageFilter, ImageFont
The provided code snippet includes necessary dependencies for implementing the `saturation` function. Write a Python function `def saturation( image: Union[str, Image.Image], output_path: Optional[str] = None, factor: float = 1.0, metadata: Optional[List[Dict[str, Any]]] = None, bboxes: Optional[List[Tuple]] = None, bbox_format: Optional[str] = None, ) -> Image.Image` to solve the following problem:
Alters the saturation of the image @param image: the path to an image or a variable of type PIL.Image.Image to be augmented @param output_path: the path in which the resulting image will be stored. If None, the resulting PIL Image will still be returned @param factor: a saturation factor of below 1.0 lowers the saturation, a factor of 1.0 gives the original image, and a factor greater than 1.0 adds saturation @param metadata: if set to be a list, metadata about the function execution including its name, the source & dest width, height, etc. will be appended to the inputted list. If set to None, no metadata will be appended or returned @param bboxes: a list of bounding boxes can be passed in here if desired. If provided, this list will be modified in place such that each bounding box is transformed according to this function @param bbox_format: signifies what bounding box format was used in `bboxes`. Must specify `bbox_format` if `bboxes` is provided. Supported bbox_format values are "pascal_voc", "pascal_voc_norm", "coco", and "yolo" @returns: the augmented PIL Image
Here is the function:
def saturation(
    image: Union[str, Image.Image],
    output_path: Optional[str] = None,
    factor: float = 1.0,
    metadata: Optional[List[Dict[str, Any]]] = None,
    bboxes: Optional[List[Tuple]] = None,
    bbox_format: Optional[str] = None,
) -> Image.Image:
    """
    Alters the saturation of the image

    @param image: the path to an image or a variable of type PIL.Image.Image
        to be augmented

    @param output_path: the path in which the resulting image will be stored.
        If None, the resulting PIL Image will still be returned

    @param factor: a saturation factor of below 1.0 lowers the saturation, a
        factor of 1.0 gives the original image, and a factor greater than 1.0
        adds saturation

    @param metadata: if set to be a list, metadata about the function execution
        including its name, the source & dest width, height, etc. will be appended
        to the inputted list. If set to None, no metadata will be appended or returned

    @param bboxes: a list of bounding boxes can be passed in here if desired. If
        provided, this list will be modified in place such that each bounding box
        is transformed according to this function

    @param bbox_format: signifies what bounding box format was used in `bboxes`.
        Must specify `bbox_format` if `bboxes` is provided. Supported bbox_format
        values are "pascal_voc", "pascal_voc_norm", "coco", and "yolo"

    @returns: the augmented PIL Image
    """
    image = imutils.validate_and_load_image(image)

    func_kwargs = imutils.get_func_kwargs(metadata, locals())
    src_mode = image.mode

    # PIL's Color enhancer adjusts saturation; factor semantics documented above
    enhancer = ImageEnhance.Color(image)
    aug_image = enhancer.enhance(factor)

    imutils.get_metadata(
        metadata=metadata,
        function_name="saturation",
        aug_image=aug_image,
        **func_kwargs,
    )

    return imutils.ret_and_save_image(aug_image, output_path, src_mode)
160,673 | import io
import math
import os
import pickle
from copy import deepcopy
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
import numpy as np
from augly import utils
from augly.image import utils as imutils
from augly.image.utils.bboxes import spatial_bbox_helper
from PIL import Image, ImageDraw, ImageEnhance, ImageFilter, ImageFont
The provided code snippet includes necessary dependencies for implementing the `sharpen` function. Write a Python function `def sharpen( image: Union[str, Image.Image], output_path: Optional[str] = None, factor: float = 1.0, metadata: Optional[List[Dict[str, Any]]] = None, bboxes: Optional[List[Tuple]] = None, bbox_format: Optional[str] = None, ) -> Image.Image` to solve the following problem:
Changes the sharpness of the image @param image: the path to an image or a variable of type PIL.Image.Image to be augmented @param output_path: the path in which the resulting image will be stored. If None, the resulting PIL Image will still be returned @param factor: a factor of below 1.0 blurs the image, a factor of 1.0 gives the original image, and a factor greater than 1.0 sharpens the image @param metadata: if set to be a list, metadata about the function execution including its name, the source & dest width, height, etc. will be appended to the inputted list. If set to None, no metadata will be appended or returned @param bboxes: a list of bounding boxes can be passed in here if desired. If provided, this list will be modified in place such that each bounding box is transformed according to this function @param bbox_format: signifies what bounding box format was used in `bboxes`. Must specify `bbox_format` if `bboxes` is provided. Supported bbox_format values are "pascal_voc", "pascal_voc_norm", "coco", and "yolo" @returns: the augmented PIL Image
Here is the function:
def sharpen(
    image: Union[str, Image.Image],
    output_path: Optional[str] = None,
    factor: float = 1.0,
    metadata: Optional[List[Dict[str, Any]]] = None,
    bboxes: Optional[List[Tuple]] = None,
    bbox_format: Optional[str] = None,
) -> Image.Image:
    """
    Changes the sharpness of the image

    @param image: the path to an image or a variable of type PIL.Image.Image
        to be augmented

    @param output_path: the path in which the resulting image will be stored.
        If None, the resulting PIL Image will still be returned

    @param factor: a factor of below 1.0 blurs the image, a factor of 1.0 gives
        the original image, and a factor greater than 1.0 sharpens the image

    @param metadata: if set to be a list, metadata about the function execution
        including its name, the source & dest width, height, etc. will be appended
        to the inputted list. If set to None, no metadata will be appended or returned

    @param bboxes: a list of bounding boxes can be passed in here if desired. If
        provided, this list will be modified in place such that each bounding box
        is transformed according to this function

    @param bbox_format: signifies what bounding box format was used in `bboxes`.
        Must specify `bbox_format` if `bboxes` is provided. Supported bbox_format
        values are "pascal_voc", "pascal_voc_norm", "coco", and "yolo"

    @returns: the augmented PIL Image
    """
    image = imutils.validate_and_load_image(image)

    # Capture kwargs BEFORE creating aug_image so locals() holds only the
    # inputs (matches every sibling augmentation in this module)
    func_kwargs = imutils.get_func_kwargs(metadata, locals())
    src_mode = image.mode

    aug_image = ImageEnhance.Sharpness(image).enhance(factor)

    # Pass aug_image explicitly so dest width/height metadata is recorded,
    # consistent with the other augmentations in this module
    imutils.get_metadata(
        metadata=metadata,
        function_name="sharpen",
        aug_image=aug_image,
        **func_kwargs,
    )

    return imutils.ret_and_save_image(aug_image, output_path, src_mode)
160,674 | import io
import math
import os
import pickle
from copy import deepcopy
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
import numpy as np
from augly import utils
from augly.image import utils as imutils
from augly.image.utils.bboxes import spatial_bbox_helper
from PIL import Image, ImageDraw, ImageEnhance, ImageFilter, ImageFont
The provided code snippet includes necessary dependencies for implementing the `shuffle_pixels` function. Write a Python function `def shuffle_pixels( image: Union[str, Image.Image], output_path: Optional[str] = None, factor: float = 1.0, seed: int = 10, metadata: Optional[List[Dict[str, Any]]] = None, bboxes: Optional[List[Tuple]] = None, bbox_format: Optional[str] = None, ) -> Image.Image` to solve the following problem:
Shuffles the pixels of an image with respect to the shuffling factor. The factor denotes percentage of pixels to be shuffled and randomly selected Note: The actual number of pixels will be less than the percentage given due to the probability of pixels staying in place in the course of shuffling @param image: the path to an image or a variable of type PIL.Image.Image to be augmented @param output_path: the path in which the resulting image will be stored. If None, the resulting PIL Image will still be returned @param factor: a control parameter between 0.0 and 1.0. While a factor of 0.0 returns the original image, a factor of 1.0 performs full shuffling @param seed: seed for numpy random generator to select random pixels for shuffling @param metadata: if set to be a list, metadata about the function execution including its name, the source & dest width, height, etc. will be appended to the inputted list. If set to None, no metadata will be appended or returned @param bboxes: a list of bounding boxes can be passed in here if desired. If provided, this list will be modified in place such that each bounding box is transformed according to this function @param bbox_format: signifies what bounding box format was used in `bboxes`. Must specify `bbox_format` if `bboxes` is provided. Supported bbox_format values are "pascal_voc", "pascal_voc_norm", "coco", and "yolo" @returns: the augmented PIL Image
Here is the function:
def shuffle_pixels(
    image: Union[str, Image.Image],
    output_path: Optional[str] = None,
    factor: float = 1.0,
    seed: int = 10,
    metadata: Optional[List[Dict[str, Any]]] = None,
    bboxes: Optional[List[Tuple]] = None,
    bbox_format: Optional[str] = None,
) -> Image.Image:
    """
    Shuffles the pixels of an image with respect to the shuffling factor. The
    factor denotes percentage of pixels to be shuffled and randomly selected

    Note: The actual number of pixels will be less than the percentage given
    due to the probability of pixels staying in place in the course of shuffling

    @param image: the path to an image or a variable of type PIL.Image.Image
        to be augmented

    @param output_path: the path in which the resulting image will be stored.
        If None, the resulting PIL Image will still be returned

    @param factor: a control parameter between 0.0 and 1.0. While a factor of
        0.0 returns the original image, a factor of 1.0 performs full shuffling

    @param seed: seed for numpy random generator to select random pixels for
        shuffling

    @param metadata: if set to be a list, metadata about the function execution
        including its name, the source & dest width, height, etc. will be appended
        to the inputted list. If set to None, no metadata will be appended or returned

    @param bboxes: a list of bounding boxes can be passed in here if desired. If
        provided, this list will be modified in place such that each bounding box
        is transformed according to this function

    @param bbox_format: signifies what bounding box format was used in `bboxes`.
        Must specify `bbox_format` if `bboxes` is provided. Supported bbox_format
        values are "pascal_voc", "pascal_voc_norm", "coco", and "yolo"

    @returns: the augmented PIL Image
    """
    np.random.seed(seed)
    image = imutils.validate_and_load_image(image)
    assert 0.0 <= factor <= 1.0, "'factor' must be a value in range [0, 1]"

    func_kwargs = imutils.get_func_kwargs(metadata, locals())
    src_mode = image.mode

    if factor == 0.0:
        aug_image = image
    else:
        pixels = np.asarray(image, dtype=int)
        height, width = pixels.shape[:2]
        n_pixels = height * width
        n_channels = pixels.size // n_pixels

        # Flatten to (n_pixels, n_channels) so whole pixels move together,
        # rather than individual channel values
        flat = np.reshape(pixels, (n_pixels, n_channels))
        chosen = np.random.choice(
            n_pixels, size=int(factor * n_pixels), replace=False
        )
        selected = flat[chosen]
        np.random.shuffle(selected)
        flat[chosen] = selected

        # Restore the original layout; squeeze drops the channel axis for
        # single-channel images so PIL accepts the array
        restored = np.squeeze(np.reshape(flat, (height, width, n_channels)))
        aug_image = Image.fromarray(restored.astype("uint8"))

    imutils.get_metadata(
        metadata=metadata,
        function_name="shuffle_pixels",
        aug_image=aug_image,
        **func_kwargs,
    )

    return imutils.ret_and_save_image(aug_image, output_path, src_mode)
160,675 | import io
import math
import os
import pickle
from copy import deepcopy
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
import numpy as np
from augly import utils
from augly.image import utils as imutils
from augly.image.utils.bboxes import spatial_bbox_helper
from PIL import Image, ImageDraw, ImageEnhance, ImageFilter, ImageFont
def spatial_bbox_helper(
    bbox: Tuple[float, float, float, float],
    src_w: int,
    src_h: int,
    aug_function: Callable,
    **kwargs,
) -> Tuple:
    """
    Computes the bbox that encloses the transformed bbox in the image transformed
    by `aug_function`. This helper can be used to compute the transformed bbox for
    any augmentation which doesn't affect the color of the source image (e.g. any
    spatial augmentation): the bbox is painted white on a black canvas, the
    augmentation is applied, and the white region is located in the result.
    """
    # Paint the bbox region white on an all-black canvas of the source size
    canvas = Image.new("RGB", (src_w, src_h))
    ImageDraw.Draw(canvas).rectangle(
        (bbox[0] * src_w, bbox[1] * src_h, bbox[2] * src_w, bbox[3] * src_h),
        fill="white",
    )

    transformed = aug_function(canvas, **kwargs)
    aug_w, aug_h = transformed.size

    # Find the extent of the (possibly moved/warped) white region and
    # normalize back to fractional coordinates
    ys, xs, _ = np.where(np.array(transformed) > 0)
    return (
        np.min(xs) / aug_w,
        np.min(ys) / aug_h,
        np.max(xs) / aug_w,
        np.max(ys) / aug_h,
    )
The provided code snippet includes necessary dependencies for implementing the `skew` function. Write a Python function `def skew( image: Union[str, Image.Image], output_path: Optional[str] = None, skew_factor: float = 0.5, axis: int = 0, metadata: Optional[List[Dict[str, Any]]] = None, bboxes: Optional[List[Tuple]] = None, bbox_format: Optional[str] = None, ) -> Image.Image` to solve the following problem:
Skews an image with respect to its x or y-axis @param image: the path to an image or a variable of type PIL.Image.Image to be augmented @param output_path: the path in which the resulting image will be stored. If None, the resulting PIL Image will still be returned @param skew_factor: the level of skew to apply to the image; a larger absolute value will result in a more intense skew. Recommended range is between [-2, 2] @param axis: the axis along which the image will be skewed; can be set to 0 (x-axis) or 1 (y-axis) @param metadata: if set to be a list, metadata about the function execution including its name, the source & dest width, height, etc. will be appended to the inputted list. If set to None, no metadata will be appended or returned @param bboxes: a list of bounding boxes can be passed in here if desired. If provided, this list will be modified in place such that each bounding box is transformed according to this function @param bbox_format: signifies what bounding box format was used in `bboxes`. Must specify `bbox_format` if `bboxes` is provided. Supported bbox_format values are "pascal_voc", "pascal_voc_norm", "coco", and "yolo" @returns: the augmented PIL Image
Here is the function:
def skew(
    image: Union[str, Image.Image],
    output_path: Optional[str] = None,
    skew_factor: float = 0.5,
    axis: int = 0,
    metadata: Optional[List[Dict[str, Any]]] = None,
    bboxes: Optional[List[Tuple]] = None,
    bbox_format: Optional[str] = None,
) -> Image.Image:
    """
    Skews an image with respect to its x or y-axis

    @param image: the path to an image or a variable of type PIL.Image.Image
        to be augmented

    @param output_path: the path in which the resulting image will be stored.
        If None, the resulting PIL Image will still be returned

    @param skew_factor: the level of skew to apply to the image; a larger absolute
        value will result in a more intense skew. Recommended range is between
        [-2, 2]

    @param axis: the axis along which the image will be skewed; can be set to
        0 (x-axis) or 1 (y-axis)

    @param metadata: if set to be a list, metadata about the function execution
        including its name, the source & dest width, height, etc. will be appended
        to the inputted list. If set to None, no metadata will be appended or returned

    @param bboxes: a list of bounding boxes can be passed in here if desired. If
        provided, this list will be modified in place such that each bounding box
        is transformed according to this function

    @param bbox_format: signifies what bounding box format was used in `bboxes`.
        Must specify `bbox_format` if `bboxes` is provided. Supported bbox_format
        values are "pascal_voc", "pascal_voc_norm", "coco", and "yolo"

    @returns: the augmented PIL Image
    """
    image = imutils.validate_and_load_image(image)

    func_kwargs = imutils.get_func_kwargs(metadata, locals())
    src_mode = image.mode
    w, h = image.size

    # Affine coefficients (a, b, c, d, e, f) map output (x, y) to source
    # (a*x + b*y + c, d*x + e*y + f); the constant term re-centers the skew
    if axis == 0:
        coeffs = (1, skew_factor, -skew_factor * h / 2, 0, 1, 0)
    elif axis == 1:
        coeffs = (1, 0, 0, skew_factor, 1, -skew_factor * w / 2)
    else:
        raise AssertionError(
            f"Invalid 'axis' value: Got '{axis}', expected 0 for 'x-axis' or 1 for 'y-axis'"
        )

    aug_image = image.transform((w, h), Image.AFFINE, coeffs, resample=Image.BILINEAR)

    imutils.get_metadata(
        metadata=metadata,
        function_name="skew",
        aug_image=aug_image,
        bboxes_helper_func=spatial_bbox_helper,
        aug_function=skew,
        **func_kwargs,
    )

    return imutils.ret_and_save_image(aug_image, output_path, src_mode)
160,676 | import io
import math
import os
import pickle
from copy import deepcopy
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
import numpy as np
from augly import utils
from augly.image import utils as imutils
from augly.image.utils.bboxes import spatial_bbox_helper
from PIL import Image, ImageDraw, ImageEnhance, ImageFilter, ImageFont
The provided code snippet includes necessary dependencies for implementing the `vflip` function. Write a Python function `def vflip( image: Union[str, Image.Image], output_path: Optional[str] = None, metadata: Optional[List[Dict[str, Any]]] = None, bboxes: Optional[List[Tuple]] = None, bbox_format: Optional[str] = None, ) -> Image.Image` to solve the following problem:
Vertically flips an image @param image: the path to an image or a variable of type PIL.Image.Image to be augmented @param output_path: the path in which the resulting image will be stored. If None, the resulting PIL Image will still be returned @param metadata: if set to be a list, metadata about the function execution including its name, the source & dest width, height, etc. will be appended to the inputted list. If set to None, no metadata will be appended or returned @param bboxes: a list of bounding boxes can be passed in here if desired. If provided, this list will be modified in place such that each bounding box is transformed according to this function @param bbox_format: signifies what bounding box format was used in `bboxes`. Must specify `bbox_format` if `bboxes` is provided. Supported bbox_format values are "pascal_voc", "pascal_voc_norm", "coco", and "yolo" @returns: the augmented PIL Image
Here is the function:
def vflip(
    image: Union[str, Image.Image],
    output_path: Optional[str] = None,
    metadata: Optional[List[Dict[str, Any]]] = None,
    bboxes: Optional[List[Tuple]] = None,
    bbox_format: Optional[str] = None,
) -> Image.Image:
    """
    Vertically flips an image
    @param image: the path to an image or a variable of type PIL.Image.Image
    to be augmented
    @param output_path: the path in which the resulting image will be stored.
    If None, the resulting PIL Image will still be returned
    @param metadata: if set to be a list, metadata about the function execution
    including its name, the source & dest width, height, etc. will be appended
    to the inputted list. If set to None, no metadata will be appended or returned
    @param bboxes: a list of bounding boxes can be passed in here if desired. If
    provided, this list will be modified in place such that each bounding box is
    transformed according to this function
    @param bbox_format: signifies what bounding box format was used in `bboxes`. Must
    specify `bbox_format` if `bboxes` is provided. Supported bbox_format values are
    "pascal_voc", "pascal_voc_norm", "coco", and "yolo"
    @returns: the augmented PIL Image
    """
    # Resolve a path argument into a loaded PIL image (no-op for PIL input).
    image = imutils.validate_and_load_image(image)
    aug_image = image.transpose(Image.FLIP_TOP_BOTTOM)
    # NOTE(review): kwargs are captured from locals() *after* aug_image is
    # created, so the captured dict also contains `aug_image`; other transforms
    # typically capture kwargs before augmenting — confirm
    # imutils.get_func_kwargs tolerates/filters the extra entry.
    func_kwargs = imutils.get_func_kwargs(metadata, locals())
    # Remember the source mode so the saved/returned image matches the input.
    src_mode = image.mode
    imutils.get_metadata(metadata=metadata, function_name="vflip", **func_kwargs)
    # Converts back to src_mode and writes output_path (if given).
    return imutils.ret_and_save_image(aug_image, output_path, src_mode)
160,677 | from typing import Any, Dict, Optional, Union
import numpy as np
from augly.image import utils as imutils
from PIL import Image
def apply_pil_filter_intensity(**kwargs) -> float:
    """Applying a PIL filter is always treated as a full-strength change."""
    return 100.0
160,678 | from typing import Any, Dict, Optional, Union
import numpy as np
from augly.image import utils as imutils
from PIL import Image
def apply_lambda_intensity(aug_function: str, **kwargs) -> float:
    """
    Delegate to a module-level `<aug_function>_intensity` function when one
    exists; otherwise assume the lambda was a full-strength change.
    """
    matching_func = globals().get(f"{aug_function}_intensity")
    if matching_func is None:
        return 100.0
    return matching_func(**kwargs)
160,679 | from typing import Any, Dict, Optional, Union
import numpy as np
from augly.image import utils as imutils
from PIL import Image
def blur_intensity(radius: int, **kwargs) -> float:
    """Blur strength relative to a maximum radius of 100, capped at 100%."""
    assert (
        isinstance(radius, (float, int)) and radius >= 0
    ), "radius must be a non-negative number"
    return min(100.0, (radius / 100) * 100.0)
160,680 | from typing import Any, Dict, Optional, Union
import numpy as np
from augly.image import utils as imutils
from PIL import Image
def change_aspect_ratio_intensity(
    ratio: float, metadata: Dict[str, Any], **kwargs
) -> float:
    """
    Intensity of change_aspect_ratio: how far the requested ratio is from the
    source image's own aspect ratio, scaled against a maximum ratio of 10 and
    capped at 100%. Stretching and squeezing are treated symmetrically.
    """
    assert (
        isinstance(ratio, (float, int)) and ratio > 0
    ), "ratio must be a positive number"
    src_ratio = metadata["src_width"] / metadata["src_height"]
    if ratio == src_ratio:
        return 0.0
    normalized = ratio if ratio >= 1 else 1 / ratio
    return min((normalized / 10.0) * 100.0, 100.0)
160,681 | from typing import Any, Dict, Optional, Union
import numpy as np
from augly.image import utils as imutils
from PIL import Image
def resize_intensity_helper(metadata: Dict[str, Any]) -> float:
    """
    Shared intensity metric for transforms that change the image size: the
    percentage of pixel area removed (crop / shrink) or added (pad / grow).
    Dividing by the larger of the two areas keeps the result in [0, 100].
    """
    area_src = metadata["src_width"] * metadata["src_height"]
    area_dst = metadata["dst_width"] * metadata["dst_height"]
    return (abs(area_dst - area_src) / max(area_src, area_dst)) * 100.0
def clip_image_size_intensity(metadata: Dict[str, Any], **kwargs) -> float:
    """Intensity of clip_image_size: the share of image area added or removed."""
    return resize_intensity_helper(metadata)
160,682 | from typing import Any, Dict, Optional, Union
import numpy as np
from augly.image import utils as imutils
from PIL import Image
# NOTE(review): only the signature of this helper survived extraction — the
# body is missing, so this line is not valid Python as-is. Callers below
# (color_jitter_intensity, mult_factor_intensity_helper) sum/scale its result
# against a maximum factor, so it presumably maps a multiplicative factor onto
# a symmetric magnitude scale — restore the body from the original source
# before use.
def normalize_mult_factor(factor: float) -> float:
def color_jitter_intensity(
    brightness_factor: float, contrast_factor: float, saturation_factor: float, **kwargs
) -> float:
    """
    Intensity of color_jitter: the three jitter factors are normalized,
    summed, and scaled against a maximum combined factor of 30, capped at 100%.
    """
    for name, factor in (
        ("brightness_factor", brightness_factor),
        ("contrast_factor", contrast_factor),
        ("saturation_factor", saturation_factor),
    ):
        assert (
            isinstance(factor, (float, int)) and factor >= 0
        ), f"{name} must be a nonnegative number"
    combined = (
        normalize_mult_factor(brightness_factor)
        + normalize_mult_factor(contrast_factor)
        + normalize_mult_factor(saturation_factor)
    )
    return min((combined / 30) * 100.0, 100.0)
160,683 | from typing import Any, Dict, Optional, Union
import numpy as np
from augly.image import utils as imutils
from PIL import Image
def convert_color_intensity(**kwargs) -> float:
    """Color-space conversion is always treated as a full-strength change."""
    return 100.0
160,684 | from typing import Any, Dict, Optional, Union
import numpy as np
from augly.image import utils as imutils
from PIL import Image
def resize_intensity_helper(metadata: Dict[str, Any]) -> float:
    """
    Shared intensity metric for transforms that change the image size: the
    percentage of pixel area removed (crop / shrink) or added (pad / grow).
    Dividing by the larger of the two areas keeps the result in [0, 100].
    """
    area_src = metadata["src_width"] * metadata["src_height"]
    area_dst = metadata["dst_width"] * metadata["dst_height"]
    return (abs(area_dst - area_src) / max(area_src, area_dst)) * 100.0
def crop_intensity(metadata: Dict[str, Any], **kwargs) -> float:
    """Intensity of crop: the share of image area that was removed."""
    return resize_intensity_helper(metadata)
160,685 | from typing import Any, Dict, Optional, Union
import numpy as np
from augly.image import utils as imutils
from PIL import Image
def encoding_quality_intensity(quality: int, **kwargs) -> float:
    """Lower JPEG quality means a stronger augmentation: 100 -> 0%, 0 -> 100%."""
    assert (
        isinstance(quality, int) and 0 <= quality <= 100
    ), "quality must be a number in [0, 100]"
    degradation = 100 - quality
    return (degradation / 100) * 100.0
160,686 | from typing import Any, Dict, Optional, Union
import numpy as np
from augly.image import utils as imutils
from PIL import Image
def grayscale_intensity(**kwargs) -> float:
    """Grayscale conversion is always treated as a full-strength change."""
    return 100.0
160,687 | from typing import Any, Dict, Optional, Union
import numpy as np
from augly.image import utils as imutils
from PIL import Image
def hflip_intensity(**kwargs) -> float:
    """A horizontal flip is always treated as a full-strength change."""
    return 100.0
160,688 | from typing import Any, Dict, Optional, Union
import numpy as np
from augly.image import utils as imutils
from PIL import Image
def masked_composite_intensity(
    mask: Optional[Union[str, Image.Image]], metadata: Dict[str, Any], **kwargs
) -> float:
    """
    Intensity of masked_composite: the fraction of mask pixels that are "on"
    multiplied by the intensity fraction of the composited transform (if any),
    expressed as a percentage.
    """
    # Fraction of the image the mask exposes; no mask means everything.
    if mask is None:
        visible_frac = 1.0
    else:
        loaded_mask = imutils.validate_and_load_image(mask)
        mask_arr = np.array(loaded_mask)
        # RGBA masks carry the mask in the alpha channel; grayscale masks
        # are already 2-D.
        channel = mask_arr[:, :, -1] if mask_arr.ndim == 3 else mask_arr
        visible_frac = np.sum(channel > 0) / (channel.shape[0] * channel.shape[1])
    # Intensity contributed by the composited transform itself, looked up by
    # naming convention `<transform>_intensity` in this module.
    transform_name = metadata["transform_function"]
    if transform_name is None:
        transform_frac = 0.0
    else:
        intensity_fn = globals().get(f"{transform_name}_intensity")
        transform_frac = (
            intensity_fn(**kwargs) / 100.0 if intensity_fn is not None else 1.0
        )
    return (transform_frac * visible_frac) * 100.0
160,689 | from typing import Any, Dict, Optional, Union
import numpy as np
from augly.image import utils as imutils
from PIL import Image
def resize_intensity_helper(metadata: Dict[str, Any]) -> float:
    """
    Shared intensity metric for transforms that change the image size: the
    percentage of pixel area removed (crop / shrink) or added (pad / grow).
    Dividing by the larger of the two areas keeps the result in [0, 100].
    """
    area_src = metadata["src_width"] * metadata["src_height"]
    area_dst = metadata["dst_width"] * metadata["dst_height"]
    return (abs(area_dst - area_src) / max(area_src, area_dst)) * 100.0
def meme_format_intensity(metadata: Dict[str, Any], **kwargs) -> float:
    """Intensity of meme_format: the share of image area added or removed."""
    return resize_intensity_helper(metadata)
160,690 | from typing import Any, Dict, Optional, Union
import numpy as np
from augly.image import utils as imutils
from PIL import Image
def opacity_intensity(level: float, **kwargs) -> float:
    """The lower the remaining opacity level, the stronger the change."""
    assert (
        isinstance(level, (float, int)) and 0 <= level <= 1
    ), "level must be a number in [0, 1]"
    return 100.0 * (1 - level)
160,691 | from typing import Any, Dict, Optional, Union
import numpy as np
from augly.image import utils as imutils
from PIL import Image
def overlay_media_intensity_helper(
    opacity: float, overlay_content_size: float
) -> float:
    """
    Shared intensity metric for overlay transforms: area covered by the
    overlay (its per-dimension size factor squared) weighted by opacity,
    expressed as a percentage.
    """
    assert (
        isinstance(opacity, (float, int)) and 0 <= opacity <= 1
    ), "opacity must be a number in [0, 1]"
    assert (
        isinstance(overlay_content_size, (float, int))
        and 0 <= overlay_content_size <= 1
    ), "content size factor must be a number in [0, 1]"
    covered_fraction = overlay_content_size**2
    return (opacity * covered_fraction) * 100.0
def overlay_emoji_intensity(emoji_size: float, opacity: float, **kwargs) -> float:
    """Intensity of overlay_emoji: driven by the emoji's size and opacity."""
    return overlay_media_intensity_helper(opacity, emoji_size)
160,692 | from typing import Any, Dict, Optional, Union
import numpy as np
from augly.image import utils as imutils
from PIL import Image
def overlay_media_intensity_helper(
    opacity: float, overlay_content_size: float
) -> float:
    """
    Shared intensity metric for overlay transforms: area covered by the
    overlay (its per-dimension size factor squared) weighted by opacity,
    expressed as a percentage.

    (Body restored — the original snippet was truncated to the bare signature,
    which is not valid Python; restored from the identical helper defined
    elsewhere in this file.)
    """
    assert (
        isinstance(opacity, (float, int)) and 0 <= opacity <= 1
    ), "opacity must be a number in [0, 1]"
    assert (
        isinstance(overlay_content_size, (float, int))
        and 0 <= overlay_content_size <= 1
    ), "content size factor must be a number in [0, 1]"
    return (opacity * (overlay_content_size**2)) * 100.0
def overlay_image_intensity(opacity: float, overlay_size: float, **kwargs) -> float:
    """Intensity of overlay_image: driven by the overlay's size and opacity."""
    return overlay_media_intensity_helper(opacity, overlay_size)
160,693 | from typing import Any, Dict, Optional, Union
import numpy as np
from augly.image import utils as imutils
from PIL import Image
def overlay_media_intensity_helper(
    opacity: float, overlay_content_size: float
) -> float:
    """
    Shared intensity metric for overlay transforms: area covered by the
    overlay (its per-dimension size factor squared) weighted by opacity,
    expressed as a percentage.
    """
    assert (
        isinstance(opacity, (float, int)) and 0 <= opacity <= 1
    ), "opacity must be a number in [0, 1]"
    assert (
        isinstance(overlay_content_size, (float, int))
        and 0 <= overlay_content_size <= 1
    ), "content size factor must be a number in [0, 1]"
    covered_fraction = overlay_content_size**2
    return (opacity * covered_fraction) * 100.0
def overlay_onto_background_image_intensity(
    opacity: float, overlay_size: float, **kwargs
) -> float:
    """
    Intensity of overlay_onto_background_image: the complement of the overlaid
    content's coverage, since the area *not* covered is what the background
    changes.
    """
    return 100.0 - overlay_media_intensity_helper(opacity, overlay_size)
160,694 | from typing import Any, Dict, Optional, Union
import numpy as np
from augly.image import utils as imutils
from PIL import Image
def overlay_onto_screenshot_intensity(
    template_filepath: str,
    template_bboxes_filepath: str,
    metadata: Dict[str, Any],
    **kwargs,
) -> float:
    """
    Intensity of overlay_onto_screenshot: the share of the output image taken
    up by everything outside the template's source-image bounding box.
    """
    _, (left, upper, right, lower) = imutils.get_template_and_bbox(
        template_filepath, template_bboxes_filepath
    )
    bbox_area = (right - left) * (lower - upper)
    total_area = metadata["dst_width"] * metadata["dst_height"]
    return min(((total_area - bbox_area) / total_area) * 100.0, 100.0)
160,695 | from typing import Any, Dict, Optional, Union
import numpy as np
from augly.image import utils as imutils
from PIL import Image
def overlay_media_intensity_helper(
    opacity: float, overlay_content_size: float
) -> float:
    """
    Shared intensity metric for overlay transforms: area covered by the
    overlay (its per-dimension size factor squared) weighted by opacity,
    expressed as a percentage.
    """
    assert (
        isinstance(opacity, (float, int)) and 0 <= opacity <= 1
    ), "opacity must be a number in [0, 1]"
    assert (
        isinstance(overlay_content_size, (float, int))
        and 0 <= overlay_content_size <= 1
    ), "content size factor must be a number in [0, 1]"
    covered_fraction = overlay_content_size**2
    return (opacity * covered_fraction) * 100.0
def overlay_stripes_intensity(
    line_width: float,
    line_angle: float,
    line_density: float,
    line_type: str,
    line_opacity: float,
    metadata: Dict[str, Any],
    **kwargs,
) -> float:
    """
    Intensity of overlay_stripes: the fraction of pixels covered by the stripe
    pattern, weighted by the stripe opacity.
    """
    stripe_mask = imutils.compute_stripe_mask(
        src_w=metadata["src_width"],
        src_h=metadata["src_height"],
        line_width=line_width,
        line_angle=line_angle,
        line_density=line_density,
    )
    # Dotted/dashed stripes are produced by intersecting with a second set of
    # stripes rotated 90 degrees; dashes use a fixed, wider perpendicular
    # stripe (0.7) while dots reuse the original line width.
    perp_width = {"dotted": line_width, "dashed": 0.7}.get(line_type)
    if perp_width is not None:
        stripe_mask *= imutils.compute_stripe_mask(
            src_w=metadata["src_width"],
            src_h=metadata["src_height"],
            line_width=perp_width,
            line_angle=line_angle + 90,
            line_density=line_density,
        )
    coverage = np.mean(stripe_mask)
    return overlay_media_intensity_helper(line_opacity, coverage)
160,696 | from typing import Any, Dict, Optional, Union
import numpy as np
from augly.image import utils as imutils
from PIL import Image
def overlay_media_intensity_helper(
    opacity: float, overlay_content_size: float
) -> float:
    """
    Shared intensity metric for overlay transforms: area covered by the
    overlay (its per-dimension size factor squared) weighted by opacity,
    expressed as a percentage.

    (Body restored — the original snippet was truncated to the bare signature,
    which is not valid Python; restored from the identical helper defined
    elsewhere in this file.)
    """
    assert (
        isinstance(opacity, (float, int)) and 0 <= opacity <= 1
    ), "opacity must be a number in [0, 1]"
    assert (
        isinstance(overlay_content_size, (float, int))
        and 0 <= overlay_content_size <= 1
    ), "content size factor must be a number in [0, 1]"
    return (opacity * (overlay_content_size**2)) * 100.0
def overlay_text_intensity(opacity: float, font_size: float, **kwargs) -> float:
    """Intensity of overlay_text: driven by the text's font size and opacity."""
    return overlay_media_intensity_helper(opacity, font_size)
160,697 | from typing import Any, Dict, Optional, Union
import numpy as np
from augly.image import utils as imutils
from PIL import Image
def resize_intensity_helper(metadata: Dict[str, Any]) -> float:
    """
    Shared intensity metric for transforms that change the image size: the
    percentage of pixel area removed (crop / shrink) or added (pad / grow).
    Dividing by the larger of the two areas keeps the result in [0, 100].
    """
    area_src = metadata["src_width"] * metadata["src_height"]
    area_dst = metadata["dst_width"] * metadata["dst_height"]
    return (abs(area_dst - area_src) / max(area_src, area_dst)) * 100.0
def pad_intensity(metadata: Dict[str, Any], **kwargs) -> float:
    """Intensity of pad: the share of image area that was added."""
    return resize_intensity_helper(metadata)
160,698 | from typing import Any, Dict, Optional, Union
import numpy as np
from augly.image import utils as imutils
from PIL import Image
def resize_intensity_helper(metadata: Dict[str, Any]) -> float:
    """
    Computes intensity of any transform that resizes the src image. For these
    types of transforms the intensity is defined as the percentage of image
    area that has been cut out (if cropped/resized to smaller) or added (if
    padding/resized to bigger). When computing the percentage, the denominator
    should be the larger of the src & dst areas so the resulting percentage
    isn't greater than 100.

    (Body restored — the original snippet was truncated to the bare signature,
    which is not valid Python; restored from the identical helper defined
    elsewhere in this file.)
    """
    src_area = metadata["src_width"] * metadata["src_height"]
    dst_area = metadata["dst_width"] * metadata["dst_height"]
    larger_area = max(src_area, dst_area)
    return (abs(dst_area - src_area) / larger_area) * 100.0
def pad_square_intensity(metadata: Dict[str, Any], **kwargs) -> float:
    """Intensity of pad_square: the share of image area that was added."""
    return resize_intensity_helper(metadata)
160,699 | from typing import Any, Dict, Optional, Union
import numpy as np
from augly.image import utils as imutils
from PIL import Image
def perspective_transform_intensity(sigma: float, **kwargs) -> float:
    """Perspective jitter sigma relative to a maximum of 100, capped at 100%."""
    assert (
        isinstance(sigma, (float, int)) and sigma >= 0
    ), "sigma must be a non-negative number"
    return min(100.0, (sigma / 100) * 100.0)
160,700 | from typing import Any, Dict, Optional, Union
import numpy as np
from augly.image import utils as imutils
from PIL import Image
def pixelization_intensity(ratio: float, **kwargs) -> float:
    """Smaller downsampling ratios pixelate more; capped at 100%."""
    assert (
        isinstance(ratio, (float, int)) and ratio > 0
    ), "ratio must be a positive number"
    return min(100.0, (1 - ratio) * 100.0)
160,701 | from typing import Any, Dict, Optional, Union
import numpy as np
from augly.image import utils as imutils
from PIL import Image
def random_noise_intensity(mean: float, var: float, **kwargs) -> float:
    """
    Intensity of random_noise: combines the magnitude of the mean shift (max
    100) and the noise variance (max 10). Each component is floored at 0.01 so
    a zero in one does not erase the contribution of the other.
    """
    assert isinstance(mean, (float, int)), "mean must be a number"
    assert (
        isinstance(var, (float, int)) and var >= 0
    ), "var must be a non-negative number"
    mean_part = max(abs(mean / 100), 0.01)
    var_part = max(var / 10, 0.01)
    return (mean_part * var_part) * 100.0
160,702 | from typing import Any, Dict, Optional, Union
import numpy as np
from augly.image import utils as imutils
from PIL import Image
def resize_intensity_helper(metadata: Dict[str, Any]) -> float:
    """
    Computes intensity of any transform that resizes the src image. For these
    types of transforms the intensity is defined as the percentage of image
    area that has been cut out (if cropped/resized to smaller) or added (if
    padding/resized to bigger). When computing the percentage, the denominator
    should be the larger of the src & dst areas so the resulting percentage
    isn't greater than 100.

    (Body restored — the original snippet was truncated to the bare signature,
    which is not valid Python; restored from the identical helper defined
    elsewhere in this file.)
    """
    src_area = metadata["src_width"] * metadata["src_height"]
    dst_area = metadata["dst_width"] * metadata["dst_height"]
    larger_area = max(src_area, dst_area)
    return (abs(dst_area - src_area) / larger_area) * 100.0
def resize_intensity(metadata: Dict[str, Any], **kwargs) -> float:
    """Intensity of resize: the relative change in image area."""
    return resize_intensity_helper(metadata)
160,703 | from typing import Any, Dict, Optional, Union
import numpy as np
from augly.image import utils as imutils
from PIL import Image
def rotate_intensity(degrees: float, **kwargs) -> float:
    """
    Intensity of rotate: distance from an upright orientation, folded into
    [0, 180) so that e.g. 180-degree multiples count as no rotation.
    """
    assert isinstance(degrees, (float, int)), "degrees must be a number"
    effective = abs(degrees) % 180
    return (effective / 180) * 100.0
160,704 | from typing import Any, Dict, Optional, Union
import numpy as np
from augly.image import utils as imutils
from PIL import Image
def scale_intensity(factor: float, **kwargs) -> float:
    """
    Intensity of scale: how far the factor is from 1.0 (a no-op), treating
    upscaling and downscaling symmetrically against a maximum factor of 10,
    capped at 100%.
    """
    assert (
        isinstance(factor, (float, int)) and factor > 0
    ), "factor must be a positive number"
    if factor == 1.0:
        return 0.0
    normalized = factor if factor > 1 else 1 / factor
    return min((normalized / 10.0) * 100.0, 100.0)
160,705 | from typing import Any, Dict, Optional, Union
import numpy as np
from augly.image import utils as imutils
from PIL import Image
def mult_factor_intensity_helper(factor: float) -> float:
    """Normalize a multiplicative factor and scale it against a max of 10."""
    normalized = normalize_mult_factor(factor)
    return min((normalized / 10) * 100.0, 100.0)
def sharpen_intensity(factor: float, **kwargs) -> float:
    """Intensity of sharpen, driven by its multiplicative factor."""
    return mult_factor_intensity_helper(factor)
160,706 | from typing import Any, Dict, Optional, Union
import numpy as np
from augly.image import utils as imutils
from PIL import Image
def shuffle_pixels_intensity(factor: float, **kwargs) -> float:
    """The shuffle factor maps directly onto a percentage."""
    return 100.0 * factor
160,707 | from typing import Any, Dict, Optional, Union
import numpy as np
from augly.image import utils as imutils
from PIL import Image
def skew_intensity(skew_factor: float, **kwargs) -> float:
    """Absolute skew relative to a maximum factor of 2.0, capped at 100%."""
    return min(100.0, (abs(skew_factor) / 2.0) * 100.0)
160,708 | from typing import Any, Dict, Optional, Union
import numpy as np
from augly.image import utils as imutils
from PIL import Image
def vflip_intensity(**kwargs) -> float:
    """A vertical flip is always treated as a full-strength change."""
    return 100.0
160,709 | import functools
import json
import math
import os
from typing import List, Optional, Tuple, Union
import numpy as np
from augly import utils
from PIL import Image
The provided code snippet includes necessary dependencies for implementing the `validate_and_load_image` function. Write a Python function `def validate_and_load_image(image: Union[str, Image.Image]) -> Image.Image` to solve the following problem:
If image is a str, loads the image as a PIL Image and returns it. Otherwise, we assert that image is a PIL Image and then return it.
Here is the function:
def validate_and_load_image(image: Union[str, Image.Image]) -> Image.Image:
    """
    Accepts either a path or an already-loaded PIL Image. Paths are resolved
    to a local file, validated, and opened; any other input must already be a
    PIL Image and is returned unchanged.
    """
    if not isinstance(image, str):
        assert isinstance(
            image, Image.Image
        ), "Expected type PIL.Image.Image for variable 'image'"
        return image
    local_path = utils.pathmgr.get_local_path(image)
    utils.validate_image_path(local_path)
    return Image.open(local_path)
160,710 | import functools
import json
import math
import os
from typing import List, Optional, Tuple, Union
import numpy as np
from augly import utils
from PIL import Image
JPEG_EXTENSIONS = [".jpg", ".JPG", ".jpeg", ".JPEG"]
def ret_and_save_image(
    image: Image.Image, output_path: Optional[str], src_mode: Optional[str] = None
) -> Image.Image:
    """
    Convert `image` back to `src_mode` (when given), optionally write it to
    `output_path`, and return it. Images headed for a JPEG destination, and
    CMYK images, are converted to RGB before saving.
    """
    if src_mode is not None:
        image = image.convert(src_mode)
    if output_path is None:
        return image
    jpeg_dest = any(output_path.endswith(ext) for ext in JPEG_EXTENSIONS)
    if jpeg_dest or image.mode == "CMYK":
        image = image.convert("RGB")
    utils.validate_output_path(output_path)
    image.save(output_path)
    return image
160,711 | import functools
import json
import math
import os
from typing import List, Optional, Tuple, Union
import numpy as np
from augly import utils
from PIL import Image
# NOTE(review): only the signature of get_bboxes survived extraction — the
# body is missing, so this line is not valid Python as-is.
# get_template_and_bbox below indexes its return value by template file name,
# so it presumably loads a name -> bbox mapping from template_bboxes_filepath;
# restore the body from the original source before use.
def get_bboxes(template_bboxes_filepath: Optional[str] = None):
def get_template_and_bbox(
    template_filepath: str, template_bboxes_filepath: Optional[str]
) -> Tuple[Image.Image, Tuple[int, int, int, int]]:
    """
    Load a screenshot template image and look up its bounding box (where the
    source image will be inserted), keyed by the template's file name.
    """
    template = Image.open(utils.pathmgr.get_local_path(template_filepath))
    template_key = os.path.basename(template_filepath)
    bbox = get_bboxes(template_bboxes_filepath)[template_key]
    return template, bbox
160,712 | import functools
import json
import math
import os
from typing import List, Optional, Tuple, Union
import numpy as np
from augly import utils
from PIL import Image
The provided code snippet includes necessary dependencies for implementing the `rotated_rect_with_max_area` function. Write a Python function `def rotated_rect_with_max_area(w: int, h: int, angle: float) -> Tuple[float, float]` to solve the following problem:
Computes the width and height of the largest possible axis-aligned rectangle (maximal area) within the rotated rectangle source: https://stackoverflow.com/questions/16702966/rotate-image-and-crop-out-black-borders # noqa: B950
Here is the function:
def rotated_rect_with_max_area(w: int, h: int, angle: float) -> Tuple[float, float]:
    """
    Width and height of the largest axis-aligned rectangle (by area) that fits
    entirely inside a w x h rectangle rotated by `angle` degrees.
    source:
    https://stackoverflow.com/questions/16702966/rotate-image-and-crop-out-black-borders # noqa: B950
    """
    sin_a = abs(math.sin(math.radians(angle)))
    cos_a = abs(math.cos(math.radians(angle)))
    wide = w >= h
    long_side, short_side = (w, h) if wide else (h, w)
    # Half-constrained case: the short side limits the inscribed rectangle,
    # or the angle is (numerically) 45 degrees where the general formula's
    # denominator vanishes.
    if short_side <= 2.0 * sin_a * cos_a * long_side or abs(sin_a - cos_a) < 1e-10:
        half_short = 0.5 * short_side
        if wide:
            return half_short / sin_a, half_short / cos_a
        return half_short / cos_a, half_short / sin_a
    cos_2a = cos_a * cos_a - sin_a * sin_a
    return (w * cos_a - h * sin_a) / cos_2a, (h * cos_a - w * sin_a) / cos_2a
160,713 | import functools
import json
import math
import os
from typing import List, Optional, Tuple, Union
import numpy as np
from augly import utils
from PIL import Image
def pad_with_black(src: Image.Image, w: int, h: int) -> Image.Image:
    """
    Center `src` on a black w x h canvas. A dimension already at least as
    large as the target gets offset 0 (no cropping happens here).
    """
    src_w, src_h = src.size
    x_off = max(0, (w - src_w) // 2)
    y_off = max(0, (h - src_h) // 2)
    canvas = Image.new("RGB", (w, h))
    canvas.paste(src, (x_off, y_off, src_w + x_off, src_h + y_off))
    return canvas
The provided code snippet includes necessary dependencies for implementing the `resize_and_pad_to_given_size` function. Write a Python function `def resize_and_pad_to_given_size( src: Image.Image, w: int, h: int, crop: bool ) -> Image.Image` to solve the following problem:
Returns the image src resized & padded with black if needed for the screenshot transformation (i.e. if the spot for the image in the template is too small or too big for the src image). If crop is True, will crop the src image if necessary to fit into the template image; otherwise, will resize if necessary
Here is the function:
def resize_and_pad_to_given_size(
    src: Image.Image, w: int, h: int, crop: bool
) -> Image.Image:
    """
    Fit `src` into a w x h slot for the screenshot transform: either
    center-crop it (crop=True) or shrink it preserving aspect ratio
    (crop=False), then pad any remaining space with black.
    """
    src_w, src_h = src.size
    if crop:
        x_off = (src_w - w) // 2
        y_off = (src_h - h) // 2
        src = src.crop((x_off, y_off, w + x_off, h + y_off))
        src_w, src_h = src.size
    elif src_w > w or src_h > h:
        # Shrink uniformly so both dimensions fit inside the slot.
        shrink = min(w / src_w, h / src_h)
        src = src.resize(
            (int(src_w * shrink), int(src_h * shrink)), resample=Image.BILINEAR
        )
        src_w, src_h = src.size
    if src_w < w or src_h < h:
        src = pad_with_black(src, w, h)
    return src
160,714 | import functools
import json
import math
import os
from typing import List, Optional, Tuple, Union
import numpy as np
from augly import utils
from PIL import Image
The provided code snippet includes necessary dependencies for implementing the `scale_template_image` function. Write a Python function `def scale_template_image( src_w: int, src_h: int, template_image: Image.Image, bbox: Tuple[int, int, int, int], max_image_size_pixels: Optional[int], crop: bool, ) -> Tuple[Image.Image, Tuple[int, int, int, int]]` to solve the following problem:
Return template_image, and bbox resized to fit the src image. Takes in the width & height of the src image plus the bounding box where the src image will be inserted into template_image. If the template bounding box is bigger than src image in both dimensions, template_image is scaled down such that the dimension that was closest to src_image matches, without changing the aspect ratio (and bbox is scaled proportionally). Similarly if src image is bigger than the bbox in both dimensions, template_image and the bbox are scaled up.
Here is the function:
def scale_template_image(
    src_w: int,
    src_h: int,
    template_image: Image.Image,
    bbox: Tuple[int, int, int, int],
    max_image_size_pixels: Optional[int],
    crop: bool,
) -> Tuple[Image.Image, Tuple[int, int, int, int]]:
    """
    Return template_image, and bbox resized to fit the src image. Takes in the
    width & height of the src image plus the bounding box where the src image
    will be inserted into template_image. If the template bounding box is
    bigger than src image in both dimensions, template_image is scaled down
    such that the dimension that was closest to src_image matches, without
    changing the aspect ratio (and bbox is scaled proportionally). Similarly if
    src image is bigger than the bbox in both dimensions, template_image and
    the bbox are scaled up.
    """
    template_w, template_h = template_image.size
    left, upper, right, lower = bbox
    bbox_w, bbox_h = right - left, lower - upper
    # Scale up/down template_image & bbox
    # crop=True picks the smaller per-dimension factor (the scaled bbox fits
    # inside the src in both dimensions); crop=False picks the larger so the
    # scaled bbox covers the src in both dimensions.
    if crop:
        resize_factor = min(src_w / bbox_w, src_h / bbox_h)
    else:
        resize_factor = max(src_w / bbox_w, src_h / bbox_h)
    # If a max image size is provided & the resized template image would be too large,
    # resize the template image to the max image size.
    # Area grows with the square of the linear factor, hence the sqrt below.
    if max_image_size_pixels is not None:
        template_size = template_w * template_h
        if template_size * resize_factor**2 > max_image_size_pixels:
            resize_factor = math.sqrt(max_image_size_pixels / template_size)
    template_w = int(template_w * resize_factor)
    template_h = int(template_h * resize_factor)
    # right/lower are derived from the scaled left/upper + scaled extents so
    # that integer truncation stays self-consistent across the four corners.
    bbox_w, bbox_h = int(bbox_w * resize_factor), int(bbox_h * resize_factor)
    left, upper = int(left * resize_factor), int(upper * resize_factor)
    right, lower = left + bbox_w, upper + bbox_h
    bbox = (left, upper, right, lower)
    template_image = template_image.resize(
        (template_w, template_h), resample=Image.BILINEAR
    )
    return template_image, bbox
160,715 | import functools
import json
import math
import os
from typing import List, Optional, Tuple, Union
import numpy as np
from augly import utils
from PIL import Image
The provided code snippet includes necessary dependencies for implementing the `square_center_crop` function. Write a Python function `def square_center_crop(src: Image.Image) -> Image.Image` to solve the following problem:
Returns a square crop of the center of the image
Here is the function:
def square_center_crop(src: Image.Image) -> Image.Image:
    """Crop the largest centered square out of `src`."""
    w, h = src.size
    side = min(w, h)
    left = (w - side) // 2
    top = (h - side) // 2
    return src.crop((left, top, left + side, top + side))
160,716 | import functools
import json
import math
import os
from typing import List, Optional, Tuple, Union
import numpy as np
from augly import utils
from PIL import Image
The provided code snippet includes necessary dependencies for implementing the `compute_transform_coeffs` function. Write a Python function `def compute_transform_coeffs( src_coords: List[Tuple[int, int]], dst_coords: List[Tuple[float, float]] ) -> np.ndarray` to solve the following problem:
Given the starting & desired corner coordinates, computes the coefficients required by the perspective transform.
Here is the function:
def compute_transform_coeffs(
    src_coords: List[Tuple[int, int]], dst_coords: List[Tuple[float, float]]
) -> np.ndarray:
    """
    Given the starting & desired corner coordinates, computes the eight
    coefficients (a..h) required by PIL's perspective transform.

    Builds the standard homography system `A @ coeffs = B` (two rows per
    corner pair, interleaved x then y, matching the flattened `src_coords`
    order) and solves it with a least-squares solve. This replaces the
    previous explicit normal-equation inversion `inv(A.T A) @ A.T @ B`,
    which both squared the condition number of the system and relied on the
    deprecated `np.matrix` class; for the non-degenerate case the result is
    identical.

    @param src_coords: the four source corner points, in pixels

    @param dst_coords: the four desired corner points

    @returns the eight perspective transform coefficients, shape (8,)
    """
    rows = []
    for sc, dc in zip(src_coords, dst_coords):
        # x-equation row, then y-equation row, for this corner pair
        rows.append([dc[0], dc[1], 1, 0, 0, 0, -sc[0] * dc[0], -sc[0] * dc[1]])
        rows.append([0, 0, 0, dc[0], dc[1], 1, -sc[1] * dc[0], -sc[1] * dc[1]])
    A = np.asarray(rows, dtype=float)
    B = np.asarray(src_coords, dtype=float).reshape(8)
    res, _, _, _ = np.linalg.lstsq(A, B, rcond=None)
    return res.reshape(8)
160,717 | import functools
import json
import math
import os
from typing import List, Optional, Tuple, Union
import numpy as np
from augly import utils
from PIL import Image
The provided code snippet includes necessary dependencies for implementing the `compute_stripe_mask` function. Write a Python function `def compute_stripe_mask( src_w: int, src_h: int, line_width: float, line_angle: float, line_density: float ) -> np.ndarray` to solve the following problem:
Given stripe parameters such as stripe width, angle, and density, returns a binary mask of the same size as the source image indicating the location of stripes. This implementation is inspired by https://stackoverflow.com/questions/34043381/how-to-create-diagonal-stripe-patterns-and-checkerboard-patterns
Here is the function:
def compute_stripe_mask(
    src_w: int, src_h: int, line_width: float, line_angle: float, line_density: float
) -> np.ndarray:
    """
    Builds a boolean mask of shape (src_h, src_w) marking where stripes lie,
    given the stripe width & density (fractions in [0, 1]) and angle (in
    degrees). This implementation is inspired by
    https://stackoverflow.com/questions/34043381/how-to-create-diagonal-stripe-patterns-and-checkerboard-patterns
    """
    angle = line_angle * (math.pi / 180)
    # Denser stripes -> smaller distance between stripe centers
    spacing = (1 - line_density) * min(src_w, src_h)
    period_y = math.cos(angle) / spacing
    period_x = math.sin(angle) / spacing

    # Coordinate grids centered on the image midpoint
    ys = np.arange(0, src_h) - src_h / 2
    xs = np.arange(0, src_w) - src_w / 2
    grid_x, grid_y = np.meshgrid(xs, ys)

    if abs(angle) == math.pi / 2 or abs(angle) == 3 * math.pi / 2:
        # Vertical stripes: the pattern varies only along x
        soft_mask = (np.cos(2 * math.pi * period_x * grid_x) + 1) / 2
    elif angle == 0 or abs(angle) == math.pi:
        # Horizontal stripes: the pattern varies only along y
        soft_mask = (np.cos(2 * math.pi * period_y * grid_y) + 1) / 2
    else:
        # Diagonal stripes: combine both axes
        soft_mask = (
            np.cos(2 * math.pi * (period_x * grid_x + period_y * grid_y)) + 1
        ) / 2

    # Threshold the cosine wave so that wider line_width keeps more pixels
    return soft_mask > (math.cos(math.pi * line_width) + 1) / 2
160,718 | import math
from typing import Callable, List, Optional, Tuple
import numpy as np
from augly.image import utils as imutils
from PIL import Image, ImageDraw
The provided code snippet includes necessary dependencies for implementing the `hflip_bboxes_helper` function. Write a Python function `def hflip_bboxes_helper(bbox: Tuple, **kwargs) -> Tuple` to solve the following problem:
When the src image is horizontally flipped, the bounding box also gets horizontally flipped
Here is the function:
def hflip_bboxes_helper(bbox: Tuple, **kwargs) -> Tuple:
    """
    Mirrors the bbox across the vertical center line: after a horizontal
    flip, the distance from the right image edge to the old right bbox edge
    becomes the new left bbox edge (and vice versa); y-coords are untouched
    """
    left, upper, right, lower = bbox
    return (1 - right, upper, 1 - left, lower)
160,719 | import math
from typing import Callable, List, Optional, Tuple
import numpy as np
from augly.image import utils as imutils
from PIL import Image, ImageDraw
The provided code snippet includes necessary dependencies for implementing the `meme_format_bboxes_helper` function. Write a Python function `def meme_format_bboxes_helper( bbox: Tuple, src_w: int, src_h: int, caption_height: int, **kwargs ) -> Tuple` to solve the following problem:
The src image is offset vertically by caption_height pixels, so we normalize that to get the y offset, add that to the upper & lower coordinates, & renormalize with the new height. The x dimension is unaffected
Here is the function:
def meme_format_bboxes_helper(
    bbox: Tuple, src_w: int, src_h: int, caption_height: int, **kwargs
) -> Tuple:
    """
    Meme formatting pushes the src image down by `caption_height` pixels.
    As a fraction of the src height that is `caption_height / src_h`; it is
    added to the y-coordinates, which are then renormalized by the new
    (taller) total height. The x-coordinates are unaffected
    """
    left_factor, upper_factor, right_factor, lower_factor = bbox
    offset = caption_height / src_h
    total_h = 1.0 + offset
    return (
        left_factor,
        (upper_factor + offset) / total_h,
        right_factor,
        (lower_factor + offset) / total_h,
    )
160,720 | import math
from typing import Callable, List, Optional, Tuple
import numpy as np
from augly.image import utils as imutils
from PIL import Image, ImageDraw
The provided code snippet includes necessary dependencies for implementing the `overlay_onto_background_image_bboxes_helper` function. Write a Python function `def overlay_onto_background_image_bboxes_helper( bbox: Tuple, overlay_size: float, x_pos: float, y_pos: float, **kwargs ) -> Tuple` to solve the following problem:
The src image is overlaid on the dst image offset by (`x_pos`, `y_pos`) & with a size of `overlay_size` (all relative to the dst image dimensions). So the bounding box is also offset by (`x_pos`, `y_pos`) & scaled by `overlay_size`. It is also possible that some of the src image will be cut off, so we take the max with 0/min with 1 in order to crop the bbox if needed
Here is the function:
def overlay_onto_background_image_bboxes_helper(
    bbox: Tuple, overlay_size: float, x_pos: float, y_pos: float, **kwargs
) -> Tuple:
    """
    The src image lands on the dst image scaled by `overlay_size` and offset
    by (`x_pos`, `y_pos`) (all relative to the dst image dimensions), so the
    bbox undergoes the same scale-then-shift. Clamping to [0, 1] crops away
    any part of the bbox that falls outside the dst image
    """
    left, upper, right, lower = bbox
    return (
        max(0, left * overlay_size + x_pos),
        max(0, upper * overlay_size + y_pos),
        min(1, right * overlay_size + x_pos),
        min(1, lower * overlay_size + y_pos),
    )
160,721 | import math
from typing import Callable, List, Optional, Tuple
import numpy as np
from augly.image import utils as imutils
from PIL import Image, ImageDraw
The provided code snippet includes necessary dependencies for implementing the `overlay_image_bboxes_helper` function. Write a Python function `def overlay_image_bboxes_helper( bbox: Tuple, opacity: float, overlay_size: float, x_pos: float, y_pos: float, max_visible_opacity: float, **kwargs, ) -> Tuple` to solve the following problem:
We made a few decisions for this augmentation about how bboxes are defined: 1. If `opacity` < `max_visible_opacity` (default 0.75, can be specified by the user), the bbox stays the same because it is still considered "visible" behind the overlaid image 2. If the entire bbox is covered by the overlaid image, the bbox is no longer valid so we return it as (0, 0, 0, 0), which will be turned to None in `check_bboxes()` 3. If the entire bottom of the bbox is covered by the overlaid image (i.e. `x_pos < left_factor` & `x_pos + overlay_size > right_factor` & `y_pos + overlay_size > lower_factor`), we crop out the lower part of the bbox that is covered. The analogue is true for the top/left/right being occluded 4. If just the middle of the bbox is covered or a rectangle is sliced out of the bbox, we consider that the bbox is unchanged, even though part of it is occluded. This isn't ideal but otherwise it's very complicated; we could split the remaining area into smaller visible bboxes, but then we would have to return multiple dst bboxes corresponding to one src bbox
Here is the function:
def overlay_image_bboxes_helper(
    bbox: Tuple,
    opacity: float,
    overlay_size: float,
    x_pos: float,
    y_pos: float,
    max_visible_opacity: float,
    **kwargs,
) -> Tuple:
    """
    We made a few decisions for this augmentation about how bboxes are defined:
    1. If `opacity` < `max_visible_opacity` (default 0.75, can be specified by the
       user), the bbox stays the same because it is still considered "visible"
       behind the overlaid image
    2. If the entire bbox is covered by the overlaid image, the bbox is no longer
       valid so we return it as (0, 0, 0, 0), which will be turned to None in
       `check_bboxes()`
    3. If the entire bottom of the bbox is covered by the overlaid image, we crop
       out the lower part of the bbox that is covered; the analogue holds for the
       top/left/right being occluded
    4. If just the middle of the bbox is covered, or a rectangle is sliced out of
       the bbox, we consider the bbox unchanged even though part is occluded
       (splitting into multiple visible sub-boxes would require returning several
       dst bboxes per src bbox)
    """
    left, upper, right, lower = bbox

    # Translucent overlay: the bbox is still considered fully visible
    if opacity < max_visible_opacity:
        return left, upper, right, lower

    covers_left = x_pos < left
    covers_upper = y_pos < upper
    covers_right = x_pos + overlay_size > right
    covers_lower = y_pos + overlay_size > lower

    if covers_left and covers_right:
        if covers_upper and covers_lower:
            # Fully covered: bbox is gone (becomes None in `check_bboxes()`)
            return (0.0, 0.0, 0.0, 0.0)
        if covers_lower:
            lower = y_pos
        elif covers_upper:
            upper = y_pos + overlay_size
    elif covers_upper and covers_lower:
        if covers_right:
            right = x_pos
        elif covers_left:
            left = x_pos + overlay_size

    return left, upper, right, lower
160,722 | import math
from typing import Callable, List, Optional, Tuple
import numpy as np
from augly.image import utils as imutils
from PIL import Image, ImageDraw
def crop_bboxes_helper(
    bbox: Tuple, x1: float, y1: float, x2: float, y2: float, **kwargs
) -> Tuple:
    """
    Shifts the bbox by the crop origin (x1, y1), renormalizes by the crop's
    width/height, and clamps to [0, 1] so any part of the bbox removed by the
    crop is cut off as well. The x and y axes are handled symmetrically
    """
    left, upper, right, lower = bbox
    crop_w, crop_h = x2 - x1, y2 - y1
    new_left = max(0, (left - x1) / crop_w)
    new_upper = max(0, (upper - y1) / crop_h)
    new_right = min(1, 1 - (x2 - right) / crop_w)
    new_lower = min(1, 1 - (y2 - lower) / crop_h)
    return new_left, new_upper, new_right, new_lower
The provided code snippet includes necessary dependencies for implementing the `overlay_onto_screenshot_bboxes_helper` function. Write a Python function `def overlay_onto_screenshot_bboxes_helper( bbox: Tuple, src_w: int, src_h: int, template_filepath: str, template_bboxes_filepath: str, resize_src_to_match_template: bool, max_image_size_pixels: int, crop_src_to_fit: bool, **kwargs, ) -> Tuple` to solve the following problem:
We transform the bbox by applying all the same transformations as are applied in the `overlay_onto_screenshot` function, each of which is mentioned below in comments
Here is the function:
def overlay_onto_screenshot_bboxes_helper(
    bbox: Tuple,
    src_w: int,
    src_h: int,
    template_filepath: str,
    template_bboxes_filepath: str,
    resize_src_to_match_template: bool,
    max_image_size_pixels: int,
    crop_src_to_fit: bool,
    **kwargs,
) -> Tuple:
    """
    We transform the bbox by applying all the same transformations as are applied in the
    `overlay_onto_screenshot` function, each of which is mentioned below in comments

    @param bbox: normalized (left, upper, right, lower) fractions of the src image

    @param src_w, src_h: dimensions of the src image, in pixels

    @param template_filepath, template_bboxes_filepath, resize_src_to_match_template,
        max_image_size_pixels, crop_src_to_fit: must match the arguments given to
        `overlay_onto_screenshot` so the same template scaling/cropping is reproduced

    @returns the bbox normalized to the final template image dimensions
    """
    left_f, upper_f, right_f, lower_f = bbox
    # `tbbox` is the pixel region of the template where the src image is inserted
    template, tbbox = imutils.get_template_and_bbox(
        template_filepath, template_bboxes_filepath
    )
    # Either src image or template image is scaled
    if resize_src_to_match_template:
        tbbox_w, tbbox_h = tbbox[2] - tbbox[0], tbbox[3] - tbbox[1]
        src_scale_factor = min(tbbox_w / src_w, tbbox_h / src_h)
    else:
        template, tbbox = imutils.scale_template_image(
            src_w,
            src_h,
            template,
            tbbox,
            max_image_size_pixels,
            crop_src_to_fit,
        )
        tbbox_w, tbbox_h = tbbox[2] - tbbox[0], tbbox[3] - tbbox[1]
        src_scale_factor = 1
    template_w, template_h = template.size
    x_off, y_off = tbbox[:2]
    # Src image is scaled (if resize_src_to_match_template)
    curr_w, curr_h = src_w * src_scale_factor, src_h * src_scale_factor
    # Track the bbox in pixel coordinates of the (possibly scaled) src image
    left, upper, right, lower = (
        left_f * curr_w,
        upper_f * curr_h,
        right_f * curr_w,
        lower_f * curr_h,
    )
    # Src image is cropped to (tbbox_w, tbbox_h)
    if crop_src_to_fit:
        # Center crop: (dx, dy) is the crop origin inside the src image
        dx, dy = (curr_w - tbbox_w) // 2, (curr_h - tbbox_h) // 2
        x1, y1, x2, y2 = dx, dy, dx + tbbox_w, dy + tbbox_h
        left_f, upper_f, right_f, lower_f = crop_bboxes_helper(
            bbox, x1 / curr_w, y1 / curr_h, x2 / curr_w, y2 / curr_h
        )
        left, upper, right, lower = (
            left_f * tbbox_w,
            upper_f * tbbox_h,
            right_f * tbbox_w,
            lower_f * tbbox_h,
        )
    # Src image is resized to (tbbox_w, tbbox_h)
    else:
        # Aspect-preserving resize, so the smaller ratio wins
        resize_f = min(tbbox_w / curr_w, tbbox_h / curr_h)
        left, upper, right, lower = (
            left * resize_f,
            upper * resize_f,
            right * resize_f,
            lower * resize_f,
        )
        curr_w, curr_h = curr_w * resize_f, curr_h * resize_f
        # Padding with black
        padding_x = max(0, (tbbox_w - curr_w) // 2)
        padding_y = max(0, (tbbox_h - curr_h) // 2)
        left, upper, right, lower = (
            left + padding_x,
            upper + padding_y,
            right + padding_x,
            lower + padding_y,
        )
    # Src image is overlaid onto template image
    left, upper, right, lower = (
        left + x_off,
        upper + y_off,
        right + x_off,
        lower + y_off,
    )
    return left / template_w, upper / template_h, right / template_w, lower / template_h
160,723 | import math
from typing import Callable, List, Optional, Tuple
import numpy as np
from augly.image import utils as imutils
from PIL import Image, ImageDraw
def pad_bboxes_helper(bbox: Tuple, w_factor: float, h_factor: float, **kwargs) -> Tuple:
    """
    Padding adds `w_factor * src_w` pixels on each horizontal side and
    `h_factor * src_h` on each vertical side, so every coordinate is shifted
    by the matching factor and renormalized by the padded size (1 + 2*factor)
    """
    padded_w = 1 + 2 * w_factor
    padded_h = 1 + 2 * h_factor
    left, upper, right, lower = bbox
    return (
        (left + w_factor) / padded_w,
        (upper + h_factor) / padded_h,
        (right + w_factor) / padded_w,
        (lower + h_factor) / padded_h,
    )
The provided code snippet includes necessary dependencies for implementing the `pad_square_bboxes_helper` function. Write a Python function `def pad_square_bboxes_helper(bbox: Tuple, src_w: int, src_h: int, **kwargs) -> Tuple` to solve the following problem:
In pad_square, pad is called with w_factor & h_factor computed as follows, so we can use the `pad_bboxes_helper` function to transform the bbox
Here is the function:
def pad_square_bboxes_helper(bbox: Tuple, src_w: int, src_h: int, **kwargs) -> Tuple:
    """
    `pad_square` delegates to `pad` with the factors computed below (padding
    the shorter dimension out to the longer one), so the bbox transform is
    just `pad_bboxes_helper` with those same factors
    """
    if src_w < src_h:
        # Portrait: pad the width out to src_h
        return pad_bboxes_helper(
            bbox, w_factor=(src_h - src_w) / (2 * src_w), h_factor=0
        )
    # Landscape (or square): pad the height out to src_w
    return pad_bboxes_helper(
        bbox, w_factor=0, h_factor=(src_w - src_h) / (2 * src_h)
    )
160,724 | import math
from typing import Callable, List, Optional, Tuple
import numpy as np
from augly.image import utils as imutils
from PIL import Image, ImageDraw
def crop_bboxes_helper(
    bbox: Tuple, x1: float, y1: float, x2: float, y2: float, **kwargs
) -> Tuple:
    """
    Shifts the bbox by the crop origin (x1, y1), renormalizes by the crop's
    width/height, and clamps to [0, 1] so any part of the bbox removed by the
    crop is cut off as well. The x and y axes are handled symmetrically
    """
    left, upper, right, lower = bbox
    crop_w, crop_h = x2 - x1, y2 - y1
    return (
        max(0, (left - x1) / crop_w),
        max(0, (upper - y1) / crop_h),
        min(1, 1 - (x2 - right) / crop_w),
        min(1, 1 - (y2 - lower) / crop_h),
    )
The provided code snippet includes necessary dependencies for implementing the `perspective_transform_bboxes_helper` function. Write a Python function `def perspective_transform_bboxes_helper( bbox: Tuple, src_w: int, src_h: int, sigma: float, dx: float, dy: float, crop_out_black_border: bool, seed: Optional[int], **kwargs, ) -> Tuple` to solve the following problem:
Computes the bbox that encloses the bbox in the perspective transformed image. Also uses the `crop_bboxes_helper` function since the image is cropped if `crop_out_black_border` is True.
Here is the function:
def perspective_transform_bboxes_helper(
    bbox: Tuple,
    src_w: int,
    src_h: int,
    sigma: float,
    dx: float,
    dy: float,
    crop_out_black_border: bool,
    seed: Optional[int],
    **kwargs,
) -> Tuple:
    """
    Computes the bbox that encloses the bbox in the perspective transformed image. Also
    uses the `crop_bboxes_helper` function since the image is cropped if
    `crop_out_black_border` is True.

    @param bbox: normalized (left, upper, right, lower) fractions of the src image

    @param src_w, src_h: dimensions of the src image, in pixels

    @param sigma, dx, dy, seed: must match the arguments given to
        `perspective_transform` so the same random corner displacement is reproduced

    @param crop_out_black_border: whether `perspective_transform` cropped the result;
        if so the bbox is cropped identically

    @returns the normalized bbox enclosing the transformed bbox corners
    """

    def transform(x: float, y: float, a: List[float]) -> Tuple:
        """
        Transforms a point in the image given the perspective transform matrix; we will
        use this to transform the bounding box corners. Based on PIL source code:
        https://github.com/python-pillow/Pillow/blob/master/src/libImaging/Geometry.c#L399
        """
        return (
            (a[0] * x + a[1] * y + a[2]) / (a[6] * x + a[7] * y + a[8]),
            (a[3] * x + a[4] * y + a[5]) / (a[6] * x + a[7] * y + a[8]),
        )

    def get_perspective_transform(
        src_coords: List[Tuple[int, int]], dst_coords: List[Tuple[int, int]]
    ) -> List[float]:
        """
        Computes the transformation matrix used for the perspective transform with
        the given src & dst corner coordinates. Based on OpenCV source code:
        https://github.com/opencv/opencv/blob/master/modules/imgproc/src/imgwarp.cpp#L3277-L3304
        """
        a = np.zeros((8, 8), dtype=float)
        dst_x, dst_y = zip(*dst_coords)
        b = np.asarray(list(dst_x) + list(dst_y))
        for i, (sc, dc) in enumerate(zip(src_coords, dst_coords)):
            a[i][0] = a[i + 4][3] = sc[0]
            a[i][1] = a[i + 4][4] = sc[1]
            a[i][2] = a[i + 4][5] = 1
            a[i][6] = -sc[0] * dc[0]
            a[i][7] = -sc[1] * dc[0]
            a[i + 4][6] = -sc[0] * dc[1]
            a[i + 4][7] = -sc[1] * dc[1]
        # Solve the 8x8 system directly on the ndarray; the previous version
        # wrapped `a` in the deprecated `np.matrix` class before solving,
        # which was unnecessary and is slated for removal from NumPy
        res = np.linalg.solve(a, b)
        # The ninth homography coefficient is fixed at 1
        return res.tolist() + [1.0]

    assert (
        seed is not None
    ), "Cannot transform bbox for perspective_transform if seed is not provided"

    # Reproduce exactly the random corner displacement applied by
    # `perspective_transform` (same RNG, same draw order)
    rng = np.random.RandomState(seed)
    src_coords = [(0, 0), (src_w, 0), (src_w, src_h), (0, src_h)]
    dst_coords = [
        (rng.normal(point[0], sigma) + dx, rng.normal(point[1], sigma) + dy)
        for point in src_coords
    ]
    perspective_transform_coeffs = get_perspective_transform(src_coords, dst_coords)

    # Map the four bbox corners through the transform (+0.5 to hit pixel
    # centers, matching PIL) and take their axis-aligned enclosing box,
    # clamped to the image
    left_f, upper_f, right_f, lower_f = bbox
    left, upper, right, lower = (
        left_f * src_w,
        upper_f * src_h,
        right_f * src_w,
        lower_f * src_h,
    )
    bbox_coords = [(left, upper), (right, upper), (right, lower), (left, lower)]
    transformed_bbox_coords = [
        transform(x + 0.5, y + 0.5, perspective_transform_coeffs)
        for x, y in bbox_coords
    ]
    transformed_xs, transformed_ys = zip(*transformed_bbox_coords)
    transformed_bbox = (
        max(0, min(transformed_xs) / src_w),
        max(0, min(transformed_ys) / src_h),
        min(1, max(transformed_xs) / src_w),
        min(1, max(transformed_ys) / src_h),
    )

    # This is copy-pasted from `functional.py`, exactly how the crop coords are computed
    if crop_out_black_border:
        top_left, top_right, bottom_right, bottom_left = dst_coords
        new_left = max(0, top_left[0], bottom_left[0])
        new_right = min(src_w, top_right[0], bottom_right[0])
        new_top = max(0, top_left[1], top_right[1])
        new_bottom = min(src_h, bottom_left[1], bottom_right[1])
        transformed_bbox = crop_bboxes_helper(
            transformed_bbox,
            x1=new_left / src_w,
            y1=new_top / src_h,
            x2=new_right / src_w,
            y2=new_bottom / src_h,
        )

    return transformed_bbox
160,725 | import math
from typing import Callable, List, Optional, Tuple
import numpy as np
from augly.image import utils as imutils
from PIL import Image, ImageDraw
def crop_bboxes_helper(
    bbox: Tuple, x1: float, y1: float, x2: float, y2: float, **kwargs
) -> Tuple:
    """
    Shifts the bbox by the crop origin (x1, y1), renormalizes by the crop's
    width/height, and clamps to [0, 1] so any part of the bbox removed by the
    crop is cut off as well. The x and y axes are handled symmetrically
    """
    left, upper, right, lower = bbox
    crop_w, crop_h = x2 - x1, y2 - y1
    return (
        max(0, (left - x1) / crop_w),
        max(0, (upper - y1) / crop_h),
        min(1, 1 - (x2 - right) / crop_w),
        min(1, 1 - (y2 - lower) / crop_h),
    )
The provided code snippet includes necessary dependencies for implementing the `rotate_bboxes_helper` function. Write a Python function `def rotate_bboxes_helper( bbox: Tuple, src_w: int, src_h: int, degrees: float, **kwargs ) -> Tuple` to solve the following problem:
Computes the bbox that encloses the rotated bbox in the rotated image. This code was informed by looking at the source code for PIL.Image.rotate (https://pillow.readthedocs.io/en/stable/_modules/PIL/Image.html#Image.rotate). Also uses the `crop_bboxes_helper` function since the image is cropped after being rotated.
Here is the function:
def rotate_bboxes_helper(
    bbox: Tuple, src_w: int, src_h: int, degrees: float, **kwargs
) -> Tuple:
    """
    Computes the bbox that encloses the rotated bbox in the rotated image. This code was
    informed by looking at the source code for PIL.Image.rotate
    (https://pillow.readthedocs.io/en/stable/_modules/PIL/Image.html#Image.rotate).
    Also uses the `crop_bboxes_helper` function since the image is cropped after being
    rotated.

    @param bbox: normalized (left, upper, right, lower) fractions of the src image

    @param src_w, src_h: dimensions of the src image, in pixels

    @param degrees: rotation angle, matching the value passed to `rotate`

    @returns the normalized bbox in the rotated-and-cropped output image
    """
    # Convert the normalized bbox to pixel coordinates in the src image
    left_f, upper_f, right_f, lower_f = bbox
    left, upper, right, lower = (
        left_f * src_w,
        upper_f * src_h,
        right_f * src_w,
        lower_f * src_h,
    )
    # Top left, upper right, lower right, & lower left corner coefficients (in pixels)
    bbox_corners = [(left, upper), (right, upper), (right, lower), (left, lower)]

    def transform(x: int, y: int, matrix: List[float]) -> Tuple[float, float]:
        # Applies the 2x3 affine matrix (a, b, c, d, e, f) to point (x, y)
        (a, b, c, d, e, f) = matrix
        return a * x + b * y + c, d * x + e * y + f

    def get_enclosing_bbox(
        corners: List[Tuple[int, int]], rotation_matrix: List[float]
    ) -> Tuple[int, int, int, int]:
        # Maps each corner through the matrix and returns the integer
        # axis-aligned box (floor of mins, ceil of maxes) containing them all
        rotated_corners = [transform(x, y, rotation_matrix) for x, y in corners]
        xs, ys = zip(*rotated_corners)
        return (
            math.floor(min(xs)),
            math.floor(min(ys)),
            math.ceil(max(xs)),
            math.ceil(max(ys)),
        )

    # Get rotated bbox corner coefficients
    rotation_center = (src_w // 2, src_h // 2)
    angle_rad = -math.radians(degrees)
    # NOTE(review): the second row here is [sin, -cos] rather than a textbook
    # rotation's [-sin, cos], i.e. the map includes a reflection. Presumably
    # this accounts for PIL's y-down pixel coordinates (and the min/max
    # enclosing-box extents are unaffected by the reflection), but confirm
    # against PIL.Image.rotate before relying on it for off-center bboxes.
    # The round(..., 15) mirrors PIL, snapping near-zero trig values to 0.
    rotation_matrix = [
        round(math.cos(angle_rad), 15),
        round(math.sin(angle_rad), 15),
        0.0,
        round(math.sin(angle_rad), 15),
        round(-math.cos(angle_rad), 15),
        0.0,
    ]
    # Bake "rotate about the image center" into the translation terms
    rotation_matrix[2], rotation_matrix[5] = transform(
        -rotation_center[0], -rotation_center[1], rotation_matrix
    )
    rotation_matrix[2] += rotation_center[0]
    rotation_matrix[5] += rotation_center[1]
    # Get rotated image dimensions
    src_img_corners = [(0, 0), (src_w, 0), (src_w, src_h), (0, src_h)]
    (
        rotated_img_min_x,
        rotated_img_min_y,
        rotated_img_max_x,
        rotated_img_max_y,
    ) = get_enclosing_bbox(src_img_corners, rotation_matrix)
    rotated_img_w = rotated_img_max_x - rotated_img_min_x
    rotated_img_h = rotated_img_max_y - rotated_img_min_y
    # Get enclosing box corners around rotated bbox (on rotated image)
    new_bbox_left, new_bbox_upper, new_bbox_right, new_bbox_lower = get_enclosing_bbox(
        bbox_corners, rotation_matrix
    )
    bbox_enclosing_bbox = (
        new_bbox_left / rotated_img_w,
        new_bbox_upper / rotated_img_h,
        new_bbox_right / rotated_img_w,
        new_bbox_lower / rotated_img_h,
    )
    # Crop bbox as src image is cropped inside `rotate`; the helper returns
    # the dimensions of the largest axis-aligned rectangle that fits inside
    # the rotated image, and the crop is centered in the rotated image
    cropped_w, cropped_h = imutils.rotated_rect_with_max_area(src_w, src_h, degrees)
    cropped_img_left, cropped_img_upper, cropped_img_right, cropped_img_lower = (
        (rotated_img_w - cropped_w) // 2 + rotated_img_min_x,
        (rotated_img_h - cropped_h) // 2 + rotated_img_min_y,
        (rotated_img_w + cropped_w) // 2 + rotated_img_min_x,
        (rotated_img_h + cropped_h) // 2 + rotated_img_min_y,
    )
    return crop_bboxes_helper(
        bbox_enclosing_bbox,
        x1=cropped_img_left / rotated_img_w,
        y1=cropped_img_upper / rotated_img_h,
        x2=cropped_img_right / rotated_img_w,
        y2=cropped_img_lower / rotated_img_h,
    )
160,726 | import math
from typing import Callable, List, Optional, Tuple
import numpy as np
from augly.image import utils as imutils
from PIL import Image, ImageDraw
The provided code snippet includes necessary dependencies for implementing the `vflip_bboxes_helper` function. Write a Python function `def vflip_bboxes_helper(bbox: Tuple, **kwargs) -> Tuple` to solve the following problem:
Analogous to hflip, when the src image is vertically flipped, the bounding box also gets vertically flipped
Here is the function:
def vflip_bboxes_helper(bbox: Tuple, **kwargs) -> Tuple:
    """
    Mirrors the bbox across the horizontal center line: after a vertical
    flip, the distance from the bottom image edge to the old lower bbox edge
    becomes the new upper bbox edge (and vice versa); x-coords are untouched
    """
    left, upper, right, lower = bbox
    return (left, 1 - lower, right, 1 - upper)
160,727 | from copy import deepcopy
from typing import Any, Callable, Dict, List, Optional, Tuple
from augly.image import intensity as imintensity
from augly.image.utils import bboxes as imbboxes
from PIL import Image
def get_func_kwargs(
    metadata: Optional[List[Dict[str, Any]]],
    local_kwargs: Dict[str, Any],
    **kwargs,
) -> Dict[str, Any]:
    """
    Collects the kwargs to record for a metadata entry. Returns {} when no
    metadata list was supplied. Note that "bboxes" is deliberately popped
    out of `local_kwargs` (mutating the caller's dict) so it never appears
    directly in the result; if earlier augmentations already ran, we chain
    from the last entry's "dst_bboxes" instead of the caller-supplied ones
    """
    if metadata is None:
        return {}

    bboxes = local_kwargs.pop("bboxes", None)
    if len(metadata) != 0:
        bboxes = metadata[-1].get("dst_bboxes", None)

    collected = deepcopy(local_kwargs)
    # "metadata" is expected to be present; a KeyError here flags a caller bug
    collected.pop("metadata")
    if bboxes is not None:
        collected["src_bboxes"] = deepcopy(bboxes)
        collected["dst_bboxes"] = deepcopy(bboxes)
    for key, value in deepcopy(kwargs).items():
        collected[key] = value
    return collected
160,728 | from copy import deepcopy
from typing import Any, Callable, Dict, List, Optional, Tuple
from augly.image import intensity as imintensity
from augly.image.utils import bboxes as imbboxes
from PIL import Image
def transform_bboxes(
    function_name: str,
    image: Image.Image,
    aug_image: Image.Image,
    dst_bboxes: Optional[List[Tuple]] = None,
    bbox_format: Optional[str] = None,
    bboxes_helper_func: Optional[Callable] = None,
    **kwargs,
) -> None:
    """
    Updates `dst_bboxes` in place so they track the augmentation named
    `function_name`, which turned `image` into `aug_image`. No-op when no
    bboxes are provided; otherwise `bbox_format` is required
    """
    if dst_bboxes is None:
        return

    supported_formats = ["pascal_voc", "pascal_voc_norm", "coco", "yolo"]
    assert (
        bbox_format is not None and bbox_format in supported_formats
    ), "bbox_format must be specified if bboxes are passed in and must be a supported format"

    src_w, src_h = image.size
    aug_w, aug_h = aug_image.size
    norm_bboxes = validate_and_normalize_bboxes(dst_bboxes, bbox_format, src_w, src_h)

    helper = bboxes_helper_func
    if helper is None:
        # Fall back to the helper named after the augmentation; use the
        # identity if no such helper exists
        helper = getattr(
            imbboxes, f"{function_name}_bboxes_helper", lambda bbox, **_: bbox
        )

    helper_kwargs = deepcopy(kwargs)
    helper_kwargs.pop("src_bboxes", None)
    transformed = [
        helper(bbox=norm_bbox, src_w=src_w, src_h=src_h, **helper_kwargs)
        for norm_bbox in norm_bboxes
    ]
    transformed = check_for_gone_bboxes(transformed)
    # Writes the converted bboxes back into `dst_bboxes` in place
    convert_bboxes(transformed, dst_bboxes, bbox_format, aug_w, aug_h)
def get_metadata(
    metadata: Optional[List[Dict[str, Any]]],
    function_name: str,
    image: Optional[Image.Image] = None,
    aug_image: Optional[Image.Image] = None,
    bboxes: Optional[Tuple] = None,
    bboxes_helper_func: Optional[Callable] = None,
    **kwargs,
) -> None:
    """
    Appends one metadata entry for the augmentation `function_name` (src/dst
    dimensions, the kwargs used, and an intensity score), transforming any
    bboxes carried in `kwargs` along the way. No-op when `metadata` is None
    """
    if metadata is None:
        return

    assert isinstance(
        metadata, list
    ), "Expected `metadata` to be set to None or of type list"
    assert (
        image is not None
    ), "Expected `image` to be passed in if metadata was provided"
    assert (
        aug_image is not None
    ), "Expected `aug_image` to be passed in if metadata was provided"

    transform_bboxes(
        function_name=function_name,
        image=image,
        aug_image=aug_image,
        bboxes_helper_func=bboxes_helper_func,
        **kwargs,
    )

    # Json can't represent tuples, so they're represented as lists, which should
    # be equivalent to tuples. So let's avoid tuples in the metadata by
    # converting any tuples to lists here.
    serializable_kwargs = {
        k: (list(v) if isinstance(v, tuple) else v) for k, v in kwargs.items()
    }
    if (
        bboxes_helper_func is not None
        and bboxes_helper_func.__name__ == "spatial_bbox_helper"
    ):
        # The aug_function callable isn't json-serializable, so drop it
        serializable_kwargs.pop("aug_function", None)

    metadata.append(
        {
            "name": function_name,
            "src_width": image.width,
            "src_height": image.height,
            "dst_width": aug_image.width,
            "dst_height": aug_image.height,
            **serializable_kwargs,
        }
    )

    # Score the augmentation's intensity; default to 0.0 if no scorer exists
    intensity_func = getattr(
        imintensity, f"{function_name}_intensity", lambda **_: 0.0
    )
    metadata[-1]["intensity"] = intensity_func(metadata=metadata[-1], **kwargs)
160,729 | import functools
import os
import shutil
import tempfile
from typing import Callable, Dict, List, Optional, Union
from augly import utils
from augly.video import helpers as helpers
from augly.video.augmenters import cv2 as ac
def get_image_kwargs(imgs_dir: str) -> Dict[str, Optional[Union[List[str], str]]]:
    """Returns the image-directory kwargs: the dir path plus its file listing."""
    imgs_files = utils.pathmgr.ls(imgs_dir)
    return {"imgs_dir": imgs_dir, "imgs_files": imgs_files}
160,730 | import math
import os
import shutil
import tempfile
from typing import Any, Dict, List, Optional, Tuple
import cv2
import ffmpeg
import numpy as np
from augly import utils
from augly.utils.ffmpeg import FFMPEG_PATH
from augly.video import helpers
def create_video_from_image(output_path: str, image_path: str, duration: float) -> None:
    """
    Creates a video with all frames being the image provided

    @param output_path: the path in which the resulting video will be stored
    @param image_path: the path to the image used for every frame
    @param duration: length of the generated video in seconds; must be positive
    """
    utils.validate_output_path(output_path)
    utils.validate_image_path(image_path)
    assert duration > 0, "Duration of the video must be a positive value"
    # Loop the still image indefinitely; the output is bounded by t=duration.
    im_stream = ffmpeg.input(image_path, stream_loop=-1)
    # Pad width/height up to even values, as required for yuv420p output.
    video = im_stream.filter("framerate", utils.DEFAULT_FRAME_RATE).filter(
        "pad", **{"width": "ceil(iw/2)*2", "height": "ceil(ih/2)*2"}
    )
    # Loop a bundled silent clip so the output always has an audio stream.
    silent_audio_path = utils.pathmgr.get_local_path(utils.SILENT_AUDIO_PATH)
    audio = ffmpeg.input(silent_audio_path, stream_loop=math.ceil(duration)).audio
    output = ffmpeg.output(video, audio, output_path, pix_fmt="yuv420p", t=duration)
    output.overwrite_output().run(cmd=FFMPEG_PATH)
# The dependencies above are what `create_color_video` needs.
# Task: write `create_color_video(output_path, duration, height, width,
# color=utils.DEFAULT_COLOR)` that creates a video whose every frame is the
# specified RGB color (default black), with the given duration and dimensions.
def create_color_video(
    output_path: str,
    duration: float,
    height: int,
    width: int,
    color: Tuple[int, int, int] = utils.DEFAULT_COLOR,
) -> None:
    """
    Creates a video with frames of the specified color

    @param output_path: the path in which the resulting video will be stored
    @param duration: how long the video should be, in seconds
    @param height: the desired height of the video to be generated
    @param width: the desired width of the video to be generated
    @param color: RGB color of the video. Default color is black
    """
    utils.validate_output_path(output_path)
    assert duration > 0, "Duration of the video must be a positive value"
    assert height > 0, "Height of the video must be a positive value"
    assert width > 0, "Width of the video must be a positive value"
    with tempfile.TemporaryDirectory() as tmpdir:
        image_path = os.path.join(tmpdir, "image.png")
        # `color` is RGB but OpenCV expects BGR, hence the [::-1] reversal.
        # Fix: pin dtype to uint8 — np.full with Python ints defaults to
        # int64, which cv2.imwrite does not accept as 8-bit image data.
        color_frame = np.full((height, width, 3), color[::-1], dtype=np.uint8)
        cv2.imwrite(image_path, color_frame)
        create_video_from_image(output_path, image_path, duration)
160,731 | import math
import os
import shutil
import tempfile
from typing import Any, Dict, List, Optional, Tuple
import cv2
import ffmpeg
import numpy as np
from augly import utils
from augly.utils.ffmpeg import FFMPEG_PATH
from augly.video import helpers
def validate_input_and_output_paths(
video_path: str, output_path: Optional[str]
) -> Tuple[str, str]:
def identity_function(
    video_path: str,
    output_path: Optional[str] = None,
    metadata: Optional[List[Dict[str, Any]]] = None,
) -> str:
    """
    No-op video "augmentation": copies the input video to `output_path`
    (when given and distinct) and optionally records metadata for the call.

    @param video_path: the path to the input video
    @param output_path: where to write the unchanged video; may be None
    @param metadata: if provided, a metadata dict for this call is appended
    @returns: `output_path` when provided, otherwise `video_path`
    """
    video_path, output_path = validate_input_and_output_paths(video_path, output_path)
    if output_path is not None and output_path != video_path:
        shutil.copy(video_path, output_path)
    if metadata is not None:
        func_kwargs = helpers.get_func_kwargs(metadata, locals(), video_path)
        helpers.get_metadata(
            metadata=metadata, function_name="identity_function", **func_kwargs
        )
    return output_path or video_path
160,732 | import math
import os
import shutil
import tempfile
from typing import Any, Dict, List, Optional
import ffmpeg
import numpy as np
from augly.audio import utils as audutils
from augly.utils import pathmgr, SILENT_AUDIO_PATH
from augly.utils.ffmpeg import FFMPEG_PATH, FFPROBE_PATH
from vidgear.gears import WriteGear
def execute_vidgear_command(output_path: str, ffmpeg_command: List[str]) -> None:
    """Runs a raw ffmpeg argument list via vidgear's WriteGear, producing
    `output_path`, and closes the writer afterwards."""
    writer = WriteGear(output=output_path, logging=True)
    writer.execute_ffmpeg_cmd(ffmpeg_command)
    writer.close()
def merge_video_and_audio(
    video_path: str,
    audio_path: Optional[str],
    output_path: str,
) -> None:
    """
    Re-encodes `video_path` as H.264/yuv420p into `output_path`, muxing in
    `audio_path` as a second input when provided (audio stream is copied).

    @param video_path: path to the input video
    @param audio_path: optional path to an audio file to merge in
    @param output_path: the path in which the resulting video will be stored
    """
    # The two original branches differed only by the extra audio input, so
    # the command is assembled incrementally instead of being duplicated.
    ffmpeg_command = ["-y", "-i", video_path]
    if audio_path:
        ffmpeg_command += ["-i", audio_path]
    ffmpeg_command += [
        "-vf",
        "format=pix_fmts=yuv420p",
        "-c:v",
        "libx264",
        "-c:a",
        "copy",
        "-bsf:a",
        "aac_adtstoasc",
        "-preset",
        "ultrafast",
        output_path,
    ]
    execute_vidgear_command(output_path, ffmpeg_command)
def combine_frames_and_audio_to_file(
    raw_frames: str,
    audio: Optional[str],
    output_path: str,
    framerate: float,
) -> None:
    """
    Encodes a glob of frame images into an H.264 video at `framerate`, pads
    its dimensions to even values, then muxes in `audio` (if any) and writes
    the final video to `output_path`.

    @param raw_frames: a glob pattern matching the frame image files
    @param audio: optional path to an audio file to merge in
    @param audio: may be None, in which case no audio is added
    @param output_path: the path in which the resulting video will be stored
    @param framerate: frames per second for the encoded video
    @raises RuntimeError: if the directory part of `raw_frames` does not exist
    """
    frame_dir = os.path.dirname(raw_frames)
    if not os.path.isdir(frame_dir):
        raise RuntimeError(
            f"Got raw frames glob path of {raw_frames}, but {frame_dir} is not "
            "a directory"
        )
    with tempfile.TemporaryDirectory() as tmpdir:
        temp_video_path = os.path.join(tmpdir, "out.mp4")
        # Step 1: encode the globbed frames into an intermediate mp4.
        ffmpeg_command = [
            "-y",
            "-framerate",
            str(framerate),
            "-pattern_type",
            "glob",
            "-i",
            raw_frames,
            "-c:v",
            "libx264",
            "-pix_fmt",
            "yuv420p",
            "-preset",
            "ultrafast",
            "-vf",
            "pad=ceil(iw/2)*2:ceil(ih/2)*2",
            temp_video_path,
        ]
        execute_vidgear_command(temp_video_path, ffmpeg_command)
        # Step 2: pad again to even dimensions.
        # NOTE(review): the first command already applies the same pad filter,
        # so this pass looks redundant — confirm before removing.
        temp_padded_video_path = os.path.join(tmpdir, "out1.mp4")
        ffmpeg_command = [
            "-y",
            "-i",
            temp_video_path,
            "-vf",
            "pad=width=ceil(iw/2)*2:height=ceil(ih/2)*2",
            "-preset",
            "ultrafast",
            temp_padded_video_path,
        ]
        execute_vidgear_command(temp_padded_video_path, ffmpeg_command)
        # Step 3: mux in the audio (or just re-encode when audio is None).
        merge_video_and_audio(temp_padded_video_path, audio, output_path)
160,733 | import math
import os
import shutil
import tempfile
from typing import Any, Dict, List, Optional
import ffmpeg
import numpy as np
from augly.audio import utils as audutils
from augly.utils import pathmgr, SILENT_AUDIO_PATH
from augly.utils.ffmpeg import FFMPEG_PATH, FFPROBE_PATH
from vidgear.gears import WriteGear
def get_audio_info(media_path: str) -> Dict[str, Any]:
    """
    Returns whatever ffprobe returns. Of particular use are things such as the
    encoder ("codec_name") used for audio encoding, the sample rate ("sample_rate"),
    and length in seconds ("duration")

    Accepts as input either an audio or video path.

    Raises FileNotFoundError if the path cannot be resolved to a local file,
    and AssertionError if the media contains no audio stream.
    """
    try:
        local_media_path = pathmgr.get_local_path(media_path)
    except RuntimeError:
        raise FileNotFoundError(f"Provided media path {media_path} does not exist")
    probe = ffmpeg.probe(local_media_path, cmd=FFPROBE_PATH)
    # Pick the first audio stream ffprobe reports, if any.
    audio_info = next(
        (stream for stream in probe["streams"] if stream["codec_type"] == "audio"),
        None,
    )
    assert (
        audio_info is not None
    ), "Error retrieving audio metadata, please verify that an audio stream exists"
    return audio_info
import ffmpeg
FFMPEG_PATH = os.environ.get("AUGLY_FFMPEG_PATH", None)
if FFMPEG_PATH is None:
FFMPEG_PATH = spawn.find_executable("ffmpeg")
assert FFMPEG_PATH is not None and FFPROBE_PATH is not None, ffmpeg_paths_error.format(
"AUGLY_FFMPEG_PATH", "AUGLY_FFPROBE_PATH"
)
def extract_audio_to_file(video_path: str, output_audio_path: str) -> None:
    """
    Extracts the audio track of `video_path` into `output_audio_path`.

    For a ".aac" destination the source codec is kept and the stream is
    down-mixed to mono; for any other extension the audio is decoded to raw
    mono 32-bit float PCM and saved via the audio utils at the source sample
    rate.
    """
    audio_info = get_audio_info(video_path)
    sample_rate = str(audio_info["sample_rate"])
    codec = audio_info["codec_name"]
    if os.path.splitext(output_audio_path)[-1] == ".aac":
        (
            ffmpeg.input(video_path, loglevel="quiet")
            .output(output_audio_path, acodec=codec, ac=1)
            .overwrite_output()
            .run(cmd=FFMPEG_PATH)
        )
    else:
        # Decode to raw float32 samples on stdout, then let the audio utils
        # write them out in the format implied by the output extension.
        out, err = (
            ffmpeg.input(video_path, loglevel="quiet")
            .output("-", format="f32le", acodec="pcm_f32le", ac=1, ar=sample_rate)
            .run(cmd=FFMPEG_PATH, capture_stdout=True, capture_stderr=True)
        )
        audio = np.frombuffer(out, np.float32)
        audutils.ret_and_save_audio(audio, output_audio_path, int(sample_rate))
160,734 | import math
import os
import shutil
import tempfile
from typing import Any, Dict, List, Optional
import ffmpeg
import numpy as np
from augly.audio import utils as audutils
from augly.utils import pathmgr, SILENT_AUDIO_PATH
from augly.utils.ffmpeg import FFMPEG_PATH, FFPROBE_PATH
from vidgear.gears import WriteGear
def execute_vidgear_command(output_path: str, ffmpeg_command: List[str]) -> None:
    """Runs a raw ffmpeg argument list via vidgear's WriteGear, producing
    `output_path`, and closes the writer afterwards."""
    writer = WriteGear(output=output_path, logging=True)
    writer.execute_ffmpeg_cmd(ffmpeg_command)
    writer.close()
def get_video_info(video_path: str) -> Dict[str, Any]:
    """
    Returns whatever ffprobe returns. Of particular use are things such as the FPS
    ("avg_frame_rate"), number of raw frames ("nb_frames"), height and width of each
    frame ("height", "width") and length in seconds ("duration")

    Raises FileNotFoundError if the path cannot be resolved to a local file,
    and AssertionError if the file contains no video stream.
    """
    try:
        local_video_path = pathmgr.get_local_path(video_path)
    except RuntimeError:
        raise FileNotFoundError(f"Provided video path {video_path} does not exist")
    probe = ffmpeg.probe(local_video_path, cmd=FFPROBE_PATH)
    # Pick the first video stream ffprobe reports, if any.
    video_info = next(
        (stream for stream in probe["streams"] if stream["codec_type"] == "video"),
        None,
    )
    assert (
        video_info is not None
    ), "Error retrieving video metadata, please verify that the video file exists"
    return video_info
def extract_frames_to_dir(
    video_path: str,
    output_dir: str,
    output_pattern: str = "raw_frame%08d.jpg",
    quality: int = 0,
    scale: float = 1,
) -> None:
    """
    Extracts every frame of `video_path` into `output_dir` as image files
    named per `output_pattern`, optionally rescaled by `scale`.

    @param video_path: the path of the video whose frames are extracted
    @param output_dir: directory in which the frame images are written
    @param output_pattern: printf-style filename pattern for the frames
    @param quality: ffmpeg `-qscale:v` value (lower values mean higher quality)
    @param scale: uniform scale factor applied to each frame's width & height
    """
    video_info = get_video_info(video_path)
    ffmpeg_command = [
        "-y",
        "-i",
        video_path,
        "-vf",
        f"scale=iw*{scale}:ih*{scale}",
        "-vframes",
        str(video_info["nb_frames"]),
        "-qscale:v",
        str(quality),
        "-preset",
        "ultrafast",
        os.path.join(output_dir, output_pattern),
    ]
    execute_vidgear_command(os.path.join(output_dir, output_pattern), ffmpeg_command)
160,735 | import math
import os
import shutil
import tempfile
from typing import Any, Dict, List, Optional
import ffmpeg
import numpy as np
from augly.audio import utils as audutils
from augly.utils import pathmgr, SILENT_AUDIO_PATH
from augly.utils.ffmpeg import FFMPEG_PATH, FFPROBE_PATH
from vidgear.gears import WriteGear
def get_video_info(video_path: str) -> Dict[str, Any]:
    """
    Returns whatever ffprobe returns. Of particular use are things such as the FPS
    ("avg_frame_rate"), number of raw frames ("nb_frames"), height and width of each
    frame ("height", "width") and length in seconds ("duration")

    Raises FileNotFoundError if the path cannot be resolved to a local file,
    and AssertionError if the file contains no video stream.
    """
    try:
        local_video_path = pathmgr.get_local_path(video_path)
    except RuntimeError:
        raise FileNotFoundError(f"Provided video path {video_path} does not exist")
    probe = ffmpeg.probe(local_video_path, cmd=FFPROBE_PATH)
    # Pick the first video stream ffprobe reports, if any.
    video_info = next(
        (stream for stream in probe["streams"] if stream["codec_type"] == "video"),
        None,
    )
    assert (
        video_info is not None
    ), "Error retrieving video metadata, please verify that the video file exists"
    return video_info
def get_video_fps(video_path: str) -> Optional[float]:
    """Returns the average FPS of the video, or None when it can't be parsed."""
    info = get_video_info(video_path)
    try:
        raw_rate = info["avg_frame_rate"]
        if "/" not in raw_rate:
            return float(raw_rate)
        # ffmpeg often returns fractional framerates, e.g. 225480/7523
        numerator, denominator = (float(part) for part in raw_rate.split("/"))
        return numerator / denominator
    except Exception:
        return None
160,736 | import math
import os
import shutil
import tempfile
from typing import Any, Dict, List, Optional
import ffmpeg
import numpy as np
from augly.audio import utils as audutils
from augly.utils import pathmgr, SILENT_AUDIO_PATH
from augly.utils.ffmpeg import FFMPEG_PATH, FFPROBE_PATH
from vidgear.gears import WriteGear
def get_video_info(video_path: str) -> Dict[str, Any]:
    """
    Returns whatever ffprobe returns. Of particular use are things such as the FPS
    ("avg_frame_rate"), number of raw frames ("nb_frames"), height and width of each
    frame ("height", "width") and length in seconds ("duration")

    Raises FileNotFoundError if the path cannot be resolved to a local file,
    and AssertionError if the file contains no video stream.
    """
    try:
        local_video_path = pathmgr.get_local_path(video_path)
    except RuntimeError:
        raise FileNotFoundError(f"Provided video path {video_path} does not exist")
    probe = ffmpeg.probe(local_video_path, cmd=FFPROBE_PATH)
    # Pick the first video stream ffprobe reports, if any.
    video_info = next(
        (stream for stream in probe["streams"] if stream["codec_type"] == "video"),
        None,
    )
    assert (
        video_info is not None
    ), "Error retrieving video metadata, please verify that the video file exists"
    return video_info
def has_audio_stream(video_path: str) -> bool:
    """Returns True iff ffprobe reports at least one audio stream in the file."""
    streams = ffmpeg.probe(video_path, cmd=FFPROBE_PATH)["streams"]
    # Idiomatic replacement for the manual loop-with-early-return.
    return any(stream["codec_type"] == "audio" for stream in streams)
import ffmpeg
FFMPEG_PATH = os.environ.get("AUGLY_FFMPEG_PATH", None)
if FFMPEG_PATH is None:
FFMPEG_PATH = spawn.find_executable("ffmpeg")
assert FFMPEG_PATH is not None and FFPROBE_PATH is not None, ffmpeg_paths_error.format(
"AUGLY_FFMPEG_PATH", "AUGLY_FFPROBE_PATH"
)
def add_silent_audio(
    video_path: str,
    output_path: Optional[str] = None,
    duration: Optional[float] = None,
) -> None:
    """
    Ensures the video has an audio stream: if `video_path` already contains
    audio it is copied through unchanged; otherwise a looped silent track is
    muxed in for `duration` seconds (defaulting to the video's own duration).

    @param video_path: the path to the input video (may be remote)
    @param output_path: where to write the result; required for remote input,
        defaults to overwriting `video_path` otherwise
    @param duration: length of the silent track in seconds; None = full video
    """
    local_video_path = pathmgr.get_local_path(video_path)
    if local_video_path != video_path:
        # A remote path was fetched to a local copy; an explicit output is
        # required because the remote original can't be overwritten in place.
        assert (
            output_path is not None
        ), "If remote video_path is provided, an output_path must be provided"
        video_path = local_video_path
    output_path = output_path or video_path
    if has_audio_stream(video_path):
        # Nothing to add; copy through only when a distinct output was asked for.
        if video_path != output_path:
            shutil.copy(video_path, output_path)
        return
    duration = duration or float(get_video_info(video_path)["duration"])
    video = ffmpeg.input(video_path).video
    silent_audio_path = pathmgr.get_local_path(SILENT_AUDIO_PATH)
    # Loop the silent clip enough whole times to cover the full duration.
    audio = ffmpeg.input(silent_audio_path, stream_loop=math.ceil(duration)).audio
    output = ffmpeg.output(video, audio, output_path, pix_fmt="yuv420p", t=duration)
    output.overwrite_output().run(cmd=FFMPEG_PATH)
160,737 | from typing import Any, Dict, List, Optional, Tuple
import numpy as np
from augly.image import intensity as imint, utils as imutils
from augly.video.helpers import get_video_info
def add_noise_intensity(level: int, **kwargs) -> float:
    """Intensity of add_noise: `level` in [0, 100] maps linearly onto [0, 100]."""
    valid_level = isinstance(level, (float, int)) and 0 <= level <= 100
    assert valid_level, "level must be a number in [0, 100]"
    fraction = level / 100
    return fraction * 100.0
160,738 | from typing import Any, Dict, List, Optional, Tuple
import numpy as np
from augly.image import intensity as imint, utils as imutils
from augly.video.helpers import get_video_info
def apply_lambda_intensity(aug_function: str, **kwargs) -> float:
    """Delegates to `<aug_function>_intensity` if defined in this module;
    falls back to the maximal intensity otherwise."""
    intensity_func = globals().get(f"{aug_function}_intensity")
    if not intensity_func:
        return 100.0
    return intensity_func(**kwargs)
160,739 | from typing import Any, Dict, List, Optional, Tuple
import numpy as np
from augly.image import intensity as imint, utils as imutils
from augly.video.helpers import get_video_info
def audio_swap_intensity(offset: float, **kwargs) -> float:
    """Intensity is the fraction of audio replaced: (1 - offset) scaled to [0, 100]."""
    replaced_fraction = 1.0 - offset
    return replaced_fraction * 100.0
160,740 | from typing import Any, Dict, List, Optional, Tuple
import numpy as np
from augly.image import intensity as imint, utils as imutils
from augly.video.helpers import get_video_info
def augment_audio_intensity(audio_metadata: List[Dict[str, Any]], **kwargs) -> float:
    """Reuses the intensity already computed for the underlying audio augmentation."""
    first_entry = audio_metadata[0]
    return first_entry["intensity"]
160,741 | from typing import Any, Dict, List, Optional, Tuple
import numpy as np
from augly.image import intensity as imint, utils as imutils
from augly.video.helpers import get_video_info
def blend_videos_intensity(opacity: float, overlay_size: float, **kwargs) -> float:
    """Delegates to the shared image overlay-media intensity helper."""
    result = imint.overlay_media_intensity_helper(opacity, overlay_size)
    return result
160,742 | from typing import Any, Dict, List, Optional, Tuple
import numpy as np
from augly.image import intensity as imint, utils as imutils
from augly.video.helpers import get_video_info
def blur_intensity(sigma: int, **kwargs) -> float:
    """Intensity scales linearly with sigma, saturating at sigma == 100."""
    valid_sigma = isinstance(sigma, (float, int)) and sigma >= 0
    assert valid_sigma, "sigma must be a non-negative number"
    max_sigma = 100
    scaled = (sigma / max_sigma) * 100.0
    return min(scaled, 100.0)
160,743 | from typing import Any, Dict, List, Optional, Tuple
import numpy as np
from augly.image import intensity as imint, utils as imutils
from augly.video.helpers import get_video_info
def change_aspect_ratio_intensity(
    ratio: float, metadata: Dict[str, Any], **kwargs
) -> float:
    """Intensity is the absolute change in aspect ratio, normalized against a
    maximum delta of 10.0 and capped at 100."""
    valid_ratio = isinstance(ratio, (float, int)) and ratio > 0
    assert valid_ratio, "ratio must be a positive number"
    src_ratio = metadata["src_width"] / metadata["src_height"]
    max_ratio_change = 10.0
    delta = abs(ratio - src_ratio)
    return min((delta / max_ratio_change) * 100.0, 100.0)
160,744 | from typing import Any, Dict, List, Optional, Tuple
import numpy as np
from augly.image import intensity as imint, utils as imutils
from augly.video.helpers import get_video_info
def change_video_speed_intensity(factor: float, **kwargs) -> float:
    """Intensity of a speed change: slow-downs and speed-ups of the same
    magnitude are equally intense; saturates at a 10x change."""
    valid_factor = isinstance(factor, (float, int)) and factor > 0
    assert valid_factor, "factor must be a positive number"
    if factor == 1.0:
        return 0.0
    max_factor = 10.0
    # Map both directions onto the >1 magnitude scale.
    magnitude = factor if factor > 1 else 1 / factor
    return min((magnitude / max_factor) * 100.0, 100.0)
160,745 | from typing import Any, Dict, List, Optional, Tuple
import numpy as np
from augly.image import intensity as imint, utils as imutils
from augly.video.helpers import get_video_info
def brightness_intensity(level: float, **kwargs) -> float:
    """Intensity is the magnitude of the brightness shift, scaled to [0, 100]."""
    valid_level = isinstance(level, (float, int)) and -1 <= level <= 1
    assert valid_level, "level must be a number in [-1, 1]"
    magnitude = abs(level)
    return magnitude * 100.0
def contrast_intensity(level: float, **kwargs) -> float:
    """Intensity is the magnitude of the contrast level, normalized by 1000."""
    valid_level = isinstance(level, (float, int)) and -1000 <= level <= 1000
    assert valid_level, "level must be a number in [-1000, 1000]"
    normalized = abs(level) / 1000
    return normalized * 100.0
def color_jitter_intensity(
    brightness_factor: float, contrast_factor: float, saturation_factor: float, **kwargs
) -> float:
    """Intensity is the product of the three normalized factor magnitudes,
    scaled to [0, 100]."""
    assert (
        isinstance(brightness_factor, (float, int)) and -1 <= brightness_factor <= 1
    ), "brightness_factor must be a number in [-1, 1]"
    assert (
        isinstance(contrast_factor, (float, int)) and -1000 <= contrast_factor <= 1000
    ), "contrast_factor must be a number in [-1000, 1000]"
    assert (
        isinstance(saturation_factor, (float, int)) and 0 <= saturation_factor <= 3
    ), "saturation_factor must be a number in [0, 3]"
    # Each factor is normalized to [0, 1] before the product is taken.
    norm_brightness = abs(brightness_factor)
    norm_contrast = abs(contrast_factor) / 1000
    norm_saturation = saturation_factor / 3
    return (norm_brightness * norm_contrast * norm_saturation) * 100.0
160,746 | from typing import Any, Dict, List, Optional, Tuple
import numpy as np
from augly.image import intensity as imint, utils as imutils
from augly.video.helpers import get_video_info
def time_crop_or_pad_intensity_helper(metadata: Dict[str, Any]) -> float:
    """
    Intensity of a temporal crop/pad transform: the percentage of video time
    removed (crop) or added (pad). The longer of the src/dst durations is the
    denominator, so the result can never exceed 100.
    """
    src = metadata["src_duration"]
    dst = metadata["dst_duration"]
    denominator = max(src, dst)
    return (abs(dst - src) / denominator) * 100.0
def concat_intensity(metadata: Dict[str, Any], **kwargs) -> float:
    """Concatenation changes duration, so reuse the temporal crop/pad intensity."""
    intensity = time_crop_or_pad_intensity_helper(metadata)
    return intensity
160,747 | from typing import Any, Dict, List, Optional, Tuple
import numpy as np
from augly.image import intensity as imint, utils as imutils
from augly.video.helpers import get_video_info
def crop_intensity(metadata: Dict[str, Any], **kwargs) -> float:
    """Cropping changes spatial dimensions, so reuse the resize intensity helper."""
    intensity = imint.resize_intensity_helper(metadata)
    return intensity
160,748 | from typing import Any, Dict, List, Optional, Tuple
import numpy as np
from augly.image import intensity as imint, utils as imutils
from augly.video.helpers import get_video_info
def encoding_quality_intensity(quality: int, **kwargs) -> float:
    """Maps the CRF-style quality value [0, 51] linearly onto [0, 100]."""
    valid_quality = isinstance(quality, int) and 0 <= quality <= 51
    assert valid_quality, "quality must be a number in [0, 51]"
    max_quality = 51
    return (quality / max_quality) * 100.0
160,749 | from typing import Any, Dict, List, Optional, Tuple
import numpy as np
from augly.image import intensity as imint, utils as imutils
from augly.video.helpers import get_video_info
def fps_intensity(fps: int, metadata: Dict[str, Any], **kwargs) -> float:
    """
    Intensity is the fractional FPS reduction relative to the source FPS,
    scaled to [0, 100]. Raising the FPS (fps >= src_fps) does not degrade
    the video, so it is clamped to 0 instead of going negative as before.
    """
    assert isinstance(fps, (float, int)), "fps must be a number"
    src_fps = metadata["src_fps"]
    reduction = (src_fps - fps) / src_fps
    # Clamp into [0, 100]: previously fps > src_fps produced a negative value.
    return min(max(reduction, 0.0) * 100.0, 100.0)
160,750 | from typing import Any, Dict, List, Optional, Tuple
import numpy as np
from augly.image import intensity as imint, utils as imutils
from augly.video.helpers import get_video_info
def grayscale_intensity(**kwargs) -> float:
    """Grayscale always transforms the entire frame, so intensity is maximal."""
    return 100.0
160,751 | from typing import Any, Dict, List, Optional, Tuple
import numpy as np
from augly.image import intensity as imint, utils as imutils
from augly.video.helpers import get_video_info
def hflip_intensity(**kwargs) -> float:
    """A horizontal flip always affects the entire frame, so intensity is maximal."""
    return 100.0
160,752 | from typing import Any, Dict, List, Optional, Tuple
import numpy as np
from augly.image import intensity as imint, utils as imutils
from augly.video.helpers import get_video_info
def hstack_intensity(metadata: Dict[str, Any], **kwargs) -> float:
    """Horizontal stacking changes dimensions, so reuse the resize intensity helper."""
    intensity = imint.resize_intensity_helper(metadata)
    return intensity
160,753 | from typing import Any, Dict, List, Optional, Tuple
import numpy as np
from augly.image import intensity as imint, utils as imutils
from augly.video.helpers import get_video_info
def identity_function_intensity(**kwargs) -> float:
    """The identity transform changes nothing, so intensity is zero."""
    return 0.0
160,754 | from typing import Any, Dict, List, Optional, Tuple
import numpy as np
from augly.image import intensity as imint, utils as imutils
from augly.video.helpers import get_video_info
def time_crop_or_pad_intensity_helper(metadata: Dict[str, Any]) -> float:
    """
    Intensity of a temporal crop/pad transform: the percentage of video time
    cut out (crop) or added (pad), with the longer of the src/dst durations
    as the denominator so the result stays within [0, 100].
    """
    src_duration = metadata["src_duration"]
    dst_duration = metadata["dst_duration"]
    longer = max(src_duration, dst_duration)
    changed = abs(dst_duration - src_duration)
    return (changed / longer) * 100.0
def insert_in_background_intensity(metadata: Dict[str, Any], **kwargs) -> float:
    """Inserting into a background pads the timeline, so reuse the temporal helper."""
    intensity = time_crop_or_pad_intensity_helper(metadata)
    return intensity
160,755 | from typing import Any, Dict, List, Optional, Tuple
import numpy as np
from augly.image import intensity as imint, utils as imutils
from augly.video.helpers import get_video_info
def loop_intensity(num_loops: int, **kwargs) -> float:
    """Intensity scales with the number of loops, saturating at 100 loops."""
    max_num_loops = 100
    fraction = num_loops / max_num_loops
    return min(fraction * 100.0, 100.0)
160,756 | from typing import Any, Dict, List, Optional, Tuple
import numpy as np
from augly.image import intensity as imint, utils as imutils
from augly.video.helpers import get_video_info
def meme_format_intensity(metadata: Dict[str, Any], **kwargs) -> float:
    """Meme formatting changes frame dimensions, so reuse the resize intensity helper."""
    intensity = imint.resize_intensity_helper(metadata)
    return intensity
160,757 | from typing import Any, Dict, List, Optional, Tuple
import numpy as np
from augly.image import intensity as imint, utils as imutils
from augly.video.helpers import get_video_info
def overlay_intensity(
    overlay_size: Optional[float], overlay_path: str, metadata: Dict[str, Any], **kwargs
) -> float:
    """
    Intensity is the fraction of the source frame covered by the overlay.
    When `overlay_size` is given it is a relative edge length, so coverage is
    its square; otherwise the overlay media's pixel area is measured and
    compared against the source frame area.
    """
    assert overlay_size is None or (
        isinstance(overlay_size, (float, int)) and 0 < overlay_size <= 1
    ), "overlay_size must be a value in the range (0, 1]"
    if overlay_size is not None:
        return (overlay_size**2) * 100.0
    try:
        # Prefer interpreting the overlay as an image; fall back to probing
        # it as a video if image loading fails.
        overlay_img = imutils.validate_and_load_image(overlay_path)
        overlay_area = overlay_img.width * overlay_img.height
    except Exception:
        info = get_video_info(overlay_path)
        overlay_area = info["width"] * info["height"]
    src_area = metadata["src_width"] * metadata["src_height"]
    coverage = overlay_area / src_area
    return min(coverage * 100.0, 100.0)
160,758 | from typing import Any, Dict, List, Optional, Tuple
import numpy as np
from augly.image import intensity as imint, utils as imutils
from augly.video.helpers import get_video_info
def overlay_dots_intensity(num_dots: int, **kwargs) -> float:
    """Intensity scales with the dot count, saturating at 10000 dots."""
    max_num_dots = 10000
    fraction = num_dots / max_num_dots
    return min(fraction * 100.0, 100.0)
160,759 | from typing import Any, Dict, List, Optional, Tuple
import numpy as np
from augly.image import intensity as imint, utils as imutils
from augly.video.helpers import get_video_info
def overlay_emoji_intensity(
    emoji_size: float, opacity: float, metadata: Dict[str, Any], **kwargs
) -> float:
    """Intensity is (emoji area / frame area) * opacity, scaled to [0, 100]."""
    assert (
        isinstance(emoji_size, (float, int)) and 0 <= emoji_size <= 1
    ), "emoji_size must be a number in [0, 1]"
    assert (
        isinstance(opacity, (float, int)) and 0 <= opacity <= 1
    ), "opacity must be a number in [0, 1]"
    frame_w = metadata["dst_width"]
    frame_h = metadata["dst_height"]
    emoji_h = frame_h * emoji_size
    # The emoji is square up to clamping: its width never exceeds the frame's.
    emoji_w = min(frame_w, emoji_h)
    coverage = (emoji_w * emoji_h) / (frame_w * frame_h)
    return coverage * opacity * 100.0
160,760 | from typing import Any, Dict, List, Optional, Tuple
import numpy as np
from augly.image import intensity as imint, utils as imutils
from augly.video.helpers import get_video_info
def overlay_onto_background_video_intensity(
    overlay_size: Optional[float], metadata: Dict[str, Any], **kwargs
) -> float:
    """Intensity is the fraction of the output frame NOT covered by the
    source video (i.e. occupied by the background), scaled to [0, 100]."""
    if overlay_size is not None:
        return (1 - overlay_size**2) * 100.0
    src_area = metadata["src_width"] * metadata["src_height"]
    dst_area = metadata["dst_width"] * metadata["dst_height"]
    uncovered = max(0.0, 1.0 - src_area / dst_area)
    return min(100.0, uncovered * 100.0)
160,761 | from typing import Any, Dict, List, Optional, Tuple
import numpy as np
from augly.image import intensity as imint, utils as imutils
from augly.video.helpers import get_video_info
def overlay_onto_screenshot_intensity(
    template_filepath: str,
    template_bboxes_filepath: str,
    metadata: Dict[str, Any],
    **kwargs,
) -> float:
    """Intensity is the fraction of the output frame occupied by the template
    chrome, i.e. everything outside the bbox where the video is placed."""
    _, bbox = imutils.get_template_and_bbox(template_filepath, template_bboxes_filepath)
    left, top, right, bottom = bbox
    bbox_area = (right - left) * (bottom - top)
    dst_area = metadata["dst_width"] * metadata["dst_height"]
    chrome_fraction = (dst_area - bbox_area) / dst_area
    return chrome_fraction * 100.0
160,762 | from typing import Any, Dict, List, Optional, Tuple
import numpy as np
from augly.image import intensity as imint, utils as imutils
from augly.video.helpers import get_video_info
def distractor_overlay_intensity_helper(
    topleft: Optional[Tuple[float, float]],
    bottomright: Optional[Tuple[float, float]],
    num_overlay_content: int,
    **kwargs,
) -> float:
    """
    Computes intensity of any distractor-type transform (images, emojis,
    text, dots, logos overlaid within a bounding box): the normalized bbox
    area times the content count (capped at 100 items), scaled to [0, 100].
    When either corner is missing the overlay may land anywhere, so the
    intensity is maximal.
    """
    assert topleft is None or all(
        0.0 <= t <= 1.0 for t in topleft
    ), "Topleft must be in the range [0, 1]"
    assert bottomright is None or all(
        0.0 <= b <= 1.0 for b in bottomright
    ), "Bottomright must be in the range [0, 1]"
    assert (
        isinstance(num_overlay_content, int) and num_overlay_content >= 0
    ), "num_overlay_content must be a nonnegative int"
    if topleft is None or bottomright is None:
        return 100.0
    content_fraction = num_overlay_content / 100
    box_width = bottomright[0] - topleft[0]
    box_height = bottomright[1] - topleft[1]
    box_area = box_width * box_height
    return min((box_area * content_fraction) * 100.0, 100.0)
def overlay_shapes_intensity(
    topleft: Optional[Tuple[float, float]],
    bottomright: Optional[Tuple[float, float]],
    num_shapes: int,
    **kwargs,
) -> float:
    """Overlaying shapes is a distractor transform, so reuse the shared helper."""
    intensity = distractor_overlay_intensity_helper(topleft, bottomright, num_shapes)
    return intensity
# Dataset-viewer footer residue (not part of the source code):
# "Subsets and Splits — No community queries yet. The top public SQL queries
# from the community will appear here once available."