id int64 0 190k | prompt stringlengths 21 13.4M | docstring stringlengths 1 12k ⌀ |
|---|---|---|
160,563 | from copy import deepcopy
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
import numpy as np
import torch
from augly.audio import utils as audutils
from augly.utils import DEFAULT_SAMPLE_RATE
from augly.utils.libsndfile import install_libsndfile
import librosa
from torchaudio import sox_effects
from torchaudio.functional import fftconvolve, resample
The provided code snippet includes necessary dependencies for implementing the `high_pass_filter` function. Write a Python function `def high_pass_filter( audio: Union[str, np.ndarray], sample_rate: int = DEFAULT_SAMPLE_RATE, cutoff_hz: float = 3000.0, output_path: Optional[str] = None, metadata: Optional[List[Dict[str, Any]]] = None, ) -> Tuple[np.ndarray, int]` to solve the following problem:
Allows audio signals with a frequency higher than the given cutoff to pass through and attenuates signals with frequencies lower than the cutoff frequency @param audio: the path to the audio or a variable of type np.ndarray that will be augmented @param sample_rate: the audio sample rate of the inputted audio @param cutoff_hz: frequency (in Hz) where signals with lower frequencies will begin to be reduced by 6dB per octave (doubling in frequency) below this point @param output_path: the path in which the resulting audio will be stored. If None, the resulting np.ndarray will still be returned @param metadata: if set to be a list, metadata about the function execution including its name, the source & dest duration, sample rates, etc. will be appended to the inputted list. If set to None, no metadata will be appended @returns: the augmented audio array and sample rate
Here is the function:
def high_pass_filter(
    audio: Union[str, np.ndarray],
    sample_rate: int = DEFAULT_SAMPLE_RATE,
    cutoff_hz: float = 3000.0,
    output_path: Optional[str] = None,
    metadata: Optional[List[Dict[str, Any]]] = None,
) -> Tuple[np.ndarray, int]:
    """
    Allows audio signals with a frequency higher than the given cutoff to pass
    through and attenuates signals with frequencies lower than the cutoff frequency

    @param audio: the path to the audio or a variable of type np.ndarray that
        will be augmented
    @param sample_rate: the audio sample rate of the inputted audio
    @param cutoff_hz: frequency (in Hz) where signals with lower frequencies will
        begin to be reduced by 6dB per octave (doubling in frequency) below this point
    @param output_path: the path in which the resulting audio will be stored. If None,
        the resulting np.ndarray will still be returned
    @param metadata: if set to be a list, metadata about the function execution
        including its name, the source & dest duration, sample rates, etc. will be
        appended to the inputted list. If set to None, no metadata will be appended
    @returns: the augmented audio array and sample rate
    """
    assert isinstance(cutoff_hz, (int, float)), "Expected 'cutoff_hz' to be a number"
    audio, sample_rate = audutils.validate_and_load_audio(audio, sample_rate)

    if metadata is not None:
        # Snapshot the call arguments (with the loaded audio, before reshaping)
        # so the metadata reflects exactly what this call operated on.
        func_kwargs = deepcopy(locals())
        func_kwargs.pop("metadata")

    channels = audio.shape[0] if audio.ndim > 1 else 1
    # sox expects a (channels, samples) tensor, even for mono input
    audio = audio.reshape((channels, -1))
    filtered_tensor, out_sample_rate = sox_effects.apply_effects_tensor(
        torch.Tensor(audio),
        sample_rate,
        [["highpass", str(cutoff_hz)]],
    )
    high_pass_array = filtered_tensor.numpy()

    if metadata is not None:
        audutils.get_metadata(
            metadata=metadata,
            function_name="high_pass_filter",
            dst_audio=high_pass_array,
            dst_sample_rate=out_sample_rate,
            # pyre-fixme[61]: `func_kwargs` may not be initialized here.
            **func_kwargs,
        )

    return audutils.ret_and_save_audio(high_pass_array, output_path, out_sample_rate)
160,564 | from copy import deepcopy
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
import numpy as np
import torch
from augly.audio import utils as audutils
from augly.utils import DEFAULT_SAMPLE_RATE
from augly.utils.libsndfile import install_libsndfile
import librosa
from torchaudio import sox_effects
from torchaudio.functional import fftconvolve, resample
def to_mono(
    audio: Union[str, np.ndarray],
    sample_rate: int = DEFAULT_SAMPLE_RATE,
    output_path: Optional[str] = None,
    metadata: Optional[List[Dict[str, Any]]] = None,
) -> Tuple[np.ndarray, int]:
    """
    Converts the audio from stereo to mono by averaging samples across channels

    @param audio: the path to the audio or a variable of type np.ndarray that
        will be augmented
    @param sample_rate: the audio sample rate of the inputted audio
    @param output_path: the path in which the resulting audio will be stored. If None,
        the resulting np.ndarray will still be returned
    @param metadata: if set to be a list, metadata about the function execution
        including its name, the source & dest duration, sample rates, etc. will be
        appended to the inputted list. If set to None, no metadata will be appended
    @returns: the augmented audio array and sample rate
    """
    audio, sample_rate = audutils.validate_and_load_audio(audio, sample_rate)
    # librosa averages across the channel axis to produce a 1D signal
    aug_audio = librosa.core.to_mono(audio)
    # NOTE(review): unlike sibling functions, this call is not guarded by
    # `if metadata is not None` -- presumably get_metadata no-ops on None;
    # confirm in audutils.
    audutils.get_metadata(
        metadata=metadata,
        function_name="to_mono",
        audio=audio,
        sample_rate=sample_rate,
        dst_audio=aug_audio,
        dst_sample_rate=sample_rate,
        output_path=output_path,
    )
    return audutils.ret_and_save_audio(aug_audio, output_path, sample_rate)
The provided code snippet includes necessary dependencies for implementing the `insert_in_background` function. Write a Python function `def insert_in_background( audio: Union[str, np.ndarray], sample_rate: int = DEFAULT_SAMPLE_RATE, offset_factor: float = 0.0, background_audio: Optional[Union[str, np.ndarray]] = None, seed: Optional[audutils.RNGSeed] = None, output_path: Optional[str] = None, metadata: Optional[List[Dict[str, Any]]] = None, ) -> Tuple[np.ndarray, int]` to solve the following problem:
Inserts audio into a background clip in a non-overlapping manner. @param audio: the path to the audio or a variable of type np.ndarray that will be augmented @param sample_rate: the audio sample rate of the inputted audio @param offset_factor: insert point relative to the background duration (this parameter is multiplied by the background duration) @param background_audio: the path to the background audio or a variable of type np.ndarray containing the background audio. If set to `None`, the background audio will be white noise, with the same duration as the audio. @param seed: a NumPy random generator (or seed) such that the results remain reproducible @param output_path: the path in which the resulting audio will be stored. If None, the resulting np.ndarray will still be returned @param metadata: if set to be a list, metadata about the function execution including its name, the source & dest duration, sample rates, etc. will be appended to the inputted list. If set to None, no metadata will be appended @returns: the augmented audio array and sample rate
Here is the function:
def insert_in_background(
    audio: Union[str, np.ndarray],
    sample_rate: int = DEFAULT_SAMPLE_RATE,
    offset_factor: float = 0.0,
    background_audio: Optional[Union[str, np.ndarray]] = None,
    seed: Optional[audutils.RNGSeed] = None,
    output_path: Optional[str] = None,
    metadata: Optional[List[Dict[str, Any]]] = None,
) -> Tuple[np.ndarray, int]:
    """
    Inserts audio into a background clip in a non-overlapping manner.

    @param audio: the path to the audio or a variable of type np.ndarray that
        will be augmented
    @param sample_rate: the audio sample rate of the inputted audio
    @param offset_factor: insert point relative to the background duration
        (this parameter is multiplied by the background duration)
    @param background_audio: the path to the background audio or a variable of type
        np.ndarray containing the background audio. If set to `None`, the background
        audio will be white noise, with the same duration as the audio.
    @param seed: a NumPy random generator (or seed) such that the results
        remain reproducible
    @param output_path: the path in which the resulting audio will be stored. If None,
        the resulting np.ndarray will still be returned
    @param metadata: if set to be a list, metadata about the function execution
        including its name, the source & dest duration, sample rates, etc. will be
        appended to the inputted list. If set to None, no metadata will be appended
    @returns: the augmented audio array and sample rate
    """
    assert (
        0.0 <= offset_factor <= 1.0
    ), "Expected 'offset_factor' to be a number in the range [0, 1]"
    audio, sample_rate = audutils.validate_and_load_audio(audio, sample_rate)

    if metadata is not None:
        # Snapshot arguments as received; the seed is dropped since generator
        # objects do not belong in the metadata list.
        func_kwargs = deepcopy(locals())
        func_kwargs.pop("metadata")
        func_kwargs.pop("seed")

    rng = audutils.check_random_state(seed)
    if background_audio is None:
        # Default background: white noise shaped like the foreground audio
        background_audio = rng.standard_normal(audio.shape)
    else:
        background_audio, _ = audutils.validate_and_load_audio(
            background_audio, sample_rate
        )

    src_channels = audio.shape[0] if audio.ndim > 1 else 1
    bg_channels = background_audio.shape[0] if background_audio.ndim > 1 else 1
    if bg_channels != src_channels:
        # Collapse the background to mono, then replicate it so its channel
        # layout matches the foreground audio.
        background_audio, _ = to_mono(background_audio)
        if src_channels > 1:
            background_audio = np.tile(background_audio, (src_channels, 1))

    insert_at = int(offset_factor * background_audio.shape[-1])
    aug_audio = np.hstack(
        [
            background_audio[..., :insert_at],
            audio,
            background_audio[..., insert_at:],
        ]
    )

    if metadata is not None:
        audutils.get_metadata(
            metadata=metadata,
            function_name="insert_in_background",
            dst_audio=aug_audio,
            dst_sample_rate=sample_rate,
            background_duration=background_audio.shape[-1] / sample_rate,
            offset=insert_at,
            # pyre-fixme[61]: `func_kwargs` may not be initialized here.
            **func_kwargs,
        )

    return audutils.ret_and_save_audio(aug_audio, output_path, sample_rate)
160,565 | from copy import deepcopy
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
import numpy as np
import torch
from augly.audio import utils as audutils
from augly.utils import DEFAULT_SAMPLE_RATE
from augly.utils.libsndfile import install_libsndfile
import librosa
from torchaudio import sox_effects
from torchaudio.functional import fftconvolve, resample
The provided code snippet includes necessary dependencies for implementing the `invert_channels` function. Write a Python function `def invert_channels( audio: Union[str, np.ndarray], sample_rate: int = DEFAULT_SAMPLE_RATE, output_path: Optional[str] = None, metadata: Optional[List[Dict[str, Any]]] = None, ) -> Tuple[np.ndarray, int]` to solve the following problem:
Inverts channels of the audio. If the audio has only one channel, no change is applied. Otherwise, it inverts the order of the channels, eg for 4 channels, it returns channels in order [3, 2, 1, 0]. @param audio: the path to the audio or a variable of type np.ndarray that will be augmented @param sample_rate: the audio sample rate of the inputted audio @param output_path: the path in which the resulting audio will be stored. If None, the resulting np.ndarray will still be returned @param metadata: if set to be a list, metadata about the function execution including its name, the source & dest duration, sample rates, etc. will be appended to the inputted list. If set to None, no metadata will be appended @returns: the augmented audio array and sample rate
Here is the function:
def invert_channels(
    audio: Union[str, np.ndarray],
    sample_rate: int = DEFAULT_SAMPLE_RATE,
    output_path: Optional[str] = None,
    metadata: Optional[List[Dict[str, Any]]] = None,
) -> Tuple[np.ndarray, int]:
    """
    Inverts channels of the audio.
    If the audio has only one channel, no change is applied.
    Otherwise, it inverts the order of the channels, eg for 4 channels,
    it returns channels in order [3, 2, 1, 0].

    @param audio: the path to the audio or a variable of type np.ndarray that
        will be augmented
    @param sample_rate: the audio sample rate of the inputted audio
    @param output_path: the path in which the resulting audio will be stored. If None,
        the resulting np.ndarray will still be returned
    @param metadata: if set to be a list, metadata about the function execution
        including its name, the source & dest duration, sample rates, etc. will be
        appended to the inputted list. If set to None, no metadata will be appended
    @returns: the augmented audio array and sample rate
    """
    audio, sample_rate = audutils.validate_and_load_audio(audio, sample_rate)
    aug_audio = audio
    if audio.ndim > 1:
        # Reverse the channel (first) axis; copy so the result owns its data,
        # matching the copy produced by a fancy-index channel permutation.
        aug_audio = audio[::-1].copy()
    audutils.get_metadata(
        metadata=metadata,
        function_name="invert_channels",
        audio=audio,
        sample_rate=sample_rate,
        dst_audio=aug_audio,
        dst_sample_rate=sample_rate,
        output_path=output_path,
    )
    return audutils.ret_and_save_audio(aug_audio, output_path, sample_rate)
160,566 | from copy import deepcopy
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
import numpy as np
import torch
from augly.audio import utils as audutils
from augly.utils import DEFAULT_SAMPLE_RATE
from augly.utils.libsndfile import install_libsndfile
import librosa
from torchaudio import sox_effects
from torchaudio.functional import fftconvolve, resample
The provided code snippet includes necessary dependencies for implementing the `loop` function. Write a Python function `def loop( audio: Union[str, np.ndarray], sample_rate: int = DEFAULT_SAMPLE_RATE, n: int = 1, output_path: Optional[str] = None, metadata: Optional[List[Dict[str, Any]]] = None, ) -> Tuple[np.ndarray, int]` to solve the following problem:
Loops the audio 'n' times @param audio: the path to the audio or a variable of type np.ndarray that will be augmented @param sample_rate: the audio sample rate of the inputted audio @param n: the number of times the audio will be looped @param output_path: the path in which the resulting audio will be stored. If None, the resulting np.ndarray will still be returned @param metadata: if set to be a list, metadata about the function execution including its name, the source & dest duration, sample rates, etc. will be appended to the inputted list. If set to None, no metadata will be appended @returns: the augmented audio array and sample rate
Here is the function:
def loop(
    audio: Union[str, np.ndarray],
    sample_rate: int = DEFAULT_SAMPLE_RATE,
    n: int = 1,
    output_path: Optional[str] = None,
    metadata: Optional[List[Dict[str, Any]]] = None,
) -> Tuple[np.ndarray, int]:
    """
    Loops the audio 'n' times

    @param audio: the path to the audio or a variable of type np.ndarray that
        will be augmented
    @param sample_rate: the audio sample rate of the inputted audio
    @param n: the number of times the audio will be looped
    @param output_path: the path in which the resulting audio will be stored. If None,
        the resulting np.ndarray will still be returned
    @param metadata: if set to be a list, metadata about the function execution
        including its name, the source & dest duration, sample rates, etc. will be
        appended to the inputted list. If set to None, no metadata will be appended
    @returns: the augmented audio array and sample rate
    """
    assert isinstance(n, int) and n >= 0, "Expected 'n' to be a nonnegative integer"
    audio, sample_rate = audutils.validate_and_load_audio(audio, sample_rate)
    if n == 0:
        # No looping requested: return the input unchanged
        aug_audio = audio
    else:
        # Build all n+1 repetitions with a single concatenation along the
        # sample axis (last axis for both mono 1D and multichannel 2D arrays)
        # instead of calling np.append in a loop, which reallocates and copies
        # the growing array on every iteration (O(n^2) -> O(n)).
        aug_audio = np.concatenate([audio] * (n + 1), axis=-1)
    audutils.get_metadata(
        metadata=metadata,
        function_name="loop",
        audio=audio,
        sample_rate=sample_rate,
        dst_audio=aug_audio,
        dst_sample_rate=sample_rate,
        output_path=output_path,
        n=n,
    )
    return audutils.ret_and_save_audio(aug_audio, output_path, sample_rate)
160,567 | from copy import deepcopy
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
import numpy as np
import torch
from augly.audio import utils as audutils
from augly.utils import DEFAULT_SAMPLE_RATE
from augly.utils.libsndfile import install_libsndfile
import librosa
from torchaudio import sox_effects
from torchaudio.functional import fftconvolve, resample
The provided code snippet includes necessary dependencies for implementing the `low_pass_filter` function. Write a Python function `def low_pass_filter( audio: Union[str, np.ndarray], sample_rate: int = DEFAULT_SAMPLE_RATE, cutoff_hz: float = 500.0, output_path: Optional[str] = None, metadata: Optional[List[Dict[str, Any]]] = None, ) -> Tuple[np.ndarray, int]` to solve the following problem:
Allows audio signals with a frequency lower than the given cutoff to pass through and attenuates signals with frequencies higher than the cutoff frequency @param audio: the path to the audio or a variable of type np.ndarray that will be augmented @param sample_rate: the audio sample rate of the inputted audio @param cutoff_hz: frequency (in Hz) where signals with higher frequencies will begin to be reduced by 6dB per octave (doubling in frequency) above this point @param output_path: the path in which the resulting audio will be stored. If None, the resulting np.ndarray will still be returned @param metadata: if set to be a list, metadata about the function execution including its name, the source & dest duration, sample rates, etc. will be appended to the inputted list. If set to None, no metadata will be appended @returns: the augmented audio array and sample rate
Here is the function:
def low_pass_filter(
    audio: Union[str, np.ndarray],
    sample_rate: int = DEFAULT_SAMPLE_RATE,
    cutoff_hz: float = 500.0,
    output_path: Optional[str] = None,
    metadata: Optional[List[Dict[str, Any]]] = None,
) -> Tuple[np.ndarray, int]:
    """
    Allows audio signals with a frequency lower than the given cutoff to pass through
    and attenuates signals with frequencies higher than the cutoff frequency

    @param audio: the path to the audio or a variable of type np.ndarray that
        will be augmented
    @param sample_rate: the audio sample rate of the inputted audio
    @param cutoff_hz: frequency (in Hz) where signals with higher frequencies will
        begin to be reduced by 6dB per octave (doubling in frequency) above this point
    @param output_path: the path in which the resulting audio will be stored. If None,
        the resulting np.ndarray will still be returned
    @param metadata: if set to be a list, metadata about the function execution
        including its name, the source & dest duration, sample rates, etc. will be
        appended to the inputted list. If set to None, no metadata will be appended
    @returns: the augmented audio array and sample rate
    """
    assert isinstance(cutoff_hz, (int, float)), "Expected 'cutoff_hz' to be a number"
    audio, sample_rate = audutils.validate_and_load_audio(audio, sample_rate)

    if metadata is not None:
        # Snapshot the call arguments (with the loaded audio, before reshaping)
        # so the metadata reflects exactly what this call operated on.
        func_kwargs = deepcopy(locals())
        func_kwargs.pop("metadata")

    channels = audio.shape[0] if audio.ndim > 1 else 1
    # sox expects a (channels, samples) tensor, even for mono input
    audio = audio.reshape((channels, -1))
    filtered_tensor, out_sample_rate = sox_effects.apply_effects_tensor(
        torch.Tensor(audio),
        sample_rate,
        [["lowpass", str(cutoff_hz)]],
    )
    low_pass_array = filtered_tensor.numpy()

    if metadata is not None:
        audutils.get_metadata(
            metadata=metadata,
            function_name="low_pass_filter",
            dst_audio=low_pass_array,
            dst_sample_rate=out_sample_rate,
            # pyre-fixme[61]: `func_kwargs` may not be initialized here.
            **func_kwargs,
        )

    return audutils.ret_and_save_audio(low_pass_array, output_path, out_sample_rate)
160,568 | from copy import deepcopy
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
import numpy as np
import torch
from augly.audio import utils as audutils
from augly.utils import DEFAULT_SAMPLE_RATE
from augly.utils.libsndfile import install_libsndfile
import librosa
from torchaudio import sox_effects
from torchaudio.functional import fftconvolve, resample
The provided code snippet includes necessary dependencies for implementing the `normalize` function. Write a Python function `def normalize( audio: Union[str, np.ndarray], sample_rate: int = DEFAULT_SAMPLE_RATE, norm: Optional[float] = np.inf, axis: int = 0, threshold: Optional[float] = None, fill: Optional[bool] = None, output_path: Optional[str] = None, metadata: Optional[List[Dict[str, Any]]] = None, ) -> Tuple[np.ndarray, int]` to solve the following problem:
Normalizes the audio array along the chosen axis (norm(audio, axis=axis) == 1) @param audio: the path to the audio or a variable of type np.ndarray that will be augmented @param sample_rate: the audio sample rate of the inputted audio @param norm: the type of norm to compute: - np.inf: maximum absolute value - -np.inf: minimum absolute value - 0: number of non-zeros (the support) - float: corresponding l_p norm - None: no normalization is performed @param axis: axis along which to compute the norm @param threshold: if provided, only the columns (or rows) with norm of at least `threshold` are normalized @param fill: if None, then columns (or rows) with norm below `threshold` are left as is. If False, then columns (rows) with norm below `threshold` are set to 0. If True, then columns (rows) with norm below `threshold` are filled uniformly such that the corresponding norm is 1 @param output_path: the path in which the resulting audio will be stored. If None, the resulting np.ndarray will still be returned @param metadata: if set to be a list, metadata about the function execution including its name, the source & dest duration, sample rates, etc. will be appended to the inputted list. If set to None, no metadata will be appended @returns: the augmented audio array and sample rate
Here is the function:
def normalize(
    audio: Union[str, np.ndarray],
    sample_rate: int = DEFAULT_SAMPLE_RATE,
    norm: Optional[float] = np.inf,
    axis: int = 0,
    threshold: Optional[float] = None,
    fill: Optional[bool] = None,
    output_path: Optional[str] = None,
    metadata: Optional[List[Dict[str, Any]]] = None,
) -> Tuple[np.ndarray, int]:
    """
    Normalizes the audio array along the chosen axis (norm(audio, axis=axis) == 1)

    @param audio: the path to the audio or a variable of type np.ndarray that
        will be augmented
    @param sample_rate: the audio sample rate of the inputted audio
    @param norm: the type of norm to compute:
        - np.inf: maximum absolute value
        - -np.inf: minimum absolute value
        - 0: number of non-zeros (the support)
        - float: corresponding l_p norm
        - None: no normalization is performed
    @param axis: axis along which to compute the norm
    @param threshold: if provided, only the columns (or rows) with norm of at
        least `threshold` are normalized
    @param fill: if None, then columns (or rows) with norm below `threshold` are left
        as is. If False, then columns (rows) with norm below `threshold` are set to 0.
        If True, then columns (rows) with norm below `threshold` are filled uniformly
        such that the corresponding norm is 1
    @param output_path: the path in which the resulting audio will be stored. If None,
        the resulting np.ndarray will still be returned
    @param metadata: if set to be a list, metadata about the function execution
        including its name, the source & dest duration, sample rates, etc. will be
        appended to the inputted list. If set to None, no metadata will be appended
    @returns: the augmented audio array and sample rate
    """
    assert (
        isinstance(axis, int) and axis >= 0
    ), "Expected 'axis' to be a nonnegative number"
    assert threshold is None or isinstance(
        threshold, (int, float)
    ), "Expected 'threshold' to be a number or None"
    # Bug fix: this message previously said 'threshold' instead of 'fill'
    assert fill is None or isinstance(
        fill, bool
    ), "Expected 'fill' to be a boolean or None"
    audio, sample_rate = audutils.validate_and_load_audio(audio, sample_rate)
    if metadata is not None:
        func_kwargs = deepcopy(locals())
        # np.inf / -np.inf are stringified so the metadata stays serializable
        func_kwargs["norm"] = str(func_kwargs["norm"])
        func_kwargs.pop("metadata")
    aug_audio = librosa.util.normalize(
        audio, norm=norm, axis=axis, threshold=threshold, fill=fill
    )
    if metadata is not None:
        audutils.get_metadata(
            metadata=metadata,
            function_name="normalize",
            dst_audio=aug_audio,
            dst_sample_rate=sample_rate,
            # pyre-fixme[61]: `func_kwargs` may not be initialized here.
            **func_kwargs,
        )
    return audutils.ret_and_save_audio(aug_audio, output_path, sample_rate)
160,569 | from copy import deepcopy
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
import numpy as np
import torch
from augly.audio import utils as audutils
from augly.utils import DEFAULT_SAMPLE_RATE
from augly.utils.libsndfile import install_libsndfile
import librosa
from torchaudio import sox_effects
from torchaudio.functional import fftconvolve, resample
The provided code snippet includes necessary dependencies for implementing the `peaking_equalizer` function. Write a Python function `def peaking_equalizer( audio: Union[str, np.ndarray], sample_rate: int = DEFAULT_SAMPLE_RATE, center_hz: float = 500.0, q: float = 1.0, gain_db: float = -3.0, output_path: Optional[str] = None, metadata: Optional[List[Dict[str, Any]]] = None, ) -> Tuple[np.ndarray, int]` to solve the following problem:
Applies a two-pole peaking equalization filter. The signal-level at and around `center_hz` can be increased or decreased, while all other frequencies are unchanged @param audio: the path to the audio or a variable of type np.ndarray that will be augmented @param sample_rate: the audio sample rate of the inputted audio @param center_hz: point in the frequency spectrum at which EQ is applied @param q: ratio of center frequency to bandwidth; bandwidth is inversely proportional to Q, meaning that as you raise Q, you narrow the bandwidth @param gain_db: amount of gain (boost) or reduction (cut) that is applied at a given frequency. Beware of clipping when using positive gain @param output_path: the path in which the resulting audio will be stored. If None, the resulting np.ndarray will still be returned @param metadata: if set to be a list, metadata about the function execution including its name, the source & dest duration, sample rates, etc. will be appended to the inputted list. If set to None, no metadata will be appended @returns: the augmented audio array and sample rate
Here is the function:
def peaking_equalizer(
    audio: Union[str, np.ndarray],
    sample_rate: int = DEFAULT_SAMPLE_RATE,
    center_hz: float = 500.0,
    q: float = 1.0,
    gain_db: float = -3.0,
    output_path: Optional[str] = None,
    metadata: Optional[List[Dict[str, Any]]] = None,
) -> Tuple[np.ndarray, int]:
    """
    Boosts or cuts the signal level around `center_hz` with a two-pole peaking
    EQ filter, leaving all other frequencies unchanged

    @param audio: the path to the audio or a variable of type np.ndarray that
        will be augmented
    @param sample_rate: the audio sample rate of the inputted audio
    @param center_hz: point in the frequency spectrum at which EQ is applied
    @param q: ratio of center frequency to bandwidth; raising Q narrows the band
    @param gain_db: boost (positive) or cut (negative) applied at the given
        frequency; beware of clipping when boosting
    @param output_path: the path in which the resulting audio will be stored.
        If None, the resulting np.ndarray will still be returned
    @param metadata: if set to be a list, metadata about the function execution
        will be appended to it; if None, no metadata is recorded
    @returns: the augmented audio array and sample rate
    """
    assert isinstance(center_hz, (int, float)), "Expected 'center_hz' to be a number"
    assert isinstance(q, (int, float)) and q > 0, "Expected 'q' to be a positive number"
    assert isinstance(gain_db, (int, float)), "Expected 'gain_db' to be a number"

    audio, sample_rate = audutils.validate_and_load_audio(audio, sample_rate)

    # Snapshot the call arguments before any extra locals are introduced, so
    # the recorded metadata reflects exactly what the caller passed in.
    if metadata is not None:
        func_kwargs = deepcopy(locals())
        func_kwargs.pop("metadata")

    # sox wants a (channels, samples) tensor; remember the channel count so a
    # mono input can be flattened back to 1-D afterwards.
    n_channels = audio.shape[0] if audio.ndim > 1 else 1
    stacked = torch.Tensor(audio.reshape((n_channels, -1)))
    processed, out_sample_rate = sox_effects.apply_effects_tensor(
        stacked,
        sample_rate,
        [["equalizer", str(center_hz), f"{q}q", str(gain_db)]],
    )

    aug_audio = processed.numpy()
    if n_channels == 1:
        aug_audio = aug_audio.reshape((aug_audio.shape[-1],))

    if metadata is not None:
        audutils.get_metadata(
            metadata=metadata,
            function_name="peaking_equalizer",
            dst_audio=aug_audio,
            dst_sample_rate=out_sample_rate,
            # pyre-fixme[61]: `func_kwargs` may not be initialized here.
            **func_kwargs,
        )

    return audutils.ret_and_save_audio(aug_audio, output_path, out_sample_rate)
160,570 | from copy import deepcopy
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
import numpy as np
import torch
from augly.audio import utils as audutils
from augly.utils import DEFAULT_SAMPLE_RATE
from augly.utils.libsndfile import install_libsndfile
import librosa
from torchaudio import sox_effects
from torchaudio.functional import fftconvolve, resample
The provided code snippet includes necessary dependencies for implementing the `percussive` function. Write a Python function `def percussive( audio: Union[str, np.ndarray], sample_rate: int = DEFAULT_SAMPLE_RATE, kernel_size: int = 31, power: float = 2.0, margin: float = 1.0, output_path: Optional[str] = None, metadata: Optional[List[Dict[str, Any]]] = None, ) -> Tuple[np.ndarray, int]` to solve the following problem:
Extracts the percussive part of the audio @param audio: the path to the audio or a variable of type np.ndarray that will be augmented @param sample_rate: the audio sample rate of the inputted audio @param kernel_size: kernel size for the median filters @param power: exponent for the Wiener filter when constructing soft mask matrices @param margin: margin size for the masks @param output_path: the path in which the resulting audio will be stored. If None, the resulting np.ndarray will still be returned @param metadata: if set to be a list, metadata about the function execution including its name, the source & dest duration, sample rates, etc. will be appended to the inputted list. If set to None, no metadata will be appended @returns: the augmented audio array and sample rate
Here is the function:
def percussive(
    audio: Union[str, np.ndarray],
    sample_rate: int = DEFAULT_SAMPLE_RATE,
    kernel_size: int = 31,
    power: float = 2.0,
    margin: float = 1.0,
    output_path: Optional[str] = None,
    metadata: Optional[List[Dict[str, Any]]] = None,
) -> Tuple[np.ndarray, int]:
    """
    Keeps only the percussive component of the audio

    @param audio: the path to the audio or a variable of type np.ndarray that
        will be augmented
    @param sample_rate: the audio sample rate of the inputted audio
    @param kernel_size: kernel size for the median filters
    @param power: exponent for the Wiener filter when constructing soft mask
        matrices
    @param margin: margin size for the masks
    @param output_path: the path in which the resulting audio will be stored.
        If None, the resulting np.ndarray will still be returned
    @param metadata: if set to be a list, metadata about the function execution
        will be appended to it; if None, no metadata is recorded
    @returns: the augmented audio array and sample rate
    """
    assert isinstance(kernel_size, int), "Expected 'kernel_size' to be an int"
    assert isinstance(power, (int, float)), "Expected 'power' to be a number"
    assert isinstance(margin, (int, float)), "Expected 'margin' to be a number"

    audio, sample_rate = audutils.validate_and_load_audio(audio, sample_rate)

    # Snapshot the call arguments before any extra locals are introduced.
    if metadata is not None:
        func_kwargs = deepcopy(locals())
        func_kwargs.pop("metadata")

    def _extract(channel: np.ndarray) -> np.ndarray:
        # librosa separates the percussive component per channel
        return librosa.effects.percussive(
            channel, kernel_size=kernel_size, power=power, margin=margin
        )

    if audio.ndim == 1:
        aug_audio = _extract(audio)
    else:
        # librosa expects Fortran-ordered data for each channel
        aug_audio = np.vstack([_extract(np.asfortranarray(ch)) for ch in audio])

    if metadata is not None:
        audutils.get_metadata(
            metadata=metadata,
            function_name="percussive",
            dst_audio=aug_audio,
            dst_sample_rate=sample_rate,
            # pyre-fixme[61]: `func_kwargs` may not be initialized here.
            **func_kwargs,
        )

    return audutils.ret_and_save_audio(aug_audio, output_path, sample_rate)
160,571 | from copy import deepcopy
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
import numpy as np
import torch
from augly.audio import utils as audutils
from augly.utils import DEFAULT_SAMPLE_RATE
from augly.utils.libsndfile import install_libsndfile
import librosa
from torchaudio import sox_effects
from torchaudio.functional import fftconvolve, resample
The provided code snippet includes necessary dependencies for implementing the `pitch_shift` function. Write a Python function `def pitch_shift( audio: Union[str, np.ndarray], sample_rate: int = DEFAULT_SAMPLE_RATE, n_steps: float = 1.0, output_path: Optional[str] = None, metadata: Optional[List[Dict[str, Any]]] = None, ) -> Tuple[np.ndarray, int]` to solve the following problem:
Shifts the pitch of the audio by `n_steps` @param audio: the path to the audio or a variable of type np.ndarray that will be augmented @param sample_rate: the audio sample rate of the inputted audio @param n_steps: each step is equal to one semitone @param output_path: the path in which the resulting audio will be stored. If None, the resulting np.ndarray will still be returned @param metadata: if set to be a list, metadata about the function execution including its name, the source & dest duration, sample rates, etc. will be appended to the inputted list. If set to None, no metadata will be appended @returns: the augmented audio array and sample rate
Here is the function:
def pitch_shift(
    audio: Union[str, np.ndarray],
    sample_rate: int = DEFAULT_SAMPLE_RATE,
    n_steps: float = 1.0,
    output_path: Optional[str] = None,
    metadata: Optional[List[Dict[str, Any]]] = None,
) -> Tuple[np.ndarray, int]:
    """
    Shifts the pitch of the audio by `n_steps`

    @param audio: the path to the audio or a variable of type np.ndarray that
        will be augmented
    @param sample_rate: the audio sample rate of the inputted audio
    @param n_steps: each step is equal to one semitone
    @param output_path: the path in which the resulting audio will be stored.
        If None, the resulting np.ndarray will still be returned
    @param metadata: if set to be a list, metadata about the function execution
        will be appended to it; if None, no metadata is recorded
    @returns: the augmented audio array and sample rate
    """
    assert isinstance(n_steps, (int, float)), "Expected 'n_steps' to be a number"

    audio, sample_rate = audutils.validate_and_load_audio(audio, sample_rate)

    if audio.ndim == 1:
        shifted = librosa.effects.pitch_shift(audio, sr=sample_rate, n_steps=n_steps)
    else:
        # shift each channel independently; librosa wants Fortran-ordered data
        shifted = np.vstack(
            [
                librosa.effects.pitch_shift(
                    np.asfortranarray(channel), sr=sample_rate, n_steps=n_steps
                )
                for channel in audio
            ]
        )

    audutils.get_metadata(
        metadata=metadata,
        function_name="pitch_shift",
        audio=audio,
        sample_rate=sample_rate,
        dst_audio=shifted,
        dst_sample_rate=sample_rate,
        output_path=output_path,
        n_steps=n_steps,
    )

    return audutils.ret_and_save_audio(shifted, output_path, sample_rate)
160,572 | from copy import deepcopy
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
import numpy as np
import torch
from augly.audio import utils as audutils
from augly.utils import DEFAULT_SAMPLE_RATE
from augly.utils.libsndfile import install_libsndfile
import librosa
from torchaudio import sox_effects
from torchaudio.functional import fftconvolve, resample
The provided code snippet includes necessary dependencies for implementing the `reverb` function. Write a Python function `def reverb( audio: Union[str, np.ndarray], sample_rate: int = DEFAULT_SAMPLE_RATE, reverberance: float = 50.0, hf_damping: float = 50.0, room_scale: float = 100.0, stereo_depth: float = 100.0, pre_delay: float = 0.0, wet_gain: float = 0.0, wet_only: bool = False, output_path: Optional[str] = None, metadata: Optional[List[Dict[str, Any]]] = None, ) -> Tuple[np.ndarray, int]` to solve the following problem:
Adds reverberation to the audio @param audio: the path to the audio or a variable of type np.ndarray that will be augmented @param sample_rate: the audio sample rate of the inputted audio @param reverberance: (%) sets the length of the reverberation tail. This determines how long the reverberation continues for after the original sound being reverbed comes to an end, and so simulates the "liveliness" of the room acoustics @param hf_damping: (%) increasing the damping produces a more "muted" effect. The reverberation does not build up as much, and the high frequencies decay faster than the low frequencies @param room_scale: (%) sets the size of the simulated room. A high value will simulate the reverberation effect of a large room and a low value will simulate the effect of a small room @param stereo_depth: (%) sets the apparent "width" of the reverb effect for stereo tracks only. Increasing this value applies more variation between left and right channels, creating a more "spacious" effect. When set at zero, the effect is applied independently to left and right channels @param pre_delay: (ms) delays the onset of the reverberation for the set time after the start of the original input. This also delays the onset of the reverb tail @param wet_gain: (db) applies volume adjustment to the reverberation ("wet") component in the mix @param wet_only: only the wet signal (added reverberation) will be in the resulting output, and the original audio will be removed @param output_path: the path in which the resulting audio will be stored. If None, the resulting np.ndarray will still be returned @param metadata: if set to be a list, metadata about the function execution including its name, the source & dest duration, sample rates, etc. will be appended to the inputted list. If set to None, no metadata will be appended @returns: the augmented audio array and sample rate
Here is the function:
def reverb(
    audio: Union[str, np.ndarray],
    sample_rate: int = DEFAULT_SAMPLE_RATE,
    reverberance: float = 50.0,
    hf_damping: float = 50.0,
    room_scale: float = 100.0,
    stereo_depth: float = 100.0,
    pre_delay: float = 0.0,
    wet_gain: float = 0.0,
    wet_only: bool = False,
    output_path: Optional[str] = None,
    metadata: Optional[List[Dict[str, Any]]] = None,
) -> Tuple[np.ndarray, int]:
    """
    Adds reverberation to the audio (via the sox "reverb" effect)

    @param audio: the path to the audio or a variable of type np.ndarray that
        will be augmented
    @param sample_rate: the audio sample rate of the inputted audio
    @param reverberance: (%) sets the length of the reverberation tail,
        simulating the "liveliness" of the room acoustics
    @param hf_damping: (%) increasing the damping produces a more "muted"
        effect; high frequencies decay faster than low frequencies
    @param room_scale: (%) sets the size of the simulated room
    @param stereo_depth: (%) sets the apparent "width" of the reverb effect for
        stereo tracks only; at zero, channels are processed independently
    @param pre_delay: (ms) delays the onset of the reverberation (and the
        reverb tail) after the start of the original input
    @param wet_gain: (db) volume adjustment applied to the reverberation
        ("wet") component in the mix
    @param wet_only: if True, only the wet signal (added reverberation) is in
        the output and the original audio is removed
    @param output_path: the path in which the resulting audio will be stored.
        If None, the resulting np.ndarray will still be returned
    @param metadata: if set to be a list, metadata about the function execution
        including its name, the source & dest duration, sample rates, etc. will
        be appended to the inputted list. If set to None, no metadata will be
        appended
    @returns: the augmented audio array and sample rate
    """
    assert isinstance(
        reverberance, (int, float)
    ), "Expected 'reverberance' to be a number"
    assert isinstance(hf_damping, (int, float)), "Expected 'hf_damping' to be a number"
    assert isinstance(room_scale, (int, float)), "Expected 'room_scale' to be a number"
    assert isinstance(
        stereo_depth, (int, float)
    ), "Expected 'stereo_depth' to be a number"
    assert isinstance(pre_delay, (int, float)), "Expected 'pre_delay' to be a number"
    assert isinstance(wet_gain, (int, float)), "Expected 'wet_gain' to be a number"
    # Fixed assertion message: it previously said "to be a number" for a bool.
    assert isinstance(wet_only, bool), "Expected 'wet_only' to be a boolean"

    audio, sample_rate = audutils.validate_and_load_audio(audio, sample_rate)

    if metadata is not None:
        func_kwargs = deepcopy(locals())
        func_kwargs.pop("metadata")

    # sox expects a (channels, samples) tensor. Track the channel count so a
    # mono input can be flattened back to 1-D afterwards — the previous check
    # (`audio.shape[0] == 1`) compared against the sample count for 1-D input
    # and therefore almost never restored the mono shape, unlike the sibling
    # augmentations in this module.
    num_channels = 1 if audio.ndim == 1 else audio.shape[0]
    aug_audio = audio.reshape((num_channels, -1))

    effect = ["reverb"]
    if wet_only:
        effect.append("-w")

    aug_audio, out_sample_rate = sox_effects.apply_effects_tensor(
        torch.Tensor(aug_audio),
        sample_rate,
        [
            effect
            + [
                str(reverberance),
                str(hf_damping),
                str(room_scale),
                str(stereo_depth),
                str(pre_delay),
                str(wet_gain),
            ]
        ],
    )
    aug_audio = aug_audio.numpy()
    if num_channels == 1:
        aug_audio = aug_audio.reshape((aug_audio.shape[-1],))

    if metadata is not None:
        audutils.get_metadata(
            metadata=metadata,
            function_name="reverb",
            dst_audio=aug_audio,
            dst_sample_rate=out_sample_rate,
            # pyre-fixme[61]: `func_kwargs` may not be initialized here.
            **func_kwargs,
        )

    return audutils.ret_and_save_audio(aug_audio, output_path, out_sample_rate)
160,573 | from copy import deepcopy
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
import numpy as np
import torch
from augly.audio import utils as audutils
from augly.utils import DEFAULT_SAMPLE_RATE
from augly.utils.libsndfile import install_libsndfile
import librosa
from torchaudio import sox_effects
from torchaudio.functional import fftconvolve, resample
The provided code snippet includes necessary dependencies for implementing the `speed` function. Write a Python function `def speed( audio: Union[str, np.ndarray], sample_rate: int = DEFAULT_SAMPLE_RATE, factor: float = 2.0, output_path: Optional[str] = None, metadata: Optional[List[Dict[str, Any]]] = None, ) -> Tuple[np.ndarray, int]` to solve the following problem:
Changes the speed of the audio, affecting pitch as well @param audio: the path to the audio or a variable of type np.ndarray that will be augmented @param sample_rate: the audio sample rate of the inputted audio @param factor: the speed factor. If rate > 1 the audio will be sped up by that factor; if rate < 1 the audio will be slowed down by that factor @param output_path: the path in which the resulting audio will be stored. If None, the resulting np.ndarray will still be returned @param metadata: if set to be a list, metadata about the function execution including its name, the source & dest duration, sample rates, etc. will be appended to the inputted list. If set to None, no metadata will be appended @returns: the augmented audio array and sample rate
Here is the function:
def speed(
    audio: Union[str, np.ndarray],
    sample_rate: int = DEFAULT_SAMPLE_RATE,
    factor: float = 2.0,
    output_path: Optional[str] = None,
    metadata: Optional[List[Dict[str, Any]]] = None,
) -> Tuple[np.ndarray, int]:
    """
    Changes the speed of the audio, affecting pitch as well

    @param audio: the path to the audio or a variable of type np.ndarray that
        will be augmented
    @param sample_rate: the audio sample rate of the inputted audio
    @param factor: the speed factor. If rate > 1 the audio will be sped up by
        that factor; if rate < 1 the audio will be slowed down by that factor
    @param output_path: the path in which the resulting audio will be stored.
        If None, the resulting np.ndarray will still be returned
    @param metadata: if set to be a list, metadata about the function execution
        will be appended to it; if None, no metadata is recorded
    @returns: the augmented audio array and sample rate
    """
    assert (
        isinstance(factor, (int, float)) and factor > 0
    ), "Expected 'factor' to be a positive number"

    audio, sample_rate = audutils.validate_and_load_audio(audio, sample_rate)

    # Reinterpreting the same samples at a scaled sample rate changes the
    # playback speed (and pitch) without touching the data itself.
    dst_sample_rate = int(sample_rate * factor)

    audutils.get_metadata(
        metadata=metadata,
        function_name="speed",
        audio=audio,
        sample_rate=sample_rate,
        dst_audio=audio,
        dst_sample_rate=dst_sample_rate,
        output_path=output_path,
        factor=factor,
    )

    return audutils.ret_and_save_audio(audio, output_path, dst_sample_rate)
160,574 | from copy import deepcopy
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
import numpy as np
import torch
from augly.audio import utils as audutils
from augly.utils import DEFAULT_SAMPLE_RATE
from augly.utils.libsndfile import install_libsndfile
import librosa
from torchaudio import sox_effects
from torchaudio.functional import fftconvolve, resample
The provided code snippet includes necessary dependencies for implementing the `tempo` function. Write a Python function `def tempo( audio: Union[str, np.ndarray], sample_rate: int = DEFAULT_SAMPLE_RATE, factor: float = 2.0, output_path: Optional[str] = None, metadata: Optional[List[Dict[str, Any]]] = None, ) -> Tuple[np.ndarray, int]` to solve the following problem:
Adjusts the tempo of the audio by a given factor @param audio: the path to the audio or a variable of type np.ndarray that will be augmented @param sample_rate: the audio sample rate of the inputted audio @param factor: the tempo factor. If rate > 1 the audio will be sped up by that factor; if rate < 1 the audio will be slowed down by that factor, without affecting the pitch @param output_path: the path in which the resulting audio will be stored. If None, the resulting np.ndarray will still be returned @param metadata: if set to be a list, metadata about the function execution including its name, the source & dest duration, sample rates, etc. will be appended to the inputted list. If set to None, no metadata will be appended @returns: the augmented audio array and sample rate
Here is the function:
def tempo(
    audio: Union[str, np.ndarray],
    sample_rate: int = DEFAULT_SAMPLE_RATE,
    factor: float = 2.0,
    output_path: Optional[str] = None,
    metadata: Optional[List[Dict[str, Any]]] = None,
) -> Tuple[np.ndarray, int]:
    """
    Adjusts the tempo of the audio by a given factor, without affecting pitch

    @param audio: the path to the audio or a variable of type np.ndarray that
        will be augmented
    @param sample_rate: the audio sample rate of the inputted audio
    @param factor: the tempo factor. If rate > 1 the audio will be sped up by
        that factor; if rate < 1 the audio will be slowed down by that factor
    @param output_path: the path in which the resulting audio will be stored.
        If None, the resulting np.ndarray will still be returned
    @param metadata: if set to be a list, metadata about the function execution
        will be appended to it; if None, no metadata is recorded
    @returns: the augmented audio array and sample rate
    """
    assert (
        isinstance(factor, (int, float)) and factor > 0
    ), "Expected 'factor' to be a positive number"

    audio, sample_rate = audutils.validate_and_load_audio(audio, sample_rate)

    # sox wants a (channels, samples) tensor; remember the channel count so a
    # mono input can be flattened back to 1-D afterwards.
    channels = audio.shape[0] if audio.ndim > 1 else 1
    stacked = torch.Tensor(audio.reshape((channels, -1)))
    adjusted, out_sample_rate = sox_effects.apply_effects_tensor(
        stacked, sample_rate, [["tempo", str(factor)]]
    )

    adjusted = adjusted.numpy()
    if channels == 1:
        adjusted = adjusted.reshape((adjusted.shape[-1],))

    audutils.get_metadata(
        metadata=metadata,
        function_name="tempo",
        audio=audio,
        sample_rate=sample_rate,
        dst_audio=adjusted,
        dst_sample_rate=out_sample_rate,
        output_path=output_path,
        factor=factor,
    )

    return audutils.ret_and_save_audio(adjusted, output_path, out_sample_rate)
160,575 | from copy import deepcopy
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
import numpy as np
import torch
from augly.audio import utils as audutils
from augly.utils import DEFAULT_SAMPLE_RATE
from augly.utils.libsndfile import install_libsndfile
import librosa
from torchaudio import sox_effects
from torchaudio.functional import fftconvolve, resample
The provided code snippet includes necessary dependencies for implementing the `time_stretch` function. Write a Python function `def time_stretch( audio: Union[str, np.ndarray], sample_rate: int = DEFAULT_SAMPLE_RATE, rate: float = 1.5, output_path: Optional[str] = None, metadata: Optional[List[Dict[str, Any]]] = None, ) -> Tuple[np.ndarray, int]` to solve the following problem:
Time-stretches the audio by a fixed rate @param audio: the path to the audio or a variable of type np.ndarray that will be augmented @param sample_rate: the audio sample rate of the inputted audio @param rate: the time stretch factor. If rate > 1 the audio will be sped up by that factor; if rate < 1 the audio will be slowed down by that factor @param output_path: the path in which the resulting audio will be stored. If None, the resulting np.ndarray will still be returned @param metadata: if set to be a list, metadata about the function execution including its name, the source & dest duration, sample rates, etc. will be appended to the inputted list. If set to None, no metadata will be appended @returns: the augmented audio array and sample rate
Here is the function:
def time_stretch(
    audio: Union[str, np.ndarray],
    sample_rate: int = DEFAULT_SAMPLE_RATE,
    rate: float = 1.5,
    output_path: Optional[str] = None,
    metadata: Optional[List[Dict[str, Any]]] = None,
) -> Tuple[np.ndarray, int]:
    """
    Time-stretches the audio by a fixed rate

    @param audio: the path to the audio or a variable of type np.ndarray that
        will be augmented
    @param sample_rate: the audio sample rate of the inputted audio
    @param rate: the time stretch factor. If rate > 1 the audio will be sped up
        by that factor; if rate < 1 the audio will be slowed down by that factor
    @param output_path: the path in which the resulting audio will be stored.
        If None, the resulting np.ndarray will still be returned
    @param metadata: if set to be a list, metadata about the function execution
        will be appended to it; if None, no metadata is recorded
    @returns: the augmented audio array and sample rate
    """
    assert (
        isinstance(rate, (int, float)) and rate > 0
    ), "Expected 'rate' to be a positive number"

    audio, sample_rate = audutils.validate_and_load_audio(audio, sample_rate)

    if audio.ndim == 1:
        stretched = librosa.effects.time_stretch(audio, rate=rate)
    else:
        # stretch each channel independently; librosa wants Fortran-ordered data
        stretched = np.vstack(
            [
                librosa.effects.time_stretch(np.asfortranarray(channel), rate=rate)
                for channel in audio
            ]
        )

    audutils.get_metadata(
        metadata=metadata,
        function_name="time_stretch",
        audio=audio,
        sample_rate=sample_rate,
        dst_audio=stretched,
        dst_sample_rate=sample_rate,
        output_path=output_path,
        rate=rate,
    )

    return audutils.ret_and_save_audio(stretched, output_path, sample_rate)
160,576 | from copy import deepcopy
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
import numpy as np
import torch
from augly.audio import utils as audutils
from augly.utils import DEFAULT_SAMPLE_RATE
from augly.utils.libsndfile import install_libsndfile
import librosa
from torchaudio import sox_effects
from torchaudio.functional import fftconvolve, resample
The provided code snippet includes necessary dependencies for implementing the `fft_convolve` function. Write a Python function `def fft_convolve( audio: Union[str, np.ndarray], sample_rate: int = DEFAULT_SAMPLE_RATE, normalize: bool = True, impulse_audio: Optional[Union[str, np.ndarray]] = None, seed: Optional[audutils.RNGSeed] = None, output_path: Optional[str] = None, metadata: Optional[List[Dict[str, Any]]] = None, ) -> Tuple[np.ndarray, int]` to solve the following problem:
Applies a convolution operation to audio using an impulse response as the convolution filter @param audio: the path to the audio or a variable of type np.ndarray that will be augmented @param sample_rate: the audio sample rate of the inputted audio @param normalize: if True, normalize the output to the maximum amplitude @param impulse_audio: the path to the audio or a variable of type np.ndarray that will be used as the convolution filter @param seed: the seed for the random number generator @param output_path: the path in which the resulting audio will be stored. If None, the resulting np.ndarray will still be returned @param metadata: if set to be a list, metadata about the function execution including its name, the source & dest duration, sample rates, etc. will be appended to the inputted list. If set to None, no metadata will be appended @returns: the augmented audio array and sample rate
Here is the function:
def fft_convolve(
    audio: Union[str, np.ndarray],
    sample_rate: int = DEFAULT_SAMPLE_RATE,
    normalize: bool = True,
    impulse_audio: Optional[Union[str, np.ndarray]] = None,
    seed: Optional[audutils.RNGSeed] = None,
    output_path: Optional[str] = None,
    metadata: Optional[List[Dict[str, Any]]] = None,
) -> Tuple[np.ndarray, int]:
    """
    Applies a convolution operation to audio using an impulse response as the convolution filter
    @param audio: the path to the audio or a variable of type np.ndarray that
        will be augmented
    @param sample_rate: the audio sample rate of the inputted audio
    @param normalize: if True, normalize the output to the maximum amplitude
    @param impulse_audio: the path to the audio or a variable of type np.ndarray that
        will be used as the convolution filter
    @param seed: the seed for the random number generator
    @param output_path: the path in which the resulting audio will be stored. If None,
        the resulting np.ndarray will still be returned
    @param metadata: if set to be a list, metadata about the function execution
        including its name, the source & dest duration, sample rates, etc. will be
        appended to the inputted list. If set to None, no metadata will be appended
    @returns: the augmented audio array and sample rate
    """
    audio, sample_rate = audutils.validate_and_load_audio(audio, sample_rate)
    # Remember whether the input was mono so the output can be flattened back
    # to 1-D at the end.
    num_channels = 1 if audio.ndim == 1 else audio.shape[0]
    if impulse_audio is None:
        # No filter supplied: use Gaussian noise of the same shape as the input.
        random_generator = audutils.check_random_state(seed)
        impulse_audio = random_generator.standard_normal(audio.shape)
    else:
        impulse_audio, impulse_sample_rate = audutils.validate_and_load_audio(
            impulse_audio, sample_rate
        )
        if impulse_sample_rate != sample_rate:
            # Resample the impulse response so both signals share one sample rate
            # before convolving.
            impulse_audio = resample(
                torch.tensor(impulse_audio), impulse_sample_rate, sample_rate
            ).numpy()
    aug_audio = fftconvolve(torch.Tensor(audio), torch.Tensor(impulse_audio))
    if normalize:
        # Peak-normalize so the convolved signal's maximum amplitude is 1.
        aug_audio = aug_audio / aug_audio.abs().max()
    aug_audio = aug_audio.numpy()
    if num_channels == 1:
        # Collapse back to 1-D for mono input (the convolution output may carry
        # an extra leading dimension).
        aug_audio = aug_audio.reshape((aug_audio.shape[-1],))
    audutils.get_metadata(
        metadata=metadata,
        function_name="fft_convolve",
        audio=audio,
        sample_rate=sample_rate,
        dst_audio=aug_audio,
        dst_sample_rate=sample_rate,
        output_path=output_path,
    )
    return audutils.ret_and_save_audio(aug_audio, output_path, sample_rate)
160,577 | from typing import Any, Callable, Dict, Optional, Tuple
import numpy as np
def add_background_noise_intensity(snr_level_db: float = 10.0, **kwargs) -> float:
    """Map an SNR (dB) to a 0-100 intensity: lower SNR means louder noise, higher score."""
    assert isinstance(snr_level_db, (float, int)), "snr_level_db must be a number"
    max_snr = 110.0
    fraction = (max_snr - snr_level_db) / max_snr
    return min(fraction * 100.0, 100.0)
160,578 | from typing import Any, Callable, Dict, Optional, Tuple
import numpy as np
def apply_lambda_intensity(
    aug_function: Callable[..., Tuple[np.ndarray, int]],
    **kwargs,
) -> float:
    """
    Intensity of an `apply_lambda` augmentation.

    Delegates to a `<aug_function>_intensity` helper in this module when one
    exists; otherwise assumes a full-strength (100.0) transformation.
    @param aug_function: the applied augmentation, given either as a callable
        or as its name
    @returns: an intensity score in [0, 100]
    """
    # `aug_function` may be a callable or a plain name string; the old code
    # formatted the callable itself into the lookup key (yielding e.g.
    # "<function f at 0x...>_intensity"), so resolve __name__ first.
    func_name = getattr(aug_function, "__name__", aug_function)
    intensity_func = globals().get(f"{func_name}_intensity")
    return intensity_func(**kwargs) if intensity_func else 100.0
160,579 | from typing import Any, Callable, Dict, Optional, Tuple
import numpy as np
def change_volume_intensity(volume_db: float = 0.0, **kwargs) -> float:
    """
    Intensity of a volume change: scales with the absolute dB change, so
    attenuation and amplification of equal magnitude score the same.
    @param volume_db: the volume change in decibels (may be negative)
    @returns: an intensity score in [0, 100]
    """
    # Fixed assert message: negative values are valid here (abs() is used
    # below), so the old "must be a nonnegative number" wording was wrong.
    assert isinstance(volume_db, (float, int)), "volume_db must be a number"
    max_volume_db_val = 110.0
    return min((abs(volume_db) / max_volume_db_val) * 100.0, 100.0)
160,580 | from typing import Any, Callable, Dict, Optional, Tuple
import numpy as np
def clicks_intensity(
    seconds_between_clicks: float = 0.5, snr_level_db: float = 1.0, **kwargs
) -> float:
    """Intensity grows as clicks become denser and as the click SNR drops."""
    assert (
        isinstance(seconds_between_clicks, (float, int)) and seconds_between_clicks >= 0
    ), "seconds_between_clicks must be a nonnegative number"
    assert isinstance(snr_level_db, (float, int)), "snr_level_db must be a number"
    max_spacing, max_snr = 60.0, 110.0
    spacing_term = (max_spacing - seconds_between_clicks) / max_spacing
    snr_term = (max_snr - snr_level_db) / max_snr
    return min(spacing_term * snr_term * 100.0, 100.0)
160,581 | from typing import Any, Callable, Dict, Optional, Tuple
import numpy as np
def clip_intensity(duration_factor: float = 1.0, **kwargs) -> float:
    """Intensity is the fraction of the clip that was removed, as a percentage."""
    assert 0 < duration_factor <= 1, "duration_factor must be a number in (0, 1]"
    removed_fraction = 1.0 - duration_factor
    return min(removed_fraction * 100.0, 100.0)
160,582 | from typing import Any, Callable, Dict, Optional, Tuple
import numpy as np
def harmonic_intensity(**kwargs) -> float:
    """Always full intensity: this augmentation has no strength parameter to scale by."""
    return 100.0
160,583 | from typing import Any, Callable, Dict, Optional, Tuple
import numpy as np
def high_pass_filter_intensity(cutoff_hz: float = 3000.0, **kwargs) -> float:
    """A higher cutoff removes more of the audible band, so intensity scales with cutoff."""
    assert (
        isinstance(cutoff_hz, (float, int)) and cutoff_hz >= 0
    ), "cutoff_hz must be a nonnegative number"
    audible_band_hz = 20000.0
    return min(cutoff_hz / audible_band_hz * 100.0, 100.0)
160,584 | from typing import Any, Callable, Dict, Optional, Tuple
import numpy as np
def insert_in_background_intensity(metadata: Dict[str, Any], **kwargs) -> float:
    """Intensity = share of the output duration contributed by the background audio."""
    src = metadata["src_duration"]
    dst = metadata["dst_duration"]
    return ((dst - src) / dst) * 100.0
160,585 | from typing import Any, Callable, Dict, Optional, Tuple
import numpy as np
def invert_channels_intensity(metadata: Dict[str, Any], **kwargs) -> float:
    """Channel inversion is a no-op on mono audio; otherwise a full-strength change."""
    if metadata["src_num_channels"] == 1:
        return 0.0
    return 100.0
160,586 | from typing import Any, Callable, Dict, Optional, Tuple
import numpy as np
def loop_intensity(n: int = 1, **kwargs) -> float:
    """Intensity scales with the number of added loops, saturating at 100."""
    assert isinstance(n, int) and n >= 0, "Expected 'n' to be a nonnegative integer"
    max_loops = 100
    return min(n / max_loops * 100.0, 100.0)
160,587 | from typing import Any, Callable, Dict, Optional, Tuple
import numpy as np
def low_pass_filter_intensity(cutoff_hz: float = 500.0, **kwargs) -> float:
    """A lower cutoff removes more of the audible band, so intensity falls with cutoff."""
    assert (
        isinstance(cutoff_hz, (float, int)) and cutoff_hz >= 0
    ), "cutoff_hz must be a nonnegative number"
    audible_band_hz = 20000.0
    return min((audible_band_hz - cutoff_hz) / audible_band_hz * 100.0, 100.0)
160,588 | from typing import Any, Callable, Dict, Optional, Tuple
import numpy as np
def normalize_intensity(norm: Optional[float] = np.inf, **kwargs) -> float:
    """Any truthy norm means the audio was normalized (full intensity); falsy means it wasn't."""
    if norm:
        return 100.0
    return 0.0
160,589 | from typing import Any, Callable, Dict, Optional, Tuple
import numpy as np
def peaking_equalizer_intensity(q: float, gain_db: float, **kwargs) -> float:
    """Intensity grows with a wider filter (lower q) and a larger absolute gain."""
    assert isinstance(q, (int, float)) and q > 0, "Expected 'q' to be a positive number"
    assert isinstance(gain_db, (int, float)), "Expected 'gain_db' to be a number"
    max_q, max_gain_db = 46, 110.0
    width_term = (max_q - q) / max_q
    gain_term = abs(gain_db) / max_gain_db
    return min(width_term * gain_term * 100.0, 100.0)
160,590 | from typing import Any, Callable, Dict, Optional, Tuple
import numpy as np
def percussive_intensity(**kwargs) -> float:
    """Always full intensity: this augmentation has no strength parameter to scale by."""
    return 100.0
160,591 | from typing import Any, Callable, Dict, Optional, Tuple
import numpy as np
def pitch_shift_intensity(n_steps: float = 2.0, **kwargs) -> float:
    """Intensity scales with the magnitude of the pitch shift, in either direction."""
    assert isinstance(n_steps, (float, int)), "n_steps must be a number"
    max_steps = 84.0
    return min(abs(n_steps) / max_steps * 100.0, 100.0)
160,592 | from typing import Any, Callable, Dict, Optional, Tuple
import numpy as np
def reverb_intensity(
    reverberance: float = 50.0,
    wet_only: bool = False,
    room_scale: float = 100.0,
    **kwargs,
) -> float:
    """Intensity combines reverberance and room size; wet-only output is always maximal."""
    assert (
        isinstance(reverberance, (float, int))
        and 0 <= reverberance <= 100
        and isinstance(room_scale, (float, int))
        and 0 <= room_scale <= 100
    ), "reverberance & room_scale must be numbers in [0, 100]"
    if wet_only:
        # Dropping the dry signal entirely is the strongest possible change.
        return 100.0
    reverb_term = reverberance / 100.0
    room_term = room_scale / 100.0
    return min(reverb_term * room_term * 100.0, 100.0)
160,593 | from typing import Any, Callable, Dict, Optional, Tuple
import numpy as np
def speed_intensity(factor: float = 2.0, **kwargs) -> float:
    """Intensity scales with how far the speed moved from 1x, in either direction."""
    assert (
        isinstance(factor, (float, int)) and factor > 0
    ), "factor must be a positive number"
    if factor == 1.0:
        return 0.0
    # Slowdowns and speedups of equal magnitude score identically: e.g.
    # factor 0.5 and factor 2.0 both change the speed by 2x.
    change = factor if factor >= 1 else 1 / factor
    return min(change / 10.0 * 100.0, 100.0)
160,594 | from typing import Any, Callable, Dict, Optional, Tuple
import numpy as np
def tempo_intensity(factor: float = 2.0, **kwargs) -> float:
    """Intensity scales with how far the tempo moved from 1x, in either direction."""
    assert (
        isinstance(factor, (float, int)) and factor > 0
    ), "factor must be a positive number"
    if factor == 1.0:
        return 0.0
    # Treat factor f and 1/f as the same magnitude of tempo change.
    change = factor if factor >= 1 else 1 / factor
    return min(change / 10.0 * 100.0, 100.0)
160,595 | from typing import Any, Callable, Dict, Optional, Tuple
import numpy as np
def time_stretch_intensity(rate: float = 1.5, **kwargs) -> float:
    """
    Intensity of a time-stretch: scales with how far `rate` is from 1.0 (no
    change), treating speedups and slowdowns of equal magnitude the same.
    @param rate: the time-stretch factor; rate > 1 speeds audio up, rate < 1 slows it
    @returns: an intensity score in [0, 100]
    """
    # Fixed assert message: the parameter here is `rate`, not `factor`.
    assert (
        isinstance(rate, (float, int)) and rate > 0
    ), "rate must be a positive number"
    if rate == 1.0:
        return 0.0
    max_rate_val = 10.0
    speed_change_rate = rate if rate >= 1 else 1 / rate
    return min((speed_change_rate / max_rate_val) * 100.0, 100.0)
160,596 | from typing import Any, Callable, Dict, Optional, Tuple
import numpy as np
def to_mono_intensity(metadata: Dict[str, Any], **kwargs) -> float:
    """Downmixing is a no-op on mono input; otherwise a full-strength change."""
    if metadata["src_num_channels"] == 1:
        return 0.0
    return 100.0
160,597 | from typing import Any, Callable, Dict, Optional, Tuple
import numpy as np
def fft_convolve_intensity(**kwargs) -> float:
    """Always full intensity: the convolution affects the entire signal."""
    # This is a full convolution, so return 100.0 here.
    return 100.0
160,598 | import re
from typing import List, Optional, Tuple
import regex
from augly.utils.libsndfile import install_libsndfile
from nlpaug import Augmenter
from nlpaug.util import Method
# NOTE(review): TOKENIZER_REGEXPS is defined elsewhere in this module —
# presumably an iterable of alternative token patterns; confirm at its definition.
TOKENIZER_REGEX = regex.compile(
    r"""(%s)""" % "|".join(TOKENIZER_REGEXPS), regex.VERBOSE | regex.UNICODE
)
def tokenize(text: str) -> List[str]:
    """Split `text` into tokens by collecting every match of TOKENIZER_REGEX, in order."""
    return TOKENIZER_REGEX.findall(text)
160,599 | import re
from typing import List, Optional, Tuple
import regex
from augly.utils.libsndfile import install_libsndfile
from nlpaug import Augmenter
from nlpaug.util import Method
PARENS_BRACKETS = [
    (re.compile(r"\s([\[\(\{\<])\s"), r" \1"),
    (re.compile(r"\s([\]\)\}\>])\s"), r"\1 "),
]
PUNCTUATION = [
    (re.compile(r"\s([-])\s"), r"\1"),  # Zero pad
    (re.compile(r"(\s)?([#])\s"), r"\2"),  # Hashtags
    (re.compile(r"\s([,;:%])\s"), r"\1 "),  # Right pad
    (re.compile(r"([\$])\s([\d])"), r"\1\2"),  # $ amounts
    (re.compile(r"([\$])\s"), r"\1"),  # Consecutive $ signs
    (re.compile(r"(\s)?([\.\?\!])"), r"\2"),  # End punctuation
]
QUOTES = [
    (re.compile(r"([\'])\s(.*?)\s([\'])"), r"\1\2\3"),
    (re.compile(r"([\"])\s(.*?)\s([\"])"), r"\1\2\3"),
    (re.compile(r"\s(\')\s"), r"\1 "),
]
def detokenize(tokens: List[str]) -> str:
    """Join tokens into a sentence, undoing the tokenizer's spacing around punctuation."""
    padded = " " + " ".join(tokens) + " "
    # Apply the fix-up passes in order: brackets, then punctuation, then quotes.
    for pattern, replacement in PARENS_BRACKETS + PUNCTUATION + QUOTES:
        padded = pattern.sub(replacement, padded)
    return padded.strip()
160,600 | import re
from typing import List, Optional, Tuple
import regex
from augly.utils.libsndfile import install_libsndfile
from nlpaug import Augmenter
from nlpaug.util import Method
SPLIT_BY_WHITESPACE = re.compile(r"(\S+)")
def split_words_on_whitespace(text: str) -> Tuple[List[str], List[str]]:
    """
    Split `text` into (words, whitespace) so the original can be reassembled.

    The leading and trailing whitespace runs are always present, even when
    they are empty strings, so there is one more whitespace entry than words.
    """
    pieces = SPLIT_BY_WHITESPACE.split(text)
    words = pieces[1::2]
    gaps = pieces[::2]
    return words, gaps
160,601 | import re
from typing import List, Optional, Tuple
import regex
from augly.utils.libsndfile import install_libsndfile
from nlpaug import Augmenter
from nlpaug.util import Method
def rejoin_words_and_whitespace(words: List[str], whitespace: List[str]) -> str:
    """Inverse of split_words_on_whitespace: interleave whitespace runs and words."""
    # The split always yields exactly one more whitespace run than words.
    assert len(whitespace) == len(words) + 1, "Input lengths do not match!"
    pieces = []
    for gap, word in zip(whitespace, words):
        pieces.append(gap)
        pieces.append(word)
    pieces.append(whitespace[-1])
    return "".join(pieces)
160,602 | import re
from typing import List, Optional, Tuple
import regex
from augly.utils.libsndfile import install_libsndfile
from nlpaug import Augmenter
from nlpaug.util import Method
def validate_augmenter_params(
    aug_char_min: int,
    aug_char_max: int,
    aug_char_p: float,
    aug_word_min: int,
    aug_word_max: int,
    aug_word_p: float,
) -> None:
    """Sanity-check char/word augmentation counts and probabilities, raising AssertionError on the first bad value."""
    # Checks run in the original order so the same assertion fires first.
    checks = (
        (aug_char_min >= 0, "aug_char_min must be non-negative"),
        (aug_char_max >= 0, "aug_char_max must be non-negative"),
        (0 <= aug_char_p <= 1, "aug_char_p must be a value in the range [0, 1]"),
        (aug_word_min >= 0, "aug_word_min must be non-negative"),
        (aug_word_max >= 0, "aug_word_max must be non-negative"),
        (0 <= aug_word_p <= 1, "aug_word_p must be a value in the range [0,1]"),
    )
    for ok, message in checks:
        assert ok, message
160,603 | import re
from typing import List, Optional, Tuple
import regex
from augly.utils.libsndfile import install_libsndfile
from nlpaug import Augmenter
from nlpaug.util import Method
def get_aug_idxes(
    augmenter: Augmenter,
    tokens: List[str],
    filtered_idxes: List[int],
    aug_cnt: int,
    mode: str,
    min_char: Optional[int] = None,
) -> List[int]:
    """
    Choose which token indices an augmenter should modify.

    Indices of the augmenter's `priority_words` (word mode only) are taken
    first; the remainder of the `aug_cnt` budget is filled by randomly
    sampling from the other eligible `filtered_idxes`. Tokens in the
    augmenter's `ignore_words`, or shorter than `min_char`, are never chosen.
    """
    assert (
        mode in Method.getall()
    ), "Expected 'mode' to be a value defined in nlpaug.util.method.Method"
    priority_idxes = []
    # Both attributes are optional on the augmenter; fall back to harmless defaults.
    priority_words = getattr(augmenter, "priority_words", None)
    ignore_words = getattr(augmenter, "ignore_words", set())
    if mode == Method.WORD and priority_words is not None:
        priority_words_set = set(priority_words)
        for i, token in enumerate(tokens):
            if token in priority_words_set and token.lower() not in ignore_words:
                if min_char is None or len(token) >= min_char:
                    priority_idxes.append(i)
    # Remaining eligible candidates, excluding anything already prioritized.
    idxes = []
    for i in filtered_idxes:
        if i not in priority_idxes:
            if (min_char is None or len(tokens[i]) >= min_char) and tokens[
                i
            ].lower() not in ignore_words:
                idxes.append(i)
    if len(priority_idxes) + len(idxes) == 0:
        return []
    if len(priority_idxes) <= aug_cnt:
        # Take every priority index, then fill the remaining budget by sampling.
        aug_idxes = priority_idxes
        aug_cnt -= len(priority_idxes)
        if len(idxes) < aug_cnt:
            aug_cnt = len(idxes)
        aug_idxes += augmenter.sample(idxes, aug_cnt)
    else:
        # More priority words than budget: sample among the priority indices only.
        aug_idxes = augmenter.sample(priority_idxes, aug_cnt)
    return aug_idxes
160,604 | from typing import List, Union
from augly.text.augmenters.utils import (
detokenize,
get_aug_idxes,
tokenize,
UPSIDE_DOWN_CHAR_MAPPING,
)
from augly.utils.libsndfile import install_libsndfile
from nlpaug.augmenter.word import Augmenter
from nlpaug.util import Action, Method
UPSIDE_DOWN_CHAR_MAPPING = dict(
zip(
"zyxwvutsrqponmlkjihgfedcbaZYXWVUTSRQPONMLKJIHGFEDCBA0987654321&_?!\"'.,;",
"zʎxʍʌnʇsɹbdouɯlʞɾᴉɥɓɟǝpɔqɐZ⅄XMΛՈꞱSᴚტԀONW⅂ꓘᒋIH⅁ℲƎᗡƆᗺⱯ068ㄥ9Ϛ߈Ɛᘔ⇂⅋‾¿¡„,˙'؛",
)
)
def _flip(c: str) -> str:
if c in UPSIDE_DOWN_CHAR_MAPPING:
return UPSIDE_DOWN_CHAR_MAPPING[c]
else:
return c | null |
160,605 | import json
from copy import deepcopy
from typing import Any, Dict, List, Optional, Union
from augly import utils
from augly.text import intensity as txtintensity
def get_func_kwargs(
    metadata: Optional[List[Dict[str, Any]]], local_kwargs: Dict[str, Any], **kwargs
) -> Dict[str, Any]:
    """Snapshot an augmentation's call arguments (minus `metadata`) for logging; empty when metadata collection is off."""
    if metadata is None:
        return {}
    snapshot = deepcopy(local_kwargs)
    del snapshot["metadata"]
    snapshot.update(**kwargs)
    return snapshot
160,606 | import json
from copy import deepcopy
from typing import Any, Dict, List, Optional, Union
from augly import utils
from augly.text import intensity as txtintensity
def get_metadata(
    metadata: Optional[List[Dict[str, Any]]],
    function_name: str,
    texts: Optional[List[str]] = None,
    aug_texts: Optional[Union[List[str], str]] = None,
    **kwargs,
) -> None:
    """Append an execution record (including an intensity score) for `function_name` to `metadata`; no-op when metadata is None."""
    if metadata is None:
        return
    assert isinstance(
        metadata, list
    ), "Expected `metadata` to be set to None or of type list"
    assert (
        texts is not None
    ), "Expected `texts` to be passed in if metadata was provided"
    assert (
        aug_texts is not None
    ), "Expected `aug_texts` to be passed in if metadata was provided"
    entry: Dict[str, Any] = {
        "name": function_name,
        "input_type": "list" if isinstance(texts, list) else "string",
        "src_length": len(texts) if isinstance(texts, list) else 1,
        "dst_length": len(aug_texts) if isinstance(aug_texts, list) else 1,
        **kwargs,
    }
    metadata.append(entry)
    # Score via the matching `<function_name>_intensity` helper when one
    # exists; unknown functions default to an intensity of 0.0.
    intensity_fn = getattr(txtintensity, f"{function_name}_intensity", lambda **_: 0.0)
    entry["intensity"] = intensity_fn(**{"metadata": entry, "texts": texts, **kwargs})
160,607 | import json
from copy import deepcopy
from typing import Any, Dict, List, Optional, Union
from augly import utils
from augly.text import intensity as txtintensity
The provided code snippet includes necessary dependencies for implementing the `get_gendered_words_mapping` function. Write a Python function `def get_gendered_words_mapping(mapping: Union[str, Dict[str, str]]) -> Dict[str, str]` to solve the following problem:
Note: The `swap_gendered_words` augmentation, including this logic, was originally written by Adina Williams and has been used in influential work, e.g. https://arxiv.org/pdf/2005.00614.pdf
Here is the function:
def get_gendered_words_mapping(mapping: Union[str, Dict[str, str]]) -> Dict[str, str]:
    """
    Note: The `swap_gendered_words` augmentation, including this logic, was originally
    written by Adina Williams and has been used in influential work, e.g.
    https://arxiv.org/pdf/2005.00614.pdf
    """
    assert isinstance(
        mapping, (str, Dict)
    ), "Mapping must be either a dict or filepath to a mapping of gendered words"
    if isinstance(mapping, str):
        # Treat the string as an iopath URI and load the JSON mapping from it.
        with utils.pathmgr.open(mapping) as f:
            return json.load(f)
    # Already a dict; pass it through untouched.
    return mapping
160,608 | from copy import deepcopy
from typing import Any, Callable, Dict, List, Optional, Union
from augly.text import augmenters as a, utils as txtutils
from augly.utils import (
CONTRACTIONS_MAPPING,
FUN_FONTS_PATH,
GENDERED_WORDS_MAPPING,
MISSPELLING_DICTIONARY_PATH,
UNICODE_MAPPING_PATH,
)
The provided code snippet includes necessary dependencies for implementing the `apply_lambda` function. Write a Python function `def apply_lambda( texts: Union[str, List[str]], aug_function: Callable[..., List[str]] = lambda x: x, metadata: Optional[List[Dict[str, Any]]] = None, **kwargs, ) -> Union[str, List[str]]` to solve the following problem:
Apply a user-defined lambda on a list of text documents @param texts: a string or a list of text documents to be augmented @param aug_function: the augmentation function to be applied onto the text (should expect a list of text documents as input and return a list of text documents) @param **kwargs: the input attributes to be passed into the augmentation function to be applied @param metadata: if set to be a list, metadata about the function execution including its name, the source & dest length, etc. will be appended to the inputted list. If set to None, no metadata will be appended or returned @returns: the list of augmented text documents
Here is the function:
def apply_lambda(
    texts: Union[str, List[str]],
    aug_function: Callable[..., List[str]] = lambda x: x,
    metadata: Optional[List[Dict[str, Any]]] = None,
    **kwargs,
) -> Union[str, List[str]]:
    """
    Apply a user-defined lambda on a list of text documents
    @param texts: a string or a list of text documents to be augmented
    @param aug_function: the augmentation function to be applied onto the text
        (should expect a list of text documents as input and return a list of
        text documents)
    @param **kwargs: the input attributes to be passed into the augmentation
        function to be applied
    @param metadata: if set to be a list, metadata about the function execution
        including its name, the source & dest length, etc. will be appended to
        the inputted list. If set to None, no metadata will be appended or returned
    @returns: the list of augmented text documents
    """
    assert callable(aug_function), (
        repr(type(aug_function).__name__) + " object is not callable"
    )
    # locals() is captured here, before any helper variables exist, so
    # func_kwargs reflects exactly the caller-supplied arguments.
    func_kwargs = deepcopy(locals())
    if aug_function is not None:
        # Record the function by name — the callable itself isn't useful metadata.
        try:
            func_kwargs["aug_function"] = aug_function.__name__
        except AttributeError:
            # Callable objects (e.g. functors/partials) may lack __name__;
            # fall back to the class name.
            func_kwargs["aug_function"] = type(aug_function).__name__
    func_kwargs = txtutils.get_func_kwargs(metadata, func_kwargs)
    aug_texts = aug_function(texts, **kwargs)
    txtutils.get_metadata(
        metadata=metadata,
        function_name="apply_lambda",
        aug_texts=aug_texts,
        **func_kwargs,
    )
    return aug_texts
160,609 | from copy import deepcopy
from typing import Any, Callable, Dict, List, Optional, Union
from augly.text import augmenters as a, utils as txtutils
from augly.utils import (
CONTRACTIONS_MAPPING,
FUN_FONTS_PATH,
GENDERED_WORDS_MAPPING,
MISSPELLING_DICTIONARY_PATH,
UNICODE_MAPPING_PATH,
)
The provided code snippet includes necessary dependencies for implementing the `change_case` function. Write a Python function `def change_case( texts: Union[str, List[str]], granularity: str = "word", cadence: float = 1.0, case: str = "random", seed: Optional[int] = 10, metadata: Optional[List[Dict[str, Any]]] = None, ) -> Union[str, List[str]]` to solve the following problem:
Changes the case (e.g. upper, lower, title) of random chars, words, or the entire text @param texts: a string or a list of text documents to be augmented @param granularity: 'all' (case of the entire text is changed), 'word' (case of random words is changed), or 'char' (case of random chars is changed) @param cadence: how frequent (i.e. between this many characters/words) to change the case. Must be at least 1.0. Non-integer values are used as an 'average' cadence. Not used for granularity 'all' @param case: the case to change words to; valid values are 'lower', 'upper', 'title', or 'random' (in which case the case will randomly be changed to one of the previous three) @param seed: if provided, this will set the random seed to ensure consistency between runs @param metadata: if set to be a list, metadata about the function execution including its name, the source & dest length, etc. will be appended to the inputted list. If set to None, no metadata will be appended or returned @returns: the list of augmented text documents
Here is the function:
def change_case(
    texts: Union[str, List[str]],
    granularity: str = "word",
    cadence: float = 1.0,
    case: str = "random",
    seed: Optional[int] = 10,
    metadata: Optional[List[Dict[str, Any]]] = None,
) -> Union[str, List[str]]:
    """
    Changes the case (e.g. upper, lower, title) of random chars, words, or the entire
    text
    @param texts: a string or a list of text documents to be augmented
    @param granularity: 'all' (case of the entire text is changed), 'word' (case of
        random words is changed), or 'char' (case of random chars is changed)
    @param cadence: how frequent (i.e. between this many characters/words) to change the
        case. Must be at least 1.0. Non-integer values are used as an 'average' cadence.
        Not used for granularity 'all'
    @param case: the case to change words to; valid values are 'lower', 'upper', 'title',
        or 'random' (in which case the case will randomly be changed to one of the
        previous three)
    @param seed: if provided, this will set the random seed to ensure consistency between
        runs
    @param metadata: if set to be a list, metadata about the function execution
        including its name, the source & dest length, etc. will be appended to
        the inputted list. If set to None, no metadata will be appended or returned
    @returns: the list of augmented text documents
    """
    # locals() must be captured before any new locals are defined so the
    # metadata snapshot holds exactly the caller-facing arguments.
    func_kwargs = txtutils.get_func_kwargs(metadata, locals())
    # Delegate the actual transformation to the project's CaseAugmenter.
    case_aug = a.CaseAugmenter(case, granularity, cadence, seed)
    aug_texts = case_aug.augment(texts)
    txtutils.get_metadata(
        metadata=metadata,
        function_name="change_case",
        aug_texts=aug_texts,
        **func_kwargs,
    )
    return aug_texts
160,610 | from copy import deepcopy
from typing import Any, Callable, Dict, List, Optional, Union
from augly.text import augmenters as a, utils as txtutils
from augly.utils import (
CONTRACTIONS_MAPPING,
FUN_FONTS_PATH,
GENDERED_WORDS_MAPPING,
MISSPELLING_DICTIONARY_PATH,
UNICODE_MAPPING_PATH,
)
The provided code snippet includes necessary dependencies for implementing the `contractions` function. Write a Python function `def contractions( texts: Union[str, List[str]], aug_p: float = 0.3, mapping: Optional[Union[str, Dict[str, Any]]] = CONTRACTIONS_MAPPING, max_contraction_length: int = 2, seed: Optional[int] = 10, metadata: Optional[List[Dict[str, Any]]] = None, ) -> Union[str, List[str]]` to solve the following problem:
Replaces pairs (or longer strings) of words with contractions given a mapping @param texts: a string or a list of text documents to be augmented @param aug_p: the probability that each pair (or longer string) of words will be replaced with the corresponding contraction, if there is one in the mapping @param mapping: either a dictionary representing the mapping or an iopath uri where the mapping is stored @param max_contraction_length: the words in each text will be checked for matches in the mapping up to this length; i.e. if 'max_contraction_length' is 3 then every substring of 2 *and* 3 words will be checked @param seed: if provided, this will set the random seed to ensure consistency between runs @param metadata: if set to be a list, metadata about the function execution including its name, the source & dest length, etc. will be appended to the inputted list. If set to None, no metadata will be appended or returned @returns: the list of augmented text documents
Here is the function:
def contractions(
    texts: Union[str, List[str]],
    aug_p: float = 0.3,
    mapping: Optional[Union[str, Dict[str, Any]]] = CONTRACTIONS_MAPPING,
    max_contraction_length: int = 2,
    seed: Optional[int] = 10,
    metadata: Optional[List[Dict[str, Any]]] = None,
) -> Union[str, List[str]]:
    """
    Replaces pairs (or longer strings) of words with contractions given a mapping
    @param texts: a string or a list of text documents to be augmented
    @param aug_p: the probability that each pair (or longer string) of words will be
        replaced with the corresponding contraction, if there is one in the mapping
    @param mapping: either a dictionary representing the mapping or an iopath uri where
        the mapping is stored
    @param max_contraction_length: the words in each text will be checked for matches in
        the mapping up to this length; i.e. if 'max_contraction_length' is 3 then every
        substring of 2 *and* 3 words will be checked
    @param seed: if provided, this will set the random seed to ensure consistency between
        runs
    @param metadata: if set to be a list, metadata about the function execution
        including its name, the source & dest length, etc. will be appended to
        the inputted list. If set to None, no metadata will be appended or returned
    @returns: the list of augmented text documents
    """
    assert 0 <= aug_p <= 1, "'aug_p' must be in the range [0, 1]"
    # locals() must be captured before any new locals are defined so the
    # metadata snapshot holds exactly the caller-facing arguments.
    func_kwargs = txtutils.get_func_kwargs(metadata, locals())
    # Delegate the actual substitution to the project's ContractionAugmenter.
    contraction_aug = a.ContractionAugmenter(
        aug_p, mapping, max_contraction_length, seed
    )
    aug_texts = contraction_aug.augment(texts)
    txtutils.get_metadata(
        metadata=metadata,
        function_name="contractions",
        aug_texts=aug_texts,
        **func_kwargs,
    )
    return aug_texts
160,611 | from copy import deepcopy
from typing import Any, Callable, Dict, List, Optional, Union
from augly.text import augmenters as a, utils as txtutils
from augly.utils import (
CONTRACTIONS_MAPPING,
FUN_FONTS_PATH,
GENDERED_WORDS_MAPPING,
MISSPELLING_DICTIONARY_PATH,
UNICODE_MAPPING_PATH,
)
The provided code snippet includes necessary dependencies for implementing the `get_baseline` function. Write a Python function `def get_baseline( texts: Union[str, List[str]], metadata: Optional[List[Dict[str, Any]]] = None, ) -> Union[str, List[str]]` to solve the following problem:
Generates a baseline by tokenizing and detokenizing the text @param texts: a string or a list of text documents to be augmented @param metadata: if set to be a list, metadata about the function execution including its name, the source & dest length, etc. will be appended to the inputted list. If set to None, no metadata will be appended or returned @returns: the list of augmented text documents
Here is the function:
def get_baseline(
    texts: Union[str, List[str]],
    metadata: Optional[List[Dict[str, Any]]] = None,
) -> Union[str, List[str]]:
    """
    Produces a baseline "augmentation" by simply tokenizing and then
    detokenizing each input text, leaving the content itself unchanged
    @param texts: a single text document or a list of documents to process
    @param metadata: when a list is supplied, details about this function call
        (its name, source & dest lengths, etc.) are appended to it; when None,
        no metadata is collected or returned
    @returns: the augmented text document(s)
    """
    meta_kwargs = txtutils.get_func_kwargs(metadata, locals())
    aug_texts = a.BaselineAugmenter().augment(texts, 1)
    txtutils.get_metadata(
        metadata=metadata,
        function_name="get_baseline",
        aug_texts=aug_texts,
        **meta_kwargs,
    )
    return aug_texts
160,612 | from copy import deepcopy
from typing import Any, Callable, Dict, List, Optional, Union
from augly.text import augmenters as a, utils as txtutils
from augly.utils import (
CONTRACTIONS_MAPPING,
FUN_FONTS_PATH,
GENDERED_WORDS_MAPPING,
MISSPELLING_DICTIONARY_PATH,
UNICODE_MAPPING_PATH,
)
The provided code snippet includes necessary dependencies for implementing the `insert_punctuation_chars` function. Write a Python function `def insert_punctuation_chars( texts: Union[str, List[str]], granularity: str = "all", cadence: float = 1.0, vary_chars: bool = False, metadata: Optional[List[Dict[str, Any]]] = None, ) -> Union[str, List[str]]` to solve the following problem:
Inserts punctuation characters in each input text @param texts: a string or a list of text documents to be augmented @param granularity: 'all' or 'word' -- if 'word', a new char is picked and the cadence resets for each word in the text @param cadence: how frequent (i.e. between this many characters) to insert a punctuation character. Must be at least 1.0. Non-integer values are used as an 'average' cadence @param vary_chars: if true, picks a different punctuation char each time one is used instead of just one per word/text @param metadata: if set to be a list, metadata about the function execution including its name, the source & dest length, etc. will be appended to the inputted list. If set to None, no metadata will be appended or returned @returns: the list of augmented texts
Here is the function:
def insert_punctuation_chars(
    texts: Union[str, List[str]],
    granularity: str = "all",
    cadence: float = 1.0,
    vary_chars: bool = False,
    metadata: Optional[List[Dict[str, Any]]] = None,
) -> Union[str, List[str]]:
    """
    Augments each input text by inserting punctuation characters into it
    @param texts: a single text document or a list of documents to process
    @param granularity: either 'all' or 'word'; with 'word', the cadence
        restarts and a new character is chosen for every word in the text
    @param cadence: a punctuation character is inserted once every this many
        characters; must be at least 1.0, and fractional values act as an
        'average' insertion rate
    @param vary_chars: when true, a fresh punctuation character is drawn for
        every insertion instead of reusing a single one per word/text
    @param metadata: when a list is supplied, details about this function call
        (its name, source & dest lengths, etc.) are appended to it; when None,
        no metadata is collected or returned
    @returns: the augmented text(s)
    """
    meta_kwargs = txtutils.get_func_kwargs(metadata, locals())
    augmenter = a.InsertionAugmenter("punctuation", granularity, cadence, vary_chars)
    aug_texts = augmenter.augment(texts)
    txtutils.get_metadata(
        metadata=metadata,
        function_name="insert_punctuation_chars",
        aug_texts=aug_texts,
        **meta_kwargs,
    )
    return aug_texts
160,613 | from copy import deepcopy
from typing import Any, Callable, Dict, List, Optional, Union
from augly.text import augmenters as a, utils as txtutils
from augly.utils import (
CONTRACTIONS_MAPPING,
FUN_FONTS_PATH,
GENDERED_WORDS_MAPPING,
MISSPELLING_DICTIONARY_PATH,
UNICODE_MAPPING_PATH,
)
The provided code snippet includes necessary dependencies for implementing the `insert_text` function. Write a Python function `def insert_text( texts: Union[str, List[str]], insert_text: List[str], num_insertions: int = 1, insertion_location: str = "random", seed: Optional[int] = 10, metadata: Optional[List[Dict[str, Any]]] = None, ) -> Union[str, List[str]]` to solve the following problem:
Inserts some specified text into the input text a given number of times at a given location @param texts: a string or a list of text documents to be augmented @param insert_text: a list of text to sample from and insert into each text in texts @param num_insertions: the number of times to sample from insert_text and insert @param insertion_location: where to insert the insert_text in the input text; valid values are "prepend", "append", or "random" (inserts at a random index between words in the input text) @param seed: if provided, this will set the random seed to ensure consistency between runs @param metadata: if set to be a list, metadata about the function execution including its name, the source & dest length, etc. will be appended to the inputted list. If set to None, no metadata will be appended or returned @returns: the list of augmented text documents
Here is the function:
def insert_text(
    texts: Union[str, List[str]],
    insert_text: List[str],
    num_insertions: int = 1,
    insertion_location: str = "random",
    seed: Optional[int] = 10,
    metadata: Optional[List[Dict[str, Any]]] = None,
) -> Union[str, List[str]]:
    """
    Inserts specified text into each input text a given number of times at a
    chosen location
    @param texts: a single text document or a list of documents to process
    @param insert_text: a pool of strings to sample from and insert into each
        text in texts
    @param num_insertions: how many times to sample from insert_text and insert
    @param insertion_location: where the sampled text goes; one of "prepend",
        "append", or "random" (a random index between words of the input text)
    @param seed: if provided, sets the random seed so results are consistent
        across runs
    @param metadata: when a list is supplied, details about this function call
        (its name, source & dest lengths, etc.) are appended to it; when None,
        no metadata is collected or returned
    @returns: the augmented text document(s)
    """
    meta_kwargs = txtutils.get_func_kwargs(metadata, locals())
    augmenter = a.InsertTextAugmenter(num_insertions, insertion_location, seed)
    aug_texts = augmenter.augment(texts, insert_text)
    txtutils.get_metadata(
        metadata=metadata,
        function_name="insert_text",
        aug_texts=aug_texts,
        **meta_kwargs,
    )
    return aug_texts
160,614 | from copy import deepcopy
from typing import Any, Callable, Dict, List, Optional, Union
from augly.text import augmenters as a, utils as txtutils
from augly.utils import (
CONTRACTIONS_MAPPING,
FUN_FONTS_PATH,
GENDERED_WORDS_MAPPING,
MISSPELLING_DICTIONARY_PATH,
UNICODE_MAPPING_PATH,
)
The provided code snippet includes necessary dependencies for implementing the `insert_whitespace_chars` function. Write a Python function `def insert_whitespace_chars( texts: Union[str, List[str]], granularity: str = "all", cadence: float = 1.0, vary_chars: bool = False, metadata: Optional[List[Dict[str, Any]]] = None, ) -> Union[str, List[str]]` to solve the following problem:
Inserts whitespace characters in each input text @param texts: a string or a list of text documents to be augmented @param granularity: 'all' or 'word' -- if 'word', a new char is picked and the cadence resets for each word in the text @param cadence: how frequent (i.e. between this many characters) to insert a whitespace character. Must be at least 1.0. Non-integer values are used as an 'average' cadence @param vary_chars: if true, picks a different whitespace char each time one is used instead of just one per word/text @param metadata: if set to be a list, metadata about the function execution including its name, the source & dest length, etc. will be appended to the inputted list. If set to None, no metadata will be appended or returned @returns: the list of augmented texts
Here is the function:
def insert_whitespace_chars(
    texts: Union[str, List[str]],
    granularity: str = "all",
    cadence: float = 1.0,
    vary_chars: bool = False,
    metadata: Optional[List[Dict[str, Any]]] = None,
) -> Union[str, List[str]]:
    """
    Augments each input text by inserting whitespace characters into it
    @param texts: a single text document or a list of documents to process
    @param granularity: either 'all' or 'word'; with 'word', the cadence
        restarts and a new character is chosen for every word in the text
    @param cadence: a whitespace character is inserted once every this many
        characters; must be at least 1.0, and fractional values act as an
        'average' insertion rate
    @param vary_chars: when true, a fresh whitespace character is drawn for
        every insertion instead of reusing a single one per word/text
    @param metadata: when a list is supplied, details about this function call
        (its name, source & dest lengths, etc.) are appended to it; when None,
        no metadata is collected or returned
    @returns: the augmented text(s)
    """
    meta_kwargs = txtutils.get_func_kwargs(metadata, locals())
    augmenter = a.InsertionAugmenter("whitespace", granularity, cadence, vary_chars)
    aug_texts = augmenter.augment(texts)
    txtutils.get_metadata(
        metadata=metadata,
        function_name="insert_whitespace_chars",
        aug_texts=aug_texts,
        **meta_kwargs,
    )
    return aug_texts
160,615 | from copy import deepcopy
from typing import Any, Callable, Dict, List, Optional, Union
from augly.text import augmenters as a, utils as txtutils
from augly.utils import (
CONTRACTIONS_MAPPING,
FUN_FONTS_PATH,
GENDERED_WORDS_MAPPING,
MISSPELLING_DICTIONARY_PATH,
UNICODE_MAPPING_PATH,
)
The provided code snippet includes necessary dependencies for implementing the `insert_zero_width_chars` function. Write a Python function `def insert_zero_width_chars( texts: Union[str, List[str]], granularity: str = "all", cadence: float = 1.0, vary_chars: bool = False, metadata: Optional[List[Dict[str, Any]]] = None, ) -> Union[str, List[str]]` to solve the following problem:
Inserts zero-width characters in each input text @param texts: a string or a list of text documents to be augmented @param granularity: 'all' or 'word' -- if 'word', a new char is picked and the cadence resets for each word in the text @param cadence: how frequent (i.e. between this many characters) to insert a zero-width character. Must be at least 1.0. Non-integer values are used as an 'average' cadence @param vary_chars: if true, picks a different zero-width char each time one is used instead of just one per word/text @param metadata: if set to be a list, metadata about the function execution including its name, the source & dest length, etc. will be appended to the inputted list. If set to None, no metadata will be appended or returned @returns: the list of augmented texts
Here is the function:
def insert_zero_width_chars(
    texts: Union[str, List[str]],
    granularity: str = "all",
    cadence: float = 1.0,
    vary_chars: bool = False,
    metadata: Optional[List[Dict[str, Any]]] = None,
) -> Union[str, List[str]]:
    """
    Augments each input text by inserting zero-width characters into it
    @param texts: a single text document or a list of documents to process
    @param granularity: either 'all' or 'word'; with 'word', the cadence
        restarts and a new character is chosen for every word in the text
    @param cadence: a zero-width character is inserted once every this many
        characters; must be at least 1.0, and fractional values act as an
        'average' insertion rate
    @param vary_chars: when true, a fresh zero-width character is drawn for
        every insertion instead of reusing a single one per word/text
    @param metadata: when a list is supplied, details about this function call
        (its name, source & dest lengths, etc.) are appended to it; when None,
        no metadata is collected or returned
    @returns: the augmented text(s)
    """
    meta_kwargs = txtutils.get_func_kwargs(metadata, locals())
    augmenter = a.InsertionAugmenter("zero_width", granularity, cadence, vary_chars)
    aug_texts = augmenter.augment(texts)
    txtutils.get_metadata(
        metadata=metadata,
        function_name="insert_zero_width_chars",
        aug_texts=aug_texts,
        **meta_kwargs,
    )
    return aug_texts
160,616 | from copy import deepcopy
from typing import Any, Callable, Dict, List, Optional, Union
from augly.text import augmenters as a, utils as txtutils
from augly.utils import (
CONTRACTIONS_MAPPING,
FUN_FONTS_PATH,
GENDERED_WORDS_MAPPING,
MISSPELLING_DICTIONARY_PATH,
UNICODE_MAPPING_PATH,
)
The provided code snippet includes necessary dependencies for implementing the `merge_words` function. Write a Python function `def merge_words( texts: Union[str, List[str]], aug_word_p: float = 0.3, min_char: int = 2, aug_word_min: int = 1, aug_word_max: int = 1000, n: int = 1, priority_words: Optional[List[str]] = None, metadata: Optional[List[Dict[str, Any]]] = None, ) -> Union[str, List[str]]` to solve the following problem:
Merges words in the text together @param texts: a string or a list of text documents to be augmented @param aug_word_p: probability of words to be augmented @param min_char: minimum # of characters in a word to be merged @param aug_word_min: minimum # of words to be augmented @param aug_word_max: maximum # of words to be augmented @param n: number of augmentations to be performed for each text @param priority_words: list of target words that the augmenter should prioritize to augment first @param metadata: if set to be a list, metadata about the function execution including its name, the source & dest length, etc. will be appended to the inputted list. If set to None, no metadata will be appended or returned @returns: the list of augmented text documents
Here is the function:
def merge_words(
    texts: Union[str, List[str]],
    aug_word_p: float = 0.3,
    min_char: int = 2,
    aug_word_min: int = 1,
    aug_word_max: int = 1000,
    n: int = 1,
    priority_words: Optional[List[str]] = None,
    metadata: Optional[List[Dict[str, Any]]] = None,
) -> Union[str, List[str]]:
    """
    Joins adjacent words in the text together (merging is implemented by
    deleting the separating whitespace)
    @param texts: a single text document or a list of documents to process
    @param aug_word_p: probability that a given word is augmented
    @param min_char: a word must have at least this many characters to be merged
    @param aug_word_min: lower bound on the number of words augmented
    @param aug_word_max: upper bound on the number of words augmented
    @param n: how many augmented versions to produce per text
    @param priority_words: target words the augmenter should augment first
    @param metadata: when a list is supplied, details about this function call
        (its name, source & dest lengths, etc.) are appended to it; when None,
        no metadata is collected or returned
    @returns: the augmented text document(s)
    """
    meta_kwargs = txtutils.get_func_kwargs(metadata, locals())
    # "delete" is the augmenter action that removes the space between words
    augmenter = a.WordsAugmenter(
        "delete", min_char, aug_word_min, aug_word_max, aug_word_p, priority_words
    )
    aug_texts = augmenter.augment(texts, n)
    txtutils.get_metadata(
        metadata=metadata,
        function_name="merge_words",
        aug_texts=aug_texts,
        **meta_kwargs,
    )
    return aug_texts
160,617 | from copy import deepcopy
from typing import Any, Callable, Dict, List, Optional, Union
from augly.text import augmenters as a, utils as txtutils
from augly.utils import (
CONTRACTIONS_MAPPING,
FUN_FONTS_PATH,
GENDERED_WORDS_MAPPING,
MISSPELLING_DICTIONARY_PATH,
UNICODE_MAPPING_PATH,
)
The provided code snippet includes necessary dependencies for implementing the `replace_bidirectional` function. Write a Python function `def replace_bidirectional( texts: Union[str, List[str]], granularity: str = "all", split_word: bool = False, metadata: Optional[List[Dict[str, Any]]] = None, ) -> Union[str, List[str]]` to solve the following problem:
Reverses each word (or part of the word) in each input text and uses bidirectional marks to render the text in its original order. It reverses each word separately which keeps the word order even when a line wraps @param texts: a string or a list of text documents to be augmented @param granularity: the level at which the font is applied; this must be either 'word' or 'all' @param split_word: if true and granularity is 'word', reverses only the second half of each word @param metadata: if set to be a list, metadata about the function execution including its name, the source & dest length, etc. will be appended to the inputted list. If set to None, no metadata will be appended or returned @returns: the list of augmented texts
Here is the function:
def replace_bidirectional(
    texts: Union[str, List[str]],
    granularity: str = "all",
    split_word: bool = False,
    metadata: Optional[List[Dict[str, Any]]] = None,
) -> Union[str, List[str]]:
    """
    Reverses each word (or part of each word) in the input texts, then uses
    bidirectional marks so the text still renders in its original order.
    Because each word is reversed separately, word order is preserved even
    when a line wraps
    @param texts: a single text document or a list of documents to process
    @param granularity: level at which reversal is applied; must be either
        'word' or 'all'
    @param split_word: if true and granularity is 'word', only the second half
        of each word is reversed
    @param metadata: when a list is supplied, details about this function call
        (its name, source & dest lengths, etc.) are appended to it; when None,
        no metadata is collected or returned
    @returns: the augmented text(s)
    """
    meta_kwargs = txtutils.get_func_kwargs(metadata, locals())
    aug_texts = a.BidirectionalAugmenter(granularity, split_word).augment(texts)
    txtutils.get_metadata(
        metadata=metadata,
        function_name="replace_bidirectional",
        aug_texts=aug_texts,
        **meta_kwargs,
    )
    return aug_texts
160,618 | from copy import deepcopy
from typing import Any, Callable, Dict, List, Optional, Union
from augly.text import augmenters as a, utils as txtutils
from augly.utils import (
CONTRACTIONS_MAPPING,
FUN_FONTS_PATH,
GENDERED_WORDS_MAPPING,
MISSPELLING_DICTIONARY_PATH,
UNICODE_MAPPING_PATH,
)
The provided code snippet includes necessary dependencies for implementing the `replace_fun_fonts` function. Write a Python function `def replace_fun_fonts( texts: Union[str, List[str]], aug_p: float = 0.3, aug_min: int = 1, aug_max: int = 10000, granularity: str = "all", vary_fonts: bool = False, fonts_path: str = FUN_FONTS_PATH, n: int = 1, priority_words: Optional[List[str]] = None, metadata: Optional[List[Dict[str, Any]]] = None, ) -> Union[str, List[str]]` to solve the following problem:
Replaces words or characters depending on the granularity with fun fonts applied @param texts: a string or a list of text documents to be augmented @param aug_p: probability of words to be augmented @param aug_min: minimum # of words to be augmented @param aug_max: maximum # of words to be augmented @param granularity: the level at which the font is applied; this must be be either word, char, or all @param vary_fonts: whether or not to switch font in each replacement @param fonts_path: iopath uri where the fonts are stored @param n: number of augmentations to be performed for each text @param priority_words: list of target words that the augmenter should prioritize to augment first @param metadata: if set to be a list, metadata about the function execution including its name, the source & dest length, etc. will be appended to the inputted list. If set to None, no metadata will be appended or returned @returns: the list of augmented text documents
Here is the function:
def replace_fun_fonts(
    texts: Union[str, List[str]],
    aug_p: float = 0.3,
    aug_min: int = 1,
    aug_max: int = 10000,
    granularity: str = "all",
    vary_fonts: bool = False,
    fonts_path: str = FUN_FONTS_PATH,
    n: int = 1,
    priority_words: Optional[List[str]] = None,
    metadata: Optional[List[Dict[str, Any]]] = None,
) -> Union[str, List[str]]:
    """
    Applies fun fonts to words or characters in each text, depending on the
    chosen granularity
    @param texts: a single text document or a list of documents to process
    @param aug_p: probability that a given unit is augmented
    @param aug_min: lower bound on the number of units augmented
    @param aug_max: upper bound on the number of units augmented
    @param granularity: level at which the font is applied; must be one of
        'word', 'char', or 'all'
    @param vary_fonts: whether to switch to a different font on each replacement
    @param fonts_path: iopath uri pointing to the stored fonts
    @param n: how many augmented versions to produce per text
    @param priority_words: target words the augmenter should augment first
    @param metadata: when a list is supplied, details about this function call
        (its name, source & dest lengths, etc.) are appended to it; when None,
        no metadata is collected or returned
    @returns: the augmented text document(s)
    """
    meta_kwargs = txtutils.get_func_kwargs(metadata, locals())
    augmenter = a.FunFontsAugmenter(
        granularity, aug_min, aug_max, aug_p, vary_fonts, fonts_path, priority_words
    )
    aug_texts = augmenter.augment(texts, n)
    txtutils.get_metadata(
        metadata=metadata,
        function_name="replace_fun_fonts",
        aug_texts=aug_texts,
        **meta_kwargs,
    )
    return aug_texts
160,619 | from copy import deepcopy
from typing import Any, Callable, Dict, List, Optional, Union
from augly.text import augmenters as a, utils as txtutils
from augly.utils import (
CONTRACTIONS_MAPPING,
FUN_FONTS_PATH,
GENDERED_WORDS_MAPPING,
MISSPELLING_DICTIONARY_PATH,
UNICODE_MAPPING_PATH,
)
The provided code snippet includes necessary dependencies for implementing the `replace_similar_chars` function. Write a Python function `def replace_similar_chars( texts: Union[str, List[str]], aug_char_p: float = 0.3, aug_word_p: float = 0.3, min_char: int = 2, aug_char_min: int = 1, aug_char_max: int = 1000, aug_word_min: int = 1, aug_word_max: int = 1000, n: int = 1, mapping_path: Optional[str] = None, priority_words: Optional[List[str]] = None, metadata: Optional[List[Dict[str, Any]]] = None, ) -> Union[str, List[str]]` to solve the following problem:
Replaces letters in each text with similar characters @param texts: a string or a list of text documents to be augmented @param aug_char_p: probability of letters to be replaced in each word @param aug_word_p: probability of words to be augmented @param min_char: minimum # of letters in a word for a valid augmentation @param aug_char_min: minimum # of letters to be replaced in each word @param aug_char_max: maximum # of letters to be replaced in each word @param aug_word_min: minimum # of words to be augmented @param aug_word_max: maximum # of words to be augmented @param n: number of augmentations to be performed for each text @param mapping_path: iopath uri where the mapping is stored @param priority_words: list of target words that the augmenter should prioritize to augment first @param metadata: if set to be a list, metadata about the function execution including its name, the source & dest length, etc. will be appended to the inputted list. If set to None, no metadata will be appended or returned @returns: the list of augmented text documents
Here is the function:
def replace_similar_chars(
    texts: Union[str, List[str]],
    aug_char_p: float = 0.3,
    aug_word_p: float = 0.3,
    min_char: int = 2,
    aug_char_min: int = 1,
    aug_char_max: int = 1000,
    aug_word_min: int = 1,
    aug_word_max: int = 1000,
    n: int = 1,
    mapping_path: Optional[str] = None,
    priority_words: Optional[List[str]] = None,
    metadata: Optional[List[Dict[str, Any]]] = None,
) -> Union[str, List[str]]:
    """
    Swaps letters in each text for visually similar characters
    @param texts: a single text document or a list of documents to process
    @param aug_char_p: probability that a letter within a word is replaced
    @param aug_word_p: probability that a given word is augmented
    @param min_char: a word must have at least this many letters to be eligible
    @param aug_char_min: lower bound on letters replaced within each word
    @param aug_char_max: upper bound on letters replaced within each word
    @param aug_word_min: lower bound on the number of words augmented
    @param aug_word_max: upper bound on the number of words augmented
    @param n: how many augmented versions to produce per text
    @param mapping_path: iopath uri where the replacement mapping is stored
    @param priority_words: target words the augmenter should augment first
    @param metadata: when a list is supplied, details about this function call
        (its name, source & dest lengths, etc.) are appended to it; when None,
        no metadata is collected or returned
    @returns: the augmented text document(s)
    """
    meta_kwargs = txtutils.get_func_kwargs(metadata, locals())
    # char-level bounds first, then word-level bounds, per the augmenter's API
    augmenter = a.LetterReplacementAugmenter(
        min_char, aug_char_min, aug_char_max, aug_char_p,
        aug_word_min, aug_word_max, aug_word_p,
        mapping_path, priority_words,
    )
    aug_texts = augmenter.augment(texts, n)
    txtutils.get_metadata(
        metadata=metadata,
        function_name="replace_similar_chars",
        aug_texts=aug_texts,
        **meta_kwargs,
    )
    return aug_texts
160,620 | from copy import deepcopy
from typing import Any, Callable, Dict, List, Optional, Union
from augly.text import augmenters as a, utils as txtutils
from augly.utils import (
CONTRACTIONS_MAPPING,
FUN_FONTS_PATH,
GENDERED_WORDS_MAPPING,
MISSPELLING_DICTIONARY_PATH,
UNICODE_MAPPING_PATH,
)
The provided code snippet includes necessary dependencies for implementing the `replace_similar_unicode_chars` function. Write a Python function `def replace_similar_unicode_chars( texts: Union[str, List[str]], aug_char_p: float = 0.3, aug_word_p: float = 0.3, min_char: int = 2, aug_char_min: int = 1, aug_char_max: int = 1000, aug_word_min: int = 1, aug_word_max: int = 1000, n: int = 1, mapping_path: str = UNICODE_MAPPING_PATH, priority_words: Optional[List[str]] = None, metadata: Optional[List[Dict[str, Any]]] = None, ) -> Union[str, List[str]]` to solve the following problem:
Replaces letters in each text with similar unicodes @param texts: a string or a list of text documents to be augmented @param aug_char_p: probability of letters to be replaced in each word @param aug_word_p: probability of words to be augmented @param min_char: minimum # of letters in a word for a valid augmentation @param aug_char_min: minimum # of letters to be replaced in each word @param aug_char_max: maximum # of letters to be replaced in each word @param aug_word_min: minimum # of words to be augmented @param aug_word_max: maximum # of words to be augmented @param n: number of augmentations to be performed for each text @param mapping_path: iopath uri where the mapping is stored @param priority_words: list of target words that the augmenter should prioritize to augment first @param metadata: if set to be a list, metadata about the function execution including its name, the source & dest length, etc. will be appended to the inputted list. If set to None, no metadata will be appended or returned @returns: the list of augmented text documents
Here is the function:
def replace_similar_unicode_chars(
    texts: Union[str, List[str]],
    aug_char_p: float = 0.3,
    aug_word_p: float = 0.3,
    min_char: int = 2,
    aug_char_min: int = 1,
    aug_char_max: int = 1000,
    aug_word_min: int = 1,
    aug_word_max: int = 1000,
    n: int = 1,
    mapping_path: str = UNICODE_MAPPING_PATH,
    priority_words: Optional[List[str]] = None,
    metadata: Optional[List[Dict[str, Any]]] = None,
) -> Union[str, List[str]]:
    """
    Substitutes letters in each text with similar-looking unicode characters

    @param texts: a string or a list of text documents to be augmented
    @param aug_char_p: probability of letters to be replaced in each word
    @param aug_word_p: probability of words to be augmented
    @param min_char: minimum # of letters in a word for a valid augmentation
    @param aug_char_min: minimum # of letters to be replaced in each word
    @param aug_char_max: maximum # of letters to be replaced in each word
    @param aug_word_min: minimum # of words to be augmented
    @param aug_word_max: maximum # of words to be augmented
    @param n: number of augmentations to be performed for each text
    @param mapping_path: iopath uri where the unicode mapping is stored
    @param priority_words: list of target words that the augmenter should
        prioritize to augment first
    @param metadata: if set to be a list, metadata about the function execution
        including its name, the source & dest length, etc. will be appended to
        the inputted list. If set to None, no metadata will be appended or returned
    @returns: the list of augmented text documents
    """
    # Snapshot the call arguments before any additional locals are introduced
    func_kwargs = txtutils.get_func_kwargs(metadata, locals())

    aug_texts = a.LetterReplacementAugmenter(
        min_char,
        aug_char_min,
        aug_char_max,
        aug_char_p,
        aug_word_min,
        aug_word_max,
        aug_word_p,
        mapping_path,
        priority_words,
    ).augment(texts, n)

    txtutils.get_metadata(
        function_name="replace_similar_unicode_chars",
        metadata=metadata,
        aug_texts=aug_texts,
        **func_kwargs,
    )
    return aug_texts
160,621 | from copy import deepcopy
from typing import Any, Callable, Dict, List, Optional, Union
from augly.text import augmenters as a, utils as txtutils
from augly.utils import (
CONTRACTIONS_MAPPING,
FUN_FONTS_PATH,
GENDERED_WORDS_MAPPING,
MISSPELLING_DICTIONARY_PATH,
UNICODE_MAPPING_PATH,
)
The provided code snippet includes necessary dependencies for implementing the `replace_text` function. Write a Python function `def replace_text( texts: Union[str, List[str]], replace_text: Union[str, Dict[str, str]], metadata: Optional[List[Dict[str, Any]]] = None, ) -> Union[str, List[str]]` to solve the following problem:
Replaces the input text entirely with some specified text @param texts: a string or a list of text documents to be augmented @param replace_text: specifies the text to replace the input text with, either as a string or a mapping from input text to new text @param metadata: if set to be a list, metadata about the function execution including its name, the source & dest length, etc. will be appended to the inputted list. If set to None, no metadata will be appended or returned @returns: a string or a list of augmented text documents
Here is the function:
def replace_text(
    texts: Union[str, List[str]],
    replace_text: Union[str, Dict[str, str]],
    metadata: Optional[List[Dict[str, Any]]] = None,
) -> Union[str, List[str]]:
    """
    Swaps out the input text entirely for some specified replacement text

    @param texts: a string or a list of text documents to be augmented
    @param replace_text: the text to replace the input text with, given either
        as a single string or as a mapping from input text to new text
    @param metadata: if set to be a list, metadata about the function execution
        including its name, the source & dest length, etc. will be appended to
        the inputted list. If set to None, no metadata will be appended or returned
    @returns: a string or a list of augmented text documents
    """
    # Snapshot the call arguments before any additional locals are introduced
    func_kwargs = txtutils.get_func_kwargs(metadata, locals())

    aug_texts = a.TextReplacementAugmenter().augment(texts, replace_text)

    txtutils.get_metadata(
        function_name="replace_text",
        metadata=metadata,
        aug_texts=aug_texts,
        **func_kwargs,
    )
    return aug_texts
160,622 | from copy import deepcopy
from typing import Any, Callable, Dict, List, Optional, Union
from augly.text import augmenters as a, utils as txtutils
from augly.utils import (
CONTRACTIONS_MAPPING,
FUN_FONTS_PATH,
GENDERED_WORDS_MAPPING,
MISSPELLING_DICTIONARY_PATH,
UNICODE_MAPPING_PATH,
)
The provided code snippet includes necessary dependencies for implementing the `replace_upside_down` function. Write a Python function `def replace_upside_down( texts: Union[str, List[str]], aug_p: float = 0.3, aug_min: int = 1, aug_max: int = 1000, granularity: str = "all", n: int = 1, metadata: Optional[List[Dict[str, Any]]] = None, ) -> Union[str, List[str]]` to solve the following problem:
Flips words in the text upside down depending on the granularity @param texts: a string or a list of text documents to be augmented @param aug_p: probability of words to be augmented @param aug_min: minimum # of words to be augmented @param aug_max: maximum # of words to be augmented @param granularity: the level at which the font is applied; this must be either word, char, or all @param n: number of augmentations to be performed for each text @param metadata: if set to be a list, metadata about the function execution including its name, the source & dest length, etc. will be appended to the inputted list. If set to None, no metadata will be appended or returned @returns: the list of augmented text documents
Here is the function:
def replace_upside_down(
    texts: Union[str, List[str]],
    aug_p: float = 0.3,
    aug_min: int = 1,
    aug_max: int = 1000,
    granularity: str = "all",
    n: int = 1,
    metadata: Optional[List[Dict[str, Any]]] = None,
) -> Union[str, List[str]]:
    """
    Turns parts of the text upside down, at the level given by `granularity`

    @param texts: a string or a list of text documents to be augmented
    @param aug_p: probability of words to be augmented
    @param aug_min: minimum # of words to be augmented
    @param aug_max: maximum # of words to be augmented
    @param granularity: the level at which the flipping is applied; this must
        be either word, char, or all
    @param n: number of augmentations to be performed for each text
    @param metadata: if set to be a list, metadata about the function execution
        including its name, the source & dest length, etc. will be appended to
        the inputted list. If set to None, no metadata will be appended or returned
    @returns: the list of augmented text documents
    """
    # Snapshot the call arguments before any additional locals are introduced
    func_kwargs = txtutils.get_func_kwargs(metadata, locals())

    aug_texts = a.UpsideDownAugmenter(
        granularity, aug_min, aug_max, aug_p
    ).augment(texts, n)

    txtutils.get_metadata(
        function_name="replace_upside_down",
        metadata=metadata,
        aug_texts=aug_texts,
        **func_kwargs,
    )
    return aug_texts
160,623 | from copy import deepcopy
from typing import Any, Callable, Dict, List, Optional, Union
from augly.text import augmenters as a, utils as txtutils
from augly.utils import (
CONTRACTIONS_MAPPING,
FUN_FONTS_PATH,
GENDERED_WORDS_MAPPING,
MISSPELLING_DICTIONARY_PATH,
UNICODE_MAPPING_PATH,
)
The provided code snippet includes necessary dependencies for implementing the `replace_words` function. Write a Python function `def replace_words( texts: Union[str, List[str]], aug_word_p: float = 0.3, aug_word_min: int = 1, aug_word_max: int = 1000, n: int = 1, mapping: Optional[Union[str, Dict[str, Any]]] = None, priority_words: Optional[List[str]] = None, ignore_words: Optional[List[str]] = None, metadata: Optional[List[Dict[str, Any]]] = None, ) -> Union[str, List[str]]` to solve the following problem:
Replaces words in each text based on a given mapping @param texts: a string or a list of text documents to be augmented @param aug_word_p: probability of words to be augmented @param aug_word_min: minimum # of words to be augmented @param aug_word_max: maximum # of words to be augmented @param n: number of augmentations to be performed for each text @param mapping: either a dictionary representing the mapping or an iopath uri where the mapping is stored @param priority_words: list of target words that the augmenter should prioritize to augment first @param ignore_words: list of words that the augmenter should not augment @param metadata: if set to be a list, metadata about the function execution including its name, the source & dest length, etc. will be appended to the inputted list. If set to None, no metadata will be appended or returned @returns: the list of augmented text documents
Here is the function:
def replace_words(
    texts: Union[str, List[str]],
    aug_word_p: float = 0.3,
    aug_word_min: int = 1,
    aug_word_max: int = 1000,
    n: int = 1,
    mapping: Optional[Union[str, Dict[str, Any]]] = None,
    priority_words: Optional[List[str]] = None,
    ignore_words: Optional[List[str]] = None,
    metadata: Optional[List[Dict[str, Any]]] = None,
) -> Union[str, List[str]]:
    """
    Substitutes words in each text according to a given mapping

    @param texts: a string or a list of text documents to be augmented
    @param aug_word_p: probability of words to be augmented
    @param aug_word_min: minimum # of words to be augmented
    @param aug_word_max: maximum # of words to be augmented
    @param n: number of augmentations to be performed for each text
    @param mapping: either a dictionary representing the mapping or an iopath
        uri where the mapping is stored
    @param priority_words: list of target words that the augmenter should
        prioritize to augment first
    @param ignore_words: list of words that the augmenter should not augment
    @param metadata: if set to be a list, metadata about the function execution
        including its name, the source & dest length, etc. will be appended to
        the inputted list. If set to None, no metadata will be appended or returned
    @returns: the list of augmented text documents
    """
    # Snapshot the call arguments before any additional locals are introduced
    func_kwargs = txtutils.get_func_kwargs(metadata, locals())

    aug_texts = a.WordReplacementAugmenter(
        aug_word_min,
        aug_word_max,
        aug_word_p,
        mapping,
        priority_words,
        ignore_words,
    ).augment(texts, n)

    txtutils.get_metadata(
        function_name="replace_words",
        metadata=metadata,
        aug_texts=aug_texts,
        **func_kwargs,
    )
    return aug_texts
160,624 | from copy import deepcopy
from typing import Any, Callable, Dict, List, Optional, Union
from augly.text import augmenters as a, utils as txtutils
from augly.utils import (
CONTRACTIONS_MAPPING,
FUN_FONTS_PATH,
GENDERED_WORDS_MAPPING,
MISSPELLING_DICTIONARY_PATH,
UNICODE_MAPPING_PATH,
)
The provided code snippet includes necessary dependencies for implementing the `simulate_typos` function. Write a Python function `def simulate_typos( texts: Union[str, List[str]], aug_char_p: float = 0.3, aug_word_p: float = 0.3, min_char: int = 2, aug_char_min: int = 1, aug_char_max: int = 1, aug_word_min: int = 1, aug_word_max: int = 1000, n: int = 1, typo_type: str = "all", misspelling_dict_path: Optional[str] = MISSPELLING_DICTIONARY_PATH, max_typo_length: int = 1, priority_words: Optional[List[str]] = None, metadata: Optional[List[Dict[str, Any]]] = None, ) -> Union[str, List[str]]` to solve the following problem:
Simulates typos in each text using misspellings, keyboard distance, and swapping. You can specify a typo_type: charmix, which does a combination of character-level modifications (delete, insert, substitute, & swap); keyboard, which swaps characters which those close to each other on the QWERTY keyboard; misspelling, which replaces words with misspellings defined in a dictionary file; or all, which will apply a random combination of all 4 @param texts: a string or a list of text documents to be augmented @param aug_char_p: probability of letters to be replaced in each word; This is only applicable for keyboard distance and swapping @param aug_word_p: probability of words to be augmented @param min_char: minimum # of letters in a word for a valid augmentation; This is only applicable for keyboard distance and swapping @param aug_char_min: minimum # of letters to be replaced/swapped in each word; This is only applicable for keyboard distance and swapping @param aug_char_max: maximum # of letters to be replaced/swapped in each word; This is only applicable for keyboard distance and swapping @param aug_word_min: minimum # of words to be augmented @param aug_word_max: maximum # of words to be augmented @param n: number of augmentations to be performed for each text @param typo_type: the type of typos to apply to the text; valid values are "misspelling", "keyboard", "charmix", or "all" @param misspelling_dict_path: iopath uri where the misspelling dictionary is stored; must be specified if typo_type is "misspelling" or "all", but otherwise can be None @param max_typo_length: the words in the misspelling dictionary will be checked for matches in the mapping up to this length; i.e. 
if 'max_typo_length' is 3 then every substring of 2 *and* 3 words will be checked @param priority_words: list of target words that the augmenter should prioritize to augment first @param metadata: if set to be a list, metadata about the function execution including its name, the source & dest length, etc. will be appended to the inputted list. If set to None, no metadata will be appended or returned @returns: the list of augmented text documents
Here is the function:
def simulate_typos(
    texts: Union[str, List[str]],
    aug_char_p: float = 0.3,
    aug_word_p: float = 0.3,
    min_char: int = 2,
    aug_char_min: int = 1,
    aug_char_max: int = 1,
    aug_word_min: int = 1,
    aug_word_max: int = 1000,
    n: int = 1,
    typo_type: str = "all",
    misspelling_dict_path: Optional[str] = MISSPELLING_DICTIONARY_PATH,
    max_typo_length: int = 1,
    priority_words: Optional[List[str]] = None,
    metadata: Optional[List[Dict[str, Any]]] = None,
) -> Union[str, List[str]]:
    """
    Introduces realistic typos into each text.

    The kind of typo applied is controlled by `typo_type`: "charmix" applies a
    mix of character-level edits (delete, insert, substitute & swap);
    "keyboard" substitutes characters with ones nearby on the QWERTY keyboard;
    "misspelling" swaps words for misspellings defined in a dictionary file;
    and "all" applies a random combination of the above.

    @param texts: a string or a list of text documents to be augmented
    @param aug_char_p: probability of letters to be replaced in each word;
        only applicable for keyboard distance and swapping
    @param aug_word_p: probability of words to be augmented
    @param min_char: minimum # of letters in a word for a valid augmentation;
        only applicable for keyboard distance and swapping
    @param aug_char_min: minimum # of letters to be replaced/swapped in each
        word; only applicable for keyboard distance and swapping
    @param aug_char_max: maximum # of letters to be replaced/swapped in each
        word; only applicable for keyboard distance and swapping
    @param aug_word_min: minimum # of words to be augmented
    @param aug_word_max: maximum # of words to be augmented
    @param n: number of augmentations to be performed for each text
    @param typo_type: the type of typos to apply to the text; valid values are
        "misspelling", "keyboard", "charmix", or "all"
    @param misspelling_dict_path: iopath uri where the misspelling dictionary
        is stored; must be specified if typo_type is "misspelling" or "all",
        but otherwise can be None
    @param max_typo_length: the words in the misspelling dictionary will be
        checked for matches in the mapping up to this length; i.e. if
        'max_typo_length' is 3 then every substring of 2 *and* 3 words will be
        checked
    @param priority_words: list of target words that the augmenter should
        prioritize to augment first
    @param metadata: if set to be a list, metadata about the function execution
        including its name, the source & dest length, etc. will be appended to
        the inputted list. If set to None, no metadata will be appended or returned
    @returns: the list of augmented text documents
    """
    # Snapshot the call arguments before any additional locals are introduced
    func_kwargs = txtutils.get_func_kwargs(metadata, locals())

    aug_texts = a.TypoAugmenter(
        min_char,
        aug_char_min,
        aug_char_max,
        aug_char_p,
        aug_word_min,
        aug_word_max,
        aug_word_p,
        typo_type,
        misspelling_dict_path,
        max_typo_length,
        priority_words,
    ).augment(texts, n)

    txtutils.get_metadata(
        function_name="simulate_typos",
        metadata=metadata,
        aug_texts=aug_texts,
        **func_kwargs,
    )
    return aug_texts
if 'max_typo_length' is 3 then every substring of 2 *and* 3 words will be checked @param priority_words: list of target words that the augmenter should prioritize to augment first @param metadata: if set to be a list, metadata about the function execution including its name, the source & dest length, etc. will be appended to the inputted list. If set to None, no metadata will be appended or returned @returns: the list of augmented text documents |
160,625 | from copy import deepcopy
from typing import Any, Callable, Dict, List, Optional, Union
from augly.text import augmenters as a, utils as txtutils
from augly.utils import (
CONTRACTIONS_MAPPING,
FUN_FONTS_PATH,
GENDERED_WORDS_MAPPING,
MISSPELLING_DICTIONARY_PATH,
UNICODE_MAPPING_PATH,
)
The provided code snippet includes necessary dependencies for implementing the `split_words` function. Write a Python function `def split_words( texts: Union[str, List[str]], aug_word_p: float = 0.3, min_char: int = 4, aug_word_min: int = 1, aug_word_max: int = 1000, n: int = 1, priority_words: Optional[List[str]] = None, metadata: Optional[List[Dict[str, Any]]] = None, ) -> Union[str, List[str]]` to solve the following problem:
Splits words in the text into subwords @param texts: a string or a list of text documents to be augmented @param aug_word_p: probability of words to be augmented @param min_char: minimum # of characters in a word for a split @param aug_word_min: minimum # of words to be augmented @param aug_word_max: maximum # of words to be augmented @param n: number of augmentations to be performed for each text @param priority_words: list of target words that the augmenter should prioritize to augment first @param metadata: if set to be a list, metadata about the function execution including its name, the source & dest length, etc. will be appended to the inputted list. If set to None, no metadata will be appended or returned @returns: the list of augmented text documents
Here is the function:
def split_words(
    texts: Union[str, List[str]],
    aug_word_p: float = 0.3,
    min_char: int = 4,
    aug_word_min: int = 1,
    aug_word_max: int = 1000,
    n: int = 1,
    priority_words: Optional[List[str]] = None,
    metadata: Optional[List[Dict[str, Any]]] = None,
) -> Union[str, List[str]]:
    """
    Breaks words in the text apart into subwords

    @param texts: a string or a list of text documents to be augmented
    @param aug_word_p: probability of words to be augmented
    @param min_char: minimum # of characters in a word for a split
    @param aug_word_min: minimum # of words to be augmented
    @param aug_word_max: maximum # of words to be augmented
    @param n: number of augmentations to be performed for each text
    @param priority_words: list of target words that the augmenter should
        prioritize to augment first
    @param metadata: if set to be a list, metadata about the function execution
        including its name, the source & dest length, etc. will be appended to
        the inputted list. If set to None, no metadata will be appended or returned
    @returns: the list of augmented text documents
    """
    # Snapshot the call arguments before any additional locals are introduced
    func_kwargs = txtutils.get_func_kwargs(metadata, locals())

    aug_texts = a.WordsAugmenter(
        "split", min_char, aug_word_min, aug_word_max, aug_word_p, priority_words
    ).augment(texts, n)

    txtutils.get_metadata(
        function_name="split_words",
        metadata=metadata,
        aug_texts=aug_texts,
        **func_kwargs,
    )
    return aug_texts
160,626 | from copy import deepcopy
from typing import Any, Callable, Dict, List, Optional, Union
from augly.text import augmenters as a, utils as txtutils
from augly.utils import (
CONTRACTIONS_MAPPING,
FUN_FONTS_PATH,
GENDERED_WORDS_MAPPING,
MISSPELLING_DICTIONARY_PATH,
UNICODE_MAPPING_PATH,
)
The provided code snippet includes necessary dependencies for implementing the `swap_gendered_words` function. Write a Python function `def swap_gendered_words( texts: Union[str, List[str]], aug_word_p: float = 0.3, aug_word_min: int = 1, aug_word_max: int = 1000, n: int = 1, mapping: Union[str, Dict[str, str]] = GENDERED_WORDS_MAPPING, priority_words: Optional[List[str]] = None, ignore_words: Optional[List[str]] = None, metadata: Optional[List[Dict[str, Any]]] = None, ) -> Union[str, List[str]]` to solve the following problem:
Replaces words in each text based on a provided `mapping`, which can either be a dict already constructed mapping words from one gender to another or a file path to a dict. Note: the logic in this augmentation was originally written by Adina Williams and has been used in influential work, e.g. https://arxiv.org/pdf/2005.00614.pdf @param texts: a string or a list of text documents to be augmented @param aug_word_p: probability of words to be augmented @param aug_word_min: minimum # of words to be augmented @param aug_word_max: maximum # of words to be augmented @param n: number of augmentations to be performed for each text @param mapping: a mapping of words from one gender to another; a mapping can be supplied either directly as a dict or as a filepath to a json file containing the dict @param priority_words: list of target words that the augmenter should prioritize to augment first @param ignore_words: list of words that the augmenter should not augment @param metadata: if set to be a list, metadata about the function execution including its name, the source & dest length, etc. will be appended to the inputted list. If set to None, no metadata will be appended or returned @returns: the list of augmented text documents
Here is the function:
def swap_gendered_words(
    texts: Union[str, List[str]],
    aug_word_p: float = 0.3,
    aug_word_min: int = 1,
    aug_word_max: int = 1000,
    n: int = 1,
    mapping: Union[str, Dict[str, str]] = GENDERED_WORDS_MAPPING,
    priority_words: Optional[List[str]] = None,
    ignore_words: Optional[List[str]] = None,
    metadata: Optional[List[Dict[str, Any]]] = None,
) -> Union[str, List[str]]:
    """
    Substitutes words in each text according to a gender `mapping`, supplied
    either as an already-constructed dict mapping words from one gender to
    another or as a file path to such a dict. Note: the logic in this
    augmentation was originally written by Adina Williams and has been used in
    influential work, e.g. https://arxiv.org/pdf/2005.00614.pdf

    @param texts: a string or a list of text documents to be augmented
    @param aug_word_p: probability of words to be augmented
    @param aug_word_min: minimum # of words to be augmented
    @param aug_word_max: maximum # of words to be augmented
    @param n: number of augmentations to be performed for each text
    @param mapping: a mapping of words from one gender to another; supplied
        either directly as a dict or as a filepath to a json file containing
        the dict
    @param priority_words: list of target words that the augmenter should
        prioritize to augment first
    @param ignore_words: list of words that the augmenter should not augment
    @param metadata: if set to be a list, metadata about the function execution
        including its name, the source & dest length, etc. will be appended to
        the inputted list. If set to None, no metadata will be appended or returned
    @returns: the list of augmented text documents
    """
    # Snapshot the call arguments before any additional locals are introduced
    func_kwargs = txtutils.get_func_kwargs(metadata, locals())

    # Resolve a filepath mapping into the actual dict before augmenting
    resolved_mapping = txtutils.get_gendered_words_mapping(mapping)

    aug_texts = a.WordReplacementAugmenter(
        aug_word_min,
        aug_word_max,
        aug_word_p,
        resolved_mapping,
        priority_words,
        ignore_words,
    ).augment(texts, n)

    txtutils.get_metadata(
        function_name="swap_gendered_words",
        metadata=metadata,
        aug_texts=aug_texts,
        **func_kwargs,
    )
    return aug_texts
160,627 | from typing import Any, Dict, List, Optional, Union
def apply_lambda_intensity(aug_function: str, **kwargs) -> float:
intensity_func = globals().get(f"{aug_function}_intensity")
return intensity_func(**kwargs) if intensity_func else 100.0 | null |
160,628 | from typing import Any, Dict, List, Optional, Union
def char_insertion_intensity_helper(granularity: str, cadence: float) -> float:
    """Shared intensity rule for character-insertion augmentations: everything is
    touched when granularity is "all"; otherwise one in every ``cadence`` units."""
    if granularity == "all":
        return 100.0
    return (1 / cadence) * 100.0


def change_case_intensity(granularity: str, cadence: float, **kwargs) -> float:
    """Intensity of the change-case augmentation, as a percentage in [0, 100]."""
    return char_insertion_intensity_helper(granularity, cadence)
160,629 | from typing import Any, Dict, List, Optional, Union
def contractions_intensity(aug_p: float, **kwargs) -> float:
    """Intensity of the contractions augmentation: the augmentation probability
    expressed as a percentage."""
    percentage = aug_p * 100.0
    return percentage
160,630 | from typing import Any, Dict, List, Optional, Union
def get_baseline_intensity(**kwargs) -> float:
    """Intensity of the baseline "augmentation".

    get_baseline only tokenizes and detokenizes the text (at most inserting
    extra spaces), so its intensity is always zero.
    """
    return 0.0
160,631 | from typing import Any, Dict, List, Optional, Union
def char_insertion_intensity_helper(granularity: str, cadence: float) -> float:
    """Shared intensity rule for character-insertion augmentations: full
    intensity for "all" granularity, else one unit per ``cadence`` units."""
    if granularity == "all":
        return 100.0
    return (1 / cadence) * 100.0


def insert_punctuation_chars_intensity(
    granularity: str, cadence: float, **kwargs
) -> float:
    """Intensity of the insert-punctuation-chars augmentation, in [0, 100]."""
    return char_insertion_intensity_helper(granularity, cadence)
160,632 | from typing import Any, Dict, List, Optional, Union
def insert_text_intensity(num_insertions: int, **kwargs) -> float:
    """
    Intensity of the insert-text augmentation: linear in the number of
    insertions, saturating at 100.0 once ``num_insertions`` reaches 10.
    """
    assert (
        isinstance(num_insertions, int) and num_insertions >= 0
    ), "Expected 'num_insertions' to be a nonnegative integer"
    # 10 insertions (or more) counts as maximal intensity.
    max_insertions = 10
    scaled = (num_insertions / max_insertions) * 100.0
    return min(scaled, 100.0)
160,633 | from typing import Any, Dict, List, Optional, Union
def char_insertion_intensity_helper(granularity: str, cadence: float) -> float:
    """Shared intensity rule for character-insertion augmentations: full
    intensity for "all" granularity, else one unit per ``cadence`` units."""
    # Restored body: the helper was declared with no body here, which is a
    # syntax error; this matches the identical helper used by the other
    # char-insertion intensity functions in this file.
    return 100.0 if granularity == "all" else (1 / cadence) * 100.0


def insert_whitespace_chars_intensity(
    granularity: str, cadence: float, **kwargs
) -> float:
    """Intensity of the insert-whitespace-chars augmentation, in [0, 100]."""
    return char_insertion_intensity_helper(granularity, cadence)
160,634 | from typing import Any, Dict, List, Optional, Union
def char_insertion_intensity_helper(granularity: str, cadence: float) -> float:
    """Shared intensity rule for character-insertion augmentations."""
    full = granularity == "all"
    return 100.0 if full else (1 / cadence) * 100.0


def insert_zero_width_chars_intensity(
    granularity: str, cadence: float, **kwargs
) -> float:
    """Intensity of the insert-zero-width-chars augmentation, in [0, 100]."""
    return char_insertion_intensity_helper(granularity, cadence)
160,635 | from typing import Any, Dict, List, Optional, Union
def replace_intensity_helper(aug_p: float, aug_max: int) -> float:
    """Zero intensity when no tokens may be augmented; otherwise the
    augmentation probability as a percentage."""
    if aug_max == 0:
        return 0.0
    return aug_p * 100.0


def merge_words_intensity(aug_word_p: float, aug_word_max: int, **kwargs) -> float:
    """Intensity of the merge-words augmentation, in [0, 100]."""
    return replace_intensity_helper(aug_word_p, aug_word_max)
160,636 | from typing import Any, Dict, List, Optional, Union
def replace_bidirectional_intensity(**kwargs) -> float:
    """Intensity of the replace-bidirectional augmentation.

    The bidirectional marker rewrites the entire string, so this is always
    maximal (100.0).
    """
    return 100.0
160,637 | from typing import Any, Dict, List, Optional, Union
def replace_intensity_helper(aug_p: float, aug_max: int) -> float:
    """Zero intensity when no tokens may be augmented; otherwise the
    augmentation probability as a percentage."""
    # Restored body: the helper was declared with no body here (syntax error);
    # this matches the identical helper defined alongside the other replace
    # intensity functions in this file.
    return 0.0 if aug_max == 0 else aug_p * 100.0


def replace_fun_fonts_intensity(
    aug_p: float, aug_max: int, granularity: str, **kwargs
) -> float:
    """Intensity of the replace-fun-fonts augmentation: maximal when every
    token is targeted, otherwise proportional to the augmentation probability."""
    return 100.0 if granularity == "all" else replace_intensity_helper(aug_p, aug_max)
160,638 | from typing import Any, Dict, List, Optional, Union
def replace_intensity_helper(aug_p: float, aug_max: int) -> float:
    """Zero intensity when no tokens may be augmented; otherwise the
    augmentation probability as a percentage."""
    # Restored body: the helper was declared with no body here (syntax error);
    # this matches the identical helper used by the sibling intensity functions.
    return 0.0 if aug_max == 0 else aug_p * 100.0


def replace_similar_chars_intensity(
    aug_char_p: float, aug_word_p: float, aug_char_max: int, aug_word_max: int, **kwargs
) -> float:
    """Intensity of the replace-similar-chars augmentation, in [0, 100]."""
    # we only care if aug_*_max is zero or not, so it's okay to multiply the values here
    return replace_intensity_helper(
        aug_word_p * aug_char_p, aug_word_max * aug_char_max
    )
160,639 | from typing import Any, Dict, List, Optional, Union
def replace_intensity_helper(aug_p: float, aug_max: int) -> float:
    """Zero intensity when no tokens may be augmented; otherwise the
    augmentation probability as a percentage."""
    if aug_max == 0:
        return 0.0
    return aug_p * 100.0


def replace_similar_unicode_chars_intensity(
    aug_char_p: float, aug_word_p: float, aug_char_max: int, aug_word_max: int, **kwargs
) -> float:
    """Intensity of the replace-similar-unicode-chars augmentation, in [0, 100]."""
    # Only whether each aug_*_max is zero matters, so multiplying the pairs
    # together preserves the result while collapsing them into one call.
    combined_p = aug_word_p * aug_char_p
    combined_max = aug_word_max * aug_char_max
    return replace_intensity_helper(combined_p, combined_max)
160,640 | from typing import Any, Dict, List, Optional, Union
def replace_text_intensity(
    texts: Union[List[str], str], replace_text: Union[Dict[str, str], str], **kwargs
) -> float:
    """Intensity of the replace-text augmentation.

    Maximal when the replacement is a plain string (everything is replaced) or
    when any replacement key actually occurs in ``texts``; zero otherwise.
    """
    if isinstance(replace_text, str):
        return 100.0
    if any(t in texts for t in replace_text):
        return 100.0
    return 0.0
160,641 | from typing import Any, Dict, List, Optional, Union
def replace_intensity_helper(aug_p: float, aug_max: int) -> float:
    """Zero intensity when no tokens may be augmented; otherwise the
    augmentation probability as a percentage."""
    # Restored body: the helper was declared with no body here (syntax error);
    # this matches the identical helper used by the sibling intensity functions.
    return 0.0 if aug_max == 0 else aug_p * 100.0


def replace_upside_down_intensity(
    aug_p: float, aug_max: int, granularity: str, **kwargs
) -> float:
    """Intensity of the replace-upside-down augmentation: maximal when every
    token is targeted, otherwise proportional to the augmentation probability."""
    return 100.0 if granularity == "all" else replace_intensity_helper(aug_p, aug_max)
160,642 | from typing import Any, Dict, List, Optional, Union
def replace_intensity_helper(aug_p: float, aug_max: int) -> float:
    """Zero intensity when no tokens may be augmented; otherwise the
    augmentation probability as a percentage."""
    if aug_max == 0:
        return 0.0
    return aug_p * 100.0


def replace_words_intensity(
    aug_word_p: float,
    aug_word_max: int,
    mapping: Optional[Union[str, Dict[str, Any]]],
    **kwargs,
) -> float:
    """Intensity of the replace-words augmentation: zero without a mapping,
    otherwise proportional to the word augmentation probability."""
    if not mapping:
        return 0.0
    return replace_intensity_helper(aug_word_p, aug_word_max)
160,643 | from typing import Any, Dict, List, Optional, Union
def replace_intensity_helper(aug_p: float, aug_max: int) -> float:
    """Zero intensity when no tokens may be augmented; otherwise the
    augmentation probability as a percentage."""
    if aug_max == 0:
        return 0.0
    return aug_p * 100.0


def simulate_typos_intensity(
    aug_char_p: float, aug_word_p: float, aug_char_max: int, aug_word_max: int, **kwargs
) -> float:
    """Intensity of the simulate-typos augmentation, in [0, 100]."""
    # Only whether each aug_*_max is zero matters, so multiplying the pairs
    # together preserves the result while collapsing them into one call.
    combined_p = aug_word_p * aug_char_p
    combined_max = aug_word_max * aug_char_max
    return replace_intensity_helper(combined_p, combined_max)
160,644 | from typing import Any, Dict, List, Optional, Union
def replace_intensity_helper(aug_p: float, aug_max: int) -> float:
    """Zero intensity when no tokens may be augmented; otherwise the
    augmentation probability as a percentage."""
    if aug_max == 0:
        return 0.0
    return aug_p * 100.0


def split_words_intensity(aug_word_p: float, aug_word_max: int, **kwargs) -> float:
    """Intensity of the split-words augmentation, in [0, 100]."""
    return replace_intensity_helper(aug_word_p, aug_word_max)
160,645 | from typing import Any, Dict, List, Optional, Union
def replace_intensity_helper(aug_p: float, aug_max: int) -> float:
    """Zero intensity when no tokens may be augmented; otherwise the
    augmentation probability as a percentage."""
    no_augmentable_tokens = aug_max == 0
    return 0.0 if no_augmentable_tokens else aug_p * 100.0


def swap_gendered_words_intensity(
    aug_word_p: float,
    aug_word_max: int,
    **kwargs,
) -> float:
    """Intensity of the swap-gendered-words augmentation, in [0, 100]."""
    return replace_intensity_helper(aug_word_p, aug_word_max)
160,646 | from typing import Callable
import numpy as np
from PIL import Image
The provided code snippet includes necessary dependencies for implementing the `aug_np_wrapper` function. Write a Python function `def aug_np_wrapper( image: np.ndarray, aug_function: Callable[..., None], **kwargs ) -> np.ndarray` to solve the following problem:
This function is a wrapper on all image augmentation functions such that a numpy array could be passed in as input instead of providing the path to the image or a PIL Image @param image: the numpy array representing the image to be augmented @param aug_function: the augmentation function to be applied onto the image @param **kwargs: the input attributes to be passed into the augmentation function
Here is the function:
def aug_np_wrapper(
    image: np.ndarray, aug_function: Callable[..., None], **kwargs
) -> np.ndarray:
    """
    Adapter that lets any PIL-based image augmentation accept and return
    numpy arrays instead of file paths or PIL Images.

    @param image: the numpy array representing the image to be augmented

    @param aug_function: the augmentation function to be applied onto the image

    @param **kwargs: the input attributes to be passed into the augmentation function
    """
    as_pil = Image.fromarray(image)
    augmented = aug_function(as_pil, **kwargs)
    return np.array(augmented)
160,647 | import io
import math
import os
import pickle
from copy import deepcopy
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
import numpy as np
from augly import utils
from augly.image import utils as imutils
from augly.image.utils.bboxes import spatial_bbox_helper
from PIL import Image, ImageDraw, ImageEnhance, ImageFilter, ImageFont
The provided code snippet includes necessary dependencies for implementing the `apply_lambda` function. Write a Python function `def apply_lambda( image: Union[str, Image.Image], output_path: Optional[str] = None, aug_function: Callable[..., Image.Image] = lambda x: x, metadata: Optional[List[Dict[str, Any]]] = None, bboxes: Optional[List[Tuple]] = None, bbox_format: Optional[str] = None, **kwargs, ) -> Image.Image` to solve the following problem:
Apply a user-defined lambda on an image @param image: the path to an image or a variable of type PIL.Image.Image to be augmented @param output_path: the path in which the resulting image will be stored. If None, the resulting PIL Image will still be returned @param aug_function: the augmentation function to be applied onto the image (should expect a PIL image as input and return one) @param **kwargs: the input attributes to be passed into the augmentation function to be applied @param metadata: if set to be a list, metadata about the function execution including its name, the source & dest width, height, etc. will be appended to the inputted list. If set to None, no metadata will be appended or returned @param bboxes: a list of bounding boxes can be passed in here if desired. If provided, this list will be modified in place such that each bounding box is transformed according to this function @param bbox_format: signifies what bounding box format was used in `bboxes`. Must specify `bbox_format` if `bboxes` is provided. Supported bbox_format values are "pascal_voc", "pascal_voc_norm", "coco", and "yolo" @returns: the augmented PIL Image
Here is the function:
def apply_lambda(
    image: Union[str, Image.Image],
    output_path: Optional[str] = None,
    aug_function: Callable[..., Image.Image] = lambda x: x,
    metadata: Optional[List[Dict[str, Any]]] = None,
    bboxes: Optional[List[Tuple]] = None,
    bbox_format: Optional[str] = None,
    **kwargs,
) -> Image.Image:
    """
    Apply a user-defined lambda on an image

    @param image: the path to an image or a variable of type PIL.Image.Image
        to be augmented

    @param output_path: the path in which the resulting image will be stored.
        If None, the resulting PIL Image will still be returned

    @param aug_function: the augmentation function to be applied onto the image
        (should expect a PIL image as input and return one)

    @param **kwargs: the input attributes to be passed into the augmentation
        function to be applied

    @param metadata: if set to be a list, metadata about the function execution
        including its name, the source & dest width, height, etc. will be appended
        to the inputted list. If set to None, no metadata will be appended or returned

    @param bboxes: a list of bounding boxes can be passed in here if desired. If
        provided, this list will be modified in place such that each bounding box is
        transformed according to this function

    @param bbox_format: signifies what bounding box format was used in `bboxes`. Must
        specify `bbox_format` if `bboxes` is provided. Supported bbox_format values are
        "pascal_voc", "pascal_voc_norm", "coco", and "yolo"

    @returns: the augmented PIL Image
    """
    assert callable(aug_function), (
        repr(type(aug_function).__name__) + " object is not callable"
    )
    image = imutils.validate_and_load_image(image)
    # Snapshot the call arguments now, before any further locals are created,
    # so the recorded metadata reflects exactly what the caller passed in.
    func_kwargs = deepcopy(locals())
    if aug_function is not None:
        # Record the function's name rather than the object itself so the
        # metadata stays readable/serializable; callables without __name__
        # (e.g. functools.partial instances) fall back to their class name.
        try:
            func_kwargs["aug_function"] = aug_function.__name__
        except AttributeError:
            func_kwargs["aug_function"] = type(aug_function).__name__
    func_kwargs = imutils.get_func_kwargs(metadata, func_kwargs)
    # Remember the source mode so the result can be converted back before saving.
    src_mode = image.mode
    aug_image = aug_function(image, **kwargs)
    imutils.get_metadata(
        metadata=metadata,
        function_name="apply_lambda",
        aug_image=aug_image,
        **func_kwargs,
    )
    return imutils.ret_and_save_image(aug_image, output_path, src_mode)
160,648 | import io
import math
import os
import pickle
from copy import deepcopy
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
import numpy as np
from augly import utils
from augly.image import utils as imutils
from augly.image.utils.bboxes import spatial_bbox_helper
from PIL import Image, ImageDraw, ImageEnhance, ImageFilter, ImageFont
The provided code snippet includes necessary dependencies for implementing the `apply_pil_filter` function. Write a Python function `def apply_pil_filter( image: Union[str, Image.Image], output_path: Optional[str] = None, filter_type: Union[Callable, ImageFilter.Filter] = ImageFilter.EDGE_ENHANCE_MORE, metadata: Optional[List[Dict[str, Any]]] = None, bboxes: Optional[List[Tuple]] = None, bbox_format: Optional[str] = None, ) -> Image.Image` to solve the following problem:
Applies a given PIL filter to the input image using `Image.filter()` @param image: the path to an image or a variable of type PIL.Image.Image to be augmented @param output_path: the path in which the resulting image will be stored. If None, the resulting PIL Image will still be returned @param filter_type: the PIL ImageFilter to apply to the image @param metadata: if set to be a list, metadata about the function execution including its name, the source & dest width, height, etc. will be appended to the inputted list. If set to None, no metadata will be appended or returned @param bboxes: a list of bounding boxes can be passed in here if desired. If provided, this list will be modified in place such that each bounding box is transformed according to this function @param bbox_format: signifies what bounding box format was used in `bboxes`. Must specify `bbox_format` if `bboxes` is provided. Supported bbox_format values are "pascal_voc", "pascal_voc_norm", "coco", and "yolo" @returns: the augmented PIL Image
Here is the function:
def apply_pil_filter(
    image: Union[str, Image.Image],
    output_path: Optional[str] = None,
    filter_type: Union[Callable, ImageFilter.Filter] = ImageFilter.EDGE_ENHANCE_MORE,
    metadata: Optional[List[Dict[str, Any]]] = None,
    bboxes: Optional[List[Tuple]] = None,
    bbox_format: Optional[str] = None,
) -> Image.Image:
    """
    Applies a given PIL filter to the input image using `Image.filter()`

    @param image: the path to an image or a variable of type PIL.Image.Image
        to be augmented

    @param output_path: the path in which the resulting image will be stored.
        If None, the resulting PIL Image will still be returned

    @param filter_type: the PIL ImageFilter to apply to the image

    @param metadata: if set to be a list, metadata about the function execution
        including its name, the source & dest width, height, etc. will be appended
        to the inputted list. If set to None, no metadata will be appended or returned

    @param bboxes: a list of bounding boxes can be passed in here if desired. If
        provided, this list will be modified in place such that each bounding box is
        transformed according to this function

    @param bbox_format: signifies what bounding box format was used in `bboxes`. Must
        specify `bbox_format` if `bboxes` is provided. Supported bbox_format values are
        "pascal_voc", "pascal_voc_norm", "coco", and "yolo"

    @returns: the augmented PIL Image
    """
    image = imutils.validate_and_load_image(image)
    # Capture the call arguments before any further locals are created, so the
    # recorded metadata reflects exactly what the caller passed in.
    func_kwargs = deepcopy(locals())
    # Accept either a filter class/factory or an already-constructed instance.
    ftr = filter_type() if isinstance(filter_type, Callable) else filter_type
    assert isinstance(
        ftr, ImageFilter.Filter
    ), "Filter type must be a PIL.ImageFilter.Filter class"
    # Record the filter's human-readable name when it provides one.
    func_kwargs = imutils.get_func_kwargs(
        metadata, func_kwargs, filter_type=getattr(ftr, "name", filter_type)
    )
    src_mode = image.mode
    aug_image = image.filter(ftr)
    imutils.get_metadata(
        metadata=metadata,
        function_name="apply_pil_filter",
        aug_image=aug_image,
        **func_kwargs,
    )
    return imutils.ret_and_save_image(aug_image, output_path, src_mode)
160,649 | import io
import math
import os
import pickle
from copy import deepcopy
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
import numpy as np
from augly import utils
from augly.image import utils as imutils
from augly.image.utils.bboxes import spatial_bbox_helper
from PIL import Image, ImageDraw, ImageEnhance, ImageFilter, ImageFont
The provided code snippet includes necessary dependencies for implementing the `blur` function. Write a Python function `def blur( image: Union[str, Image.Image], output_path: Optional[str] = None, radius: float = 2.0, metadata: Optional[List[Dict[str, Any]]] = None, bboxes: Optional[List[Tuple]] = None, bbox_format: Optional[str] = None, ) -> Image.Image` to solve the following problem:
Blurs the image @param image: the path to an image or a variable of type PIL.Image.Image to be augmented @param output_path: the path in which the resulting image will be stored. If None, the resulting PIL Image will still be returned @param radius: the larger the radius, the blurrier the image @param metadata: if set to be a list, metadata about the function execution including its name, the source & dest width, height, etc. will be appended to the inputted list. If set to None, no metadata will be appended or returned @param bboxes: a list of bounding boxes can be passed in here if desired. If provided, this list will be modified in place such that each bounding box is transformed according to this function @param bbox_format: signifies what bounding box format was used in `bboxes`. Must specify `bbox_format` if `bboxes` is provided. Supported bbox_format values are "pascal_voc", "pascal_voc_norm", "coco", and "yolo" @returns: the augmented PIL Image
Here is the function:
def blur(
    image: Union[str, Image.Image],
    output_path: Optional[str] = None,
    radius: float = 2.0,
    metadata: Optional[List[Dict[str, Any]]] = None,
    bboxes: Optional[List[Tuple]] = None,
    bbox_format: Optional[str] = None,
) -> Image.Image:
    """
    Blurs the image

    @param image: the path to an image or a variable of type PIL.Image.Image
        to be augmented

    @param output_path: the path in which the resulting image will be stored.
        If None, the resulting PIL Image will still be returned

    @param radius: the larger the radius, the blurrier the image

    @param metadata: if set to be a list, metadata about the function execution
        including its name, the source & dest width, height, etc. will be appended
        to the inputted list. If set to None, no metadata will be appended or returned

    @param bboxes: a list of bounding boxes can be passed in here if desired. If
        provided, this list will be modified in place such that each bounding box is
        transformed according to this function

    @param bbox_format: signifies what bounding box format was used in `bboxes`. Must
        specify `bbox_format` if `bboxes` is provided. Supported bbox_format values are
        "pascal_voc", "pascal_voc_norm", "coco", and "yolo"

    @returns: the augmented PIL Image
    """
    # NOTE(review): the check rejects radius == 0 as well, though the message
    # only mentions negative values — confirm whether zero should be allowed.
    assert radius > 0, "Radius cannot be negative"
    image = imutils.validate_and_load_image(image)
    # Capture the call arguments before any further locals are created, so the
    # recorded metadata reflects exactly what the caller passed in.
    func_kwargs = imutils.get_func_kwargs(metadata, locals())
    src_mode = image.mode
    aug_image = image.filter(ImageFilter.GaussianBlur(radius))
    imutils.get_metadata(
        metadata=metadata,
        function_name="blur",
        aug_image=aug_image,
        **func_kwargs,
    )
    return imutils.ret_and_save_image(aug_image, output_path, src_mode)
160,650 | import io
import math
import os
import pickle
from copy import deepcopy
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
import numpy as np
from augly import utils
from augly.image import utils as imutils
from augly.image.utils.bboxes import spatial_bbox_helper
from PIL import Image, ImageDraw, ImageEnhance, ImageFilter, ImageFont
The provided code snippet includes necessary dependencies for implementing the `brightness` function. Write a Python function `def brightness( image: Union[str, Image.Image], output_path: Optional[str] = None, factor: float = 1.0, metadata: Optional[List[Dict[str, Any]]] = None, bboxes: Optional[List[Tuple]] = None, bbox_format: Optional[str] = None, ) -> Image.Image` to solve the following problem:
Changes the brightness of the image @param image: the path to an image or a variable of type PIL.Image.Image to be augmented @param output_path: the path in which the resulting image will be stored. If None, the resulting PIL Image will still be returned @param factor: values less than 1.0 darken the image and values greater than 1.0 brighten the image. Setting factor to 1.0 will not alter the image's brightness @param metadata: if set to be a list, metadata about the function execution including its name, the source & dest width, height, etc. will be appended to the inputted list. If set to None, no metadata will be appended or returned @param bboxes: a list of bounding boxes can be passed in here if desired. If provided, this list will be modified in place such that each bounding box is transformed according to this function @param bbox_format: signifies what bounding box format was used in `bboxes`. Must specify `bbox_format` if `bboxes` is provided. Supported bbox_format values are "pascal_voc", "pascal_voc_norm", "coco", and "yolo" @returns: the augmented PIL Image
Here is the function:
def brightness(
    image: Union[str, Image.Image],
    output_path: Optional[str] = None,
    factor: float = 1.0,
    metadata: Optional[List[Dict[str, Any]]] = None,
    bboxes: Optional[List[Tuple]] = None,
    bbox_format: Optional[str] = None,
) -> Image.Image:
    """
    Changes the brightness of the image

    @param image: the path to an image or a variable of type PIL.Image.Image
        to be augmented

    @param output_path: the path in which the resulting image will be stored.
        If None, the resulting PIL Image will still be returned

    @param factor: values less than 1.0 darken the image and values greater than 1.0
        brighten the image. Setting factor to 1.0 will not alter the image's brightness

    @param metadata: if set to be a list, metadata about the function execution
        including its name, the source & dest width, height, etc. will be appended
        to the inputted list. If set to None, no metadata will be appended or returned

    @param bboxes: a list of bounding boxes can be passed in here if desired. If
        provided, this list will be modified in place such that each bounding box is
        transformed according to this function

    @param bbox_format: signifies what bounding box format was used in `bboxes`. Must
        specify `bbox_format` if `bboxes` is provided. Supported bbox_format values are
        "pascal_voc", "pascal_voc_norm", "coco", and "yolo"

    @returns: the augmented PIL Image
    """
    image = imutils.validate_and_load_image(image)
    # Capture the call arguments BEFORE computing the augmented image, matching
    # every sibling augmentation in this module. The previous ordering computed
    # aug_image first, leaking it into func_kwargs via locals().
    func_kwargs = imutils.get_func_kwargs(metadata, locals())
    src_mode = image.mode
    aug_image = ImageEnhance.Brightness(image).enhance(factor)
    imutils.get_metadata(
        metadata=metadata,
        function_name="brightness",
        aug_image=aug_image,
        **func_kwargs,
    )
    return imutils.ret_and_save_image(aug_image, output_path, src_mode)
160,651 | import io
import math
import os
import pickle
from copy import deepcopy
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
import numpy as np
from augly import utils
from augly.image import utils as imutils
from augly.image.utils.bboxes import spatial_bbox_helper
from PIL import Image, ImageDraw, ImageEnhance, ImageFilter, ImageFont
def resize(
    image: Union[str, Image.Image],
    output_path: Optional[str] = None,
    width: Optional[int] = None,
    height: Optional[int] = None,
    resample: Any = Image.BILINEAR,
    metadata: Optional[List[Dict[str, Any]]] = None,
    bboxes: Optional[List[Tuple]] = None,
    bbox_format: Optional[str] = None,
) -> Image.Image:
    """
    Resizes an image

    @param image: the path to an image or a variable of type PIL.Image.Image
        to be augmented

    @param output_path: the path in which the resulting image will be stored.
        If None, the resulting PIL Image will still be returned

    @param width: the desired width of the resized image in pixels; if set to
        None, the original image width will be used

    @param height: the desired height of the resized image in pixels; if set to
        None, the original image height will be used

    @param resample: the PIL resampling filter to use: one of PIL.Image.NEAREST,
        PIL.Image.BOX, PIL.Image.BILINEAR, PIL.Image.HAMMING, PIL.Image.BICUBIC, or
        PIL.Image.LANCZOS

    @param metadata: if set to be a list, metadata about the function execution
        including its name, the source & dest width, height, etc. will be appended
        to the inputted list. If set to None, no metadata will be appended or returned

    @param bboxes: a list of bounding boxes can be passed in here if desired. If
        provided, this list will be modified in place such that each bounding box is
        transformed according to this function

    @param bbox_format: signifies what bounding box format was used in `bboxes`. Must
        specify `bbox_format` if `bboxes` is provided. Supported bbox_format values are
        "pascal_voc", "pascal_voc_norm", "coco", and "yolo"

    @returns: the augmented PIL Image
    """
    assert width is None or type(width) == int, "Width must be an integer"
    assert height is None or type(height) == int, "Height must be an integer"
    image = imutils.validate_and_load_image(image)
    # Capture the call arguments before any further locals are created, so the
    # recorded metadata reflects exactly what the caller passed in.
    func_kwargs = imutils.get_func_kwargs(metadata, locals())
    src_mode = image.mode
    im_w, im_h = image.size
    # Unspecified dimensions fall back to the source image's size.
    aug_image = image.resize((width or im_w, height or im_h), resample)
    imutils.get_metadata(
        metadata=metadata,
        function_name="resize",
        aug_image=aug_image,
        **func_kwargs,
    )
    return imutils.ret_and_save_image(aug_image, output_path, src_mode)
The provided code snippet includes necessary dependencies for implementing the `change_aspect_ratio` function. Write a Python function `def change_aspect_ratio( image: Union[str, Image.Image], output_path: Optional[str] = None, ratio: float = 1.0, metadata: Optional[List[Dict[str, Any]]] = None, bboxes: Optional[List[Tuple]] = None, bbox_format: Optional[str] = None, ) -> Image.Image` to solve the following problem:
Changes the aspect ratio of the image @param image: the path to an image or a variable of type PIL.Image.Image to be augmented @param output_path: the path in which the resulting image will be stored. If None, the resulting PIL Image will still be returned @param ratio: aspect ratio, i.e. width/height, of the new image @param metadata: if set to be a list, metadata about the function execution including its name, the source & dest width, height, etc. will be appended to the inputted list. If set to None, no metadata will be appended or returned @param bboxes: a list of bounding boxes can be passed in here if desired. If provided, this list will be modified in place such that each bounding box is transformed according to this function @param bbox_format: signifies what bounding box format was used in `bboxes`. Must specify `bbox_format` if `bboxes` is provided. Supported bbox_format values are "pascal_voc", "pascal_voc_norm", "coco", and "yolo" @returns: the augmented PIL Image
Here is the function:
def change_aspect_ratio(
    image: Union[str, Image.Image],
    output_path: Optional[str] = None,
    ratio: float = 1.0,
    metadata: Optional[List[Dict[str, Any]]] = None,
    bboxes: Optional[List[Tuple]] = None,
    bbox_format: Optional[str] = None,
) -> Image.Image:
    """
    Changes the aspect ratio of the image

    @param image: the path to an image or a variable of type PIL.Image.Image
        to be augmented

    @param output_path: the path in which the resulting image will be stored.
        If None, the resulting PIL Image will still be returned

    @param ratio: aspect ratio, i.e. width/height, of the new image

    @param metadata: if set to be a list, metadata about the function execution
        including its name, the source & dest width, height, etc. will be appended
        to the inputted list. If set to None, no metadata will be appended or returned

    @param bboxes: a list of bounding boxes can be passed in here if desired. If
        provided, this list will be modified in place such that each bounding box is
        transformed according to this function

    @param bbox_format: signifies what bounding box format was used in `bboxes`. Must
        specify `bbox_format` if `bboxes` is provided. Supported bbox_format values are
        "pascal_voc", "pascal_voc_norm", "coco", and "yolo"

    @returns: the augmented PIL Image
    """
    # NOTE(review): the check rejects ratio == 0 as well, though the message
    # only mentions negative values — confirm whether that wording is intended.
    assert ratio > 0, "Ratio cannot be negative"
    image = imutils.validate_and_load_image(image)
    # Capture the call arguments before any further locals are created, so the
    # recorded metadata reflects exactly what the caller passed in.
    func_kwargs = imutils.get_func_kwargs(metadata, locals())
    src_mode = image.mode
    width, height = image.size
    # Preserve the pixel area (width * height) while changing width/height ratio:
    # new_width / new_height ≈ ratio and new_width * new_height ≈ area.
    area = width * height
    new_width = int(math.sqrt(ratio * area))
    new_height = int(area / new_width)
    aug_image = image.resize((new_width, new_height))
    imutils.get_metadata(
        metadata=metadata,
        function_name="change_aspect_ratio",
        aug_image=aug_image,
        **func_kwargs,
    )
    return imutils.ret_and_save_image(aug_image, output_path, src_mode)
160,652 | import io
import math
import os
import pickle
from copy import deepcopy
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
import numpy as np
from augly import utils
from augly.image import utils as imutils
from augly.image.utils.bboxes import spatial_bbox_helper
from PIL import Image, ImageDraw, ImageEnhance, ImageFilter, ImageFont
def scale(
    image: Union[str, Image.Image],
    output_path: Optional[str] = None,
    factor: float = 0.5,
    interpolation: Optional[int] = None,
    metadata: Optional[List[Dict[str, Any]]] = None,
    bboxes: Optional[List[Tuple]] = None,
    bbox_format: Optional[str] = None,
) -> Image.Image:
    """
    Alters the resolution of an image

    @param image: the path to an image or a variable of type PIL.Image.Image
        to be augmented

    @param output_path: the path in which the resulting image will be stored.
        If None, the resulting PIL Image will still be returned

    @param factor: the ratio by which the width and height of the image
        will be multiplied; must be positive

    @param interpolation: resampling filter to use; one of PIL.Image.NEAREST,
        PIL.Image.BOX, PIL.Image.BILINEAR, PIL.Image.HAMMING, PIL.Image.BICUBIC or
        PIL.Image.LANCZOS. If None, a filter is chosen based on `factor`

    @param metadata: if set to be a list, metadata about the function execution
        including its name, the source & dest width, height, etc. will be appended
        to the inputted list. If set to None, no metadata will be appended or returned

    @param bboxes: a list of bounding boxes can be passed in here if desired. If
        provided, this list will be modified in place such that each bounding box is
        transformed according to this function

    @param bbox_format: signifies what bounding box format was used in `bboxes`. Must
        specify `bbox_format` if `bboxes` is provided. Supported bbox_format values are
        "pascal_voc", "pascal_voc_norm", "coco", and "yolo"

    @returns: the augmented PIL Image
    """
    assert factor > 0, "Expected 'factor' to be a positive number"
    assert interpolation in (
        Image.NEAREST,
        Image.BOX,
        Image.BILINEAR,
        Image.HAMMING,
        Image.BICUBIC,
        Image.LANCZOS,
        None,
    ), "Invalid interpolation specified"

    image = imutils.validate_and_load_image(image)
    func_kwargs = imutils.get_func_kwargs(metadata, locals())
    src_mode = image.mode

    if interpolation is None:
        # LANCZOS gives better quality when shrinking; BILINEAR when enlarging
        interpolation = Image.BILINEAR if factor >= 1 else Image.LANCZOS

    src_w, src_h = image.size
    new_size = (int(src_w * factor), int(src_h * factor))
    # pyre-fixme[6]: `resample` is typed as a Literal resampling constant,
    # but we pass a plain `int` here.
    aug_image = image.resize(new_size, resample=interpolation)

    imutils.get_metadata(
        metadata=metadata,
        function_name="scale",
        aug_image=aug_image,
        **func_kwargs,
    )

    return imutils.ret_and_save_image(aug_image, output_path, src_mode)
The provided code snippet includes necessary dependencies for implementing the `clip_image_size` function. Write a Python function `def clip_image_size( image: Union[str, Image.Image], output_path: Optional[str] = None, min_resolution: Optional[int] = None, max_resolution: Optional[int] = None, metadata: Optional[List[Dict[str, Any]]] = None, bboxes: Optional[List[Tuple]] = None, bbox_format: Optional[str] = None, ) -> Image.Image` to solve the following problem:
Scales the image up or down if necessary to fit in the given min and max resolution @param image: the path to an image or a variable of type PIL.Image.Image to be augmented @param output_path: the path in which the resulting image will be stored. If None, the resulting PIL Image will still be returned @param min_resolution: the minimum resolution, i.e. width * height, that the augmented image should have; if the input image has a lower resolution than this, the image will be scaled up as necessary @param max_resolution: the maximum resolution, i.e. width * height, that the augmented image should have; if the input image has a higher resolution than this, the image will be scaled down as necessary @param metadata: if set to be a list, metadata about the function execution including its name, the source & dest width, height, etc. will be appended to the inputted list. If set to None, no metadata will be appended or returned @param bboxes: a list of bounding boxes can be passed in here if desired. If provided, this list will be modified in place such that each bounding box is transformed according to this function @param bbox_format: signifies what bounding box format was used in `bboxes`. Must specify `bbox_format` if `bboxes` is provided. Supported bbox_format values are "pascal_voc", "pascal_voc_norm", "coco", and "yolo" @returns: the augmented PIL Image
Here is the function:
def clip_image_size(
    image: Union[str, Image.Image],
    output_path: Optional[str] = None,
    min_resolution: Optional[int] = None,
    max_resolution: Optional[int] = None,
    metadata: Optional[List[Dict[str, Any]]] = None,
    bboxes: Optional[List[Tuple]] = None,
    bbox_format: Optional[str] = None,
) -> Image.Image:
    """
    Scales the image up or down if necessary to fit in the given min and max resolution

    @param image: the path to an image or a variable of type PIL.Image.Image
        to be augmented

    @param output_path: the path in which the resulting image will be stored.
        If None, the resulting PIL Image will still be returned

    @param min_resolution: the minimum resolution, i.e. width * height, that the
        augmented image should have; if the input image has a lower resolution than
        this, the image will be scaled up as necessary

    @param max_resolution: the maximum resolution, i.e. width * height, that the
        augmented image should have; if the input image has a higher resolution than
        this, the image will be scaled down as necessary

    @param metadata: if set to be a list, metadata about the function execution
        including its name, the source & dest width, height, etc. will be appended
        to the inputted list. If set to None, no metadata will be appended or returned

    @param bboxes: a list of bounding boxes can be passed in here if desired. If
        provided, this list will be modified in place such that each bounding box is
        transformed according to this function

    @param bbox_format: signifies what bounding box format was used in `bboxes`. Must
        specify `bbox_format` if `bboxes` is provided. Supported bbox_format values are
        "pascal_voc", "pascal_voc_norm", "coco", and "yolo"

    @returns: the augmented PIL Image
    """
    assert min_resolution is None or (
        isinstance(min_resolution, int) and min_resolution >= 0
    ), "min_resolution must be None or a nonnegative int"
    assert max_resolution is None or (
        isinstance(max_resolution, int) and max_resolution >= 0
    ), "max_resolution must be None or a nonnegative int"
    if min_resolution is not None and max_resolution is not None:
        assert (
            min_resolution <= max_resolution
        ), "min_resolution cannot be greater than max_resolution"

    image = imutils.validate_and_load_image(image)
    func_kwargs = imutils.get_func_kwargs(metadata, locals())
    src_mode = image.mode

    aug_image = image
    resolution = image.width * image.height
    if min_resolution is not None and resolution < min_resolution:
        # Upscale so that the new area is (approximately) min_resolution
        aug_image = scale(aug_image, factor=math.sqrt(min_resolution / resolution))
    elif max_resolution is not None and resolution > max_resolution:
        # Downscale so that the new area is (approximately) max_resolution
        aug_image = scale(aug_image, factor=math.sqrt(max_resolution / resolution))

    imutils.get_metadata(
        metadata=metadata,
        function_name="clip_image_size",
        aug_image=aug_image,
        **func_kwargs,
    )

    return imutils.ret_and_save_image(aug_image, output_path, src_mode)
160,653 | import io
import math
import os
import pickle
from copy import deepcopy
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
import numpy as np
from augly import utils
from augly.image import utils as imutils
from augly.image.utils.bboxes import spatial_bbox_helper
from PIL import Image, ImageDraw, ImageEnhance, ImageFilter, ImageFont
The provided code snippet includes necessary dependencies for implementing the `color_jitter` function. Write a Python function `def color_jitter( image: Union[str, Image.Image], output_path: Optional[str] = None, brightness_factor: float = 1.0, contrast_factor: float = 1.0, saturation_factor: float = 1.0, metadata: Optional[List[Dict[str, Any]]] = None, bboxes: Optional[List[Tuple]] = None, bbox_format: Optional[str] = None, ) -> Image.Image` to solve the following problem:
Color jitters the image @param image: the path to an image or a variable of type PIL.Image.Image to be augmented @param output_path: the path in which the resulting image will be stored. If None, the resulting PIL Image will still be returned @param brightness_factor: a brightness factor below 1.0 darkens the image, a factor of 1.0 does not alter the image, and a factor greater than 1.0 brightens the image @param contrast_factor: a contrast factor below 1.0 removes contrast, a factor of 1.0 gives the original image, and a factor greater than 1.0 adds contrast @param saturation_factor: a saturation factor of below 1.0 lowers the saturation, a factor of 1.0 gives the original image, and a factor greater than 1.0 adds saturation @param metadata: if set to be a list, metadata about the function execution including its name, the source & dest width, height, etc. will be appended to the inputted list. If set to None, no metadata will be appended or returned @param bboxes: a list of bounding boxes can be passed in here if desired. If provided, this list will be modified in place such that each bounding box is transformed according to this function @param bbox_format: signifies what bounding box format was used in `bboxes`. Must specify `bbox_format` if `bboxes` is provided. Supported bbox_format values are "pascal_voc", "pascal_voc_norm", "coco", and "yolo" @returns: the augmented PIL Image
Here is the function:
def color_jitter(
    image: Union[str, Image.Image],
    output_path: Optional[str] = None,
    brightness_factor: float = 1.0,
    contrast_factor: float = 1.0,
    saturation_factor: float = 1.0,
    metadata: Optional[List[Dict[str, Any]]] = None,
    bboxes: Optional[List[Tuple]] = None,
    bbox_format: Optional[str] = None,
) -> Image.Image:
    """
    Color jitters the image

    @param image: the path to an image or a variable of type PIL.Image.Image
        to be augmented

    @param output_path: the path in which the resulting image will be stored.
        If None, the resulting PIL Image will still be returned

    @param brightness_factor: a brightness factor below 1.0 darkens the image, a factor
        of 1.0 does not alter the image, and a factor greater than 1.0 brightens the image

    @param contrast_factor: a contrast factor below 1.0 removes contrast, a factor of
        1.0 gives the original image, and a factor greater than 1.0 adds contrast

    @param saturation_factor: a saturation factor of below 1.0 lowers the saturation,
        a factor of 1.0 gives the original image, and a factor greater than 1.0
        adds saturation

    @param metadata: if set to be a list, metadata about the function execution
        including its name, the source & dest width, height, etc. will be appended
        to the inputted list. If set to None, no metadata will be appended or returned

    @param bboxes: a list of bounding boxes can be passed in here if desired. If
        provided, this list will be modified in place such that each bounding box is
        transformed according to this function

    @param bbox_format: signifies what bounding box format was used in `bboxes`. Must
        specify `bbox_format` if `bboxes` is provided. Supported bbox_format values are
        "pascal_voc", "pascal_voc_norm", "coco", and "yolo"

    @returns: the augmented PIL Image
    """
    image = imutils.validate_and_load_image(image)
    # Capture kwargs BEFORE computing the augmentation so that locals() holds
    # only the function's inputs (previously `aug_image` leaked into the
    # captured kwargs); this also matches the pattern used by every sibling
    # augmentation in this file.
    func_kwargs = imutils.get_func_kwargs(metadata, locals())
    src_mode = image.mode

    aug_image = ImageEnhance.Brightness(image).enhance(brightness_factor)
    aug_image = ImageEnhance.Contrast(aug_image).enhance(contrast_factor)
    aug_image = ImageEnhance.Color(aug_image).enhance(saturation_factor)

    imutils.get_metadata(
        metadata=metadata,
        function_name="color_jitter",
        aug_image=aug_image,
        **func_kwargs,
    )

    return imutils.ret_and_save_image(aug_image, output_path, src_mode)
160,654 | import io
import math
import os
import pickle
from copy import deepcopy
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
import numpy as np
from augly import utils
from augly.image import utils as imutils
from augly.image.utils.bboxes import spatial_bbox_helper
from PIL import Image, ImageDraw, ImageEnhance, ImageFilter, ImageFont
The provided code snippet includes necessary dependencies for implementing the `contrast` function. Write a Python function `def contrast( image: Union[str, Image.Image], output_path: Optional[str] = None, factor: float = 1.0, metadata: Optional[List[Dict[str, Any]]] = None, bboxes: Optional[List[Tuple]] = None, bbox_format: Optional[str] = None, ) -> Image.Image` to solve the following problem:
Alters the contrast of the image @param image: the path to an image or a variable of type PIL.Image.Image to be augmented @param output_path: the path in which the resulting image will be stored. If None, the resulting PIL Image will still be returned @param factor: zero gives a grayscale image, values below 1.0 decreases contrast, a factor of 1.0 gives the original image, and a factor greater than 1.0 increases contrast @param metadata: if set to be a list, metadata about the function execution including its name, the source & dest width, height, etc. will be appended to the inputted list. If set to None, no metadata will be appended or returned @param bboxes: a list of bounding boxes can be passed in here if desired. If provided, this list will be modified in place such that each bounding box is transformed according to this function @param bbox_format: signifies what bounding box format was used in `bboxes`. Must specify `bbox_format` if `bboxes` is provided. Supported bbox_format values are "pascal_voc", "pascal_voc_norm", "coco", and "yolo" @returns: Image.Image - Augmented PIL Image
Here is the function:
def contrast(
    image: Union[str, Image.Image],
    output_path: Optional[str] = None,
    factor: float = 1.0,
    metadata: Optional[List[Dict[str, Any]]] = None,
    bboxes: Optional[List[Tuple]] = None,
    bbox_format: Optional[str] = None,
) -> Image.Image:
    """
    Alters the contrast of the image

    @param image: the path to an image or a variable of type PIL.Image.Image
        to be augmented

    @param output_path: the path in which the resulting image will be stored.
        If None, the resulting PIL Image will still be returned

    @param factor: zero gives a grayscale image, values below 1.0 decrease contrast,
        a factor of 1.0 gives the original image, and a factor greater than 1.0
        increases contrast

    @param metadata: if set to be a list, metadata about the function execution
        including its name, the source & dest width, height, etc. will be appended
        to the inputted list. If set to None, no metadata will be appended or returned

    @param bboxes: a list of bounding boxes can be passed in here if desired. If
        provided, this list will be modified in place such that each bounding box is
        transformed according to this function

    @param bbox_format: signifies what bounding box format was used in `bboxes`. Must
        specify `bbox_format` if `bboxes` is provided. Supported bbox_format values are
        "pascal_voc", "pascal_voc_norm", "coco", and "yolo"

    @returns: Image.Image - Augmented PIL Image
    """
    image = imutils.validate_and_load_image(image)
    func_kwargs = imutils.get_func_kwargs(metadata, locals())
    src_mode = image.mode

    aug_image = ImageEnhance.Contrast(image).enhance(factor)

    imutils.get_metadata(
        metadata=metadata,
        function_name="contrast",
        aug_image=aug_image,
        **func_kwargs,
    )

    return imutils.ret_and_save_image(aug_image, output_path, src_mode)
160,655 | import io
import math
import os
import pickle
from copy import deepcopy
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
import numpy as np
from augly import utils
from augly.image import utils as imutils
from augly.image.utils.bboxes import spatial_bbox_helper
from PIL import Image, ImageDraw, ImageEnhance, ImageFilter, ImageFont
The provided code snippet includes necessary dependencies for implementing the `convert_color` function. Write a Python function `def convert_color( image: Union[str, Image.Image], output_path: Optional[str] = None, mode: Optional[str] = None, matrix: Union[ None, Tuple[float, float, float, float], Tuple[ float, float, float, float, float, float, float, float, float, float, float, float, ], ] = None, dither: Optional[int] = None, palette: int = 0, colors: int = 256, metadata: Optional[List[Dict[str, Any]]] = None, bboxes: Optional[List[Tuple]] = None, bbox_format: Optional[str] = None, ) -> Image.Image` to solve the following problem:
Converts the image in terms of color modes @param image: the path to an image or a variable of type PIL.Image.Image to be augmented @param output_path: the path in which the resulting image will be stored. If None, the resulting PIL Image will still be returned @param mode: defines the type and depth of a pixel in the image. If mode is omitted, a mode is chosen so that all information in the image and the palette can be represented without a palette. For list of available modes, check: https://pillow.readthedocs.io/en/stable/handbook/concepts.html#concept-modes @param matrix: an optional conversion matrix. If given, this should be 4- or 12-tuple containing floating point values @param dither: dithering method, used when converting from mode “RGB” to “P” or from “RGB” or “L” to “1”. Available methods are NONE or FLOYDSTEINBERG (default). @param palette: palette to use when converting from mode “RGB” to “P”. Available palettes are WEB or ADAPTIVE @param colors: number of colors to use for the ADAPTIVE palette. Defaults to 256. @param metadata: if set to be a list, metadata about the function execution including its name, the source & dest width, height, etc. will be appended to the inputted list. If set to None, no metadata will be appended or returned @param bboxes: a list of bounding boxes can be passed in here if desired. If provided, this list will be modified in place such that each bounding box is transformed according to this function @param bbox_format: signifies what bounding box format was used in `bboxes`. Must specify `bbox_format` if `bboxes` is provided. Supported bbox_format values are "pascal_voc", "pascal_voc_norm", "coco", and "yolo" @returns: Image.Image - Augmented PIL Image
Here is the function:
def convert_color(
    image: Union[str, Image.Image],
    output_path: Optional[str] = None,
    mode: Optional[str] = None,
    matrix: Union[
        None,
        Tuple[float, float, float, float],
        Tuple[
            float,
            float,
            float,
            float,
            float,
            float,
            float,
            float,
            float,
            float,
            float,
            float,
        ],
    ] = None,
    dither: Optional[int] = None,
    palette: int = 0,
    colors: int = 256,
    metadata: Optional[List[Dict[str, Any]]] = None,
    bboxes: Optional[List[Tuple]] = None,
    bbox_format: Optional[str] = None,
) -> Image.Image:
    """
    Converts the image in terms of color modes

    @param image: the path to an image or a variable of type PIL.Image.Image
        to be augmented

    @param output_path: the path in which the resulting image will be stored.
        If None, the resulting PIL Image will still be returned

    @param mode: defines the type and depth of a pixel in the image. If mode is omitted,
        a mode is chosen so that all information in the image and the palette can be
        represented without a palette. For list of available modes, check:
        https://pillow.readthedocs.io/en/stable/handbook/concepts.html#concept-modes

    @param matrix: an optional conversion matrix. If given, this should be 4- or
        12-tuple containing floating point values

    @param dither: dithering method, used when converting from mode "RGB" to "P" or from
        "RGB" or "L" to "1". Available methods are NONE or FLOYDSTEINBERG (default).

    @param palette: palette to use when converting from mode "RGB" to "P". Available
        palettes are WEB or ADAPTIVE

    @param colors: number of colors to use for the ADAPTIVE palette. Defaults to 256.

    @param metadata: if set to be a list, metadata about the function execution
        including its name, the source & dest width, height, etc. will be appended
        to the inputted list. If set to None, no metadata will be appended or returned

    @param bboxes: a list of bounding boxes can be passed in here if desired. If
        provided, this list will be modified in place such that each bounding box is
        transformed according to this function

    @param bbox_format: signifies what bounding box format was used in `bboxes`. Must
        specify `bbox_format` if `bboxes` is provided. Supported bbox_format values are
        "pascal_voc", "pascal_voc_norm", "coco", and "yolo"

    @returns: Image.Image - Augmented PIL Image
    """
    image = imutils.validate_and_load_image(image)
    # Capture kwargs BEFORE computing the augmentation so that locals() holds
    # only the function's inputs (previously `aug_image` leaked into the
    # captured kwargs); this also matches the pattern used by every sibling
    # augmentation in this file. `aug_image` is now passed to get_metadata
    # explicitly, as the siblings do.
    func_kwargs = imutils.get_func_kwargs(metadata, locals())
    # pyre-fixme[6]: Expected `Union[typing_extensions.Literal[0],
    #  typing_extensions.Literal[1]]` for 4th param but got `int`.
    aug_image = image.convert(mode, matrix, dither, palette, colors)
    imutils.get_metadata(
        metadata=metadata,
        function_name="convert_color",
        aug_image=aug_image,
        **func_kwargs,
    )
    return imutils.ret_and_save_image(aug_image, output_path)
160,656 | import io
import math
import os
import pickle
from copy import deepcopy
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
import numpy as np
from augly import utils
from augly.image import utils as imutils
from augly.image.utils.bboxes import spatial_bbox_helper
from PIL import Image, ImageDraw, ImageEnhance, ImageFilter, ImageFont
The provided code snippet includes necessary dependencies for implementing the `encoding_quality` function. Write a Python function `def encoding_quality( image: Union[str, Image.Image], output_path: Optional[str] = None, quality: int = 50, metadata: Optional[List[Dict[str, Any]]] = None, bboxes: Optional[List[Tuple]] = None, bbox_format: Optional[str] = None, ) -> Image.Image` to solve the following problem:
Changes the JPEG encoding quality level @param image: the path to an image or a variable of type PIL.Image.Image to be augmented @param output_path: the path in which the resulting image will be stored. If None, the resulting PIL Image will still be returned @param quality: JPEG encoding quality. 0 is lowest quality, 100 is highest @param metadata: if set to be a list, metadata about the function execution including its name, the source & dest width, height, etc. will be appended to the inputted list. If set to None, no metadata will be appended or returned @param bboxes: a list of bounding boxes can be passed in here if desired. If provided, this list will be modified in place such that each bounding box is transformed according to this function @param bbox_format: signifies what bounding box format was used in `bboxes`. Must specify `bbox_format` if `bboxes` is provided. Supported bbox_format values are "pascal_voc", "pascal_voc_norm", "coco", and "yolo" @returns: the augmented PIL Image
Here is the function:
def encoding_quality(
    image: Union[str, Image.Image],
    output_path: Optional[str] = None,
    quality: int = 50,
    metadata: Optional[List[Dict[str, Any]]] = None,
    bboxes: Optional[List[Tuple]] = None,
    bbox_format: Optional[str] = None,
) -> Image.Image:
    """
    Changes the JPEG encoding quality level

    @param image: the path to an image or a variable of type PIL.Image.Image
        to be augmented

    @param output_path: the path in which the resulting image will be stored.
        If None, the resulting PIL Image will still be returned

    @param quality: JPEG encoding quality. 0 is lowest quality, 100 is highest

    @param metadata: if set to be a list, metadata about the function execution
        including its name, the source & dest width, height, etc. will be appended
        to the inputted list. If set to None, no metadata will be appended or returned

    @param bboxes: a list of bounding boxes can be passed in here if desired. If
        provided, this list will be modified in place such that each bounding box is
        transformed according to this function

    @param bbox_format: signifies what bounding box format was used in `bboxes`. Must
        specify `bbox_format` if `bboxes` is provided. Supported bbox_format values are
        "pascal_voc", "pascal_voc_norm", "coco", and "yolo"

    @returns: the augmented PIL Image
    """
    assert 0 <= quality <= 100, "'quality' must be a value in the range [0, 100]"

    image = imutils.validate_and_load_image(image).convert("RGB")
    func_kwargs = imutils.get_func_kwargs(metadata, locals())
    src_mode = image.mode

    # Round-trip through an in-memory JPEG encode at the requested quality;
    # the buffer stays referenced by the lazily-loaded PIL image.
    jpeg_bytes = io.BytesIO()
    image.save(jpeg_bytes, format="JPEG", quality=quality)
    aug_image = Image.open(jpeg_bytes)

    imutils.get_metadata(
        metadata=metadata,
        function_name="encoding_quality",
        aug_image=aug_image,
        **func_kwargs,
    )

    return imutils.ret_and_save_image(aug_image, output_path, src_mode)
160,657 | import io
import math
import os
import pickle
from copy import deepcopy
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
import numpy as np
from augly import utils
from augly.image import utils as imutils
from augly.image.utils.bboxes import spatial_bbox_helper
from PIL import Image, ImageDraw, ImageEnhance, ImageFilter, ImageFont
The provided code snippet includes necessary dependencies for implementing the `grayscale` function. Write a Python function `def grayscale( image: Union[str, Image.Image], output_path: Optional[str] = None, mode: str = "luminosity", metadata: Optional[List[Dict[str, Any]]] = None, bboxes: Optional[List[Tuple]] = None, bbox_format: Optional[str] = None, ) -> Image.Image` to solve the following problem:
Changes an image to be grayscale @param image: the path to an image or a variable of type PIL.Image.Image to be augmented @param output_path: the path in which the resulting image will be stored. If None, the resulting PIL Image will still be returned @param mode: the type of greyscale conversion to perform; two options are supported ("luminosity" and "average") @param metadata: if set to be a list, metadata about the function execution including its name, the source & dest width, height, etc. will be appended to the inputted list. If set to None, no metadata will be appended or returned @param bboxes: a list of bounding boxes can be passed in here if desired. If provided, this list will be modified in place such that each bounding box is transformed according to this function @param bbox_format: signifies what bounding box format was used in `bboxes`. Must specify `bbox_format` if `bboxes` is provided. Supported bbox_format values are "pascal_voc", "pascal_voc_norm", "coco", and "yolo" @returns: the augmented PIL Image
Here is the function:
def grayscale(
    image: Union[str, Image.Image],
    output_path: Optional[str] = None,
    mode: str = "luminosity",
    metadata: Optional[List[Dict[str, Any]]] = None,
    bboxes: Optional[List[Tuple]] = None,
    bbox_format: Optional[str] = None,
) -> Image.Image:
    """
    Converts an image to grayscale

    @param image: the path to an image or a variable of type PIL.Image.Image
        to be augmented

    @param output_path: the path in which the resulting image will be stored.
        If None, the resulting PIL Image will still be returned

    @param mode: the type of greyscale conversion to perform; two options
        are supported ("luminosity" and "average")

    @param metadata: if set to be a list, metadata about the function execution
        including its name, the source & dest width, height, etc. will be appended
        to the inputted list. If set to None, no metadata will be appended or returned

    @param bboxes: a list of bounding boxes can be passed in here if desired. If
        provided, this list will be modified in place such that each bounding box is
        transformed according to this function

    @param bbox_format: signifies what bounding box format was used in `bboxes`. Must
        specify `bbox_format` if `bboxes` is provided. Supported bbox_format values are
        "pascal_voc", "pascal_voc_norm", "coco", and "yolo"

    @returns: the augmented PIL Image
    """
    assert mode in [
        "luminosity",
        "average",
    ], "Greyscale mode not supported -- choose either 'luminosity' or 'average'"

    image = imutils.validate_and_load_image(image)
    func_kwargs = imutils.get_func_kwargs(metadata, locals())
    src_mode = image.mode

    if src_mode == "L":
        # Input is already single-channel grayscale; pass it through untouched
        aug_image = image
    elif mode == "luminosity":
        # PIL's "L" conversion applies its built-in luminosity weighting;
        # convert back to RGB so the output has three channels
        aug_image = image.convert(mode="L").convert(mode="RGB")
    else:  # mode == "average" (guaranteed by the assert above)
        # Plain per-pixel mean across the channel axis
        averaged = np.average(np.asarray(image).astype(np.float32), axis=2)
        aug_image = Image.fromarray(np.uint8(averaged)).convert(mode="RGB")

    imutils.get_metadata(
        metadata=metadata,
        function_name="grayscale",
        aug_image=aug_image,
        **func_kwargs,
    )

    return imutils.ret_and_save_image(aug_image, output_path, src_mode)
160,658 | import io
import math
import os
import pickle
from copy import deepcopy
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
import numpy as np
from augly import utils
from augly.image import utils as imutils
from augly.image.utils.bboxes import spatial_bbox_helper
from PIL import Image, ImageDraw, ImageEnhance, ImageFilter, ImageFont
The provided code snippet includes necessary dependencies for implementing the `hflip` function. Write a Python function `def hflip( image: Union[str, Image.Image], output_path: Optional[str] = None, metadata: Optional[List[Dict[str, Any]]] = None, bboxes: Optional[List[Tuple]] = None, bbox_format: Optional[str] = None, ) -> Image.Image` to solve the following problem:
Horizontally flips an image @param image: the path to an image or a variable of type PIL.Image.Image to be augmented @param output_path: the path in which the resulting image will be stored. If None, the resulting PIL Image will still be returned @param metadata: if set to be a list, metadata about the function execution including its name, the source & dest width, height, etc. will be appended to the inputted list. If set to None, no metadata will be appended or returned @param bboxes: a list of bounding boxes can be passed in here if desired. If provided, this list will be modified in place such that each bounding box is transformed according to this function @param bbox_format: signifies what bounding box format was used in `bboxes`. Must specify `bbox_format` if `bboxes` is provided. Supported bbox_format values are "pascal_voc", "pascal_voc_norm", "coco", and "yolo" @returns: the augmented PIL Image
Here is the function:
def hflip(
    image: Union[str, Image.Image],
    output_path: Optional[str] = None,
    metadata: Optional[List[Dict[str, Any]]] = None,
    bboxes: Optional[List[Tuple]] = None,
    bbox_format: Optional[str] = None,
) -> Image.Image:
    """
    Horizontally flips an image

    @param image: the path to an image or a variable of type PIL.Image.Image
        to be augmented

    @param output_path: the path in which the resulting image will be stored.
        If None, the resulting PIL Image will still be returned

    @param metadata: if set to be a list, metadata about the function execution
        including its name, the source & dest width, height, etc. will be appended
        to the inputted list. If set to None, no metadata will be appended or returned

    @param bboxes: a list of bounding boxes can be passed in here if desired. If
        provided, this list will be modified in place such that each bounding box is
        transformed according to this function

    @param bbox_format: signifies what bounding box format was used in `bboxes`. Must
        specify `bbox_format` if `bboxes` is provided. Supported bbox_format values are
        "pascal_voc", "pascal_voc_norm", "coco", and "yolo"

    @returns: the augmented PIL Image
    """
    image = imutils.validate_and_load_image(image)
    # NOTE(review): unlike the sibling augmentations in this file, the flip is
    # computed *before* locals() is captured below, so `aug_image` is visible to
    # get_func_kwargs here (presumably filtered by it — verify). Preserve this
    # statement order when refactoring.
    aug_image = image.transpose(Image.FLIP_LEFT_RIGHT)
    func_kwargs = imutils.get_func_kwargs(metadata, locals())
    src_mode = image.mode
    imutils.get_metadata(metadata=metadata, function_name="hflip", **func_kwargs)
    return imutils.ret_and_save_image(aug_image, output_path, src_mode)
160,659 | import io
import math
import os
import pickle
from copy import deepcopy
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
import numpy as np
from augly import utils
from augly.image import utils as imutils
from augly.image.utils.bboxes import spatial_bbox_helper
from PIL import Image, ImageDraw, ImageEnhance, ImageFilter, ImageFont
The provided code snippet includes necessary dependencies for implementing the `masked_composite` function. Write a Python function `def masked_composite( image: Union[str, Image.Image], output_path: Optional[str] = None, mask: Optional[Union[str, Image.Image]] = None, transform_function: Optional[Callable] = None, metadata: Optional[List[Dict[str, Any]]] = None, bboxes: Optional[List[Tuple]] = None, bbox_format: Optional[str] = None, ) -> Image.Image` to solve the following problem:
Applies given augmentation function to the masked area of the image @param image: the path to an image or a variable of type PIL.Image.Image to be augmented @param output_path: the path in which the resulting image will be stored. If None, the resulting PIL Image will still be returned @param mask: the path to an image or a variable of type PIL.Image.Image for masking. This image can have mode “1”, “L”, or “RGBA”, and must have the same size as the other two images. If the mask is not provided the function returns the augmented image @param transform_function: the augmentation function to be applied. If transform_function is not provided, the function returns the input image @param metadata: if set to be a list, metadata about the function execution including its name, the source & dest width, height, etc. will be appended to the inputted list. If set to None, no metadata will be appended or returned @param bboxes: a list of bounding boxes can be passed in here if desired. If provided, this list will be modified in place such that each bounding box is transformed according to this function @param bbox_format: signifies what bounding box format was used in `bboxes`. Must specify `bbox_format` if `bboxes` is provided. Supported bbox_format values are "pascal_voc", "pascal_voc_norm", "coco", and "yolo" @returns: the augmented PIL Image
Here is the function:
def masked_composite(
    image: Union[str, Image.Image],
    output_path: Optional[str] = None,
    mask: Optional[Union[str, Image.Image]] = None,
    transform_function: Optional[Callable] = None,
    metadata: Optional[List[Dict[str, Any]]] = None,
    bboxes: Optional[List[Tuple]] = None,
    bbox_format: Optional[str] = None,
) -> Image.Image:
    """
    Applies given augmentation function to the masked area of the image

    @param image: the path to an image or a variable of type PIL.Image.Image
        to be augmented

    @param output_path: the path in which the resulting image will be stored.
        If None, the resulting PIL Image will still be returned

    @param mask: the path to an image or a variable of type PIL.Image.Image for
        masking. This image can have mode “1”, “L”, or “RGBA”, and must have the
        same size as the other two images. If the mask is not provided the function
        returns the augmented image

    @param transform_function: the augmentation function to be applied. If
        transform_function is not provided, the function returns the input image

    @param metadata: if set to be a list, metadata about the function execution
        including its name, the source & dest width, height, etc. will be appended
        to the inputted list. If set to None, no metadata will be appended or returned

    @param bboxes: a list of bounding boxes can be passed in here if desired. If
        provided, this list will be modified in place such that each bounding box is
        transformed according to this function

    @param bbox_format: signifies what bounding box format was used in `bboxes`. Must
        specify `bbox_format` if `bboxes` is provided. Supported bbox_format values are
        "pascal_voc", "pascal_voc_norm", "coco", and "yolo"

    @returns: the augmented PIL Image
    """
    image = imutils.validate_and_load_image(image)
    # Snapshot the arguments for metadata; note this deep-copies the already
    # loaded PIL image as well, since `image` was reassigned above.
    func_kwargs = deepcopy(locals())
    if transform_function is not None:
        # Record a readable identifier for the transform in the metadata:
        # plain functions expose __name__, transform *objects* (callables
        # without __name__) fall back to their class name.
        try:
            func_kwargs["transform_function"] = transform_function.__name__
        except AttributeError:
            func_kwargs["transform_function"] = type(transform_function).__name__
    func_kwargs = imutils.get_func_kwargs(metadata, func_kwargs)
    src_mode = image.mode

    if transform_function is None:
        # No transform: pass the input through. NOTE(review): this saves to
        # output_path here *and* again at the final return (the second save
        # includes src_mode) — presumably idempotent, but verify.
        masked_image = imutils.ret_and_save_image(image, output_path)
    else:
        aug_image = transform_function(image)
        if mask is None:
            # No mask: the transform applies to the whole image
            masked_image = imutils.ret_and_save_image(aug_image, output_path, src_mode)
        else:
            mask = imutils.validate_and_load_image(mask)
            assert image.size == mask.size, "Mask size must be equal to image size"
            # Keep augmented pixels where the mask is opaque, original elsewhere
            masked_image = Image.composite(aug_image, image, mask)

    imutils.get_metadata(
        metadata=metadata,
        function_name="masked_composite",
        aug_image=masked_image,
        **func_kwargs,
    )

    return imutils.ret_and_save_image(masked_image, output_path, src_mode)
160,660 | import io
import math
import os
import pickle
from copy import deepcopy
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
import numpy as np
from augly import utils
from augly.image import utils as imutils
from augly.image.utils.bboxes import spatial_bbox_helper
from PIL import Image, ImageDraw, ImageEnhance, ImageFilter, ImageFont
The provided code snippet includes necessary dependencies for implementing the `meme_format` function. Write a Python function `def meme_format( image: Union[str, Image.Image], output_path: Optional[str] = None, text: str = "LOL", font_file: str = utils.MEME_DEFAULT_FONT, opacity: float = 1.0, text_color: Tuple[int, int, int] = utils.DEFAULT_COLOR, caption_height: int = 250, meme_bg_color: Tuple[int, int, int] = utils.WHITE_RGB_COLOR, metadata: Optional[List[Dict[str, Any]]] = None, bboxes: Optional[List[Tuple]] = None, bbox_format: Optional[str] = None, ) -> Image.Image` to solve the following problem:
Creates a new image that looks like a meme, given text and an image @param image: the path to an image or a variable of type PIL.Image.Image to be augmented @param output_path: the path in which the resulting image will be stored. If None, the resulting PIL Image will still be returned @param text: the text to be overlaid/used in the meme. note: if using a very long string, please add in newline characters such that the text remains in a readable font size. @param font_file: iopath uri to a .ttf font file @param opacity: the lower the opacity, the more transparent the text @param text_color: color of the text in RGB values @param caption_height: the height of the meme caption @param meme_bg_color: background color of the meme caption in RGB values @param metadata: if set to be a list, metadata about the function execution including its name, the source & dest width, height, etc. will be appended to the inputted list. If set to None, no metadata will be appended or returned @param bboxes: a list of bounding boxes can be passed in here if desired. If provided, this list will be modified in place such that each bounding box is transformed according to this function @param bbox_format: signifies what bounding box format was used in `bboxes`. Must specify `bbox_format` if `bboxes` is provided. Supported bbox_format values are "pascal_voc", "pascal_voc_norm", "coco", and "yolo" @returns: the augmented PIL Image
Here is the function:
def meme_format(
    image: Union[str, Image.Image],
    output_path: Optional[str] = None,
    text: str = "LOL",
    font_file: str = utils.MEME_DEFAULT_FONT,
    opacity: float = 1.0,
    text_color: Tuple[int, int, int] = utils.DEFAULT_COLOR,
    caption_height: int = 250,
    meme_bg_color: Tuple[int, int, int] = utils.WHITE_RGB_COLOR,
    metadata: Optional[List[Dict[str, Any]]] = None,
    bboxes: Optional[List[Tuple]] = None,
    bbox_format: Optional[str] = None,
) -> Image.Image:
    """
    Creates a new image that looks like a meme, given text and an image

    @param image: the path to an image or a variable of type PIL.Image.Image
        to be augmented

    @param output_path: the path in which the resulting image will be stored.
        If None, the resulting PIL Image will still be returned

    @param text: the text to be overlaid/used in the meme. note: if using a very
        long string, please add in newline characters such that the text remains
        in a readable font size.

    @param font_file: iopath uri to a .ttf font file

    @param opacity: the lower the opacity, the more transparent the text

    @param text_color: color of the text in RGB values

    @param caption_height: the height of the meme caption

    @param meme_bg_color: background color of the meme caption in RGB values

    @param metadata: if set to be a list, metadata about the function execution
        including its name, the source & dest width, height, etc. will be appended
        to the inputted list. If set to None, no metadata will be appended or returned

    @param bboxes: a list of bounding boxes can be passed in here if desired. If
        provided, this list will be modified in place such that each bounding box is
        transformed according to this function

    @param bbox_format: signifies what bounding box format was used in `bboxes`. Must
        specify `bbox_format` if `bboxes` is provided. Supported bbox_format values are
        "pascal_voc", "pascal_voc_norm", "coco", and "yolo"

    @returns: the augmented PIL Image
    """
    assert isinstance(text, str), "Expected variable `text` to be a string"
    assert 0.0 <= opacity <= 1.0, "Opacity must be a value in the range [0.0, 1.0]"
    assert caption_height > 10, "Caption height must be greater than 10"
    utils.validate_rgb_color(text_color)
    utils.validate_rgb_color(meme_bg_color)

    image = imutils.validate_and_load_image(image)
    func_kwargs = imutils.get_func_kwargs(metadata, locals())
    src_mode = image.mode

    width, height = image.size
    local_font_path = utils.pathmgr.get_local_path(font_file)
    font_size = caption_height - 10

    # New canvas: caption strip on top, original image pasted below it
    meme = Image.new("RGB", (width, height + caption_height), meme_bg_color)
    meme.paste(image, (0, caption_height))

    draw = ImageDraw.Draw(meme)

    x_pos, y_pos = 5, 5
    ascender_adjustment = 40
    # Shrink the font in steps of 5 until the rendered text fits inside the
    # caption strip (with a 10px margin in each dimension). Each pass also
    # recenters (x_pos, y_pos) for the current font size, so the bbox on the
    # next pass is measured at the updated anchor.
    # NOTE(review): text that never fits could drive font_size to <= 0, where
    # ImageFont.truetype raises — relies on the caller inserting newlines for
    # very long strings, per the docstring. Confirm acceptable.
    while True:
        font = ImageFont.truetype(local_font_path, font_size)
        text_bbox = draw.multiline_textbbox(
            (x_pos, y_pos),
            text,
            # pyre-fixme[6]: Expected `Optional[ImageFont._Font]` for 3rd param but got
            #  `FreeTypeFont`.
            font=font,
            anchor="la",
            align="center",
        )
        text_width, text_height = (
            text_bbox[2] - text_bbox[0],
            text_bbox[3] - text_bbox[1],
        )
        x_pos = round((width - text_width) / 2)
        # Shift up by `ascender_adjustment` — presumably compensates for the
        # font ascender space above the "la"-anchored bbox; verify per font.
        y_pos = round((caption_height - text_height) / 2) - ascender_adjustment

        if text_width <= (width - 10) and text_height <= (caption_height - 10):
            break

        font_size -= 5

    draw.multiline_text(
        (x_pos, y_pos),
        text,
        # pyre-fixme[6]: Expected `Optional[ImageFont._Font]` for 3rd param but got
        #  `FreeTypeFont`.
        font=font,
        anchor="la",
        fill=(text_color[0], text_color[1], text_color[2], round(opacity * 255)),
        align="center",
    )

    imutils.get_metadata(
        metadata=metadata,
        function_name="meme_format",
        aug_image=meme,
        **func_kwargs,
    )

    return imutils.ret_and_save_image(meme, output_path, src_mode)
160,661 | import io
import math
import os
import pickle
from copy import deepcopy
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
import numpy as np
from augly import utils
from augly.image import utils as imutils
from augly.image.utils.bboxes import spatial_bbox_helper
from PIL import Image, ImageDraw, ImageEnhance, ImageFilter, ImageFont
The provided code snippet includes necessary dependencies for implementing the `opacity` function. Write a Python function `def opacity( image: Union[str, Image.Image], output_path: Optional[str] = None, level: float = 1.0, metadata: Optional[List[Dict[str, Any]]] = None, bboxes: Optional[List[Tuple]] = None, bbox_format: Optional[str] = None, ) -> Image.Image` to solve the following problem:
Alter the opacity of an image @param image: the path to an image or a variable of type PIL.Image.Image to be augmented @param output_path: the path in which the resulting image will be stored. If None, the resulting PIL Image will still be returned @param level: the level the opacity should be set to, where 0 means completely transparent and 1 means no transparency at all @param metadata: if set to be a list, metadata about the function execution including its name, the source & dest width, height, etc. will be appended to the inputted list. If set to None, no metadata will be appended or returned @param bboxes: a list of bounding boxes can be passed in here if desired. If provided, this list will be modified in place such that each bounding box is transformed according to this function @param bbox_format: signifies what bounding box format was used in `bboxes`. Must specify `bbox_format` if `bboxes` is provided. Supported bbox_format values are "pascal_voc", "pascal_voc_norm", "coco", and "yolo" @returns: the augmented PIL Image
Here is the function:
def opacity(
    image: Union[str, Image.Image],
    output_path: Optional[str] = None,
    level: float = 1.0,
    metadata: Optional[List[Dict[str, Any]]] = None,
    bboxes: Optional[List[Tuple]] = None,
    bbox_format: Optional[str] = None,
) -> Image.Image:
    """
    Alter the opacity of an image

    @param image: the path to an image or a variable of type PIL.Image.Image
        to be augmented

    @param output_path: the path in which the resulting image will be stored.
        If None, the resulting PIL Image will still be returned

    @param level: the level the opacity should be set to, where 0 means
        completely transparent and 1 means no transparency at all

    @param metadata: if set to be a list, metadata about the function execution
        including its name, the source & dest width, height, etc. will be appended
        to the inputted list. If set to None, no metadata will be appended or returned

    @param bboxes: a list of bounding boxes can be passed in here if desired. If
        provided, this list will be modified in place such that each bounding box is
        transformed according to this function

    @param bbox_format: signifies what bounding box format was used in `bboxes`. Must
        specify `bbox_format` if `bboxes` is provided. Supported bbox_format values are
        "pascal_voc", "pascal_voc_norm", "coco", and "yolo"

    @returns: the augmented PIL Image
    """
    assert 0 <= level <= 1, "level must be a value in the range [0, 1]"

    image = imutils.validate_and_load_image(image)
    func_kwargs = imutils.get_func_kwargs(metadata, locals())
    src_mode = image.mode

    image = image.convert(mode="RGBA")
    # `image` is already RGBA here, so read its alpha band directly rather
    # than round-tripping through a second, redundant convert("RGBA") copy
    mask = image.getchannel("A")
    # Scale the existing alpha by `level`: 0 -> fully transparent, 1 -> unchanged
    mask = Image.fromarray((np.array(mask) * level).astype(np.uint8))
    # Composite over a fully transparent background so the scaled alpha is
    # preserved in the output
    background = Image.new("RGBA", image.size, (255, 255, 255, 0))
    aug_image = Image.composite(image, background, mask)

    imutils.get_metadata(
        metadata=metadata,
        function_name="opacity",
        aug_image=aug_image,
        **func_kwargs,
    )

    return imutils.ret_and_save_image(aug_image, output_path, src_mode)
160,662 | import io
import math
import os
import pickle
from copy import deepcopy
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
import numpy as np
from augly import utils
from augly.image import utils as imutils
from augly.image.utils.bboxes import spatial_bbox_helper
from PIL import Image, ImageDraw, ImageEnhance, ImageFilter, ImageFont
def overlay_image(
    image: Union[str, Image.Image],
    overlay: Union[str, Image.Image],
    output_path: Optional[str] = None,
    opacity: float = 1.0,
    overlay_size: float = 1.0,
    x_pos: float = 0.4,
    y_pos: float = 0.4,
    max_visible_opacity: float = 0.75,
    metadata: Optional[List[Dict[str, Any]]] = None,
    bboxes: Optional[List[Tuple]] = None,
    bbox_format: Optional[str] = None,
) -> Image.Image:
    """
    Overlays an image onto another image at position (width * x_pos, height * y_pos)

    @param image: the path to an image or a variable of type PIL.Image.Image
        to be augmented

    @param overlay: the path to an image or a variable of type PIL.Image.Image
        that will be overlaid

    @param output_path: the path in which the resulting image will be stored.
        If None, the resulting PIL Image will still be returned

    @param opacity: the lower the opacity, the more transparent the overlaid image

    @param overlay_size: size of the overlaid image relative to the height
        of the original image

    @param x_pos: position of the overlaid image relative to the image width

    @param y_pos: position of the overlaid image relative to the image height

    @param max_visible_opacity: if bboxes are passed in, this param will be used as the
        maximum opacity value through which the src image will still be considered
        visible; see the function `overlay_image_bboxes_helper` in `utils/bboxes.py` for
        more details about how this is used. If bboxes are not passed in this is not used

    @param metadata: if set to be a list, metadata about the function execution
        including its name, the source & dest width, height, etc. will be appended
        to the inputted list. If set to None, no metadata will be appended or returned

    @param bboxes: a list of bounding boxes can be passed in here if desired. If
        provided, this list will be modified in place such that each bounding box is
        transformed according to this function

    @param bbox_format: signifies what bounding box format was used in `bboxes`. Must
        specify `bbox_format` if `bboxes` is provided. Supported bbox_format values are
        "pascal_voc", "pascal_voc_norm", "coco", and "yolo"

    @returns: the augmented PIL Image
    """
    assert 0.0 <= opacity <= 1.0, "Opacity must be a value in the range [0, 1]"
    assert 0.0 <= overlay_size <= 1.0, "Image size must be a value in the range [0, 1]"
    assert 0.0 <= x_pos <= 1.0, "x_pos must be a value in the range [0, 1]"
    assert 0.0 <= y_pos <= 1.0, "y_pos must be a value in the range [0, 1]"

    image = imutils.validate_and_load_image(image)
    func_kwargs = imutils.get_func_kwargs(metadata, locals())
    src_mode = image.mode

    overlay = imutils.validate_and_load_image(overlay)

    im_width, im_height = image.size
    overlay_width, overlay_height = overlay.size
    # Scale the overlay so its height is overlay_size * base-image height,
    # preserving the overlay's aspect ratio; max(1, ...) guards against a
    # zero-pixel-high overlay when overlay_size is very small
    new_height = max(1, int(im_height * overlay_size))
    new_width = int(overlay_width * new_height / overlay_height)
    overlay = overlay.resize((new_width, new_height))

    try:
        # Use the overlay's own alpha channel, attenuated by `opacity`
        mask = overlay.convert("RGBA").getchannel("A")
        mask = Image.fromarray((np.array(mask) * opacity).astype(np.uint8))
    except ValueError:
        # Fallback: a uniform mask at the requested opacity.
        # NOTE(review): after convert("RGBA") an "A" band should always exist,
        # so this path looks defensive — confirm which inputs reach it.
        mask = Image.new(mode="L", size=overlay.size, color=int(opacity * 255))

    x = int(im_width * x_pos)
    y = int(im_height * y_pos)
    aug_image = image.convert(mode="RGBA")
    aug_image.paste(im=overlay, box=(x, y), mask=mask)

    imutils.get_metadata(
        metadata=metadata,
        function_name="overlay_image",
        aug_image=aug_image,
        **func_kwargs,
    )

    return imutils.ret_and_save_image(aug_image, output_path, src_mode)
The provided code snippet includes necessary dependencies for implementing the `overlay_emoji` function. Write a Python function `def overlay_emoji( image: Union[str, Image.Image], output_path: Optional[str] = None, emoji_path: str = utils.EMOJI_PATH, opacity: float = 1.0, emoji_size: float = 0.15, x_pos: float = 0.4, y_pos: float = 0.8, metadata: Optional[List[Dict[str, Any]]] = None, bboxes: Optional[List[Tuple]] = None, bbox_format: Optional[str] = None, ) -> Image.Image` to solve the following problem:
Overlay an emoji onto the original image @param image: the path to an image or a variable of type PIL.Image.Image to be augmented @param output_path: the path in which the resulting image will be stored. If None, the resulting PIL Image will still be returned @param emoji_path: iopath uri to the emoji image @param opacity: the lower the opacity, the more transparent the overlaid emoji @param emoji_size: size of the emoji is emoji_size * height of the original image @param x_pos: position of emoji relative to the image width @param y_pos: position of emoji relative to the image height @param metadata: if set to be a list, metadata about the function execution including its name, the source & dest width, height, etc. will be appended to the inputted list. If set to None, no metadata will be appended or returned @param bboxes: a list of bounding boxes can be passed in here if desired. If provided, this list will be modified in place such that each bounding box is transformed according to this function @param bbox_format: signifies what bounding box format was used in `bboxes`. Must specify `bbox_format` if `bboxes` is provided. Supported bbox_format values are "pascal_voc", "pascal_voc_norm", "coco", and "yolo" @returns: the augmented PIL Image
Here is the function:
def overlay_emoji(
    image: Union[str, Image.Image],
    output_path: Optional[str] = None,
    emoji_path: str = utils.EMOJI_PATH,
    opacity: float = 1.0,
    emoji_size: float = 0.15,
    x_pos: float = 0.4,
    y_pos: float = 0.8,
    metadata: Optional[List[Dict[str, Any]]] = None,
    bboxes: Optional[List[Tuple]] = None,
    bbox_format: Optional[str] = None,
) -> Image.Image:
    """
    Overlay an emoji onto the original image

    @param image: the path to an image or a variable of type PIL.Image.Image
        to be augmented

    @param output_path: the path in which the resulting image will be stored.
        If None, the resulting PIL Image will still be returned

    @param emoji_path: iopath uri to the emoji image

    @param opacity: the lower the opacity, the more transparent the overlaid emoji

    @param emoji_size: size of the emoji is emoji_size * height of the original image

    @param x_pos: position of emoji relative to the image width

    @param y_pos: position of emoji relative to the image height

    @param metadata: if set to be a list, metadata about the function execution
        including its name, the source & dest width, height, etc. will be appended
        to the inputted list. If set to None, no metadata will be appended or returned

    @param bboxes: a list of bounding boxes can be passed in here if desired. If
        provided, this list will be modified in place such that each bounding box is
        transformed according to this function

    @param bbox_format: signifies what bounding box format was used in `bboxes`. Must
        specify `bbox_format` if `bboxes` is provided. Supported bbox_format values are
        "pascal_voc", "pascal_voc_norm", "coco", and "yolo"

    @returns: the augmented PIL Image
    """
    image = imutils.validate_and_load_image(image)
    func_kwargs = imutils.get_func_kwargs(metadata, locals())

    # Fetch a local copy of the emoji asset, then delegate the actual
    # compositing (and any saving to output_path) to overlay_image
    overlay_kwargs = {
        "overlay": utils.pathmgr.get_local_path(emoji_path),
        "output_path": output_path,
        "opacity": opacity,
        "overlay_size": emoji_size,
        "x_pos": x_pos,
        "y_pos": y_pos,
    }
    aug_image = overlay_image(image, **overlay_kwargs)

    imutils.get_metadata(
        metadata=metadata,
        function_name="overlay_emoji",
        **func_kwargs,
    )

    return aug_image
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.