| id (int64, 0–190k) | prompt (string, 21–13.4M chars) | docstring (string, 1–12k chars, nullable) |
|---|---|---|
156,903 | import gc
import numpy as np
import gradio as gr
import json
import torch
import torchaudio
from aeiou.viz import audio_spectrogram_image
from einops import rearrange
from safetensors.torch import load_file
from torch.nn import functional as F
from torchaudio import transforms as T
from ..inference.generation import generate_diffusion_cond, generate_diffusion_uncond
from ..models.factory import create_model_from_config
from ..models.pretrained import get_pretrained_model
from ..models.utils import load_ckpt_state_dict
from ..inference.utils import prepare_audio
from ..training.utils import copy_state_dict
def load_model(model_config=None, model_ckpt_path=None, pretrained_name=None, pretransform_ckpt_path=None, device="cuda", model_half=False):
    """Load a model and publish it through module-level globals.

    Exactly one source should be supplied: either `pretrained_name`, or both
    `model_config` and `model_ckpt_path`. Optionally swaps in a separate
    pretransform checkpoint, moves the model to `device`, switches to eval
    mode with gradients disabled, and optionally casts to float16.

    Returns:
        (model, model_config) tuple.
    """
    global model, sample_rate, sample_size

    if pretrained_name is not None:
        print(f"Loading pretrained model {pretrained_name}")
        model, model_config = get_pretrained_model(pretrained_name)
    elif model_config is not None and model_ckpt_path is not None:
        print(f"Creating model from config")
        model = create_model_from_config(model_config)

        print(f"Loading model checkpoint from {model_ckpt_path}")
        # Copy checkpoint weights into the freshly constructed model.
        copy_state_dict(model, load_ckpt_state_dict(model_ckpt_path))

    sample_rate = model_config["sample_rate"]
    sample_size = model_config["sample_size"]

    if pretransform_ckpt_path is not None:
        print(f"Loading pretransform checkpoint from {pretransform_ckpt_path}")
        # strict=False: the checkpoint may cover only part of the pretransform.
        model.pretransform.load_state_dict(load_ckpt_state_dict(pretransform_ckpt_path), strict=False)
        print(f"Done loading pretransform")

    model.to(device).eval().requires_grad_(False)

    if model_half:
        model.to(torch.float16)

    print(f"Done loading model")

    return model, model_config
def create_txt2audio_ui(model_config):
    """Build the text-to-audio UI: a generation tab and an inpainting tab."""
    with gr.Blocks() as ui:
        with gr.Tab("Generation"):
            create_sampling_ui(model_config)
        with gr.Tab("Inpainting"):
            # Same sampling UI, but wired up for inpainting.
            create_sampling_ui(model_config, inpainting=True)
    return ui
def create_diffusion_uncond_ui(model_config):
    """Build the UI for unconditional diffusion sampling."""
    with gr.Blocks() as ui:
        create_uncond_sampling_ui(model_config)
    return ui
def create_autoencoder_ui(model_config):
    """Build the autoencoder round-trip UI (input -> encode/decode -> output).

    For DAC-RVQ style bottlenecks a quantizer-count slider is exposed;
    otherwise the slider is created but hidden.
    """
    model_cfg = model_config["model"] if "model" in model_config else {}
    is_dac_rvq = "bottleneck" in model_cfg and model_cfg["bottleneck"]["type"] in ("dac_rvq", "dac_rvq_vae")

    n_quantizers = model_cfg["bottleneck"]["config"]["n_codebooks"] if is_dac_rvq else 0

    with gr.Blocks() as ui:
        input_audio = gr.Audio(label="Input audio")
        output_audio = gr.Audio(label="Output audio", interactive=False)
        n_quantizers_slider = gr.Slider(minimum=1, maximum=n_quantizers, step=1, value=n_quantizers, label="# quantizers", visible=is_dac_rvq)
        latent_noise_slider = gr.Slider(minimum=0.0, maximum=10.0, step=0.001, value=0.0, label="Add latent noise")
        process_button = gr.Button("Process", variant='primary', scale=1)
        process_button.click(fn=autoencoder_process, inputs=[input_audio, latent_noise_slider, n_quantizers_slider], outputs=output_audio, api_name="process")
    return ui
def create_diffusion_prior_ui(model_config):
    """Build the diffusion-prior UI: audio in/out plus sampler controls."""
    with gr.Blocks() as ui:
        input_audio = gr.Audio(label="Input audio")
        output_audio = gr.Audio(label="Output audio", interactive=False)

        # Sampler parameters, laid out on a single row.
        with gr.Row():
            steps_slider = gr.Slider(minimum=1, maximum=500, step=1, value=100, label="Steps")
            sampler_type_dropdown = gr.Dropdown(["dpmpp-2m-sde", "dpmpp-3m-sde", "k-heun", "k-lms", "k-dpmpp-2s-ancestral", "k-dpm-2", "k-dpm-fast"], label="Sampler type", value="dpmpp-2m-sde")
            sigma_min_slider = gr.Slider(minimum=0.0, maximum=2.0, step=0.01, value=0.03, label="Sigma min")
            sigma_max_slider = gr.Slider(minimum=0.0, maximum=200.0, step=0.1, value=80, label="Sigma max")

        process_button = gr.Button("Process", variant='primary', scale=1)
        process_button.click(fn=diffusion_prior_process, inputs=[input_audio, steps_slider, sampler_type_dropdown, sigma_min_slider, sigma_max_slider], outputs=output_audio, api_name="process")
    return ui
def create_lm_ui(model_config):
    """Build the language-model generation UI with sampling controls."""
    with gr.Blocks() as ui:
        output_audio = gr.Audio(label="Output audio", interactive=False)
        audio_spectrogram_output = gr.Gallery(label="Output spectrogram", show_label=False)

        # Sampling parameters on one row.
        with gr.Row():
            temperature_slider = gr.Slider(minimum=0, maximum=5, step=0.01, value=1.0, label="Temperature")
            top_p_slider = gr.Slider(minimum=0, maximum=1, step=0.01, value=0.95, label="Top p")
            top_k_slider = gr.Slider(minimum=0, maximum=100, step=1, value=0, label="Top k")

        generate_button = gr.Button("Generate", variant='primary', scale=1)
        generate_button.click(
            fn=generate_lm,
            inputs=[temperature_slider, top_p_slider, top_k_slider],
            outputs=[output_audio, audio_spectrogram_output],
            api_name="generate",
        )
    return ui
def create_ui(model_config_path=None, ckpt_path=None, pretrained_name=None, pretransform_ckpt_path=None, model_half=False):
    """Create the Gradio UI matching the loaded model's type.

    Exactly one of `pretrained_name` or (`model_config_path` + `ckpt_path`)
    must be supplied. Loads the model onto CUDA when available, then builds
    the UI for `model_config["model_type"]`.

    Raises:
        ValueError: if the config declares an unsupported model type.
    """
    assert (pretrained_name is not None) ^ (model_config_path is not None and ckpt_path is not None), "Must specify either pretrained name or provide a model config and checkpoint, but not both"

    if model_config_path is not None:
        # Load config from json file
        with open(model_config_path) as f:
            model_config = json.load(f)
    else:
        model_config = None

    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    _, model_config = load_model(model_config, ckpt_path, pretrained_name=pretrained_name, pretransform_ckpt_path=pretransform_ckpt_path, model_half=model_half, device=device)

    model_type = model_config["model_type"]

    # Dispatch table from model type to UI builder.
    ui_builders = {
        "diffusion_cond": create_txt2audio_ui,
        "diffusion_uncond": create_diffusion_uncond_ui,
        "autoencoder": create_autoencoder_ui,
        "diffusion_autoencoder": create_autoencoder_ui,
        "diffusion_prior": create_diffusion_prior_ui,
        "lm": create_lm_ui,
    }

    try:
        builder = ui_builders[model_type]
    except KeyError:
        # Previously an unknown type fell through and crashed later with an
        # UnboundLocalError on `ui`; fail fast with a clear message instead.
        raise ValueError(f"Unknown model type: {model_type}") from None

    return builder(model_config)
156,904 | from functools import reduce, partial
from packaging import version
from einops import rearrange, repeat
from einops.layers.torch import Rearrange
import torch
import torch.nn.functional as F
from torch import nn, einsum
from torch.cuda.amp import autocast
from typing import Callable, Literal
def checkpoint(function, *args, **kwargs):
    """Run `function` under activation checkpointing, non-reentrant by default."""
    # Only inject the default when the caller did not choose explicitly.
    if "use_reentrant" not in kwargs:
        kwargs["use_reentrant"] = False
    return torch.utils.checkpoint.checkpoint(function, *args, **kwargs)
156,905 | from functools import reduce, partial
from packaging import version
from einops import rearrange, repeat
from einops.layers.torch import Rearrange
import torch
import torch.nn.functional as F
from torch import nn, einsum
from torch.cuda.amp import autocast
from typing import Callable, Literal
def create_causal_mask(i, j, device):
    """Boolean (i, j) mask, True strictly above the diagonal shifted by j - i.

    With i == j this marks every future position (strict upper triangle).
    """
    ones = torch.ones((i, j), dtype=torch.bool, device=device)
    return ones.triu(j - i + 1)
156,906 | from functools import reduce, partial
from packaging import version
from einops import rearrange, repeat
from einops.layers.torch import Rearrange
import torch
import torch.nn.functional as F
from torch import nn, einsum
from torch.cuda.amp import autocast
from typing import Callable, Literal
def or_reduce(masks):
    """Fold a non-empty sequence of masks together with `|` (bitwise/logical OR)."""
    first, *remaining = masks
    return reduce(lambda acc, m: acc | m, remaining, first)
156,907 | from functools import reduce, partial
from packaging import version
from einops import rearrange, repeat
from einops.layers.torch import Rearrange
import torch
import torch.nn.functional as F
from torch import nn, einsum
from torch.cuda.amp import autocast
from typing import Callable, Literal
def rotate_half(x):
    """Rotate the two stacked halves of the last dim: (x1, x2) -> (-x2, x1).

    The last dimension is split as (j d) with j = 2 — i.e. first half / second
    half, not interleaved pairs — matching the original rearrange-based code.
    """
    stacked = x.unflatten(-1, (2, -1))
    first_half, second_half = stacked.unbind(dim=-2)
    return torch.cat((-second_half, first_half), dim=-1)
def apply_rotary_pos_emb(t, freqs, scale = 1):
    """Apply rotary position embeddings to the first `rot_dim` features of `t`.

    Computation is promoted to at least float32 for numerical stability and
    the result is cast back to `t`'s original dtype.
    """
    out_dtype = t.dtype

    # Promote to a common dtype that is at least float32.
    compute_dtype = reduce(torch.promote_types, (t.dtype, freqs.dtype, torch.float32))

    rot_dim, seq_len = freqs.shape[-1], t.shape[-2]
    freqs, t = freqs.to(compute_dtype), t.to(compute_dtype)

    # Only the trailing seq_len positions of the frequency table apply.
    freqs = freqs[-seq_len:, :]

    # Broadcast per-batch freqs over the heads dimension when needed
    # (equivalent to rearrange 'b n d -> b 1 n d').
    if t.ndim == 4 and freqs.ndim == 3:
        freqs = freqs.unsqueeze(1)

    # Partial rotary embeddings (Wang et al., GPT-J): only the first rot_dim
    # features get rotated; the rest pass through untouched.
    rotated, passthrough = t[..., :rot_dim], t[..., rot_dim:]
    rotated = (rotated * freqs.cos() * scale) + (rotate_half(rotated) * freqs.sin() * scale)

    return torch.cat((rotated.to(out_dtype), passthrough.to(out_dtype)), dim = -1)
156,908 | import torch
from safetensors.torch import load_file
from torch.nn.utils import remove_weight_norm
def remove_weight_norm_from_model(model):
    """Strip weight normalization from every module in `model` that has it.

    The previous implementation called `remove_weight_norm` on *any* module
    exposing a `weight` attribute, which raises ValueError for layers that
    were never weight-normalized (e.g. a plain Linear). Such modules are now
    skipped instead of crashing the whole pass.

    Args:
        model: Any nn.Module tree.

    Returns:
        The same model instance, modified in place.
    """
    for module in model.modules():
        if hasattr(module, "weight"):
            try:
                remove_weight_norm(module)
            except ValueError:
                # Module has a weight but no weight_norm hook — leave it alone.
                continue
            print(f"Removing weight norm from {module}")
    return model
156,909 | import torch
from safetensors.torch import load_file
from torch.nn.utils import remove_weight_norm
def multinomial(input: torch.Tensor, num_samples: int, replacement=False, *, generator=None):
    """torch.multinomial generalized to inputs with any number of leading dims.

    Args:
        input (torch.Tensor): Probabilities, with candidates on the last dim.
        num_samples (int): Number of samples to draw.
        replacement (bool): Whether to sample with replacement.
    Keywords args:
        generator (torch.Generator): Optional RNG for reproducible sampling.
    Returns:
        torch.Tensor: Sampled indices, same leading shape as `input` with
        `num_samples` entries on the last dimension.
    """
    if num_samples == 1:
        # Exponential-race trick: argmax of p/q with q ~ Exp(1) draws one
        # categorical sample per row without flattening the tensor.
        noise = torch.empty_like(input).exponential_(1, generator=generator)
        return torch.argmax(input / noise, dim=-1, keepdim=True).to(torch.int64)

    flat = input.reshape(-1, input.shape[-1])
    flat_samples = torch.multinomial(flat, num_samples=num_samples, replacement=replacement, generator=generator)
    return flat_samples.reshape(*input.shape[:-1], -1)
The provided code snippet includes necessary dependencies for implementing the `sample_top_k` function. Write a Python function `def sample_top_k(probs: torch.Tensor, k: int) -> torch.Tensor` to solve the following problem:
Sample next token from top K values along the last dimension of the input probs tensor. Args: probs (torch.Tensor): Input probabilities with token candidates on the last dimension. k (int): The k in “top-k”. Returns: torch.Tensor: Sampled tokens.
Here is the function:
def sample_top_k(probs: torch.Tensor, k: int) -> torch.Tensor:
    """Sample next token from the top-k values along the last dim of `probs`.

    Fix: the previous version masked and renormalized `probs` *in place*
    (`*=` and `div_`), silently corrupting the caller's tensor. The filtering
    now happens on a copy.

    Args:
        probs (torch.Tensor): Input probabilities with token candidates on the last dimension.
        k (int): The k in "top-k".

    Returns:
        torch.Tensor: Sampled token indices (last dimension of size 1).
    """
    top_k_value, _ = torch.topk(probs, k, dim=-1)
    # Smallest value still inside the top k, kept broadcastable.
    min_value_top_k = top_k_value[..., [-1]]
    # Zero out everything below the threshold, then renormalize — on a copy.
    filtered = probs * (probs >= min_value_top_k).float()
    filtered = filtered / filtered.sum(dim=-1, keepdim=True)
    return multinomial(filtered, num_samples=1)
156,910 | import torch
from safetensors.torch import load_file
from torch.nn.utils import remove_weight_norm
def multinomial(input: torch.Tensor, num_samples: int, replacement=False, *, generator=None):
    """torch.multinomial for tensors with arbitrary leading dimensions.

    Args:
        input (torch.Tensor): Probabilities; candidates live on the last dim.
        num_samples (int): How many samples to draw.
        replacement (bool): Sample with replacement or not.
    Keywords args:
        generator (torch.Generator): Optional RNG for reproducibility.
    Returns:
        torch.Tensor: Indices drawn from the last dimension; leading shape
        matches `input`, last dimension holds `num_samples` entries.
    """
    if num_samples == 1:
        # Single-sample fast path via the exponential-race (Gumbel-like) trick.
        race = torch.empty_like(input).exponential_(1, generator=generator)
        winner = torch.argmax(input / race, dim=-1, keepdim=True)
        return winner.to(torch.int64)

    two_d = input.reshape(-1, input.shape[-1])
    drawn = torch.multinomial(two_d, num_samples=num_samples, replacement=replacement, generator=generator)
    return drawn.reshape(*input.shape[:-1], -1)
The provided code snippet includes necessary dependencies for implementing the `sample_top_p` function. Write a Python function `def sample_top_p(probs: torch.Tensor, p: float) -> torch.Tensor` to solve the following problem:
Sample next token from top P probabilities along the last dimension of the input probs tensor. Args: probs (torch.Tensor): Input probabilities with token candidates on the last dimension. p (int): The p in “top-p”. Returns: torch.Tensor: Sampled tokens.
Here is the function:
def sample_top_p(probs: torch.Tensor, p: float) -> torch.Tensor:
    """Sample next token from the top-p (nucleus) probability mass.

    Args:
        probs (torch.Tensor): Input probabilities with token candidates on the last dimension.
        p (float): The p in "top-p".

    Returns:
        torch.Tensor: Sampled token indices (last dimension of size 1).
    """
    sorted_probs, sorted_indices = torch.sort(probs, dim=-1, descending=True)
    cumulative = torch.cumsum(sorted_probs, dim=-1)
    # Keep a token while the mass *before* it is still <= p, so the token
    # that crosses the threshold is itself retained.
    keep = (cumulative - sorted_probs) <= p
    nucleus = sorted_probs * keep.float()
    nucleus = nucleus / nucleus.sum(dim=-1, keepdim=True)
    sampled = multinomial(nucleus, num_samples=1)
    # Map positions in sorted order back to original token ids.
    return torch.gather(sorted_indices, -1, sampled)
156,911 | import torch
import math
import numpy as np
from torch import nn, sin, pow
from torch.nn import functional as F
from torchaudio import transforms as T
from alias_free_torch import Activation1d
from dac.nn.layers import WNConv1d, WNConvTranspose1d
from typing import List, Literal, Dict, Any, Callable
from einops import rearrange
from ..inference.sampling import sample
from ..inference.utils import prepare_audio
from .blocks import SnakeBeta
from .bottleneck import Bottleneck, DiscreteBottleneck
from .diffusion import ConditionedDiffusionModel, DAU1DCondWrapper, UNet1DCondWrapper, DiTWrapper
from .factory import create_pretransform_from_config, create_bottleneck_from_config
from .pretransforms import Pretransform, AutoencoderPretransform
def checkpoint(function, *args, **kwargs):
    """Activation-checkpointed call of `function`; defaults to use_reentrant=False."""
    # Merge the default under any explicit caller choice.
    merged = {"use_reentrant": False, **kwargs}
    return torch.utils.checkpoint.checkpoint(function, *args, **merged)
156,912 | import torch
import math
import numpy as np
from torch import nn, sin, pow
from torch.nn import functional as F
from torchaudio import transforms as T
from alias_free_torch import Activation1d
from dac.nn.layers import WNConv1d, WNConvTranspose1d
from typing import List, Literal, Dict, Any, Callable
from einops import rearrange
from ..inference.sampling import sample
from ..inference.utils import prepare_audio
from .blocks import SnakeBeta
from .bottleneck import Bottleneck, DiscreteBottleneck
from .diffusion import ConditionedDiffusionModel, DAU1DCondWrapper, UNet1DCondWrapper, DiTWrapper
from .factory import create_pretransform_from_config, create_bottleneck_from_config
from .pretransforms import Pretransform, AutoencoderPretransform
class SnakeBeta(nn.Module):
    """Snake activation with a separate trainable magnitude (beta) per channel.

    Applies `snake_beta(x, alpha, beta)` with per-channel parameters that
    broadcast over [B, C, T]. With `alpha_logscale` the parameters are stored
    in log space (init 0, i.e. exp(0) == 1); otherwise linearly (init 1).
    """

    def __init__(self, in_features, alpha=1.0, alpha_trainable=True, alpha_logscale=True):
        super(SnakeBeta, self).__init__()
        self.in_features = in_features
        self.alpha_logscale = alpha_logscale

        # Log-scale parameters start at zero; linear-scale ones start at one.
        if self.alpha_logscale:
            self.alpha = nn.Parameter(torch.zeros(in_features) * alpha)
            self.beta = nn.Parameter(torch.zeros(in_features) * alpha)
        else:
            self.alpha = nn.Parameter(torch.ones(in_features) * alpha)
            self.beta = nn.Parameter(torch.ones(in_features) * alpha)

        self.alpha.requires_grad = alpha_trainable
        self.beta.requires_grad = alpha_trainable

        # Guard constant used by the snake computation to avoid division by zero.
        self.no_div_by_zero = 0.000000001

    def forward(self, x):
        # Reshape [C] -> [1, C, 1] so parameters broadcast over [B, C, T].
        alpha = self.alpha.unsqueeze(0).unsqueeze(-1)
        beta = self.beta.unsqueeze(0).unsqueeze(-1)
        if self.alpha_logscale:
            alpha = torch.exp(alpha)
            beta = torch.exp(beta)
        return snake_beta(x, alpha, beta)
def get_activation(activation: Literal["elu", "snake", "none"], antialias=False, channels=None) -> nn.Module:
    """Build an activation module by name, optionally wrapped for anti-aliasing.

    Args:
        activation: One of "elu", "snake" (channel-wise SnakeBeta), or "none".
        antialias: Wrap the result in an alias-free Activation1d.
        channels: Channel count; used only by the "snake" activation.

    Raises:
        ValueError: For unrecognized activation names.
    """
    # Lazy factories so SnakeBeta is only constructed when requested.
    factories = {
        "elu": nn.ELU,
        "snake": lambda: SnakeBeta(channels),
        "none": nn.Identity,
    }
    try:
        act = factories[activation]()
    except KeyError:
        raise ValueError(f"Unknown activation {activation}") from None
    if antialias:
        act = Activation1d(act)
    return act
156,913 | import math
from inspect import isfunction
from math import ceil, floor, log, pi, log2
from typing import Any, Callable, Dict, List, Optional, Sequence, Tuple, TypeVar, Union
from packaging import version
import torch
import torch.nn as nn
from einops import rearrange, reduce, repeat
from einops.layers.torch import Rearrange
from einops_exts import rearrange_many
from torch import Tensor, einsum
from torch.backends.cuda import sdp_kernel
from torch.nn import functional as F
from dac.nn.layers import Snake1d
T = TypeVar("T")
def exists(val: Optional[Any]) -> bool:
    """Return True when `val` is not None.

    Fix: the original annotation claimed a return type of `T`, but the
    function returns a bool; the annotation is corrected here.
    """
    return val is not None
import typing as tp
def default(val: Optional[T], d: Union[Callable[..., T], T]) -> T:
    """Return `val` when it is not None, otherwise the default `d`.

    A callable default (as judged by `inspect.isfunction`) is invoked
    lazily. Note: isfunction is False for builtins and classes, which are
    therefore returned uncalled — matching the original behavior.
    """
    if val is not None:
        return val
    return d() if isfunction(d) else d
156,914 | import math
from inspect import isfunction
from math import ceil, floor, log, pi, log2
from typing import Any, Callable, Dict, List, Optional, Sequence, Tuple, TypeVar, Union
from packaging import version
import torch
import torch.nn as nn
from einops import rearrange, reduce, repeat
from einops.layers.torch import Rearrange
from einops_exts import rearrange_many
from torch import Tensor, einsum
from torch.backends.cuda import sdp_kernel
from torch.nn import functional as F
from dac.nn.layers import Snake1d
import typing as tp
def closest_power_2(x: float) -> int:
    """Return the power of two nearest to `x`; ties resolve to the lower power."""
    low = 2 ** floor(log2(x))
    high = 2 ** ceil(log2(x))
    # <= keeps the original tie-breaking: equidistant picks the floor power.
    return int(low) if abs(x - low) <= abs(x - high) else int(high)
156,915 | import math
from inspect import isfunction
from math import ceil, floor, log, pi, log2
from typing import Any, Callable, Dict, List, Optional, Sequence, Tuple, TypeVar, Union
from packaging import version
import torch
import torch.nn as nn
from einops import rearrange, reduce, repeat
from einops.layers.torch import Rearrange
from einops_exts import rearrange_many
from torch import Tensor, einsum
from torch.backends.cuda import sdp_kernel
from torch.nn import functional as F
from dac.nn.layers import Snake1d
def group_dict_by_prefix(prefix: str, d: Dict) -> Tuple[Dict, Dict]:
    """Split `d` into (with_prefix, without_prefix) dicts by key prefix."""
    with_prefix: Dict = {}
    without_prefix: Dict = {}
    for key, value in d.items():
        if key.startswith(prefix):
            with_prefix[key] = value
        else:
            without_prefix[key] = value
    return with_prefix, without_prefix
import typing as tp
def groupby(prefix: str, d: Dict, keep_prefix: bool = False) -> Tuple[Dict, Dict]:
    """Partition `d` by key prefix.

    Returns (prefixed, rest); keys in the prefixed dict have the prefix
    stripped unless `keep_prefix` is True.
    """
    prefixed, rest = group_dict_by_prefix(prefix, d)
    if keep_prefix:
        return prefixed, rest
    stripped = {key[len(prefix):]: value for key, value in prefixed.items()}
    return stripped, rest
156,916 | import math
from inspect import isfunction
from math import ceil, floor, log, pi, log2
from typing import Any, Callable, Dict, List, Optional, Sequence, Tuple, TypeVar, Union
from packaging import version
import torch
import torch.nn as nn
from einops import rearrange, reduce, repeat
from einops.layers.torch import Rearrange
from einops_exts import rearrange_many
from torch import Tensor, einsum
from torch.backends.cuda import sdp_kernel
from torch.nn import functional as F
from dac.nn.layers import Snake1d
import typing as tp
def get_extra_padding_for_conv1d(x: torch.Tensor, kernel_size: int, stride: int,
                                 padding_total: int = 0) -> int:
    """Extra right-padding so the last conv window is full (see `pad_for_conv1d`)."""
    length = x.shape[-1]
    # Fractional frame count the conv would produce; round up to whole frames.
    frames_exact = (length - kernel_size + padding_total) / stride + 1
    target_length = (math.ceil(frames_exact) - 1) * stride + (kernel_size - padding_total)
    return target_length - length
The provided code snippet includes necessary dependencies for implementing the `pad_for_conv1d` function. Write a Python function `def pad_for_conv1d(x: torch.Tensor, kernel_size: int, stride: int, padding_total: int = 0)` to solve the following problem:
Pad for a convolution to make sure that the last window is full. Extra padding is added at the end. This is required to ensure that we can rebuild an output of the same length, as otherwise, even with padding, some time steps might get removed. For instance, with total padding = 4, kernel size = 4, stride = 2: 0 0 1 2 3 4 5 0 0 # (0s are padding) 1 2 3 # (output frames of a convolution, last 0 is never used) 0 0 1 2 3 4 5 0 # (output of tr. conv., but pos. 5 is going to get removed as padding) 1 2 3 4 # once you removed padding, we are missing one time step !
Here is the function:
def pad_for_conv1d(x: torch.Tensor, kernel_size: int, stride: int, padding_total: int = 0):
    """Pad for a convolution to make sure that the last window is full.

    Extra padding is added at the end. This is required to ensure that we can
    rebuild an output of the same length, as otherwise, even with padding,
    some time steps might get removed. For instance, with total padding = 4,
    kernel size = 4, stride = 2:
        0 0 1 2 3 4 5 0 0   # (0s are padding)
        1 2 3               # (output frames of a convolution, last 0 is never used)
        0 0 1 2 3 4 5 0     # (output of tr. conv., but pos. 5 is going to get removed as padding)
        1 2 3 4             # once you removed padding, we are missing one time step!
    """
    length = x.shape[-1]
    # Inlined get_extra_padding_for_conv1d: pad up to the length at which the
    # conv produces ceil(n_frames) complete frames.
    n_frames = (length - kernel_size + padding_total) / stride + 1
    ideal_length = (math.ceil(n_frames) - 1) * stride + (kernel_size - padding_total)
    return F.pad(x, (0, ideal_length - length))
156,917 | import math
from inspect import isfunction
from math import ceil, floor, log, pi, log2
from typing import Any, Callable, Dict, List, Optional, Sequence, Tuple, TypeVar, Union
from packaging import version
import torch
import torch.nn as nn
from einops import rearrange, reduce, repeat
from einops.layers.torch import Rearrange
from einops_exts import rearrange_many
from torch import Tensor, einsum
from torch.backends.cuda import sdp_kernel
from torch.nn import functional as F
from dac.nn.layers import Snake1d
import typing as tp
The provided code snippet includes necessary dependencies for implementing the `pad1d` function. Write a Python function `def pad1d(x: torch.Tensor, paddings: tp.Tuple[int, int], mode: str = 'constant', value: float = 0.)` to solve the following problem:
Tiny wrapper around F.pad, just to allow for reflect padding on small input. If this is the case, we insert extra 0 padding to the right before the reflection happen.
Here is the function:
def pad1d(x: torch.Tensor, paddings: tp.Tuple[int, int], mode: str = 'constant', value: float = 0.):
    """Tiny wrapper around F.pad that makes reflect padding work on short inputs.

    Reflect padding needs the input longer than the pad amount; when it is
    not, we zero-pad the right side first, reflect, then trim the extra.
    """
    padding_left, padding_right = paddings
    assert padding_left >= 0 and padding_right >= 0, (padding_left, padding_right)
    if mode != 'reflect':
        return F.pad(x, paddings, mode, value)
    length = x.shape[-1]
    max_pad = max(padding_left, padding_right)
    extra_pad = max_pad - length + 1 if length <= max_pad else 0
    if extra_pad:
        x = F.pad(x, (0, extra_pad))
    padded = F.pad(x, paddings, mode, value)
    # Drop the temporary zero padding from the right end.
    return padded[..., :padded.shape[-1] - extra_pad]
156,918 | import math
from inspect import isfunction
from math import ceil, floor, log, pi, log2
from typing import Any, Callable, Dict, List, Optional, Sequence, Tuple, TypeVar, Union
from packaging import version
import torch
import torch.nn as nn
from einops import rearrange, reduce, repeat
from einops.layers.torch import Rearrange
from einops_exts import rearrange_many
from torch import Tensor, einsum
from torch.backends.cuda import sdp_kernel
from torch.nn import functional as F
from dac.nn.layers import Snake1d
import typing as tp
The provided code snippet includes necessary dependencies for implementing the `unpad1d` function. Write a Python function `def unpad1d(x: torch.Tensor, paddings: tp.Tuple[int, int])` to solve the following problem:
Remove padding from x, handling properly zero padding. Only for 1d!
Here is the function:
def unpad1d(x: torch.Tensor, paddings: tp.Tuple[int, int]):
    """Remove (left, right) padding from the last dim of `x`; zero-safe. 1d only."""
    left, right = paddings
    assert left >= 0 and right >= 0, (left, right)
    assert left + right <= x.shape[-1]
    # Compute the end index explicitly: slicing with -right would be wrong
    # for right == 0 (x[..., left:-0] is empty).
    return x[..., left: x.shape[-1] - right]
156,919 | import math
from inspect import isfunction
from math import ceil, floor, log, pi, log2
from typing import Any, Callable, Dict, List, Optional, Sequence, Tuple, TypeVar, Union
from packaging import version
import torch
import torch.nn as nn
from einops import rearrange, reduce, repeat
from einops.layers.torch import Rearrange
from einops_exts import rearrange_many
from torch import Tensor, einsum
from torch.backends.cuda import sdp_kernel
from torch.nn import functional as F
from dac.nn.layers import Snake1d
import typing as tp
class Conv1d(nn.Conv1d):
    """nn.Conv1d that computes its own padding at call time.

    `forward(x, causal=...)` pads so every stride window is full: all fixed
    padding goes to the left in causal mode; otherwise it is split, with the
    right side receiving the smaller half when the total is odd.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

    def forward(self, x: Tensor, causal=False) -> Tensor:
        stride = self.stride[0]
        dilation = self.dilation[0]
        # Effective receptive field of the kernel once dilation is applied.
        effective_kernel = (self.kernel_size[0] - 1) * dilation + 1
        padding_total = effective_kernel - stride
        extra_padding = get_extra_padding_for_conv1d(x, effective_kernel, stride, padding_total)
        if causal:
            # Causal: fixed padding on the left, frame-completing extra on the right.
            x = pad1d(x, (padding_total, extra_padding))
        else:
            # Asymmetric padding required for odd strides.
            padding_right = padding_total // 2
            padding_left = padding_total - padding_right
            x = pad1d(x, (padding_left, padding_right + extra_padding))
        return super().forward(x)
def Downsample1d(
    in_channels: int, out_channels: int, factor: int, kernel_multiplier: int = 2
) -> nn.Module:
    """Strided Conv1d downsampler with a factor * kernel_multiplier + 1 tap kernel."""
    assert kernel_multiplier % 2 == 0, "Kernel multiplier must be even"
    kernel_size = factor * kernel_multiplier + 1
    return Conv1d(
        in_channels=in_channels,
        out_channels=out_channels,
        kernel_size=kernel_size,
        stride=factor,
    )
156,920 | import math
from inspect import isfunction
from math import ceil, floor, log, pi, log2
from typing import Any, Callable, Dict, List, Optional, Sequence, Tuple, TypeVar, Union
from packaging import version
import torch
import torch.nn as nn
from einops import rearrange, reduce, repeat
from einops.layers.torch import Rearrange
from einops_exts import rearrange_many
from torch import Tensor, einsum
from torch.backends.cuda import sdp_kernel
from torch.nn import functional as F
from dac.nn.layers import Snake1d
import typing as tp
class Conv1d(nn.Conv1d):
    """nn.Conv1d computing causal or centered padding on the fly.

    In causal mode all fixed padding is placed on the left; otherwise it is
    split asymmetrically (the left side gets the larger half for odd totals).
    Extra right padding keeps the final stride window complete.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

    def forward(self, x: Tensor, causal=False) -> Tensor:
        stride = self.stride[0]
        dilation = self.dilation[0]
        # Receptive field of the dilated kernel.
        effective_kernel = (self.kernel_size[0] - 1) * dilation + 1
        padding_total = effective_kernel - stride
        extra_padding = get_extra_padding_for_conv1d(x, effective_kernel, stride, padding_total)
        if causal:
            # Left padding only, plus extra on the right to complete the last frame.
            x = pad1d(x, (padding_total, extra_padding))
        else:
            # Asymmetric padding required for odd strides.
            right = padding_total // 2
            left = padding_total - right
            x = pad1d(x, (left, right + extra_padding))
        return super().forward(x)
class ConvTranspose1d(nn.ConvTranspose1d):
    """nn.ConvTranspose1d that trims its own fixed padding after upsampling.

    Only the fixed `kernel_size - stride` padding is removed here; any extra
    padding introduced by `pad_for_conv1d` on the encoder side is trimmed
    later, when the final output length is known, since this layer cannot
    know the matching encoder-side length.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

    def forward(self, x: Tensor, causal=False) -> Tensor:
        padding_total = self.kernel_size[0] - self.stride[0]
        y = super().forward(x)
        if causal:
            # Causal: trim everything from the right end.
            right = ceil(padding_total)
            left = padding_total - right
            y = unpad1d(y, (left, right))
        else:
            # Mirror of Conv1d's asymmetric split for odd strides.
            right = padding_total // 2
            left = padding_total - right
            y = unpad1d(y, (left, right))
        return y
def Upsample1d(
    in_channels: int, out_channels: int, factor: int, use_nearest: bool = False
) -> nn.Module:
    """Upsampler: plain conv (factor 1), nearest+conv, or transposed conv."""
    if factor == 1:
        # No temporal upsampling needed; just mix channels.
        return Conv1d(in_channels=in_channels, out_channels=out_channels, kernel_size=3)
    if use_nearest:
        # Nearest-neighbor interpolation followed by a smoothing conv.
        return nn.Sequential(
            nn.Upsample(scale_factor=factor, mode="nearest"),
            Conv1d(in_channels=in_channels, out_channels=out_channels, kernel_size=3),
        )
    return ConvTranspose1d(
        in_channels=in_channels,
        out_channels=out_channels,
        kernel_size=factor * 2,
        stride=factor,
    )
156,921 | import math
from inspect import isfunction
from math import ceil, floor, log, pi, log2
from typing import Any, Callable, Dict, List, Optional, Sequence, Tuple, TypeVar, Union
from packaging import version
import torch
import torch.nn as nn
from einops import rearrange, reduce, repeat
from einops.layers.torch import Rearrange
from einops_exts import rearrange_many
from torch import Tensor, einsum
from torch.backends.cuda import sdp_kernel
from torch.nn import functional as F
from dac.nn.layers import Snake1d
import typing as tp
def add_mask(sim: Tensor, mask: Tensor) -> Tensor:
    """Apply a boolean attention mask to similarity logits.

    False positions in `mask` are filled with the most negative finite value
    of `sim`'s dtype so they vanish under softmax. Accepts a per-batch
    (b, n, m) mask or a shared (n, m) mask and broadcasts over heads.
    """
    batch = sim.shape[0]
    if mask.ndim == 3:
        # (b, n, m) -> (b, 1, n, m): broadcast across heads.
        mask = mask.unsqueeze(1)
    elif mask.ndim == 2:
        # (n, m) -> (b, 1, n, m): shared across batch and heads.
        mask = mask.unsqueeze(0).unsqueeze(0).expand(batch, -1, -1, -1)
    max_neg_value = -torch.finfo(sim.dtype).max
    return sim.masked_fill(~mask, max_neg_value)
156,922 | import math
from inspect import isfunction
from math import ceil, floor, log, pi, log2
from typing import Any, Callable, Dict, List, Optional, Sequence, Tuple, TypeVar, Union
from packaging import version
import torch
import torch.nn as nn
from einops import rearrange, reduce, repeat
from einops.layers.torch import Rearrange
from einops_exts import rearrange_many
from torch import Tensor, einsum
from torch.backends.cuda import sdp_kernel
from torch.nn import functional as F
from dac.nn.layers import Snake1d
import typing as tp
def causal_mask(q: Tensor, k: Tensor) -> Tensor:
    """Per-batch boolean mask, True where queries may attend (causal lower triangle)."""
    batch, num_q, num_k = q.shape[0], q.shape[-2], k.shape[-2]
    # Invert the strict upper triangle (shifted for num_q != num_k).
    blocked = torch.ones((num_q, num_k), dtype=torch.bool, device=q.device).triu(num_k - num_q + 1)
    allowed = blocked.logical_not()
    return allowed.unsqueeze(0).repeat(batch, 1, 1)
156,923 | import math
from inspect import isfunction
from math import ceil, floor, log, pi, log2
from typing import Any, Callable, Dict, List, Optional, Sequence, Tuple, TypeVar, Union
from packaging import version
import torch
import torch.nn as nn
from einops import rearrange, reduce, repeat
from einops.layers.torch import Rearrange
from einops_exts import rearrange_many
from torch import Tensor, einsum
from torch.backends.cuda import sdp_kernel
from torch.nn import functional as F
from dac.nn.layers import Snake1d
import typing as tp
class LearnedPositionalEmbedding(nn.Module):
    """Sinusoidal embedding with learned random frequencies.

    Used for continuous time: maps a batch of scalars (b,) to features
    (b, dim + 1) consisting of the raw input followed by sin/cos pairs.
    """

    def __init__(self, dim: int):
        super().__init__()
        assert (dim % 2) == 0
        # One learned frequency per sin/cos pair.
        self.weights = nn.Parameter(torch.randn(dim // 2))

    def forward(self, x: Tensor) -> Tensor:
        column = x.unsqueeze(-1)  # (b,) -> (b, 1)
        angles = column * self.weights.unsqueeze(0) * 2 * pi
        # Layout matches cat((x, cat(sin, cos))): [x, sin..., cos...].
        return torch.cat((column, angles.sin(), angles.cos()), dim=-1)
def TimePositionalEmbedding(dim: int, out_features: int) -> nn.Module:
    """Continuous-time embedding: learned Fourier features then a linear projection.

    The Fourier stage emits ``dim + 1`` features (raw time plus sin/cos pairs),
    which the linear layer maps to ``out_features``.
    """
    fourier = LearnedPositionalEmbedding(dim)
    project = nn.Linear(in_features=dim + 1, out_features=out_features)
    return nn.Sequential(fourier, project)
156,924 | import math
from inspect import isfunction
from math import ceil, floor, log, pi, log2
from typing import Any, Callable, Dict, List, Optional, Sequence, Tuple, TypeVar, Union
from packaging import version
import torch
import torch.nn as nn
from einops import rearrange, reduce, repeat
from einops.layers.torch import Rearrange
from einops_exts import rearrange_many
from torch import Tensor, einsum
from torch.backends.cuda import sdp_kernel
from torch.nn import functional as F
from dac.nn.layers import Snake1d
import typing as tp
def rand_bool(shape: Any, proba: float, device: Any = None) -> Tensor:
    """Sample a boolean tensor where each element is True with probability ``proba``.

    The degenerate probabilities 0 and 1 short-circuit to deterministic
    all-False / all-True tensors without touching the RNG.
    """
    if proba == 1:
        return torch.ones(shape, device=device, dtype=torch.bool)
    if proba == 0:
        return torch.zeros(shape, device=device, dtype=torch.bool)
    probabilities = torch.full(shape, proba, device=device)
    return torch.bernoulli(probabilities).to(torch.bool)
156,925 | import math
from inspect import isfunction
from math import ceil, floor, log, pi, log2
from typing import Any, Callable, Dict, List, Optional, Sequence, Tuple, TypeVar, Union
from packaging import version
import torch
import torch.nn as nn
from einops import rearrange, reduce, repeat
from einops.layers.torch import Rearrange
from einops_exts import rearrange_many
from torch import Tensor, einsum
from torch.backends.cuda import sdp_kernel
from torch.nn import functional as F
from dac.nn.layers import Snake1d
import typing as tp
class UNet1d(nn.Module):
    """1D U-Net with optional time/feature conditioning, per-layer context
    channels, cross-attention embeddings, and an optional STFT front-end.

    NOTE(review): relies on helpers defined elsewhere in this module
    (`default`, `exists`, `groupby`, `Patcher`, `DownsampleBlock1d`,
    `BottleneckBlock1d`, `UpsampleBlock1d`, `Unpatcher`, `STFT`).
    """

    def __init__(
        self,
        in_channels: int,
        channels: int,
        multipliers: Sequence[int],
        factors: Sequence[int],
        num_blocks: Sequence[int],
        attentions: Sequence[int],
        patch_size: int = 1,
        resnet_groups: int = 8,
        use_context_time: bool = True,
        kernel_multiplier_downsample: int = 2,
        use_nearest_upsample: bool = False,
        use_skip_scale: bool = True,
        use_snake: bool = False,
        use_stft: bool = False,
        use_stft_context: bool = False,
        out_channels: Optional[int] = None,
        context_features: Optional[int] = None,
        context_features_multiplier: int = 4,
        context_channels: Optional[Sequence[int]] = None,
        context_embedding_features: Optional[int] = None,
        **kwargs,
    ):
        super().__init__()
        out_channels = default(out_channels, in_channels)
        context_channels = list(default(context_channels, []))
        num_layers = len(multipliers) - 1
        use_context_features = exists(context_features)
        use_context_channels = len(context_channels) > 0
        context_mapping_features = None

        # Split off "attention_*" kwargs to forward to transformer blocks.
        attention_kwargs, kwargs = groupby("attention_", kwargs, keep_prefix=True)

        self.num_layers = num_layers
        self.use_context_time = use_context_time
        self.use_context_features = use_context_features
        self.use_context_channels = use_context_channels
        self.use_stft = use_stft
        self.use_stft_context = use_stft_context

        self.context_features = context_features
        # Pad the context-channel spec with zeros so it has one entry per level
        # (num_layers down/up levels plus the input level).
        context_channels_pad_length = num_layers + 1 - len(context_channels)
        context_channels = context_channels + [0] * context_channels_pad_length
        self.context_channels = context_channels
        self.context_embedding_features = context_embedding_features

        if use_context_channels:
            has_context = [c > 0 for c in context_channels]
            self.has_context = has_context
            # Dense index into channels_list for each layer, skipping
            # zero-channel (contextless) layers.
            self.channels_ids = [sum(has_context[:i]) for i in range(len(has_context))]

        assert (
            len(factors) == num_layers
            and len(attentions) >= num_layers
            and len(num_blocks) == num_layers
        )

        if use_context_time or use_context_features:
            context_mapping_features = channels * context_features_multiplier
            # Shared MLP that fuses time and feature conditioning.
            self.to_mapping = nn.Sequential(
                nn.Linear(context_mapping_features, context_mapping_features),
                nn.GELU(),
                nn.Linear(context_mapping_features, context_mapping_features),
                nn.GELU(),
            )

        if use_context_time:
            assert exists(context_mapping_features)
            self.to_time = nn.Sequential(
                TimePositionalEmbedding(
                    dim=channels, out_features=context_mapping_features
                ),
                nn.GELU(),
            )

        if use_context_features:
            assert exists(context_features) and exists(context_mapping_features)
            self.to_features = nn.Sequential(
                nn.Linear(
                    in_features=context_features, out_features=context_mapping_features
                ),
                nn.GELU(),
            )

        if use_stft:
            stft_kwargs, kwargs = groupby("stft_", kwargs)
            assert "num_fft" in stft_kwargs, "stft_num_fft required if use_stft=True"
            # Real + imaginary part per FFT bin.
            stft_channels = (stft_kwargs["num_fft"] // 2 + 1) * 2
            in_channels *= stft_channels
            out_channels *= stft_channels
            context_channels[0] *= stft_channels if use_stft_context else 1
            assert exists(in_channels) and exists(out_channels)
            self.stft = STFT(**stft_kwargs)

        assert not kwargs, f"Unknown arguments: {', '.join(list(kwargs.keys()))}"

        self.to_in = Patcher(
            in_channels=in_channels + context_channels[0],
            out_channels=channels * multipliers[0],
            patch_size=patch_size,
            context_mapping_features=context_mapping_features,
            use_snake=use_snake
        )

        self.downsamples = nn.ModuleList(
            [
                DownsampleBlock1d(
                    in_channels=channels * multipliers[i],
                    out_channels=channels * multipliers[i + 1],
                    context_mapping_features=context_mapping_features,
                    context_channels=context_channels[i + 1],
                    context_embedding_features=context_embedding_features,
                    num_layers=num_blocks[i],
                    factor=factors[i],
                    kernel_multiplier=kernel_multiplier_downsample,
                    num_groups=resnet_groups,
                    use_pre_downsample=True,
                    use_skip=True,
                    use_snake=use_snake,
                    num_transformer_blocks=attentions[i],
                    **attention_kwargs,
                )
                for i in range(num_layers)
            ]
        )

        self.bottleneck = BottleneckBlock1d(
            channels=channels * multipliers[-1],
            context_mapping_features=context_mapping_features,
            context_embedding_features=context_embedding_features,
            num_groups=resnet_groups,
            num_transformer_blocks=attentions[-1],
            use_snake=use_snake,
            **attention_kwargs,
        )

        self.upsamples = nn.ModuleList(
            [
                UpsampleBlock1d(
                    in_channels=channels * multipliers[i + 1],
                    out_channels=channels * multipliers[i],
                    context_mapping_features=context_mapping_features,
                    context_embedding_features=context_embedding_features,
                    num_layers=num_blocks[i] + (1 if attentions[i] else 0),
                    factor=factors[i],
                    use_nearest=use_nearest_upsample,
                    num_groups=resnet_groups,
                    use_skip_scale=use_skip_scale,
                    use_pre_upsample=False,
                    use_skip=True,
                    use_snake=use_snake,
                    skip_channels=channels * multipliers[i + 1],
                    num_transformer_blocks=attentions[i],
                    **attention_kwargs,
                )
                for i in reversed(range(num_layers))
            ]
        )

        self.to_out = Unpatcher(
            in_channels=channels * multipliers[0],
            out_channels=out_channels,
            patch_size=patch_size,
            context_mapping_features=context_mapping_features,
            use_snake=use_snake
        )

    def get_channels(
        self, channels_list: Optional[Sequence[Tensor]] = None, layer: int = 0
    ) -> Optional[Tensor]:
        """Gets context channels at `layer` and checks that shape is correct"""
        use_context_channels = self.use_context_channels and self.has_context[layer]
        if not use_context_channels:
            return None
        assert exists(channels_list), "Missing context"
        # Get channels index (skipping zero channel contexts)
        channels_id = self.channels_ids[layer]
        # Get channels
        channels = channels_list[channels_id]
        message = f"Missing context for layer {layer} at index {channels_id}"
        assert exists(channels), message
        # Check channels
        num_channels = self.context_channels[layer]
        message = f"Expected context with {num_channels} channels at idx {channels_id}"
        assert channels.shape[1] == num_channels, message
        # STFT channels if requested
        channels = self.stft.encode1d(channels) if self.use_stft_context else channels  # type: ignore # noqa
        return channels

    def get_mapping(
        self, time: Optional[Tensor] = None, features: Optional[Tensor] = None
    ) -> Optional[Tensor]:
        """Combines context time features and features into mapping"""
        items, mapping = [], None
        # Compute time features
        if self.use_context_time:
            assert_message = "use_context_time=True but no time features provided"
            assert exists(time), assert_message
            items += [self.to_time(time)]
        # Compute features
        if self.use_context_features:
            assert_message = "context_features exists but no features provided"
            assert exists(features), assert_message
            items += [self.to_features(features)]
        # Compute joint mapping
        if self.use_context_time or self.use_context_features:
            # Sum the individual conditioning vectors, then refine with the MLP.
            mapping = reduce(torch.stack(items), "n b m -> b m", "sum")
            mapping = self.to_mapping(mapping)
        return mapping

    def forward(
        self,
        x: Tensor,
        time: Optional[Tensor] = None,
        *,
        features: Optional[Tensor] = None,
        channels_list: Optional[Sequence[Tensor]] = None,
        embedding: Optional[Tensor] = None,
        embedding_mask: Optional[Tensor] = None,
        causal: Optional[bool] = False,
    ) -> Tensor:
        channels = self.get_channels(channels_list, layer=0)
        # Apply stft if required
        x = self.stft.encode1d(x) if self.use_stft else x  # type: ignore
        # Concat context channels at layer 0 if provided
        x = torch.cat([x, channels], dim=1) if exists(channels) else x
        # Compute mapping from time and features
        mapping = self.get_mapping(time, features)

        x = self.to_in(x, mapping, causal=causal)
        # Keep the patcher output as the outermost residual.
        skips_list = [x]

        for i, downsample in enumerate(self.downsamples):
            channels = self.get_channels(channels_list, layer=i + 1)
            x, skips = downsample(
                x, mapping=mapping, channels=channels, embedding=embedding, embedding_mask=embedding_mask, causal=causal
            )
            skips_list += [skips]

        x = self.bottleneck(x, mapping=mapping, embedding=embedding, embedding_mask=embedding_mask, causal=causal)

        for i, upsample in enumerate(self.upsamples):
            # Pop skips in reverse order of the downsampling path.
            skips = skips_list.pop()
            x = upsample(x, skips=skips, mapping=mapping, embedding=embedding, embedding_mask=embedding_mask, causal=causal)

        # Add the saved patcher-output residual before unpatching.
        x += skips_list.pop()
        x = self.to_out(x, mapping, causal=causal)
        x = self.stft.decode1d(x) if self.use_stft else x

        return x
class UNetCFG1d(UNet1d):
    """UNet1d with Classifier-Free Guidance"""

    def __init__(
        self,
        context_embedding_max_length: int,
        context_embedding_features: int,
        use_xattn_time: bool = False,
        **kwargs,
    ):
        super().__init__(
            context_embedding_features=context_embedding_features, **kwargs
        )

        self.use_xattn_time = use_xattn_time

        if use_xattn_time:
            assert exists(context_embedding_features)
            # Inject the diffusion timestep as an extra cross-attention token.
            self.to_time_embedding = nn.Sequential(
                TimePositionalEmbedding(
                    dim=kwargs["channels"], out_features=context_embedding_features
                ),
                nn.GELU(),
            )

            context_embedding_max_length += 1  # Add one for time embedding

        # Learned "null" embedding substituted when conditioning is dropped.
        # NOTE(review): `FixedEmbedding` is defined elsewhere in this module.
        self.fixed_embedding = FixedEmbedding(
            max_length=context_embedding_max_length, features=context_embedding_features
        )

    def forward(  # type: ignore
        self,
        x: Tensor,
        time: Tensor,
        *,
        embedding: Tensor,
        embedding_mask: Optional[Tensor] = None,
        embedding_scale: float = 1.0,
        embedding_mask_proba: float = 0.0,
        batch_cfg: bool = False,
        rescale_cfg: bool = False,
        scale_phi: float = 0.4,
        negative_embedding: Optional[Tensor] = None,
        negative_embedding_mask: Optional[Tensor] = None,
        **kwargs,
    ) -> Tensor:
        b, device = embedding.shape[0], embedding.device

        if self.use_xattn_time:
            embedding = torch.cat([embedding, self.to_time_embedding(time).unsqueeze(1)], dim=1)

            if embedding_mask is not None:
                # Time token is always attended to.
                embedding_mask = torch.cat([embedding_mask, torch.ones((b, 1), device=device)], dim=1)

        fixed_embedding = self.fixed_embedding(embedding)

        if embedding_mask_proba > 0.0:
            # Randomly mask embedding
            # (per-sample dropout of the conditioning during training)
            batch_mask = rand_bool(
                shape=(b, 1, 1), proba=embedding_mask_proba, device=device
            )
            embedding = torch.where(batch_mask, fixed_embedding, embedding)

        if embedding_scale != 1.0:
            if batch_cfg:
                # Run conditional and unconditional passes in one doubled batch.
                batch_x = torch.cat([x, x], dim=0)
                batch_time = torch.cat([time, time], dim=0)

                if negative_embedding is not None:
                    if negative_embedding_mask is not None:
                        # Masked-out negative tokens fall back to the null embedding.
                        negative_embedding_mask = negative_embedding_mask.to(torch.bool).unsqueeze(2)
                        negative_embedding = torch.where(negative_embedding_mask, negative_embedding, fixed_embedding)

                    batch_embed = torch.cat([embedding, negative_embedding], dim=0)

                else:
                    batch_embed = torch.cat([embedding, fixed_embedding], dim=0)

                batch_mask = None
                if embedding_mask is not None:
                    batch_mask = torch.cat([embedding_mask, embedding_mask], dim=0)

                batch_features = None
                features = kwargs.pop("features", None)
                if self.use_context_features:
                    batch_features = torch.cat([features, features], dim=0)

                batch_channels = None
                channels_list = kwargs.pop("channels_list", None)
                if self.use_context_channels:
                    batch_channels = []
                    for channels in channels_list:
                        batch_channels += [torch.cat([channels, channels], dim=0)]

                # Compute both normal and fixed embedding outputs
                batch_out = super().forward(batch_x, batch_time, embedding=batch_embed, embedding_mask=batch_mask, features=batch_features, channels_list=batch_channels, **kwargs)
                out, out_masked = batch_out.chunk(2, dim=0)

            else:
                # Compute both normal and fixed embedding outputs
                out = super().forward(x, time, embedding=embedding, embedding_mask=embedding_mask, **kwargs)
                out_masked = super().forward(x, time, embedding=fixed_embedding, embedding_mask=embedding_mask, **kwargs)

            # Standard CFG extrapolation from the unconditional output.
            out_cfg = out_masked + (out - out_masked) * embedding_scale

            if rescale_cfg:
                # Blend the guided output with a std-rescaled version to curb
                # variance growth at high guidance scales.
                out_std = out.std(dim=1, keepdim=True)
                out_cfg_std = out_cfg.std(dim=1, keepdim=True)

                return scale_phi * (out_cfg * (out_std/out_cfg_std)) + (1-scale_phi) * out_cfg
            else:
                return out_cfg

        else:
            # Guidance scale of 1.0 degenerates to a single conditional pass.
            return super().forward(x, time, embedding=embedding, embedding_mask=embedding_mask, **kwargs)
class UNetNCCA1d(UNet1d):
    """UNet1d with Noise Channel Conditioning Augmentation"""

    def __init__(self, context_features: int, **kwargs):
        super().__init__(context_features=context_features, **kwargs)
        # NOTE(review): `NumberEmbedder` is defined elsewhere in this module.
        self.embedder = NumberEmbedder(features=context_features)

    def expand(self, x: Any, shape: Tuple[int, ...]) -> Tensor:
        # Accept scalars/sequences/tensors and broadcast to `shape`.
        x = x if torch.is_tensor(x) else torch.tensor(x)
        return x.expand(shape)

    def forward(  # type: ignore
        self,
        x: Tensor,
        time: Tensor,
        *,
        channels_list: Sequence[Tensor],
        channels_augmentation: Union[
            bool, Sequence[bool], Sequence[Sequence[bool]], Tensor
        ] = False,
        channels_scale: Union[
            float, Sequence[float], Sequence[Sequence[float]], Tensor
        ] = 0,
        **kwargs,
    ) -> Tensor:
        b, n = x.shape[0], len(channels_list)
        channels_augmentation = self.expand(channels_augmentation, shape=(b, n)).to(x)
        channels_scale = self.expand(channels_scale, shape=(b, n)).to(x)

        # Augmentation (for each channel list item)
        # NOTE(review): this mutates channels_list in place — callers should
        # not reuse the list afterwards expecting the original tensors.
        for i in range(n):
            # Per-sample blend between the clean context and fresh noise.
            scale = channels_scale[:, i] * channels_augmentation[:, i]
            scale = rearrange(scale, "b -> b 1 1")
            item = channels_list[i]
            channels_list[i] = torch.randn_like(item) * scale + item * (1 - scale)  # type: ignore # noqa

        # Scale embedding (sum reduction if more than one channel list item)
        channels_scale_emb = self.embedder(channels_scale)
        channels_scale_emb = reduce(channels_scale_emb, "b n d -> b d", "sum")

        return super().forward(
            x=x,
            time=time,
            channels_list=channels_list,
            features=channels_scale_emb,
            **kwargs,
        )
class UNetAll1d(UNetCFG1d, UNetNCCA1d):
    """UNet1d combining classifier-free guidance with noise-channel
    conditioning augmentation via cooperative multiple inheritance."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

    def forward(self, *args, **kwargs):  # type: ignore
        # Route explicitly through the CFG forward; the NCCA behaviour is
        # reached through the MRO when CFG calls super().forward.
        return UNetCFG1d.forward(self, *args, **kwargs)
def XUNet1d(type: str = "base", **kwargs) -> UNet1d:
    """Factory for UNet1d variants.

    ``type`` selects "base" (plain), "all" (CFG + NCCA), "cfg"
    (classifier-free guidance) or "ncca" (noise-channel conditioning
    augmentation); remaining kwargs are forwarded to the constructor.
    """
    constructors = {
        "base": UNet1d,
        "all": UNetAll1d,
        "cfg": UNetCFG1d,
        "ncca": UNetNCCA1d,
    }
    if type not in constructors:
        raise ValueError(f"Unknown XUNet1d type: {type}")
    return constructors[type](**kwargs)
156,926 | from functools import reduce
import math
import numpy as np
import torch
from torch import nn
from torch.nn import functional as F
from torch.backends.cuda import sdp_kernel
from packaging import version
from dac.nn.layers import Snake1d
def expand_to_planes(input, shape):
    """Tile a (batch, channels) conditioning tensor along a new trailing
    axis so it matches the temporal length ``shape[2]``."""
    length = shape[2]
    return input.unsqueeze(-1).repeat(1, 1, length)
156,927 | from functools import reduce
import math
import numpy as np
import torch
from torch import nn
from torch.nn import functional as F
from torch.backends.cuda import sdp_kernel
from packaging import version
from dac.nn.layers import Snake1d
def Downsample1d_2(
    in_channels: int, out_channels: int, factor: int, kernel_multiplier: int = 2
) -> nn.Module:
    """Strided Conv1d downsampler.

    Kernel size ``factor * kernel_multiplier + 1`` with matching padding keeps
    the output length at exactly ``input_length / factor``.
    """
    assert kernel_multiplier % 2 == 0, "Kernel multiplier must be even"
    kernel_size = factor * kernel_multiplier + 1
    padding = factor * (kernel_multiplier // 2)
    return nn.Conv1d(
        in_channels=in_channels,
        out_channels=out_channels,
        kernel_size=kernel_size,
        stride=factor,
        padding=padding,
    )
156,928 | from functools import reduce
import math
import numpy as np
import torch
from torch import nn
from torch.nn import functional as F
from torch.backends.cuda import sdp_kernel
from packaging import version
from dac.nn.layers import Snake1d
def Upsample1d_2(
    in_channels: int, out_channels: int, factor: int, use_nearest: bool = False
) -> nn.Module:
    """Upsampler by ``factor``: either nearest-neighbour + conv, or a
    transposed convolution; factor 1 degenerates to a plain 3-tap conv."""
    if factor == 1:
        # No temporal change needed.
        return nn.Conv1d(
            in_channels=in_channels, out_channels=out_channels, kernel_size=3, padding=1
        )
    if use_nearest:
        return nn.Sequential(
            nn.Upsample(scale_factor=factor, mode="nearest"),
            nn.Conv1d(
                in_channels=in_channels,
                out_channels=out_channels,
                kernel_size=3,
                padding=1,
            ),
        )
    # Padding/output_padding chosen so output length is exactly factor * input.
    return nn.ConvTranspose1d(
        in_channels=in_channels,
        out_channels=out_channels,
        kernel_size=factor * 2,
        stride=factor,
        padding=factor // 2 + factor % 2,
        output_padding=factor % 2,
    )
156,929 | from functools import reduce
import math
import numpy as np
import torch
from torch import nn
from torch.nn import functional as F
from torch.backends.cuda import sdp_kernel
from packaging import version
from dac.nn.layers import Snake1d
def zero_init(layer):
    """Zero a layer's weight (and bias, if any) in place; returns the layer
    so it can be used inline when building modules."""
    nn.init.zeros_(layer.weight)
    if layer.bias is not None:
        nn.init.zeros_(layer.bias)
    return layer
156,930 | from functools import reduce
import math
import numpy as np
import torch
from torch import nn
from torch.nn import functional as F
from torch.backends.cuda import sdp_kernel
from packaging import version
from dac.nn.layers import Snake1d
def normalize(x, eps=1e-4):
    """Normalize each sample of ``x`` by its vector norm over all non-batch
    dims, scaled so unit-variance inputs stay near unit variance; ``eps``
    guards against division by zero."""
    reduce_dims = list(range(1, x.ndim))
    norm = torch.linalg.vector_norm(x, dim=reduce_dims, keepdim=True)
    scale = np.sqrt(norm.numel() / x.numel())
    return x / (norm * scale + eps)
156,931 | from functools import reduce
import math
import numpy as np
import torch
from torch import nn
from torch.nn import functional as F
from torch.backends.cuda import sdp_kernel
from packaging import version
from dac.nn.layers import Snake1d
use_compile = True

def compile(function, *args, **kwargs):
    """Best-effort torch.compile wrapper.

    Returns the raw callable unchanged when compilation is disabled via the
    module-level ``use_compile`` flag, or when ``torch.compile`` raises a
    RuntimeError (e.g. unsupported platform/build).
    """
    if use_compile:
        try:
            function = torch.compile(function, *args, **kwargs)
        except RuntimeError:
            pass  # fall back to eager execution
    return function
156,932 | from functools import reduce
import math
import numpy as np
import torch
from torch import nn
from torch.nn import functional as F
from torch.backends.cuda import sdp_kernel
from packaging import version
from dac.nn.layers import Snake1d
def linear_geglu(x, weight, bias=None):
    """Fused linear projection followed by GEGLU: the projection output is
    split in half along the last dim, and the first half is gated by
    GELU of the second half."""
    projected = x @ weight.mT
    if bias is not None:
        projected = projected + bias
    value, gate = projected.chunk(2, dim=-1)
    return value * F.gelu(gate)
156,933 | from functools import reduce
import math
import numpy as np
import torch
from torch import nn
from torch.nn import functional as F
from torch.backends.cuda import sdp_kernel
from packaging import version
from dac.nn.layers import Snake1d
def snake_beta(x, alpha, beta):
    """Snake activation with separate frequency (alpha) and magnitude (beta):
    x + sin^2(alpha * x) / beta. A tiny epsilon keeps the division finite
    when beta is zero."""
    sin_term = torch.sin(x * alpha)
    return x + (1.0 / (beta + 0.000000001)) * sin_term * sin_term
156,934 | from einops import rearrange
import pywt
import torch
from torch import nn
from torch.nn import functional as F
from typing import Literal
def get_filter_bank(wavelet):
    """Return a pywt wavelet's 4-row filter bank as a tensor.

    For biorthogonal ("bior*") wavelets whose bank is zero-padded at the
    front, the leading all-zero column is dropped.
    """
    bank = torch.tensor(pywt.Wavelet(wavelet).filter_bank)
    leading_column_zero = torch.all(bank[:, 0] == 0)
    if wavelet.startswith("bior") and leading_column_zero:
        bank = bank[:, 1:]
    return bank
156,935 | import math
import numpy as np
import torch
import torch.nn as nn
from einops import rearrange
from scipy.optimize import fmin
from scipy.signal import firwin, kaiser, kaiser_beta, kaiserord
The provided code snippet includes necessary dependencies for implementing the `prepare_signal_dimensions` function. Write a Python function `def prepare_signal_dimensions(signal)` to solve the following problem:
Rearrange signal into Batch x Channels x Length. Parameters ---------- signal : torch.Tensor or numpy.ndarray The input signal. Returns ------- torch.Tensor Preprocessed signal tensor.
Here is the function:
def prepare_signal_dimensions(signal):
    """
    Rearrange signal into Batch x Channels x Length.

    Parameters
    ----------
    signal : torch.Tensor or numpy.ndarray
        The input signal.

    Returns
    -------
    torch.Tensor
        Preprocessed signal tensor.

    Raises
    ------
    ValueError
        If the input is neither a numpy array nor a torch tensor.
    """
    if isinstance(signal, np.ndarray):
        signal = torch.from_numpy(signal)
    if not isinstance(signal, torch.Tensor):
        raise ValueError("Input should be either a numpy array or a PyTorch tensor.")

    ndim = signal.dim()
    if ndim == 1:
        # Mono: promote to 1 x 1 x Length.
        signal = signal[None, None, :]
    elif ndim == 2:
        # Multi-channel (e.g. stereo): heuristically put the longer axis
        # (assumed to be Length) last, then add a batch axis.
        if signal.shape[0] > signal.shape[1]:
            signal = signal.T
        signal = signal[None, :, :]
    return signal
156,936 | import math
import numpy as np
import torch
import torch.nn as nn
from einops import rearrange
from scipy.optimize import fmin
from scipy.signal import firwin, kaiser, kaiser_beta, kaiserord
The provided code snippet includes necessary dependencies for implementing the `pad_signal` function. Write a Python function `def pad_signal(signal, num_bands)` to solve the following problem:
Pads the signal to make its length divisible by the given number of bands. Parameters ---------- signal : torch.Tensor The input signal tensor, where the last dimension represents the signal length. num_bands : int The number of bands by which the signal length should be divisible. Returns ------- torch.Tensor The padded signal tensor. If the original signal length was already divisible by num_bands, returns the original signal unchanged.
Here is the function:
def pad_signal(signal, num_bands):
    """
    Right-pad the signal with zeros so its length is divisible by ``num_bands``.

    Parameters
    ----------
    signal : torch.Tensor
        Input signal; the last dimension is the signal length.
    num_bands : int
        Divisor the padded length must satisfy.

    Returns
    -------
    torch.Tensor
        The padded signal, or the input unchanged when the length is already
        a multiple of ``num_bands``.
    """
    excess = signal.shape[-1] % num_bands
    if excess == 0:
        return signal
    return nn.functional.pad(signal, (0, num_bands - excess))
156,937 | import math
import numpy as np
import torch
import torch.nn as nn
from einops import rearrange
from scipy.optimize import fmin
from scipy.signal import firwin, kaiser, kaiser_beta, kaiserord
The provided code snippet includes necessary dependencies for implementing the `generate_modulated_filter_bank` function. Write a Python function `def generate_modulated_filter_bank(prototype_filter, num_bands)` to solve the following problem:
Generate a QMF bank of cosine modulated filters based on a given prototype filter. Parameters ---------- prototype_filter : torch.Tensor The prototype filter used as the basis for modulation. num_bands : int The number of desired subbands or filters. Returns ------- torch.Tensor A bank of cosine modulated filters.
Here is the function:
def generate_modulated_filter_bank(prototype_filter, num_bands):
    """
    Build a QMF bank of cosine-modulated filters from a prototype filter.

    Parameters
    ----------
    prototype_filter : torch.Tensor
        Lowpass prototype; assumed odd-length so its support is symmetric
        about zero.
    num_bands : int
        Number of subband filters to generate.

    Returns
    -------
    torch.Tensor
        Filter bank of shape (num_bands, filter_length).
    """
    band_idx = torch.arange(num_bands).reshape(-1, 1)
    taps = prototype_filter.shape[-1]
    # Symmetric time support centred on the middle tap.
    t = torch.arange(-(taps // 2), taps // 2 + 1)
    # Alternating +/- pi/4 phase keeps neighbouring subbands orthogonal.
    phase = (-1) ** band_idx * np.pi / 4
    carrier = torch.cos((2 * band_idx + 1) * np.pi / (2 * num_bands) * t + phase)
    return 2 * prototype_filter * carrier
156,938 | import math
import numpy as np
import torch
import torch.nn as nn
from einops import rearrange
from scipy.optimize import fmin
from scipy.signal import firwin, kaiser, kaiser_beta, kaiserord
def design_kaiser_lowpass(angular_cutoff, attenuation, filter_length=None):
    """
    Design a lowpass filter using the Kaiser window.

    Parameters
    ----------
    angular_cutoff : float
        The angular frequency cutoff of the filter, in radians/sample
        (must lie in (0, pi)).
    attenuation : float
        The desired stopband attenuation in decibels (dB).
    filter_length : int, optional
        Desired length of the filter. If not provided, it's computed based on the given specs.

    Returns
    -------
    ndarray
        The designed lowpass filter coefficients.
    """
    estimated_length, beta = kaiserord(attenuation, angular_cutoff / np.pi)

    # Force an odd length so the filter is symmetric with a centre tap.
    estimated_length = 2 * (estimated_length // 2) + 1

    if filter_length is None:
        filter_length = estimated_length

    # FIX: firwin's `nyq` keyword was deprecated and removed in SciPy >= 1.12.
    # `fs=2*pi` expresses the identical normalization (Nyquist = fs/2 = pi).
    return firwin(filter_length, angular_cutoff, window=('kaiser', beta),
                  scale=False, fs=2 * np.pi)
def evaluate_filter_objective(angular_cutoff, attenuation, num_bands, filter_length):
    """
    Evaluate the filter's objective value based on the criteria from
    https://ieeexplore.ieee.org/document/681427

    Parameters
    ----------
    angular_cutoff : float
        Angular frequency cutoff of the filter.
    attenuation : float
        Desired stopband attenuation in dB.
    num_bands : int
        Number of bands for the multiband filter system.
    filter_length : int, optional
        Desired length of the filter.

    Returns
    -------
    float
        The computed objective (loss) value for the given filter specs.
    """
    coeffs = design_kaiser_lowpass(angular_cutoff, attenuation, filter_length)
    # Autocorrelation of the prototype; the objective penalizes non-zero
    # samples at multiples of 2*num_bands away from the centre tap.
    autocorr = np.convolve(coeffs, coeffs[::-1], "full")
    center = autocorr.shape[-1] // 2
    strided = autocorr[center::2 * num_bands]
    return np.max(np.abs(strided[1:]))
The provided code snippet includes necessary dependencies for implementing the `design_prototype_filter` function. Write a Python function `def design_prototype_filter(attenuation, num_bands, filter_length=None)` to solve the following problem:
Design the optimal prototype filter for a multiband system given the desired specs. Parameters ---------- attenuation : float The desired stopband attenuation in dB. num_bands : int Number of bands for the multiband filter system. filter_length : int, optional Desired length of the filter. If not provided, it's computed based on the given specs. Returns ------- ndarray The optimal prototype filter coefficients.
Here is the function:
def design_prototype_filter(attenuation, num_bands, filter_length=None):
    """
    Design the optimal prototype filter for a multiband system given the desired specs.

    Parameters
    ----------
    attenuation : float
        The desired stopband attenuation in dB.
    num_bands : int
        Number of bands for the multiband filter system.
    filter_length : int, optional
        Desired length of the filter. If not provided, it's computed based on the given specs.

    Returns
    -------
    torch.Tensor
        The optimal prototype filter coefficients (float32).
    """
    def objective(angular_cutoff):
        return evaluate_filter_objective(angular_cutoff, attenuation, num_bands, filter_length)

    # Search for the cutoff minimising aliasing, starting near 1/num_bands.
    best_cutoff = fmin(objective, 1 / num_bands, disp=0)[0]
    coeffs = design_kaiser_lowpass(best_cutoff, attenuation, filter_length)
    return torch.tensor(coeffs, dtype=torch.float32)
156,939 | import math
import numpy as np
import torch
import torch.nn as nn
from einops import rearrange
from scipy.optimize import fmin
from scipy.signal import firwin, kaiser, kaiser_beta, kaiserord
The provided code snippet includes necessary dependencies for implementing the `pad_to_nearest_power_of_two` function. Write a Python function `def pad_to_nearest_power_of_two(x)` to solve the following problem:
Pads the input tensor 'x' on both sides such that its last dimension becomes the nearest larger power of two. Parameters: ----------- x : torch.Tensor The input tensor to be padded. Returns: -------- torch.Tensor The padded tensor.
Here is the function:
def pad_to_nearest_power_of_two(x):
    """
    Zero-pad the last dimension of ``x`` (split between both sides, extra
    sample on the right) up to the nearest power of two.

    Parameters:
    -----------
    x : torch.Tensor
        The input tensor to be padded.

    Returns:
    --------
    torch.Tensor
        The padded tensor (unchanged if the length is already a power of two).
    """
    length = x.shape[-1]
    target = 2 ** math.ceil(math.log2(length))
    deficit = target - length
    left = deficit // 2
    return nn.functional.pad(x, (left, deficit - left))
156,940 | import math
import numpy as np
import torch
import torch.nn as nn
from einops import rearrange
from scipy.optimize import fmin
from scipy.signal import firwin, kaiser, kaiser_beta, kaiserord
The provided code snippet includes necessary dependencies for implementing the `apply_alias_cancellation` function. Write a Python function `def apply_alias_cancellation(x)` to solve the following problem:
Applies alias cancellation by inverting the sign of every second element of every second row, starting from the second row's first element in a tensor. This operation helps ensure that the aliasing introduced in each band during the decomposition will be counteracted during the reconstruction. Parameters: ----------- x : torch.Tensor The input tensor. Returns: -------- torch.Tensor Tensor with specific elements' sign inverted for alias cancellation.
Here is the function:
def apply_alias_cancellation(x):
    """
    Flip the sign of every second sample (even positions) on every second
    band (odd rows) of ``x``.

    This sign pattern counteracts the aliasing each band picks up during
    decomposition, so it cancels at reconstruction.

    Parameters:
    -----------
    x : torch.Tensor
        The input tensor; the last two dims are (bands, samples).

    Returns:
    --------
    torch.Tensor
        Tensor with the alias-cancellation sign pattern applied.
    """
    signs = torch.ones_like(x)
    # Odd-indexed rows, even-indexed columns get inverted.
    signs[..., 1::2, ::2] = -1
    return x * signs
156,941 | import math
import numpy as np
import torch
import torch.nn as nn
from einops import rearrange
from scipy.optimize import fmin
from scipy.signal import firwin, kaiser, kaiser_beta, kaiserord
The provided code snippet includes necessary dependencies for implementing the `ensure_odd_length` function. Write a Python function `def ensure_odd_length(tensor)` to solve the following problem:
Pads the last dimension of a tensor to ensure its size is odd. Parameters: ----------- tensor : torch.Tensor Input tensor whose last dimension might need padding. Returns: -------- torch.Tensor The original tensor if its last dimension was already odd, or the padded tensor with an odd-sized last dimension.
Here is the function:
def ensure_odd_length(tensor):
    """
    Pad the last dimension with a single trailing zero if its size is even.

    Parameters
    ----------
    tensor : torch.Tensor
        Input tensor whose last dimension might need padding.

    Returns
    -------
    torch.Tensor
        The original tensor if its last dimension was already odd,
        otherwise a zero-padded copy with an odd-sized last dimension.
    """
    needs_padding = tensor.shape[-1] % 2 == 0
    return nn.functional.pad(tensor, (0, 1)) if needs_padding else tensor
156,942 | import math
import numpy as np
import torch
import torch.nn as nn
from einops import rearrange
from scipy.optimize import fmin
from scipy.signal import firwin, kaiser, kaiser_beta, kaiserord
The provided code snippet includes necessary dependencies for implementing the `polyphase_analysis` function. Write a Python function `def polyphase_analysis(signal, filter_bank)` to solve the following problem:
Applies the polyphase method to efficiently analyze the signal using a filter bank. Parameters: ----------- signal : torch.Tensor Input signal tensor with shape (Batch x Channels x Length). filter_bank : torch.Tensor Filter bank tensor with shape (Bands x Length). Returns: -------- torch.Tensor Signal split into sub-bands. (Batch x Channels x Bands x Length)
Here is the function:
def polyphase_analysis(signal, filter_bank):
    """
    Applies the polyphase method to efficiently analyze the signal using a filter bank.

    Parameters:
    -----------
    signal : torch.Tensor
        Input signal tensor with shape (Batch x Channels x Length).
        NOTE(review): Length must be divisible by the number of bands for
        the rearrange below to succeed — confirm with callers.
    filter_bank : torch.Tensor
        Filter bank tensor with shape (Bands x Length).
        NOTE(review): the filter length must likewise be divisible by the
        number of bands.
    Returns:
    --------
    torch.Tensor
        Signal split into sub-bands. (Batch x Channels x Bands x Length)
    """
    num_bands = filter_bank.shape[0]
    num_channels = signal.shape[1]

    # Rearrange signal for polyphase processing: deinterleave Length into
    # (time, band-phase) components.
    # Also combine Batch x Channel into one dimension for now.
    #signal = rearrange(signal, "b c (t n) -> b (c n) t", n=num_bands)
    signal = rearrange(signal, "b c (t n) -> (b c) n t", n=num_bands)

    # Rearrange the filter bank the same way so each filter is split into
    # its polyphase components, matching the signal layout.
    filter_bank = rearrange(filter_bank, "c (t n) -> c n t", n=num_bands)

    # Apply convolution with appropriate padding to maintain spatial dimensions.
    padding = filter_bank.shape[-1] // 2
    filtered_signal = nn.functional.conv1d(signal, filter_bank, padding=padding)

    # Truncate the last dimension post-convolution to adjust the output shape
    # (symmetric padding of an even-length kernel yields one extra sample).
    filtered_signal = filtered_signal[..., :-1]

    # Rearrange the first dimension back into Batch x Channels.
    filtered_signal = rearrange(filtered_signal, "(b c) n t -> b c n t", c=num_channels)

    return filtered_signal
156,943 | import math
import numpy as np
import torch
import torch.nn as nn
from einops import rearrange
from scipy.optimize import fmin
from scipy.signal import firwin, kaiser, kaiser_beta, kaiserord
The provided code snippet includes necessary dependencies for implementing the `polyphase_synthesis` function. Write a Python function `def polyphase_synthesis(signal, filter_bank)` to solve the following problem:
Polyphase Inverse: Apply polyphase filter bank synthesis to reconstruct a signal. Parameters ---------- signal : torch.Tensor Decomposed signal to be reconstructed (shape: Batch x Channels x Bands x Length). filter_bank : torch.Tensor Analysis filter bank (shape: Bands x Length). should_rearrange : bool, optional Flag to determine if the filters should be rearranged for polyphase synthesis. Default is True. Returns ------- torch.Tensor Reconstructed signal (shape: Batch x Channels X Length)
Here is the function:
def polyphase_synthesis(signal, filter_bank):
    """
    Polyphase Inverse: Apply polyphase filter bank synthesis to reconstruct a signal.

    Parameters
    ----------
    signal : torch.Tensor
        Decomposed signal to be reconstructed (shape: Batch x Channels x Bands x Length).
    filter_bank : torch.Tensor
        Analysis filter bank (shape: Bands x Length).

    Returns
    -------
    torch.Tensor
        Reconstructed signal (shape: Batch x Channels X Length)
    """
    num_bands = filter_bank.shape[0]
    num_channels = signal.shape[1]

    # Rearrange the filter bank: time-reverse the filters so the convolution
    # below acts as synthesis filtering, then split into polyphase components.
    filter_bank = filter_bank.flip(-1)
    filter_bank = rearrange(filter_bank, "c (t n) -> n c t", n=num_bands)

    # Combine Batch x Channels into one dimension for now.
    signal = rearrange(signal, "b c n t -> (b c) n t")

    # Apply convolution with appropriate padding.
    padding_amount = filter_bank.shape[-1] // 2 + 1
    reconstructed_signal = nn.functional.conv1d(signal, filter_bank, padding=int(padding_amount))

    # Scale the result: multiplying by the band count compensates for the
    # downsampling performed during analysis.
    reconstructed_signal = reconstructed_signal[..., :-1] * num_bands

    # Reorganize the output (re-interleave polyphase components) and truncate.
    reconstructed_signal = reconstructed_signal.flip(1)
    reconstructed_signal = rearrange(reconstructed_signal, "(b c) n t -> b c (t n)", c=num_channels, n=num_bands)
    # Drops the leading 2 * filter-length samples — presumably the filter-delay
    # transient introduced by the padded convolution; TODO confirm.
    reconstructed_signal = reconstructed_signal[..., 2 * filter_bank.shape[1]:]

    return reconstructed_signal
156,944 | import torch
from einops import rearrange
from torch import nn
from .blocks import AdaRMSNorm
from .transformer import Attention, FeedForward, RotaryEmbedding, LayerNorm
def checkpoint(function, *args, **kwargs):
    """Run `function` under activation checkpointing.

    Defaults to the non-reentrant autograd checkpoint implementation unless
    the caller explicitly passes `use_reentrant`.
    """
    if "use_reentrant" not in kwargs:
        kwargs["use_reentrant"] = False
    return torch.utils.checkpoint.checkpoint(function, *args, **kwargs)
156,945 | import torch
from torch import nn
from torch.nn import functional as F
from einops import rearrange
from vector_quantize_pytorch import ResidualVQ, FSQ
from dac.nn.quantize import ResidualVectorQuantize as DACResidualVQ
def vae_sample(mean, scale):
    """Reparameterized sample from a diagonal Gaussian plus its KL to N(0, I).

    Returns (latents, kl) where `kl` is summed over dim 1 and averaged over
    the batch.
    """
    # Softplus guarantees a positive standard deviation; the epsilon keeps it
    # bounded away from zero for numerical stability.
    std = nn.functional.softplus(scale) + 1e-4
    variance = std * std
    log_variance = torch.log(variance)
    # Reparameterization trick: sample eps ~ N(0, I), then shift and scale.
    latents = torch.randn_like(mean) * std + mean
    # Closed-form KL(N(mean, var) || N(0, 1)).
    kl = (mean * mean + variance - log_variance - 1).sum(1).mean()
    return latents, kl
156,946 | import torch
from torch import nn
from torch.nn import functional as F
from einops import rearrange
from vector_quantize_pytorch import ResidualVQ, FSQ
from dac.nn.quantize import ResidualVectorQuantize as DACResidualVQ
def compute_mean_kernel(x, y):
    """Mean RBF kernel value over all row pairs of `x` and `y`."""
    # Pairwise mean squared differences, scaled by the feature dimension.
    sq_dists = (x[:, None] - y[None]).pow(2).mean(2) / x.shape[-1]
    return torch.exp(-sq_dists).mean()


def compute_mmd(latents):
    """Maximum mean discrepancy between `latents` and a standard normal."""
    # Flatten (batch, channels, time) -> (batch * time, channels).
    flat = latents.permute(0, 2, 1).reshape(-1, latents.shape[1])
    reference = torch.randn_like(flat)

    k_latents = compute_mean_kernel(flat, flat)
    k_reference = compute_mean_kernel(reference, reference)
    k_cross = compute_mean_kernel(flat, reference)

    mmd = k_latents + k_reference - 2 * k_cross
    return mmd.mean()
156,947 | import json
def create_model_from_config(model_config):
    """Instantiate the model described by `model_config`.

    Dispatches on the required 'model_type' key. Sub-module imports stay
    inside each branch so only the dependencies of the requested model type
    are loaded.

    Raises:
        AssertionError: if 'model_type' is missing.
        NotImplementedError: if 'model_type' is unrecognized.
    """
    model_type = model_config.get('model_type', None)
    assert model_type is not None, 'model_type must be specified in model config'

    if model_type == 'autoencoder':
        from .autoencoders import create_autoencoder_from_config
        return create_autoencoder_from_config(model_config)
    if model_type == 'diffusion_uncond':
        from .diffusion import create_diffusion_uncond_from_config
        return create_diffusion_uncond_from_config(model_config)
    if model_type in ('diffusion_cond', 'diffusion_cond_inpaint', 'diffusion_prior'):
        from .diffusion import create_diffusion_cond_from_config
        return create_diffusion_cond_from_config(model_config)
    if model_type == 'diffusion_autoencoder':
        from .autoencoders import create_diffAE_from_config
        return create_diffAE_from_config(model_config)
    if model_type == 'musicgen':
        from .musicgen import create_musicgen_from_config
        return create_musicgen_from_config(model_config)
    if model_type == 'lm':
        from .lm import create_audio_lm_from_config
        return create_audio_lm_from_config(model_config)

    raise NotImplementedError(f'Unknown model type: {model_type}')


def create_model_from_config_path(model_config_path):
    """Load a JSON model config from disk and build the model it describes."""
    with open(model_config_path) as f:
        model_config = json.load(f)

    return create_model_from_config(model_config)
156,948 | import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
from functools import reduce
import typing as tp
from einops import rearrange
from audiotools import AudioSignal, STFTParams
from dac.model.discriminator import WNConv1d, WNConv2d
def get_hinge_losses(score_real, score_fake):
    """Hinge GAN losses; returns (discriminator loss, generator loss)."""
    # Generator wants the fake score pushed up.
    gen_loss = -score_fake.mean()
    # Discriminator applies the hinge margin to both real and fake scores.
    real_term = torch.relu(1 - score_real).mean()
    fake_term = torch.relu(1 + score_fake).mean()
    return real_term + fake_term, gen_loss
156,949 | from clusterfuzz._internal.base import modules
import contextlib
import multiprocessing
import os
import sys
import time
import traceback
from clusterfuzz._internal.base import dates
from clusterfuzz._internal.base import errors
from clusterfuzz._internal.base import task_utils
from clusterfuzz._internal.base import tasks as taskslib
from clusterfuzz._internal.base import untrusted
from clusterfuzz._internal.base import utils
from clusterfuzz._internal.bot.fuzzers import init as fuzzers_init
from clusterfuzz._internal.bot.tasks import update_task
from clusterfuzz._internal.bot.tasks import utasks
from clusterfuzz._internal.datastore import data_handler
from clusterfuzz._internal.datastore import ndb_init
from clusterfuzz._internal.metrics import logs
from clusterfuzz._internal.metrics import monitor
from clusterfuzz._internal.metrics import monitoring_metrics
from clusterfuzz._internal.metrics import profiler
from clusterfuzz._internal.system import environment
class _Monitor:
    """Monitor one task.

    Context manager wrapped around the execution of a single bot task:
    increments the task-count metric on entry and, when LOG_TASK_TIMES is
    set, records the task's wall-clock duration on exit.
    """

    def __init__(self, task, time_module=time):
        # `time_module` is injectable so tests can substitute a fake clock.
        self.task = task
        self.time_module = time_module
        self.start_time = None

    def __enter__(self):
        # Count every task start, labeled by command and job.
        monitoring_metrics.TASK_COUNT.increment({
            'task': self.task.command or '',
            'job': self.task.job or '',
        })
        self.start_time = self.time_module.time()

    def __exit__(self, exc_type, value, trackback):
        # Duration tracking is opt-in via the LOG_TASK_TIMES env setting.
        if not environment.get_value('LOG_TASK_TIMES'):
            return
        duration = self.time_module.time() - self.start_time
        monitoring_metrics.TASK_TOTAL_RUN_TIME.increment_by(
            int(duration), {
                'task': self.task.command or '',
                'job': self.task.job or '',
            })
def schedule_utask_mains():
    """Schedules utask_mains from preprocessed utasks on Google Cloud Batch."""
    # Imported lazily so bots that never schedule batch work skip the
    # cloud-batch dependency.
    from clusterfuzz._internal.google_cloud_utils import batch

    logs.log('Attempting to combine batch tasks.')
    utask_mains = taskslib.get_utask_mains()
    if not utask_mains:
        logs.log('No utask mains.')
        return

    logs.log(f'Combining {len(utask_mains)} batch tasks.')
    batch_tasks = []
    # Hold leases on all tasks while the batch jobs are created so no other
    # bot picks them up concurrently. NOTE(review): `lease_all_tasks` is
    # defined elsewhere in this module.
    with lease_all_tasks(utask_mains):
        batch_tasks = [
            batch.BatchTask(task.command, task.job, task.argument)
            for task in utask_mains
        ]
        batch.create_uworker_main_batch_jobs(batch_tasks)
The provided code snippet includes necessary dependencies for implementing the `task_loop` function. Write a Python function `def task_loop()` to solve the following problem:
Executes tasks indefinitely.
Here is the function:
def task_loop():
    """Executes tasks indefinitely.

    Loops fetching and executing tasks until an exception (or SystemExit)
    breaks the loop. Returns a tuple of
    (stacktrace, clean_exit, task_payload) describing how the loop ended.
    """
    # Defer heavy task imports to prevent issues with multiprocessing.Process
    from clusterfuzz._internal.bot.tasks import commands

    clean_exit = False
    while True:
        stacktrace = ''
        exception_occurred = False
        task = None
        # This caches the current environment on first run. Don't move this.
        environment.reset_environment()
        try:
            # Run regular updates.
            # TODO(metzman): Move this after utask_main execution so that utasks
            # can't be updated on subsequent attempts.
            update_task.run()
            update_task.track_revision()

            if environment.is_uworker():
                # Batch tasks only run one at a time.
                sys.exit(utasks.uworker_bot_main())

            if environment.get_value('SCHEDULE_UTASK_MAINS'):
                # If the bot is configured to schedule utask_mains, don't run any
                # other tasks because scheduling these tasks is more important
                # than executing any one other task.
                # TODO(metzman): Convert this to a k8s cron.
                schedule_utask_mains()
                continue

            task = taskslib.get_task()
            if not task:
                continue

            with _Monitor(task):
                with task.lease():
                    # Execute the command and delete the task.
                    commands.process_command(task)
        except SystemExit as e:
            exception_occurred = True
            clean_exit = e.code == 0
            if not clean_exit and not isinstance(e, untrusted.HostError):
                logs.log_error('SystemExit occurred while working on task.')

            stacktrace = traceback.format_exc()
        except commands.AlreadyRunningError:
            # Another bot already owns this task; not an error.
            exception_occurred = False
        except task_utils.UworkerMsgParseError:
            logs.log_error('Task cannot be retried because of utask parse error.')
            task.dont_retry()
            exception_occurred = True
            stacktrace = traceback.format_exc()
        except Exception:
            logs.log_error('Error occurred while working on task.')
            exception_occurred = True
            stacktrace = traceback.format_exc()

        if exception_occurred:
            # Prevent looping too quickly. See: crbug.com/644830
            failure_wait_interval = environment.get_value('FAIL_WAIT')
            time.sleep(utils.random_number(1, failure_wait_interval))
            break

    task_payload = task.payload() if task else None
    return stacktrace, clean_exit, task_payload
156,950 | from clusterfuzz._internal.base import modules
import os
import sys
import time
from clusterfuzz._internal.base import dates
from clusterfuzz._internal.base import tasks
from clusterfuzz._internal.datastore import data_handler
from clusterfuzz._internal.datastore import data_types
from clusterfuzz._internal.datastore import ndb_init
from clusterfuzz._internal.metrics import logs
from clusterfuzz._internal.system import environment
from clusterfuzz._internal.system import process_handler
from clusterfuzz._internal.system import shell
try:
import psutil
except ImportError:
psutil = None
The provided code snippet includes necessary dependencies for implementing the `beat` function. Write a Python function `def beat(previous_state, log_filename)` to solve the following problem:
Run a cycle of heartbeat checks to ensure bot is running.
Here is the function:
def beat(previous_state, log_filename):
    """Run a cycle of heartbeat checks to ensure bot is running.

    Args:
        previous_state: The log-file mtime string from the previous cycle.
        log_filename: Path of the bot log file used as a liveness signal.

    Returns:
        The current log-file mtime string, or None when the mtime could not
        be read or the heartbeat update failed.
    """
    # Handle case when run_bot.py script is stuck. If yes, kill its process.
    task_end_time = tasks.get_task_end_time()
    if psutil and task_end_time and dates.time_has_expired(
            task_end_time, seconds=tasks.TASK_COMPLETION_BUFFER):

        # Get absolute path to |run_bot| script. We use this to identify unique
        # instances of bot running on a particular host.
        startup_scripts_directory = environment.get_startup_scripts_directory()
        bot_file_path = os.path.join(startup_scripts_directory, 'run_bot')

        for process in psutil.process_iter():
            try:
                command_line = ' '.join(process.cmdline())
            except (psutil.AccessDenied, psutil.NoSuchProcess, OSError):
                # Process may have exited or be inaccessible; skip it.
                continue

            # Find the process running the main bot script.
            if bot_file_path not in command_line:
                continue

            process_id = process.pid
            logs.log(
                'Killing stale bot (pid %d) which seems to have stuck.' % process_id)
            try:
                process_handler.terminate_root_and_child_processes(process_id)
            except Exception:
                logs.log_error('Failed to terminate stale bot processes.')

        # Minor cleanup to avoid disk space issues on bot restart.
        process_handler.terminate_stale_application_instances()
        shell.clear_temp_directory()
        shell.clear_testcase_directories()

        # Concerned stale processes should be killed. Now, delete the stale task.
        tasks.track_task_end()

    # Figure out when the log file was last modified.
    try:
        current_state = str(os.path.getmtime(log_filename))
    except Exception:
        current_state = None

    # Only update the heartbeat if the log file was modified.
    if current_state and current_state != previous_state:
        # Try updating the heartbeat. If an error occurs, just
        # wait and return None.
        if not data_handler.update_heartbeat():
            return None

    # Heartbeat is successfully updated.
    return current_state
156,951 | from clusterfuzz._internal.base import modules
import atexit
import os
import subprocess
import sys
import time
from clusterfuzz._internal.base import persistent_cache
from clusterfuzz._internal.base.untrusted import untrusted_noop
from clusterfuzz._internal.bot.tasks import update_task
from clusterfuzz._internal.datastore import data_handler
from clusterfuzz._internal.datastore import ndb_init
from clusterfuzz._internal.metrics import logs
from clusterfuzz._internal.system import environment
from clusterfuzz._internal.system import shell
LOOP_SLEEP_INTERVAL = 3
def start_bot(bot_command):
    """Start the bot process and wait for it to finish.

    Returns the subprocess's exit code; a timeout is treated as a normal
    run cycle (exit code 0), and a failure to launch returns 1.
    """
    command = shell.get_command(bot_command)

    # Wait until the process terminates or until run timed out.
    # NOTE(review): MAX_SUBPROCESS_TIMEOUT is a module-level constant defined
    # elsewhere in this file.
    run_timeout = environment.get_value('RUN_TIMEOUT')
    if run_timeout and run_timeout > MAX_SUBPROCESS_TIMEOUT:
        logs.log_error(
            'Capping RUN_TIMEOUT to max allowed value: %d' % MAX_SUBPROCESS_TIMEOUT)
        run_timeout = MAX_SUBPROCESS_TIMEOUT

    try:
        result = subprocess.run(
            command,
            timeout=run_timeout,
            stdout=subprocess.PIPE,
            stderr=subprocess.STDOUT,
            check=False)
        exit_code = result.returncode
        output = result.stdout
    except subprocess.TimeoutExpired as e:
        # Timing out is expected; capture whatever output was produced.
        exit_code = 0
        output = e.stdout
    except Exception:
        logs.log_error('Unable to start bot process (%s).' % bot_command)
        return 1

    if output:
        output = output.decode('utf-8', errors='ignore')

    log_message = f'Command: {command} (exit={exit_code})\n{output}'

    if exit_code == 0:
        logs.log(log_message)
    elif exit_code == 1:
        # Anecdotally, exit=1 means there's a fatal Python exception.
        logs.log_error(log_message)
    else:
        logs.log_warn(log_message)

    return exit_code
def sleep(seconds):
    """time.sleep wrapper for mocking in tests."""
    time.sleep(seconds)
def start_heartbeat(heartbeat_command):
    """Start the heartbeat (in another process).

    No-op if a heartbeat process handle already exists. On success, stores
    the subprocess handle in the module-level `_heartbeat_handle`.
    """
    global _heartbeat_handle
    if _heartbeat_handle:
        # If heartbeat is already started, no work to do. Bail out.
        return

    try:
        command = shell.get_command(heartbeat_command)
        process_handle = subprocess.Popen(command)  # pylint: disable=consider-using-with
    except Exception:
        logs.log_error(
            'Unable to start heartbeat process (%s).' % heartbeat_command)
        return

    # If heartbeat is successfully started, set its handle now.
    _heartbeat_handle = process_handle

    # Artificial delay to let heartbeat's start time update first.
    sleep(HEARTBEAT_START_WAIT_TIME)
def stop_heartbeat():
    """Stop the heartbeat process.

    Best-effort: kill failures are swallowed, and the module-level handle is
    always cleared so a new heartbeat can be started later.
    """
    global _heartbeat_handle
    if not _heartbeat_handle:
        # If there is no heartbeat started yet, no work to do. Bail out.
        return

    try:
        _heartbeat_handle.kill()
    except Exception:
        pass

    _heartbeat_handle = None
def start_android_heartbeat():
    """Start the android heartbeat (in another process).

    No-op if an android heartbeat handle already exists. On success, stores
    the subprocess handle in the module-level `_android_heartbeat_handle`.
    """
    global _android_heartbeat_handle
    if _android_heartbeat_handle:
        # If heartbeat is already started, no work to do. Bail out.
        return

    # NOTE(review): ANDROID_HEARTBEAT_SCRIPT is a module-level constant
    # defined elsewhere in this file.
    base_directory = environment.get_startup_scripts_directory()
    android_beat_script_path = os.path.join(base_directory,
                                            ANDROID_HEARTBEAT_SCRIPT)
    android_beat_interpreter = shell.get_interpreter(android_beat_script_path)
    assert android_beat_interpreter
    android_beat_command = [android_beat_interpreter, android_beat_script_path]

    try:
        process_handle = subprocess.Popen(android_beat_command)
    except Exception:
        logs.log_error('Unable to start android heartbeat process (%s).' %
                       android_beat_command)
        return

    # If heartbeat is successfully started, set its handle now.
    _android_heartbeat_handle = process_handle
def stop_android_heartbeat():
    """Stop the android heartbeat process.

    Best-effort: a kill failure is logged, and the module-level handle is
    always cleared afterwards.
    """
    global _android_heartbeat_handle
    if not _android_heartbeat_handle:
        # If there is no heartbeat started yet, no work to do. Bail out.
        return

    try:
        _android_heartbeat_handle.kill()
    except Exception as e:
        logs.log_error('Unable to stop android heartbeat process: %s' % str(e))

    _android_heartbeat_handle = None
def update_source_code_if_needed():
    """Update source code if needed.

    Checks for a newer source revision and, when one exists, stops the
    heartbeat before updating. Any failure is logged and swallowed so the
    bot loop keeps running.
    """
    try:
        # Update the bot source, if there's a newer version.
        newer_source_revision = update_task.get_newer_source_revision()
        if newer_source_revision is not None:
            # If source code needs update, stop the heartbeat first. As otherwise,
            # we can run into exceptions if source code changed from underneath
            # a running process.
            stop_heartbeat()
            update_task.update_source_code()
    except Exception:
        logs.log_error('Failed to update source.')
The provided code snippet includes necessary dependencies for implementing the `run_loop` function. Write a Python function `def run_loop(bot_command, heartbeat_command)` to solve the following problem:
Run infinite loop with bot's command.
Here is the function:
def run_loop(bot_command, heartbeat_command):
    """Run infinite loop with bot's command.

    Each iteration updates source if needed, (re)starts heartbeats, runs the
    bot once, and exits or breaks out on uworker completion / run timeout.
    """
    # Ensure heartbeats are stopped when this process exits.
    atexit.register(stop_heartbeat)
    if environment.is_android():
        atexit.register(stop_android_heartbeat)

    while True:
        update_source_code_if_needed()
        if not environment.is_uworker():
            # uworkers don't need heartbeats; they are one-shot batch jobs.
            if environment.is_android():
                start_android_heartbeat()
            start_heartbeat(heartbeat_command)
        exit_code = start_bot(bot_command)
        if environment.is_uworker():
            logs.log(f'Batch job exited with code: {exit_code}. Exiting.')
            sys.exit(exit_code)

        # See if our run timed out, if yes bail out.
        try:
            if data_handler.bot_run_timed_out():
                break
        except Exception:
            logs.log_error('Failed to check for bot run timeout.')

        sleep(LOOP_SLEEP_INTERVAL)
156,952 | from clusterfuzz._internal.base import modules
import atexit
import os
import subprocess
import sys
import time
from clusterfuzz._internal.base import persistent_cache
from clusterfuzz._internal.base.untrusted import untrusted_noop
from clusterfuzz._internal.bot.tasks import update_task
from clusterfuzz._internal.datastore import data_handler
from clusterfuzz._internal.datastore import ndb_init
from clusterfuzz._internal.metrics import logs
from clusterfuzz._internal.system import environment
from clusterfuzz._internal.system import shell
The provided code snippet includes necessary dependencies for implementing the `set_start_time` function. Write a Python function `def set_start_time()` to solve the following problem:
Set START_TIME.
Here is the function:
def set_start_time():
    """Set START_TIME.

    Records the current epoch time in the START_TIME environment value.
    """
    environment.set_value('START_TIME', time.time())
156,953 | from http import HTTPStatus
from http.server import BaseHTTPRequestHandler
from http.server import HTTPServer
import threading
from clusterfuzz._internal.base import utils
from clusterfuzz._internal.system import environment
from clusterfuzz._internal.system import process_handler
RESPONDER_IP = '0.0.0.0'
RESPONDER_PORT = 7123
class RequestHandler(BaseHTTPRequestHandler):
    """Handler for GET requests from the health checker."""

    def do_GET(self):  # pylint: disable=invalid-name
        """Handle a GET request.

        Responds 200 when the expected bot scripts are running, 500 otherwise.
        NOTE(review): EXPECTED_SCRIPTS is a module-level constant defined
        elsewhere in this file.
        """
        if process_handler.scripts_are_running(EXPECTED_SCRIPTS):
            # Note: run_bot.py is expected to go down during source updates
            # (which can take a few minutes)
            # Health checks should be resilient to this
            # and set a threshold / check interval to account for this.
            response_code = HTTPStatus.OK
        else:
            response_code = HTTPStatus.INTERNAL_SERVER_ERROR
        self.send_response(response_code)
        self.end_headers()
The provided code snippet includes necessary dependencies for implementing the `run_server` function. Write a Python function `def run_server()` to solve the following problem:
Start a HTTP server to respond to the health checker.
Here is the function:
def run_server():
    """Start a HTTP server to respond to the health checker.

    The server runs `serve_forever` on a background thread so the caller is
    not blocked. NOTE(review): the thread is non-daemon and will keep the
    process alive — confirm that is intended.
    """
    if utils.is_oss_fuzz() or environment.is_android_real_device():
        # OSS-Fuzz & Android multiple instances per host model isn't supported
        # yet.
        return

    health_check_responder_server = HTTPServer((RESPONDER_IP, RESPONDER_PORT),
                                               RequestHandler)
    server_thread = threading.Thread(
        target=health_check_responder_server.serve_forever)
    server_thread.start()
156,954 | import json
from clusterfuzz._internal.base import external_users
from clusterfuzz._internal.base import memoize
from clusterfuzz._internal.base import utils
from clusterfuzz._internal.datastore import data_handler
from clusterfuzz._internal.datastore import data_types
from clusterfuzz._internal.google_cloud_utils import storage
from clusterfuzz._internal.metrics import logs
from clusterfuzz._internal.system import environment
from handlers import base_handler
from libs import access
from libs import handler
from libs import helpers
def _get_all_project_results():
    """Return results for all jobs.

    NOTE(review): `_get_project_results_for_jobs` is defined elsewhere in
    this file.
    """
    jobs = list(data_types.Job.query())
    return _get_project_results_for_jobs(jobs)


def _get_project_results_for_external_user(external_jobs):
    """Return results restricted to the jobs an external user may access."""
    jobs = list(data_types.Job.query())
    # Filter down to only the jobs the external user has been granted.
    jobs = [job for job in jobs if job.name in external_jobs]
    return _get_project_results_for_jobs(jobs)
The provided code snippet includes necessary dependencies for implementing the `get_results` function. Write a Python function `def get_results()` to solve the following problem:
Return results.
Here is the function:
def get_results():
    """Return project results for the current user.

    Internal users see all projects; external users see only the projects
    for their allowed jobs.

    Raises:
        helpers.AccessDeniedError: if the user is neither internal nor an
            external user with allowed jobs.
    """
    is_user = access.has_access()
    user_email = helpers.get_user_email()
    external_jobs = external_users.allowed_jobs_for_user(user_email)

    is_external_user = not is_user and external_jobs
    if not is_user and not is_external_user:
        raise helpers.AccessDeniedError()

    if is_user:
        projects = _get_all_project_results()
    else:
        projects = _get_project_results_for_external_user(external_jobs)

    results = {
        'info': {
            'projects': projects,
            'is_internal_user': is_user,
        },
    }
    return results
156,955 | import json
from clusterfuzz._internal.crash_analysis import crash_analyzer
from clusterfuzz._internal.crash_analysis.crash_comparer import CrashComparer
from clusterfuzz._internal.crash_analysis.stack_parsing import stack_analyzer
from clusterfuzz._internal.datastore import data_handler
from clusterfuzz._internal.datastore import data_types
from clusterfuzz._internal.metrics import logs
from handlers import base_handler
from libs import handler
from libs import helpers
OLD_PROTOCOL = '1'
NEW_PROTOCOL = '2'
def _mark_as_fixed(testcase, revision):
    """Mark bug as fixed at `revision` and close the testcase."""
    testcase.open = False
    # Bisection not available for external reproduction infrastructure. Assume
    # range (crash revision : current revision).
    testcase.fixed = f'{testcase.crash_revision}:{revision}'
    data_handler.update_progression_completion_metadata(
        testcase, revision, message=f'fixed in r{revision}')


def _mark_errored(testcase, revision, error):
    """Mark testcase as errored out and close it as NA."""
    message = 'Received error from external infra, marking testcase as NA.'
    logs.log_warn(message, error=error, testcase_id=testcase.key.id())

    testcase.fixed = 'NA'
    testcase.open = False
    data_handler.update_progression_completion_metadata(
        testcase, revision, message=message)
class CrashComparer:
    """Compares two crash results (crash state strings) for similarity."""

    # Average per-line similarity ratio above which states are deemed similar.
    COMPARE_THRESHOLD = 0.8
    # Number of common stack frames that alone implies similarity.
    SAME_FRAMES_THRESHOLD = 2

    def __init__(self, crash_state_1, crash_state_2, compare_threshold=None):
        self.crash_state_1 = crash_state_1
        self.crash_state_2 = crash_state_2
        self.compare_threshold = compare_threshold or self.COMPARE_THRESHOLD

    def is_similar(self):
        """Return a bool for whether the two crash results are similar."""
        # If one of the crash state is empty, it can't match anything.
        if not self.crash_state_1 or not self.crash_state_2:
            return False

        # Optimization: Do a == check first before others.
        if self.crash_state_1 == self.crash_state_2:
            return True

        # If there is a fuzzer hash string in state, then rely on exact comparison.
        # Since we failed the check above, our hashes don't match.
        if 'FuzzerHash=' in self.crash_state_1:
            return False

        # TODO(aarya): Improve this algorithm and leverage other parts of
        # stacktrace.
        crash_state_lines_1 = self.crash_state_1.splitlines()
        crash_state_lines_2 = self.crash_state_2.splitlines()

        # NOTE(review): `longest_common_subsequence` is a module-level helper
        # defined elsewhere in this file.
        if (longest_common_subsequence(crash_state_lines_1, crash_state_lines_2) >=
                self.SAME_FRAMES_THRESHOLD):
            return True

        lines_compared = 0
        similarity_ratio_sum = 0.0
        for i in range(len(crash_state_lines_1)):
            if i >= len(crash_state_lines_2):
                break

            # NOTE(review): `_similarity_ratio` is a module-level helper
            # defined elsewhere in this file.
            similarity_ratio = _similarity_ratio(crash_state_lines_1[i],
                                                 crash_state_lines_2[i])
            lines_compared += 1
            similarity_ratio_sum += similarity_ratio

        similarity_ratio_average = similarity_ratio_sum / lines_compared
        return similarity_ratio_average > self.compare_threshold
The provided code snippet includes necessary dependencies for implementing the `handle_update` function. Write a Python function `def handle_update(testcase, revision, stacktraces, error, protocol_version)` to solve the following problem:
Handle update.
Here is the function:
def handle_update(testcase, revision, stacktraces, error, protocol_version):
    """Handle an external reproduction update for a testcase.

    Checks each reported stacktrace against the testcase's original crash;
    if any still matches, records the crash as reproducing, otherwise marks
    the testcase fixed. An `error` from the external infra closes the
    testcase as NA.

    Args:
        testcase: Testcase entity being updated.
        revision: Revision the external infrastructure tested against.
        stacktraces: List of symbolized stacktraces, one per trial.
        error: Optional error string reported by the external infrastructure.
        protocol_version: Protocol version string; must be OLD_PROTOCOL or
            NEW_PROTOCOL.
    """

    def is_still_crashing(st_index, stacktrace):
        """Check if the given stacktrace indicates the testcase still crashes."""
        state = stack_analyzer.get_crash_data(
            stacktrace,
            fuzz_target=fuzz_target_name,
            symbolize_flag=False,
            already_symbolized=True,
            detect_ooms_and_hangs=True)

        # The new crash state must be similar to the original one.
        crash_comparer = CrashComparer(state.crash_state, testcase.crash_state)
        if not crash_comparer.is_similar():
            return False

        logs.log(f'State for trial {st_index} of {testcase_id} '
                 f'remains similar'
                 f'(old_state={testcase.crash_state}, '
                 f'new_state={state.crash_state}).')

        # The security assessment must also match the original.
        is_security = crash_analyzer.is_security_issue(
            state.crash_stacktrace, state.crash_type, state.crash_address)
        if is_security != testcase.security_flag:
            return False

        logs.log(f'Security flag for trial {st_index} of {testcase_id} '
                 f'still matches'
                 f'({testcase.security_flag}).')
        return True

    testcase_id = testcase.key.id()
    logs.log('Got external update for testcase.', testcase_id=testcase_id)
    if error:
        _mark_errored(testcase, revision, error)
        return

    last_tested_revision = (
        testcase.get_metadata('last_tested_revision') or testcase.crash_revision)

    # Ignore stale updates for revisions older than what was already tested.
    if revision < last_tested_revision:
        logs.log_warn(f'Revision {revision} less than previously tested '
                      f'revision {last_tested_revision}.')
        return

    if protocol_version not in [OLD_PROTOCOL, NEW_PROTOCOL]:
        logs.log_error(f'Invalid protocol_version provided: '
                       f'{protocol_version} '
                       f'is not one of {{{OLD_PROTOCOL, NEW_PROTOCOL}}} '
                       f'(testcase_id={testcase_id}).')
        return

    if not stacktraces:
        logs.log_error(f'Empty JSON stacktrace list provided '
                       f'(testcase_id={testcase_id}).')
        return

    fuzz_target = testcase.get_fuzz_target()
    if fuzz_target:
        fuzz_target_name = fuzz_target.binary
        # Record use of fuzz target to avoid garbage collection (since
        # fuzz_task does not run). Guarded by the None check above: the
        # previous unconditional call raised AttributeError for testcases
        # without a fuzz target.
        data_handler.record_fuzz_target(fuzz_target.engine, fuzz_target.binary,
                                        testcase.job_type)
    else:
        fuzz_target_name = None

    for st_index, stacktrace in enumerate(stacktraces):
        if is_still_crashing(st_index, stacktrace):
            logs.log(f'stacktrace {st_index} of {testcase_id} still crashes.')
            testcase.last_tested_crash_stacktrace = stacktrace
            data_handler.update_progression_completion_metadata(
                testcase, revision, is_crash=True)
            return

    # All trials resulted in a non-crash. Close the testcase.
    logs.log(f'No matching crash detected in {testcase_id} '
             f'over {len(stacktraces)} trials, marking as fixed.')
    _mark_as_fixed(testcase, revision)
156,956 | import datetime
import html
import re
import urllib.parse
from flask import request
from googleapiclient.errors import HttpError
import yaml
from clusterfuzz._internal.base import external_users
from clusterfuzz._internal.base import memoize
from clusterfuzz._internal.base import utils
from clusterfuzz._internal.datastore import data_handler
from clusterfuzz._internal.datastore import data_types
from clusterfuzz._internal.google_cloud_utils import big_query
from clusterfuzz._internal.metrics import fuzzer_stats
from clusterfuzz._internal.metrics import logs
from handlers import base_handler
from libs import access
from libs import handler
from libs import helpers
The provided code snippet includes necessary dependencies for implementing the `_get_filter_from_job` function. Write a Python function `def _get_filter_from_job(job)` to solve the following problem:
Creates a job filter from |job|.
Here is the function:
def _get_filter_from_job(job):
  """Creates a job filter from |job|.

  Returns:
    A single-element list containing the stringified job name, or None when
    |job| is falsy (meaning "no job filter").
  """
  return [str(job)] if job else None | Creates a job filter from |job|. |
156,957 | import datetime
import html
import re
import urllib.parse
from flask import request
from googleapiclient.errors import HttpError
import yaml
from clusterfuzz._internal.base import external_users
from clusterfuzz._internal.base import memoize
from clusterfuzz._internal.base import utils
from clusterfuzz._internal.datastore import data_handler
from clusterfuzz._internal.datastore import data_types
from clusterfuzz._internal.google_cloud_utils import big_query
from clusterfuzz._internal.metrics import fuzzer_stats
from clusterfuzz._internal.metrics import logs
from handlers import base_handler
from libs import access
from libs import handler
from libs import helpers
def _parse_date(date_str):
"""Parse YYYY-MM-DD."""
if not date_str:
return None
pattern = re.compile(r'^(\d{4})-(\d{2})-(\d{2})$')
match = pattern.match(date_str)
if not match:
return None
year, month, day = (int(val) for val in match.groups())
return datetime.date(year, month, day)
def _build_todays_results(fuzzer, jobs, group_by, date_start, date_end):
  """Wrapper around _build_results that is intended for use by queries where
  date_end is today. Caches results for 15 minutes.

  NOTE(review): no caching decorator is visible on this definition —
  presumably a @memoize.wrap is applied in the full source; confirm.
  """
  return _build_results(fuzzer, jobs, group_by, date_start, date_end)
def _build_old_results(fuzzer, jobs, group_by, date_start, date_end):
  """Wrapper around _build_results that is intended for use by queries where
  date_end is before today. Caches results for 24 hours.

  NOTE(review): no caching decorator is visible on this definition —
  presumably a @memoize.wrap is applied in the full source; confirm.
  """
  return _build_results(fuzzer, jobs, group_by, date_start, date_end)
The provided code snippet includes necessary dependencies for implementing the `build_results` function. Write a Python function `def build_results(fuzzer, jobs, group_by, date_start, date_end)` to solve the following problem:
Wrapper around the caching wrappers for _build_results. Decides which of those wrappers to call based on how long query should be cached for.
Here is the function:
def build_results(fuzzer, jobs, group_by, date_start, date_end):
  """Wrapper around the caching wrappers for _build_results.

  Dispatches to _build_old_results (longer cache) when the requested end
  date is strictly before today, and to _build_todays_results otherwise.

  Raises:
    helpers.EarlyExitError: if date_end is missing or not 'YYYY-MM-DD'.
  """
  datetime_end = _parse_date(date_end)
  if not datetime_end:
    raise helpers.EarlyExitError('Missing end date.', 400)

  if datetime_end < utils.utcnow().date():
    logs.log('Building results for older stats %s %s %s %s %s.' %
             (fuzzer, jobs, group_by, date_start, date_end))
    return _build_old_results(fuzzer, jobs, group_by, date_start, date_end)

  logs.log('Building results for stats including today %s %s %s %s %s.' %
           (fuzzer, jobs, group_by, date_start, date_end))
  return _build_todays_results(fuzzer, jobs, group_by, date_start, date_end) | Wrapper around the caching wrappers for _build_results. Decides which of those wrappers to call based on how long query should be cached for. |
156,958 | import datetime
import html
import re
import urllib.parse
from flask import request
from googleapiclient.errors import HttpError
import yaml
from clusterfuzz._internal.base import external_users
from clusterfuzz._internal.base import memoize
from clusterfuzz._internal.base import utils
from clusterfuzz._internal.datastore import data_handler
from clusterfuzz._internal.datastore import data_types
from clusterfuzz._internal.google_cloud_utils import big_query
from clusterfuzz._internal.metrics import fuzzer_stats
from clusterfuzz._internal.metrics import logs
from handlers import base_handler
from libs import access
from libs import handler
from libs import helpers
The provided code snippet includes necessary dependencies for implementing the `_get_date` function. Write a Python function `def _get_date(date_value, days_ago)` to solve the following problem:
Returns |date_value| if it is not empty otherwise returns the date |days_ago| number of days ago.
Here is the function:
def _get_date(date_value, days_ago):
  """Returns |date_value| if it is not empty otherwise returns the date
  |days_ago| number of days ago, formatted as 'YYYY-MM-DD' (UTC)."""
  if date_value:
    return date_value

  date_datetime = utils.utcnow() - datetime.timedelta(days=days_ago)
  return date_datetime.strftime('%Y-%m-%d') | Returns |date_value| if it is not empty otherwise returns the date |days_ago| number of days ago. |
156,959 | import json
from flask import request
from clusterfuzz._internal.google_cloud_utils import big_query
from handlers import base_handler
from libs import crash_access
from libs import filters
from libs import handler
from libs import helpers
from libs.query import big_query_query
PAGE_SIZE = 30
FILTERS = [
TypeFilter(),
RevisionFilter(),
KeywordFilter(),
IncludeZeroFilter(),
]
def get(params, query, offset, limit):
  """Fetch one page of crash-stats data from BigQuery.

  Returns:
    A (rows, total_count) tuple for the requested window.
  """
  crash_type = params['type']
  sql = SQL.format(
      table_id=crash_type + 's',
      where_clause=query.get_where_clause(),
      prefix=crash_type,
      offset=offset,
      limit=limit)

  result = big_query.Client().query(query=sql, offset=offset, limit=limit)
  return result.rows, result.total_count
The provided code snippet includes necessary dependencies for implementing the `get_result` function. Write a Python function `def get_result()` to solve the following problem:
Get the result for the crash stats page.
Here is the function:
def get_result():
  """Get the result for the crash stats page.

  Returns:
    (result_dict, params) where result_dict carries pagination metadata
    and the fetched rows.
  """
  params = dict(request.iterparams())
  params['type'] = params.get('type', 'regression')
  page = helpers.cast(request.get('page') or 1, int, "'page' is not an int.")

  is_revision_empty = 'revision' not in params

  query = big_query_query.Query()
  # Restrict the query to what the current user is allowed to see.
  crash_access.add_scope(query, params, 'security_flag', 'job_type',
                         'fuzzer_name')

  if is_revision_empty:
    # Without a revision filter, skip querying entirely and return nothing.
    total_count = 0
    rows = []
  else:
    filters.add(query, params, FILTERS)
    rows, total_count = get(
        params=params,
        query=query,
        offset=(page - 1) * PAGE_SIZE,
        limit=PAGE_SIZE)

  helpers.log('Regression', helpers.VIEW_OPERATION)

  result = {
      'totalPages': (total_count // PAGE_SIZE) + 1,
      'page': page,
      'pageSize': PAGE_SIZE,
      'items': rows,
      'totalCount': total_count,
      'isRevisionEmpty': is_revision_empty
  }
  return result, params | Get the result for the crash stats page. |
156,960 | import base64
import cgi
import datetime
import json
import logging
import os
import re
import sys
import traceback
import urllib.parse
from flask import redirect as flask_redirect
from flask import request
from flask import Response
from flask.views import MethodView
from google.cloud import ndb
import jinja2
import jira
from clusterfuzz._internal.base import utils
from clusterfuzz._internal.config import db_config
from clusterfuzz._internal.config import local_config
from clusterfuzz._internal.google_cloud_utils import storage
from clusterfuzz._internal.system import environment
from libs import auth
from libs import form
from libs import helpers
_JINJA_ENVIRONMENT = jinja2.Environment(
loader=jinja2.FileSystemLoader(
os.path.join(os.path.dirname(__file__), '..', 'templates')),
extensions=['jinja2.ext.autoescape'],
autoescape=True)
def add_jinja2_filter(name, fn):
  """Register |fn| as a Jinja2 template filter under |name|."""
  _JINJA_ENVIRONMENT.filters[name] = fn | null |
156,961 | import base64
import cgi
import datetime
import json
import logging
import os
import re
import sys
import traceback
import urllib.parse
from flask import redirect as flask_redirect
from flask import request
from flask import Response
from flask.views import MethodView
from google.cloud import ndb
import jinja2
import jira
from clusterfuzz._internal.base import utils
from clusterfuzz._internal.config import db_config
from clusterfuzz._internal.config import local_config
from clusterfuzz._internal.google_cloud_utils import storage
from clusterfuzz._internal.system import environment
from libs import auth
from libs import form
from libs import helpers
The provided code snippet includes necessary dependencies for implementing the `format_time` function. Write a Python function `def format_time(dt)` to solve the following problem:
Format datetime object for display.
Here is the function:
def format_time(dt):
  """Format datetime object for display, e.g. '9 Jul 21 12:34:56 PDT'.

  NOTE(review): 'PDT' is a hard-coded literal; |dt| is not converted to
  Pacific time here — presumably callers pass already-localized values.
  """
  return '{t.day} {t:%b} {t:%y} {t:%X} PDT'.format(t=dt) | Format datetime object for display. |
156,962 | import base64
import cgi
import datetime
import json
import logging
import os
import re
import sys
import traceback
import urllib.parse
from flask import redirect as flask_redirect
from flask import request
from flask import Response
from flask.views import MethodView
from google.cloud import ndb
import jinja2
import jira
from clusterfuzz._internal.base import utils
from clusterfuzz._internal.config import db_config
from clusterfuzz._internal.config import local_config
from clusterfuzz._internal.google_cloud_utils import storage
from clusterfuzz._internal.system import environment
from libs import auth
from libs import form
from libs import helpers
The provided code snippet includes necessary dependencies for implementing the `splitlines` function. Write a Python function `def splitlines(text)` to solve the following problem:
Split text into lines.
Here is the function:
def splitlines(text):
  """Split text into lines (template filter wrapper over str.splitlines)."""
  return text.splitlines() | Split text into lines. |
156,963 | import base64
import cgi
import datetime
import json
import logging
import os
import re
import sys
import traceback
import urllib.parse
from flask import redirect as flask_redirect
from flask import request
from flask import Response
from flask.views import MethodView
from google.cloud import ndb
import jinja2
import jira
from clusterfuzz._internal.base import utils
from clusterfuzz._internal.config import db_config
from clusterfuzz._internal.config import local_config
from clusterfuzz._internal.google_cloud_utils import storage
from clusterfuzz._internal.system import environment
from libs import auth
from libs import form
from libs import helpers
def split_br(text):
  """Split |text| on HTML <br /> tags (case-insensitive), consuming any
  whitespace around each tag."""
  return re.split(r'\s*<br */>\s*', text, flags=re.IGNORECASE) | null |
156,964 | import base64
import cgi
import datetime
import json
import logging
import os
import re
import sys
import traceback
import urllib.parse
from flask import redirect as flask_redirect
from flask import request
from flask import Response
from flask.views import MethodView
from google.cloud import ndb
import jinja2
import jira
from clusterfuzz._internal.base import utils
from clusterfuzz._internal.config import db_config
from clusterfuzz._internal.config import local_config
from clusterfuzz._internal.google_cloud_utils import storage
from clusterfuzz._internal.system import environment
from libs import auth
from libs import form
from libs import helpers
class JsonEncoder(json.JSONEncoder):
  """Json encoder that also handles ndb models, datetimes, FieldStorage,
  bytes and jira resources."""

  # Epoch reference used to serialize datetimes as integer UNIX timestamps.
  # NOTE(review): utcfromtimestamp(0) is naive; subtraction in default()
  # assumes all serialized datetimes are naive UTC — confirm.
  _EPOCH = datetime.datetime.utcfromtimestamp(0)

  def default(self, o):  # pylint: disable=arguments-differ,method-hidden
    # Order matters: ndb.Model also exposes to_dict(), so it must be
    # handled before the generic to_dict() branch to attach the entity id.
    if isinstance(o, ndb.Model):
      dict_obj = o.to_dict()
      dict_obj['id'] = o.key.id()
      return dict_obj
    if isinstance(o, datetime.datetime):
      # Seconds since epoch, truncated to int.
      return int((o - self._EPOCH).total_seconds())
    if hasattr(o, 'to_dict'):
      return o.to_dict()
    if isinstance(o, cgi.FieldStorage):
      return str(o)
    if isinstance(o, bytes):
      return o.decode('utf-8')
    if isinstance(o, jira.resources.Resource):
      if o.raw:
        return o.raw
    # Base implementation raises TypeError for anything unsupported.
    return json.JSONEncoder.default(self, o)
The provided code snippet includes necessary dependencies for implementing the `encode_json` function. Write a Python function `def encode_json(value)` to solve the following problem:
Dump base64-encoded JSON string (to avoid XSS).
Here is the function:
def encode_json(value):
  """Dump base64-encoded JSON string (to avoid XSS)."""
  # Base64 keeps the payload inert when embedded in HTML templates.
  return base64.b64encode(json.dumps(
      value, cls=JsonEncoder).encode('utf-8')).decode('utf-8') | Dump base64-encoded JSON string (to avoid XSS). |
156,965 | import base64
import cgi
import datetime
import json
import logging
import os
import re
import sys
import traceback
import urllib.parse
from flask import redirect as flask_redirect
from flask import request
from flask import Response
from flask.views import MethodView
from google.cloud import ndb
import jinja2
import jira
from clusterfuzz._internal.base import utils
from clusterfuzz._internal.config import db_config
from clusterfuzz._internal.config import local_config
from clusterfuzz._internal.google_cloud_utils import storage
from clusterfuzz._internal.system import environment
from libs import auth
from libs import form
from libs import helpers
_MENU_ITEMS = []
class _MenuItem:
"""A menu item used for rendering an item in the main navigation."""
def __init__(self, name, href):
self.name = name
self.href = href
The provided code snippet includes necessary dependencies for implementing the `add_menu` function. Write a Python function `def add_menu(name, href)` to solve the following problem:
Add menu item to the main navigation.
Here is the function:
def add_menu(name, href):
  """Add menu item to the main navigation (module-level registry)."""
  _MENU_ITEMS.append(_MenuItem(name, href)) | Add menu item to the main navigation. |
156,966 | import base64
import cgi
import datetime
import json
import logging
import os
import re
import sys
import traceback
import urllib.parse
from flask import redirect as flask_redirect
from flask import request
from flask import Response
from flask.views import MethodView
from google.cloud import ndb
import jinja2
import jira
from clusterfuzz._internal.base import utils
from clusterfuzz._internal.config import db_config
from clusterfuzz._internal.config import local_config
from clusterfuzz._internal.google_cloud_utils import storage
from clusterfuzz._internal.system import environment
from libs import auth
from libs import form
from libs import helpers
The provided code snippet includes necessary dependencies for implementing the `make_login_url` function. Write a Python function `def make_login_url(dest_url)` to solve the following problem:
Make the switch account url.
Here is the function:
def make_login_url(dest_url):
  """Make the login url, with |dest_url| as the post-login redirect."""
  return '/login?' + urllib.parse.urlencode({'dest': dest_url}) | Make the switch account url. |
156,967 | import base64
import cgi
import datetime
import json
import logging
import os
import re
import sys
import traceback
import urllib.parse
from flask import redirect as flask_redirect
from flask import request
from flask import Response
from flask.views import MethodView
from google.cloud import ndb
import jinja2
import jira
from clusterfuzz._internal.base import utils
from clusterfuzz._internal.config import db_config
from clusterfuzz._internal.config import local_config
from clusterfuzz._internal.google_cloud_utils import storage
from clusterfuzz._internal.system import environment
from libs import auth
from libs import form
from libs import helpers
The provided code snippet includes necessary dependencies for implementing the `make_logout_url` function. Write a Python function `def make_logout_url(dest_url)` to solve the following problem:
Make the switch account url.
Here is the function:
def make_logout_url(dest_url):
  """Make the logout url, including a CSRF token and the post-logout
  redirect destination.

  NOTE(review): the original docstring said 'switch account url' — likely
  a copy-paste from make_login_url.
  """
  return '/logout?' + urllib.parse.urlencode({
      'csrf_token': form.generate_csrf_token(),
      'dest': dest_url,
  }) | Make the switch account url. |
156,968 | import base64
import cgi
import datetime
import json
import logging
import os
import re
import sys
import traceback
import urllib.parse
from flask import redirect as flask_redirect
from flask import request
from flask import Response
from flask.views import MethodView
from google.cloud import ndb
import jinja2
import jira
from clusterfuzz._internal.base import utils
from clusterfuzz._internal.config import db_config
from clusterfuzz._internal.config import local_config
from clusterfuzz._internal.google_cloud_utils import storage
from clusterfuzz._internal.system import environment
from libs import auth
from libs import form
from libs import helpers
_SAFE_URL_PATTERN = re.compile(
r'^(?:(?:https?|mailto|ftp):|[^:/?#]*(?:[/?#]|$))', flags=re.IGNORECASE)
The provided code snippet includes necessary dependencies for implementing the `check_redirect_url` function. Write a Python function `def check_redirect_url(url)` to solve the following problem:
Check redirect URL is safe.
Here is the function:
def check_redirect_url(url):
  """Check redirect URL is safe.

  Raises:
    helpers.EarlyExitError: (403) when |url| does not match the allow-list
    pattern (http/https/mailto/ftp schemes or scheme-relative paths).
  """
  if not _SAFE_URL_PATTERN.match(url):
    raise helpers.EarlyExitError('Invalid redirect.', 403) | Check redirect URL is safe. |
156,969 | import datetime
from flask import request
from clusterfuzz._internal.base import tasks
from clusterfuzz._internal.base import utils
from clusterfuzz._internal.datastore import data_types
from clusterfuzz._internal.datastore import ndb_utils
from handlers import base_handler
from libs import filters
from libs import handler
from libs import helpers
from libs.query import datastore_query
PAGE_SIZE = 10
MORE_LIMIT = 50 - PAGE_SIZE
FILTERS = [
filters.Keyword([], 'keywords', 'q'),
]
def _convert_heartbeats_to_dicts(heartbeats):
  """Format heartbeats for template rendering.

  Returns a list of plain dicts; timestamps are converted to UNIX epoch
  values ('' when unset) and liveness is derived from the alive cutoff.
  """
  alive_cutoff = _get_alive_cutoff()

  rows = []
  for heartbeat in heartbeats:
    end_timestamp = (
        utils.utc_datetime_to_timestamp(heartbeat.task_end_time)
        if heartbeat.task_end_time else '')
    beat_timestamp = (
        utils.utc_datetime_to_timestamp(heartbeat.last_beat_time)
        if heartbeat.last_beat_time else '')
    liveness = 'alive' if heartbeat.last_beat_time > alive_cutoff else 'dead'

    rows.append({
        'bot_name': heartbeat.bot_name,
        'source_version': heartbeat.source_version,
        'task_payload': heartbeat.task_payload,
        'platform_id': heartbeat.platform_id,
        'task_end_time': end_timestamp,
        'last_beat_time': beat_timestamp,
        'alive': liveness,
    })

  return rows
The provided code snippet includes necessary dependencies for implementing the `get_results` function. Write a Python function `def get_results()` to solve the following problem:
Get results for the bots page.
Here is the function:
def get_results():
  """Get results for the bots page.

  Returns:
    (result_dict, params) with pagination metadata and formatted
    heartbeat rows.
  """
  # Return bots sorted alphabetically by bot_name
  query = datastore_query.Query(data_types.Heartbeat)
  query.order('bot_name', is_desc=False)
  params = dict(request.iterparams())

  filters.add(query, params, FILTERS)
  page = helpers.cast(request.get('page', 1), int, "'page' is not an int.")
  items, total_pages, total_items, has_more = query.fetch_page(
      page=page, page_size=PAGE_SIZE, projection=None, more_limit=MORE_LIMIT)
  items = _convert_heartbeats_to_dicts(items)
  helpers.log('Bots', helpers.VIEW_OPERATION)

  result = {
      'hasMore': has_more,
      'items': items,
      'page': page,
      'pageSize': PAGE_SIZE,
      'totalItems': total_items,
      'totalPages': total_pages,
  }
  return result, params | Get results for the bots page. |
156,970 | import datetime
import re
from clusterfuzz._internal.datastore import data_handler
from clusterfuzz._internal.datastore import data_types
from clusterfuzz._internal.metrics import fuzzer_stats
from handlers import base_handler
from libs import handler
from libs import helpers
VALID_DATE_REGEX = re.compile(r'^([0-9\-]+|latest)$')
def _get_project_report_url(job, date):
  """Return the coverage report URL for |job| on |date|.

  Args:
    job: Job name; its project is resolved via data_handler.
    date: The literal string 'latest' or a 'YYYY-MM-DD' date string.

  Returns:
    The HTML report URL, or None when the job has no project or no
    coverage information exists for the requested date.

  Raises:
    helpers.EarlyExitError: (400) if |date| is not 'latest' and cannot be
    parsed.
  """
  project = data_handler.get_project_name(job)
  if not project:
    return None

  if date == 'latest':
    date = None
  else:
    try:
      date = datetime.datetime.strptime(date, '%Y-%m-%d').date()
    except (TypeError, ValueError) as e:
      # A bare `except:` here would also swallow SystemExit and
      # KeyboardInterrupt; only parsing failures should map to a 400.
      raise helpers.EarlyExitError('Invalid date.', 400) from e

  info = fuzzer_stats.get_coverage_info(project, date)
  if not info:
    return None

  return info.html_report_url
The provided code snippet includes necessary dependencies for implementing the `get_report_url` function. Write a Python function `def get_report_url(report_type, argument, date)` to solve the following problem:
Get report url for a redirect from the coverage report handler.
Here is the function:
def get_report_url(report_type, argument, date):
  """Get report url for a redirect from the coverage report handler.

  Only 'job' reports are supported; |argument| is the job name and |date|
  is either 'latest' or a YYYY-MM-DD string.

  Raises:
    helpers.EarlyExitError: (400) on an unsupported report type or
    malformed job name/date.
  """
  # It's very easy to add support for per fuzzer reports, but we don't need it.
  if report_type != 'job':
    raise helpers.EarlyExitError('Invalid report type.', 400)

  job = argument
  if not job:
    raise helpers.EarlyExitError('Job name cannot be empty.', 400)

  if not data_types.Job.VALID_NAME_REGEX.match(job):
    raise helpers.EarlyExitError('Invalid job name.', 400)

  if not date or not VALID_DATE_REGEX.match(date):
    raise helpers.EarlyExitError('Invalid date.', 400)

  return _get_project_report_url(job, date) | Get report url for a redirect from the coverage report handler. |
156,971 | import json
from flask import request
from clusterfuzz._internal.datastore import data_handler
from clusterfuzz._internal.datastore import data_types
from clusterfuzz._internal.datastore import ndb_utils
from clusterfuzz._internal.metrics import crash_stats as crash_stats_common
from handlers import base_handler
from libs import crash_access
from libs import crash_stats
from libs import filters
from libs import handler
from libs import helpers
PAGE_SIZE = 10
GROUP_FILTERS = [
filters.Boolean('is_new', 'new'),
]
FILTERS = [
TimeFilter(),
filters.Boolean('security_flag', 'security'),
filters.Boolean('reproducible_flag', 'reproducible'),
filters.String('job_type', 'job'),
filters.String('project', 'project'),
FuzzerFilter(),
PlatformFilter(),
KeywordFilter(),
]
def attach_testcases(rows):
  """Attach testcase to each crash.

  For every row, looks up an open and a closed testcase matching the
  crash signature and attaches the first match (open preferred) as a
  small dict, or None when neither exists.
  """
  # First pass: issue both lookups for every row before consuming any
  # result.
  testcases = {}
  for index, row in enumerate(rows):
    testcases[index] = {
        'open_testcase':
            query_testcase(
                project_name=row['projectName'],
                crash_type=row['crashType'],
                crash_state=row['crashState'],
                security_flag=row['isSecurity'],
                is_open=True),
        'closed_testcase':
            query_testcase(
                project_name=row['projectName'],
                crash_type=row['crashType'],
                crash_state=row['crashState'],
                security_flag=row['isSecurity'],
                is_open=False)
    }

  # Second pass: prefer an open testcase over a closed one.
  for index, row in enumerate(rows):
    testcase = (list(testcases[index]['open_testcase']) or
                list(testcases[index]['closed_testcase']) or [None])[0]
    if testcase:
      testcase = {
          'id': testcase.key.id(),
          'issueNumber': testcase.bug_information,
          'groupIssueNumber': testcase.group_bug_information
      }
    row['testcase'] = testcase
The provided code snippet includes necessary dependencies for implementing the `get_result` function. Write a Python function `def get_result()` to solve the following problem:
Get the result for the crash stats page.
Here is the function:
def get_result():
  """Get the result for the crash stats page.

  Returns:
    (result_dict, params) with pagination metadata and grouped crash rows
    (each with an attached testcase, when available).
  """
  params = dict(request.iterparams())
  page = helpers.cast(request.get('page') or 1, int, "'page' is not an int.")
  group_by = params.get('group', 'platform')
  params['group'] = group_by
  sort_by = params.get('sort', 'total_count')
  params['sort'] = sort_by
  params['number'] = params.get('number', 'count')

  # Conditions for individual records.
  query = crash_stats.Query()
  query.group_by = group_by
  query.sort_by = sort_by
  crash_access.add_scope(query, params, 'security_flag', 'job_type',
                         'fuzzer_name')
  filters.add(query, params, FILTERS)

  # Conditions after grouping.
  group_query = crash_stats.Query()
  filters.add(group_query, params, GROUP_FILTERS)

  try:
    total_count, rows = crash_stats.get(
        query=query,
        group_query=group_query,
        offset=(page - 1) * PAGE_SIZE,
        limit=PAGE_SIZE)
  except ValueError:
    # Bad group/sort/filter combinations surface as ValueError.
    raise helpers.EarlyExitError('Invalid filters', 400)

  attach_testcases(rows)

  helpers.log('CrashStats', helpers.VIEW_OPERATION)

  result = {
      'totalPages': (total_count // PAGE_SIZE) + 1,
      'page': page,
      'pageSize': PAGE_SIZE,
      'items': rows,
      'totalCount': total_count
  }
  return result, params | Get the result for the crash stats page. |
156,972 | import json
from flask import request
from clusterfuzz._internal.datastore import data_handler
from clusterfuzz._internal.datastore import data_types
from clusterfuzz._internal.datastore import ndb_utils
from clusterfuzz._internal.metrics import crash_stats as crash_stats_common
from handlers import base_handler
from libs import crash_access
from libs import crash_stats
from libs import filters
from libs import handler
from libs import helpers
The provided code snippet includes necessary dependencies for implementing the `get_all_platforms` function. Write a Python function `def get_all_platforms()` to solve the following problem:
Get all platforms including parent platform.
Here is the function:
def get_all_platforms():
  """Get all platforms including parent platform.

  Returns a sorted, de-duplicated list of lowercase platform names seen on
  testcases; 'android' is always included as the parent platform.
  """
  items = data_types.Testcase.query(
      projection=[data_types.Testcase.platform], distinct=True)
  return sorted(
      list(
          set([item.platform.lower() for item in items if item.platform] +
              ['android']))) | Get all platforms including parent platform. |
156,973 | from flask import request
from google.cloud import ndb
from clusterfuzz._internal.base import tasks
from clusterfuzz._internal.datastore import data_handler
from clusterfuzz._internal.datastore import data_types
from clusterfuzz._internal.datastore import ndb_utils
from clusterfuzz._internal.fuzzing import fuzzer_selection
from handlers import base_handler
from libs import filters
from libs import form
from libs import gcs
from libs import handler
from libs import helpers
from libs.query import datastore_query
The provided code snippet includes necessary dependencies for implementing the `get_queues` function. Write a Python function `def get_queues()` to solve the following problem:
Return list of task queues.
Here is the function:
def get_queues():
  """Return list of task queues.

  Each entry is a dict with 'name' and 'display_name', sorted by the
  human-readable display name.
  """
  queues = []
  for name, display_name in tasks.TASK_QUEUE_DISPLAY_NAMES.items():
    queue = {
        'name': name,
        'display_name': display_name,
    }
    queues.append(queue)

  queues.sort(key=lambda q: q['display_name'])
  return queues | Return list of task queues. |
156,974 | from flask import request
from google.cloud import ndb
from clusterfuzz._internal.base import tasks
from clusterfuzz._internal.datastore import data_handler
from clusterfuzz._internal.datastore import data_types
from clusterfuzz._internal.datastore import ndb_utils
from clusterfuzz._internal.fuzzing import fuzzer_selection
from handlers import base_handler
from libs import filters
from libs import form
from libs import gcs
from libs import handler
from libs import helpers
from libs.query import datastore_query
PAGE_SIZE = 10
MORE_LIMIT = 50 - PAGE_SIZE
FILTERS = [
filters.Keyword([], 'keywords', 'q'),
]
def _job_to_dict(job):
  """Return a dict of job items along with associated fuzzers.

  The result is job.to_dict() plus the entity 'id' and a 'fuzzers' list of
  every fuzzer whose jobs include this job's name.
  """
  job_dict = job.to_dict()
  job_dict['id'] = job.key.id()

  # Adding all associated fuzzers with each job. Note: this scans every
  # Fuzzer entity per call.
  associated = []
  for fuzzer in data_types.Fuzzer.query():
    if job.name in fuzzer.jobs:
      associated.append(fuzzer.name)
  job_dict['fuzzers'] = associated

  return job_dict
The provided code snippet includes necessary dependencies for implementing the `get_results` function. Write a Python function `def get_results()` to solve the following problem:
Get results for the jobs page.
Here is the function:
def get_results():
  """Get results for the jobs page.

  Returns:
    (result_dict, params) with pagination metadata and job dicts.
  """
  # Return jobs sorted alphabetically by name
  query = datastore_query.Query(data_types.Job)
  query.order('name', is_desc=False)
  params = dict(request.iterparams())
  filters.add(query, params, FILTERS)

  page = helpers.cast(request.get('page', 1), int, "'page' is not an int.")
  items, total_pages, total_items, has_more = query.fetch_page(
      page=page, page_size=PAGE_SIZE, projection=None, more_limit=MORE_LIMIT)

  helpers.log('Jobs', helpers.VIEW_OPERATION)

  result = {
      'hasMore': has_more,
      'items': [_job_to_dict(item) for item in items],
      'page': page,
      'pageSize': PAGE_SIZE,
      'totalItems': total_items,
      'totalPages': total_pages,
  }
  return result, params | Get results for the jobs page. |
156,975 | from flask import request
from clusterfuzz._internal.base import utils
from clusterfuzz._internal.config import db_config
from clusterfuzz._internal.datastore import data_types
from handlers import base_handler
from libs import form
from libs import handler
from libs import helpers
The provided code snippet includes necessary dependencies for implementing the `get_value_by_name` function. Write a Python function `def get_value_by_name(item_list, name)` to solve the following problem:
Return value for entry whose name matches the one in item list.
Here is the function:
def get_value_by_name(item_list, name):
  """Return value for entry whose name matches the one in item list.

  Returns None when no dict in |item_list| has a matching 'name' key.
  """
  for item in item_list:
    if item['name'] == name:
      return item['value']

  return None | Return value for entry whose name matches the one in item list. |
156,976 | import re
import sys
from clusterfuzz._internal.datastore import search_tokenizer
from libs import helpers
The provided code snippet includes necessary dependencies for implementing the `is_empty` function. Write a Python function `def is_empty(value)` to solve the following problem:
Determine if the param's value is considered as empty.
Here is the function:
def is_empty(value):
  """Determine if the param's value is considered as empty.

  Any falsy value (None, '', 0, empty collection) counts as empty.
  """
  return not value | Determine if the param's value is considered as empty. |
156,977 | import re
import sys
from clusterfuzz._internal.datastore import search_tokenizer
from libs import helpers
The provided code snippet includes necessary dependencies for implementing the `has_params` function. Write a Python function `def has_params(params, filters)` to solve the following problem:
Check if there's any param.
Here is the function:
def has_params(params, filters):
  """Check if there's any param.

  True when at least one filter's param_key has a truthy value in |params|.
  """
  return any(params.get(fltr.param_key) for fltr in filters) | Check if there's any param. |
156,978 | import re
import sys
from clusterfuzz._internal.datastore import search_tokenizer
from libs import helpers
KEYWORD_FIELD_REGEX = (
'(?: +|^)%s:((?:"[^"]*")|(?:\'[^\']*\')|(?:[^ ]*))(?: +|$)')
The provided code snippet includes necessary dependencies for implementing the `extract_keyword_field` function. Write a Python function `def extract_keyword_field(keyword, field)` to solve the following problem:
Extract the value from the keyword given the field and return the new keyword.
Here is the function:
def extract_keyword_field(keyword, field):
  """Extract the value from the keyword given the field and return the new
  keyword.

  Matches `field:value`, `field:"quoted value"` or `field:'quoted value'`
  (case-insensitive) inside |keyword|.

  Returns:
    (keyword_with_field_removed, value) when found; (keyword, None)
    otherwise.
  """
  regex = re.compile(KEYWORD_FIELD_REGEX % field, flags=re.IGNORECASE)
  match = re.search(regex, keyword)
  if match:
    value = match.group(1)
    # Remove surrounding quotes, if present.
    if value.startswith('"') and value.endswith('"'):
      value = value.strip('"')
    elif value.startswith("'") and value.endswith("'"):
      value = value.strip("'")
    return re.sub(regex, ' ', keyword), value

  return keyword, None | Extract the value from the keyword given the field and return the new keyword. |
156,979 | import re
import sys
from clusterfuzz._internal.datastore import search_tokenizer
from libs import helpers
def get_string(value):
  """Return |value| with surrounding whitespace removed."""
  stripped = value.strip()
  return stripped
class SimpleFilter(Filter):
  """A simple filter that reads its value from a single request param.

  Attributes:
    field: Query field name to filter on.
    param_key: Request-param key the value is read from.
    transformers: Callables applied to the raw value in order; each may
      raise ValueError to reject the input.
    required: When True, a missing/empty value is a 400 error.
    extras: Extra kwargs forwarded to query.filter (e.g. 'operator').
  """

  def __init__(self,
               field,
               param_key,
               transformers=None,
               required=False,
               operator=None):
    self.field = field
    self.param_key = param_key
    # `or []` avoids sharing a mutable default across instances.
    self.transformers = transformers or []
    self.required = required
    self.extras = {}
    if operator:
      self.extras['operator'] = operator

  def add(self, query, params):
    """Set query according to params.

    Raises:
      helpers.EarlyExitError: (400) when a required value is missing or a
      transformer rejects the value.
    """
    value = params.get(self.param_key)
    if is_empty(value):
      if self.required:
        raise helpers.EarlyExitError("'%s' is required." % self.param_key, 400)
      return

    try:
      for transformer in self.transformers:
        value = transformer(value)
    except ValueError as e:
      # Modernized from sys.exc_info()[1]; message text is unchanged and
      # the original cause stays chained for debugging.
      raise helpers.EarlyExitError(
          "Invalid '%s': %s" % (self.param_key, e), 400) from e

    query.filter(self.field, value, **self.extras)
The provided code snippet includes necessary dependencies for implementing the `String` function. Write a Python function `def String(field, param_key, required=False)` to solve the following problem:
Return a string filter.
Here is the function:
def String(field, param_key, required=False):
  """Return a string filter (strips surrounding whitespace from the value)."""
  transformers = [get_string]
  return SimpleFilter(
      field, param_key, transformers=transformers, required=required)
156,980 | import re
import sys
from clusterfuzz._internal.datastore import search_tokenizer
from libs import helpers
def get_boolean(value):
  """Map the string 'yes' to True and 'no' to False.

  Raises ValueError for any other value.
  """
  for literal, result in (('yes', True), ('no', False)):
    if value == literal:
      return result
  raise ValueError("The value must be 'yes' or 'no'.")
class SimpleFilter(Filter):
  """A simple filter that reads value from only one key.

  The raw parameter value is passed through `transformers` in order before
  being applied to the query via `query.filter(field, value, **extras)`.
  """

  def __init__(self,
               field,
               param_key,
               transformers=None,
               required=False,
               operator=None):
    # field: query field name the filter applies to.
    # param_key: request-parameter key the raw value is read from.
    # transformers: callables applied in order; each may raise ValueError to
    #   signal an invalid value.
    # required: when True, a missing/empty value raises a 400 error.
    # operator: optional comparison operator forwarded to query.filter(..).
    self.field = field
    self.param_key = param_key
    self.transformers = transformers or []
    self.required = required
    self.extras = {}
    if operator:
      self.extras['operator'] = operator

  def add(self, query, params):
    """Set query according to params.

    Raises helpers.EarlyExitError(status=400) when the value is required but
    empty, or when a transformer rejects it with ValueError.
    """
    value = params.get(self.param_key)
    if is_empty(value):
      if self.required:
        raise helpers.EarlyExitError("'%s' is required." % self.param_key, 400)
      return
    try:
      for transformer in self.transformers:
        value = transformer(value)
    except ValueError:
      # Surface the transformer's message to the client as a 400.
      raise helpers.EarlyExitError(
          "Invalid '%s': %s" % (self.param_key, sys.exc_info()[1]), 400)
    query.filter(self.field, value, **self.extras)
The provided code snippet includes necessary dependencies for implementing the `Boolean` function. Write a Python function `def Boolean(field, param_key, required=False)` to solve the following problem:
Return a boolean filter that converts yes/no to True/False.
Here is the function:
def Boolean(field, param_key, required=False):
  """Return a boolean filter that converts yes/no to True/False."""
  transformers = [get_boolean]
  return SimpleFilter(
      field, param_key, transformers=transformers, required=required)
156,981 | import re
import sys
from clusterfuzz._internal.datastore import search_tokenizer
from libs import helpers
def get_boolean(value):
  """Map the string 'yes' to True and 'no' to False.

  Raises ValueError for any other value.
  """
  for literal, result in (('yes', True), ('no', False)):
    if value == literal:
      return result
  raise ValueError("The value must be 'yes' or 'no'.")
class SimpleFilter(Filter):
  """A simple filter that reads value from only one key.

  The raw parameter value is passed through `transformers` in order before
  being applied to the query via `query.filter(field, value, **extras)`.
  """

  def __init__(self,
               field,
               param_key,
               transformers=None,
               required=False,
               operator=None):
    # field: query field name the filter applies to.
    # param_key: request-parameter key the raw value is read from.
    # transformers: callables applied in order; each may raise ValueError to
    #   signal an invalid value.
    # required: when True, a missing/empty value raises a 400 error.
    # operator: optional comparison operator forwarded to query.filter(..).
    self.field = field
    self.param_key = param_key
    self.transformers = transformers or []
    self.required = required
    self.extras = {}
    if operator:
      self.extras['operator'] = operator

  def add(self, query, params):
    """Set query according to params.

    Raises helpers.EarlyExitError(status=400) when the value is required but
    empty, or when a transformer rejects it with ValueError.
    """
    value = params.get(self.param_key)
    if is_empty(value):
      if self.required:
        raise helpers.EarlyExitError("'%s' is required." % self.param_key, 400)
      return
    try:
      for transformer in self.transformers:
        value = transformer(value)
    except ValueError:
      # Surface the transformer's message to the client as a 400.
      raise helpers.EarlyExitError(
          "Invalid '%s': %s" % (self.param_key, sys.exc_info()[1]), 400)
    query.filter(self.field, value, **self.extras)
The provided code snippet includes necessary dependencies for implementing the `NegativeBoolean` function. Write a Python function `def NegativeBoolean(field, param_key, required=False)` to solve the following problem:
Return a boolean filter that converts yes/no to False/True.
Here is the function:
def NegativeBoolean(field, param_key, required=False):
  """Return a boolean filter that converts yes/no to False/True."""

  def _invert(value):
    # Second-stage transformer: negate the parsed boolean.
    return not value

  return SimpleFilter(
      field,
      param_key,
      transformers=[get_boolean, _invert],
      required=required)
156,982 | import re
import sys
from clusterfuzz._internal.datastore import search_tokenizer
from libs import helpers
class SimpleFilter(Filter):
  """A simple filter that reads value from only one key.

  The raw parameter value is passed through `transformers` in order before
  being applied to the query via `query.filter(field, value, **extras)`.
  """

  def __init__(self,
               field,
               param_key,
               transformers=None,
               required=False,
               operator=None):
    # field: query field name the filter applies to.
    # param_key: request-parameter key the raw value is read from.
    # transformers: callables applied in order; each may raise ValueError to
    #   signal an invalid value.
    # required: when True, a missing/empty value raises a 400 error.
    # operator: optional comparison operator forwarded to query.filter(..).
    self.field = field
    self.param_key = param_key
    self.transformers = transformers or []
    self.required = required
    self.extras = {}
    if operator:
      self.extras['operator'] = operator

  def add(self, query, params):
    """Set query according to params.

    Raises helpers.EarlyExitError(status=400) when the value is required but
    empty, or when a transformer rejects it with ValueError.
    """
    value = params.get(self.param_key)
    if is_empty(value):
      if self.required:
        raise helpers.EarlyExitError("'%s' is required." % self.param_key, 400)
      return
    try:
      for transformer in self.transformers:
        value = transformer(value)
    except ValueError:
      # Surface the transformer's message to the client as a 400.
      raise helpers.EarlyExitError(
          "Invalid '%s': %s" % (self.param_key, sys.exc_info()[1]), 400)
    query.filter(self.field, value, **self.extras)
The provided code snippet includes necessary dependencies for implementing the `Int` function. Write a Python function `def Int(field, param_key, required=False, operator=None)` to solve the following problem:
Return an int filter.
Here is the function:
def Int(field, param_key, required=False, operator=None):
  """Return an int filter (int() raises ValueError on bad input)."""
  return SimpleFilter(
      field, param_key, transformers=[int], required=required,
      operator=operator)
156,983 | import logging
import sys
import traceback
from clusterfuzz._internal.base import errors
from clusterfuzz._internal.datastore import data_handler
from clusterfuzz._internal.issue_management import issue_tracker_utils
from libs import auth
class EarlyExitError(Exception):
  """Serve as an exception for exiting a handler's method early.

  `status` is the HTTP status code to respond with. `trace_dump` captures a
  traceback at construction time when one is not supplied.
  """

  def __init__(self, message, status, trace_dump=None):
    super().__init__(message)
    self.status = status  # HTTP status code for the response.
    self.trace_dump = trace_dump
    if self.trace_dump is None:
      # Prefer the active exception's traceback; fall back to the current
      # call stack when no exception is being handled.
      if sys.exc_info()[0] is not None:
        self.trace_dump = traceback.format_exc()
      else:
        self.trace_dump = ''.join(traceback.format_stack())

  def to_dict(self):
    """Build dict that is used for JSON serialisation."""
    return {
        'traceDump': self.trace_dump,
        'message': str(self),
        # NOTE(review): get_user_email is defined elsewhere in this module;
        # presumably the currently signed-in user's email.
        'email': get_user_email(),
        'status': self.status,
        'type': self.__class__.__name__
    }
The provided code snippet includes necessary dependencies for implementing the `get_testcase` function. Write a Python function `def get_testcase(testcase_id)` to solve the following problem:
Get a valid testcase or raise EarlyExitError.
Here is the function:
def get_testcase(testcase_id):
  """Fetch the testcase for `testcase_id` or raise EarlyExitError(404)."""
  try:
    testcase = data_handler.get_testcase_by_id(testcase_id)
  except errors.InvalidTestcaseError:
    # An invalid id is reported the same way as a missing testcase.
    testcase = None

  if testcase:
    return testcase
  raise EarlyExitError("Testcase (id=%s) doesn't exist" % testcase_id, 404)
156,984 | import logging
import sys
import traceback
from clusterfuzz._internal.base import errors
from clusterfuzz._internal.datastore import data_handler
from clusterfuzz._internal.issue_management import issue_tracker_utils
from libs import auth
The provided code snippet includes necessary dependencies for implementing the `should_render_json` function. Write a Python function `def should_render_json(accepts, content_type)` to solve the following problem:
Check accepts and content_type to see if we should render JSON.
Here is the function:
def should_render_json(accepts, content_type):
  """Decide from the Accept and Content-Type values whether to render JSON."""
  if content_type == 'application/json':
    return True
  return 'application/json' in accepts
156,985 | import logging
import sys
import traceback
from clusterfuzz._internal.base import errors
from clusterfuzz._internal.datastore import data_handler
from clusterfuzz._internal.issue_management import issue_tracker_utils
from libs import auth
class _DoNotCatchException(Exception):
  """Serve as a dummy exception to avoid catching any exception.

  Never raised; an `except _DoNotCatchException` clause therefore matches
  nothing, which makes it a safe default for optional exception parameters.
  """
class EarlyExitError(Exception):
  """Serve as an exception for exiting a handler's method early.

  `status` is the HTTP status code to respond with. `trace_dump` captures a
  traceback at construction time when one is not supplied.
  """

  def __init__(self, message, status, trace_dump=None):
    super().__init__(message)
    self.status = status  # HTTP status code for the response.
    self.trace_dump = trace_dump
    if self.trace_dump is None:
      # Prefer the active exception's traceback; fall back to the current
      # call stack when no exception is being handled.
      if sys.exc_info()[0] is not None:
        self.trace_dump = traceback.format_exc()
      else:
        self.trace_dump = ''.join(traceback.format_stack())

  def to_dict(self):
    """Build dict that is used for JSON serialisation."""
    return {
        'traceDump': self.trace_dump,
        'message': str(self),
        # NOTE(review): get_user_email is defined elsewhere in this module;
        # presumably the currently signed-in user's email.
        'email': get_user_email(),
        'status': self.status,
        'type': self.__class__.__name__
    }
def _is_not_empty(value):
"""Check if value is empty value or a tuple of empty values."""
if isinstance(value, tuple):
return any(bool(elem) for elem in value)
return bool(value)
The provided code snippet includes necessary dependencies for implementing the `get_or_exit` function. Write a Python function `def get_or_exit(fn, not_found_message, error_message, not_found_exception=_DoNotCatchException, non_empty_fn=_is_not_empty)` to solve the following problem:
Get an entity using `fn`. If the returning entity is nothing (e.g. None or a tuple on Nones), it raises 404. Args: fn: the function to get an entity. It's a function because fn(..) might raise an exception. not_found_message: the 404 HTTP error is raised with not_found_message for an empty entity. error_message: the 500 HTTP error is raised with error_message for any other exception from fn(..). not_found_exception: the type of exception that will be considered as 'not found' as opposed to other errors.
Here is the function:
def get_or_exit(fn,
                not_found_message,
                error_message,
                not_found_exception=_DoNotCatchException,
                non_empty_fn=_is_not_empty):
  """Get an entity using `fn`, raising EarlyExitError when it is missing.

  Args:
    fn: zero-argument callable producing the entity; it may raise.
    not_found_message: message for the 404 raised when the entity is empty.
    error_message: message for the 500 raised when fn() fails with any
      exception other than `not_found_exception`.
    not_found_exception: exception type treated as 'not found' rather than
      as an error.
    non_empty_fn: predicate deciding whether the result counts as present.
  """
  result = None
  try:
    result = fn()
  except not_found_exception:
    pass  # Treated as "not found"; falls through to the 404 below.
  except Exception:
    exc_type, exc_value = sys.exc_info()[:2]
    raise EarlyExitError(
        '%s (%s: %s)' % (error_message, exc_type, str(exc_value)), 500)

  if not non_empty_fn(result):
    raise EarlyExitError(not_found_message, 404)
  return result
156,986 | import logging
import sys
import traceback
from clusterfuzz._internal.base import errors
from clusterfuzz._internal.datastore import data_handler
from clusterfuzz._internal.issue_management import issue_tracker_utils
from libs import auth
class EarlyExitError(Exception):
  """Serve as an exception for exiting a handler's method early.

  `status` is the HTTP status code to respond with. `trace_dump` captures a
  traceback at construction time when one is not supplied.
  """

  def __init__(self, message, status, trace_dump=None):
    super().__init__(message)
    self.status = status  # HTTP status code for the response.
    self.trace_dump = trace_dump
    if self.trace_dump is None:
      # Prefer the active exception's traceback; fall back to the current
      # call stack when no exception is being handled.
      if sys.exc_info()[0] is not None:
        self.trace_dump = traceback.format_exc()
      else:
        self.trace_dump = ''.join(traceback.format_stack())

  def to_dict(self):
    """Build dict that is used for JSON serialisation."""
    return {
        'traceDump': self.trace_dump,
        'message': str(self),
        # NOTE(review): get_user_email is defined elsewhere in this module;
        # presumably the currently signed-in user's email.
        'email': get_user_email(),
        'status': self.status,
        'type': self.__class__.__name__
    }
The provided code snippet includes necessary dependencies for implementing the `get_integer_key` function. Write a Python function `def get_integer_key(request)` to solve the following problem:
Convenience function for getting an integer datastore key ID.
Here is the function:
def get_integer_key(request):
  """Return the 'key' request param as an int, raising a 400 on bad input."""
  raw_key = request.get('key')
  try:
    return int(raw_key)
  except (KeyError, ValueError):
    raise EarlyExitError('Invalid key format.', 400)
156,987 | import datetime
import functools
import json
import re
from flask import g
from flask import make_response
from flask import request
import google.auth
from google.auth.transport import requests as google_requests
from google.oauth2 import id_token
import requests
from clusterfuzz._internal.base import utils
from clusterfuzz._internal.config import local_config
from clusterfuzz._internal.datastore import data_types
from clusterfuzz._internal.google_cloud_utils import pubsub
from clusterfuzz._internal.system import environment
from libs import access
from libs import auth
from libs import csp
from libs import helpers
The provided code snippet includes necessary dependencies for implementing the `cron` function. Write a Python function `def cron()` to solve the following problem:
Wrap a handler with cron.
Here is the function:
def cron():
  """Return a decorator that restricts a handler to cron invocations."""

  def decorator(func):
    """Wrap `func` with the cron check."""

    @functools.wraps(func)
    def wrapper(self):
      """Verify the caller is cron, run the handler, default to 'OK'."""
      if not self.is_cron():
        raise helpers.AccessDeniedError('You are not a cron.')
      result = func(self)
      return 'OK' if result is None else result

    return wrapper

  return decorator
156,988 | import datetime
import functools
import json
import re
from flask import g
from flask import make_response
from flask import request
import google.auth
from google.auth.transport import requests as google_requests
from google.oauth2 import id_token
import requests
from clusterfuzz._internal.base import utils
from clusterfuzz._internal.config import local_config
from clusterfuzz._internal.datastore import data_types
from clusterfuzz._internal.google_cloud_utils import pubsub
from clusterfuzz._internal.system import environment
from libs import access
from libs import auth
from libs import csp
from libs import helpers
def check_admin_access(func):
  """Wrap a handler with admin checking.

  This decorator must be below post(..) and get(..) when used.

  Raises helpers.AccessDeniedError when the current user is not an admin.
  """

  # Fix: the original wrapper dropped the handler's __name__/__doc__;
  # sibling decorators in this module all use functools.wraps.
  @functools.wraps(func)
  def wrapper(self):
    """Wrapper."""
    if not auth.is_current_user_admin():
      raise helpers.AccessDeniedError('Admin access is required.')
    return func(self)

  return wrapper
The provided code snippet includes necessary dependencies for implementing the `check_admin_access_if_oss_fuzz` function. Write a Python function `def check_admin_access_if_oss_fuzz(func)` to solve the following problem:
Wrap a handler with an admin check if this is OSS-Fuzz. This decorator must be below post(..) and get(..) when used.
Here is the function:
def check_admin_access_if_oss_fuzz(func):
  """Require admin access when running on OSS-Fuzz; otherwise pass through.

  This decorator must be below post(..) and get(..) when used.
  """

  @functools.wraps(func)
  def wrapper(self):
    """Dispatch to the admin-checked handler only on OSS-Fuzz."""
    if not utils.is_oss_fuzz():
      return func(self)
    return check_admin_access(func)(self)

  return wrapper
156,989 | import datetime
import functools
import json
import re
from flask import g
from flask import make_response
from flask import request
import google.auth
from google.auth.transport import requests as google_requests
from google.oauth2 import id_token
import requests
from clusterfuzz._internal.base import utils
from clusterfuzz._internal.config import local_config
from clusterfuzz._internal.datastore import data_types
from clusterfuzz._internal.google_cloud_utils import pubsub
from clusterfuzz._internal.system import environment
from libs import access
from libs import auth
from libs import csp
from libs import helpers
The provided code snippet includes necessary dependencies for implementing the `unsupported_on_local_server` function. Write a Python function `def unsupported_on_local_server(func)` to solve the following problem:
Wrap a handler to raise error when running in local App Engine development environment. This decorator must be below post(..) and get(..) when used.
Here is the function:
def unsupported_on_local_server(func):
  """Raise a 400 error when running on the local App Engine dev server.

  This decorator must be below post(..) and get(..) when used.
  """

  @functools.wraps(func)
  def wrapper(self, *args, **kwargs):
    """Reject the request in local development, else run the handler."""
    if environment.is_running_on_app_engine_development():
      raise helpers.EarlyExitError(
          'This feature is not available in local App Engine Development '
          'environment.', 400)
    return func(self, *args, **kwargs)

  return wrapper
156,990 | import datetime
import functools
import json
import re
from flask import g
from flask import make_response
from flask import request
import google.auth
from google.auth.transport import requests as google_requests
from google.oauth2 import id_token
import requests
from clusterfuzz._internal.base import utils
from clusterfuzz._internal.config import local_config
from clusterfuzz._internal.datastore import data_types
from clusterfuzz._internal.google_cloud_utils import pubsub
from clusterfuzz._internal.system import environment
from libs import access
from libs import auth
from libs import csp
from libs import helpers
CLUSTERFUZZ_AUTHORIZATION_HEADER = 'x-clusterfuzz-authorization'
CLUSTERFUZZ_AUTHORIZATION_IDENTITY = 'x-clusterfuzz-identity'
def get_email_and_access_token(authorization):
  """Get user email from the request.

  `authorization` is the raw Authorization header value and must be an
  OAuth2 bearer token. Returns an (email, authorization_header) tuple.
  Raises helpers.UnauthorizedError when the header or token is rejected.

  See: https://developers.google.com/identity/protocols/OAuth2InstalledApp
  """
  if not authorization.startswith(BEARER_PREFIX):
    raise helpers.UnauthorizedError(
        'The Authorization header is invalid. It should have been started with'
        " '%s'." % BEARER_PREFIX)
  access_token = authorization.split(' ')[1]
  # Validate the token with Google's tokeninfo endpoint; a non-200 means the
  # token is expired, malformed, or revoked.
  response = requests.get(
      'https://www.googleapis.com/oauth2/v3/tokeninfo',
      params={'access_token': access_token},
      timeout=HTTP_GET_TIMEOUT_SECS)
  if response.status_code != 200:
    raise helpers.UnauthorizedError(
        f'Failed to authorize. The Authorization header ({authorization}) '
        'might be invalid.')
  try:
    data = json.loads(response.text)
    # Whitelist service accounts. They have different client IDs (or aud).
    # Therefore, we check against their email directly.
    if data.get('email_verified') and data.get('email') in _auth_config().get(
        'whitelisted_oauth_emails', default=[]):
      return data['email'], authorization
    # Validate that this is an explicitly whitelisted client ID.
    whitelisted_client_ids = _auth_config().get(
        'whitelisted_oauth_client_ids', default=[])
    if data.get('aud') not in whitelisted_client_ids:
      raise helpers.UnauthorizedError(
          "The access token doesn't belong to one of the allowed OAuth clients"
          ': %s.' % response.text)
    if not data.get('email_verified'):
      raise helpers.UnauthorizedError('The email (%s) is not verified: %s.' %
                                      (data.get('email'), response.text))
    return data['email'], authorization
  except (KeyError, ValueError) as e:
    # Covers both json.loads failures and missing expected fields.
    raise helpers.EarlyExitError(
        'Parsing the JSON response body failed: %s' % response.text, 500) from e
def get(response_content_type):
  """Wrap a GET handler and set response's content type.

  `response_content_type` is one of the module constants JSON, TEXT, or HTML.
  """

  def decorator(func):
    """Decorator."""

    # Fix: preserve the wrapped handler's metadata; the sibling post(..) and
    # oauth decorators already use functools.wraps.
    @functools.wraps(func)
    def wrapper(self, *args, **kwargs):
      """Wrapper."""
      if response_content_type == JSON:
        self.is_json = True

      # GET parameters come from the query string.
      extend_request(request, request.args)

      response = make_response(func(self, *args, **kwargs))
      if response_content_type == JSON:
        response.headers['Content-Type'] = 'application/json'
      elif response_content_type == TEXT:
        response.headers['Content-Type'] = 'text/plain'
      elif response_content_type == HTML:
        # Don't enforce content security policies in local development mode.
        if not environment.is_running_on_app_engine_development():
          response.headers['Content-Security-Policy'] = csp.get_default()
      return response

    return wrapper

  return decorator
The provided code snippet includes necessary dependencies for implementing the `oauth` function. Write a Python function `def oauth(func)` to solve the following problem:
Wrap a handler with OAuth authentication by reading the Authorization header and getting user email.
Here is the function:
def oauth(func):
  """Wrap a handler with OAuth authentication by reading the Authorization
  header and getting user email.
  """

  @functools.wraps(func)
  def wrapper(self):
    """Authenticate via the Authorization header when one is present."""
    auth_header = request.headers.get('Authorization')
    if not auth_header:
      # No header: run the handler unauthenticated.
      return func(self)

    email, returned_auth_header = get_email_and_access_token(auth_header)
    setattr(g, '_oauth_email', email)

    response = make_response(func(self))
    response.headers[CLUSTERFUZZ_AUTHORIZATION_HEADER] = str(
        returned_auth_header)
    response.headers[CLUSTERFUZZ_AUTHORIZATION_IDENTITY] = str(email)
    return response

  return wrapper
156,991 | import datetime
import functools
import json
import re
from flask import g
from flask import make_response
from flask import request
import google.auth
from google.auth.transport import requests as google_requests
from google.oauth2 import id_token
import requests
from clusterfuzz._internal.base import utils
from clusterfuzz._internal.config import local_config
from clusterfuzz._internal.datastore import data_types
from clusterfuzz._internal.google_cloud_utils import pubsub
from clusterfuzz._internal.system import environment
from libs import access
from libs import auth
from libs import csp
from libs import helpers
BEARER_PREFIX = 'Bearer '
def get(response_content_type):
  """Wrap a GET handler and set response's content type.

  `response_content_type` is one of the module constants JSON, TEXT, or HTML.
  """

  def decorator(func):
    """Decorator."""

    # Fix: preserve the wrapped handler's metadata; the sibling post(..) and
    # oauth decorators already use functools.wraps.
    @functools.wraps(func)
    def wrapper(self, *args, **kwargs):
      """Wrapper."""
      if response_content_type == JSON:
        self.is_json = True

      # GET parameters come from the query string.
      extend_request(request, request.args)

      response = make_response(func(self, *args, **kwargs))
      if response_content_type == JSON:
        response.headers['Content-Type'] = 'application/json'
      elif response_content_type == TEXT:
        response.headers['Content-Type'] = 'text/plain'
      elif response_content_type == HTML:
        # Don't enforce content security policies in local development mode.
        if not environment.is_running_on_app_engine_development():
          response.headers['Content-Security-Policy'] = csp.get_default()
      return response

    return wrapper

  return decorator
The provided code snippet includes necessary dependencies for implementing the `pubsub_push` function. Write a Python function `def pubsub_push(func)` to solve the following problem:
Wrap a handler with pubsub push authentication.
Here is the function:
def pubsub_push(func):
  """Wrap a handler with pubsub push authentication.

  Verifies the OIDC bearer token attached by Cloud Pub/Sub push delivery,
  then decodes the request body into a pubsub message that is passed to
  `func` as its second argument.
  """

  @functools.wraps(func)
  def wrapper(self):
    """Wrapper."""
    try:
      bearer_token = request.headers.get('Authorization', '')
      if not bearer_token.startswith(BEARER_PREFIX):
        raise helpers.UnauthorizedError('Missing or invalid bearer token.')

      token = bearer_token.split(' ')[1]
      # Verify the token's signature and standard claims against Google.
      claim = id_token.verify_oauth2_token(token, google_requests.Request())
    except google.auth.exceptions.GoogleAuthError as e:
      raise helpers.UnauthorizedError('Invalid ID token.') from e

    # Only accept tokens issued to our own service account — presumably the
    # identity the push subscription authenticates as (TODO: confirm against
    # the subscription config).
    if (not claim.get('email_verified') or
        claim.get('email') != utils.service_account_email()):
      raise helpers.UnauthorizedError('Invalid ID token.')

    message = pubsub.raw_message_to_message(json.loads(request.data.decode()))
    return func(self, message)

  return wrapper
156,992 | import datetime
import functools
import json
import re
from flask import g
from flask import make_response
from flask import request
import google.auth
from google.auth.transport import requests as google_requests
from google.oauth2 import id_token
import requests
from clusterfuzz._internal.base import utils
from clusterfuzz._internal.config import local_config
from clusterfuzz._internal.datastore import data_types
from clusterfuzz._internal.google_cloud_utils import pubsub
from clusterfuzz._internal.system import environment
from libs import access
from libs import auth
from libs import csp
from libs import helpers
The provided code snippet includes necessary dependencies for implementing the `check_user_access` function. Write a Python function `def check_user_access(need_privileged_access)` to solve the following problem:
Wrap a handler with check_user_access. This decorator must be below post(..) and get(..) when used.
Here is the function:
def check_user_access(need_privileged_access):
  """Wrap a handler with check_user_access.

  This decorator must be below post(..) and get(..) when used.
  """

  def decorator(func):
    """Decorator."""

    @functools.wraps(func)
    def wrapper(self, *args, **kwargs):
      """Run the handler only when the user has the required access."""
      if access.has_access(need_privileged_access=need_privileged_access):
        return func(self, *args, **kwargs)
      raise helpers.AccessDeniedError()

    return wrapper

  return decorator
156,993 | import datetime
import functools
import json
import re
from flask import g
from flask import make_response
from flask import request
import google.auth
from google.auth.transport import requests as google_requests
from google.oauth2 import id_token
import requests
from clusterfuzz._internal.base import utils
from clusterfuzz._internal.config import local_config
from clusterfuzz._internal.datastore import data_types
from clusterfuzz._internal.google_cloud_utils import pubsub
from clusterfuzz._internal.system import environment
from libs import access
from libs import auth
from libs import csp
from libs import helpers
def get(response_content_type):
  """Wrap a GET handler and set response's content type.

  `response_content_type` is one of the module constants JSON, TEXT, or HTML.
  """

  def decorator(func):
    """Decorator."""

    # Fix: preserve the wrapped handler's metadata; the sibling post(..) and
    # oauth decorators already use functools.wraps.
    @functools.wraps(func)
    def wrapper(self, *args, **kwargs):
      """Wrapper."""
      if response_content_type == JSON:
        self.is_json = True

      # GET parameters come from the query string.
      extend_request(request, request.args)

      response = make_response(func(self, *args, **kwargs))
      if response_content_type == JSON:
        response.headers['Content-Type'] = 'application/json'
      elif response_content_type == TEXT:
        response.headers['Content-Type'] = 'text/plain'
      elif response_content_type == HTML:
        # Don't enforce content security policies in local development mode.
        if not environment.is_running_on_app_engine_development():
          response.headers['Content-Security-Policy'] = csp.get_default()
      return response

    return wrapper

  return decorator
The provided code snippet includes necessary dependencies for implementing the `check_testcase_access` function. Write a Python function `def check_testcase_access(func)` to solve the following problem:
Wrap a handler with check_testcase_access. It expects the param `testcaseId`. And it expects func to have testcase as its first argument. This decorator must be below post(..) and get(..) when used.
Here is the function:
def check_testcase_access(func):
  """Wrap a handler with check_testcase_access.

  It expects the param `testcaseId`. And it expects func to have testcase as
  its first argument.
  This decorator must be below post(..) and get(..) when used.
  """

  @functools.wraps(func)
  def wrapper(self):
    """Resolve and authorize the testcase, then pass it to the handler."""
    raw_id = request.get('testcaseId')
    testcase_id = helpers.cast(
        raw_id, int, "The param 'testcaseId' is not a number.")
    testcase = access.check_access_and_get_testcase(testcase_id)
    return func(self, testcase)

  return wrapper
156,994 | import datetime
import functools
import json
import re
from flask import g
from flask import make_response
from flask import request
import google.auth
from google.auth.transport import requests as google_requests
from google.oauth2 import id_token
import requests
from clusterfuzz._internal.base import utils
from clusterfuzz._internal.config import local_config
from clusterfuzz._internal.datastore import data_types
from clusterfuzz._internal.google_cloud_utils import pubsub
from clusterfuzz._internal.system import environment
from libs import access
from libs import auth
from libs import csp
from libs import helpers
def _auth_config():
  """Return a config with auth root.

  Lazily creates and caches a process-wide AuthConfig singleton.
  """
  global _auth_config_obj
  # Truthiness check: only build the config the first time through.
  if not _auth_config_obj:
    _auth_config_obj = local_config.AuthConfig()

  return _auth_config_obj
def get(response_content_type):
  """Wrap a GET handler and set response's content type.

  `response_content_type` is one of the module constants JSON, TEXT, or HTML.
  """

  def decorator(func):
    """Decorator."""

    # Fix: preserve the wrapped handler's metadata; the sibling post(..) and
    # oauth decorators already use functools.wraps.
    @functools.wraps(func)
    def wrapper(self, *args, **kwargs):
      """Wrapper."""
      if response_content_type == JSON:
        self.is_json = True

      # GET parameters come from the query string.
      extend_request(request, request.args)

      response = make_response(func(self, *args, **kwargs))
      if response_content_type == JSON:
        response.headers['Content-Type'] = 'application/json'
      elif response_content_type == TEXT:
        response.headers['Content-Type'] = 'text/plain'
      elif response_content_type == HTML:
        # Don't enforce content security policies in local development mode.
        if not environment.is_running_on_app_engine_development():
          response.headers['Content-Security-Policy'] = csp.get_default()
      return response

    return wrapper

  return decorator
The provided code snippet includes necessary dependencies for implementing the `allowed_cors` function. Write a Python function `def allowed_cors(func)` to solve the following problem:
Wrap a handler with 'Access-Control-Allow-Origin' to allow cross-domain AJAX calls.
Here is the function:
def allowed_cors(func):
  """Wrap a handler with 'Access-Control-Allow-Origin' to allow cross-domain
  AJAX calls."""

  @functools.wraps(func)
  def wrapper(self):
    """Wrapper."""
    origin = request.headers.get('Origin')
    whitelisted_cors_urls = _auth_config().get('whitelisted_cors_urls')
    response = make_response(func(self))
    if origin and whitelisted_cors_urls:
      # Echo the origin back only when it matches a whitelisted pattern.
      # 'Vary: Origin' keeps shared caches from serving one origin's
      # response to another.
      for domain_regex in whitelisted_cors_urls:
        if re.match(domain_regex, origin):
          response.headers['Access-Control-Allow-Origin'] = origin
          response.headers['Vary'] = 'Origin'
          response.headers['Access-Control-Allow-Credentials'] = 'true'
          response.headers['Access-Control-Allow-Methods'] = (
              'GET,OPTIONS,POST')
          response.headers['Access-Control-Allow-Headers'] = (
              'Accept,Authorization,Content-Type')
          response.headers['Access-Control-Max-Age'] = '3600'
          break

    return response

  return wrapper
156,995 | import datetime
import functools
import json
import re
from flask import g
from flask import make_response
from flask import request
import google.auth
from google.auth.transport import requests as google_requests
from google.oauth2 import id_token
import requests
from clusterfuzz._internal.base import utils
from clusterfuzz._internal.config import local_config
from clusterfuzz._internal.datastore import data_types
from clusterfuzz._internal.google_cloud_utils import pubsub
from clusterfuzz._internal.system import environment
from libs import access
from libs import auth
from libs import csp
from libs import helpers
JSON = 'json'
FORM = 'form'
HTML = 'html'
TEXT = 'text'
def extend_request(req, params):
  """Attach `get` and `iterparams` helpers that read from `params` onto
  `req`."""

  def _iterparams():
    # Lazily yield (key, value) pairs from the backing params mapping.
    for item in params.items():
      yield item

  def _get(key, default_value=None):
    """Return the value of the key or the default value."""
    return params.get(key, default_value)

  req.get = _get
  req.iterparams = _iterparams
def extend_json_request(req):
  """Decode the JSON request body and install param helpers on `req`."""
  body = req.data
  try:
    params = json.loads(body)
  except ValueError as e:
    raise helpers.EarlyExitError(
        'Parsing the JSON request body failed: %s' % body, 400) from e
  extend_request(req, params)
The provided code snippet includes necessary dependencies for implementing the `post` function. Write a Python function `def post(request_content_type, response_content_type)` to solve the following problem:
Wrap a POST handler, parse request, and set response's content type.
Here is the function:
def post(request_content_type, response_content_type):
  """Wrap a POST handler, parse request, and set response's content type."""

  def decorator(func):
    """Decorator."""

    @functools.wraps(func)
    def wrapper(self):
      """Parse params, invoke the handler, and stamp response headers."""
      if response_content_type == JSON:
        self.is_json = True

      # Pick the param source according to the request content type.
      if request_content_type == JSON:
        extend_json_request(request)
      else:
        source = request.form if request_content_type == FORM else request.args
        extend_request(request, source)

      response = make_response(func(self))

      if response_content_type == JSON:
        response.headers['Content-Type'] = 'application/json'
      elif response_content_type == TEXT:
        response.headers['Content-Type'] = 'text/plain'
      elif response_content_type == HTML:
        # Don't enforce content security policies in local development mode.
        if not environment.is_running_on_app_engine_development():
          response.headers['Content-Security-Policy'] = csp.get_default()

      return response

    return wrapper

  return decorator
156,996 | import datetime
import functools
import json
import re
from flask import g
from flask import make_response
from flask import request
import google.auth
from google.auth.transport import requests as google_requests
from google.oauth2 import id_token
import requests
from clusterfuzz._internal.base import utils
from clusterfuzz._internal.config import local_config
from clusterfuzz._internal.datastore import data_types
from clusterfuzz._internal.google_cloud_utils import pubsub
from clusterfuzz._internal.system import environment
from libs import access
from libs import auth
from libs import csp
from libs import helpers
def get(response_content_type):
  """Wrap a GET handler and set response's content type.

  Args:
    response_content_type: one of JSON, TEXT or HTML; selects the
      Content-Type header (or, for HTML, the Content-Security-Policy)
      applied to the response.

  Returns:
    A decorator for GET handler methods.
  """

  def decorator(func):
    """Decorator."""

    # Preserve the handler's name/docstring, consistent with the `post`
    # decorator in this module which already uses functools.wraps.
    @functools.wraps(func)
    def wrapper(self, *args, **kwargs):
      """Wrapper."""
      if response_content_type == JSON:
        self.is_json = True

      # GET params always come from the query string.
      extend_request(request, request.args)

      response = make_response(func(self, *args, **kwargs))
      if response_content_type == JSON:
        response.headers['Content-Type'] = 'application/json'
      elif response_content_type == TEXT:
        response.headers['Content-Type'] = 'text/plain'
      elif response_content_type == HTML:
        # Don't enforce content security policies in local development mode.
        if not environment.is_running_on_app_engine_development():
          response.headers['Content-Security-Policy'] = csp.get_default()
      return response

    return wrapper

  return decorator
The provided code snippet includes necessary dependencies for implementing the `require_csrf_token` function. Write a Python function `def require_csrf_token(func)` to solve the following problem:
Wrap a handler to require a valid CSRF token.
Here is the function:
def require_csrf_token(func):
  """Wrap a handler to require a valid CSRF token.

  Raises AccessDeniedError when the user is not logged in, the token is
  unknown, or the token has expired (expired tokens are also deleted).
  """

  # Preserve the handler's metadata, consistent with the `post` decorator.
  @functools.wraps(func)
  def wrapper(self, *args, **kwargs):
    """Check to see if this handler has a valid CSRF token provided to it."""
    # NOTE(review): `request.get` is the accessor monkey-patched on by
    # extend_request — confirm this decorator is only used together with
    # the `post`/`get` decorators from this module.
    token_value = request.get('csrf_token')
    user = auth.get_current_user()
    if not user:
      raise helpers.AccessDeniedError('Not logged in.')

    query = data_types.CSRFToken.query(
        data_types.CSRFToken.value == token_value,
        data_types.CSRFToken.user_email == user.email)
    token = query.get()
    if not token:
      raise helpers.AccessDeniedError('Invalid CSRF token.')

    # Make sure that the token is not expired.
    if token.expiration_time < datetime.datetime.utcnow():
      token.key.delete()
      raise helpers.AccessDeniedError('Expired CSRF token.')

    return func(self, *args, **kwargs)

  return wrapper
156,997 | import base64
import collections
import datetime
import json
import time
import urllib.parse
import googleapiclient
from clusterfuzz._internal.base import utils
from clusterfuzz._internal.google_cloud_utils import blobs
from clusterfuzz._internal.google_cloud_utils import storage
from clusterfuzz._internal.system import environment
# Public GCS URL template, parameterized by bucket name.
STORAGE_URL = 'https://storage.googleapis.com/%s'
# Signed URLs are valid for 30 minutes by default.
DEFAULT_URL_VALID_SECONDS = 30 * 60
def sign_data(data):
  """Sign data with the default App Engine service account.

  Args:
    data: bytes to sign.

  Returns:
    The raw signature bytes.

  Raises:
    GcsError: if the IAM signBlob response cannot be decoded.
  """
  iam = googleapiclient.discovery.build('iamcredentials', 'v1')
  service_account = 'projects/-/serviceAccounts/' + utils.service_account_email(
  )
  response = iam.projects().serviceAccounts().signBlob(  # pylint: disable=no-member
      name=service_account,
      body={
          'delegates': [],
          'payload': base64.b64encode(data).decode('utf-8'),
      }).execute()

  try:
    return base64.b64decode(response['signedBlob'])
  except Exception as e:
    # Chain the original exception so the root cause isn't lost (PEP 3134).
    raise GcsError('Invalid response: ' + str(e)) from e
def _get_expiration_time(expiry_seconds):
"""Return a timestamp |expiry_seconds| from now."""
return int(time.time() + expiry_seconds)
The provided code snippet includes necessary dependencies for implementing the `get_signed_url` function. Write a Python function `def get_signed_url(bucket_name, path, method='GET', expiry=DEFAULT_URL_VALID_SECONDS)` to solve the following problem:
Return a signed url.
Here is the function:
def get_signed_url(bucket_name,
                   path,
                   method='GET',
                   expiry=DEFAULT_URL_VALID_SECONDS):
  """Return a signed url."""
  expiration = _get_expiration_time(expiry)
  # Canonical string-to-sign for a GCS signed URL.
  string_to_sign = '%s\n\n\n%d\n/%s/%s' % (method, expiration, bucket_name,
                                           path)

  local_server = environment.get_value('LOCAL_GCS_SERVER_HOST')
  if local_server:
    # Local test server: no real signing is performed.
    base_url = local_server + '/' + bucket_name
    signature_bytes = b'SIGNATURE'
    account = 'service_account'
  else:
    base_url = STORAGE_URL % bucket_name
    signature_bytes = sign_data(string_to_sign.encode('utf-8'))
    account = utils.service_account_email()

  query_string = urllib.parse.urlencode({
      'GoogleAccessId': account,
      'Expires': expiration,
      'Signature': base64.b64encode(signature_bytes).decode('utf-8'),
  })
  return str(base_url + '/' + path + '?' + query_string)
156,998 | import base64
import collections
import datetime
import json
import time
import urllib.parse
import googleapiclient
from clusterfuzz._internal.base import utils
from clusterfuzz._internal.google_cloud_utils import blobs
from clusterfuzz._internal.google_cloud_utils import storage
from clusterfuzz._internal.system import environment
def prepare_upload(bucket_name, path, expiry=DEFAULT_URL_VALID_SECONDS):
  """Prepare a signed GCS upload."""
  expiration_time = (
      datetime.datetime.utcnow() + datetime.timedelta(seconds=expiry))

  # Upload policy: pin the object key and bucket, cap the upload size, and
  # require an x-goog-meta-filename form field.
  conditions = [
      {
          'key': path
      },
      {
          'bucket': bucket_name
      },
      ['content-length-range', 0, MAX_UPLOAD_SIZE],
      ['starts-with', '$x-goog-meta-filename', ''],
  ]
  policy_document = {
      'expiration': expiration_time.isoformat() + 'Z',
      'conditions': conditions,
  }
  policy = base64.b64encode(json.dumps(policy_document).encode('utf-8'))

  local_server = environment.get_value('LOCAL_GCS_SERVER_HOST')
  if local_server:
    # Local test server: no real signing is performed.
    url = local_server
    signature = b'SIGNATURE'
    service_account_name = 'service_account'
  else:
    url = STORAGE_URL % bucket_name
    signature = base64.b64encode(sign_data(policy))
    service_account_name = utils.service_account_email()

  return GcsUpload(url, bucket_name, path, service_account_name, policy,
                   signature)
The provided code snippet includes necessary dependencies for implementing the `prepare_blob_upload` function. Write a Python function `def prepare_blob_upload()` to solve the following problem:
Prepare a signed GCS blob upload.
Here is the function:
def prepare_blob_upload():
  """Prepare a signed GCS blob upload."""
  bucket = storage.blobs_bucket()
  blob_name = blobs.generate_new_blob_name()
  return prepare_upload(bucket, blob_name)
156,999 | from google.cloud.ndb import exceptions
from libs.query import base
The provided code snippet includes necessary dependencies for implementing the `_get_key_fn` function. Write a Python function `def _get_key_fn(attribute_name)` to solve the following problem:
Return the function to get attr of an item. This is used in sorting.
Here is the function:
def _get_key_fn(attribute_name):
"""Return the function to get attr of an item. This is used in sorting."""
def get_key(item):
return getattr(item, attribute_name)
return get_key | Return the function to get attr of an item. This is used in sorting. |
157,000 | from google.cloud.ndb import exceptions
from libs.query import base
The provided code snippet includes necessary dependencies for implementing the `compute_projection` function. Write a Python function `def compute_projection(projection, order_property)` to solve the following problem:
Set projection.
Here is the function:
def compute_projection(projection, order_property):
  """Set projection."""
  if projection is None:
    return None
  # The ordering property must be part of the projection for the datastore
  # query to sort on it; a set dedupes it if already present.
  return list(set(projection) | {order_property})
157,001 | from google.cloud.ndb import exceptions
from libs.query import base
class _KeyQuery:
"""Query only keys. It supports an OR condition."""
def __init__(self, model):
self.model = model
self.or_filters = []
self.filters = []
self.order_property = None
self.order_desc = False
def union(self, *queries):
"""Specify the OR condition."""
self.or_filters.append(queries)
def filter(self, operator, prop, value):
"""Specify the filter."""
if operator == 'IN':
subqueries = []
for v in value:
q = _KeyQuery(self.model)
q.filter('=', prop, v)
subqueries.append(q)
self.union(*subqueries)
else:
self.filters.append((operator, prop, value))
def order(self, prop, is_desc):
"""Specify the order."""
self.order_property, self.order_desc = prop, is_desc
def flatten(self):
"""Flatten self into multiple queries if or_filters is not empty."""
if not self.or_filters:
return [self]
for qs in self.or_filters:
for q in qs:
q.order(self.order_property, self.order_desc)
queries = []
for query in self.or_filters[0]:
for q in query.flatten():
queries.append(q)
for or_queries in self.or_filters[1:]:
new_queries = []
for oq in or_queries:
for fq in oq.flatten():
for q in queries:
new_queries.append(_combine(q, fq))
queries = new_queries
for q in queries:
for (prop_op, prop, value) in self.filters:
q.filter(prop_op, prop, value)
return queries
def to_datastore_query(self):
"""Return the corresponding datastore query."""
assert not self.or_filters
query = self.model.query()
properties = self.model._properties # pylint: disable=protected-access
for (prop_op, prop, value) in self.filters:
if prop_op == '=':
filter_func = properties[prop].__eq__
elif prop_op == '!=':
filter_func = properties[prop].__ne__
elif prop_op == '<':
filter_func = properties[prop].__le__
elif prop_op == '>':
filter_func = properties[prop].__gt__
elif prop_op == '<=':
filter_func = properties[prop].__le__
elif prop_op == '>=':
filter_func = properties[prop].__ge__
query = query.filter(filter_func(value))
if self.order_property:
order_property = properties[self.order_property]
if self.order_desc:
order_property = -order_property
query = query.order(order_property)
return query
def _build_runs(self, total):
"""Construct queries and run them."""
queries = self.flatten()
runs = []
# TODO(tanin): Improve the speed by detecting if we need union (or OR).
# If we don't need union, we can set keys_only=True and projection=None in
# order to improve speed; it's likely to be 2x faster.
for q in queries:
runs.append(
_Run(
q.to_datastore_query(),
keys_only=False,
projection=[self.order_property],
limit=total))
return runs
def _get_total_count(self, runs, offset, limit, items, more_limit):
"""Get total count by querying more items."""
max_total_count = offset + limit + more_limit
current_count = len(items)
if current_count > max_total_count:
return max_total_count, True
more_limit += 1
more_runs = []
for run in runs:
try:
cursor = run.result.cursor_after()
except exceptions.BadArgumentError:
# iterator had no results.
cursor = None
more_runs.append(
_Run(
run.query,
start_cursor=cursor,
keys_only=True,
projection=None,
limit=more_limit))
keys = {item.key.id() for item in items}
for run in more_runs:
for key in run.result:
keys.add(key)
total_count = min(len(keys), max_total_count)
has_more = len(keys) >= max_total_count
return total_count, has_more
def fetch(self, offset, limit, more_limit):
"""Construct multiple queries based on the or_filters, query them,
combined the results, return items and total_count."""
runs = self._build_runs(offset + limit)
items = {}
for run in runs:
for item in run.result:
if item.key.id() not in items:
items[item.key.id()] = item
items = sorted(
list(items.values()),
reverse=self.order_desc,
key=_get_key_fn(self.order_property))
total_count, has_more = self._get_total_count(runs, offset, limit, items,
more_limit)
return items[offset:(offset + limit)], total_count, has_more
The provided code snippet includes necessary dependencies for implementing the `_combine` function. Write a Python function `def _combine(q1, q2)` to solve the following problem:
Combine KeyQuery q1 and q2. We ignore or_filters because we assume q1 and q2 are flat. In other words, they are results of _KeyQuery.flatten(..).
Here is the function:
def _combine(q1, q2):
  """Combine KeyQuery q1 and q2. We ignore or_filters because we assume q1 and
  q2 are flat. In other words, they are results of _KeyQuery.flatten(..)."""
  assert not q1.or_filters
  assert not q2.or_filters
  # Both inputs must agree on ordering for the merge to make sense.
  assert q1.order_property == q2.order_property
  assert q1.order_desc == q2.order_desc

  merged = _KeyQuery(q1.model)
  merged.order_property = q1.order_property
  merged.order_desc = q1.order_desc
  merged.filters = q1.filters + q2.filters
  return merged
157,002 | import collections
from firebase_admin import auth
from google.cloud import ndb
from googleapiclient.discovery import build
import jwt
import requests
from clusterfuzz._internal.base import memoize
from clusterfuzz._internal.base import utils
from clusterfuzz._internal.config import local_config
from clusterfuzz._internal.datastore import data_types
from clusterfuzz._internal.metrics import logs
from clusterfuzz._internal.system import environment
from libs import request_cache
The provided code snippet includes necessary dependencies for implementing the `real_auth_domain` function. Write a Python function `def real_auth_domain()` to solve the following problem:
Get the real auth domain
Here is the function:
def real_auth_domain():
  """Get the real auth domain"""
  domain = local_config.ProjectConfig().get('firebase.real_auth_domain')
  if not domain:
    # Fall back to the project's default Firebase hosting domain.
    domain = utils.get_application_id() + '.firebaseapp.com'
  return domain
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.