id
stringlengths 19
21
| content
stringlengths 722
86.7k
|
|---|---|
evocodebench_data_1
|
"""
Common utilities.
"""
from asyncio import AbstractEventLoop
import json
import logging
import logging.handlers
import os
import platform
import sys
from typing import AsyncGenerator, Generator
import warnings
import requests
from chat.constants import LOGDIR
handler = None
visited_loggers = set()
def build_logger(logger_name, logger_filename):
    """Create a logger that tees stdout/stderr into loggers and writes all
    records to a daily-rotating UTF-8 file under LOGDIR.

    Parameters
    ----------
    logger_name:
        Name of the logger returned to the caller.
    logger_filename:
        File name (inside LOGDIR) for the rotating log file.

    Returns
    -------
    The configured ``logging.Logger``. Side effects: replaces ``sys.stdout``
    and ``sys.stderr`` with ``StreamToLogger`` wrappers and installs a shared
    ``TimedRotatingFileHandler`` (stored in the module-global ``handler``).
    """
    global handler

    formatter = logging.Formatter(
        fmt="%(asctime)s | %(levelname)s | %(name)s | %(message)s",
        datefmt="%Y-%m-%d %H:%M:%S",
    )

    # Set the format of root handlers
    if not logging.getLogger().handlers:
        # logging.basicConfig() only accepts `encoding` on Python >= 3.9.
        # Compare the full version tuple instead of the minor number alone,
        # which would misfire on any future major release (e.g. 4.0).
        if sys.version_info >= (3, 9):
            logging.basicConfig(level=logging.INFO, encoding="utf-8")
        else:
            if platform.system() == "Windows":
                warnings.warn(
                    "If you are running on Windows, "
                    "we recommend you use Python >= 3.9 for UTF-8 encoding."
                )
            logging.basicConfig(level=logging.INFO)
        logging.getLogger().handlers[0].setFormatter(formatter)

    # Redirect stdout and stderr to loggers
    stdout_logger = logging.getLogger("stdout")
    stdout_logger.setLevel(logging.INFO)
    sys.stdout = StreamToLogger(stdout_logger, logging.INFO)

    stderr_logger = logging.getLogger("stderr")
    stderr_logger.setLevel(logging.ERROR)
    sys.stderr = StreamToLogger(stderr_logger, logging.ERROR)

    # Get logger
    logger = logging.getLogger(logger_name)
    logger.setLevel(logging.INFO)

    # Add a file handler for all loggers; `visited_loggers` prevents attaching
    # the handler to the same logger twice across repeated calls.
    os.makedirs(LOGDIR, exist_ok=True)
    filename = os.path.join(LOGDIR, logger_filename)
    handler = logging.handlers.TimedRotatingFileHandler(
        filename, when="D", utc=True, encoding="utf-8"
    )
    handler.setFormatter(formatter)

    for target_logger in [stdout_logger, stderr_logger, logger]:
        if target_logger in visited_loggers:
            continue
        visited_loggers.add(target_logger)
        target_logger.addHandler(handler)

    return logger
class StreamToLogger(object):
    """
    Fake file-like stream object that redirects writes to a logger instance.
    """

    def __init__(self, logger, log_level=logging.INFO):
        self.terminal = sys.stdout
        self.logger = logger
        self.log_level = log_level
        self.linebuf = ""

    def __getattr__(self, attr):
        # Delegate any other file-API attribute (fileno, isatty, ...) to the
        # real stdout captured at construction time.
        return getattr(self.terminal, attr)

    def write(self, buf):
        # Accumulate partial lines; only complete ("\n"-terminated) lines are
        # emitted as log records.
        pending = self.linebuf + buf
        self.linebuf = ""
        for line in pending.splitlines(True):
            # From the io.TextIOWrapper docs:
            #   On output, if newline is None, any '\n' characters written
            #   are translated to the system default line separator.
            # By default sys.stdout.write() expects '\n' newlines and then
            # translates them so this is still cross platform.
            if line.endswith("\n"):
                message = line.encode("utf-8", "ignore").decode("utf-8")
                self.logger.log(self.log_level, message.rstrip())
            else:
                self.linebuf += line

    def flush(self):
        # Emit whatever partial line is still buffered.
        if self.linebuf:
            message = self.linebuf.encode("utf-8", "ignore").decode("utf-8")
            self.logger.log(self.log_level, message.rstrip())
        self.linebuf = ""
def disable_torch_init():
"""
Disable the redundant torch default initialization to accelerate model creation.
"""
import torch
setattr(torch.nn.Linear, "reset_parameters", lambda self: None)
setattr(torch.nn.LayerNorm, "reset_parameters", lambda self: None)
def get_gpu_memory(max_gpus=None):
"""Get available memory for each GPU."""
import torch
gpu_memory = []
num_gpus = (
torch.cuda.device_count()
if max_gpus is None
else min(max_gpus, torch.cuda.device_count())
)
for gpu_id in range(num_gpus):
with torch.cuda.device(gpu_id):
device = torch.cuda.current_device()
gpu_properties = torch.cuda.get_device_properties(device)
total_memory = gpu_properties.total_memory / (1024**3)
allocated_memory = torch.cuda.memory_allocated() / (1024**3)
available_memory = total_memory - allocated_memory
gpu_memory.append(available_memory)
return gpu_memory
def violates_moderation(text):
"""
Check whether the text violates OpenAI moderation API.
"""
import openai
try:
flagged = openai.Moderation.create(input=text)["results"][0]["flagged"]
except openai.error.OpenAIError as e:
flagged = False
except (KeyError, IndexError) as e:
flagged = False
return flagged
def clean_flant5_ckpt(ckpt_path):
"""
Flan-t5 trained with HF+FSDP saves corrupted weights for shared embeddings,
Use this function to make sure it can be correctly loaded.
"""
import torch
index_file = os.path.join(ckpt_path, "pytorch_model.bin.index.json")
index_json = json.load(open(index_file, "r"))
weightmap = index_json["weight_map"]
share_weight_file = weightmap["shared.weight"]
share_weight = torch.load(os.path.join(ckpt_path, share_weight_file))[
"shared.weight"
]
for weight_name in ["decoder.embed_tokens.weight", "encoder.embed_tokens.weight"]:
weight_file = weightmap[weight_name]
weight = torch.load(os.path.join(ckpt_path, weight_file))
weight[weight_name] = share_weight
torch.save(weight, os.path.join(ckpt_path, weight_file))
def pretty_print_semaphore(semaphore):
"""Print a semaphore in better format."""
if semaphore is None:
return "None"
return f"Semaphore(value={semaphore._value}, locked={semaphore.locked()})"
"""A javascript function to get url parameters for the gradio web server."""
get_window_url_params_js = """
function() {
const params = new URLSearchParams(window.location.search);
url_params = Object.fromEntries(params);
console.log("url_params", url_params);
return url_params;
}
"""
def iter_over_async(
    async_gen: AsyncGenerator, event_loop: AbstractEventLoop
) -> Generator:
    """
    Convert async generator to sync generator

    :param async_gen: the AsyncGenerator to convert
    :param event_loop: the event loop to run on
    :returns: Sync generator
    """
    ait = async_gen.__aiter__()

    async def _advance():
        # Translate StopAsyncIteration into a (done, value) pair so the
        # synchronous loop below can decide when to stop.
        try:
            return False, await ait.__anext__()
        except StopAsyncIteration:
            return True, None

    while True:
        finished, item = event_loop.run_until_complete(_advance())
        if finished:
            return
        yield item
def detect_language(text: str) -> str:
    """Detect the language of a string, returning "unknown" on failure."""
    import polyglot  # pip3 install polyglot pyicu pycld2
    from polyglot.detect import Detector
    from polyglot.detect.base import logger as polyglot_logger
    import pycld2

    # Silence polyglot's noisy warnings about unreliable detections.
    polyglot_logger.setLevel("ERROR")
    try:
        return Detector(text).language.name
    except (pycld2.error, polyglot.detect.base.UnknownLanguage):
        return "unknown"
def parse_gradio_auth_creds(filename: str):
    """Parse a username:password file for gradio authorization.

    Each line may hold several comma-separated "user:pass" entries.
    Returns a list of (user, pass) tuples, or None if the file is empty.
    """
    entries = []
    with open(filename, "r", encoding="utf8") as file:
        for line in file:
            entries.extend(token.strip() for token in line.split(",") if token.strip())
    if not entries:
        return None
    return [tuple(entry.split(":")) for entry in entries]
def is_partial_stop(output: str, stop_str: str):
    """Check whether the output contains a partial stop str."""
    # True when some suffix of `output` is a prefix of `stop_str`, i.e. the
    # stop string may still be arriving mid-stream. Note size == 0 yields the
    # whole output (output[-0:] == output), matching the original behavior.
    limit = min(len(output), len(stop_str))
    return any(stop_str.startswith(output[-size:]) for size in range(limit))
def run_cmd(cmd: str):
    """Run a bash command and return its exit status."""
    # Echo the command first so logs show what was executed.
    print(cmd)
    status = os.system(cmd)
    return status
def is_sentence_complete(output: str):
    """Check whether the output is a complete sentence."""
    # Covers ASCII and full-width (CJK) sentence terminators plus quotes.
    terminators = (".", "?", "!", "...", "。", "?", "!", "…", '"', "'", "”")
    return output.endswith(terminators)
# Models don't use the same configuration key for determining the maximum
# sequence length. Store them here so we can sanely check them.
# NOTE: The ordering here is important. Some models have two of these and we
# have a preference for which value gets used.
SEQUENCE_LENGTH_KEYS = [
    "max_sequence_length",
    "seq_length",
    "max_position_embeddings",
    "max_seq_len",
    "model_max_length",
]


def get_context_length(config):
    """Get the context length of a model from a huggingface model config."""
    rope_scaling = getattr(config, "rope_scaling", None)
    rope_scaling_factor = rope_scaling["factor"] if rope_scaling else 1

    # Return the first recognized key, scaled by the rope factor; default to
    # 2048 when no key is present.
    for key in SEQUENCE_LENGTH_KEYS:
        length = getattr(config, key, None)
        if length is not None:
            return int(rope_scaling_factor * length)
    return 2048
|
evocodebench_data_2
|
"""Tools module"""
# Copyright (c) 2023
# Author: Hugo Delatte <delatte.hugo@gmail.com>
# License: BSD 3 clause
# Implementation derived from:
# Riskfolio-Lib, Copyright (c) 2020-2023, Dany Cajas, Licensed under BSD 3 clause.
# Statsmodels, Copyright (C) 2006, Jonathan E. Taylor, Licensed under BSD 3 clause.
from enum import auto
import numpy as np
import scipy.cluster.hierarchy as sch
import scipy.optimize as sco
import scipy.spatial.distance as scd
import scipy.special as scs
from scipy.sparse import csr_matrix
from skfolio.utils.tools import AutoEnum
__all__ = [
"NBinsMethod",
"n_bins_freedman",
"n_bins_knuth",
"is_cholesky_dec",
"assert_is_square",
"assert_is_symmetric",
"assert_is_distance",
"cov_nearest",
"cov_to_corr",
"corr_to_cov",
"commutation_matrix",
"compute_optimal_n_clusters",
"rand_weights",
"rand_weights_dirichlet",
]
class NBinsMethod(AutoEnum):
    """Enumeration of the Number of Bins Methods

    Parameters
    ----------
    FREEDMAN : str
        Freedman method

    KNUTH : str
        Knuth method
    """

    # Members are auto-valued via enum.auto(); the concrete value semantics
    # come from the project's AutoEnum base -- TODO confirm against
    # skfolio.utils.tools.AutoEnum.
    FREEDMAN = auto()
    KNUTH = auto()
def n_bins_freedman(x: np.ndarray) -> int:
    """Compute the optimal histogram bin size using the Freedman-Diaconis rule [1]_.

    Parameters
    ----------
    x : ndarray of shape (n_observations,)
        The input array.

    Returns
    -------
    n_bins : int
        The optimal bin size.

    References
    ----------
    .. [1] "On the histogram as a density estimator: L2 theory".
        Freedman & Diaconis (1981).
    """
    if x.ndim != 1:
        raise ValueError("`x` must be a 1d-array")
    n_obs = len(x)
    q1, q3 = np.percentile(x, [25, 75])
    # Freedman-Diaconis bin width: 2 * IQR / n^(1/3).
    bin_width = 2 * (q3 - q1) / (n_obs ** (1 / 3))
    if bin_width == 0:
        # Degenerate IQR (e.g. constant data): fall back to a small default.
        return 5
    span = np.max(x) - np.min(x)
    return int(round(max(1, np.ceil(span / bin_width))))
def n_bins_knuth(x: np.ndarray) -> int:
    """Compute the optimal histogram bin size using Knuth's rule [1]_.

    Parameters
    ----------
    x : ndarray of shape (n_observations,)
        The input array.

    Returns
    -------
    n_bins : int
        The optimal bin size.

    References
    ----------
    .. [1] "Optimal Data-Based Binning for Histograms".
        Knuth.
    """
    x = np.sort(x)
    n = len(x)

    def _neg_log_posterior(m: float):
        # Negative log-posterior of the bin count; fmin() minimizes this,
        # which maximizes Knuth's posterior. `m` arrives as a length-1 array.
        m = m[0]
        if m <= 0:
            return np.inf
        edges = np.linspace(x[0], x[-1], int(m) + 1)
        counts, _ = np.histogram(x, edges)
        return -(
            n * np.log(m)
            + scs.gammaln(0.5 * m)
            - m * scs.gammaln(0.5)
            - scs.gammaln(n + 0.5 * m)
            + np.sum(scs.gammaln(counts + 0.5))
        )

    # Seed the search with the Freedman-Diaconis estimate.
    initial_guess = n_bins_freedman(x)
    best = sco.fmin(_neg_log_posterior, initial_guess, disp=0)[0]
    return int(round(best))
def rand_weights_dirichlet(n: int) -> np.array:
    """Produces n random weights that sum to one from a dirichlet distribution
    (uniform distribution over a simplex)

    Parameters
    ----------
    n : int
        Number of weights.

    Returns
    -------
    weights : ndarray of shape (n, )
        The vector of weights.
    """
    # A symmetric Dirichlet(alpha=1) draw is uniform over the simplex.
    return np.random.dirichlet(np.ones(n))
def rand_weights(n: int, zeros: int = 0) -> np.array:
    """Produces n random weights that sum to one from an uniform distribution
    (non-uniform distribution over a simplex)

    Parameters
    ----------
    n : int
        Number of weights.

    zeros : int, default=0
        The number of weights to randomly set to zeros.

    Returns
    -------
    weights : ndarray of shape (n, )
        The vector of weights.
    """
    weights = np.random.rand(n)
    if zeros > 0:
        # Zero out a random subset, then renormalize the remainder.
        off_idx = np.random.choice(n, zeros, replace=False)
        weights[off_idx] = 0
    return weights / sum(weights)
def is_cholesky_dec(x: np.ndarray) -> bool:
    """Returns True if Cholesky decomposition can be computed.

    The matrix must be Hermitian (symmetric if real-valued) and positive-definite.
    No checking is performed to verify whether the matrix is Hermitian or not.

    Parameters
    ----------
    x : ndarray of shape (n, m)
        The matrix.

    Returns
    -------
    value : bool
        True if Cholesky decomposition can be applied to the matrix, False otherwise.
    """
    # Around 100 times faster than checking for positive eigenvalues with np.linalg.eigh
    try:
        np.linalg.cholesky(x)
        return True
    # Catch the public np.linalg.LinAlgError: the previously used
    # np.linalg.linalg.LinAlgError was a private alias removed in NumPy 2.0.
    except np.linalg.LinAlgError:
        return False
def is_positive_definite(x: np.ndarray) -> bool:
    """Returns True if the matrix is positive definite.

    Parameters
    ----------
    x : ndarray of shape (n, m)
        The matrix.

    Returns
    -------
    value : bool
        True if if the matrix is positive definite, False otherwise.
    """
    eigenvalues = np.linalg.eigvals(x)
    return np.all(eigenvalues > 0)
def assert_is_square(x: np.ndarray) -> None:
    """Raises an error if the matrix is not square.

    Parameters
    ----------
    x : ndarray of shape (n, n)
        The matrix.

    Raises
    ------
    ValueError: if the matrix is not square.
    """
    is_square = x.ndim == 2 and x.shape[0] == x.shape[1]
    if not is_square:
        raise ValueError("The matrix must be square")
def assert_is_symmetric(x: np.ndarray) -> None:
    """Raises an error if the matrix is not symmetric.

    Parameters
    ----------
    x : ndarray of shape (n, m)
        The matrix.

    Raises
    ------
    ValueError: if the matrix is not symmetric.
    """
    assert_is_square(x)
    # allclose tolerates floating-point asymmetry from prior computations.
    symmetric = np.allclose(x, x.T)
    if not symmetric:
        raise ValueError("The matrix must be symmetric")
def assert_is_distance(x: np.ndarray) -> None:
    """Raises an error if the matrix is not a distance matrix.

    Parameters
    ----------
    x : ndarray of shape (n, n)
        The matrix.

    Raises
    ------
    ValueError: if the matrix is a distance matrix.
    """
    assert_is_symmetric(x)
    # A valid distance matrix must have (near-)zero self-distances.
    diagonal = np.diag(x)
    if not np.allclose(diagonal, np.zeros(x.shape[0]), atol=1e-5):
        raise ValueError(
            "The distance matrix must have diagonal elements close to zeros"
        )
def cov_to_corr(cov: np.ndarray) -> tuple[np.ndarray, np.ndarray]:
    """Convert a covariance matrix to a correlation matrix.

    Parameters
    ----------
    cov : ndarray of shape (n, n)
        Covariance matrix.

    Returns
    -------
    corr, std : tuple[ndarray of shape (n, n), ndarray of shape (n, )]
        Correlation matrix and standard-deviation vector
    """
    if cov.ndim != 2:
        raise ValueError(f"`cov` must be a 2D array, got a {cov.ndim}D array")
    std = np.sqrt(np.diag(cov))
    # corr[i, j] = cov[i, j] / (std[j] * std[i]), via two broadcast divisions.
    corr = cov / std / std[:, np.newaxis]
    return corr, std
def corr_to_cov(corr: np.ndarray, std: np.ndarray):
    """Convert a correlation matrix to a covariance matrix given its
    standard-deviation vector.

    Parameters
    ----------
    corr : ndarray of shape (n, n)
        Correlation matrix.

    std : ndarray of shape (n, )
        Standard-deviation vector.

    Returns
    -------
    cov : ndarray of shape (n, n)
        Covariance matrix
    """
    if std.ndim != 1:
        raise ValueError(f"`std` must be a 1D array, got a {std.ndim}D array")
    if corr.ndim != 2:
        raise ValueError(f"`corr` must be a 2D array, got a {corr.ndim}D array")
    # cov[i, j] = corr[i, j] * std[j] * std[i], via two broadcast products.
    return corr * std * std[:, np.newaxis]
# Smallest eigenvalue kept by the clipping method (see note inside cov_nearest).
_CLIPPING_VALUE = 1e-13


def cov_nearest(cov: np.ndarray, higham: bool = False, higham_max_iteration: int = 100):
    """Compute the nearest covariance matrix that is positive definite and with a
    cholesky decomposition than can be computed. The variance is left unchanged.

    First, it converts the covariance matrix to a correlation matrix.
    Then, it finds the nearest correlation matrix and converts it back to a covariance
    matrix using the initial standard deviation.

    Cholesky decomposition can fail for symmetric positive definite (SPD) matrix due
    to floating point error and inversely, Cholesky decomposition can success for
    non-SPD matrix. Therefore, we need to test for both. We always start by testing
    for Cholesky decomposition which is significantly faster than checking for positive
    eigenvalues.

    Parameters
    ----------
    cov : ndarray of shape (n, n)
        Covariance matrix.

    higham : bool, default=False
        If this is set to True, the Higham & Nick (2002) algorithm [1]_ is used,
        otherwise the eigenvalues are clipped to threshold above zeros (1e-13).
        The default (`False`) is to use the clipping method as the Higham & Nick
        algorithm can be slow for large datasets.

    higham_max_iteration : int, default=100
        Maximum number of iteration of the Higham & Nick (2002) algorithm.
        The default value is `100`.

    Returns
    -------
    cov : ndarray
        The nearest covariance matrix.

    References
    ----------
    .. [1] "Computing the nearest correlation matrix - a problem from finance"
        IMA Journal of Numerical Analysis
        Higham & Nick (2002)
    """
    assert_is_square(cov)
    assert_is_symmetric(cov)

    # Around 100 times faster than checking eigenvalues with np.linalg.eigh
    if is_cholesky_dec(cov) and is_positive_definite(cov):
        # Already usable: return the input unchanged.
        return cov

    corr, std = cov_to_corr(cov)
    if higham:
        eps = np.finfo(np.float64).eps * 5
        # `diff` carries the correction term between the alternating
        # projections (presumably Dykstra's correction from Higham's
        # algorithm -- TODO confirm against the paper).
        diff = np.zeros(corr.shape)
        x = corr.copy()
        for _ in range(higham_max_iteration):
            x_adj = x - diff
            # Project onto the PSD cone by flooring the eigenvalues at eps.
            eig_vals, eig_vecs = np.linalg.eigh(x_adj)
            x = eig_vecs * np.maximum(eig_vals, eps) @ eig_vecs.T
            diff = x - x_adj
            # Project onto the set of unit-diagonal (correlation) matrices.
            np.fill_diagonal(x, 1)
            cov = corr_to_cov(x, std)
            # Stop as soon as the reconstructed covariance is usable.
            if is_cholesky_dec(cov) and is_positive_definite(cov):
                break
        else:
            raise ValueError("Unable to find the nearest positive definite matrix")
    else:
        eig_vals, eig_vecs = np.linalg.eigh(corr)
        # Clipping the eigenvalues with a value smaller than 1e-13 can cause scipy to
        # consider the matrix non-psd is some corner cases (see test/test_stats.py)
        x = eig_vecs * np.maximum(eig_vals, _CLIPPING_VALUE) @ eig_vecs.T
        # Renormalize to a proper correlation matrix before restoring variances.
        x, _ = cov_to_corr(x)
        cov = corr_to_cov(x, std)
    return cov
def commutation_matrix(x):
    """Compute the commutation matrix.

    Parameters
    ----------
    x : ndarray of shape (n, m)
        The matrix.

    Returns
    -------
    K : ndarray of shape (m * n, m * n)
        The commutation matrix.
    """
    (m, n) = x.shape
    row_idx = np.arange(m * n)
    # Column index of each entry after transposition (Fortran/column order),
    # so that K @ vec(A) == vec(A.T).
    col_idx = row_idx.reshape((m, n), order="F").ravel()
    ones = np.ones(m * n, dtype=np.int8)
    return csr_matrix((ones, (row_idx, col_idx)), shape=(m * n, m * n))
def compute_optimal_n_clusters(distance: np.ndarray, linkage_matrix: np.ndarray) -> int:
    r"""Compute the optimal number of clusters based on Two-Order Difference to Gap
    Statistic [1]_.

    The Two-Order Difference to Gap Statistic has been developed to improve the
    performance and stability of the Tibshiranis Gap statistic.
    It applies the two-order difference of the within-cluster dispersion to replace the
    reference null distribution in the Gap statistic.

    The number of cluster :math:`k` is determined by:

    .. math:: \begin{cases}
                \begin{aligned}
                &\max_{k} & & W_{k+2} + W_{k} - 2 W_{k+1} \\
                &\text{s.t.} & & 1 \le k \le \max\bigl(8, \sqrt{n}\bigr) \\
                \end{aligned}
              \end{cases}

    with :math:`n` the sample size and :math:`W_{k}` the within-cluster dispersions
    defined as:

    .. math:: W_{k} = \sum_{i=1}^{k} \frac{D_{i}}{2|C_{i}|}

    where :math:`|C_{i}|` is the cardinality of cluster :math:`i` and :math:`D_{i}` its
    density defined as:

    .. math:: D_{i} = \sum_{u \in C_{i}} \sum_{v \in C_{i}} d(u,v)

    with :math:`d(u,v)` the distance between u and v.

    Parameters
    ----------
    distance : ndarray of shape (n, n)
        Distance matrix.

    linkage_matrix : ndarray of shape (n - 1, 4)
        Linkage matrix.

    Returns
    -------
    value : int
        Optimal number of clusters.

    References
    ----------
    .. [1] "Application of two-order difference to gap statistic".
        Yue, Wang & Wei (2009)
    """
    # cut_tree has one column per cut level; column n - k - 1 corresponds to
    # a cut with k + 1 clusters.
    cut_tree = sch.cut_tree(linkage_matrix)
    n = cut_tree.shape[1]
    max_clusters = max(8, round(np.sqrt(n)))
    dispersion = []
    for k in range(max_clusters):
        level = cut_tree[:, n - k - 1]
        cluster_density = []
        # Accumulate the mean pairwise distance inside each cluster at this level.
        for i in range(np.max(level) + 1):
            cluster_idx = np.argwhere(level == i).flatten()
            # squareform condenses the sub-matrix to the pairwise distances only.
            cluster_dists = scd.squareform(
                distance[cluster_idx, :][:, cluster_idx], checks=False
            )
            if cluster_dists.shape[0] != 0:
                cluster_density.append(np.nan_to_num(cluster_dists.mean()))
        dispersion.append(np.sum(cluster_density))
    dispersion = np.array(dispersion)
    # Two-order difference: W_{k+2} + W_k - 2 W_{k+1}, via shifted copies.
    gaps = np.roll(dispersion, -2) + dispersion - 2 * np.roll(dispersion, -1)
    # The last two entries wrap around due to np.roll; drop them.
    gaps = gaps[:-2]
    # k=0 represents one cluster
    k = np.argmax(gaps) + 2
    return k
|
evocodebench_data_3
|
# coding=utf-8
# Copyright 2023 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tools for manipulating coordinate spaces and distances along rays."""
from internal import geopoly
from internal import math
import jax
from jax import random
import jax.numpy as jnp
import numpy as np
def contract(x):
    """Contracts points towards the origin (Eq 10 of arxiv.org/abs/2111.12077)."""
    # Clamping to 1 produces correct scale inside |x| < 1
    sq_norm = jnp.maximum(1, jnp.sum(x**2, axis=-1, keepdims=True))
    factor = (2 * jnp.sqrt(sq_norm) - 1) / sq_norm
    return factor * x
def inv_contract(z):
    """The inverse of contract()."""
    # Clamping to 1 produces correct scale inside |z| < 1
    sq_norm = jnp.maximum(1, jnp.sum(z**2, axis=-1, keepdims=True))
    inv_factor = 2 * jnp.sqrt(sq_norm) - sq_norm
    return z / inv_factor
def track_linearize(fn, mean, cov):
    """Apply function `fn` to a set of means and covariances, ala a Kalman filter.

    We can analytically transform a Gaussian parameterized by `mean` and `cov`
    with a function `fn` by linearizing `fn` around `mean`, and taking advantage
    of the fact that Covar[Ax + y] = A(Covar[x])A^T (see
    https://cs.nyu.edu/~roweis/notes/gaussid.pdf for details).

    Args:
      fn: A function that can be applied to `mean`.
      mean: a tensor of Gaussian means, where the last axis is the dimension.
      cov: a tensor of covariances, where the last two axes are the dimensions.

    Returns:
      fn_mean: the transformed means.
      fn_cov: the transformed covariances.
    """
    if (len(mean.shape) + 1) != len(cov.shape):
        raise ValueError('cov must be non-diagonal')
    fn_mean, lin_fn = jax.linearize(fn, mean)
    # J cov J^T, computed by applying the linearization (JVP) over the last
    # axis of cov twice; each pass moves the transformed axis to position -2.
    apply_jac = jax.vmap(lin_fn, -1, -2)
    fn_cov = apply_jac(apply_jac(cov))
    return fn_mean, fn_cov
def track_isotropic(fn, mean, scale):
    """Apply function `fn` to a set of means and scales, ala a Kalman filter.

    This is the isotropic or scalar equivalent of track_linearize, as we're still
    linearizing a function and tracking a Gaussian through it, but the input and
    output Gaussians are all isotropic and are only represented with a single
    `scale` value (where `scale**2` is the variance of the Gaussian).

    Args:
      fn: A function that can be applied to `mean`.
      mean: a tensor of Gaussian means, where the last axis is the dimension.
      scale: a tensor of scales, with the same shape as means[..., -1], or None.

    Returns:
      fn_mean: the transformed means.
      fn_scale: the transformed scales, or None if `scale` was None.
    """
    # `scale` may legitimately be None (handled below), so only validate its
    # shape when present: the previous unconditional check dereferenced
    # `scale.shape` and crashed with AttributeError on None.
    if scale is not None and mean.shape[:-1] != scale.shape:
        raise ValueError(
            f'mean.shape[:-1] {mean.shape}[:-1] != scale.shape {scale.shape}.'
        )
    d = mean.shape[-1]
    fn_mean, lin_fn = jax.linearize(fn, mean)
    if scale is not None:
        # Compute the Jacobian of fn function at the locations of each mean.
        jac = jax.vmap(lin_fn, in_axes=-1, out_axes=-1)(
            jnp.broadcast_to(jnp.eye(d), mean.shape + (d,))
        )
        # The cube root of the determinant of the Jacobian is the geometric mean
        # of the eigenvalues of the Jacobian, which gives us the isotropic scaling
        # implied by `fn` at each mean that `scale` should be multiplied by.
        eps = jnp.finfo(jnp.float32).tiny  # Guard against an inf gradient at 0.
        abs_det = jnp.maximum(eps, jnp.abs(jnp.linalg.det(jac)))
        # Special case d == 3 for speed's sake.
        fn_scale = scale * (jnp.cbrt(abs_det) if d == 3 else abs_det ** (1 / d))
    else:
        fn_scale = None
    return fn_mean, fn_scale
def contract3_isoscale(x):
    """A fast version of track_isotropic(contract, *)'s scaling for 3D inputs."""
    if x.shape[-1] != 3:
        raise ValueError(f'Inputs must be 3D, are {x.shape[-1]}D.')
    sq_norm = jnp.maximum(1, jnp.sum(x**2, axis=-1))
    # Equivalent to cbrt((2 * sqrt(sq_norm) - 1) ** 2) / sq_norm, computed in
    # log space for stability.
    return jnp.exp(2 / 3 * jnp.log(2 * jnp.sqrt(sq_norm) - 1) - jnp.log(sq_norm))
def construct_ray_warps(fn, t_near, t_far, *, fn_inv=None):
    """Construct a bijection between metric distances and normalized distances.

    See the text around Equation 11 in https://arxiv.org/abs/2111.12077 for a
    detailed explanation.

    Args:
      fn: the function to ray distances.
      t_near: a tensor of near-plane distances.
      t_far: a tensor of far-plane distances.
      fn_inv: Optional, if not None then it's used as the inverse of fn().

    Returns:
      t_to_s: a function that maps distances to normalized distances in [0, 1].
      s_to_t: the inverse of t_to_s.
    """
    if fn is None:
        # Identity warp: metric and normalized distances are affinely related.
        fn_fwd = lambda x: x
        fn_inv = lambda x: x
    elif fn_inv is None:
        # A simple mapping from some functions to their inverse.
        inv_mapping = {
            'reciprocal': jnp.reciprocal,
            'log': jnp.exp,
            'exp': jnp.log,
            'sqrt': jnp.square,
            'square': jnp.sqrt,
        }
        fn_fwd = fn
        fn_inv = inv_mapping[fn.__name__]
    else:
        fn_fwd = fn

    fn_t_near, fn_t_far = fn_fwd(t_near), fn_fwd(t_far)

    # Forcibly clip t to the range of valid values, to guard against inf's.
    t_clip = lambda t: jnp.clip(t, t_near, t_far)

    def t_to_s(t):
        return (fn_fwd(t_clip(t)) - fn_t_near) / (fn_t_far - fn_t_near)

    def s_to_t(s):
        return t_clip(fn_inv(s * fn_t_far + (1 - s) * fn_t_near))

    return t_to_s, s_to_t
def expected_sin(mean, var):
    """Compute the mean of sin(x), x ~ N(mean, var)."""
    # Closed form: E[sin(x)] = exp(-var/2) * sin(mean).
    return jnp.exp(-0.5 * var) * math.safe_sin(mean)  # large var -> small value.
def integrated_pos_enc(mean, var, min_deg, max_deg):
    """Encode `x` with sinusoids scaled by 2^[min_deg, max_deg).

    Args:
      mean: tensor, the mean coordinates to be encoded
      var: tensor, the variance of the coordinates to be encoded.
      min_deg: int, the min degree of the encoding.
      max_deg: int, the max degree of the encoding.

    Returns:
      encoded: jnp.ndarray, encoded variables.
    """
    scales = 2.0 ** jnp.arange(min_deg, max_deg)
    flat_shape = mean.shape[:-1] + (-1,)
    mu = jnp.reshape(mean[Ellipsis, None, :] * scales[:, None], flat_shape)
    var_scaled = jnp.reshape(var[Ellipsis, None, :] * scales[:, None] ** 2, flat_shape)
    # sin features at the scaled means, plus a pi/2 phase shift (i.e. cos),
    # each attenuated by the matching variance.
    return expected_sin(
        jnp.concatenate([mu, mu + 0.5 * jnp.pi], axis=-1),
        jnp.concatenate([var_scaled, var_scaled], axis=-1),
    )
def lift_and_diagonalize(mean, cov, basis):
    """Project `mean` and `cov` onto basis and diagonalize the projected cov."""
    lifted_mean = math.matmul(mean, basis)
    # diag(B^T C B) computed without materializing the full projected matrix.
    lifted_var = jnp.sum(basis * math.matmul(cov, basis), axis=-2)
    return lifted_mean, lifted_var
def pos_enc(x, min_deg, max_deg, append_identity=True):
    """The positional encoding used by the original NeRF paper."""
    scales = 2.0 ** jnp.arange(min_deg, max_deg)
    flat_shape = x.shape[:-1] + (-1,)
    scaled = jnp.reshape(x[Ellipsis, None, :] * scales[:, None], flat_shape)  # (..., s*c).
    # Note that we're not using safe_sin, unlike IPE.
    # sin at the scaled coords plus a pi/2 shift gives the cos features too.
    four_feat = jnp.sin(
        jnp.concatenate([scaled, scaled + 0.5 * jnp.pi], axis=-1)
    )  # (..., s*c + s*c).
    if append_identity:
        return jnp.concatenate([x, four_feat], axis=-1)
    return four_feat
def sqrtm(mat, return_eigs=False):
    """Take the matrix square root of a PSD matrix [..., d, d]."""
    eigvec, eigval = jax.lax.linalg.eigh(
        mat, symmetrize_input=False, sort_eigenvalues=False
    )
    # Reconstruct V * sqrt(w) @ V^T from the eigensystem.
    scaled_vecs = eigvec * math.safe_sqrt(eigval)[Ellipsis, None, :]
    sqrtm_mat = math.matmul(scaled_vecs, jnp.moveaxis(eigvec, -2, -1))
    return (sqrtm_mat, (eigvec, eigval)) if return_eigs else sqrtm_mat
def isotropize(cov, mode='accurate'):
    """Turn covariances into isotropic covariances with the same determinant."""
    d = cov.shape[-1]
    if d == 1:
        # A 1x1 covariance is already isotropic.
        return cov
    if mode == 'fast':
        det = jnp.linalg.det(cov)
        diag_val = det ** (1 / d)
        is_invalid = (det <= jnp.finfo(jnp.float32).tiny) | ~jnp.isfinite(det)
    elif mode == 'accurate':
        # slogdet is numerically safer than det for near-singular matrices.
        log_det = jnp.linalg.slogdet(cov)[1]
        diag_val = jnp.exp(log_det / d)
        is_invalid = ~jnp.isfinite(log_det)
    else:
        raise ValueError(f'mode={mode} not implemented.')
    cov_iso = jnp.eye(d) * diag_val[Ellipsis, None, None]
    # Guard against NaN outputs when `det` is super small. Note that this does not
    # guard against NaN gradients!
    return jnp.where(is_invalid[Ellipsis, None, None], jnp.zeros_like(cov), cov_iso)
def construct_perp_basis(directions):
    """Construct a perpendicular basis for each 3-vector in `directions`."""
    if directions.shape[-1] != 3:
        raise ValueError(f'directions must be 3D, but is {directions.shape[-1]}D')
    # First perpendicular vector: cross with the arbitrary axis [0, 0, 1],
    # falling back to [1, 1, 1] when `directions` is (nearly) parallel to it.
    with_z = jnp.cross(directions, np.array([0.0, 0.0, 1.0]))
    with_ones = jnp.cross(directions, np.array([1.0, 1.0, 1.0]))
    degenerate = jnp.all(jnp.abs(with_z) < np.finfo(np.float32).eps, axis=-1)
    cross1 = jnp.where(degenerate[Ellipsis, None], with_ones, with_z)
    # Crossing `directions` with `cross1` completes the perpendicular pair.
    cross2 = jnp.cross(directions, cross1)
    # Return unit-length vectors.
    unit = lambda v: v / jnp.sqrt(jnp.sum(v**2, axis=-1, keepdims=True))
    return unit(cross1), unit(cross2)
def hexify(rng, *, origins, directions, radii, tdist):
    """Produce hexagon-shaped samples from ray segments.

    Args:
      rng: a PRNG key for randomized hex orientation, or None for the
        deterministic flip-and-shift pattern.
      origins: ray origins (assumed broadcastable to directions -- TODO confirm
        exact shapes against the caller).
      directions: 3D ray direction vectors.
      radii: per-ray radii used to scale the perpendicular offsets.
      tdist: distances along each ray; consecutive pairs delimit the sampled
        segments, so there are tdist.shape[-1] - 1 segments per ray.

    Returns:
      control: the multisample control points in world coordinates.
      perp_mag: the perpendicular offset magnitude of each multisample.
    """
    # Construct a base set of angles, by linspacing [0, 2pi] in a specific order.
    # This is one of two orderings of angles that doesn't induce any anisotropy
    # into the sample covariance of the multisample coordinates. Any rotation and
    # mirroring along the z-axis of this ordering is also valid.
    # There exists one alternative valid ordering, which is [0, 3, 2, 5, 4, 1].
    # This seems to work less well though likely because of the strong correlation
    # between adjacent angles.
    thetas = (np.pi / 3) * np.array([0, 2, 4, 3, 5, 1])

    # Lift the angles to the size of the rays.
    sz = tdist.shape[:-1] + (tdist.shape[-1] - 1, len(thetas))
    thetas = jnp.broadcast_to(thetas, sz)

    if rng is not None:
        # Randomly reverse the order of half of the hexes.
        key, rng = random.split(rng)
        flip = random.bernoulli(key, shape=sz[:-1])
        thetas = jnp.where(flip[Ellipsis, None], thetas[Ellipsis, ::-1], thetas)
        # Rotate each hex by some random amount.
        key, rng = random.split(rng)
        thetas += (2 * jnp.pi) * random.uniform(key, shape=sz[:-1])[Ellipsis, None]
    else:
        # If we're deterministic, flip and shift every other hex by 30 degrees.
        flip = jnp.arange(thetas.shape[-2]) % 2
        thetas = jnp.where(flip[Ellipsis, None], thetas[Ellipsis, ::-1], thetas)
        thetas += (flip * jnp.pi / 6)[Ellipsis, None]

    # TODO(barron): Plumb through the dx/dy frame for the original ray in the
    # image plane, to avoid the need of this.
    perp_axis1, perp_axis2 = construct_perp_basis(directions)

    # Grab each t-interval's midpoint and half-width.
    t0, t1 = tdist[Ellipsis, :-1], tdist[Ellipsis, 1:]
    s = (t0 + t1) / 2
    d = (t1 - t0) / 2

    # Compute the length along the ray for each multisample, using mip-NeRF math.
    cz = t0[Ellipsis, None] + math.safe_div(d, (d**2 + 3 * s**2))[Ellipsis, None] * (
        (t1**2 + 2 * s**2)[Ellipsis, None]
        + (3 / np.sqrt(7))
        * (np.arange(6) * (2 / 5) - 1)
        * math.safe_sqrt(((d**2 - s**2) ** 2 + 4 * s**4))[Ellipsis, None]
    )

    # Compute the offset from the ray for each multisample.
    perp_mag = jnp.sqrt(0.5) * radii[Ellipsis, None, :] * cz

    # Go from ray coordinate to world coordinates.
    cx = perp_mag * jnp.cos(thetas)
    cy = perp_mag * jnp.sin(thetas)
    control = (
        origins[Ellipsis, None, None, :]
        + perp_axis1[Ellipsis, None, None, :] * cx[Ellipsis, None]
        + perp_axis2[Ellipsis, None, None, :] * cy[Ellipsis, None]
        + directions[Ellipsis, None, None, :] * cz[Ellipsis, None]
    )
    return control, perp_mag
def unscented_transform(mean, cov, basis, axis=0):
    """Construct "sigma points" along `axis` from each mean and covariance.

    Args:
      mean: a tensor of Gaussian means; the last axis is the dimension `d`.
      cov: a tensor of covariances with trailing shape [d, d].
      basis: string selecting the scheme: 'mean', 'random_<k>',
        '<tetrahedron|icosahedron|octahedron>_<tesselation>', 'julier', or
        'menegaz'.
      axis: the axis along which the control points are stacked.

    Returns:
      The control (sigma) points for each input Gaussian.

    Raises:
      ValueError: for an unimplemented `basis`, or a polyhedral basis with
        d != 3.
    """
    d = cov.shape[-1]
    mean_ex = jnp.expand_dims(mean, axis)
    if basis == 'mean':
        # This effectively disables the unscented transform.
        return mean_ex

    if basis.startswith('random_'):
        num_random = int(basis.split('_')[-1])
        # TODO(barron): use a non-fixed random seed?
        noise = random.multivariate_normal(
            random.PRNGKey(0),
            jnp.zeros_like(mean),
            cov,
            (num_random,) + mean.shape[:-1],
        )
        control = mean_ex + jnp.moveaxis(jnp.nan_to_num(noise), 0, axis)
        return control

    sqrtm_cov = sqrtm(cov)
    if any([
        basis.startswith(x) for x in ['tetrahedron', 'icosahedron', 'octahedron']
    ]):
        # Use tessellated regular polyhedra vertices (and vec(0)) as control points.
        if d != 3:
            raise ValueError(f'Input is {d}D, but polyhedra are only defined for 3D.')
        base_shape, angular_tesselation = basis.split('_')
        transform = geopoly.generate_basis(
            base_shape, int(angular_tesselation), remove_symmetries=False
        ).T
        # Prepend the zero vector, then normalize each row to unit RMS.
        transform1 = np.concatenate([np.zeros((d, 1)), transform], axis=-1)
        transform1 /= np.sqrt(np.mean(transform1**2, axis=1))[:, None]
        control = mean_ex + jnp.moveaxis(
            math.matmul(sqrtm_cov, transform1), -1, axis
        )
    elif basis == 'julier':
        # The most basic symmetric unscented transformation from the original paper,
        # which yields 2*d+1 control points.
        offsets = np.sqrt(d + 0.5) * jnp.moveaxis(sqrtm_cov, -1, axis)
        control = jnp.concatenate(
            [mean_ex, mean_ex + offsets, mean_ex - offsets], axis=axis
        )
    elif basis == 'menegaz':
        # A compact unscented transformation from
        # folk.ntnu.no/skoge/prost/proceedings/cdc-ecc-2011/data/papers/2263.pdf
        # which yields d+1 control points.
        if d == 3:
            # A hand-optimized version of the d==3 case.
            sqrtm_cov_sum = jnp.sum(sqrtm_cov, axis=-1, keepdims=True)
            offsets = jnp.concatenate(
                [-sqrtm_cov_sum, 2 * sqrtm_cov - sqrtm_cov_sum / 3], axis=-1
            )
            control = mean_ex + jnp.moveaxis(offsets, -1, axis)
        else:
            transform = np.sqrt(d + 1) * np.eye(d) + (1 - np.sqrt(d + 1)) / d
            # == sqrt((d+1)) * sqrtm(eye(d) - 1/(d+1))
            transform1 = np.concatenate([-np.ones((d, 1)), transform], axis=-1)
            control = mean_ex + jnp.moveaxis(
                math.matmul(sqrtm_cov, transform1), -1, axis
            )
    else:
        raise ValueError(f'basis={basis} not implemented.')
    return control
def compute_control_points(
    means,
    covs,
    rays,
    tdist,
    rng,
    unscented_mip_basis,
    unscented_scale_mult,
):
  """Wrapper to compute unscented control points for the MLP class.

  Args:
    means: Gaussian means.
    covs: Gaussian covariances.
    rays: the rays being cast (used by hexify and for scale recovery).
    tdist: distances along the rays (only consumed by hexify).
    rng: RNG key (only consumed by hexify).
    unscented_mip_basis: either 'hexify' or a basis name understood by
      `unscented_transform`.
    unscented_scale_mult: when > 0, perpendicular magnitudes are recovered
      from the rays to mimic hexify's scales.

  Returns:
    A (control, perp_mag) tuple; perp_mag is None when it is not computed.
  """
  if unscented_mip_basis == 'hexify':
    # Hexify produces its own perpendicular magnitudes alongside the points.
    return hexify(
        rng,
        origins=rays.origins,
        directions=rays.directions,
        radii=rays.radii,
        tdist=tdist,
    )

  # Otherwise, fall back to a standard unscented transformation.
  control = unscented_transform(
      means,
      covs,
      basis=unscented_mip_basis,
      axis=-2,
  )

  if unscented_scale_mult <= 0:
    return control, None

  if rays is None:
    raise SyntaxError(
        'Rays are required as input if unscented_scale_mult > 0.'
    )

  # Mimic the math used by hexify to produce comparable scales: project each
  # control point back onto its ray to recover a per-point t value, then
  # scale by the ray radius.
  t_recon = jnp.sum(
      (control - rays.origins[Ellipsis, None, None, :])
      * rays.directions[Ellipsis, None, None, :],
      axis=-1,
  )
  perp_mag = jnp.sqrt(0.5) * rays.radii[Ellipsis, None, :] * t_recon
  return control, perp_mag
|
evocodebench_data_4
|
import sqlite3
import hashlib
import json
import functools
## Originally from https://www.kevinkatz.io/posts/memoize-to-sqlite
def memoize_to_sqlite(func_name: str, filename: str = "cache.db"):
    """Decorator factory that memoizes a function's results in SQLite.

    Args:
        func_name: Name used to namespace cache keys for the wrapped function.
        filename: Path of the SQLite database file backing the cache.

    Returns:
        A decorator wrapping its target with SQLite-backed memoization.
    """
    def decorator(target):
        @functools.wraps(target)
        def memoized(*args, **kwargs):
            # A fresh connection per call keeps the wrapper safe to use from
            # multiple processes; SQLiteMemoization handles setup/teardown.
            with SQLiteMemoization(filename) as cache:
                return cache.fetch_or_compute(target, func_name, *args, **kwargs)
        return memoized
    return decorator
class SQLiteMemoization:
    """Context manager providing SQLite-backed memoization storage.

    Results are keyed by a SHA-256 hash of the function name and arguments
    and stored JSON-serialized, so only JSON-serializable results and
    repr-stable arguments are supported.
    """

    # Sentinel distinguishing "no cache row" from a legitimately cached None
    # (JSON null). Previously a cached None looked like a miss, causing
    # recomputation and a duplicate-key IntegrityError on re-insert.
    _MISS = object()

    def __init__(self, filename):
        self.filename = filename
        self.connection = None

    def __enter__(self):
        self.connection = sqlite3.connect(self.filename)
        self._initialize_database()
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        self.connection.close()
        self.connection = None

    def _initialize_database(self):
        # NOTE: `hash` is the PRIMARY KEY and already has an implicit index;
        # the explicit index is kept for compatibility with existing cache
        # files created by earlier versions.
        self.connection.execute(
            "CREATE TABLE IF NOT EXISTS cache (hash TEXT PRIMARY KEY, result TEXT)"
        )
        self.connection.execute(
            "CREATE INDEX IF NOT EXISTS cache_ndx ON cache(hash)"
        )

    def fetch_or_compute(self, func, func_name, *args, **kwargs):
        """Return the cached result for this call, computing it on a miss."""
        arg_hash = self._compute_hash(func_name, *args, **kwargs)
        result = self._fetch_from_cache(arg_hash)
        if result is not self._MISS:
            return result
        return self._compute_and_cache_result(func, arg_hash, *args, **kwargs)

    def _compute_hash(self, func_name, *args, **kwargs):
        # repr() keeps the key stable across processes (unlike hash()).
        data = f"{func_name}:{repr(args)}:{repr(kwargs)}".encode("utf-8")
        return hashlib.sha256(data).hexdigest()

    def _fetch_from_cache(self, arg_hash):
        cursor = self.connection.cursor()
        cursor.execute("SELECT result FROM cache WHERE hash = ?", (arg_hash,))
        row = cursor.fetchone()
        # Return the sentinel (not None) on a miss so cached nulls round-trip.
        return json.loads(row[0]) if row else self._MISS

    def _compute_and_cache_result(self, func, arg_hash, *args, **kwargs):
        result = func(*args, **kwargs)
        self._cache_result(arg_hash, result)
        return result

    def _cache_result(self, arg_hash, result):
        # INSERT OR REPLACE avoids an IntegrityError when another process
        # cached the same key between our SELECT and this INSERT.
        self.connection.execute(
            "INSERT OR REPLACE INTO cache (hash, result) VALUES (?, ?)",
            (arg_hash, json.dumps(result)),
        )
        self.connection.commit()
|
evocodebench_data_5
|
from typing import Any, Callable, Dict, Iterable, List
import numpy as np
from pydantic import fields
# ----- validators -----
def is_odd(cls: type, v: int, field: fields.ModelField) -> int:
    """Validate that a kernel-size value is an odd number.

    Args:
        cls (type): Class type.
        v (int): Value to validate.
        field (fields.ModelField): Field descriptor.

    Raises:
        ValueError: If `v` is even.

    Returns:
        int: The validated value, unchanged.
    """
    if v % 2 == 0:
        raise ValueError(f"{cls.__name__}: {field.name} must be odd numbers.")
    return v
def is_binary(cls: type, v: np.ndarray, field: fields.ModelField) -> np.ndarray:
    """Validate that the array holds only boolean values, i.e. is binary.

    Args:
        cls (type): Class type.
        v (np.ndarray): Array to validate.
        field (fields.ModelField): Field descriptor.

    Raises:
        ValueError: If the array's dtype is not boolean.

    Returns:
        np.ndarray: The validated array, unchanged.
    """
    expected_dtype = np.dtype("bool")
    if v.dtype != expected_dtype:
        raise ValueError(f"{cls.__name__}: {field.name} must be binary. got dtype {v.dtype}")
    return v
def is_list_of_points(cls: type, v: np.ndarray, field: fields.ModelField) -> np.ndarray:
    """Validate that the array is a list of 2D points, i.e. has shape (_, 2).

    Args:
        cls (type): Class type.
        v (np.ndarray): Array to validate.
        field (fields.ModelField): Field descriptor.

    Raises:
        ValueError: If the array is not 2-dimensional with a trailing size of 2.

    Returns:
        np.ndarray: The validated array, unchanged.
    """
    if v.ndim != 2 or v.shape[1] != 2:
        raise ValueError(f"{cls.__name__}: {field.name} must have shape (_, 2).")
    return v
def is_not_empty(cls: type, v: List[Any], field: fields.ModelField) -> List[Any]:
    """Validate that the list contains at least one element.

    Args:
        cls (type): Class type.
        v (List[Any]): List to validate.
        field (fields.ModelField): Field descriptor.

    Raises:
        ValueError: If the list is empty.

    Returns:
        List[Any]: The validated list, unchanged.
    """
    if len(v) > 0:
        return v
    raise ValueError(f"{cls.__name__}: {field.name} list cannot be empty.")
def is_not_zero_sum(cls: type, v: Any, field: fields.ModelField) -> Any:
    """Validate that the values do not sum to zero.

    Args:
        cls (type): Class type.
        v (Any): Value to validate.
        field (fields.ModelField): Field descriptor.

    Raises:
        ValueError: If `v` sums to 0.

    Returns:
        Any: The validated value, unchanged.
    """
    total = np.sum(v)
    if total == 0:
        raise ValueError(f"{cls.__name__}: {field.name} sum cannot be zero.")
    return v
def are_all_positive(cls: type, v: Any, field: fields.ModelField) -> Any:
    """Validate that all values are positive.

    NOTE(review): zero passes the check, so this effectively enforces
    non-negativity despite the "positive" wording in the error messages.

    Args:
        cls (type): Class type.
        v (Any): Scalar or iterable of values to validate.
        field (fields.ModelField): Field descriptor.

    Raises:
        ValueError: If any value is negative.

    Returns:
        Any: The validated value, unchanged.
    """
    if isinstance(v, Iterable):
        if not all(value >= 0 for value in v):
            raise ValueError(f"{cls.__name__}: all {field.name} must be positive. Received {v}")
    elif v < 0.0:
        raise ValueError(f"{cls.__name__}: {field.name} must be positive. Received {v}")
    return v
def to_dtype_float32(cls: type, v: np.ndarray, field: fields.ModelField) -> np.ndarray:
    """Cast the input array to dtype np.float32.

    Args:
        cls (type): Class type.
        v (np.ndarray): Array to convert.
        field (fields.ModelField): Field descriptor.

    Returns:
        np.ndarray: A float32 copy of `v` (or `v` itself if already float32
        and astype elects not to copy).
    """
    return v.astype(np.float32)
# ----- root_validators -----
def is_valid_bbox(cls: type, values: Dict[str, float]) -> Dict[str, float]:
    """Validate that the bounding box has positive width and height."""
    x_min, x_max = values["x_min"], values["x_max"]
    y_min, y_max = values["y_min"], values["y_max"]
    if x_min >= x_max or y_min >= y_max:
        raise ValueError(
            f"{cls.__name__}: invalid bbox. x_min={x_min}, x_max={x_max},"
            f" y_min={y_min}, y_max={y_max}"
        )
    return values
# ----- parametrized validators -----
def is_array_n_dimensions(nb_dimensions: int) -> Callable:
    """Create a pydantic validator checking if an array is n-dimensional.

    Args:
        nb_dimensions (int): Number of dimensions the array must have.

    Returns:
        Callable: The validator.
    """
    def validator(cls: type, v: np.ndarray, field: fields.ModelField) -> np.ndarray:
        """Raise if the array does not have the expected dimensionality."""
        ndim = len(v.shape)
        # An empty 1D array is tolerated when zero dimensions are expected.
        empty_as_zero_dim = v.shape == (0,) and nb_dimensions == 0
        if ndim != nb_dimensions and not empty_as_zero_dim:
            raise ValueError(
                f"{cls.__name__}: wrong number of dimensions for {field.name}. "
                f"Expected {nb_dimensions}, got {ndim}"
            )
        return v
    return validator
# ----- parametrized root_validators -----
def are_lengths_equal(field1: str, field2: str) -> Callable:
    """Create a pydantic root validator checking two fields have equal length.

    Args:
        field1 (str): Name of the first field.
        field2 (str): Name of the second field.

    Returns:
        Callable: The validator.
    """
    def __root_validator(cls: type, values: Dict[str, List[Any]]) -> Dict[str, List[Any]]:
        """Raise unless len(field1) equals len(field2)."""
        len1, len2 = len(values[field1]), len(values[field2])
        if len1 != len2:
            raise ValueError(
                f"{cls.__name__}: {field1} and {field2} length mismatch, "
                f"resp. {len1} and {len2}"
            )
        return values
    return __root_validator
def are_shapes_equal(field1: str, field2: str) -> Callable:
    """Create a pydantic root validator checking two fields share one shape.

    Args:
        field1 (str): Name of the first field.
        field2 (str): Name of the second field.

    Returns:
        Callable: The validator.
    """
    def __root_validator(cls: type, values: Dict[str, np.ndarray]) -> Dict[str, np.ndarray]:
        """Raise unless field1.shape equals field2.shape."""
        shape1 = values[field1].shape
        shape2 = values[field2].shape
        if shape1 != shape2:
            raise ValueError(f"{cls.__name__}: {field1} and {field2} shape mismatch.")
        return values
    return __root_validator
def are_all_shapes_equal(field1: str, field2: str) -> Callable:
    """Create a pydantic root validator comparing two lists of arrays shape-wise.

    The generated validator checks that both lists have the same length and
    that corresponding elements have identical shapes.

    Args:
        field1 (str): Name of the first field.
        field2 (str): Name of the second field.

    Returns:
        Callable: The validator.
    """
    def __root_validator(cls: type, values: Dict[str, np.ndarray]) -> Dict[str, np.ndarray]:
        """Raise unless both lists match element-by-element in shape."""
        shapes1 = [element.shape for element in values[field1]]
        shapes2 = [element.shape for element in values[field2]]
        if len(shapes1) != len(shapes2) or shapes1 != shapes2:
            raise ValueError(
                f"{cls.__name__}: {field1} and {field2} shape mismatch, resp. {shapes1} and {shapes2}."
            )
        return values
    return __root_validator
|
evocodebench_data_6
|
# coding=utf-8
# Copyright 2023 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tools for constructing geodesic polyhedron, which are used as a basis."""
import itertools
import numpy as np
def compute_sq_dist(mat0, mat1=None):
  """Compute the squared Euclidean distance between all pairs of columns."""
  if mat1 is None:
    mat1 = mat0
  # ||x - y||^2 == ||x||^2 + ||y||^2 - 2 x^T y, which avoids an explicit
  # pairwise loop.
  sq_norm0 = np.sum(mat0**2, 0)
  sq_norm1 = np.sum(mat1**2, 0)
  sq_dist = sq_norm0[:, None] + sq_norm1[None, :] - 2 * mat0.T @ mat1
  # Clamp tiny negative values, which must be floating-point error.
  return np.maximum(0, sq_dist)
def compute_tesselation_weights(v):
  """Tesselate the vertices of a triangle by a factor of `v`."""
  if v < 1:
    raise ValueError(f'v {v} must be >= 1')
  # Enumerate all integer triples (i, j, k) with i + j + k == v, then divide
  # by v to obtain barycentric weights.
  int_weights = [
      (i, j, v - (i + j)) for i in range(v + 1) for j in range(v + 1 - i)
  ]
  return np.array(int_weights) / v
def tesselate_geodesic(base_verts, base_faces, v, eps=1e-4):
  """Tesselate the vertices of a geodesic polyhedron.

  Args:
    base_verts: tensor of floats, the vertex coordinates of the geodesic.
    base_faces: tensor of ints, the indices of the vertices of base_verts that
      constitute each face of the polyhedra.
    v: int, the factor of the tesselation (v==1 is a no-op).
    eps: float, a small value used to determine if two vertices are the same.

  Returns:
    verts: a tensor of floats, the coordinates of the tesselated vertices.

  Raises:
    ValueError: if `v` is not an integer.
  """
  if not isinstance(v, int):
    # Fixed error message grammar ("must an integer" -> "must be an integer").
    raise ValueError(f'v {v} must be an integer')
  tri_weights = compute_tesselation_weights(v)

  # Tesselate each face independently, projecting every new vertex onto the
  # unit sphere.
  verts = []
  for base_face in base_faces:
    new_verts = np.matmul(tri_weights, base_verts[base_face, :])
    new_verts /= np.sqrt(np.sum(new_verts**2, 1, keepdims=True))
    verts.append(new_verts)
  verts = np.concatenate(verts, 0)

  # Vertices on shared edges/corners are generated once per adjacent face;
  # collapse near-duplicates (squared distance <= eps) to a single copy.
  sq_dist = compute_sq_dist(verts.T)
  assignment = np.array([np.min(np.argwhere(d <= eps)) for d in sq_dist])
  unique = np.unique(assignment)
  verts = verts[unique, :]
  return verts
def generate_basis(
    base_shape, angular_tesselation, remove_symmetries=True, eps=1e-4
):
  """Generates a 3D basis by tesselating a geometric polyhedron.

  Args:
    base_shape: string, the name of the starting polyhedron, must be either
      'tetrahedron', 'icosahedron' or 'octahedron'.
    angular_tesselation: int, the number of times to tesselate the polyhedron,
      must be >= 1 (a value of 1 is a no-op to the polyhedron).
    remove_symmetries: bool, if True then remove the symmetric basis columns,
      which is usually a good idea because otherwise projections onto the basis
      will have redundant negative copies of each other.
    eps: float, a small number used to determine symmetries.

  Returns:
    basis: a matrix with shape [3, n].

  Raises:
    ValueError: if `base_shape` is not one of the supported polyhedron names.
  """
  if base_shape == 'tetrahedron':
    # Unit-norm tetrahedron vertices and its 4 triangular faces.
    verts = np.array([
        (np.sqrt(8 / 9), 0, -1 / 3),
        (-np.sqrt(2 / 9), np.sqrt(2 / 3), -1 / 3),
        (-np.sqrt(2 / 9), -np.sqrt(2 / 3), -1 / 3),
        (0, 0, 1),
    ])
    faces = np.array([(0, 1, 2), (0, 2, 3), (0, 1, 3), (1, 2, 3)])
  elif base_shape == 'icosahedron':
    # `a` is the golden ratio; dividing by sqrt(a + 2) normalizes the verts.
    a = (np.sqrt(5) + 1) / 2
    verts = np.array([
        (-1, 0, a),
        (1, 0, a),
        (-1, 0, -a),
        (1, 0, -a),
        (0, a, 1),
        (0, a, -1),
        (0, -a, 1),
        (0, -a, -1),
        (a, 1, 0),
        (-a, 1, 0),
        (a, -1, 0),
        (-a, -1, 0),
    ]) / np.sqrt(a + 2)
    faces = np.array([
        (0, 4, 1),
        (0, 9, 4),
        (9, 5, 4),
        (4, 5, 8),
        (4, 8, 1),
        (8, 10, 1),
        (8, 3, 10),
        (5, 3, 8),
        (5, 2, 3),
        (2, 7, 3),
        (7, 10, 3),
        (7, 6, 10),
        (7, 11, 6),
        (11, 0, 6),
        (0, 1, 6),
        (6, 1, 10),
        (9, 0, 11),
        (9, 11, 2),
        (9, 2, 5),
        (7, 2, 11),
    ])
  elif base_shape == 'octahedron':
    verts = np.array(
        [(0, 0, -1), (0, 0, 1), (0, -1, 0), (0, 1, 0), (-1, 0, 0), (1, 0, 0)]
    )
    # Recover the faces by pairing each cube corner with its 3 adjacent
    # octahedron vertices (those at squared distance exactly 2).
    corners = np.array(list(itertools.product([-1, 1], repeat=3)))
    pairs = np.argwhere(compute_sq_dist(corners.T, verts.T) == 2)
    faces = np.sort(np.reshape(pairs[:, 1], [3, -1]).T, 1)
  else:
    raise ValueError(f'base_shape {base_shape} not supported')
  # Subdivide the polyhedron's faces and re-project onto the unit sphere.
  verts = tesselate_geodesic(verts, faces, angular_tesselation)
  if remove_symmetries:
    # Remove elements of `verts` that are reflections of each other.
    match = compute_sq_dist(verts.T, -verts.T) < eps
    verts = verts[~np.any(np.triu(match), axis=0), :]
  basis = verts[:, ::-1]
  return basis
|
evocodebench_data_7
|
# Copyright The Lightning AI team.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import hashlib
import logging
import os
from logging import Logger
from time import time
from typing import Any, Dict, List, Optional, Tuple, Union

import numpy as np
from torch.utils.data import IterableDataset

from litdata.constants import (
    _DEFAULT_CACHE_DIR,
    _INDEX_FILENAME,
)
from litdata.streaming import Cache
from litdata.streaming.item_loader import BaseItemLoader
from litdata.streaming.resolver import Dir, _resolve_dir
from litdata.streaming.sampler import ChunkedIndex
from litdata.streaming.serializers import Serializer
from litdata.streaming.shuffle import FullShuffle, NoShuffle, Shuffle
from litdata.utilities.env import _DistributedEnv, _is_in_dataloader_worker, _WorkerEnv
# BUG FIX: instantiating `Logger` directly creates a logger detached from the
# logging hierarchy, so application-level handlers, levels, and filters never
# apply to it. `logging.getLogger` returns the properly-registered logger.
logger = logging.getLogger(__name__)
class StreamingDataset(IterableDataset):
    """The streaming dataset can be used once your data have been optimised using the DatasetOptimiser class."""

    def __init__(
        self,
        input_dir: Union[str, "Dir"],
        item_loader: Optional[BaseItemLoader] = None,
        shuffle: bool = False,
        drop_last: Optional[bool] = None,
        seed: int = 42,
        serializers: Optional[Dict[str, Serializer]] = None,
        max_cache_size: Union[int, str] = "100GB",
    ) -> None:
        """The streaming dataset can be used once your data have been optimised using the DatasetOptimiser class.

        Arguments:
            input_dir: Path to the folder where the input data is stored.
            item_loader: The logic to load an item from a chunk.
            shuffle: Whether to shuffle the data.
            drop_last: If `True`, drops the last items to ensure that
                all processes/workers return the same amount of data.
                The argument `drop_last` is set to `True` in a distributed setting
                and `False` otherwise.
            seed: Random seed for shuffling.
            serializers: The serializers used to serialize and deserialize the chunks.
            max_cache_size: The maximum cache size used by the StreamingDataset.
        """
        super().__init__()
        if not isinstance(shuffle, bool):
            raise ValueError(f"Shuffle should be a boolean. Found {shuffle}")
        input_dir = _resolve_dir(input_dir)
        self.input_dir = input_dir
        self.item_loader = item_loader
        self.shuffle: bool = shuffle
        self.distributed_env = _DistributedEnv.detect()
        if self.distributed_env.world_size > 1:
            if drop_last is False:
                # NOTE(review): `logger.warn` is deprecated in favour of
                # `logger.warning`.
                logger.warn(
                    "You're operating within a distributed environment and have disabled the `drop_last` option. "
                    "Please note that this configuration may lead to training interruptions if your system depends "
                    "on distributed collectives."
                )
            else:
                # In a distributed setting, default to dropping the trailing
                # partial items so all ranks yield the same number of samples.
                drop_last = True
        self.drop_last = drop_last or False
        self.seed = seed
        self.max_cache_size = max_cache_size
        # Per-worker iteration state below; (re)initialized in `__iter__`
        # (or lazily in `__len__` / `__getitem__`).
        self.cache: Optional[Cache] = None
        self.worker_env: Optional[_WorkerEnv] = None
        self.worker_chunks: List[int] = []  # chunk ids assigned to this worker
        self.worker_intervals: List[List[int]] = []  # sample interval per assigned chunk
        self.current_indexes: List[int] = []  # remaining sample indexes in the active chunk
        self.chunk_index = 0  # position within `worker_chunks`
        self.num_chunks: Optional[int] = None
        self.global_index = 0  # samples yielded this epoch, across all chunks
        self.index = 0  # samples yielded inside the active chunk
        self.has_triggered_download = False
        self.min_items_per_replica: Optional[int] = None
        self.current_epoch = 1
        self.random_state = None
        self.shuffler: Optional[Shuffle] = None
        self.serializers = serializers
        # State reloaded via `load_state_dict`, consumed on the next `__iter__`.
        self._state_dict: Optional[Dict[str, Any]] = None

    def set_shuffle(self, shuffle: bool) -> None:
        """Override the shuffle flag."""
        self.shuffle = shuffle

    def set_epoch(self, current_epoch: int) -> None:
        """Set the current epoch to the dataset on epoch starts.

        When using the StreamingDataLoader, this is done automatically
        """
        # If the state dict has been reloaded, don't override the current epoch
        # The StreamingDataloader would clean this out
        if self._state_dict is None:
            self.current_epoch = current_epoch

    def _create_cache(self, worker_env: _WorkerEnv) -> Cache:
        # Special/empty input paths are redirected to a local cache directory
        # derived from a hash of the input location.
        if _should_replace_path(self.input_dir.path):
            cache_path = _try_create_cache_dir(
                input_dir=self.input_dir.path if self.input_dir.path else self.input_dir.url
            )
            if cache_path is not None:
                self.input_dir.path = cache_path
        cache = Cache(
            input_dir=self.input_dir,
            item_loader=self.item_loader,
            chunk_bytes=1,
            serializers=self.serializers,
            max_cache_size=self.max_cache_size,
        )
        cache._reader._try_load_config()
        if not cache.filled:
            raise ValueError(
                f"The provided dataset `{self.input_dir}` doesn't contain any {_INDEX_FILENAME} file."
                " HINT: Did you successfully optimize a dataset to the provided `input_dir`?"
            )
        return cache

    def _create_shuffler(self, cache: Cache) -> Shuffle:
        # Honour seed/drop_last from any reloaded state so a resumed run
        # replays the exact same shuffling.
        seed = self.seed
        drop_last = self.drop_last
        if self._state_dict is not None:
            state: Dict[str, Any] = self._state_dict
            seed = state["seed"]
            drop_last = state["drop_last"]
        return FullShuffle(cache, seed, drop_last) if self.shuffle else NoShuffle(cache, seed, drop_last)

    def __len__(self) -> int:
        # `__len__` may be called before `__iter__`; build just enough state
        # to answer.
        if self.shuffler is None:
            cache = self._create_cache(worker_env=_WorkerEnv.detect())
            self.shuffler = self._create_shuffler(cache)
        return self.shuffler.get_len(self.distributed_env, self.current_epoch)

    def __iter__(self) -> "StreamingDataset":
        # When the StreamingDataset is used within map or optimize, let's refetch the distributed env.
        if os.getenv("DATA_OPTIMIZER_GLOBAL_RANK"):
            self.distributed_env = _DistributedEnv.detect()
        self.worker_env = _WorkerEnv.detect()
        self.cache = self._create_cache(worker_env=self.worker_env)
        self.shuffler = self._create_shuffler(self.cache)
        # Handle restart
        if self._state_dict:
            self._validate_state_dict()
            state: Dict[str, Any] = self._state_dict
            self.current_epoch = state["current_epoch"]
            chunks_per_replica, intervals_per_replica = self.shuffler.get_chunks_and_intervals_per_ranks(
                self.distributed_env, self.current_epoch
            )
            chunks_replica = chunks_per_replica[self.distributed_env.global_rank % self.distributed_env.world_size]
            intervals_replica = intervals_per_replica[self.distributed_env.global_rank % self.distributed_env.world_size]
            # Handle restart
            # NOTE(review): this nested check looks redundant — we are already
            # inside `if self._state_dict:`. Verify whether it can be dropped.
            if self._state_dict:
                self._resume(chunks_replica, intervals_replica)
        else:
            # Fresh epoch: assign this replica's chunks, then keep only those
            # owned by this dataloader worker (round-robin by position).
            chunks_per_replica, intervals_per_replica = self.shuffler.get_chunks_and_intervals_per_ranks(
                self.distributed_env, self.current_epoch
            )
            chunks_replica = chunks_per_replica[self.distributed_env.global_rank % self.distributed_env.world_size]
            intervals_replica = intervals_per_replica[
                self.distributed_env.global_rank % self.distributed_env.world_size
            ]
            self.worker_chunks = []
            self.worker_intervals = []
            for i, (chunk_index, chunk_interval) in enumerate(zip(chunks_replica, intervals_replica)):
                if i % self.worker_env.world_size != self.worker_env.rank:
                    continue
                self.worker_chunks.append(chunk_index)
                self.worker_intervals.append(chunk_interval)
            self.num_chunks = len(self.worker_chunks)
            self.current_indexes = []
            self.chunk_index = 0
            self.global_index = 0
            self.index = 0
        self.has_triggered_download = False
        self.last_time = time()
        return self

    def _resume(self, chunks_replica: List[int], intervals_replica: List[Any]) -> None:
        """Restore worker/chunk/sample positions from the reloaded state."""
        assert self._state_dict
        assert self.worker_env
        assert self.shuffler
        state: Dict[str, Any] = self._state_dict
        num_workers = state["num_workers"]
        batch_size = state["batch_size"]
        # TODO: Implement elastic sampling where the number of workers, ranks can change.
        num_samples_yielded = self._state_dict["num_samples_yielded"]
        # replay sampling from each worker / chunks using the batch size
        workers_chunks, workers_intervals = _associate_chunks_to_workers(
            num_workers, self.worker_env, chunks_replica, intervals_replica
        )
        indexes = _replay_sampling(num_samples_yielded, batch_size, num_workers)
        chunks_index, indexes = _replay_chunks_sampling(workers_intervals, indexes)
        # select the chunks and intervals associated to this worker
        worker_rank = self.worker_env.rank
        self.num_chunks = len(workers_intervals[worker_rank])
        self.chunk_index = chunks_index[worker_rank]
        self.worker_chunks = workers_chunks[worker_rank]
        self.worker_intervals = workers_intervals[worker_rank]
        # replay the indexes for the current chunks
        interval = self.worker_intervals[self.chunk_index]
        current_indexes = np.arange(interval[0], interval[1])
        # re-shuffle the indexes
        current_indexes = self.shuffler(current_indexes, self.num_chunks, self.current_epoch, self.chunk_index)
        # skip any indexes already consumed
        current_indexes = current_indexes[indexes[worker_rank] :]
        self.current_indexes = current_indexes
        self.global_index = num_samples_yielded
        # bump the chunk_index
        self.chunk_index += 1

    def __getitem__(self, index: Union[ChunkedIndex, int]) -> Any:
        # Support random access before `__iter__` was ever called.
        if self.cache is None:
            self.worker_env = _WorkerEnv.detect()
            self.cache = self._create_cache(worker_env=self.worker_env)
            self.shuffler = self._create_shuffler(self.cache)
        if isinstance(index, int):
            index = ChunkedIndex(index, self.cache._get_chunk_index_from_index(index))
        return self.cache[index]

    def __next__(self) -> Any:
        # Prevent this process from yielding more batches than its share.
        if self.global_index >= len(self):
            self.current_epoch += 1
            raise StopIteration
        # Lazily re-populate the interval to reduce memory usage.
        if len(self.current_indexes) == 0:
            if self.chunk_index == self.num_chunks:
                self.current_epoch += 1
                raise StopIteration
            # reset index
            self.index = 0
            interval = self.worker_intervals[self.chunk_index]
            current_indexes = np.arange(interval[0], interval[1])
            assert self.shuffler is not None
            assert self.num_chunks is not None
            self.current_indexes = self.shuffler(current_indexes, self.num_chunks, self.current_epoch, self.chunk_index)
            self.chunk_index += 1
        # Get the first index
        index = self.current_indexes.pop(0)
        # Call the `__getitem__` method.
        data = self.__getitem__(
            ChunkedIndex(
                index=index,
                chunk_index=self.worker_chunks[self.chunk_index - 1],
                # Provide the full chunk list only on the first fetch.
                chunk_indexes=None if self.has_triggered_download else self.worker_chunks,
                # NOTE(review): `chunk_index - 1` is at most
                # `len(worker_intervals) - 1`, so this equality looks like it
                # can never hold — verify the intended off-by-one.
                is_last_index=(self.chunk_index - 1) == len(self.worker_intervals) and len(self.current_indexes) == 1,
            )
        )
        self.has_triggered_download = True
        self.global_index += 1
        self.index += 1
        return data

    def state_dict(self, num_samples_yielded: int, num_workers: int, batch_size: int) -> Dict[str, Any]:
        """Capture the dataset progress so iteration can later be resumed."""
        if _is_in_dataloader_worker():
            raise RuntimeError("The method `state_dict` should only be called in the main process.")
        # If state was reloaded and not yet consumed, just refresh its counter.
        if self._state_dict is not None:
            self._state_dict["num_samples_yielded"] = num_samples_yielded
            return self._state_dict
        state = {
            "num_samples_yielded": num_samples_yielded,
            "num_workers": num_workers,
            "batch_size": batch_size,
            "current_epoch": self.current_epoch,
            "input_dir_path": self.input_dir.path,
            "input_dir_url": self.input_dir.url,
            "item_loader": self.item_loader.state_dict() if self.item_loader else None,
            "drop_last": self.drop_last,
            "seed": self.seed,
            "world_size": self.distributed_env.world_size,
            "shuffle": self.shuffle,
        }
        return state

    def load_state_dict(self, state_dict: Dict[str, Any]) -> None:
        """Stash a reloaded state; it is applied on the next `__iter__`."""
        if state_dict:
            # the state is restored within the workers
            self._state_dict = state_dict

    def _validate_state_dict(self) -> None:
        """Raise if the reloaded state conflicts with the current configuration."""
        assert self._state_dict
        assert self.worker_env
        assert self.cache
        state: Dict[str, Any] = self._state_dict
        if state["shuffle"] != self.shuffle:
            raise ValueError(
                "The provided `shuffle` state doesn't match the current one. "
                f"Found `{self.shuffle}` instead of `{state['shuffle']}`."
            )
        if state["num_workers"] != self.worker_env.world_size:
            raise ValueError(
                "The provided `num_workers` state doesn't match the current one. "
                f"Found `{self.worker_env.world_size}` instead of `{state['num_workers']}`."
            )
        # Note: We need to check whether the path has been resolved to its associated cache.
        # In this case, validate the cache folder is the same.
        if _should_replace_path(state["input_dir_path"]):
            cache_path = _try_create_cache_dir(
                input_dir=state["input_dir_path"] if state["input_dir_path"] else state["input_dir_url"]
            )
            if cache_path != self.input_dir.path:
                raise ValueError(
                    "The provided `input_dir` path state doesn't match the current one. "
                    f"Found `{self.input_dir.path}` instead of `{cache_path}`."
                )
        elif state["input_dir_path"] != self.input_dir.path:
            raise ValueError(
                "The provided `input_dir` path state doesn't match the current one. "
                f"Found `{self.input_dir.path}` instead of `{state['input_dir_path']}`."
            )
        if state["input_dir_url"] != self.input_dir.url:
            raise ValueError(
                "The provided `input_dir` URL state doesn't match the current one. "
                f"Found `{self.input_dir.url}` instead of `{state['input_dir_url']}`."
            )
        if state["seed"] != self.seed:
            raise ValueError(
                "The provided `seed` state doesn't match the current one. "
                f"Found `{self.seed}` instead of `{state['seed']}`."
            )
        if self.item_loader and state["item_loader"] != self.item_loader.state_dict():
            raise ValueError(
                "The provided `item_loader` state doesn't match the current one. "
                f"Found `{self.item_loader.state_dict()}` instead of `{state['item_loader']}`."
            )
        if state["drop_last"] != self.drop_last:
            raise ValueError(
                "The provided `drop_last` state doesn't match the current one. "
                f"Found `{self.drop_last}` instead of `{state['drop_last']}`."
            )
def _try_create_cache_dir(input_dir: Optional[str]) -> Optional[str]:
    """Create (if needed) and return the local cache directory for `input_dir`.

    The folder is keyed on a digest of the input location so distinct inputs
    never collide. MD5 is used as a fingerprint, not as a security boundary.
    """
    digest = hashlib.md5((input_dir or "").encode()).hexdigest()
    if "LIGHTNING_CLUSTER_ID" in os.environ and "LIGHTNING_CLOUD_PROJECT_ID" in os.environ:
        # Running on the Lightning cloud: use the platform cache mount.
        cache_dir = os.path.join("/cache", "chunks", digest)
    else:
        cache_dir = os.path.join(_DEFAULT_CACHE_DIR, digest)
    os.makedirs(cache_dir, exist_ok=True)
    return cache_dir
def _should_replace_path(path: Optional[str]) -> bool:
"""Whether the input path is a special path to be replaced."""
if path is None or path == "":
return True
return path.startswith("/teamspace/datasets/") or path.startswith("/teamspace/s3_connections/")
def is_integer(value: str) -> bool:
    """Return True when `value` parses as a base-10 integer."""
    try:
        int(value)
    except Exception:
        return False
    return True
def _associate_chunks_to_workers(
    num_workers: int, worker_env: _WorkerEnv, chunks_replica: List[int], intervals_replica: List[Any]
) -> Any:
    """Split a replica's chunks round-robin across its dataloader workers.

    Worker `w` owns every position `p` with `p % worker_env.world_size == w`.
    Returns two dicts mapping worker index to its chunks and intervals.
    """
    workers_chunks = {}
    workers_intervals = {}
    for worker_idx in range(num_workers):
        chunks_for_worker = []
        intervals_for_worker = []
        for position, (chunk, interval) in enumerate(zip(chunks_replica, intervals_replica)):
            if position % worker_env.world_size == worker_idx:
                chunks_for_worker.append(chunk)
                intervals_for_worker.append(interval)
        workers_chunks[worker_idx] = chunks_for_worker
        workers_intervals[worker_idx] = intervals_for_worker
    return workers_chunks, workers_intervals
def _replay_sampling(num_samples_yielded: int, batch_size: int, num_workers: int) -> Dict[int, int]:
"""This function replays the sampling from the dataloader."""
divisible_num_batches_yielded = num_samples_yielded // (num_workers * batch_size)
indexes = {}
for worker_idx in range(num_workers):
indexes[worker_idx] = divisible_num_batches_yielded * batch_size
num_samples_yielded = num_samples_yielded - (num_workers * divisible_num_batches_yielded * batch_size)
# take care of the reminder
worker_idx = 0 # reset the worker_idx
while True:
if num_samples_yielded >= batch_size:
indexes[worker_idx] += batch_size
worker_idx = (worker_idx + 1) % num_workers
num_samples_yielded -= batch_size
else:
indexes[worker_idx] += num_samples_yielded
break
return indexes
def _replay_chunks_sampling(
workers_intervals: Dict[int, List[Any]], indexes: Dict[int, int]
) -> Tuple[Dict[int, int], Dict[int, int]]:
chunks_index = {}
for worker_idx in range(len(workers_intervals)):
chunks_index[worker_idx] = 0
for worker_idx, intervals in workers_intervals.items():
for interval in intervals:
size = interval[-1] - interval[0]
if indexes[worker_idx] >= size:
indexes[worker_idx] -= size
chunks_index[worker_idx] += 1
return chunks_index, indexes
|
evocodebench_data_8
|
"""Tools module"""
# Copyright (c) 2023
# Author: Hugo Delatte <delatte.hugo@gmail.com>
# License: BSD 3 clause
# Implementation derived from:
# scikit-learn, Copyright (c) 2007-2010 David Cournapeau, Fabian Pedregosa, Olivier
# Grisel Licensed under BSD 3 clause.
from collections.abc import Callable, Iterator
from enum import Enum
from functools import wraps
import numpy as np
import numpy.typing as npt
import pandas as pd
import sklearn as sk
import sklearn.base as skb
# Explicit public API of this module.
__all__ = [
    "AutoEnum",
    "cached_property_slots",
    "cache_method",
    "input_to_array",
    "args_names",
    "format_measure",
    "bisection",
    "safe_split",
    "fit_single_estimator",
    "fit_and_predict",
    "deduplicate_names",
    "default_asset_names",
    "check_estimator",
]
# `type(list[int])` is `types.GenericAlias`, captured this way to avoid an
# extra import; used below to support `Class[...]` subscription.
GenericAlias = type(list[int])
class AutoEnum(str, Enum):
    """Base Enum class used in `skfolio`"""

    @staticmethod
    def _generate_next_value_(
        name: str, start: int, count: int, last_values: any
    ) -> str:
        """Overriding `auto()` so that generated member values are the
        lower-cased member names."""
        return name.lower()

    @classmethod
    def has(cls, value: str) -> bool:
        """Check if a value is in the Enum.

        Parameters
        ----------
        value : str
            Input value.

        Returns
        -------
        x : bool
            True if the value is in the Enum, False otherwise.
        """
        return value in cls._value2member_map_

    def __repr__(self) -> str:
        """Use the member name as the Enum representation."""
        return self.name
# noinspection PyPep8Naming
class cached_property_slots:
    """Cached property descriptor for classes that define `__slots__`.

    Works like `functools.cached_property`, but stores the computed value in
    a `_<name>` slot instead of the instance `__dict__` (which slotted
    classes do not have). The owner class must declare the private slot.
    """

    def __init__(self, func):
        self.func = func
        self.public_name = None
        self.private_name = None
        self.__doc__ = func.__doc__

    def __set_name__(self, owner, name):
        # Called automatically by the class machinery; records the public
        # attribute name and the private slot used for storage.
        self.public_name = name
        self.private_name = f"_{name}"

    def __get__(self, instance, owner=None):
        if instance is None:
            # Class-level access returns the descriptor itself.
            return self
        if self.private_name is None:
            raise TypeError(
                "Cannot use cached_property instance without calling __set_name__"
                " on it."
            )
        try:
            value = getattr(instance, self.private_name)
        except AttributeError:
            # First access: compute once and cache in the private slot.
            value = self.func(instance)
            setattr(instance, self.private_name, value)
        return value

    def __set__(self, instance, value=None):
        # BUG FIX: the descriptor protocol passes the assigned *value* as the
        # third argument; it was previously (misleadingly) named `owner`.
        # Assignment is rejected to keep the property read-only.
        raise AttributeError(
            f"'{type(instance).__name__}' object attribute '{self.public_name}' is"
            " read-only"
        )

    __class_getitem__ = classmethod(GenericAlias)
def _make_key(args, kwds) -> int:
"""Make a cache key from optionally typed positional and keyword arguments"""
key = args
if kwds:
for item in kwds.items():
key += item
return hash(key)
def cache_method(cache_name: str) -> Callable:
    """Decorator that caches class methods results into a class dictionary.

    Parameters
    ----------
    cache_name : str
        Name of the dictionary class attribute.

    Returns
    -------
    func : Callable
        Decorating function that caches class methods.
    """

    # `self` is deliberately excluded from the cache key: keying on `self`
    # (as functools.lru_cache would) keeps instances alive and leaks memory.
    def decorating_function(method):
        @wraps(method)
        def wrapper(self, *args, **kwargs):
            func_name = method.__name__
            # Build the key from args plus flattened kwargs items, then hash.
            key_parts = args
            for item in kwargs.items():
                key_parts += item
            key = hash(key_parts)
            try:
                cache = getattr(self, cache_name)
            except AttributeError:
                raise AttributeError(
                    "You first need to create a dictionary class attribute named "
                    f"'{cache_name}'"
                ) from None
            if not isinstance(cache, dict):
                raise AttributeError(
                    f"'The cache named '{cache_name}' must be a "
                    f"dictionary, got {type(cache)}"
                )
            method_cache = cache.setdefault(func_name, {})
            if key not in method_cache:
                method_cache[key] = method(self, *args, **kwargs)
            return method_cache[key]

        return wrapper

    return decorating_function
def args_names(func: object) -> list[str]:
    """Returns the argument names of a function, excluding `self`.

    Parameters
    ----------
    func : object
        Function.

    Returns
    -------
    args : list[str]
        The list of function arguments.
    """
    code = func.__code__
    # co_varnames lists arguments first, then locals; slice to arguments only.
    arg_names = code.co_varnames[: code.co_argcount]
    return [name for name in arg_names if name != "self"]
def check_estimator(
    estimator: skb.BaseEstimator | None, default: skb.BaseEstimator, check_type: any
):
    """Validate the estimator type and return a clone of it, falling back to
    the default estimator when none is provided.

    Parameters
    ----------
    estimator : BaseEstimator, optional
        Estimator.
    default : BaseEstimator
        Default estimator to return when `estimator` is `None`.
    check_type : any
        Expected type of the estimator to check against.

    Returns
    -------
    estimator : Estimator
        The checked estimator or the default.
    """
    if estimator is None:
        return default
    if isinstance(estimator, check_type):
        # Clone so the caller's instance is never mutated by later fitting.
        return sk.clone(estimator)
    raise TypeError(f"Expected type {check_type}, got {type(estimator)}")
def input_to_array(
items: dict | npt.ArrayLike,
n_assets: int,
fill_value: any,
dim: int,
assets_names: np.ndarray | None,
name: str,
) -> np.ndarray:
"""Convert a collection of items (array-like or dictionary) into
a numpy array and verify its shape.
Parameters
----------
items : np.ndarray | dict | list
Items to verify and convert to array.
n_assets : int
Expected number of assets.
Used to verify the shape of the converted array.
fill_value : any
When `items` is a dictionary, elements that are not in `asset_names` are filled
with `fill_value` in the converted array.
dim : int
Dimension of the final array.
Possible values are `1` or `2`.
assets_names : ndarray, optional
Asset names used when `items` is a dictionary.
name : str
Name of the items used for error messages.
Returns
-------
values : ndarray of shape (n_assets) for dim=1 or (n_groups, n_assets) for dim=2
Converted array.
"""
if dim not in [1, 2]:
raise ValueError(f"dim must be 1 or 2, got {dim}")
if isinstance(items, dict):
if assets_names is None:
raise ValueError(
f"If `{name}` is provided as a dictionary, you must input `X` as a"
" DataFrame with assets names in columns"
)
if dim == 1:
arr = np.array([items.get(asset, fill_value) for asset in assets_names])
else:
# add assets and convert dict to ordered array
arr = {}
for asset in assets_names:
elem = items.get(asset)
if elem is None:
elem = [asset]
elif np.isscalar(elem):
elem = [asset, elem]
else:
elem = [asset, *elem]
arr[asset] = elem
arr = (
pd.DataFrame.from_dict(arr, orient="index")
.loc[assets_names]
.to_numpy()
.T
)
else:
arr = np.asarray(items)
if arr.ndim != dim:
raise ValueError(f"`{name}` must be a {dim}D array, got a {arr.ndim}D array")
if not isinstance(fill_value, str) and np.isnan(arr).any():
raise ValueError(f"`{name}` contains NaN")
if arr.shape[-1] != n_assets:
if dim == 1:
s = "(n_assets,)"
else:
s = "(n_groups, n_assets)"
raise ValueError(
f"`{name}` must be a of shape {s} with n_assets={n_assets}, "
f"got {arr.shape[0]}"
)
return arr
def format_measure(x: float, percent: bool = False) -> str:
    """Format a measure number into a user-friendly string.

    Parameters
    ----------
    x : float
        Number to format.
    percent : bool, default=False
        If this is set to True, the number is formatted in percentage.

    Returns
    -------
    formatted : str
        Formatted string.
    """
    if np.isnan(x):
        return str(x)
    # Pick the displayed magnitude and the format type character.
    if percent:
        scaled, kind = x * 100, "%"
    else:
        scaled, kind = x, "f"
    # Use 2 to 6 decimals, adding digits as the magnitude shrinks.
    if scaled == 0:
        decimals = 0
    else:
        decimals = min(6, max(int(-np.log10(abs(scaled))) + 2, 2))
    return f"{x:.{decimals}{kind}}"
def bisection(x: list[np.ndarray]) -> Iterator[list[np.ndarray, np.ndarray]]:
    """Generator to bisect a list of array.

    Parameters
    ----------
    x : list[ndarray]
        A list of array.

    Yields
    ------
    arr : Iterator[list[ndarray, ndarray]]
        Bisected array. Arrays with fewer than two elements are skipped.
    """
    for arr in x:
        size = len(arr)
        if size <= 1:
            continue
        half = size // 2
        yield [arr[:half], arr[half:]]
def safe_indexing(
X: npt.ArrayLike | pd.DataFrame, indices: npt.ArrayLike | None, axis: int = 0
):
"""
Return rows, items or columns of X using indices.
Parameters
----------
X : array-like
Data from which to sample rows.
indices : array-like, optional
Indices of rows or columns.
The default (`None`) is to select the entire data.
axis : int, default=0
The axis along which `X` will be sub-sampled. `axis=0` will select
rows while `axis=1` will select columns.
Returns
-------
subset :
Subset of X on axis 0.
"""
if indices is None:
return X
if hasattr(X, "iloc"):
return X.take(indices, axis=axis)
if axis == 0:
return X[indices]
return X[:, indices]
def safe_split(
    X: npt.ArrayLike,
    y: npt.ArrayLike | None = None,
    indices: np.ndarray | None = None,
    axis: int = 0,
):
    """Create subset of dataset.

    Slice X, y according to indices for cross-validation.

    Parameters
    ----------
    X : array-like
        Data to be indexed.
    y : array-like
        Data to be indexed.
    indices : ndarray of int, optional
        Rows or columns to select from X and y.
        The default (`None`) is to select the entire data.
    axis : int, default=0
        The axis along which `X` will be sub-sampled. `axis=0` will select
        rows while `axis=1` will select columns.

    Returns
    -------
    X_subset : array-like
        Indexed data.
    y_subset : array-like
        Indexed targets, or `None` when `y` is `None`.
    """
    X_subset = safe_indexing(X, indices=indices, axis=axis)
    y_subset = None if y is None else safe_indexing(y, indices=indices, axis=axis)
    return X_subset, y_subset
def fit_single_estimator(
    estimator: any,
    X: npt.ArrayLike,
    y: npt.ArrayLike | None = None,
    indices: np.ndarray | None = None,
    axis: int = 0,
):
    """Fit an estimator on a (possibly sub-sampled) dataset within a job.

    Parameters
    ----------
    estimator : estimator object implementing 'fit' and 'predict'
        The object to use to fit the data.
    X : array-like of shape (n_observations, n_assets)
        The data to fit.
    y : array-like of shape (n_observations, n_targets), optional
        The target array if provided.
    indices : ndarray of int, optional
        Rows or columns to select from X and y.
        The default (`None`) is to select the entire data.
    axis : int, default=0
        The axis along which `X` will be sub-sampled. `axis=0` will select
        rows while `axis=1` will select columns.

    Returns
    -------
    fitted_estimator : estimator
        The fitted estimator (fitted in place and returned).
    """
    X_subset, y_subset = safe_split(X, y, indices=indices, axis=axis)
    estimator.fit(X_subset, y_subset)
    return estimator
def fit_and_predict(
    estimator: any,
    X: npt.ArrayLike,
    y: npt.ArrayLike | None,
    train: np.ndarray,
    test: np.ndarray | list[np.ndarray],
    fit_params: dict,
    method: str,
    column_indices: np.ndarray | None = None,
) -> npt.ArrayLike | list[npt.ArrayLike]:
    """Fit the estimator and predict values for a given dataset split.

    Parameters
    ----------
    estimator : estimator object implementing 'fit' and 'predict'
        The object to use to fit the data.
    X : array-like of shape (n_observations, n_assets)
        The data to fit.
    y : array-like of shape (n_observations, n_factors) or None
        The factor array if provided.
    train : ndarray of int of shape (n_train_observations,)
        Indices of training samples.
    test : ndarray of int of shape (n_test_samples,) or list of ndarray
        Indices of test samples or list of indices.
    fit_params : dict
        Parameters that will be passed to ``estimator.fit``.
    method : str
        Invokes the passed method name of the passed estimator.
    column_indices : ndarray, optional
        Indices of columns to select.
        The default (`None`) is to select all columns.

    Returns
    -------
    predictions : array-like or list of array-like
        The result of calling 'estimator.method' on `test`; a list of results
        when `test` is a list of index arrays.
    """
    if fit_params is None:
        fit_params = {}
    # Optional column sub-selection applied to both X and y.
    X, y = safe_split(X, y, indices=column_indices, axis=1)
    X_train, y_train = safe_split(X, y, indices=train, axis=0)
    if y_train is None:
        estimator.fit(X_train, **fit_params)
    else:
        estimator.fit(X_train, y_train, **fit_params)
    predict_fn = getattr(estimator, method)
    if isinstance(test, list):
        # One prediction per test index array.
        predictions = [
            predict_fn(safe_split(X, indices=t, axis=0)[0]) for t in test
        ]
    else:
        X_test, _ = safe_split(X, indices=test, axis=0)
        predictions = predict_fn(X_test)
    return predictions
def default_asset_names(n_assets: int) -> np.ndarray:
    """Default asset names are `["x0", "x1", ..., "x(n_assets - 1)"]`.

    Parameters
    ----------
    n_assets : int
        Number of assets.

    Returns
    -------
    asset_names : ndarray of str
        Default assets names.
    """
    names = [f"x{i}" for i in range(n_assets)]
    # dtype=object keeps names as Python strings rather than fixed-width numpy str.
    return np.asarray(names, dtype=object)
def deduplicate_names(names: npt.ArrayLike) -> list[str]:
    """Rename duplicated names by appending "_{duplicate_nb}" at the end.

    This function is inspired by the pandas function `_maybe_dedup_names`.

    Parameters
    ----------
    names : array-like of shape (n_names,)
        List of names.

    Returns
    -------
    names : list[str]
        Deduplicate names.
    """
    result = list(names)
    seen = {}
    for position, name in enumerate(result):
        occurrences = seen.get(name, 0)
        if occurrences:
            # Second and later occurrences get a "_{count}" suffix.
            result[position] = f"{name}_{occurrences}"
        seen[name] = occurrences + 1
    return result
|
evocodebench_data_9
|
import json
import numpy as np
from agents.microagent import MicroAgent
class AgentSerializer:
    """Converts MicroAgent instances to and from plain dictionaries."""

    @staticmethod
    def to_dict(agent):
        """
        Serialize the MicroAgent object to a dictionary for persistence.
        """
        embedding = agent.purpose_embedding
        # ndarrays are not JSON-serializable; persist them as plain lists.
        if isinstance(embedding, np.ndarray):
            embedding = embedding.tolist()
        serialized = {
            "dynamic_prompt": agent.dynamic_prompt,
            "purpose": agent.purpose,
            "purpose_embedding": embedding,
            "depth": agent.depth,
            "max_depth": agent.max_depth,
            "usage_count": agent.usage_count,
            "id": agent.id,
            "parent_id": agent.parent_id,
            "working_agent": agent.working_agent,
            "is_prime": agent.is_prime,
            "evolve_count": agent.evolve_count,
            "number_of_code_executions": agent.number_of_code_executions,
            "last_input": agent.last_input,
        }
        return serialized

    @staticmethod
    def from_dict(data, agent_lifecycle, openai_wrapper):
        """
        Deserialize a dictionary back into a MicroAgent object.
        """
        agent = MicroAgent(
            data["dynamic_prompt"],
            data["purpose"],
            data["depth"],
            agent_lifecycle,
            openai_wrapper,
            data["max_depth"],
            data.get("working_agent", False),
            data.get("is_prime", False),
            id=data["id"],
            parent_id=data["parent_id"]
        )
        embedding = data.get("purpose_embedding")
        # Restore the ndarray form only when an embedding was persisted.
        agent.purpose_embedding = None if embedding is None else np.array(embedding)
        agent.usage_count = data.get("usage_count", 0)
        agent.evolve_count = data.get("evolve_count", 0)
        agent.number_of_code_executions = data.get("number_of_code_executions", 0)
        agent.last_input = data.get("last_input", "")
        return agent
|
evocodebench_data_10
|
# coding=utf-8
# Copyright 2023 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Functions for processing images."""
import types
from typing import Optional, Union
import dm_pix
import jax
import jax.numpy as jnp
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
# Use a non-interactive backend so figures can be rendered without a display.
matplotlib.use('Agg')
# Arrays in this module may be either numpy or jax.numpy arrays.
_Array = Union[np.ndarray, jnp.ndarray]
def mse_to_psnr(mse):
    """Compute PSNR given an MSE (we assume the maximum pixel value is 1)."""
    # PSNR = -10 * log10(mse), written with natural logarithms.
    return -10.0 * jnp.log(mse) / jnp.log(10.0)
def psnr_to_mse(psnr):
    """Compute MSE given a PSNR (we assume the maximum pixel value is 1)."""
    # Inverse of mse_to_psnr: mse = 10 ** (-psnr / 10).
    return jnp.exp(jnp.log(10.0) * (-0.1 * psnr))
def ssim_to_dssim(ssim):
    """Compute DSSIM given an SSIM."""
    # DSSIM maps SSIM from [-1, 1] to [0, 1].
    return 0.5 * (1 - ssim)
def dssim_to_ssim(dssim):
    """Compute SSIM given a DSSIM (inverse of `ssim_to_dssim`)."""
    return 1 - 2 * dssim
def linear_to_srgb(
    linear, eps = None, xnp = jnp
):
    """Assumes `linear` is in [0, 1], see https://en.wikipedia.org/wiki/SRGB."""
    if eps is None:
        eps = xnp.finfo(xnp.float32).eps
    # sRGB OETF: linear segment near zero, gamma curve elsewhere.
    low = 323 / 25 * linear
    high = (211 * xnp.maximum(eps, linear) ** (5 / 12) - 11) / 200
    return xnp.where(linear <= 0.0031308, low, high)
def srgb_to_linear(
    srgb, eps = None, xnp = jnp
):
    """Assumes `srgb` is in [0, 1], see https://en.wikipedia.org/wiki/SRGB."""
    if eps is None:
        eps = xnp.finfo(xnp.float32).eps
    # Inverse sRGB OETF: linear segment near zero, gamma curve elsewhere.
    low = 25 / 323 * srgb
    high = xnp.maximum(eps, ((200 * srgb + 11) / (211))) ** (12 / 5)
    return xnp.where(srgb <= 0.04045, low, high)
def downsample(img, factor):
    """Area downsample img (factor must evenly divide img height and width)."""
    height, width = img.shape[0], img.shape[1]
    if height % factor or width % factor:
        raise ValueError(
            f'Downsampling factor {factor} does not '
            f'evenly divide image shape {img.shape[:2]}'
        )
    # Split each spatial axis into (blocks, factor) and average each block.
    blocked = (height // factor, factor, width // factor, factor) + img.shape[2:]
    return img.reshape(blocked).mean((1, 3))
def compute_vignette(coords, weights, powers=(1, 2, 3)):
    """Compute a vignetting as a polynomial function of image plane radius."""
    radius_squared = jnp.sum(jnp.square(coords), axis=-1)
    # Polynomial features of squared radius, one column per entry of `powers`.
    features = radius_squared[Ellipsis, None] ** jnp.array(powers)
    weighted = jnp.abs(weights) * features[Ellipsis, None]
    # Non-negative weights guarantee attenuation, i.e. scaling in (0, 1].
    return jnp.exp(-jnp.sum(weighted, axis=-2))
def render_histogram(x, **kwargs):
    """Call pyplot's hist() and render it to a numpy buffer."""
    fig = plt.figure()
    fig.gca().hist(x, **kwargs)
    fig.canvas.draw()
    # Canvas reports (width, height); numpy wants (height, width).
    height_width = fig.canvas.get_width_height()[::-1]
    rgb_bytes = fig.canvas.tostring_rgb()
    image = np.frombuffer(rgb_bytes, dtype=np.uint8).reshape(height_width + (3,))
    # Close the figure so repeated calls do not leak matplotlib state.
    plt.close(fig)
    return image
class MetricHarness:
    """A helper class for evaluating several error metrics."""

    def __init__(
        self,
        disable_ssim=False,
    ):
        # SSIM is optional: dm_pix/jit are only touched when it is enabled.
        self.ssim_fn = None if disable_ssim else jax.jit(dm_pix.ssim)

    def __call__(self, rgb_pred, rgb_gt, name_fn=lambda s: s):
        """Evaluate the error between a predicted rgb image and the true image."""
        squared_error = (rgb_pred - rgb_gt) ** 2
        metrics = {'psnr': mse_to_psnr(squared_error.mean())}
        if self.ssim_fn is not None:
            metrics['ssim'] = self.ssim_fn(rgb_pred, rgb_gt)
        # Apply the name function and cast all metrics down to a scalar float.
        return {name_fn(k): float(v) for (k, v) in metrics.items()}
|
evocodebench_data_11
|
# coding=utf-8
# Copyright 2023 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Camera pose and ray generation utility functions."""
import enum
import functools
import types
from typing import Final, List, Mapping, Optional, Text, Tuple, TypeAlias
from absl import logging
import chex
from internal import configs
from internal import geometry
from internal import math
from internal import rigid_body
from internal import spin_math
from internal import stepfun
from internal import utils
import jax
from jax import random
import jax.numpy as jnp
import jaxcam
import numpy as np
import scipy
# Arrays in this module may be numpy or jax.numpy; scalars may be plain floats.
_Array: TypeAlias = np.ndarray | jnp.ndarray
_ScalarArray: TypeAlias = float | _Array
# NOTE(review): presumably the missing-frame fraction above which file-index
# identification is treated as an error -- confirm at the use site.
_IDENTIFY_FILE_INDICES_MISSING_FRACTION_ERROR_THRESHOLD: Final[float] = 0.95
def convert_to_ndc(
    origins,
    directions,
    pixtocam,
    near = 1.0,
    xnp = np,
):
    """Converts a set of rays to normalized device coordinates (NDC).

    Args:
      origins: ndarray(float32), [..., 3], world space ray origins.
      directions: ndarray(float32), [..., 3], world space ray directions.
      pixtocam: ndarray(float32), [3, 3], inverse intrinsic matrix.
      near: float, near plane along the negative z axis.
      xnp: either numpy or jax.numpy.

    Returns:
      origins_ndc: ndarray(float32), [..., 3].
      directions_ndc: ndarray(float32), [..., 3].

    Rays are assumed to come from a perspective pinhole camera with identity
    pose and intrinsics defined by `pixtocam`; any input ray with a negative
    z direction component (dz < 0) can be mapped, which allows sharing one
    NDC space for "forward facing" scenes. The near plane maps to z = -1 and
    infinity to z = +1, so the valid near/far bounds along directions_ndc are
    0 and 1. Note that directions_ndc are not unit length and projection does
    not commute with the remapping:
      projection(origins + t * directions) != origins_ndc + t * directions_ndc.
    See Appendix C in https://arxiv.org/abs/2003.08934 for details.
    """
    # Advance each origin onto the near plane (oz = -near) so that the new
    # near bound corresponds to t = 0.
    t_near = -(near + origins[Ellipsis, 2]) / directions[Ellipsis, 2]
    origins = origins + t_near[Ellipsis, None] * directions
    dx, dy, dz = xnp.moveaxis(directions, -1, 0)
    ox, oy, oz = xnp.moveaxis(origins, -1, 0)
    # 1 / pixtocam[i, 2] equals -2 * focal / c{x,y} for standard intrinsics.
    x_scale = 1.0 / pixtocam[0, 2]
    y_scale = 1.0 / pixtocam[1, 2]
    # Perspective projection of the t = 0 near-plane points (land on z = -1).
    origins_ndc = xnp.stack(
        [x_scale * ox / oz, y_scale * oy / oz, -xnp.ones_like(oz)], axis=-1
    )
    # Perspective projection of the t = infinity points (land on z = +1).
    infinity_ndc = xnp.stack(
        [x_scale * dx / dz, y_scale * dy / dz, xnp.ones_like(oz)], axis=-1
    )
    # NDC directions point from the projected near points towards infinity.
    directions_ndc = infinity_ndc - origins_ndc
    return origins_ndc, directions_ndc
def pad_poses(p):
    """Pad [..., 3, 4] pose matrices with a homogeneous bottom row [0,0,0,1]."""
    row_shape = p[Ellipsis, :1, :4].shape
    bottom_row = np.broadcast_to([0, 0, 0, 1.0], row_shape)
    return np.concatenate([p[Ellipsis, :3, :4], bottom_row], axis=-2)
def unpad_poses(p):
    """Remove the homogeneous bottom row from [..., 4, 4] pose matrices."""
    # Keeping only the top 3 rows drops the [0, 0, 0, 1] row.
    return p[Ellipsis, 0:3, 0:4]
def recenter_poses(poses):
    """Recenter poses around the origin."""
    # Invert the average camera-to-world pose and apply it to every pose.
    average = average_pose(poses)
    transform = np.linalg.inv(pad_poses(average))
    recentered = transform @ pad_poses(poses)
    return unpad_poses(recentered), transform
def average_pose(poses, lock_up = False):
    """New pose using average position, z-axis, and up vector of input poses."""
    mean_position = poses[:, :3, 3].mean(0)
    mean_z_axis = poses[:, :3, 2].mean(0)
    mean_up = poses[:, :3, 1].mean(0)
    return viewmatrix(mean_z_axis, mean_up, mean_position, lock_up=lock_up)
def viewmatrix(
    lookdir,
    up,
    position,
    lock_up = False,
):
    """Construct lookat view matrix."""

    def unit(v):
        # Inlined `normalize` helper: scale a vector to unit length.
        return v / np.linalg.norm(v)

    def orthogonal_dir(a, b):
        return unit(np.cross(a, b))

    axes = [None, unit(up), unit(lookdir)]
    # x-axis is always the normalized cross product of `lookdir` and `up`.
    axes[0] = orthogonal_dir(axes[1], axes[2])
    # By default the lookdir (z) is kept exact and `up` is recomputed; with
    # lock_up=True the `up` vector is kept and lookdir is recomputed instead.
    recompute = 2 if lock_up else 1
    axes[recompute] = orthogonal_dir(
        axes[(recompute + 1) % 3], axes[(recompute + 2) % 3]
    )
    # Columns are [x, y, z, position], giving a (3, 4) camera-to-world matrix.
    return np.stack(axes + [position], axis=1)
def rotation_about_axis(degrees, axis=0):
    """Creates rotation matrix about one of the coordinate axes."""
    radians = degrees / 180.0 * np.pi
    cos_t, sin_t = np.cos(radians), np.sin(radians)
    # Place the 2D rotation block in the lower-right of a 3x3 identity
    # (rotation about the x-axis), then roll rows and columns to move it
    # onto the requested axis.
    rot3 = np.eye(3)
    rot3[1:3, 1:3] = np.array([[cos_t, -sin_t], [sin_t, cos_t]])
    rot3 = np.roll(np.roll(rot3, axis, axis=0), axis, axis=1)
    # Embed into a 4x4 homogeneous transform.
    result = np.eye(4)
    result[:3, :3] = rot3
    return result
def normalize(x):
    """Normalization helper function."""
    length = np.linalg.norm(x)
    return x / length
def focus_point_fn(poses, xnp = np):
    """Calculate nearest point to all focal axes in poses."""
    directions = poses[:, :3, 2:3]
    origins = poses[:, :3, 3:4]
    # Per-camera projector onto the plane orthogonal to its focal axis.
    m = xnp.eye(3) - directions * xnp.transpose(directions, [0, 2, 1])
    mt_m = xnp.transpose(m, [0, 2, 1]) @ m
    # Least-squares point minimizing summed distance to all focal axes.
    return xnp.linalg.inv(mt_m.mean(0)) @ (mt_m @ origins).mean(0)[:, 0]
# Constants for generate_spiral_path():
NEAR_STRETCH = 0.9 # Push forward near bound for forward facing render path.
FAR_STRETCH = 5.0 # Push back far bound for forward facing render path.
FOCUS_DISTANCE = 0.75 # Relative weighting of near, far bounds for render path.
def generate_spiral_path(
    poses,
    bounds,
    n_frames = 120,
    n_rots = 2,
    zrate = 0.5,
):
    """Calculates a forward facing spiral path for rendering.

    Args:
      poses: (N, 3, 4) camera-to-world pose matrices.
      bounds: scene near/far bounds used to choose a focus depth.
      n_frames: number of output poses along the spiral.
      n_rots: number of full rotations of the spiral.
      zrate: frequency multiplier of the vertical (z) oscillation.

    Returns:
      (n_frames, 3, 4) array of render poses along the spiral.
    """
    # Find a reasonable 'focus depth' for this dataset as a weighted average
    # of conservative near and far bounds in disparity space.
    near_bound = bounds.min() * NEAR_STRETCH
    far_bound = bounds.max() * FAR_STRETCH
    # All cameras will point towards the world space point (0, 0, -focal).
    focal = 1 / (((1 - FOCUS_DISTANCE) / near_bound + FOCUS_DISTANCE / far_bound))
    # Get radii for spiral path using 90th percentile of camera positions.
    positions = poses[:, :3, 3]
    radii = np.percentile(np.abs(positions), 90, 0)
    radii = np.concatenate([radii, [1.0]])  # Append homogeneous coordinate.
    # Generate poses for spiral path.
    render_poses = []
    cam2world = average_pose(poses)
    up = poses[:, :3, 1].mean(0)
    for theta in np.linspace(0.0, 2.0 * np.pi * n_rots, n_frames, endpoint=False):
        # Spiral position expressed in the average camera's frame.
        t = radii * [np.cos(theta), -np.sin(theta), -np.sin(theta * zrate), 1.0]
        position = cam2world @ t
        lookat = cam2world @ [0, 0, -focal, 1.0]
        z_axis = position - lookat
        render_poses.append(viewmatrix(z_axis, up, position))
    render_poses = np.stack(render_poses, axis=0)
    return render_poses
def transform_poses_pca(poses):
    """Transforms poses so principal components lie on XYZ axes.

    Args:
      poses: a (N, 3, 4) array containing the cameras' camera to world transforms.

    Returns:
      A tuple (poses, transform), with the transformed poses and the applied
      camera_to_world transforms.
    """
    # Center camera positions at their mean.
    t = poses[:, :3, 3]
    t_mean = t.mean(axis=0)
    t = t - t_mean
    # Principal axes of the position cloud (t.T @ t is symmetric PSD).
    # NOTE(review): np.linalg.eig on a symmetric matrix can, in edge cases,
    # return complex dtype and unordered results; eigh is the canonical choice
    # here -- confirm before changing.
    eigval, eigvec = np.linalg.eig(t.T @ t)
    # Sort eigenvectors in order of largest to smallest eigenvalue.
    inds = np.argsort(eigval)[::-1]
    eigvec = eigvec[:, inds]
    rot = eigvec.T
    # Keep a proper rotation (det = +1) rather than a reflection.
    if np.linalg.det(rot) < 0:
        rot = np.diag(np.array([1, 1, -1])) @ rot
    transform = np.concatenate([rot, rot @ -t_mean[:, None]], -1)
    poses_recentered = unpad_poses(transform @ pad_poses(poses))
    transform = np.concatenate([transform, np.eye(4)[3:]], axis=0)
    # Flip coordinate system if z component of y-axis is negative
    if poses_recentered.mean(axis=0)[2, 1] < 0:
        poses_recentered = np.diag(np.array([1, -1, -1])) @ poses_recentered
        transform = np.diag(np.array([1, -1, -1, 1])) @ transform
    # Just make sure it's it in the [-1, 1]^3 cube
    scale_factor = 1.0 / np.max(np.abs(poses_recentered[:, :3, 3]))
    poses_recentered[:, :3, 3] *= scale_factor
    transform = np.diag(np.array([scale_factor] * 3 + [1])) @ transform
    return poses_recentered, transform
def transform_poses_focus(poses):
    """Transforms poses so that the "focus point" of capture is at the origin.

    Args:
      poses: a (N, 3, 4) array containing the cameras' camera to world transforms.

    Returns:
      A tuple (poses, transform), with the transformed poses and the applied
      camera_to_world transforms.
    """
    # Move the focus point to the origin.
    focus_point = focus_point_fn(poses)
    # Use average up vector as the Z axis.
    swap_y_z = np.array([
        [1, 0, 0],
        [0, 0, 1],
        [0, -1, 0.0],
    ])
    rot = average_pose(poses, lock_up=True)[:3, :3] @ swap_y_z
    # World-to-camera transform of the (axis-swapped) average pose, with the
    # focus point as translation origin.
    transform = np.concatenate([rot.T, rot.T @ -focus_point[:, None]], -1)
    poses_recentered = transform @ pad_poses(poses)
    transform = np.concatenate([transform, np.eye(4)[3:]], axis=0)
    # Just make sure it's it in the [-1, 1]^3 cube
    scale_factor = 1.0 / np.max(np.abs(poses_recentered[:, :3, 3]))
    poses_recentered[:, :3, 3] *= scale_factor
    transform = np.diag(np.array([scale_factor] * 3 + [1])) @ transform
    return poses_recentered, transform
def generate_ellipse_path(
    poses,
    n_frames = 120,
    const_speed = True,
    z_variation = 0.0,
    z_phase = 0.0,
    rad_mult_min = 1.0,
    rad_mult_max = 1.0,
    render_rotate_xaxis = 0.0,
    render_rotate_yaxis = 0.0,
    use_avg_z_height = False,
    z_height_percentile = None,
    lock_up = False,
):
    """Generate an elliptical render path based on the given poses.

    Args:
      poses: (N, 3, 4) input camera-to-world poses.
      n_frames: number of output poses along the ellipse.
      const_speed: resample so speed along the path is roughly constant.
      z_variation: amount of vertical oscillation (0 disables it).
      z_phase: phase offset of the vertical oscillation.
      rad_mult_min: minimum radius multiplier (camera zoomed in).
      rad_mult_max: maximum radius multiplier (camera zoomed out).
      render_rotate_xaxis: extra rotation (degrees) about the x-axis.
      render_rotate_yaxis: extra rotation (degrees) about the y-axis.
      use_avg_z_height: center path height on the average camera height.
      z_height_percentile: optional percentile of camera heights used instead
        of the mean when set.
      lock_up: passed to `viewmatrix`; keeps the up vector fixed.

    Returns:
      (n_frames, 3, 4) array of render poses on the ellipse.
    """
    # Calculate the focal point for the path (cameras point toward this).
    center = focus_point_fn(poses)
    # Default path height sits at z=0 (in middle of zero-mean capture pattern).
    xy_offset = center[:2]
    # Calculate lengths for ellipse axes based on input camera positions.
    xy_radii = np.percentile(np.abs(poses[:, :2, 3] - xy_offset), 90, axis=0)
    # Use ellipse that is symmetric about the focal point in xy.
    xy_low = xy_offset - xy_radii
    xy_high = xy_offset + xy_radii
    # Optional height variation, need not be symmetric.
    z_min = np.percentile((poses[:, 2, 3]), 10, axis=0)
    z_max = np.percentile((poses[:, 2, 3]), 90, axis=0)
    if use_avg_z_height or z_height_percentile is not None:
        # Center the path vertically around the average camera height, good for
        # datasets recentered by transform_poses_focus function.
        if z_height_percentile is None:
            z_init = poses[:, 2, 3].mean(axis=0)
        else:
            z_init = np.percentile(poses[:, 2, 3], z_height_percentile, axis=0)
    else:
        # Center the path at zero, good for datasets recentered by
        # transform_poses_pca function.
        z_init = 0
    z_low = z_init + z_variation * (z_min - z_init)
    z_high = z_init + z_variation * (z_max - z_init)
    xyz_low = np.array([*xy_low, z_low])
    xyz_high = np.array([*xy_high, z_high])

    def get_positions(theta):
        # Interpolate between bounds with trig functions to get ellipse in x-y.
        # Optionally also interpolate in z to change camera height along path.
        t_x = np.cos(theta) * 0.5 + 0.5
        t_y = np.sin(theta) * 0.5 + 0.5
        t_z = np.cos(theta + 2 * np.pi * z_phase) * 0.5 + 0.5
        t_xyz = np.stack([t_x, t_y, t_z], axis=-1)
        positions = xyz_low + t_xyz * (xyz_high - xyz_low)
        # Interpolate between min and max radius multipliers so the camera zooms in
        # and out of the scene center.
        t = np.sin(theta) * 0.5 + 0.5
        rad_mult = rad_mult_min + (rad_mult_max - rad_mult_min) * t
        positions = center + (positions - center) * rad_mult[:, None]
        return positions

    theta = np.linspace(0, 2.0 * np.pi, n_frames + 1, endpoint=True)
    positions = get_positions(theta)
    if const_speed:
        # Resample theta angles so that the velocity is closer to constant.
        lengths = np.linalg.norm(positions[1:] - positions[:-1], axis=-1)
        theta = stepfun.sample(None, theta, np.log(lengths), n_frames + 1)
        positions = get_positions(theta)
    # Throw away duplicated last position.
    positions = positions[:-1]
    # Set path's up vector to axis closest to average of input pose up vectors.
    avg_up = poses[:, :3, 1].mean(0)
    avg_up = avg_up / np.linalg.norm(avg_up)
    ind_up = np.argmax(np.abs(avg_up))
    up = np.eye(3)[ind_up] * np.sign(avg_up[ind_up])
    # Build lookat poses toward the focus point, then apply extra rotations.
    poses = np.stack([viewmatrix(p - center, up, p, lock_up) for p in positions])
    poses = poses @ rotation_about_axis(-render_rotate_yaxis, axis=1)
    poses = poses @ rotation_about_axis(render_rotate_xaxis, axis=0)
    return poses
def generate_interpolated_path(
    poses,
    n_interp,
    spline_degree = 5,
    smoothness = 0.03,
    rot_weight = 0.1,
    lock_up = False,
    fixed_up_vector = None,
    lookahead_i = None,
    frames_per_colmap = None,
    const_speed = False,
    n_buffer = None,
    periodic = False,
    n_interp_as_total = False,
):
    """Creates a smooth spline path between input keyframe camera poses.

    Spline is calculated with poses in format (position, lookat-point, up-point).

    Args:
      poses: (n, 3, 4) array of input pose keyframes.
      n_interp: returned path will have n_interp * (n - 1) total poses.
      spline_degree: polynomial degree of B-spline.
      smoothness: parameter for spline smoothing, 0 forces exact interpolation.
      rot_weight: relative weighting of rotation/translation in spline solve.
      lock_up: if True, forced to use given Up and allow Lookat to vary.
      fixed_up_vector: replace the interpolated `up` with a fixed vector.
      lookahead_i: force the look direction to look at the pose `i` frames ahead.
      frames_per_colmap: conversion factor for the desired average velocity.
      const_speed: renormalize spline to have constant delta between each pose.
      n_buffer: Number of buffer frames to insert at the start and end of the
        path. Helps keep the ends of a spline path straight.
      periodic: make the spline path periodic (perfect loop).
      n_interp_as_total: use n_interp as total number of poses in path rather than
        the number of poses to interpolate between each input.

    Returns:
      Array of new camera poses with shape (n_interp * (n - 1), 3, 4), or
      (n_interp, 3, 4) if n_interp_as_total is set, along with the spline
      sample positions `u` and the keyframe positions `u_keyframes`.
    """

    def poses_to_points(poses, dist):
        """Converts from pose matrices to (position, lookat, up) format."""
        pos = poses[:, :3, -1]
        lookat = poses[:, :3, -1] - dist * poses[:, :3, 2]
        up = poses[:, :3, -1] + dist * poses[:, :3, 1]
        return np.stack([pos, lookat, up], 1)

    def points_to_poses(points):
        """Converts from (position, lookat, up) format to pose matrices."""
        poses = []
        for i in range(len(points)):
            pos, lookat_point, up_point = points[i]
            # Bug fix: `lookat` needs a default. Previously it was only
            # assigned inside the `lookahead_i is not None` branch, raising
            # UnboundLocalError for the default lookahead_i=None.
            lookat = pos - lookat_point
            if lookahead_i is not None and i + lookahead_i < len(points):
                # Look toward the pose `lookahead_i` frames ahead instead.
                lookat = pos - points[i + lookahead_i][0]
            up = (up_point - pos) if fixed_up_vector is None else fixed_up_vector
            poses.append(viewmatrix(lookat, up, pos, lock_up=lock_up))
        return np.array(poses)

    def insert_buffer_poses(poses, n_buffer):
        """Insert extra poses at the start and end of the path."""

        def average_distance(points):
            distances = np.linalg.norm(points[1:] - points[0:-1], axis=-1)
            return np.mean(distances)

        def shift(pose, dz):
            result = np.copy(pose)
            z = result[:3, 2]
            z /= np.linalg.norm(z)
            # Move along forward-backward axis. -z is forward.
            result[:3, 3] += z * dz
            return result

        dz = average_distance(poses[:, :3, 3])
        prefix = np.stack([shift(poses[0], (i + 1) * dz) for i in range(n_buffer)])
        prefix = prefix[::-1]  # reverse order
        suffix = np.stack(
            [shift(poses[-1], -(i + 1) * dz) for i in range(n_buffer)]
        )
        result = np.concatenate([prefix, poses, suffix])
        return result

    def remove_buffer_poses(poses, u, n_frames, u_keyframes, n_buffer):
        """Drop interpolated poses falling inside the buffer regions."""
        u_keyframes = u_keyframes[n_buffer:-n_buffer]
        mask = (u >= u_keyframes[0]) & (u <= u_keyframes[-1])
        poses = poses[mask]
        u = u[mask]
        n_frames = len(poses)
        return poses, u, n_frames, u_keyframes

    def interp(points, u, k, s):
        """Runs multidimensional B-spline interpolation on the input points."""
        sh = points.shape
        pts = np.reshape(points, (sh[0], -1))
        k = min(k, sh[0] - 1)
        tck, u_keyframes = scipy.interpolate.splprep(pts.T, k=k, s=s, per=periodic)
        new_points = np.array(scipy.interpolate.splev(u, tck))
        new_points = np.reshape(new_points.T, (len(u), sh[1], sh[2]))
        return new_points, u_keyframes

    if n_buffer is not None:
        poses = insert_buffer_poses(poses, n_buffer)
    points = poses_to_points(poses, dist=rot_weight)
    if n_interp_as_total:
        n_frames = n_interp + 1  # Add extra since final pose is discarded.
    else:
        n_frames = n_interp * (points.shape[0] - 1)
    u = np.linspace(0, 1, n_frames, endpoint=True)
    new_points, u_keyframes = interp(points, u=u, k=spline_degree, s=smoothness)
    poses = points_to_poses(new_points)
    if n_buffer is not None:
        poses, u, n_frames, u_keyframes = remove_buffer_poses(
            poses, u, n_frames, u_keyframes, n_buffer
        )
    if frames_per_colmap is not None:
        # Recalculate the number of frames to achieve desired average velocity.
        positions = poses[:, :3, -1]
        lengths = np.linalg.norm(positions[1:] - positions[:-1], axis=-1)
        total_length_colmap = lengths.sum()
        print('old n_frames:', n_frames)
        print('total_length_colmap:', total_length_colmap)
        n_frames = int(total_length_colmap * frames_per_colmap)
        print('new n_frames:', n_frames)
        u = np.linspace(
            np.min(u_keyframes), np.max(u_keyframes), n_frames, endpoint=True
        )
        new_points, _ = interp(points, u=u, k=spline_degree, s=smoothness)
        poses = points_to_poses(new_points)
    if const_speed:
        # Resample timesteps so that the velocity is nearly constant.
        positions = poses[:, :3, -1]
        lengths = np.linalg.norm(positions[1:] - positions[:-1], axis=-1)
        u = stepfun.sample(None, u, np.log(lengths), n_frames + 1)
        new_points, _ = interp(points, u=u, k=spline_degree, s=smoothness)
        poses = points_to_poses(new_points)
    return poses[:-1], u[:-1], u_keyframes
def safe_interpolate_1d(
    x,
    spline_degree,
    smoothness,
    t_input,
    t_output,
):
  """Interpolate 1d signal x (defined at t_input and queried at t_output)."""
  # TODO(bmild): switch interpolation t values to match those chosen for path.
  # Fitting a degree-k polynomial requires at least k + 1 samples, so clamp
  # the spline degree to the number of available samples minus one.
  num_samples = len(x)
  degree = min(spline_degree, num_samples - 1)
  if degree <= 0:
    # Degenerate case (0 or 1 input samples): emit a constant signal.
    constant = x[0] if num_samples else 0.0
    return np.full(t_output.shape, constant, dtype=x.dtype)
  spline = scipy.interpolate.splrep(t_input, x, s=smoothness, k=degree)
  return scipy.interpolate.splev(t_output, spline).astype(x.dtype)
def identify_file_names(dir_or_text_file):
  """Load filenames from text file or directory."""
  if not utils.isdir(dir_or_text_file):
    # A text file: every line holds one filename.
    with utils.open_file(dir_or_text_file, 'r') as fp:
      contents = fp.read()
    if isinstance(contents, bytes):
      # Decode bytes into string before splitting into lines.
      contents = contents.decode('utf-8')
    return contents.splitlines()
  # A directory: its sorted entries are the filenames.
  return sorted(utils.listdir(dir_or_text_file))
def identify_file_indices(
    dir_or_text_file, file_names
):
  """Computes indices for a subset of files out of a larger list."""
  # Load the subset of file names.
  subset_names = identify_file_names(dir_or_text_file)
  # COLMAP sometimes doesn't reconstruct all images, which results in some
  # files being missing; warn about them, and error out if too large a
  # fraction of the subset is absent.
  if not set(subset_names).issubset(file_names):
    missing = set(subset_names) - set(file_names)
    logging.warning(
        'Some files from subset are missing in the file names:\n%s',
        ' '.join(str(x) for x in missing),
    )
    missing_fraction = len(missing) / len(subset_names)
    if missing_fraction > _IDENTIFY_FILE_INDICES_MISSING_FRACTION_ERROR_THRESHOLD:
      raise ValueError(
          f'{missing_fraction*100}% of subset files is missing'
          f' from file_names: {missing}'
      )
  file_names_set = set(file_names)
  # Indices into file_names, preserving the order used by subset_names.
  return np.array(
      [file_names.index(n) for n in subset_names if n in file_names_set]
  )
def get_meters_per_colmap_from_calibration_images(
    config, poses, image_names
):
  """Uses calibration images to get how many meters is a single COLMAP unit."""
  # By default, the input camera poses are scaled to fit in the [-1, 1]^3
  # cube, which implies a scaling of 2 / .25 = 8 meters between the farthest
  # apart camera poses.
  meters_per_colmap = 8.0
  if config.render_calibration_keyframes is None:
    return meters_per_colmap
  # Use provided calibration keyframes to determine metric world scale.
  calib_names = identify_file_names(config.render_calibration_keyframes)
  # Pair up consecutive calibration filenames, keeping only pairs where both
  # images exist in the set of colmap-posed images.
  indices = []
  for i in range(0, len(calib_names), 2):
    name0, name1 = calib_names[i : i + 2]
    if name0 in image_names and name1 in image_names:
      indices.append((image_names.index(name0), image_names.index(name1)))
  if indices:
    # Extract colmap-space positions from the camera pose matrices.
    positions = poses[indices][Ellipsis, :3, -1]
    # Every pair of calibration keyframes should have world space distance
    # `render_calibration_distance` according to the capture handbook.
    colmap_lengths = np.linalg.norm(
        positions[:, 0] - positions[:, 1], axis=-1
    )
    colmap_length = colmap_lengths.mean(axis=0)
    # Ratio of world distance to colmap distance.
    meters_per_colmap = config.render_calibration_distance / colmap_length
    print('colmap lengths', colmap_lengths)
    print('avg', colmap_length)
    print('meters_per_colmap', meters_per_colmap)
  return meters_per_colmap
def calibrate_spline_speed(
    config, poses, image_names
):
  """Uses input config to determine a conversion factor for the spline speed."""
  meters_per_sec = config.render_spline_meters_per_sec
  if meters_per_sec is None:
    return None
  # Convert (meters / colmap-unit) and (frames / second) into the number of
  # frames to spend per colmap-unit of distance traveled along the path.
  meters_per_colmap = get_meters_per_colmap_from_calibration_images(
      config, poses, image_names
  )
  frames_per_colmap = (
      meters_per_colmap / meters_per_sec * config.render_video_fps
  )
  print('returning frames_per_colmap', frames_per_colmap)
  return frames_per_colmap
def create_render_spline_path(
    config,
    image_names,
    poses,
    exposures,
):
  """Creates spline interpolation render path from subset of dataset poses.

  Args:
    config: configs.Config object.
    image_names: a list of image filenames.
    poses: [N, 3, 4] array of extrinsic camera pose matrices.
    exposures: optional list of floating point exposure values.

  Returns:
    spline_indices: list of indices used to select spline keyframe poses.
    render_poses: array of interpolated extrinsic camera poses for the path.
    render_exposures: optional list of interpolated exposures for the path.
  """

  def remove_outlier_spline_indices(
      spline_indices, poses, q_max, q_mult
  ):
    """Identify spline indices correspond to inlier poses."""
    # A keyframe whose jump to its successor exceeds q_mult times the q_max
    # quantile of all consecutive distances is treated as an outlier.
    poses = poses[spline_indices]
    points = poses[:, :3, -1]
    distances = np.linalg.norm(points[1:] - points[:-1], axis=-1)
    mask = distances < q_mult * np.quantile(distances, q_max)
    mask = np.concatenate([mask, [True]], axis=0)  # Keep the last pose.
    num_inliers = int(np.sum(mask))
    num_total = len(spline_indices)
    print(
        f'remove_outlier_spline_indices: {num_inliers}/{num_total} spline '
        'path poses remaining after outlier removal.'
    )
    return spline_indices[mask]

  # Grab poses corresponding to the image filenames.
  spline_indices = identify_file_indices(
      config.render_spline_keyframes, image_names
  )
  if (
      config.render_spline_outlier_keyframe_quantile is not None
      and config.render_spline_outlier_keyframe_multiplier is not None
  ):
    # Optionally drop keyframes whose positions jump too far from their
    # neighbors (e.g. spurious pose estimates).
    spline_indices = remove_outlier_spline_indices(
        spline_indices,
        poses,
        q_max=config.render_spline_outlier_keyframe_quantile,
        q_mult=config.render_spline_outlier_keyframe_multiplier,
    )
  keyframes = poses[spline_indices]
  # Optional frames-per-colmap-unit speed calibration (None disables it).
  frames_per_colmap = calibrate_spline_speed(config, poses, image_names)
  if config.render_spline_fixed_up:
    # Fix path to use world-space "up" vector instead of "banking" with spline.
    all_up_vectors = poses[:, :3, 1]  # second column of pose matrix is up.
    fixed_up_vector = normalize(all_up_vectors.mean(axis=0))
  else:
    fixed_up_vector = None
  render_poses, frame_timesteps, keyframe_timesteps = (
      generate_interpolated_path(
          keyframes,
          n_interp=config.render_spline_n_interp,
          spline_degree=config.render_spline_degree,
          smoothness=config.render_spline_smoothness,
          rot_weight=config.render_spline_rot_weight,
          lock_up=config.render_spline_lock_up,
          fixed_up_vector=fixed_up_vector,
          lookahead_i=config.render_spline_lookahead_i,
          frames_per_colmap=frames_per_colmap,
          const_speed=config.render_spline_const_speed,
          n_buffer=config.render_spline_n_buffer,
      )
  )
  if config.render_spline_interpolate_exposure:
    if exposures is None:
      raise ValueError(
          'config.render_spline_interpolate_exposure is True but '
          'create_render_spline_path() was passed exposures=None.'
      )
    # Interpolate per-frame exposure value, in log space so exposure ratios
    # (rather than differences) are interpolated.
    log_exposure = np.log(exposures[spline_indices])
    # Use aggressive smoothing for exposure interpolation to avoid flickering.
    log_exposure_interp = safe_interpolate_1d(
        log_exposure,
        spline_degree=5,
        smoothness=config.render_spline_interpolate_exposure_smoothness,
        t_input=keyframe_timesteps,
        t_output=frame_timesteps,
    )
    render_exposures = np.exp(log_exposure_interp)
  else:
    render_exposures = None
  return spline_indices, render_poses, render_exposures
def intrinsic_matrix(
    fx,
    fy,
    cx,
    cy,
    xnp = np,
):
  """Intrinsic matrix for a pinhole camera in OpenCV coordinate system."""
  # Zero skew; principal point (cx, cy) in the last column.
  top = [fx, 0, cx]
  mid = [0, fy, cy]
  bottom = [0, 0, 1.0]
  return xnp.array([top, mid, bottom])
def get_pixtocam(
    focal,
    width,
    height,
    xnp = np,
):
  """Inverse intrinsic matrix for a perfect pinhole camera."""
  # Equal focal lengths, principal point at the image center.
  camtopix = intrinsic_matrix(focal, focal, width * 0.5, height * 0.5, xnp)
  return xnp.linalg.inv(camtopix)
def pixel_coordinates(
    width, height, xnp = np
):
  """Tuple of the x and y integer coordinates for a grid of pixels."""
  # 'xy' indexing makes both outputs have shape (height, width).
  xs = xnp.arange(width)
  ys = xnp.arange(height)
  return xnp.meshgrid(xs, ys, indexing='xy')
def _radial_and_tangential_distort(
x,
y,
k1 = 0,
k2 = 0,
k3 = 0,
k4 = 0,
p1 = 0,
p2 = 0,
):
"""Computes the distorted pixel positions."""
r2 = x * x + y * y
radial_distortion = r2 * (k1 + r2 * (k2 + r2 * (k3 + r2 * k4)))
dx_radial = x * radial_distortion
dy_radial = y * radial_distortion
dx_tangential = 2 * p1 * x * y + p2 * (r2 + 2 * x * x)
dy_tangential = 2 * p2 * x * y + p1 * (r2 + 2 * y * y)
return x + dx_radial + dx_tangential, y + dy_radial + dy_tangential
def _compute_residual_and_jacobian(
x,
y,
xd,
yd,
k1 = 0.0,
k2 = 0.0,
k3 = 0.0,
k4 = 0.0,
p1 = 0.0,
p2 = 0.0,
):
"""Auxiliary function of radial_and_tangential_undistort()."""
# Adapted from https://github.com/google/nerfies/blob/main/nerfies/camera.py
# let r(x, y) = x^2 + y^2;
# d(x, y) = 1 + k1 * r(x, y) + k2 * r(x, y) ^2 + k3 * r(x, y)^3 +
# k4 * r(x, y)^4;
r = x * x + y * y
d = 1.0 + r * (k1 + r * (k2 + r * (k3 + r * k4)))
# The perfect projection is:
# xd = x * d(x, y) + 2 * p1 * x * y + p2 * (r(x, y) + 2 * x^2);
# yd = y * d(x, y) + 2 * p2 * x * y + p1 * (r(x, y) + 2 * y^2);
#
# Let's define
#
# fx(x, y) = x * d(x, y) + 2 * p1 * x * y + p2 * (r(x, y) + 2 * x^2) - xd;
# fy(x, y) = y * d(x, y) + 2 * p2 * x * y + p1 * (r(x, y) + 2 * y^2) - yd;
#
# We are looking for a solution that satisfies
# fx(x, y) = fy(x, y) = 0;
fx = d * x + 2 * p1 * x * y + p2 * (r + 2 * x * x) - xd
fy = d * y + 2 * p2 * x * y + p1 * (r + 2 * y * y) - yd
# Compute derivative of d over [x, y]
d_r = k1 + r * (2.0 * k2 + r * (3.0 * k3 + r * 4.0 * k4))
d_x = 2.0 * x * d_r
d_y = 2.0 * y * d_r
# Compute derivative of fx over x and y.
fx_x = d + d_x * x + 2.0 * p1 * y + 6.0 * p2 * x
fx_y = d_y * x + 2.0 * p1 * x + 2.0 * p2 * y
# Compute derivative of fy over x and y.
fy_x = d_x * y + 2.0 * p2 * y + 2.0 * p1 * x
fy_y = d + d_y * y + 2.0 * p2 * x + 6.0 * p1 * y
return fx, fy, fx_x, fx_y, fy_x, fy_y
def _radial_and_tangential_undistort(
    xd,
    yd,
    k1 = 0,
    k2 = 0,
    k3 = 0,
    k4 = 0,
    p1 = 0,
    p2 = 0,
    eps = 1e-9,
    max_iterations=10,
    xnp = np,
):
  """Computes undistorted (x, y) from (xd, yd)."""
  # From https://github.com/google/nerfies/blob/main/nerfies/camera.py
  # Newton's method on the residual of the distortion model, initialized at
  # the distorted point itself.
  x = xnp.copy(xd)
  y = xnp.copy(yd)
  for _ in range(max_iterations):
    fx, fy, fx_x, fx_y, fy_x, fy_y = _compute_residual_and_jacobian(
        x=x, y=y, xd=xd, yd=yd, k1=k1, k2=k2, k3=k3, k4=k4, p1=p1, p2=p2
    )
    # Solve the 2x2 linear system via Cramer's rule; where the Jacobian
    # determinant is near-singular, take a zero step instead.
    det = fy_x * fx_y - fx_x * fy_y
    invertible = xnp.abs(det) > eps
    zeros = xnp.zeros_like(det)
    step_x = xnp.where(invertible, (fx * fy_y - fy * fx_y) / det, zeros)
    step_y = xnp.where(invertible, (fy * fx_x - fx * fy_x) / det, zeros)
    x = x + step_x
    y = y + step_y
  return x, y
class ProjectionType(enum.Enum):
  """Camera projection type (perspective pinhole, fisheye, or 360 pano)."""

  # Standard pinhole perspective projection.
  PERSPECTIVE = 'perspective'
  # Fisheye: the in-plane radius is treated as the angle from the optical
  # axis when casting rays (see pixels_to_rays).
  FISHEYE = 'fisheye'
  # 360-degree panorama: pixel x maps to azimuth, y to elevation
  # (see pixels_to_rays).
  PANORAMIC = 'pano'
def pixels_to_rays(
    pix_x_int,
    pix_y_int,
    pixtocams,
    camtoworlds,
    distortion_params = None,
    pixtocam_ndc = None,
    camtype = ProjectionType.PERSPECTIVE,
    xnp = np,
):
  """Calculates rays given pixel coordinates, intrinisics, and extrinsics.

  Given 2D pixel coordinates pix_x_int, pix_y_int for cameras with
  inverse intrinsics pixtocams and extrinsics camtoworlds (and optional
  distortion coefficients distortion_params and NDC space projection matrix
  pixtocam_ndc), computes the corresponding 3D camera rays.

  Vectorized over the leading dimensions of the first four arguments.

  Args:
    pix_x_int: int array, shape SH, x coordinates of image pixels.
    pix_y_int: int array, shape SH, y coordinates of image pixels.
    pixtocams: float array, broadcastable to SH + [3, 3], inverse intrinsics.
    camtoworlds: float array, broadcastable to SH + [3, 4], camera extrinsics.
    distortion_params: dict of floats, optional camera distortion parameters.
    pixtocam_ndc: float array, [3, 3], optional inverse intrinsics for NDC.
    camtype: camera_utils.ProjectionType, fisheye or perspective camera.
    xnp: either numpy or jax.numpy.

  Returns:
    origins: float array, shape SH + [3], ray origin points.
    directions: float array, shape SH + [3], ray direction vectors.
    viewdirs: float array, shape SH + [3], normalized ray direction vectors.
    radii: float array, shape SH + [1], ray differential radii.
    imageplane: float array, shape SH + [2], xy coordinates on the image plane.
      If the image plane is at world space distance 1 from the pinhole, then
      imageplane will be the xy coordinates of a pixel in that space (so the
      camera ray direction at the origin would be (x, y, -1) in OpenGL coords).
  """

  # Must add half pixel offset to shoot rays through pixel centers.
  def pix_to_dir(x, y):
    return xnp.stack([x + 0.5, y + 0.5, xnp.ones_like(x)], axis=-1)

  # We need the dx and dy rays to calculate ray radii for mip-NeRF cones.
  # Stack the center pixel with its +1-in-x and +1-in-y neighbors so all
  # three rays go through the same transformations below.
  pixel_dirs_stacked = xnp.stack(
      [
          pix_to_dir(pix_x_int, pix_y_int),
          pix_to_dir(pix_x_int + 1, pix_y_int),
          pix_to_dir(pix_x_int, pix_y_int + 1),
      ],
      axis=0,
  )
  # For jax, need to specify high-precision matmul.
  matmul = math.matmul if xnp == jnp else xnp.matmul
  mat_vec_mul = lambda A, b: matmul(A, b[Ellipsis, None])[Ellipsis, 0]
  # Apply inverse intrinsic matrices.
  camera_dirs_stacked = mat_vec_mul(pixtocams, pixel_dirs_stacked)
  if distortion_params is not None:
    # Correct for distortion.
    x, y = _radial_and_tangential_undistort(
        camera_dirs_stacked[Ellipsis, 0],
        camera_dirs_stacked[Ellipsis, 1],
        **distortion_params,
        xnp=xnp,
    )
    camera_dirs_stacked = xnp.stack([x, y, xnp.ones_like(x)], -1)
  if camtype == ProjectionType.FISHEYE:
    # Fisheye: the in-plane radius is the angle theta from the optical axis
    # (clamped at pi); map the direction onto the unit sphere.
    theta = xnp.sqrt(xnp.sum(xnp.square(camera_dirs_stacked[Ellipsis, :2]), axis=-1))
    theta = xnp.minimum(xnp.pi, theta)
    sin_theta_over_theta = xnp.sin(theta) / theta
    camera_dirs_stacked = xnp.stack(
        [
            camera_dirs_stacked[Ellipsis, 0] * sin_theta_over_theta,
            camera_dirs_stacked[Ellipsis, 1] * sin_theta_over_theta,
            xnp.cos(theta),
        ],
        axis=-1,
    )
  elif camtype == ProjectionType.PANORAMIC:
    # Panorama: x is azimuth (theta) and y is the polar angle (phi).
    theta = camera_dirs_stacked[Ellipsis, 0]
    phi = camera_dirs_stacked[Ellipsis, 1]
    # Negation on y and z components accounts for expected OpenCV convention.
    camera_dirs_stacked = xnp.stack(
        [
            -xnp.sin(phi) * xnp.sin(theta),
            -xnp.cos(phi),
            -xnp.sin(phi) * xnp.cos(theta),
        ],
        axis=-1,
    )
  # Flip from OpenCV to OpenGL coordinate system.
  camera_dirs_stacked = matmul(
      camera_dirs_stacked, xnp.diag(xnp.array([1.0, -1.0, -1.0]))
  )
  # Extract 2D image plane (x, y) coordinates.
  imageplane = camera_dirs_stacked[0, Ellipsis, :2]
  # Apply camera rotation matrices.
  directions_stacked = mat_vec_mul(
      camtoworlds[Ellipsis, :3, :3], camera_dirs_stacked
  )
  # Extract the offset rays.
  directions, dx, dy = directions_stacked
  origins = xnp.broadcast_to(camtoworlds[Ellipsis, :3, -1], directions.shape)
  viewdirs = directions / xnp.linalg.norm(directions, axis=-1, keepdims=True)
  if pixtocam_ndc is None:
    # Distance from each unit-norm direction vector to its neighbors.
    dx_norm = xnp.linalg.norm(dx - directions, axis=-1)
    dy_norm = xnp.linalg.norm(dy - directions, axis=-1)
  else:
    # Convert ray origins and directions into projective NDC space.
    ndc_fn = functools.partial(convert_to_ndc, pixtocam=pixtocam_ndc, xnp=xnp)
    origins_dx, _ = ndc_fn(origins, dx)
    origins_dy, _ = ndc_fn(origins, dy)
    origins, directions = ndc_fn(origins, directions)
    # In NDC space, we use the offset between origins instead of directions.
    dx_norm = xnp.linalg.norm(origins_dx - origins, axis=-1)
    dy_norm = xnp.linalg.norm(origins_dy - origins, axis=-1)
  # Cut the distance in half, multiply it to match the variance of a uniform
  # distribution the size of a pixel (1/12, see paper).
  # TODO(barron): Add a unit test that this is correct.
  radii = (0.5 * (dx_norm + dy_norm))[Ellipsis, None] * 2 / xnp.sqrt(12)
  return origins, directions, viewdirs, radii, imageplane
def points_to_pixels(
    points,
    pixtocams,
    camtoworlds,
    distortion_params = None,
    camtype = ProjectionType.PERSPECTIVE,
    xnp = np,
):
  """Projects 3D world points into 2D pixel coordinates.

  The inverse of pixels_to_rays for a perspective camera: given points,
  inverse intrinsics pixtocams and extrinsics camtoworlds (with optional
  distortion coefficients distortion_params), compute pixel coordinates.
  Vectorized over the leading dimensions of the first four arguments.

  Args:
    points: float array, [..., 3], 3D coordinates of points to project.
    pixtocams: float array, [..., 3, 3], inverse intrinsics.
    camtoworlds: float array, [..., 3, 4], camera extrinsics.
    distortion_params: dict of floats or float arrays [...], optional camera
      distortion parameters.
    camtype: camera_utils.ProjectionType, type of camera model.
    xnp: either numpy (host compute) or jax.numpy (device compute).

  Returns:
    coordinates: float array, [..., 2], pixel coordinates.
    depth: float array, [...], per-point orthographic depth.
  """
  if camtype != ProjectionType.PERSPECTIVE:
    raise ValueError(f'points_to_pixels only supports perspective projection, '
                     f'not {camtype} mode.')
  # For jax, matmuls must request high precision explicitly.
  matmul = math.matmul if xnp == jnp else xnp.matmul
  apply_mat = lambda m, v: matmul(m, v[Ellipsis, None])[Ellipsis, 0]
  # World -> camera: invert the rigid transform stored in camtoworlds.
  rotation = camtoworlds[Ellipsis, :3, :3]
  translation = camtoworlds[Ellipsis, :3, -1]
  points_camera = apply_mat(
      xnp.swapaxes(rotation, -1, -2), points - translation
  )
  # Orthographic depth along -z (the camera looks down -z in OpenGL).
  depth = -points_camera[Ellipsis, -1]
  camera_dirs = points_camera / depth[Ellipsis, None]
  # OpenGL to OpenCV axis flip.
  camera_dirs = matmul(camera_dirs, xnp.diag(xnp.array([1.0, -1.0, -1.0])))
  if distortion_params is not None:
    # Apply the lens distortion model in normalized camera coordinates.
    xs, ys = _radial_and_tangential_distort(
        camera_dirs[Ellipsis, 0],
        camera_dirs[Ellipsis, 1],
        **distortion_params,
    )
    camera_dirs = xnp.stack([xs, ys, xnp.ones_like(xs)], -1)
  # Camera -> pixel via the (forward) intrinsics matrix.
  pixel_dirs = apply_mat(xnp.linalg.inv(pixtocams), camera_dirs)
  # Undo the half-pixel center offset used when casting rays.
  coordinates = pixel_dirs[Ellipsis, :2] - xnp.array([0.5, 0.5])
  return coordinates, depth
def rays_planes_intersection(
    z_min,
    z_max,
    origins,
    directions,
    xnp = np,
):
  """Crops rays to a range of z values.

  This is useful for situations where the scene lies within a range of
  altitudes, but the cameras are very far away, as with aerial data.

  Args:
    z_min: float z value of the lower cropping plane.
    z_max: float z value of the upper cropping plane.
    origins: ray origins points.
    directions: ray direction vectors.
    xnp: either numpy or jax.numpy.

  Returns:
    t_min: parametric location of the cropped ray origins
    t_max: parametric location of the ends of the cropped rays
  """
  # Parametric distances along each ray to the two horizontal planes.
  t_lower = (z_min - origins[Ellipsis, 2]) / directions[Ellipsis, 2]
  t_upper = (z_max - origins[Ellipsis, 2]) / directions[Ellipsis, 2]
  # Enter at the nearer plane (but never behind the origin); exit at the
  # farther one.
  t_min = xnp.maximum(0, xnp.minimum(t_lower, t_upper))
  t_max = xnp.maximum(t_lower, t_upper)
  return t_min, t_max
def _intersect_ranges(
r1,
r2,
xnp = np,
):
start = xnp.maximum(r1[0], r2[0])
end = xnp.minimum(r1[1], r2[1])
return (start, end)
def ray_box_intersection(
    ray_o, ray_d, corners, xnp = np
):
  """Returns enter/exit distances along the ray for box defined by `corners`."""
  # Slab method: per-axis parametric distances to the two bounding planes.
  to_lo = (corners[0] - ray_o) / ray_d
  to_hi = (corners[1] - ray_o) / ray_d
  # Latest entry across axes, earliest exit across axes; t_min > t_max
  # indicates a miss.
  t_min = xnp.minimum(to_lo, to_hi).max(axis=-1)
  t_max = xnp.maximum(to_lo, to_hi).min(axis=-1)
  return t_min, t_max
def modify_rays_with_bbox(
    rays, corners, xnp = np
):
  """Sets near/far by bbox intersection and multiplies lossmult by mask."""
  # Where a ray hits the box AND the hit interval overlaps [near, far],
  # tighten near/far to the intersected interval; elsewhere zero the ray out.
  t_min, t_max = ray_box_intersection(
      rays.origins, rays.directions, corners, xnp=xnp
  )
  t_min = t_min[Ellipsis, None]
  t_max = t_max[Ellipsis, None]
  inear, ifar = _intersect_ranges(
      (rays.near, rays.far), (t_min, t_max), xnp=xnp
  )
  valid = (t_min <= t_max) * (inear <= ifar)
  if rays.lossmult is None:
    lossmult = valid.astype(xnp.float32)
  else:
    lossmult = xnp.where(valid, rays.lossmult, 0.0)
  return rays.replace(
      lossmult=lossmult,
      near=xnp.where(valid, inear, 0.0),
      far=xnp.where(valid, ifar, 0.0),
  )
def ray_sphere_intersection(
    ray_o,
    ray_d,
    center,
    radius,
    xnp = np,
):
  """Calculates distance to hit a sphere for a ray.

  Args:
    ray_o: Ray origin (..., 3)
    ray_d: Ray direction (..., 3)
    center: Sphere center (..., 3)
    radius: Sphere radius (..., 1)
    xnp: Numpy or Jax module

  Returns:
    t_min, t_max, hit. When no hit is found, t_min = t_max = 0.
  """
  # Solve |o + t*d - c|^2 = r^2, a quadratic a*t^2 + b*t + c = 0 in t.
  offset = ray_o - center
  a = (ray_d**2).sum(axis=-1)
  b = 2 * (offset * ray_d).sum(axis=-1)
  c = (offset * offset).sum(axis=-1) - radius**2
  det = b**2 - 4.0 * a * c
  hit = (det >= 0) * (a > 0)
  # Sanitize the no-hit lanes so sqrt/divide stay well-defined there (jax
  # would otherwise propagate nans); their results are forced to 0 below.
  det = xnp.where(hit, det, 0.0)
  a = xnp.where(hit, a, 1.0)
  sqrt_det = xnp.sqrt(det)
  t_min = xnp.where(hit, (-b - sqrt_det) / (2.0 * a), 0.0)
  t_max = xnp.where(hit, (-b + sqrt_det) / (2.0 * a), 0.0)
  return t_min, t_max, hit
def gather_cameras(cameras, cam_idx, xnp=np):
  """Gathers relevant camera parameters for each ray."""
  pixtocams, camtoworlds, distortion_params = cameras[:3]
  # Per-camera arrays (ndim > 2) are indexed by cam_idx; a single shared
  # matrix is passed through unchanged.
  pixtocams_idx = pixtocams[cam_idx] if pixtocams.ndim > 2 else pixtocams
  camtoworlds_idx = (
      camtoworlds[cam_idx] if camtoworlds.ndim > 2 else camtoworlds
  )
  if distortion_params is None:
    distortion_params_idx = None
  else:
    # Scalar distortion coefficients are shared; arrays are per-camera.
    distortion_params_idx = {
        k: (v if xnp.isscalar(v) else v[cam_idx])
        for k, v in distortion_params.items()  # pytype: disable=attribute-error  # jax-ndarray
    }
  return (
      pixtocams_idx,
      camtoworlds_idx,
      distortion_params_idx,
  )
def cast_ray_batch(
    cameras,
    rays,
    camtype = ProjectionType.PERSPECTIVE,
    scene_bbox = None,
    xnp = np,
):
  """Maps from input cameras and uncast Rays batch to output cast Rays batch.

  `cameras` is a Tuple of five sets of camera parameters.
    pixtocams: 1 or N stacked [3, 3] inverse intrinsic matrices.
    camtoworlds: 1 or N stacked [3, 4] extrinsic pose matrices.
    distortion_params: optional, dict[str, float] containing pinhole model
      distortion parameters.
    pixtocam_ndc: optional, [3, 3] inverse intrinsic matrix for mapping to NDC.
    z_range: optional range of Z values

  Args:
    cameras: described above.
    rays: ray data including integer pixel coordinates and camera indices.
      These fields can be an arbitrary batch shape.
    camtype: camera_utils.ProjectionType, fisheye or perspective camera.
    scene_bbox: min and max corner of scene bounding box, if applicable.
    xnp: either numpy or jax.numpy.

  Returns:
    rays: Rays dataclass with computed 3D world space ray data.
  """
  # rays.cam_idx has shape [..., 1], remove this hanging dimension.
  cam_idx = rays.cam_idx[Ellipsis, 0]
  # Select the per-ray camera parameters for each ray's camera index.
  cameras_idx = gather_cameras(cameras, cam_idx, xnp=xnp)
  pixtocams, camtoworlds, distortion_params = cameras_idx
  pixtocam_ndc, z_range = cameras[3:5]
  # Compute rays from pixel coordinates.
  origins, directions, viewdirs, radii, imageplane = pixels_to_rays(
      rays.pixels[Ellipsis, 0],
      rays.pixels[Ellipsis, 1],
      pixtocams,
      camtoworlds,
      distortion_params=distortion_params,
      pixtocam_ndc=pixtocam_ndc,
      camtype=camtype,
      xnp=xnp,
  )
  if z_range is not None:
    # Clip rays to the [z_min, z_max] slab: advance each origin to the entry
    # plane and rescale its direction to span the slab.
    t_min, t_max = rays_planes_intersection(
        z_range[0], z_range[1], origins, directions, xnp
    )
    t_min = xnp.broadcast_to(t_min[Ellipsis, None], origins.shape)
    t_max = xnp.broadcast_to(t_max[Ellipsis, None], origins.shape)
    # NOTE(review): despite its name, hit_mask is True where the ray MISSES
    # the slab (t_max < t_min); such rays are left unmodified below.
    hit_mask = t_max < t_min
    origins = xnp.where(hit_mask, origins, origins + directions * t_min)
    directions = xnp.where(hit_mask, directions, directions * (t_max - t_min))
  # Preserve all metadata and add the cast rays.
  rays = rays.replace(
      origins=origins,
      directions=directions,
      viewdirs=viewdirs,
      radii=radii,
      imageplane=imageplane,
  )
  if scene_bbox is not None:
    # Restrict near/far (and zero lossmult) to the scene bounding box.
    rays = modify_rays_with_bbox(rays, scene_bbox, xnp=xnp)
  return rays
def cast_general_rays(
    camtoworld,
    pixtocam,
    height,
    width,
    near,
    far,
    distortion_params = None,
    pixtocam_ndc = None,
    camtype = ProjectionType.PERSPECTIVE,
    xnp = np,
):
  """Wrapper for generating a general ray batch."""
  # Cast one ray per pixel of a (height, width) image.
  pix_x_int, pix_y_int = pixel_coordinates(width, height, xnp=xnp)
  ray_args = pixels_to_rays(
      pix_x_int,
      pix_y_int,
      pixtocam,
      camtoworld,
      distortion_params=distortion_params,
      pixtocam_ndc=pixtocam_ndc,
      camtype=camtype,
      xnp=xnp,
  )

  # near/far/cam_idx are scalars shared by every ray; broadcast them to the
  # image shape with a trailing singleton channel.
  def per_pixel(value):
    return xnp.broadcast_to(value, pix_x_int.shape)[Ellipsis, None]

  return utils.Rays(
      *ray_args,
      pixels=xnp.stack([pix_x_int, pix_y_int], axis=-1),
      near=per_pixel(near),
      far=per_pixel(far),
      cam_idx=per_pixel(0),
  )
def cast_pinhole_rays(
    camtoworld,
    height,
    width,
    focal,
    near,
    far,
    xnp = np,
):
  """Generates a pinhole camera ray batch (w/o distortion)."""
  # A perfect pinhole: centered principal point, equal focal lengths.
  pixtocam = get_pixtocam(focal, width, height, xnp=xnp)
  return cast_general_rays(
      camtoworld,
      pixtocam,
      height,
      width,
      near,
      far,
      camtype=ProjectionType.PERSPECTIVE,
      xnp=xnp,
  )
def cast_spherical_rays(
    camtoworld,
    height,
    width,
    near,
    far,
    xnp,
):
  """Generates a spherical camera ray batch."""
  # "Intrinsics" mapping pixels to angles: a full 2*pi of azimuth across the
  # image width and pi of elevation across the height.
  pixtocam = xnp.diag(xnp.array([2.0 * np.pi / width, np.pi / height, 1.0]))
  return cast_general_rays(
      camtoworld,
      pixtocam,
      height,
      width,
      near,
      far,
      camtype=ProjectionType.PANORAMIC,
      xnp=xnp,
  )
def jax_camera_from_tuple(
    camera_tuple,
    image_size,
    projection_type,
):
  """Converts a camera tuple into a JAX camera.

  Args:
    camera_tuple: A tuple containing `inv_intrinsics`, the inverse intrinsics
      matrix; `extrinsics`, the camera to world matrix; and `distortion_params`,
      the dictionary of distortion parameters.
    image_size: An array containing the (width, height) image size.
    projection_type: The projection type of the camera.

  Returns:
    A JAX camera class instance encoding the same camera information.

  Raises:
    ValueError: if `projection_type` is neither perspective nor fisheye.
  """
  if projection_type.value not in {
      ProjectionType.PERSPECTIVE.value,
      ProjectionType.FISHEYE.value,
  }:
    raise ValueError(f'Projection {projection_type} is not supported.')
  inv_intrinsics, extrinsic, distortion_params = camera_tuple[:3]
  # Recover focal length, principal point and pixel aspect ratio from the
  # (forward) intrinsic matrix.
  intrinsics = jnp.linalg.inv(inv_intrinsics)
  focal_length = intrinsics[0, 0]
  principal_point = intrinsics[:2, 2]
  pixel_aspect_ratio = intrinsics[1, 1] / intrinsics[0, 0]
  # Pack whichever distortion coefficients are present into arrays.
  radial_distortion = None
  tangential_distortion = None
  if distortion_params is not None:
    if (
        'k1' in distortion_params
        and 'k2' in distortion_params
        and 'k3' in distortion_params
    ):
      # k4 is optional and included only when present.
      radial_keys = ['k1', 'k2', 'k3', 'k4']
      radial_distortion = jnp.array(
          [distortion_params[k] for k in radial_keys if k in distortion_params]
      )
    if 'p1' in distortion_params and 'p2' in distortion_params:
      tangential_distortion = jnp.array([
          distortion_params['p1'],
          distortion_params['p2'],
      ])
  # Promote the 3x4 camera-to-world matrix to a 4x4 homogeneous transform.
  extrinsic = jnp.concatenate(
      [extrinsic[:3, :4], jnp.array([[0, 0, 0, 1]])], axis=0
  )
  # Convert to OpenCV coordinates.
  extrinsic = math.matmul(extrinsic, jnp.diag(jnp.array([1, -1, -1, 1])))
  world_to_cam = jnp.linalg.inv(extrinsic)
  camera = jaxcam.Camera.create(
      focal_length=focal_length,
      pixel_aspect_ratio=pixel_aspect_ratio,
      radial_distortion=radial_distortion,
      tangential_distortion=tangential_distortion,
      principal_point=principal_point,
      image_size=image_size,
      is_fisheye=(projection_type.value == ProjectionType.FISHEYE.value),
  )
  camera = jaxcam.update_world_to_camera_matrix(camera, world_to_cam)
  return camera
def tuple_from_jax_camera(
    jax_camera,
):
  """Converts a JAX camera into a camera tuple."""
  # Rebuild the 3x3 intrinsic matrix from focal length, skew, aspect ratio
  # and principal point, then invert it for the pix-to-cam convention.
  focal_x = jax_camera.focal_length
  focal_y = jax_camera.focal_length * jax_camera.pixel_aspect_ratio
  intrinsic = jnp.block([
      [focal_x, jax_camera.skew, jax_camera.principal_point[0]],
      [0, focal_y, jax_camera.principal_point[1]],
      [0, 0, 1],
  ])
  pix_to_cam = jnp.linalg.inv(intrinsic)
  # Camera-to-world extrinsics, converted back from OpenCV to OpenGL axes
  # and truncated to the 3x4 convention used elsewhere in this file.
  world_to_cam = jaxcam.world_to_camera_matrix(jax_camera)
  cam_to_world = jnp.linalg.inv(world_to_cam)
  cam_to_world = math.matmul(
      cam_to_world, jnp.diag(jnp.array([1, -1, -1, 1]))
  )[:3, :]
  # Collect whichever distortion coefficients the camera carries.
  distortion_params = None
  if jax_camera.has_distortion:
    distortion_params = {}
    if jax_camera.has_radial_distortion:
      for i, key in enumerate(['k1', 'k2', 'k3', 'k4']):
        distortion_params[key] = jax_camera.radial_distortion[i]
    if jax_camera.has_tangential_distortion:
      for i, key in enumerate(['p1', 'p2']):
        distortion_params[key] = jax_camera.tangential_distortion[i]
  return pix_to_cam, cam_to_world, distortion_params
def rotation_distance(
    rotation_mat1, rotation_mat2
):
  """Computes the angle between two rotation matrices in degrees.

  Args:
    rotation_mat1: (3, 3) The first batch of rotation matrix.
    rotation_mat2: (3, 3) The second batch of rotation matrix.

  Returns:
    The angle in degrees between 0 and 180.
  """
  # Compare the rotations via their axis-angle (so3 log) representations.
  axis_angle1 = rigid_body.log_so3(rotation_mat1)
  axis_angle2 = rigid_body.log_so3(rotation_mat2)
  angle_deg = jnp.degrees(
      jnp.linalg.norm(axis_angle1 - axis_angle2, axis=-1)
  )
  # Fold angles above 180 degrees back into [0, 180].
  return jnp.where(  # pytype: disable=bad-return-type  # jnp-type
      angle_deg < 180,
      angle_deg,
      360 - angle_deg,
  )
def compute_camera_metrics(
    cameras_gt, cameras_pred
):
  """Computes the metrics between two cameras.

  Args:
    cameras_gt: ground-truth cameras.
    cameras_pred: predicted cameras to compare against the ground truth.

  Returns:
    Dict of per-camera absolute-error arrays: focal length, position,
    per-axis translation, orientation (degrees), principal point, and any
    radial/tangential distortion coefficients present in the prediction.
  """
  orientation_diffs = jax.vmap(rotation_distance)(
      cameras_pred.orientation, cameras_gt.orientation
  )
  translation_diffs = jnp.abs(cameras_pred.translation - cameras_gt.translation)
  diffs = {
      'focal_length': jnp.abs(
          cameras_pred.focal_length - cameras_gt.focal_length
      ),
      'position': jnp.linalg.norm(
          cameras_pred.position - cameras_gt.position, axis=-1
      ),
      'translation_x': translation_diffs[Ellipsis, 0],
      'translation_y': translation_diffs[Ellipsis, 1],
      'translation_z': translation_diffs[Ellipsis, 2],
      'orientation': jnp.abs(orientation_diffs),
      'principal_points': jnp.linalg.norm(
          cameras_pred.principal_point - cameras_gt.principal_point,
          axis=-1,
      ),
  }
  if cameras_pred.radial_distortion is not None:
    # Compare against zeros when the ground truth has no radial distortion.
    radial_distortion_gt = jnp.zeros(4)
    if cameras_gt.has_radial_distortion:
      radial_distortion_gt = cameras_gt.radial_distortion
    for i in range(cameras_pred.radial_distortion.shape[-1]):
      diffs[f'radial_distortion_{i}'] = jnp.abs(
          cameras_pred.radial_distortion[Ellipsis, i] - radial_distortion_gt[Ellipsis, i]
      )
  if cameras_pred.tangential_distortion is not None:
    # Compare against zeros when the ground truth has no tangential distortion.
    tangential_distortion_gt = jnp.zeros(2)
    if cameras_gt.has_tangential_distortion:
      # Bug fix: this previously read cameras_gt.radial_distortion, which
      # compared tangential predictions against the wrong coefficients.
      tangential_distortion_gt = cameras_gt.tangential_distortion
    for i in range(cameras_pred.tangential_distortion.shape[-1]):
      diffs[f'tangential_distortion_{i}'] = jnp.abs(
          cameras_pred.tangential_distortion[Ellipsis, i]
          - tangential_distortion_gt[Ellipsis, i]
      )
  return diffs
def perturb_cameras(
    rng,
    cameras,
    sigma_look_at,
    sigma_position,
    sigma_focal_length = 0.0,
    sigma_dolly_z = 0.0,
    single_dolly = True,
    dolly_use_average = False,
):
  """Randomly perturb camera positions and orientations.

  For position the 3D coordinate is simply shifted according to
  an offset vector. For the orientation an offset angle is calculated based
  on spherical coordinates. The underlying offsets are randomly chosen using
  normal distributions based on the input sigmas.

  Args:
    rng: A PRNGKey.
    cameras: Cameras to perturb.
    sigma_look_at: Strength of look-at position offset. Higher means stronger.
    sigma_position: Strength of position offset. Higher means stronger.
    sigma_focal_length: Strength of focal length zoom z-axis scale. Higher means
      stronger. This is essentially a percentage (0.2 means 20%).
    sigma_dolly_z: Strength of Dolly zoom z-axis scale. Higher means stronger.
      This is essentially a percentage (0.2 means 20%).
    single_dolly: If True, only have a single perturbation for dolly zoom.
    dolly_use_average: If True, set the dolly z to the average of the input
      instead of perturbing.

  Returns:
    Perturbed cameras.
  """
  # Dolly zoom.
  if sigma_dolly_z > 0.0 or dolly_use_average:
    # Turn our "percentage" into a log scale. This is equivalent to having
    # minval = log(1+s) and maxval = log(1/(1+s)) but sampling from a normal
    # distribution.
    log_sigma_dolly_z = jnp.log1p(sigma_dolly_z)
    rng, dolly_key = random.split(rng)
    translation = cameras.translation
    x, y, z = jnp.split(translation, 3, -1)
    if dolly_use_average:
      # Deterministic: every camera gets the batch-mean z.
      new_z = jnp.broadcast_to(z.mean(axis=0, keepdims=True), z.shape)
    elif single_dolly:
      # One shared log-normal scale for all cameras.
      new_z = z * jnp.exp(random.normal(dolly_key, (1,)) * log_sigma_dolly_z)
    else:
      # Independent log-normal scale per camera.
      new_z = z * jnp.exp(random.normal(dolly_key, z.shape) * log_sigma_dolly_z)
    # Scale focal length proportionally to the z change (dolly-zoom pairing).
    new_focal_length = cameras.focal_length * (new_z / z).squeeze(-1)
    new_translation = jnp.concatenate([x, y, new_z], axis=-1)
    # Recover world position as -R^T t from the updated translation.
    new_position = jax.vmap(spin_math.matmul)(
        -cameras.orientation.swapaxes(-1, -2), new_translation
    )
    cameras = cameras.replace(
        position=new_position, focal_length=new_focal_length
    )
  # Perturb focal length (multiplicative log-normal noise).
  rng, key = random.split(rng)
  new_focal_length = cameras.focal_length * jnp.exp(
      random.normal(key, cameras.shape) * jnp.log1p(sigma_focal_length)
  )
  cameras = cameras.replace(focal_length=new_focal_length)
  camera_positions = cameras.position
  # Up vector taken from the (negated) second row of the orientation matrix.
  up_vectors = -cameras.orientation[Ellipsis, 1, :]
  # Perturb camera positions: random unit direction scaled by sigma_position.
  rng, key = random.split(rng)
  perturb_dir = spin_math.normalize(random.normal(key, camera_positions.shape))
  camera_positions_perturbed = np.array(
      sigma_position * perturb_dir + camera_positions
  )
  # Perturb look-at point: closest point on each optical axis to the origin.
  look_at_positions = jax.vmap(geometry.line_closest_point)(
      cameras.position, cameras.optical_axis, jnp.zeros_like(cameras.position)
  )
  rng, key = random.split(rng)
  # NOTE(review): uses math.normalize here but spin_math.normalize above —
  # confirm both helpers are the same normalization.
  perturb_dir = math.normalize(random.normal(key, camera_positions.shape))
  look_at_positions_perturbed = np.array(
      sigma_look_at * perturb_dir + look_at_positions
  )
  # Apply the look-at function to rebuild each camera from eye/center/up.
  new_cameras = []
  for camera, camera_position, look_at_position, up_vector in zip(
      cameras,
      camera_positions_perturbed,
      look_at_positions_perturbed,
      up_vectors,
  ):
    new_cameras.append(
        jaxcam.look_at(
            camera=camera,
            eye=camera_position,
            center=look_at_position,
            world_up=up_vector,
        )
    )
  cameras = jaxcam.concatenate(new_cameras)
  return cameras
|
evocodebench_data_12
|
# formatting done while cleaning
def connect(prev, curr):
    """Join two text fragments, merging hyphenated line breaks.

    A trailing '-' on `prev` is dropped and the fragments are fused;
    otherwise exactly one separating space is ensured between them.
    """
    if prev.endswith("-"):
        return prev[:-1] + curr
    separator = "" if prev.endswith(" ") else " "
    return prev + separator + curr
def fix_mixedcase_words(word):
    """Normalize the casing of a mixed-case word.

    All-upper, all-lower, and words shorter than 2 characters are returned
    unchanged. Titled words (e.g. 'Hello') keep their capitalization;
    otherwise the word is forced all-lower or all-upper based on the case
    of its first letter.
    """
    # BUG FIX: was `len(word) < 1`, which let 1-char uncased words such as
    # "5" or "-" fall through to word[1] below and raise IndexError.
    if len(word) < 2 or word.isupper() or word.islower():
        return word
    if word[0].isupper() and word[1].islower():
        # Just a titled word, e.g. 'Hello'.
        return word.capitalize()
    # e.g. return HELLO if HEllo else return hello if heLlo
    return word.lower() if word[0].islower() else word.upper()
# formatting done after cleaning
|
evocodebench_data_13
|
from typing import Any, Callable, Dict, Iterable, List
import numpy as np
from pydantic import fields
# ----- validators -----
def is_odd(cls: type, v: int, field: fields.ModelField) -> int:
    """Validate that a kernel-size value is odd.

    Args:
        cls (type): Class type.
        v (int): Value to check.
        field (fields.ModelField): Field descriptor.

    Raises:
        ValueError: If `v` is even.

    Returns:
        int: `v` sent for further processing.
    """
    remainder = v % 2
    if remainder == 0:
        raise ValueError(f"{cls.__name__}: {field.name} must be odd numbers.")
    return v
def is_binary(cls: type, v: np.ndarray, field: fields.ModelField) -> np.ndarray:
    """Validate that an array contains only boolean values.

    Args:
        cls (type): Class type.
        v (np.ndarray): Value to check.
        field (fields.ModelField): Field descriptor.

    Raises:
        ValueError: If the array's dtype is not bool.

    Returns:
        np.ndarray: `v` sent for further processing.
    """
    if v.dtype != np.bool_:
        raise ValueError(f"{cls.__name__}: {field.name} must be binary. got dtype {v.dtype}")
    return v
def is_list_of_points(cls: type, v: np.ndarray, field: fields.ModelField) -> np.ndarray:
    """Validate that an array is a list of 2D points, i.e. has shape (_, 2).

    Args:
        cls (type): Class type.
        v (np.ndarray): Value to check.
        field (fields.ModelField): Field descriptor.

    Raises:
        ValueError: If the array is not of shape (_, 2).

    Returns:
        np.ndarray: `v` sent for further processing.
    """
    if v.ndim != 2 or v.shape[-1] != 2:
        raise ValueError(f"{cls.__name__}: {field.name} must have shape (_, 2).")
    return v
def is_not_empty(cls: type, v: List[Any], field: fields.ModelField) -> List[Any]:
    """Validate that a list is not empty.

    Args:
        cls (type): Class type.
        v (List[Any]): Value to check.
        field (fields.ModelField): Field descriptor.

    Raises:
        ValueError: If the list is empty.

    Returns:
        List[Any]: `v` sent for further processing.
    """
    if not v:
        raise ValueError(f"{cls.__name__}: {field.name} list cannot be empty.")
    return v
def is_not_zero_sum(cls: type, v: Any, field: fields.ModelField) -> Any:
    """Validate that the values do not sum to zero.

    Args:
        cls (type): Class type.
        v (Any): Value to check.
        field (fields.ModelField): Field descriptor.

    Raises:
        ValueError: If `v` sums to 0.

    Returns:
        Any: `v` sent for further processing.
    """
    total = np.sum(v)
    if total == 0:
        raise ValueError(f"{cls.__name__}: {field.name} sum cannot be zero.")
    return v
def are_all_positive(cls: type, v: Any, field: fields.ModelField) -> Any:
    """Validate that a scalar, or every element of an iterable, is >= 0.

    Args:
        cls (type): Class type.
        v (Any): Value to check.
        field (fields.ModelField): Field descriptor.

    Raises:
        ValueError: If any value is negative.

    Returns:
        Any: `v` sent for further processing.
    """
    if isinstance(v, Iterable):
        flags = np.array([value >= 0 for value in v])
        if not flags.all():
            raise ValueError(f"{cls.__name__}: all {field.name} must be positive. Received {v}")
    elif v < 0.0:
        raise ValueError(f"{cls.__name__}: {field.name} must be positive. Received {v}")
    return v
def to_dtype_float32(cls: type, v: np.ndarray, field: fields.ModelField) -> np.ndarray:
    """Cast an array to dtype np.float32.

    Args:
        cls (type): Class type.
        v (np.ndarray): Value to convert.
        field (fields.ModelField): Field descriptor.

    Returns:
        np.ndarray: the converted copy of `v`.
    """
    converted = v.astype(np.float32)
    return converted
# ----- root_validators -----
def is_valid_bbox(cls: type, values: Dict[str, float]) -> Dict[str, float]:
    """Check that the bounding box has positive width and height."""
    x_ok = values["x_min"] < values["x_max"]
    y_ok = values["y_min"] < values["y_max"]
    if not (x_ok and y_ok):
        raise ValueError(
            f'{cls.__name__}: invalid bbox. x_min={values["x_min"]}, x_max={values["x_max"]},'
            f' y_min={values["y_min"]}, y_max={values["y_max"]}'
        )
    return values
# ----- parametrized validators -----
def is_array_n_dimensions(nb_dimensions: int) -> Callable:
    """Create a pydantic validator checking that an array is n-dimensional.

    Args:
        nb_dimensions (int): number of dimensions the array must have.

    Returns:
        Callable: the validator.
    """
    def validator(cls: type, v: np.ndarray, field: fields.ModelField) -> np.ndarray:
        """Check if the array has the right number of dimensions."""
        # A (0,)-shaped array is also accepted when zero dimensions are asked.
        empty_ok = v.shape == (0,) and nb_dimensions == 0
        if len(v.shape) != nb_dimensions and not empty_ok:
            raise ValueError(
                f"{cls.__name__}: wrong number of dimensions for {field.name}. "
                f"Expected {nb_dimensions}, got {len(v.shape)}"
            )
        return v
    return validator
# ----- parametrized root_validators -----
def are_lengths_equal(field1: str, field2: str) -> Callable:
    """Create a pydantic root validator checking two fields have equal length.

    Args:
        field1 (str): name of the first field.
        field2 (str): name of the second field.

    Returns:
        Callable: the validator.
    """
    def __root_validator(cls: type, values: Dict[str, List[Any]]) -> Dict[str, List[Any]]:
        """Check if len(field1) equals len(field2)."""
        len1, len2 = len(values[field1]), len(values[field2])
        if len1 != len2:
            raise ValueError(
                f"{cls.__name__}: {field1} and {field2} length mismatch, "
                f"resp. {len1} and {len2}"
            )
        return values
    return __root_validator
def are_shapes_equal(field1: str, field2: str) -> Callable:
    """Create a pydantic root validator checking two fields have equal shape.

    Args:
        field1 (str): name of the first field.
        field2 (str): name of the second field.

    Returns:
        Callable: the validator.
    """
    def __root_validator(cls: type, values: Dict[str, np.ndarray]) -> Dict[str, np.ndarray]:
        """Check if field1.shape equals field2.shape."""
        shape1, shape2 = values[field1].shape, values[field2].shape
        if shape1 != shape2:
            raise ValueError(f"{cls.__name__}: {field1} and {field2} shape mismatch.")
        return values
    return __root_validator
def are_all_shapes_equal(field1: str, field2: str) -> Callable:
    """Create a pydantic root validator for two equal-shape lists of arrays.

    The returned validator checks that the two list fields have the same
    length and that their elements have identical shapes, pairwise.

    Args:
        field1 (str): name of the first field.
        field2 (str): name of the second field.

    Returns:
        Callable: the validator.
    """
    def __root_validator(cls: type, values: Dict[str, np.ndarray]) -> Dict[str, np.ndarray]:
        """Check if len(field1) equals len(field2) and if every element have the same shape."""
        shapes_field_1 = [element.shape for element in values[field1]]
        shapes_field_2 = [element.shape for element in values[field2]]
        mismatch = len(values[field1]) != len(values[field2])
        if mismatch or shapes_field_1 != shapes_field_2:
            raise ValueError(
                f"{cls.__name__}: {field1} and {field2} shape mismatch, resp. {shapes_field_1} and {shapes_field_2}."
            )
        return values
    return __root_validator
|
evocodebench_data_14
|
# coding=utf-8
# Copyright 2023 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tools for manipulating coordinate spaces and distances along rays."""
from internal import geopoly
from internal import math
import jax
from jax import random
import jax.numpy as jnp
import numpy as np
def contract(x):
  """Contracts points towards the origin (Eq 10 of arxiv.org/abs/2111.12077)."""
  # Clamping the squared norm to 1 makes the scale exactly 1 inside |x| < 1,
  # so points in the unit ball pass through unchanged.
  sq_norm = jnp.maximum(1, jnp.sum(x**2, axis=-1, keepdims=True))
  scale = (2 * jnp.sqrt(sq_norm) - 1) / sq_norm
  return scale * x
def inv_contract(z):
  """The inverse of contract()."""
  # Clamping the squared norm to 1 gives an inverse scale of exactly 1
  # inside |z| < 1, matching contract()'s identity behavior there.
  sq_norm = jnp.maximum(1, jnp.sum(z**2, axis=-1, keepdims=True))
  denom = 2 * jnp.sqrt(sq_norm) - sq_norm
  return z / denom
def track_linearize(fn, mean, cov):
  """Apply function `fn` to a set of means and covariances, ala a Kalman filter.

  A Gaussian parameterized by `mean` and `cov` is pushed through `fn` by
  linearizing `fn` around `mean` and using Covar[Ax + y] = A(Covar[x])A^T
  (see https://cs.nyu.edu/~roweis/notes/gaussid.pdf for details).

  Args:
    fn: A function that can be applied to `mean`.
    mean: a tensor of Gaussian means, where the last axis is the dimension.
    cov: a tensor of covariances, where the last two axes are the dimensions.

  Returns:
    fn_mean: the transformed means.
    fn_cov: the transformed covariances.
  """
  if len(cov.shape) != len(mean.shape) + 1:
    raise ValueError('cov must be non-diagonal')
  fn_mean, jvp = jax.linearize(fn, mean)
  # J cov J^T, computed by vmapping the JVP over the two trailing axes.
  apply_jac = jax.vmap(jvp, -1, -2)
  return fn_mean, apply_jac(apply_jac(cov))
def track_isotropic(fn, mean, scale):
  """Apply function `fn` to a set of means and scales, ala a Kalman filter.

  This is the isotropic or scalar equivalent of track_linearize, as we're still
  linearizing a function and tracking a Gaussian through it, but the input and
  output Gaussians are all isotropic and are only represented with a single
  `scale` value (where `scale**2` is the variance of the Gaussian).

  Args:
    fn: A function that can be applied to `mean`.
    mean: a tensor of Gaussian means, where the last axis is the dimension.
    scale: a tensor of scales, with the same shape as means[..., -1].

  Returns:
    fn_mean: the transformed means.
    fn_scale: the transformed scales.
  """
  # NOTE(review): this check dereferences scale.shape, so a None scale would
  # raise here and the `scale is not None` branch below looks unreachable
  # unless callers filter None earlier — confirm.
  if mean.shape[:-1] != scale.shape:
    raise ValueError(
        f'mean.shape[:-1] {mean.shape}[:-1] != scale.shape {scale.shape}.'
    )
  d = mean.shape[-1]
  fn_mean, lin_fn = jax.linearize(fn, mean)
  if scale is not None:
    # Compute the Jacobian of fn function at the locations of each mean,
    # by pushing the d basis vectors (broadcast identity) through the JVP.
    jac = jax.vmap(lin_fn, in_axes=-1, out_axes=-1)(
        jnp.broadcast_to(jnp.eye(d), mean.shape + (d,))
    )
    # The cube root of the determinant of the Jacobian is the geometric mean
    # of the eigenvalues of the Jacobian, which gives us the isotropic scaling
    # implied by `fn` at each mean that `scale` should be multiplied by.
    eps = jnp.finfo(jnp.float32).tiny  # Guard against an inf gradient at 0.
    abs_det = jnp.maximum(eps, jnp.abs(jnp.linalg.det(jac)))
    # Special case d == 3 for speed's sake.
    fn_scale = scale * (jnp.cbrt(abs_det) if d == 3 else abs_det ** (1 / d))
  else:
    fn_scale = None
  return fn_mean, fn_scale
def contract3_isoscale(x):
  """A fast version of track_isotropic(contract, *)'s scaling for 3D inputs."""
  if x.shape[-1] != 3:
    raise ValueError(f'Inputs must be 3D, are {x.shape[-1]}D.')
  sq_norm = jnp.maximum(1, jnp.sum(x**2, axis=-1))
  # Computed in log space; equivalent to cbrt((2*sqrt(sq_norm)-1)**2)/sq_norm.
  log_scale = (2 / 3) * jnp.log(2 * jnp.sqrt(sq_norm) - 1) - jnp.log(sq_norm)
  return jnp.exp(log_scale)
def construct_ray_warps(fn, t_near, t_far, *, fn_inv=None):
  """Construct a bijection between metric distances and normalized distances.

  See the text around Equation 11 in https://arxiv.org/abs/2111.12077 for a
  detailed explanation.

  Args:
    fn: the function to ray distances.
    t_near: a tensor of near-plane distances.
    t_far: a tensor of far-plane distances.
    fn_inv: Optional, if not None then it's used as the inverse of fn().

  Returns:
    t_to_s: a function that maps distances to normalized distances in [0, 1].
    s_to_t: the inverse of t_to_s.
  """
  if fn is None:
    fwd = lambda x: x
    inv = lambda x: x
  else:
    fwd = fn
    if fn_inv is not None:
      inv = fn_inv
    else:
      # A simple mapping from some functions to their inverse.
      inv = {
          'reciprocal': jnp.reciprocal,
          'log': jnp.exp,
          'exp': jnp.log,
          'sqrt': jnp.square,
          'square': jnp.sqrt,
      }[fn.__name__]
  warped_near, warped_far = fwd(t_near), fwd(t_far)
  # Forcibly clip t to the range of valid values, to guard against inf's.
  clip = lambda t: jnp.clip(t, t_near, t_far)
  t_to_s = lambda t: (fwd(clip(t)) - warped_near) / (warped_far - warped_near)
  s_to_t = lambda s: clip(inv(s * warped_far + (1 - s) * warped_near))
  return t_to_s, s_to_t
def expected_sin(mean, var):
  """Compute the mean of sin(x), x ~ N(mean, var)."""
  # E[sin(x)] = exp(-var/2) * sin(mean): large variance damps the value.
  attenuation = jnp.exp(-0.5 * var)
  return attenuation * math.safe_sin(mean)
def integrated_pos_enc(mean, var, min_deg, max_deg):
  """Encode `x` with sinusoids scaled by 2^[min_deg, max_deg).

  Args:
    mean: tensor, the mean coordinates to be encoded
    var: tensor, the variance of the coordinates to be encoded.
    min_deg: int, the min degree of the encoding.
    max_deg: int, the max degree of the encoding.

  Returns:
    encoded: jnp.ndarray, encoded variables.
  """
  scales = 2.0 ** jnp.arange(min_deg, max_deg)
  out_shape = mean.shape[:-1] + (-1,)
  mu = jnp.reshape(mean[Ellipsis, None, :] * scales[:, None], out_shape)
  sigma2 = jnp.reshape(var[Ellipsis, None, :] * scales[:, None] ** 2, out_shape)
  # sin at phase 0 and phase pi/2 (i.e. cos) share the same variance.
  return expected_sin(
      jnp.concatenate([mu, mu + 0.5 * jnp.pi], axis=-1),
      jnp.concatenate([sigma2, sigma2], axis=-1),
  )
def lift_and_diagonalize(mean, cov, basis):
  """Project `mean` and `cov` onto basis and diagonalize the projected cov."""
  projected_mean = math.matmul(mean, basis)
  # Only the diagonal of B^T cov B is needed, computed without forming it.
  projected_cov_diag = jnp.sum(basis * math.matmul(cov, basis), axis=-2)
  return projected_mean, projected_cov_diag
def pos_enc(x, min_deg, max_deg, append_identity=True):
  """The positional encoding used by the original NeRF paper."""
  scales = 2.0 ** jnp.arange(min_deg, max_deg)
  flat_shape = x.shape[:-1] + (-1,)
  scaled = jnp.reshape(x[Ellipsis, None, :] * scales[:, None], flat_shape)
  # sin at the original phase and at +pi/2 (i.e. cos).
  # Note that we're not using safe_sin, unlike IPE.
  features = jnp.sin(
      jnp.concatenate([scaled, scaled + 0.5 * jnp.pi], axis=-1)
  )
  if append_identity:
    features = jnp.concatenate([x, features], axis=-1)
  return features
def sqrtm(mat, return_eigs=False):
  """Take the matrix square root of a PSD matrix [..., d, d]."""
  eigvec, eigval = jax.lax.linalg.eigh(
      mat, symmetrize_input=False, sort_eigenvalues=False
  )
  # M^(1/2) = V diag(sqrt(lambda)) V^T.
  root = math.safe_sqrt(eigval)[Ellipsis, None, :]
  result = math.matmul(eigvec * root, jnp.moveaxis(eigvec, -2, -1))
  if return_eigs:
    return result, (eigvec, eigval)
  return result
def isotropize(cov, mode='accurate'):
  """Turn covariances into isotropic covariances with the same determinant."""
  d = cov.shape[-1]
  if d == 1:
    # 1x1 covariances are already isotropic.
    return cov
  if mode == 'fast':
    det = jnp.linalg.det(cov)
    diag_val = det ** (1 / d)
    bad = (det <= jnp.finfo(jnp.float32).tiny) | ~jnp.isfinite(det)
  elif mode == 'accurate':
    # slogdet is more numerically stable than det for small determinants.
    log_det = jnp.linalg.slogdet(cov)[1]
    diag_val = jnp.exp(log_det / d)
    bad = ~jnp.isfinite(log_det)
  else:
    raise ValueError(f'mode={mode} not implemented.')
  scaled_eye = jnp.eye(d) * diag_val[Ellipsis, None, None]
  # Guard against NaN outputs when `det` is super small. Note that this does
  # not guard against NaN gradients!
  return jnp.where(bad[Ellipsis, None, None], jnp.zeros_like(cov), scaled_eye)
def construct_perp_basis(directions):
  """Construct a perpendicular basis for each 3-vector in `directions`."""
  if directions.shape[-1] != 3:
    raise ValueError(f'directions must be 3D, but is {directions.shape[-1]}D')
  unit = lambda z: z / jnp.sqrt(jnp.sum(z**2, axis=-1, keepdims=True))
  # Cross with the arbitrary vector [0, 0, 1] to get a perpendicular vector.
  cross_zhat = jnp.cross(directions, np.array([0.0, 0.0, 1.0]))
  # In the rare case that `directions` is nearly parallel to [0, 0, 1], use an
  # alternate cross-product with [1, 1, 1] instead.
  cross_ones = jnp.cross(directions, np.array([1.0, 1.0, 1.0]))
  degenerate = jnp.all(jnp.abs(cross_zhat) < np.finfo(np.float32).eps, axis=-1)
  first = jnp.where(degenerate[Ellipsis, None], cross_ones, cross_zhat)
  # Crossing `directions` with `first` yields the third basis vector.
  second = jnp.cross(directions, first)
  return unit(first), unit(second)
def hexify(rng, *, origins, directions, radii, tdist):
  """Produce hexagon-shaped samples from ray segments.

  Args:
    rng: A PRNGKey, or None for a deterministic sampling pattern.
    origins: ray origins, last axis of size 3.
    directions: ray directions, last axis of size 3.
    radii: per-ray radii scaling the perpendicular sample offsets.
    tdist: per-ray segment boundaries along each ray.

  Returns:
    A (control, perp_mag) tuple: the multisample control points in world
    coordinates, and the perpendicular offset magnitude of each sample.
  """
  # Construct a base set of angles, by linspacing [0, 2pi] in a specific order.
  # This is one of two orderings of angles that doesn't induce any anisotropy
  # into the sample covariance of the multisample coordinates. Any rotation and
  # mirroring along the z-axis of this ordering is also valid.
  # There exists one alternative valid ordering, which is [0, 3, 2, 5, 4, 1].
  # This seems to work less well though likely because of the strong correlation
  # between adjacent angles.
  thetas = (np.pi / 3) * np.array([0, 2, 4, 3, 5, 1])
  # Lift the angles to the size of the rays: (..., num_segments, 6).
  sz = tdist.shape[:-1] + (tdist.shape[-1] - 1, len(thetas))
  thetas = jnp.broadcast_to(thetas, sz)
  if rng is not None:
    # Randomly reverse the order of half of the hexes.
    key, rng = random.split(rng)
    flip = random.bernoulli(key, shape=sz[:-1])
    thetas = jnp.where(flip[Ellipsis, None], thetas[Ellipsis, ::-1], thetas)
    # Rotate each hex by some random amount.
    key, rng = random.split(rng)
    thetas += (2 * jnp.pi) * random.uniform(key, shape=sz[:-1])[Ellipsis, None]
  else:
    # If we're deterministic, flip and shift every other hex by 30 degrees.
    flip = jnp.arange(thetas.shape[-2]) % 2
    thetas = jnp.where(flip[Ellipsis, None], thetas[Ellipsis, ::-1], thetas)
    thetas += (flip * jnp.pi / 6)[Ellipsis, None]
  # TODO(barron): Plumb through the dx/dy frame for the original ray in the
  # image plane, to avoid the need of this.
  perp_axis1, perp_axis2 = construct_perp_basis(directions)
  # Grab each t-interval's midpoint and half-width.
  t0, t1 = tdist[Ellipsis, :-1], tdist[Ellipsis, 1:]
  s = (t0 + t1) / 2
  d = (t1 - t0) / 2
  # Compute the length along the ray for each multisample, using mip-NeRF math.
  cz = t0[Ellipsis, None] + math.safe_div(d, (d**2 + 3 * s**2))[Ellipsis, None] * (
      (t1**2 + 2 * s**2)[Ellipsis, None]
      + (3 / np.sqrt(7))
      * (np.arange(6) * (2 / 5) - 1)
      * math.safe_sqrt(((d**2 - s**2) ** 2 + 4 * s**4))[Ellipsis, None]
  )
  # Compute the offset from the ray for each multisample.
  perp_mag = jnp.sqrt(0.5) * radii[Ellipsis, None, :] * cz
  # Go from ray coordinate to world coordinates.
  cx = perp_mag * jnp.cos(thetas)
  cy = perp_mag * jnp.sin(thetas)
  control = (
      origins[Ellipsis, None, None, :]
      + perp_axis1[Ellipsis, None, None, :] * cx[Ellipsis, None]
      + perp_axis2[Ellipsis, None, None, :] * cy[Ellipsis, None]
      + directions[Ellipsis, None, None, :] * cz[Ellipsis, None]
  )
  return control, perp_mag
def unscented_transform(mean, cov, basis, axis=0):
  """Construct "sigma points" along `axis` from each mean and covariance.

  Args:
    mean: tensor of Gaussian means, last axis of size d.
    cov: tensor of covariances, last two axes of size d x d.
    basis: the control-point scheme: 'mean', 'random_<k>',
      '<tetrahedron|icosahedron|octahedron>_<tesselation>' (3D only),
      'julier', or 'menegaz'.
    axis: the axis along which the control points are stacked.

  Returns:
    The control points for each (mean, cov) pair.

  Raises:
    ValueError: for polyhedral bases on non-3D input, or an unknown basis.
  """
  d = cov.shape[-1]
  mean_ex = jnp.expand_dims(mean, axis)
  if basis == 'mean':
    # This effectively disables the unscented transform.
    return mean_ex
  if basis.startswith('random_'):
    num_random = int(basis.split('_')[-1])
    # TODO(barron): use a non-fixed random seed?
    noise = random.multivariate_normal(
        random.PRNGKey(0),
        jnp.zeros_like(mean),
        cov,
        (num_random,) + mean.shape[:-1],
    )
    control = mean_ex + jnp.moveaxis(jnp.nan_to_num(noise), 0, axis)
    return control
  sqrtm_cov = sqrtm(cov)
  if any([
      basis.startswith(x) for x in ['tetrahedron', 'icosahedron', 'octahedron']
  ]):
    # Use tessellated regular polyhedra vertices (and vec(0)) as control points.
    if d != 3:
      raise ValueError(f'Input is {d}D, but polyhedra are only defined for 3D.')
    base_shape, angular_tesselation = basis.split('_')
    transform = geopoly.generate_basis(
        base_shape, int(angular_tesselation), remove_symmetries=False
    ).T
    transform1 = np.concatenate([np.zeros((d, 1)), transform], axis=-1)
    # Normalize each row to unit RMS.
    transform1 /= np.sqrt(np.mean(transform1**2, axis=1))[:, None]
    control = mean_ex + jnp.moveaxis(
        math.matmul(sqrtm_cov, transform1), -1, axis
    )
  elif basis == 'julier':
    # The most basic symmetric unscented transformation from the original paper,
    # which yields 2*d+1 control points.
    offsets = np.sqrt(d + 0.5) * jnp.moveaxis(sqrtm_cov, -1, axis)
    control = jnp.concatenate(
        [mean_ex, mean_ex + offsets, mean_ex - offsets], axis=axis
    )
  elif basis == 'menegaz':
    # A compact unscented transformation from
    # folk.ntnu.no/skoge/prost/proceedings/cdc-ecc-2011/data/papers/2263.pdf
    # which yields d+1 control points.
    if d == 3:
      # A hand-optimized version of the d==3 case.
      sqrtm_cov_sum = jnp.sum(sqrtm_cov, axis=-1, keepdims=True)
      offsets = jnp.concatenate(
          [-sqrtm_cov_sum, 2 * sqrtm_cov - sqrtm_cov_sum / 3], axis=-1
      )
      control = mean_ex + jnp.moveaxis(offsets, -1, axis)
    else:
      transform = np.sqrt(d + 1) * np.eye(d) + (1 - np.sqrt(d + 1)) / d
      # == sqrt((d+1)) * sqrtm(eye(d) - 1/(d+1))
      transform1 = np.concatenate([-np.ones((d, 1)), transform], axis=-1)
      control = mean_ex + jnp.moveaxis(
          math.matmul(sqrtm_cov, transform1), -1, axis
      )
  else:
    raise ValueError(f'basis={basis} not implemented.')
  return control
def compute_control_points(
    means,
    covs,
    rays,
    tdist,
    rng,
    unscented_mip_basis,
    unscented_scale_mult,
):
  """Wrapper to compute unscented control points for the MLP class.

  Dispatches between hexagonal ray sampling ('hexify') and a generic
  unscented transform, returning a (control, perp_mag) pair.
  """
  if unscented_mip_basis == 'hexify':
    return hexify(
        rng,
        origins=rays.origins,
        directions=rays.directions,
        radii=rays.radii,
        tdist=tdist,
    )
  # Use a normal unscented transformation.
  control = unscented_transform(
      means,
      covs,
      basis=unscented_mip_basis,
      axis=-2,
  )
  if unscented_scale_mult <= 0:
    return control, None
  if rays is None:
    # NOTE: SyntaxError preserved for compatibility with existing callers,
    # though ValueError would be more conventional here.
    raise SyntaxError(
        'Rays are required as input if unscented_scale_mult > 0.'
    )
  # Mimic the math used by hexify to produce comparable scales.
  t_recon = jnp.sum(
      (control - rays.origins[Ellipsis, None, None, :])
      * rays.directions[Ellipsis, None, None, :],
      axis=-1,
  )
  perp_mag = jnp.sqrt(0.5) * rays.radii[Ellipsis, None, :] * t_recon
  return control, perp_mag
|
evocodebench_data_15
|
import ast
import asyncio
import functools
import itertools
import logging
import os
import re
import string
from copy import deepcopy
from typing import List, Callable, Dict, Optional, Any, Collection
import pandas as pd
logger = logging.getLogger("AutoRAG")
def fetch_contents(corpus_data: pd.DataFrame, ids: List[List[str]]) -> List[List[str]]:
    """Fetch the `contents` for nested lists of doc ids from a corpus frame.

    :param corpus_data: DataFrame with at least `doc_id` and `contents` columns.
    :param ids: Nested lists of doc ids; the output mirrors this nesting.
    :return: Nested lists of contents aligned with `ids`.
    """
    # PERF FIX: build the id -> contents lookup once instead of scanning the
    # whole frame per id, which was O(len(ids) * len(corpus)). Keeping the
    # first occurrence matches the previous `.values[0]` behavior.
    lookup = (
        corpus_data.drop_duplicates(subset='doc_id', keep='first')
        .set_index('doc_id')['contents']
    )
    # Note: an unknown doc_id now raises KeyError (was IndexError) — both crash.
    return [[lookup[doc_id] for doc_id in sublist] for sublist in ids]
def result_to_dataframe(column_names: List[str]):
    """
    Decorator factory converting a function's result to a pd.DataFrame.

    With a single column name the whole return value becomes that column;
    with multiple names the return value is unpacked column-by-column.
    """
    def decorator_result_to_dataframe(func: Callable):
        @functools.wraps(func)
        def wrapper(*args, **kwargs) -> pd.DataFrame:
            outputs = func(*args, **kwargs)
            if len(column_names) == 1:
                data = {column_names[0]: outputs}
            else:
                data = dict(zip(column_names, outputs))
            return pd.DataFrame(data)
        return wrapper
    return decorator_result_to_dataframe
def load_summary_file(summary_path: str,
                      dict_columns: Optional[List[str]] = None) -> pd.DataFrame:
    """
    Load summary file from summary_path.

    :param summary_path: The path of the summary file.
    :param dict_columns: The columns that are dictionary type.
        You must fill this parameter if you want to load summary file properly.
        Default is ['module_params'].
    :return: The summary dataframe.
    """
    if not os.path.exists(summary_path):
        raise ValueError(f"summary.csv does not exist in {summary_path}.")
    if dict_columns is None:
        dict_columns = ['module_params']
    summary_df = pd.read_csv(summary_path)
    missing = [col for col in dict_columns if col not in summary_df.columns]
    if missing:
        raise ValueError(f"{dict_columns} must be in summary_df.columns.")
    # Parse the stringified dict columns back into Python dicts.
    summary_df[dict_columns] = summary_df[dict_columns].applymap(ast.literal_eval)
    return summary_df
def make_combinations(target_dict: Dict[str, Any]) -> List[Dict[str, Any]]:
    """
    Make combinations from target_dict.

    Keys must be strings; each value may be a single value or a list.
    Every generated dict holds exactly one value per key, and together the
    dicts cover the full cartesian product of the (deduplicated) values.

    :param target_dict: The target dictionary.
    :return: The list of generated dictionaries.
    """
    def _as_list(value):
        return value if isinstance(value, list) else [value]

    def _dedup(values):
        def _hashable(obj):
            try:
                hash(obj)
            except TypeError:
                return False
            return True
        if not all(_hashable(elem) for elem in values):
            # TODO: add duplication check for unhashable objects
            return values
        return list(set(values))

    listified = {key: _dedup(_as_list(value)) for key, value in target_dict.items()}
    return [
        dict(zip(listified.keys(), combo))
        for combo in itertools.product(*listified.values())
    ]
def explode(index_values: Collection[Any], explode_values: Collection[Collection[Any]]):
    """
    Flatten nested `explode_values` while repeating each paired index value.

    The two inputs must have the same length; the i-th index value is
    repeated once per element of the i-th inner collection.

    :param index_values: The index values.
    :param explode_values: The exploded values.
    :return: Tuple of exploded index_values and exploded explode_values.
    """
    assert len(index_values) == len(explode_values), "Index values and explode values must have same length"
    exploded = pd.DataFrame({
        'index_values': index_values,
        'explode_values': explode_values,
    }).explode('explode_values')
    return exploded['index_values'].tolist(), exploded['explode_values'].tolist()
def replace_value_in_dict(target_dict: Dict, key: str,
                          replace_value: Any) -> Dict:
    """
    Return a deep copy of `target_dict` with `key` replaced by `replace_value`.

    If `key` is absent, the copy is returned unchanged; the input dict is
    never mutated.

    :param target_dict: The target dictionary.
    :param key: The key to replace.
    :param replace_value: The value to replace.
    :return: The replaced dictionary.
    """
    result = deepcopy(target_dict)
    if key in result:
        result[key] = replace_value
    return result
def normalize_string(s: str) -> str:
    """
    Taken from the official evaluation script for v1.1 of the SQuAD dataset.
    Lower text and remove punctuation, articles and extra whitespace.
    """
    punctuation = set(string.punctuation)
    # Apply in order: lowercase -> strip punctuation -> drop articles ->
    # collapse whitespace.
    text = s.lower()
    text = "".join(ch for ch in text if ch not in punctuation)
    text = re.sub(r"\b(a|an|the)\b", " ", text)
    return " ".join(text.split())
def convert_string_to_tuple_in_dict(d):
    """Recursively converts strings that start with '(' and end with ')' to tuples in a dictionary."""
    def _looks_like_tuple(obj):
        return isinstance(obj, str) and obj.startswith('(') and obj.endswith(')')

    for key, value in d.items():
        if isinstance(value, dict):
            # Recurse into nested dictionaries.
            convert_string_to_tuple_in_dict(value)
        elif isinstance(value, list):
            for i, item in enumerate(value):
                if isinstance(item, dict):
                    convert_string_to_tuple_in_dict(item)
                elif _looks_like_tuple(item):
                    value[i] = ast.literal_eval(item)
        elif _looks_like_tuple(value):
            d[key] = ast.literal_eval(value)
    return d
def convert_env_in_dict(d: Dict):
    """
    Recursively substitute ${VAR} placeholders in a dictionary's strings with
    the corresponding environment variables (missing vars become "").

    :param d: The dictionary to convert.
    :return: The converted dictionary.
    """
    pattern = re.compile(r".*?\${(.*?)}.*?")

    def _substitute(text: str) -> str:
        for name in pattern.findall(text):
            text = text.replace(f"${{{name}}}", os.environ.get(name, ""))
        return text

    for key, value in d.items():
        if isinstance(value, dict):
            convert_env_in_dict(value)
        elif isinstance(value, list):
            for i, item in enumerate(value):
                if isinstance(item, dict):
                    convert_env_in_dict(item)
                elif isinstance(item, str):
                    value[i] = _substitute(item)
        elif isinstance(value, str):
            d[key] = _substitute(value)
    return d
async def process_batch(tasks, batch_size: int = 64) -> List[Any]:
    """
    Await coroutines in fixed-size batches, preserving input order.

    :param tasks: A list of no-argument functions or coroutines to be executed.
    :param batch_size: The number of tasks to process in a single batch.
        Default is 64.
    :return: A list of results from the processed tasks.
    """
    gathered: List[Any] = []
    for start in range(0, len(tasks), batch_size):
        chunk = tasks[start:start + batch_size]
        gathered += await asyncio.gather(*chunk)
    return gathered
|
End of preview. Expand in Data Studio to see the full dataset.
README.md exists but content is empty.
- Downloads last month
- -