id
stringlengths 19
21
| content
stringlengths 722
86.7k
|
|---|---|
evocodebench_data_1
|
"""
Common utilities.
"""
from asyncio import AbstractEventLoop
import json
import logging
import logging.handlers
import os
import platform
import sys
from typing import AsyncGenerator, Generator
import warnings
import requests
from chat.constants import LOGDIR
handler = None
visited_loggers = set()
def build_logger(logger_name, logger_filename):
    """Build a logger that mirrors stdout/stderr and writes to a rotating file.

    Configures the root logger (only once per process), redirects
    ``sys.stdout``/``sys.stderr`` through ``StreamToLogger`` so plain prints
    are captured, and attaches a daily UTC-rotating file handler (stored in
    the module-global ``handler``) under ``LOGDIR`` to the stdout/stderr
    loggers and the named logger.

    Args:
        logger_name: name of the logger to create or fetch.
        logger_filename: log file name created inside ``LOGDIR``.

    Returns:
        The configured ``logging.Logger``.
    """
    global handler

    formatter = logging.Formatter(
        fmt="%(asctime)s | %(levelname)s | %(name)s | %(message)s",
        datefmt="%Y-%m-%d %H:%M:%S",
    )

    # Set the format of root handlers (configure root only once).
    if not logging.getLogger().handlers:
        # Bug fix: the original tested `sys.version_info[1] >= 9`, i.e. only
        # the minor version, which is wrong for any non-3.x interpreter.
        # `basicConfig(encoding=...)` requires Python 3.9+ and the UTF-8
        # encoding matters mostly for Windows consoles.
        if sys.version_info >= (3, 9):
            logging.basicConfig(level=logging.INFO, encoding="utf-8")
        else:
            if platform.system() == "Windows":
                warnings.warn(
                    "If you are running on Windows, "
                    "we recommend you use Python >= 3.9 for UTF-8 encoding."
                )
            logging.basicConfig(level=logging.INFO)
    logging.getLogger().handlers[0].setFormatter(formatter)

    # Redirect stdout and stderr to loggers so print()s land in the log file.
    stdout_logger = logging.getLogger("stdout")
    stdout_logger.setLevel(logging.INFO)
    sl = StreamToLogger(stdout_logger, logging.INFO)
    sys.stdout = sl

    stderr_logger = logging.getLogger("stderr")
    stderr_logger.setLevel(logging.ERROR)
    sl = StreamToLogger(stderr_logger, logging.ERROR)
    sys.stderr = sl

    # Get logger
    logger = logging.getLogger(logger_name)
    logger.setLevel(logging.INFO)

    # Daily-rotating (UTC) file handler shared by the loggers built here.
    os.makedirs(LOGDIR, exist_ok=True)
    filename = os.path.join(LOGDIR, logger_filename)
    handler = logging.handlers.TimedRotatingFileHandler(
        filename, when="D", utc=True, encoding="utf-8"
    )
    handler.setFormatter(formatter)

    # Attach the file handler only once per logger across repeated calls.
    for l in [stdout_logger, stderr_logger, logger]:
        if l in visited_loggers:
            continue
        visited_loggers.add(l)
        l.addHandler(handler)

    return logger
class StreamToLogger(object):
    """Pseudo file-like stream object that forwards writes to a logger."""

    def __init__(self, logger, log_level=logging.INFO):
        self.terminal = sys.stdout
        self.logger = logger
        self.log_level = log_level
        self.linebuf = ""

    def __getattr__(self, attr):
        # Delegate anything not defined here (fileno, isatty, ...) to the
        # real stdout captured at construction time.
        return getattr(self.terminal, attr)

    def write(self, buf):
        pending = self.linebuf + buf
        self.linebuf = ""
        for piece in pending.splitlines(True):
            # From the io.TextIOWrapper docs:
            #   On output, if newline is None, any '\n' characters written
            #   are translated to the system default line separator.
            # By default sys.stdout.write() expects '\n' newlines and then
            # translates them so this is still cross platform.
            if piece.endswith("\n"):
                cleaned = piece.encode("utf-8", "ignore").decode("utf-8")
                self.logger.log(self.log_level, cleaned.rstrip())
            else:
                # Keep the trailing partial line until a newline (or flush).
                self.linebuf += piece

    def flush(self):
        if self.linebuf:
            cleaned = self.linebuf.encode("utf-8", "ignore").decode("utf-8")
            self.logger.log(self.log_level, cleaned.rstrip())
        self.linebuf = ""
def disable_torch_init():
    """
    Disable the redundant torch default initialization to accelerate model creation.
    """
    import torch

    # Weights will be overwritten by a checkpoint load anyway, so skip the
    # default (and slow) parameter initialization entirely.
    noop = lambda self: None
    torch.nn.Linear.reset_parameters = noop
    torch.nn.LayerNorm.reset_parameters = noop
def get_gpu_memory(max_gpus=None):
    """Get available memory for each GPU.

    Args:
        max_gpus: optional cap on how many GPUs to inspect.

    Returns:
        A list of floats (GiB), one per inspected GPU: total memory minus
        currently allocated memory.
    """
    import torch

    device_total = torch.cuda.device_count()
    if max_gpus is not None:
        device_total = min(max_gpus, device_total)

    free_per_gpu = []
    for idx in range(device_total):
        with torch.cuda.device(idx):
            props = torch.cuda.get_device_properties(torch.cuda.current_device())
            total_gib = props.total_memory / (1024**3)
            allocated_gib = torch.cuda.memory_allocated() / (1024**3)
            free_per_gpu.append(total_gib - allocated_gib)
    return free_per_gpu
def violates_moderation(text):
    """
    Check whether the text violates OpenAI moderation API.

    Best-effort: returns False on any API error or unexpected response
    shape instead of raising.
    """
    # NOTE(review): this uses the legacy pre-1.0 `openai` SDK surface
    # (`openai.Moderation`, `openai.error`) — confirm the pinned openai
    # version still exposes these names.
    import openai

    try:
        flagged = openai.Moderation.create(input=text)["results"][0]["flagged"]
    except openai.error.OpenAIError as e:
        # API failure: treat as "not flagged" rather than crashing the caller.
        flagged = False
    except (KeyError, IndexError) as e:
        # Unexpected response payload shape.
        flagged = False
    return flagged
def clean_flant5_ckpt(ckpt_path):
    """
    Flan-t5 trained with HF+FSDP saves corrupted weights for shared embeddings,
    Use this function to make sure it can be correctly loaded.

    Rewrites the encoder/decoder embedding tensors in the sharded checkpoint
    under ``ckpt_path`` with the value of "shared.weight", saving each shard
    back in place on disk.
    """
    import torch

    # The HF index file maps each weight name to the shard file containing it.
    index_file = os.path.join(ckpt_path, "pytorch_model.bin.index.json")
    index_json = json.load(open(index_file, "r"))

    weightmap = index_json["weight_map"]

    # Load the canonical shared embedding tensor.
    share_weight_file = weightmap["shared.weight"]
    share_weight = torch.load(os.path.join(ckpt_path, share_weight_file))[
        "shared.weight"
    ]

    # Overwrite both embedding tables with the shared weight and re-save the
    # shard that holds each of them.
    for weight_name in ["decoder.embed_tokens.weight", "encoder.embed_tokens.weight"]:
        weight_file = weightmap[weight_name]
        weight = torch.load(os.path.join(ckpt_path, weight_file))
        weight[weight_name] = share_weight
        torch.save(weight, os.path.join(ckpt_path, weight_file))
def pretty_print_semaphore(semaphore):
    """Print a semaphore in better format."""
    if semaphore is None:
        return "None"
    value = semaphore._value
    locked = semaphore.locked()
    return f"Semaphore(value={value}, locked={locked})"
"""A javascript function to get url parameters for the gradio web server."""
get_window_url_params_js = """
function() {
const params = new URLSearchParams(window.location.search);
url_params = Object.fromEntries(params);
console.log("url_params", url_params);
return url_params;
}
"""
def iter_over_async(
    async_gen: AsyncGenerator, event_loop: AbstractEventLoop
) -> Generator:
    """
    Convert async generator to sync generator
    :param async_gen: the AsyncGenerator to convert
    :param event_loop: the event loop to run on
    :returns: Sync generator
    """
    iterator = async_gen.__aiter__()

    async def fetch_one():
        # StopAsyncIteration cannot propagate through run_until_complete,
        # so exhaustion is reported via a boolean flag instead.
        try:
            return False, await iterator.__anext__()
        except StopAsyncIteration:
            return True, None

    exhausted, item = event_loop.run_until_complete(fetch_one())
    while not exhausted:
        yield item
        exhausted, item = event_loop.run_until_complete(fetch_one())
def detect_language(text: str) -> str:
    """Detect the language of a string; returns "unknown" on failure."""
    # Third-party deps: pip3 install polyglot pyicu pycld2
    import polyglot
    from polyglot.detect import Detector
    from polyglot.detect.base import logger as polyglot_logger
    import pycld2

    # Suppress polyglot's sub-ERROR log chatter for every detection call.
    polyglot_logger.setLevel("ERROR")
    try:
        lang_code = Detector(text).language.name
    except (pycld2.error, polyglot.detect.base.UnknownLanguage):
        # Detection failed (e.g. too little or ambiguous text).
        lang_code = "unknown"
    return lang_code
def parse_gradio_auth_creds(filename: str):
    """Parse a username:password file for gradio authorization.

    Each line may hold several comma-separated ``user:password`` entries;
    blank entries are skipped. Returns a list of (user, password) tuples,
    or None when the file holds no credentials.
    """
    entries = []
    with open(filename, "r", encoding="utf8") as fh:
        for raw_line in fh:
            entries.extend(
                token.strip() for token in raw_line.split(",") if token.strip()
            )
    if not entries:
        return None
    return [tuple(entry.split(":")) for entry in entries]
def is_partial_stop(output: str, stop_str: str):
    """Check whether the output contains a partial stop str.

    True when some suffix of `output` is a prefix of `stop_str` (i.e. the
    stop string may be arriving, cut off at the stream boundary).
    """
    limit = min(len(output), len(stop_str))
    # i == 0 checks the whole output; i >= 1 checks the length-i suffix.
    return any(stop_str.startswith(output[-i:]) for i in range(limit))
def run_cmd(cmd: str):
    """Echo a shell command, run it, and return os.system's exit status."""
    print(cmd)
    status = os.system(cmd)
    return status
def is_sentence_complete(output: str):
    """Check whether the output ends with a sentence-terminating symbol."""
    # Covers ASCII terminators, CJK fullwidth punctuation, and closing quotes.
    terminators = (".", "?", "!", "...", "。", "?", "!", "…", '"', "'", "”")
    return output.endswith(terminators)
# Models don't use the same configuration key for determining the maximum
# sequence length. Store them here so we can sanely check them.
# NOTE: The ordering here is important. Some models have two of these and we
# have a preference for which value gets used.
SEQUENCE_LENGTH_KEYS = [
    "max_sequence_length",
    "seq_length",
    "max_position_embeddings",
    "max_seq_len",
    "model_max_length",
]


def get_context_length(config):
    """Get the context length of a model from a huggingface model config.

    Applies the rope scaling factor when present, checks the known
    sequence-length keys in preference order, and falls back to 2048 when
    none is set.
    """
    rope_scaling = getattr(config, "rope_scaling", None)
    if rope_scaling:
        # Bug fix: some rope_scaling dicts (e.g. {"type": ...} only) have no
        # "factor" key; default to 1 instead of raising KeyError.
        rope_scaling_factor = rope_scaling.get("factor", 1)
    else:
        rope_scaling_factor = 1

    for key in SEQUENCE_LENGTH_KEYS:
        val = getattr(config, key, None)
        if val is not None:
            return int(rope_scaling_factor * val)
    return 2048
|
evocodebench_data_2
|
"""Tools module"""
# Copyright (c) 2023
# Author: Hugo Delatte <delatte.hugo@gmail.com>
# License: BSD 3 clause
# Implementation derived from:
# Riskfolio-Lib, Copyright (c) 2020-2023, Dany Cajas, Licensed under BSD 3 clause.
# Statsmodels, Copyright (C) 2006, Jonathan E. Taylor, Licensed under BSD 3 clause.
from enum import auto
import numpy as np
import scipy.cluster.hierarchy as sch
import scipy.optimize as sco
import scipy.spatial.distance as scd
import scipy.special as scs
from scipy.sparse import csr_matrix
from skfolio.utils.tools import AutoEnum
__all__ = [
"NBinsMethod",
"n_bins_freedman",
"n_bins_knuth",
"is_cholesky_dec",
"assert_is_square",
"assert_is_symmetric",
"assert_is_distance",
"cov_nearest",
"cov_to_corr",
"corr_to_cov",
"commutation_matrix",
"compute_optimal_n_clusters",
"rand_weights",
"rand_weights_dirichlet",
]
class NBinsMethod(AutoEnum):
    """Enumeration of the Number of Bins Methods

    Parameters
    ----------
    FREEDMAN : str
        Freedman method

    KNUTH : str
        Knuth method
    """

    # Values are auto-generated by AutoEnum; only the member names matter.
    FREEDMAN = auto()
    KNUTH = auto()
def n_bins_freedman(x: np.ndarray) -> int:
    """Compute the optimal histogram bin size using the Freedman-Diaconis rule [1]_.

    Parameters
    ----------
    x : ndarray of shape (n_observations,)
        The input array.

    Returns
    -------
    n_bins : int
        The optimal bin size.

    References
    ----------
    .. [1] "On the histogram as a density estimator: L2 theory".
        Freedman & Diaconis (1981).
    """
    if x.ndim != 1:
        raise ValueError("`x` must be a 1d-array")
    n_obs = len(x)
    q1, q3 = np.percentile(x, [25, 75])
    # Freedman-Diaconis bin width: 2 * IQR / n^(1/3).
    bin_width = 2 * (q3 - q1) / (n_obs ** (1 / 3))
    if bin_width == 0:
        # Degenerate IQR (e.g. near-constant data): fall back to 5 bins.
        return 5
    span = np.max(x) - np.min(x)
    return int(round(max(1, np.ceil(span / bin_width))))
def n_bins_knuth(x: np.ndarray) -> int:
    """Compute the optimal histogram bin size using Knuth's rule [1]_.

    Parameters
    ----------
    x : ndarray of shape (n_observations,)
        The input array.

    Returns
    -------
    n_bins : int
        The optimal bin size.

    References
    ----------
    .. [1] "Optimal Data-Based Binning for Histograms".
        Knuth.
    """
    x = np.sort(x)
    n = len(x)

    def neg_log_posterior(m: float):
        # scipy's fmin passes a length-1 array; unwrap it.
        m = m[0]
        if m <= 0:
            return np.inf
        edges = np.linspace(x[0], x[-1], int(m) + 1)
        counts, _ = np.histogram(x, edges)
        # Negative Knuth log-posterior for m bins (minimized by fmin).
        return -(
            n * np.log(m)
            + scs.gammaln(0.5 * m)
            - m * scs.gammaln(0.5)
            - scs.gammaln(n + 0.5 * m)
            + np.sum(scs.gammaln(counts + 0.5))
        )

    # Seed the optimizer with the Freedman-Diaconis estimate.
    start = n_bins_freedman(x)
    best = sco.fmin(neg_log_posterior, start, disp=0)[0]
    return int(round(best))
def rand_weights_dirichlet(n: int) -> np.array:
    """Produces n random weights that sum to one from a dirichlet distribution
    (uniform distribution over a simplex)

    Parameters
    ----------
    n : int
        Number of weights.

    Returns
    -------
    weights : ndarray of shape (n, )
        The vector of weights.
    """
    # A symmetric Dirichlet with unit concentration is uniform on the simplex.
    alpha = np.ones(n)
    return np.random.dirichlet(alpha)
def rand_weights(n: int, zeros: int = 0) -> np.array:
    """Produces n random weights that sum to one from an uniform distribution
    (non-uniform distribution over a simplex)

    Parameters
    ----------
    n : int
        Number of weights.

    zeros : int, default=0
        The number of weights to randomly set to zeros.

    Returns
    -------
    weights : ndarray of shape (n, )
        The vector of weights.
    """
    raw = np.random.rand(n)
    if zeros > 0:
        # Zero out a random subset of positions before normalizing.
        muted = np.random.choice(n, zeros, replace=False)
        raw[muted] = 0
    return raw / sum(raw)
def is_cholesky_dec(x: np.ndarray) -> bool:
    """Returns True if Cholesky decomposition can be computed.

    The matrix must be Hermitian (symmetric if real-valued) and
    positive-definite. No checking is performed to verify whether the matrix
    is Hermitian or not.

    Parameters
    ----------
    x : ndarray of shape (n, m)
        The matrix.

    Returns
    -------
    value : bool
        True if Cholesky decomposition can be applied to the matrix, False
        otherwise.
    """
    # Around 100 times faster than checking for positive eigenvalues with
    # np.linalg.eigh
    try:
        np.linalg.cholesky(x)
        return True
    # Bug fix: catch the public exception class. `np.linalg.linalg.LinAlgError`
    # went through a private alias module that was removed in NumPy 2.0
    # (accessing it raises AttributeError there).
    except np.linalg.LinAlgError:
        return False
def is_positive_definite(x: np.ndarray) -> bool:
    """Returns True if the matrix is positive definite.

    Parameters
    ----------
    x : ndarray of shape (n, m)
        The matrix.

    Returns
    -------
    value : bool
        True if the matrix is positive definite, False otherwise.
    """
    eigenvalues = np.linalg.eigvals(x)
    return np.all(eigenvalues > 0)
def assert_is_square(x: np.ndarray) -> None:
    """Raises an error if the matrix is not square.

    Parameters
    ----------
    x : ndarray of shape (n, n)
        The matrix.

    Raises
    ------
    ValueError: if the matrix is not square.
    """
    square = x.ndim == 2 and x.shape[0] == x.shape[1]
    if not square:
        raise ValueError("The matrix must be square")
def assert_is_symmetric(x: np.ndarray) -> None:
    """Raises an error if the matrix is not symmetric.

    Parameters
    ----------
    x : ndarray of shape (n, m)
        The matrix.

    Raises
    ------
    ValueError: if the matrix is not symmetric.
    """
    # Squareness contract inlined (same message as assert_is_square).
    if x.ndim != 2 or x.shape[0] != x.shape[1]:
        raise ValueError("The matrix must be square")
    if not np.allclose(x, x.T):
        raise ValueError("The matrix must be symmetric")
def assert_is_distance(x: np.ndarray) -> None:
    """Raises an error if the matrix is not a distance matrix.

    Parameters
    ----------
    x : ndarray of shape (n, n)
        The matrix.

    Raises
    ------
    ValueError: if the matrix is not a distance matrix.
    """
    # Symmetry contract inlined (same messages as assert_is_symmetric).
    if x.ndim != 2 or x.shape[0] != x.shape[1]:
        raise ValueError("The matrix must be square")
    if not np.allclose(x, x.T):
        raise ValueError("The matrix must be symmetric")
    # A distance matrix must additionally have a (near-)zero diagonal.
    if not np.allclose(np.diag(x), np.zeros(x.shape[0]), atol=1e-5):
        raise ValueError(
            "The distance matrix must have diagonal elements close to zeros"
        )
def cov_to_corr(cov: np.ndarray) -> tuple[np.ndarray, np.ndarray]:
    """Convert a covariance matrix to a correlation matrix.

    Parameters
    ----------
    cov : ndarray of shape (n, n)
        Covariance matrix.

    Returns
    -------
    corr, std : tuple[ndarray of shape (n, n), ndarray of shape (n, )]
        Correlation matrix and standard-deviation vector
    """
    if cov.ndim != 2:
        raise ValueError(f"`cov` must be a 2D array, got a {cov.ndim}D array")
    std = np.sqrt(np.diag(cov))
    # corr[i, j] = cov[i, j] / (std[i] * std[j])
    corr = cov / np.outer(std, std)
    return corr, std
def corr_to_cov(corr: np.ndarray, std: np.ndarray):
    """Convert a correlation matrix to a covariance matrix given its
    standard-deviation vector.

    Parameters
    ----------
    corr : ndarray of shape (n, n)
        Correlation matrix.

    std : ndarray of shape (n, )
        Standard-deviation vector.

    Returns
    -------
    cov : ndarray of shape (n, n)
        Covariance matrix
    """
    if std.ndim != 1:
        raise ValueError(f"`std` must be a 1D array, got a {std.ndim}D array")
    if corr.ndim != 2:
        raise ValueError(f"`corr` must be a 2D array, got a {corr.ndim}D array")
    # cov[i, j] = corr[i, j] * std[i] * std[j]
    return corr * np.outer(std, std)
# Eigenvalue floor used by cov_nearest's clipping mode: values below ~1e-13
# can make scipy consider the resulting matrix non-PSD in corner cases.
_CLIPPING_VALUE = 1e-13
def cov_nearest(cov: np.ndarray, higham: bool = False, higham_max_iteration: int = 100):
    """Compute the nearest covariance matrix that is positive definite and with a
    cholesky decomposition that can be computed. The variance is left unchanged.

    First, it converts the covariance matrix to a correlation matrix.
    Then, it finds the nearest correlation matrix and converts it back to a covariance
    matrix using the initial standard deviation.

    Cholesky decomposition can fail for symmetric positive definite (SPD) matrix due
    to floating point error and inversely, Cholesky decomposition can succeed for
    non-SPD matrix. Therefore, we need to test for both. We always start by testing
    for Cholesky decomposition which is significantly faster than checking for positive
    eigenvalues.

    Parameters
    ----------
    cov : ndarray of shape (n, n)
        Covariance matrix.

    higham : bool, default=False
        If this is set to True, the Higham & Nick (2002) algorithm [1]_ is used,
        otherwise the eigenvalues are clipped to threshold above zeros (1e-13).
        The default (`False`) is to use the clipping method as the Higham & Nick
        algorithm can be slow for large datasets.

    higham_max_iteration : int, default=100
        Maximum number of iteration of the Higham & Nick (2002) algorithm.
        The default value is `100`.

    Returns
    -------
    cov : ndarray
        The nearest covariance matrix.

    References
    ----------
    .. [1] "Computing the nearest correlation matrix - a problem from finance"
        IMA Journal of Numerical Analysis
        Higham & Nick (2002)
    """
    assert_is_square(cov)
    assert_is_symmetric(cov)

    # Around 100 times faster than checking eigenvalues with np.linalg.eigh
    if is_cholesky_dec(cov) and is_positive_definite(cov):
        # Already usable: return the input unchanged.
        return cov

    # Work in correlation space so the variances (std) are preserved.
    corr, std = cov_to_corr(cov)

    if higham:
        eps = np.finfo(np.float64).eps * 5
        diff = np.zeros(corr.shape)
        x = corr.copy()
        for _ in range(higham_max_iteration):
            # `diff` is the correction carried between iterations
            # (NOTE(review): presumably Higham's Dykstra-style correction —
            # confirm against the paper).
            x_adj = x - diff
            eig_vals, eig_vecs = np.linalg.eigh(x_adj)
            # Project onto the PSD cone by flooring the eigenvalues.
            x = eig_vecs * np.maximum(eig_vals, eps) @ eig_vecs.T
            diff = x - x_adj
            # Restore the unit diagonal of a correlation matrix.
            np.fill_diagonal(x, 1)
            cov = corr_to_cov(x, std)
            if is_cholesky_dec(cov) and is_positive_definite(cov):
                break
        else:
            # Loop exhausted without producing a usable matrix.
            raise ValueError("Unable to find the nearest positive definite matrix")
    else:
        eig_vals, eig_vecs = np.linalg.eigh(corr)
        # Clipping the eigenvalues with a value smaller than 1e-13 can cause scipy to
        # consider the matrix non-psd is some corner cases (see test/test_stats.py)
        x = eig_vecs * np.maximum(eig_vals, _CLIPPING_VALUE) @ eig_vecs.T
        # Re-normalize to a correlation matrix, then restore the variances.
        x, _ = cov_to_corr(x)
        cov = corr_to_cov(x, std)

    return cov
def commutation_matrix(x):
    """Compute the commutation matrix.

    The commutation matrix K satisfies K @ vec(A) = vec(A.T) for
    column-major (Fortran order) vectorization.

    Parameters
    ----------
    x : ndarray of shape (n, m)
        The matrix.

    Returns
    -------
    K : ndarray of shape (m * n, m * n)
        The commutation matrix (sparse CSR).
    """
    m, n = x.shape
    size = m * n
    rows = np.arange(size)
    # Column indices follow the Fortran-order permutation of the entries.
    cols = rows.reshape((m, n), order="F").ravel()
    ones = np.ones(size, dtype=np.int8)
    return csr_matrix((ones, (rows, cols)), shape=(size, size))
def compute_optimal_n_clusters(distance: np.ndarray, linkage_matrix: np.ndarray) -> int:
    r"""Compute the optimal number of clusters based on Two-Order Difference to Gap
    Statistic [1]_.

    The Two-Order Difference to Gap Statistic has been developed to improve the
    performance and stability of Tibshirani's Gap statistic.
    It applies the two-order difference of the within-cluster dispersion to replace the
    reference null distribution in the Gap statistic.

    The number of cluster :math:`k` is determined by:

    .. math:: \begin{cases}
                \begin{aligned}
                &\max_{k} & & W_{k+2} + W_{k} - 2 W_{k+1} \\
                &\text{s.t.} & & 1 \ge c \ge max\bigl(8, \sqrt{n}\bigr) \\
                \end{aligned}
              \end{cases}

    with :math:`n` the sample size and :math:`W_{k}` the within-cluster dispersions
    defined as:

    .. math:: W_{k} = \sum_{i=1}^{k} \frac{D_{i}}{2|C_{i}|}

    where :math:`|C_{i}|` is the cardinality of cluster :math:`i` and :math:`D_{i}` its
    density defined as:

    .. math:: D_{i} = \sum_{u \in C_{i}} \sum_{v \in C_{i}} d(u,v)

    with :math:`d(u,v)` the distance between u and v.

    Parameters
    ----------
    distance : ndarray of shape (n, n)
        Distance matrix.

    linkage_matrix : ndarray of shape (n - 1, 4)
        Linkage matrix.

    Returns
    -------
    value : int
        Optimal number of clusters.

    References
    ----------
    .. [1] "Application of two-order difference to gap statistic".
        Yue, Wang & Wei (2009)
    """
    # cut_tree(...) gives cluster labels for every cut level of the dendrogram.
    cut_tree = sch.cut_tree(linkage_matrix)
    n = cut_tree.shape[1]
    max_clusters = max(8, round(np.sqrt(n)))
    dispersion = []
    for k in range(max_clusters):
        # NOTE(review): column n-k-1 is presumably the cut with k+1 clusters
        # (columns ordered by decreasing cluster count) — confirm against
        # scipy.cluster.hierarchy.cut_tree's column ordering.
        level = cut_tree[:, n - k - 1]
        cluster_density = []
        for i in range(np.max(level) + 1):
            cluster_idx = np.argwhere(level == i).flatten()
            # Condensed pairwise distances within cluster i.
            cluster_dists = scd.squareform(
                distance[cluster_idx, :][:, cluster_idx], checks=False
            )
            if cluster_dists.shape[0] != 0:
                # Mean pairwise distance, with NaNs coerced to 0.
                cluster_density.append(np.nan_to_num(cluster_dists.mean()))
        dispersion.append(np.sum(cluster_density))
    dispersion = np.array(dispersion)
    # Two-order difference: W_{k+2} + W_k - 2 W_{k+1}.
    gaps = np.roll(dispersion, -2) + dispersion - 2 * np.roll(dispersion, -1)
    # The last two entries wrap around due to np.roll; drop them.
    gaps = gaps[:-2]
    # k=0 represents one cluster
    k = np.argmax(gaps) + 2
    return k
|
evocodebench_data_3
|
# coding=utf-8
# Copyright 2023 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tools for manipulating coordinate spaces and distances along rays."""
from internal import geopoly
from internal import math
import jax
from jax import random
import jax.numpy as jnp
import numpy as np
def contract(x):
    """Contracts points towards the origin (Eq 10 of arxiv.org/abs/2111.12077)."""
    # Clamping the squared norm to 1 makes the map the identity inside |x| < 1.
    sq_norm = jnp.maximum(1, jnp.sum(x**2, axis=-1, keepdims=True))
    factor = (2 * jnp.sqrt(sq_norm) - 1) / sq_norm
    return factor * x
def inv_contract(z):
    """The inverse of contract()."""
    # Clamping the squared norm to 1 makes the map the identity inside |z| < 1.
    sq_norm = jnp.maximum(1, jnp.sum(z**2, axis=-1, keepdims=True))
    denom = 2 * jnp.sqrt(sq_norm) - sq_norm
    return z / denom
def track_linearize(fn, mean, cov):
    """Apply function `fn` to a set of means and covariances, ala a Kalman filter.

    We can analytically transform a Gaussian parameterized by `mean` and `cov`
    with a function `fn` by linearizing `fn` around `mean`, and taking advantage
    of the fact that Covar[Ax + y] = A(Covar[x])A^T (see
    https://cs.nyu.edu/~roweis/notes/gaussid.pdf for details).

    Args:
      fn: A function that can be applied to `mean`.
      mean: a tensor of Gaussian means, where the last axis is the dimension.
      cov: a tensor of covariances, where the last two axes are the dimensions.

    Returns:
      fn_mean: the transformed means.
      fn_cov: the transformed covariances.
    """
    if len(cov.shape) != len(mean.shape) + 1:
        raise ValueError('cov must be non-diagonal')
    fn_mean, jvp = jax.linearize(fn, mean)
    # Covar[A x] = A Covar[x] A^T: apply the linearization from both sides.
    half_transformed = jax.vmap(jvp, -1, -2)(cov)
    fn_cov = jax.vmap(jvp, -1, -2)(half_transformed)
    return fn_mean, fn_cov
def track_isotropic(fn, mean, scale):
    """Apply function `fn` to a set of means and scales, ala a Kalman filter.

    This is the isotropic or scalar equivalent of track_linearize, as we're still
    linearizing a function and tracking a Gaussian through it, but the input and
    output Gaussians are all isotropic and are only represented with a single
    `scale` value (where `scale**2` is the variance of the Gaussian).

    Args:
      fn: A function that can be applied to `mean`.
      mean: a tensor of Gaussian means, where the last axis is the dimension.
      scale: a tensor of scales with shape mean.shape[:-1], or None.

    Returns:
      fn_mean: the transformed means.
      fn_scale: the transformed scales (None when `scale` is None).
    """
    # Bug fix: only validate `scale`'s shape when it is not None. The original
    # read `scale.shape` unconditionally, so the scale=None path (explicitly
    # handled below) crashed with AttributeError before ever reaching it.
    if scale is not None and mean.shape[:-1] != scale.shape:
        raise ValueError(
            f'mean.shape[:-1] {mean.shape}[:-1] != scale.shape {scale.shape}.'
        )
    d = mean.shape[-1]
    fn_mean, lin_fn = jax.linearize(fn, mean)
    if scale is not None:
        # Compute the Jacobian of fn function at the locations of each mean.
        jac = jax.vmap(lin_fn, in_axes=-1, out_axes=-1)(
            jnp.broadcast_to(jnp.eye(d), mean.shape + (d,))
        )
        # The cube root of the determinant of the Jacobian is the geometric mean
        # of the eigenvalues of the Jacobian, which gives us the isotropic scaling
        # implied by `fn` at each mean that `scale` should be multiplied by.
        eps = jnp.finfo(jnp.float32).tiny  # Guard against an inf gradient at 0.
        abs_det = jnp.maximum(eps, jnp.abs(jnp.linalg.det(jac)))
        # Special case d == 3 for speed's sake.
        fn_scale = scale * (jnp.cbrt(abs_det) if d == 3 else abs_det ** (1 / d))
    else:
        fn_scale = None
    return fn_mean, fn_scale
def contract3_isoscale(x):
    """A fast version of track_isotropic(contract, *)'s scaling for 3D inputs."""
    if x.shape[-1] != 3:
        raise ValueError(f'Inputs must be 3D, are {x.shape[-1]}D.')
    sq_norm = jnp.maximum(1, jnp.sum(x**2, axis=-1))
    # cbrt((2 * sqrt(sq_norm) - 1) ** 2) / sq_norm, computed in log space.
    log_scale = 2 / 3 * jnp.log(2 * jnp.sqrt(sq_norm) - 1) - jnp.log(sq_norm)
    return jnp.exp(log_scale)
def construct_ray_warps(fn, t_near, t_far, *, fn_inv=None):
    """Construct a bijection between metric distances and normalized distances.

    See the text around Equation 11 in https://arxiv.org/abs/2111.12077 for a
    detailed explanation.

    Args:
      fn: the function to ray distances.
      t_near: a tensor of near-plane distances.
      t_far: a tensor of far-plane distances.
      fn_inv: Optional, if not None then it's used as the inverse of fn().

    Returns:
      t_to_s: a function that maps distances to normalized distances in [0, 1].
      s_to_t: the inverse of t_to_s.
    """
    if fn is None:
        fn_fwd = lambda x: x
        fn_inv = lambda x: x
    else:
        fn_fwd = fn
        if fn_inv is None:
            # A simple mapping from some functions to their inverse.
            inv_mapping = {
                'reciprocal': jnp.reciprocal,
                'log': jnp.exp,
                'exp': jnp.log,
                'sqrt': jnp.square,
                'square': jnp.sqrt,
            }
            fn_inv = inv_mapping[fn.__name__]

    near_mapped = fn_fwd(t_near)
    far_mapped = fn_fwd(t_far)

    def t_to_s(t):
        # Forcibly clip t to the valid range to guard against inf's.
        clipped = jnp.clip(t, t_near, t_far)
        return (fn_fwd(clipped) - near_mapped) / (far_mapped - near_mapped)

    def s_to_t(s):
        metric = fn_inv(s * far_mapped + (1 - s) * near_mapped)
        return jnp.clip(metric, t_near, t_far)

    return t_to_s, s_to_t
def expected_sin(mean, var):
    """Compute the mean of sin(x), x ~ N(mean, var)."""
    # Larger variance attenuates the expectation toward zero.
    attenuation = jnp.exp(-0.5 * var)
    return attenuation * math.safe_sin(mean)
def integrated_pos_enc(mean, var, min_deg, max_deg):
    """Encode `x` with sinusoids scaled by 2^[min_deg, max_deg).

    Args:
      mean: tensor, the mean coordinates to be encoded
      var: tensor, the variance of the coordinates to be encoded.
      min_deg: int, the min degree of the encoding.
      max_deg: int, the max degree of the encoding.

    Returns:
      encoded: jnp.ndarray, encoded variables.
    """
    freqs = 2.0 ** jnp.arange(min_deg, max_deg)
    flat = mean.shape[:-1] + (-1,)
    mu = jnp.reshape(mean[Ellipsis, None, :] * freqs[:, None], flat)
    # Variance scales with the square of each frequency.
    sigma_sq = jnp.reshape(var[Ellipsis, None, :] * freqs[:, None] ** 2, flat)
    # E[sin] at the value and at its quarter-phase shift (i.e. the cosine).
    return expected_sin(
        jnp.concatenate([mu, mu + 0.5 * jnp.pi], axis=-1),
        jnp.concatenate([sigma_sq, sigma_sq], axis=-1),
    )
def lift_and_diagonalize(mean, cov, basis):
    """Project `mean` and `cov` onto basis and diagonalize the projected cov."""
    projected_mean = math.matmul(mean, basis)
    # diag(B^T C B) computed without materializing the full projected matrix.
    projected_cov_diag = jnp.sum(basis * math.matmul(cov, basis), axis=-2)
    return projected_mean, projected_cov_diag
def pos_enc(x, min_deg, max_deg, append_identity=True):
    """The positional encoding used by the original NeRF paper."""
    freqs = 2.0 ** jnp.arange(min_deg, max_deg)
    flat_shape = x.shape[:-1] + (-1,)
    scaled = jnp.reshape(x[Ellipsis, None, :] * freqs[:, None], flat_shape)  # (..., s*c)
    # sin at the value and at its quarter-phase shift gives [sin, cos] pairs.
    # Note that we're not using safe_sin, unlike IPE.
    features = jnp.sin(
        jnp.concatenate([scaled, scaled + 0.5 * jnp.pi], axis=-1)
    )
    if append_identity:
        return jnp.concatenate([x, features], axis=-1)
    return features
def sqrtm(mat, return_eigs=False):
    """Take the matrix square root of a PSD matrix [..., d, d].

    Args:
      mat: a PSD matrix (or batch of matrices).
      return_eigs: if True, also return the (eigvec, eigval) pair computed.

    Returns:
      sqrtm_mat, or (sqrtm_mat, (eigvec, eigval)) when return_eigs is True.
    """
    # jax.lax.linalg.eigh returns (eigenvectors, eigenvalues) in that order.
    eigvec, eigval = jax.lax.linalg.eigh(
        mat, symmetrize_input=False, sort_eigenvalues=False
    )
    # sqrt(M) = V sqrt(D) V^T. NOTE(review): `math` is the project's internal
    # module; safe_sqrt presumably clamps tiny negative eigenvalues from
    # numerical error — confirm in internal/math.py.
    scaling = math.safe_sqrt(eigval)[Ellipsis, None, :]
    sqrtm_mat = math.matmul(eigvec * scaling, jnp.moveaxis(eigvec, -2, -1))
    return (sqrtm_mat, (eigvec, eigval)) if return_eigs else sqrtm_mat
def isotropize(cov, mode='accurate'):
    """Turn covariances into isotropic covariances with the same determinant."""
    d = cov.shape[-1]
    if d == 1:
        # A 1x1 covariance is already isotropic.
        return cov

    if mode == 'fast':
        det = jnp.linalg.det(cov)
        diag_val = det ** (1 / d)
        is_invalid = (det <= jnp.finfo(jnp.float32).tiny) | ~jnp.isfinite(det)
    elif mode == 'accurate':
        # Log-determinant is better conditioned for small determinants.
        log_det = jnp.linalg.slogdet(cov)[1]
        diag_val = jnp.exp(log_det / d)
        is_invalid = ~jnp.isfinite(log_det)
    else:
        raise ValueError(f'mode={mode} not implemented.')

    cov_iso = jnp.eye(d) * diag_val[Ellipsis, None, None]
    # Guard against NaN outputs when `det` is super small. Note that this does
    # not guard against NaN gradients!
    return jnp.where(is_invalid[Ellipsis, None, None], jnp.zeros_like(cov), cov_iso)
def construct_perp_basis(directions):
    """Construct a perpendicular basis for each 3-vector in `directions`."""
    if directions.shape[-1] != 3:
        raise ValueError(f'directions must be 3D, but is {directions.shape[-1]}D')

    unit = lambda v: v / jnp.sqrt(jnp.sum(v**2, axis=-1, keepdims=True))

    # A vector perpendicular to `directions`: cross with the arbitrary z-axis.
    with_z = jnp.cross(directions, np.array([0.0, 0.0, 1.0]))
    # Near-degenerate when `directions` ~ [0, 0, 1]; fall back to [1, 1, 1].
    with_ones = jnp.cross(directions, np.array([1.0, 1.0, 1.0]))
    degenerate = jnp.all(jnp.abs(with_z) < np.finfo(np.float32).eps, axis=-1)
    first = jnp.where(degenerate[Ellipsis, None], with_ones, with_z)

    # Crossing `directions` with `first` completes the basis.
    second = jnp.cross(directions, first)
    return unit(first), unit(second)
def hexify(rng, *, origins, directions, radii, tdist):
    """Produce hexagon-shaped samples from ray segments.

    Args:
      rng: JAX PRNG key, or None for the deterministic variant.
      origins: ray origin points.
      directions: ray direction 3-vectors.
      radii: per-ray radii used to scale the perpendicular offsets.
      tdist: per-ray segment boundary distances along each ray.

    Returns:
      control: world-space multisample control points per ray segment.
      perp_mag: the perpendicular offset magnitude of each control point.
    """
    # Construct a base set of angles, by linspacing [0, 2pi] in a specific order.
    # This is one of two orderings of angles that doesn't induce any anisotropy
    # into the sample covariance of the multisample coordinates. Any rotation and
    # mirroring along the z-axis of this ordering is also valid.
    # There exists one alternative valid ordering, which is [0, 3, 2, 5, 4, 1].
    # This seems to work less well though likely because of the strong correlation
    # between adjacent angles.
    thetas = (np.pi / 3) * np.array([0, 2, 4, 3, 5, 1])

    # Lift the angles to the size of the rays.
    sz = tdist.shape[:-1] + (tdist.shape[-1] - 1, len(thetas))
    thetas = jnp.broadcast_to(thetas, sz)

    if rng is not None:
        # Randomly reverse the order of half of the hexes.
        key, rng = random.split(rng)
        flip = random.bernoulli(key, shape=sz[:-1])
        thetas = jnp.where(flip[Ellipsis, None], thetas[Ellipsis, ::-1], thetas)
        # Rotate each hex by some random amount.
        key, rng = random.split(rng)
        thetas += (2 * jnp.pi) * random.uniform(key, shape=sz[:-1])[Ellipsis, None]
    else:
        # If we're deterministic, flip and shift every other hex by 30 degrees.
        flip = jnp.arange(thetas.shape[-2]) % 2
        thetas = jnp.where(flip[Ellipsis, None], thetas[Ellipsis, ::-1], thetas)
        thetas += (flip * jnp.pi / 6)[Ellipsis, None]

    # TODO(barron): Plumb through the dx/dy frame for the original ray in the
    # image plane, to avoid the need of this.
    perp_axis1, perp_axis2 = construct_perp_basis(directions)

    # Grab each t-interval's midpoint and half-width.
    t0, t1 = tdist[Ellipsis, :-1], tdist[Ellipsis, 1:]
    s = (t0 + t1) / 2
    d = (t1 - t0) / 2

    # Compute the length along the ray for each multisample, using mip-NeRF math.
    cz = t0[Ellipsis, None] + math.safe_div(d, (d**2 + 3 * s**2))[Ellipsis, None] * (
        (t1**2 + 2 * s**2)[Ellipsis, None]
        + (3 / np.sqrt(7))
        * (np.arange(6) * (2 / 5) - 1)
        * math.safe_sqrt(((d**2 - s**2) ** 2 + 4 * s**4))[Ellipsis, None]
    )

    # Compute the offset from the ray for each multisample.
    perp_mag = jnp.sqrt(0.5) * radii[Ellipsis, None, :] * cz

    # Go from ray coordinate to world coordinates.
    cx = perp_mag * jnp.cos(thetas)
    cy = perp_mag * jnp.sin(thetas)
    control = (
        origins[Ellipsis, None, None, :]
        + perp_axis1[Ellipsis, None, None, :] * cx[Ellipsis, None]
        + perp_axis2[Ellipsis, None, None, :] * cy[Ellipsis, None]
        + directions[Ellipsis, None, None, :] * cz[Ellipsis, None]
    )

    return control, perp_mag
def unscented_transform(mean, cov, basis, axis=0):
    """Construct "sigma points" along `axis` from each mean and covariance.

    Args:
      mean: tensor of Gaussian means (last axis is the dimension).
      cov: tensor of covariances, [..., d, d].
      basis: strategy name: 'mean', 'random_<k>', a polyhedron spec like
        'icosahedron_<tesselation>', 'julier', or 'menegaz'.
      axis: the axis along which the control points are stacked.

    Returns:
      control: the control points (shape depends on `basis`).
    """
    d = cov.shape[-1]
    mean_ex = jnp.expand_dims(mean, axis)

    if basis == 'mean':
        # This effectively disables the unscented transform.
        return mean_ex

    if basis.startswith('random_'):
        num_random = int(basis.split('_')[-1])
        # TODO(barron): use a non-fixed random seed?
        noise = random.multivariate_normal(
            random.PRNGKey(0),
            jnp.zeros_like(mean),
            cov,
            (num_random,) + mean.shape[:-1],
        )
        control = mean_ex + jnp.moveaxis(jnp.nan_to_num(noise), 0, axis)
        return control

    sqrtm_cov = sqrtm(cov)
    if any([
        basis.startswith(x) for x in ['tetrahedron', 'icosahedron', 'octahedron']
    ]):
        # Use tessellated regular polyhedra vertices (and vec(0)) as control points.
        if d != 3:
            raise ValueError(f'Input is {d}D, but polyhedra are only defined for 3D.')
        base_shape, angular_tesselation = basis.split('_')
        transform = geopoly.generate_basis(
            base_shape, int(angular_tesselation), remove_symmetries=False
        ).T
        # Prepend the zero vector and normalize each row's RMS to 1.
        transform1 = np.concatenate([np.zeros((d, 1)), transform], axis=-1)
        transform1 /= np.sqrt(np.mean(transform1**2, axis=1))[:, None]
        control = mean_ex + jnp.moveaxis(
            math.matmul(sqrtm_cov, transform1), -1, axis
        )
    elif basis == 'julier':
        # The most basic symmetric unscented transformation from the original paper,
        # which yields 2*d+1 control points.
        offsets = np.sqrt(d + 0.5) * jnp.moveaxis(sqrtm_cov, -1, axis)
        control = jnp.concatenate(
            [mean_ex, mean_ex + offsets, mean_ex - offsets], axis=axis
        )
    elif basis == 'menegaz':
        # A compact unscented transformation from
        # folk.ntnu.no/skoge/prost/proceedings/cdc-ecc-2011/data/papers/2263.pdf
        # which yields d+1 control points.
        if d == 3:
            # A hand-optimized version of the d==3 case.
            sqrtm_cov_sum = jnp.sum(sqrtm_cov, axis=-1, keepdims=True)
            offsets = jnp.concatenate(
                [-sqrtm_cov_sum, 2 * sqrtm_cov - sqrtm_cov_sum / 3], axis=-1
            )
            control = mean_ex + jnp.moveaxis(offsets, -1, axis)
        else:
            transform = np.sqrt(d + 1) * np.eye(d) + (1 - np.sqrt(d + 1)) / d
            # == sqrt((d+1)) * sqrtm(eye(d) - 1/(d+1))
            transform1 = np.concatenate([-np.ones((d, 1)), transform], axis=-1)
            control = mean_ex + jnp.moveaxis(
                math.matmul(sqrtm_cov, transform1), -1, axis
            )
    else:
        raise ValueError(f'basis={basis} not implemented.')
    return control
def compute_control_points(
    means,
    covs,
    rays,
    tdist,
    rng,
    unscented_mip_basis,
    unscented_scale_mult,
):
    """Wrapper to compute unscented control points for the MLP class.

    Dispatches between the `hexify` sampling scheme and a plain unscented
    transform of the input Gaussians, optionally reproducing hexify's
    perpendicular-magnitude scale for comparability.
    """
    if unscented_mip_basis == 'hexify':
        # hexify already returns the (control, perp_mag) pair.
        return hexify(
            rng,
            origins=rays.origins,
            directions=rays.directions,
            radii=rays.radii,
            tdist=tdist,
        )
    # Use a normal unscented transformation.
    control = unscented_transform(
        means,
        covs,
        basis=unscented_mip_basis,
        axis=-2,
    )
    if unscented_scale_mult <= 0:
        return control, None
    if rays is None:
        # NOTE(review): SyntaxError is an unusual choice for a runtime
        # argument check (ValueError is conventional); preserved as-is.
        raise SyntaxError(
            'Rays are required as input if unscented_scale_mult > 0.'
        )
    # Mimic the math used by hexify to produce comparable scales: project
    # each control point onto its ray to recover a per-point distance.
    dists_along_ray = jnp.sum(
        (control - rays.origins[Ellipsis, None, None, :])
        * rays.directions[Ellipsis, None, None, :],
        axis=-1,
    )
    perp_mag = jnp.sqrt(0.5) * rays.radii[Ellipsis, None, :] * dists_along_ray
    return control, perp_mag
|
evocodebench_data_4
|
import sqlite3
import hashlib
import json
import functools
## Originally from https://www.kevinkatz.io/posts/memoize-to-sqlite
def memoize_to_sqlite(func_name: str, filename: str = "cache.db"):
    """
    Memoization decorator that caches the output of a method in a SQLite
    database.
    """
    def decorator(func):
        @functools.wraps(func)
        def wrapped(*args, **kwargs):
            # A fresh connection per call: nothing is held open between
            # invocations, and the context manager guarantees cleanup.
            memoizer = SQLiteMemoization(filename)
            with memoizer:
                return memoizer.fetch_or_compute(
                    func, func_name, *args, **kwargs
                )
        return wrapped
    return decorator
class SQLiteMemoization:
    """Context manager that memoizes function results in a SQLite database.

    Results are keyed by a SHA-256 hash of the function name and the repr of
    its arguments, and stored JSON-encoded — so only JSON-serializable return
    values are supported.
    """

    # Sentinel distinguishing "no cache entry" from a cached None/falsy value.
    _MISSING = object()

    def __init__(self, filename):
        self.filename = filename
        self.connection = None

    def __enter__(self):
        self.connection = sqlite3.connect(self.filename)
        self._initialize_database()
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        self.connection.close()
        self.connection = None

    def _initialize_database(self):
        # Idempotent schema creation; `hash` is the primary key.
        self.connection.execute(
            "CREATE TABLE IF NOT EXISTS cache (hash TEXT PRIMARY KEY, result TEXT)"
        )
        self.connection.execute(
            "CREATE INDEX IF NOT EXISTS cache_ndx ON cache(hash)"
        )

    def fetch_or_compute(self, func, func_name, *args, **kwargs):
        """Return the cached result for this call, computing it if absent."""
        arg_hash = self._compute_hash(func_name, *args, **kwargs)
        result = self._fetch_from_cache(arg_hash)
        # Compare against the sentinel rather than None so cached None/falsy
        # results are honored. Previously a cached None was treated as a
        # miss, recomputed, and re-INSERTed under the same primary key,
        # raising sqlite3.IntegrityError on the second call.
        if result is not self._MISSING:
            return result
        return self._compute_and_cache_result(func, arg_hash, *args, **kwargs)

    def _compute_hash(self, func_name, *args, **kwargs):
        # repr() of args/kwargs forms the key; assumes deterministic reprs.
        data = f"{func_name}:{repr(args)}:{repr(kwargs)}".encode("utf-8")
        return hashlib.sha256(data).hexdigest()

    def _fetch_from_cache(self, arg_hash):
        cursor = self.connection.cursor()
        cursor.execute("SELECT result FROM cache WHERE hash = ?", (arg_hash,))
        row = cursor.fetchone()
        # _MISSING signals "no row", distinct from a stored JSON null.
        return json.loads(row[0]) if row else self._MISSING

    def _compute_and_cache_result(self, func, arg_hash, *args, **kwargs):
        result = func(*args, **kwargs)
        self._cache_result(arg_hash, result)
        return result

    def _cache_result(self, arg_hash, result):
        cursor = self.connection.cursor()
        # INSERT OR REPLACE keeps the write idempotent if the same call is
        # ever cached twice (e.g. by concurrent processes).
        cursor.execute(
            "INSERT OR REPLACE INTO cache (hash, result) VALUES (?, ?)",
            (arg_hash, json.dumps(result)),
        )
        self.connection.commit()
|
evocodebench_data_5
|
from typing import Any, Callable, Dict, Iterable, List
import numpy as np
from pydantic import fields
# ----- validators -----
def is_odd(cls: type, v: int, field: fields.ModelField) -> int:
    """Validate that a kernel-size value is an odd number.

    Args:
        cls (type): Class type.
        v (int): Value to check.
        field (fields.ModelField): Field descriptor.

    Raises:
        ValueError: Exception raised if number isn't odd.

    Returns:
        int: `v` sent for further processing.
    """
    # `v % 2` is falsy exactly when the value is even.
    if not v % 2:
        raise ValueError(f"{cls.__name__}: {field.name} must be odd numbers.")
    return v
def is_binary(cls: type, v: np.ndarray, field: fields.ModelField) -> np.ndarray:
    """Validate that an array holds only boolean values, i.e. is binary.

    Args:
        cls (type): Class type.
        v (np.ndarray): Value to check.
        field (fields.ModelField): Field descriptor.

    Raises:
        ValueError: Exception raised if array doesn't contain bool datatypes.

    Returns:
        np.ndarray: `v` sent for further processing.
    """
    expected = np.dtype("bool")
    if v.dtype != expected:
        raise ValueError(f"{cls.__name__}: {field.name} must be binary. got dtype {v.dtype}")
    return v
def is_list_of_points(cls: type, v: np.ndarray, field: fields.ModelField) -> np.ndarray:
    """Validate that an array is a list of 2D points, i.e. has shape (_, 2).

    Args:
        cls (type): Class type.
        v (np.ndarray): Value to check.
        field (fields.ModelField): Field descriptor.

    Raises:
        ValueError: Exception raised if array doesn't contain 2D points.

    Returns:
        np.ndarray: `v` sent for further processing.
    """
    # Must be rank 2 with two coordinates per row.
    if v.ndim != 2 or v.shape[-1] != 2:
        raise ValueError(f"{cls.__name__}: {field.name} must have shape (_, 2).")
    return v
def is_not_empty(cls: type, v: List[Any], field: fields.ModelField) -> List[Any]:
    """Validate that the list is not empty.

    Args:
        cls (type): Class type.
        v (List[Any]): Value to check.
        field (fields.ModelField): Field descriptor.

    Raises:
        ValueError: Exception raised if list is empty.

    Returns:
        List[Any]: `v` sent for further processing.
    """
    if not v:
        raise ValueError(f"{cls.__name__}: {field.name} list cannot be empty.")
    return v
def is_not_zero_sum(cls: type, v: Any, field: fields.ModelField) -> Any:
    """Validate that the values do not sum to zero.

    Args:
        cls (type): Class type.
        v (Any): Value to check.
        field (fields.ModelField): Field descriptor.

    Raises:
        ValueError: Raised if v doesn't sum to 0.

    Returns:
        Any: `v` sent for further processing.
    """
    total = np.sum(v)
    if total == 0:
        raise ValueError(f"{cls.__name__}: {field.name} sum cannot be zero.")
    return v
def are_all_positive(cls: type, v: Any, field: fields.ModelField) -> Any:
    """Validate that all values are positive (i.e. non-negative).

    Args:
        cls (type): Class type.
        v (Any): Value to check.
        field (fields.ModelField): Field descriptor.

    Raises:
        ValueError: Raise if not all values in are positive.

    Returns:
        Any: `v` sent for further processing.
    """
    if isinstance(v, Iterable):
        # Element-wise check; mirrors the scalar branch below.
        if not all(value >= 0 for value in v):
            raise ValueError(f"{cls.__name__}: all {field.name} must be positive. Received {v}")
    elif v < 0.0:
        raise ValueError(f"{cls.__name__}: {field.name} must be positive. Received {v}")
    return v
def to_dtype_float32(cls: type, v: np.ndarray, field: fields.ModelField) -> np.ndarray:
    """Convert the input np.ndarray to dtype np.float32.

    Args:
        cls (type): Class type.
        v (np.ndarray): Value to convert.
        field (fields.ModelField): Field descriptor.

    Returns:
        np.ndarray: `v` sent for further processing.
    """
    # astype always returns a new float32 copy.
    converted = v.astype(np.float32)
    return converted
# ----- root_validators -----
def is_valid_bbox(cls: type, values: Dict[str, float]) -> Dict[str, float]:
    """Root validator: check that the bounding box corners are ordered."""
    x_min, x_max = values["x_min"], values["x_max"]
    y_min, y_max = values["y_min"], values["y_max"]
    # A valid box has strictly positive width and height.
    if x_min >= x_max or y_min >= y_max:
        raise ValueError(
            f'{cls.__name__}: invalid bbox. x_min={values["x_min"]}, x_max={values["x_max"]},'
            f' y_min={values["y_min"]}, y_max={values["y_max"]}'
        )
    return values
# ----- parametrized validators -----
def is_array_n_dimensions(nb_dimensions: int) -> Callable:
    """Create a pydantic validator checking if an array is n-dimensional.

    Args:
        nb_dimensions (int): number of dimensions the array must have

    Returns:
        Callable: the validator.
    """
    def validator(cls: type, v: np.ndarray, field: fields.ModelField) -> np.ndarray:
        """Check if the array has the right number of dimensions."""
        wrong_ndim = len(v.shape) != nb_dimensions
        # An empty array of shape (0,) is accepted when 0 dims are expected.
        empty_zero_dim = v.shape == (0,) and nb_dimensions == 0
        if wrong_ndim and not empty_zero_dim:
            raise ValueError(
                f"{cls.__name__}: wrong number of dimensions for {field.name}. "
                f"Expected {nb_dimensions}, got {len(v.shape)}"
            )
        return v
    return validator
# ----- parametrized root_validators -----
def are_lengths_equal(field1: str, field2: str) -> Callable:
    """Create a pydantic root validator checking that two fields have equal length.

    Args:
        field1 (str): name of the first field
        field2 (str): name of the second field

    Returns:
        Callable: the validator.
    """
    def __root_validator(cls: type, values: Dict[str, List[Any]]) -> Dict[str, List[Any]]:
        """Check if len(field1) equals len(field2)."""
        len1 = len(values[field1])
        len2 = len(values[field2])
        if len1 != len2:
            raise ValueError(
                f"{cls.__name__}: {field1} and {field2} length mismatch, "
                f"resp. {len1} and {len2}"
            )
        return values
    return __root_validator
def are_shapes_equal(field1: str, field2: str) -> Callable:
    """Create a pydantic root validator checking that two fields share a shape.

    Args:
        field1 (str): name of the first field
        field2 (str): name of the second field

    Returns:
        Callable: the validator.
    """
    def __root_validator(cls: type, values: Dict[str, np.ndarray]) -> Dict[str, np.ndarray]:
        """Check if field1.shape equals field2.shape."""
        shape1 = values[field1].shape
        shape2 = values[field2].shape
        if shape1 != shape2:
            raise ValueError(f"{cls.__name__}: {field1} and {field2} shape mismatch.")
        return values
    return __root_validator
def are_all_shapes_equal(field1: str, field2: str) -> Callable:
    """Create a pydantic root validator comparing two lists of arrays shape-by-shape.

    The generated validator checks that both lists have the same length and
    that their elements have pairwise-identical shapes.

    Args:
        field1 (str): name of the first field
        field2 (str): name of the second field

    Returns:
        Callable: the validator.
    """
    def __root_validator(cls: type, values: Dict[str, np.ndarray]) -> Dict[str, np.ndarray]:
        """Check if len(field1) equals len(field2) and if every element have the same shape."""
        shapes_field_1 = [arr.shape for arr in values[field1]]
        shapes_field_2 = [arr.shape for arr in values[field2]]
        mismatch = (
            len(values[field1]) != len(values[field2])
            or shapes_field_1 != shapes_field_2
        )
        if mismatch:
            raise ValueError(
                f"{cls.__name__}: {field1} and {field2} shape mismatch, resp. {shapes_field_1} and {shapes_field_2}."
            )
        return values
    return __root_validator
|
evocodebench_data_6
|
# coding=utf-8
# Copyright 2023 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tools for constructing geodesic polyhedron, which are used as a basis."""
import itertools
import numpy as np
def compute_sq_dist(mat0, mat1=None):
    """Compute the squared Euclidean distance between all pairs of columns.

    Args:
        mat0: [d, m] array whose columns are points.
        mat1: optional [d, n] array of points; defaults to `mat0`.

    Returns:
        [m, n] array of pairwise squared distances, clamped to >= 0.
    """
    mat1 = mat0 if mat1 is None else mat1
    # ||x - y||^2 == ||x||^2 + ||y||^2 - 2 x^T y, without a pairwise loop.
    norms0 = (mat0**2).sum(axis=0)
    norms1 = (mat1**2).sum(axis=0)
    sq_dist = norms0[:, None] + norms1[None, :] - 2 * (mat0.T @ mat1)
    # Clamp tiny negatives caused by floating-point cancellation.
    return np.maximum(0, sq_dist)
def compute_tesselation_weights(v):
    """Tesselate the vertices of a triangle by a factor of `v`.

    Returns an array of barycentric weights with rows (i, j, v-i-j) / v for
    every non-negative integer pair (i, j) with i + j <= v.
    """
    if v < 1:
        raise ValueError(f'v {v} must be >= 1')
    int_weights = np.array(
        [(i, j, v - (i + j)) for i in range(v + 1) for j in range(v + 1 - i)]
    )
    return int_weights / v  # Barycentric weights.
def tesselate_geodesic(base_verts, base_faces, v, eps=1e-4):
    """Tesselate the vertices of a geodesic polyhedron.

    Args:
        base_verts: tensor of floats, the vertex coordinates of the geodesic.
        base_faces: tensor of ints, the indices of the vertices of base_verts
            that constitute each face of the polyhedra.
        v: int, the factor of the tesselation (v==1 is a no-op).
        eps: float, a small value used to determine if two vertices are the same.

    Returns:
        verts: a tensor of floats, the coordinates of the tesselated vertices.

    Raises:
        ValueError: If `v` is not an integer.
    """
    if not isinstance(v, int):
        # Fixed grammar of the error message ("must an integer").
        raise ValueError(f'v {v} must be an integer')
    tri_weights = compute_tesselation_weights(v)
    verts = []
    for base_face in base_faces:
        # Barycentric interpolation within each face, then projection of the
        # new vertices back onto the unit sphere.
        new_verts = np.matmul(tri_weights, base_verts[base_face, :])
        new_verts /= np.sqrt(np.sum(new_verts**2, 1, keepdims=True))
        verts.append(new_verts)
    verts = np.concatenate(verts, 0)
    # Deduplicate vertices shared by adjacent faces: assign each vertex to the
    # first vertex within eps of it, then keep one representative per group.
    sq_dist = compute_sq_dist(verts.T)
    assignment = np.array([np.min(np.argwhere(d <= eps)) for d in sq_dist])
    unique = np.unique(assignment)
    verts = verts[unique, :]
    return verts
def generate_basis(
    base_shape, angular_tesselation, remove_symmetries=True, eps=1e-4
):
    """Generates a 3D basis by tesselating a geometric polyhedron.

    Args:
        base_shape: string, the name of the starting polyhedron, must be either
            'tetrahedron', 'icosahedron' or 'octahedron'.
        angular_tesselation: int, the number of times to tesselate the polyhedron,
            must be >= 1 (a value of 1 is a no-op to the polyhedron).
        remove_symmetries: bool, if True then remove the symmetric basis columns,
            which is usually a good idea because otherwise projections onto the basis
            will have redundant negative copies of each other.
        eps: float, a small number used to determine symmetries.

    Returns:
        basis: a matrix with shape [3, n].
    """
    if base_shape == 'tetrahedron':
        # Regular tetrahedron with one vertex on the +z axis; all vertices
        # lie on the unit sphere.
        verts = np.array([
            (np.sqrt(8 / 9), 0, -1 / 3),
            (-np.sqrt(2 / 9), np.sqrt(2 / 3), -1 / 3),
            (-np.sqrt(2 / 9), -np.sqrt(2 / 3), -1 / 3),
            (0, 0, 1),
        ])
        faces = np.array([(0, 1, 2), (0, 2, 3), (0, 1, 3), (1, 2, 3)])
    elif base_shape == 'icosahedron':
        # Golden-ratio construction of the 12 icosahedron vertices, scaled
        # onto the unit sphere by 1/sqrt(a + 2).
        a = (np.sqrt(5) + 1) / 2
        verts = np.array([
            (-1, 0, a),
            (1, 0, a),
            (-1, 0, -a),
            (1, 0, -a),
            (0, a, 1),
            (0, a, -1),
            (0, -a, 1),
            (0, -a, -1),
            (a, 1, 0),
            (-a, 1, 0),
            (a, -1, 0),
            (-a, -1, 0),
        ]) / np.sqrt(a + 2)
        faces = np.array([
            (0, 4, 1),
            (0, 9, 4),
            (9, 5, 4),
            (4, 5, 8),
            (4, 8, 1),
            (8, 10, 1),
            (8, 3, 10),
            (5, 3, 8),
            (5, 2, 3),
            (2, 7, 3),
            (7, 10, 3),
            (7, 6, 10),
            (7, 11, 6),
            (11, 0, 6),
            (0, 1, 6),
            (6, 1, 10),
            (9, 0, 11),
            (9, 11, 2),
            (9, 2, 5),
            (7, 2, 11),
        ])
    elif base_shape == 'octahedron':
        verts = np.array(
            [(0, 0, -1), (0, 0, 1), (0, -1, 0), (0, 1, 0), (-1, 0, 0), (1, 0, 0)]
        )
        # Recover the 8 triangular faces from the cube-corner directions:
        # each corner sits at squared distance 2 from exactly the 3 vertices
        # of one face.
        corners = np.array(list(itertools.product([-1, 1], repeat=3)))
        pairs = np.argwhere(compute_sq_dist(corners.T, verts.T) == 2)
        faces = np.sort(np.reshape(pairs[:, 1], [3, -1]).T, 1)
    else:
        raise ValueError(f'base_shape {base_shape} not supported')
    verts = tesselate_geodesic(verts, faces, angular_tesselation)
    if remove_symmetries:
        # Remove elements of `verts` that are reflections of each other.
        match = compute_sq_dist(verts.T, -verts.T) < eps
        verts = verts[~np.any(np.triu(match), axis=0), :]
    # Reverse the coordinate order (z, y, x) to form the basis columns.
    basis = verts[:, ::-1]
    return basis
|
evocodebench_data_7
|
# Copyright The Lightning AI team.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import hashlib
import os
from logging import Logger
from time import time
from typing import Any, Dict, List, Optional, Tuple, Union
import numpy as np
from torch.utils.data import IterableDataset
from litdata.constants import (
_DEFAULT_CACHE_DIR,
_INDEX_FILENAME,
)
from litdata.streaming import Cache
from litdata.streaming.item_loader import BaseItemLoader
from litdata.streaming.resolver import Dir, _resolve_dir
from litdata.streaming.sampler import ChunkedIndex
from litdata.streaming.serializers import Serializer
from litdata.streaming.shuffle import FullShuffle, NoShuffle, Shuffle
from litdata.utilities.env import _DistributedEnv, _is_in_dataloader_worker, _WorkerEnv
# NOTE(review): `Logger(__name__)` constructs a Logger directly, bypassing the
# logging hierarchy (no handlers/levels inherited from configuration);
# presumably `logging.getLogger(__name__)` was intended — confirm before changing.
logger = Logger(__name__)
class StreamingDataset(IterableDataset):
    """The streaming dataset can be used once your data have been optimised using the DatasetOptimiser class."""

    def __init__(
        self,
        input_dir: Union[str, "Dir"],
        item_loader: Optional[BaseItemLoader] = None,
        shuffle: bool = False,
        drop_last: Optional[bool] = None,
        seed: int = 42,
        serializers: Optional[Dict[str, Serializer]] = None,
        max_cache_size: Union[int, str] = "100GB",
    ) -> None:
        """The streaming dataset can be used once your data have been optimised using the DatasetOptimiser class.

        Arguments:
            input_dir: Path to the folder where the input data is stored.
            item_loader: The logic to load an item from a chunk.
            shuffle: Whether to shuffle the data.
            drop_last: If `True`, drops the last items to ensure that
                all processes/workers return the same amount of data.
                The argument `drop_last` is set to `True` in a distributed setting
                and `False` otherwise.
            seed: Random seed for shuffling.
            serializers: The serializers used to serialize and deserialize the chunks.
            max_cache_size: The maximum cache size used by the StreamingDataset.
        """
        super().__init__()
        if not isinstance(shuffle, bool):
            raise ValueError(f"Shuffle should be a boolean. Found {shuffle}")
        input_dir = _resolve_dir(input_dir)
        self.input_dir = input_dir
        self.item_loader = item_loader
        self.shuffle: bool = shuffle
        self.distributed_env = _DistributedEnv.detect()
        if self.distributed_env.world_size > 1:
            # In a distributed run, drop_last defaults to True so every rank
            # yields the same number of samples and collectives stay in sync.
            if drop_last is False:
                # NOTE(review): `logger.warn` is a deprecated alias of
                # `logger.warning`.
                logger.warn(
                    "You're operating within a distributed environment and have disabled the `drop_last` option. "
                    "Please note that this configuration may lead to training interruptions if your system depends "
                    "on distributed collectives."
                )
            else:
                drop_last = True
        self.drop_last = drop_last or False
        self.seed = seed
        self.max_cache_size = max_cache_size
        # Lazily-created per-worker state; populated in __iter__/__getitem__.
        self.cache: Optional[Cache] = None
        self.worker_env: Optional[_WorkerEnv] = None
        self.worker_chunks: List[int] = []
        self.worker_intervals: List[List[int]] = []
        self.current_indexes: List[int] = []
        self.chunk_index = 0
        self.num_chunks: Optional[int] = None
        self.global_index = 0
        self.index = 0
        self.has_triggered_download = False
        self.min_items_per_replica: Optional[int] = None
        self.current_epoch = 1
        self.random_state = None
        self.shuffler: Optional[Shuffle] = None
        self.serializers = serializers
        # Populated by load_state_dict() when resuming from a checkpoint.
        self._state_dict: Optional[Dict[str, Any]] = None

    def set_shuffle(self, shuffle: bool) -> None:
        self.shuffle = shuffle

    def set_epoch(self, current_epoch: int) -> None:
        """Set the current epoch to the dataset on epoch starts.

        When using the StreamingDataLoader, this is done automatically
        """
        # If the state dict has been reloaded, don't override the current epoch
        # The StreamingDataloader would clean this out
        if self._state_dict is None:
            self.current_epoch = current_epoch

    def _create_cache(self, worker_env: _WorkerEnv) -> Cache:
        # Special paths (e.g. /teamspace/datasets/...) are redirected to a
        # local cache directory derived from the input dir's hash.
        if _should_replace_path(self.input_dir.path):
            cache_path = _try_create_cache_dir(
                input_dir=self.input_dir.path if self.input_dir.path else self.input_dir.url
            )
            if cache_path is not None:
                self.input_dir.path = cache_path
        cache = Cache(
            input_dir=self.input_dir,
            item_loader=self.item_loader,
            chunk_bytes=1,
            serializers=self.serializers,
            max_cache_size=self.max_cache_size,
        )
        cache._reader._try_load_config()
        if not cache.filled:
            raise ValueError(
                f"The provided dataset `{self.input_dir}` doesn't contain any {_INDEX_FILENAME} file."
                " HINT: Did you successfully optimize a dataset to the provided `input_dir`?"
            )
        return cache

    def _create_shuffler(self, cache: Cache) -> Shuffle:
        # When resuming, the checkpointed seed/drop_last take precedence so
        # the shuffling order is reproduced exactly.
        seed = self.seed
        drop_last = self.drop_last
        if self._state_dict is not None:
            state: Dict[str, Any] = self._state_dict
            seed = state["seed"]
            drop_last = state["drop_last"]
        return FullShuffle(cache, seed, drop_last) if self.shuffle else NoShuffle(cache, seed, drop_last)

    def __len__(self) -> int:
        if self.shuffler is None:
            cache = self._create_cache(worker_env=_WorkerEnv.detect())
            self.shuffler = self._create_shuffler(cache)
        return self.shuffler.get_len(self.distributed_env, self.current_epoch)

    def __iter__(self) -> "StreamingDataset":
        # When the StreamingDataset is used within map or optimize, let's refetch the distributed env.
        if os.getenv("DATA_OPTIMIZER_GLOBAL_RANK"):
            self.distributed_env = _DistributedEnv.detect()
        self.worker_env = _WorkerEnv.detect()
        self.cache = self._create_cache(worker_env=self.worker_env)
        self.shuffler = self._create_shuffler(self.cache)
        # Handle restart
        if self._state_dict:
            self._validate_state_dict()
            state: Dict[str, Any] = self._state_dict
            self.current_epoch = state["current_epoch"]
            chunks_per_replica, intervals_per_replica = self.shuffler.get_chunks_and_intervals_per_ranks(
                self.distributed_env, self.current_epoch
            )
            chunks_replica = chunks_per_replica[self.distributed_env.global_rank % self.distributed_env.world_size]
            intervals_replica = intervals_per_replica[self.distributed_env.global_rank % self.distributed_env.world_size]
        # Handle restart
        if self._state_dict:
            self._resume(chunks_replica, intervals_replica)
        else:
            # Fresh epoch: compute this replica's chunks and assign them to
            # the dataloader workers round-robin.
            chunks_per_replica, intervals_per_replica = self.shuffler.get_chunks_and_intervals_per_ranks(
                self.distributed_env, self.current_epoch
            )
            chunks_replica = chunks_per_replica[self.distributed_env.global_rank % self.distributed_env.world_size]
            intervals_replica = intervals_per_replica[
                self.distributed_env.global_rank % self.distributed_env.world_size
            ]
            self.worker_chunks = []
            self.worker_intervals = []
            for i, (chunk_index, chunk_interval) in enumerate(zip(chunks_replica, intervals_replica)):
                if i % self.worker_env.world_size != self.worker_env.rank:
                    continue
                self.worker_chunks.append(chunk_index)
                self.worker_intervals.append(chunk_interval)
            self.num_chunks = len(self.worker_chunks)
            self.current_indexes = []
            self.chunk_index = 0
            self.global_index = 0
            self.index = 0
        self.has_triggered_download = False
        self.last_time = time()
        return self

    def _resume(self, chunks_replica: List[int], intervals_replica: List[Any]) -> None:
        assert self._state_dict
        assert self.worker_env
        assert self.shuffler
        state: Dict[str, Any] = self._state_dict
        num_workers = state["num_workers"]
        batch_size = state["batch_size"]
        # TODO: Implement elastic sampling where the number of workers, ranks can change.
        num_samples_yielded = self._state_dict["num_samples_yielded"]
        # replay sampling from each worker / chunks using the batch size
        workers_chunks, workers_intervals = _associate_chunks_to_workers(
            num_workers, self.worker_env, chunks_replica, intervals_replica
        )
        indexes = _replay_sampling(num_samples_yielded, batch_size, num_workers)
        chunks_index, indexes = _replay_chunks_sampling(workers_intervals, indexes)
        # select the chunks and intervals associated to this worker
        worker_rank = self.worker_env.rank
        self.num_chunks = len(workers_intervals[worker_rank])
        self.chunk_index = chunks_index[worker_rank]
        self.worker_chunks = workers_chunks[worker_rank]
        self.worker_intervals = workers_intervals[worker_rank]
        # replay the indexes for the current chunks
        interval = self.worker_intervals[self.chunk_index]
        current_indexes = np.arange(interval[0], interval[1])
        # re-shuffle the indexes
        current_indexes = self.shuffler(current_indexes, self.num_chunks, self.current_epoch, self.chunk_index)
        # skip any indexes already consumed
        current_indexes = current_indexes[indexes[worker_rank] :]
        self.current_indexes = current_indexes
        self.global_index = num_samples_yielded
        # bump the chunk_index
        self.chunk_index += 1

    def __getitem__(self, index: Union[ChunkedIndex, int]) -> Any:
        # Lazily initialize cache/shuffler for map-style access outside of
        # __iter__ (e.g. random access by integer index).
        if self.cache is None:
            self.worker_env = _WorkerEnv.detect()
            self.cache = self._create_cache(worker_env=self.worker_env)
            self.shuffler = self._create_shuffler(self.cache)
        if isinstance(index, int):
            index = ChunkedIndex(index, self.cache._get_chunk_index_from_index(index))
        return self.cache[index]

    def __next__(self) -> Any:
        # Prevent to create more batch on a given process
        if self.global_index >= len(self):
            self.current_epoch += 1
            raise StopIteration
        # Lazily re-populate the interval to reduce memory usage.
        if len(self.current_indexes) == 0:
            if self.chunk_index == self.num_chunks:
                self.current_epoch += 1
                raise StopIteration
            # reset index
            self.index = 0
            interval = self.worker_intervals[self.chunk_index]
            current_indexes = np.arange(interval[0], interval[1])
            assert self.shuffler is not None
            assert self.num_chunks is not None
            self.current_indexes = self.shuffler(current_indexes, self.num_chunks, self.current_epoch, self.chunk_index)
            self.chunk_index += 1
        # Get the first index
        index = self.current_indexes.pop(0)
        # Call the `__getitem__` method.
        data = self.__getitem__(
            ChunkedIndex(
                index=index,
                chunk_index=self.worker_chunks[self.chunk_index - 1],
                # We provide the chunks indexes only one the first
                chunk_indexes=None if self.has_triggered_download else self.worker_chunks,
                # NOTE(review): `self.chunk_index - 1` ranges over
                # 0..len(worker_intervals)-1, so comparing it to
                # len(self.worker_intervals) (not len - 1) looks like it can
                # never be True — confirm against upstream litdata.
                is_last_index=(self.chunk_index - 1) == len(self.worker_intervals) and len(self.current_indexes) == 1,
            )
        )
        self.has_triggered_download = True
        self.global_index += 1
        self.index += 1
        return data

    def state_dict(self, num_samples_yielded: int, num_workers: int, batch_size: int) -> Dict[str, Any]:
        if _is_in_dataloader_worker():
            raise RuntimeError("The method `state_dict` should only be called in the main process.")
        # If a state was loaded, only refresh the sample counter.
        if self._state_dict is not None:
            self._state_dict["num_samples_yielded"] = num_samples_yielded
            return self._state_dict
        state = {
            "num_samples_yielded": num_samples_yielded,
            "num_workers": num_workers,
            "batch_size": batch_size,
            "current_epoch": self.current_epoch,
            "input_dir_path": self.input_dir.path,
            "input_dir_url": self.input_dir.url,
            "item_loader": self.item_loader.state_dict() if self.item_loader else None,
            "drop_last": self.drop_last,
            "seed": self.seed,
            "world_size": self.distributed_env.world_size,
            "shuffle": self.shuffle,
        }
        return state

    def load_state_dict(self, state_dict: Dict[str, Any]) -> None:
        if state_dict:
            # the state is restored within the workers
            self._state_dict = state_dict

    def _validate_state_dict(self) -> None:
        """Raise ValueError if the loaded state is incompatible with the current configuration."""
        assert self._state_dict
        assert self.worker_env
        assert self.cache
        state: Dict[str, Any] = self._state_dict
        if state["shuffle"] != self.shuffle:
            raise ValueError(
                "The provided `shuffle` state doesn't match the current one. "
                f"Found `{self.shuffle}` instead of `{state['shuffle']}`."
            )
        if state["num_workers"] != self.worker_env.world_size:
            raise ValueError(
                "The provided `num_workers` state doesn't match the current one. "
                f"Found `{self.worker_env.world_size}` instead of `{state['num_workers']}`."
            )
        # Note: We need to check whether the path has been resolved to its associated cache.
        # In this case, validate the cache folder is the same.
        if _should_replace_path(state["input_dir_path"]):
            cache_path = _try_create_cache_dir(
                input_dir=state["input_dir_path"] if state["input_dir_path"] else state["input_dir_url"]
            )
            if cache_path != self.input_dir.path:
                raise ValueError(
                    "The provided `input_dir` path state doesn't match the current one. "
                    f"Found `{self.input_dir.path}` instead of `{cache_path}`."
                )
        elif state["input_dir_path"] != self.input_dir.path:
            raise ValueError(
                "The provided `input_dir` path state doesn't match the current one. "
                f"Found `{self.input_dir.path}` instead of `{state['input_dir_path']}`."
            )
        if state["input_dir_url"] != self.input_dir.url:
            raise ValueError(
                "The provided `input_dir` URL state doesn't match the current one. "
                f"Found `{self.input_dir.url}` instead of `{state['input_dir_url']}`."
            )
        if state["seed"] != self.seed:
            raise ValueError(
                "The provided `seed` state doesn't match the current one. "
                f"Found `{self.seed}` instead of `{state['seed']}`."
            )
        if self.item_loader and state["item_loader"] != self.item_loader.state_dict():
            raise ValueError(
                "The provided `item_loader` state doesn't match the current one. "
                f"Found `{self.item_loader.state_dict()}` instead of `{state['item_loader']}`."
            )
        if state["drop_last"] != self.drop_last:
            raise ValueError(
                "The provided `drop_last` state doesn't match the current one. "
                f"Found `{self.drop_last}` instead of `{state['drop_last']}`."
            )
def _try_create_cache_dir(input_dir: Optional[str]) -> Optional[str]:
    """Create (if needed) and return a cache directory derived from `input_dir`.

    The directory name is the MD5 hash of the input dir; the root depends on
    whether the process runs on the Lightning cloud (both cluster env vars set).
    """
    dir_hash = hashlib.md5((input_dir or "").encode()).hexdigest()
    on_lightning_cloud = (
        "LIGHTNING_CLUSTER_ID" in os.environ and "LIGHTNING_CLOUD_PROJECT_ID" in os.environ
    )
    root = os.path.join("/cache", "chunks") if on_lightning_cloud else _DEFAULT_CACHE_DIR
    cache_dir = os.path.join(root, dir_hash)
    os.makedirs(cache_dir, exist_ok=True)
    return cache_dir
def _should_replace_path(path: Optional[str]) -> bool:
"""Whether the input path is a special path to be replaced."""
if path is None or path == "":
return True
return path.startswith("/teamspace/datasets/") or path.startswith("/teamspace/s3_connections/")
def is_integer(value: str) -> bool:
    """Return True if `value` parses as a base-10 integer, False otherwise."""
    try:
        int(value)
        return True
    except (TypeError, ValueError):
        # Narrowed from a bare `except Exception`, which also hid unrelated
        # failures; int() raises only these two for bad input.
        return False
def _associate_chunks_to_workers(
    num_workers: int, worker_env: _WorkerEnv, chunks_replica: List[int], intervals_replica: List[Any]
) -> Any:
    """Round-robin assign the replica's chunks (and their intervals) to workers."""
    workers_chunks = {}
    workers_intervals = {}
    world_size = worker_env.world_size
    for worker_idx in range(num_workers):
        # Worker `worker_idx` takes every world_size-th chunk, offset by its index.
        assigned = [
            (chunk, interval)
            for i, (chunk, interval) in enumerate(zip(chunks_replica, intervals_replica))
            if i % world_size == worker_idx
        ]
        workers_chunks[worker_idx] = [chunk for chunk, _ in assigned]
        workers_intervals[worker_idx] = [interval for _, interval in assigned]
    return workers_chunks, workers_intervals
def _replay_sampling(num_samples_yielded: int, batch_size: int, num_workers: int) -> Dict[int, int]:
"""This function replays the sampling from the dataloader."""
divisible_num_batches_yielded = num_samples_yielded // (num_workers * batch_size)
indexes = {}
for worker_idx in range(num_workers):
indexes[worker_idx] = divisible_num_batches_yielded * batch_size
num_samples_yielded = num_samples_yielded - (num_workers * divisible_num_batches_yielded * batch_size)
# take care of the reminder
worker_idx = 0 # reset the worker_idx
while True:
if num_samples_yielded >= batch_size:
indexes[worker_idx] += batch_size
worker_idx = (worker_idx + 1) % num_workers
num_samples_yielded -= batch_size
else:
indexes[worker_idx] += num_samples_yielded
break
return indexes
def _replay_chunks_sampling(
workers_intervals: Dict[int, List[Any]], indexes: Dict[int, int]
) -> Tuple[Dict[int, int], Dict[int, int]]:
chunks_index = {}
for worker_idx in range(len(workers_intervals)):
chunks_index[worker_idx] = 0
for worker_idx, intervals in workers_intervals.items():
for interval in intervals:
size = interval[-1] - interval[0]
if indexes[worker_idx] >= size:
indexes[worker_idx] -= size
chunks_index[worker_idx] += 1
return chunks_index, indexes
|
evocodebench_data_8
|
"""Tools module"""
# Copyright (c) 2023
# Author: Hugo Delatte <delatte.hugo@gmail.com>
# License: BSD 3 clause
# Implementation derived from:
# scikit-learn, Copyright (c) 2007-2010 David Cournapeau, Fabian Pedregosa, Olivier
# Grisel Licensed under BSD 3 clause.
from collections.abc import Callable, Iterator
from enum import Enum
from functools import wraps
import numpy as np
import numpy.typing as npt
import pandas as pd
import sklearn as sk
import sklearn.base as skb
__all__ = [
"AutoEnum",
"cached_property_slots",
"cache_method",
"input_to_array",
"args_names",
"format_measure",
"bisection",
"safe_split",
"fit_single_estimator",
"fit_and_predict",
"deduplicate_names",
"default_asset_names",
"check_estimator",
]
GenericAlias = type(list[int])
class AutoEnum(str, Enum):
    """Base Enum class used in `skfolio`"""

    @staticmethod
    def _generate_next_value_(
        name: str, start: int, count: int, last_values: any
    ) -> str:
        """Overriding `auto()`"""
        # Members declared with auto() take their lowercased name as value.
        return name.lower()

    @classmethod
    def has(cls, value: str) -> bool:
        """Check if a value is in the Enum.

        Parameters
        ----------
        value : str
            Input value.

        Returns
        -------
        x : bool
            True if the value is in the Enum, False otherwise.
        """
        # _value2member_map_ maps member values to members; membership in
        # its keys is exactly "is this a valid enum value".
        return value in cls._value2member_map_

    def __repr__(self) -> str:
        """Representation of the Enum"""
        return self.name
# noinspection PyPep8Naming
class cached_property_slots:
    """Read-only cached property for classes that define ``__slots__``.

    Works like ``functools.cached_property`` but stores the computed value in
    a slot named ``_<property name>`` instead of the instance ``__dict__``
    (which slotted classes lack); the owning class must declare that slot.
    """

    def __init__(self, func):
        self.func = func
        self.public_name = None
        self.private_name = None
        self.__doc__ = func.__doc__

    def __set_name__(self, owner, name):
        # Called by the class machinery; remember both the public attribute
        # name and the private slot used for the cached value.
        self.public_name = name
        self.private_name = f"_{name}"

    def __get__(self, instance, owner=None):
        if instance is None:
            return self
        if self.private_name is None:
            raise TypeError(
                "Cannot use cached_property instance without calling __set_name__"
                " on it."
            )
        try:
            value = getattr(instance, self.private_name)
        except AttributeError:
            # First access: compute and cache into the private slot.
            value = self.func(instance)
            setattr(instance, self.private_name, value)
        return value

    def __set__(self, instance, value=None):
        # The descriptor protocol passes the assigned value as the second
        # positional argument (it was previously misnamed `owner`); any
        # assignment is rejected — the property is read-only.
        raise AttributeError(
            f"'{type(instance).__name__}' object attribute '{self.public_name}' is"
            " read-only"
        )

    __class_getitem__ = classmethod(GenericAlias)
def _make_key(args, kwds) -> int:
"""Make a cache key from optionally typed positional and keyword arguments"""
key = args
if kwds:
for item in kwds.items():
key += item
return hash(key)
def cache_method(cache_name: str) -> Callable:
    """Decorator that caches class methods results into a class dictionary.

    Parameters
    ----------
    cache_name : str
        Name of the dictionary class attribute.

    Returns
    -------
    func : Callable
        Decorating function that caches class methods.
    """
    # `self` is deliberately excluded from the cache key: keying on the
    # instance (as functools.lru_cache would) prevents garbage collection
    # and leaks memory.
    def decorating_function(method):
        @wraps(method)
        def wrapper(self, *args, **kwargs):
            key = _make_key(args, kwargs)
            try:
                cache = getattr(self, cache_name)
            except AttributeError:
                raise AttributeError(
                    "You first need to create a dictionary class attribute named "
                    f"'{cache_name}'"
                ) from None
            if not isinstance(cache, dict):
                raise AttributeError(
                    f"'The cache named '{cache_name}' must be a "
                    f"dictionary, got {type(cache)}"
                )
            # One sub-dictionary per method name, created lazily.
            method_results = cache.setdefault(method.__name__, {})
            if key not in method_results:
                method_results[key] = method(self, *args, **kwargs)
            return method_results[key]

        return wrapper

    return decorating_function
def args_names(func: object) -> list[str]:
    """Returns the argument names of a function.

    Parameters
    ----------
    func : object
        Function.

    Returns
    -------
    args : list[str]
        The list of function arguments.
    """
    code = func.__code__
    # Only the declared positional arguments, with `self` filtered out.
    return [name for name in code.co_varnames[: code.co_argcount] if name != "self"]
def check_estimator(
    estimator: skb.BaseEstimator | None, default: skb.BaseEstimator, check_type: any
):
    """Check the estimator type and returns its cloned version it provided, otherwise
    return the default estimator.

    Parameters
    ----------
    estimator : BaseEstimator, optional
        Estimator.
    default : BaseEstimator
        Default estimator to return when `estimator` is `None`.
    check_type : any
        Expected type of the estimator to check against.

    Returns
    -------
    estimator: Estimator
        The checked estimator or the default.
    """
    if estimator is None:
        return default
    if isinstance(estimator, check_type):
        # Clone so the caller-provided estimator is never fitted in place.
        return sk.clone(estimator)
    raise TypeError(f"Expected type {check_type}, got {type(estimator)}")
def input_to_array(
items: dict | npt.ArrayLike,
n_assets: int,
fill_value: any,
dim: int,
assets_names: np.ndarray | None,
name: str,
) -> np.ndarray:
"""Convert a collection of items (array-like or dictionary) into
a numpy array and verify its shape.
Parameters
----------
items : np.ndarray | dict | list
Items to verify and convert to array.
n_assets : int
Expected number of assets.
Used to verify the shape of the converted array.
fill_value : any
When `items` is a dictionary, elements that are not in `asset_names` are filled
with `fill_value` in the converted array.
dim : int
Dimension of the final array.
Possible values are `1` or `2`.
assets_names : ndarray, optional
Asset names used when `items` is a dictionary.
name : str
Name of the items used for error messages.
Returns
-------
values : ndarray of shape (n_assets) for dim=1 or (n_groups, n_assets) for dim=2
Converted array.
"""
if dim not in [1, 2]:
raise ValueError(f"dim must be 1 or 2, got {dim}")
if isinstance(items, dict):
if assets_names is None:
raise ValueError(
f"If `{name}` is provided as a dictionary, you must input `X` as a"
" DataFrame with assets names in columns"
)
if dim == 1:
arr = np.array([items.get(asset, fill_value) for asset in assets_names])
else:
# add assets and convert dict to ordered array
arr = {}
for asset in assets_names:
elem = items.get(asset)
if elem is None:
elem = [asset]
elif np.isscalar(elem):
elem = [asset, elem]
else:
elem = [asset, *elem]
arr[asset] = elem
arr = (
pd.DataFrame.from_dict(arr, orient="index")
.loc[assets_names]
.to_numpy()
.T
)
else:
arr = np.asarray(items)
if arr.ndim != dim:
raise ValueError(f"`{name}` must be a {dim}D array, got a {arr.ndim}D array")
if not isinstance(fill_value, str) and np.isnan(arr).any():
raise ValueError(f"`{name}` contains NaN")
if arr.shape[-1] != n_assets:
if dim == 1:
s = "(n_assets,)"
else:
s = "(n_groups, n_assets)"
raise ValueError(
f"`{name}` must be a of shape {s} with n_assets={n_assets}, "
f"got {arr.shape[0]}"
)
return arr
def format_measure(x: float, percent: bool = False) -> str:
    """Format a measure number into a user-friendly string.

    Parameters
    ----------
    x : float
        Number to format.
    percent : bool, default=False
        If this is set to True, the number is formatted in percentage.

    Returns
    -------
    formatted : str
        Formatted string.
    """
    if np.isnan(x):
        return str(x)
    # Pick decimals from the magnitude of the *displayed* value.
    scaled, spec = (x * 100, "%") if percent else (x, "f")
    if scaled == 0:
        decimals = 0
    else:
        decimals = min(6, max(int(-np.log10(abs(scaled))) + 2, 2))
    return "{value:{fmt}}".format(value=x, fmt=f".{decimals}{spec}")
def bisection(x: list[np.ndarray]) -> Iterator[list[np.ndarray, np.ndarray]]:
    """Generator to bisect a list of array.

    Parameters
    ----------
    x : list[ndarray]
        A list of array.

    Yields
    ------
    arr : Iterator[list[ndarray, ndarray]]
        Bisected array.
    """
    for arr in x:
        size = len(arr)
        if size <= 1:
            # Singletons (and empty arrays) cannot be split further.
            continue
        half = size // 2
        yield [arr[:half], arr[half:]]
def safe_indexing(
X: npt.ArrayLike | pd.DataFrame, indices: npt.ArrayLike | None, axis: int = 0
):
"""
Return rows, items or columns of X using indices.
Parameters
----------
X : array-like
Data from which to sample rows.
indices : array-like, optional
Indices of rows or columns.
The default (`None`) is to select the entire data.
axis : int, default=0
The axis along which `X` will be sub-sampled. `axis=0` will select
rows while `axis=1` will select columns.
Returns
-------
subset :
Subset of X on axis 0.
"""
if indices is None:
return X
if hasattr(X, "iloc"):
return X.take(indices, axis=axis)
if axis == 0:
return X[indices]
return X[:, indices]
def safe_split(
    X: npt.ArrayLike,
    y: npt.ArrayLike | None = None,
    indices: np.ndarray | None = None,
    axis: int = 0,
):
    """Create subset of dataset.

    Slice X, y according to indices for cross-validation.

    Parameters
    ----------
    X : array-like
        Data to be indexed.
    y : array-like
        Data to be indexed.
    indices : ndarray of int, optional
        Rows or columns to select from X and y.
        The default (`None`) is to select the entire data.
    axis : int, default=0
        The axis along which `X` will be sub-sampled. `axis=0` will select
        rows while `axis=1` will select columns.

    Returns
    -------
    X_subset : array-like
        Indexed data.
    y_subset : array-like
        Indexed targets.
    """
    X_subset = safe_indexing(X, indices=indices, axis=axis)
    # `y` is optional; propagate None untouched.
    y_subset = None if y is None else safe_indexing(y, indices=indices, axis=axis)
    return X_subset, y_subset
def fit_single_estimator(
    estimator: any,
    X: npt.ArrayLike,
    y: npt.ArrayLike | None = None,
    indices: np.ndarray | None = None,
    axis: int = 0,
):
    """function used to fit an estimator within a job.

    Parameters
    ----------
    estimator : estimator object implementing 'fit' and 'predict'
        The object to use to fit the data.
    X : array-like of shape (n_observations, n_assets)
        The data to fit.
    y : array-like of shape (n_observations, n_targets), optional
        The target array if provided.
    indices : ndarray of int, optional
        Rows or columns to select from X and y.
        The default (`None`) is to select the entire data.
    axis : int, default=0
        The axis along which `X` will be sub-sampled. `axis=0` will select
        rows while `axis=1` will select columns.

    Returns
    -------
    fitted_estimator : estimator
        The fitted estimator.
    """
    # Sub-sample first, then fit in place on the selected data.
    X_sub, y_sub = safe_split(X, y, indices=indices, axis=axis)
    estimator.fit(X_sub, y_sub)
    return estimator
def fit_and_predict(
    estimator: any,
    X: npt.ArrayLike,
    y: npt.ArrayLike | None,
    train: np.ndarray,
    test: np.ndarray | list[np.ndarray],
    fit_params: dict,
    method: str,
    column_indices: np.ndarray | None = None,
) -> npt.ArrayLike | list[npt.ArrayLike]:
    """Fit the estimator and predict values for a given dataset split.

    Parameters
    ----------
    estimator : estimator object implementing 'fit' and 'predict'
        The object to use to fit the data.
    X : array-like of shape (n_observations, n_assets)
        The data to fit.
    y : array-like of shape (n_observations, n_factors) or None
        The factor array if provided
    train : ndarray of int of shape (n_train_observations,)
        Indices of training samples.
    test : ndarray of int of shape (n_test_samples,) or list of ndarray
        Indices of test samples or list of indices.
    fit_params : dict
        Parameters that will be passed to ``estimator.fit``.
    method : str
        Invokes the passed method name of the passed estimator.
    column_indices : ndarray, optional
        Indices of columns to select.
        The default (`None`) is to select all columns.

    Returns
    -------
    predictions : array-like or list of array-like
        If `test` is an array, it returns the array-like result of calling
        'estimator.method' on `test`.
        Otherwise, if `test` is a list of arrays, it returns the list of
        array-like results of calling 'estimator.method' on each test set in
        `test`.
    """
    if fit_params is None:
        fit_params = {}
    # Column sub-selection happens once, before any row split.
    X, y = safe_split(X, y, indices=column_indices, axis=1)
    X_train, y_train = safe_split(X, y, indices=train, axis=0)
    if y_train is None:
        estimator.fit(X_train, **fit_params)
    else:
        estimator.fit(X_train, y_train, **fit_params)
    predict = getattr(estimator, method)
    if isinstance(test, list):
        # One prediction per test split.
        return [predict(safe_split(X, indices=t, axis=0)[0]) for t in test]
    X_test, _ = safe_split(X, indices=test, axis=0)
    return predict(X_test)
def default_asset_names(n_assets: int) -> np.ndarray:
    """Default asset names are `["x0", "x1", ..., "x(n_assets - 1)"]`

    Parameters
    ----------
    n_assets : int
        Number of assets.

    Returns
    -------
    asset_names : ndarray of str
        Default assets names.
    """
    names = [f"x{i}" for i in range(n_assets)]
    # Object dtype keeps the names as Python strings.
    return np.asarray(names, dtype=object)
def deduplicate_names(names: npt.ArrayLike) -> list[str]:
    """Rename duplicated names by appending "_{duplicate_nb}" at the end.

    This function is inspired by the pandas function `_maybe_dedup_names`.

    Parameters
    ----------
    names : array-like of shape (n_names,)
        List of names.

    Returns
    -------
    names : list[str]
        Deduplicate names.
    """
    result = list(names)
    seen = {}
    for position, original in enumerate(result):
        n_prior = seen.get(original, 0)
        if n_prior:
            # Second and later occurrences get a numeric suffix.
            result[position] = f"{original}_{n_prior}"
        seen[original] = n_prior + 1
    return result
|
evocodebench_data_9
|
import json
import numpy as np
from agents.microagent import MicroAgent
class AgentSerializer:
    """Converts MicroAgent instances to and from plain dictionaries."""

    @staticmethod
    def to_dict(agent):
        """
        Serialize the MicroAgent object to a dictionary for persistence.
        """
        embedding = agent.purpose_embedding
        if isinstance(embedding, np.ndarray):
            # JSON cannot store ndarrays; persist as a plain list.
            embedding = embedding.tolist()
        data = {
            "dynamic_prompt": agent.dynamic_prompt,
            "purpose": agent.purpose,
            "purpose_embedding": embedding,
        }
        for field in (
            "depth",
            "max_depth",
            "usage_count",
            "id",
            "parent_id",
            "working_agent",
            "is_prime",
            "evolve_count",
            "number_of_code_executions",
            "last_input",
        ):
            data[field] = getattr(agent, field)
        return data

    @staticmethod
    def from_dict(data, agent_lifecycle, openai_wrapper):
        """
        Deserialize a dictionary back into a MicroAgent object.
        """
        agent = MicroAgent(
            data["dynamic_prompt"],
            data["purpose"],
            data["depth"],
            agent_lifecycle,
            openai_wrapper,
            data["max_depth"],
            data.get("working_agent", False),
            data.get("is_prime", False),
            id=data["id"],
            parent_id=data["parent_id"],
        )
        raw_embedding = data.get("purpose_embedding")
        # Restore the ndarray form; absent or None embeddings stay None.
        agent.purpose_embedding = (
            None if raw_embedding is None else np.array(raw_embedding)
        )
        agent.usage_count = data.get("usage_count", 0)
        agent.evolve_count = data.get("evolve_count", 0)
        agent.number_of_code_executions = data.get("number_of_code_executions", 0)
        agent.last_input = data.get("last_input", "")
        return agent
|
evocodebench_data_10
|
# coding=utf-8
# Copyright 2023 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Functions for processing images."""
import types
from typing import Optional, Union
import dm_pix
import jax
import jax.numpy as jnp
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
matplotlib.use('Agg')
_Array = Union[np.ndarray, jnp.ndarray]
def mse_to_psnr(mse):
  """Compute PSNR given an MSE (we assume the maximum pixel value is 1)."""
  # PSNR = -10 * log10(mse), written with natural logs.
  scale = -10.0 / jnp.log(10.0)
  return scale * jnp.log(mse)
def psnr_to_mse(psnr):
  """Compute MSE given a PSNR (we assume the maximum pixel value is 1)."""
  # Inverse of mse_to_psnr: mse = 10 ** (-psnr / 10).
  exponent = -0.1 * jnp.log(10.0) * psnr
  return jnp.exp(exponent)
def ssim_to_dssim(ssim):
  """Compute DSSIM given an SSIM."""
  return 0.5 * (1 - ssim)
def dssim_to_ssim(dssim):
  """Compute SSIM given a DSSIM (inverse of `ssim_to_dssim`)."""
  # Doc fix only: the old docstring was copy-pasted from ssim_to_dssim and
  # described the opposite conversion.
  return 1 - 2 * dssim
def linear_to_srgb(
    linear, eps = None, xnp = jnp
):
  """Assumes `linear` is in [0, 1], see https://en.wikipedia.org/wiki/SRGB."""
  if eps is None:
    eps = xnp.finfo(xnp.float32).eps
  # Linear segment near black; gamma curve elsewhere. The input to the
  # fractional power is clamped to eps to keep gradients finite at 0.
  low = 323 / 25 * linear
  high = (211 * xnp.maximum(eps, linear) ** (5 / 12) - 11) / 200
  return xnp.where(linear <= 0.0031308, low, high)
def srgb_to_linear(
    srgb, eps = None, xnp = jnp
):
  """Assumes `srgb` is in [0, 1], see https://en.wikipedia.org/wiki/SRGB."""
  if eps is None:
    eps = xnp.finfo(xnp.float32).eps
  # Linear segment near black; inverse gamma curve elsewhere, with the power
  # base clamped to eps.
  low = 25 / 323 * srgb
  high = xnp.maximum(eps, ((200 * srgb + 11) / (211))) ** (12 / 5)
  return xnp.where(srgb <= 0.04045, low, high)
def downsample(img, factor):
  """Area downsample img (factor must evenly divide img height and width)."""
  sh = img.shape
  if sh[0] % factor != 0 or sh[1] % factor != 0:
    raise ValueError(
        f'Downsampling factor {factor} does not '
        f'evenly divide image shape {sh[:2]}'
    )
  # Split each spatial axis into (blocks, factor) and average the factor axes.
  blocked = img.reshape((sh[0] // factor, factor, sh[1] // factor, factor) + sh[2:])
  return blocked.mean((1, 3))
def compute_vignette(coords, weights, powers=(1, 2, 3)):
  """Compute a vignetting as a polynomial function of image plane radius."""
  r2 = jnp.sum(jnp.square(coords), axis=-1)
  # One polynomial feature of the squared radius per entry of `powers`.
  features = r2[Ellipsis, None] ** jnp.array(powers)
  exponent = jnp.sum(jnp.abs(weights) * features[Ellipsis, None], axis=-2)
  return jnp.exp(-exponent)
def render_histogram(x, **kwargs):
  """Call pyplot's hist() and render it to a numpy buffer.

  Args:
    x: values to histogram, forwarded to `Axes.hist`.
    **kwargs: forwarded verbatim to `Axes.hist`.

  Returns:
    A (height, width, 3) uint8 RGB array of the rendered figure.
  """
  fig = plt.figure()
  fig.gca().hist(x, **kwargs)
  # Force a draw so the canvas buffer is populated before reading it back.
  fig.canvas.draw()
  # get_width_height() returns (width, height); reverse to (height, width).
  hw = fig.canvas.get_width_height()[::-1]
  # NOTE(review): `tostring_rgb` is deprecated/removed in newer Matplotlib
  # (`buffer_rgba` is the replacement) — confirm the pinned version supports it.
  buf = fig.canvas.tostring_rgb()
  array = np.frombuffer(buf, dtype=np.uint8).reshape(hw + (3,))
  # Close explicitly to avoid leaking figures when called repeatedly.
  plt.close(fig)
  return array
class MetricHarness:
  """A helper class for evaluating several error metrics."""

  def __init__(
      self,
      disable_ssim=False,
  ):
    # JIT-compile SSIM once at construction; `None` marks it as disabled.
    self.ssim_fn = None if disable_ssim else jax.jit(dm_pix.ssim)

  def __call__(self, rgb_pred, rgb_gt, name_fn=lambda s: s):
    """Evaluate the error between a predicted rgb image and the true image."""
    results = {'psnr': mse_to_psnr(((rgb_pred - rgb_gt) ** 2).mean())}
    if self.ssim_fn is not None:
      results['ssim'] = self.ssim_fn(rgb_pred, rgb_gt)
    # Apply the name function and cast all metrics down to a scalar float.
    return {name_fn(key): float(val) for (key, val) in results.items()}
|
evocodebench_data_11
|
# coding=utf-8
# Copyright 2023 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Camera pose and ray generation utility functions."""
import enum
import functools
import types
from typing import Final, List, Mapping, Optional, Text, Tuple, TypeAlias
from absl import logging
import chex
from internal import configs
from internal import geometry
from internal import math
from internal import rigid_body
from internal import spin_math
from internal import stepfun
from internal import utils
import jax
from jax import random
import jax.numpy as jnp
import jaxcam
import numpy as np
import scipy
_Array: TypeAlias = np.ndarray | jnp.ndarray
_ScalarArray: TypeAlias = float | _Array
_IDENTIFY_FILE_INDICES_MISSING_FRACTION_ERROR_THRESHOLD: Final[float] = 0.95
def convert_to_ndc(
    origins,
    directions,
    pixtocam,
    near = 1.0,
    xnp = np,
):
  """Converts a set of rays to normalized device coordinates (NDC).

  Args:
    origins: ndarray(float32), [..., 3], world space ray origins.
    directions: ndarray(float32), [..., 3], world space ray directions.
    pixtocam: ndarray(float32), [3, 3], inverse intrinsic matrix.
    near: float, near plane along the negative z axis.
    xnp: either numpy or jax.numpy.

  Returns:
    origins_ndc: ndarray(float32), [..., 3].
    directions_ndc: ndarray(float32), [..., 3].

  This function assumes input rays should be mapped into the NDC space for a
  perspective projection pinhole camera, with identity extrinsic matrix (pose)
  and intrinsic parameters defined by inputs focal, width, and height.

  The near value specifies the near plane of the frustum, and the far plane is
  assumed to be infinity.

  The ray bundle for the identity pose camera will be remapped to parallel rays
  within the (-1, -1, -1) to (1, 1, 1) cube. Any other ray in the original
  world space can be remapped as long as it has dz < 0 (ray direction has a
  negative z-coord); this allows us to share a common NDC space for "forward
  facing" scenes.

  Note that
      projection(origins + t * directions)
  will NOT be equal to
      origins_ndc + t * directions_ndc
  and that the directions_ndc are not unit length. Rather, directions_ndc is
  defined such that the valid near and far planes in NDC will be 0 and 1.

  See Appendix C in https://arxiv.org/abs/2003.08934 for additional details.
  """
  # Slide each origin along its ray onto the z = -near plane, which makes the
  # new near bound equal to 0.
  t_shift = -(near + origins[Ellipsis, 2]) / directions[Ellipsis, 2]
  origins = origins + t_shift[Ellipsis, None] * directions

  dx, dy, dz = xnp.moveaxis(directions, -1, 0)
  ox, oy, oz = xnp.moveaxis(origins, -1, 0)

  # Scale factors derived from the inverse intrinsics.
  xmult = 1.0 / pixtocam[0, 2]  # Equal to -2. * focal / cx
  ymult = 1.0 / pixtocam[1, 2]  # Equal to -2. * focal / cy

  def _project(px, py, pz, z_value):
    # Perspective-project a point and pin its NDC z-coordinate to z_value.
    return xnp.stack(
        [xmult * px / pz, ymult * py / pz, z_value * xnp.ones_like(oz)],
        axis=-1,
    )

  # NDC images of the t = 0 (near-plane) points and the t = infinity points;
  # the latter depend only on the ray directions.
  origins_ndc = _project(ox, oy, oz, -1.0)
  infinity_ndc = _project(dx, dy, dz, 1.0)

  # NDC directions span from the near plane (0) to the far plane (1).
  directions_ndc = infinity_ndc - origins_ndc
  return origins_ndc, directions_ndc
def pad_poses(p):
  """Pad [..., 3, 4] pose matrices with a homogeneous bottom row [0,0,0,1]."""
  row_shape = p[Ellipsis, :1, :4].shape
  bottom = np.broadcast_to([0, 0, 0, 1.0], row_shape)
  return np.concatenate([p[Ellipsis, :3, :4], bottom], axis=-2)
def unpad_poses(p):
  """Remove the homogeneous bottom row from [..., 4, 4] pose matrices."""
  return p[..., :3, :4]
def recenter_poses(poses):
  """Recenter poses around the origin."""
  # Invert the average camera-to-world transform so every pose is expressed
  # relative to the average camera.
  transform = np.linalg.inv(pad_poses(average_pose(poses)))
  recentered = transform @ pad_poses(poses)
  return unpad_poses(recentered), transform
def average_pose(poses, lock_up = False):
  """New pose using average position, z-axis, and up vector of input poses."""
  mean_position = poses[:, :3, 3].mean(0)
  mean_z = poses[:, :3, 2].mean(0)
  mean_up = poses[:, :3, 1].mean(0)
  # Re-orthogonalize the averaged axes into a valid lookat matrix.
  return viewmatrix(mean_z, mean_up, mean_position, lock_up=lock_up)
def viewmatrix(
    lookdir,
    up,
    position,
    lock_up = False,
):
  """Construct lookat view matrix."""
  def ortho(a, b):
    # Unit vector orthogonal to both inputs.
    return normalize(np.cross(a, b))

  axes = [None, normalize(up), normalize(lookdir)]
  # x-axis is always the normalized cross product of `lookdir` and `up`.
  axes[0] = ortho(axes[1], axes[2])
  # By default the lookdir (z) axis is kept and the up (y) axis is recomputed;
  # with lock_up the up axis is kept and the lookdir is recomputed instead.
  free_axis = 2 if lock_up else 1
  axes[free_axis] = ortho(axes[(free_axis + 1) % 3], axes[(free_axis + 2) % 3])
  return np.stack(axes + [position], axis=1)
def rotation_about_axis(degrees, axis=0):
  """Creates rotation matrix about one of the coordinate axes."""
  radians = degrees / 180.0 * np.pi
  c, s = np.cos(radians), np.sin(radians)
  plane_rot = np.array([[c, -s], [s, c]])
  rot3 = np.eye(3)
  rot3[1:3, 1:3] = plane_rot
  # Roll rows and columns so the rotation applies about the requested axis.
  rot3 = np.roll(np.roll(rot3, axis, axis=0), axis, axis=1)
  pose = np.eye(4)
  pose[:3, :3] = rot3
  return pose
def normalize(x):
  """Normalization helper function."""
  norm = np.linalg.norm(x)
  return x / norm
def focus_point_fn(poses, xnp = np):
  """Calculate nearest point to all focal axes in poses."""
  directions, origins = poses[:, :3, 2:3], poses[:, :3, 3:4]
  # Per-camera projector onto the plane orthogonal to the focal axis.
  proj = xnp.eye(3) - directions * xnp.transpose(directions, [0, 2, 1])
  normal_eq = xnp.transpose(proj, [0, 2, 1]) @ proj
  # Least-squares solution minimizing distance to all focal axes.
  return xnp.linalg.inv(normal_eq.mean(0)) @ (normal_eq @ origins).mean(0)[:, 0]
# Constants for generate_spiral_path():
NEAR_STRETCH = 0.9 # Push forward near bound for forward facing render path.
FAR_STRETCH = 5.0 # Push back far bound for forward facing render path.
FOCUS_DISTANCE = 0.75 # Relative weighting of near, far bounds for render path.
def generate_spiral_path(
    poses,
    bounds,
    n_frames = 120,
    n_rots = 2,
    zrate = 0.5,
):
  """Calculates a forward facing spiral path for rendering.

  Args:
    poses: (N, 3, 4) camera-to-world poses of the input views.
    bounds: near/far scene bounds; min/max are used to pick the focus depth.
    n_frames: number of poses along the returned path.
    n_rots: number of full spiral rotations.
    zrate: frequency multiplier for the out-of-plane (z) oscillation.

  Returns:
    (n_frames, 3, 4) array of camera-to-world poses along the spiral.
  """
  # Find a reasonable 'focus depth' for this dataset as a weighted average
  # of conservative near and far bounds in disparity space.
  near_bound = bounds.min() * NEAR_STRETCH
  far_bound = bounds.max() * FAR_STRETCH
  # All cameras will point towards the world space point (0, 0, -focal).
  focal = 1 / (((1 - FOCUS_DISTANCE) / near_bound + FOCUS_DISTANCE / far_bound))
  # Get radii for spiral path using 90th percentile of camera positions.
  positions = poses[:, :3, 3]
  radii = np.percentile(np.abs(positions), 90, 0)
  # Append a homogeneous coordinate so `radii * [x, y, z, 1]` works below.
  radii = np.concatenate([radii, [1.0]])
  # Generate poses for spiral path.
  render_poses = []
  cam2world = average_pose(poses)
  up = poses[:, :3, 1].mean(0)
  for theta in np.linspace(0.0, 2.0 * np.pi * n_rots, n_frames, endpoint=False):
    # Spiral position in the average camera's frame, mapped to world space.
    t = radii * [np.cos(theta), -np.sin(theta), -np.sin(theta * zrate), 1.0]
    position = cam2world @ t
    lookat = cam2world @ [0, 0, -focal, 1.0]
    z_axis = position - lookat
    render_poses.append(viewmatrix(z_axis, up, position))
  render_poses = np.stack(render_poses, axis=0)
  return render_poses
def transform_poses_pca(poses):
  """Transforms poses so principal components lie on XYZ axes.

  Args:
    poses: a (N, 3, 4) array containing the cameras' camera to world transforms.

  Returns:
    A tuple (poses, transform), with the transformed poses and the applied
    camera_to_world transforms.
  """
  t = poses[:, :3, 3]
  t_mean = t.mean(axis=0)
  t = t - t_mean

  # Principal axes of the zero-mean camera positions, via the
  # eigendecomposition of their (3, 3) scatter matrix.
  eigval, eigvec = np.linalg.eig(t.T @ t)
  # Sort eigenvectors in order of largest to smallest eigenvalue.
  inds = np.argsort(eigval)[::-1]
  eigvec = eigvec[:, inds]
  rot = eigvec.T
  if np.linalg.det(rot) < 0:
    # Keep the rotation proper (det = +1) by flipping the last axis.
    rot = np.diag(np.array([1, 1, -1])) @ rot

  transform = np.concatenate([rot, rot @ -t_mean[:, None]], -1)
  poses_recentered = unpad_poses(transform @ pad_poses(poses))
  transform = np.concatenate([transform, np.eye(4)[3:]], axis=0)

  # Flip coordinate system if z component of y-axis is negative
  if poses_recentered.mean(axis=0)[2, 1] < 0:
    poses_recentered = np.diag(np.array([1, -1, -1])) @ poses_recentered
    transform = np.diag(np.array([1, -1, -1, 1])) @ transform

  # Just make sure it's it in the [-1, 1]^3 cube
  scale_factor = 1.0 / np.max(np.abs(poses_recentered[:, :3, 3]))
  poses_recentered[:, :3, 3] *= scale_factor
  transform = np.diag(np.array([scale_factor] * 3 + [1])) @ transform
  return poses_recentered, transform
def transform_poses_focus(poses):
  """Transforms poses so that the "focus point" of capture is at the origin.

  Args:
    poses: a (N, 3, 4) array containing the cameras' camera to world transforms.

  Returns:
    A tuple (poses, transform), with the transformed poses and the applied
    camera_to_world transforms.
  """
  # Move the focus point to the origin.
  focus_point = focus_point_fn(poses)
  # Use average up vector as the Z axis.
  swap_y_z = np.array([
      [1, 0, 0],
      [0, 0, 1],
      [0, -1, 0.0],
  ])
  # Average (up-locked) camera orientation, re-axed so "up" becomes +Z.
  rot = average_pose(poses, lock_up=True)[:3, :3] @ swap_y_z
  transform = np.concatenate([rot.T, rot.T @ -focus_point[:, None]], -1)
  poses_recentered = transform @ pad_poses(poses)
  transform = np.concatenate([transform, np.eye(4)[3:]], axis=0)

  # Just make sure it's it in the [-1, 1]^3 cube
  scale_factor = 1.0 / np.max(np.abs(poses_recentered[:, :3, 3]))
  poses_recentered[:, :3, 3] *= scale_factor
  transform = np.diag(np.array([scale_factor] * 3 + [1])) @ transform
  return poses_recentered, transform
def generate_ellipse_path(
    poses,
    n_frames = 120,
    const_speed = True,
    z_variation = 0.0,
    z_phase = 0.0,
    rad_mult_min = 1.0,
    rad_mult_max = 1.0,
    render_rotate_xaxis = 0.0,
    render_rotate_yaxis = 0.0,
    use_avg_z_height = False,
    z_height_percentile = None,
    lock_up = False,
):
  """Generate an elliptical render path based on the given poses.

  Args:
    poses: (N, 3, 4) camera-to-world poses the ellipse is fitted to.
    n_frames: number of poses returned along the path.
    const_speed: if True, resample so camera speed is roughly constant.
    z_variation: amount of vertical (z) oscillation along the path.
    z_phase: phase offset of the z oscillation.
    rad_mult_min: minimum radius multiplier (zoom-in amount toward center).
    rad_mult_max: maximum radius multiplier (zoom-out amount from center).
    render_rotate_xaxis: extra rotation (degrees) applied about the x-axis.
    render_rotate_yaxis: extra rotation (degrees) applied about the y-axis.
    use_avg_z_height: center the path height on the mean camera height.
    z_height_percentile: optional percentile of camera heights to use instead.
    lock_up: forwarded to `viewmatrix`; lock the up vector instead of lookdir.

  Returns:
    (n_frames, 3, 4) array of camera-to-world poses along the ellipse.
  """
  # Calculate the focal point for the path (cameras point toward this).
  center = focus_point_fn(poses)
  # Default path height sits at z=0 (in middle of zero-mean capture pattern).
  xy_offset = center[:2]

  # Calculate lengths for ellipse axes based on input camera positions.
  xy_radii = np.percentile(np.abs(poses[:, :2, 3] - xy_offset), 90, axis=0)
  # Use ellipse that is symmetric about the focal point in xy.
  xy_low = xy_offset - xy_radii
  xy_high = xy_offset + xy_radii

  # Optional height variation, need not be symmetric.
  z_min = np.percentile((poses[:, 2, 3]), 10, axis=0)
  z_max = np.percentile((poses[:, 2, 3]), 90, axis=0)
  if use_avg_z_height or z_height_percentile is not None:
    # Center the path vertically around the average camera height, good for
    # datasets recentered by transform_poses_focus function.
    if z_height_percentile is None:
      z_init = poses[:, 2, 3].mean(axis=0)
    else:
      z_init = np.percentile(poses[:, 2, 3], z_height_percentile, axis=0)
  else:
    # Center the path at zero, good for datasets recentered by
    # transform_poses_pca function.
    z_init = 0
  z_low = z_init + z_variation * (z_min - z_init)
  z_high = z_init + z_variation * (z_max - z_init)
  xyz_low = np.array([*xy_low, z_low])
  xyz_high = np.array([*xy_high, z_high])

  def get_positions(theta):
    # Interpolate between bounds with trig functions to get ellipse in x-y.
    # Optionally also interpolate in z to change camera height along path.
    t_x = np.cos(theta) * 0.5 + 0.5
    t_y = np.sin(theta) * 0.5 + 0.5
    t_z = np.cos(theta + 2 * np.pi * z_phase) * 0.5 + 0.5
    t_xyz = np.stack([t_x, t_y, t_z], axis=-1)
    positions = xyz_low + t_xyz * (xyz_high - xyz_low)
    # Interpolate between min and max radius multipliers so the camera zooms in
    # and out of the scene center.
    t = np.sin(theta) * 0.5 + 0.5
    rad_mult = rad_mult_min + (rad_mult_max - rad_mult_min) * t
    positions = center + (positions - center) * rad_mult[:, None]
    return positions

  theta = np.linspace(0, 2.0 * np.pi, n_frames + 1, endpoint=True)
  positions = get_positions(theta)

  if const_speed:
    # Resample theta angles so that the velocity is closer to constant.
    lengths = np.linalg.norm(positions[1:] - positions[:-1], axis=-1)
    theta = stepfun.sample(None, theta, np.log(lengths), n_frames + 1)
    positions = get_positions(theta)

  # Throw away duplicated last position.
  positions = positions[:-1]

  # Set path's up vector to axis closest to average of input pose up vectors.
  avg_up = poses[:, :3, 1].mean(0)
  avg_up = avg_up / np.linalg.norm(avg_up)
  ind_up = np.argmax(np.abs(avg_up))
  up = np.eye(3)[ind_up] * np.sign(avg_up[ind_up])

  poses = np.stack([viewmatrix(p - center, up, p, lock_up) for p in positions])
  # Extra user-specified rotations applied to the whole path.
  poses = poses @ rotation_about_axis(-render_rotate_yaxis, axis=1)
  poses = poses @ rotation_about_axis(render_rotate_xaxis, axis=0)
  return poses
def generate_interpolated_path(
    poses,
    n_interp,
    spline_degree = 5,
    smoothness = 0.03,
    rot_weight = 0.1,
    lock_up = False,
    fixed_up_vector = None,
    lookahead_i = None,
    frames_per_colmap = None,
    const_speed = False,
    n_buffer = None,
    periodic = False,
    n_interp_as_total = False,
):
  """Creates a smooth spline path between input keyframe camera poses.

  Spline is calculated with poses in format (position, lookat-point, up-point).

  Args:
    poses: (n, 3, 4) array of input pose keyframes.
    n_interp: returned path will have n_interp * (n - 1) total poses.
    spline_degree: polynomial degree of B-spline.
    smoothness: parameter for spline smoothing, 0 forces exact interpolation.
    rot_weight: relative weighting of rotation/translation in spline solve.
    lock_up: if True, forced to use given Up and allow Lookat to vary.
    fixed_up_vector: replace the interpolated `up` with a fixed vector.
    lookahead_i: force the look direction to look at the pose `i` frames ahead.
    frames_per_colmap: conversion factor for the desired average velocity.
    const_speed: renormalize spline to have constant delta between each pose.
    n_buffer: Number of buffer frames to insert at the start and end of the
      path. Helps keep the ends of a spline path straight.
    periodic: make the spline path periodic (perfect loop).
    n_interp_as_total: use n_interp as total number of poses in path rather than
      the number of poses to interpolate between each input.

  Returns:
    Array of new camera poses with shape (n_interp * (n - 1), 3, 4), or
    (n_interp, 3, 4) if n_interp_as_total is set.
    NOTE(review): the function actually returns the tuple
    (poses[:-1], u[:-1], u_keyframes), not just the pose array.
  """

  def poses_to_points(poses, dist):
    """Converts from pose matrices to (position, lookat, up) format."""
    pos = poses[:, :3, -1]
    # -z is the forward axis (see `shift` below), so the lookat point lies
    # `dist` in front of the camera and the up point `dist` along its y-axis.
    lookat = poses[:, :3, -1] - dist * poses[:, :3, 2]
    up = poses[:, :3, -1] + dist * poses[:, :3, 1]
    return np.stack([pos, lookat, up], 1)

  def points_to_poses(points):
    """Converts from (position, lookat, up) format to pose matrices."""
    poses = []
    for i in range(len(points)):
      pos, lookat_point, up_point = points[i]
      if lookahead_i is not None:
        if i + lookahead_i < len(points):
          lookat = pos - points[i + lookahead_i][0]
      else:
        lookat = pos - lookat_point
      up = (up_point - pos) if fixed_up_vector is None else fixed_up_vector
      poses.append(viewmatrix(lookat, up, pos, lock_up=lock_up))
    return np.array(poses)

  def insert_buffer_poses(poses, n_buffer):
    """Insert extra poses at the start and end of the path."""

    def average_distance(points):
      distances = np.linalg.norm(points[1:] - points[0:-1], axis=-1)
      return np.mean(distances)

    def shift(pose, dz):
      result = np.copy(pose)
      z = result[:3, 2]
      z /= np.linalg.norm(z)
      # Move along forward-backward axis. -z is forward.
      result[:3, 3] += z * dz
      return result

    dz = average_distance(poses[:, :3, 3])
    prefix = np.stack([shift(poses[0], (i + 1) * dz) for i in range(n_buffer)])
    prefix = prefix[::-1]  # reverse order
    suffix = np.stack(
        [shift(poses[-1], -(i + 1) * dz) for i in range(n_buffer)]
    )
    result = np.concatenate([prefix, poses, suffix])
    return result

  def remove_buffer_poses(poses, u, n_frames, u_keyframes, n_buffer):
    # Drop the spline samples that fall inside the inserted buffer segments.
    u_keyframes = u_keyframes[n_buffer:-n_buffer]
    mask = (u >= u_keyframes[0]) & (u <= u_keyframes[-1])
    poses = poses[mask]
    u = u[mask]
    n_frames = len(poses)
    return poses, u, n_frames, u_keyframes

  def interp(points, u, k, s):
    """Runs multidimensional B-spline interpolation on the input points."""
    sh = points.shape
    pts = np.reshape(points, (sh[0], -1))
    # Spline degree cannot exceed (number of keyframes - 1).
    k = min(k, sh[0] - 1)
    tck, u_keyframes = scipy.interpolate.splprep(pts.T, k=k, s=s, per=periodic)
    new_points = np.array(scipy.interpolate.splev(u, tck))
    new_points = np.reshape(new_points.T, (len(u), sh[1], sh[2]))
    return new_points, u_keyframes

  if n_buffer is not None:
    poses = insert_buffer_poses(poses, n_buffer)
  points = poses_to_points(poses, dist=rot_weight)
  if n_interp_as_total:
    n_frames = n_interp + 1  # Add extra since final pose is discarded.
  else:
    n_frames = n_interp * (points.shape[0] - 1)
  u = np.linspace(0, 1, n_frames, endpoint=True)
  new_points, u_keyframes = interp(points, u=u, k=spline_degree, s=smoothness)
  poses = points_to_poses(new_points)
  if n_buffer is not None:
    poses, u, n_frames, u_keyframes = remove_buffer_poses(
        poses, u, n_frames, u_keyframes, n_buffer
    )

  if frames_per_colmap is not None:
    # Recalculate the number of frames to achieve desired average velocity.
    positions = poses[:, :3, -1]
    lengths = np.linalg.norm(positions[1:] - positions[:-1], axis=-1)
    total_length_colmap = lengths.sum()
    print('old n_frames:', n_frames)
    print('total_length_colmap:', total_length_colmap)
    n_frames = int(total_length_colmap * frames_per_colmap)
    print('new n_frames:', n_frames)
    u = np.linspace(
        np.min(u_keyframes), np.max(u_keyframes), n_frames, endpoint=True
    )
    new_points, _ = interp(points, u=u, k=spline_degree, s=smoothness)
    poses = points_to_poses(new_points)

  if const_speed:
    # Resample timesteps so that the velocity is nearly constant.
    positions = poses[:, :3, -1]
    lengths = np.linalg.norm(positions[1:] - positions[:-1], axis=-1)
    u = stepfun.sample(None, u, np.log(lengths), n_frames + 1)
    new_points, _ = interp(points, u=u, k=spline_degree, s=smoothness)
    poses = points_to_poses(new_points)

  # Drop the duplicated final sample.
  return poses[:-1], u[:-1], u_keyframes
def safe_interpolate_1d(
    x,
    spline_degree,
    smoothness,
    t_input,
    t_output,
):
  """Interpolate 1d signal x (defined at t_input and queried at t_output).

  Falls back to a constant signal when there are too few samples (or a zero
  spline degree) to fit a polynomial.
  """
  # TODO(bmild): switch interpolation t values to match those chosen for path.
  # Fitting a degree-k polynomial requires at least k + 1 samples.
  num_samples = len(x)
  degree = min(spline_degree, num_samples - 1)
  if degree <= 0:
    # Degenerate case: constant output (first sample, or 0.0 when empty).
    constant = x[0] if num_samples else 0.0
    return np.full(t_output.shape, constant, dtype=x.dtype)
  knots = scipy.interpolate.splrep(t_input, x, s=smoothness, k=degree)
  return scipy.interpolate.splev(t_output, knots).astype(x.dtype)
def identify_file_names(dir_or_text_file):
  """Loads a list of filenames from a directory or from a text file."""
  if utils.isdir(dir_or_text_file):
    # A directory: its sorted listing is the filename list.
    return sorted(utils.listdir(dir_or_text_file))
  # A text file: every line is treated as one filename.
  with utils.open_file(dir_or_text_file, 'r') as fp:
    contents = fp.read()
  # The file may be opened in binary mode; decode before splitting.
  if isinstance(contents, bytes):
    contents = contents.decode('utf-8')
  return contents.splitlines()
def identify_file_indices(
    dir_or_text_file, file_names
):
  """Computes indices for a subset of files out of a larger list."""
  # Load the subset of filenames to look up.
  subset_names = identify_file_names(dir_or_text_file)
  # COLMAP sometimes doesn't reconstruct all images, so some subset entries
  # may be absent from `file_names`. Warn about those, and only raise when
  # too large a fraction is missing.
  missing = set(subset_names) - set(file_names)
  if missing:
    logging.warning(
        'Some files from subset are missing in the file names:\n%s',
        ' '.join(str(x) for x in missing),
    )
    missing_fraction = len(missing) / len(subset_names)
    if (
        missing_fraction
        > _IDENTIFY_FILE_INDICES_MISSING_FRACTION_ERROR_THRESHOLD
    ):
      raise ValueError(
          f'{missing_fraction*100}% of subset files is missing'
          f' from file_names: {missing}'
      )
  present = set(file_names)
  # Indices of the subset filenames, preserving the order used in
  # subset_names (set membership is only used for the fast lookup).
  return np.array([file_names.index(n) for n in subset_names if n in present])
def get_meters_per_colmap_from_calibration_images(
    config, poses, image_names
):
  """Uses calibration images to get how many meters is a single COLMAP unit.

  Args:
    config: configs.Config; reads `render_calibration_keyframes` (directory or
      text file of calibration image names, consumed in consecutive pairs) and
      `render_calibration_distance` (known metric distance for each pair).
    poses: [N, 3, 4] camera pose matrices in COLMAP/world units.
    image_names: list of image filenames aligned with `poses`.

  Returns:
    Scalar conversion factor: meters per COLMAP distance unit.
  """
  # By default, the input camera poses are scaled to fit in the [-1, 1]^3 cube.
  # This default value implies a scaling of 2 / .25 = 8 meters between the
  # farthest apart camera poses.
  meters_per_colmap = 8.0
  if config.render_calibration_keyframes is not None:
    # Use provided calibration keyframes to determine metric world scale.
    calib_names = identify_file_names(config.render_calibration_keyframes)
    indices = []
    for i in range(0, len(calib_names), 2):
      # Grab pairs of calibration images filenames.
      name0, name1 = calib_names[i : i + 2]
      # Check if both are in the set of colmap-posed images.
      if name0 in image_names and name1 in image_names:
        indices.append((image_names.index(name0), image_names.index(name1)))
    if indices:
      # Extract colmap-space positions from the camera pose matrices.
      # Indexing with a list of (i, j) pairs yields shape [num_pairs, 2, ...].
      positions = poses[indices][Ellipsis, :3, -1]
      # Every pair of calibration keyframes should have world space distance
      # `render_calibration_distance` according to the capture handbook.
      colmap_lengths = np.linalg.norm(
          positions[:, 0] - positions[:, 1], axis=-1
      )
      colmap_length = colmap_lengths.mean(axis=0)
      # Ratio of world distance to colmap distance.
      meters_per_colmap = config.render_calibration_distance / colmap_length
      print('colmap lengths', colmap_lengths)
      print('avg', colmap_length)
      print('meters_per_colmap', meters_per_colmap)
  return meters_per_colmap
def calibrate_spline_speed(
    config, poses, image_names
):
  """Uses input config to determine a conversion factor for the spline speed."""
  if config.render_spline_meters_per_sec is None:
    return None
  # Convert desired metric speed into frames per COLMAP distance unit:
  # (meters / colmap unit) / (meters / sec) * (frames / sec).
  meters_per_colmap = get_meters_per_colmap_from_calibration_images(
      config, poses, image_names
  )
  frames_per_colmap = (
      meters_per_colmap
      / config.render_spline_meters_per_sec
      * config.render_video_fps
  )
  print('returning frames_per_colmap', frames_per_colmap)
  return frames_per_colmap
def create_render_spline_path(
    config,
    image_names,
    poses,
    exposures,
):
  """Creates spline interpolation render path from subset of dataset poses.

  Args:
    config: configs.Config object.
    image_names: a list of image filenames.
    poses: [N, 3, 4] array of extrinsic camera pose matrices.
    exposures: optional list of floating point exposure values.

  Returns:
    spline_indices: list of indices used to select spline keyframe poses.
    render_poses: array of interpolated extrinsic camera poses for the path.
    render_exposures: optional list of interpolated exposures for the path.
  """
  def remove_outlier_spline_indices(
      spline_indices, poses, q_max, q_mult
  ):
    """Identifies spline indices corresponding to inlier poses.

    A keyframe is considered an outlier when its distance to the next
    keyframe exceeds q_mult times the q_max distance quantile.
    """
    poses = poses[spline_indices]
    points = poses[:, :3, -1]
    distances = np.linalg.norm(points[1:] - points[:-1], axis=-1)
    mask = distances < q_mult * np.quantile(distances, q_max)
    mask = np.concatenate([mask, [True]], axis=0)  # Keep the last pose.
    num_inliers = int(np.sum(mask))
    num_total = len(spline_indices)
    print(
        f'remove_outlier_spline_indices: {num_inliers}/{num_total} spline '
        'path poses remaining after outlier removal.'
    )
    return spline_indices[mask]
  # Grab poses corresponding to the image filenames.
  spline_indices = identify_file_indices(
      config.render_spline_keyframes, image_names
  )
  # Optionally drop keyframes whose positions look like outliers.
  if (
      config.render_spline_outlier_keyframe_quantile is not None
      and config.render_spline_outlier_keyframe_multiplier is not None
  ):
    spline_indices = remove_outlier_spline_indices(
        spline_indices,
        poses,
        q_max=config.render_spline_outlier_keyframe_quantile,
        q_mult=config.render_spline_outlier_keyframe_multiplier,
    )
  keyframes = poses[spline_indices]
  # Optional frames-per-colmap-unit factor to hit a target metric speed.
  frames_per_colmap = calibrate_spline_speed(config, poses, image_names)
  if config.render_spline_fixed_up:
    # Fix path to use world-space "up" vector instead of "banking" with spline.
    all_up_vectors = poses[:, :3, 1]  # second column of pose matrix is up.
    fixed_up_vector = normalize(all_up_vectors.mean(axis=0))
  else:
    fixed_up_vector = None
  render_poses, frame_timesteps, keyframe_timesteps = (
      generate_interpolated_path(
          keyframes,
          n_interp=config.render_spline_n_interp,
          spline_degree=config.render_spline_degree,
          smoothness=config.render_spline_smoothness,
          rot_weight=config.render_spline_rot_weight,
          lock_up=config.render_spline_lock_up,
          fixed_up_vector=fixed_up_vector,
          lookahead_i=config.render_spline_lookahead_i,
          frames_per_colmap=frames_per_colmap,
          const_speed=config.render_spline_const_speed,
          n_buffer=config.render_spline_n_buffer,
      )
  )
  if config.render_spline_interpolate_exposure:
    if exposures is None:
      raise ValueError(
          'config.render_spline_interpolate_exposure is True but '
          'create_render_spline_path() was passed exposures=None.'
      )
    # Interpolate per-frame exposure value.
    log_exposure = np.log(exposures[spline_indices])
    # Use aggressive smoothing for exposure interpolation to avoid flickering.
    log_exposure_interp = safe_interpolate_1d(
        log_exposure,
        spline_degree=5,
        smoothness=config.render_spline_interpolate_exposure_smoothness,
        t_input=keyframe_timesteps,
        t_output=frame_timesteps,
    )
    render_exposures = np.exp(log_exposure_interp)
  else:
    render_exposures = None
  return spline_indices, render_poses, render_exposures
def intrinsic_matrix(
    fx,
    fy,
    cx,
    cy,
    xnp = np,
):
  """Intrinsic matrix for a pinhole camera in OpenCV coordinate system."""
  # Focal lengths on the diagonal, principal point in the last column.
  top = [fx, 0, cx]
  middle = [0, fy, cy]
  bottom = [0, 0, 1.0]
  return xnp.array([top, middle, bottom])
def get_pixtocam(
    focal,
    width,
    height,
    xnp = np,
):
  """Inverse intrinsic matrix for a perfect pinhole camera."""
  # Equal focal lengths in x/y and principal point at the image center.
  camtopix = xnp.array([
      [focal, 0, width * 0.5],
      [0, focal, height * 0.5],
      [0, 0, 1.0],
  ])
  return xnp.linalg.inv(camtopix)
def pixel_coordinates(
    width, height, xnp = np
):
  """Tuple of the x and y integer coordinates for a grid of pixels."""
  xs = xnp.arange(width)
  ys = xnp.arange(height)
  # 'xy' indexing yields arrays of shape [height, width].
  return xnp.meshgrid(xs, ys, indexing='xy')
def _radial_and_tangential_distort(
x,
y,
k1 = 0,
k2 = 0,
k3 = 0,
k4 = 0,
p1 = 0,
p2 = 0,
):
"""Computes the distorted pixel positions."""
r2 = x * x + y * y
radial_distortion = r2 * (k1 + r2 * (k2 + r2 * (k3 + r2 * k4)))
dx_radial = x * radial_distortion
dy_radial = y * radial_distortion
dx_tangential = 2 * p1 * x * y + p2 * (r2 + 2 * x * x)
dy_tangential = 2 * p2 * x * y + p1 * (r2 + 2 * y * y)
return x + dx_radial + dx_tangential, y + dy_radial + dy_tangential
def _compute_residual_and_jacobian(
    x,
    y,
    xd,
    yd,
    k1 = 0.0,
    k2 = 0.0,
    k3 = 0.0,
    k4 = 0.0,
    p1 = 0.0,
    p2 = 0.0,
):
  """Auxiliary function of radial_and_tangential_undistort().

  Computes the residual of the distortion model against the observed
  distorted point (xd, yd), plus the four Jacobian entries needed for one
  Newton step in the undistortion solver.

  Returns:
    fx, fy: residuals of the distortion equations.
    fx_x, fx_y, fy_x, fy_y: partial derivatives of (fx, fy) over (x, y).
  """
  # Adapted from https://github.com/google/nerfies/blob/main/nerfies/camera.py
  # let r(x, y) = x^2 + y^2;
  #     d(x, y) = 1 + k1 * r(x, y) + k2 * r(x, y) ^2 + k3 * r(x, y)^3 +
  #               k4 * r(x, y)^4;
  r = x * x + y * y
  d = 1.0 + r * (k1 + r * (k2 + r * (k3 + r * k4)))
  # The perfect projection is:
  # xd = x * d(x, y) + 2 * p1 * x * y + p2 * (r(x, y) + 2 * x^2);
  # yd = y * d(x, y) + 2 * p2 * x * y + p1 * (r(x, y) + 2 * y^2);
  #
  # Let's define
  #
  # fx(x, y) = x * d(x, y) + 2 * p1 * x * y + p2 * (r(x, y) + 2 * x^2) - xd;
  # fy(x, y) = y * d(x, y) + 2 * p2 * x * y + p1 * (r(x, y) + 2 * y^2) - yd;
  #
  # We are looking for a solution that satisfies
  # fx(x, y) = fy(x, y) = 0;
  fx = d * x + 2 * p1 * x * y + p2 * (r + 2 * x * x) - xd
  fy = d * y + 2 * p2 * x * y + p1 * (r + 2 * y * y) - yd
  # Compute derivative of d over [x, y] (d_r is dd/dr; chain rule below).
  d_r = k1 + r * (2.0 * k2 + r * (3.0 * k3 + r * 4.0 * k4))
  d_x = 2.0 * x * d_r
  d_y = 2.0 * y * d_r
  # Compute derivative of fx over x and y.
  fx_x = d + d_x * x + 2.0 * p1 * y + 6.0 * p2 * x
  fx_y = d_y * x + 2.0 * p1 * x + 2.0 * p2 * y
  # Compute derivative of fy over x and y.
  fy_x = d_x * y + 2.0 * p2 * y + 2.0 * p1 * x
  fy_y = d + d_y * y + 2.0 * p2 * x + 6.0 * p1 * y
  return fx, fy, fx_x, fx_y, fy_x, fy_y
def _radial_and_tangential_undistort(
    xd,
    yd,
    k1 = 0,
    k2 = 0,
    k3 = 0,
    k4 = 0,
    p1 = 0,
    p2 = 0,
    eps = 1e-9,
    max_iterations=10,
    xnp = np,
):
  """Computes undistorted (x, y) from (xd, yd)."""
  # From https://github.com/google/nerfies/blob/main/nerfies/camera.py
  # Newton's method on the 2x2 distortion system, initialized at the
  # distorted point.
  x = xnp.copy(xd)
  y = xnp.copy(yd)
  for _ in range(max_iterations):
    fx, fy, fx_x, fx_y, fy_x, fy_y = _compute_residual_and_jacobian(
        x=x, y=y, xd=xd, yd=yd, k1=k1, k2=k2, k3=k3, k4=k4, p1=p1, p2=p2
    )
    # Cramer's-rule solve of the Newton step; suppress the update wherever
    # the determinant is too small to be trusted.
    det = fy_x * fx_y - fx_x * fy_y
    stable = xnp.abs(det) > eps
    zero = xnp.zeros_like(det)
    x = x + xnp.where(stable, (fx * fy_y - fy * fx_y) / det, zero)
    y = y + xnp.where(stable, (fy * fx_x - fx * fy_x) / det, zero)
  return x, y
class ProjectionType(enum.Enum):
  """Camera projection type (perspective pinhole, fisheye, or 360 pano)."""

  # Standard pinhole perspective projection.
  PERSPECTIVE = 'perspective'
  # Fisheye projection: image-plane radius is interpreted as the polar angle
  # (see the fisheye branch of pixels_to_rays).
  FISHEYE = 'fisheye'
  # 360-degree panorama: pixel x maps to azimuth, pixel y to elevation.
  PANORAMIC = 'pano'
def pixels_to_rays(
    pix_x_int,
    pix_y_int,
    pixtocams,
    camtoworlds,
    distortion_params = None,
    pixtocam_ndc = None,
    camtype = ProjectionType.PERSPECTIVE,
    xnp = np,
):
  """Calculates rays given pixel coordinates, intrinisics, and extrinsics.

  Given 2D pixel coordinates pix_x_int, pix_y_int for cameras with
  inverse intrinsics pixtocams and extrinsics camtoworlds (and optional
  distortion coefficients distortion_params and NDC space projection matrix
  pixtocam_ndc), computes the corresponding 3D camera rays.

  Vectorized over the leading dimensions of the first four arguments.

  Args:
    pix_x_int: int array, shape SH, x coordinates of image pixels.
    pix_y_int: int array, shape SH, y coordinates of image pixels.
    pixtocams: float array, broadcastable to SH + [3, 3], inverse intrinsics.
    camtoworlds: float array, broadcastable to SH + [3, 4], camera extrinsics.
    distortion_params: dict of floats, optional camera distortion parameters.
    pixtocam_ndc: float array, [3, 3], optional inverse intrinsics for NDC.
    camtype: camera_utils.ProjectionType, fisheye or perspective camera.
    xnp: either numpy or jax.numpy.

  Returns:
    origins: float array, shape SH + [3], ray origin points.
    directions: float array, shape SH + [3], ray direction vectors.
    viewdirs: float array, shape SH + [3], normalized ray direction vectors.
    radii: float array, shape SH + [1], ray differential radii.
    imageplane: float array, shape SH + [2], xy coordinates on the image plane.
      If the image plane is at world space distance 1 from the pinhole, then
      imageplane will be the xy coordinates of a pixel in that space (so the
      camera ray direction at the origin would be (x, y, -1) in OpenGL coords).
  """
  # Must add half pixel offset to shoot rays through pixel centers.
  def pix_to_dir(x, y):
    return xnp.stack([x + 0.5, y + 0.5, xnp.ones_like(x)], axis=-1)
  # We need the dx and dy rays to calculate ray radii for mip-NeRF cones.
  # Index 0 is the main ray; 1 and 2 are offset by one pixel in x and y.
  pixel_dirs_stacked = xnp.stack(
      [
          pix_to_dir(pix_x_int, pix_y_int),
          pix_to_dir(pix_x_int + 1, pix_y_int),
          pix_to_dir(pix_x_int, pix_y_int + 1),
      ],
      axis=0,
  )
  # For jax, need to specify high-precision matmul.
  matmul = math.matmul if xnp == jnp else xnp.matmul
  mat_vec_mul = lambda A, b: matmul(A, b[Ellipsis, None])[Ellipsis, 0]
  # Apply inverse intrinsic matrices.
  camera_dirs_stacked = mat_vec_mul(pixtocams, pixel_dirs_stacked)
  if distortion_params is not None:
    # Correct for distortion.
    x, y = _radial_and_tangential_undistort(
        camera_dirs_stacked[Ellipsis, 0],
        camera_dirs_stacked[Ellipsis, 1],
        **distortion_params,
        xnp=xnp,
    )
    camera_dirs_stacked = xnp.stack([x, y, xnp.ones_like(x)], -1)
  if camtype == ProjectionType.FISHEYE:
    # Fisheye: the radius in the normalized image plane is the polar angle
    # theta; build a unit direction from (sin, cos) of that angle.
    theta = xnp.sqrt(xnp.sum(xnp.square(camera_dirs_stacked[Ellipsis, :2]), axis=-1))
    # Clamp the polar angle to at most pi.
    theta = xnp.minimum(xnp.pi, theta)
    # NOTE(review): theta == 0 would divide by zero here; the half-pixel
    # offsets presumably keep theta nonzero in practice — confirm.
    sin_theta_over_theta = xnp.sin(theta) / theta
    camera_dirs_stacked = xnp.stack(
        [
            camera_dirs_stacked[Ellipsis, 0] * sin_theta_over_theta,
            camera_dirs_stacked[Ellipsis, 1] * sin_theta_over_theta,
            xnp.cos(theta),
        ],
        axis=-1,
    )
  elif camtype == ProjectionType.PANORAMIC:
    # Panorama: x maps to azimuth theta, y to elevation phi.
    theta = camera_dirs_stacked[Ellipsis, 0]
    phi = camera_dirs_stacked[Ellipsis, 1]
    # Negation on y and z components accounts for expected OpenCV convention.
    camera_dirs_stacked = xnp.stack(
        [
            -xnp.sin(phi) * xnp.sin(theta),
            -xnp.cos(phi),
            -xnp.sin(phi) * xnp.cos(theta),
        ],
        axis=-1,
    )
  # Flip from OpenCV to OpenGL coordinate system.
  camera_dirs_stacked = matmul(
      camera_dirs_stacked, xnp.diag(xnp.array([1.0, -1.0, -1.0]))
  )
  # Extract 2D image plane (x, y) coordinates.
  imageplane = camera_dirs_stacked[0, Ellipsis, :2]
  # Apply camera rotation matrices.
  directions_stacked = mat_vec_mul(
      camtoworlds[Ellipsis, :3, :3], camera_dirs_stacked
  )
  # Extract the offset rays.
  directions, dx, dy = directions_stacked
  origins = xnp.broadcast_to(camtoworlds[Ellipsis, :3, -1], directions.shape)
  viewdirs = directions / xnp.linalg.norm(directions, axis=-1, keepdims=True)
  if pixtocam_ndc is None:
    # Distance from each unit-norm direction vector to its neighbors.
    dx_norm = xnp.linalg.norm(dx - directions, axis=-1)
    dy_norm = xnp.linalg.norm(dy - directions, axis=-1)
  else:
    # Convert ray origins and directions into projective NDC space.
    ndc_fn = functools.partial(convert_to_ndc, pixtocam=pixtocam_ndc, xnp=xnp)
    origins_dx, _ = ndc_fn(origins, dx)
    origins_dy, _ = ndc_fn(origins, dy)
    origins, directions = ndc_fn(origins, directions)
    # In NDC space, we use the offset between origins instead of directions.
    dx_norm = xnp.linalg.norm(origins_dx - origins, axis=-1)
    dy_norm = xnp.linalg.norm(origins_dy - origins, axis=-1)
  # Cut the distance in half, multiply it to match the variance of a uniform
  # distribution the size of a pixel (1/12, see paper).
  # TODO(barron): Add a unit test that this is correct.
  radii = (0.5 * (dx_norm + dy_norm))[Ellipsis, None] * 2 / xnp.sqrt(12)
  return origins, directions, viewdirs, radii, imageplane
def points_to_pixels(
    points,
    pixtocams,
    camtoworlds,
    distortion_params = None,
    camtype = ProjectionType.PERSPECTIVE,
    xnp = np,
):
  """Calculates pixel coordinates given 3D points, intrinisics, and extrinsics.

  Given 3D point coordinates points and cameras with inverse intrinsics
  pixtocams and extrinsics camtoworlds (and optional distortion coefficients
  distortion_params), computes the corresponding 2D pixel coordinates.

  Vectorized over the leading dimensions of the first four arguments.

  Args:
    points: float array, [..., 3], 3D coordinates of points to project.
    pixtocams: float array, [..., 3, 3], inverse intrinsics.
    camtoworlds: float array, [..., 3, 4], camera extrinsics.
    distortion_params: dict of floats or float arrays [...], optional camera
      distortion parameters.
    camtype: camera_utils.ProjectionType, type of camera model.
    xnp: either numpy (host compute) or jax.numpy (device compute).

  Returns:
    coordinates: float array, [..., 2], pixel coordinates.
    depth: float array, [...], per-point orthographic depth.

  Raises:
    ValueError: if camtype is not PERSPECTIVE.
  """
  if camtype != ProjectionType.PERSPECTIVE:
    raise ValueError(f'points_to_pixels only supports perspective projection, '
                     f'not {camtype} mode.')
  # For jax, need to specify high-precision matmul.
  matmul = math.matmul if xnp == jnp else xnp.matmul
  mat_vec_mul = lambda A, b: matmul(A, b[Ellipsis, None])[Ellipsis, 0]
  # Invert the camera-to-world rotation by transposing it (orthonormal).
  rotation = camtoworlds[Ellipsis, :3, :3]
  rotation_inv = xnp.swapaxes(rotation, -1, -2)
  translation = camtoworlds[Ellipsis, :3, -1]
  # Points (directions) in the camera coordinate frame.
  points_camera = mat_vec_mul(rotation_inv, points - translation)
  # Projection to image plane by dividing out -z.
  # NOTE(review): points at z == 0 in the camera frame divide by zero here —
  # callers presumably only project points in front of the camera; confirm.
  depth = -points_camera[Ellipsis, -1]
  camera_dirs = points_camera / depth[Ellipsis, None]
  # OpenGL to OpenCV coordinates.
  camera_dirs = matmul(camera_dirs, xnp.diag(xnp.array([1.0, -1.0, -1.0])))
  if distortion_params is not None:
    # Correct for distortion.
    x, y = _radial_and_tangential_distort(
        camera_dirs[Ellipsis, 0],
        camera_dirs[Ellipsis, 1],
        **distortion_params,
    )
    camera_dirs = xnp.stack([x, y, xnp.ones_like(x)], -1)
  # Apply intrinsics matrix.
  pixel_dirs = mat_vec_mul(xnp.linalg.inv(pixtocams), camera_dirs)
  # Remove half pixel offset (inverse of the offset added when casting rays).
  coordinates = pixel_dirs[Ellipsis, :2] - xnp.array([0.5, 0.5])
  return coordinates, depth
def rays_planes_intersection(
    z_min,
    z_max,
    origins,
    directions,
    xnp = np,
):
  """Crops rays to a range of z values.

  This is useful for situations where the scene lies within a range of
  altitudes, but the cameras are very far away, as with aerial data.

  Args:
    z_min: float z value of the lower cropping plane.
    z_max: float z value of the upper cropping plane.
    origins: ray origins points.
    directions: ray direction vectors.
    xnp: either numpy or jax.numpy.

  Returns:
    t_min: parametric location of the cropped ray origins
    t_max: parametric location of the ends of the cropped rays
  """
  # Parametric distance along each ray to the two z planes.
  t_to_lower = (z_min - origins[Ellipsis, 2]) / directions[Ellipsis, 2]
  t_to_upper = (z_max - origins[Ellipsis, 2]) / directions[Ellipsis, 2]
  # Order the hits; the entry point is never behind the ray origin.
  t_min = xnp.maximum(0, xnp.minimum(t_to_lower, t_to_upper))
  t_max = xnp.maximum(t_to_lower, t_to_upper)
  return t_min, t_max
def _intersect_ranges(
r1,
r2,
xnp = np,
):
start = xnp.maximum(r1[0], r2[0])
end = xnp.minimum(r1[1], r2[1])
return (start, end)
def ray_box_intersection(
    ray_o, ray_d, corners, xnp = np
):
  """Returns enter/exit distances along the ray for box defined by `corners`."""
  # Slab method: per-axis hits against the two corner planes, ordered per
  # axis, then reduced over axes (latest entry, earliest exit).
  t_lo = (corners[0] - ray_o) / ray_d
  t_hi = (corners[1] - ray_o) / ray_d
  t_min = xnp.minimum(t_lo, t_hi).max(axis=-1)
  t_max = xnp.maximum(t_lo, t_hi).min(axis=-1)
  return t_min, t_max
def modify_rays_with_bbox(
    rays, corners, xnp = np
):
  """Sets near/far by bbox intersection and multiplies lossmult by mask."""
  t_min, t_max = ray_box_intersection(
      rays.origins, rays.directions, corners, xnp=xnp
  )
  t_min, t_max = t_min[Ellipsis, None], t_max[Ellipsis, None]
  # A ray is valid when it hits the box AND the hit interval overlaps the
  # ray's existing [near, far] range.
  hits = t_min <= t_max
  inear, ifar = _intersect_ranges(
      (rays.near, rays.far), (t_min, t_max), xnp=xnp
  )
  valid = hits * (inear <= ifar)
  # Invalid rays get zero loss weight (or the mask itself when no lossmult
  # was set yet).
  if rays.lossmult is None:
    lossmult = valid.astype(xnp.float32)
  else:
    lossmult = xnp.where(valid, rays.lossmult, 0.0)
  return rays.replace(
      lossmult=lossmult,
      near=xnp.where(valid, inear, 0.0),
      far=xnp.where(valid, ifar, 0.0),
  )
def ray_sphere_intersection(
    ray_o,
    ray_d,
    center,
    radius,
    xnp = np,
):
  """Calculates distance to hit a sphere for a ray.

  Args:
    ray_o: Ray origin (..., 3)
    ray_d: Ray direction (..., 3)
    center: Sphere center (..., 3)
    radius: Sphere radius (..., 1)
    xnp: Numpy or Jax module

  Returns:
    t_min, t_max, hit. When no hit is found, t_min = t_max = 0.
  """
  # Solve |o + t d - c|^2 = r^2 as the quadratic a t^2 + b t + c = 0.
  oc = ray_o - center
  a = (ray_d**2).sum(axis=-1)
  b = 2 * (oc * ray_d).sum(axis=-1)
  c = (oc * oc).sum(axis=-1) - radius**2
  det = b**2 - 4.0 * a * c
  hit = (det >= 0) * (a > 0)
  # Sanitize inputs for misses so sqrt/division stay finite; the resulting
  # values are meaningless there and overwritten with 0 below.
  det = xnp.where(hit, det, 0.0)
  a = xnp.where(hit, a, 1.0)
  root = xnp.sqrt(det)
  t_min = xnp.where(hit, (-b - root) / (2.0 * a), 0.0)
  t_max = xnp.where(hit, (-b + root) / (2.0 * a), 0.0)
  return t_min, t_max, hit
def gather_cameras(cameras, cam_idx, xnp=np):
  """Gathers relevant camera parameters for each ray."""
  pixtocams, camtoworlds, distortion_params = cameras[:3]
  # Stacked (per-camera, ndim > 2) parameters are indexed; shared single
  # matrices pass through untouched.
  pixtocams_idx = pixtocams[cam_idx] if pixtocams.ndim > 2 else pixtocams
  camtoworlds_idx = (
      camtoworlds[cam_idx] if camtoworlds.ndim > 2 else camtoworlds
  )
  if distortion_params is None:
    distortion_params_idx = None
  else:
    # Scalar coefficients are shared across cameras; arrays are per-camera.
    distortion_params_idx = {
        k: v if xnp.isscalar(v) else v[cam_idx]
        for k, v in distortion_params.items()  # pytype: disable=attribute-error  # jax-ndarray
    }
  return (
      pixtocams_idx,
      camtoworlds_idx,
      distortion_params_idx,
  )
def cast_ray_batch(
    cameras,
    rays,
    camtype = ProjectionType.PERSPECTIVE,
    scene_bbox = None,
    xnp = np,
):
  """Maps from input cameras and uncast Rays batch to output cast Rays batch.

  `cameras` is a Tuple of five sets of camera parameters.
    pixtocams: 1 or N stacked [3, 3] inverse intrinsic matrices.
    camtoworlds: 1 or N stacked [3, 4] extrinsic pose matrices.
    distortion_params: optional, dict[str, float] containing pinhole model
      distortion parameters.
    pixtocam_ndc: optional, [3, 3] inverse intrinsic matrix for mapping to NDC.
    z_range: optional range of Z values

  Args:
    cameras: described above.
    rays: ray data including integer pixel coordinates and camera indices.
      These fields can be an arbitrary batch shape.
    camtype: camera_utils.ProjectionType, fisheye or perspective camera.
    scene_bbox: min and max corner of scene bounding box, if applicable.
    xnp: either numpy or jax.numpy.

  Returns:
    rays: Rays dataclass with computed 3D world space ray data.
  """
  # rays.cam_idx has shape [..., 1], remove this hanging dimension.
  cam_idx = rays.cam_idx[Ellipsis, 0]
  # Select per-ray camera parameters.
  cameras_idx = gather_cameras(cameras, cam_idx, xnp=xnp)
  pixtocams, camtoworlds, distortion_params = cameras_idx
  pixtocam_ndc, z_range = cameras[3:5]
  # Compute rays from pixel coordinates.
  origins, directions, viewdirs, radii, imageplane = pixels_to_rays(
      rays.pixels[Ellipsis, 0],
      rays.pixels[Ellipsis, 1],
      pixtocams,
      camtoworlds,
      distortion_params=distortion_params,
      pixtocam_ndc=pixtocam_ndc,
      camtype=camtype,
      xnp=xnp,
  )
  if z_range is not None:
    # Clip rays to the z slab: move origins to the slab entry point and
    # rescale directions to span exactly the slab interval.
    t_min, t_max = rays_planes_intersection(
        z_range[0], z_range[1], origins, directions, xnp
    )
    t_min = xnp.broadcast_to(t_min[Ellipsis, None], origins.shape)
    t_max = xnp.broadcast_to(t_max[Ellipsis, None], origins.shape)
    # Note: hit_mask is True where the ray MISSES the slab (empty interval);
    # such rays are left unchanged.
    hit_mask = t_max < t_min
    origins = xnp.where(hit_mask, origins, origins + directions * t_min)
    directions = xnp.where(hit_mask, directions, directions * (t_max - t_min))
  # Preserve all metadata and add the cast rays.
  rays = rays.replace(
      origins=origins,
      directions=directions,
      viewdirs=viewdirs,
      radii=radii,
      imageplane=imageplane,
  )
  if scene_bbox is not None:
    rays = modify_rays_with_bbox(rays, scene_bbox, xnp=xnp)
  return rays
def cast_general_rays(
    camtoworld,
    pixtocam,
    height,
    width,
    near,
    far,
    distortion_params = None,
    pixtocam_ndc = None,
    camtype = ProjectionType.PERSPECTIVE,
    xnp = np,
):
  """Wrapper for generating a general ray batch."""
  # One ray per pixel of the full image.
  pix_x_int, pix_y_int = pixel_coordinates(width, height, xnp=xnp)
  ray_args = pixels_to_rays(
      pix_x_int,
      pix_y_int,
      pixtocam,
      camtoworld,
      distortion_params=distortion_params,
      pixtocam_ndc=pixtocam_ndc,
      camtype=camtype,
      xnp=xnp,
  )
  # Per-pixel scalar fields: near/far planes and a constant camera index 0.
  def broadcast_scalar(value):
    return xnp.broadcast_to(value, pix_x_int.shape)[Ellipsis, None]
  return utils.Rays(
      *ray_args,
      pixels=xnp.stack([pix_x_int, pix_y_int], axis=-1),
      near=broadcast_scalar(near),
      far=broadcast_scalar(far),
      cam_idx=broadcast_scalar(0),
  )
def cast_pinhole_rays(
    camtoworld,
    height,
    width,
    focal,
    near,
    far,
    xnp = np,
):
  """Generates a pinhole camera ray batch (w/o distortion)."""
  # Ideal pinhole intrinsics with the principal point at the image center.
  pixtocam = get_pixtocam(focal, width, height, xnp=xnp)
  return cast_general_rays(
      camtoworld,
      pixtocam,
      height,
      width,
      near,
      far,
      camtype=ProjectionType.PERSPECTIVE,
      xnp=xnp,
  )
def cast_spherical_rays(
    camtoworld,
    height,
    width,
    near,
    far,
    xnp,
):
  """Generates a spherical camera ray batch."""
  # Map pixel x across the full 2*pi azimuth range and pixel y across the
  # pi elevation range.
  pixtocam = xnp.diag(xnp.array([2.0 * np.pi / width, np.pi / height, 1.0]))
  return cast_general_rays(
      camtoworld,
      pixtocam,
      height,
      width,
      near,
      far,
      camtype=ProjectionType.PANORAMIC,
      xnp=xnp,
  )
def jax_camera_from_tuple(
    camera_tuple,
    image_size,
    projection_type,
):
  """Converts a camera tuple into a JAX camera.

  Args:
    camera_tuple: A tuple containing `inv_intrinsics`, the inverse intrinsics
      matrix; `extrinsics`, the camera to world matrix; and `distortion_params`,
      the dictionary of distortion parameters.
    image_size: An array containing the (width, height) image size.
    projection_type: The projection type of the camera.

  Returns:
    A JAX camera class instance encoding the same camera information.

  Raises:
    ValueError: if projection_type is neither PERSPECTIVE nor FISHEYE.
  """
  if projection_type.value not in {
      ProjectionType.PERSPECTIVE.value,
      ProjectionType.FISHEYE.value,
  }:
    raise ValueError(f'Projection {projection_type} is not supported.')
  inv_intrinsics, extrinsic, distortion_params = camera_tuple[:3]
  # Recover the forward intrinsics to read off focal length, principal point
  # and the y/x focal ratio.
  intrinsics = jnp.linalg.inv(inv_intrinsics)
  focal_length = intrinsics[0, 0]
  principal_point = intrinsics[:2, 2]
  pixel_aspect_ratio = intrinsics[1, 1] / intrinsics[0, 0]
  radial_distortion = None
  tangential_distortion = None
  if distortion_params is not None:
    # Radial coefficients are only used when k1..k3 are all present; k4 is
    # optional.
    if (
        'k1' in distortion_params
        and 'k2' in distortion_params
        and 'k3' in distortion_params
    ):
      radial_keys = ['k1', 'k2', 'k3', 'k4']
      radial_distortion = jnp.array(
          [distortion_params[k] for k in radial_keys if k in distortion_params]
      )
    if 'p1' in distortion_params and 'p2' in distortion_params:
      tangential_distortion = jnp.array([
          distortion_params['p1'],
          distortion_params['p2'],
      ])
  # Homogenize the [3, 4] extrinsic to a full [4, 4] matrix.
  extrinsic = jnp.concatenate(
      [extrinsic[:3, :4], jnp.array([[0, 0, 0, 1]])], axis=0
  )
  # Convert to OpenCV coordinates.
  extrinsic = math.matmul(extrinsic, jnp.diag(jnp.array([1, -1, -1, 1])))
  world_to_cam = jnp.linalg.inv(extrinsic)
  camera = jaxcam.Camera.create(
      focal_length=focal_length,
      pixel_aspect_ratio=pixel_aspect_ratio,
      radial_distortion=radial_distortion,
      tangential_distortion=tangential_distortion,
      principal_point=principal_point,
      image_size=image_size,
      is_fisheye=(projection_type.value == ProjectionType.FISHEYE.value),
  )
  camera = jaxcam.update_world_to_camera_matrix(camera, world_to_cam)
  return camera
def tuple_from_jax_camera(
    jax_camera,
):
  """Converts a JAX camera into a camera tuple."""
  # Rebuild the intrinsic matrix from focal length, skew, aspect ratio and
  # principal point, then invert it for the pixel-to-camera mapping.
  fx = jax_camera.focal_length
  fy = jax_camera.focal_length * jax_camera.pixel_aspect_ratio
  intrinsic = jnp.block([
      [fx, jax_camera.skew, jax_camera.principal_point[0]],
      [0, fy, jax_camera.principal_point[1]],
      [0, 0, 1],
  ])
  pix_to_cam = jnp.linalg.inv(intrinsic)
  # Invert world->camera, flip back to OpenGL axes, keep the [3, 4] part.
  cam_to_world = jnp.linalg.inv(jaxcam.world_to_camera_matrix(jax_camera))
  cam_to_world = math.matmul(cam_to_world, jnp.diag(jnp.array([1, -1, -1, 1])))
  cam_to_world = cam_to_world[:3, :]
  # Collect whichever distortion coefficients the camera carries.
  distortion_params = None
  if jax_camera.has_distortion:
    distortion_params = {}
    if jax_camera.has_radial_distortion:
      distortion_params.update({
          'k1': jax_camera.radial_distortion[0],
          'k2': jax_camera.radial_distortion[1],
          'k3': jax_camera.radial_distortion[2],
          'k4': jax_camera.radial_distortion[3],
      })
    if jax_camera.has_tangential_distortion:
      distortion_params.update({
          'p1': jax_camera.tangential_distortion[0],
          'p2': jax_camera.tangential_distortion[1],
      })
  return pix_to_cam, cam_to_world, distortion_params
def rotation_distance(
    rotation_mat1, rotation_mat2
):
  """Computes the angle between two rotation matrices in degrees.

  Args:
    rotation_mat1: (3, 3) The first batch of rotation matrix.
    rotation_mat2: (3, 3) The second batch of rotation matrix.

  Returns:
    The angle in degrees between 0 and 180.
  """
  # Compare the rotations in axis-angle (so3 log) space.
  delta = rigid_body.log_so3(rotation_mat1) - rigid_body.log_so3(rotation_mat2)
  angle_deg = jnp.degrees(jnp.linalg.norm(delta, axis=-1))
  # Wrap angles above 180 back into [0, 180].
  return jnp.where(  # pytype: disable=bad-return-type  # jnp-type
      angle_deg < 180,
      angle_deg,
      360 - angle_deg,
  )
def compute_camera_metrics(
    cameras_gt, cameras_pred
):
  """Computes the metrics between two cameras.

  Args:
    cameras_gt: ground-truth cameras.
    cameras_pred: predicted cameras, batched like cameras_gt.

  Returns:
    Dict of per-camera absolute errors: focal length, position (L2),
    per-axis translation, orientation (degrees), principal point (L2), and
    one entry per distortion coefficient present in the predictions.
  """
  orientation_diffs = jax.vmap(rotation_distance)(
      cameras_pred.orientation, cameras_gt.orientation
  )
  translation_diffs = jnp.abs(cameras_pred.translation - cameras_gt.translation)
  diffs = {
      'focal_length': jnp.abs(
          cameras_pred.focal_length - cameras_gt.focal_length
      ),
      'position': jnp.linalg.norm(
          cameras_pred.position - cameras_gt.position, axis=-1
      ),
      'translation_x': translation_diffs[Ellipsis, 0],
      'translation_y': translation_diffs[Ellipsis, 1],
      'translation_z': translation_diffs[Ellipsis, 2],
      'orientation': jnp.abs(orientation_diffs),
      'principal_points': jnp.linalg.norm(
          cameras_pred.principal_point - cameras_gt.principal_point,
          axis=-1,
      ),
  }
  if cameras_pred.radial_distortion is not None:
    # A GT camera without radial distortion counts as all-zero coefficients.
    radial_distortion_gt = jnp.zeros(4)
    if cameras_gt.has_radial_distortion:
      radial_distortion_gt = cameras_gt.radial_distortion
    for i in range(cameras_pred.radial_distortion.shape[-1]):
      diffs[f'radial_distortion_{i}'] = jnp.abs(
          cameras_pred.radial_distortion[Ellipsis, i] - radial_distortion_gt[Ellipsis, i]
      )
  if cameras_pred.tangential_distortion is not None:
    # A GT camera without tangential distortion counts as zero coefficients.
    tangential_distortion_gt = jnp.zeros(2)
    if cameras_gt.has_tangential_distortion:
      # Bug fix: this previously read cameras_gt.radial_distortion, so
      # tangential errors were computed against the wrong coefficients.
      tangential_distortion_gt = cameras_gt.tangential_distortion
    for i in range(cameras_pred.tangential_distortion.shape[-1]):
      diffs[f'tangential_distortion_{i}'] = jnp.abs(
          cameras_pred.tangential_distortion[Ellipsis, i]
          - tangential_distortion_gt[Ellipsis, i]
      )
  return diffs
def perturb_cameras(
    rng,
    cameras,
    sigma_look_at,
    sigma_position,
    sigma_focal_length = 0.0,
    sigma_dolly_z = 0.0,
    single_dolly = True,
    dolly_use_average = False,
):
  """Randomly perturb camera positions and orientations.

  For position the 3D coordinate is simply shifted according to
  an offset vector. For the orientation an offset angle is calculated based
  on spherical coordinates. The underlying offsets are randomly chosen using
  normal distributions based on the input sigmas.

  Args:
    rng: A PRNGKey.
    cameras: Cameras to perturb.
    sigma_look_at: Strength of look-at position offset. Higher means stronger.
    sigma_position: Strength of position offset. Higher means stronger.
    sigma_focal_length: Strength of focal length zoom z-axis scale. Higher means
      stronger. This is essentially a percentage (0.2 means 20%).
    sigma_dolly_z: Strength of Dolly zoom z-axis scale. Higher means stronger.
      This is essentially a percentage (0.2 means 20%).
    single_dolly: If True, only have a single perturbation for dolly zoom.
    dolly_use_average: If True, set the dolly z to the average of the input
      instead of perturbing.

  Returns:
    Perturbed cameras.
  """
  # Dolly zoom.
  if sigma_dolly_z > 0.0 or dolly_use_average:
    # Turn out "percentage" into a log scale. This is equivalent to having
    # minval = log(1+s) and maxval = log(1/(1+s)) but sampling from a normal
    # distribution.
    log_sigma_dolly_z = jnp.log1p(sigma_dolly_z)
    rng, dolly_key = random.split(rng)
    translation = cameras.translation
    x, y, z = jnp.split(translation, 3, -1)
    if dolly_use_average:
      # Every camera gets the batch-average z instead of a random one.
      new_z = jnp.broadcast_to(z.mean(axis=0, keepdims=True), z.shape)
    elif single_dolly:
      # One shared log-normal scale for the whole batch.
      new_z = z * jnp.exp(random.normal(dolly_key, (1,)) * log_sigma_dolly_z)
    else:
      # Independent log-normal scale per camera.
      new_z = z * jnp.exp(random.normal(dolly_key, z.shape) * log_sigma_dolly_z)
    # Scale the focal length with the z shift so the apparent subject size is
    # preserved (the classic dolly-zoom effect).
    new_focal_length = cameras.focal_length * (new_z / z).squeeze(-1)
    new_translation = jnp.concatenate([x, y, new_z], axis=-1)
    # Recover world position from the updated translation: p = -R^T t.
    new_position = jax.vmap(spin_math.matmul)(
        -cameras.orientation.swapaxes(-1, -2), new_translation
    )
    cameras = cameras.replace(
        position=new_position, focal_length=new_focal_length
    )
  # Perturb focal length.
  rng, key = random.split(rng)
  new_focal_length = cameras.focal_length * jnp.exp(
      random.normal(key, cameras.shape) * jnp.log1p(sigma_focal_length)
  )
  cameras = cameras.replace(focal_length=new_focal_length)
  camera_positions = cameras.position
  # Row 1 of the orientation matrix is the camera's down axis, so negate it.
  up_vectors = -cameras.orientation[Ellipsis, 1, :]
  # Perturb camera positions.
  rng, key = random.split(rng)
  perturb_dir = spin_math.normalize(random.normal(key, camera_positions.shape))
  camera_positions_perturbed = np.array(
      sigma_position * perturb_dir + camera_positions
  )
  # Perturb look-at point.
  # The unperturbed look-at is the point on each optical axis closest to the
  # world origin.
  look_at_positions = jax.vmap(geometry.line_closest_point)(
      cameras.position, cameras.optical_axis, jnp.zeros_like(cameras.position)
  )
  rng, key = random.split(rng)
  # NOTE(review): this uses math.normalize while the position branch above
  # uses spin_math.normalize — presumably equivalent helpers; confirm.
  perturb_dir = math.normalize(random.normal(key, camera_positions.shape))
  look_at_positions_perturbed = np.array(
      sigma_look_at * perturb_dir + look_at_positions
  )
  # Apply the look-at function.
  new_cameras = []
  for camera, camera_position, look_at_position, up_vector in zip(
      cameras,
      camera_positions_perturbed,
      look_at_positions_perturbed,
      up_vectors,
  ):
    new_cameras.append(
        jaxcam.look_at(
            camera=camera,
            eye=camera_position,
            center=look_at_position,
            world_up=up_vector,
        )
    )
  cameras = jaxcam.concatenate(new_cameras)
  return cameras
|
evocodebench_data_12
|
# formatting done while cleaning
def connect(prev, curr):
    """Join two text fragments, merging hyphenated line breaks.

    A trailing hyphen on ``prev`` is dropped and the fragments fused;
    otherwise a single space separates them, unless ``prev`` already
    ends with one.
    """
    if prev.endswith("-"):
        return prev[:-1] + curr
    separator = "" if prev.endswith(" ") else " "
    return prev + separator + curr
def fix_mixedcase_words(word):
    """Normalize the casing of a mixed-case word.

    Empty, fully-uppercase, and fully-lowercase words pass through
    unchanged. A word that merely starts like a title (e.g. "HEllo" or
    "Hello") with an uppercase first letter followed by lowercase is
    re-capitalized; any other mixed casing is flattened to all-lowercase
    when it starts lowercase (e.g. "heLlo" -> "hello") and all-uppercase
    otherwise (e.g. "HEllo" -> "Hello" via capitalize, "HeLLO" stays on
    the capitalize path too).
    """
    if not word or word.isupper() or word.islower():
        return word
    # Short-circuit keeps this safe for single-character words.
    starts_titled = word[0].isupper() and word[1].islower()
    if starts_titled:
        return word.capitalize()
    return word.lower() if word[0].islower() else word.upper()
# formatting done after cleaning
|
evocodebench_data_13
|
from typing import Any, Callable, Dict, Iterable, List
import numpy as np
from pydantic import fields
# ----- validators -----
def is_odd(cls: type, v: int, field: fields.ModelField) -> int:
    """Validate that a kernel-size value is an odd number.

    Args:
        cls (type): Owning model class, used in the error message.
        v (int): Candidate value.
        field (fields.ModelField): Field being validated.

    Raises:
        ValueError: If `v` is even.

    Returns:
        int: The unchanged value `v`.
    """
    if not v % 2:
        raise ValueError(f"{cls.__name__}: {field.name} must be odd numbers.")
    return v
def is_binary(cls: type, v: np.ndarray, field: fields.ModelField) -> np.ndarray:
    """Validate that an array holds only boolean values, i.e. is binary.

    Args:
        cls (type): Owning model class, used in the error message.
        v (np.ndarray): Candidate array.
        field (fields.ModelField): Field being validated.

    Raises:
        ValueError: If the array dtype is not bool.

    Returns:
        np.ndarray: The unchanged array `v`.
    """
    expected = np.dtype("bool")
    if v.dtype != expected:
        raise ValueError(f"{cls.__name__}: {field.name} must be binary. got dtype {v.dtype}")
    return v
def is_list_of_points(cls: type, v: np.ndarray, field: fields.ModelField) -> np.ndarray:
    """Validate that an array is a list of 2D points, i.e. has shape (_, 2).

    Args:
        cls (type): Owning model class, used in the error message.
        v (np.ndarray): Candidate array.
        field (fields.ModelField): Field being validated.

    Raises:
        ValueError: If the array is not shaped (_, 2).

    Returns:
        np.ndarray: The unchanged array `v`.
    """
    # ndim check runs first, so indexing shape[-1] below is always safe.
    if v.ndim != 2 or v.shape[-1] != 2:
        raise ValueError(f"{cls.__name__}: {field.name} must have shape (_, 2).")
    return v
def is_not_empty(cls: type, v: List[Any], field: fields.ModelField) -> List[Any]:
    """Validate that a list is not empty.

    Args:
        cls (type): Owning model class, used in the error message.
        v (List[Any]): Candidate list.
        field (fields.ModelField): Field being validated.

    Raises:
        ValueError: If the list is empty.

    Returns:
        List[Any]: The unchanged list `v`.
    """
    if not v:
        raise ValueError(f"{cls.__name__}: {field.name} list cannot be empty.")
    return v
def is_not_zero_sum(cls: type, v: Any, field: fields.ModelField) -> Any:
    """Validate that the values do not sum to zero.

    Args:
        cls (type): Owning model class, used in the error message.
        v (Any): Candidate value(s).
        field (fields.ModelField): Field being validated.

    Raises:
        ValueError: If `v` sums to 0.

    Returns:
        Any: The unchanged value `v`.
    """
    total = np.sum(v)
    if total == 0:
        raise ValueError(f"{cls.__name__}: {field.name} sum cannot be zero.")
    return v
def are_all_positive(cls: type, v: Any, field: fields.ModelField) -> Any:
    """Validate that all values are positive (>= 0 for iterables, >= 0.0 scalar).

    Args:
        cls (type): Owning model class, used in the error message.
        v (Any): Candidate scalar or iterable of values.
        field (fields.ModelField): Field being validated.

    Raises:
        ValueError: If any value is negative.

    Returns:
        Any: The unchanged value `v`.
    """
    if isinstance(v, Iterable):
        flags = np.array([item >= 0 for item in v])
        if not flags.all():
            raise ValueError(f"{cls.__name__}: all {field.name} must be positive. Received {v}")
    elif v < 0.0:
        raise ValueError(f"{cls.__name__}: {field.name} must be positive. Received {v}")
    return v
def to_dtype_float32(cls: type, v: np.ndarray, field: fields.ModelField) -> np.ndarray:
    """Convert an input array to dtype np.float32.

    Args:
        cls (type): Owning model class.
        v (np.ndarray): Array to convert.
        field (fields.ModelField): Field being validated.

    Returns:
        np.ndarray: A float32 copy of `v`.
    """
    converted = v.astype(np.float32)
    return converted
# ----- root_validators -----
def is_valid_bbox(cls: type, values: Dict[str, float]) -> Dict[str, float]:
    """Check that the bounding box is valid (min strictly below max per axis)."""
    x_degenerate = values["x_min"] >= values["x_max"]
    y_degenerate = values["y_min"] >= values["y_max"]
    if x_degenerate or y_degenerate:
        raise ValueError(
            f'{cls.__name__}: invalid bbox. x_min={values["x_min"]}, x_max={values["x_max"]},'
            f' y_min={values["y_min"]}, y_max={values["y_max"]}'
        )
    return values
# ----- parametrized validators -----
def is_array_n_dimensions(nb_dimensions: int) -> Callable:
    """Create a pydantic validator checking that an array is n-dimensional.

    Args:
        nb_dimensions (int): Number of dimensions the array must have.

    Returns:
        Callable: The validator.
    """
    def validator(cls: type, v: np.ndarray, field: fields.ModelField) -> np.ndarray:
        """Check if the array has the right number of dimensions."""
        # An empty 1-D array is accepted as "0-dimensional" by convention.
        empty_as_zero_dim = v.shape == (0,) and nb_dimensions == 0
        if len(v.shape) != nb_dimensions and not empty_as_zero_dim:
            raise ValueError(
                f"{cls.__name__}: wrong number of dimensions for {field.name}. "
                f"Expected {nb_dimensions}, got {len(v.shape)}"
            )
        return v
    return validator
# ----- parametrized root_validators -----
def are_lengths_equal(field1: str, field2: str) -> Callable:
    """Create a pydantic root validator checking two fields have equal length.

    Args:
        field1 (str): Name of the first field.
        field2 (str): Name of the second field.

    Returns:
        Callable: The root validator.
    """
    def _validate(cls: type, values: Dict[str, List[Any]]) -> Dict[str, List[Any]]:
        """Check if len(field1) equals len(field2)."""
        if len(values[field1]) != len(values[field2]):
            raise ValueError(
                f"{cls.__name__}: {field1} and {field2} length mismatch, "
                f"resp. {len(values[field1])} and {len(values[field2])}"
            )
        return values
    return _validate
def are_shapes_equal(field1: str, field2: str) -> Callable:
    """Create a pydantic root validator checking two arrays share a shape.

    Args:
        field1 (str): Name of the first field.
        field2 (str): Name of the second field.

    Returns:
        Callable: The root validator.
    """
    def _validate(cls: type, values: Dict[str, np.ndarray]) -> Dict[str, np.ndarray]:
        """Check if field1.shape equals field2.shape."""
        shape1 = values[field1].shape
        shape2 = values[field2].shape
        if shape1 != shape2:
            raise ValueError(f"{cls.__name__}: {field1} and {field2} shape mismatch.")
        return values
    return _validate
def are_all_shapes_equal(field1: str, field2: str) -> Callable:
    """Create a pydantic root validator for element-wise shape equality.

    The validator checks that the two fields (lists of np.ndarray) have the
    same length and that every pair of elements has identical shapes.

    Args:
        field1 (str): Name of the first field.
        field2 (str): Name of the second field.

    Returns:
        Callable: The root validator.
    """
    def _validate(cls: type, values: Dict[str, np.ndarray]) -> Dict[str, np.ndarray]:
        """Check if len(field1) equals len(field2) and if every element have the same shape."""
        shapes_field_1 = [element.shape for element in values[field1]]
        shapes_field_2 = [element.shape for element in values[field2]]
        length_mismatch = len(values[field1]) != len(values[field2])
        if length_mismatch or shapes_field_1 != shapes_field_2:
            raise ValueError(
                f"{cls.__name__}: {field1} and {field2} shape mismatch, resp. {shapes_field_1} and {shapes_field_2}."
            )
        return values
    return _validate
|
evocodebench_data_14
|
# coding=utf-8
# Copyright 2023 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tools for manipulating coordinate spaces and distances along rays."""
from internal import geopoly
from internal import math
import jax
from jax import random
import jax.numpy as jnp
import numpy as np
def contract(x):
  """Contracts points towards the origin (Eq 10 of arxiv.org/abs/2111.12077)."""
  # Clamping the squared magnitude at 1 leaves points with |x| < 1 unchanged.
  sq_norm = jnp.maximum(1, jnp.sum(x**2, axis=-1, keepdims=True))
  norm = jnp.sqrt(sq_norm)
  return x * (2 * norm - 1) / sq_norm
def inv_contract(z):
  """The inverse of contract()."""
  # Clamping the squared magnitude at 1 leaves points with |z| < 1 unchanged.
  sq_norm = jnp.maximum(1, jnp.sum(z**2, axis=-1, keepdims=True))
  denom = 2 * jnp.sqrt(sq_norm) - sq_norm
  return z / denom
def track_linearize(fn, mean, cov):
  """Apply function `fn` to a set of means and covariances, ala a Kalman filter.

  Linearizes `fn` around `mean` and exploits Covar[Ax + y] = A(Covar[x])A^T
  (see https://cs.nyu.edu/~roweis/notes/gaussid.pdf) to transform the
  Gaussian analytically.

  Args:
    fn: A function that can be applied to `mean`.
    mean: a tensor of Gaussian means, where the last axis is the dimension.
    cov: a tensor of covariances, where the last two axes are the dimensions.

  Returns:
    fn_mean: the transformed means.
    fn_cov: the transformed covariances.
  """
  if len(cov.shape) != len(mean.shape) + 1:
    raise ValueError('cov must be non-diagonal')
  fn_mean, lin_fn = jax.linearize(fn, mean)
  # Applying the JVP along both trailing covariance axes computes J cov J^T.
  apply_jac = jax.vmap(lin_fn, -1, -2)
  fn_cov = apply_jac(apply_jac(cov))
  return fn_mean, fn_cov
def track_isotropic(fn, mean, scale):
  """Apply function `fn` to a set of means and scales, ala a Kalman filter.

  The isotropic/scalar analogue of track_linearize: the Gaussians pushed
  through `fn` are represented by a single `scale` value per mean (where
  `scale**2` is the variance) rather than a full covariance.

  Args:
    fn: A function that can be applied to `mean`.
    mean: a tensor of Gaussian means, where the last axis is the dimension.
    scale: a tensor of scales, with the same shape as means[..., -1].

  Returns:
    fn_mean: the transformed means.
    fn_scale: the transformed scales.
  """
  if mean.shape[:-1] != scale.shape:
    raise ValueError(
        f'mean.shape[:-1] {mean.shape}[:-1] != scale.shape {scale.shape}.'
    )
  d = mean.shape[-1]
  fn_mean, lin_fn = jax.linearize(fn, mean)
  if scale is None:
    return fn_mean, None
  # Build the Jacobian of `fn` at each mean by pushing basis vectors through
  # the linearization.
  basis = jnp.broadcast_to(jnp.eye(d), mean.shape + (d,))
  jac = jax.vmap(lin_fn, in_axes=-1, out_axes=-1)(basis)
  # The d-th root of |det(J)| is the geometric mean of the Jacobian's
  # eigenvalue magnitudes — the isotropic scaling `fn` applies at each mean.
  eps = jnp.finfo(jnp.float32).tiny  # Guard against an inf gradient at 0.
  abs_det = jnp.maximum(eps, jnp.abs(jnp.linalg.det(jac)))
  # Special case d == 3 for speed's sake.
  root = jnp.cbrt(abs_det) if d == 3 else abs_det ** (1 / d)
  return fn_mean, scale * root
def contract3_isoscale(x):
  """A fast version of track_isotropic(contract, *)'s scaling for 3D inputs."""
  if x.shape[-1] != 3:
    raise ValueError(f'Inputs must be 3D, are {x.shape[-1]}D.')
  sq_norm = jnp.maximum(1, jnp.sum(x**2, axis=-1))
  # cbrt((2 * sqrt(sq_norm) - 1) ** 2) / sq_norm, evaluated in log space.
  log_scale = (2 / 3) * jnp.log(2 * jnp.sqrt(sq_norm) - 1) - jnp.log(sq_norm)
  return jnp.exp(log_scale)
def construct_ray_warps(fn, t_near, t_far, *, fn_inv=None):
  """Construct a bijection between metric distances and normalized distances.

  See the text around Equation 11 in https://arxiv.org/abs/2111.12077 for a
  detailed explanation.

  Args:
    fn: the function to ray distances.
    t_near: a tensor of near-plane distances.
    t_far: a tensor of far-plane distances.
    fn_inv: Optional, if not None then it's used as the inverse of fn().

  Returns:
    t_to_s: a function that maps distances to normalized distances in [0, 1].
    s_to_t: the inverse of t_to_s.
  """
  if fn is None:
    fn_fwd = lambda x: x
    fn_inv = lambda x: x
  else:
    fn_fwd = fn
    if fn_inv is None:
      # Look up the inverse of a handful of known functions by name.
      fn_inv = {
          'reciprocal': jnp.reciprocal,
          'log': jnp.exp,
          'exp': jnp.log,
          'sqrt': jnp.square,
          'square': jnp.sqrt,
      }[fn.__name__]
  fn_t_near = fn_fwd(t_near)
  fn_t_far = fn_fwd(t_far)

  def t_clip(t):
    # Forcibly clip t to the range of valid values, to guard against inf's.
    return jnp.clip(t, t_near, t_far)

  def t_to_s(t):
    return (fn_fwd(t_clip(t)) - fn_t_near) / (fn_t_far - fn_t_near)

  def s_to_t(s):
    return t_clip(fn_inv(s * fn_t_far + (1 - s) * fn_t_near))

  return t_to_s, s_to_t
def expected_sin(mean, var):
  """Compute the mean of sin(x), x ~ N(mean, var)."""
  # Larger variance damps the expectation towards zero.
  damping = jnp.exp(-0.5 * var)
  return damping * math.safe_sin(mean)
def integrated_pos_enc(mean, var, min_deg, max_deg):
  """Encode `x` with sinusoids scaled by 2^[min_deg, max_deg).

  Args:
    mean: tensor, the mean coordinates to be encoded
    var: tensor, the variance of the coordinates to be encoded.
    min_deg: int, the min degree of the encoding.
    max_deg: int, the max degree of the encoding.

  Returns:
    encoded: jnp.ndarray, encoded variables.
  """
  scales = 2.0 ** jnp.arange(min_deg, max_deg)
  flat = mean.shape[:-1] + (-1,)
  scaled_mean = jnp.reshape(mean[Ellipsis, None, :] * scales[:, None], flat)
  scaled_var = jnp.reshape(var[Ellipsis, None, :] * scales[:, None] ** 2, flat)
  # Appending a pi/2-shifted copy turns the second half of the sins into coses.
  all_means = jnp.concatenate([scaled_mean, scaled_mean + 0.5 * jnp.pi], axis=-1)
  all_vars = jnp.concatenate([scaled_var, scaled_var], axis=-1)
  return expected_sin(all_means, all_vars)
def lift_and_diagonalize(mean, cov, basis):
  """Project `mean` and `cov` onto basis and diagonalize the projected cov."""
  lifted_mean = math.matmul(mean, basis)
  # Diagonal of basis^T @ cov @ basis without forming the full product.
  lifted_cov_diag = jnp.sum(basis * math.matmul(cov, basis), axis=-2)
  return lifted_mean, lifted_cov_diag
def pos_enc(x, min_deg, max_deg, append_identity=True):
  """The positional encoding used by the original NeRF paper."""
  scales = 2.0 ** jnp.arange(min_deg, max_deg)
  flat = x.shape[:-1] + (-1,)
  scaled = jnp.reshape(x[Ellipsis, None, :] * scales[:, None], flat)  # (..., s*c).
  # Note that we're not using safe_sin, unlike IPE. Shifting by pi/2 turns the
  # second half of the features into cosines. (..., s*c + s*c).
  four_feat = jnp.sin(jnp.concatenate([scaled, scaled + 0.5 * jnp.pi], axis=-1))
  if not append_identity:
    return four_feat
  return jnp.concatenate([x, four_feat], axis=-1)
def sqrtm(mat, return_eigs=False):
  """Take the matrix square root of a PSD matrix [..., d, d]."""
  eigvec, eigval = jax.lax.linalg.eigh(
      mat, symmetrize_input=False, sort_eigenvalues=False
  )
  # Reassemble V * sqrt(D) * V^T from the eigendecomposition.
  scaled_vecs = eigvec * math.safe_sqrt(eigval)[Ellipsis, None, :]
  root = math.matmul(scaled_vecs, jnp.moveaxis(eigvec, -2, -1))
  if return_eigs:
    return root, (eigvec, eigval)
  return root
def isotropize(cov, mode='accurate'):
  """Turn covariances into isotropic covariances with the same determinant."""
  d = cov.shape[-1]
  if d == 1:
    return cov
  if mode == 'accurate':
    log_det = jnp.linalg.slogdet(cov)[1]
    diag_val = jnp.exp(log_det / d)
    is_invalid = ~jnp.isfinite(log_det)
  elif mode == 'fast':
    det = jnp.linalg.det(cov)
    diag_val = det ** (1 / d)
    is_invalid = (det <= jnp.finfo(jnp.float32).tiny) | ~jnp.isfinite(det)
  else:
    raise ValueError(f'mode={mode} not implemented.')
  iso = jnp.eye(d) * diag_val[Ellipsis, None, None]
  # Guard against NaN outputs when `det` is super small. Note that this does
  # not guard against NaN gradients!
  mask = is_invalid[Ellipsis, None, None]
  return jnp.where(mask, jnp.zeros_like(cov), iso)
def construct_perp_basis(directions):
  """Construct a perpendicular basis for each 3-vector in `directions`."""
  if directions.shape[-1] != 3:
    raise ValueError(f'directions must be 3D, but is {directions.shape[-1]}D')
  # A cross product with the fixed vector [0, 0, 1] yields a perpendicular
  # vector, except when `directions` is (nearly) parallel to it...
  cross_z = jnp.cross(directions, np.array([0.0, 0.0, 1.0]))
  # ...in which case we fall back to crossing with [1, 1, 1] instead.
  cross_fallback = jnp.cross(directions, np.array([1.0, 1.0, 1.0]))
  degenerate = jnp.all(jnp.abs(cross_z) < np.finfo(np.float32).eps, axis=-1)
  first = jnp.where(degenerate[Ellipsis, None], cross_fallback, cross_z)
  # Crossing `directions` with the first vector completes the basis.
  second = jnp.cross(directions, first)
  unit = lambda v: v / jnp.sqrt(jnp.sum(v**2, axis=-1, keepdims=True))
  return unit(first), unit(second)
def hexify(rng, *, origins, directions, radii, tdist):
  """Produce hexagon-shaped samples from ray segments.

  Args:
    rng: A PRNGKey, or None for a deterministic sampling pattern.
    origins: Ray origins.
    directions: Ray directions; must be 3D (see construct_perp_basis).
    radii: Ray radii. NOTE(review): indexed as radii[..., None, :] below —
      presumably one radius per ray; confirm against callers.
    tdist: Distances along each ray delimiting the segments to sample.

  Returns:
    control: Control points, six per ray segment, in world coordinates.
    perp_mag: Perpendicular offset magnitude for each control point.
  """
  # Construct a base set of angles, by linspacing [0, 2pi] in a specific order.
  # This is one of two orderings of angles that doesn't induce any anisotropy
  # into the sample covariance of the multisample coordinates. Any rotation and
  # mirroring along the z-axis of this ordering is also valid.
  # There exists one alternative valid ordering, which is [0, 3, 2, 5, 4, 1].
  # This seems to work less well though likely because of the strong correlation
  # between adjacent angles.
  thetas = (np.pi / 3) * np.array([0, 2, 4, 3, 5, 1])
  # Lift the angles to the size of the rays.
  sz = tdist.shape[:-1] + (tdist.shape[-1] - 1, len(thetas))
  thetas = jnp.broadcast_to(thetas, sz)
  if rng is not None:
    # Randomly reverse the order of half of the hexes.
    key, rng = random.split(rng)
    flip = random.bernoulli(key, shape=sz[:-1])
    thetas = jnp.where(flip[Ellipsis, None], thetas[Ellipsis, ::-1], thetas)
    # Rotate each hex by some random amount.
    key, rng = random.split(rng)
    thetas += (2 * jnp.pi) * random.uniform(key, shape=sz[:-1])[Ellipsis, None]
  else:
    # If we're deterministic, flip and shift every other hex by 30 degrees.
    flip = jnp.arange(thetas.shape[-2]) % 2
    thetas = jnp.where(flip[Ellipsis, None], thetas[Ellipsis, ::-1], thetas)
    thetas += (flip * jnp.pi / 6)[Ellipsis, None]
  # TODO(barron): Plumb through the dx/dy frame for the original ray in the
  # image plane, to avoid the need of this.
  perp_axis1, perp_axis2 = construct_perp_basis(directions)
  # Grab each t-interval's midpoint and half-width.
  t0, t1 = tdist[Ellipsis, :-1], tdist[Ellipsis, 1:]
  s = (t0 + t1) / 2
  d = (t1 - t0) / 2
  # Compute the length along the ray for each multisample, using mip-NeRF math.
  cz = t0[Ellipsis, None] + math.safe_div(d, (d**2 + 3 * s**2))[Ellipsis, None] * (
      (t1**2 + 2 * s**2)[Ellipsis, None]
      + (3 / np.sqrt(7))
      * (np.arange(6) * (2 / 5) - 1)
      * math.safe_sqrt(((d**2 - s**2) ** 2 + 4 * s**4))[Ellipsis, None]
  )
  # Compute the offset from the ray for each multisample.
  perp_mag = jnp.sqrt(0.5) * radii[Ellipsis, None, :] * cz
  # Go from ray coordinate to world coordinates.
  cx = perp_mag * jnp.cos(thetas)
  cy = perp_mag * jnp.sin(thetas)
  control = (
      origins[Ellipsis, None, None, :]
      + perp_axis1[Ellipsis, None, None, :] * cx[Ellipsis, None]
      + perp_axis2[Ellipsis, None, None, :] * cy[Ellipsis, None]
      + directions[Ellipsis, None, None, :] * cz[Ellipsis, None]
  )
  return control, perp_mag
def unscented_transform(mean, cov, basis, axis=0):
  """Construct "sigma points" along `axis` from each mean and covariance.

  Args:
    mean: Gaussian means, where the last axis is the dimension.
    cov: Covariances, where the last two axes are the dimensions.
    basis: Strategy name: 'mean', 'random_<k>', a tessellated polyhedron such
      as 'tetrahedron_<n>' / 'icosahedron_<n>' / 'octahedron_<n>' (3D only),
      'julier', or 'menegaz'.
    axis: Axis along which the control points are stacked.

  Returns:
    Control points ("sigma points") for each input Gaussian.

  Raises:
    ValueError: If `basis` is unknown, or a polyhedron basis is requested for
      non-3D inputs.
  """
  d = cov.shape[-1]
  mean_ex = jnp.expand_dims(mean, axis)
  if basis == 'mean':
    # This effectively disables the unscented transform.
    return mean_ex
  if basis.startswith('random_'):
    num_random = int(basis.split('_')[-1])
    # TODO(barron): use a non-fixed random seed?
    noise = random.multivariate_normal(
        random.PRNGKey(0),
        jnp.zeros_like(mean),
        cov,
        (num_random,) + mean.shape[:-1],
    )
    control = mean_ex + jnp.moveaxis(jnp.nan_to_num(noise), 0, axis)
    return control
  # All remaining bases offset the mean by columns of sqrt(cov) times a
  # basis-specific transform matrix.
  sqrtm_cov = sqrtm(cov)
  if any([
      basis.startswith(x) for x in ['tetrahedron', 'icosahedron', 'octahedron']
  ]):
    # Use tessellated regular polyhedra vertices (and vec(0)) as control points.
    if d != 3:
      raise ValueError(f'Input is {d}D, but polyhedra are only defined for 3D.')
    base_shape, angular_tesselation = basis.split('_')
    transform = geopoly.generate_basis(
        base_shape, int(angular_tesselation), remove_symmetries=False
    ).T
    transform1 = np.concatenate([np.zeros((d, 1)), transform], axis=-1)
    # Normalize each row to unit RMS so the offsets have a consistent scale.
    transform1 /= np.sqrt(np.mean(transform1**2, axis=1))[:, None]
    control = mean_ex + jnp.moveaxis(
        math.matmul(sqrtm_cov, transform1), -1, axis
    )
  elif basis == 'julier':
    # The most basic symmetric unscented transformation from the original paper,
    # which yields 2*d+1 control points.
    offsets = np.sqrt(d + 0.5) * jnp.moveaxis(sqrtm_cov, -1, axis)
    control = jnp.concatenate(
        [mean_ex, mean_ex + offsets, mean_ex - offsets], axis=axis
    )
  elif basis == 'menegaz':
    # A compact unscented transformation from
    # folk.ntnu.no/skoge/prost/proceedings/cdc-ecc-2011/data/papers/2263.pdf
    # which yields d+1 control points.
    if d == 3:
      # A hand-optimized version of the d==3 case.
      sqrtm_cov_sum = jnp.sum(sqrtm_cov, axis=-1, keepdims=True)
      offsets = jnp.concatenate(
          [-sqrtm_cov_sum, 2 * sqrtm_cov - sqrtm_cov_sum / 3], axis=-1
      )
      control = mean_ex + jnp.moveaxis(offsets, -1, axis)
    else:
      transform = np.sqrt(d + 1) * np.eye(d) + (1 - np.sqrt(d + 1)) / d
      # == sqrt((d+1)) * sqrtm(eye(d) - 1/(d+1))
      transform1 = np.concatenate([-np.ones((d, 1)), transform], axis=-1)
      control = mean_ex + jnp.moveaxis(
          math.matmul(sqrtm_cov, transform1), -1, axis
      )
  else:
    raise ValueError(f'basis={basis} not implemented.')
  return control
def compute_control_points(
    means,
    covs,
    rays,
    tdist,
    rng,
    unscented_mip_basis,
    unscented_scale_mult,
):
  """Wrapper to compute unscented control points for the MLP class.

  Args:
    means: Gaussian means of the ray segments.
    covs: Covariances of the ray segments.
    rays: Rays; required when hexifying or when unscented_scale_mult > 0.
    tdist: Distances along each ray delimiting the segments.
    rng: A PRNGKey, or None for deterministic sampling (hexify only).
    unscented_mip_basis: 'hexify' to sample hexagons along the rays, otherwise
      a basis name forwarded to unscented_transform().
    unscented_scale_mult: If > 0, reconstruct per-point perpendicular
      magnitudes from the control points (requires `rays`).

  Returns:
    control: Control points.
    perp_mag: Perpendicular offset magnitudes, or None when not computed.
  """
  if unscented_mip_basis == 'hexify':
    control, perp_mag = hexify(
        rng,
        origins=rays.origins,
        directions=rays.directions,
        radii=rays.radii,
        tdist=tdist,
    )
  else:
    # Use a normal unscented transformation.
    control = unscented_transform(
        means,
        covs,
        basis=unscented_mip_basis,
        axis=-2,
    )
    if unscented_scale_mult > 0:
      if rays is None:
        # NOTE(review): SyntaxError is an unusual choice here (ValueError is
        # conventional for bad arguments); preserved since callers may catch it.
        raise SyntaxError(
            'Rays are required as input if unscented_scale_mult > 0.'
        )
      # Mimic the math used by hexify to produce comparable scales.
      # Project each control point back onto the ray to recover its t value.
      t_recon = jnp.sum(
          (control - rays.origins[Ellipsis, None, None, :])
          * rays.directions[Ellipsis, None, None, :],
          axis=-1,
      )
      perp_mag = jnp.sqrt(0.5) * rays.radii[Ellipsis, None, :] * t_recon
    else:
      perp_mag = None
  return control, perp_mag
|
evocodebench_data_15
|
import ast
import asyncio
import functools
import itertools
import logging
import os
import re
import string
from copy import deepcopy
from typing import List, Callable, Dict, Optional, Any, Collection
import pandas as pd
logger = logging.getLogger("AutoRAG")
def fetch_contents(corpus_data: pd.DataFrame, ids: List[List[str]]) -> List[List[str]]:
    """
    Fetch the `contents` column for nested lists of document ids.

    Builds a doc_id -> contents mapping once (first occurrence wins for
    duplicated ids, matching the previous first-match behavior) instead of
    re-scanning the whole dataframe for every id, which was O(rows) per id.

    :param corpus_data: Corpus dataframe with `doc_id` and `contents` columns.
    :param ids: Nested lists of doc ids; the result mirrors this nesting.
    :return: Nested lists of the corresponding contents.
    :raises IndexError: If a doc id is not present in `corpus_data`.
    """
    lookup: Dict[str, Any] = {}
    for doc_id, content in zip(corpus_data['doc_id'], corpus_data['contents']):
        # setdefault keeps the first occurrence, like the old `.values[0]`.
        lookup.setdefault(doc_id, content)
    result = []
    for sublist in ids:
        row = []
        for doc_id in sublist:
            if doc_id not in lookup:
                # Preserve the IndexError the previous `.values[0]` lookup
                # raised for unknown ids, with a clearer message.
                raise IndexError(f"doc_id '{doc_id}' does not exist in corpus_data.")
            row.append(lookup[doc_id])
        result.append(row)
    return result
def result_to_dataframe(column_names: List[str]):
    """
    Decorator for converting results to pd.DataFrame.

    With a single column name the wrapped function's return value becomes
    that column; with several, the return value is unpacked positionally,
    one result per column.
    """
    def decorator_result_to_dataframe(func: Callable):
        @functools.wraps(func)
        def wrapper(*args, **kwargs) -> pd.DataFrame:
            output = func(*args, **kwargs)
            if len(column_names) == 1:
                data = {column_names[0]: output}
            else:
                data = dict(zip(column_names, output))
            return pd.DataFrame(data)
        return wrapper
    return decorator_result_to_dataframe
def load_summary_file(summary_path: str,
                      dict_columns: Optional[List[str]] = None) -> pd.DataFrame:
    """
    Load summary file from summary_path.

    :param summary_path: The path of the summary file.
    :param dict_columns: The columns that are dictionary type.
        You must fill this parameter if you want to load summary file properly.
        Default is ['module_params'].
    :return: The summary dataframe.
    :raises ValueError: If the file does not exist or a dict column is missing.
    """
    if not os.path.exists(summary_path):
        raise ValueError(f"summary.csv does not exist in {summary_path}.")
    summary_df = pd.read_csv(summary_path)
    if dict_columns is None:
        dict_columns = ['module_params']
    if any(col not in summary_df.columns for col in dict_columns):
        raise ValueError(f"{dict_columns} must be in summary_df.columns.")
    # Parse the stringified dicts per column with Series.map instead of
    # DataFrame.applymap, which is deprecated since pandas 2.1.
    for col in dict_columns:
        summary_df[col] = summary_df[col].map(ast.literal_eval)
    return summary_df
def make_combinations(target_dict: Dict[str, Any]) -> List[Dict[str, Any]]:
    """
    Make combinations from target_dict.
    The target_dict key value must be a string,
    and the value can be list of values or single value.
    Generates all combinations of values from target_dict: each produced
    dictionary contains exactly one value per key, and all produced
    dictionaries differ from each other.

    :param target_dict: The target dictionary.
    :return: The list of generated dictionaries.
    """
    def _as_list(value):
        # Wrap scalars so every entry is a list of candidate values.
        return value if isinstance(value, list) else [value]

    def _dedupe(values):
        def _hashable(obj):
            try:
                hash(obj)
            except TypeError:
                return False
            return True
        if all(_hashable(item) for item in values):
            return list(set(values))
        # TODO: add duplication check for unhashable objects
        return values

    normalized = {key: _dedupe(_as_list(value)) for key, value in target_dict.items()}
    keys = list(normalized.keys())
    return [dict(zip(keys, combo)) for combo in itertools.product(*normalized.values())]
def explode(index_values: Collection[Any], explode_values: Collection[Collection[Any]]):
    """
    Explode index_values and explode_values.
    The index_values and explode_values must have the same length.
    It will flatten explode_values and keep index_values as a pair.

    :param index_values: The index values.
    :param explode_values: The exploded values.
    :return: Tuple of exploded index_values and exploded explode_values.
    """
    assert len(index_values) == len(explode_values), "Index values and explode values must have same length"
    paired = pd.DataFrame({
        'index_values': index_values,
        'explode_values': explode_values,
    }).explode('explode_values')
    return paired['index_values'].tolist(), paired['explode_values'].tolist()
def replace_value_in_dict(target_dict: Dict, key: str,
                          replace_value: Any) -> Dict:
    """
    Replace the value of the certain key in target_dict.
    If there is not targeted key in target_dict, it will return target_dict.
    The input dictionary is never mutated; a deep copy is returned.

    :param target_dict: The target dictionary.
    :param key: The key to replace.
    :param replace_value: The value to replace.
    :return: The replaced dictionary.
    """
    updated = deepcopy(target_dict)
    if key in updated:
        updated[key] = replace_value
    return updated
def normalize_string(s: str) -> str:
    """
    Taken from the official evaluation script for v1.1 of the SQuAD dataset.
    Lower text and remove punctuation, articles and extra whitespace.
    """
    lowered = s.lower()
    punctuation = set(string.punctuation)
    without_punc = "".join(ch for ch in lowered if ch not in punctuation)
    without_articles = re.sub(r"\b(a|an|the)\b", " ", without_punc)
    return " ".join(without_articles.split())
def convert_string_to_tuple_in_dict(d):
    """Recursively converts strings that start with '(' and end with ')' to tuples in a dictionary."""
    def _looks_like_tuple(obj):
        return isinstance(obj, str) and obj.startswith('(') and obj.endswith(')')

    for key, value in d.items():
        if isinstance(value, dict):
            # Recurse into nested dictionaries.
            convert_string_to_tuple_in_dict(value)
        elif isinstance(value, list):
            for i, item in enumerate(value):
                if isinstance(item, dict):
                    convert_string_to_tuple_in_dict(item)
                elif _looks_like_tuple(item):
                    value[i] = ast.literal_eval(item)
        elif _looks_like_tuple(value):
            d[key] = ast.literal_eval(value)
    return d
def convert_env_in_dict(d: Dict):
    """
    Recursively converts environment variable string in a dictionary to actual environment variable.

    :param d: The dictionary to convert.
    :return: The converted dictionary.
    """
    env_pattern = re.compile(r".*?\${(.*?)}.*?")

    def _expand(text: str) -> str:
        # Substitute every ${NAME} with its environment value ("" if unset).
        for name in env_pattern.findall(text):
            text = text.replace(f"${{{name}}}", os.environ.get(name, ""))
        return text

    for key, value in d.items():
        if isinstance(value, dict):
            convert_env_in_dict(value)
        elif isinstance(value, list):
            for i, item in enumerate(value):
                if isinstance(item, dict):
                    convert_env_in_dict(item)
                elif isinstance(item, str):
                    value[i] = _expand(item)
        elif isinstance(value, str):
            d[key] = _expand(value)
    return d
async def process_batch(tasks, batch_size: int = 64) -> List[Any]:
    """
    Processes tasks in batches asynchronously.
    :param tasks: A list of no-argument functions or coroutines to be executed.
    :param batch_size: The number of tasks to process in a single batch.
                       Default is 64.
    :return: A list of results from the processed tasks.
    """
    collected: List[Any] = []
    # Await each slice of `batch_size` tasks concurrently; order is preserved
    # both within a batch (gather keeps input order) and across batches.
    for start in range(0, len(tasks), batch_size):
        chunk = tasks[start:start + batch_size]
        collected.extend(await asyncio.gather(*chunk))
    return collected
|
evocodebench_data_16
|
# coding=utf-8
# Copyright 2023 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tools for manipulating coordinate spaces and distances along rays."""
from internal import geopoly
from internal import math
import jax
from jax import random
import jax.numpy as jnp
import numpy as np
def contract(x):
  """Contracts points towards the origin (Eq 10 of arxiv.org/abs/2111.12077)."""
  # Squared magnitude, clamped to 1 so points inside the unit ball map to
  # themselves (scale == 1 there).
  sq_norm = jnp.maximum(1, jnp.sum(x**2, axis=-1, keepdims=True))
  norm = jnp.sqrt(sq_norm)
  return x * ((2 * norm - 1) / sq_norm)
def inv_contract(z):
  """The inverse of contract()."""
  # Clamp to 1 so points inside the unit ball map to themselves (inverse
  # scale == 1 there), mirroring contract().
  sq_norm = jnp.maximum(1, jnp.sum(z**2, axis=-1, keepdims=True))
  denom = 2 * jnp.sqrt(sq_norm) - sq_norm
  return z / denom
def track_linearize(fn, mean, cov):
  """Apply function `fn` to a set of means and covariances, ala a Kalman filter.

  We can analytically transform a Gaussian parameterized by `mean` and `cov`
  with a function `fn` by linearizing `fn` around `mean`, and taking advantage
  of the fact that Covar[Ax + y] = A(Covar[x])A^T (see
  https://cs.nyu.edu/~roweis/notes/gaussid.pdf for details).

  Args:
    fn: A function that can be applied to `mean`.
    mean: a tensor of Gaussian means, where the last axis is the dimension.
    cov: a tensor of covariances, where the last two axes are the dimensions.

  Returns:
    fn_mean: the transformed means.
    fn_cov: the transformed covariances.
  """
  # `cov` must carry exactly one more axis than `mean` (full covariances,
  # not a diagonal representation).
  if (len(mean.shape) + 1) != len(cov.shape):
    raise ValueError('cov must be non-diagonal')
  # Linearize fn at `mean`: lin_fn(v) computes the JVP A @ v where
  # A = d(fn)/d(mean).
  fn_mean, lin_fn = jax.linearize(fn, mean)
  # Two vmapped applications of the JVP over the trailing covariance axes
  # compute A @ cov @ A^T (the out_axes=-2 placement effects the transpose).
  fn_cov = jax.vmap(lin_fn, -1, -2)(jax.vmap(lin_fn, -1, -2)(cov))
  return fn_mean, fn_cov
def track_isotropic(fn, mean, scale):
  """Apply function `fn` to a set of means and scales, ala a Kalman filter.

  This is the isotropic or scalar equivalent of track_linearize, as we're still
  linearizing a function and tracking a Gaussian through it, but the input and
  output Gaussians are all isotropic and are only represented with a single
  `scale` value (where `scale**2` is the variance of the Gaussian).

  Args:
    fn: A function that can be applied to `mean`.
    mean: a tensor of Gaussian means, where the last axis is the dimension.
    scale: a tensor of scales, with the same shape as means[..., -1], or None.

  Returns:
    fn_mean: the transformed means.
    fn_scale: the transformed scales, or None if `scale` was None.
  """
  # Only validate shapes when a scale is provided: the None case is explicitly
  # supported below, but the unguarded check used to crash on `scale.shape`.
  if scale is not None and mean.shape[:-1] != scale.shape:
    raise ValueError(
        f'mean.shape[:-1] {mean.shape}[:-1] != scale.shape {scale.shape}.'
    )
  d = mean.shape[-1]
  fn_mean, lin_fn = jax.linearize(fn, mean)
  if scale is not None:
    # Compute the Jacobian of fn function at the locations of each mean.
    jac = jax.vmap(lin_fn, in_axes=-1, out_axes=-1)(
        jnp.broadcast_to(jnp.eye(d), mean.shape + (d,))
    )
    # The cube root of the determinant of the Jacobian is the geometric mean
    # of the eigenvalues of the Jacobian, which gives us the isotropic scaling
    # implied by `fn` at each mean that `scale` should be multiplied by.
    eps = jnp.finfo(jnp.float32).tiny  # Guard against an inf gradient at 0.
    abs_det = jnp.maximum(eps, jnp.abs(jnp.linalg.det(jac)))
    # Special case d == 3 for speed's sake.
    fn_scale = scale * (jnp.cbrt(abs_det) if d == 3 else abs_det ** (1 / d))
  else:
    fn_scale = None
  return fn_mean, fn_scale
def contract3_isoscale(x):
  """A fast version of track_isotropic(contract, *)'s scaling for 3D inputs."""
  if x.shape[-1] != 3:
    raise ValueError(f'Inputs must be 3D, are {x.shape[-1]}D.')
  sq_norm = jnp.maximum(1, jnp.sum(x**2, axis=-1))
  # exp((2/3)*log(2*|x| - 1) - log(|x|^2)) == cbrt((2*|x| - 1)**2) / |x|^2.
  log_scale = 2 / 3 * jnp.log(2 * jnp.sqrt(sq_norm) - 1) - jnp.log(sq_norm)
  return jnp.exp(log_scale)
def construct_ray_warps(fn, t_near, t_far, *, fn_inv=None):
  """Construct a bijection between metric distances and normalized distances.

  See the text around Equation 11 in https://arxiv.org/abs/2111.12077 for a
  detailed explanation.

  Args:
    fn: the function to ray distances.
    t_near: a tensor of near-plane distances.
    t_far: a tensor of far-plane distances.
    fn_inv: Optional, if not None then it's used as the inverse of fn().

  Returns:
    t_to_s: a function that maps distances to normalized distances in [0, 1].
    s_to_t: the inverse of t_to_s.
  """
  if fn is None:
    # No warp: both directions are the identity.
    forward = lambda x: x
    inverse = lambda x: x
  else:
    forward = fn
    if fn_inv is not None:
      inverse = fn_inv
    else:
      # A simple mapping from some functions to their inverse.
      inverse = {
          'reciprocal': jnp.reciprocal,
          'log': jnp.exp,
          'exp': jnp.log,
          'sqrt': jnp.square,
          'square': jnp.sqrt,
      }[fn.__name__]
  # Warped endpoints of the metric [t_near, t_far] interval.
  fn_t_near = forward(t_near)
  fn_t_far = forward(t_far)
  # Forcibly clip t to the range of valid values, to guard against inf's.
  t_clip = lambda t: jnp.clip(t, t_near, t_far)
  t_to_s = lambda t: (forward(t_clip(t)) - fn_t_near) / (fn_t_far - fn_t_near)
  s_to_t = lambda s: t_clip(inverse(s * fn_t_far + (1 - s) * fn_t_near))
  return t_to_s, s_to_t
def expected_sin(mean, var):
  """Compute the mean of sin(x), x ~ N(mean, var)."""
  # The exp(-var/2) attenuation drives the expectation to 0 as variance grows.
  attenuation = jnp.exp(-0.5 * var)
  return attenuation * math.safe_sin(mean)
def integrated_pos_enc(mean, var, min_deg, max_deg):
  """Encode `x` with sinusoids scaled by 2^[min_deg, max_deg).

  Args:
    mean: tensor, the mean coordinates to be encoded
    var: tensor, the variance of the coordinates to be encoded.
    min_deg: int, the min degree of the encoding.
    max_deg: int, the max degree of the encoding.

  Returns:
    encoded: jnp.ndarray, encoded variables.
  """
  freqs = 2.0 ** jnp.arange(min_deg, max_deg)
  flat_shape = mean.shape[:-1] + (-1,)
  # Lift means by each frequency; variances scale by the squared frequency.
  lifted_mean = jnp.reshape(mean[Ellipsis, None, :] * freqs[:, None], flat_shape)
  lifted_var = jnp.reshape(var[Ellipsis, None, :] * freqs[:, None] ** 2, flat_shape)
  # sin(x + pi/2) == cos(x), and both phases share the same variance.
  phases = jnp.concatenate([lifted_mean, lifted_mean + 0.5 * jnp.pi], axis=-1)
  variances = jnp.concatenate([lifted_var, lifted_var], axis=-1)
  return expected_sin(phases, variances)
def lift_and_diagonalize(mean, cov, basis):
  """Project `mean` and `cov` onto basis and diagonalize the projected cov."""
  projected_mean = math.matmul(mean, basis)
  # diag(B^T @ cov @ B) computed without forming the full projected covariance.
  projected_cov_diag = jnp.sum(basis * math.matmul(cov, basis), axis=-2)
  return projected_mean, projected_cov_diag
def pos_enc(x, min_deg, max_deg, append_identity=True):
  """The positional encoding used by the original NeRF paper."""
  freqs = 2.0 ** jnp.arange(min_deg, max_deg)
  flat_shape = x.shape[:-1] + (-1,)
  # Lift each coordinate by every frequency, then flatten: (..., s*c).
  lifted = jnp.reshape(x[Ellipsis, None, :] * freqs[:, None], flat_shape)
  # Note that we're not using safe_sin, unlike IPE.
  # sin at two phases gives sin and cos features: (..., s*c + s*c).
  four_feat = jnp.sin(jnp.concatenate([lifted, lifted + 0.5 * jnp.pi], axis=-1))
  if not append_identity:
    return four_feat
  return jnp.concatenate([x, four_feat], axis=-1)
def sqrtm(mat, return_eigs=False):
  """Take the matrix square root of a PSD matrix [..., d, d]."""
  eigvec, eigval = jax.lax.linalg.eigh(
      mat, symmetrize_input=False, sort_eigenvalues=False
  )
  # V @ diag(sqrt(lambda)) @ V^T, with a safe sqrt that tolerates tiny
  # negative eigenvalues from numerical noise.
  scaled_vecs = eigvec * math.safe_sqrt(eigval)[Ellipsis, None, :]
  result = math.matmul(scaled_vecs, jnp.moveaxis(eigvec, -2, -1))
  if return_eigs:
    return result, (eigvec, eigval)
  return result
def isotropize(cov, mode='accurate'):
  """Turn covariances into isotropic covariances with the same determinant."""
  d = cov.shape[-1]
  if d == 1:
    # A 1x1 covariance is already isotropic.
    return cov
  if mode == 'fast':
    det = jnp.linalg.det(cov)
    diag_val = det ** (1 / d)
    is_invalid = (det <= jnp.finfo(jnp.float32).tiny) | ~jnp.isfinite(det)
  elif mode == 'accurate':
    # Work in log space for numerical stability.
    log_det = jnp.linalg.slogdet(cov)[1]
    diag_val = jnp.exp(log_det / d)
    is_invalid = ~jnp.isfinite(log_det)
  else:
    raise ValueError(f'mode={mode} not implemented.')
  iso = diag_val[Ellipsis, None, None] * jnp.eye(d)
  # Guard against NaN outputs when `det` is super small. Note that this does not
  # guard against NaN gradients!
  return jnp.where(is_invalid[Ellipsis, None, None], jnp.zeros_like(cov), iso)
def construct_perp_basis(directions):
  """Construct a perpendicular basis for each 3-vector in `directions`."""
  if directions.shape[-1] != 3:
    raise ValueError(f'directions must be 3D, but is {directions.shape[-1]}D')
  # A vector perpendicular to `directions`: cross with the fixed axis [0, 0, 1].
  axis_cross = jnp.cross(directions, np.array([0.0, 0.0, 1.0]))
  # Near-degenerate case: if `directions` ~ [0, 0, 1] the first cross product
  # vanishes, so fall back to crossing with [1, 1, 1] there.
  fallback_cross = jnp.cross(directions, np.array([1.0, 1.0, 1.0]))
  degenerate = jnp.all(jnp.abs(axis_cross) < np.finfo(np.float32).eps, axis=-1)
  cross1 = jnp.where(degenerate[Ellipsis, None], fallback_cross, axis_cross)
  # Crossing `directions` with the first perpendicular completes the basis.
  cross2 = jnp.cross(directions, cross1)
  unit = lambda z: z / jnp.sqrt(jnp.sum(z**2, axis=-1, keepdims=True))
  return unit(cross1), unit(cross2)
def hexify(rng, *, origins, directions, radii, tdist):
  """Produce hexagon-shaped samples from ray segments.

  Args:
    rng: a PRNG key used to randomly flip and rotate each hexagon, or None
      for a deterministic flip/30-degree-shift pattern.
    origins: ray origins (3-vectors, broadcast against the hex samples).
    directions: ray directions; must be 3-vectors (construct_perp_basis
      requires it).
    radii: per-ray radii that scale each sample's perpendicular offset.
    tdist: distances along each ray; adjacent pairs bound each segment.

  Returns:
    control: world-space control points, six per ray segment.
    perp_mag: the perpendicular offset magnitude of each control point.
  """
  # Construct a base set of angles, by linspacing [0, 2pi] in a specific order.
  # This is one of two orderings of angles that doesn't induce any anisotropy
  # into the sample covariance of the multisample coordinates. Any rotation and
  # mirroring along the z-axis of this ordering is also valid.
  # There exists one alternative valid ordering, which is [0, 3, 2, 5, 4, 1].
  # This seems to work less well though likely because of the strong correlation
  # between adjacent angles.
  thetas = (np.pi / 3) * np.array([0, 2, 4, 3, 5, 1])
  # Lift the angles to the size of the rays.
  sz = tdist.shape[:-1] + (tdist.shape[-1] - 1, len(thetas))
  thetas = jnp.broadcast_to(thetas, sz)
  if rng is not None:
    # Randomly reverse the order of half of the hexes.
    key, rng = random.split(rng)
    flip = random.bernoulli(key, shape=sz[:-1])
    thetas = jnp.where(flip[Ellipsis, None], thetas[Ellipsis, ::-1], thetas)
    # Rotate each hex by some random amount.
    key, rng = random.split(rng)
    thetas += (2 * jnp.pi) * random.uniform(key, shape=sz[:-1])[Ellipsis, None]
  else:
    # If we're deterministic, flip and shift every other hex by 30 degrees.
    flip = jnp.arange(thetas.shape[-2]) % 2
    thetas = jnp.where(flip[Ellipsis, None], thetas[Ellipsis, ::-1], thetas)
    thetas += (flip * jnp.pi / 6)[Ellipsis, None]
  # TODO(barron): Plumb through the dx/dy frame for the original ray in the
  # image plane, to avoid the need of this.
  perp_axis1, perp_axis2 = construct_perp_basis(directions)
  # Grab each t-interval's midpoint and half-width.
  t0, t1 = tdist[Ellipsis, :-1], tdist[Ellipsis, 1:]
  s = (t0 + t1) / 2
  d = (t1 - t0) / 2
  # Compute the length along the ray for each multisample, using mip-NeRF math.
  cz = t0[Ellipsis, None] + math.safe_div(d, (d**2 + 3 * s**2))[Ellipsis, None] * (
      (t1**2 + 2 * s**2)[Ellipsis, None]
      + (3 / np.sqrt(7))
      * (np.arange(6) * (2 / 5) - 1)
      * math.safe_sqrt(((d**2 - s**2) ** 2 + 4 * s**4))[Ellipsis, None]
  )
  # Compute the offset from the ray for each multisample.
  perp_mag = jnp.sqrt(0.5) * radii[Ellipsis, None, :] * cz
  # Go from ray coordinate to world coordinates.
  cx = perp_mag * jnp.cos(thetas)
  cy = perp_mag * jnp.sin(thetas)
  control = (
      origins[Ellipsis, None, None, :]
      + perp_axis1[Ellipsis, None, None, :] * cx[Ellipsis, None]
      + perp_axis2[Ellipsis, None, None, :] * cy[Ellipsis, None]
      + directions[Ellipsis, None, None, :] * cz[Ellipsis, None]
  )
  return control, perp_mag
def unscented_transform(mean, cov, basis, axis=0):
  """Construct "sigma points" along `axis` from each mean and covariance.

  Args:
    mean: Gaussian means, last axis of size d.
    cov: Gaussian covariances with trailing [d, d] axes.
    basis: one of 'mean' (disable the transform), 'random_<n>',
      '<polyhedron>_<tesselation>' with polyhedron in
      {tetrahedron, icosahedron, octahedron} (3D only), 'julier', or 'menegaz'.
    axis: the axis along which the control points are stacked.

  Returns:
    The control ("sigma") points, expanded along `axis`.

  Raises:
    ValueError: if `basis` is unknown, or a polyhedron basis is used with
      d != 3.
  """
  d = cov.shape[-1]
  mean_ex = jnp.expand_dims(mean, axis)
  if basis == 'mean':
    # This effectively disables the unscented transform.
    return mean_ex
  if basis.startswith('random_'):
    num_random = int(basis.split('_')[-1])
    # TODO(barron): use a non-fixed random seed?
    noise = random.multivariate_normal(
        random.PRNGKey(0),
        jnp.zeros_like(mean),
        cov,
        (num_random,) + mean.shape[:-1],
    )
    # nan_to_num guards against degenerate covariances in the sampler.
    control = mean_ex + jnp.moveaxis(jnp.nan_to_num(noise), 0, axis)
    return control
  # All remaining bases offset the mean by (matrix sqrt of cov) @ transform.
  sqrtm_cov = sqrtm(cov)
  if any([
      basis.startswith(x) for x in ['tetrahedron', 'icosahedron', 'octahedron']
  ]):
    # Use tessellated regular polyhedra vertices (and vec(0)) as control points.
    if d != 3:
      raise ValueError(f'Input is {d}D, but polyhedra are only defined for 3D.')
    base_shape, angular_tesselation = basis.split('_')
    transform = geopoly.generate_basis(
        base_shape, int(angular_tesselation), remove_symmetries=False
    ).T
    transform1 = np.concatenate([np.zeros((d, 1)), transform], axis=-1)
    # Normalize each row to unit RMS so the offsets preserve scale.
    transform1 /= np.sqrt(np.mean(transform1**2, axis=1))[:, None]
    control = mean_ex + jnp.moveaxis(
        math.matmul(sqrtm_cov, transform1), -1, axis
    )
  elif basis == 'julier':
    # The most basic symmetric unscented transformation from the original paper,
    # which yields 2*d+1 control points.
    offsets = np.sqrt(d + 0.5) * jnp.moveaxis(sqrtm_cov, -1, axis)
    control = jnp.concatenate(
        [mean_ex, mean_ex + offsets, mean_ex - offsets], axis=axis
    )
  elif basis == 'menegaz':
    # A compact unscented transformation from
    # folk.ntnu.no/skoge/prost/proceedings/cdc-ecc-2011/data/papers/2263.pdf
    # which yields d+1 control points.
    if d == 3:
      # A hand-optimized version of the d==3 case.
      sqrtm_cov_sum = jnp.sum(sqrtm_cov, axis=-1, keepdims=True)
      offsets = jnp.concatenate(
          [-sqrtm_cov_sum, 2 * sqrtm_cov - sqrtm_cov_sum / 3], axis=-1
      )
      control = mean_ex + jnp.moveaxis(offsets, -1, axis)
    else:
      transform = np.sqrt(d + 1) * np.eye(d) + (1 - np.sqrt(d + 1)) / d
      # == sqrt((d+1)) * sqrtm(eye(d) - 1/(d+1))
      transform1 = np.concatenate([-np.ones((d, 1)), transform], axis=-1)
      control = mean_ex + jnp.moveaxis(
          math.matmul(sqrtm_cov, transform1), -1, axis
      )
  else:
    raise ValueError(f'basis={basis} not implemented.')
  return control
def compute_control_points(
    means,
    covs,
    rays,
    tdist,
    rng,
    unscented_mip_basis,
    unscented_scale_mult,
):
  """Wrapper to compute unscented control points for the MLP class.

  Args:
    means: Gaussian means, passed to unscented_transform (unused by 'hexify').
    covs: Gaussian covariances, passed to unscented_transform.
    rays: rays providing origins/directions/radii. May be None only when
      unscented_mip_basis != 'hexify' and unscented_scale_mult <= 0.
    tdist: per-ray sample distances, used by hexify.
    rng: PRNG key, or None (hexify then uses its deterministic pattern).
    unscented_mip_basis: 'hexify' or a basis name for unscented_transform.
    unscented_scale_mult: if > 0, also compute per-point perpendicular scales.

  Returns:
    control: the control points.
    perp_mag: perpendicular magnitudes, or None when the basis is not
      'hexify' and unscented_scale_mult <= 0.
  """
  if unscented_mip_basis == 'hexify':
    control, perp_mag = hexify(
        rng,
        origins=rays.origins,
        directions=rays.directions,
        radii=rays.radii,
        tdist=tdist,
    )
  else:
    # Use a normal unscented transformation.
    control = unscented_transform(
        means,
        covs,
        basis=unscented_mip_basis,
        axis=-2,
    )
    if unscented_scale_mult > 0:
      if rays is None:
        # NOTE(review): SyntaxError is a misuse for a runtime argument check;
        # ValueError would be conventional. Left as-is so callers that catch
        # this exact exception type keep working.
        raise SyntaxError(
            'Rays are required as input if unscented_scale_mult > 0.'
        )
      # Mimic the math used by hexify to produce comparable scales.
      t_recon = jnp.sum(
          (control - rays.origins[Ellipsis, None, None, :])
          * rays.directions[Ellipsis, None, None, :],
          axis=-1,
      )
      perp_mag = jnp.sqrt(0.5) * rays.radii[Ellipsis, None, :] * t_recon
    else:
      perp_mag = None
  return control, perp_mag
|
evocodebench_data_17
|
import os
from contextlib import redirect_stdout
import argparse
from copy import deepcopy
from XAgent.config import CONFIG, ARGS
from command import CommandLine, CommandLineParam
def parse_args(argv=None) -> argparse.Namespace:
    """
    Parse the command line arguments and return them as an argparse.Namespace object.

    Args:
        argv: Optional list of argument strings to parse instead of sys.argv.
            Defaults to None, which preserves the previous behavior of reading
            sys.argv (backward-compatible; also makes this function testable).

    Returns:
        argparse.Namespace: An object containing command line arguments and their values.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("--task", type=str, required=True, help="The task description.")
    parser.add_argument("--upload-files", nargs='+', dest="upload_files", help="List of files to upload.")
    parser.add_argument("--model", type=str, help="Model identifier for the task.")
    parser.add_argument("--record-dir", type=str, dest="record_dir", help="Directory to record task execution logs.")
    parser.add_argument("--mode", type=str, default="auto", help="Operational mode: 'auto' or 'manual'.")
    parser.add_argument("--quiet", action="store_true", default=False, help="Run in quiet mode; minimal output.")
    parser.add_argument("--max-subtask-chain-length", type=int, dest="max_subtask_chain_length",
                        help="Maximum length of subtask chain.")
    parser.add_argument("--enable-ask-human-for-help", action="store_true", dest="enable_ask_human_for_help",
                        help="Flag to enable asking for human assistance.")
    parser.add_argument("--max-plan-refine-chain-length", type=int, dest="max_plan_refine_chain_length",
                        help="Maximum length of plan refinement chain.")
    parser.add_argument("--max-plan-tree-depth", type=int, dest="max_plan_tree_depth",
                        help="Maximum depth of the plan tree.")
    parser.add_argument("--max-plan-tree-width", type=int, dest="max_plan_tree_width",
                        help="Maximum width of the plan tree.")
    parser.add_argument("--max-retry-times", type=int, dest="max_retry_times", help="Maximum number of retry attempts.")
    parser.add_argument("--config-file", type=str, default=os.getenv('CONFIG_FILE', 'assets/config.yml'),
                        dest="config_file", help="Path to the configuration file.")
    return parser.parse_args(argv)
def execute_command_line_process(args: argparse.Namespace, quiet_mode: bool = False) -> None:
    """
    Execute the command line process based on the parsed arguments. If quiet mode is enabled,
    redirect stdout to a file specified by the recorder's record_root_dir.

    Args:
        args (argparse.Namespace): Parsed command line arguments.
        quiet_mode (bool): Whether to run in quiet mode, outputting to a file instead of the terminal.
    """
    args_dict = vars(args)
    # Copy every explicitly-provided CLI value into the global ARGS overrides.
    for key, value in args_dict.items():
        if value is not None:
            if key == 'model':
                # The model lives inside default_completion_kwargs, so clone that
                # dict from CONFIG before overriding only the model entry.
                ARGS['default_completion_kwargs'] = deepcopy(CONFIG['default_completion_kwargs'])
                ARGS['default_completion_kwargs']['model'] = value
            else:
                ARGS[key] = value
    # Redirect stdout to a file if quiet mode is true
    if quiet_mode:
        # Imported lazily; NOTE(review): presumably the recorder must be created
        # only after ARGS is populated above -- confirm that ordering matters.
        from XAgent.running_recorder import recorder
        record_file_path = os.path.join(recorder.record_root_dir, "command_line.ansi")
        with open(record_file_path, "w", encoding="utf-8") as file, redirect_stdout(file):
            start_command_line(args_dict)
    else:
        start_command_line(args_dict)
def start_command_line(args_dict: dict) -> None:
    """
    Start the command line interface with the provided arguments.

    Args:
        args_dict (dict): A dictionary of command line arguments.
    """
    # Only task/upload_files/mode come from the CLI; the role is fixed.
    params = CommandLineParam(
        task=args_dict['task'],
        upload_files=args_dict.get('upload_files'),
        role="Assistant",
        mode=args_dict["mode"],
    )
    CommandLine(params).start()
if __name__ == '__main__':
    # Script entry point: parse CLI flags, publish the chosen config file via
    # the environment for modules that read CONFIG_FILE lazily, then run.
    args = parse_args()
    os.environ['CONFIG_FILE'] = args.config_file
    # The quiet_mode argument is passed directly to the function
    execute_command_line_process(args, quiet_mode=args.quiet)
|
evocodebench_data_18
|
from typing import Any, Callable, Dict, Iterable, List
import numpy as np
from pydantic import fields
# ----- validators -----
def is_odd(cls: type, v: int, field: fields.ModelField) -> int:
    """Check that kernel size are odd numbers.

    Args:
        cls (type): Class type.
        v (int): Value to check.
        field (fields.ModelField): Field descriptor.

    Raises:
        ValueError: Exception raised if number isn't odd.

    Returns:
        int: `v` sent for further processing.
    """
    if not v % 2:
        raise ValueError(f"{cls.__name__}: {field.name} must be odd numbers.")
    return v
def is_binary(cls: type, v: np.ndarray, field: fields.ModelField) -> np.ndarray:
    """Check if array has only boolean values, i.e. is binary.

    Args:
        cls (type): Class type.
        v (np.ndarray): Value to check.
        field (fields.ModelField): Field descriptor.

    Raises:
        ValueError: Exception raised if array doesn't contain bool datatypes.

    Returns:
        np.ndarray: `v` sent for further processing.
    """
    if v.dtype == np.dtype("bool"):
        return v
    raise ValueError(f"{cls.__name__}: {field.name} must be binary. got dtype {v.dtype}")
def is_list_of_points(cls: type, v: np.ndarray, field: fields.ModelField) -> np.ndarray:
    """Check if np.ndarray has shape (_, 2).

    Args:
        cls (type): Class type.
        v (np.ndarray): Value to check.
        field (fields.ModelField): Field descriptor.

    Raises:
        ValueError: Exception raised if array doesn't contain 2D points.

    Returns:
        np.ndarray: `v` sent for further processing.
    """
    rows_of_2d_points = len(v.shape) == 2 and v.shape[1] == 2
    if not rows_of_2d_points:
        raise ValueError(f"{cls.__name__}: {field.name} must have shape (_, 2).")
    return v
def is_not_empty(cls: type, v: List[Any], field: fields.ModelField) -> List[Any]:
    """Check that the list is not empty.

    Args:
        cls (type): Class type.
        v (List[Any]): Value to check.
        field (fields.ModelField): Field descriptor.

    Raises:
        ValueError: Exception raised if list is empty.

    Returns:
        List[Any]: `v` sent for further processing.
    """
    if not len(v):
        raise ValueError(f"{cls.__name__}: {field.name} list cannot be empty.")
    return v
def is_not_zero_sum(cls: type, v: Any, field: fields.ModelField) -> Any:
    """Check that the values do not sum to zero.

    Args:
        cls (type): Class type.
        v (Any): Value to check.
        field (fields.ModelField): Field descriptor.

    Raises:
        ValueError: Raised if v doesn't sum to 0.

    Returns:
        Any: `v` sent for further processing.
    """
    total = np.sum(v)
    if total == 0:
        raise ValueError(f"{cls.__name__}: {field.name} sum cannot be zero.")
    return v
def are_all_positive(cls: type, v: Any, field: fields.ModelField) -> Any:
    """Check that all values are positive.

    Args:
        cls (type): Class type.
        v (Any): Value to check.
        field (fields.ModelField): Field descriptor.

    Raises:
        ValueError: Raise if not all values in are positive.

    Returns:
        Any: `v` sent for further processing.
    """
    if isinstance(v, Iterable):
        # `not all(... >= 0)` also rejects NaN, which fails every comparison.
        if not all(value >= 0 for value in v):
            raise ValueError(f"{cls.__name__}: all {field.name} must be positive. Received {v}")
    elif v < 0.0:
        raise ValueError(f"{cls.__name__}: {field.name} must be positive. Received {v}")
    return v
def to_dtype_float32(cls: type, v: np.ndarray, field: fields.ModelField) -> np.ndarray:
    """Convert input np.ndarray to dtype np.float32.

    Args:
        cls (type): Class type.
        v (np.ndarray): Value to convert
        field (fields.ModelField): Field descriptor.

    Returns:
        np.ndarray: `v` sent for further processing.
    """
    converted = v.astype(np.float32)
    return converted
# ----- root_validators -----
def is_valid_bbox(cls: type, values: Dict[str, float]) -> Dict[str, float]:
    """Check that the bounding box is valid (min strictly below max on both axes)."""
    bbox_is_degenerate = (
        values["x_min"] >= values["x_max"] or values["y_min"] >= values["y_max"]
    )
    if bbox_is_degenerate:
        raise ValueError(
            f'{cls.__name__}: invalid bbox. x_min={values["x_min"]}, x_max={values["x_max"]},'
            f' y_min={values["y_min"]}, y_max={values["y_max"]}'
        )
    return values
# ----- parametrized validators -----
def is_array_n_dimensions(nb_dimensions: int) -> Callable:
    """Create a pydantic validator checking if an array is n-dimensional.

    Args:
        nb_dimensions (int): number of dimensions the array must have

    Returns:
        Callable: the validator.
    """
    def validator(cls: type, v: np.ndarray, field: fields.ModelField) -> np.ndarray:
        """Check if the array has the right number of dimensions."""
        ndim = len(v.shape)
        # An empty array of shape (0,) is accepted when 0 dimensions are expected.
        empty_zero_dim_exception = v.shape == (0,) and nb_dimensions == 0
        if ndim != nb_dimensions and not empty_zero_dim_exception:
            raise ValueError(
                f"{cls.__name__}: wrong number of dimensions for {field.name}. "
                f"Expected {nb_dimensions}, got {ndim}"
            )
        return v
    return validator
# ----- parametrized root_validators -----
def are_lengths_equal(field1: str, field2: str) -> Callable:
    """Create a pydantic validator checking if the two fields have the same length.

    Args:
        field1 (str): name of the first field
        field2 (str): name of the second field

    Returns:
        Callable: the validator.
    """
    def __root_validator(cls: type, values: Dict[str, List[Any]]) -> Dict[str, List[Any]]:
        """Check if len(field1) equals len(field2)."""
        len1, len2 = len(values[field1]), len(values[field2])
        if len1 != len2:
            raise ValueError(
                f"{cls.__name__}: {field1} and {field2} length mismatch, "
                f"resp. {len1} and {len2}"
            )
        return values
    return __root_validator
def are_shapes_equal(field1: str, field2: str) -> Callable:
    """Create a pydantic validator checking if the two fields have the same shape.

    Args:
        field1 (str): name of the first field
        field2 (str): name of the second field

    Returns:
        Callable: the validator.
    """
    def __root_validator(cls: type, values: Dict[str, np.ndarray]) -> Dict[str, np.ndarray]:
        """Check if field1.shape equals field2.shape."""
        if values[field1].shape == values[field2].shape:
            return values
        raise ValueError(f"{cls.__name__}: {field1} and {field2} shape mismatch.")
    return __root_validator
def are_all_shapes_equal(field1: str, field2: str) -> Callable:
    """Create a pydantic validator checking if two lists of array have the same shape per element.

    This function creates a pydantic validator for two lists of np.ndarrays which checks if they
    have the same length, and if all of their elements have the same shape one by one.

    Args:
        field1 (str): name of the first field
        field2 (str): name of the second field

    Returns:
        Callable: the validator.
    """
    def __root_validator(cls: type, values: Dict[str, np.ndarray]) -> Dict[str, np.ndarray]:
        """Check if len(field1) equals len(field2) and if every element have the same shape."""
        shapes1 = [element.shape for element in values[field1]]
        shapes2 = [element.shape for element in values[field2]]
        if len(values[field1]) != len(values[field2]) or shapes1 != shapes2:
            raise ValueError(
                f"{cls.__name__}: {field1} and {field2} shape mismatch, resp. {shapes1} and {shapes2}."
            )
        return values
    return __root_validator
|
evocodebench_data_19
|
import dataclasses
import datetime
import inspect
import json
import typing
from typing import get_args, Literal
import string
import types
def json_default(thing):
    """Best-effort serializer for objects json.dumps can't handle natively.

    Handles dataclasses, datetimes, classes, typing constructs (Union /
    Literal[...] / generic aliases), and containers thereof; raises TypeError
    for anything else.
    """
    # Dataclass instances serialize as their field dict; asdict raises
    # TypeError for non-dataclasses, which is the "not a dataclass" signal.
    try:
        return dataclasses.asdict(thing)
    except TypeError:
        pass
    if isinstance(thing, datetime.datetime):
        return thing.isoformat(timespec='microseconds')
    # Classes (including NoneType itself) serialize as their name.
    if isinstance(thing, type):
        return thing.__name__
    #if hasattr(typing, "_GenericAlias") and isinstance(thing, typing._GenericAlias):
    # _UnionGenericAlias is a private typing internal; gate on its existence
    # since it is not present in every Python version.
    if hasattr(typing, "_UnionGenericAlias"):
        if isinstance(thing, typing._UnionGenericAlias):
            return {
                "Union": [json_default(arg) for arg in get_args(thing)]
            }
    # NOTE(review): this equality only matches Literal[...] (a literal
    # Ellipsis); other Literal[...] aliases compare unequal. Presumably the
    # intent was to match any Literal -- confirm before relying on this branch.
    if thing == Literal[...]:
        return {
            "Literal": thing.__args__
        }
    # The None instance (the NoneType class is handled by the `type` branch).
    if isinstance(thing, type(None)):
        return "None"
    # Bare special forms such as Any.
    if isinstance(thing, typing._SpecialForm):
        return thing._name
    if isinstance(thing, typing._GenericAlias) or isinstance(thing, types.GenericAlias):
        return {
            "GenericAlias": [json_default(arg) for arg in get_args(thing)]
        }
    if isinstance(thing, str):
        return thing
    if isinstance(thing, list) or isinstance(thing, tuple) or isinstance(thing, set):
        return [json_default(item) for item in thing]
    if isinstance(thing, dict):
        return {json_default(key): json_default(value) for key, value in thing.items()}
    raise TypeError(f"object of type {type(thing).__name__} not serializable")
def json_dumps(thing):
    """Serialize `thing` to a compact, deterministic (key-sorted) JSON string."""
    dump_options = {
        "default": json_default,
        "ensure_ascii": False,
        "sort_keys": True,
        "indent": None,
        "separators": (',', ':'),
    }
    return json.dumps(thing, **dump_options)
def get_model(content, logger, func_hash):
    """
    Get the model from the content and the logger.
    Decide on model depending on the length of the content: if it fits within
    the finetuning token limit return (model, True), otherwise (model, False).
    Args:
        content (str): the content to be aligned
        logger (buffered logger): the logger
        func_hash (str): the function hash
    Returns:
        model (str): the model to be used
        finetunable (bool): whether the model is finetunable
    """
    token_count = approximate_token_count(content)
    finetune_limit = logger.finetune_token_limit
    finetune_model, teacher_models = logger.get_models(func_hash)
    if token_count < finetune_limit:
        return finetune_model, True
    # this is just for backwards compatibility currently
    if len(teacher_models) == 0 or isinstance(teacher_models[0], str):
        teacher_models = [("gpt-4", 7000), ("gpt-4-32k", 31000)]
    for model, token_limit in teacher_models:
        if token_count < token_limit:
            return model, False
    raise ValueError("The input content and align statements combined are too long, please shorten it. The maximum currently allowed token limit is 32000")
def approximate_token_count(content):
    """
    Approximate the token count of input
    Number of tokens is word tokens (nr of words * 1.33) + nr of special characters (which are usually their own tokens)
    Args:
        content (str, bytes): the content to be approximated
    Returns:
        number_of_tokens (int): the number of tokens
    """
    common_special_characters = r"\/(){}[]<>|`~@#$%^&*+=-_:;\""
    if isinstance(content, str):
        word_tokens = int(len(content.split(" ")) * 1.333)
        special_tokens = sum(content.count(char) for char in common_special_characters)
        return word_tokens + special_tokens
    if isinstance(content, bytes):
        # Byte strings are counted the same way, comparing byte-for-byte.
        word_tokens = int(len(content.split(b" ")) * 1.333)
        special_tokens = sum(
            content.count(char.encode("utf-8")) for char in common_special_characters
        )
        return word_tokens + special_tokens
def _deep_tuple(obj):
"""
Convert a list or dict to a tuple recursively to allow for hashing and becoming a key for mock_behaviors
:param obj:
:return:
"""
# transform pydantic objects into dicts
if hasattr(obj, "__dict__"):
obj = obj.__dict__
if isinstance(obj, list) or isinstance(obj, tuple):
return tuple(_deep_tuple(e) for e in obj)
elif isinstance(obj, dict):
return tuple((k, _deep_tuple(v)) for k, v in sorted(obj.items()))
else:
return obj
def get_key(args, kwargs) -> tuple:
    """Build a hashable key from call arguments by deep-converting both to tuples."""
    return _deep_tuple(args), _deep_tuple(kwargs)
def prepare_object_for_saving(input_object):
    """
    Get a dictionary representation of the object.

    Lists/tuples/dicts are converted element-wise; objects with a __dict__
    become plain dicts of their converted attributes; date/time values become
    dicts of their non-None components. Anything else is returned unchanged.
    """
    # check if list
    if isinstance(input_object, list):
        return [prepare_object_for_saving(item) for item in input_object]
    # check if tuple
    elif isinstance(input_object, tuple):
        return tuple(prepare_object_for_saving(item) for item in input_object)
    # check if dict
    elif isinstance(input_object, dict):
        return {key: prepare_object_for_saving(value) for key, value in input_object.items()}
    # check if pydantic object
    if hasattr(input_object, "__dict__"):
        # Build a NEW dict rather than writing converted values back into
        # input_object.__dict__ -- the previous in-place mutation destructively
        # altered the live object (e.g. replaced nested datetimes with dicts).
        return {
            key: prepare_object_for_saving(value)
            for key, value in input_object.__dict__.items()
        }
    # check if datetime for custom logic
    elif isinstance(input_object, (datetime.datetime, datetime.date, datetime.time)):
        attrs = ['year', 'month', 'day', 'hour', 'minute', 'second', 'microsecond', 'tzinfo']
        return {
            attr: getattr(input_object, attr, None)
            for attr in attrs
            if getattr(input_object, attr, None) is not None
        }
    return input_object
def encode_int(n):
    """Map an integer in [0, 36] to one character of the [a-z0-9_] alphabet."""
    # Same 37-character alphabet as decode_int: letters, digits, underscore.
    return (string.ascii_lowercase + string.digits + "_")[n]
def decode_int(s):
    """Inverse of encode_int: map one [a-z0-9_] character back to its index."""
    # Same 37-character alphabet as encode_int: letters, digits, underscore.
    return (string.ascii_lowercase + string.digits + "_").index(s)
def _get_source_ipython(func) -> str:
    """
    Get the source code of a function from IPython (to support Colab and Jupyter notebooks)
    :param func: The function to get the source code from
    :return: The source code of the function, or None if no defining cell is found
    """
    # Imported lazily so this module works outside notebook environments.
    from IPython import get_ipython
    shell = get_ipython()
    # Scan the notebook's executed cells for the one defining this name.
    # NOTE(review): the search targets "class <name>" even though `func` is
    # documented as a function -- presumably callers pass classes; confirm.
    target = f"class {func.__name__}"
    for cell in shell.history_manager.input_hist_parsed:
        if target in cell:
            return cell
    return None
def get_source(func) -> str:
    """
    Get the source code of a function
    Args:
        func (function): the function to get the source code from
    Returns:
        source (str): the source code of the function
    """
    try:
        source = inspect.getsource(func)
    except Exception:
        # Fall back to scanning IPython history (Colab / Jupyter definitions).
        source = _get_source_ipython(func)
    return source
|
evocodebench_data_20
|
# coding=utf-8
# Copyright 2023 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pyformat: mode=yapf
"""Math utility functions."""
from typing import Optional, Union
from internal import math
import jax
from jax import numpy as jnp
import optax
def matmul(a, b):
  """jnp.matmul pinned to highest precision (the TPU default is bfloat16)."""
  highest = jax.lax.Precision.HIGHEST
  return jnp.matmul(a, b, precision=highest)
def safe_sqrt(x,
              *,
              eps = jnp.finfo(jnp.float32).eps,
              value_at_zero = 0.0):
  """A safe version of jnp.sqrt that avoid evaluating at zero.

  Note: sqrt(x) = sqrt(eps) = 3e-4 when x < eps = 1.19e-7.

  Args:
    x: The operand.
    eps: A small number to prevent NaNs.
    value_at_zero: The value to clamp x to near zero. The return value will be
      sqrt(value_at_zero)

  Returns:
    The sqrt(x), or sqrt(value_at_zero) near zero.
  """
  # Substitute `value_at_zero` wherever x is at or below eps, so the sqrt
  # (and its gradient) is never evaluated at a problematic operand.
  fallback = jnp.full_like(x, value_at_zero)
  return jnp.sqrt(jnp.where(x > eps, x, fallback))
def safe_acos(t,
              eps = jnp.finfo(jnp.float32).eps):
  """A safe version of arccos which avoids evaluating at -1 or 1."""
  # Clamp the operand strictly inside (-1, 1); equivalent to jnp.clip.
  clamped = jnp.minimum(1.0 - eps, jnp.maximum(-1.0 + eps, t))
  return jnp.arccos(clamped)
def safe_log(x,
             *,
             eps = jnp.finfo(jnp.float32).eps,
             value_at_zero = jnp.finfo(jnp.float32).eps):
  """Computes a safe log that avoids evaluating at zero.

  Args:
    x: Input array.
    eps: A small number to prevent NaNs.
    value_at_zero: The value to clamp x to near zero. The return value will be
      log(value_at_zero)

  Returns:
    log(x) or log(value_at_zero) near zero.
  """
  # Replace values at or below eps with `value_at_zero` so neither the log
  # nor its gradient is evaluated at zero.
  safe_x = jnp.where(x > eps, x, jnp.full_like(x, value_at_zero))
  return jnp.log(safe_x)
def normalize(
    x,
    axis = -1,
    # pylint: disable=redefined-builtin
    ord = None,
    eps = jnp.finfo(jnp.float32).eps,
):
  """Scales `x` to unit norm along `axis`, with the norm clamped below by eps."""
  # optax.safe_norm keeps the denominator >= eps so the division is stable.
  denom = optax.safe_norm(x, axis=axis, ord=ord, min_norm=eps, keepdims=True)
  return x / denom
def inv_sqrtm(
    matrix,
    normalize_eigvals = False,
):
  """Takes the inverse matrix square root of a PSD matrix.

  Forked from `coord.sqrtm`.

  Args:
    matrix: (..., d, d) A positive semi-definite matrix.
    normalize_eigvals: If True, normalize the eigenvalues by the geometric mean.

  Returns:
    The inverse square root of the matrix, and (eigvec, eigval) if return_eigs
    is True.
  """
  eigvec, eigval = jax.lax.linalg.eigh(
      matrix, symmetrize_input=False, sort_eigenvalues=False)
  if normalize_eigvals:
    # Dividing by the geometric mean, done in log space for numerical
    # stability.
    log_eigval = jnp.log(eigval)
    eigval = jnp.exp(log_eigval - jnp.mean(log_eigval, axis=-1, keepdims=True))
  # Scale each eigenvector column by 1/sqrt(eigval), guarded by safe ops.
  inv_scale = math.safe_div(1, math.safe_sqrt(eigval))[Ellipsis, None, :]
  # Recompose: V * diag(1/sqrt(lambda)) * V^T.
  sqrtm_mat = matmul(eigvec * inv_scale, jnp.moveaxis(eigvec, -2, -1))
  return sqrtm_mat, (eigvec, eigval)
def to_homogeneous(v):
  """Converts a vector to a homogeneous representation.

  Args:
    v: (*, C) A non-homogeneous vector.

  Returns:
    (*, C+1) A homogeneous version of v.
  """
  # Append a trailing 1 along the last axis.
  ones = jnp.ones_like(v[Ellipsis, :1])
  return jnp.concatenate([v, ones], axis=-1)
def from_homogeneous(v):
  """Converts a homogeneous vector to a non-homogeneous vector.

  Args:
    v: (*, C+1) A homogeneous vector.

  Returns:
    (*, C) The non-homogeneous version of v.
  """
  # Divide every leading component by the trailing homogeneous coordinate.
  w = v[Ellipsis, -1:]
  return v[Ellipsis, :-1] / w
def apply_homogeneous_transform(transform,
                                vectors):
  """Apply a homogeneous transformation to a collection of vectors.

  Args:
    transform: (C+1,C+1) A homogeneous transformation matrix.
    vectors: (*,C) An array containing 3D points.

  Returns:
    (*,C) The points transformed by the array.
  """
  # Flatten to (N, C), lift to homogeneous coordinates, transform, project
  # back, then restore the caller's original shape.
  flat = vectors.reshape((-1, vectors.shape[-1]))
  transformed_flat = from_homogeneous(matmul(transform, to_homogeneous(flat).T).T)
  return transformed_flat.reshape(vectors.shape)
def generalized_bias_and_gain(x, slope,
                              threshold):
  """Maps the input according to the generalized bias and gain function.

  References:
    https://arxiv.org/abs/2010.09714

  Args:
    x: The inputs array with values in [0, 1] to map.
    slope: The slope parameter of the curve which controls the slope of the
      curve at the threshold.
    threshold: The value at which `x` reverses its shape, and the point at which
      the output is guaranteed to be equal to the input.

  Returns:
    The output of the curve at each input point `x`.
  """
  # `tiny` keeps the denominators strictly positive.
  tiny = jnp.finfo(jnp.float32).tiny
  # Branch used below the threshold (the "bias" half of the curve).
  below = threshold * x / (x + slope * (threshold - x) + tiny)
  # Branch used at/above the threshold (the "gain" half of the curve).
  above = 1 + (1 - threshold) * (x - 1) / (1 - x - slope *
                                           (threshold - x) + tiny)
  return jnp.where(x < threshold, below, above)
|
evocodebench_data_21
|
# Copyright The Lightning AI team.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import hashlib
import os
from logging import Logger, getLogger
from time import time
from typing import Any, Dict, List, Optional, Tuple, Union

import numpy as np
from torch.utils.data import IterableDataset

from litdata.constants import (
    _DEFAULT_CACHE_DIR,
    _INDEX_FILENAME,
)
from litdata.streaming import Cache
from litdata.streaming.item_loader import BaseItemLoader
from litdata.streaming.resolver import Dir, _resolve_dir
from litdata.streaming.sampler import ChunkedIndex
from litdata.streaming.serializers import Serializer
from litdata.streaming.shuffle import FullShuffle, NoShuffle, Shuffle
from litdata.utilities.env import _DistributedEnv, _is_in_dataloader_worker, _WorkerEnv
# Loggers must be obtained via getLogger() (never by instantiating Logger
# directly) so this logger is registered in the logging hierarchy and picks up
# application-configured handlers and levels.
logger = getLogger(__name__)
class StreamingDataset(IterableDataset):
    """The streaming dataset can be used once your data have been optimised using the DatasetOptimiser class."""

    def __init__(
        self,
        input_dir: Union[str, "Dir"],
        item_loader: Optional[BaseItemLoader] = None,
        shuffle: bool = False,
        drop_last: Optional[bool] = None,
        seed: int = 42,
        serializers: Optional[Dict[str, Serializer]] = None,
        max_cache_size: Union[int, str] = "100GB",
    ) -> None:
        """The streaming dataset can be used once your data have been optimised using the DatasetOptimiser class.

        Arguments:
            input_dir: Path to the folder where the input data is stored.
            item_loader: The logic to load an item from a chunk.
            shuffle: Whether to shuffle the data.
            drop_last: If `True`, drops the last items to ensure that
                all processes/workers return the same amount of data.
                The argument `drop_last` is set to `True` in a distributed setting
                and `False` otherwise.
            seed: Random seed for shuffling.
            serializers: The serializers used to serialize and deserialize the chunks.
            max_cache_size: The maximum cache size used by the StreamingDataset.
        """
        super().__init__()
        if not isinstance(shuffle, bool):
            raise ValueError(f"Shuffle should be a boolean. Found {shuffle}")

        input_dir = _resolve_dir(input_dir)

        self.input_dir = input_dir
        self.item_loader = item_loader
        self.shuffle: bool = shuffle
        self.distributed_env = _DistributedEnv.detect()

        if self.distributed_env.world_size > 1:
            if drop_last is False:
                # `Logger.warn` is deprecated; `warning` is the supported spelling.
                logger.warning(
                    "You're operating within a distributed environment and have disabled the `drop_last` option. "
                    "Please note that this configuration may lead to training interruptions if your system depends "
                    "on distributed collectives."
                )
            else:
                # Default to dropping the last items in distributed settings so
                # every rank yields the same number of samples.
                drop_last = True

        self.drop_last = drop_last or False
        self.seed = seed
        self.max_cache_size = max_cache_size

        # Lazily-created per-worker state (set in `__iter__` / `__getitem__`).
        self.cache: Optional[Cache] = None
        self.worker_env: Optional[_WorkerEnv] = None
        self.worker_chunks: List[int] = []
        self.worker_intervals: List[List[int]] = []
        self.current_indexes: List[int] = []
        self.chunk_index = 0
        self.num_chunks: Optional[int] = None
        self.global_index = 0
        self.index = 0
        self.has_triggered_download = False
        self.min_items_per_replica: Optional[int] = None
        self.current_epoch = 1
        self.random_state = None
        self.shuffler: Optional[Shuffle] = None
        self.serializers = serializers
        # Holds a restored checkpoint state, if `load_state_dict` was called.
        self._state_dict: Optional[Dict[str, Any]] = None

    def set_shuffle(self, shuffle: bool) -> None:
        self.shuffle = shuffle

    def set_epoch(self, current_epoch: int) -> None:
        """Set the current epoch to the dataset on epoch starts.

        When using the StreamingDataLoader, this is done automatically
        """
        # If the state dict has been reloaded, don't override the current epoch
        # The StreamingDataloader would clean this out
        if self._state_dict is None:
            self.current_epoch = current_epoch

    def _create_cache(self, worker_env: _WorkerEnv) -> Cache:
        # Special paths (e.g. /teamspace/datasets/...) are redirected to a
        # local cache folder derived from the input location.
        if _should_replace_path(self.input_dir.path):
            cache_path = _try_create_cache_dir(
                input_dir=self.input_dir.path if self.input_dir.path else self.input_dir.url
            )
            if cache_path is not None:
                self.input_dir.path = cache_path

        cache = Cache(
            input_dir=self.input_dir,
            item_loader=self.item_loader,
            chunk_bytes=1,
            serializers=self.serializers,
            max_cache_size=self.max_cache_size,
        )
        cache._reader._try_load_config()

        if not cache.filled:
            raise ValueError(
                f"The provided dataset `{self.input_dir}` doesn't contain any {_INDEX_FILENAME} file."
                " HINT: Did you successfully optimize a dataset to the provided `input_dir`?"
            )

        return cache

    def _create_shuffler(self, cache: Cache) -> Shuffle:
        # When resuming from a checkpoint, the shuffler must be rebuilt with
        # the seed/drop_last that were in effect when the state was saved.
        seed = self.seed
        drop_last = self.drop_last
        if self._state_dict is not None:
            state: Dict[str, Any] = self._state_dict
            seed = state["seed"]
            drop_last = state["drop_last"]
        return FullShuffle(cache, seed, drop_last) if self.shuffle else NoShuffle(cache, seed, drop_last)

    def __len__(self) -> int:
        if self.shuffler is None:
            cache = self._create_cache(worker_env=_WorkerEnv.detect())
            self.shuffler = self._create_shuffler(cache)
        return self.shuffler.get_len(self.distributed_env, self.current_epoch)

    def __iter__(self) -> "StreamingDataset":
        # When the StreamingDataset is used within map or optimize, let's refetch the distributed env.
        if os.getenv("DATA_OPTIMIZER_GLOBAL_RANK"):
            self.distributed_env = _DistributedEnv.detect()

        self.worker_env = _WorkerEnv.detect()
        self.cache = self._create_cache(worker_env=self.worker_env)
        self.shuffler = self._create_shuffler(self.cache)

        # Handle restart
        if self._state_dict:
            self._validate_state_dict()
            state: Dict[str, Any] = self._state_dict
            # Restore the epoch the checkpoint was taken in.
            self.current_epoch = state["current_epoch"]

        chunks_per_replica, intervals_per_replica = self.shuffler.get_chunks_and_intervals_per_ranks(
            self.distributed_env, self.current_epoch
        )
        chunks_replica = chunks_per_replica[self.distributed_env.global_rank % self.distributed_env.world_size]
        intervals_replica = intervals_per_replica[self.distributed_env.global_rank % self.distributed_env.world_size]

        # Handle restart
        if self._state_dict:
            self._resume(chunks_replica, intervals_replica)
        else:
            # Assign every Nth chunk of this replica to this dataloader worker.
            self.worker_chunks = []
            self.worker_intervals = []
            for i, (chunk_index, chunk_interval) in enumerate(zip(chunks_replica, intervals_replica)):
                if i % self.worker_env.world_size != self.worker_env.rank:
                    continue
                self.worker_chunks.append(chunk_index)
                self.worker_intervals.append(chunk_interval)

            self.num_chunks = len(self.worker_chunks)
            self.current_indexes = []
            self.chunk_index = 0
            self.global_index = 0
            self.index = 0

        self.has_triggered_download = False
        self.last_time = time()

        return self

    def _resume(self, chunks_replica: List[int], intervals_replica: List[Any]) -> None:
        assert self._state_dict
        assert self.worker_env
        assert self.shuffler

        state: Dict[str, Any] = self._state_dict

        num_workers = state["num_workers"]
        batch_size = state["batch_size"]

        # TODO: Implement elastic sampling where the number of workers, ranks can change.
        num_samples_yielded = self._state_dict["num_samples_yielded"]

        # replay sampling from each worker / chunks using the batch size
        workers_chunks, workers_intervals = _associate_chunks_to_workers(
            num_workers, self.worker_env, chunks_replica, intervals_replica
        )
        indexes = _replay_sampling(num_samples_yielded, batch_size, num_workers)
        chunks_index, indexes = _replay_chunks_sampling(workers_intervals, indexes)

        # select the chunks and intervals associated to this worker
        worker_rank = self.worker_env.rank
        self.num_chunks = len(workers_intervals[worker_rank])
        self.chunk_index = chunks_index[worker_rank]
        self.worker_chunks = workers_chunks[worker_rank]
        self.worker_intervals = workers_intervals[worker_rank]

        # replay the indexes for the current chunks
        interval = self.worker_intervals[self.chunk_index]
        current_indexes = np.arange(interval[0], interval[1])

        # re-shuffle the indexes
        current_indexes = self.shuffler(current_indexes, self.num_chunks, self.current_epoch, self.chunk_index)

        # skip any indexes already consumed
        current_indexes = current_indexes[indexes[worker_rank] :]
        self.current_indexes = current_indexes

        self.global_index = num_samples_yielded

        # bump the chunk_index
        self.chunk_index += 1

    def __getitem__(self, index: Union[ChunkedIndex, int]) -> Any:
        if self.cache is None:
            self.worker_env = _WorkerEnv.detect()
            self.cache = self._create_cache(worker_env=self.worker_env)
            self.shuffler = self._create_shuffler(self.cache)
        if isinstance(index, int):
            index = ChunkedIndex(index, self.cache._get_chunk_index_from_index(index))
        return self.cache[index]

    def __next__(self) -> Any:
        # Prevent to create more batch on a given process
        if self.global_index >= len(self):
            self.current_epoch += 1
            raise StopIteration

        # Lazily re-populate the interval to reduce memory usage.
        if len(self.current_indexes) == 0:
            if self.chunk_index == self.num_chunks:
                self.current_epoch += 1
                raise StopIteration

            # reset index
            self.index = 0

            interval = self.worker_intervals[self.chunk_index]
            current_indexes = np.arange(interval[0], interval[1])

            assert self.shuffler is not None
            assert self.num_chunks is not None
            self.current_indexes = self.shuffler(current_indexes, self.num_chunks, self.current_epoch, self.chunk_index)

            self.chunk_index += 1

        # Get the first index
        index = self.current_indexes.pop(0)

        # Call the `__getitem__` method.
        data = self.__getitem__(
            ChunkedIndex(
                index=index,
                chunk_index=self.worker_chunks[self.chunk_index - 1],
                # We provide the chunk indexes only on the first download trigger.
                chunk_indexes=None if self.has_triggered_download else self.worker_chunks,
                # Fix: the last chunk sits at position `len(self.worker_intervals) - 1`;
                # comparing against `len(self.worker_intervals)` could never be True.
                is_last_index=(self.chunk_index - 1) == len(self.worker_intervals) - 1
                and len(self.current_indexes) == 1,
            )
        )

        self.has_triggered_download = True
        self.global_index += 1
        self.index += 1

        return data

    def state_dict(self, num_samples_yielded: int, num_workers: int, batch_size: int) -> Dict[str, Any]:
        if _is_in_dataloader_worker():
            raise RuntimeError("The method `state_dict` should only be called in the main process.")

        if self._state_dict is not None:
            self._state_dict["num_samples_yielded"] = num_samples_yielded
            return self._state_dict

        state = {
            "num_samples_yielded": num_samples_yielded,
            "num_workers": num_workers,
            "batch_size": batch_size,
            "current_epoch": self.current_epoch,
            "input_dir_path": self.input_dir.path,
            "input_dir_url": self.input_dir.url,
            "item_loader": self.item_loader.state_dict() if self.item_loader else None,
            "drop_last": self.drop_last,
            "seed": self.seed,
            "world_size": self.distributed_env.world_size,
            "shuffle": self.shuffle,
        }
        return state

    def load_state_dict(self, state_dict: Dict[str, Any]) -> None:
        if state_dict:
            # the state is restored within the workers
            self._state_dict = state_dict

    def _validate_state_dict(self) -> None:
        assert self._state_dict
        assert self.worker_env
        assert self.cache

        state: Dict[str, Any] = self._state_dict

        if state["shuffle"] != self.shuffle:
            raise ValueError(
                "The provided `shuffle` state doesn't match the current one. "
                f"Found `{self.shuffle}` instead of `{state['shuffle']}`."
            )

        if state["num_workers"] != self.worker_env.world_size:
            raise ValueError(
                "The provided `num_workers` state doesn't match the current one. "
                f"Found `{self.worker_env.world_size}` instead of `{state['num_workers']}`."
            )

        # Note: We need to check whether the path has been resolved to its associated cache.
        # In this case, validate the cache folder is the same.
        if _should_replace_path(state["input_dir_path"]):
            cache_path = _try_create_cache_dir(
                input_dir=state["input_dir_path"] if state["input_dir_path"] else state["input_dir_url"]
            )
            if cache_path != self.input_dir.path:
                raise ValueError(
                    "The provided `input_dir` path state doesn't match the current one. "
                    f"Found `{self.input_dir.path}` instead of `{cache_path}`."
                )
        elif state["input_dir_path"] != self.input_dir.path:
            raise ValueError(
                "The provided `input_dir` path state doesn't match the current one. "
                f"Found `{self.input_dir.path}` instead of `{state['input_dir_path']}`."
            )

        if state["input_dir_url"] != self.input_dir.url:
            raise ValueError(
                "The provided `input_dir` URL state doesn't match the current one. "
                f"Found `{self.input_dir.url}` instead of `{state['input_dir_url']}`."
            )

        if state["seed"] != self.seed:
            raise ValueError(
                "The provided `seed` state doesn't match the current one. "
                f"Found `{self.seed}` instead of `{state['seed']}`."
            )

        if self.item_loader and state["item_loader"] != self.item_loader.state_dict():
            raise ValueError(
                "The provided `item_loader` state doesn't match the current one. "
                f"Found `{self.item_loader.state_dict()}` instead of `{state['item_loader']}`."
            )

        if state["drop_last"] != self.drop_last:
            raise ValueError(
                "The provided `drop_last` state doesn't match the current one. "
                f"Found `{self.drop_last}` instead of `{state['drop_last']}`."
            )
def _try_create_cache_dir(input_dir: Optional[str]) -> Optional[str]:
    """Create (if needed) and return the local cache folder for `input_dir`."""
    # The cache folder name is a digest of the input location, so the same
    # dataset always maps to the same cache directory.
    digest = hashlib.md5((input_dir or "").encode()).hexdigest()
    if "LIGHTNING_CLUSTER_ID" in os.environ and "LIGHTNING_CLOUD_PROJECT_ID" in os.environ:
        # Running on the Lightning cloud: use the dedicated /cache drive.
        cache_dir = os.path.join("/cache", "chunks", digest)
    else:
        cache_dir = os.path.join(_DEFAULT_CACHE_DIR, digest)
    os.makedirs(cache_dir, exist_ok=True)
    return cache_dir
def _should_replace_path(path: Optional[str]) -> bool:
"""Whether the input path is a special path to be replaced."""
if path is None or path == "":
return True
return path.startswith("/teamspace/datasets/") or path.startswith("/teamspace/s3_connections/")
def is_integer(value: str) -> bool:
    """Return True when `value` parses as a base-10 integer, False otherwise."""
    try:
        int(value)
    except Exception:
        # Any parsing failure (ValueError, TypeError, ...) means "not an int".
        return False
    return True
def _associate_chunks_to_workers(
    num_workers: int, worker_env: _WorkerEnv, chunks_replica: List[int], intervals_replica: List[Any]
) -> Any:
    """Round-robin the replica's chunks (and their intervals) over the dataloader workers."""
    workers_chunks = {}
    workers_intervals = {}
    for worker_idx in range(num_workers):
        # Worker `w` is assigned every chunk at position i with i % world_size == w.
        assigned = [
            pair
            for i, pair in enumerate(zip(chunks_replica, intervals_replica))
            if i % worker_env.world_size == worker_idx
        ]
        workers_chunks[worker_idx] = [chunk for chunk, _ in assigned]
        workers_intervals[worker_idx] = [interval for _, interval in assigned]
    return workers_chunks, workers_intervals
def _replay_sampling(num_samples_yielded: int, batch_size: int, num_workers: int) -> Dict[int, int]:
"""This function replays the sampling from the dataloader."""
divisible_num_batches_yielded = num_samples_yielded // (num_workers * batch_size)
indexes = {}
for worker_idx in range(num_workers):
indexes[worker_idx] = divisible_num_batches_yielded * batch_size
num_samples_yielded = num_samples_yielded - (num_workers * divisible_num_batches_yielded * batch_size)
# take care of the reminder
worker_idx = 0 # reset the worker_idx
while True:
if num_samples_yielded >= batch_size:
indexes[worker_idx] += batch_size
worker_idx = (worker_idx + 1) % num_workers
num_samples_yielded -= batch_size
else:
indexes[worker_idx] += num_samples_yielded
break
return indexes
def _replay_chunks_sampling(
workers_intervals: Dict[int, List[Any]], indexes: Dict[int, int]
) -> Tuple[Dict[int, int], Dict[int, int]]:
chunks_index = {}
for worker_idx in range(len(workers_intervals)):
chunks_index[worker_idx] = 0
for worker_idx, intervals in workers_intervals.items():
for interval in intervals:
size = interval[-1] - interval[0]
if indexes[worker_idx] >= size:
indexes[worker_idx] -= size
chunks_index[worker_idx] += 1
return chunks_index, indexes
|
evocodebench_data_22
|
# coding=utf-8
# Copyright 2023 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Voxel grid interpolation and Instant NGP hash encoding utility functions."""
# This Python/Jax program is a re-implementation of the multiresolution
# hash encoding structure described in Section 3 of the
# Instant Neural Graphics Primitives SIGGRAPH 2022 paper by
# Müller, Evans, Schied, and Keller.
# see https://github.com/NVlabs/instant-ngp
import functools
from typing import Union
from flax import linen as nn
import gin
from internal import hash_resample
from internal import math
from internal import resample
import jax
from jax import random
import jax.numpy as jnp
import numpy as onp
# A bounding box defined as a tuple containing (min_coord, max_coord).
BboxType = tuple[tuple[float, float, float], tuple[float, float, float]]
def trilerp(
    values,
    coordinates,
    datastructure,
):
  """Sample from a hash or 3D voxel grid `values` using `coordinates`.

  TODO(keunhong): Consider making datastructure an enum as well.

  Args:
    values: A (D,H,W,C) array containing values if datastructure == 'grid' or a
      (N,C) array containing values if datastructure == 'hash'.
    coordinates: A (..., 3) array containing coordinates to sample. The values
      must be between 0 and the size of that dimension.
    datastructure: Which datastructure to use, either 'grid' or 'hash'.

  Returns:
    A (..., C) array containing the interpolated values at the given
    coordinates.

  Raises:
    ValueError: If an invalid datastructure is passed.
  """
  # (Docstring fix: the previously documented `op_mode` argument does not
  # exist in this signature.)
  if datastructure == 'hash':
    fn = hash_resample.hash_resample_3d
  elif datastructure == 'grid':
    # Note: unlike hash_resample_3d, resample_3d expects integer coordinate
    # voxel centers, so we offset the coordinates by 0.5 here. We also
    # flip the input coordinates since the convention used in `resample_3d`
    # is for input point (x, y, z) to index grid_values[z, y, x]. We prefer the
    # grid axis order to align with the Cartesian coordinate axes.
    coordinates = jnp.flip(coordinates - 0.5, axis=-1)

    def fn(v, c):
      """Add and remove two extra dims at the front of coord/output tensors."""
      return resample.resample_3d(v, c[None, None])[0, 0]

  else:
    raise ValueError(
        'datastructure must be either `grid` or `hash` but '
        f'`{datastructure}` was given.'
    )

  coordinates_flat = coordinates.reshape(-1, coordinates.shape[-1])
  if values.dtype != coordinates_flat.dtype:
    coordinates_flat = coordinates_flat.astype(values.dtype)
  result_flat = fn(values, coordinates_flat)
  result = result_flat.reshape(coordinates.shape[:-1] + (values.shape[-1],))
  return result
# Each of the L (`num_scales`) resolution levels in the 3D hash table stores
# “neural feature” vectors of length F (`num_features`).
# A given level is discretized into N^3 cells,
# where N (`grid_size`) ranges from
# Nmin=16 to Nmax ∈ [512..524288] (or more),
# which are then hashed into a table with T (`hash_map_size`) entries.
# This is summarized in Table 1 in the InstantNGP paper.
@gin.configurable
class HashEncoding(nn.Module):  # TODO(barron): Rename this to just "NGP".
  """Multiresolution grid/hash encoding from Instant NGP."""

  hash_map_size: int = 2**19  # parameter T in InstantNGP
  num_features: int = 2  # parameter F in InstantNGP
  scale_supersample: float = 2.0  # The "supersampling" factor between scales.
  # == 0.25 scales sizes by 16x, like (16, 256).
  # == 0.5 scales sizes by 4x, like (16, 64, 256).
  # == 1 scales sizes by 2x, like (16, 32, 64, 128, 256).
  # == 2 scales sizes by sqrt(2)x, like (16, 23, 32, 45, 64, ..., 256).
  # If you want a ratio of R between adjacent grid scales, set
  # scale_supersample = 1 / log2(R).
  # TODO(barron): Parameterize this as with R directly.
  min_grid_size: int = 16  # parameter N_min in InstantNGP
  max_grid_size: int = 2048  # parameter N_max in InstantNGP
  hash_init_range: float = 1e-4
  precondition_scaling: float = 10.0  # Modification to NGP made by hedman@.
  # Defines the bounding box of the coordinates hash grid contains. If it is a
  # float, it will cover the bounding box ((-s, -s, -s), (s, s, s)). Otherwise,
  # it can be a tuple containing (min_coord, max_coord), e.g.:
  # `((xmin, ymin, zmin), (xmax, ymax, zmax))`.
  # Defaults to 2 for the MipNeRF 360 "squash" space.
  bbox_scaling: Union[float, BboxType] = 2.0
  append_scale: bool = True  # Append an explicit scale feature.
  jitter_coordinates: bool = False  # Randomly jitter coords by [-0.5, 0.5).
  # To retrieve the “neural” feature vector for a given 3D coordinate
  # x in the [0,1]^3 volume (which MipNeRF360 extends to an unbounded volume),
  # the voxels surrounding the coordinate are fetched from the hash table
  # and their corresponding feature vectors are then tri-linearly interpolated.
  # The feature vectors from each level are concatenated together,
  # and then returned for further processing by a following MLP.
  # This is summarized in Figure 3 of the paper InstantNGP paper.
  use_float16_hash: bool = False  # Whether to use float16 for the hashes.

  @property
  def grid_sizes(self):
    """Returns the grid sizes."""
    # Number of scales needed so adjacent levels differ by the ratio implied
    # by `scale_supersample` (see the comment on that field above).
    desired_num_scales = 1 + self.scale_supersample * onp.log2(
        self.max_grid_size / self.min_grid_size
    )
    num_scales = int(onp.round(desired_num_scales))
    # Reject parameter combinations that don't yield an integer scale count.
    if onp.abs(desired_num_scales - num_scales) > 1e-4:
      raise ValueError(
          'grid scale parameters are ('
          + f'min_grid_size={self.min_grid_size}, '
          + f'max_grid_size={self.max_grid_size}, '
          + f'scale_supersample={self.scale_supersample}), '
          + f'which yields a non-integer number of scales {desired_num_scales}.'
      )
    # Geometrically spaced sizes from min to max, rounded to integers.
    return onp.round(
        onp.geomspace(
            self.min_grid_size,
            self.max_grid_size,
            num_scales,
        )
    ).astype(onp.int32)

  def get_grid_size_str(self, grid_size):
    """Returns `grid_size` as a zero-padded string (stable param-name width)."""
    grid_size_str_len = len(str(onp.max(self.grid_sizes)))  # For zero paddding.
    return str(grid_size).zfill(grid_size_str_len)  # Zero pad.

  @property
  def bbox(self):
    """Returns `bbox_scaling` normalized to an explicit (min, max) array."""
    bbox = self.bbox_scaling
    if isinstance(bbox, float):
      # A scalar s means the symmetric box ((-s,)*3, (s,)*3).
      bbox = ((-bbox,) * 3, (bbox,) * 3)
    return onp.array(bbox)

  @nn.compact
  def __call__(
      self,
      x,
      *,
      x_scale=None,
      per_level_fn=None,
      train=True,
      rng=None,
      min_grid_size=None,
      max_grid_size=None,
  ):
    """Encodes positions `x` with the multiresolution grid/hash tables.

    Args:
      x: Coordinates to encode; remapped into [0, 1]^3 using `self.bbox`.
        Assumed shape (..., 3) — not checked here; TODO confirm.
      x_scale: Optional per-point scale, treated as the std of an isotropic
        Gaussian around `x`, used to downweight levels finer than the query's
        footprint. Requires a cubic (square) bbox.
      per_level_fn: Optional function applied to each level's features before
        concatenation.
      train: NOTE(review): unused in this body; presumably kept for interface
        parity — confirm with callers.
      rng: RNG key; only consumed when `jitter_coordinates` is True.
      min_grid_size: If set, levels coarser than this are skipped.
      max_grid_size: If set, levels finer than this are skipped.

    Returns:
      The concatenated per-level feature vector, scaled by
      `precondition_scaling`.
    """
    # Map x to [0,1]^3
    x = (x - self.bbox[0]) / (self.bbox[1] - self.bbox[0])

    if x_scale is not None:
      bbox_sizes = onp.diff(self.bbox, axis=0)[0]
      # x_scale is a single isotropic scalar per point, so all bbox axes must
      # have the same extent for the rescaling below to be meaningful.
      if any(abs(bbox_sizes[0] - bbox_sizes[1:]) > onp.finfo(onp.float32).eps):
        raise ValueError('x_scale must be None when bbox is not square.')
      x_scale /= bbox_sizes[0]

    # Create a list of per-level features.
    grid_values = []
    grid_sizes = []
    grid_datastructures = []

    features = []
    # First pass: register/initialize one parameter tensor per level.
    for grid_size in self.grid_sizes:
      if (min_grid_size is not None and grid_size < min_grid_size) or (
          max_grid_size is not None and grid_size > max_grid_size
      ):
        continue
      if grid_size**3 <= self.hash_map_size:
        # For smaller levels (fewer cells), store entries in a dense grid.
        datastructure = 'grid'
        shape_prefix = [grid_size] * 3
      else:
        datastructure = 'hash'
        shape_prefix = [self.hash_map_size]

      # Initialize/grab the tensor of grid or hash values.
      maxval = self.hash_init_range / self.precondition_scaling
      dtype_to_use = jnp.float32
      if self.use_float16_hash and datastructure == 'hash':
        dtype_to_use = jnp.float16
      init_fn = functools.partial(
          random.uniform,
          shape=shape_prefix + [self.num_features],
          minval=-maxval,
          maxval=maxval,
          dtype=dtype_to_use,
      )
      grid_size_str = self.get_grid_size_str(grid_size)
      values = self.param(f'{datastructure}_{grid_size_str}', init_fn)
      grid_values.append(values)
      grid_sizes.append(grid_size)
      grid_datastructures.append(datastructure)

    # Second pass: interpolate a feature vector from each level.
    for values, grid_size, datastructure in zip(
        grid_values, grid_sizes, grid_datastructures
    ):
      # Scale `x` by the grid size to get the indices of the coordinates.
      x_scaled = x * grid_size

      # Optionally jitter the scaled coordinates by [-0.5, 0.5).
      if self.jitter_coordinates:
        if rng is not None:
          key, rng = random.split(rng)
          x_scaled += random.uniform(key, x_scaled.shape) - 0.5

      # Interpolate into `values` to get a per-coordinate feature vector.
      f = trilerp(values, x_scaled, datastructure)

      if x_scale is not None:
        # Weight the feature by assuming that x_scale is the standard deviation
        # of an isotropic gaussian whose mean is x, and by computing the
        # fraction of the PDF of that Gaussian that is inside a [-1/2, 1/2]^3
        # cube centered at x.
        weighting = math.approx_erf(1 / (jnp.sqrt(8) * (x_scale * grid_size)))
        f *= weighting
        if self.append_scale:
          # Take the `weighting` used to rescale `f` and concatenate
          # `2 * weighting - 1`` as a feature. Training can get unstable if the
          # feature and the weight-feature have very different magnitudes, and
          # the features usually start small and grow large, so we rescale the
          # weight-feature with the current standard deviation of the features
          # (softly clipped to be >= the maximum initialized value to guard
          # against the case where `values`` shrinks to `0`) so that they're
          # matched. We have a stop-gradient so training doesn't
          # try to change `f_scale`` by messing with `f``).
          f_scale = (2 * weighting - 1) * jnp.sqrt(
              maxval**2 + jnp.mean(jax.lax.stop_gradient(values) ** 2)
          )
          f = jnp.concatenate([f, f_scale], axis=-1)

      if per_level_fn is not None:
        f = per_level_fn(f)

      features.append(f)

    # Aggregate into a single "neural feature" vector.
    features = jnp.concatenate(features, axis=-1)
    features *= self.precondition_scaling
    return features
|
evocodebench_data_23
|
# coding=utf-8
# Copyright 2023 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tools for constructing geodesic polyhedron, which are used as a basis."""
import itertools
import numpy as np
def compute_sq_dist(mat0, mat1=None):
  """Compute the squared Euclidean distance between all pairs of columns."""
  if mat1 is None:
    mat1 = mat0
  # ||x - y||^2 == ||x||^2 + ||y||^2 - 2 x^T y, evaluated without materializing
  # all pairwise differences.
  gram = mat0.T @ mat1
  sq_dist = np.sum(mat0**2, 0)[:, None] + np.sum(mat1**2, 0)[None, :] - 2 * gram
  # Clamp tiny negative values produced by floating-point round-off.
  return np.maximum(0, sq_dist)
def compute_tesselation_weights(v):
  """Tesselate the vertices of a triangle by a factor of `v`."""
  if v < 1:
    raise ValueError(f'v {v} must be >= 1')
  # Enumerate all integer triples (i, j, k) with i + j + k == v.
  int_weights = np.array(
      [(i, j, v - (i + j)) for i in range(v + 1) for j in range(v + 1 - i)]
  )
  return int_weights / v  # Barycentric weights.
def tesselate_geodesic(base_verts, base_faces, v, eps=1e-4):
  """Tesselate the vertices of a geodesic polyhedron.

  Args:
    base_verts: tensor of floats, the vertex coordinates of the geodesic.
    base_faces: tensor of ints, the indices of the vertices of base_verts that
      constitute eachface of the polyhedra.
    v: int, the factor of the tesselation (v==1 is a no-op).
    eps: float, a small value used to determine if two vertices are the same.

  Returns:
    verts: a tensor of floats, the coordinates of the tesselated vertices.
  """
  if not isinstance(v, int):
    # Fix: corrected the error message grammar ("must an" -> "must be an").
    raise ValueError(f'v {v} must be an integer')
  tri_weights = compute_tesselation_weights(v)

  def _tesselate_face(face):
    # Barycentric combinations of the face corners, reprojected onto the
    # unit sphere.
    pts = np.matmul(tri_weights, base_verts[face, :])
    return pts / np.sqrt(np.sum(pts**2, 1, keepdims=True))

  verts = np.concatenate([_tesselate_face(face) for face in base_faces], 0)

  # Deduplicate: map every vertex to the first vertex within `eps` of it and
  # keep only the unique representatives.
  sq_dist = compute_sq_dist(verts.T)
  assignment = np.array([np.min(np.argwhere(d <= eps)) for d in sq_dist])
  return verts[np.unique(assignment), :]
def generate_basis(
    base_shape, angular_tesselation, remove_symmetries=True, eps=1e-4
):
  """Generates a 3D basis by tesselating a geometric polyhedron.

  Args:
    base_shape: string, the name of the starting polyhedron, must be either
      'tetrahedron', 'icosahedron' or 'octahedron'.
    angular_tesselation: int, the number of times to tesselate the polyhedron,
      must be >= 1 (a value of 1 is a no-op to the polyhedron).
    remove_symmetries: bool, if True then remove the symmetric basis columns,
      which is usually a good idea because otherwise projections onto the basis
      will have redundant negative copies of each other.
    eps: float, a small number used to determine symmetries.

  Returns:
    basis: a matrix of tesselated unit vertices with shape [n, 3], with the
      per-vertex coordinate order reversed (zyx). NOTE(review): an earlier
      docstring said [3, n]; the code returns [n, 3] — confirm with callers.

  Raises:
    ValueError: if `base_shape` is not a supported polyhedron name.
  """
  if base_shape == 'tetrahedron':
    # Regular tetrahedron with unit-norm vertices.
    verts = np.array([
        (np.sqrt(8 / 9), 0, -1 / 3),
        (-np.sqrt(2 / 9), np.sqrt(2 / 3), -1 / 3),
        (-np.sqrt(2 / 9), -np.sqrt(2 / 3), -1 / 3),
        (0, 0, 1),
    ])
    faces = np.array([(0, 1, 2), (0, 2, 3), (0, 1, 3), (1, 2, 3)])
  elif base_shape == 'icosahedron':
    # Golden-ratio construction, normalized so every vertex is unit length.
    a = (np.sqrt(5) + 1) / 2
    verts = np.array([
        (-1, 0, a),
        (1, 0, a),
        (-1, 0, -a),
        (1, 0, -a),
        (0, a, 1),
        (0, a, -1),
        (0, -a, 1),
        (0, -a, -1),
        (a, 1, 0),
        (-a, 1, 0),
        (a, -1, 0),
        (-a, -1, 0),
    ]) / np.sqrt(a + 2)
    faces = np.array([
        (0, 4, 1),
        (0, 9, 4),
        (9, 5, 4),
        (4, 5, 8),
        (4, 8, 1),
        (8, 10, 1),
        (8, 3, 10),
        (5, 3, 8),
        (5, 2, 3),
        (2, 7, 3),
        (7, 10, 3),
        (7, 6, 10),
        (7, 11, 6),
        (11, 0, 6),
        (0, 1, 6),
        (6, 1, 10),
        (9, 0, 11),
        (9, 11, 2),
        (9, 2, 5),
        (7, 2, 11),
    ])
  elif base_shape == 'octahedron':
    verts = np.array(
        [(0, 0, -1), (0, 0, 1), (0, -1, 0), (0, 1, 0), (-1, 0, 0), (1, 0, 0)]
    )
    # Recover faces implicitly: each cube corner has squared distance exactly
    # 2 from the three octahedron vertices of the face it points at, so
    # grouping those matches (3 per corner) yields the 8 faces.
    corners = np.array(list(itertools.product([-1, 1], repeat=3)))
    pairs = np.argwhere(compute_sq_dist(corners.T, verts.T) == 2)
    faces = np.sort(np.reshape(pairs[:, 1], [3, -1]).T, 1)
  else:
    raise ValueError(f'base_shape {base_shape} not supported')
  # Subdivide each face and project the new vertices onto the sphere.
  verts = tesselate_geodesic(verts, faces, angular_tesselation)
  if remove_symmetries:
    # Remove elements of `verts` that are reflections of each other.
    match = compute_sq_dist(verts.T, -verts.T) < eps
    verts = verts[~np.any(np.triu(match), axis=0), :]
  # Reverse the xyz coordinate order of each vertex.
  basis = verts[:, ::-1]
  return basis
|
evocodebench_data_24
|
# coding=utf-8
# Copyright 2023 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Helper functions for linear splines."""
import functools
from internal import math
from internal import utils
import jax
from jax.experimental import checkify
import jax.numpy as jnp
def check_zero_endpoints(y):
  """Checkify-assert that each spline's first and last values are zero."""
  for index, message in (
      (0, 'Splines must all start with 0.'),
      (-1, 'Splines must all end with 0.'),
  ):
    checkify.check(jnp.all(y[Ellipsis, index] == 0), message)
def query(tq, t, v):
  """Query linear spline (t, v) at tq."""
  utils.assert_valid_linspline(t, v)
  # Values outside the spline's support evaluate to zero (left=0, right=0).
  interp_fn = functools.partial(jnp.interp, left=0, right=0)
  vectorized = jnp.vectorize(interp_fn, signature='(n),(m),(m)->(n)')
  return vectorized(tq, t, v)
def integrate(t, w):
  """Integrate (t, w) according to the trapezoid rule."""
  utils.assert_valid_linspline(t, w)
  # Average of adjacent heights times the interval width, summed.
  avg_heights = 0.5 * (w[Ellipsis, :-1] + w[Ellipsis, 1:])
  return jnp.sum(avg_heights * jnp.diff(t), axis=-1)
def normalize(t, w, eps=jnp.finfo(jnp.float32).eps ** 2):
  """Make w integrate to 1."""
  utils.assert_valid_linspline(t, w)
  total_mass = integrate(t, w)
  # Guard against division by (near-)zero total mass.
  return w / jnp.maximum(eps, total_mass)[Ellipsis, None]
def insert_knot(ti, t, y):
  """Inserts knots ti into the linear spline (t, y). Assumes zero endpoints.

  Args:
    ti: the new knot locations to insert.
    t: the spline's existing knot locations.
    y: the spline's values at `t`; must start and end with 0.

  Returns:
    A (to, yo) tuple describing the same spline with the knots inserted,
    sorted by knot location.
  """
  utils.assert_valid_linspline(t, y)
  check_zero_endpoints(y)
  # Compute the spline value at the insertion points.
  yi = query(ti, t, y)
  # Concatenate the insertion points and values onto the end of each spline,
  # broadcasting ti/yi across any leading batch dimensions of t/y.
  ti_ex = jnp.broadcast_to(ti, t.shape[: -len(ti.shape)] + ti.shape)
  yi_ex = jnp.broadcast_to(yi, y.shape[: -len(yi.shape)] + yi.shape)
  to = jnp.concatenate([t, ti_ex], axis=-1)
  yo = jnp.concatenate([y, yi_ex], axis=-1)
  # Sort the spline according to t.
  sort_idx = jnp.argsort(to)
  to = jnp.take_along_axis(to, sort_idx, axis=-1)
  yo = jnp.take_along_axis(yo, sort_idx, axis=-1)
  return to, yo
def clamp(t, y, minval, maxval):
  """Clamp (t, y) to be zero outside of t in [minval, maxval]."""
  utils.assert_valid_linspline(t, y)
  check_zero_endpoints(y)
  # Insert knots at the clamp boundaries and immediately outside them, so
  # zeroing values outside [minval, maxval] keeps the spline well-formed.
  boundary_ts = jnp.concatenate(
      [math.minus_eps(minval), minval, maxval, math.plus_eps(maxval)],
      axis=-1,
  )
  tc, yo = insert_knot(boundary_ts, t, y)
  # Zero the spline values outside of [minval, maxval].
  outside = (tc < minval) | (tc > maxval)
  yc = jnp.where(outside, 0, yo)
  return tc, yc
def compute_integral(t, y):
  """Integrate a linear spline into a piecewise quadratic spline.

  Returns per-segment coefficients (a, b, c) such that the integral on
  segment i is (t - t[i])**2 * a[i] + (t - t[i]) * b[i] + c[i].
  """
  utils.assert_valid_linspline(t, y)
  eps = jnp.finfo(jnp.float32).eps ** 2
  dt = jnp.diff(t)
  # Quadratic coefficient: half the slope of each linear segment.
  a = jnp.diff(y) / jnp.maximum(eps, 2 * dt)
  # Linear coefficient: the spline value at each segment's left knot.
  b = y[Ellipsis, :-1]
  # Constant coefficient: trapezoid-rule cumulative area up to each knot. The
  # integral's ambiguous global offset is set to 0.
  segment_areas = dt[Ellipsis, :-1] * (y[Ellipsis, :-2] + y[Ellipsis, 1:-1])
  c1 = 0.5 * jnp.cumsum(segment_areas, axis=-1)
  c = jnp.concatenate([jnp.zeros_like(y[Ellipsis, :1]), c1], axis=-1)
  return a, b, c
def sorted_lookup(x, xp):
  """Lookup `x` at sorted locations `xp`.

  Returns (idx0, idx1), the clamped indices of the knots bracketing each x.
  """
  # jnp.searchsorted() has slightly different conventions for boundary
  # handling than the rest of this codebase.
  search = functools.partial(jnp.searchsorted, side='right')
  idx = jnp.vectorize(search, signature='(n),(m)->(m)')(xp, x)
  below = jnp.maximum(idx - 1, 0)
  above = jnp.minimum(idx, xp.shape[-1] - 1)
  return below, above
def interpolate_integral(tq, t, a, b, c):
  """Interpolate into the piecewise quadratic returned by compute_integral().

  Args:
    tq: the query locations.
    t: the step function's knot locations.
    a, b, c: per-segment quadratic coefficients from compute_integral().

  Returns:
    The piecewise quadratic evaluated at `tq`.
  """
  utils.assert_valid_stepfun(t, a)
  utils.assert_valid_stepfun(t, b)
  utils.assert_valid_stepfun(t, c)
  # Clip to valid inputs (assumes repeating boundaries).
  tq = jnp.clip(tq, t[Ellipsis, :1], math.minus_eps(t[Ellipsis, -1:]))
  # Lookup the quadratic coefficients corresponding to each input query.
  idx0, _ = sorted_lookup(tq, t)
  # TODO(barron): It might be faster to stack (a, c, b) during generation and
  # do a single gather.
  t0 = jnp.take_along_axis(t, idx0, axis=-1)
  a0 = jnp.take_along_axis(a, idx0, axis=-1)
  b0 = jnp.take_along_axis(b, idx0, axis=-1)
  c0 = jnp.take_along_axis(c, idx0, axis=-1)
  # Evaluate a0 * (tq - t0)**2 + b0 * (tq - t0) + c0 on each segment.
  td = tq - t0
  v = a0 * td**2 + b0 * td + c0
  return v
def blur_stepfun(ts, ys, halfwidth):
  """Convolve a step function (ts, ys) with a box filter of size `halfwidth`.

  Returns a piecewise linear function (tp, yp) — the box-blurred step
  function — with zero-valued endpoints.
  """
  utils.assert_valid_stepfun(ts, ys)
  # Blur each entire step function by a single `halfwidth` value.
  # Dilate the t-values by at least numerical epsilon in each direction.
  ts_lo = ts - halfwidth
  ts_hi = jnp.maximum(math.plus_eps(ts), ts + halfwidth)
  # The difference in adjacent `y` values (zero padded) divided by the
  # difference in adjacent `t` values.
  ys0 = jnp.concatenate(
      [jnp.zeros_like(ys[Ellipsis, :1]), ys, jnp.zeros_like(ys[Ellipsis, :1])], axis=-1
  )
  dy = jnp.diff(ys0) / (ts_hi - ts_lo)
  # When decreasing t splat a positive second derivative, and when increasing
  # t splat a negative second derivative.
  tp = jnp.concatenate([ts_lo, ts_hi], axis=-1)
  dyp = jnp.concatenate([dy, -dy], axis=-1)
  # Sort the dilated t-values and their accompanying derivative weights.
  idx = jnp.argsort(tp, axis=-1)
  tp = jnp.take_along_axis(tp, idx, axis=-1)
  # NOTE(review): the sorted derivative sequence is truncated by two entries
  # here; this relies on the two largest t-values carrying no remaining
  # derivative weight past them — confirm against the original derivation.
  dyp = jnp.take_along_axis(dyp, idx[Ellipsis, :-2], axis=-1)
  # A ramp is the double integral of a delta function, so if we double-
  # integrate these derivatives you get the sum of a bunch of trapezoids.
  yp = jnp.cumsum(jnp.diff(tp)[Ellipsis, :-1] * jnp.cumsum(dyp, axis=-1), axis=-1)
  # Add in the missing first and last endpoint values, which must be zero
  # because we assume zero padding on `ys`.
  yp = jnp.concatenate(
      [jnp.zeros_like(yp[Ellipsis, :1]), yp, jnp.zeros_like(yp[Ellipsis, -1:])], axis=-1
  )
  return tp, yp
|
evocodebench_data_25
|
from typing import Any, Callable, Dict, Iterable, List
import numpy as np
from pydantic import fields
# ----- validators -----
def is_odd(cls: type, v: int, field: fields.ModelField) -> int:
    """Validate that a kernel-size value is an odd number.

    Args:
        cls (type): Class type.
        v (int): Value to check.
        field (fields.ModelField): Field descriptor.

    Raises:
        ValueError: If `v` is even.

    Returns:
        int: `v` sent for further processing.
    """
    if v % 2 == 0:
        raise ValueError(f"{cls.__name__}: {field.name} must be odd numbers.")
    return v
def is_binary(cls: type, v: np.ndarray, field: fields.ModelField) -> np.ndarray:
    """Validate that an array is boolean-typed, i.e. binary.

    Args:
        cls (type): Class type.
        v (np.ndarray): Value to check.
        field (fields.ModelField): Field descriptor.

    Raises:
        ValueError: If the array's dtype is not bool.

    Returns:
        np.ndarray: `v` sent for further processing.
    """
    if v.dtype == np.dtype("bool"):
        return v
    raise ValueError(f"{cls.__name__}: {field.name} must be binary. got dtype {v.dtype}")
def is_list_of_points(cls: type, v: np.ndarray, field: fields.ModelField) -> np.ndarray:
    """Validate that an array is a list of 2D points, i.e. has shape (_, 2).

    Args:
        cls (type): Class type.
        v (np.ndarray): Value to check.
        field (fields.ModelField): Field descriptor.

    Raises:
        ValueError: If the array is not of shape (_, 2).

    Returns:
        np.ndarray: `v` sent for further processing.
    """
    # `and` short-circuits, so shape[1] is only read when the array is 2-D.
    has_valid_shape = len(v.shape) == 2 and v.shape[1] == 2
    if not has_valid_shape:
        raise ValueError(f"{cls.__name__}: {field.name} must have shape (_, 2).")
    return v
def is_not_empty(cls: type, v: List[Any], field: fields.ModelField) -> List[Any]:
    """Validate that a list is not empty.

    Args:
        cls (type): Class type.
        v (List[Any]): Value to check.
        field (fields.ModelField): Field descriptor.

    Raises:
        ValueError: If the list is empty.

    Returns:
        List[Any]: `v` sent for further processing.
    """
    if not v:
        raise ValueError(f"{cls.__name__}: {field.name} list cannot be empty.")
    return v
def is_not_zero_sum(cls: type, v: Any, field: fields.ModelField) -> Any:
    """Validate that the values do not sum to zero.

    Args:
        cls (type): Class type.
        v (Any): Value to check.
        field (fields.ModelField): Field descriptor.

    Raises:
        ValueError: If `v` sums to 0.

    Returns:
        Any: `v` sent for further processing.
    """
    total = np.sum(v)
    if total == 0:
        raise ValueError(f"{cls.__name__}: {field.name} sum cannot be zero.")
    return v
def are_all_positive(cls: type, v: Any, field: fields.ModelField) -> Any:
    """Validate that all values are positive (zero included).

    Args:
        cls (type): Class type.
        v (Any): Value to check. May be a scalar or any iterable of scalars.
        field (fields.ModelField): Field descriptor.

    Raises:
        ValueError: If any value is negative.

    Returns:
        Any: `v` sent for further processing.
    """
    if isinstance(v, Iterable):
        # Note: zero passes; the check is for non-negativity.
        if not all(value >= 0 for value in v):
            raise ValueError(f"{cls.__name__}: all {field.name} must be positive. Received {v}")
    elif v < 0.0:
        raise ValueError(f"{cls.__name__}: {field.name} must be positive. Received {v}")
    return v
def to_dtype_float32(cls: type, v: np.ndarray, field: fields.ModelField) -> np.ndarray:
    """Cast an input np.ndarray to dtype np.float32.

    Args:
        cls (type): Class type.
        v (np.ndarray): Value to convert.
        field (fields.ModelField): Field descriptor.

    Returns:
        np.ndarray: the converted copy of `v`, for further processing.
    """
    converted: np.ndarray = v.astype(np.float32)
    return converted
# ----- root_validators -----
def is_valid_bbox(cls: type, values: Dict[str, float]) -> Dict[str, float]:
    """Check that the bounding box is valid (min strictly below max per axis)."""
    x_ordered = values["x_min"] < values["x_max"]
    y_ordered = values["y_min"] < values["y_max"]
    if not (x_ordered and y_ordered):
        raise ValueError(
            f'{cls.__name__}: invalid bbox. x_min={values["x_min"]}, x_max={values["x_max"]},'
            f' y_min={values["y_min"]}, y_max={values["y_max"]}'
        )
    return values
# ----- parametrized validators -----
def is_array_n_dimensions(nb_dimensions: int) -> Callable:
    """Create a pydantic validator checking if an array is n-dimensional.

    Args:
        nb_dimensions (int): number of dimensions the array must have.

    Returns:
        Callable: the validator.
    """
    def validator(cls: type, v: np.ndarray, field: fields.ModelField) -> np.ndarray:
        """Check if the array has the right number of dimensions."""
        wrong_ndim = len(v.shape) != nb_dimensions
        # An "empty" array of shape (0,) is tolerated when 0 dims are expected.
        empty_exception = v.shape == (0,) and nb_dimensions == 0
        if wrong_ndim and not empty_exception:
            raise ValueError(
                f"{cls.__name__}: wrong number of dimensions for {field.name}. "
                f"Expected {nb_dimensions}, got {len(v.shape)}"
            )
        return v
    return validator
# ----- parametrized root_validators -----
def are_lengths_equal(field1: str, field2: str) -> Callable:
    """Create a pydantic root validator checking two fields have equal length.

    Args:
        field1 (str): name of the first field.
        field2 (str): name of the second field.

    Returns:
        Callable: the validator.
    """
    def __root_validator(cls: type, values: Dict[str, List[Any]]) -> Dict[str, List[Any]]:
        """Check if len(field1) equals len(field2)."""
        if len(values[field1]) == len(values[field2]):
            return values
        raise ValueError(
            f"{cls.__name__}: {field1} and {field2} length mismatch, "
            f"resp. {len(values[field1])} and {len(values[field2])}"
        )
    return __root_validator
def are_shapes_equal(field1: str, field2: str) -> Callable:
    """Create a pydantic root validator checking two fields have equal shape.

    Args:
        field1 (str): name of the first field.
        field2 (str): name of the second field.

    Returns:
        Callable: the validator.
    """
    def __root_validator(cls: type, values: Dict[str, np.ndarray]) -> Dict[str, np.ndarray]:
        """Check if field1.shape equals field2.shape."""
        shape1 = values[field1].shape
        shape2 = values[field2].shape
        if shape1 != shape2:
            raise ValueError(f"{cls.__name__}: {field1} and {field2} shape mismatch.")
        return values
    return __root_validator
def are_all_shapes_equal(field1: str, field2: str) -> Callable:
    """Create a pydantic root validator for two lists of arrays of equal shapes.

    The produced validator checks that the two list fields have the same
    length and that their elements have identical shapes, pairwise.

    Args:
        field1 (str): name of the first field.
        field2 (str): name of the second field.

    Returns:
        Callable: the validator.
    """
    def __root_validator(cls: type, values: Dict[str, np.ndarray]) -> Dict[str, np.ndarray]:
        """Check if len(field1) equals len(field2) and if every element have the same shape."""
        shapes_field_1 = [element.shape for element in values[field1]]
        shapes_field_2 = [element.shape for element in values[field2]]
        lengths_differ = len(values[field1]) != len(values[field2])
        if lengths_differ or shapes_field_1 != shapes_field_2:
            raise ValueError(
                f"{cls.__name__}: {field1} and {field2} shape mismatch, resp. {shapes_field_1} and {shapes_field_2}."
            )
        return values
    return __root_validator
|
evocodebench_data_26
|
# coding=utf-8
# Copyright 2023 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Camera pose and ray generation utility functions."""
import enum
import functools
import types
from typing import Final, List, Mapping, Optional, Text, Tuple, TypeAlias
from absl import logging
import chex
from internal import configs
from internal import geometry
from internal import math
from internal import rigid_body
from internal import spin_math
from internal import stepfun
from internal import utils
import jax
from jax import random
import jax.numpy as jnp
import jaxcam
import numpy as np
import scipy
# Arrays in this module may be either NumPy or JAX ndarrays.
_Array: TypeAlias = np.ndarray | jnp.ndarray
_ScalarArray: TypeAlias = float | _Array
# identify_file_indices() raises (instead of warning) when more than this
# fraction of a subset's file names is missing from the full file list.
_IDENTIFY_FILE_INDICES_MISSING_FRACTION_ERROR_THRESHOLD: Final[float] = 0.95
def convert_to_ndc(
    origins,
    directions,
    pixtocam,
    near = 1.0,
    xnp = np,
):
  """Converts a set of rays to normalized device coordinates (NDC).

  Assumes a forward-facing capture: every ray direction must have a negative
  z component. Rays are mapped into the NDC space of a perspective pinhole
  camera with identity pose and intrinsics given (inverted) by `pixtocam`;
  the near plane of the frustum is `near` and the far plane is infinity.
  Note that projection(origins + t * directions) is NOT equal to
  origins_ndc + t * directions_ndc, and directions_ndc are not unit length;
  they are defined so the valid near/far planes in NDC are 0 and 1.
  See Appendix C in https://arxiv.org/abs/2003.08934 for details.

  Args:
    origins: ndarray(float32), [..., 3], world space ray origins.
    directions: ndarray(float32), [..., 3], world space ray directions.
    pixtocam: ndarray(float32), [3, 3], inverse intrinsic matrix.
    near: float, near plane along the negative z axis.
    xnp: either numpy or jax.numpy.

  Returns:
    A tuple (origins_ndc, directions_ndc), each ndarray(float32) [..., 3].
  """
  # Advance each origin along its ray until oz == -near, so the new near
  # bound equals 0.
  t_near = -(near + origins[Ellipsis, 2]) / directions[Ellipsis, 2]
  shifted_origins = origins + t_near[Ellipsis, None] * directions
  ox, oy, oz = xnp.moveaxis(shifted_origins, -1, 0)
  dx, dy, dz = xnp.moveaxis(directions, -1, 0)
  # Equal to -2 * focal / cx and -2 * focal / cy respectively.
  xmult = 1.0 / pixtocam[0, 2]
  ymult = 1.0 / pixtocam[1, 2]
  # Perspective projection of the t = 0 (near-plane) points.
  origins_ndc = xnp.stack(
      [xmult * ox / oz, ymult * oy / oz, -xnp.ones_like(oz)], axis=-1
  )
  # Perspective projection of the t = infinity points, which depends only on
  # the ray direction.
  infinity_ndc = xnp.stack(
      [xmult * dx / dz, ymult * dy / dz, xnp.ones_like(oz)], axis=-1
  )
  # directions_ndc points from origins_ndc to infinity_ndc.
  return origins_ndc, infinity_ndc - origins_ndc
def pad_poses(p):
  """Pad [..., 3, 4] pose matrices with a homogeneous bottom row [0,0,0,1]."""
  last_row = np.broadcast_to([0, 0, 0, 1.0], p[Ellipsis, :1, :4].shape)
  padded = np.concatenate([p[Ellipsis, :3, :4], last_row], axis=-2)
  return padded
def unpad_poses(p):
  """Remove the homogeneous bottom row from [..., 4, 4] pose matrices."""
  top_three_rows = p[Ellipsis, :3, :4]
  return top_three_rows
def recenter_poses(poses):
  """Recenter poses around the origin."""
  # Invert the average camera-to-world transform, so applying it maps the
  # average camera onto the identity pose.
  transform = np.linalg.inv(pad_poses(average_pose(poses)))
  recentered = transform @ pad_poses(poses)
  return unpad_poses(recentered), transform
def average_pose(poses, lock_up = False):
  """New pose using average position, z-axis, and up vector of input poses."""
  mean_position = poses[:, :3, 3].mean(0)
  mean_z_axis = poses[:, :3, 2].mean(0)
  mean_up = poses[:, :3, 1].mean(0)
  return viewmatrix(mean_z_axis, mean_up, mean_position, lock_up=lock_up)
def viewmatrix(
    lookdir,
    up,
    position,
    lock_up = False,
):
  """Construct lookat view matrix.

  Returns a [3, 4] camera-to-world matrix whose columns are (x, y, z, pos).
  """
  unit_cross = lambda a, b: normalize(np.cross(a, b))
  axes = [None, normalize(up), normalize(lookdir)]
  # x-axis is always the normalized cross product of `lookdir` and `up`.
  axes[0] = unit_cross(axes[1], axes[2])
  # By default keep `lookdir` fixed and re-derive `up`; with lock_up, keep
  # `up` fixed and re-derive `lookdir`.
  free = 2 if lock_up else 1
  axes[free] = unit_cross(axes[(free + 1) % 3], axes[(free + 2) % 3])
  return np.stack(axes + [position], axis=1)
def rotation_about_axis(degrees, axis=0):
  """Creates a [4, 4] rotation matrix about one of the coordinate axes."""
  radians = degrees / 180.0 * np.pi
  c, s = np.cos(radians), np.sin(radians)
  rot2x2 = np.array([[c, -s], [s, c]])
  r = np.eye(3)
  r[1:3, 1:3] = rot2x2
  # Rolling rows and columns together moves the fixed axis from 0 to `axis`.
  r = np.roll(np.roll(r, axis, axis=0), axis, axis=1)
  p = np.eye(4)
  p[:3, :3] = r
  return p
def normalize(x):
  """Normalization helper function."""
  length = np.linalg.norm(x)
  return x / length
def focus_point_fn(poses, xnp = np):
  """Calculate nearest point to all focal axes in poses."""
  axes = poses[:, :3, 2:3]
  centers = poses[:, :3, 3:4]
  # Projector onto the plane orthogonal to each camera's focal axis.
  projectors = xnp.eye(3) - axes * xnp.transpose(axes, [0, 2, 1])
  normal_mats = xnp.transpose(projectors, [0, 2, 1]) @ projectors
  # Least-squares solution of sum_i ||P_i (p - o_i)||^2 over the point p.
  rhs = (normal_mats @ centers).mean(0)[:, 0]
  return xnp.linalg.inv(normal_mats.mean(0)) @ rhs
# Constants for generate_spiral_path():
NEAR_STRETCH = 0.9  # Push forward near bound for forward facing render path.
FAR_STRETCH = 5.0  # Push back far bound for forward facing render path.
FOCUS_DISTANCE = 0.75  # Relative weighting of near, far bounds for render path.


def generate_spiral_path(
    poses,
    bounds,
    n_frames = 120,
    n_rots = 2,
    zrate = 0.5,
):
  """Calculates a forward facing spiral path for rendering.

  Args:
    poses: (N, 3, 4) camera-to-world matrices of the input cameras.
    bounds: near/far scene bounds; only min() and max() are used.
    n_frames: number of render poses generated along the path.
    n_rots: number of full spiral rotations over the path.
    zrate: frequency multiplier for the motion along the z axis.

  Returns:
    (n_frames, 3, 4) array of camera-to-world render poses.
  """
  # Find a reasonable 'focus depth' for this dataset as a weighted average
  # of conservative near and far bounds in disparity space.
  near_bound = bounds.min() * NEAR_STRETCH
  far_bound = bounds.max() * FAR_STRETCH
  # All cameras will point towards the world space point (0, 0, -focal).
  focal = 1 / (((1 - FOCUS_DISTANCE) / near_bound + FOCUS_DISTANCE / far_bound))
  # Get radii for spiral path using 90th percentile of camera positions.
  positions = poses[:, :3, 3]
  radii = np.percentile(np.abs(positions), 90, 0)
  # Append 1 so `radii * t` leaves the homogeneous coordinate intact.
  radii = np.concatenate([radii, [1.0]])
  # Generate poses for spiral path.
  render_poses = []
  cam2world = average_pose(poses)
  up = poses[:, :3, 1].mean(0)
  for theta in np.linspace(0.0, 2.0 * np.pi * n_rots, n_frames, endpoint=False):
    t = radii * [np.cos(theta), -np.sin(theta), -np.sin(theta * zrate), 1.0]
    position = cam2world @ t
    lookat = cam2world @ [0, 0, -focal, 1.0]
    z_axis = position - lookat
    render_poses.append(viewmatrix(z_axis, up, position))
  render_poses = np.stack(render_poses, axis=0)
  return render_poses
def transform_poses_pca(poses):
  """Transforms poses so principal components lie on XYZ axes.

  Args:
    poses: a (N, 3, 4) array containing the cameras' camera to world transforms.

  Returns:
    A tuple (poses, transform), with the transformed poses and the applied
    camera_to_world transforms.
  """
  t = poses[:, :3, 3]
  t_mean = t.mean(axis=0)
  t = t - t_mean
  # Principal axes of the (centered) camera positions.
  # NOTE(review): t.T @ t is symmetric, so np.linalg.eigh would guarantee real
  # output and sorted eigenvalues — confirm before changing, since eigenvector
  # signs (and hence the transform) could differ.
  eigval, eigvec = np.linalg.eig(t.T @ t)
  # Sort eigenvectors in order of largest to smallest eigenvalue.
  inds = np.argsort(eigval)[::-1]
  eigvec = eigvec[:, inds]
  rot = eigvec.T
  # Keep the rotation proper (determinant +1).
  if np.linalg.det(rot) < 0:
    rot = np.diag(np.array([1, 1, -1])) @ rot
  transform = np.concatenate([rot, rot @ -t_mean[:, None]], -1)
  poses_recentered = unpad_poses(transform @ pad_poses(poses))
  transform = np.concatenate([transform, np.eye(4)[3:]], axis=0)
  # Flip coordinate system if z component of y-axis is negative
  if poses_recentered.mean(axis=0)[2, 1] < 0:
    poses_recentered = np.diag(np.array([1, -1, -1])) @ poses_recentered
    transform = np.diag(np.array([1, -1, -1, 1])) @ transform
  # Scale so all camera positions fit inside the [-1, 1]^3 cube.
  scale_factor = 1.0 / np.max(np.abs(poses_recentered[:, :3, 3]))
  poses_recentered[:, :3, 3] *= scale_factor
  transform = np.diag(np.array([scale_factor] * 3 + [1])) @ transform
  return poses_recentered, transform
def transform_poses_focus(poses):
  """Transforms poses so that the "focus point" of capture is at the origin.

  Args:
    poses: a (N, 3, 4) array containing the cameras' camera to world transforms.

  Returns:
    A tuple (poses, transform), with the transformed poses and the applied
    camera_to_world transforms.
  """
  # Move the focus point to the origin.
  focus_point = focus_point_fn(poses)
  # Use average up vector as the Z axis.
  swap_y_z = np.array([
      [1, 0, 0],
      [0, 0, 1],
      [0, -1, 0.0],
  ])
  rot = average_pose(poses, lock_up=True)[:3, :3] @ swap_y_z
  # World-from-average-camera inverted (rot.T), with the focus point mapped
  # to the origin.
  transform = np.concatenate([rot.T, rot.T @ -focus_point[:, None]], -1)
  poses_recentered = transform @ pad_poses(poses)
  transform = np.concatenate([transform, np.eye(4)[3:]], axis=0)
  # Scale so all camera positions fit inside the [-1, 1]^3 cube.
  scale_factor = 1.0 / np.max(np.abs(poses_recentered[:, :3, 3]))
  poses_recentered[:, :3, 3] *= scale_factor
  transform = np.diag(np.array([scale_factor] * 3 + [1])) @ transform
  return poses_recentered, transform
def generate_ellipse_path(
    poses,
    n_frames = 120,
    const_speed = True,
    z_variation = 0.0,
    z_phase = 0.0,
    rad_mult_min = 1.0,
    rad_mult_max = 1.0,
    render_rotate_xaxis = 0.0,
    render_rotate_yaxis = 0.0,
    use_avg_z_height = False,
    z_height_percentile = None,
    lock_up = False,
):
  """Generate an elliptical render path based on the given poses.

  Args:
    poses: (N, 3, 4) camera-to-world matrices of the input cameras.
    n_frames: number of render poses returned along the path.
    const_speed: if True, resample so the camera speed is roughly constant.
    z_variation: scale of the vertical oscillation (0 keeps the path flat).
    z_phase: phase offset of the vertical oscillation.
    rad_mult_min: minimum radius multiplier (how far the path zooms in).
    rad_mult_max: maximum radius multiplier (how far the path zooms out).
    render_rotate_xaxis: extra rotation in degrees applied about the x axis.
    render_rotate_yaxis: extra rotation in degrees applied about the y axis.
    use_avg_z_height: center the path at the mean input camera height.
    z_height_percentile: center the path at this percentile of camera heights.
    lock_up: passed through to viewmatrix() when orienting each pose.

  Returns:
    (n_frames, 3, 4) array of camera-to-world render poses.
  """
  # Calculate the focal point for the path (cameras point toward this).
  center = focus_point_fn(poses)
  # Default path height sits at z=0 (in middle of zero-mean capture pattern).
  xy_offset = center[:2]
  # Calculate lengths for ellipse axes based on input camera positions.
  xy_radii = np.percentile(np.abs(poses[:, :2, 3] - xy_offset), 90, axis=0)
  # Use ellipse that is symmetric about the focal point in xy.
  xy_low = xy_offset - xy_radii
  xy_high = xy_offset + xy_radii
  # Optional height variation, need not be symmetric.
  z_min = np.percentile((poses[:, 2, 3]), 10, axis=0)
  z_max = np.percentile((poses[:, 2, 3]), 90, axis=0)
  if use_avg_z_height or z_height_percentile is not None:
    # Center the path vertically around the average camera height, good for
    # datasets recentered by transform_poses_focus function.
    if z_height_percentile is None:
      z_init = poses[:, 2, 3].mean(axis=0)
    else:
      z_init = np.percentile(poses[:, 2, 3], z_height_percentile, axis=0)
  else:
    # Center the path at zero, good for datasets recentered by
    # transform_poses_pca function.
    z_init = 0
  z_low = z_init + z_variation * (z_min - z_init)
  z_high = z_init + z_variation * (z_max - z_init)
  xyz_low = np.array([*xy_low, z_low])
  xyz_high = np.array([*xy_high, z_high])

  def get_positions(theta):
    # Interpolate between bounds with trig functions to get ellipse in x-y.
    # Optionally also interpolate in z to change camera height along path.
    t_x = np.cos(theta) * 0.5 + 0.5
    t_y = np.sin(theta) * 0.5 + 0.5
    t_z = np.cos(theta + 2 * np.pi * z_phase) * 0.5 + 0.5
    t_xyz = np.stack([t_x, t_y, t_z], axis=-1)
    positions = xyz_low + t_xyz * (xyz_high - xyz_low)
    # Interpolate between min and max radius multipliers so the camera zooms in
    # and out of the scene center.
    t = np.sin(theta) * 0.5 + 0.5
    rad_mult = rad_mult_min + (rad_mult_max - rad_mult_min) * t
    positions = center + (positions - center) * rad_mult[:, None]
    return positions

  theta = np.linspace(0, 2.0 * np.pi, n_frames + 1, endpoint=True)
  positions = get_positions(theta)
  if const_speed:
    # Resample theta angles so that the velocity is closer to constant.
    lengths = np.linalg.norm(positions[1:] - positions[:-1], axis=-1)
    theta = stepfun.sample(None, theta, np.log(lengths), n_frames + 1)
    positions = get_positions(theta)
  # Throw away duplicated last position.
  positions = positions[:-1]
  # Set path's up vector to axis closest to average of input pose up vectors.
  avg_up = poses[:, :3, 1].mean(0)
  avg_up = avg_up / np.linalg.norm(avg_up)
  ind_up = np.argmax(np.abs(avg_up))
  up = np.eye(3)[ind_up] * np.sign(avg_up[ind_up])
  poses = np.stack([viewmatrix(p - center, up, p, lock_up) for p in positions])
  # Apply the optional extra rotations about the y then x axes.
  poses = poses @ rotation_about_axis(-render_rotate_yaxis, axis=1)
  poses = poses @ rotation_about_axis(render_rotate_xaxis, axis=0)
  return poses
def generate_interpolated_path(
    poses,
    n_interp,
    spline_degree = 5,
    smoothness = 0.03,
    rot_weight = 0.1,
    lock_up = False,
    fixed_up_vector = None,
    lookahead_i = None,
    frames_per_colmap = None,
    const_speed = False,
    n_buffer = None,
    periodic = False,
    n_interp_as_total = False,
):
  """Creates a smooth spline path between input keyframe camera poses.

  Spline is calculated with poses in format (position, lookat-point, up-point).

  Args:
    poses: (n, 3, 4) array of input pose keyframes.
    n_interp: returned path will have n_interp * (n - 1) total poses.
    spline_degree: polynomial degree of B-spline.
    smoothness: parameter for spline smoothing, 0 forces exact interpolation.
    rot_weight: relative weighting of rotation/translation in spline solve.
    lock_up: if True, forced to use given Up and allow Lookat to vary.
    fixed_up_vector: replace the interpolated `up` with a fixed vector.
    lookahead_i: force the look direction to look at the pose `i` frames ahead.
    frames_per_colmap: conversion factor for the desired average velocity.
    const_speed: renormalize spline to have constant delta between each pose.
    n_buffer: Number of buffer frames to insert at the start and end of the
      path. Helps keep the ends of a spline path straight.
    periodic: make the spline path periodic (perfect loop).
    n_interp_as_total: use n_interp as total number of poses in path rather than
      the number of poses to interpolate between each input.

  Returns:
    A tuple (poses, u, u_keyframes):
      poses: new camera poses with shape (n_frames - 1, 3, 4), where n_frames
        is n_interp * (n - 1) (or n_interp + 1 with n_interp_as_total, and may
        be recomputed when frames_per_colmap is set).
      u: the spline parameter of each returned pose (final entry dropped).
      u_keyframes: the spline parameter values of the input keyframes.
  """

  def poses_to_points(poses, dist):
    """Converts from pose matrices to (position, lookat, up) format."""
    pos = poses[:, :3, -1]
    lookat = poses[:, :3, -1] - dist * poses[:, :3, 2]
    up = poses[:, :3, -1] + dist * poses[:, :3, 1]
    return np.stack([pos, lookat, up], 1)

  def points_to_poses(points):
    """Converts from (position, lookat, up) format to pose matrices."""
    poses = []
    for i in range(len(points)):
      pos, lookat_point, up_point = points[i]
      # NOTE(review): indentation reconstructed — as written, when lookahead_i
      # is set but i + lookahead_i runs past the end, `lookat` retains the
      # previous iteration's value; confirm against the original source.
      if lookahead_i is not None:
        if i + lookahead_i < len(points):
          lookat = pos - points[i + lookahead_i][0]
      else:
        lookat = pos - lookat_point
      up = (up_point - pos) if fixed_up_vector is None else fixed_up_vector
      poses.append(viewmatrix(lookat, up, pos, lock_up=lock_up))
    return np.array(poses)

  def insert_buffer_poses(poses, n_buffer):
    """Insert extra poses at the start and end of the path."""

    def average_distance(points):
      distances = np.linalg.norm(points[1:] - points[0:-1], axis=-1)
      return np.mean(distances)

    def shift(pose, dz):
      result = np.copy(pose)
      z = result[:3, 2]
      z /= np.linalg.norm(z)
      # Move along forward-backward axis. -z is forward.
      result[:3, 3] += z * dz
      return result

    dz = average_distance(poses[:, :3, 3])
    prefix = np.stack([shift(poses[0], (i + 1) * dz) for i in range(n_buffer)])
    prefix = prefix[::-1]  # reverse order
    suffix = np.stack(
        [shift(poses[-1], -(i + 1) * dz) for i in range(n_buffer)]
    )
    result = np.concatenate([prefix, poses, suffix])
    return result

  def remove_buffer_poses(poses, u, n_frames, u_keyframes, n_buffer):
    """Drop the synthetic buffer keyframes and poses outside their range."""
    u_keyframes = u_keyframes[n_buffer:-n_buffer]
    mask = (u >= u_keyframes[0]) & (u <= u_keyframes[-1])
    poses = poses[mask]
    u = u[mask]
    n_frames = len(poses)
    return poses, u, n_frames, u_keyframes

  def interp(points, u, k, s):
    """Runs multidimensional B-spline interpolation on the input points."""
    sh = points.shape
    pts = np.reshape(points, (sh[0], -1))
    # A degree-k spline needs at least k+1 control points.
    k = min(k, sh[0] - 1)
    tck, u_keyframes = scipy.interpolate.splprep(pts.T, k=k, s=s, per=periodic)
    new_points = np.array(scipy.interpolate.splev(u, tck))
    new_points = np.reshape(new_points.T, (len(u), sh[1], sh[2]))
    return new_points, u_keyframes

  if n_buffer is not None:
    poses = insert_buffer_poses(poses, n_buffer)
  points = poses_to_points(poses, dist=rot_weight)
  if n_interp_as_total:
    n_frames = n_interp + 1  # Add extra since final pose is discarded.
  else:
    n_frames = n_interp * (points.shape[0] - 1)
  u = np.linspace(0, 1, n_frames, endpoint=True)
  new_points, u_keyframes = interp(points, u=u, k=spline_degree, s=smoothness)
  poses = points_to_poses(new_points)
  if n_buffer is not None:
    poses, u, n_frames, u_keyframes = remove_buffer_poses(
        poses, u, n_frames, u_keyframes, n_buffer
    )
  if frames_per_colmap is not None:
    # Recalculate the number of frames to achieve desired average velocity.
    positions = poses[:, :3, -1]
    lengths = np.linalg.norm(positions[1:] - positions[:-1], axis=-1)
    total_length_colmap = lengths.sum()
    print('old n_frames:', n_frames)
    print('total_length_colmap:', total_length_colmap)
    n_frames = int(total_length_colmap * frames_per_colmap)
    print('new n_frames:', n_frames)
    u = np.linspace(
        np.min(u_keyframes), np.max(u_keyframes), n_frames, endpoint=True
    )
    new_points, _ = interp(points, u=u, k=spline_degree, s=smoothness)
    poses = points_to_poses(new_points)
  if const_speed:
    # Resample timesteps so that the velocity is nearly constant.
    positions = poses[:, :3, -1]
    lengths = np.linalg.norm(positions[1:] - positions[:-1], axis=-1)
    u = stepfun.sample(None, u, np.log(lengths), n_frames + 1)
    new_points, _ = interp(points, u=u, k=spline_degree, s=smoothness)
    poses = points_to_poses(new_points)
  # The final pose/parameter is discarded (see n_interp_as_total above).
  return poses[:-1], u[:-1], u_keyframes
def safe_interpolate_1d(
    x,
    spline_degree,
    smoothness,
    t_input,
    t_output,
):
  """Interpolate 1d signal x (defined at t_input and queried at t_output)."""
  # TODO(bmild): switch interpolation t values to match those chosen for path.
  # A degree-k spline fit requires at least k+1 data points, so clamp degree.
  num_points = len(x)
  degree = min(spline_degree, num_points - 1)
  if degree <= 0:
    # Degenerate case (0 or 1 samples, or degree 0): emit a constant signal.
    constant = x[0] if num_points else 0.0
    return np.full(t_output.shape, constant, dtype=x.dtype)
  tck = scipy.interpolate.splrep(t_input, x, s=smoothness, k=degree)
  return scipy.interpolate.splev(t_output, tck).astype(x.dtype)
def identify_file_names(dir_or_text_file):
  """Load filenames from text file or directory."""
  if not utils.isdir(dir_or_text_file):
    # `dir_or_text_file` is a text file: one filename per line.
    with utils.open_file(dir_or_text_file, 'r') as fp:
      contents = fp.read()
    # The file handle may yield bytes; decode into a string first.
    if isinstance(contents, bytes):
      contents = contents.decode('utf-8')
    return contents.splitlines()
  # Otherwise it is a directory: use its sorted listing.
  return sorted(utils.listdir(dir_or_text_file))
def identify_file_indices(
    dir_or_text_file, file_names
):
  """Computes indices for a subset of files out of a larger list."""
  # Load file names.
  subset_names = identify_file_names(dir_or_text_file)
  known_names = set(file_names)
  # COLMAP sometimes doesn't reconstruct all images, which results in some
  # files being missing.
  missing_names = set(subset_names) - known_names
  if missing_names:
    logging.warning(
        'Some files from subset are missing in the file names:\n%s',
        ' '.join(str(x) for x in missing_names),
    )
    missing_fraction = len(missing_names) / len(subset_names)
    if missing_fraction > _IDENTIFY_FILE_INDICES_MISSING_FRACTION_ERROR_THRESHOLD:
      raise ValueError(
          f'{missing_fraction*100}% of subset files is missing'
          f' from file_names: {missing_names}'
      )
  # Get indices corresponding to the subset filenames. Ensure that the order
  # used in subset_names is preserved.
  return np.array(
      [file_names.index(name) for name in subset_names if name in known_names]
  )
def get_meters_per_colmap_from_calibration_images(
    config, poses, image_names
):
  """Uses calibration images to get how many meters is a single COLMAP unit."""
  # By default, the input camera poses are scaled to fit in the [-1, 1]^3 cube,
  # which implies a scaling of 2 / .25 = 8 meters between the farthest-apart
  # camera poses.
  meters_per_colmap = 8.0
  if config.render_calibration_keyframes is None:
    return meters_per_colmap
  # Use provided calibration keyframes to determine metric world scale.
  calib_names = identify_file_names(config.render_calibration_keyframes)
  index_pairs = []
  for i in range(0, len(calib_names), 2):
    # Calibration image filenames come in consecutive pairs.
    name0, name1 = calib_names[i : i + 2]
    # Only use pairs where both images have a COLMAP pose.
    if name0 in image_names and name1 in image_names:
      index_pairs.append((image_names.index(name0), image_names.index(name1)))
  if index_pairs:
    # Extract colmap-space positions from the camera pose matrices.
    positions = poses[index_pairs][Ellipsis, :3, -1]
    # Every pair of calibration keyframes should have world space distance
    # `render_calibration_distance` according to the capture handbook.
    colmap_lengths = np.linalg.norm(positions[:, 0] - positions[:, 1], axis=-1)
    colmap_length = colmap_lengths.mean(axis=0)
    # Ratio of world distance to colmap distance.
    meters_per_colmap = config.render_calibration_distance / colmap_length
    print('colmap lengths', colmap_lengths)
    print('avg', colmap_length)
    print('meters_per_colmap', meters_per_colmap)
  return meters_per_colmap
def calibrate_spline_speed(
    config, poses, image_names
):
  """Uses input config to determine a conversion factor for the spline speed."""
  if config.render_spline_meters_per_sec is None:
    return None
  # Convert the target speed (meters/sec) into frames per COLMAP unit using
  # the estimated metric scale and the output video frame rate.
  meters_per_colmap = get_meters_per_colmap_from_calibration_images(
      config, poses, image_names
  )
  frames_per_colmap = (
      meters_per_colmap
      / config.render_spline_meters_per_sec
      * config.render_video_fps
  )
  print('returning frames_per_colmap', frames_per_colmap)
  return frames_per_colmap
def create_render_spline_path(
    config,
    image_names,
    poses,
    exposures,
):
  """Creates spline interpolation render path from subset of dataset poses.

  Args:
    config: configs.Config object.
    image_names: a list of image filenames.
    poses: [N, 3, 4] array of extrinsic camera pose matrices.
    exposures: optional list of floating point exposure values.

  Returns:
    spline_indices: list of indices used to select spline keyframe poses.
    render_poses: array of interpolated extrinsic camera poses for the path.
    render_exposures: optional list of interpolated exposures for the path.
  """

  def remove_outlier_spline_indices(
      spline_indices, poses, q_max, q_mult
  ):
    """Identifies spline indices that correspond to inlier poses."""
    poses = poses[spline_indices]
    points = poses[:, :3, -1]
    # Length of each hop between consecutive keyframe positions.
    distances = np.linalg.norm(points[1:] - points[:-1], axis=-1)
    # A keyframe survives if its hop to the next keyframe is below q_mult
    # times the q_max quantile of all hop lengths.
    mask = distances < q_mult * np.quantile(distances, q_max)
    mask = np.concatenate([mask, [True]], axis=0)  # Keep the last pose.
    num_inliers = int(np.sum(mask))
    num_total = len(spline_indices)
    print(
        f'remove_outlier_spline_indices: {num_inliers}/{num_total} spline '
        'path poses remaining after outlier removal.'
    )
    return spline_indices[mask]

  # Grab poses corresponding to the image filenames.
  spline_indices = identify_file_indices(
      config.render_spline_keyframes, image_names
  )
  # Optionally drop keyframes whose positions are far from their neighbors.
  if (
      config.render_spline_outlier_keyframe_quantile is not None
      and config.render_spline_outlier_keyframe_multiplier is not None
  ):
    spline_indices = remove_outlier_spline_indices(
        spline_indices,
        poses,
        q_max=config.render_spline_outlier_keyframe_quantile,
        q_mult=config.render_spline_outlier_keyframe_multiplier,
    )
  keyframes = poses[spline_indices]
  # Optional retiming factor (frames per COLMAP unit); None if unconfigured.
  frames_per_colmap = calibrate_spline_speed(config, poses, image_names)

  if config.render_spline_fixed_up:
    # Fix path to use world-space "up" vector instead of "banking" with spline.
    all_up_vectors = poses[:, :3, 1]  # second column of pose matrix is up.
    fixed_up_vector = normalize(all_up_vectors.mean(axis=0))
  else:
    fixed_up_vector = None
  render_poses, frame_timesteps, keyframe_timesteps = (
      generate_interpolated_path(
          keyframes,
          n_interp=config.render_spline_n_interp,
          spline_degree=config.render_spline_degree,
          smoothness=config.render_spline_smoothness,
          rot_weight=config.render_spline_rot_weight,
          lock_up=config.render_spline_lock_up,
          fixed_up_vector=fixed_up_vector,
          lookahead_i=config.render_spline_lookahead_i,
          frames_per_colmap=frames_per_colmap,
          const_speed=config.render_spline_const_speed,
          n_buffer=config.render_spline_n_buffer,
      )
  )
  if config.render_spline_interpolate_exposure:
    if exposures is None:
      raise ValueError(
          'config.render_spline_interpolate_exposure is True but '
          'create_render_spline_path() was passed exposures=None.'
      )
    # Interpolate per-frame exposure value.
    # Work in log space so interpolation is multiplicative in exposure.
    log_exposure = np.log(exposures[spline_indices])
    # Use aggressive smoothing for exposure interpolation to avoid flickering.
    log_exposure_interp = safe_interpolate_1d(
        log_exposure,
        spline_degree=5,
        smoothness=config.render_spline_interpolate_exposure_smoothness,
        t_input=keyframe_timesteps,
        t_output=frame_timesteps,
    )
    render_exposures = np.exp(log_exposure_interp)
  else:
    render_exposures = None
  return spline_indices, render_poses, render_exposures
def intrinsic_matrix(
    fx,
    fy,
    cx,
    cy,
    xnp = np,
):
  """Intrinsic matrix for a pinhole camera in OpenCV coordinate system."""
  # Rows: x focal/principal, y focal/principal, homogeneous.
  top = [fx, 0, cx]
  mid = [0, fy, cy]
  bottom = [0, 0, 1.0]
  return xnp.array([top, mid, bottom])
def get_pixtocam(
    focal,
    width,
    height,
    xnp = np,
):
  """Inverse intrinsic matrix for a perfect pinhole camera."""
  # Square pixels (fx == fy == focal) with the principal point at the
  # image center.
  camtopix = xnp.array([
      [focal, 0, width * 0.5],
      [0, focal, height * 0.5],
      [0, 0, 1.0],
  ])
  return xnp.linalg.inv(camtopix)
def pixel_coordinates(
    width, height, xnp = np
):
  """Tuple of the x and y integer coordinates for a grid of pixels."""
  xs = xnp.arange(width)
  ys = xnp.arange(height)
  # 'xy' indexing yields arrays of shape (height, width).
  return xnp.meshgrid(xs, ys, indexing='xy')
def _radial_and_tangential_distort(
x,
y,
k1 = 0,
k2 = 0,
k3 = 0,
k4 = 0,
p1 = 0,
p2 = 0,
):
"""Computes the distorted pixel positions."""
r2 = x * x + y * y
radial_distortion = r2 * (k1 + r2 * (k2 + r2 * (k3 + r2 * k4)))
dx_radial = x * radial_distortion
dy_radial = y * radial_distortion
dx_tangential = 2 * p1 * x * y + p2 * (r2 + 2 * x * x)
dy_tangential = 2 * p2 * x * y + p1 * (r2 + 2 * y * y)
return x + dx_radial + dx_tangential, y + dy_radial + dy_tangential
def _compute_residual_and_jacobian(
x,
y,
xd,
yd,
k1 = 0.0,
k2 = 0.0,
k3 = 0.0,
k4 = 0.0,
p1 = 0.0,
p2 = 0.0,
):
"""Auxiliary function of radial_and_tangential_undistort()."""
# Adapted from https://github.com/google/nerfies/blob/main/nerfies/camera.py
# let r(x, y) = x^2 + y^2;
# d(x, y) = 1 + k1 * r(x, y) + k2 * r(x, y) ^2 + k3 * r(x, y)^3 +
# k4 * r(x, y)^4;
r = x * x + y * y
d = 1.0 + r * (k1 + r * (k2 + r * (k3 + r * k4)))
# The perfect projection is:
# xd = x * d(x, y) + 2 * p1 * x * y + p2 * (r(x, y) + 2 * x^2);
# yd = y * d(x, y) + 2 * p2 * x * y + p1 * (r(x, y) + 2 * y^2);
#
# Let's define
#
# fx(x, y) = x * d(x, y) + 2 * p1 * x * y + p2 * (r(x, y) + 2 * x^2) - xd;
# fy(x, y) = y * d(x, y) + 2 * p2 * x * y + p1 * (r(x, y) + 2 * y^2) - yd;
#
# We are looking for a solution that satisfies
# fx(x, y) = fy(x, y) = 0;
fx = d * x + 2 * p1 * x * y + p2 * (r + 2 * x * x) - xd
fy = d * y + 2 * p2 * x * y + p1 * (r + 2 * y * y) - yd
# Compute derivative of d over [x, y]
d_r = k1 + r * (2.0 * k2 + r * (3.0 * k3 + r * 4.0 * k4))
d_x = 2.0 * x * d_r
d_y = 2.0 * y * d_r
# Compute derivative of fx over x and y.
fx_x = d + d_x * x + 2.0 * p1 * y + 6.0 * p2 * x
fx_y = d_y * x + 2.0 * p1 * x + 2.0 * p2 * y
# Compute derivative of fy over x and y.
fy_x = d_x * y + 2.0 * p2 * y + 2.0 * p1 * x
fy_y = d + d_y * y + 2.0 * p2 * x + 6.0 * p1 * y
return fx, fy, fx_x, fx_y, fy_x, fy_y
def _radial_and_tangential_undistort(
    xd,
    yd,
    k1 = 0,
    k2 = 0,
    k3 = 0,
    k4 = 0,
    p1 = 0,
    p2 = 0,
    eps = 1e-9,
    max_iterations=10,
    xnp = np,
):
  """Computes undistorted (x, y) from (xd, yd)."""
  # From https://github.com/google/nerfies/blob/main/nerfies/camera.py
  # Newton's method on the 2D residual, initialized at the distorted point.
  x = xnp.copy(xd)
  y = xnp.copy(yd)
  for _ in range(max_iterations):
    fx, fy, fx_x, fx_y, fy_x, fy_y = _compute_residual_and_jacobian(
        x=x, y=y, xd=xd, yd=yd, k1=k1, k2=k2, k3=k3, k4=k4, p1=p1, p2=p2
    )
    # Cramer's rule for the 2x2 Newton step; zero the step where the
    # Jacobian determinant is too small to invert safely.
    det = fy_x * fx_y - fx_x * fy_y
    invertible = xnp.abs(det) > eps
    step_x = xnp.where(
        invertible, (fx * fy_y - fy * fx_y) / det, xnp.zeros_like(det)
    )
    step_y = xnp.where(
        invertible, (fy * fx_x - fx * fy_x) / det, xnp.zeros_like(det)
    )
    x = x + step_x
    y = y + step_y
  return x, y
class ProjectionType(enum.Enum):
  """Camera projection type (perspective pinhole, fisheye, or 360 pano)."""

  # Standard pinhole perspective projection.
  PERSPECTIVE = 'perspective'
  # Fisheye projection (see the FISHEYE branch in pixels_to_rays).
  FISHEYE = 'fisheye'
  # Equirectangular 360-degree panorama.
  PANORAMIC = 'pano'
def pixels_to_rays(
    pix_x_int,
    pix_y_int,
    pixtocams,
    camtoworlds,
    distortion_params = None,
    pixtocam_ndc = None,
    camtype = ProjectionType.PERSPECTIVE,
    xnp = np,
):
  """Calculates rays given pixel coordinates, intrinisics, and extrinsics.

  Given 2D pixel coordinates pix_x_int, pix_y_int for cameras with
  inverse intrinsics pixtocams and extrinsics camtoworlds (and optional
  distortion coefficients distortion_params and NDC space projection matrix
  pixtocam_ndc), computes the corresponding 3D camera rays.

  Vectorized over the leading dimensions of the first four arguments.

  Args:
    pix_x_int: int array, shape SH, x coordinates of image pixels.
    pix_y_int: int array, shape SH, y coordinates of image pixels.
    pixtocams: float array, broadcastable to SH + [3, 3], inverse intrinsics.
    camtoworlds: float array, broadcastable to SH + [3, 4], camera extrinsics.
    distortion_params: dict of floats, optional camera distortion parameters.
    pixtocam_ndc: float array, [3, 3], optional inverse intrinsics for NDC.
    camtype: camera_utils.ProjectionType, fisheye or perspective camera.
    xnp: either numpy or jax.numpy.

  Returns:
    origins: float array, shape SH + [3], ray origin points.
    directions: float array, shape SH + [3], ray direction vectors.
    viewdirs: float array, shape SH + [3], normalized ray direction vectors.
    radii: float array, shape SH + [1], ray differential radii.
    imageplane: float array, shape SH + [2], xy coordinates on the image plane.
      If the image plane is at world space distance 1 from the pinhole, then
      imageplane will be the xy coordinates of a pixel in that space (so the
      camera ray direction at the origin would be (x, y, -1) in OpenGL coords).
  """

  # Must add half pixel offset to shoot rays through pixel centers.
  def pix_to_dir(x, y):
    return xnp.stack([x + 0.5, y + 0.5, xnp.ones_like(x)], axis=-1)

  # We need the dx and dy rays to calculate ray radii for mip-NeRF cones.
  # Stack the center pixel's ray with its +1-pixel neighbors in x and y.
  pixel_dirs_stacked = xnp.stack(
      [
          pix_to_dir(pix_x_int, pix_y_int),
          pix_to_dir(pix_x_int + 1, pix_y_int),
          pix_to_dir(pix_x_int, pix_y_int + 1),
      ],
      axis=0,
  )

  # For jax, need to specify high-precision matmul.
  matmul = math.matmul if xnp == jnp else xnp.matmul
  mat_vec_mul = lambda A, b: matmul(A, b[Ellipsis, None])[Ellipsis, 0]

  # Apply inverse intrinsic matrices.
  camera_dirs_stacked = mat_vec_mul(pixtocams, pixel_dirs_stacked)

  if distortion_params is not None:
    # Correct for distortion.
    x, y = _radial_and_tangential_undistort(
        camera_dirs_stacked[Ellipsis, 0],
        camera_dirs_stacked[Ellipsis, 1],
        **distortion_params,
        xnp=xnp,
    )
    camera_dirs_stacked = xnp.stack([x, y, xnp.ones_like(x)], -1)

  if camtype == ProjectionType.FISHEYE:
    # Fisheye: the radius of (x, y) on the image plane is used as the view
    # angle theta, capped at pi.
    theta = xnp.sqrt(xnp.sum(xnp.square(camera_dirs_stacked[Ellipsis, :2]), axis=-1))
    theta = xnp.minimum(xnp.pi, theta)

    # NOTE(review): theta == 0 (exactly at the principal point) divides by
    # zero here — presumably never hit due to the half-pixel offset; confirm.
    sin_theta_over_theta = xnp.sin(theta) / theta
    camera_dirs_stacked = xnp.stack(
        [
            camera_dirs_stacked[Ellipsis, 0] * sin_theta_over_theta,
            camera_dirs_stacked[Ellipsis, 1] * sin_theta_over_theta,
            xnp.cos(theta),
        ],
        axis=-1,
    )

  elif camtype == ProjectionType.PANORAMIC:
    # Panoramic: interpret the plane coordinates as spherical angles.
    theta = camera_dirs_stacked[Ellipsis, 0]
    phi = camera_dirs_stacked[Ellipsis, 1]
    # Negation on y and z components accounts for expected OpenCV convention.
    camera_dirs_stacked = xnp.stack(
        [
            -xnp.sin(phi) * xnp.sin(theta),
            -xnp.cos(phi),
            -xnp.sin(phi) * xnp.cos(theta),
        ],
        axis=-1,
    )

  # Flip from OpenCV to OpenGL coordinate system.
  camera_dirs_stacked = matmul(
      camera_dirs_stacked, xnp.diag(xnp.array([1.0, -1.0, -1.0]))
  )

  # Extract 2D image plane (x, y) coordinates (center ray only).
  imageplane = camera_dirs_stacked[0, Ellipsis, :2]

  # Apply camera rotation matrices.
  directions_stacked = mat_vec_mul(
      camtoworlds[Ellipsis, :3, :3], camera_dirs_stacked
  )
  # Extract the offset rays.
  directions, dx, dy = directions_stacked

  origins = xnp.broadcast_to(camtoworlds[Ellipsis, :3, -1], directions.shape)
  viewdirs = directions / xnp.linalg.norm(directions, axis=-1, keepdims=True)

  if pixtocam_ndc is None:
    # Distance from each unit-norm direction vector to its neighbors.
    dx_norm = xnp.linalg.norm(dx - directions, axis=-1)
    dy_norm = xnp.linalg.norm(dy - directions, axis=-1)
  else:
    # Convert ray origins and directions into projective NDC space.
    ndc_fn = functools.partial(convert_to_ndc, pixtocam=pixtocam_ndc, xnp=xnp)
    origins_dx, _ = ndc_fn(origins, dx)
    origins_dy, _ = ndc_fn(origins, dy)
    origins, directions = ndc_fn(origins, directions)

    # In NDC space, we use the offset between origins instead of directions.
    dx_norm = xnp.linalg.norm(origins_dx - origins, axis=-1)
    dy_norm = xnp.linalg.norm(origins_dy - origins, axis=-1)

  # Cut the distance in half, multiply it to match the variance of a uniform
  # distribution the size of a pixel (1/12, see paper).
  # TODO(barron): Add a unit test that this is correct.
  radii = (0.5 * (dx_norm + dy_norm))[Ellipsis, None] * 2 / xnp.sqrt(12)

  return origins, directions, viewdirs, radii, imageplane
def points_to_pixels(
    points,
    pixtocams,
    camtoworlds,
    distortion_params = None,
    camtype = ProjectionType.PERSPECTIVE,
    xnp = np,
):
  """Calculates pixel coordinates given 3D points, intrinisics, and extrinsics.

  Given 3D point coordinates points and cameras with inverse intrinsics
  pixtocams and extrinsics camtoworlds (and optional distortion coefficients
  distortion_params), computes the corresponding 2D pixel coordinates.

  Vectorized over the leading dimensions of the first four arguments.

  Args:
    points: float array, [..., 3], 3D coordinates of points to project.
    pixtocams: float array, [..., 3, 3], inverse intrinsics.
    camtoworlds: float array, [..., 3, 4], camera extrinsics.
    distortion_params: dict of floats or float arrays [...], optional camera
      distortion parameters.
    camtype: camera_utils.ProjectionType, type of camera model.
    xnp: either numpy (host compute) or jax.numpy (device compute).

  Returns:
    coordinates: float array, [..., 2], pixel coordinates.
    depth: float array, [...], per-point orthographic depth.
  """
  if camtype != ProjectionType.PERSPECTIVE:
    raise ValueError(f'points_to_pixels only supports perspective projection, '
                     f'not {camtype} mode.')
  # For jax, need to specify high-precision matmul.
  matmul = math.matmul if xnp == jnp else xnp.matmul
  mat_vec_mul = lambda A, b: matmul(A, b[Ellipsis, None])[Ellipsis, 0]

  # World-to-camera rotation is the transpose of the camera-to-world rotation.
  world_to_cam_rot = xnp.swapaxes(camtoworlds[Ellipsis, :3, :3], -1, -2)
  cam_positions = camtoworlds[Ellipsis, :3, -1]
  # Express the points (as directions) in the camera coordinate frame.
  points_camera = mat_vec_mul(world_to_cam_rot, points - cam_positions)

  # Project to the image plane by dividing out -z.
  depth = -points_camera[Ellipsis, -1]
  camera_dirs = points_camera / depth[Ellipsis, None]
  # OpenGL to OpenCV coordinates.
  camera_dirs = matmul(camera_dirs, xnp.diag(xnp.array([1.0, -1.0, -1.0])))

  if distortion_params is not None:
    # Apply forward distortion to the normalized camera coordinates.
    x, y = _radial_and_tangential_distort(
        camera_dirs[Ellipsis, 0],
        camera_dirs[Ellipsis, 1],
        **distortion_params,
    )
    camera_dirs = xnp.stack([x, y, xnp.ones_like(x)], -1)

  # Intrinsics map camera directions to homogeneous pixel coordinates.
  pixel_dirs = mat_vec_mul(xnp.linalg.inv(pixtocams), camera_dirs)
  # Remove the half-pixel center offset used when casting rays.
  coordinates = pixel_dirs[Ellipsis, :2] - xnp.array([0.5, 0.5])
  return coordinates, depth
def rays_planes_intersection(
    z_min,
    z_max,
    origins,
    directions,
    xnp = np,
):
  """Crops rays to a range of z values.

  This is useful for situations where the scene lies within a range of
  altitudes, but the cameras are very far away, as with aerial data.

  Args:
    z_min: float z value of the lower cropping plane.
    z_max: float z value of the upper cropping plane.
    origins: ray origins points.
    directions: ray direction vectors.
    xnp: either numpy or jax.numpy.

  Returns:
    t_min: parametric location of the cropped ray origins
    t_max: parametric location of the ends of the cropped rays
  """
  origin_z = origins[Ellipsis, 2]
  direction_z = directions[Ellipsis, 2]
  # Parametric distances to each plane along the ray's z component.
  t_lower = (z_min - origin_z) / direction_z
  t_upper = (z_max - origin_z) / direction_z
  # Order the two hits and never start behind the ray origin.
  t_min = xnp.maximum(0, xnp.minimum(t_lower, t_upper))
  t_max = xnp.maximum(t_lower, t_upper)
  return t_min, t_max
def _intersect_ranges(
r1,
r2,
xnp = np,
):
start = xnp.maximum(r1[0], r2[0])
end = xnp.minimum(r1[1], r2[1])
return (start, end)
def ray_box_intersection(
    ray_o, ray_d, corners, xnp = np
):
  """Returns enter/exit distances along the ray for box defined by `corners`."""
  # Per-axis parametric hits against the two slab planes of the box.
  hits_lo = (corners[0] - ray_o) / ray_d
  hits_hi = (corners[1] - ray_o) / ray_d
  # Enter at the latest per-axis entry; exit at the earliest per-axis exit.
  t_min = xnp.minimum(hits_lo, hits_hi).max(axis=-1)
  t_max = xnp.maximum(hits_lo, hits_hi).min(axis=-1)
  return t_min, t_max
def modify_rays_with_bbox(
    rays, corners, xnp = np
):
  """Sets near/far by bbox intersection and multiplies lossmult by mask."""
  t_min, t_max = ray_box_intersection(
      rays.origins, rays.directions, corners, xnp=xnp
  )
  t_min = t_min[Ellipsis, None]
  t_max = t_max[Ellipsis, None]
  # A ray is valid when it hits the box AND the hit interval overlaps the
  # ray's existing [near, far] range.
  box_hit = t_min <= t_max
  clipped_near, clipped_far = _intersect_ranges(
      (rays.near, rays.far), (t_min, t_max), xnp=xnp
  )
  valid = box_hit * (clipped_near <= clipped_far)
  # Zero out the loss weight for invalid rays (or create one from the mask).
  if rays.lossmult is None:
    new_lossmult = valid.astype(xnp.float32)
  else:
    new_lossmult = xnp.where(valid, rays.lossmult, 0.0)
  return rays.replace(
      lossmult=new_lossmult,
      near=xnp.where(valid, clipped_near, 0.0),
      far=xnp.where(valid, clipped_far, 0.0),
  )
def ray_sphere_intersection(
    ray_o,
    ray_d,
    center,
    radius,
    xnp = np,
):
  """Calculates distance to hit a sphere for a ray.

  Args:
    ray_o: Ray origin (..., 3)
    ray_d: Ray direction (..., 3)
    center: Sphere center (..., 3)
    radius: Sphere radius (..., 1)
    xnp: Numpy or Jax module

  Returns:
    t_min, t_max, hit. When no hit is found, t_min = t_max = 0.
  """
  # Solve |o + t*d - c|^2 = r^2 as a quadratic a*t^2 + b*t + c = 0.
  offset = ray_o - center
  quad_a = (ray_d**2).sum(axis=-1)
  quad_b = 2 * (offset * ray_d).sum(axis=-1)
  quad_c = (offset * offset).sum(axis=-1) - radius**2
  discriminant = quad_b**2 - 4.0 * quad_a * quad_c
  hit = (discriminant >= 0) * (quad_a > 0)
  # Nb: results are 'wrong' where hit is false; the sanitized values below
  # just keep the sqrt and division finite (outputs are masked anyway).
  discriminant = xnp.where(hit, discriminant, 0.0)
  quad_a = xnp.where(hit, quad_a, 1.0)
  sqrt_disc = xnp.sqrt(discriminant)
  t_min = xnp.where(hit, (-quad_b - sqrt_disc) / (2.0 * quad_a), 0.0)
  t_max = xnp.where(hit, (-quad_b + sqrt_disc) / (2.0 * quad_a), 0.0)
  return t_min, t_max, hit
def gather_cameras(cameras, cam_idx, xnp=np):
  """Gathers relevant camera parameters for each ray."""
  pixtocams, camtoworlds, distortion_params = cameras[:3]

  def select(matrices):
    # Stacked per-camera matrices (ndim > 2) are indexed per ray; a single
    # shared matrix passes through unchanged.
    return matrices[cam_idx] if matrices.ndim > 2 else matrices

  pixtocams_idx = select(pixtocams)
  camtoworlds_idx = select(camtoworlds)

  if distortion_params is None:
    distortion_params_idx = None
  else:
    # Index per-camera coefficient arrays; scalars are shared by all cameras.
    distortion_params_idx = {
        key: (value if xnp.isscalar(value) else value[cam_idx])
        for key, value in distortion_params.items()  # pytype: disable=attribute-error # jax-ndarray
    }

  return (
      pixtocams_idx,
      camtoworlds_idx,
      distortion_params_idx,
  )
def cast_ray_batch(
    cameras,
    rays,
    camtype = ProjectionType.PERSPECTIVE,
    scene_bbox = None,
    xnp = np,
):
  """Maps from input cameras and uncast Rays batch to output cast Rays batch.

  `cameras` is a Tuple of five sets of camera parameters.
    pixtocams: 1 or N stacked [3, 3] inverse intrinsic matrices.
    camtoworlds: 1 or N stacked [3, 4] extrinsic pose matrices.
    distortion_params: optional, dict[str, float] containing pinhole model
      distortion parameters.
    pixtocam_ndc: optional, [3, 3] inverse intrinsic matrix for mapping to NDC.
    z_range: optional range of Z values

  Args:
    cameras: described above.
    rays: ray data including integer pixel coordinates and camera indices.
      These fields can be an arbitrary batch shape.
    camtype: camera_utils.ProjectionType, fisheye or perspective camera.
    scene_bbox: min and max corner of scene bounding box, if applicable.
    xnp: either numpy or jax.numpy.

  Returns:
    rays: Rays dataclass with computed 3D world space ray data.
  """
  # rays.cam_idx has shape [..., 1], remove this hanging dimension.
  cam_idx = rays.cam_idx[Ellipsis, 0]
  # Select each ray's camera parameters by its camera index.
  cameras_idx = gather_cameras(cameras, cam_idx, xnp=xnp)
  pixtocams, camtoworlds, distortion_params = cameras_idx
  pixtocam_ndc, z_range = cameras[3:5]

  # Compute rays from pixel coordinates.
  origins, directions, viewdirs, radii, imageplane = pixels_to_rays(
      rays.pixels[Ellipsis, 0],
      rays.pixels[Ellipsis, 1],
      pixtocams,
      camtoworlds,
      distortion_params=distortion_params,
      pixtocam_ndc=pixtocam_ndc,
      camtype=camtype,
      xnp=xnp,
  )

  if z_range is not None:
    # Clip each ray to the z slab [z_range[0], z_range[1]].
    t_min, t_max = rays_planes_intersection(
        z_range[0], z_range[1], origins, directions, xnp
    )
    t_min = xnp.broadcast_to(t_min[Ellipsis, None], origins.shape)
    t_max = xnp.broadcast_to(t_max[Ellipsis, None], origins.shape)
    # NOTE(review): despite the name, hit_mask is True where the slab is
    # MISSED (t_max < t_min); those rays are left unchanged below.
    hit_mask = t_max < t_min
    # Advance origins to the slab entry point and rescale directions so the
    # parametric range [0, 1] spans the slab.
    origins = xnp.where(hit_mask, origins, origins + directions * t_min)
    directions = xnp.where(hit_mask, directions, directions * (t_max - t_min))

  # Preserve all metadata and add the cast rays.
  rays = rays.replace(
      origins=origins,
      directions=directions,
      viewdirs=viewdirs,
      radii=radii,
      imageplane=imageplane,
  )
  if scene_bbox is not None:
    rays = modify_rays_with_bbox(rays, scene_bbox, xnp=xnp)
  return rays
def cast_general_rays(
    camtoworld,
    pixtocam,
    height,
    width,
    near,
    far,
    distortion_params = None,
    pixtocam_ndc = None,
    camtype = ProjectionType.PERSPECTIVE,
    xnp = np,
):
  """Wrapper for generating a general ray batch."""
  pix_x_int, pix_y_int = pixel_coordinates(width, height, xnp=xnp)

  # Cast one ray per pixel of the image grid.
  cast_args = pixels_to_rays(
      pix_x_int,
      pix_y_int,
      pixtocam,
      camtoworld,
      distortion_params=distortion_params,
      pixtocam_ndc=pixtocam_ndc,
      camtype=camtype,
      xnp=xnp,
  )

  def per_pixel(value):
    # Broadcast a scalar to one entry per pixel with a trailing channel dim.
    return xnp.broadcast_to(value, pix_x_int.shape)[Ellipsis, None]

  return utils.Rays(
      *cast_args,
      pixels=xnp.stack([pix_x_int, pix_y_int], axis=-1),
      near=per_pixel(near),
      far=per_pixel(far),
      cam_idx=per_pixel(0),
  )
def cast_pinhole_rays(
    camtoworld,
    height,
    width,
    focal,
    near,
    far,
    xnp = np,
):
  """Generates a pinhole camera ray batch (w/o distortion)."""
  # Ideal pinhole: square pixels, principal point at the image center.
  pixtocam = get_pixtocam(focal, width, height, xnp=xnp)
  return cast_general_rays(
      camtoworld,
      pixtocam,
      height,
      width,
      near,
      far,
      camtype=ProjectionType.PERSPECTIVE,
      xnp=xnp,
  )
def cast_spherical_rays(
    camtoworld,
    height,
    width,
    near,
    far,
    xnp,
):
  """Generates a spherical camera ray batch."""
  # Scale pixel coordinates so x spans 2*pi and y spans pi (full sphere).
  angle_scale = xnp.diag(
      xnp.array([2.0 * np.pi / width, np.pi / height, 1.0])
  )
  return cast_general_rays(
      camtoworld,
      angle_scale,
      height,
      width,
      near,
      far,
      camtype=ProjectionType.PANORAMIC,
      xnp=xnp,
  )
def jax_camera_from_tuple(
    camera_tuple,
    image_size,
    projection_type,
):
  """Converts a camera tuple into a JAX camera.

  Args:
    camera_tuple: A tuple containing `inv_intrinsics`, the inverse intrinsics
      matrix; `extrinsics`, the camera to world matrix; and `distortion_params`,
      the dictionary of distortion parameters.
    image_size: An array containing the (width, height) image size.
    projection_type: The projection type of the camera.

  Returns:
    A JAX camera class instance encoding the same camera information.
  """
  # Only perspective and fisheye models are representable here.
  if projection_type.value not in {
      ProjectionType.PERSPECTIVE.value,
      ProjectionType.FISHEYE.value,
  }:
    raise ValueError(f'Projection {projection_type} is not supported.')

  inv_intrinsics, extrinsic, distortion_params = camera_tuple[:3]
  # Recover pinhole parameters from the (forward) intrinsic matrix.
  intrinsics = jnp.linalg.inv(inv_intrinsics)
  focal_length = intrinsics[0, 0]
  principal_point = intrinsics[:2, 2]
  pixel_aspect_ratio = intrinsics[1, 1] / intrinsics[0, 0]
  radial_distortion = None
  tangential_distortion = None
  if distortion_params is not None:
    # Radial coefficients are used only when k1..k3 are all present;
    # k4 is optional.
    if (
        'k1' in distortion_params
        and 'k2' in distortion_params
        and 'k3' in distortion_params
    ):
      radial_keys = ['k1', 'k2', 'k3', 'k4']
      radial_distortion = jnp.array(
          [distortion_params[k] for k in radial_keys if k in distortion_params]
      )
    if 'p1' in distortion_params and 'p2' in distortion_params:
      tangential_distortion = jnp.array([
          distortion_params['p1'],
          distortion_params['p2'],
      ])

  # Homogenize the [3, 4] camera-to-world matrix to [4, 4].
  extrinsic = jnp.concatenate(
      [extrinsic[:3, :4], jnp.array([[0, 0, 0, 1]])], axis=0
  )
  # Convert to OpenCV coordinates.
  extrinsic = math.matmul(extrinsic, jnp.diag(jnp.array([1, -1, -1, 1])))
  world_to_cam = jnp.linalg.inv(extrinsic)
  camera = jaxcam.Camera.create(
      focal_length=focal_length,
      pixel_aspect_ratio=pixel_aspect_ratio,
      radial_distortion=radial_distortion,
      tangential_distortion=tangential_distortion,
      principal_point=principal_point,
      image_size=image_size,
      is_fisheye=(projection_type.value == ProjectionType.FISHEYE.value),
  )
  camera = jaxcam.update_world_to_camera_matrix(camera, world_to_cam)
  return camera
def tuple_from_jax_camera(
    jax_camera,
):
  """Converts a JAX camera into a camera tuple."""
  # Rebuild the intrinsic matrix from the camera's pinhole parameters.
  fx = jax_camera.focal_length
  fy = jax_camera.focal_length * jax_camera.pixel_aspect_ratio
  intrinsic = jnp.block([
      [fx, jax_camera.skew, jax_camera.principal_point[0]],
      [0, fy, jax_camera.principal_point[1]],
      [0, 0, 1],
  ])
  pix_to_cam = jnp.linalg.inv(intrinsic)
  cam_to_world = jnp.linalg.inv(jaxcam.world_to_camera_matrix(jax_camera))
  # Convert back to OpenGL coordinates and drop the homogeneous row.
  cam_to_world = math.matmul(cam_to_world, jnp.diag(jnp.array([1, -1, -1, 1])))
  cam_to_world = cam_to_world[:3, :]

  distortion_params = None
  if jax_camera.has_distortion:
    distortion_params = {}
    if jax_camera.has_radial_distortion:
      radial = jax_camera.radial_distortion
      distortion_params.update(
          {'k1': radial[0], 'k2': radial[1], 'k3': radial[2], 'k4': radial[3]}
      )
    if jax_camera.has_tangential_distortion:
      tangential = jax_camera.tangential_distortion
      distortion_params.update({'p1': tangential[0], 'p2': tangential[1]})
  return pix_to_cam, cam_to_world, distortion_params
def rotation_distance(
    rotation_mat1, rotation_mat2
):
  """Computes the angle between two rotation matrices in degrees.

  Args:
    rotation_mat1: (3, 3) The first batch of rotation matrix.
    rotation_mat2: (3, 3) The second batch of rotation matrix.

  Returns:
    The angle in degrees between 0 and 180.
  """
  # Compare the rotations through their axis-angle (so3 log) representations.
  log1 = rigid_body.log_so3(rotation_mat1)
  log2 = rigid_body.log_so3(rotation_mat2)
  angle_deg = jnp.degrees(jnp.linalg.norm(log1 - log2, axis=-1))
  # Fold angles above 180 degrees back into [0, 180].
  return jnp.where(  # pytype: disable=bad-return-type # jnp-type
      angle_deg < 180,
      angle_deg,
      360 - angle_deg,
  )
def compute_camera_metrics(
    cameras_gt, cameras_pred
):
  """Computes the metrics between two cameras.

  Args:
    cameras_gt: ground truth cameras.
    cameras_pred: predicted cameras.

  Returns:
    A dict mapping metric names to per-camera error arrays: absolute
    focal length / per-axis translation / orientation / distortion
    differences, plus L2 position and principal point distances.
  """
  orientation_diffs = jax.vmap(rotation_distance)(
      cameras_pred.orientation, cameras_gt.orientation
  )
  translation_diffs = jnp.abs(cameras_pred.translation - cameras_gt.translation)
  diffs = {
      'focal_length': jnp.abs(
          cameras_pred.focal_length - cameras_gt.focal_length
      ),
      'position': jnp.linalg.norm(
          cameras_pred.position - cameras_gt.position, axis=-1
      ),
      'translation_x': translation_diffs[Ellipsis, 0],
      'translation_y': translation_diffs[Ellipsis, 1],
      'translation_z': translation_diffs[Ellipsis, 2],
      'orientation': jnp.abs(orientation_diffs),
      'principal_points': jnp.linalg.norm(
          cameras_pred.principal_point - cameras_gt.principal_point,
          axis=-1,
      ),
  }
  if cameras_pred.radial_distortion is not None:
    # If the GT camera has no radial distortion, compare against zeros.
    radial_distortion_gt = jnp.zeros(4)
    if cameras_gt.has_radial_distortion:
      radial_distortion_gt = cameras_gt.radial_distortion
    for i in range(cameras_pred.radial_distortion.shape[-1]):
      diffs[f'radial_distortion_{i}'] = jnp.abs(
          cameras_pred.radial_distortion[Ellipsis, i]
          - radial_distortion_gt[Ellipsis, i]
      )
  if cameras_pred.tangential_distortion is not None:
    # If the GT camera has no tangential distortion, compare against zeros.
    tangential_distortion_gt = jnp.zeros(2)
    if cameras_gt.has_tangential_distortion:
      # Bug fix: this previously read cameras_gt.radial_distortion, which
      # compared tangential predictions against the wrong GT coefficients.
      tangential_distortion_gt = cameras_gt.tangential_distortion
    for i in range(cameras_pred.tangential_distortion.shape[-1]):
      diffs[f'tangential_distortion_{i}'] = jnp.abs(
          cameras_pred.tangential_distortion[Ellipsis, i]
          - tangential_distortion_gt[Ellipsis, i]
      )
  return diffs
def perturb_cameras(
    rng,
    cameras,
    sigma_look_at,
    sigma_position,
    sigma_focal_length = 0.0,
    sigma_dolly_z = 0.0,
    single_dolly = True,
    dolly_use_average = False,
):
  """Randomly perturb camera positions and orientations.

  For position the 3D coordinate is simply shifted according to
  an offset vector. For the orientation an offset angle is calculated based
  on spherical coordinates. The underlying offsets are randomly chosen using
  normal distributions based on the input sigmas.

  Args:
    rng: A PRNGKey.
    cameras: Cameras to perturb.
    sigma_look_at: Strength of look-at position offset. Higher means stronger.
    sigma_position: Strength of position offset. Higher means stronger.
    sigma_focal_length: Strength of focal length zoom z-axis scale. Higher
      means stronger. This is essentially a percentage (0.2 means 20%).
    sigma_dolly_z: Strength of Dolly zoom z-axis scale. Higher means stronger.
      This is essentially a percentage (0.2 means 20%).
    single_dolly: If True, only have a single perturbation for dolly zoom.
    dolly_use_average: If True, set the dolly z to the average of the input
      instead of perturbing.

  Returns:
    Perturbed cameras.
  """
  # Dolly zoom.
  if sigma_dolly_z > 0.0 or dolly_use_average:
    # Turn our "percentage" into a log scale. This is equivalent to having
    # minval = log(1+s) and maxval = log(1/(1+s)) but sampling from a normal
    # distribution.
    log_sigma_dolly_z = jnp.log1p(sigma_dolly_z)
    rng, dolly_key = random.split(rng)
    translation = cameras.translation
    x, y, z = jnp.split(translation, 3, -1)
    if dolly_use_average:
      # All cameras share the mean camera-space depth.
      new_z = jnp.broadcast_to(z.mean(axis=0, keepdims=True), z.shape)
    elif single_dolly:
      # One shared log-normal depth scale applied to every camera.
      new_z = z * jnp.exp(random.normal(dolly_key, (1,)) * log_sigma_dolly_z)
    else:
      # Independent log-normal depth scale per camera.
      new_z = z * jnp.exp(random.normal(dolly_key, z.shape) * log_sigma_dolly_z)
    # Scale focal length proportionally to the depth change so the subject
    # keeps its apparent size (the classic dolly-zoom effect).
    new_focal_length = cameras.focal_length * (new_z / z).squeeze(-1)
    new_translation = jnp.concatenate([x, y, new_z], axis=-1)
    # Recover world position from the new translation: position = -R^T t.
    new_position = jax.vmap(spin_math.matmul)(
        -cameras.orientation.swapaxes(-1, -2), new_translation
    )
    cameras = cameras.replace(
        position=new_position, focal_length=new_focal_length
    )
  # Perturb focal length with a multiplicative log-normal factor.
  rng, key = random.split(rng)
  new_focal_length = cameras.focal_length * jnp.exp(
      random.normal(key, cameras.shape) * jnp.log1p(sigma_focal_length)
  )
  cameras = cameras.replace(focal_length=new_focal_length)
  camera_positions = cameras.position
  # World-space up vector of each camera (negated second orientation row).
  up_vectors = -cameras.orientation[Ellipsis, 1, :]
  # Perturb camera positions along a random unit direction.
  rng, key = random.split(rng)
  perturb_dir = spin_math.normalize(random.normal(key, camera_positions.shape))
  camera_positions_perturbed = np.array(
      sigma_position * perturb_dir + camera_positions
  )
  # Perturb look-at point, starting from the point on each optical axis
  # closest to the world origin.
  look_at_positions = jax.vmap(geometry.line_closest_point)(
      cameras.position, cameras.optical_axis, jnp.zeros_like(cameras.position)
  )
  rng, key = random.split(rng)
  # NOTE(review): this branch uses math.normalize while the position branch
  # above uses spin_math.normalize — presumably both resolve to the same
  # helper; confirm `math` here is the project module, not the stdlib.
  perturb_dir = math.normalize(random.normal(key, camera_positions.shape))
  look_at_positions_perturbed = np.array(
      sigma_look_at * perturb_dir + look_at_positions
  )
  # Apply the look-at function camera-by-camera (look_at is not batched here).
  new_cameras = []
  for camera, camera_position, look_at_position, up_vector in zip(
      cameras,
      camera_positions_perturbed,
      look_at_positions_perturbed,
      up_vectors,
  ):
    new_cameras.append(
        jaxcam.look_at(
            camera=camera,
            eye=camera_position,
            center=look_at_position,
            world_up=up_vector,
        )
    )
  cameras = jaxcam.concatenate(new_cameras)
  return cameras
|
evocodebench_data_27
|
# coding=utf-8
# Copyright 2023 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Geometry utilities."""
from typing import Union
import chex
from internal import rigid_body
from internal import spin_math
import jax
from jax import numpy as jnp
from jax import random
import numpy as onp
import optax
_ArrayType = Union[onp.ndarray, jnp.ndarray]
def line_distance(point1, dir1, point2,
                  dir2):
  """Distance between two infinite lines in 3D.

  Endpoints are ignored: both lines are treated as extending infinitely in
  both directions (this is not a segment or ray distance).

  Args:
    point1: (3,) a point on the first line.
    dir1: (3,) the direction vector of the first line.
    point2: (3,) a point on the second line.
    dir2: (3,) the direction vector of the second line.

  Returns:
    The distance between the two lines.
  """
  # Both candidate distances are computed unconditionally so the result can
  # be selected with jnp.where (branchless and jit-friendly).
  dist_if_skew = skew_line_distance(point1, dir1, point2, dir2)
  dist_if_parallel = line_to_point_distance(point1, dir1, point2)
  return jnp.where(
      are_lines_parallel(dir1, dir2), dist_if_parallel, dist_if_skew)
def skew_line_closest_points(point1, dir1,
                             point2,
                             dir2):
  """Mutually closest points of two skew lines.

  See:
    https://en.wikipedia.org/wiki/Skew_lines#Nearest_points

  Args:
    point1: a point on the first line.
    dir1: the direction vector of the first line.
    point2: a point on the second line.
    dir2: the direction vector of the second line.

  Returns:
    The point on line 1 nearest line 2 and the point on line 2 nearest line 1.
  """
  # Work with unit direction vectors.
  u1 = spin_math.normalize(dir1)
  u2 = spin_math.normalize(dir2)
  # Common perpendicular of the two lines.
  perp = jnp.cross(u1, u2)
  # Point on line 1 nearest to line 2.
  plane2_normal = jnp.cross(u2, perp)
  t1 = jnp.dot(point2 - point1, plane2_normal) / jnp.dot(u1, plane2_normal)
  closest1 = point1 + t1 * u1
  # Point on line 2 nearest to line 1.
  plane1_normal = jnp.cross(u1, perp)
  t2 = jnp.dot(point1 - point2, plane1_normal) / jnp.dot(u2, plane1_normal)
  closest2 = point2 + t2 * u2
  return closest1, closest2  # pytype: disable=bad-return-type  # jax-ndarray
def skew_line_distance(point1, dir1,
                       point2, dir2):
  """Shortest distance between two skew lines.

  Args:
    point1: a point on the first line.
    dir1: the direction vector of the first line.
    point2: a point on the second line.
    dir2: the direction vector of the second line.

  Returns:
    The distance between the two skew lines.
  """
  # The distance is simply the gap between the mutually closest points.
  nearest1, nearest2 = skew_line_closest_points(point1, dir1, point2, dir2)
  return jnp.linalg.norm(nearest1 - nearest2)
def line_closest_point(line_point, line_dir,
                       query_point):
  """Return the closest point on the line to a point.

  Args:
    line_point: a point on the line.
    line_dir: the direction vector of the line.
    query_point: the query point.

  Returns:
    The closest point on the line to the query point.
  """
  unit_dir = spin_math.normalize(line_dir)
  # Signed parameter of the query's projection onto the line.
  t = jnp.dot(query_point - line_point, unit_dir)
  return line_point + t * unit_dir
def line_to_point_distance(line_point, line_dir,
                           query_point):
  """Return the distance from a point to a line.

  Args:
    line_point: a point on the line.
    line_dir: the direction vector of the line.
    query_point: the point to compute the distance to.

  Returns:
    The closest distance between the line and the point.
  """
  nearest = line_closest_point(line_point, line_dir, query_point)
  return jnp.linalg.norm(query_point - nearest)
def ray_sphere_intersection(origin,
                            direction,
                            radius = 1.0):
  """Computes the intersecting point between a ray and a sphere.

  Variables use notation from Wikipedia:
    u: direction of ray
    o: origin of ray

  References:
    https://en.wikipedia.org/wiki/Line%E2%80%93sphere_intersection

  Args:
    origin: (..., 3) The origin of the ray.
    direction: (..., 3) The direction of the ray.
    radius: The radius of the sphere.

  Returns:
    The intersecting point on the sphere.
  """
  u_dot_o = jnp.sum(direction * origin, axis=-1, keepdims=True)
  # BUG FIX: the norm must reduce only over the coordinate axis; the previous
  # axis-less norm collapsed batched origins into a single scalar while the
  # dot product above was already batch-wise.
  nabla = u_dot_o**2 - (
      jnp.linalg.norm(origin, axis=-1, keepdims=True)**2 - radius**2)
  # Since this is a ray and not a line, we only need to consider the case
  # where nabla is positive.
  distance = -u_dot_o + jnp.sqrt(nabla)
  return origin + distance * direction
def are_lines_parallel(dir1, dir2):
  """Whether two direction vectors point the same way, within epsilon."""
  eps = jnp.finfo(jnp.float32).eps
  cos_angle = jnp.dot(spin_math.normalize(dir1), spin_math.normalize(dir2))
  return cos_angle >= 1.0 - eps  # pytype: disable=bad-return-type  # jnp-type
def spherical_equirectangular_grid(
    height,
    width,
    min_elevation = 0,
    max_elevation = jnp.pi,
    min_azimuth = 0,
    max_azimuth = 2 * jnp.pi):
  """Creates an equirectangular grid (panorama) in spherical coordinates.

  Args:
    height: The height of the output grid.
    width: The width of the output grid.
    min_elevation: The minimum value for the elevation.
    max_elevation: The maximum value for the elevation.
    min_azimuth: The minimum value for the azimuth.
    max_azimuth: The maximum value for the azimuth.

  Returns:
    elevations: (height, width) An array containing the elevations.
    azimuths: (height, width) An array containing the azimuths.
  """
  # Elevation covers the full [min, max] range inclusively.
  elev_1d = jnp.linspace(min_elevation, max_elevation, height)
  # The azimuth endpoint is excluded: 0 and 2*pi denote the same direction,
  # so including both would duplicate a sample column.
  azim_1d = jnp.linspace(min_azimuth, max_azimuth, width, endpoint=False)
  azimuths, elevations = jnp.meshgrid(azim_1d, elev_1d)
  return elevations, azimuths  # pytype: disable=bad-return-type  # jax-ndarray
def spherical_to_cartesian(
    r,
    theta,
    phi,
):
  """Converts spherical to cartesian coordinates.

  For more details see cartesian_to_spherical below.

  Args:
    r: (..., 1) Radius of spherical coordinate.
    theta: (..., 1) Elevation of spherical coordinate.
    phi: (..., 1) Azimuth of spherical coordinate.

  Returns:
    Cartesian coordinates of shape (..., 3) defined by x, y, z.
  """
  sin_theta = jnp.sin(theta)
  return jnp.stack(
      [
          r * sin_theta * jnp.cos(phi),
          r * sin_theta * jnp.sin(phi),
          r * jnp.cos(theta),
      ],
      axis=-1,
  )
def cartesian_to_spherical(
    cartesian_vector,
    eps = onp.float32(onp.finfo(onp.float32).tiny)
):
  """Converts cartesian to spherical coordinates.

  Uses a right-handed coordinate system where z is up and y is right. The
  spherical coordinates are defined by radius (r), inclination (theta ∈
  [0, π]) measured from the zenith direction (z), and azimuth (phi ∈ [0, 2π])
  measured from the x-axis toward the y-axis.

  We use the physics convention described at:
  https://en.wikipedia.org/wiki/Spherical_coordinate_system.

  Args:
    cartesian_vector: (..., 3) Cartesian coordinates defined by (x, y, z).
    eps: Epsilon used for the safe norm and safe_acos.

  Returns:
    Spherical coordinates as tuple of r, elevation (theta), azimuth (phi).
  """
  x = cartesian_vector[Ellipsis, 0]
  y = cartesian_vector[Ellipsis, 1]
  z = cartesian_vector[Ellipsis, 2]
  # safe_norm keeps r bounded away from zero so the division below is safe.
  r = optax.safe_norm(cartesian_vector, min_norm=eps, axis=-1)
  theta = spin_math.safe_acos(z / r)
  phi = jnp.arctan2(y, x)
  return r, theta, phi  # pytype: disable=bad-return-type  # jax-ndarray
def sample_random_points_on_sphere(key, num_points,
                                   min_radius,
                                   max_radius):
  """Sample points uniformly on sphere with random radius within bounds.

  Args:
    key: Seed for random sampling.
    num_points: Number of points to sample.
    min_radius: Minimum euclidean distance of point from center of sphere.
    max_radius: Maximum euclidean distance of point from center of sphere.

  Returns:
    Array of uniform points (N, 3) on sphere with random radius.
  """
  radius_key, dir_key, _ = random.split(key, 3)
  radii = random.uniform(
      radius_key, (num_points, 1), minval=min_radius, maxval=max_radius)
  # Normalized Gaussian samples are uniformly distributed on the sphere.
  directions = spin_math.normalize(random.normal(dir_key, (num_points, 3)))
  return directions * radii  # pytype: disable=bad-return-type  # jax-ndarray
def sample_points_evenly_on_sphere(num_points,):
  """Deterministically sample points on a sphere that are evenly distributed.

  Uses a generalization of the sunflower spiral to sample points that are
  distributed evenly on a sphere.

  References:
    http://extremelearning.com.au/how-to-evenly-distribute-points-on-a-sphere-more-effectively-than-the-canonical-fibonacci-lattice/#more-3069
    https://mathoverflow.net/questions/24850/is-there-a-generalisation-of-the-sunflower-spiral-to-higher-dimensions
    https://stackoverflow.com/questions/9600801/evenly-distributing-n-points-on-a-sphere/44164075#44164075

  Args:
    num_points: The number of points to sample.

  Returns:
    (num_points, 3) The sampled points.
  """
  golden_ratio = (1 + 5**0.5) / 2
  # Half-integer offsets avoid placing samples exactly at the poles.
  ks = jnp.arange(0, num_points, dtype=jnp.float32) + 0.5
  azimuths = 2 * jnp.pi * golden_ratio * ks
  elevations = jnp.arccos(1 - 2 * ks / num_points)
  return spherical_to_cartesian(1.0, elevations, azimuths)  # pytype: disable=wrong-arg-types  # jax-ndarray
def is_point_in_convex_hull(point,
                            hull_normals,
                            hull_offsets,
                            padding = 0.0):
  """Computes whether the given points are inside or outside a convex hull.

  The convex hull is defined using the normals and offsets of its facets
  (qhull convention). A point is on the inner side of a facet when its dot
  product with the facet normal is at most the negated offset; a point is
  inside the hull when that holds for every facet.

  References:
    http://www.qhull.org/html/index.htm
    https://docs.scipy.org/doc/scipy/reference/generated/scipy.spatial.ConvexHull.html

  Args:
    point: (..., D) An array containing the points to test.
    hull_normals: (F, D) The normals of the facets of the convex hull.
    hull_offsets: (F, D) The offsets of the facets of the convex hull.
    padding: A number to pad the convex hull by. A positive value expands the
      convex hull while a negative number shrinks it.

  Returns:
    A boolean array of shape (...,) that is True if a point is inside the hull
    and False otherwise.
  """
  batch_shape = point.shape[:-1]
  flat_points = point.reshape(-1, point.shape[-1])
  # (F, P) signed projections of every point onto every facet normal.
  projections = hull_normals @ flat_points.T
  inside = (projections <= -hull_offsets[:, None] + padding).all(axis=0)
  return inside.reshape(batch_shape)
def cosine_to_deg(array):
  """Converts cosine angle to degrees.

  Args:
    array: containing cosine angles (e.g. result of dot product).

  Returns:
    array with angles as degrees.
  """
  # Clip guards arccos against inputs nudged outside [-1, 1] by float error.
  clipped = array.clip(-1, 1)
  return jnp.degrees(jnp.arccos(clipped))
# TODO(phenzler): Convert this to xnp once we have a more solid code base that
# supports xnp.
def onp_cosine_to_deg(array):
  """Converts cosine angle to degrees (NumPy variant of cosine_to_deg).

  Args:
    array: containing cosine angles (e.g. result of dot product).

  Returns:
    array with angles as degrees.
  """
  # Clip guards arccos against inputs nudged outside [-1, 1] by float error.
  clipped = array.clip(-1, 1)
  return onp.degrees(onp.arccos(clipped))
def rotation_distance(rotation_mat1,
                      rotation_mat2):
  """Computes the angle between two rotation matrices in degrees.

  Args:
    rotation_mat1: (3, 3) The first batch of rotation matrix.
    rotation_mat2: (3, 3) The second batch of rotation matrix.

  Returns:
    The angle in degrees between 0 and 180.
  """
  # Compare rotations through their axis-angle (so3 log) representations.
  log1 = rigid_body.log_so3(rotation_mat1)
  log2 = rigid_body.log_so3(rotation_mat2)
  angle_deg = jnp.degrees(jnp.linalg.norm(log1 - log2, axis=-1))
  # Fold angles beyond 180 degrees back into [0, 180].
  return jnp.where(  # pytype: disable=bad-return-type  # jnp-type
      angle_deg < 180, angle_deg, 360 - angle_deg)
def compute_bbox_from_xyza(
    xyza,
    padding,
    alpha_threshold = 0.99,
):
  """Computes a bounding box given an xyza array.

  Args:
    xyza: An array of shape (..., 4) containing the XYZ coordinates in the
      first three channels and an alpha value in the last.
    padding: A padding value to be added to all sides.
    alpha_threshold: The threshold at which to binarize the alpha into a mask.

  Returns:
    A bounding box of shape (2, 3) containing (min_coords, max_coords).
  """
  padding = onp.array(padding)
  # Keep only sufficiently opaque samples.
  opaque = xyza[Ellipsis, 3] > alpha_threshold
  coords = xyza[Ellipsis, :3][opaque].reshape(-1, 3)
  lower = coords.min(axis=0) - padding
  upper = coords.max(axis=0) + padding
  return onp.stack([lower, upper], axis=0)
|
evocodebench_data_28
|
# @Author : Shichao Song
# @Email : song.shichao@outlook.com
from typing import Callable
import evaluate
import jieba
from loguru import logger
from text2vec import Similarity
def catch_all_exceptions(func):
    """Decorator that logs and swallows any exception raised by ``func``.

    On success the wrapped function's result is returned unchanged; on
    failure the exception is logged as a warning and the wrapper returns
    None (callers must tolerate a None result).
    """
    from functools import wraps

    @wraps(func)  # preserve the wrapped function's name/docstring/signature
    def wrapper(*args, **kwargs):
        try:
            return func(*args, **kwargs)
        except Exception as e:
            logger.warning(repr(e))
            # Implicitly returns None so callers are never interrupted.
    return wrapper
@catch_all_exceptions
def bleu4_score(
    continuation: str,
    reference: str,
    with_penalty = False
) -> float:
    """Compute a uniformly weighted 1- to 4-gram BLEU score.

    Args:
        continuation: Generated text to score.
        reference: Ground-truth text.
        with_penalty: If True, additionally multiply by a brevity penalty
            computed from the token lengths.

    Returns:
        The (possibly penalized) BLEU score.
    """
    import math
    from nltk.translate.bleu_score import sentence_bleu

    # NOTE(review): custom_tokenizer is not defined in this module's visible
    # scope -- presumably imported/defined elsewhere; confirm.
    continuation_tokens = custom_tokenizer(continuation)
    reference_tokens = custom_tokenizer(reference)
    # Uniformly weighted 4-gram BLEU.
    bleu_score = sentence_bleu(
        [reference_tokens], continuation_tokens,
        weights=(0.25, 0.25, 0.25, 0.25))
    if with_penalty:
        reference_length = len(reference_tokens)
        continuation_length = len(continuation_tokens)
        # BUG FIX: an empty continuation previously divided by zero below.
        # The shortest possible candidate gets the harshest score.
        if continuation_length == 0:
            return 0.0
        # Standard brevity penalty: 1 if the candidate is longer than the
        # reference, exp(1 - r/c) otherwise.
        if continuation_length > reference_length:
            brevity_penalty = 1
        else:
            brevity_penalty = math.exp(
                1 - (reference_length / continuation_length))
        bleu_score = bleu_score * brevity_penalty
    return bleu_score
@catch_all_exceptions
def rougeL_score(
    continuation: str,
    reference: str
) -> float:
    """ROUGE-L between `continuation` and `reference`, tokenized with jieba."""
    tokenize = lambda text: list(jieba.cut(text))
    rouge = evaluate.load('uhgeval/.cache/huggingface/rouge')
    results = rouge.compute(
        predictions=[continuation],
        references=[[reference]],
        tokenizer=tokenize,
        rouge_types=['rougeL'],
    )
    return results['rougeL']
@catch_all_exceptions
def kw_precision(
    continuation: str,
    reference: str,
    kw_extracter: Callable[[str], list[str]],
    with_kw_list: bool = True
) -> float | tuple[float, list[str], list[str]]:
    """Fraction of keywords extracted from `continuation` that appear in `reference`.

    Args:
        continuation: Generated text to evaluate.
        reference: Ground-truth text the keywords are checked against.
        kw_extracter: Callable extracting a keyword list from a string.
        with_kw_list: If True, also return the matched keywords and the full
            extracted keyword list.

    Returns:
        `(precision, appeared_kws, kws)` when `with_kw_list` is True,
        otherwise just `precision`.
    """
    kws = kw_extracter(continuation)
    if len(kws) == 0:
        # BUG FIX: the conditional expression previously bound only to the
        # last tuple element (`0, [], [] if with_kw_list else 0`), so a tuple
        # was returned regardless of `with_kw_list`. Same for the return at
        # the bottom.
        return (0, [], []) if with_kw_list else 0
    appeared_kws = [kw for kw in kws if kw in reference]
    precision = len(appeared_kws) / len(kws)
    return (precision, appeared_kws, kws) if with_kw_list else precision
@catch_all_exceptions
def bert_score(
    continuation: str,
    reference: str
) -> float:
    """
    Semantic similarity between `continuation` and `reference` via text2vec.

    Note:
        Requesting the network to connect to Hugging Face.
    """
    similarity_model = Similarity()
    return similarity_model.get_score(continuation, reference)
def classifications(
    predictions: list[bool],
    references: list[bool]
) -> tuple[float, float, float, float]:
    """
    Calculate accuracy, precision, recall, and F1 in a binary classification problem.

    Args:
        predictions (list[bool]): List of predicted values (0 or 1).
        references (list[bool]): List of true values (0 or 1).

    Returns:
        tuple: Accuracy, precision, recall, and F1 scores.
    """
    paired = list(zip(references, predictions))
    tp = sum(1 for ref, pred in paired if ref == 1 and pred == 1)
    fp = sum(1 for ref, pred in paired if ref == 0 and pred == 1)
    fn = sum(1 for ref, pred in paired if ref == 1 and pred == 0)
    # Guard every ratio against an empty denominator.
    precision = tp / (tp + fp) if (tp + fp) > 0 else 0
    recall = tp / (tp + fn) if (tp + fn) > 0 else 0
    if precision + recall == 0:
        f1 = 0
    else:
        f1 = 2 * (precision * recall) / (precision + recall)
    matches = sum(1 for ref, pred in paired if ref == pred)
    accuracy = matches / len(predictions) if len(predictions) > 0 else 0
    return accuracy, precision, recall, f1
|
evocodebench_data_29
|
# coding=utf-8
# Copyright 2023 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pyformat: mode=yapf
"""Math utility functions."""
from typing import Optional, Union
from internal import math
import jax
from jax import numpy as jnp
import optax
def matmul(a, b):
  """Matrix product at full precision.

  jnp.matmul defaults to bfloat16 accumulation on TPU; this wrapper forces
  the highest available precision instead.
  """
  return jnp.matmul(a, b, precision=jax.lax.Precision.HIGHEST)
def safe_sqrt(x,
              *,
              eps = jnp.finfo(jnp.float32).eps,
              value_at_zero = 0.0):
  """A safe version of jnp.sqrt that avoids evaluating at zero.

  Note: sqrt(x) = sqrt(eps) = 3e-4 when x < eps = 1.19e-7.

  Args:
    x: The operand.
    eps: A small number to prevent NaNs.
    value_at_zero: The value to clamp x to near zero. The return value will
      be sqrt(value_at_zero).

  Returns:
    The sqrt(x), or sqrt(value_at_zero) near zero.
  """
  # Substitute value_at_zero wherever x is at or below eps, so sqrt (and its
  # gradient) is never evaluated at 0.
  clamped = jnp.where(x > eps, x, jnp.full_like(x, value_at_zero))
  return jnp.sqrt(clamped)
def safe_acos(t,
              eps = jnp.finfo(jnp.float32).eps):
  """arccos with operands nudged strictly inside (-1, 1) to avoid inf grads."""
  clipped = jnp.clip(t, -1.0 + eps, 1.0 - eps)
  return jnp.arccos(clipped)
def safe_log(x,
             *,
             eps = jnp.finfo(jnp.float32).eps,
             value_at_zero = jnp.finfo(jnp.float32).eps):
  """Computes a safe log that avoids evaluating at zero.

  Args:
    x: Input array.
    eps: A small number to prevent NaNs.
    value_at_zero: The value to clamp x to near zero. The return value will
      be log(value_at_zero).

  Returns:
    log(x) or log(value_at_zero) near zero.
  """
  # Substitute value_at_zero wherever x is at or below eps, so log (and its
  # gradient) is never evaluated at 0.
  clamped = jnp.where(x > eps, x, jnp.full_like(x, value_at_zero))
  return jnp.log(clamped)
def normalize(
    x,
    axis = -1,
    # pylint: disable=redefined-builtin
    ord = None,
    eps = jnp.finfo(jnp.float32).eps,
):
  """Scales a vector to unit norm, guarding against division by zero."""
  # safe_norm keeps the denominator bounded away from zero.
  norm = optax.safe_norm(x, axis=axis, ord=ord, min_norm=eps, keepdims=True)
  return x / norm
def inv_sqrtm(
    matrix,
    normalize_eigvals = False,
):
  """Takes the inverse matrix square root of a PSD matrix.

  Forked from `coord.sqrtm`.

  Args:
    matrix: (..., d, d) A positive semi-definite matrix.
    normalize_eigvals: If True, normalize the eigenvalues by the geometric
      mean.

  Returns:
    A tuple of the inverse square root of the matrix and the
    eigendecomposition `(eigvec, eigval)` used to compute it (eigval is
    post-normalization when `normalize_eigvals` is True). Note: the tuple is
    always returned; there is no flag to suppress the eigendecomposition.
  """
  eigvec, eigval = jax.lax.linalg.eigh(
      matrix, symmetrize_input=False, sort_eigenvalues=False)
  if normalize_eigvals:
    # Equivalent to dividing by geometric mean, but numerically stabler.
    log_eigval = jnp.log(eigval)
    eigval = jnp.exp(log_eigval - jnp.mean(log_eigval, axis=-1, keepdims=True))
  # M^(-1/2) = V diag(1/sqrt(lambda)) V^T; safe ops guard zero eigenvalues.
  scaling = math.safe_div(1, math.safe_sqrt(eigval))
  scaling = scaling[Ellipsis, None, :]
  sqrtm_mat = matmul(eigvec * scaling, jnp.moveaxis(eigvec, -2, -1))
  return sqrtm_mat, (eigvec, eigval)
def to_homogeneous(v):
  """Appends a homogeneous coordinate of 1 along the last axis.

  Args:
    v: (*, C) A non-homogeneous vector.

  Returns:
    (*, C+1) A homogeneous version of v.
  """
  ones = jnp.ones_like(v[Ellipsis, :1])
  return jnp.concatenate([v, ones], axis=-1)
def from_homogeneous(v):
  """Divides out the homogeneous coordinate on the last axis.

  Args:
    v: (*, C+1) A homogeneous vector.

  Returns:
    (*, C) The non-homogeneous version of v.
  """
  w = v[Ellipsis, -1:]
  return v[Ellipsis, :-1] / w
def apply_homogeneous_transform(transform,
                                vectors):
  """Apply a homogeneous transformation to a collection of vectors.

  Args:
    transform: (C+1, C+1) A homogeneous transformation matrix.
    vectors: (*, C) An array containing points.

  Returns:
    (*, C) The points transformed by the matrix.
  """
  # Flatten the batch so the transform can be applied with one matmul.
  flat = vectors.reshape((-1, vectors.shape[-1]))
  homogeneous = to_homogeneous(flat)
  transformed = from_homogeneous(matmul(transform, homogeneous.T).T)
  return transformed.reshape(vectors.shape)
def generalized_bias_and_gain(x, slope,
                              threshold):
  """Maps the input according to the generalized bias and gain function.

  References:
    https://arxiv.org/abs/2010.09714

  Args:
    x: The inputs array with values in [0, 1] to map.
    slope: The slope parameter of the curve which controls the slope of the
      curve at the threshold.
    threshold: The value at which `x` reverses its shape, and the point at
      which the output is guaranteed to be equal to the input.

  Returns:
    The output of the curve at each input point `x`.
  """
  # tiny keeps both denominators away from exact zero.
  tiny = jnp.finfo(jnp.float32).tiny
  below = (threshold * x) / (x + slope * (threshold - x) + tiny)
  above = ((1 - threshold) * (x - 1) /
           (1 - x - slope * (threshold - x) + tiny) + 1)
  return jnp.where(x < threshold, below, above)
|
evocodebench_data_30
|
# coding=utf-8
# Copyright 2023 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tools for manipulating step functions (piecewise-constant 1D functions).
We have a shared naming and dimension convention for these functions.
All input/output step functions are assumed to be aligned along the last axis.
`t` always indicates the x coordinates of the *endpoints* of a step function.
`y` indicates unconstrained values for the *bins* of a step function
`w` indicates bin weights that sum to <= 1. `p` indicates non-negative bin
values that *integrate* to <= 1.
"""
from internal import linspline
from internal import math
from internal import utils
import jax
import jax.numpy as jnp
import numpy as np
def query(tq, t, y, left=None, right=None):
  """Query step function (t, y) at locations tq. Edges repeat by default.

  Args:
    tq: query coordinates.
    t: [..., n+1] step function endpoint coordinates (must be sorted).
    y: [..., n] step function bin values.
    left: value for queries left of t[..., 0]; defaults to the first bin's
      value (edge repeat).
    right: value for queries right of t[..., -1]; defaults to the last bin's
      value (edge repeat).

  Returns:
    The step function values at tq.
  """
  utils.assert_valid_stepfun(t, y)
  # Query the step function to recover the interval value.
  (i0, i1), ((yq, _),) = math.sorted_lookup(tq, t, (y,), utils.device_is_tpu())
  # Apply boundary conditions for queries outside the endpoint range.
  left = y[Ellipsis, :1] if left is None else left
  right = y[Ellipsis, -1:] if right is None else right
  # NOTE(review): i1 == 0 / i0 == y.shape[-1] presumably flag out-of-range
  # queries from sorted_lookup — confirm against math.sorted_lookup.
  yq = math.select([(i1 == 0, left), (i0 == y.shape[-1], right)], yq)
  return yq
def weight_to_pdf(t, w):
  """Turn a vector of weights that sums to 1 into a PDF that integrates to 1."""
  utils.assert_valid_stepfun(t, w)
  bin_widths = jnp.diff(t)
  # Degenerate (zero-width) bins get a density of 0 instead of inf/NaN.
  return jnp.where(
      bin_widths < np.finfo(np.float32).tiny, 0,
      math.safe_div(w, bin_widths))
def pdf_to_weight(t, p):
  """Turn a PDF that integrates to 1 into a vector of weights that sums to 1."""
  utils.assert_valid_stepfun(t, p)
  # A bin's weight is its density times its width.
  return jnp.diff(t) * p
def integrate_weights(w):
  """Compute the cumulative sum of w, assuming all weight vectors sum to 1.

  The output's size on the last dimension is one greater than that of the
  input, because we're computing the integral corresponding to the endpoints
  of a step function, not the integral of the interior/bin values.

  Args:
    w: Tensor, which will be integrated along the last axis. This is assumed
      to sum to 1 along the last axis, and this function will (silently)
      break if that is not the case.

  Returns:
    cw0: Tensor, the integral of w, where cw0[..., 0] = 0 and cw0[..., -1] = 1
  """
  # Partial sums over the interior bins, clipped so accumulated floating
  # point error cannot push the CDF above 1.
  interior = jnp.minimum(1, jnp.cumsum(w[Ellipsis, :-1], axis=-1))
  pad_shape = interior.shape[:-1] + (1,)
  # Pin the endpoints to exactly 0 and 1.
  return jnp.concatenate(
      [jnp.zeros(pad_shape), interior, jnp.ones(pad_shape)], axis=-1)
def invert_cdf(u, t, w_logits):
  """Invert the CDF defined by (t, w) at the points specified by u in [0, 1)."""
  utils.assert_valid_stepfun(t, w_logits)
  # Normalize the logits into weights, then accumulate them into a CDF.
  weights = jax.nn.softmax(w_logits, axis=-1)
  cdf = integrate_weights(weights)
  # Evaluate the inverse CDF by interpolating t against the CDF values.
  return math.sorted_interp(u, cdf, t, utils.device_is_tpu())
def sample(
    rng,
    t,
    w_logits,
    num_samples,
    single_jitter=False,
    deterministic_center=False,
    eps=jnp.finfo(jnp.float32).eps,
):
  """Piecewise-Constant PDF sampling from a step function.

  Args:
    rng: random number generator (or None for `linspace` sampling).
    t: [..., num_bins + 1], bin endpoint coordinates (must be sorted)
    w_logits: [..., num_bins], logits corresponding to bin weights
    num_samples: int, the number of samples.
    single_jitter: bool, if True, jitter every sample along each ray by the
      same amount in the inverse CDF. Otherwise, jitter each sample
      independently.
    deterministic_center: bool, if False, when `rng` is None return samples
      that linspace the entire PDF. If True, skip the front and back of the
      linspace so that the centers of each PDF interval are returned.
    eps: float, something like numerical epsilon.

  Returns:
    t_samples: jnp.ndarray(float32), [batch_size, num_samples].
  """
  utils.assert_valid_stepfun(t, w_logits)
  # Draw uniform samples.
  if rng is None:
    # Match the behavior of jax.random.uniform() by spanning [0, 1-eps].
    if deterministic_center:
      # Offset by half an interval so samples land on interval centers.
      pad = 1 / (2 * num_samples)
      u = jnp.linspace(pad, 1.0 - pad - eps, num_samples)
    else:
      u = jnp.linspace(0, 1.0 - eps, num_samples)
    u = jnp.broadcast_to(u, t.shape[:-1] + (num_samples,))
  else:
    # Stratified sampling: evenly spaced strata plus per-stratum jitter.
    # `u` is in [0, 1) --- it can be zero, but it can never be 1.
    u_max = eps + (1 - eps) / num_samples
    # Largest jitter that keeps adjacent strata from overlapping.
    max_jitter = (1 - u_max) / (num_samples - 1) - eps
    # d == 1 broadcasts a single jitter across all samples of a ray.
    d = 1 if single_jitter else num_samples
    u = jnp.linspace(0, 1 - u_max, num_samples) + jax.random.uniform(
        rng, t.shape[:-1] + (d,), maxval=max_jitter
    )
  return invert_cdf(u, t, w_logits)
def sample_intervals(
    rng,
    t,
    w_logits,
    num_samples,
    single_jitter=False,
    domain=(-jnp.inf, jnp.inf),
):
  """Sample *intervals* (rather than points) from a step function.

  Args:
    rng: random number generator (or None for `linspace` sampling).
    t: [..., num_bins + 1], bin endpoint coordinates (must be sorted)
    w_logits: [..., num_bins], logits corresponding to bin weights
    num_samples: int, the number of intervals to sample.
    single_jitter: bool, if True, jitter every sample along each ray by the
      same amount in the inverse CDF. Otherwise, jitter each sample
      independently.
    domain: (minval, maxval), the range of valid values for `t`.

  Returns:
    t_samples: jnp.ndarray(float32), [batch_size, num_samples].

  Raises:
    ValueError: if num_samples is not > 1 (a single center cannot define
      an interval).
  """
  utils.assert_valid_stepfun(t, w_logits)
  if num_samples <= 1:
    raise ValueError(f'num_samples must be > 1, is {num_samples}.')
  # Sample a set of points from the step function; these become the interval
  # centers.
  centers = sample(
      rng, t, w_logits, num_samples, single_jitter, deterministic_center=True
  )
  # The intervals we return will span the midpoints of each adjacent sample.
  mid = (centers[Ellipsis, 1:] + centers[Ellipsis, :-1]) / 2
  # Each first/last fencepost is the reflection of the first/last midpoint
  # around the first/last sampled center.
  first = 2 * centers[Ellipsis, :1] - mid[Ellipsis, :1]
  last = 2 * centers[Ellipsis, -1:] - mid[Ellipsis, -1:]
  samples = jnp.concatenate([first, mid, last], axis=-1)
  # We clamp to the limits of the input domain, provided by the caller.
  samples = jnp.clip(samples, *domain)
  return samples
def lossfun_distortion(t, w):
  """Compute iint w[i] w[j] |t[i] - t[j]| di dj."""
  utils.assert_valid_stepfun(t, w)
  # Midpoint of each interval.
  midpoints = (t[Ellipsis, 1:] + t[Ellipsis, :-1]) / 2
  # Pairwise distances between interval midpoints.
  pairwise = jnp.abs(midpoints[Ellipsis, :, None] - midpoints[Ellipsis, None, :])
  # Loss incurred between all pairs of distinct intervals.
  loss_inter = jnp.sum(
      w * jnp.sum(w[Ellipsis, None, :] * pairwise, axis=-1), axis=-1)
  # Loss incurred by each interval against itself.
  loss_intra = jnp.sum(w**2 * jnp.diff(t), axis=-1) / 3
  return loss_inter + loss_intra
def weighted_percentile(t, w, ps):
  """Compute the weighted percentiles of a step function. w's must sum to 1."""
  utils.assert_valid_stepfun(t, w)
  cdf = integrate_weights(w)
  # Interpolate the endpoints t against the CDF at the requested fractions.
  interp = jnp.vectorize(jnp.interp, signature='(n),(m),(m)->(n)')
  return interp(jnp.array(ps) / 100, cdf, t)
def resample(t, tp, vp, use_avg=False):
  """Resample a step function defined by (tp, vp) into intervals t.

  Notation roughly matches jnp.interp. Resamples by summation by default.

  Args:
    t: tensor with shape (..., n+1), the endpoints to resample into.
    tp: tensor with shape (..., m+1), the endpoints of the step function being
      resampled.
    vp: tensor with shape (..., m), the values of the step function being
      resampled.
    use_avg: bool, if False, return the sum of the step function for each
      interval in `t`. If True, return the average, weighted by the width of
      each interval in `t`.

  Returns:
    v: tensor with shape (..., n), the values of the resampled step function.
  """
  utils.assert_valid_stepfun(tp, vp)
  if use_avg:
    # Width-weighted average = (sum of value*width) / (sum of width), both
    # computed by recursing into the summation branch below.
    wp = jnp.diff(tp)
    v_numer = resample(t, tp, vp * wp, use_avg=False)
    v_denom = resample(t, tp, wp, use_avg=False)
    v = math.safe_div(v_numer, v_denom)
    return v
  # Summation branch: build the cumulative sum of vp with a leading zero so
  # each endpoint carries the total mass to its left...
  acc = jnp.cumsum(vp, axis=-1)
  acc0 = jnp.concatenate([jnp.zeros(acc.shape[:-1] + (1,)), acc], axis=-1)
  # ...then linearly interpolate that running total at the new endpoints...
  acc0_resampled = jnp.vectorize(jnp.interp, signature='(n),(m),(m)->(n)')(
      t, tp, acc0
  )
  # ...and difference it to recover per-interval sums.
  v = jnp.diff(acc0_resampled, axis=-1)
  return v
def blur_and_resample_weights(tq, t, w, blur_halfwidth):
    """Blur the (t, w) histogram by blur_halfwidth, then resample it into tq."""
    utils.assert_valid_stepfun(t, w)
    # Histogram -> PDF step function.
    pdf = weight_to_pdf(t, w)
    # Blurring turns the step-function PDF into a piecewise-linear spline PDF.
    knots, spline_values = linspline.blur_stepfun(t, pdf, blur_halfwidth)
    # Integrate the spline PDF and evaluate that integral at the query endpoints.
    integral = linspline.compute_integral(knots, spline_values)
    acc_weights = linspline.interpolate_integral(tq, knots, *integral)
    # Differencing the integrated weights recovers per-interval weights.
    weights = jnp.diff(acc_weights, axis=-1)
    # Clamp tiny negative values that can appear from numerical error.
    return jnp.maximum(0, weights)
|
evocodebench_data_31
|
# Copyright The Lightning AI team.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import contextlib
import logging
import multiprocessing
import os
import warnings
from logging import Logger
from queue import Empty
from threading import Thread
from typing import Any, Dict, List, Optional, Tuple, Union

from litdata.constants import _TORCH_GREATER_EQUAL_2_1_0
from litdata.streaming.config import ChunksConfig
from litdata.streaming.item_loader import BaseItemLoader, PyTreeLoader
from litdata.streaming.sampler import ChunkedIndex
from litdata.streaming.serializers import Serializer, _get_serializers
from litdata.utilities.env import _DistributedEnv, _WorkerEnv
warnings.filterwarnings("ignore", message=".*The given buffer is not writable.*")
if _TORCH_GREATER_EQUAL_2_1_0:
pass
logger = Logger(__name__)
_END_TOKEN = "END"
# Note: The timeout here should not be too short. We need to prevent the caller from aggressively
# querying the queue and consuming too many CPU cycles.
_DEFAULT_TIMEOUT = 0.1
_LONG_DEFAULT_TIMEOUT = 5
class PrepareChunksThread(Thread):
    """This thread is responsible to download the chunks associated to a given worker.

    The main thread feeds chunk indexes through two queues (download / delete);
    `run()` drains them, downloading ahead of consumption (bounded by
    `max_pre_download`) and deleting consumed chunks when the cache is full.
    """

    def __init__(
        self,
        config: ChunksConfig,
        item_loader: BaseItemLoader,
        distributed_env: _DistributedEnv,
        max_cache_size: Optional[int] = None,
        max_pre_download: int = 2,
    ) -> None:
        # Daemon thread: must never block interpreter shutdown.
        super().__init__(daemon=True)
        self._config = config
        self._item_loader = item_loader
        # Upper bound on chunks downloaded ahead of consumption.
        self._max_pre_download = max_pre_download
        # Number of chunks downloaded but not yet fully consumed.
        self._pre_download_counter = 0
        self._distributed_env = distributed_env
        # FIFO of consumed chunk indexes awaiting deletion.
        self._chunks_index_to_be_deleted: List[int] = []
        self._max_cache_size = max_cache_size
        self._parent_cache_dir = os.path.dirname(self._config._cache_dir)
        # Work queues written by the main thread and drained in run().
        self._to_download_queue: multiprocessing.Queue = multiprocessing.Queue()
        self._to_delete_queue: multiprocessing.Queue = multiprocessing.Queue()
        # Check whether a dataset slice fits on the node
        num_bytes_per_nodes = self._config.num_bytes // self._distributed_env.num_nodes
        # If this node's data share exceeds the cache budget, chunks must be
        # deleted as soon as they have been processed.
        self._delete_chunks_when_processed = num_bytes_per_nodes > max_cache_size if max_cache_size else False
        self._has_exited = False

    def download(self, chunk_indexes: List[int]) -> None:
        """Receive the list of the chunk indices to download for the current epoch."""
        for chunk_index in chunk_indexes:
            self._to_download_queue.put(chunk_index)

    def delete(self, chunk_indexes: List[int]) -> None:
        """Receive the list of the chunk indices to delete for the current epoch."""
        for chunk_index in chunk_indexes:
            self._to_delete_queue.put(chunk_index)

    def _delete(self, chunk_index: int) -> None:
        """Inform the item loader of the chunk to delete."""
        # index=-1 is a placeholder: only chunk_index matters for this lookup.
        chunk_filepath, _, _ = self._config[ChunkedIndex(index=-1, chunk_index=chunk_index)]
        self._item_loader.delete(chunk_index, chunk_filepath)

    def stop(self) -> None:
        """Ask the thread to exit once all previously queued downloads are processed."""
        self._to_download_queue.put(_END_TOKEN)

    def _maybe_delete_chunks(self) -> None:
        """Collect consumed-chunk notifications and delete chunks when allowed."""
        reached_pre_download = self._pre_download_counter == self._max_pre_download
        # we have already pre-downloaded some chunks, we just need to wait for them to be processed.
        chunk_index = _get_from_queue(
            self._to_delete_queue, timeout=_LONG_DEFAULT_TIMEOUT if reached_pre_download else _DEFAULT_TIMEOUT
        )
        if chunk_index is not None:
            # A consumed chunk frees one pre-download slot.
            self._pre_download_counter -= 1
            # Store the current chunk index
            self._chunks_index_to_be_deleted.append(chunk_index)
        # Get the current cache size and decide whether we need to start cleanup. Otherwise, keep track of it
        while self._max_cache_size and self._chunks_index_to_be_deleted and self._can_delete_chunk():
            # Delete the oldest chunk
            self._delete(self._chunks_index_to_be_deleted.pop(0))
        return

    def _can_delete_chunk(self) -> bool:
        # In "delete as processed" mode, free space as soon as the pre-download
        # budget is (almost) exhausted; otherwise only when the on-disk cache
        # actually exceeds its byte budget.
        if self._delete_chunks_when_processed:
            return self._pre_download_counter >= self._max_pre_download - 1
        return self._max_cache_size is not None and _get_folder_size(self._parent_cache_dir) >= self._max_cache_size

    def _pre_load_chunk(self, chunk_index: int) -> None:
        # Warm the item loader for a chunk that is already on disk.
        chunk_filepath, _, _ = self._config[ChunkedIndex(index=-1, chunk_index=chunk_index)]
        self._item_loader.pre_load_chunk(chunk_index, chunk_filepath)

    def run(self) -> None:
        """Main loop: download queued chunks and trigger cache cleanup until stopped."""
        while True:
            if self._pre_download_counter < self._max_pre_download:
                chunk_index = _get_from_queue(self._to_download_queue)
                if chunk_index == _END_TOKEN:
                    self._has_exited = True
                    return
                if chunk_index is not None:
                    self._config.download_chunk_from_index(chunk_index)
                    # Preload item if possible to gain some time but only
                    # if this is one of the pre-downloaded chunk
                    if self._pre_download_counter > 0:
                        self._pre_load_chunk(chunk_index)
                    # Avoid downloading too many chunks in advance at the risk of over using the disk space
                    self._pre_download_counter += 1
            if self._max_cache_size:
                self._maybe_delete_chunks()
class BinaryReader:
    """Reads items from a chunked, optionally remote/compressed dataset cache."""

    def __init__(
        self,
        cache_dir: str,
        max_cache_size: Optional[Union[int, str]] = None,
        remote_input_dir: Optional[str] = None,
        compression: Optional[str] = None,
        item_loader: Optional[BaseItemLoader] = None,
        serializers: Optional[Dict[str, Serializer]] = None,
    ) -> None:
        """The BinaryReader enables to read chunked dataset in an efficient way.

        Arguments:
            cache_dir: The path to cache folder.
            remote_input_dir: The path to a remote folder where the data are located.
                The scheme needs to be added to the path.
            compression: The algorithm to decompress the chunks.
            item_loader: The chunk sampler to create sub arrays from a chunk.
            max_cache_size: The maximum cache size used by the reader when fetching the chunks.
            serializers: Provide your own serializers.
        """
        super().__init__()
        warnings.filterwarnings("ignore", message=".*The given buffer is not writable.*")
        self._cache_dir = cache_dir
        self._remote_input_dir = remote_input_dir
        if not os.path.exists(self._cache_dir):
            raise FileNotFoundError(f"The provided cache_dir `{self._cache_dir}` doesn't exist.")
        self._compression = compression
        self._intervals: Optional[List[str]] = None
        self._serializers: Dict[str, Serializer] = _get_serializers(serializers)
        self._distributed_env = _DistributedEnv.detect()
        # Lazily computed in the `rank` property (worker env isn't known yet).
        self._rank: Optional[int] = None
        # Lazily loaded by `_try_load_config` once the index files exist.
        self._config: Optional[ChunksConfig] = None
        # Background downloader, created on first remote/compressed read.
        self._prepare_thread: Optional[PrepareChunksThread] = None
        self._item_loader = item_loader or PyTreeLoader()
        # Tracks the chunk currently being consumed (for deletion bookkeeping).
        self._last_chunk_index: Optional[int] = None
        # Env var takes precedence over the constructor argument.
        self._max_cache_size = int(os.getenv("MAX_CACHE_SIZE", max_cache_size or 0))

    def _get_chunk_index_from_index(self, index: int) -> int:
        """Map a global sample index to the index of the chunk containing it."""
        # Load the config containing the index
        if self._config is None and self._try_load_config() is None:
            raise Exception("The reader index isn't defined.")
        return self._config._get_chunk_index_from_index(index)  # type: ignore

    def _try_load_config(self) -> Optional[ChunksConfig]:
        """Try to load the chunks config if the index files are available."""
        self._config = ChunksConfig.load(self._cache_dir, self._serializers, self._remote_input_dir, self._item_loader)
        return self._config

    @property
    def config(self) -> ChunksConfig:
        # Accessor that asserts the config has been loaded.
        if self._config is None:
            raise RuntimeError("The config should be defined.")
        return self._config

    @property
    def rank(self) -> int:
        """Returns the rank of the writer."""
        if self._rank is None:
            # Global rank = node rank spread over this process's worker slots.
            self._worker_env = _WorkerEnv.detect()
            self._rank = self._distributed_env.global_rank * self._worker_env.world_size + self._worker_env.rank
        return self._rank

    def read(self, index: ChunkedIndex) -> Any:
        """Read an item for the given from a chunk.

        If the chunk isn't available locally or in memory, it will be downloaded.

        Prefetching should reduce the wait time to be the batch available.
        """
        if not isinstance(index, ChunkedIndex):
            raise ValueError("The Reader.read(...) method expects a chunked Index.")
        # Load the config containing the index
        if self._config is None and self._try_load_config() is None:
            raise Exception("The reader index isn't defined.")
        # Only remote or compressed datasets need the background downloader.
        if self._config and (self._config._remote_dir or self._config._compressor):
            # Create and start the prepare chunks thread
            if self._prepare_thread is None and self._config:
                self._prepare_thread = PrepareChunksThread(
                    self._config, self._item_loader, self._distributed_env, self._max_cache_size
                )
                self._prepare_thread.start()
                if index.chunk_indexes:
                    # Queue the whole epoch's chunks for prefetching up front.
                    self._prepare_thread.download(index.chunk_indexes)
            # If the chunk_index is new, request for it to be downloaded.
            if index.chunk_index != self._last_chunk_index:
                assert self._prepare_thread
                self._prepare_thread.download([index.chunk_index])
            if self._last_chunk_index is None:
                self._last_chunk_index = index.chunk_index
        # Fetch the element
        chunk_filepath, begin, _ = self.config[index]
        item = self._item_loader.load_item_from_chunk(index.index, index.chunk_index, chunk_filepath, begin)
        # We need to request deletion after the latest element has been loaded.
        # Otherwise, this could trigger segmentation fault error depending on the item loader used.
        if self._config and self._config._remote_dir and index.chunk_index != self._last_chunk_index:
            assert self._prepare_thread
            assert self._last_chunk_index is not None
            # inform the chunk has been completely consumed
            self._prepare_thread.delete([self._last_chunk_index])
            # track the new chunk index as the latest one
            self._last_chunk_index = index.chunk_index
        if index.is_last_index and self._prepare_thread:
            # inform the thread it is time to stop
            self._prepare_thread.stop()
            self._prepare_thread = None
        return item

    def get_length(self) -> int:
        """Get the number of samples across all chunks."""
        if self._config is None and self._try_load_config() is None:
            raise Exception("The reader index isn't defined.")
        return len(self.config)

    def get_chunk_intervals(self) -> List[Tuple[int, int]]:
        """Get the index interval of each chunk."""
        if self._config is None and self._try_load_config() is None:
            raise Exception("The reader index isn't defined.")
        return self.config.intervals

    def __getstate__(self) -> Dict[str, Any]:
        # The background thread isn't picklable; drop it when serializing.
        state = self.__dict__.copy()
        state["_prepare_thread"] = None
        return state
def _get_folder_size(path: str) -> int:
"""Collect the size of each files within a folder.
This method is robust to file deletion races
"""
size = 0
for dirpath, _, filenames in os.walk(str(path)):
for filename in filenames:
with contextlib.suppress(FileNotFoundError):
size += os.stat(os.path.join(dirpath, filename)).st_size
return size
def _get_from_queue(queue: multiprocessing.Queue, timeout: float = _DEFAULT_TIMEOUT) -> Optional[Any]:
    """Pop one item from `queue`, returning None on timeout or benign teardown errors."""
    try:
        return queue.get(timeout=timeout)
    except Empty:
        # Nothing arrived within the timeout window.
        return None
    except OSError as err:
        # The queue may already be closed while the owning thread shuts down.
        if "handle is closed" in str(err) or "Bad file descriptor" in str(err):
            logger.debug(err)
            return None
        raise err
    except EOFError as err:
        # Writer end disappeared during teardown.
        logger.debug(err)
        return None
|
evocodebench_data_32
|
# Copyright (c) OpenMMLab. All rights reserved.
from logging import warning
import numpy as np
import torch
from mmdet3d.core.utils import array_converter
@array_converter(apply_to=('val', ))
def limit_period(val, offset=0.5, period=np.pi):
    """Limit the value into a period for periodic function.

    Args:
        val (torch.Tensor | np.ndarray): The value to be converted.
        offset (float, optional): Offset to set the value range.
            Defaults to 0.5.
        period (float, optional): Period of the value. Defaults to np.pi.

    Returns:
        (torch.Tensor | np.ndarray): Value in the range of
            [-offset * period, (1-offset) * period]
    """
    # Number of whole periods to remove so the result lands in the target range.
    whole_periods = torch.floor(val / period + offset)
    return val - whole_periods * period
@array_converter(apply_to=('points', 'angles'))
def rotation_3d_in_axis(points,
                        angles,
                        axis=0,
                        return_mat=False,
                        clockwise=False):
    """Rotate points by angles according to axis.

    Args:
        points (np.ndarray | torch.Tensor | list | tuple ):
            Points of shape (N, M, 3).
        angles (np.ndarray | torch.Tensor | list | tuple | float):
            Vector of angles in shape (N,)
        axis (int, optional): The axis to be rotated. Defaults to 0.
        return_mat: Whether or not return the rotation matrix (transposed).
            Defaults to False.
        clockwise: Whether the rotation is clockwise. Defaults to False.

    Raises:
        ValueError: when the axis is not in range [0, 1, 2], it will
            raise value error.

    Returns:
        (torch.Tensor | np.ndarray): Rotated points in shape (N, M, 3).
    """
    # Accept un-batched (M, D) input by temporarily adding a batch dim.
    batch_free = len(points.shape) == 2
    if batch_free:
        points = points[None]
    # Broadcast a scalar angle to one angle per batch element.
    if isinstance(angles, float) or len(angles.shape) == 0:
        angles = torch.full(points.shape[:1], angles)
    assert len(points.shape) == 3 and len(angles.shape) == 1 \
        and points.shape[0] == angles.shape[0], f'Incorrect shape of points ' \
        f'angles: {points.shape}, {angles.shape}'
    assert points.shape[-1] in [2, 3], \
        f'Points size should be 2 or 3 instead of {points.shape[-1]}'
    rot_sin = torch.sin(angles)
    rot_cos = torch.cos(angles)
    ones = torch.ones_like(rot_cos)
    zeros = torch.zeros_like(rot_cos)
    # Build the (transposed) rotation matrix, stacked per batch element.
    if points.shape[-1] == 3:
        if axis == 1 or axis == -2:
            # Rotation about the y-axis.
            rot_mat_T = torch.stack([
                torch.stack([rot_cos, zeros, -rot_sin]),
                torch.stack([zeros, ones, zeros]),
                torch.stack([rot_sin, zeros, rot_cos])
            ])
        elif axis == 2 or axis == -1:
            # Rotation about the z-axis.
            rot_mat_T = torch.stack([
                torch.stack([rot_cos, rot_sin, zeros]),
                torch.stack([-rot_sin, rot_cos, zeros]),
                torch.stack([zeros, zeros, ones])
            ])
        elif axis == 0 or axis == -3:
            # Rotation about the x-axis.
            rot_mat_T = torch.stack([
                torch.stack([ones, zeros, zeros]),
                torch.stack([zeros, rot_cos, rot_sin]),
                torch.stack([zeros, -rot_sin, rot_cos])
            ])
        else:
            raise ValueError(f'axis should in range '
                             f'[-3, -2, -1, 0, 1, 2], got {axis}')
    else:
        # 2D case: plain in-plane rotation.
        rot_mat_T = torch.stack([
            torch.stack([rot_cos, rot_sin]),
            torch.stack([-rot_sin, rot_cos])
        ])
    if clockwise:
        # Transposing the rotation matrix inverts the rotation direction.
        rot_mat_T = rot_mat_T.transpose(0, 1)
    if points.shape[0] == 0:
        # einsum does not accept an empty batch; pass empty input through.
        points_new = points
    else:
        # Batched matmul: rot_mat_T is laid out (D, D, N), hence 'jka'.
        points_new = torch.einsum('aij,jka->aik', points, rot_mat_T)
    if batch_free:
        points_new = points_new.squeeze(0)
    if return_mat:
        # Reorder to (N, D, D) before returning the matrices.
        rot_mat_T = torch.einsum('jka->ajk', rot_mat_T)
        if batch_free:
            rot_mat_T = rot_mat_T.squeeze(0)
        return points_new, rot_mat_T
    else:
        return points_new
@array_converter(apply_to=('boxes_xywhr', ))
def xywhr2xyxyr(boxes_xywhr):
    """Convert a rotated boxes in XYWHR format to XYXYR format.

    Args:
        boxes_xywhr (torch.Tensor | np.ndarray): Rotated boxes in XYWHR format.

    Returns:
        (torch.Tensor | np.ndarray): Converted boxes in XYXYR format.
    """
    out = torch.zeros_like(boxes_xywhr)
    center_x = boxes_xywhr[..., 0]
    center_y = boxes_xywhr[..., 1]
    half_w = boxes_xywhr[..., 2] / 2
    half_h = boxes_xywhr[..., 3] / 2
    # Corners from center +/- half extents; rotation angle passes through.
    out[..., 0] = center_x - half_w
    out[..., 1] = center_y - half_h
    out[..., 2] = center_x + half_w
    out[..., 3] = center_y + half_h
    out[..., 4] = boxes_xywhr[..., 4]
    return out
def get_box_type(box_type):
    """Get the type and mode of box structure.

    Args:
        box_type (str): The type of box structure.
            The valid value are "LiDAR", "Camera", or "Depth".

    Raises:
        ValueError: A ValueError is raised when `box_type`
            does not belong to the three valid types.

    Returns:
        tuple: Box type and box mode.
    """
    from .box_3d_mode import (Box3DMode, CameraInstance3DBoxes,
                              DepthInstance3DBoxes, LiDARInstance3DBoxes)
    # Dispatch table keyed by the lower-cased type name.
    type_and_mode = {
        'lidar': (LiDARInstance3DBoxes, Box3DMode.LIDAR),
        'camera': (CameraInstance3DBoxes, Box3DMode.CAM),
        'depth': (DepthInstance3DBoxes, Box3DMode.DEPTH),
    }
    key = box_type.lower()
    if key not in type_and_mode:
        raise ValueError('Only "box_type" of "camera", "lidar", "depth"'
                         f' are supported, got {box_type}')
    return type_and_mode[key]
@array_converter(apply_to=('points_3d', 'proj_mat'))
def points_cam2img(points_3d, proj_mat, with_depth=False):
    """Project points in camera coordinates to image coordinates.

    Args:
        points_3d (torch.Tensor | np.ndarray): Points in shape (N, 3)
        proj_mat (torch.Tensor | np.ndarray):
            Transformation matrix between coordinates.
        with_depth (bool, optional): Whether to keep depth in the output.
            Defaults to False.

    Returns:
        (torch.Tensor | np.ndarray): Points in image coordinates,
            with shape [N, 2] if `with_depth=False`, else [N, 3].
    """
    assert len(proj_mat.shape) == 2, 'The dimension of the projection'\
        f' matrix should be 2 instead of {len(proj_mat.shape)}.'
    rows, cols = proj_mat.shape[:2]
    assert (rows == 3 and cols == 3) or (rows == 3 and cols == 4) or (
        rows == 4 and cols == 4), 'The shape of the projection matrix'\
        f' ({rows}*{cols}) is not supported.'
    # Pad a 3xN projection matrix to homogeneous 4x4 form.
    if rows == 3:
        padded = torch.eye(4, device=proj_mat.device, dtype=proj_mat.dtype)
        padded[:rows, :cols] = proj_mat
        proj_mat = padded
    # Append a homogeneous 1-coordinate; new_ones preserves dtype/device.
    ones_shape = list(points_3d.shape)
    ones_shape[-1] = 1
    points_4 = torch.cat([points_3d, points_3d.new_ones(ones_shape)], dim=-1)
    point_2d = points_4 @ proj_mat.T
    # Perspective divide by depth.
    point_2d_res = point_2d[..., :2] / point_2d[..., 2:3]
    if with_depth:
        point_2d_res = torch.cat([point_2d_res, point_2d[..., 2:3]], dim=-1)
    return point_2d_res
@array_converter(apply_to=('points', 'cam2img'))
def points_img2cam(points, cam2img):
    """Project points in image coordinates to camera coordinates.

    Args:
        points (torch.Tensor): 2.5D points in 2D images, [N, 3],
            3 corresponds with x, y in the image and depth.
        cam2img (torch.Tensor): Camera intrinsic matrix. The shape can be
            [3, 3], [3, 4] or [4, 4].

    Returns:
        torch.Tensor: points in 3D space. [N, 3],
            3 corresponds with x, y, z in 3D space.
    """
    assert cam2img.shape[0] <= 4
    assert cam2img.shape[1] <= 4
    assert points.shape[1] == 3
    image_xys = points[:, :2]
    depths = points[:, 2].view(-1, 1)
    # Undo the perspective divide: (u*d, v*d, d).
    unnormed_xys = torch.cat([image_xys * depths, depths], dim=1)
    # Embed the intrinsics in a 4x4 matrix so it is invertible.
    padded_cam2img = torch.eye(4, dtype=image_xys.dtype, device=image_xys.device)
    padded_cam2img[:cam2img.shape[0], :cam2img.shape[1]] = cam2img
    inv_padded_cam2img = torch.inverse(padded_cam2img).transpose(0, 1)
    # Work in homogeneous coordinates, then drop the homogeneous component.
    homo_xys = torch.cat(
        [unnormed_xys, image_xys.new_ones((unnormed_xys.shape[0], 1))], dim=1)
    return torch.mm(homo_xys, inv_padded_cam2img)[:, :3]
def mono_cam_box2vis(cam_box):
    """This is a post-processing function on the bboxes from Mono-3D task. If
    we want to perform projection visualization, we need to:

    1. rotate the box along x-axis for np.pi / 2 (roll)
    2. change orientation from local yaw to global yaw
    3. convert yaw by (np.pi / 2 - yaw)

    After applying this function, we can project and draw it on 2D images.

    Args:
        cam_box (:obj:`CameraInstance3DBoxes`): 3D bbox in camera coordinate
            system before conversion. Could be gt bbox loaded from dataset
            or network prediction output.

    Returns:
        :obj:`CameraInstance3DBoxes`: Box after conversion.
    """
    # BUGFIX: `warning` is the function `logging.warning` (imported at module
    # top), so the previous `warning.warn(...)` raised AttributeError —
    # functions have no `.warn` attribute. Call the function directly.
    warning('DeprecationWarning: The hack of yaw and dimension in the '
            'monocular 3D detection on nuScenes has been removed. The '
            'function mono_cam_box2vis will be deprecated.')
    from . import CameraInstance3DBoxes
    assert isinstance(cam_box, CameraInstance3DBoxes), \
        'input bbox should be CameraInstance3DBoxes!'
    loc = cam_box.gravity_center
    dim = cam_box.dims
    yaw = cam_box.yaw
    feats = cam_box.tensor[:, 7:]
    # Swap the h/w extents: equivalent to rotating the box by pi/2 around x.
    # see also here: https://github.com/open-mmlab/mmdetection3d/blob/master/mmdet3d/datasets/nuscenes_mono_dataset.py#L557 # noqa
    dim[:, [1, 2]] = dim[:, [2, 1]]
    # change local yaw to global yaw for visualization
    # refer to https://github.com/open-mmlab/mmdetection3d/blob/master/mmdet3d/datasets/nuscenes_mono_dataset.py#L164-L166 # noqa
    yaw += torch.atan2(loc[:, 0], loc[:, 2])
    # convert yaw by (-yaw - np.pi / 2): mono 3D box classes such as
    # `NuScenesBox` define rotation differently from `CameraInstance3DBoxes`.
    yaw = -yaw - np.pi / 2
    cam_box = torch.cat([loc, dim, yaw[:, None], feats], dim=1)
    cam_box = CameraInstance3DBoxes(
        cam_box, box_dim=cam_box.shape[-1], origin=(0.5, 0.5, 0.5))
    return cam_box
def get_proj_mat_by_coord_type(img_meta, coord_type):
    """Look up the projection matrix for a coordinate system in image meta info.

    Args:
        img_meta (dict): Meta info.
        coord_type (str): 'DEPTH' or 'CAMERA' or 'LIDAR'.
            Can be case-insensitive.

    Returns:
        torch.Tensor: transformation matrix.
    """
    meta_key_by_type = {'LIDAR': 'lidar2img', 'DEPTH': 'depth2img', 'CAMERA': 'cam2img'}
    normalized_type = coord_type.upper()
    assert normalized_type in meta_key_by_type.keys()
    return img_meta[meta_key_by_type[normalized_type]]
def yaw2local(yaw, loc):
    """Transform global yaw to local yaw (alpha in kitti) in camera
    coordinates, ranges from -pi to pi.

    Args:
        yaw (torch.Tensor): A vector with global yaw of each box.
            shape: (N, )
        loc (torch.Tensor): gravity center of each box.
            shape: (N, 3)

    Returns:
        torch.Tensor: local yaw (alpha in kitti).
    """
    # Subtract the viewing angle of each box center.
    local_yaw = yaw - torch.atan2(loc[:, 0], loc[:, 2])
    # Wrap values that fell outside [-pi, pi] back into range.
    too_large = (local_yaw > np.pi).nonzero(as_tuple=False)
    too_small = (local_yaw < -np.pi).nonzero(as_tuple=False)
    if len(too_large) != 0:
        local_yaw[too_large] -= 2 * np.pi
    if len(too_small) != 0:
        local_yaw[too_small] += 2 * np.pi
    return local_yaw
|
evocodebench_data_33
|
import json
import numpy as np
from agents.microagent import MicroAgent
class AgentSerializer:
    """(De)serializes MicroAgent instances to and from plain dictionaries."""

    @staticmethod
    def to_dict(agent):
        """
        Serialize the MicroAgent object to a dictionary for persistence.
        """
        embedding = agent.purpose_embedding
        # JSON can't hold numpy arrays; persist the embedding as a plain list.
        if isinstance(embedding, np.ndarray):
            embedding = embedding.tolist()
        data = {name: getattr(agent, name) for name in (
            "dynamic_prompt", "purpose", "purpose_embedding", "depth",
            "max_depth", "usage_count", "id", "parent_id", "working_agent",
            "is_prime", "evolve_count", "number_of_code_executions",
            "last_input")}
        data["purpose_embedding"] = embedding
        return data

    @staticmethod
    def from_dict(data, agent_lifecycle, openai_wrapper):
        """
        Deserialize a dictionary back into a MicroAgent object.
        """
        agent = MicroAgent(
            data["dynamic_prompt"],
            data["purpose"],
            data["depth"],
            agent_lifecycle,
            openai_wrapper,
            data["max_depth"],
            data.get("working_agent", False),
            data.get("is_prime", False),
            id=data["id"],
            parent_id=data["parent_id"],
        )
        raw_embedding = data.get("purpose_embedding")
        agent.purpose_embedding = None if raw_embedding is None else np.array(raw_embedding)
        # Counters default to zero for legacy records missing these keys.
        agent.usage_count = data.get("usage_count", 0)
        agent.evolve_count = data.get("evolve_count", 0)
        agent.number_of_code_executions = data.get("number_of_code_executions", 0)
        agent.last_input = data.get("last_input", "")
        return agent
|
evocodebench_data_34
|
from collections import defaultdict
from typing import Any, Dict, List, Tuple
def _pack_greedily(items: List[Any], weights: List[int], num_bins: int) -> Tuple[Dict[int, List[Any]], Dict[int, int]]:
"""Greedily pack items with given weights into bins such that the total weight of each bin is roughly equally
distributed among all bins."""
if len(items) != len(weights):
raise ValueError(f"Items and weights must have the same length, got {len(items)} and {len(weights)}.")
if any(w <= 0 for w in weights):
raise ValueError("All weights must be positive.")
sorted_items_and_weights = sorted(zip(items, weights), key=lambda x: x[1], reverse=True)
bin_contents = defaultdict(list)
bin_weights = {i: 0 for i in range(num_bins)}
for item, weight in sorted_items_and_weights:
min_bin_id = min(bin_weights, key=(lambda x: bin_weights[x]), default=0)
bin_contents[min_bin_id].append(item)
bin_weights[min_bin_id] += weight
return bin_contents, bin_weights
|
evocodebench_data_35
|
import sqlite3
import hashlib
import json
import functools
## Originally from https://www.kevinkatz.io/posts/memoize-to-sqlite
def memoize_to_sqlite(func_name: str, filename: str = "cache.db"):
    """
    Memoization decorator that caches the output of a method in a SQLite
    database, keyed by ``func_name`` and the repr of the call arguments.
    """
    def decorator(func):
        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            # Open the cache only for the duration of the call.
            with SQLiteMemoization(filename) as cache:
                return cache.fetch_or_compute(func, func_name, *args, **kwargs)
        return wrapper
    return decorator
class SQLiteMemoization:
    """Context manager providing SQLite-backed memoization of function results.

    Results are serialized with JSON, so only JSON-serializable return values
    are supported.
    """

    # Sentinel distinguishing "not cached" from a legitimately cached None.
    _MISS = object()

    def __init__(self, filename):
        self.filename = filename
        self.connection = None

    def __enter__(self):
        self.connection = sqlite3.connect(self.filename)
        self._initialize_database()
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        self.connection.close()
        self.connection = None

    def _initialize_database(self):
        # The PRIMARY KEY on `hash` already provides an index; the previous
        # separate `CREATE INDEX cache_ndx` was redundant.
        self.connection.execute(
            "CREATE TABLE IF NOT EXISTS cache (hash TEXT PRIMARY KEY, result TEXT)"
        )

    def fetch_or_compute(self, func, func_name, *args, **kwargs):
        """Return the cached result for this call, computing and caching on a miss."""
        arg_hash = self._compute_hash(func_name, *args, **kwargs)
        result = self._fetch_from_cache(arg_hash)
        # BUGFIX: the old `if result is not None` treated a cached None/null
        # (and any JSON null) as a miss, recomputing it on every call.
        if result is not self._MISS:
            return result
        return self._compute_and_cache_result(func, arg_hash, *args, **kwargs)

    def _compute_hash(self, func_name, *args, **kwargs):
        # Key = function name + repr of all arguments, hashed for fixed size.
        data = f"{func_name}:{repr(args)}:{repr(kwargs)}".encode("utf-8")
        return hashlib.sha256(data).hexdigest()

    def _fetch_from_cache(self, arg_hash):
        cursor = self.connection.cursor()
        cursor.execute("SELECT result FROM cache WHERE hash = ?", (arg_hash,))
        row = cursor.fetchone()
        return json.loads(row[0]) if row else self._MISS

    def _compute_and_cache_result(self, func, arg_hash, *args, **kwargs):
        result = func(*args, **kwargs)
        self._cache_result(arg_hash, result)
        return result

    def _cache_result(self, arg_hash, result):
        # BUGFIX: plain INSERT raised sqlite3.IntegrityError if the same key
        # was cached twice (e.g. two processes racing on the same call);
        # INSERT OR REPLACE makes the write idempotent.
        self.connection.execute(
            "INSERT OR REPLACE INTO cache (hash, result) VALUES (?, ?)",
            (arg_hash, json.dumps(result)),
        )
        self.connection.commit()
|
evocodebench_data_36
|
import math
from typing import Dict, Tuple
import numpy as np
def area(array: np.ndarray) -> float:
    """Shoelace formula for simple polygon area calculation.

    WARNING: This formula only works for simple polygons, i.e planar polygon without self-intersection nor holes.
    These conditions are not checked within this function.

    Args:
        array (np.ndarray): np array representing a polygon as a list of points, i.e. of shape (_, 2).

    Raises:
        ValueError: if the input array does not have shape (_, 2)

    Returns:
        float: Polygon area

    References:
        [1] https://en.wikipedia.org/wiki/Shoelace_formula
        [2] https://stackoverflow.com/questions/24467972/calculate-area-of-polygon-given-x-y-coordinates
    """
    if len(array.shape) != 2 or array.shape[1] != 2:
        raise ValueError(f"Unable to determine the area of a polygon with shape {array.shape}. Expecting (_, 2).")
    xs, ys = array.T
    # Shoelace: half the absolute value of the cross-products of consecutive edges.
    signed_double_area = np.dot(xs, np.roll(ys, 1)) - np.dot(ys, np.roll(xs, 1))
    return float(0.5 * np.abs(signed_double_area))
def estimate_diameter(polygon: np.ndarray) -> float:
    """Estimates the diameter of an arbitrary arc by evaluating the maximum distance between any two points on the arc.

    Args:
        polygon (np.ndarray): Polygon points.

    Returns:
        float: Estimated diameter length.

    Reference:
        [1] https://sparrow.dev/pairwise-distance-in-numpy/
    """
    # Broadcast to an (N, N, 2) difference tensor, then take the largest norm.
    point_diffs = polygon[:, None, :] - polygon[None, :, :]
    pairwise_distances = np.linalg.norm(point_diffs, axis=-1)
    return float(pairwise_distances.max())
def cartesian2polar(xs: np.ndarray, ys: np.ndarray, center_x: float, center_y: float) -> Tuple[np.ndarray, np.ndarray]:
    """Convert xs and ys cartesian coordinates to polar coordinates.

    Args:
        xs (np.ndarray): x values.
        ys (np.ndarray): y values.
        center_x (float): center's x.
        center_y (float): center's y.

    Returns:
        Tuple[np.ndarray, np.ndarray]: Converted coordinates (rhos, phis).
    """
    # Represent each offset from the center as a complex number: modulus is
    # the radius and argument (wrapped to [0, 2*pi)) is the angle.
    as_complex = np.vectorize(complex)(xs - center_x, ys - center_y)
    rhos = np.abs(as_complex)
    phis = np.angle(as_complex) % (2 * np.pi)
    return rhos, phis
def polar2cartesian(
    rhos: np.ndarray, phis: np.ndarray, center_x: float, center_y: float
) -> Tuple[np.ndarray, np.ndarray]:
    """Convert polar coordinates to cartesian coordinates.

    Args:
        rhos (np.ndarray): rho values.
        phis (np.ndarray): phi values.
        center_x (float): center's x.
        center_y (float): center's y.

    Returns:
        Tuple[np.ndarray, np.ndarray]: Converted coordinates (xs, ys).
    """
    # Standard polar-to-cartesian mapping, shifted by the center point.
    return center_x + rhos * np.cos(phis), center_y + rhos * np.sin(phis)
def orientation(moments: Dict[str, float]) -> float:
    """Compute the main orientation of a contour or a binary image given its precomputed cv2 moments.

    Args:
        moments (Dict[str, float]): cv2.moments of desired the binary image or contour.

    Returns:
        float: Main orientation of the shape. The orientation is a float in [-pi/2, pi/2[ representing the signed angle from the x axis.
    """
    mu11 = moments["mu11"]
    mu_diff = moments["mu20"] - moments["mu02"]
    if mu_diff == 0:
        # Degenerate denominator: the axis is either undefined (0) or at
        # +/- pi/4 depending on the sign of mu11.
        theta = 0.0 if mu11 == 0 else math.copysign(np.pi / 4, mu11)
    else:
        # General second-moment orientation formula.
        theta = 0.5 * np.arctan(2 * mu11 / mu_diff)
        if mu_diff < 0:
            theta += np.pi / 2
    # Restricting the angle to [-pi/2, pi/2[
    return np.mod(theta + np.pi / 2, np.pi) - np.pi / 2
def eccentricity(moments: Dict[str, float]) -> float:
    r"""Compute the eccentricity of a contour or a binary image given its precomputed cv2 moments.

    The eccentricity is a number in [0, 1] which caracterises the "roundness" or "linearity" of a shape.
    A perfect circle will have an eccentricity of 0, and an infinite line an eccentricity of 1.
    For ellipses, the eccentricity is calculated as :math:`\frac{\sqrt{a^2 - b^2}}{a^2}`
    with a (resp. b) the semi-major (resp. -minor) axis of the ellipses.

    For `mu20 + mu02 == 0`, i.e. perfect line, the max theoretical value (1.0) is returned

    Args:
        moments (Dict[str, float]): cv2.moments of desired the binary image or contour.

    Returns:
        eccentricity (float): the eccentricity of the contour or binary map.

    Reference:
        [1] https://t1.daumcdn.net/cfile/tistory/15425F4150F4EBFC19
    """
    denominator = moments["mu20"] + moments["mu02"]
    # Degenerate (line-like) shape: return the theoretical maximum.
    if denominator == 0:
        return 1.0
    numerator = (moments["mu20"] - moments["mu02"]) ** 2 + 4 * moments["mu11"] ** 2
    return numerator / denominator ** 2
def apply_weights_1d(scores_1d: np.ndarray, weights_1d: np.ndarray) -> float:
    """Apply weights for score fusion.

    Args:
        scores_1d (np.ndarray): scores to be fused.
        weights_1d (np.ndarray): weights.

    Raises:
        ValueError: if the input 1d arrays do not have the same length,
            are empty, or the weights sum to zero.

    Returns:
        float: fused score.
    """
    if len(scores_1d) != len(weights_1d):
        raise ValueError("Unable to apply weights. Dimension is different between scores and weights.")
    if len(weights_1d) == 0:
        raise ValueError("Unable to apply weights. Empty arrays.")
    total_weight = np.sum(weights_1d)
    if total_weight == 0:
        raise ValueError("Unable to apply weights. Sum of weights is zero.")
    # Weighted mean of the scores.
    return np.sum(np.multiply(scores_1d, weights_1d)) / total_weight
def polygon_length(polygon: np.ndarray, max_point_distance: int = 20) -> float:
    """Compute the length of a polygon represented as a (_, 2)-dimensional numpy array.

    One polygon can include several disjoint arcs, which should be identified as separate so that the distance
    between them is not counted: any inter-point segment longer than `max_point_distance` is discarded.

    WARNING: The input polygon is assumed to be non-looped, i.e. the first and last point are not equal.
    NOTE(review): the roll-by-one difference also produces the segment from the last point back to the
    first; it is only discarded when longer than `max_point_distance` — confirm against the non-looped
    assumption stated above.

    Args:
        polygon (np.ndarray): (_, 2) - shaped numpy array representing a polygon.
        max_point_distance (int): Maximum distance between two points for them to be considered part of the same arc.

    Returns:
        float: length of the polygon, in pixels.
    """
    if polygon.ndim != 2 or polygon.shape[1] != 2:
        raise ValueError(f"This function expects a polygon, i.e. an array of shape (_, 2). Got {polygon.shape}")
    segment_lengths = np.linalg.norm(np.roll(polygon, 1, axis=0) - polygon, axis=1)
    same_arc = segment_lengths < max_point_distance
    return segment_lengths[same_arc].sum()
|
evocodebench_data_37
|
from typing import Callable, List
import cv2
import numpy as np
from pydantic import NonNegativeFloat
from iris.io.class_configs import Algorithm
from iris.io.dataclasses import GeometryMask, GeometryPolygons
from iris.io.errors import VectorizationError
from iris.utils.math import area
def filter_polygon_areas(
    polygons: List[np.ndarray], rel_tr: NonNegativeFloat = 0.03, abs_tr: NonNegativeFloat = 0.0
) -> List[np.ndarray]:
    """Filter out polygons whose area is below either an absolute threshold or a fraction of the largest area.

    Polygons with two points or fewer cannot enclose an area and are assigned a sentinel area of 1.0.

    Args:
        polygons (List[np.ndarray]): List of polygons to filter.
        rel_tr (NonNegativeFloat, optional): Relative threshold. Defaults to 0.03.
        abs_tr (NonNegativeFloat, optional): Absolute threshold. Defaults to 0.0.

    Returns:
        List[np.ndarray]: Filtered polygons' list.
    """
    polygon_areas = [area(polygon) if len(polygon) > 2 else 1.0 for polygon in polygons]
    largest_area = np.max(polygon_areas)
    kept_polygons = []
    for polygon_area, polygon in zip(polygon_areas, polygons):
        if polygon_area > abs_tr and polygon_area / largest_area > rel_tr:
            kept_polygons.append(polygon)
    return kept_polygons
class ContouringAlgorithm(Algorithm):
    """Implementation of a vectorization process through contouring raster image."""

    class Parameters(Algorithm.Parameters):
        """Parameters class of the ContouringAlgorithm class."""

        contour_filters: List[Callable[[List[np.ndarray]], List[np.ndarray]]]

    __parameters_type__ = Parameters

    def __init__(
        self,
        contour_filters: List[Callable[[List[np.ndarray]], List[np.ndarray]]] = [filter_polygon_areas],
    ) -> None:
        """Assign parameters.

        NOTE(review): the mutable default list is shared across calls; it is never mutated
        here, but a None sentinel would be the safer idiom — confirm before changing the API.

        Args:
            contour_filters (List[Callable[[List[np.ndarray]], List[np.ndarray]]], optional): List of filter functions used to filter out noise in polygons.
                Defaults to [filter_polygon_areas].
        """
        super().__init__(contour_filters=contour_filters)

    def run(self, geometry_mask: GeometryMask) -> GeometryPolygons:
        """Contouring vectorization algorithm implementation.

        Args:
            geometry_mask (GeometryMask): Geometry segmentation map.

        Raises:
            VectorizationError: Raised if iris region not segmented or an error occur during iris region processing.

        Returns:
            GeometryPolygons: Geometry polygons points.
        """
        if not np.any(geometry_mask.iris_mask):
            raise VectorizationError("Geometry raster verification failed.")
        return self._find_contours(geometry_mask)

    def _find_contours(self, mask: GeometryMask) -> GeometryPolygons:
        """Vectorize each class of the raster into one polygon per class.

        Args:
            mask (GeometryMask): Raster object.

        Returns:
            GeometryPolygons: Raw contours indicating polygons of different classes.
        """
        return GeometryPolygons(
            pupil_array=self._find_class_contours(mask.pupil_mask.astype(np.uint8)),
            iris_array=self._find_class_contours(mask.filled_iris_mask.astype(np.uint8)),
            eyeball_array=self._find_class_contours(mask.filled_eyeball_mask.astype(np.uint8)),
        )

    def _find_class_contours(self, binary_mask: np.ndarray) -> np.ndarray:
        """Extract the single top-level contour of a binary mask.

        Args:
            binary_mask (np.ndarray): Raster object.

        Raises:
            VectorizationError: Raised if no hierarchy is found or the number of retained contours differs from 1.

        Returns:
            np.ndarray: Contour points array.
        """
        found_contours, hierarchy = cv2.findContours(binary_mask, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
        if hierarchy is None:
            raise VectorizationError("_find_class_contours: No contour hierarchy found at all.")
        # Keep only contours with no parent in the hierarchy (outermost ones).
        top_level_indices = np.flatnonzero(hierarchy[..., 3] == -1)
        candidates = self._filter_contours([np.squeeze(found_contours[idx]) for idx in top_level_indices])
        if len(candidates) != 1:
            raise VectorizationError("_find_class_contours: Number of contours must be equal to 1.")
        return candidates[0]

    def _filter_contours(self, contours: List[np.ndarray]) -> List[np.ndarray]:
        """Apply every configured filter, in order, to the contour list.

        Args:
            contours (List[np.ndarray]): Contours list.

        Returns:
            List[np.ndarray]: Filtered list of contours.
        """
        filtered = contours
        for contour_filter in self.params.contour_filters:
            filtered = contour_filter(filtered)
        return filtered
|
evocodebench_data_38
|
# Copyright The Lightning AI team.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import hashlib
import logging
import os
from logging import Logger
from time import time
from typing import Any, Dict, List, Optional, Tuple, Union

import numpy as np
from torch.utils.data import IterableDataset

from litdata.constants import (
    _DEFAULT_CACHE_DIR,
    _INDEX_FILENAME,
)
from litdata.streaming import Cache
from litdata.streaming.item_loader import BaseItemLoader
from litdata.streaming.resolver import Dir, _resolve_dir
from litdata.streaming.sampler import ChunkedIndex
from litdata.streaming.serializers import Serializer
from litdata.streaming.shuffle import FullShuffle, NoShuffle, Shuffle
from litdata.utilities.env import _DistributedEnv, _is_in_dataloader_worker, _WorkerEnv
# Module-level logger. Use logging.getLogger() rather than instantiating Logger
# directly: direct instantiation bypasses the logging manager, so the logger has
# no parent in the hierarchy and its records never propagate to the root handlers.
logger = logging.getLogger(__name__)
class StreamingDataset(IterableDataset):
    """The streaming dataset can be used once your data have been optimised using the DatasetOptimiser class."""

    def __init__(
        self,
        input_dir: Union[str, "Dir"],
        item_loader: Optional[BaseItemLoader] = None,
        shuffle: bool = False,
        drop_last: Optional[bool] = None,
        seed: int = 42,
        serializers: Optional[Dict[str, Serializer]] = None,
        max_cache_size: Union[int, str] = "100GB",
    ) -> None:
        """The streaming dataset can be used once your data have been optimised using the DatasetOptimiser class.

        Arguments:
            input_dir: Path to the folder where the input data is stored.
            item_loader: The logic to load an item from a chunk.
            shuffle: Whether to shuffle the data.
            drop_last: If `True`, drops the last items to ensure that
                all processes/workers return the same amount of data.
                The argument `drop_last` is set to `True` in a distributed setting
                and `False` otherwise.
            seed: Random seed for shuffling.
            serializers: The serializers used to serialize and deserialize the chunks.
            max_cache_size: The maximum cache size used by the StreamingDataset.
        """
        super().__init__()
        if not isinstance(shuffle, bool):
            raise ValueError(f"Shuffle should be a boolean. Found {shuffle}")
        input_dir = _resolve_dir(input_dir)
        self.input_dir = input_dir
        self.item_loader = item_loader
        self.shuffle: bool = shuffle
        self.distributed_env = _DistributedEnv.detect()
        if self.distributed_env.world_size > 1:
            if drop_last is False:
                # User explicitly disabled drop_last in a distributed run: warn, since uneven
                # per-rank sample counts can hang collective operations.
                # NOTE(review): `logger.warn` is deprecated in favor of `logger.warning`.
                logger.warn(
                    "You're operating within a distributed environment and have disabled the `drop_last` option. "
                    "Please note that this configuration may lead to training interruptions if your system depends "
                    "on distributed collectives."
                )
            else:
                drop_last = True
        # Outside a distributed run, an unset drop_last defaults to False.
        self.drop_last = drop_last or False
        self.seed = seed
        self.max_cache_size = max_cache_size
        # Per-worker state, created lazily in __iter__ / __getitem__.
        self.cache: Optional[Cache] = None
        self.worker_env: Optional[_WorkerEnv] = None
        self.worker_chunks: List[int] = []
        self.worker_intervals: List[List[int]] = []
        # Iteration cursors: remaining indexes of the current chunk, position in the
        # chunk list, samples yielded overall and within the current chunk.
        self.current_indexes: List[int] = []
        self.chunk_index = 0
        self.num_chunks: Optional[int] = None
        self.global_index = 0
        self.index = 0
        self.has_triggered_download = False
        self.min_items_per_replica: Optional[int] = None
        self.current_epoch = 1
        self.random_state = None
        self.shuffler: Optional[Shuffle] = None
        self.serializers = serializers
        # Checkpoint payload set via `load_state_dict`; consumed on the next __iter__.
        self._state_dict: Optional[Dict[str, Any]] = None

    def set_shuffle(self, shuffle: bool) -> None:
        """Enable or disable shuffling for subsequent iterations."""
        self.shuffle = shuffle

    def set_epoch(self, current_epoch: int) -> None:
        """Set the current epoch to the dataset on epoch starts.

        When using the StreamingDataLoader, this is done automatically
        """
        # If the state dict has been reloaded, don't override the current epoch
        # The StreamingDataloader would clean this out
        if self._state_dict is None:
            self.current_epoch = current_epoch

    def _create_cache(self, worker_env: _WorkerEnv) -> Cache:
        """Build the chunk cache for this dataset, resolving special paths to a local cache dir.

        Note: the `worker_env` parameter is not used in the body.
        """
        if _should_replace_path(self.input_dir.path):
            cache_path = _try_create_cache_dir(
                input_dir=self.input_dir.path if self.input_dir.path else self.input_dir.url
            )
            if cache_path is not None:
                self.input_dir.path = cache_path
        cache = Cache(
            input_dir=self.input_dir,
            item_loader=self.item_loader,
            chunk_bytes=1,
            serializers=self.serializers,
            max_cache_size=self.max_cache_size,
        )
        cache._reader._try_load_config()
        if not cache.filled:
            raise ValueError(
                f"The provided dataset `{self.input_dir}` doesn't contain any {_INDEX_FILENAME} file."
                " HINT: Did you successfully optimize a dataset to the provided `input_dir`?"
            )
        return cache

    def _create_shuffler(self, cache: Cache) -> Shuffle:
        """Return the shuffler for this run; seed/drop_last come from a restored state when present."""
        seed = self.seed
        drop_last = self.drop_last
        if self._state_dict is not None:
            state: Dict[str, Any] = self._state_dict
            seed = state["seed"]
            drop_last = state["drop_last"]
        return FullShuffle(cache, seed, drop_last) if self.shuffle else NoShuffle(cache, seed, drop_last)

    def __len__(self) -> int:
        """Return the number of samples this replica will yield for the current epoch."""
        if self.shuffler is None:
            cache = self._create_cache(worker_env=_WorkerEnv.detect())
            self.shuffler = self._create_shuffler(cache)
        return self.shuffler.get_len(self.distributed_env, self.current_epoch)

    def __iter__(self) -> "StreamingDataset":
        """Assign this worker its chunk subset and reset the iteration cursors."""
        # When the StreamingDataset is used within map or optimize, let's refetch the distributed env.
        if os.getenv("DATA_OPTIMIZER_GLOBAL_RANK"):
            self.distributed_env = _DistributedEnv.detect()
        self.worker_env = _WorkerEnv.detect()
        self.cache = self._create_cache(worker_env=self.worker_env)
        self.shuffler = self._create_shuffler(self.cache)
        # Handle restart
        if self._state_dict:
            self._validate_state_dict()
            state: Dict[str, Any] = self._state_dict
            self.current_epoch = state["current_epoch"]
            chunks_per_replica, intervals_per_replica = self.shuffler.get_chunks_and_intervals_per_ranks(
                self.distributed_env, self.current_epoch
            )
            chunks_replica = chunks_per_replica[self.distributed_env.global_rank % self.distributed_env.world_size]
            intervals_replica = intervals_per_replica[self.distributed_env.global_rank % self.distributed_env.world_size]
        # Handle restart
        if self._state_dict:
            # Resume replays the sampling and restores the worker cursors itself.
            self._resume(chunks_replica, intervals_replica)
        else:
            chunks_per_replica, intervals_per_replica = self.shuffler.get_chunks_and_intervals_per_ranks(
                self.distributed_env, self.current_epoch
            )
            chunks_replica = chunks_per_replica[self.distributed_env.global_rank % self.distributed_env.world_size]
            intervals_replica = intervals_per_replica[
                self.distributed_env.global_rank % self.distributed_env.world_size
            ]
            # Round-robin assignment of this replica's chunks across dataloader workers.
            self.worker_chunks = []
            self.worker_intervals = []
            for i, (chunk_index, chunk_interval) in enumerate(zip(chunks_replica, intervals_replica)):
                if i % self.worker_env.world_size != self.worker_env.rank:
                    continue
                self.worker_chunks.append(chunk_index)
                self.worker_intervals.append(chunk_interval)
            self.num_chunks = len(self.worker_chunks)
            self.current_indexes = []
            self.chunk_index = 0
            self.global_index = 0
            self.index = 0
        self.has_triggered_download = False
        self.last_time = time()
        return self

    def _resume(self, chunks_replica: List[int], intervals_replica: List[Any]) -> None:
        """Restore the iteration cursors from a checkpoint by replaying the dataloader's sampling."""
        assert self._state_dict
        assert self.worker_env
        assert self.shuffler
        state: Dict[str, Any] = self._state_dict
        num_workers = state["num_workers"]
        batch_size = state["batch_size"]
        # TODO: Implement elastic sampling where the number of workers, ranks can change.
        num_samples_yielded = self._state_dict["num_samples_yielded"]
        # replay sampling from each worker / chunks using the batch size
        workers_chunks, workers_intervals = _associate_chunks_to_workers(
            num_workers, self.worker_env, chunks_replica, intervals_replica
        )
        indexes = _replay_sampling(num_samples_yielded, batch_size, num_workers)
        chunks_index, indexes = _replay_chunks_sampling(workers_intervals, indexes)
        # select the chunks and intervals associated to this worker
        worker_rank = self.worker_env.rank
        self.num_chunks = len(workers_intervals[worker_rank])
        self.chunk_index = chunks_index[worker_rank]
        self.worker_chunks = workers_chunks[worker_rank]
        self.worker_intervals = workers_intervals[worker_rank]
        # replay the indexes for the current chunks
        interval = self.worker_intervals[self.chunk_index]
        current_indexes = np.arange(interval[0], interval[1])
        # re-shuffle the indexes
        current_indexes = self.shuffler(current_indexes, self.num_chunks, self.current_epoch, self.chunk_index)
        # skip any indexes already consumed
        current_indexes = current_indexes[indexes[worker_rank] :]
        self.current_indexes = current_indexes
        self.global_index = num_samples_yielded
        # bump the chunk_index
        self.chunk_index += 1

    def __getitem__(self, index: Union[ChunkedIndex, int]) -> Any:
        """Fetch one sample from the cache; lazily create the cache/shuffler on first use."""
        if self.cache is None:
            self.worker_env = _WorkerEnv.detect()
            self.cache = self._create_cache(worker_env=self.worker_env)
            self.shuffler = self._create_shuffler(self.cache)
        if isinstance(index, int):
            index = ChunkedIndex(index, self.cache._get_chunk_index_from_index(index))
        return self.cache[index]

    def __next__(self) -> Any:
        """Return the next sample, refilling the per-chunk index list when exhausted."""
        # Prevent to create more batch on a given process
        if self.global_index >= len(self):
            self.current_epoch += 1
            raise StopIteration
        # Lazily re-populate the interval to reduce memory usage.
        if len(self.current_indexes) == 0:
            if self.chunk_index == self.num_chunks:
                self.current_epoch += 1
                raise StopIteration
            # reset index
            self.index = 0
            interval = self.worker_intervals[self.chunk_index]
            current_indexes = np.arange(interval[0], interval[1])
            assert self.shuffler is not None
            assert self.num_chunks is not None
            self.current_indexes = self.shuffler(current_indexes, self.num_chunks, self.current_epoch, self.chunk_index)
            self.chunk_index += 1
        # Get the first index
        index = self.current_indexes.pop(0)
        # Call the `__getitem__` method.
        data = self.__getitem__(
            ChunkedIndex(
                index=index,
                chunk_index=self.worker_chunks[self.chunk_index - 1],
                # We provide the chunks indexes only on the first call.
                chunk_indexes=None if self.has_triggered_download else self.worker_chunks,
                # NOTE(review): `self.chunk_index - 1` is at most len(self.worker_intervals) - 1,
                # so this comparison can never be True — verify whether `len(...) - 1` was intended.
                is_last_index=(self.chunk_index - 1) == len(self.worker_intervals) and len(self.current_indexes) == 1,
            )
        )
        self.has_triggered_download = True
        self.global_index += 1
        self.index += 1
        return data

    def state_dict(self, num_samples_yielded: int, num_workers: int, batch_size: int) -> Dict[str, Any]:
        """Return a checkpointable snapshot of the dataset's progress (main process only)."""
        if _is_in_dataloader_worker():
            raise RuntimeError("The method `state_dict` should only be called in the main process.")
        if self._state_dict is not None:
            self._state_dict["num_samples_yielded"] = num_samples_yielded
            return self._state_dict
        state = {
            "num_samples_yielded": num_samples_yielded,
            "num_workers": num_workers,
            "batch_size": batch_size,
            "current_epoch": self.current_epoch,
            "input_dir_path": self.input_dir.path,
            "input_dir_url": self.input_dir.url,
            "item_loader": self.item_loader.state_dict() if self.item_loader else None,
            "drop_last": self.drop_last,
            "seed": self.seed,
            "world_size": self.distributed_env.world_size,
            "shuffle": self.shuffle,
        }
        return state

    def load_state_dict(self, state_dict: Dict[str, Any]) -> None:
        """Store a checkpoint for restoration; the actual restore happens inside the workers."""
        if state_dict:
            # the state is restored within the workers
            self._state_dict = state_dict

    def _validate_state_dict(self) -> None:
        """Check the restored state is compatible with the current configuration; raise on mismatch."""
        assert self._state_dict
        assert self.worker_env
        assert self.cache
        state: Dict[str, Any] = self._state_dict
        if state["shuffle"] != self.shuffle:
            raise ValueError(
                "The provided `shuffle` state doesn't match the current one. "
                f"Found `{self.shuffle}` instead of `{state['shuffle']}`."
            )
        if state["num_workers"] != self.worker_env.world_size:
            raise ValueError(
                "The provided `num_workers` state doesn't match the current one. "
                f"Found `{self.worker_env.world_size}` instead of `{state['num_workers']}`."
            )
        # Note: We need to check whether the path has been resolved to its associated cache.
        # In this case, validate the cache folder is the same.
        if _should_replace_path(state["input_dir_path"]):
            cache_path = _try_create_cache_dir(
                input_dir=state["input_dir_path"] if state["input_dir_path"] else state["input_dir_url"]
            )
            if cache_path != self.input_dir.path:
                raise ValueError(
                    "The provided `input_dir` path state doesn't match the current one. "
                    f"Found `{self.input_dir.path}` instead of `{cache_path}`."
                )
        elif state["input_dir_path"] != self.input_dir.path:
            raise ValueError(
                "The provided `input_dir` path state doesn't match the current one. "
                f"Found `{self.input_dir.path}` instead of `{state['input_dir_path']}`."
            )
        if state["input_dir_url"] != self.input_dir.url:
            raise ValueError(
                "The provided `input_dir` URL state doesn't match the current one. "
                f"Found `{self.input_dir.url}` instead of `{state['input_dir_url']}`."
            )
        if state["seed"] != self.seed:
            raise ValueError(
                "The provided `seed` state doesn't match the current one. "
                f"Found `{self.seed}` instead of `{state['seed']}`."
            )
        if self.item_loader and state["item_loader"] != self.item_loader.state_dict():
            raise ValueError(
                "The provided `item_loader` state doesn't match the current one. "
                f"Found `{self.item_loader.state_dict()}` instead of `{state['item_loader']}`."
            )
        if state["drop_last"] != self.drop_last:
            raise ValueError(
                "The provided `drop_last` state doesn't match the current one. "
                f"Found `{self.drop_last}` instead of `{state['drop_last']}`."
            )
def _try_create_cache_dir(input_dir: Optional[str]) -> Optional[str]:
    """Create (if needed) and return the cache directory derived from *input_dir*.

    The directory name is the MD5 digest of the input path, so distinct datasets map
    to distinct cache folders. When both Lightning cloud environment variables are
    present, a fixed `/cache/chunks` root is used; otherwise the default local root.
    """
    digest = hashlib.md5((input_dir or "").encode()).hexdigest()
    on_lightning_cloud = "LIGHTNING_CLUSTER_ID" in os.environ and "LIGHTNING_CLOUD_PROJECT_ID" in os.environ
    root = os.path.join("/cache", "chunks") if on_lightning_cloud else _DEFAULT_CACHE_DIR
    cache_dir = os.path.join(root, digest)
    os.makedirs(cache_dir, exist_ok=True)
    return cache_dir
def _should_replace_path(path: Optional[str]) -> bool:
"""Whether the input path is a special path to be replaced."""
if path is None or path == "":
return True
return path.startswith("/teamspace/datasets/") or path.startswith("/teamspace/s3_connections/")
def is_integer(value: str) -> bool:
    """Return True if *value* can be parsed as a base-10 integer.

    Only the exceptions `int()` actually raises for bad input are caught
    (ValueError for malformed strings, TypeError for non-string-like objects),
    so unrelated programming errors are not silently swallowed.
    """
    try:
        int(value)
    except (TypeError, ValueError):
        return False
    return True
def _associate_chunks_to_workers(
    num_workers: int, worker_env: _WorkerEnv, chunks_replica: List[int], intervals_replica: List[Any]
) -> Any:
    """Distribute the replica's chunks (and their intervals) round-robin over workers.

    Chunk at position `i` is assigned to worker `i % worker_env.world_size`; the
    result maps each worker index in `range(num_workers)` to its chunk ids and intervals.
    """
    workers_chunks = {}
    workers_intervals = {}
    for worker_idx in range(num_workers):
        assigned = [
            (chunk_index, chunk_interval)
            for position, (chunk_index, chunk_interval) in enumerate(zip(chunks_replica, intervals_replica))
            if position % worker_env.world_size == worker_idx
        ]
        workers_chunks[worker_idx] = [chunk for chunk, _ in assigned]
        workers_intervals[worker_idx] = [interval for _, interval in assigned]
    return workers_chunks, workers_intervals
def _replay_sampling(num_samples_yielded: int, batch_size: int, num_workers: int) -> Dict[int, int]:
"""This function replays the sampling from the dataloader."""
divisible_num_batches_yielded = num_samples_yielded // (num_workers * batch_size)
indexes = {}
for worker_idx in range(num_workers):
indexes[worker_idx] = divisible_num_batches_yielded * batch_size
num_samples_yielded = num_samples_yielded - (num_workers * divisible_num_batches_yielded * batch_size)
# take care of the reminder
worker_idx = 0 # reset the worker_idx
while True:
if num_samples_yielded >= batch_size:
indexes[worker_idx] += batch_size
worker_idx = (worker_idx + 1) % num_workers
num_samples_yielded -= batch_size
else:
indexes[worker_idx] += num_samples_yielded
break
return indexes
def _replay_chunks_sampling(
workers_intervals: Dict[int, List[Any]], indexes: Dict[int, int]
) -> Tuple[Dict[int, int], Dict[int, int]]:
chunks_index = {}
for worker_idx in range(len(workers_intervals)):
chunks_index[worker_idx] = 0
for worker_idx, intervals in workers_intervals.items():
for interval in intervals:
size = interval[-1] - interval[0]
if indexes[worker_idx] >= size:
indexes[worker_idx] -= size
chunks_index[worker_idx] += 1
return chunks_index, indexes
|
evocodebench_data_39
|
import functools
import time
from typing import List, Iterable, Tuple, Any, Optional, Callable
import pandas as pd
def measure_speed(func, *args, **kwargs):
    """
    Measure the execution time of ``func(*args, **kwargs)``.

    Uses ``time.perf_counter`` rather than ``time.time``: it is monotonic and has the
    highest available resolution, so measurements cannot go negative when the system
    clock is adjusted and short calls are timed accurately.

    :param func: The callable to time.
    :param args: Positional arguments forwarded to ``func``.
    :param kwargs: Keyword arguments forwarded to ``func``.
    :return: A tuple of (func's return value, elapsed seconds).
    """
    start_time = time.perf_counter()
    result = func(*args, **kwargs)
    elapsed = time.perf_counter() - start_time
    return result, elapsed
def avoid_empty_result(return_index: List[int]):
    """
    Decorator that falls back to the original arguments when a function returns nothing useful.

    The fallback triggers when the wrapped function returns a falsy value (``None``,
    ``[]``, ``()``, ...) or a tuple whose members are all falsy. In that case the
    wrapper returns ``[args[i] for i in return_index]`` — the untouched positional
    inputs at the given indexes. Otherwise the function's own result is returned.

    :param return_index: The index of the result to be returned when there is no result.
    :return: The origin results or the results from the function.
    """
    def decorator_avoid_empty_result(func: Callable):
        @functools.wraps(func)
        def wrapper(*args, **kwargs) -> List:
            outcome = func(*args, **kwargs)
            if isinstance(outcome, tuple):
                is_empty = all(not bool(item) for item in outcome)
            else:
                is_empty = not bool(outcome)
            if is_empty:
                return [args[index] for index in return_index]
            return outcome
        return wrapper
    return decorator_avoid_empty_result
@avoid_empty_result([0, 3])
def filter_by_threshold(results, value, threshold, metadatas=None) -> Tuple[List, List]:
    """
    Filter results by value's threshold.

    Entries whose value exceeds the threshold are dropped; the decorator restores the
    original ``results`` and ``metadatas`` when nothing survives the filter.

    :param results: The result list to be filtered.
    :param value: The value list to be filtered.
        It must have the same length with results.
    :param threshold: The threshold value; entries with value <= threshold are kept.
    :param metadatas: The metadata of each result.
    :return: Filtered list of results and filtered list of metadatas.
        Metadatas will be returned even if you did not give input metadatas.
    :rtype: Tuple[List, List]
    """
    if metadatas is None:
        metadatas = [None] * len(results)
    assert len(results) == len(value), "results and value must have the same length."
    kept = [(result, metadata) for result, v, metadata in zip(results, value, metadatas) if v <= threshold]
    if not kept:
        return [], []
    filtered_results, filtered_metadatas = zip(*kept)
    return list(filtered_results), list(filtered_metadatas)
def select_best_average(results: List[pd.DataFrame], columns: Iterable[str],
                        metadatas: Optional[List[Any]] = None) -> Tuple[pd.DataFrame, Any]:
    """
    Select the best result by average value among given columns.

    Each DataFrame is scored by the mean (over rows) of its per-row mean over the
    given columns; the DataFrame with the highest score wins. Ties go to the first.

    :param results: The list of results.
        Each result must be pd.DataFrame.
    :param columns: Column names to be averaged.
        Standard to select the best result.
    :param metadatas: The metadata of each result.
        It will select one metadata with the best result.
    :return: The best result and the best metadata.
        The metadata will be returned even if you did not give input 'metadatas' parameter.
    :rtype: Tuple[pd.DataFrame, Any]
    """
    if metadatas is None:
        metadatas = [None] * len(results)
    assert len(results) == len(metadatas), "results and module_filename must have the same length."
    assert all(isinstance(result, pd.DataFrame) for result in results), \
        "results must be pd.DataFrame."
    assert all(column in result.columns for result in results for column in columns), \
        "columns must be in the columns of results."
    averages = [df[columns].mean(axis=1).mean() for df in results]
    best_index = max(range(len(averages)), key=averages.__getitem__)
    return results[best_index], metadatas[best_index]
|
evocodebench_data_40
|
import math
from typing import Dict, Tuple
import numpy as np
def area(array: np.ndarray) -> float:
    """Shoelace formula for simple polygon area calculation.

    WARNING: This formula only works for simple polygons, i.e planar polygon without self-intersection nor holes.
    These conditions are not checked within this function.

    Args:
        array (np.ndarray): np array representing a polygon as a list of points, i.e. of shape (_, 2).

    Raises:
        ValueError: if the input array does not have shape (_, 2)

    Returns:
        float: Polygon area

    References:
        [1] https://en.wikipedia.org/wiki/Shoelace_formula
        [2] https://stackoverflow.com/questions/24467972/calculate-area-of-polygon-given-x-y-coordinates
    """
    if array.ndim != 2 or array.shape[1] != 2:
        raise ValueError(f"Unable to determine the area of a polygon with shape {array.shape}. Expecting (_, 2).")
    xs, ys = array.T
    cross_sum = np.dot(xs, np.roll(ys, 1)) - np.dot(ys, np.roll(xs, 1))
    return float(0.5 * np.abs(cross_sum))
def estimate_diameter(polygon: np.ndarray) -> float:
    """Estimates the diameter of an arbitrary arc by evaluating the maximum distance between any two points on the arc.

    Args:
        polygon (np.ndarray): Polygon points, shape (_, 2).

    Returns:
        float: Estimated diameter length.

    Reference:
        [1] https://sparrow.dev/pairwise-distance-in-numpy/
    """
    # Broadcasting builds the full (N, N) pairwise-distance matrix; O(N^2) memory.
    pairwise_distances = np.linalg.norm(polygon[:, None, :] - polygon[None, :, :], axis=-1)
    return float(pairwise_distances.max())
def cartesian2polar(xs: np.ndarray, ys: np.ndarray, center_x: float, center_y: float) -> Tuple[np.ndarray, np.ndarray]:
    """Convert xs and ys cartesian coordinates to polar coordinates.

    Args:
        xs (np.ndarray): x values.
        ys (np.ndarray): y values.
        center_x (float): center's x.
        center_y (float): center's y.

    Returns:
        Tuple[np.ndarray, np.ndarray]: Converted coordinates (rhos, phis), with phis in [0, 2*pi).
    """
    x_rel: np.ndarray = xs - center_x
    y_rel: np.ndarray = ys - center_y
    # np.hypot / np.arctan2 compute the same modulus and angle as routing through
    # complex numbers (np.abs / np.angle), but vectorize natively instead of
    # np.vectorize(complex)'s per-element Python-level calls.
    rho = np.hypot(x_rel, y_rel)
    phi = np.arctan2(y_rel, x_rel) % (2 * np.pi)
    return rho, phi
def polar2cartesian(
    rhos: np.ndarray, phis: np.ndarray, center_x: float, center_y: float
) -> Tuple[np.ndarray, np.ndarray]:
    """Convert polar coordinates to cartesian coordinates.

    Args:
        rhos (np.ndarray): radial distances.
        phis (np.ndarray): angles, in radians.
        center_x (float): center's x.
        center_y (float): center's y.

    Returns:
        Tuple[np.ndarray, np.ndarray]: Converted coordinates (xs, ys).
    """
    cos_phis = np.cos(phis)
    sin_phis = np.sin(phis)
    return center_x + rhos * cos_phis, center_y + rhos * sin_phis
def orientation(moments: Dict[str, float]) -> float:
    """Compute the main orientation of a contour or a binary image given its precomputed cv2 moments.

    Args:
        moments (Dict[str, float]): cv2.moments of desired the binary image or contour.

    Returns:
        float: Main orientation of the shape. The orientation is a float in [-pi/2, pi/2[ representing the signed angle from the x axis.
    """
    mu11 = moments["mu11"]
    diff = moments["mu20"] - moments["mu02"]
    if diff == 0:
        # Degenerate denominator: the angle is 0 or +/- 45 degrees, following mu11's sign.
        angle = 0.0 if mu11 == 0 else math.copysign(np.pi / 4, mu11)
    else:
        # General second-moment formula.
        angle = 0.5 * np.arctan(2 * mu11 / diff)
        if diff < 0:
            angle += np.pi / 2
    # Wrap the result into [-pi/2, pi/2[.
    return np.mod(angle + np.pi / 2, np.pi) - np.pi / 2
def eccentricity(moments: Dict[str, float]) -> float:
    r"""Compute the eccentricity of a contour or a binary image given its precomputed cv2 moments.

    The eccentricity is a number in [0, 1] which characterises the "roundness" or "linearity" of a shape.
    A perfect circle will have an eccentricity of 0, and an infinite line an eccentricity of 1.
    For ellipses, the eccentricity is calculated as :math:`\frac{\sqrt{a^2 - b^2}}{a^2}`
    with a (resp. b) the semi-major (resp. -minor) axis of the ellipses.

    For `mu20 + mu02 == 0`, i.e. perfect line, the max theoretical value (1.0) is returned.

    Args:
        moments (Dict[str, float]): cv2.moments of desired the binary image or contour.

    Returns:
        eccentricity (float): the eccentricity of the contour or binary map.

    Reference:
        [1] https://t1.daumcdn.net/cfile/tistory/15425F4150F4EBFC19
    """
    mu20, mu02, mu11 = moments["mu20"], moments["mu02"], moments["mu11"]
    if mu20 + mu02 == 0:
        # Degenerate "perfect line": the denominator vanishes, return the theoretical maximum.
        return 1.0
    return ((mu20 - mu02) ** 2 + 4 * mu11 ** 2) / (mu20 + mu02) ** 2
def apply_weights_1d(scores_1d: np.ndarray, weights_1d: np.ndarray) -> float:
    """Fuse 1D scores into a single value via a weighted average.

    Args:
        scores_1d (np.ndarray): scores to be fused.
        weights_1d (np.ndarray): weights, one per score.

    Raises:
        ValueError: if the arrays differ in length, are empty, or the weights sum to zero.

    Returns:
        float: fused score.
    """
    if len(scores_1d) != len(weights_1d):
        raise ValueError("Unable to apply weights. Dimension is different between scores and weights.")
    if len(weights_1d) == 0:
        raise ValueError("Unable to apply weights. Empty arrays.")
    weight_total = np.sum(weights_1d)
    if weight_total == 0:
        raise ValueError("Unable to apply weights. Sum of weights is zero.")
    return np.sum(np.multiply(scores_1d, weights_1d)) / weight_total
def polygon_length(polygon: np.ndarray, max_point_distance: int = 20) -> float:
    """Compute the length of a polygon represented as a (_, 2)-dimensional numpy array.

    One polygon can include several disjoint arcs, which should be identified as separate so that the distance
    between them is not counted: any inter-point segment longer than `max_point_distance` is discarded.

    WARNING: The input polygon is assumed to be non-looped, i.e. the first and last point are not equal.
    NOTE(review): the roll-by-one difference also produces the segment from the last point back to
    the first; it is only discarded when longer than `max_point_distance` — confirm against the
    non-looped assumption stated above.

    Args:
        polygon (np.ndarray): (_, 2) - shaped numpy array representing a polygon.
        max_point_distance (int): Maximum distance between two points for them to be considered part of the same arc.

    Returns:
        float: length of the polygon, in pixels.
    """
    if polygon.ndim != 2 or polygon.shape[1] != 2:
        raise ValueError(f"This function expects a polygon, i.e. an array of shape (_, 2). Got {polygon.shape}")
    segment_lengths = np.linalg.norm(np.roll(polygon, 1, axis=0) - polygon, axis=1)
    within_same_arc = segment_lengths < max_point_distance
    return segment_lengths[within_same_arc].sum()
|
evocodebench_data_41
|
import torch
from typing import Tuple, Callable, List
def matchup_channels(t: torch.Tensor, w: torch.Tensor):
    """Align a sample tensor ``t`` with a weight tensor ``w`` along the last axis.

    When ``t`` has one more dimension than ``w``, the first entry along the extra
    trailing axis is taken. Then, if ``t``'s last dimension is not exactly
    ``w.shape[-1] + 1``, a column of ones is appended (endpoints vs. bin weights).
    """
    if t.ndim == w.ndim + 1:
        # Collapse the extra trailing axis by taking its first entry.
        t = t[..., 0]
    if t.shape[-1] != w.shape[-1] + 1:
        ones_column = torch.ones_like(t[..., -1:])
        t = torch.cat([t, ones_column], dim=-1)  # 65
    return t, w
@torch.jit.script
def interpolate(x: torch.Tensor, xp: torch.Tensor, fp: torch.Tensor) -> torch.Tensor:
    """One-dimensional linear interpolation for monotonically increasing sample
    points.

    Returns the one-dimensional piecewise linear interpolant to a function with
    given discrete data points :math:`(xp, fp)`, evaluated at :math:`x`.

    Args:
        x: the :math:`x`-coordinates at which to evaluate the interpolated
            values.
        xp: the :math:`x`-coordinates of the data points, must be increasing.
        fp: the :math:`y`-coordinates of the data points, same length as `xp`.

    Returns:
        the interpolated values, same size as `x`.
    """
    # Add a leading batch axis so a single query vector broadcasts against batched samples.
    if x.ndim == xp.ndim - 1:
        x = x[None]
    # Per-segment line coefficients; the epsilon guards against zero-width intervals.
    m = (fp[..., 1:] - fp[..., :-1]) / (xp[..., 1:] - xp[..., :-1] + 1e-8)  # slope
    b = fp[..., :-1] - (m * xp[..., :-1])
    # For each query, count how many sample points it is >= to; minus one gives the
    # segment index (a broadcasted equivalent of searchsorted).
    indices = torch.sum(torch.ge(x[..., :, None], xp[..., None, :]), -1) - 1  # torch.ge: x[i] >= xp[i] ? true: false
    # Clamp so queries outside [xp[0], xp[-1]] extrapolate with the first/last segment.
    indices = torch.clamp(indices, 0, m.shape[-1] - 1)
    return m.gather(dim=-1, index=indices) * x + b.gather(dim=-1, index=indices)
@torch.jit.script
def integrate_weights(w: torch.Tensor):
    """Compute the cumulative sum of w, assuming all weight vectors sum to 1.

    The output's size on the last dimension is one greater than that of the
    input, because we're computing the integral corresponding to the endpoints
    of a step function, not the integral of the interior/bin values.

    Args:
        w: Tensor, integrated along the last axis. This is assumed to sum to 1
            along the last axis, and this function will (silently) break if
            that is not the case.

    Returns:
        cw0: Tensor, the integral of w, with cw0[..., 0] = 0 and cw0[..., -1] = 1.
    """
    # Interior CDF values; the clip guards against accumulated float error > 1.
    interior = torch.cumsum(w[..., :-1], dim=-1).clip(max=1.0)
    end_shape = interior.shape[:-1] + (1,)
    # Pin the two endpoints to exactly 0 and 1.
    zeros = interior.new_zeros(end_shape)
    ones = interior.new_ones(end_shape)
    return torch.cat([zeros, interior, ones], dim=-1)
@torch.jit.script
def weighted_percentile(t: torch.Tensor, w: torch.Tensor, ps: List[float]):
    """Compute the weighted percentiles of a step function. w's must sum to 1."""
    t, w = matchup_channels(t, w)
    cw = integrate_weights(w)
    # Flatten all leading dimensions so the interpolation runs over a batch of
    # 1-D CDFs, then restore the original leading shape afterwards.
    flat_cw = cw.reshape([-1, cw.shape[-1]])
    flat_t = t.reshape([-1, t.shape[-1]])
    queries = torch.as_tensor(ps).to(t, non_blocking=True)
    flat_out = interpolate(queries, flat_cw, flat_t)
    return flat_out.reshape(cw.shape[:-1] + (len(ps),))
def s_vals_to_z_vals(s: torch.Tensor,
                     tn: torch.Tensor,
                     tf: torch.Tensor,
                     g: Callable[[torch.Tensor], torch.Tensor] = lambda x: 1 / x,
                     ig: Callable[[torch.Tensor], torch.Tensor] = lambda x: 1 / x,
                     ):
    """Map normalized ray depths `s` back to metric depths `t`.

    Linearly blends g(tn) and g(tf) by `s`, then applies the inverse warp `ig`.
    """
    blended = s * g(tf) + (1 - s) * g(tn)
    return ig(blended)
def z_vals_to_s_vals(t: torch.Tensor,
                     tn: torch.Tensor,
                     tf: torch.Tensor,
                     g: Callable[[torch.Tensor], torch.Tensor] = lambda x: 1 / x,
                     ):
    """Map metric ray depths `t` to normalized depths `s` via the warp `g`."""
    # The epsilon avoids division by zero when g(tf) == g(tn).
    numerator = g(t) - g(tn)
    denominator = g(tf) - g(tn) + 1e-8
    return numerator / denominator
# Hierarchical sampling (section 5.2)
def searchsorted(a: torch.Tensor, v: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
    """Find indices where v should be inserted into a to maintain order.

    Behaves like jnp.searchsorted (its second output matches
    jnp.searchsorted's output if all elements of v are in [a[0], a[-1]]) but
    trades memory for compute.

    Args:
        a: tensor, the sorted reference points scanned to see where v lies.
        v: tensor, the query points being (notionally) inserted into a. Need
            not be sorted; all but the last dimension should match or expand
            to those of a, the last dimension can differ.

    Returns:
        (idx_lo, idx_hi), where a[idx_lo] <= v < a[idx_hi], unless v is out of
        the range [a[0], a[-1]], in which case idx_lo and idx_hi are both the
        first or last index of a.
    """
    idx = torch.arange(a.shape[-1], device=a.device)
    # Pairwise comparison table: ge[..., i, j] == (v[..., j] >= a[..., i]).
    ge = v[..., None, :] >= a[..., :, None]
    # Largest index whose value is <= each query (first index if none are).
    idx_lo = torch.max(torch.where(ge, idx[..., :, None], idx[..., :1, None]), -2)[0]
    # Smallest index whose value exceeds each query (last index if none do).
    idx_hi = torch.min(torch.where(~ge, idx[..., :, None], idx[..., -1:, None]), -2)[0]
    return idx_lo, idx_hi
def invert_cdf(u, t, w):
    """Invert the CDF defined by (t, w) at the points specified by u in [0, 1)."""
    # Cumulative weights form the CDF over the bin endpoints t.
    cdf = integrate_weights(w)
    # Interpolating with the roles of x and y swapped (queries u against the
    # CDF values) evaluates the inverse CDF at u.
    return interpolate(u, cdf, t)
def importance_sampling(t: torch.Tensor,
                        w: torch.Tensor,
                        num_samples: int,
                        perturb=True,
                        single_jitter=False,
                        ):
    """Piecewise-constant PDF sampling from a step function.

    Args:
        t: [..., num_bins + 1] bin endpoint coordinates (must be sorted), or
            [..., num_bins + 1, 1] with a trailing channel that is squeezed.
        w: [..., num_bins] bin weights defining the step function.
        num_samples: int, the number of samples to draw per ray.
        perturb: bool, if True, jitter the stratified samples within each
            stratum; if False, the samples are a deterministic linspace over
            the CDF domain.
        single_jitter: bool, if True, jitter every sample along each ray by the
            same amount in the inverse CDF. Otherwise, jitter each sample
            independently.

    Returns:
        [..., num_samples] tensor of sample locations drawn from the CDF of
        (t, w).
    """
    if t.ndim == w.ndim + 1:
        t = t[..., 0]  # remove trailing channel dim
    # Flatten leading dims for the CDF inversion; restored at the end.
    out_shape = *t.shape[:-1], num_samples
    t = t.reshape(-1, t.shape[-1])
    w = w.reshape(-1, w.shape[-1])
    # Assuming sampling in s space: append the final endpoint (s = 1) if the
    # endpoints do not already outnumber the weights by one.
    if t.shape[-1] != w.shape[-1] + 1:
        t = torch.cat([t, torch.ones_like(t[..., -1:])], dim=-1)
    eps = 1e-8
    # Draw stratified samples: `u` is in [0, 1) -- it can be zero, but never 1.
    u_max = eps + (1 - eps) / num_samples
    # max(..., 1) guards against a ZeroDivisionError when num_samples == 1
    # (the original divided by num_samples - 1 unconditionally).
    max_jitter = (1 - u_max) / max(num_samples - 1, 1) - eps if perturb else 0
    d = 1 if single_jitter else num_samples
    # The rand call is kept even when perturb is False (max_jitter == 0) so the
    # RNG stream consumption matches the original implementation.
    u = (
        torch.linspace(0, 1 - u_max, num_samples, device=t.device, dtype=t.dtype) +
        torch.rand(t.shape[:-1] + (d,), device=t.device, dtype=t.dtype) * max_jitter
    )
    u = invert_cdf(u, t, w)
    return u.reshape(out_shape)
def weight_to_pdf(t: torch.Tensor, w: torch.Tensor, eps=torch.finfo(torch.float32).eps**2):
    """Turn a vector of weights that sums to 1 into a PDF that integrates to 1.

    Args:
        t: bin endpoints (matched up to have one more entry than `w`).
        w: bin weights summing to 1 along the last axis.
        eps: lower clip on bin widths to avoid division by zero.
    """
    # The docstring used to sit below this call, making it a dead string
    # statement rather than the function's documentation.
    t, w = matchup_channels(t, w)
    return w / (t[..., 1:] - t[..., :-1]).clip(eps)
def pdf_to_weight(t: torch.Tensor, p: torch.Tensor):
    """Turn a PDF that integrates to 1 into a vector of weights that sums to 1.

    Args:
        t: bin endpoints (matched up to have one more entry than `p`).
        p: per-bin density values.
    """
    # The docstring used to sit below this call, making it a dead string
    # statement rather than the function's documentation.
    t, p = matchup_channels(t, p)
    return p * (t[..., 1:] - t[..., :-1])
def max_dilate(t, w, dilation, domain=(-torch.inf, torch.inf)):
    """Dilate (via max-pooling) a non-negative step function.

    Args:
        t: bin endpoints (matched up to have one more entry than `w`).
        w: step-function values per bin.
        dilation: half-width by which each bin's support is expanded.
        domain: clamp range for the dilated endpoints.

    Returns:
        (t_dilate, w_dilate): dilated endpoints and max-pooled values.
    """
    # The docstring used to sit below this call, making it a dead string
    # statement rather than the function's documentation.
    t, w = matchup_channels(t, w)
    # Expand every bin's support by `dilation` on each side.
    t0 = t[..., :-1] - dilation
    t1 = t[..., 1:] + dilation
    # Union of original and shifted endpoints, sorted and clamped to domain.
    t_dilate = torch.sort(torch.cat([t, t0, t1], dim=-1), dim=-1)[0]
    t_dilate = t_dilate.clip(*domain)
    # Each output bin takes the max over all expanded bins that cover it.
    w_dilate = torch.max(
        torch.where(
            (t0[..., None, :] <= t_dilate[..., None])
            & (t1[..., None, :] > t_dilate[..., None]),
            w[..., None, :],
            0,
        ),
        dim=-1)[0][..., :-1]
    return t_dilate, w_dilate
def max_dilate_weights(t: torch.Tensor,
                       w: torch.Tensor,
                       dilation: float,
                       domain=(-torch.inf, torch.inf),
                       renormalize=False,
                       eps=torch.finfo(torch.float32).eps**2):
    """Dilate (via max-pooling) a set of weights."""
    # Work in density space so dilation is well defined, then convert back.
    density = weight_to_pdf(t, w)
    t_new, density_new = max_dilate(t, density, dilation, domain=domain)
    w_new = pdf_to_weight(t_new, density_new)
    if renormalize:
        # Restore the sum-to-one invariant after pooling.
        w_new /= torch.sum(w_new, dim=-1, keepdim=True).clip(eps)
    return t_new, w_new
def anneal_weights(t: torch.Tensor,
                   w: torch.Tensor,
                   train_frac: float,
                   anneal_slope: float = 10.0,
                   eps=torch.finfo(torch.float32).eps ** 2):
    """Anneal the weights as a function of training progress `train_frac`."""
    # Accepts t.shape[-1] == w.shape[-1] + 1.
    t, w = matchup_channels(t, w)
    if anneal_slope > 0:
        # Schlick's bias function, see https://arxiv.org/abs/2010.09714
        s = anneal_slope
        anneal = (s * train_frac) / ((s - 1) * train_frac + 1)
    else:
        anneal = 1.
    # A slightly more stable way to compute weights**anneal: work in log space
    # and pin zero-width intervals to weight 0 via a -inf logit.
    logits = torch.where(
        t[..., 1:] > t[..., :-1],
        anneal * torch.log(w.clip(eps)),
        -torch.inf)  # NOTE: if every logit is -inf, softmax produces NaN
    return torch.softmax(logits, dim=-1)
def query(tq, t, y, outside_value=0):
    """Look up the values of the step function (t, y) at locations tq."""
    idx_lo, idx_hi = searchsorted(t, tq)
    # Pad y with the outside value so gathers at the final index stay in range.
    y_padded = torch.cat([y, torch.full_like(y[..., :1], outside_value)], dim=-1)
    gathered = torch.take_along_dim(y_padded, idx_lo, dim=-1)
    # idx_lo == idx_hi marks queries outside [t[..., 0], t[..., -1]].
    return torch.where(idx_lo == idx_hi, outside_value, gathered)
|
evocodebench_data_42
|
# coding=utf-8
# Copyright 2023 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Camera pose and ray generation utility functions."""
import enum
import functools
import types
from typing import Final, List, Mapping, Optional, Text, Tuple, TypeAlias
from absl import logging
import chex
from internal import configs
from internal import geometry
from internal import math
from internal import rigid_body
from internal import spin_math
from internal import stepfun
from internal import utils
import jax
from jax import random
import jax.numpy as jnp
import jaxcam
import numpy as np
import scipy
_Array: TypeAlias = np.ndarray | jnp.ndarray
_ScalarArray: TypeAlias = float | _Array
_IDENTIFY_FILE_INDICES_MISSING_FRACTION_ERROR_THRESHOLD: Final[float] = 0.95
def convert_to_ndc(
    origins,
    directions,
    pixtocam,
    near = 1.0,
    xnp = np,
):
  """Converts a set of rays to normalized device coordinates (NDC).

  Args:
    origins: ndarray(float32), [..., 3], world space ray origins.
    directions: ndarray(float32), [..., 3], world space ray directions.
    pixtocam: ndarray(float32), [3, 3], inverse intrinsic matrix.
    near: float, near plane along the negative z axis.
    xnp: either numpy or jax.numpy.

  Returns:
    origins_ndc: ndarray(float32), [..., 3].
    directions_ndc: ndarray(float32), [..., 3].

  Assumes input rays are meant for a perspective pinhole camera with identity
  extrinsics and intrinsics given by `pixtocam`. `near` is the frustum's near
  plane; the far plane is at infinity. The identity-pose ray bundle maps to
  parallel rays in the (-1, -1, -1) to (1, 1, 1) cube; any ray with dz < 0 can
  be remapped, which lets forward-facing scenes share one NDC space.

  Note that projection(origins + t * directions) will NOT equal
  origins_ndc + t * directions_ndc, and directions_ndc are not unit length;
  they are scaled so the valid near and far planes in NDC are 0 and 1.
  See Appendix C in https://arxiv.org/abs/2003.08934 for additional details.
  """
  # Slide each origin along its ray onto the z = -near plane, so the new near
  # bound becomes 0.
  t_near = -(near + origins[Ellipsis, 2]) / directions[Ellipsis, 2]
  origins = origins + t_near[Ellipsis, None] * directions

  dx, dy, dz = xnp.moveaxis(directions, -1, 0)
  ox, oy, oz = xnp.moveaxis(origins, -1, 0)

  # Projection scale factors derived from the inverse intrinsics.
  xmult = 1.0 / pixtocam[0, 2]  # Equal to -2. * focal / cx
  ymult = 1.0 / pixtocam[1, 2]  # Equal to -2. * focal / cy

  # Perspective projection into NDC of the near points (t = 0).
  origins_ndc = xnp.stack(
      [xmult * ox / oz, ymult * oy / oz, -xnp.ones_like(oz)], axis=-1
  )
  # Perspective projection into NDC of the points at t = infinity.
  infinity_ndc = xnp.stack(
      [xmult * dx / dz, ymult * dy / dz, xnp.ones_like(oz)], axis=-1
  )
  # Directions point from the near points toward their images at infinity.
  directions_ndc = infinity_ndc - origins_ndc
  return origins_ndc, directions_ndc
def pad_poses(p):
  """Pad [..., 3, 4] pose matrices with a homogeneous bottom row [0,0,0,1]."""
  row_shape = p[Ellipsis, :1, :4].shape
  homogeneous_row = np.broadcast_to([0, 0, 0, 1.0], row_shape)
  return np.concatenate([p[Ellipsis, :3, :4], homogeneous_row], axis=-2)
def unpad_poses(p):
  """Remove the homogeneous bottom row from [..., 4, 4] pose matrices."""
  # Keep only the top 3 rows (rotation + translation).
  return p[Ellipsis, :3, :4]
def recenter_poses(poses):
  """Recenter poses around the origin."""
  # Invert the average pose and apply that transform to every input pose.
  central_pose = average_pose(poses)
  transform = np.linalg.inv(pad_poses(central_pose))
  recentered = transform @ pad_poses(poses)
  return unpad_poses(recentered), transform
def average_pose(poses, lock_up = False):
  """New pose using average position, z-axis, and up vector of input poses."""
  mean_position = poses[:, :3, 3].mean(0)
  mean_z_axis = poses[:, :3, 2].mean(0)
  mean_up = poses[:, :3, 1].mean(0)
  return viewmatrix(mean_z_axis, mean_up, mean_position, lock_up=lock_up)
def viewmatrix(
    lookdir,
    up,
    position,
    lock_up = False,
):
  """Construct lookat view matrix."""
  def orthogonal_dir(a, b):
    return normalize(np.cross(a, b))

  axes = [None, normalize(up), normalize(lookdir)]
  # x-axis is always the normalized cross product of `lookdir` and `up`.
  axes[0] = orthogonal_dir(axes[1], axes[2])
  # Default is to lock the `lookdir` vector; lock `up` instead when requested.
  free_axis = 2 if lock_up else 1
  # Re-derive the non-locked axis so the basis is orthonormal.
  axes[free_axis] = orthogonal_dir(axes[(free_axis + 1) % 3],
                                   axes[(free_axis + 2) % 3])
  return np.stack(axes + [position], axis=1)
def rotation_about_axis(degrees, axis=0):
  """Creates rotation matrix about one of the coordinate axes.

  Args:
    degrees: rotation angle in degrees.
    axis: coordinate axis index (0=x, 1=y, 2=z) to rotate about.

  Returns:
    A (4, 4) homogeneous transform whose rotation part rotates about `axis`.
  """
  radians = degrees / 180.0 * np.pi
  cos_a, sin_a = np.cos(radians), np.sin(radians)
  plane_rotation = np.array([[cos_a, -sin_a], [sin_a, cos_a]])
  # Place the 2x2 rotation in the yz-plane, then roll rows and columns so it
  # acts about the requested axis.
  rotation = np.eye(3)
  rotation[1:3, 1:3] = plane_rotation
  rotation = np.roll(np.roll(rotation, axis, axis=0), axis, axis=1)
  transform = np.eye(4)
  transform[:3, :3] = rotation
  return transform
def normalize(x):
  """Normalization helper function."""
  # Scale x to unit Euclidean length.
  magnitude = np.linalg.norm(x)
  return x / magnitude
def focus_point_fn(poses, xnp = np):
  """Calculate nearest point to all focal axes in poses."""
  axis_dirs = poses[:, :3, 2:3]
  cam_origins = poses[:, :3, 3:4]
  # Projector onto the plane perpendicular to each camera's focal axis.
  proj = xnp.eye(3) - axis_dirs * xnp.transpose(axis_dirs, [0, 2, 1])
  normal_eqs = xnp.transpose(proj, [0, 2, 1]) @ proj
  # Least-squares point minimizing summed squared distance to every axis.
  rhs = (normal_eqs @ cam_origins).mean(0)[:, 0]
  return xnp.linalg.inv(normal_eqs.mean(0)) @ rhs
# Constants for generate_spiral_path(): stretch factors applied to the scene's
# near/far bounds and the blend weight used to pick the spiral's focus depth.
NEAR_STRETCH = 0.9  # Push forward near bound for forward facing render path.
FAR_STRETCH = 5.0  # Push back far bound for forward facing render path.
FOCUS_DISTANCE = 0.75  # Relative weighting of near, far bounds for render path.
def generate_spiral_path(
    poses,
    bounds,
    n_frames = 120,
    n_rots = 2,
    zrate = 0.5,
):
  """Calculates a forward facing spiral path for rendering."""
  # Pick a reasonable 'focus depth' for this dataset: a weighted harmonic
  # blend of conservative near and far bounds in disparity space.
  near_bound = bounds.min() * NEAR_STRETCH
  far_bound = bounds.max() * FAR_STRETCH
  # All cameras will point towards the world space point (0, 0, -focal).
  focal = 1 / ((1 - FOCUS_DISTANCE) / near_bound + FOCUS_DISTANCE / far_bound)
  # Spiral radii from the 90th percentile of camera positions.
  positions = poses[:, :3, 3]
  radii = np.concatenate([np.percentile(np.abs(positions), 90, 0), [1.0]])
  cam2world = average_pose(poses)
  up = poses[:, :3, 1].mean(0)
  render_poses = []
  thetas = np.linspace(0.0, 2.0 * np.pi * n_rots, n_frames, endpoint=False)
  for theta in thetas:
    offset = radii * [np.cos(theta), -np.sin(theta), -np.sin(theta * zrate), 1.0]
    position = cam2world @ offset
    lookat = cam2world @ [0, 0, -focal, 1.0]
    z_axis = position - lookat
    render_poses.append(viewmatrix(z_axis, up, position))
  return np.stack(render_poses, axis=0)
def transform_poses_pca(poses):
  """Transforms poses so principal components lie on XYZ axes.

  Args:
    poses: a (N, 3, 4) array containing the cameras' camera to world transforms.

  Returns:
    A tuple (poses, transform), with the transformed poses and the applied
    camera_to_world transforms.
  """
  positions = poses[:, :3, 3]
  centroid = positions.mean(axis=0)
  centered = positions - centroid
  eigval, eigvec = np.linalg.eig(centered.T @ centered)
  # Principal axes ordered from largest to smallest eigenvalue.
  order = np.argsort(eigval)[::-1]
  rot = eigvec[:, order].T
  # Keep the basis right-handed.
  if np.linalg.det(rot) < 0:
    rot = np.diag(np.array([1, 1, -1])) @ rot
  transform = np.concatenate([rot, rot @ -centroid[:, None]], -1)
  poses_recentered = unpad_poses(transform @ pad_poses(poses))
  transform = np.concatenate([transform, np.eye(4)[3:]], axis=0)
  # Flip the coordinate system if the z component of the y-axis is negative.
  if poses_recentered.mean(axis=0)[2, 1] < 0:
    poses_recentered = np.diag(np.array([1, -1, -1])) @ poses_recentered
    transform = np.diag(np.array([1, -1, -1, 1])) @ transform
  # Rescale so all camera positions fit inside the [-1, 1]^3 cube.
  scale_factor = 1.0 / np.max(np.abs(poses_recentered[:, :3, 3]))
  poses_recentered[:, :3, 3] *= scale_factor
  transform = np.diag(np.array([scale_factor] * 3 + [1])) @ transform
  return poses_recentered, transform
def transform_poses_focus(poses):
  """Transforms poses so that the "focus point" of capture is at the origin.

  Args:
    poses: a (N, 3, 4) array containing the cameras' camera to world transforms.

  Returns:
    A tuple (poses, transform), with the transformed poses and the applied
    camera_to_world transforms.
  """
  # Move the focus point to the origin.
  pivot = focus_point_fn(poses)
  # Use the average up vector as the Z axis.
  swap_y_z = np.array([
      [1, 0, 0],
      [0, 0, 1],
      [0, -1, 0.0],
  ])
  world_rot = average_pose(poses, lock_up=True)[:3, :3] @ swap_y_z
  transform = np.concatenate([world_rot.T, world_rot.T @ -pivot[:, None]], -1)
  poses_recentered = transform @ pad_poses(poses)
  transform = np.concatenate([transform, np.eye(4)[3:]], axis=0)
  # Rescale so all camera positions fit inside the [-1, 1]^3 cube.
  scale_factor = 1.0 / np.max(np.abs(poses_recentered[:, :3, 3]))
  poses_recentered[:, :3, 3] *= scale_factor
  transform = np.diag(np.array([scale_factor] * 3 + [1])) @ transform
  return poses_recentered, transform
def generate_ellipse_path(
    poses,
    n_frames = 120,
    const_speed = True,
    z_variation = 0.0,
    z_phase = 0.0,
    rad_mult_min = 1.0,
    rad_mult_max = 1.0,
    render_rotate_xaxis = 0.0,
    render_rotate_yaxis = 0.0,
    use_avg_z_height = False,
    z_height_percentile = None,
    lock_up = False,
):
  """Generate an elliptical render path based on the given poses.

  Args:
    poses: (N, 3, 4) camera-to-world matrices used to fit the ellipse.
    n_frames: number of poses in the returned path.
    const_speed: if True, resample angles (via stepfun.sample) so velocity
      along the path is closer to constant.
    z_variation: scale of vertical variation along the path (0 keeps height
      fixed at z_init).
    z_phase: phase offset of the vertical oscillation, in fractions of a turn.
    rad_mult_min: minimum radius multiplier (zoom-in limit toward center).
    rad_mult_max: maximum radius multiplier (zoom-out limit).
    render_rotate_xaxis: extra rotation (degrees) applied about the x axis.
    render_rotate_yaxis: extra rotation (degrees) applied about the y axis.
    use_avg_z_height: center path height on the mean camera height instead
      of z = 0.
    z_height_percentile: if set, center height on this percentile of camera
      heights (takes precedence over the mean).
    lock_up: passed through to viewmatrix; lock the up axis rather than the
      look direction.

  Returns:
    (n_frames, 3, 4) array of camera-to-world matrices along the ellipse.
  """
  # Calculate the focal point for the path (cameras point toward this).
  center = focus_point_fn(poses)
  # Default path height sits at z=0 (in middle of zero-mean capture pattern).
  xy_offset = center[:2]
  # Calculate lengths for ellipse axes based on input camera positions.
  xy_radii = np.percentile(np.abs(poses[:, :2, 3] - xy_offset), 90, axis=0)
  # Use ellipse that is symmetric about the focal point in xy.
  xy_low = xy_offset - xy_radii
  xy_high = xy_offset + xy_radii
  # Optional height variation, need not be symmetric.
  z_min = np.percentile((poses[:, 2, 3]), 10, axis=0)
  z_max = np.percentile((poses[:, 2, 3]), 90, axis=0)
  if use_avg_z_height or z_height_percentile is not None:
    # Center the path vertically around the average camera height, good for
    # datasets recentered by transform_poses_focus function.
    if z_height_percentile is None:
      z_init = poses[:, 2, 3].mean(axis=0)
    else:
      z_init = np.percentile(poses[:, 2, 3], z_height_percentile, axis=0)
  else:
    # Center the path at zero, good for datasets recentered by
    # transform_poses_pca function.
    z_init = 0
  z_low = z_init + z_variation * (z_min - z_init)
  z_high = z_init + z_variation * (z_max - z_init)
  xyz_low = np.array([*xy_low, z_low])
  xyz_high = np.array([*xy_high, z_high])
  def get_positions(theta):
    # Interpolate between bounds with trig functions to get ellipse in x-y.
    # Optionally also interpolate in z to change camera height along path.
    t_x = np.cos(theta) * 0.5 + 0.5
    t_y = np.sin(theta) * 0.5 + 0.5
    t_z = np.cos(theta + 2 * np.pi * z_phase) * 0.5 + 0.5
    t_xyz = np.stack([t_x, t_y, t_z], axis=-1)
    positions = xyz_low + t_xyz * (xyz_high - xyz_low)
    # Interpolate between min and max radius multipliers so the camera zooms in
    # and out of the scene center.
    t = np.sin(theta) * 0.5 + 0.5
    rad_mult = rad_mult_min + (rad_mult_max - rad_mult_min) * t
    positions = center + (positions - center) * rad_mult[:, None]
    return positions
  # One extra sample closes the loop; the duplicate endpoint is dropped below.
  theta = np.linspace(0, 2.0 * np.pi, n_frames + 1, endpoint=True)
  positions = get_positions(theta)
  if const_speed:
    # Resample theta angles so that the velocity is closer to constant.
    lengths = np.linalg.norm(positions[1:] - positions[:-1], axis=-1)
    theta = stepfun.sample(None, theta, np.log(lengths), n_frames + 1)
    positions = get_positions(theta)
  # Throw away duplicated last position.
  positions = positions[:-1]
  # Set path's up vector to axis closest to average of input pose up vectors.
  avg_up = poses[:, :3, 1].mean(0)
  avg_up = avg_up / np.linalg.norm(avg_up)
  ind_up = np.argmax(np.abs(avg_up))
  up = np.eye(3)[ind_up] * np.sign(avg_up[ind_up])
  poses = np.stack([viewmatrix(p - center, up, p, lock_up) for p in positions])
  # Apply the optional extra rotations about the y axis, then the x axis.
  poses = poses @ rotation_about_axis(-render_rotate_yaxis, axis=1)
  poses = poses @ rotation_about_axis(render_rotate_xaxis, axis=0)
  return poses
def generate_interpolated_path(
    poses,
    n_interp,
    spline_degree = 5,
    smoothness = 0.03,
    rot_weight = 0.1,
    lock_up = False,
    fixed_up_vector = None,
    lookahead_i = None,
    frames_per_colmap = None,
    const_speed = False,
    n_buffer = None,
    periodic = False,
    n_interp_as_total = False,
):
  """Creates a smooth spline path between input keyframe camera poses.

  Spline is calculated with poses in format (position, lookat-point, up-point).

  Args:
    poses: (n, 3, 4) array of input pose keyframes.
    n_interp: returned path will have n_interp * (n - 1) total poses.
    spline_degree: polynomial degree of B-spline.
    smoothness: parameter for spline smoothing, 0 forces exact interpolation.
    rot_weight: relative weighting of rotation/translation in spline solve.
    lock_up: if True, forced to use given Up and allow Lookat to vary.
    fixed_up_vector: replace the interpolated `up` with a fixed vector.
    lookahead_i: force the look direction to look at the pose `i` frames ahead.
    frames_per_colmap: conversion factor for the desired average velocity.
    const_speed: renormalize spline to have constant delta between each pose.
    n_buffer: Number of buffer frames to insert at the start and end of the
      path. Helps keep the ends of a spline path straight.
    periodic: make the spline path periodic (perfect loop).
    n_interp_as_total: use n_interp as total number of poses in path rather than
      the number of poses to interpolate between each input.

  Returns:
    A tuple (poses, u, u_keyframes):
      poses: interpolated camera poses with the final spline sample dropped,
        shape (num_frames - 1, 3, 4); num_frames is n_interp * (n - 1), or
        n_interp + 1 if n_interp_as_total is set.
      u: spline parameter value for each returned pose (last value dropped).
      u_keyframes: spline parameter values of the input keyframes.
  """
  def poses_to_points(poses, dist):
    """Converts from pose matrices to (position, lookat, up) format."""
    pos = poses[:, :3, -1]
    # Lookat point sits `dist` behind along the z axis; up point `dist`
    # along the y axis — `dist` (rot_weight) controls how strongly rotation
    # influences the spline fit relative to translation.
    lookat = poses[:, :3, -1] - dist * poses[:, :3, 2]
    up = poses[:, :3, -1] + dist * poses[:, :3, 1]
    return np.stack([pos, lookat, up], 1)
  def points_to_poses(points):
    """Converts from (position, lookat, up) format to pose matrices."""
    poses = []
    for i in range(len(points)):
      pos, lookat_point, up_point = points[i]
      if lookahead_i is not None:
        if i + lookahead_i < len(points):
          # Look toward the position `lookahead_i` frames ahead.
          lookat = pos - points[i + lookahead_i][0]
        # NOTE(review): when i + lookahead_i >= len(points), `lookat` keeps
        # its value from the previous iteration (or is unbound on the very
        # first iteration) — confirm this is the intended end-of-path
        # behavior.
      else:
        lookat = pos - lookat_point
      up = (up_point - pos) if fixed_up_vector is None else fixed_up_vector
      poses.append(viewmatrix(lookat, up, pos, lock_up=lock_up))
    return np.array(poses)
  def insert_buffer_poses(poses, n_buffer):
    """Insert extra poses at the start and end of the path."""
    def average_distance(points):
      distances = np.linalg.norm(points[1:] - points[0:-1], axis=-1)
      return np.mean(distances)
    def shift(pose, dz):
      result = np.copy(pose)
      z = result[:3, 2]
      z /= np.linalg.norm(z)
      # Move along forward-backward axis. -z is forward.
      result[:3, 3] += z * dz
      return result
    dz = average_distance(poses[:, :3, 3])
    prefix = np.stack([shift(poses[0], (i + 1) * dz) for i in range(n_buffer)])
    prefix = prefix[::-1]  # reverse order
    suffix = np.stack(
        [shift(poses[-1], -(i + 1) * dz) for i in range(n_buffer)]
    )
    result = np.concatenate([prefix, poses, suffix])
    return result
  def remove_buffer_poses(poses, u, n_frames, u_keyframes, n_buffer):
    # Drop the buffer keyframes and any interpolated poses outside them.
    u_keyframes = u_keyframes[n_buffer:-n_buffer]
    mask = (u >= u_keyframes[0]) & (u <= u_keyframes[-1])
    poses = poses[mask]
    u = u[mask]
    n_frames = len(poses)
    return poses, u, n_frames, u_keyframes
  def interp(points, u, k, s):
    """Runs multidimensional B-spline interpolation on the input points."""
    sh = points.shape
    pts = np.reshape(points, (sh[0], -1))
    k = min(k, sh[0] - 1)  # clamp degree to the number of available points
    tck, u_keyframes = scipy.interpolate.splprep(pts.T, k=k, s=s, per=periodic)
    new_points = np.array(scipy.interpolate.splev(u, tck))
    new_points = np.reshape(new_points.T, (len(u), sh[1], sh[2]))
    return new_points, u_keyframes
  if n_buffer is not None:
    poses = insert_buffer_poses(poses, n_buffer)
  points = poses_to_points(poses, dist=rot_weight)
  if n_interp_as_total:
    n_frames = n_interp + 1  # Add extra since final pose is discarded.
  else:
    n_frames = n_interp * (points.shape[0] - 1)
  u = np.linspace(0, 1, n_frames, endpoint=True)
  new_points, u_keyframes = interp(points, u=u, k=spline_degree, s=smoothness)
  poses = points_to_poses(new_points)
  if n_buffer is not None:
    poses, u, n_frames, u_keyframes = remove_buffer_poses(
        poses, u, n_frames, u_keyframes, n_buffer
    )
  if frames_per_colmap is not None:
    # Recalculate the number of frames to achieve desired average velocity.
    positions = poses[:, :3, -1]
    lengths = np.linalg.norm(positions[1:] - positions[:-1], axis=-1)
    total_length_colmap = lengths.sum()
    print('old n_frames:', n_frames)
    print('total_length_colmap:', total_length_colmap)
    n_frames = int(total_length_colmap * frames_per_colmap)
    print('new n_frames:', n_frames)
    u = np.linspace(
        np.min(u_keyframes), np.max(u_keyframes), n_frames, endpoint=True
    )
    new_points, _ = interp(points, u=u, k=spline_degree, s=smoothness)
    poses = points_to_poses(new_points)
  if const_speed:
    # Resample timesteps so that the velocity is nearly constant.
    positions = poses[:, :3, -1]
    lengths = np.linalg.norm(positions[1:] - positions[:-1], axis=-1)
    u = stepfun.sample(None, u, np.log(lengths), n_frames + 1)
    new_points, _ = interp(points, u=u, k=spline_degree, s=smoothness)
    poses = points_to_poses(new_points)
  # Discard the final pose and parameter value (see n_interp_as_total above).
  return poses[:-1], u[:-1], u_keyframes
def safe_interpolate_1d(
    x,
    spline_degree,
    smoothness,
    t_input,
    t_output,
):
  """Interpolate 1d signal x (defined at t_input and queried at t_output)."""
  # TODO(bmild): switch interpolation t values to match those chosen for path.
  # Fitting a degree-k spline requires at least k+1 points.
  num_points = len(x)
  degree = min(spline_degree, num_points - 1)
  if degree > 0:
    spline = scipy.interpolate.splrep(t_input, x, s=smoothness, k=degree)
    return scipy.interpolate.splev(t_output, spline).astype(x.dtype)
  # Degenerate case (0 or 1 points): return a constant signal.
  fill_value = x[0] if num_points else 0.0
  return np.full(t_output.shape, fill_value, dtype=x.dtype)
def identify_file_names(dir_or_text_file):
  """Load filenames from text file or directory."""
  if utils.isdir(dir_or_text_file):
    # A directory: the sorted directory listing is the filename set.
    return sorted(utils.listdir(dir_or_text_file))
  # A text file: each line holds one filename.
  with utils.open_file(dir_or_text_file, 'r') as fp:
    contents = fp.read()
  if isinstance(contents, bytes):
    # Decode bytes into a string before splitting into lines.
    contents = contents.decode('utf-8')
  return contents.splitlines()
def identify_file_indices(
    dir_or_text_file, file_names
):
  """Computes indices for a subset of files out of a larger list.

  Args:
    dir_or_text_file: directory or text file listing the subset filenames.
    file_names: the full, ordered list of filenames.

  Returns:
    np.ndarray of integer indices into `file_names`, ordered as the subset.

  Raises:
    ValueError: if more than the allowed fraction of subset files is missing
      from `file_names`.
  """
  # Load file names.
  subset_names = identify_file_names(dir_or_text_file)
  # COLMAP sometimes doesn't reconstruct all images, which results in some
  # files being missing.
  if not set(subset_names).issubset(file_names):
    subset_names_missing_from_file_names = set(subset_names) - set(file_names)
    logging.warning(
        'Some files from subset are missing in the file names:\n%s',
        ' '.join(str(x) for x in subset_names_missing_from_file_names),
    )
    missing_subset_names_threshold = len(
        subset_names_missing_from_file_names
    ) / len(subset_names)
    if (
        missing_subset_names_threshold
        > _IDENTIFY_FILE_INDICES_MISSING_FRACTION_ERROR_THRESHOLD
    ):
      raise ValueError(
          f'{missing_subset_names_threshold*100}% of subset files is missing'
          f' from file_names: {subset_names_missing_from_file_names}'
      )
  # Map each filename to its first occurrence so lookup is O(1), instead of
  # calling list.index (an O(n) scan) for every subset name. setdefault keeps
  # the FIRST index for duplicates, matching list.index semantics.
  first_index = {}
  for i, name in enumerate(file_names):
    first_index.setdefault(name, i)
  # Get indices corresponding to the subset filenames, preserving the order
  # used in subset_names.
  indices = [first_index[n] for n in subset_names if n in first_index]
  return np.array(indices)
def get_meters_per_colmap_from_calibration_images(
    config, poses, image_names
):
  """Uses calibration images to get how many meters is a single COLMAP unit."""
  # By default, the input camera poses are scaled to fit in the [-1, 1]^3
  # cube, which implies a scaling of 2 / .25 = 8 meters between the farthest
  # apart camera poses.
  meters_per_colmap = 8.0
  if config.render_calibration_keyframes is not None:
    # Use provided calibration keyframes to determine metric world scale.
    calib_names = identify_file_names(config.render_calibration_keyframes)
    pair_indices = []
    for i in range(0, len(calib_names), 2):
      # Grab pairs of calibration images filenames.
      name0, name1 = calib_names[i : i + 2]
      # Keep the pair only if both images have colmap poses.
      if name0 in image_names and name1 in image_names:
        pair_indices.append(
            (image_names.index(name0), image_names.index(name1))
        )
    if pair_indices:
      # Extract colmap-space positions from the camera pose matrices.
      positions = poses[pair_indices][Ellipsis, :3, -1]
      # Every pair of calibration keyframes should have world space distance
      # `render_calibration_distance` according to the capture handbook.
      colmap_lengths = np.linalg.norm(
          positions[:, 0] - positions[:, 1], axis=-1
      )
      colmap_length = colmap_lengths.mean(axis=0)
      # Ratio of world distance to colmap distance.
      meters_per_colmap = config.render_calibration_distance / colmap_length
      print('colmap lengths', colmap_lengths)
      print('avg', colmap_length)
      print('meters_per_colmap', meters_per_colmap)
  return meters_per_colmap
def calibrate_spline_speed(
    config, poses, image_names
):
  """Uses input config to determine a conversion factor for the spline speed."""
  if config.render_spline_meters_per_sec is None:
    # No target speed requested.
    return None
  meters_per_colmap = get_meters_per_colmap_from_calibration_images(
      config, poses, image_names
  )
  # frames/colmap-unit = (m / colmap-unit) / (m / s) * (frames / s).
  frames_per_colmap = (
      meters_per_colmap
      / config.render_spline_meters_per_sec
      * config.render_video_fps
  )
  print('returning frames_per_colmap', frames_per_colmap)
  return frames_per_colmap
def create_render_spline_path(
    config,
    image_names,
    poses,
    exposures,
):
  """Creates spline interpolation render path from subset of dataset poses.

  Args:
    config: configs.Config object.
    image_names: a list of image filenames.
    poses: [N, 3, 4] array of extrinsic camera pose matrices.
    exposures: optional list of floating point exposure values.

  Returns:
    spline_indices: list of indices used to select spline keyframe poses.
    render_poses: array of interpolated extrinsic camera poses for the path.
    render_exposures: optional list of interpolated exposures for the path.
  """

  def remove_outlier_spline_indices(
      spline_indices, poses, q_max, q_mult
  ):
    """Identify spline indices correspond to inlier poses."""
    # Distances between consecutive keyframe camera positions.
    poses = poses[spline_indices]
    points = poses[:, :3, -1]
    distances = np.linalg.norm(points[1:] - points[:-1], axis=-1)
    # A keyframe is an outlier when the jump to its successor exceeds
    # q_mult times the q_max quantile of all consecutive jumps.
    mask = distances < q_mult * np.quantile(distances, q_max)
    mask = np.concatenate([mask, [True]], axis=0)  # Keep the last pose.
    num_inliers = int(np.sum(mask))
    num_total = len(spline_indices)
    print(
        f'remove_outlier_spline_indices: {num_inliers}/{num_total} spline '
        'path poses remaining after outlier removal.'
    )
    return spline_indices[mask]

  # Grab poses corresponding to the image filenames.
  spline_indices = identify_file_indices(
      config.render_spline_keyframes, image_names
  )
  # Optionally drop keyframes whose camera positions look like outliers.
  if (
      config.render_spline_outlier_keyframe_quantile is not None
      and config.render_spline_outlier_keyframe_multiplier is not None
  ):
    spline_indices = remove_outlier_spline_indices(
        spline_indices,
        poses,
        q_max=config.render_spline_outlier_keyframe_quantile,
        q_mult=config.render_spline_outlier_keyframe_multiplier,
    )
  keyframes = poses[spline_indices]
  # Optional conversion factor so the spline advances at a metric speed
  # (None when config.render_spline_meters_per_sec is unset).
  frames_per_colmap = calibrate_spline_speed(config, poses, image_names)

  if config.render_spline_fixed_up:
    # Fix path to use world-space "up" vector instead of "banking" with spline.
    all_up_vectors = poses[:, :3, 1]  # second column of pose matrix is up.
    fixed_up_vector = normalize(all_up_vectors.mean(axis=0))
  else:
    fixed_up_vector = None
  render_poses, frame_timesteps, keyframe_timesteps = (
      generate_interpolated_path(
          keyframes,
          n_interp=config.render_spline_n_interp,
          spline_degree=config.render_spline_degree,
          smoothness=config.render_spline_smoothness,
          rot_weight=config.render_spline_rot_weight,
          lock_up=config.render_spline_lock_up,
          fixed_up_vector=fixed_up_vector,
          lookahead_i=config.render_spline_lookahead_i,
          frames_per_colmap=frames_per_colmap,
          const_speed=config.render_spline_const_speed,
          n_buffer=config.render_spline_n_buffer,
      )
  )
  if config.render_spline_interpolate_exposure:
    if exposures is None:
      raise ValueError(
          'config.render_spline_interpolate_exposure is True but '
          'create_render_spline_path() was passed exposures=None.'
      )
    # Interpolate per-frame exposure value.
    # Work in log space so interpolation is multiplicative in exposure.
    log_exposure = np.log(exposures[spline_indices])
    # Use aggressive smoothing for exposure interpolation to avoid flickering.
    log_exposure_interp = safe_interpolate_1d(
        log_exposure,
        spline_degree=5,
        smoothness=config.render_spline_interpolate_exposure_smoothness,
        t_input=keyframe_timesteps,
        t_output=frame_timesteps,
    )
    render_exposures = np.exp(log_exposure_interp)
  else:
    render_exposures = None
  return spline_indices, render_poses, render_exposures
def intrinsic_matrix(
    fx,
    fy,
    cx,
    cy,
    xnp = np,
):
  """Builds a pinhole intrinsic matrix (OpenCV convention)."""
  # Rows: x focal/principal, y focal/principal, homogeneous.
  row_x = [fx, 0, cx]
  row_y = [0, fy, cy]
  row_h = [0, 0, 1.0]
  return xnp.array([row_x, row_y, row_h])
def get_pixtocam(
    focal,
    width,
    height,
    xnp = np,
):
  """Inverse intrinsic matrix for a perfect pinhole camera."""
  # Ideal pinhole: equal focal lengths, principal point at the image center.
  cx = width * 0.5
  cy = height * 0.5
  return xnp.linalg.inv(intrinsic_matrix(focal, focal, cx, cy, xnp))
def pixel_coordinates(
    width, height, xnp = np
):
  """Tuple of the x and y integer coordinates for a grid of pixels."""
  xs = xnp.arange(width)
  ys = xnp.arange(height)
  # 'xy' indexing yields arrays of shape (height, width).
  return xnp.meshgrid(xs, ys, indexing='xy')
def _radial_and_tangential_distort(
x,
y,
k1 = 0,
k2 = 0,
k3 = 0,
k4 = 0,
p1 = 0,
p2 = 0,
):
"""Computes the distorted pixel positions."""
r2 = x * x + y * y
radial_distortion = r2 * (k1 + r2 * (k2 + r2 * (k3 + r2 * k4)))
dx_radial = x * radial_distortion
dy_radial = y * radial_distortion
dx_tangential = 2 * p1 * x * y + p2 * (r2 + 2 * x * x)
dy_tangential = 2 * p2 * x * y + p1 * (r2 + 2 * y * y)
return x + dx_radial + dx_tangential, y + dy_radial + dy_tangential
def _compute_residual_and_jacobian(
x,
y,
xd,
yd,
k1 = 0.0,
k2 = 0.0,
k3 = 0.0,
k4 = 0.0,
p1 = 0.0,
p2 = 0.0,
):
"""Auxiliary function of radial_and_tangential_undistort()."""
# Adapted from https://github.com/google/nerfies/blob/main/nerfies/camera.py
# let r(x, y) = x^2 + y^2;
# d(x, y) = 1 + k1 * r(x, y) + k2 * r(x, y) ^2 + k3 * r(x, y)^3 +
# k4 * r(x, y)^4;
r = x * x + y * y
d = 1.0 + r * (k1 + r * (k2 + r * (k3 + r * k4)))
# The perfect projection is:
# xd = x * d(x, y) + 2 * p1 * x * y + p2 * (r(x, y) + 2 * x^2);
# yd = y * d(x, y) + 2 * p2 * x * y + p1 * (r(x, y) + 2 * y^2);
#
# Let's define
#
# fx(x, y) = x * d(x, y) + 2 * p1 * x * y + p2 * (r(x, y) + 2 * x^2) - xd;
# fy(x, y) = y * d(x, y) + 2 * p2 * x * y + p1 * (r(x, y) + 2 * y^2) - yd;
#
# We are looking for a solution that satisfies
# fx(x, y) = fy(x, y) = 0;
fx = d * x + 2 * p1 * x * y + p2 * (r + 2 * x * x) - xd
fy = d * y + 2 * p2 * x * y + p1 * (r + 2 * y * y) - yd
# Compute derivative of d over [x, y]
d_r = k1 + r * (2.0 * k2 + r * (3.0 * k3 + r * 4.0 * k4))
d_x = 2.0 * x * d_r
d_y = 2.0 * y * d_r
# Compute derivative of fx over x and y.
fx_x = d + d_x * x + 2.0 * p1 * y + 6.0 * p2 * x
fx_y = d_y * x + 2.0 * p1 * x + 2.0 * p2 * y
# Compute derivative of fy over x and y.
fy_x = d_x * y + 2.0 * p2 * y + 2.0 * p1 * x
fy_y = d + d_y * y + 2.0 * p2 * x + 6.0 * p1 * y
return fx, fy, fx_x, fx_y, fy_x, fy_y
def _radial_and_tangential_undistort(
    xd,
    yd,
    k1 = 0,
    k2 = 0,
    k3 = 0,
    k4 = 0,
    p1 = 0,
    p2 = 0,
    eps = 1e-9,
    max_iterations=10,
    xnp = np,
):
  """Computes undistorted (x, y) from (xd, yd) via Newton iterations.

  From https://github.com/google/nerfies/blob/main/nerfies/camera.py
  """
  # Initialize the solution at the distorted point itself.
  x = xnp.copy(xd)
  y = xnp.copy(yd)
  for _ in range(max_iterations):
    fx, fy, fx_x, fx_y, fy_x, fy_y = _compute_residual_and_jacobian(
        x=x, y=y, xd=xd, yd=yd, k1=k1, k2=k2, k3=k3, k4=k4, p1=p1, p2=p2
    )
    # Solve the 2x2 Newton system via Cramer's rule; take a zero step
    # wherever the determinant is too small to invert safely.
    det = fy_x * fx_y - fx_x * fy_y
    invertible = xnp.abs(det) > eps
    dx = xnp.where(
        invertible, (fx * fy_y - fy * fx_y) / det, xnp.zeros_like(det)
    )
    dy = xnp.where(
        invertible, (fy * fx_x - fx * fy_x) / det, xnp.zeros_like(det)
    )
    x = x + dx
    y = y + dy
  return x, y
class ProjectionType(enum.Enum):
  """Camera projection type (perspective pinhole, fisheye, or 360 pano)."""

  # Standard pinhole perspective projection.
  PERSPECTIVE = 'perspective'
  # Fisheye projection (radial image-plane distance maps to view angle).
  FISHEYE = 'fisheye'
  # 360-degree equirectangular panorama.
  PANORAMIC = 'pano'
def pixels_to_rays(
    pix_x_int,
    pix_y_int,
    pixtocams,
    camtoworlds,
    distortion_params = None,
    pixtocam_ndc = None,
    camtype = ProjectionType.PERSPECTIVE,
    xnp = np,
):
  """Calculates rays given pixel coordinates, intrinisics, and extrinsics.

  Given 2D pixel coordinates pix_x_int, pix_y_int for cameras with
  inverse intrinsics pixtocams and extrinsics camtoworlds (and optional
  distortion coefficients distortion_params and NDC space projection matrix
  pixtocam_ndc), computes the corresponding 3D camera rays.

  Vectorized over the leading dimensions of the first four arguments.

  Args:
    pix_x_int: int array, shape SH, x coordinates of image pixels.
    pix_y_int: int array, shape SH, y coordinates of image pixels.
    pixtocams: float array, broadcastable to SH + [3, 3], inverse intrinsics.
    camtoworlds: float array, broadcastable to SH + [3, 4], camera extrinsics.
    distortion_params: dict of floats, optional camera distortion parameters.
    pixtocam_ndc: float array, [3, 3], optional inverse intrinsics for NDC.
    camtype: camera_utils.ProjectionType, fisheye or perspective camera.
    xnp: either numpy or jax.numpy.

  Returns:
    origins: float array, shape SH + [3], ray origin points.
    directions: float array, shape SH + [3], ray direction vectors.
    viewdirs: float array, shape SH + [3], normalized ray direction vectors.
    radii: float array, shape SH + [1], ray differential radii.
    imageplane: float array, shape SH + [2], xy coordinates on the image plane.
      If the image plane is at world space distance 1 from the pinhole, then
      imageplane will be the xy coordinates of a pixel in that space (so the
      camera ray direction at the origin would be (x, y, -1) in OpenGL coords).
  """

  # Must add half pixel offset to shoot rays through pixel centers.
  def pix_to_dir(x, y):
    return xnp.stack([x + 0.5, y + 0.5, xnp.ones_like(x)], axis=-1)

  # We need the dx and dy rays to calculate ray radii for mip-NeRF cones:
  # stack the pixel itself with its +1-in-x and +1-in-y neighbors.
  pixel_dirs_stacked = xnp.stack(
      [
          pix_to_dir(pix_x_int, pix_y_int),
          pix_to_dir(pix_x_int + 1, pix_y_int),
          pix_to_dir(pix_x_int, pix_y_int + 1),
      ],
      axis=0,
  )

  # For jax, need to specify high-precision matmul.
  matmul = math.matmul if xnp == jnp else xnp.matmul
  mat_vec_mul = lambda A, b: matmul(A, b[Ellipsis, None])[Ellipsis, 0]

  # Apply inverse intrinsic matrices.
  camera_dirs_stacked = mat_vec_mul(pixtocams, pixel_dirs_stacked)

  if distortion_params is not None:
    # Correct for distortion (iterative inversion of the distortion model).
    x, y = _radial_and_tangential_undistort(
        camera_dirs_stacked[Ellipsis, 0],
        camera_dirs_stacked[Ellipsis, 1],
        **distortion_params,
        xnp=xnp,
    )
    camera_dirs_stacked = xnp.stack([x, y, xnp.ones_like(x)], -1)

  if camtype == ProjectionType.FISHEYE:
    # Equidistant fisheye: radial image-plane distance is the polar angle,
    # clamped to pi (the back of the sphere).
    theta = xnp.sqrt(xnp.sum(xnp.square(camera_dirs_stacked[Ellipsis, :2]), axis=-1))
    theta = xnp.minimum(xnp.pi, theta)
    sin_theta_over_theta = xnp.sin(theta) / theta
    camera_dirs_stacked = xnp.stack(
        [
            camera_dirs_stacked[Ellipsis, 0] * sin_theta_over_theta,
            camera_dirs_stacked[Ellipsis, 1] * sin_theta_over_theta,
            xnp.cos(theta),
        ],
        axis=-1,
    )

  elif camtype == ProjectionType.PANORAMIC:
    # Equirectangular pano: x maps to azimuth (theta), y to inclination (phi)
    # (see cast_spherical_rays' 2*pi/width, pi/height inverse intrinsic).
    theta = camera_dirs_stacked[Ellipsis, 0]
    phi = camera_dirs_stacked[Ellipsis, 1]
    # Negation on y and z components accounts for expected OpenCV convention.
    camera_dirs_stacked = xnp.stack(
        [
            -xnp.sin(phi) * xnp.sin(theta),
            -xnp.cos(phi),
            -xnp.sin(phi) * xnp.cos(theta),
        ],
        axis=-1,
    )

  # Flip from OpenCV to OpenGL coordinate system.
  camera_dirs_stacked = matmul(
      camera_dirs_stacked, xnp.diag(xnp.array([1.0, -1.0, -1.0]))
  )

  # Extract 2D image plane (x, y) coordinates.
  imageplane = camera_dirs_stacked[0, Ellipsis, :2]

  # Apply camera rotation matrices.
  directions_stacked = mat_vec_mul(
      camtoworlds[Ellipsis, :3, :3], camera_dirs_stacked
  )
  # Extract the offset rays (center, +1 in x, +1 in y).
  directions, dx, dy = directions_stacked

  origins = xnp.broadcast_to(camtoworlds[Ellipsis, :3, -1], directions.shape)
  viewdirs = directions / xnp.linalg.norm(directions, axis=-1, keepdims=True)

  if pixtocam_ndc is None:
    # Distance from each unit-norm direction vector to its neighbors.
    dx_norm = xnp.linalg.norm(dx - directions, axis=-1)
    dy_norm = xnp.linalg.norm(dy - directions, axis=-1)
  else:
    # Convert ray origins and directions into projective NDC space.
    ndc_fn = functools.partial(convert_to_ndc, pixtocam=pixtocam_ndc, xnp=xnp)
    origins_dx, _ = ndc_fn(origins, dx)
    origins_dy, _ = ndc_fn(origins, dy)
    origins, directions = ndc_fn(origins, directions)
    # In NDC space, we use the offset between origins instead of directions.
    dx_norm = xnp.linalg.norm(origins_dx - origins, axis=-1)
    dy_norm = xnp.linalg.norm(origins_dy - origins, axis=-1)

  # Cut the distance in half, multiply it to match the variance of a uniform
  # distribution the size of a pixel (1/12, see paper).
  # TODO(barron): Add a unit test that this is correct.
  radii = (0.5 * (dx_norm + dy_norm))[Ellipsis, None] * 2 / xnp.sqrt(12)

  return origins, directions, viewdirs, radii, imageplane
def points_to_pixels(
    points,
    pixtocams,
    camtoworlds,
    distortion_params = None,
    camtype = ProjectionType.PERSPECTIVE,
    xnp = np,
):
  """Calculates pixel coordinates given 3D points, intrinisics, and extrinsics.

  Given 3D point coordinates points and cameras with inverse intrinsics
  pixtocams and extrinsics camtoworlds (and optional distortion coefficients
  distortion_params), computes the corresponding 2D pixel coordinates.

  Vectorized over the leading dimensions of the first four arguments.

  Args:
    points: float array, [..., 3], 3D coordinates of points to project.
    pixtocams: float array, [..., 3, 3], inverse intrinsics.
    camtoworlds: float array, [..., 3, 4], camera extrinsics.
    distortion_params: dict of floats or float arrays [...], optional camera
      distortion parameters.
    camtype: camera_utils.ProjectionType, type of camera model.
    xnp: either numpy (host compute) or jax.numpy (device compute).

  Returns:
    coordinates: float array, [..., 2], pixel coordinates.
    depth: float array, [...], per-point orthographic depth.

  Raises:
    ValueError: if camtype is not PERSPECTIVE.
  """
  if camtype != ProjectionType.PERSPECTIVE:
    raise ValueError(f'points_to_pixels only supports perspective projection, '
                     f'not {camtype} mode.')
  # For jax, need to specify high-precision matmul.
  matmul = math.matmul if xnp == jnp else xnp.matmul
  mat_vec_mul = lambda A, b: matmul(A, b[Ellipsis, None])[Ellipsis, 0]

  rotation = camtoworlds[Ellipsis, :3, :3]
  # Transpose is used as the inverse (assumes orthonormal rotation matrices).
  rotation_inv = xnp.swapaxes(rotation, -1, -2)
  translation = camtoworlds[Ellipsis, :3, -1]

  # Points (directions) in the camera coordinate frame.
  points_camera = mat_vec_mul(rotation_inv, points - translation)

  # Projection to image plane by dividing out -z
  # (camera looks down -z in OpenGL convention).
  depth = -points_camera[Ellipsis, -1]
  camera_dirs = points_camera / depth[Ellipsis, None]

  # OpenGL to OpenCV coordinates.
  camera_dirs = matmul(camera_dirs, xnp.diag(xnp.array([1.0, -1.0, -1.0])))

  if distortion_params is not None:
    # Correct for distortion (forward distortion model, unlike ray casting).
    x, y = _radial_and_tangential_distort(
        camera_dirs[Ellipsis, 0],
        camera_dirs[Ellipsis, 1],
        **distortion_params,
    )
    camera_dirs = xnp.stack([x, y, xnp.ones_like(x)], -1)

  # Apply intrinsics matrix.
  pixel_dirs = mat_vec_mul(xnp.linalg.inv(pixtocams), camera_dirs)

  # Remove half pixel offset (rays are cast through pixel centers).
  coordinates = pixel_dirs[Ellipsis, :2] - xnp.array([0.5, 0.5])

  return coordinates, depth
def rays_planes_intersection(
    z_min,
    z_max,
    origins,
    directions,
    xnp = np,
):
  """Intersects rays with the slab between two constant-z planes.

  Useful when the scene occupies a known altitude range but cameras are far
  away (e.g. aerial captures).

  Args:
    z_min: float z value of the lower cropping plane.
    z_max: float z value of the upper cropping plane.
    origins: ray origins points.
    directions: ray direction vectors.
    xnp: either numpy or jax.numpy.

  Returns:
    t_min: parametric location of the cropped ray origins.
    t_max: parametric location of the ends of the cropped rays.
  """
  # Parametric distance along each ray to each plane.
  oz = origins[Ellipsis, 2]
  dz = directions[Ellipsis, 2]
  t_lo = (z_min - oz) / dz
  t_hi = (z_max - oz) / dz
  # Entry is the nearer crossing, clamped so it never lies behind the origin;
  # exit is the farther crossing.
  t_min = xnp.maximum(0, xnp.minimum(t_lo, t_hi))
  t_max = xnp.maximum(t_lo, t_hi)
  return t_min, t_max
def _intersect_ranges(
r1,
r2,
xnp = np,
):
start = xnp.maximum(r1[0], r2[0])
end = xnp.minimum(r1[1], r2[1])
return (start, end)
def ray_box_intersection(
    ray_o, ray_d, corners, xnp = np
):
  """Returns enter/exit distances along the ray for box defined by `corners`."""
  # Slab method: per-axis crossing distances for both bounding planes.
  t_lo = (corners[0] - ray_o) / ray_d
  t_hi = (corners[1] - ray_o) / ray_d
  # Latest entry across axes, earliest exit across axes.
  t_min = xnp.minimum(t_lo, t_hi).max(axis=-1)
  t_max = xnp.maximum(t_lo, t_hi).min(axis=-1)
  return t_min, t_max
def modify_rays_with_bbox(
    rays, corners, xnp = np
):
  """Sets near/far by bbox intersection and multiplies lossmult by mask."""
  t_min, t_max = ray_box_intersection(
      rays.origins, rays.directions, corners, xnp=xnp
  )
  t_min, t_max = t_min[Ellipsis, None], t_max[Ellipsis, None]
  # A ray hits the box iff its entry precedes its exit.
  hits = t_min <= t_max
  # Clip the box interval against the ray's existing [near, far] range.
  inear, ifar = _intersect_ranges(
      (rays.near, rays.far), (t_min, t_max), xnp=xnp
  )
  overlaps = inear <= ifar
  valid = hits * overlaps
  # Zero out the loss contribution of rays that miss the box.
  if rays.lossmult is None:
    new_lossmult = valid.astype(xnp.float32)
  else:
    new_lossmult = xnp.where(valid, rays.lossmult, 0.0)
  return rays.replace(
      lossmult=new_lossmult,
      near=xnp.where(valid, inear, 0.0),
      far=xnp.where(valid, ifar, 0.0),
  )
def ray_sphere_intersection(
    ray_o,
    ray_d,
    center,
    radius,
    xnp = np,
):
  """Calculates distance to hit a sphere for a ray.

  Args:
    ray_o: Ray origin (..., 3)
    ray_d: Ray direction (..., 3)
    center: Sphere center (..., 3)
    radius: Sphere radius (..., 1)
    xnp: Numpy or Jax module

  Returns:
    t_min, t_max, hit. When no hit is found, t_min = t_max = 0.
  """
  # Solve |o + t d - center|^2 = radius^2, a quadratic a t^2 + b t + c = 0.
  offset = ray_o - center
  a = (ray_d**2).sum(axis=-1)
  b = 2 * (offset * ray_d).sum(axis=-1)
  c = (offset * offset).sum(axis=-1) - radius**2
  discriminant = b**2 - 4.0 * a * c
  hit = (discriminant >= 0) * (a > 0)
  # Nb: Results are 'wrong' if valid = false, this is just to make jax
  # not freak out.
  discriminant = xnp.where(hit, discriminant, 0.0)
  a = xnp.where(hit, a, 1.0)
  sqrt_disc = xnp.sqrt(discriminant)
  t_min = xnp.where(hit, (-b - sqrt_disc) / (2.0 * a), 0.0)
  t_max = xnp.where(hit, (-b + sqrt_disc) / (2.0 * a), 0.0)
  return t_min, t_max, hit
def gather_cameras(cameras, cam_idx, xnp=np):
  """Gathers relevant camera parameters for each ray."""
  pixtocams, camtoworlds, distortion_params = cameras[:3]

  def _maybe_gather(mats):
    # Per-camera stacks (ndim > 2) are indexed; a single shared matrix
    # is returned untouched.
    return mats[cam_idx] if mats.ndim > 2 else mats

  if distortion_params is None:
    distortion_params_idx = None
  else:
    # Scalars are shared across cameras; arrays are gathered per ray.
    distortion_params_idx = {
        k: (v if xnp.isscalar(v) else v[cam_idx])
        for k, v in distortion_params.items()  # pytype: disable=attribute-error  # jax-ndarray
    }
  return (
      _maybe_gather(pixtocams),
      _maybe_gather(camtoworlds),
      distortion_params_idx,
  )
def cast_ray_batch(
    cameras,
    rays,
    camtype = ProjectionType.PERSPECTIVE,
    scene_bbox = None,
    xnp = np,
):
  """Maps from input cameras and uncast Rays batch to output cast Rays batch.

  `cameras` is a Tuple of five sets of camera parameters.
    pixtocams: 1 or N stacked [3, 3] inverse intrinsic matrices.
    camtoworlds: 1 or N stacked [3, 4] extrinsic pose matrices.
    distortion_params: optional, dict[str, float] containing pinhole model
      distortion parameters.
    pixtocam_ndc: optional, [3, 3] inverse intrinsic matrix for mapping to NDC.
    z_range: optional range of Z values

  Args:
    cameras: described above.
    rays: ray data including integer pixel coordinates and camera indices.
      These fields can be an arbitrary batch shape.
    camtype: camera_utils.ProjectionType, fisheye or perspective camera.
    scene_bbox: min and max corner of scene bounding box, if applicable.
    xnp: either numpy or jax.numpy.

  Returns:
    rays: Rays dataclass with computed 3D world space ray data.
  """
  # rays.cam_idx has shape [..., 1], remove this hanging dimension.
  cam_idx = rays.cam_idx[Ellipsis, 0]
  # Select each ray's camera parameters by its camera index.
  cameras_idx = gather_cameras(cameras, cam_idx, xnp=xnp)
  pixtocams, camtoworlds, distortion_params = cameras_idx
  pixtocam_ndc, z_range = cameras[3:5]

  # Compute rays from pixel coordinates.
  origins, directions, viewdirs, radii, imageplane = pixels_to_rays(
      rays.pixels[Ellipsis, 0],
      rays.pixels[Ellipsis, 1],
      pixtocams,
      camtoworlds,
      distortion_params=distortion_params,
      pixtocam_ndc=pixtocam_ndc,
      camtype=camtype,
      xnp=xnp,
  )

  if z_range is not None:
    # Crop rays to the z slab: advance each hitting ray's origin to the slab
    # entry and rescale its direction so t in [0, 1] spans the slab. Rays
    # that miss the slab entirely (t_max < t_min) are left unchanged.
    t_min, t_max = rays_planes_intersection(
        z_range[0], z_range[1], origins, directions, xnp
    )
    t_min = xnp.broadcast_to(t_min[Ellipsis, None], origins.shape)
    t_max = xnp.broadcast_to(t_max[Ellipsis, None], origins.shape)
    hit_mask = t_max < t_min
    origins = xnp.where(hit_mask, origins, origins + directions * t_min)
    directions = xnp.where(hit_mask, directions, directions * (t_max - t_min))

  # Preserve all metadata and add the cast rays.
  rays = rays.replace(
      origins=origins,
      directions=directions,
      viewdirs=viewdirs,
      radii=radii,
      imageplane=imageplane,
  )
  if scene_bbox is not None:
    rays = modify_rays_with_bbox(rays, scene_bbox, xnp=xnp)
  return rays
def cast_general_rays(
    camtoworld,
    pixtocam,
    height,
    width,
    near,
    far,
    distortion_params = None,
    pixtocam_ndc = None,
    camtype = ProjectionType.PERSPECTIVE,
    xnp = np,
):
  """Wrapper for generating a general ray batch."""
  pix_x_int, pix_y_int = pixel_coordinates(width, height, xnp=xnp)
  # Positional Rays fields: origins, directions, viewdirs, radii, imageplane.
  ray_args = pixels_to_rays(
      pix_x_int,
      pix_y_int,
      pixtocam,
      camtoworld,
      distortion_params=distortion_params,
      pixtocam_ndc=pixtocam_ndc,
      camtype=camtype,
      xnp=xnp,
  )
  # Broadcast per-ray scalars across the image grid, adding a channel dim.
  to_grid = lambda v: xnp.broadcast_to(v, pix_x_int.shape)[Ellipsis, None]
  return utils.Rays(
      *ray_args,
      pixels=xnp.stack([pix_x_int, pix_y_int], axis=-1),
      near=to_grid(near),
      far=to_grid(far),
      cam_idx=to_grid(0),
  )
def cast_pinhole_rays(
    camtoworld,
    height,
    width,
    focal,
    near,
    far,
    xnp = np,
):
  """Generates a pinhole camera ray batch (w/o distortion)."""
  # An ideal pinhole is the general path with a perfect inverse intrinsic.
  pixtocam = get_pixtocam(focal, width, height, xnp=xnp)
  return cast_general_rays(
      camtoworld,
      pixtocam,
      height,
      width,
      near,
      far,
      camtype=ProjectionType.PERSPECTIVE,
      xnp=xnp,
  )
def cast_spherical_rays(
    camtoworld,
    height,
    width,
    near,
    far,
    xnp,
):
  """Generates a spherical camera ray batch."""
  # Inverse intrinsic mapping pixels to angles spanning the full
  # 2*pi (azimuth) x pi (inclination) panorama.
  pano_pixtocam = xnp.diag(
      xnp.array([2.0 * np.pi / width, np.pi / height, 1.0])
  )
  return cast_general_rays(
      camtoworld,
      pano_pixtocam,
      height,
      width,
      near,
      far,
      camtype=ProjectionType.PANORAMIC,
      xnp=xnp,
  )
def jax_camera_from_tuple(
    camera_tuple,
    image_size,
    projection_type,
):
  """Converts a camera tuple into a JAX camera.

  Args:
    camera_tuple: A tuple containing `inv_intrinsics`, the inverse intrinsics
      matrix; `extrinsics`, the camera to world matrix; and
      `distortion_params`, the dictionary of distortion parameters.
    image_size: An array containing the (width, height) image size.
    projection_type: The projection type of the camera.

  Returns:
    A JAX camera class instance encoding the same camera information.

  Raises:
    ValueError: if projection_type is neither perspective nor fisheye.
  """
  if projection_type.value not in {
      ProjectionType.PERSPECTIVE.value,
      ProjectionType.FISHEYE.value,
  }:
    raise ValueError(f'Projection {projection_type} is not supported.')
  inv_intrinsics, extrinsic, distortion_params = camera_tuple[:3]
  # Recover focal length and principal point from the forward intrinsics.
  intrinsics = jnp.linalg.inv(inv_intrinsics)
  focal_length = intrinsics[0, 0]
  principal_point = intrinsics[:2, 2]
  pixel_aspect_ratio = intrinsics[1, 1] / intrinsics[0, 0]
  radial_distortion = None
  tangential_distortion = None
  if distortion_params is not None:
    if (
        'k1' in distortion_params
        and 'k2' in distortion_params
        and 'k3' in distortion_params
    ):
      # k4 is optional and included only when present.
      radial_keys = ['k1', 'k2', 'k3', 'k4']
      radial_distortion = jnp.array(
          [distortion_params[k] for k in radial_keys if k in distortion_params]
      )
    if 'p1' in distortion_params and 'p2' in distortion_params:
      tangential_distortion = jnp.array([
          distortion_params['p1'],
          distortion_params['p2'],
      ])

  # Homogenize the [3, 4] camera-to-world matrix so it can be inverted.
  extrinsic = jnp.concatenate(
      [extrinsic[:3, :4], jnp.array([[0, 0, 0, 1]])], axis=0
  )
  # Convert to OpenCV coordinates.
  extrinsic = math.matmul(extrinsic, jnp.diag(jnp.array([1, -1, -1, 1])))
  world_to_cam = jnp.linalg.inv(extrinsic)
  camera = jaxcam.Camera.create(
      focal_length=focal_length,
      pixel_aspect_ratio=pixel_aspect_ratio,
      radial_distortion=radial_distortion,
      tangential_distortion=tangential_distortion,
      principal_point=principal_point,
      image_size=image_size,
      is_fisheye=(projection_type.value == ProjectionType.FISHEYE.value),
  )
  camera = jaxcam.update_world_to_camera_matrix(camera, world_to_cam)
  return camera
def tuple_from_jax_camera(
    jax_camera,
):
  """Converts a JAX camera into a camera tuple.

  Inverse of `jax_camera_from_tuple`: returns
  (pix_to_cam, cam_to_world, distortion_params).
  """
  focal_x = jax_camera.focal_length
  focal_y = jax_camera.focal_length * jax_camera.pixel_aspect_ratio
  intrinsic = jnp.block([
      [focal_x, jax_camera.skew, jax_camera.principal_point[0]],
      [0, focal_y, jax_camera.principal_point[1]],
      [0, 0, 1],
  ])
  pix_to_cam = jnp.linalg.inv(intrinsic)
  world_to_cam = jaxcam.world_to_camera_matrix(jax_camera)
  cam_to_world = jnp.linalg.inv(world_to_cam)
  # Convert back to OpenGL coordinates.
  cam_to_world = math.matmul(cam_to_world, jnp.diag(jnp.array([1, -1, -1, 1])))
  # Drop the homogeneous bottom row to match the [3, 4] pose convention.
  cam_to_world = cam_to_world[:3, :]
  distortion_params = None
  if jax_camera.has_distortion:
    distortion_params = {}
    if jax_camera.has_radial_distortion:
      distortion_params.update({
          'k1': jax_camera.radial_distortion[0],
          'k2': jax_camera.radial_distortion[1],
          'k3': jax_camera.radial_distortion[2],
          'k4': jax_camera.radial_distortion[3],
      })
    if jax_camera.has_tangential_distortion:
      distortion_params.update({
          'p1': jax_camera.tangential_distortion[0],
          'p2': jax_camera.tangential_distortion[1],
      })
  return pix_to_cam, cam_to_world, distortion_params
def rotation_distance(
    rotation_mat1, rotation_mat2
):
  """Computes the angle between two rotation matrices in degrees.

  Args:
    rotation_mat1: (3, 3) The first batch of rotation matrix.
    rotation_mat2: (3, 3) The second batch of rotation matrix.

  Returns:
    The angle in degrees between 0 and 180.
  """
  # Compare rotations via their axis-angle (SO(3) log-map) representations.
  log1 = rigid_body.log_so3(rotation_mat1)
  log2 = rigid_body.log_so3(rotation_mat2)
  error_deg = jnp.degrees(jnp.linalg.norm(log1 - log2, axis=-1))
  # Fold angles above 180 degrees back into [0, 180].
  return jnp.where(  # pytype: disable=bad-return-type  # jnp-type
      error_deg < 180, error_deg, 360 - error_deg
  )
def compute_camera_metrics(
    cameras_gt, cameras_pred
):
  """Computes the metrics between two cameras.

  Args:
    cameras_gt: ground-truth batch of cameras.
    cameras_pred: predicted batch of cameras with the same batch shape.

  Returns:
    Dict of per-camera absolute error arrays: focal length, position,
    per-axis translation, orientation angle (degrees), principal point,
    and per-coefficient distortion errors when predictions carry them.
  """
  orientation_diffs = jax.vmap(rotation_distance)(
      cameras_pred.orientation, cameras_gt.orientation
  )
  translation_diffs = jnp.abs(cameras_pred.translation - cameras_gt.translation)
  diffs = {
      'focal_length': jnp.abs(
          cameras_pred.focal_length - cameras_gt.focal_length
      ),
      'position': jnp.linalg.norm(
          cameras_pred.position - cameras_gt.position, axis=-1
      ),
      'translation_x': translation_diffs[Ellipsis, 0],
      'translation_y': translation_diffs[Ellipsis, 1],
      'translation_z': translation_diffs[Ellipsis, 2],
      'orientation': jnp.abs(orientation_diffs),
      'principal_points': jnp.linalg.norm(
          cameras_pred.principal_point - cameras_gt.principal_point,
          axis=-1,
      ),
  }
  if cameras_pred.radial_distortion is not None:
    # When GT has no distortion, compare predictions against zeros.
    radial_distortion_gt = jnp.zeros(4)
    if cameras_gt.has_radial_distortion:
      radial_distortion_gt = cameras_gt.radial_distortion
    for i in range(cameras_pred.radial_distortion.shape[-1]):
      diffs[f'radial_distortion_{i}'] = jnp.abs(
          cameras_pred.radial_distortion[Ellipsis, i]
          - radial_distortion_gt[Ellipsis, i]
      )
  if cameras_pred.tangential_distortion is not None:
    tangential_distortion_gt = jnp.zeros(2)
    if cameras_gt.has_tangential_distortion:
      # Bug fix: this previously read `cameras_gt.radial_distortion`, which
      # compared tangential predictions against the wrong coefficients.
      tangential_distortion_gt = cameras_gt.tangential_distortion
    for i in range(cameras_pred.tangential_distortion.shape[-1]):
      diffs[f'tangential_distortion_{i}'] = jnp.abs(
          cameras_pred.tangential_distortion[Ellipsis, i]
          - tangential_distortion_gt[Ellipsis, i]
      )
  return diffs
def perturb_cameras(
    rng,
    cameras,
    sigma_look_at,
    sigma_position,
    sigma_focal_length = 0.0,
    sigma_dolly_z = 0.0,
    single_dolly = True,
    dolly_use_average = False,
):
  """Randomly perturb camera positions and orientations.

  For position the 3D coordinate is simply shifted according to
  an offset vector. For the orientation an offset angle is calculated based
  on spherical coordinates. The underlying offsets are randomly chosen using
  normal distributions based on the input sigmas.

  Args:
    rng: A PRNGKey.
    cameras: Cameras to perturb.
    sigma_look_at: Strength of look-at position offset. Higher means stronger.
    sigma_position: Strength of position offset. Higher means stronger.
    sigma_focal_length: Strength of focal length zoom z-axis scale. Higher
      means stronger. This is essentially a percentage (0.2 means 20%).
    sigma_dolly_z: Strength of Dolly zoom z-axis scale. Higher means stronger.
      This is essentially a percentage (0.2 means 20%).
    single_dolly: If True, only have a single perturbation for dolly zoom.
    dolly_use_average: If True, set the dolly z to the average of the input
      instead of perturbing.

  Returns:
    Perturbed cameras.
  """
  # Dolly zoom.
  if sigma_dolly_z > 0.0 or dolly_use_average:
    # Turn out "percentage" into a log scale. This is equivalent to having
    # minval = log(1+s) and maxval = log(1/(1+s)) but sampling from a normal
    # distribution.
    log_sigma_dolly_z = jnp.log1p(sigma_dolly_z)
    rng, dolly_key = random.split(rng)
    translation = cameras.translation
    x, y, z = jnp.split(translation, 3, -1)
    if dolly_use_average:
      new_z = jnp.broadcast_to(z.mean(axis=0, keepdims=True), z.shape)
    elif single_dolly:
      # One shared z-scale for all cameras.
      new_z = z * jnp.exp(random.normal(dolly_key, (1,)) * log_sigma_dolly_z)
    else:
      # Independent z-scale per camera.
      new_z = z * jnp.exp(random.normal(dolly_key, z.shape) * log_sigma_dolly_z)
    # Scale focal length proportionally to the z change (dolly-zoom pairing).
    new_focal_length = cameras.focal_length * (new_z / z).squeeze(-1)
    new_translation = jnp.concatenate([x, y, new_z], axis=-1)
    # Recover the world-space position from the new translation: p = -R^T t.
    new_position = jax.vmap(spin_math.matmul)(
        -cameras.orientation.swapaxes(-1, -2), new_translation
    )
    cameras = cameras.replace(
        position=new_position, focal_length=new_focal_length
    )

  # Perturb focal length (log-normal multiplicative noise).
  rng, key = random.split(rng)
  new_focal_length = cameras.focal_length * jnp.exp(
      random.normal(key, cameras.shape) * jnp.log1p(sigma_focal_length)
  )
  cameras = cameras.replace(focal_length=new_focal_length)

  camera_positions = cameras.position
  up_vectors = -cameras.orientation[Ellipsis, 1, :]

  # Perturb camera positions along a random unit direction.
  rng, key = random.split(rng)
  perturb_dir = spin_math.normalize(random.normal(key, camera_positions.shape))
  camera_positions_perturbed = np.array(
      sigma_position * perturb_dir + camera_positions
  )

  # Perturb look-at point.
  # Look-at target: point on each optical axis closest to the world origin.
  look_at_positions = jax.vmap(geometry.line_closest_point)(
      cameras.position, cameras.optical_axis, jnp.zeros_like(cameras.position)
  )
  rng, key = random.split(rng)
  # NOTE(review): this uses math.normalize while the position branch above
  # uses spin_math.normalize — presumably equivalent; confirm.
  perturb_dir = math.normalize(random.normal(key, camera_positions.shape))
  look_at_positions_perturbed = np.array(
      sigma_look_at * perturb_dir + look_at_positions
  )

  # Apply the look-at function to rebuild each camera's orientation.
  new_cameras = []
  for camera, camera_position, look_at_position, up_vector in zip(
      cameras,
      camera_positions_perturbed,
      look_at_positions_perturbed,
      up_vectors,
  ):
    new_cameras.append(
        jaxcam.look_at(
            camera=camera,
            eye=camera_position,
            center=look_at_position,
            world_up=up_vector,
        )
    )
  cameras = jaxcam.concatenate(new_cameras)
  return cameras
|
evocodebench_data_43
|
# coding=utf-8
# Copyright 2023 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tools for manipulating coordinate spaces and distances along rays."""
from internal import geopoly
from internal import math
import jax
from jax import random
import jax.numpy as jnp
import numpy as np
def contract(x):
  """Contracts points towards the origin (Eq 10 of arxiv.org/abs/2111.12077)."""
  # Clamping to 1 makes the map the identity inside the unit ball |x| < 1.
  mag_sq = jnp.maximum(1, jnp.sum(x**2, axis=-1, keepdims=True))
  # Outside the ball, points are squashed to magnitude 2 - 1/|x| < 2.
  return ((2 * jnp.sqrt(mag_sq) - 1) / mag_sq) * x
def inv_contract(z):
  """The inverse of contract()."""
  # Clamping to 1 makes the map the identity inside the unit ball |z| < 1.
  mag_sq = jnp.maximum(1, jnp.sum(z**2, axis=-1, keepdims=True))
  # Undo the contraction scale for points outside the unit ball.
  return z / (2 * jnp.sqrt(mag_sq) - mag_sq)
def track_linearize(fn, mean, cov):
  """Pushes Gaussians (means + covariances) through `fn` by linearization.

  Linearizes `fn` around each mean and uses Covar[Ax + y] = A Covar[x] A^T
  (see https://cs.nyu.edu/~roweis/notes/gaussid.pdf), ala a Kalman filter.

  Args:
    fn: A function that can be applied to `mean`.
    mean: a tensor of Gaussian means, where the last axis is the dimension.
    cov: a tensor of covariances, where the last two axes are the dimensions.

  Returns:
    fn_mean: the transformed means.
    fn_cov: the transformed covariances.

  Raises:
    ValueError: if `cov` does not have one more axis than `mean`.
  """
  if (len(mean.shape) + 1) != len(cov.shape):
    raise ValueError('cov must be non-diagonal')
  fn_mean, lin_fn = jax.linearize(fn, mean)
  # Apply the Jacobian to both sides of cov: J (J cov)^T-style vmap trick.
  apply_jac = jax.vmap(lin_fn, -1, -2)
  fn_cov = apply_jac(apply_jac(cov))
  return fn_mean, fn_cov
def track_isotropic(fn, mean, scale):
  """Pushes isotropic Gaussians (means + scalar scales) through `fn`.

  Scalar counterpart of track_linearize: each Gaussian is isotropic and
  represented by a single `scale` (with variance scale**2), and the output
  Gaussians are isotropic too.

  Args:
    fn: A function that can be applied to `mean`.
    mean: a tensor of Gaussian means, where the last axis is the dimension.
    scale: a tensor of scales, with the same shape as means[..., -1].

  Returns:
    fn_mean: the transformed means.
    fn_scale: the transformed scales.

  Raises:
    ValueError: if mean and scale shapes are inconsistent.
  """
  if mean.shape[:-1] != scale.shape:
    raise ValueError(
        f'mean.shape[:-1] {mean.shape}[:-1] != scale.shape {scale.shape}.'
    )
  d = mean.shape[-1]
  fn_mean, lin_fn = jax.linearize(fn, mean)
  if scale is None:
    return fn_mean, None
  # Jacobian of fn at each mean, built by pushing the identity basis through
  # the linearization.
  basis = jnp.broadcast_to(jnp.eye(d), mean.shape + (d,))
  jac = jax.vmap(lin_fn, in_axes=-1, out_axes=-1)(basis)
  # |det J|^(1/d) is the geometric mean of the Jacobian's absolute
  # eigenvalues: the isotropic scaling fn applies locally at each mean.
  eps = jnp.finfo(jnp.float32).tiny  # Guard against an inf gradient at 0.
  abs_det = jnp.maximum(eps, jnp.abs(jnp.linalg.det(jac)))
  # Special case d == 3 for speed's sake.
  gmean = jnp.cbrt(abs_det) if d == 3 else abs_det ** (1 / d)
  return fn_mean, scale * gmean
def contract3_isoscale(x):
  """A fast version of track_isotropic(contract, *)'s scaling for 3D inputs."""
  if x.shape[-1] != 3:
    raise ValueError(f'Inputs must be 3D, are {x.shape[-1]}D.')
  sq_norm = jnp.maximum(1, jnp.sum(x**2, axis=-1))
  # Equivalent to cbrt((2 * sqrt(sq_norm) - 1) ** 2) / sq_norm, evaluated in
  # log space.
  log_num = jnp.log(2 * jnp.sqrt(sq_norm) - 1)
  return jnp.exp((2 / 3) * log_num - jnp.log(sq_norm))
def construct_ray_warps(fn, t_near, t_far, *, fn_inv=None):
  """Construct a bijection between metric distances and normalized distances.

  See the text around Equation 11 in https://arxiv.org/abs/2111.12077 for a
  detailed explanation.

  Args:
    fn: the function to ray distances.
    t_near: a tensor of near-plane distances.
    t_far: a tensor of far-plane distances.
    fn_inv: Optional, if not None then it's used as the inverse of fn().

  Returns:
    t_to_s: a function that maps distances to normalized distances in [0, 1].
    s_to_t: the inverse of t_to_s.
  """
  if fn is None:
    fn_fwd = lambda x: x
    fn_inv = lambda x: x
  else:
    fn_fwd = fn
    if fn_inv is None:
      # A simple mapping from some functions to their inverse, looked up by
      # the forward function's name. Unknown names raise KeyError.
      fn_inv = {
          'reciprocal': jnp.reciprocal,
          'log': jnp.exp,
          'exp': jnp.log,
          'sqrt': jnp.square,
          'square': jnp.sqrt,
      }[fn.__name__]

  fn_t_near = fn_fwd(t_near)
  fn_t_far = fn_fwd(t_far)

  # Forcibly clip t to the range of valid values, to guard against inf's.
  def t_clip(t):
    return jnp.clip(t, t_near, t_far)

  def t_to_s(t):
    return (fn_fwd(t_clip(t)) - fn_t_near) / (fn_t_far - fn_t_near)

  def s_to_t(s):
    return t_clip(fn_inv(s * fn_t_far + (1 - s) * fn_t_near))

  return t_to_s, s_to_t
def expected_sin(mean, var):
  """Compute the mean of sin(x), x ~ N(mean, var)."""
  # The closed form damps the sine by exp(-var / 2): a large variance drives
  # the expectation toward zero.
  damping = jnp.exp(-0.5 * var)
  return damping * math.safe_sin(mean)
def integrated_pos_enc(mean, var, min_deg, max_deg):
  """Encode `x` with sinusoids scaled by 2^[min_deg, max_deg).

  Args:
    mean: tensor, the mean coordinates to be encoded
    var: tensor, the variance of the coordinates to be encoded.
    min_deg: int, the min degree of the encoding.
    max_deg: int, the max degree of the encoding.

  Returns:
    encoded: jnp.ndarray, encoded variables.
  """
  freqs = 2.0 ** jnp.arange(min_deg, max_deg)
  out_shape = mean.shape[:-1] + (-1,)
  # Scale the means by each frequency and the variances by each squared
  # frequency, flattening the (frequency, channel) axes together.
  mean_scaled = jnp.reshape(mean[Ellipsis, None, :] * freqs[:, None], out_shape)
  var_scaled = jnp.reshape(
      var[Ellipsis, None, :] * freqs[:, None] ** 2, out_shape
  )
  # sin(x + pi/2) == cos(x), so one expected_sin call yields E[sin] and E[cos].
  return expected_sin(
      jnp.concatenate([mean_scaled, mean_scaled + 0.5 * jnp.pi], axis=-1),
      jnp.concatenate([var_scaled, var_scaled], axis=-1),
  )
def lift_and_diagonalize(mean, cov, basis):
  """Project `mean` and `cov` onto basis and diagonalize the projected cov."""
  lifted_mean = math.matmul(mean, basis)
  # diag(B^T C B), computed without materializing the full projected matrix.
  lifted_cov_diag = jnp.sum(basis * math.matmul(cov, basis), axis=-2)
  return lifted_mean, lifted_cov_diag
def pos_enc(x, min_deg, max_deg, append_identity=True):
  """The positional encoding used by the original NeRF paper."""
  freqs = 2.0 ** jnp.arange(min_deg, max_deg)
  # (..., s, c) -> (..., s*c).
  xb = jnp.reshape(x[Ellipsis, None, :] * freqs[:, None], x.shape[:-1] + (-1,))
  # Note that we're not using safe_sin, unlike IPE. Shifting the second copy
  # by pi/2 turns it into cosines: (..., s*c + s*c).
  four_feat = jnp.sin(jnp.concatenate([xb, xb + 0.5 * jnp.pi], axis=-1))
  if not append_identity:
    return four_feat
  return jnp.concatenate([x, four_feat], axis=-1)
def sqrtm(mat, return_eigs=False):
  """Take the matrix square root of a PSD matrix [..., d, d]."""
  eigvec, eigval = jax.lax.linalg.eigh(
      mat, symmetrize_input=False, sort_eigenvalues=False
  )
  # sqrt(M) = V sqrt(L) V^T for the eigendecomposition M = V L V^T.
  root = math.safe_sqrt(eigval)[Ellipsis, None, :]
  mat_sqrt = math.matmul(eigvec * root, jnp.moveaxis(eigvec, -2, -1))
  if return_eigs:
    return mat_sqrt, (eigvec, eigval)
  return mat_sqrt
def isotropize(cov, mode='accurate'):
  """Turn covariances into isotropic covariances with the same determinant."""
  d = cov.shape[-1]
  if d == 1:
    # 1x1 covariances are already isotropic.
    return cov
  if mode == 'fast':
    # Direct determinant; cheap but can underflow for tiny covariances.
    det = jnp.linalg.det(cov)
    diag_val = det ** (1 / d)
    is_invalid = (det <= jnp.finfo(jnp.float32).tiny) | ~jnp.isfinite(det)
  elif mode == 'accurate':
    # Log-space determinant avoids under/overflow.
    log_det = jnp.linalg.slogdet(cov)[1]
    diag_val = jnp.exp(log_det / d)
    is_invalid = ~jnp.isfinite(log_det)
  else:
    raise ValueError(f'mode={mode} not implemented.')
  cov_iso = diag_val[Ellipsis, None, None] * jnp.eye(d)
  # Guard against NaN outputs when `det` is super small. Note that this does
  # not guard against NaN gradients!
  return jnp.where(
      is_invalid[Ellipsis, None, None], jnp.zeros_like(cov), cov_iso
  )
def construct_perp_basis(directions):
  """Construct a perpendicular basis for each 3-vector in `directions`."""
  if directions.shape[-1] != 3:
    raise ValueError(f'directions must be 3D, but is {directions.shape[-1]}D')
  # To generate a vector perpendicular to `directions`, take a cross-product
  # with an arbitrary vector [0, 0, 1].
  cand_a = jnp.cross(directions, np.array([0.0, 0.0, 1.0]))
  # In the rare case that `directions` is very close to [0, 0, 1], fall back
  # to an alternate cross-product with [1, 1, 1].
  cand_b = jnp.cross(directions, np.array([1.0, 1.0, 1.0]))
  degenerate = jnp.all(jnp.abs(cand_a) < np.finfo(np.float32).eps, axis=-1)
  perp1 = jnp.where(degenerate[Ellipsis, None], cand_b, cand_a)
  # Crossing `directions` with `perp1` yields the third basis vector.
  perp2 = jnp.cross(directions, perp1)

  def unitize(z):
    return z / jnp.sqrt(jnp.sum(z**2, axis=-1, keepdims=True))

  return unitize(perp1), unitize(perp2)
def hexify(rng, *, origins, directions, radii, tdist):
  """Produce hexagon-shaped samples from ray segments.

  Each t-interval of each ray is turned into 6 "control point" samples laid
  out on a hexagon around the ray, using the mip-NeRF conical-frustum moments.

  Args:
    rng: a PRNG key, or None for a deterministic sample layout.
    origins: ray origins (assumed [..., 3], matching construct_perp_basis —
      TODO confirm against caller).
    directions: ray directions, same batch shape as `origins`.
    radii: per-ray radii used to scale the perpendicular offsets.
    tdist: distances along each ray delimiting the segments; each adjacent
      pair (tdist[..., i], tdist[..., i+1]) defines one segment.

  Returns:
    control: the multisample control points in world coordinates.
    perp_mag: the perpendicular offset magnitude of each control point.
  """
  # Construct a base set of angles, by linspacing [0, 2pi] in a specific order.
  # This is one of two orderings of angles that doesn't induce any anisotropy
  # into the sample covariance of the multisample coordinates. Any rotation and
  # mirroring along the z-axis of this ordering is also valid.
  # There exists one alternative valid ordering, which is [0, 3, 2, 5, 4, 1].
  # This seems to work less well though likely because of the strong correlation
  # between adjacent angles.
  thetas = (np.pi / 3) * np.array([0, 2, 4, 3, 5, 1])
  # Lift the angles to the size of the rays.
  sz = tdist.shape[:-1] + (tdist.shape[-1] - 1, len(thetas))
  thetas = jnp.broadcast_to(thetas, sz)
  if rng is not None:
    # Randomly reverse the order of half of the hexes.
    key, rng = random.split(rng)
    flip = random.bernoulli(key, shape=sz[:-1])
    thetas = jnp.where(flip[Ellipsis, None], thetas[Ellipsis, ::-1], thetas)
    # Rotate each hex by some random amount.
    key, rng = random.split(rng)
    thetas += (2 * jnp.pi) * random.uniform(key, shape=sz[:-1])[Ellipsis, None]
  else:
    # If we're deterministic, flip and shift every other hex by 30 degrees.
    flip = jnp.arange(thetas.shape[-2]) % 2
    thetas = jnp.where(flip[Ellipsis, None], thetas[Ellipsis, ::-1], thetas)
    thetas += (flip * jnp.pi / 6)[Ellipsis, None]
  # TODO(barron): Plumb through the dx/dy frame for the original ray in the
  # image plane, to avoid the need of this.
  perp_axis1, perp_axis2 = construct_perp_basis(directions)
  # Grab each t-interval's midpoint and half-width.
  t0, t1 = tdist[Ellipsis, :-1], tdist[Ellipsis, 1:]
  s = (t0 + t1) / 2
  d = (t1 - t0) / 2
  # Compute the length along the ray for each multisample, using mip-NeRF math.
  cz = t0[Ellipsis, None] + math.safe_div(d, (d**2 + 3 * s**2))[Ellipsis, None] * (
      (t1**2 + 2 * s**2)[Ellipsis, None]
      + (3 / np.sqrt(7))
      * (np.arange(6) * (2 / 5) - 1)
      * math.safe_sqrt(((d**2 - s**2) ** 2 + 4 * s**4))[Ellipsis, None]
  )
  # Compute the offset from the ray for each multisample.
  perp_mag = jnp.sqrt(0.5) * radii[Ellipsis, None, :] * cz
  # Go from ray coordinate to world coordinates.
  cx = perp_mag * jnp.cos(thetas)
  cy = perp_mag * jnp.sin(thetas)
  control = (
      origins[Ellipsis, None, None, :]
      + perp_axis1[Ellipsis, None, None, :] * cx[Ellipsis, None]
      + perp_axis2[Ellipsis, None, None, :] * cy[Ellipsis, None]
      + directions[Ellipsis, None, None, :] * cz[Ellipsis, None]
  )
  return control, perp_mag
def unscented_transform(mean, cov, basis, axis=0):
  """Construct "sigma points" along `axis` from each mean and covariance.

  Args:
    mean: Gaussian means, where the last axis is the dimension.
    cov: Gaussian covariances, where the last two axes are the dimensions.
    basis: a string selecting the sigma-point layout. One of:
      'mean' (just the mean — disables the transform), 'random_<N>' (N random
      samples from the Gaussian with a fixed seed), '<polyhedron>_<tess>'
      where polyhedron is tetrahedron/icosahedron/octahedron (3D only),
      'julier' (2*d+1 symmetric points), or 'menegaz' (d+1 points).
    axis: the axis along which the control points are stacked.

  Returns:
    The control points, which are `mean` expanded along `axis` plus
    basis-dependent offsets.

  Raises:
    ValueError: if `basis` is unknown, or a polyhedron basis is requested for
      a non-3D input.
  """
  d = cov.shape[-1]
  mean_ex = jnp.expand_dims(mean, axis)
  if basis == 'mean':
    # This effectively disables the unscented transform.
    return mean_ex
  if basis.startswith('random_'):
    num_random = int(basis.split('_')[-1])
    # TODO(barron): use a non-fixed random seed?
    noise = random.multivariate_normal(
        random.PRNGKey(0),
        jnp.zeros_like(mean),
        cov,
        (num_random,) + mean.shape[:-1],
    )
    # nan_to_num guards against degenerate covariances producing NaN samples.
    control = mean_ex + jnp.moveaxis(jnp.nan_to_num(noise), 0, axis)
    return control
  # All remaining bases offset the mean by (matrix sqrt of cov) @ transform.
  sqrtm_cov = sqrtm(cov)
  if any([
      basis.startswith(x) for x in ['tetrahedron', 'icosahedron', 'octahedron']
  ]):
    # Use tessellated regular polyhedra vertices (and vec(0)) as control points.
    if d != 3:
      raise ValueError(f'Input is {d}D, but polyhedra are only defined for 3D.')
    base_shape, angular_tesselation = basis.split('_')
    transform = geopoly.generate_basis(
        base_shape, int(angular_tesselation), remove_symmetries=False
    ).T
    transform1 = np.concatenate([np.zeros((d, 1)), transform], axis=-1)
    # Normalize each row to unit RMS so the offsets have a consistent scale.
    transform1 /= np.sqrt(np.mean(transform1**2, axis=1))[:, None]
    control = mean_ex + jnp.moveaxis(
        math.matmul(sqrtm_cov, transform1), -1, axis
    )
  elif basis == 'julier':
    # The most basic symmetric unscented transformation from the original paper,
    # which yields 2*d+1 control points.
    offsets = np.sqrt(d + 0.5) * jnp.moveaxis(sqrtm_cov, -1, axis)
    control = jnp.concatenate(
        [mean_ex, mean_ex + offsets, mean_ex - offsets], axis=axis
    )
  elif basis == 'menegaz':
    # A compact unscented transformation from
    # folk.ntnu.no/skoge/prost/proceedings/cdc-ecc-2011/data/papers/2263.pdf
    # which yields d+1 control points.
    if d == 3:
      # A hand-optimized version of the d==3 case.
      sqrtm_cov_sum = jnp.sum(sqrtm_cov, axis=-1, keepdims=True)
      offsets = jnp.concatenate(
          [-sqrtm_cov_sum, 2 * sqrtm_cov - sqrtm_cov_sum / 3], axis=-1
      )
      control = mean_ex + jnp.moveaxis(offsets, -1, axis)
    else:
      transform = np.sqrt(d + 1) * np.eye(d) + (1 - np.sqrt(d + 1)) / d
      # == sqrt((d+1)) * sqrtm(eye(d) - 1/(d+1))
      transform1 = np.concatenate([-np.ones((d, 1)), transform], axis=-1)
      control = mean_ex + jnp.moveaxis(
          math.matmul(sqrtm_cov, transform1), -1, axis
      )
  else:
    raise ValueError(f'basis={basis} not implemented.')
  return control
def compute_control_points(
    means,
    covs,
    rays,
    tdist,
    rng,
    unscented_mip_basis,
    unscented_scale_mult,
):
  """Wrapper to compute unscented control points for the MLP class."""
  if unscented_mip_basis == 'hexify':
    return hexify(
        rng,
        origins=rays.origins,
        directions=rays.directions,
        radii=rays.radii,
        tdist=tdist,
    )
  # Use a normal unscented transformation.
  control = unscented_transform(
      means,
      covs,
      basis=unscented_mip_basis,
      axis=-2,
  )
  if unscented_scale_mult <= 0:
    return control, None
  if rays is None:
    raise SyntaxError(
        'Rays are required as input if unscented_scale_mult > 0.'
    )
  # Mimic the math used by hexify to produce comparable scales.
  t_recon = jnp.sum(
      (control - rays.origins[Ellipsis, None, None, :])
      * rays.directions[Ellipsis, None, None, :],
      axis=-1,
  )
  perp_mag = jnp.sqrt(0.5) * rays.radii[Ellipsis, None, :] * t_recon
  return control, perp_mag
|
evocodebench_data_44
|
_FORMAT_TO_RATIO = {
"kb": 1000,
"mb": 1000**2,
"gb": 1000**3,
"tb": 1000**4,
}
def _convert_bytes_to_int(bytes_str: str) -> int:
"""Convert human-readable byte format to an integer."""
for suffix in _FORMAT_TO_RATIO:
bytes_str = bytes_str.lower().strip()
if bytes_str.lower().endswith(suffix):
try:
return int(float(bytes_str[0 : -len(suffix)]) * _FORMAT_TO_RATIO[suffix])
except ValueError:
raise ValueError(
f"Unsupported value/suffix {bytes_str}. Supported suffix are "
f'{["b"] + list(_FORMAT_TO_RATIO.keys())}.'
)
raise ValueError(f"The supported units are {_FORMAT_TO_RATIO.keys()}")
def _human_readable_bytes(num_bytes: float) -> str:
for unit in ("B", "KB", "MB", "GB", "TB"):
if abs(num_bytes) < 1000.0:
return f"{num_bytes:3.1f} {unit}"
num_bytes /= 1000.0
return f"{num_bytes:.1f} PB"
|
evocodebench_data_45
|
from typing import Any, Callable, Dict, Iterable, List
import numpy as np
from pydantic import fields
# ----- validators -----
def is_odd(cls: type, v: int, field: fields.ModelField) -> int:
    """Check that kernel size are odd numbers.

    Args:
        cls (type): Class type.
        v (int): Value to check.
        field (fields.ModelField): Field descriptor.

    Raises:
        ValueError: Exception raised if number isn't odd.

    Returns:
        int: `v` sent for further processing.
    """
    if v % 2 == 1:
        return v
    raise ValueError(f"{cls.__name__}: {field.name} must be odd numbers.")
def is_binary(cls: type, v: np.ndarray, field: fields.ModelField) -> np.ndarray:
    """Check if array has only boolean values, i.e. is binary.

    Args:
        cls (type): Class type.
        v (np.ndarray): Value to check.
        field (fields.ModelField): Field descriptor.

    Raises:
        ValueError: Exception raised if array doesn't contain bool datatypes.

    Returns:
        np.ndarray: `v` sent for further processing.
    """
    if v.dtype == np.dtype("bool"):
        return v
    raise ValueError(f"{cls.__name__}: {field.name} must be binary. got dtype {v.dtype}")
def is_list_of_points(cls: type, v: np.ndarray, field: fields.ModelField) -> np.ndarray:
    """Check if np.ndarray has shape (_, 2).

    Args:
        cls (type): Class type.
        v (np.ndarray): Value to check.
        field (fields.ModelField): Field descriptor.

    Raises:
        ValueError: Exception raised if array doesn't contain 2D points.

    Returns:
        np.ndarray: `v` sent for further processing.
    """
    if len(v.shape) == 2 and v.shape[1] == 2:
        return v
    raise ValueError(f"{cls.__name__}: {field.name} must have shape (_, 2).")
def is_not_empty(cls: type, v: List[Any], field: fields.ModelField) -> List[Any]:
    """Check that both inputs are not empty.

    Args:
        cls (type): Class type.
        v (List[Any]): Value to check.
        field (fields.ModelField): Field descriptor.

    Raises:
        ValueError: Exception raised if list is empty.

    Returns:
        List[Any]: `v` sent for further processing.
    """
    if not len(v):
        raise ValueError(f"{cls.__name__}: {field.name} list cannot be empty.")
    return v
def is_not_zero_sum(cls: type, v: Any, field: fields.ModelField) -> Any:
    """Check that both inputs are not empty.

    Args:
        cls (type): Class type.
        v (Any): Value to check.
        field (fields.ModelField): Field descriptor.

    Raises:
        ValueError: Raised if v doesn't sum to 0.

    Returns:
        Any: `v` sent for further processing.
    """
    total = np.sum(v)
    if total == 0:
        raise ValueError(f"{cls.__name__}: {field.name} sum cannot be zero.")
    return v
def are_all_positive(cls: type, v: Any, field: fields.ModelField) -> Any:
    """Check that all values are positive.

    Args:
        cls (type): Class type.
        v (Any): Value to check (scalar or iterable; zero counts as positive).
        field (fields.ModelField): Field descriptor.

    Raises:
        ValueError: Raise if not all values in are positive.

    Returns:
        Any: `v` sent for further processing.
    """
    if isinstance(v, Iterable):
        if not all(value >= 0 for value in v):
            raise ValueError(f"{cls.__name__}: all {field.name} must be positive. Received {v}")
        return v
    if v < 0.0:
        raise ValueError(f"{cls.__name__}: {field.name} must be positive. Received {v}")
    return v
def to_dtype_float32(cls: type, v: np.ndarray, field: fields.ModelField) -> np.ndarray:
    """Convert input np.ndarray to dtype np.float32.

    Args:
        cls (type): Class type.
        v (np.ndarray): Value to convert
        field (fields.ModelField): Field descriptor.

    Returns:
        np.ndarray: `v` sent for further processing.
    """
    converted = v.astype(np.float32)
    return converted
# ----- root_validators -----
def is_valid_bbox(cls: type, values: Dict[str, float]) -> Dict[str, float]:
    """Check that the bounding box is valid."""
    x_ok = values["x_min"] < values["x_max"]
    y_ok = values["y_min"] < values["y_max"]
    if x_ok and y_ok:
        return values
    raise ValueError(
        f'{cls.__name__}: invalid bbox. x_min={values["x_min"]}, x_max={values["x_max"]},'
        f' y_min={values["y_min"]}, y_max={values["y_max"]}'
    )
# ----- parametrized validators -----
def is_array_n_dimensions(nb_dimensions: int) -> Callable:
    """Create a pydantic validator checking if an array is n-dimensional.

    Args:
        nb_dimensions (int): number of dimensions the array must have

    Returns:
        Callable: the validator.
    """

    def validator(cls: type, v: np.ndarray, field: fields.ModelField) -> np.ndarray:
        """Check if the array has the right number of dimensions."""
        # An empty (0,)-shaped array is also accepted when expecting 0 dims.
        dims_match = len(v.shape) == nb_dimensions
        empty_as_zero_dim = v.shape == (0,) and nb_dimensions == 0
        if dims_match or empty_as_zero_dim:
            return v
        raise ValueError(
            f"{cls.__name__}: wrong number of dimensions for {field.name}. "
            f"Expected {nb_dimensions}, got {len(v.shape)}"
        )

    return validator
# ----- parametrized root_validators -----
def are_lengths_equal(field1: str, field2: str) -> Callable:
    """Create a pydantic validator checking if the two fields have the same length.

    Args:
        field1 (str): name of the first field
        field2 (str): name of the first field

    Returns:
        Callable: the validator.
    """

    def __root_validator(cls: type, values: Dict[str, List[Any]]) -> Dict[str, List[Any]]:
        """Check if len(field1) equals len(field2)."""
        len1 = len(values[field1])
        len2 = len(values[field2])
        if len1 != len2:
            raise ValueError(
                f"{cls.__name__}: {field1} and {field2} length mismatch, "
                f"resp. {len1} and {len2}"
            )
        return values

    return __root_validator
def are_shapes_equal(field1: str, field2: str) -> Callable:
    """Create a pydantic validator checking if the two fields have the same shape.

    Args:
        field1 (str): name of the first field
        field2 (str): name of the first field

    Returns:
        Callable: the validator.
    """

    def __root_validator(cls: type, values: Dict[str, np.ndarray]) -> Dict[str, np.ndarray]:
        """Check if field1.shape equals field2.shape."""
        if values[field1].shape == values[field2].shape:
            return values
        raise ValueError(f"{cls.__name__}: {field1} and {field2} shape mismatch.")

    return __root_validator
def are_all_shapes_equal(field1: str, field2: str) -> Callable:
    """Create a pydantic validator checking if two lists of array have the same shape per element.

    This function creates a pydantic validator for two lists of np.ndarrays which checks if they have the same length,
    and if all of their element have the same shape one by one.

    Args:
        field1 (str): name of the first field
        field2 (str): name of the first field

    Returns:
        Callable: the validator.
    """

    def __root_validator(cls: type, values: Dict[str, np.ndarray]) -> Dict[str, np.ndarray]:
        """Check if len(field1) equals len(field2) and if every element have the same shape."""
        shapes1 = [element.shape for element in values[field1]]
        shapes2 = [element.shape for element in values[field2]]
        if len(values[field1]) == len(values[field2]) and shapes1 == shapes2:
            return values
        raise ValueError(
            f"{cls.__name__}: {field1} and {field2} shape mismatch, resp. {shapes1} and {shapes2}."
        )

    return __root_validator
|
evocodebench_data_46
|
# coding=utf-8
# Copyright 2023 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Geometry utilities."""
from typing import Union
import chex
from internal import rigid_body
from internal import spin_math
import jax
from jax import numpy as jnp
from jax import random
import numpy as onp
import optax
_ArrayType = Union[onp.ndarray, jnp.ndarray]
def line_distance(point1, dir1, point2,
                  dir2):
  """Compute the distance between two lines in 3D.

  Note that this is the distance between lines and not line segments or rays;
  i.e., it does not consider endpoints and will compute the distance assuming
  the line extends infinitely in both directions.

  Args:
    point1: (3,) a point on the first line.
    dir1: (3,) the direction vector of the first line.
    point2: (3,) a point on the second line.
    dir2: (3,) the direction vector of the second line.

  Returns:
    The distance between the two lines.
  """
  # The skew-line formula degenerates for parallel lines, so compute both
  # candidates and select per the parallelism test.
  parallel = are_lines_parallel(dir1, dir2)
  dist_if_skew = skew_line_distance(point1, dir1, point2, dir2)
  dist_if_parallel = line_to_point_distance(point1, dir1, point2)
  return jnp.where(parallel, dist_if_parallel, dist_if_skew)
def skew_line_closest_points(point1, dir1,
                             point2,
                             dir2):
  """Compute the mutually closest points on two skew lines.

  See:
    https://en.wikipedia.org/wiki/Skew_lines#Nearest_points

  Args:
    point1: a point on the first line.
    dir1: the direction vector of the first line.
    point2: a point on the second line.
    dir2: the direction vector of the second line.

  Returns:
    (c1, c2): the point on line 1 nearest line 2, and vice versa.
  """
  # Make sure direction vectors are unit.
  u1 = spin_math.normalize(dir1)
  u2 = spin_math.normalize(dir2)
  # The vector perpendicular to both lines.
  normal = jnp.cross(u1, u2)

  def _nearest(origin_a, dir_a, origin_b, plane_normal):
    # Intersect line A with the plane through line B spanned by `normal`.
    t = jnp.dot(origin_b - origin_a, plane_normal) / jnp.dot(dir_a, plane_normal)
    return origin_a + t * dir_a

  c1 = _nearest(point1, u1, point2, jnp.cross(u2, normal))
  c2 = _nearest(point2, u2, point1, jnp.cross(u1, normal))
  return c1, c2  # pytype: disable=bad-return-type  # jax-ndarray
def skew_line_distance(point1, dir1,
                       point2, dir2):
  """Compute the shortest distance between two skew lines.

  Args:
    point1: a point on the first line.
    dir1: the direction vector of the first line.
    point2: a point on the second line.
    dir2: the direction vector of the second line.

  Returns:
    The distance between the two skew lines.
  """
  near1, near2 = skew_line_closest_points(point1, dir1, point2, dir2)
  return jnp.linalg.norm(near2 - near1)
def line_closest_point(line_point, line_dir,
                       query_point):
  """Return the closest point on the line to a point.

  Args:
    line_point: a point on the line.
    line_dir: the direction vector of the line.
    query_point: the query point.

  Returns:
    The closest point on the line to the query point.
  """
  # Project the offset onto the (unit) line direction.
  unit_dir = spin_math.normalize(line_dir)
  offset = query_point - line_point
  return line_point + jnp.dot(offset, unit_dir) * unit_dir
def line_to_point_distance(line_point, line_dir,
                           query_point):
  """Return the distance from point to a line.

  Args:
    line_point: a point on the line.
    line_dir: the direction vector of the line.
    query_point: the point to compute the distance to.

  Returns:
    The closest distance between the line and the point.
  """
  nearest = line_closest_point(line_point, line_dir, query_point)
  return jnp.linalg.norm(query_point - nearest)
def ray_sphere_intersection(origin,
                            direction,
                            radius = 1.0):
  """Computes the intersecting point between a ray and a sphere.

  Variables use notation from Wikipedia:
    u: direction of ray
    o: origin of ray

  References:
    https://en.wikipedia.org/wiki/Line%E2%80%93sphere_intersection

  Args:
    origin: The origin of the ray (..., 3).
    direction: The direction of the ray (..., 3), assumed unit-norm.
    radius: The radius of the sphere.

  Returns:
    The intersecting point on the sphere.
  """
  u_dot_o = jnp.sum(direction * origin, axis=-1, keepdims=True)
  # Bug fix: the norm must only reduce the coordinate axis. The previous
  # jnp.linalg.norm(origin, keepdims=True) reduced over *all* axes, which is
  # wrong for batched origins of shape (..., 3).
  origin_norm_sq = jnp.sum(origin**2, axis=-1, keepdims=True)
  nabla = u_dot_o**2 - (origin_norm_sq - radius**2)
  # Since this is a ray and not a line, we only need to consider the case
  # where nabla is positive.
  distance = -u_dot_o + jnp.sqrt(nabla)
  return origin + distance * direction
def are_lines_parallel(dir1, dir2):
  """Return True when the two directions point (nearly) the same way.

  NOTE(review): this only detects same-direction parallelism; anti-parallel
  vectors (dot == -1) are reported as not parallel — confirm intended.
  """
  threshold = 1.0 - jnp.finfo(jnp.float32).eps
  cos_angle = jnp.dot(spin_math.normalize(dir1), spin_math.normalize(dir2))
  return cos_angle >= threshold  # pytype: disable=bad-return-type  # jnp-type
def spherical_equirectangular_grid(
    height,
    width,
    min_elevation = 0,
    max_elevation = jnp.pi,
    min_azimuth = 0,
    max_azimuth = 2 * jnp.pi):
  """Creates an equirectangular grid (panorama) in spherical coordinates.

  Args:
    height: The height of the output grid.
    width: The width of the output grid.
    min_elevation: The minimum value for the elevation.
    max_elevation: The maximum value for the elevation.
    min_azimuth: The minimum value for the azimuth.
    max_azimuth: The maximum value for the azimuth.

  Returns:
    elevations: (height, width) An array containing the elevations.
    azimuths: (height, width) An array containing the azimuths.
  """
  elev_1d = jnp.linspace(min_elevation, max_elevation, height)
  # Prevent duplicate sample since 0 and 2*pi are the same azimuth.
  azim_1d = jnp.linspace(min_azimuth, max_azimuth, width, endpoint=False)
  azimuths, elevations = jnp.meshgrid(azim_1d, elev_1d)
  return elevations, azimuths  # pytype: disable=bad-return-type  # jax-ndarray
def spherical_to_cartesian(
    r,
    theta,
    phi,
):
  """Converts spherical to cartesian coordinates.

  For more details see cartesian_to_spherical below.

  Args:
    r: (..., 1) Radius of spherical coordinate.
    theta: (..., 1) Elevation of spherical coordinate.
    phi: (..., 1) Azimuth of spherical coordinate.

  Returns:
    Cartesian coordinates of shape (..., 3) defined by x, y, z.
  """
  sin_theta = jnp.sin(theta)
  return jnp.stack(
      [
          r * sin_theta * jnp.cos(phi),
          r * sin_theta * jnp.sin(phi),
          r * jnp.cos(theta),
      ],
      axis=-1,
  )
def cartesian_to_spherical(
    cartesian_vector,
    eps = onp.float32(onp.finfo(onp.float32).tiny)
    ):
  """Converts cartesian to spherical coordinates.

  Uses a right-handed coordinate system where z is up and y is right. The
  spherical coordinates are defined by radius (r), inclination (theta)
  ∈ [0, π]) from fixed zenit direction (z) and azimuth (phi) ∈ [0, 2π]) from
  x-axis to y-axis.

  We are using the phyiscal coordinate system as described here:
  https://en.wikipedia.org/wiki/Spherical_coordinate_system.

  Args:
    cartesian_vector: (..., 3) Cartesian coordinates defined by (x, y, z).
    eps: Epsilon used for safe_acos.

  Returns:
    Spherical coordinates as tuple of r, elevation (theta), azimuth (phi).
  """
  x = cartesian_vector[Ellipsis, 0]
  y = cartesian_vector[Ellipsis, 1]
  z = cartesian_vector[Ellipsis, 2]
  # safe_norm keeps the radius away from zero so the division below is finite.
  radius = optax.safe_norm(cartesian_vector, min_norm=eps, axis=-1)
  elevation = spin_math.safe_acos(z / radius)
  azimuth = jnp.arctan2(y, x)
  return radius, elevation, azimuth  # pytype: disable=bad-return-type  # jax-ndarray
def sample_random_points_on_sphere(key, num_points,
                                   min_radius,
                                   max_radius):
  """Sample points uniformly on sphere with random radius within bounds.

  Args:
    key: Seed for random sampling.
    num_points: Number of points to sample.
    min_radius: Minimum euclidean distance of point from center of sphere.
    max_radius: Maximum euclidean distance of point from center of sphere.

  Returns:
    Array of uniform points (N, 3) on sphere with random radius.
  """
  radius_key, dir_key, _ = random.split(key, 3)
  radii = random.uniform(
      radius_key, (num_points, 1), minval=min_radius, maxval=max_radius)
  # A normalized Gaussian sample is uniform on the unit sphere.
  unit_dirs = spin_math.normalize(random.normal(dir_key, (num_points, 3)))
  return unit_dirs * radii  # pytype: disable=bad-return-type  # jax-ndarray
def sample_points_evenly_on_sphere(num_points,):
  """Deterministically sample points on a sphere that are evenly distributed.

  Uses a generalization of the sunflower spiral to sample points that are
  distibuted evenly on a sphere.

  References:
    http://extremelearning.com.au/how-to-evenly-distribute-points-on-a-sphere-more-effectively-than-the-canonical-fibonacci-lattice/#more-3069
    https://mathoverflow.net/questions/24850/is-there-a-generalisation-of-the-sunflower-spiral-to-higher-dimensions
    https://stackoverflow.com/questions/9600801/evenly-distributing-n-points-on-a-sphere/44164075#44164075

  Args:
    num_points: The number of points to sample.

  Returns:
    (num_points, 3) The sampled points.
  """
  phi = (1 + 5**0.5) / 2  # The golden ratio.
  ks = jnp.arange(0, num_points, dtype=jnp.float32) + 0.5
  azimuths = 2 * jnp.pi * phi * ks
  elevations = jnp.arccos(1 - 2 * ks / num_points)
  return spherical_to_cartesian(1.0, elevations, azimuths)  # pytype: disable=wrong-arg-types  # jax-ndarray
def is_point_in_convex_hull(point,
                            hull_normals,
                            hull_offsets,
                            padding = 0.0):
  """Computes whether the given points are inside or outside a convex hull.

  The convex hull is defined using the normals and offsets of a facet.
  If the dot product between a point and a normal is less than the offset, then
  it is on the inner side of that facet. If this is true for all facets, then
  the point is inside the convex hull.

  References:
    http://www.qhull.org/html/index.htm
    https://docs.scipy.org/doc/scipy/reference/generated/scipy.spatial.ConvexHull.html

  Args:
    point: (..., D) An array containing the points to test.
    hull_normals: (F, D) The normals of the facets of the convex hull.
    hull_offsets: (F, D) The offsets of the facets of the convex hull.
    padding: A number to pad the convex hull by. A positive value expands the
      convex hull while a negative number shrinks it.

  Returns:
    A boolean array of shape (...,) that is True if a point is inside the hull
    and False otherwise.
  """
  batch_shape = point.shape[:-1]
  flat_points = point.reshape(-1, point.shape[-1])
  # A point is inside iff it lies on the inner side of every facet.
  projections = hull_normals @ flat_points.T
  inside = (projections <= padding - hull_offsets[:, None]).all(axis=0)
  return inside.reshape(batch_shape)
def cosine_to_deg(array):
  """Converts cosine angle to degrees.

  Args:
    array: containing cosine angles (e.g. result of dot product).

  Returns:
    array with angles as degrees.
  """
  # Clip guards arccos against values marginally outside [-1, 1].
  clamped = array.clip(-1, 1)
  return jnp.degrees(jnp.arccos(clamped))
# TODO(phenzler): Convert this to xnp once we have a more solid code base that
# supports xnp.
def onp_cosine_to_deg(array):
  """Converts cosine angle to degrees.

  Args:
    array: containing cosine angles (e.g. result of dot product).

  Returns:
    array with angles as degrees.
  """
  # Clip guards arccos against values marginally outside [-1, 1].
  clamped = array.clip(-1, 1)
  return onp.degrees(onp.arccos(clamped))
def rotation_distance(rotation_mat1,
                      rotation_mat2):
  """Computes the angle between two rotation matrices in degrees.

  Args:
    rotation_mat1: (3, 3) The first batch of rotation matrix.
    rotation_mat2: (3, 3) The second batch of rotation matrix.

  Returns:
    The angle in degrees between 0 and 180.
  """
  delta = rigid_body.log_so3(rotation_mat1) - rigid_body.log_so3(rotation_mat2)
  angle_deg = jnp.degrees(jnp.linalg.norm(delta, axis=-1))
  # Fold angles above 180 back into [0, 180].
  return jnp.where(
      angle_deg < 180,
      angle_deg,  # pytype: disable=bad-return-type  # jnp-type
      360 - angle_deg)
def compute_bbox_from_xyza(
    xyza,
    padding,
    alpha_threshold = 0.99,
):
  """Computes a bounding box given an xyza array.

  Args:
    xyza: An array of shape (..., 4) containing the XYZ coordinates in the
      first three channels and an alpha value in the last.
    padding: A padding value to be added to all sides.
    alpha_threshold: The threshold at which to binarize the alpha into a mask.

  Returns:
    A bounding box of shape (2, 3) containing (min_coords, max_coords).
  """
  pad = onp.array(padding)
  coords = xyza[Ellipsis, :3]
  opacity = xyza[Ellipsis, 3]
  # Keep only sufficiently opaque points before taking the extrema.
  kept = coords[opacity > alpha_threshold].reshape(-1, 3)
  return onp.stack(
      [kept.min(axis=0) - pad, kept.max(axis=0) + pad], axis=0)
|
evocodebench_data_47
|
# @Author : Shichao Song
# @Email : song.shichao@outlook.com
from typing import Callable
import evaluate
import jieba
from loguru import logger
from text2vec import Similarity
def catch_all_exceptions(func):
    """Decorator that logs any exception raised by `func` and returns None.

    Failures are swallowed deliberately (best-effort metric computation), so
    callers must tolerate a None result.
    """
    from functools import wraps

    @wraps(func)  # Bug fix: preserve the wrapped metric's name/docstring.
    def wrapper(*args, **kwargs):
        try:
            return func(*args, **kwargs)
        except Exception as e:
            logger.warning(repr(e))
            return None

    return wrapper
@catch_all_exceptions
def bleu4_score(
    continuation: str,
    reference: str,
    with_penalty = False
) -> float:
    """Compute the BLEU-4 score of `continuation` against `reference`.

    Args:
        continuation: The generated text to score.
        reference: The single reference text.
        with_penalty: If True, multiply the score by a hand-rolled brevity
            penalty (see notes below).

    Returns:
        The BLEU-4 score, or None if any step raised (the decorator swallows
        exceptions and returns None).
    """
    import math
    from nltk.translate.bleu_score import sentence_bleu
    # NOTE(review): custom_tokenizer is not defined in this module's visible
    # scope — confirm it is provided elsewhere before relying on this.
    # Tokenize the continuation and reference texts using the custom tokenizer function
    continuation_tokens = custom_tokenizer(continuation)
    reference_tokens = custom_tokenizer(reference)
    # Calculate the BLEU score using the nltk.translate.bleu_score.sentence_bleu function
    bleu_score = sentence_bleu([reference_tokens], continuation_tokens, weights=(0.25, 0.25, 0.25, 0.25))
    # If the with_penalty flag is set to True, adjust the BLEU score for brevity penalty
    if with_penalty:
        # NOTE(review): nltk's sentence_bleu already includes a brevity
        # penalty; this applies a second, custom one on top — confirm intended.
        # Calculate the length of the reference and continuation texts
        reference_length = len(reference_tokens)
        continuation_length = len(continuation_tokens)
        # Calculate the brevity penalty factor
        if continuation_length > reference_length:
            brevity_penalty = 1
        else:
            # NOTE(review): divides by continuation_length — an empty
            # continuation raises ZeroDivisionError (swallowed by the
            # decorator, which then returns None).
            brevity_penalty = math.exp(1 - (reference_length / continuation_length))
        # Adjust the BLEU score with the brevity penalty
        bleu_score = bleu_score * brevity_penalty
    return bleu_score
@catch_all_exceptions
def rougeL_score(
    continuation: str,
    reference: str
) -> float:
    """Compute the ROUGE-L score of ``continuation`` against ``reference``.

    Chinese-aware: both texts are tokenized with ``jieba`` before scoring.

    Returns:
        The ROUGE-L score, or ``None`` when an exception is swallowed by
        ``@catch_all_exceptions``.
    """
    # Tokenizer handed to `rouge.compute`; jieba performs Chinese word segmentation.
    f = lambda text: list(jieba.cut(text))
    # NOTE(review): loads the metric from a hard-coded local cache path — confirm
    # this path exists at deployment, otherwise the decorator silently returns None.
    rouge = evaluate.load('uhgeval/.cache/huggingface/rouge')
    results = rouge.compute(predictions=[continuation], references=[[reference]], tokenizer=f, rouge_types=['rougeL'])
    score = results['rougeL']
    return score
@catch_all_exceptions
def kw_precision(
    continuation: str,
    reference: str,
    kw_extracter: Callable[[str], list[str]],
    with_kw_list: bool = True
) -> float | tuple[float, list[str], list[str]]:
    """Measure the rationality of a generated continuation sentence with respect to the original news object.

    Extracts keywords from ``continuation`` and measures which fraction of
    them literally appear in ``reference``.

    Args:
        continuation: Generated text.
        reference: Source text the keywords are checked against.
        kw_extracter: Callable returning the list of keywords of a string.
        with_kw_list: When True, return ``(precision, appeared_kws, kws)``;
            otherwise return only the precision.

    Returns:
        Precision alone, or the full tuple, depending on ``with_kw_list``.
    """
    kws = kw_extracter(continuation)
    if len(kws) == 0:
        # BUGFIX: the conditional expression previously bound only to the last
        # tuple element (`(0, [], ([] if with_kw_list else 0))`), so a tuple was
        # returned even when the caller asked for a bare float.
        return (0, [], []) if with_kw_list else 0
    appeared_kws = [kw for kw in kws if kw in reference]
    precision = len(appeared_kws) / len(kws)
    # BUGFIX: same precedence problem as above — parenthesize the tuple.
    return (precision, appeared_kws, kws) if with_kw_list else precision
@catch_all_exceptions
def bert_score(
    continuation: str,
    reference: str
) -> float:
    """
    Semantic similarity between ``continuation`` and ``reference`` computed
    with a text2vec ``Similarity`` model.

    Note:
        Requesting the network to connect to Hugging Face.

    Returns:
        The similarity score, or ``None`` when an exception is swallowed by
        ``@catch_all_exceptions`` (e.g. the model download fails).
    """
    # NOTE(review): instantiating Similarity() on every call reloads the model;
    # consider caching it at module level if this is a hot path.
    sim = Similarity()
    score = sim.get_score(continuation, reference)
    return score
def classifications(
    predictions: list[bool],
    references: list[bool]
) -> tuple[float, float, float, float]:
    """Compute accuracy, precision, recall and F1 for a binary task.

    Args:
        predictions (list[bool]): Predicted labels (0/1 or bool).
        references (list[bool]): Ground-truth labels (0/1 or bool).

    Returns:
        tuple: ``(accuracy, precision, recall, f1)``; any ratio whose
        denominator would be zero is reported as 0.
    """
    pairs = list(zip(references, predictions))
    tp = sum(ref == 1 and pred == 1 for ref, pred in pairs)
    fp = sum(ref == 0 and pred == 1 for ref, pred in pairs)
    fn = sum(ref == 1 and pred == 0 for ref, pred in pairs)

    precision = tp / (tp + fp) if tp + fp else 0
    recall = tp / (tp + fn) if tp + fn else 0
    f1 = 2 * (precision * recall) / (precision + recall) if precision + recall else 0
    # Accuracy is normalized by the number of predictions, matching the
    # original contract even when the two lists differ in length.
    accuracy = sum(ref == pred for ref, pred in pairs) / len(predictions) if predictions else 0
    return accuracy, precision, recall, f1
|
evocodebench_data_48
|
# Copyright (c) Facebook, Inc. and its affiliates.
from typing import Any
import pydoc
from fvcore.common.registry import Registry # for backward compatibility.
"""
``Registry`` and `locate` provide ways to map a string (typically found
in config files) to callable objects.
"""
__all__ = ["Registry", "locate"]
def _convert_target_to_string(t: Any) -> str:
    """
    Inverse of ``locate()``.

    Args:
        t: any object with ``__module__`` and ``__qualname__``
    """
    module, qualname = t.__module__, t.__qualname__
    # Try progressively longer module prefixes and keep the shortest dotted
    # path that still resolves to the same object. This hides private
    # submodules (``pkg.sub._impl.Cls`` -> ``pkg.sub.Cls``) and makes the
    # string robust to implementation files moving around.
    parts = module.split(".")
    for cut in range(1, len(parts)):
        shortened = ".".join(parts[:cut]) + f".{qualname}"
        try:
            resolved = locate(shortened)
        except ImportError:
            continue
        if resolved is t:
            return shortened
    return f"{module}.{qualname}"
def locate(name: str) -> Any:
    """
    Locate and return an object ``x`` using an input string ``{x.__module__}.{x.__qualname__}``,
    such as "module.submodule.class_name".
    Raise Exception if it cannot be found.
    """
    found = pydoc.locate(name)
    if found is not None:
        return found
    # pydoc.locate mishandles some dotted paths (e.g. torch.optim.sgd.SGD);
    # fall back to hydra's private resolver, which raises on failure.
    try:
        # from hydra.utils import get_method - will print many errors
        from hydra.utils import _locate
    except ImportError as e:
        raise ImportError(f"Cannot dynamically locate object {name}!") from e
    return _locate(name)  # it raises if fails
|
evocodebench_data_49
|
# Copyright (c) Facebook, Inc. and its affiliates.
import io
import numpy as np
import torch
from detectron2 import model_zoo
from detectron2.config import CfgNode, instantiate
from detectron2.data import DatasetCatalog
from detectron2.data.detection_utils import read_image
from detectron2.modeling import build_model
from detectron2.structures import Boxes, Instances, ROIMasks
from detectron2.utils.file_io import PathManager
"""
Internal utilities for tests. Don't use except for writing tests.
"""
def get_model_no_weights(config_path):
    """
    Like model_zoo.get, but do not load any weights (even pretrained)

    Args:
        config_path: model-zoo config path understood by
            ``model_zoo.get_config`` (a yacs yaml config or a LazyConfig file).

    Returns:
        A randomly initialized model built from the config.
    """
    cfg = model_zoo.get_config(config_path)
    if isinstance(cfg, CfgNode):
        # yacs-style config: force CPU when no GPU is present so tests can run.
        if not torch.cuda.is_available():
            cfg.MODEL.DEVICE = "cpu"
        return build_model(cfg)
    else:
        # LazyConfig-style config: instantiate the model node directly.
        return instantiate(cfg.model)
def random_boxes(num_boxes, max_coord=100, device="cpu"):
    """
    Create a random Nx4 boxes tensor, with coordinates < max_coord.

    Boxes come back in (x1, y1, x2, y2) form with x2 >= x1 and y2 >= y1
    guaranteed by construction.
    """
    # Sample corners in [0, max_coord/2); tiny boxes cause numerical
    # instability in box regression, so clamp every coordinate to >= 1.
    boxes = torch.rand(num_boxes, 4, device=device) * (max_coord * 0.5)
    boxes.clamp_(min=1.0)
    # torchvision does `boxes[:, 2:] += torch.rand(N, 2) * 100`, which does not
    # guarantee x2 >= x1 and y2 >= y1; adding the top-left corner onto the
    # bottom-right offsets does.
    boxes[:, 2:] += boxes[:, :2]
    return boxes
def get_sample_coco_image(tensor=True):
    """
    Args:
        tensor (bool): if True, returns 3xHxW tensor.
            else, returns a HxWx3 numpy array.

    Returns:
        an image, in BGR color.
    """
    try:
        file_name = DatasetCatalog.get("coco_2017_val_100")[0]["file_name"]
        if not PathManager.exists(file_name):
            # FileNotFoundError is an OSError subclass, so the `except IOError`
            # below catches it (IOError is an alias of OSError).
            raise FileNotFoundError()
    except IOError:
        # for public CI to run
        file_name = PathManager.get_local_path(
            "http://images.cocodataset.org/train2017/000000000009.jpg"
        )
    ret = read_image(file_name, format="BGR")
    if tensor:
        # HWC uint8 array -> contiguous CHW tensor.
        ret = torch.from_numpy(np.ascontiguousarray(ret.transpose(2, 0, 1)))
    return ret
def convert_scripted_instances(instances):
    """
    Convert a scripted Instances object to a regular :class:`Instances` object
    """
    assert hasattr(
        instances, "image_size"
    ), f"Expect an Instances object, but got {type(instances)}!"
    # Scripted Instances store each field as an attribute prefixed with "_";
    # copy every populated field into a fresh eager Instances.
    result = Instances(instances.image_size)
    for field_name in instances._field_names:
        field_value = getattr(instances, "_" + field_name, None)
        if field_value is not None:
            result.set(field_name, field_value)
    return result
def assert_instances_allclose(input, other, *, rtol=1e-5, msg="", size_as_tensor=False):
    """
    Args:
        input, other (Instances):
        size_as_tensor: compare image_size of the Instances as tensors (instead of tuples).
            Useful for comparing outputs of tracing.

    Raises:
        AssertionError: if the two Instances differ in image_size, field names,
            or field values (beyond the given tolerance).
        ValueError: if a field has a type this helper cannot compare.
    """
    # Normalize scripted Instances into eager ones so both sides compare alike.
    if not isinstance(input, Instances):
        input = convert_scripted_instances(input)
    if not isinstance(other, Instances):
        other = convert_scripted_instances(other)
    if not msg:
        msg = "Two Instances are different! "
    else:
        msg = msg.rstrip() + " "
    size_error_msg = msg + f"image_size is {input.image_size} vs. {other.image_size}!"
    if size_as_tensor:
        assert torch.equal(
            torch.tensor(input.image_size), torch.tensor(other.image_size)
        ), size_error_msg
    else:
        assert input.image_size == other.image_size, size_error_msg
    # Field sets must match exactly (order-insensitive).
    fields = sorted(input.get_fields().keys())
    fields_other = sorted(other.get_fields().keys())
    assert fields == fields_other, msg + f"Fields are {fields} vs {fields_other}!"
    for f in fields:
        val1, val2 = input.get(f), other.get(f)
        if isinstance(val1, (Boxes, ROIMasks)):
            # boxes in the range of O(100) and can have a larger tolerance
            assert torch.allclose(val1.tensor, val2.tensor, atol=100 * rtol), (
                msg + f"Field {f} differs too much!"
            )
        elif isinstance(val1, torch.Tensor):
            if val1.dtype.is_floating_point:
                # Scale the absolute tolerance by the field's magnitude,
                # i.e. an approximate relative comparison.
                mag = torch.abs(val1).max().cpu().item()
                assert torch.allclose(val1, val2, atol=mag * rtol), (
                    msg + f"Field {f} differs too much!"
                )
            else:
                # Integral tensors must match exactly.
                assert torch.equal(val1, val2), msg + f"Field {f} is different!"
        else:
            raise ValueError(f"Don't know how to compare type {type(val1)}")
def reload_script_model(module):
    """
    Save a jit module and load it back.
    Similar to the `getExportImportCopy` function in torch/testing/
    """
    # Round-trip through an in-memory buffer instead of a temp file.
    stream = io.BytesIO()
    torch.jit.save(module, stream)
    stream.seek(0)
    return torch.jit.load(stream)
|
evocodebench_data_50
|
from typing import Tuple, List
import pandas as pd
from autorag.nodes.retrieval import retrieval_node
@retrieval_node
def hybrid_cc(
    ids: Tuple,
    scores: Tuple,
    top_k: int,
    weights: Tuple = (0.5, 0.5)) -> Tuple[List[List[str]], List[List[float]]]:
    """
    Hybrid CC function.
    CC (convex combination) is a method to fuse multiple retrieval results.
    It is a method that first normalizes the scores of each retrieval result,
    and then combines them with the given weights.
    To use this function, you must input ids and scores as tuple.
    It is uniquer than other retrieval modules, because it does not really execute retrieval,
    but just fuse the results of other retrieval functions.
    So you have to run more than two retrieval modules before running this function.
    And collect ids and scores result from each retrieval module.
    Make it as tuple and input it to this function.

    :param ids: The tuple of ids that you want to fuse.
        The length of this must be the same as the length of scores.
    :param scores: The retrieve scores that you want to fuse.
        The length of this must be the same as the length of ids.
    :param top_k: The number of passages to be retrieved.
    :param weights: Weight for each retrieval result.
        Default is (0.5, 0.5).
        You must set its length as the same as the length of ids and scores.
        Plus, the sum of the weights must be 1.
    :return: The tuple of ids and fused scores that fused by CC.
    """
    assert len(ids) == len(scores), "The length of ids and scores must be the same."
    assert len(ids) == len(weights), "The length of weights must be the same as the length of ids."
    assert len(ids) > 1, "You must input more than one retrieval results."
    assert top_k > 0, "top_k must be greater than 0."
    # NOTE(review): float weights may not sum to exactly 1 (e.g. 0.7 + 0.3);
    # consider math.isclose here if users report spurious assertion failures.
    assert sum(weights) == 1, "The sum of weights must be 1."
    # One row per query; `id_i` / `score_i` columns hold the i-th module's results.
    id_df = pd.DataFrame({f'id_{i}': id_list for i, id_list in enumerate(ids)})
    score_df = pd.DataFrame({f'score_{i}': score_list for i, score_list in enumerate(scores)})
    df = pd.concat([id_df, score_df], axis=1)
    def cc_pure_apply(row):
        # Re-pack the row into (ids, scores) tuples and fuse them with cc_pure.
        ids_tuple = tuple(row[[f'id_{i}' for i in range(len(ids))]].values)
        scores_tuple = tuple(row[[f'score_{i}' for i in range(len(scores))]].values)
        return pd.Series(cc_pure(ids_tuple, scores_tuple, weights, top_k))
    df[['cc_id', 'cc_score']] = df.apply(cc_pure_apply, axis=1)
    return df['cc_id'].tolist(), df['cc_score'].tolist()
def cc_pure(ids: Tuple, scores: Tuple, weights: Tuple, top_k: int) -> Tuple[
    List[str], List[float]]:
    """Fuse several retrieval runs by convex combination (CC).

    Each run's scores are min-max normalized per run, combined with the given
    weights, and the ids are returned sorted by the fused score (descending),
    truncated to ``top_k``.
    """
    # One column per retrieval run, indexed by passage id; ids missing from a
    # run become NaN in that run's column.
    per_run = [pd.Series(dict(zip(run_ids, run_scores))) for run_ids, run_scores in zip(ids, scores)]
    table = pd.concat(per_run, axis=1)
    # Min-max normalize each run independently; NaN (id absent from the run,
    # or a constant-score run) contributes 0 to the fusion.
    norm = ((table - table.min()) / (table.max() - table.min())).fillna(0)
    norm['weighted_sum'] = norm.mul(weights).sum(axis=1)
    norm = norm.sort_values(by='weighted_sum', ascending=False)
    return norm.index.tolist()[:top_k], norm['weighted_sum'][:top_k].tolist()
|
evocodebench_data_51
|
"""Tools module"""
# Copyright (c) 2023
# Author: Hugo Delatte <delatte.hugo@gmail.com>
# License: BSD 3 clause
# Implementation derived from:
# scikit-learn, Copyright (c) 2007-2010 David Cournapeau, Fabian Pedregosa, Olivier
# Grisel Licensed under BSD 3 clause.
from collections.abc import Callable, Iterator
from enum import Enum
from functools import wraps
import numpy as np
import numpy.typing as npt
import pandas as pd
import sklearn as sk
import sklearn.base as skb
__all__ = [
"AutoEnum",
"cached_property_slots",
"cache_method",
"input_to_array",
"args_names",
"format_measure",
"bisection",
"safe_split",
"fit_single_estimator",
"fit_and_predict",
"deduplicate_names",
"default_asset_names",
"check_estimator",
]
GenericAlias = type(list[int])
class AutoEnum(str, Enum):
    """Base Enum class used in `skfolio`

    Subclassing ``str`` lets members compare equal to (and serialize as)
    plain strings; ``auto()`` values are the lower-cased member names.
    """
    @staticmethod
    def _generate_next_value_(
        name: str, start: int, count: int, last_values: any
    ) -> str:
        """Overriding `auto()`"""
        # Called by the Enum machinery for `auto()`: value is the lower-cased name.
        return name.lower()
    @classmethod
    def has(cls, value: str) -> bool:
        """Check if a value is in the Enum.

        Parameters
        ----------
        value : str
            Input value.

        Returns
        -------
        x : bool
            True if the value is in the Enum, False otherwise.
        """
        # _value2member_map_ is the Enum-internal value -> member mapping.
        return value in cls._value2member_map_
    def __repr__(self) -> str:
        """Representation of the Enum"""
        return self.name
# noinspection PyPep8Naming
class cached_property_slots:
    """Read-only cached property for classes that use ``__slots__``.

    The computed value is stored in a ``_name`` slot on the instance (instead
    of the instance ``__dict__``, which slotted classes do not have).
    """

    def __init__(self, func):
        self.func = func
        self.public_name = None
        self.private_name = None
        self.__doc__ = func.__doc__

    def __set_name__(self, owner, name):
        # Remember both the attribute name and the slot used for the cache.
        self.public_name = name
        self.private_name = f"_{name}"

    def __get__(self, instance, owner=None):
        # Class-level access returns the descriptor itself.
        if instance is None:
            return self
        if self.private_name is None:
            raise TypeError(
                "Cannot use cached_property instance without calling __set_name__"
                " on it."
            )
        try:
            # Fast path: value already cached in the slot.
            value = getattr(instance, self.private_name)
        except AttributeError:
            value = self.func(instance)
            setattr(instance, self.private_name, value)
        return value

    def __set__(self, instance, owner=None):
        # The property is read-only: assignment always fails.
        raise AttributeError(
            f"'{type(instance).__name__}' object attribute '{self.public_name}' is"
            " read-only"
        )

    __class_getitem__ = classmethod(GenericAlias)
def _make_key(args, kwds) -> int:
"""Make a cache key from optionally typed positional and keyword arguments"""
key = args
if kwds:
for item in kwds.items():
key += item
return hash(key)
def cache_method(cache_name: str) -> Callable:
    """Decorator that caches class methods results into a class dictionary.

    Parameters
    ----------
    cache_name : str
        Name of the dictionary class attribute.

    Returns
    -------
    func : Callable
        Decorating function that caches class methods.
    """
    # `self` is deliberately excluded from the cache key: including it (as
    # functools.lru_cache would) keeps instances alive and leaks memory.
    def decorating_function(method):
        @wraps(method)
        def wrapper(self, *args, **kwargs):
            key = _make_key(args, kwargs)
            try:
                cache = getattr(self, cache_name)
            except AttributeError:
                raise AttributeError(
                    "You first need to create a dictionary class attribute named "
                    f"'{cache_name}'"
                ) from None
            if not isinstance(cache, dict):
                raise AttributeError(
                    f"'The cache named '{cache_name}' must be a "
                    f"dictionary, got {type(cache)}"
                )
            # One sub-dictionary per method name, keyed by the argument hash.
            method_cache = cache.setdefault(method.__name__, {})
            if key not in method_cache:
                method_cache[key] = method(self, *args, **kwargs)
            return method_cache[key]
        return wrapper
    return decorating_function
def args_names(func: object) -> list[str]:
    """Returns the argument names of a function.

    Parameters
    ----------
    func : object
        Function.

    Returns
    -------
    args : list[str]
        The list of function arguments (``self`` excluded).
    """
    code = func.__code__
    # co_varnames starts with the positional arguments, then the locals.
    return [name for name in code.co_varnames[: code.co_argcount] if name != "self"]
def check_estimator(
    estimator: skb.BaseEstimator | None, default: skb.BaseEstimator, check_type: any
):
    """Type-check `estimator` and return a clone of it, or `default` when absent.

    Parameters
    ----------
    estimator : BaseEstimator, optional
        Estimator.
    default : BaseEstimator
        Default estimator to return when `estimator` is `None`.
    check_type : any
        Expected type of the estimator to check against.

    Returns
    -------
    estimator: Estimator
        The checked estimator or the default.
    """
    if estimator is None:
        return default
    if isinstance(estimator, check_type):
        # Clone so the caller's estimator is never fitted/mutated in place.
        return sk.clone(estimator)
    raise TypeError(f"Expected type {check_type}, got {type(estimator)}")
def input_to_array(
items: dict | npt.ArrayLike,
n_assets: int,
fill_value: any,
dim: int,
assets_names: np.ndarray | None,
name: str,
) -> np.ndarray:
"""Convert a collection of items (array-like or dictionary) into
a numpy array and verify its shape.
Parameters
----------
items : np.ndarray | dict | list
Items to verify and convert to array.
n_assets : int
Expected number of assets.
Used to verify the shape of the converted array.
fill_value : any
When `items` is a dictionary, elements that are not in `asset_names` are filled
with `fill_value` in the converted array.
dim : int
Dimension of the final array.
Possible values are `1` or `2`.
assets_names : ndarray, optional
Asset names used when `items` is a dictionary.
name : str
Name of the items used for error messages.
Returns
-------
values : ndarray of shape (n_assets) for dim=1 or (n_groups, n_assets) for dim=2
Converted array.
"""
if dim not in [1, 2]:
raise ValueError(f"dim must be 1 or 2, got {dim}")
if isinstance(items, dict):
if assets_names is None:
raise ValueError(
f"If `{name}` is provided as a dictionary, you must input `X` as a"
" DataFrame with assets names in columns"
)
if dim == 1:
arr = np.array([items.get(asset, fill_value) for asset in assets_names])
else:
# add assets and convert dict to ordered array
arr = {}
for asset in assets_names:
elem = items.get(asset)
if elem is None:
elem = [asset]
elif np.isscalar(elem):
elem = [asset, elem]
else:
elem = [asset, *elem]
arr[asset] = elem
arr = (
pd.DataFrame.from_dict(arr, orient="index")
.loc[assets_names]
.to_numpy()
.T
)
else:
arr = np.asarray(items)
if arr.ndim != dim:
raise ValueError(f"`{name}` must be a {dim}D array, got a {arr.ndim}D array")
if not isinstance(fill_value, str) and np.isnan(arr).any():
raise ValueError(f"`{name}` contains NaN")
if arr.shape[-1] != n_assets:
if dim == 1:
s = "(n_assets,)"
else:
s = "(n_groups, n_assets)"
raise ValueError(
f"`{name}` must be a of shape {s} with n_assets={n_assets}, "
f"got {arr.shape[0]}"
)
return arr
def format_measure(x: float, percent: bool = False) -> str:
    """Format a measure number into a user-friendly string.

    Parameters
    ----------
    x : float
        Number to format.
    percent : bool, default=False
        If this is set to True, the number is formatted in percentage.

    Returns
    -------
    formatted : str
        Formatted string.
    """
    if np.isnan(x):
        return str(x)
    # Magnitude used to pick the precision: percentages display scaled by 100.
    scaled, spec = (x * 100, "%") if percent else (x, "f")
    if scaled == 0:
        digits = 0
    else:
        # Between 2 and 6 decimal places, more for small magnitudes.
        digits = min(6, max(int(-np.log10(abs(scaled))) + 2, 2))
    return "{value:{fmt}}".format(value=x, fmt=f".{digits}{spec}")
def bisection(x: list[np.ndarray]) -> Iterator[list[np.ndarray, np.ndarray]]:
    """Generator to bisect a list of array.

    Parameters
    ----------
    x : list[ndarray]
        A list of array.

    Yields
    ------
    arr : Iterator[list[ndarray, ndarray]]
        Bisected array. Arrays with fewer than two elements are skipped.
    """
    for arr in x:
        size = len(arr)
        if size <= 1:
            # Nothing to split.
            continue
        half = size // 2
        yield [arr[:half], arr[half:]]
def safe_indexing(
X: npt.ArrayLike | pd.DataFrame, indices: npt.ArrayLike | None, axis: int = 0
):
"""
Return rows, items or columns of X using indices.
Parameters
----------
X : array-like
Data from which to sample rows.
indices : array-like, optional
Indices of rows or columns.
The default (`None`) is to select the entire data.
axis : int, default=0
The axis along which `X` will be sub-sampled. `axis=0` will select
rows while `axis=1` will select columns.
Returns
-------
subset :
Subset of X on axis 0.
"""
if indices is None:
return X
if hasattr(X, "iloc"):
return X.take(indices, axis=axis)
if axis == 0:
return X[indices]
return X[:, indices]
def safe_split(
    X: npt.ArrayLike,
    y: npt.ArrayLike | None = None,
    indices: np.ndarray | None = None,
    axis: int = 0,
):
    """Create subset of dataset.

    Slice X, y according to indices for cross-validation.

    Parameters
    ----------
    X : array-like
        Data to be indexed.
    y : array-like
        Data to be indexed.
    indices : ndarray of int, optional
        Rows or columns to select from X and y.
        The default (`None`) is to select the entire data.
    axis : int, default=0
        The axis along which `X` will be sub-sampled. `axis=0` will select
        rows while `axis=1` will select columns.

    Returns
    -------
    X_subset : array-like
        Indexed data.
    y_subset : array-like
        Indexed targets, or None when `y` is None.
    """
    X_subset = safe_indexing(X, indices=indices, axis=axis)
    y_subset = None if y is None else safe_indexing(y, indices=indices, axis=axis)
    return X_subset, y_subset
def fit_single_estimator(
    estimator: any,
    X: npt.ArrayLike,
    y: npt.ArrayLike | None = None,
    indices: np.ndarray | None = None,
    axis: int = 0,
):
    """function used to fit an estimator within a job.

    Parameters
    ----------
    estimator : estimator object implementing 'fit' and 'predict'
        The object to use to fit the data.
    X : array-like of shape (n_observations, n_assets)
        The data to fit.
    y : array-like of shape (n_observations, n_targets), optional
        The target array if provided.
    indices : ndarray of int, optional
        Rows or columns to select from X and y.
        The default (`None`) is to select the entire data.
    axis : int, default=0
        The axis along which `X` will be sub-sampled. `axis=0` will select
        rows while `axis=1` will select columns.

    Returns
    -------
    fitted_estimator : estimator
        The fitted estimator.
    """
    # Subset first (cross-validation fold or column selection), then fit in place.
    X_sub, y_sub = safe_split(X, y, indices=indices, axis=axis)
    estimator.fit(X_sub, y_sub)
    return estimator
def fit_and_predict(
    estimator: any,
    X: npt.ArrayLike,
    y: npt.ArrayLike | None,
    train: np.ndarray,
    test: np.ndarray | list[np.ndarray],
    fit_params: dict,
    method: str,
    column_indices: np.ndarray | None = None,
) -> npt.ArrayLike | list[npt.ArrayLike]:
    """Fit the estimator and predict values for a given dataset split.

    Parameters
    ----------
    estimator : estimator object implementing 'fit' and 'predict'
        The object to use to fit the data.
    X : array-like of shape (n_observations, n_assets)
        The data to fit.
    y : array-like of shape (n_observations, n_factors) or None
        The factor array if provided
    train : ndarray of int of shape (n_train_observations,)
        Indices of training samples.
    test : ndarray of int of shape (n_test_samples,) or list of ndarray
        Indices of test samples or list of indices.
    fit_params : dict
        Parameters that will be passed to ``estimator.fit``.
    method : str
        Invokes the passed method name of the passed estimator.
    column_indices : ndarray, optional
        Indices of columns to select.
        The default (`None`) is to select all columns.

    Returns
    -------
    predictions : array-like or list of array-like
        If `test` is an array, it returns the array-like result of calling
        'estimator.method' on `test`.
        Otherwise, if `test` is a list of arrays, it returns the list of array-like
        results of calling 'estimator.method' on each test set in `test`.
    """
    fit_params = fit_params if fit_params is not None else {}
    # Optional column (asset) subset first, then the train/test row split.
    X, y = safe_split(X, y, indices=column_indices, axis=1)
    X_train, y_train = safe_split(X, y, indices=train, axis=0)
    if y_train is None:
        estimator.fit(X_train, **fit_params)
    else:
        estimator.fit(X_train, y_train, **fit_params)
    # Resolve the requested prediction method (e.g. "predict") by name.
    func = getattr(estimator, method)
    if isinstance(test, list):
        # Multiple test sets (e.g. combinatorial CV): predict each one.
        predictions = []
        for t in test:
            X_test, _ = safe_split(X, indices=t, axis=0)
            predictions.append(func(X_test))
    else:
        X_test, _ = safe_split(X, indices=test, axis=0)
        predictions = func(X_test)
    return predictions
def default_asset_names(n_assets: int) -> np.ndarray:
    """Default asset names are `["x0", "x1", ..., "x(n_assets - 1)"]`

    Parameters
    ----------
    n_assets : int
        Number of assets.

    Returns
    -------
    asset_names : ndarray of str
        Default assets names.
    """
    names = [f"x{i}" for i in range(n_assets)]
    # dtype=object keeps names as Python strings (no fixed-width truncation).
    return np.asarray(names, dtype=object)
def deduplicate_names(names: npt.ArrayLike) -> list[str]:
    """Rename duplicated names by appending "_{duplicate_nb}" at the end.

    This function is inspired by the pandas function `_maybe_dedup_names`.

    Parameters
    ----------
    names : array-like of shape (n_names,)
        List of names.

    Returns
    -------
    names : list[str]
        Deduplicate names.
    """
    out = list(names)
    seen: dict = {}
    for pos, name in enumerate(out):
        n_prior = seen.get(name, 0)
        if n_prior:
            out[pos] = f"{name}_{n_prior}"
        # Count occurrences of the *original* name, not the renamed one.
        seen[name] = n_prior + 1
    return out
|
evocodebench_data_52
|
import concurrent
import json
import logging
import os
import random
import shutil
import signal
import tempfile
import traceback
import types
from abc import abstractmethod
from dataclasses import dataclass
from multiprocessing import Process, Queue
from pathlib import Path
from queue import Empty
from time import sleep, time
from typing import Any, Dict, List, Optional, Tuple, TypeVar, Union
from urllib import parse
import numpy as np
import torch
from tqdm.auto import tqdm as _tqdm
from litdata.constants import (
_BOTO3_AVAILABLE,
_DEFAULT_FAST_DEV_RUN_ITEMS,
_INDEX_FILENAME,
_IS_IN_STUDIO,
_LIGHTNING_CLOUD_LATEST,
_TORCH_GREATER_EQUAL_2_1_0,
)
from litdata.processing.readers import BaseReader, StreamingDataLoaderReader
from litdata.processing.utilities import _create_dataset
from litdata.streaming import Cache
from litdata.streaming.cache import Dir
from litdata.streaming.client import S3Client
from litdata.streaming.dataloader import StreamingDataLoader
from litdata.streaming.resolver import _resolve_dir
from litdata.utilities.broadcast import broadcast_object
from litdata.utilities.packing import _pack_greedily
if _TORCH_GREATER_EQUAL_2_1_0:
from torch.utils._pytree import tree_flatten, tree_unflatten, treespec_loads
if _LIGHTNING_CLOUD_LATEST:
from lightning_cloud.openapi import V1DatasetType
if _BOTO3_AVAILABLE:
import botocore
logger = logging.Logger(__name__)
def _get_num_nodes() -> int:
"""Returns the number of nodes."""
return int(os.getenv("DATA_OPTIMIZER_NUM_NODES", 1))
def _get_node_rank() -> int:
"""Returns the current node rank of the instance."""
return int(os.getenv("DATA_OPTIMIZER_NODE_RANK", 0))
def _get_fast_dev_run() -> int:
"""Returns whether fast dev mode is enabled."""
return bool(int(os.getenv("DATA_OPTIMIZER_FAST_DEV_RUN", 1)))
def _get_default_cache() -> str:
    """Base directory for optimizer caches: /cache on Studio, else the OS tmp dir."""
    if _IS_IN_STUDIO:
        return "/cache"
    return tempfile.gettempdir()
def _get_cache_dir(name: Optional[str] = None) -> str:
    """Returns the cache directory used by the Cache to store the chunks."""
    base = os.getenv("DATA_OPTIMIZER_CACHE_FOLDER", f"{_get_default_cache()}/chunks")
    if name is None:
        return base
    # Strip any leading "/" so `name` joins as a relative sub-path.
    return os.path.join(base, name.lstrip("/"))
def _get_cache_data_dir(name: Optional[str] = None) -> str:
    """Returns the cache data directory used by the DataProcessor workers to download the files."""
    base = os.getenv("DATA_OPTIMIZER_DATA_CACHE_FOLDER", f"{_get_default_cache()}/data")
    if name is None:
        return base
    # Strip any leading "/" so `name` joins as a relative sub-path.
    return os.path.join(base, name.lstrip("/"))
def _wait_for_file_to_exist(s3: S3Client, obj: parse.ParseResult, sleep_time: int = 2) -> Any:
    """Poll S3 until `obj` exists and return its HEAD metadata.

    Retries every `sleep_time` seconds while the object is missing; any other
    client error is re-raised immediately.
    """
    while True:
        try:
            return s3.client.head_object(Bucket=obj.netloc, Key=obj.path.lstrip("/"))
        except botocore.exceptions.ClientError as e:
            # Only swallow "Not Found" (object not uploaded yet); propagate
            # anything else (permissions, wrong bucket, ...).
            if "the HeadObject operation: Not Found" in str(e):
                sleep(sleep_time)
            else:
                raise e
def _wait_for_disk_usage_higher_than_threshold(input_dir: str, threshold_in_gb: int = 25, sleep_time: int = 3) -> None:
usage = shutil.disk_usage(input_dir)
while (usage.free / 1000 / 1000 / 1000) <= threshold_in_gb:
sleep(sleep_time)
usage = shutil.disk_usage(input_dir)
return
def _download_data_target(input_dir: Dir, cache_dir: str, queue_in: Queue, queue_out: Queue) -> None:
    """This function is used to download data from a remote directory to a cache directory to optimise reading."""
    # One S3 client per worker process.
    s3 = S3Client()
    while True:
        # 2. Fetch from the queue
        r: Optional[Tuple[int, List[str]]] = queue_in.get()
        # 3. Terminate the process if we received a termination signal
        if r is None:
            queue_out.put(None)
            return
        # 4. Unpack
        index, paths = r
        # 5. Check whether all the files are already downloaded
        if input_dir.path and all(
            os.path.exists(p.replace(input_dir.path, cache_dir) if input_dir else p) for p in paths
        ):
            queue_out.put(index)
            continue
        if input_dir.url is not None or input_dir.path is not None:
            if input_dir.url:
                # 6. Wait for the removers to catch up when we are downloading data.
                _wait_for_disk_usage_higher_than_threshold("/", 25)
            # 7. Download all the required paths to unblock the current index
            # NOTE(review): if input_dir.path is None while input_dir.url is set,
            # `local_path` below may be referenced before assignment — confirm
            # callers always resolve input_dir.path in that configuration.
            for path in paths:
                if input_dir.path:
                    local_path = path.replace(input_dir.path, cache_dir)
                if input_dir.url and input_dir.path:
                    # Rewrite the local-style path into its remote URL equivalent.
                    path = path.replace(input_dir.path, input_dir.url)
                obj = parse.urlparse(path)
                if obj.scheme == "s3":
                    dirpath = os.path.dirname(local_path)
                    os.makedirs(dirpath, exist_ok=True)
                    with open(local_path, "wb") as f:
                        s3.client.download_fileobj(obj.netloc, obj.path.lstrip("/"), f)
                elif os.path.isfile(path):
                    # Studio's own storage is directly readable; no copy needed.
                    if not path.startswith("/teamspace/studios/this_studio"):
                        os.makedirs(os.path.dirname(local_path), exist_ok=True)
                        shutil.copyfile(path, local_path)
                else:
                    raise ValueError(f"The provided {input_dir.url} isn't supported.")
        # 7. Inform the worker the current files are available
        queue_out.put(index)
def _remove_target(input_dir: Dir, cache_dir: str, queue_in: Queue) -> None:
    """This function is used to delete files from the cache directory to minimise disk space."""
    while True:
        # Block until a batch of paths arrives from the uploader.
        paths = queue_in.get()
        if paths is None:
            # Termination sentinel: stop the remover.
            return
        for path in paths:
            if input_dir:
                # Map an input-dir path back into the local cache before deleting.
                if not path.startswith(cache_dir) and input_dir.path is not None:
                    path = path.replace(input_dir.path, cache_dir)
                if os.path.exists(path):
                    os.remove(path)
            elif os.path.exists(path) and "s3_connections" not in path:
                os.remove(path)
def _upload_fn(upload_queue: Queue, remove_queue: Queue, cache_dir: str, output_dir: Dir) -> None:
    """This function is used to upload optimised chunks from a local to remote dataset directory."""
    obj = parse.urlparse(output_dir.url if output_dir.url else output_dir.path)
    if obj.scheme == "s3":
        # Only create an S3 client when the destination actually is S3.
        s3 = S3Client()
    while True:
        # Queue items are either a bare file path or a (tmpdir, path) pair.
        data: Optional[Union[str, Tuple[str, str]]] = upload_queue.get()
        tmpdir = None
        if isinstance(data, str) or data is None:
            local_filepath = data
        else:
            tmpdir, local_filepath = data
        # Terminate the process if we received a termination signal
        if local_filepath is None:
            return
        # Upload the file to the target cloud storage
        if not local_filepath.startswith(cache_dir):
            local_filepath = os.path.join(cache_dir, local_filepath)
        if obj.scheme == "s3":
            try:
                # Key layout: when a tmpdir is given, preserve the path relative
                # to it; otherwise upload flat under the output prefix.
                if tmpdir is None:
                    output_filepath = os.path.join(str(obj.path).lstrip("/"), os.path.basename(local_filepath))
                else:
                    output_filepath = os.path.join(str(obj.path).lstrip("/"), local_filepath.replace(tmpdir, "")[1:])
                s3.client.upload_file(
                    local_filepath,
                    obj.netloc,
                    output_filepath,
                )
            except Exception as e:
                # NOTE(review): upload errors are only printed, not retried or
                # re-raised — confirm this best-effort behavior is intended.
                print(e)
        elif output_dir.path:
            if tmpdir is None:
                output_filepath = os.path.join(output_dir.path, os.path.basename(local_filepath))
            else:
                output_filepath = os.path.join(output_dir.path, local_filepath.replace(tmpdir, "")[1:])
            os.makedirs(os.path.dirname(output_filepath), exist_ok=True)
            # Local destination: move instead of copy to avoid double disk usage.
            shutil.move(local_filepath, output_filepath)
        else:
            raise ValueError(f"The provided {output_dir.path} isn't supported.")
        # Inform the remover to delete the file
        if remove_queue and os.path.exists(local_filepath):
            remove_queue.put([local_filepath])
def _map_items_to_workers_sequentially(num_workers: int, user_items: List[Any]) -> List[List[Any]]:
    """Split `user_items` into contiguous chunks and return this node's share.

    Items are spread as evenly as possible over `num_nodes * num_workers`
    workers (earlier workers receive one extra item when the division is not
    exact), and only the `num_workers` chunks belonging to the current node
    (per DATA_OPTIMIZER_NODE_RANK) are returned.
    """
    total_nodes = _get_num_nodes()
    node_rank = _get_node_rank()
    total_workers = total_nodes * num_workers
    base, extra = divmod(len(user_items), total_workers)
    all_chunks = []
    start = 0
    for worker_idx in range(total_workers):
        count = base + 1 if worker_idx < extra else base
        all_chunks.append(user_items[start:start + count])
        start += count
    # BUGFIX: the previous version returned the chunks for *all* nodes' workers
    # and then raised "Improper assignment of items to workers" whenever more
    # than one node was used (len(result) == total_workers != num_workers);
    # `node_rank` was computed but never used. Slice out this node's workers.
    worker_chunks = all_chunks[node_rank * num_workers:(node_rank + 1) * num_workers]
    if len(worker_chunks) != num_workers:
        raise RuntimeError("Improper assignment of items to workers")
    return worker_chunks
def _map_items_to_workers_weighted(
    num_workers: int,
    user_items: List[Any],
    weights: Optional[List[int]] = None,
    file_size: bool = True,
) -> List[List[Any]]:
    """Distribute `user_items` over all workers by greedy bin-packing on `weights`.

    Returns only the item lists for this node's workers (based on
    DATA_OPTIMIZER_NODE_RANK), each shuffled with np.random.permutation.
    """
    # Associate the items to the workers based on number of nodes and node rank.
    # Unweighted case: every item counts the same.
    weights = [1] * len(user_items) if weights is None else weights
    num_nodes = _get_num_nodes()
    node_rank = _get_node_rank()
    world_size = num_nodes * num_workers
    worker_items, worker_weights = _pack_greedily(items=user_items, weights=weights, num_bins=world_size)
    worker_ids_this_node = range(node_rank * num_workers, (node_rank + 1) * num_workers)
    for worker_id, size in worker_weights.items():
        if worker_id not in worker_ids_this_node:
            continue
        # `file_size=True` means the weights are byte counts; print MB for readability.
        if file_size:
            print(f"Worker {worker_id} gets {size / 1e6:.1f} MB ({len(worker_items[worker_id])} files)")
        else:
            print(f"Worker {worker_id} gets ({len(worker_items[worker_id])}) items for a total weight of {size}.")
    return [np.random.permutation(worker_items[worker_id]).tolist() for worker_id in worker_ids_this_node]
def _get_num_bytes(item: Any, base_path: str) -> int:
    """Sum the on-disk size, in bytes, of every file path found inside ``item``.

    Non-string leaves and paths that do not exist on disk are skipped.
    NOTE(review): ``base_path`` is currently unused — presumably intended for
    resolving relative paths; confirm before relying on it.

    Raises:
        RuntimeError: If a referenced file exists but is empty.
    """
    leaves, _ = tree_flatten(item)
    total = 0
    for leaf in leaves:
        if not isinstance(leaf, str):
            continue
        leaf = Path(leaf).resolve()
        if not leaf.exists():
            continue
        size = os.path.getsize(leaf)
        # A zero-byte file almost certainly indicates a corrupted input.
        if size == 0:
            raise RuntimeError(f"The file {leaf} has 0 bytes!")
        total += size
    return total
def _get_item_filesizes(items: List[Any], base_path: str = "") -> List[int]:
    """Computes the total size in bytes of all file paths for every datastructure in the given list."""
    num_cpus = os.cpu_count() or 1
    # Oversubscribe threads on larger machines: the work is I/O bound.
    workers = num_cpus * 2 if num_cpus > 4 else num_cpus
    with concurrent.futures.ThreadPoolExecutor(max_workers=workers) as pool:
        futures = [pool.submit(_get_num_bytes, item, base_path) for item in items]
        # `futures` preserves the submission order, so results align with `items`.
        return [future.result() for future in futures]
def _to_path(element: str) -> str:
    """Normalize a path string; teamspace paths inside a Studio are kept verbatim."""
    if _IS_IN_STUDIO and element.startswith("/teamspace"):
        return element
    return str(Path(element).resolve())
def _is_path(input_dir: Optional[str], element: Any) -> bool:
    """Return True when ``element`` is a string that denotes a filesystem path."""
    if not isinstance(element, str):
        return False
    candidate = element
    if _IS_IN_STUDIO and input_dir is not None:
        if candidate.startswith(input_dir):
            return True
        # A relative path may still live under the input dir once made absolute.
        candidate = str(Path(candidate).absolute())
        if candidate.startswith(input_dir):
            return True
    # Otherwise fall back to an existence check on disk.
    return os.path.exists(candidate)
class BaseWorker:
    """Processes one shard of the user items.

    Each worker pipelines three stages through helper processes and queues:
    downloaders fetch input files locally, the worker runs the recipe on each
    item, and uploaders push the produced files to the output directory
    (optionally a remover deletes local copies afterwards).
    """

    def __init__(
        self,
        worker_index: int,
        num_workers: int,
        node_rank: int,
        data_recipe: "DataRecipe",
        input_dir: Dir,
        output_dir: Dir,
        items: List[Any],
        progress_queue: Queue,
        error_queue: Queue,
        stop_queue: Queue,
        num_downloaders: int,
        num_uploaders: int,
        remove: bool,
        reader: Optional[BaseReader] = None,
    ) -> None:
        """The BaseWorker is responsible to process the user data."""
        self.worker_index = worker_index
        self.num_workers = num_workers
        self.node_rank = node_rank
        self.data_recipe = data_recipe
        self.input_dir = input_dir
        self.output_dir = output_dir
        self.items = items
        self.num_items = len(self.items)
        self.num_downloaders = num_downloaders
        self.num_uploaders = num_uploaders
        self.remove = remove
        self.reader = reader
        # One list of local file paths per item, filled by `_collect_paths`.
        self.paths: List[List[str]] = []
        self.remover: Optional[Process] = None
        self.downloaders: List[Process] = []
        self.uploaders: List[Process] = []
        self.to_download_queues: List[Queue] = []
        self.to_upload_queues: List[Queue] = []
        self.stop_queue = stop_queue
        # Downloaders push item indices here once their files are available
        # locally; a `None` marks a downloader as finished.
        self.ready_to_process_queue: Queue = Queue()
        self.remove_queue: Queue = Queue()
        self.progress_queue: Queue = progress_queue
        self.error_queue: Queue = error_queue
        # Number of items processed so far (drives progress reporting and
        # round-robin upload sharding).
        self._counter = 0
        self._last_time = time()
        # Monotonic index of produced outputs (one item may yield several).
        self._index_counter = 0

    def run(self) -> None:
        """Entry point: set up the pipeline and process items until done.

        Any exception is printed and forwarded to the main process through
        `error_queue` instead of crashing silently.
        """
        try:
            self._setup()
            self._loop()
        except Exception:
            traceback_format = traceback.format_exc()
            print(traceback_format)
            self.error_queue.put(traceback_format)
        print(f"Worker {str(_get_node_rank() * self.num_workers + self.worker_index)} is done.")

    def _setup(self) -> None:
        # Order matters: caches and paths must exist before the helper
        # processes that consume them are started.
        self._set_environ_variables()
        self._create_cache()
        self._collect_paths()
        self._start_downloaders()
        self._start_uploaders()
        self._start_remover()

    def _loop(self) -> None:
        """Main processing loop: consume ready item indices until every
        downloader has signalled completion, then drain uploaders/remover."""
        num_downloader_finished = 0
        while True:
            index = self.ready_to_process_queue.get()
            if index is None:
                # A `None` means one downloader finished all its work.
                num_downloader_finished += 1
                if num_downloader_finished == self.num_downloaders:
                    print(f"Worker {str(_get_node_rank() * self.num_workers + self.worker_index)} is terminating.")
                    if isinstance(self.data_recipe, DataChunkRecipe):
                        self._handle_data_chunk_recipe_end()
                    # Conditional expression: truthy when either url or path is set.
                    if self.output_dir.url if self.output_dir.url else self.output_dir.path:
                        # Inform the uploaders they are done working
                        for i in range(self.num_uploaders):
                            self.to_upload_queues[i].put(None)
                        # Wait for them all to be finished
                        for uploader in self.uploaders:
                            uploader.join()
                    if self.remove:
                        assert self.remover
                        self.remove_queue.put(None)
                        self.remover.join()
                    if self.progress_queue:
                        self.progress_queue.put((self.worker_index, self._counter))
                    return
                continue
            if isinstance(self.data_recipe, DataChunkRecipe):
                self._handle_data_chunk_recipe(index)
            else:
                self._handle_data_transform_recipe(index)
            self._counter += 1
            # Don't send the last progress update, so the main thread awaits for the uploader and remover
            if self.progress_queue and (time() - self._last_time) > 1 and self._counter < (self.num_items - 2):
                self.progress_queue.put((self.worker_index, self._counter))
                self._last_time = time()
            if self.remove and self.input_dir.path is not None and self.reader is None:
                self.remove_queue.put(self.paths[index])
            # A non-blocking peek at the stop queue lets the main process
            # terminate this worker between items.
            try:
                self.stop_queue.get(timeout=0.0001)
                return
            except Empty:
                pass

    def _set_environ_variables(self) -> None:
        # set the optimizer global rank and world_size
        os.environ["DATA_OPTIMIZER_GLOBAL_RANK"] = str(_get_node_rank() * self.num_workers + self.worker_index)
        os.environ["DATA_OPTIMIZER_NUM_WORKERS"] = str(self.num_workers)

    def _create_cache(self) -> None:
        """Create the local cache directories (and the chunk cache for chunk recipes)."""
        self.cache_data_dir = _get_cache_data_dir()
        os.makedirs(self.cache_data_dir, exist_ok=True)
        self.cache_chunks_dir = _get_cache_dir()
        os.makedirs(self.cache_chunks_dir, exist_ok=True)
        # Transform recipes write files directly; no chunk cache is needed.
        if isinstance(self.data_recipe, DataTransformRecipe):
            return
        self.cache = Cache(
            self.cache_chunks_dir,
            chunk_bytes=self.data_recipe.chunk_bytes,
            chunk_size=self.data_recipe.chunk_size,
            compression=self.data_recipe.compression,
        )
        # The cache rank is this worker's global rank across all nodes.
        self.cache._reader._rank = _get_node_rank() * self.num_workers + self.worker_index

    def _try_upload(self, data: Optional[Union[str, Tuple[str, str]]]) -> None:
        """Round-robin a produced file (or a (output_dir, filepath) pair) to an uploader."""
        # Nothing to do without data or without any remote/local output target.
        if not data or (self.output_dir.url if self.output_dir.url else self.output_dir.path) is None:
            return
        if isinstance(data, str):
            assert os.path.exists(data), data
        else:
            assert os.path.exists(data[-1]), data
        self.to_upload_queues[self._counter % self.num_uploaders].put(data)

    def _collect_paths(self) -> None:
        """Extract file paths from every item and remap them into the local cache.

        When there is nothing to download (no input path, or a reader is in
        charge of reading), all indices are marked ready immediately.
        """
        if self.input_dir.path is None or self.reader is not None:
            for index in range(len(self.items)):
                self.ready_to_process_queue.put(index)
            # One `None` per downloader so `_loop` sees them all as finished.
            for _ in range(self.num_downloaders):
                self.ready_to_process_queue.put(None)
            return
        items = []
        for item in self.items:
            flattened_item, spec = tree_flatten(item)
            # For speed reasons, we assume starting with `self.input_dir` is enough to be a real file.
            # Other alternative would be too slow.
            # TODO: Try using dictionary for higher accuracy.
            indexed_paths = {
                index: _to_path(element)
                for index, element in enumerate(flattened_item)
                if _is_path(self.input_dir.path, element)
            }
            if len(indexed_paths) == 0:
                raise ValueError(
                    f"The provided item {item} didn't contain any filepaths. The input_dir is {self.input_dir.path}."
                )
            paths = []
            for index, path in indexed_paths.items():
                paths.append(path)
                # Studio paths are readable in-place; everything else is
                # redirected into the local download cache.
                if self.input_dir and not self.input_dir.path.startswith("/teamspace/studios/this_studio"):
                    path = path.replace(self.input_dir.path, self.cache_data_dir)
                flattened_item[index] = path
            self.paths.append(paths)
            # Rebuild the item with its paths pointing at the cache.
            items.append(tree_unflatten(flattened_item, spec))
        self.items = items

    def _start_downloaders(self) -> None:
        """Spawn downloader processes and shard the paths across them round-robin."""
        if self.input_dir.path is None or self.reader is not None:
            return
        for _ in range(self.num_downloaders):
            to_download_queue: Queue = Queue()
            p = Process(
                target=_download_data_target,
                args=(
                    self.input_dir,
                    self.cache_data_dir,
                    to_download_queue,
                    self.ready_to_process_queue,
                ),
            )
            p.start()
            self.downloaders.append(p)
            self.to_download_queues.append(to_download_queue)
        for index, paths in enumerate(self.paths):
            self.to_download_queues[index % self.num_downloaders].put((index, paths))
        # A trailing `None` tells each downloader there is no more work.
        for downloader_index in range(self.num_downloaders):
            self.to_download_queues[downloader_index].put(None)

    def _start_remover(self) -> None:
        """Spawn the (single) remover process, if removal was requested."""
        if not self.remove:
            return
        self.remover = Process(
            target=_remove_target,
            args=(
                self.input_dir,
                self.cache_data_dir,
                self.remove_queue,
            ),
        )
        self.remover.start()

    def _start_uploaders(self) -> None:
        """Spawn uploader processes, one queue each, when an output target exists."""
        if self.output_dir.path is None and self.output_dir.url is None:
            return
        for _ in range(self.num_uploaders):
            to_upload_queue: Queue = Queue()
            p = Process(
                target=_upload_fn,
                args=(
                    to_upload_queue,
                    self.remove_queue,
                    self.cache_chunks_dir,
                    self.output_dir,
                ),
            )
            p.start()
            self.uploaders.append(p)
            self.to_upload_queues.append(to_upload_queue)

    def _handle_data_chunk_recipe(self, index: int) -> None:
        """Run a chunk recipe on one item and persist its output(s) to the cache."""
        try:
            current_item = self.items[index] if self.reader is None else self.reader.read(self.items[index])
            item_data_or_generator = self.data_recipe.prepare_item(current_item)
            # `prepare_item` may yield several outputs per item.
            if isinstance(item_data_or_generator, types.GeneratorType):
                for item_data in item_data_or_generator:
                    if item_data is not None:
                        chunk_filepath = self.cache._add_item(self._index_counter, item_data)
                        self._try_upload(chunk_filepath)
                        self._index_counter += 1
            elif item_data_or_generator is not None:
                chunk_filepath = self.cache._add_item(self._index_counter, item_data_or_generator)
                self._try_upload(chunk_filepath)
                self._index_counter += 1
        except Exception as e:
            raise RuntimeError(f"Failed processing {self.items[index]}") from e

    def _handle_data_chunk_recipe_end(self) -> None:
        """Flush the cache and queue any remaining chunk files for upload."""
        chunks_filepaths = self.cache.done()
        if chunks_filepaths and len(self.to_upload_queues):
            for i, chunk_filepath in enumerate(chunks_filepaths):
                if isinstance(chunk_filepath, str) and os.path.exists(chunk_filepath):
                    self.to_upload_queues[i % self.num_uploaders].put(chunk_filepath)

    def _handle_data_transform_recipe(self, index: int) -> None:
        """Run a transform recipe on one item; upload every file it wrote."""
        # Don't use a context manager to avoid deleting files that are being uploaded.
        output_dir = tempfile.mkdtemp()
        item = self.items[index] if self.reader is None else self.reader.read(self.items[index])
        item_data = self.data_recipe.prepare_item(item, str(output_dir), len(self.items) - 1 == index)
        if item_data is not None:
            raise ValueError(
                "When using a `DataTransformRecipe`, the `prepare_item` shouldn't return anything."
                " Simply store your files under the output_dir."
            )
        filepaths = []
        for directory, _, filenames in os.walk(output_dir):
            for filename in filenames:
                filepaths.append(os.path.join(directory, filename))
        for filepath in filepaths:
            self._try_upload((output_dir, filepath))
class DataWorkerProcess(BaseWorker, Process):
    def __init__(self, *args: Any, **kwargs: Any) -> None:
        """The DataWorkerProcess is responsible to process the user data inside processes."""
        # Both bases are initialised explicitly (instead of via super()) because
        # BaseWorker and Process have unrelated __init__ signatures.
        BaseWorker.__init__(self, *args, **kwargs)
        Process.__init__(self)
@dataclass
class _Result:
    """Summary of a finished processing run, returned by ``DataRecipe._done``."""

    # Number of items processed.
    size: Optional[int] = None
    # Total bytes across all chunks (sum of each chunk's `chunk_bytes`).
    num_bytes: Optional[int] = None
    # Structure of the stored data (unflattened tree spec), if available.
    data_format: Optional[str] = None
    # Compression algorithm name used for the chunks, if any.
    compression: Optional[str] = None
    # Number of chunk files produced.
    num_chunks: Optional[int] = None
    # Per-chunk item counts (skipped when there are 1024+ chunks).
    num_bytes_per_chunk: Optional[List[int]] = None
# Generic placeholder for the per-item metadata type handled by a recipe.
T = TypeVar("T")
class DataRecipe:
    """Base interface for the recipes executed by the data processor."""

    @abstractmethod
    def prepare_structure(self, input_dir: Optional[str]) -> List[T]:
        """Return the list of item metadata to be processed."""
        pass

    @abstractmethod
    def prepare_item(self, *args: Any, **kwargs: Any) -> Any:
        """Process a single item; the exact signature is defined by subclasses."""
        pass

    def __init__(self) -> None:
        # Optional human-readable name for the recipe.
        self._name: Optional[str] = None

    def _done(self, size: int, delete_cached_files: bool, output_dir: Dir) -> _Result:
        """Hook invoked once processing finishes; returns a run summary."""
        return _Result(size=size)
class DataChunkRecipe(DataRecipe):
    """Recipe whose ``prepare_item`` outputs are persisted into chunked binary files."""

    def __init__(
        self,
        chunk_size: Optional[int] = None,
        chunk_bytes: Optional[Union[int, str]] = None,
        compression: Optional[str] = None,
    ):
        """
        Args:
            chunk_size: Maximum number of items per chunk (mutually exclusive with ``chunk_bytes``).
            chunk_bytes: Maximum chunk size in bytes (mutually exclusive with ``chunk_size``).
            compression: Optional compression algorithm name applied to the chunks.

        Raises:
            ValueError: If both ``chunk_size`` and ``chunk_bytes`` are provided.
        """
        super().__init__()
        if chunk_size is not None and chunk_bytes is not None:
            raise ValueError("Either one of the `chunk_size` or the `chunk_bytes` need to be provided.")

        self.chunk_size = chunk_size
        # Bug fix: default to 64 MB chunks only when *neither* limit was given.
        # Previously a user-provided `chunk_bytes` was silently overwritten by
        # the default whenever `chunk_size` was None (i.e. always, given the
        # mutual-exclusion check above).
        self.chunk_bytes = 1 << 26 if chunk_size is None and chunk_bytes is None else chunk_bytes
        self.compression = compression

    @abstractmethod
    def prepare_structure(self, input_dir: Optional[str]) -> List[T]:
        """Return the structure of your data.

        Each element should contain at least a filepath.
        """

    @abstractmethod
    def prepare_item(self, item_metadata: T) -> Any:
        """The return of this `prepare_item` method is persisted in chunked binary files."""

    def _done(self, size: int, delete_cached_files: bool, output_dir: Dir) -> _Result:
        """Merge the per-worker indexes, upload the final index, and summarize the run."""
        num_nodes = _get_num_nodes()
        cache_dir = _get_cache_dir()

        # When deletion is enabled, no chunk file should remain locally.
        chunks = [file for file in os.listdir(cache_dir) if file.endswith(".bin")]
        if chunks and delete_cached_files and output_dir.path is not None:
            raise RuntimeError(f"All the chunks should have been deleted. Found {chunks}")

        merge_cache = Cache(cache_dir, chunk_bytes=1)
        node_rank = _get_node_rank()
        # Multi-node runs produce a per-node index that is merged later.
        merge_cache._merge_no_wait(node_rank if num_nodes > 1 else None)
        self._upload_index(output_dir, cache_dir, num_nodes, node_rank)

        # Only the last node computes and returns the full summary.
        if num_nodes == node_rank + 1:
            with open(os.path.join(cache_dir, _INDEX_FILENAME)) as f:
                config = json.load(f)
            size = sum([c["dim"] if c["dim"] is not None else c["chunk_size"] for c in config["chunks"]])
            num_bytes = sum([c["chunk_bytes"] for c in config["chunks"]])
            if config["config"] is not None:
                data_format = tree_unflatten(
                    config["config"]["data_format"], treespec_loads(config["config"]["data_spec"])
                )
            else:
                data_format = None
            num_chunks = len(config["chunks"])

            # The platform can't store more than 1024 entries.
            # Note: This isn't really used right now, so it is fine to skip if too big.
            num_bytes_per_chunk = [c["chunk_size"] for c in config["chunks"]] if num_chunks < 1024 else []

            return _Result(
                size=size,
                num_bytes=num_bytes,
                data_format=data_format,
                compression=config["config"]["compression"] if config["config"] else None,
                num_chunks=len(config["chunks"]),
                num_bytes_per_chunk=num_bytes_per_chunk,
            )
        return _Result(
            size=size,
        )

    def _upload_index(self, output_dir: Dir, cache_dir: str, num_nodes: int, node_rank: Optional[int]) -> None:
        """This method upload the index file to the remote cloud directory."""
        if output_dir.path is None and output_dir.url is None:
            return

        obj = parse.urlparse(output_dir.url if output_dir.url else output_dir.path)
        if num_nodes > 1:
            local_filepath = os.path.join(cache_dir, f"{node_rank}-{_INDEX_FILENAME}")
        else:
            local_filepath = os.path.join(cache_dir, _INDEX_FILENAME)

        if obj.scheme == "s3":
            s3 = S3Client()
            s3.client.upload_file(
                local_filepath, obj.netloc, os.path.join(str(obj.path).lstrip("/"), os.path.basename(local_filepath))
            )
        elif output_dir.path and os.path.isdir(output_dir.path):
            shutil.copyfile(local_filepath, os.path.join(output_dir.path, os.path.basename(local_filepath)))

        if num_nodes == 1 or node_rank is None:
            return

        # Merge the index files generated by each node.
        # Note: When using the Data Optimizer, they should be a single process on each node executing this section
        # So no risk to get race condition.
        if num_nodes == node_rank + 1:
            # Get the index file locally
            for node_rank in range(num_nodes - 1):
                output_dir_path = output_dir.url if output_dir.url else output_dir.path
                assert output_dir_path
                remote_filepath = os.path.join(output_dir_path, f"{node_rank}-{_INDEX_FILENAME}")
                node_index_filepath = os.path.join(cache_dir, os.path.basename(remote_filepath))
                if obj.scheme == "s3":
                    obj = parse.urlparse(remote_filepath)
                    _wait_for_file_to_exist(s3, obj)
                    with open(node_index_filepath, "wb") as f:
                        s3.client.download_fileobj(obj.netloc, obj.path.lstrip("/"), f)
                elif output_dir.path and os.path.isdir(output_dir.path):
                    shutil.copyfile(remote_filepath, node_index_filepath)

            # Re-merge all the downloaded per-node indexes into the final one
            # and upload it (num_nodes=1 terminates the recursion).
            merge_cache = Cache(cache_dir, chunk_bytes=1)
            merge_cache._merge_no_wait()
            self._upload_index(output_dir, cache_dir, 1, None)
class DataTransformRecipe(DataRecipe):
    """Recipe that writes its outputs directly as files under an output directory."""

    @abstractmethod
    def prepare_structure(self, input_dir: Optional[str]) -> List[T]:
        """Return the structure of your data.

        Each element should contain at least a filepath.
        """

    @abstractmethod
    def prepare_item(self, item_metadata: T, output_dir: str, is_last: bool) -> None:
        """Use your item metadata to process your files and save the file outputs into `output_dir`."""
class DataProcessor:
    """Orchestrates a ``DataRecipe`` over multiple worker processes (and nodes):
    splits the items across workers, spawns them, tracks progress, and
    finalizes/uploads the results."""

    def __init__(
        self,
        input_dir: Union[str, Dir],
        output_dir: Optional[Union[str, Dir]] = None,
        num_workers: Optional[int] = None,
        num_downloaders: Optional[int] = None,
        num_uploaders: Optional[int] = None,
        delete_cached_files: bool = True,
        fast_dev_run: Optional[Union[bool, int]] = None,
        random_seed: Optional[int] = 42,
        reorder_files: bool = True,
        weights: Optional[List[int]] = None,
        reader: Optional[BaseReader] = None,
    ):
        """The `DataProcessor` provides an efficient way to process data across multiple machines into chunks to make
        training faster.

        Arguments:
            input_dir: The path to where the input data are stored.
            output_dir: The path to where the output data are stored.
            num_workers: The number of worker threads to use.
            num_downloaders: The number of file downloaders to use.
            num_uploaders: The number of file uploaders to use.
            delete_cached_files: Whether to delete the cached files.
            fast_dev_run: Whether to run a quick dev run.
            random_seed: The random seed to be set before shuffling the data.
            reorder_files: By default, reorders the files by file size to distribute work equally among all workers.
                Set this to ``False`` if the order in which samples are processed should be preserved.
            weights: Provide a list of weights associated to the inputs.
                This is used to evenly split the work among the workers.
            reader: Map the inputs to worker inputs and provides a read method to read a slice of the data.

        Raises:
            ValueError: If both ``reader`` and ``weights`` are provided.
        """
        self.input_dir = _resolve_dir(input_dir)
        self.output_dir = _resolve_dir(output_dir)
        # Default: 1 worker in fast-dev-run, otherwise 4x the CPU count.
        self.num_workers = num_workers or (1 if fast_dev_run else (os.cpu_count() or 1) * 4)
        self.num_downloaders = num_downloaders or 2
        self.num_uploaders = num_uploaders or 5
        self.delete_cached_files = delete_cached_files
        self.fast_dev_run = _get_fast_dev_run() if fast_dev_run is None else fast_dev_run
        self.workers: Any = []
        # Maps worker index -> number of items it has processed so far.
        self.workers_tracker: Dict[int, int] = {}
        self.progress_queue: Optional[Queue] = None
        self.error_queue: Queue = Queue()
        self.stop_queues: List[Queue] = []
        self.reorder_files = reorder_files
        self.weights = weights
        self.reader = reader

        if self.reader is not None and self.weights is not None:
            raise ValueError("Either the reader or the weights needs to be defined.")

        # Ensure the input dir is the same across all nodes
        self.input_dir = broadcast_object("input_dir", self.input_dir)

        if self.output_dir:
            # Ensure the output dir is the same across all nodes
            self.output_dir = broadcast_object("output_dir", self.output_dir)
            print(f"Storing the files under {self.output_dir.path}")

        self.random_seed = random_seed

    def run(self, data_recipe: DataRecipe) -> None:
        """The `DataProcessor.run(...)` method triggers the data recipe processing over your dataset."""
        if not isinstance(data_recipe, DataRecipe):
            raise ValueError("The provided value should be a data recipe.")

        t0 = time()
        print(f"Setup started with fast_dev_run={self.fast_dev_run}.")

        # Force random seed to be fixed
        random.seed(self.random_seed)
        np.random.seed(self.random_seed)
        torch.manual_seed(self.random_seed)

        # Call the setup method of the user
        user_items: List[Any] = data_recipe.prepare_structure(self.input_dir.path if self.input_dir else None)
        if not isinstance(user_items, (list, StreamingDataLoader)):
            raise ValueError("The `prepare_structure` should return a list of item metadata.")

        # A dataloader result is consumed through a dedicated reader.
        if isinstance(user_items, StreamingDataLoader):
            self.reader = StreamingDataLoaderReader(user_items)

        if self.reader:
            user_items = self.reader.remap_items(user_items, self.num_workers)

        # Choose a work-splitting strategy: explicit weights > file sizes > sequential.
        if self.weights is not None:
            if len(self.weights) != len(user_items):
                raise ValueError("The provided weights length should match the inputs' length.")
            workers_user_items = _map_items_to_workers_weighted(
                num_workers=self.num_workers, user_items=user_items, weights=self.weights, file_size=False
            )
        elif self.reorder_files and self.input_dir.path:
            # TODO: Only do this on node 0, and broadcast the item sizes to the other nodes.
            item_sizes = _get_item_filesizes(user_items, base_path=self.input_dir.path)
            workers_user_items = _map_items_to_workers_weighted(
                num_workers=self.num_workers, user_items=user_items, weights=item_sizes
            )
        else:
            workers_user_items = _map_items_to_workers_sequentially(num_workers=self.num_workers, user_items=user_items)

        print(f"Setup finished in {round(time() - t0, 3)} seconds. Found {len(user_items)} items to process.")

        if self.fast_dev_run:
            items_to_keep = self.fast_dev_run if type(self.fast_dev_run) is int else _DEFAULT_FAST_DEV_RUN_ITEMS
            workers_user_items = [w[:items_to_keep] for w in workers_user_items]
            print(f"Fast dev run is enabled. Limiting to {items_to_keep} items per process.")

        num_items = sum([len(items) for items in workers_user_items])

        self._cleanup_cache()

        print(f"Starting {self.num_workers} workers with {num_items} items.")

        # NOTE(review): this condition can never be True (`input_dir` cannot be
        # both None and truthy) and `src_resolver` is never defined on this
        # class — looks like dead code; confirm before removing.
        if self.input_dir is None and self.src_resolver is not None and self.input_dir:
            self.input_dir = self.src_resolver(self.input_dir)
            print(f"The remote_dir is `{self.input_dir}`.")

        signal.signal(signal.SIGINT, self._signal_handler)

        self._create_process_workers(data_recipe, workers_user_items)

        print("Workers are ready ! Starting data processing...")

        current_total = 0
        has_failed = False
        pbar = _tqdm(
            desc="Progress",
            total=num_items,
            smoothing=0,
            position=-1,
            mininterval=1,
            leave=True,
            dynamic_ncols=True,
        )

        # Poll both the error queue and the progress queue until every item
        # is accounted for (or a worker dies).
        while True:
            try:
                error = self.error_queue.get(timeout=0.001)
                self._exit_on_error(error)
            except Empty:
                assert self.progress_queue
                try:
                    index, counter = self.progress_queue.get(timeout=0.001)
                except Empty:
                    continue
                self.workers_tracker[index] = counter
                new_total = sum(self.workers_tracker.values())
            pbar.update(new_total - current_total)
            current_total = new_total
            if current_total == num_items:
                break

            # Exit early if all the workers are done.
            # This means there were some kind of errors.
            if all(not w.is_alive() for w in self.workers):
                has_failed = True
                break

        pbar.close()

        num_nodes = _get_num_nodes()
        node_rank = _get_node_rank()
        # TODO: Understand why it hangs.
        if num_nodes == 1:
            for w in self.workers:
                w.join(0)

        print("Workers are finished.")

        result = data_recipe._done(len(user_items), self.delete_cached_files, self.output_dir)

        # Register the resulting dataset with the platform (last node only).
        if num_nodes == node_rank + 1 and self.output_dir.url and _IS_IN_STUDIO:
            assert self.output_dir.path
            _create_dataset(
                input_dir=self.input_dir.path,
                storage_dir=self.output_dir.path,
                dataset_type=V1DatasetType.CHUNKED
                if isinstance(data_recipe, DataChunkRecipe)
                else V1DatasetType.TRANSFORMED,
                empty=False,
                size=result.size,
                num_bytes=result.num_bytes,
                data_format=result.data_format,
                compression=result.compression,
                num_chunks=result.num_chunks,
                num_bytes_per_chunk=result.num_bytes_per_chunk,
            )

        print("Finished data processing!")

        # TODO: Understand why it is required to avoid long shutdown.
        if _get_num_nodes() > 1:
            os._exit(int(has_failed))

    def _exit_on_error(self, error: str) -> None:
        """Stop all workers and re-raise the worker error in the main process."""
        for w in self.workers:
            w.join(0)
        raise RuntimeError(f"We found the following error {error}.")

    def _create_process_workers(self, data_recipe: DataRecipe, workers_user_items: List[List[Any]]) -> None:
        """Spawn one `DataWorkerProcess` per item shard, each with its own stop queue."""
        self.progress_queue = Queue()
        workers: List[DataWorkerProcess] = []
        stop_queues: List[Queue] = []
        for worker_idx, worker_user_items in enumerate(workers_user_items):
            stop_queues.append(Queue())
            worker = DataWorkerProcess(
                worker_idx,
                self.num_workers,
                _get_node_rank(),
                data_recipe,
                self.input_dir,
                self.output_dir,
                worker_user_items,
                self.progress_queue,
                self.error_queue,
                stop_queues[-1],
                self.num_downloaders,
                self.num_uploaders,
                self.delete_cached_files,
                self.reader,
            )
            worker.start()
            workers.append(worker)

        # Note: Don't store within the loop as weakref aren't serializable
        self.workers = workers
        self.stop_queues = stop_queues

    def _signal_handler(self, signal: Any, frame: Any) -> None:
        """On termination, we stop all the processes to avoid leaking RAM."""
        for stop_queue in self.stop_queues:
            stop_queue.put(None)
        for w in self.workers:
            w.join(0)
        os._exit(0)

    def _cleanup_cache(self) -> None:
        """Recreate both cache directories from scratch before a run."""
        cache_dir = _get_cache_dir()

        # Cleanup the cache dir folder to avoid corrupted files from previous run to be there.
        if os.path.exists(cache_dir):
            shutil.rmtree(cache_dir, ignore_errors=True)
        os.makedirs(cache_dir, exist_ok=True)

        cache_data_dir = _get_cache_data_dir()

        # Cleanup the cache data folder to avoid corrupted files from previous run to be there.
        if os.path.exists(cache_data_dir):
            shutil.rmtree(cache_data_dir, ignore_errors=True)
        os.makedirs(cache_data_dir, exist_ok=True)
|
evocodebench_data_53
|
# coding=utf-8
# Copyright 2023 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tools for manipulating step functions (piecewise-constant 1D functions).
We have a shared naming and dimension convention for these functions.
All input/output step functions are assumed to be aligned along the last axis.
`t` always indicates the x coordinates of the *endpoints* of a step function.
`y` indicates unconstrained values for the *bins* of a step function
`w` indicates bin weights that sum to <= 1. `p` indicates non-negative bin
values that *integrate* to <= 1.
"""
from internal import linspline
from internal import math
from internal import utils
import jax
import jax.numpy as jnp
import numpy as np
def query(tq, t, y, left=None, right=None):
    """Query step function (t, y) at locations tq. Edges repeat by default."""
    utils.assert_valid_stepfun(t, y)
    # Locate the interval each query point falls into and fetch its value.
    (i0, i1), ((yq, _),) = math.sorted_lookup(tq, t, (y,), utils.device_is_tpu())
    # Out-of-range queries clamp to the edge values unless overridden.
    left_val = y[Ellipsis, :1] if left is None else left
    right_val = y[Ellipsis, -1:] if right is None else right
    return math.select([(i1 == 0, left_val), (i0 == y.shape[-1], right_val)], yq)
def weight_to_pdf(t, w):
    """Turn a vector of weights that sums to 1 into a PDF that integrates to 1."""
    utils.assert_valid_stepfun(t, w)
    bin_widths = jnp.diff(t)
    # Degenerate (near-zero-width) bins get zero density instead of blowing up.
    degenerate = bin_widths < np.finfo(np.float32).tiny
    return jnp.where(degenerate, 0, math.safe_div(w, bin_widths))
def pdf_to_weight(t, p):
    """Turn a PDF that integrates to 1 into a vector of weights that sums to 1."""
    utils.assert_valid_stepfun(t, p)
    # Weight of each bin is its density times its width.
    bin_widths = jnp.diff(t)
    return p * bin_widths
def integrate_weights(w):
    """Compute the cumulative sum of w, assuming all weight vectors sum to 1.

    The output's size on the last dimension is one greater than that of the
    input, because we're computing the integral corresponding to the endpoints
    of a step function, not the integral of the interior/bin values.

    Args:
      w: Tensor, which will be integrated along the last axis. This is assumed
        to sum to 1 along the last axis, and this function will (silently)
        break if that is not the case.

    Returns:
      cw0: Tensor, the integral of w, where cw0[..., 0] = 0 and cw0[..., -1] = 1
    """
    # Interior CDF values, clamped so accumulated error never exceeds 1.
    interior = jnp.minimum(1, jnp.cumsum(w[Ellipsis, :-1], axis=-1))
    pad_shape = interior.shape[:-1] + (1,)
    # Pin the endpoints to exactly 0 and 1 so the CDF is well formed.
    return jnp.concatenate(
        [jnp.zeros(pad_shape), interior, jnp.ones(pad_shape)], axis=-1
    )
def invert_cdf(u, t, w_logits):
    """Invert the CDF defined by (t, w) at the points specified by u in [0, 1)."""
    utils.assert_valid_stepfun(t, w_logits)
    # Normalize the logits into weights, then integrate them into a CDF.
    cdf = integrate_weights(jax.nn.softmax(w_logits, axis=-1))
    # Interpolate into the inverse CDF.
    return math.sorted_interp(u, cdf, t, utils.device_is_tpu())
def sample(
    rng,
    t,
    w_logits,
    num_samples,
    single_jitter=False,
    deterministic_center=False,
    eps=jnp.finfo(jnp.float32).eps,
):
    """Piecewise-Constant PDF sampling from a step function.

    Args:
      rng: random number generator (or None for `linspace` sampling).
      t: [..., num_bins + 1], bin endpoint coordinates (must be sorted)
      w_logits: [..., num_bins], logits corresponding to bin weights
      num_samples: int, the number of samples.
      single_jitter: bool, if True, jitter every sample along each ray by the
        same amount in the inverse CDF. Otherwise, jitter each sample
        independently.
      deterministic_center: bool, if False, when `rng` is None return samples
        that linspace the entire PDF. If True, skip the front and back of the
        linspace so that the centers of each PDF interval are returned.
      eps: float, something like numerical epsilon.

    Returns:
      t_samples: jnp.ndarray(float32), [batch_size, num_samples].
    """
    utils.assert_valid_stepfun(t, w_logits)
    # Draw uniform samples.
    if rng is None:
        # Match the behavior of jax.random.uniform() by spanning [0, 1-eps].
        if deterministic_center:
            pad = 1 / (2 * num_samples)
            u = jnp.linspace(pad, 1.0 - pad - eps, num_samples)
        else:
            u = jnp.linspace(0, 1.0 - eps, num_samples)
        u = jnp.broadcast_to(u, t.shape[:-1] + (num_samples,))
    else:
        # `u` is in [0, 1) --- it can be zero, but it can never be 1.
        u_max = eps + (1 - eps) / num_samples
        # NOTE(review): num_samples == 1 divides by zero on the next line —
        # confirm callers always pass num_samples > 1 when rng is provided.
        max_jitter = (1 - u_max) / (num_samples - 1) - eps
        # With single_jitter, one jitter value is broadcast over all samples.
        d = 1 if single_jitter else num_samples
        u = jnp.linspace(0, 1 - u_max, num_samples) + jax.random.uniform(
            rng, t.shape[:-1] + (d,), maxval=max_jitter
        )
    return invert_cdf(u, t, w_logits)
def sample_intervals(
    rng,
    t,
    w_logits,
    num_samples,
    single_jitter=False,
    domain=(-jnp.inf, jnp.inf),
):
    """Sample *intervals* (rather than points) from a step function.

    Args:
      rng: random number generator (or None for `linspace` sampling).
      t: [..., num_bins + 1], bin endpoint coordinates (must be sorted)
      w_logits: [..., num_bins], logits corresponding to bin weights
      num_samples: int, the number of intervals to sample.
      single_jitter: bool, if True, jitter every sample along each ray by the
        same amount in the inverse CDF. Otherwise, jitter each sample
        independently.
      domain: (minval, maxval), the range of valid values for `t`.

    Returns:
      t_samples: jnp.ndarray(float32), [batch_size, num_samples].

    Raises:
      ValueError: If `num_samples` is not greater than 1.
    """
    utils.assert_valid_stepfun(t, w_logits)
    if num_samples <= 1:
        raise ValueError(f'num_samples must be > 1, is {num_samples}.')

    # Draw the interval centers from the step function's PDF.
    centers = sample(
        rng, t, w_logits, num_samples, single_jitter, deterministic_center=True
    )

    # Interval edges sit halfway between adjacent centers; the outermost edges
    # are the reflections of the first/last midpoint around the first/last
    # center.
    midpoints = (centers[Ellipsis, :-1] + centers[Ellipsis, 1:]) / 2
    lower = 2 * centers[Ellipsis, :1] - midpoints[Ellipsis, :1]
    upper = 2 * centers[Ellipsis, -1:] - midpoints[Ellipsis, -1:]
    edges = jnp.concatenate([lower, midpoints, upper], axis=-1)

    # Clamp to the limits of the input domain, provided by the caller.
    return jnp.clip(edges, *domain)
def lossfun_distortion(t, w):
    """Compute iint w[i] w[j] |t[i] - t[j]| di dj."""
    utils.assert_valid_stepfun(t, w)
    # Midpoint of each interval.
    midpoints = (t[Ellipsis, 1:] + t[Ellipsis, :-1]) / 2
    # Pairwise distances between midpoints drive the inter-bin term.
    pairwise_dist = jnp.abs(midpoints[Ellipsis, :, None] - midpoints[Ellipsis, None, :])
    loss_inter = jnp.sum(w * jnp.sum(w[Ellipsis, None, :] * pairwise_dist, axis=-1), axis=-1)
    # Each bin also incurs a self-interaction term proportional to its width.
    loss_intra = jnp.sum(w**2 * jnp.diff(t), axis=-1) / 3
    return loss_inter + loss_intra
def weighted_percentile(t, w, ps):
    """Compute the weighted percentiles of a step function. w's must sum to 1."""
    utils.assert_valid_stepfun(t, w)
    cdf = integrate_weights(w)
    # Interpolate the CDF at the requested fractions (percent -> fraction),
    # vectorized over leading batch dimensions.
    interp = jnp.vectorize(jnp.interp, signature='(n),(m),(m)->(n)')
    return interp(jnp.array(ps) / 100, cdf, t)
def resample(t, tp, vp, use_avg=False):
    """Resample a step function defined by (tp, vp) into intervals t.

    Notation roughly matches jnp.interp. Resamples by summation by default.

    Args:
      t: tensor with shape (..., n+1), the endpoints to resample into.
      tp: tensor with shape (..., m+1), the endpoints of the step function
        being resampled.
      vp: tensor with shape (..., m), the values of the step function being
        resampled.
      use_avg: bool, if False, return the sum of the step function for each
        interval in `t`. If True, return the average, weighted by the width of
        each interval in `t`.

    Returns:
      v: tensor with shape (..., n), the values of the resampled step function.
    """
    utils.assert_valid_stepfun(tp, vp)
    if use_avg:
        # Width-weighted average: the ratio of two sum-resamples.
        widths = jnp.diff(tp)
        numer = resample(t, tp, vp * widths, use_avg=False)
        denom = resample(t, tp, widths, use_avg=False)
        return math.safe_div(numer, denom)

    # Integrate vp (prepending a zero), interpolate that integral at the new
    # endpoints, then differentiate to recover per-interval sums.
    acc0 = jnp.concatenate(
        [jnp.zeros(vp.shape[:-1] + (1,)), jnp.cumsum(vp, axis=-1)], axis=-1
    )
    interp = jnp.vectorize(jnp.interp, signature='(n),(m),(m)->(n)')
    return jnp.diff(interp(t, tp, acc0), axis=-1)
def blur_and_resample_weights(tq, t, w, blur_halfwidth):
    """Blur the (t, w) histogram by blur_halfwidth, then resample it into tq."""
    utils.assert_valid_stepfun(t, w)
    # Histogram -> PDF, then blur the step PDF into a piecewise-linear spline.
    pdf = weight_to_pdf(t, w)
    ts, ps = linspline.blur_stepfun(t, pdf, blur_halfwidth)
    # Integrate the spline PDF and query the integral at the new endpoints.
    spline_integral = linspline.compute_integral(ts, ps)
    acc_wq = linspline.interpolate_integral(tq, ts, *spline_integral)
    # Differencing undoes the integration; clamp tiny negative values caused
    # by floating-point error (they should never legitimately occur).
    return jnp.maximum(0, jnp.diff(acc_wq, axis=-1))
|
evocodebench_data_54
|
import logging
import re
from collections import Counter
from collections import defaultdict
from . import formatter
from . import line_parser
from . import patterns
from nlm_ingestor.ingestor_utils import spell_utils
from nlm_ingestor.ingestor_utils.utils import sent_tokenize
# Module-level logger for this formatter module.
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
# Shared spell-checking helper (used by nlm_tokenize / fix_spaced_characters /
# line_list_check).
su = spell_utils.SpellUtil()
def stem(line):
    """Drop possessive suffixes (ASCII "'s" and typographic "’s") from *line*."""
    for possessive in ("'s", "’s"):
        line = line.replace(possessive, "")
    return line
def check_parentheses(text):
    """Return True iff the parentheses in *text* are balanced.

    Fix: the original only compared final counts, so an input like ")(" was
    reported as balanced. A closing paren that appears before any matching
    opening paren now fails fast (depth going negative can never recover).
    """
    depth = 0
    for ch in text:
        if ch == "(":
            depth += 1
        elif ch == ")":
            depth -= 1
            if depth < 0:
                # More ")" than "(" so far -- cannot be balanced.
                return False
    return depth == 0
def nlm_tokenize(line):
    """Tokenize *line*: lowercase, strip punctuation/number-units, drop numerics.

    Returns ["unknown"] when nothing survives the filtering.
    """
    if not line:
        line = ""
    line = line.lower()
    # NOTE(review): str.maketrans requires both strings to be the same length;
    # confirm the replacement string below matches the length of "-/".
    line = line.translate(line.maketrans("-/", " "))
    # Strip bullet/punctuation characters entirely.
    line = line.translate(str.maketrans("", "", "�\\(*,.?•\\➢ƒ–\\)'\"—"))
    line = patterns.num_unit.sub(r"", line)
    line = stem(line)
    tokens = [
        word
        for word in line.split()
        if not (
            word.isdigit()
            or word.endswith("%")
            or word.startswith("$")
            or word.endswith("$")
        )
    ]
    if not tokens:
        tokens.append("unknown")
    return tokens
# make sure that there is at least one word which is greater than two characters
def find_floating_chars(line):
    """Return True when no space-separated word in *line* is longer than 2 chars."""
    return all(len(word) <= 2 for word in line.split(" "))
def is_table_row(line):
    """Parse *line* and report whether it looks like a table row."""
    return line_parser.Line(line).is_table_row
def should_skip(line, xml=False):
    """Very short lines carry no content: skip len<=2 (plain) or len==0 (xml)."""
    if xml:
        return len(line) == 0
    return len(line) <= 2
def clean_lines(lines, xml=False):
    """Merge raw text lines into typed block dicts (para / header / list_item).

    Lines that look like continuations of the previous line are accumulated
    into ``running_line`` (with the contributing Line objects kept in
    ``line_buffer``); whenever a line clearly starts fresh, the accumulated
    text is committed as a block dict.

    Args:
        lines: iterable of raw text lines.
        xml: when True, use the laxer XML skip rule and do not try to repair
            s p a c e d-out characters.

    Returns:
        List of block dicts with block_idx/block_text/block_type/... keys.
    """
    result = []
    running_line = ""
    line_buffer = []  # Line objects contributing to the current running_line
    line_type = "para"
    header_block_idx = -1  # index of the most recently committed header block
    block_idx = 0
    line_set = set()  # de-duplication: lines already seen, digits stripped
    for line_str in lines:
        line_str = clean_line(line_str)
        if should_skip(line_str, xml=xml):
            continue
        # Skip near-duplicate lines differing only in digits (page numbers etc.).
        line_without_numbers = re.sub(r"\d+", "", line_str)
        if line_without_numbers in line_set:
            continue
        else:
            line_set.add(line_without_numbers)
        curr_line = line_parser.Line(line_str)
        # this converts strings like 'e x e c u t i v e summary' to 'executive summary'
        if not xml and curr_line.has_spaced_characters:
            line_str = fix_spaced_characters(line_str)
            curr_line = line_parser.Line(line_str)
        if len(line_buffer) > 0:
            # find out if previous line was a discontinuous line
            prev_line = line_buffer[-1]
            logger.debug("========")
            logger.debug(f"{prev_line.incomplete_line} >> {prev_line.text} \n")
            logger.debug(f"{curr_line.continuing_line} >> {curr_line.text} \n")
            # keep connecting lines as long as they seem incomplete
            is_incomplete = prev_line.incomplete_line or (
                len(line_buffer) > 1 and not prev_line.ends_with_period
            )
            logger.debug(
                f"incomplete: {is_incomplete}, is_list_or_row: {curr_line.is_list_or_row}, continuing_line: {curr_line.continuing_line}",
            )
            if (
                is_incomplete
                and not (curr_line.is_list_or_row or curr_line.line_type == "list_item")
            ) or curr_line.continuing_line:
                logger.debug("connecting..")
                running_line = formatter.connect(running_line, curr_line.text)
                line_buffer.append(curr_line)
                # if we are connecting lines, then this has to be a para unless it is a list_item, basically no headers
                if not line_type == "list_item":
                    line_type = "para"
            else:  # commit the line and start a new line
                # remove different types of bulletted list (for better formatting) but do not touch numbered line
                logger.debug("starting new line..")
                if line_type == "header":
                    header_block_idx = block_idx
                block = {
                    "block_idx": block_idx,
                    "block_text": running_line,
                    "block_type": line_type,
                    "text_group_start_idx": -1,
                    "block_list": [],
                    "header_block_idx": header_block_idx,
                    "level": 0,
                }
                result.append(block)
                block_idx = block_idx + 1
                running_line = curr_line.text
                line_buffer = [curr_line]
                line_type = curr_line.line_type
                logger.debug("========")
        else:
            # First accepted line: start the buffer.
            running_line = curr_line.text
            line_type = curr_line.line_type
            line_buffer = [curr_line]
    # Strip a leading bullet character from a final buffered list item.
    # NOTE(review): assumes running_line is non-empty whenever line_type is
    # "list_item" (it is set from a non-skipped line's text) -- confirm.
    if line_type == "list_item" and running_line[0] in "�\\*,.?•\\➢ƒ–\\'\"—":
        running_line = running_line[1:].lstrip()
    # Commit whatever is still buffered as the final block.
    block = {
        "block_idx": block_idx,
        "block_text": running_line,
        "block_type": line_type,
        "text_group_start_idx": -1,
        "block_list": [],
        "header_block_idx": header_block_idx,
        "level": 0,
    }
    result.append(block)
    return result
def line_list_check(prev_line, curr_line, list_char):
    """Heuristics for whether prev_line and curr_line belong to the same list.

    Args:
        prev_line: Line object for the previous (already committed) line.
        curr_line: Line object for the current line.
        list_char: bullet character recorded on the current list block.

    Returns:
        True when the two lines look like items of the same bulleted list.
    """
    # if prev_line is list_item and list_char matches curr_line
    if list_char == curr_line.text[0] and list_char not in ["”", "'", '"', "("]:
        return True
    # same char is alpha
    if prev_line.text[0] == curr_line.text[0] and prev_line.text[0].isalpha():
        if len(prev_line.text) >= 2 and prev_line.text[1].isupper():
            # spell check first word
            first_word = prev_line.text.split(" ")[0]
            first_word = first_word.replace("'", "")
            correct_word = su.segment(first_word)
            # NOTE(review): comparing first_word minus its leading character
            # against the spell-segmented word -- presumably detects a bullet
            # letter glued onto a real word (e.g. "aWord" -> "Word"); confirm
            # against spell_utils.SpellUtil.segment semantics.
            if first_word[1:] == correct_word:
                return True
    # same char is not alpha but not digit
    if prev_line.text[0] == curr_line.text[0] and not (
        prev_line.text[0].isalpha()
        or prev_line.text[0].isdigit()
        or list_char not in ["”", "'", '"', "("]
    ):
        return True
    return False
def should_join_table(prev_line, curr_line, ents_aligned):
    """
    Check if next line should be joined as a tr. This makes no assumption if the current line is a table
    """
    # Column-count agreement: a table row needs at least two columns.
    prev_cols = len(prev_line.visual_line.text_list)
    curr_cols = len(curr_line.visual_line.text_list)
    ent_match = prev_cols == curr_cols and prev_cols >= 2
    # Identical, non-zero tab counts suggest the same column layout.
    tab_match = (
        prev_line.visual_line.tab_count == curr_line.visual_line.tab_count
        and curr_line.visual_line.tab_count > 0
    )
    # casing should also be the same
    same_case = (
        prev_line.text[0].islower() == curr_line.text[0].islower()
        or prev_line.text[0].isupper() == curr_line.text[0].isupper()
    )
    # Both lines containing a colon (key: value style) also joins them.
    colon_check = (
        prev_line.hit_colon
        and curr_line.hit_colon
        and prev_line
        and same_case
        and not prev_line.incomplete_line
    )
    any_tabs = prev_line.visual_line.tab_count or curr_line.visual_line.tab_count
    return (
        (tab_match and ent_match)
        or colon_check
        or (ents_aligned and ent_match and any_tabs)
    )
def check_page_spacing(prev_line, curr_line, spacing_dict):
    """Decide whether two lines should be joined using page-wide spacing stats.

    ``spacing_dict`` maps (font_size, vertical_gap) -> occurrence count for the
    page. The pair is joinable when the observed gap between the lines equals
    (or is within one pixel of) the smallest frequently-observed positive gap
    for the shared font size.

    Fix: the original contained a copy-pasted duplicate
    ``if len(val): join_fs, join_top = val[0]`` block; behavior is unchanged.
    """
    diff_top = round(curr_line.visual_line.start_y - prev_line.visual_line.end_y)
    # Pick a reference font size shared by both lines when possible.
    prev_fs = {prev_line.visual_line.start_fs, prev_line.visual_line.end_fs}
    curr_fs = {curr_line.visual_line.start_fs, curr_line.visual_line.end_fs}
    shared_fs = prev_fs.intersection(curr_fs)
    fs = min(shared_fs) if shared_fs else curr_line.visual_line.start_fs
    # Is this (fs, gap) -- or a gap one pixel off -- known on this page?
    min_check = spacing_dict.get((fs, diff_top - 1))
    max_check = spacing_dict.get((fs, diff_top + 1))
    normal_check = spacing_dict.get((fs, diff_top), 0) > 3
    if not (min_check or normal_check or max_check):
        return False
    # Find the smallest positive gap seen more than twice for this font size.
    candidates = [
        (key, key[1])
        for key, count in spacing_dict.items()
        if key[0] == fs and key[1] > 0 and count > 2
    ]
    if not candidates:
        return False
    join_fs, join_top = min(candidates)[0]
    if (fs, diff_top) == (join_fs, join_top):  # exact match
        return True
    # Within one pixel of the reference gap. (In the original this condition
    # had a mis-parenthesized and/or chain, but join_fs == fs always holds
    # because candidates are filtered on key[0] == fs, so this is equivalent.)
    if join_fs == fs and join_top in (diff_top - 1, diff_top + 1):
        return True
    return False
def compute_overlap(
    start_x0: float,
    end_x0: float,
    start_x1: float,
    end_x1: float,
    divide_by_min=True,
) -> float:
    """Fraction of horizontal overlap between two segments.

    The raw intersection length is normalized by the shorter segment when
    ``divide_by_min`` is True, otherwise by the longer one.
    """
    width_x0 = abs(end_x0 - start_x0)
    width_x1 = abs(end_x1 - start_x1)
    if start_x0 <= start_x1 <= end_x0:
        overlap = min(abs(end_x0 - start_x1), width_x1)
    elif start_x0 <= end_x1 <= end_x0:
        overlap = min(abs(end_x1 - start_x0), width_x1)
    elif start_x1 <= start_x0 <= end_x0 <= end_x1:
        overlap = abs(end_x0 - start_x0)
    else:
        overlap = 0.0
    denom = min(width_x0, width_x1) if divide_by_min else max(width_x0, width_x1)
    # Small epsilon guards against zero-width segments.
    return overlap / (denom + 1e-5)
def compute_overlap_top_bottom(
    start_x0: float,
    end_x0: float,
    start_x1: float,
    end_x1: float,
) -> float:
    """
    This is different from the above function.
    Finds percentage overlap of top to bottom.
    Score of 100% is possible doesn't reference the shortest line
    """
    width_x1 = abs(end_x1 - start_x1)
    if width_x1 == 0:
        return 0.0
    if start_x0 > start_x1:
        # Segment 0 starts to the right of segment 1's start.
        if end_x1 <= start_x0:
            return 0.0  # no overlap at all
        return (end_x1 - start_x0) / width_x1
    # Segment 0 starts at or left of segment 1's start.
    if end_x1 <= end_x0:
        return 1.0  # segment 1 fully contained in segment 0
    return (end_x1 - start_x0) / width_x1
def compute_bottom_top_overlap(start_x0, end_x0, start_x1, end_x1):
    """
    This is different from the above function.
    Finds percentage overlap of top to bottom.
    Score of 100% is possible doesn't reference the shortest line
    """
    aligned = start_x0 == start_x1 and end_x0 != start_x0
    if aligned:
        # Aligned with the bottom line: ratio of the two widths.
        return (end_x1 - start_x1) / (end_x0 - start_x0)
    # All other alignments count as full overlap.
    return 1.0
# header check for lines with similar font
def visual_header_check(prev_line, curr_line, same_font):
    """Geometric check: does prev_line look like a header sitting above curr_line?

    Compares vertical order, left alignment and width overlap of the two
    lines. A shorter line (low width overlap), or one with a different font
    style, above a longer line reads as a header.
    """
    if prev_line.visual_line.wrapped_page:
        return False
    # prev must start higher on the page than curr.
    if prev_line.visual_line.start_y < curr_line.visual_line.start_y:
        prev_line_width = prev_line.visual_line.max_x - prev_line.visual_line.min_x
        curr_line_width = curr_line.visual_line.max_x - curr_line.visual_line.min_x
        if curr_line_width == 0:
            return False
        # Same left edge and same width: plain flowing text, not a header.
        if round(prev_line.visual_line.min_x) == round(curr_line.visual_line.min_x):
            if round(prev_line_width) == round(curr_line_width):
                return False
        offset = 0
        if prev_line.visual_line.min_x <= curr_line.visual_line.min_x:
            offset = curr_line.visual_line.min_x - prev_line.visual_line.min_x  # offset
        overlap_percentage = (prev_line_width - offset) / curr_line_width
        # NOTE(review): `prev_line.visual_line[1]` indexes the visual_line
        # object -- confirm VisualLine supports __getitem__; this may have
        # been intended as a named attribute (e.g. font family).
        different_font_style = (
            prev_line.visual_line.fw != curr_line.visual_line.fw
            or prev_line.visual_line[1] != curr_line.visual_line[1]
            or prev_line.visual_line.fs > curr_line.visual_line.fs
        )
        if (
            overlap_percentage < 0.3
            or (different_font_style and overlap_percentage < 0.6)
            or (prev_line.line_type == "header" and different_font_style)
        ):
            return True
    return False
def visual_header_from_stats(prev_line, curr_line, page_stats):
    """Use page-wide font-size statistics to decide whether prev_line is a
    header sitting above curr_line.

    A header must use a font that is both rare on the page (<= 8% of lines)
    and notably larger than the page median (or close to the page maximum).
    """
    prev_fs = prev_line.visual_line.fs
    curr_fs = curr_line.visual_line.fs
    median_fs = round(page_stats["median_fs"])
    max_fs = round(max(page_stats["fs_list"]))
    # prev_fs within 20% of the page's largest font (trivially true if max is 0).
    near_max = ((max_fs - prev_fs) / max_fs) < 0.2 if max_fs != 0 else True
    prev_diff = round(prev_fs - median_fs)
    # 0.8 stands in for "curr_fs is the median" so the ratio below is defined.
    curr_diff = round(curr_fs - median_fs) or 0.8
    varied_fonts = len(set(page_stats["fs_list"])) >= 4
    fs_counts = Counter(round(x, 3) for x in page_stats["fs_list"])
    rarity = fs_counts[round(prev_fs, 3)] / len(page_stats["fs_list"])
    ratio_from_median = prev_diff / curr_diff
    # Close to the page max, or much farther above the median than curr is.
    bigger_text = near_max or (ratio_from_median > 2)
    if varied_fonts and rarity <= 0.08:
        if bigger_text and prev_diff > 1 and (prev_diff - curr_diff) > 0.3:
            return True
        # header join: both lines share the same clearly-above-median size.
        if bigger_text and curr_fs == prev_fs and prev_diff > 1:
            return True
    return False
# def visual_clean_lines(lines, page_stats={}, page_info_dict={}):
def check_tr_alignment(prev_line, curr_line):
    """Check whether two lines' entities line up like columns of a table row.

    Requires the previous line to have at least two entities and the entity
    counts to differ by at most one; columns "align" when every paired start
    x-position is within 100 px.
    """
    prev_ents = len(prev_line.visual_line.text_list)
    curr_ents = len(curr_line.visual_line.text_list)
    prev_positions = prev_line.visual_line.start_x_list
    curr_positions = curr_line.visual_line.start_x_list
    prev_line_start_ents = prev_line.visual_line.start_x_list_single_ent
    curr_line_start_ents = curr_line.visual_line.start_x_list_single_ent
    same_ents = prev_ents > 1 and abs(prev_ents - curr_ents) <= 1
    # Prefer the single-entity start positions when both lines agree on count.
    if len(prev_line_start_ents) == len(curr_line_start_ents):
        prev_positions = prev_line_start_ents
        curr_positions = curr_line_start_ents
    # Fallback for joined p_tags: use prev's single-entity starts when they
    # match curr's (possibly unswapped) positions but not prev's own.
    if len(prev_line_start_ents) == len(curr_positions) and len(
        prev_line_start_ents,
    ) != len(
        prev_positions,
    ):  # joined p_tags
        prev_positions = prev_line_start_ents
    if not same_ents:
        return False
    # Columns align when each pair of start positions is within 100 px.
    for p_x, c_x in zip(prev_positions, curr_positions):
        p_x = round(p_x)
        c_x = round(c_x)
        if abs(p_x - c_x) > 100:
            return False
    return True
def check_layout(prev_line, curr_line, prev_above_curr):
    """Detect a misplaced text group: the two lines overlap horizontally but
    prev_line is not above curr_line on the page."""
    lo = int(prev_line.visual_line.min_x)
    hi = int(prev_line.visual_line.max_x)
    prev_span = range(lo, hi)
    if not prev_span:
        # weird edge case: min/max appear swapped
        prev_span = range(hi, lo)
    curr_span = range(
        int(curr_line.visual_line.min_x),
        int(curr_line.visual_line.max_x),
    )
    overlap = set(prev_span).intersection(curr_span)
    if overlap and not prev_above_curr:
        return True
    return False
def order_blocks(blocks):
    """Reorder blocks by the starting y position of their visual group, then
    re-index block_idx and header_block_idx to match the new order."""
    groups = defaultdict(list)
    for block in blocks:
        groups[block["group_id"]].append(block)
    # Sort group ids by the y position of each group's first block
    # (sorted() is stable, so ties keep insertion order).
    sorted_group_ids = sorted(groups, key=lambda gid: groups[gid][0]["y"])
    ordered_blocks = []
    for gid in sorted_group_ids:
        ordered_blocks.extend(groups[gid])
    # Re-index blocks and point each block at the latest preceding header.
    header_idx = 0
    for idx, block in enumerate(ordered_blocks):
        block["block_idx"] = idx
        if block["block_type"] == "header":
            header_idx = idx
        block["header_block_idx"] = header_idx
    return ordered_blocks
def visual_clean_lines(
    lines,
    page_stats=None,
    page_info_dict=None,
    page_idx=0,
    line_set=None,
):
    """Group the visually-parsed lines of one page into typed blocks.

    Walks the page's lines in order, deciding for each line whether it joins
    the previous block (same sentence/paragraph/table) or starts a new block,
    using font, spacing and layout statistics.

    Args:
        lines: list of dicts with "text", "style" and "text_list" keys.
        page_stats: per-page statistics (median_fs, fs_list, n_lines,
            fs_and_diff_next_y, ...).
        page_info_dict: unused; kept for interface compatibility.
        page_idx: page number; currently unused in the body.
        line_set: set of long lines already seen, used for de-duplication
            across pages; mutated in place and returned.

    Returns:
        (page_blocks, line_set) where page_blocks is a list of block dicts
        ordered by visual group.

    Fixes vs. the original:
        * mutable default arguments removed; in particular the old
          ``line_set={}`` default was a dict, so ``line_set.add(...)`` below
          raised AttributeError whenever the default was used.
        * the joined-line span took ``max(prev.max_x, prev.max_x)`` (copy-paste
          bug) instead of covering both lines.
        * ``page_blocks[-1]["block_list"]: new_text_list`` was an annotation,
          not an assignment, so block_list was never updated for key:value
          table rows.
    """
    if page_stats is None:
        page_stats = {}
    if page_info_dict is None:
        page_info_dict = {}
    if line_set is None:
        line_set = set()
    page_blocks = []
    header_block_idx = -1
    block_idx = 0
    style_dict = {}
    join_font_spacing = False
    prev_line = None
    text_list = []
    prev_ents = 0
    curr_ents = 0
    is_incomplete = False
    colon_rule = False
    text_group_start = True
    text_group_start_idx = 0
    prev_line = None
    next_line = None
    sentence_visual_end = False
    group_id = 0
    for idx, line in enumerate(lines):
        line_str, style_dict, text_list = (
            line["text"],
            line["style"],
            line["text_list"],
        )
        line_str = " ".join(line_str.split())
        if should_skip(line_str):
            continue
        if line_str in line_set:
            continue
        # Only long lines are remembered for cross-page de-duplication.
        if len(line_str.split()) > 8:
            line_set.add(line_str)
        curr_line = line_parser.Line(
            line_str=line_str,
            style_dict=style_dict,
            text_list=text_list,
            page_details=page_stats,
        )
        if prev_line is None:
            # First visible line of the page: open the first block.
            list_char = ""
            if curr_line.line_type == "list_item":
                list_char = curr_line.text[0]
                curr_line.text = curr_line.text[1:].lstrip()
            if curr_line.line_type == "header":
                header_block_idx = block_idx
            block = {
                "block_idx": block_idx,
                "block_text": curr_line.text,
                "block_type": curr_line.line_type,
                "header_block_idx": header_block_idx,
                "block_group": [curr_line.visual_line.text_list],
                "list_char": list_char,
                "fs": curr_line.visual_line.start_fs,
                "text_group_start_idx": text_group_start_idx,
                "block_list": curr_line.visual_line.text_list,
                "line": curr_line,
                "y": curr_line.visual_line.start_y,
                "group_id": group_id,
            }
            prev_line = curr_line
            block_idx += 1
            page_blocks.append(block)
            continue
        # A line repeating the previous line's leading "<bullet><space>" pair
        # (non-alpha, non-digit first char) marks both lines as list items.
        if (
            len(prev_line.text) > 1
            and len(curr_line.text) > 1
            and prev_line.text[:2] == curr_line.text[:2]
            and prev_line.text[1] == " "
            and not (prev_line.text[0].isdigit() or curr_line.text[0].isdigit())
            and not (prev_line.text[0].isalpha() or curr_line.text[0].isalpha())
        ):
            curr_line.line_type = "list_item"
            curr_line.is_list_item = True
            curr_line.is_list_or_row = True
            if page_blocks[-1]["block_type"] != "list_item":
                page_blocks[-1]["block_type"] = "list_item"
                page_blocks[-1]["list_char"] = page_blocks[-1]["block_text"][0]
                page_blocks[-1]["block_text"] = page_blocks[-1]["block_text"][
                    1:
                ].lstrip()
        # Font-size agreement between the two lines (within half a point).
        same_start_fs = (
            abs(prev_line.visual_line.start_fs - curr_line.visual_line.start_fs) < 0.5
        )
        same_end_fs = (
            abs(prev_line.visual_line.end_fs - curr_line.visual_line.end_fs) < 0.5
        )
        same_end_start_fs = (
            abs(prev_line.visual_line.end_fs - curr_line.visual_line.start_fs) < 0.5
        )
        prev_above_curr = (
            True
            if prev_line.visual_line.end_y < curr_line.visual_line.start_y
            else False
        )
        y_diff = curr_line.visual_line.start_y - prev_line.visual_line.start_y
        top_overlap = compute_overlap_top_bottom(
            start_x0=prev_line.visual_line.start_x,
            end_x0=prev_line.visual_line.end_x,
            start_x1=curr_line.visual_line.start_x,
            end_x1=curr_line.visual_line.end_x,
        )
        bottom_overlap = compute_bottom_top_overlap(
            start_x0=prev_line.visual_line.start_x,
            end_x0=prev_line.visual_line.end_x,
            start_x1=curr_line.visual_line.start_x,
            end_x1=curr_line.visual_line.end_x,
        )
        prev_overlap_curr = True if bottom_overlap or top_overlap else False
        use_visual_join = True if prev_above_curr and prev_overlap_curr else False
        if not use_visual_join and prev_line.incomplete_line:
            join_font_spacing = True
        if not (prev_line.is_table_row or curr_line.is_table_row):
            if page_stats["n_lines"] <= 3:
                join_font_spacing = True
            else:
                join_font_spacing = check_page_spacing(
                    prev_line,
                    curr_line,
                    page_stats["fs_and_diff_next_y"],
                )
        # if the font is different and font-family is different
        different_font_family = (
            curr_line.visual_line.font_family != prev_line.visual_line.font_family
        )
        different_common_fs = (
            prev_line.visual_line.mode_fs != curr_line.visual_line.mode_fs
            and prev_line.visual_line.start_fs != curr_line.visual_line.start_fs
        )
        different_font = (
            different_font_family and different_common_fs and not join_font_spacing
        )
        # start and end characters are same font or the mode of fonts of both lines is the same
        same_font = (
            (prev_line.visual_line.fs == curr_line.visual_line.fs)
            or (same_start_fs and same_end_fs)
            or same_end_start_fs
            or prev_line.visual_line.mode_fs == curr_line.visual_line.mode_fs
        ) and not different_font
        prev_ents = (
            len(prev_line.visual_line.text_list)
            if not prev_line.line_type == "list_item"
            else 0
        )
        curr_ents = (
            len(curr_line.visual_line.text_list) if not curr_line.is_list_item else 0
        )
        ents_aligned = check_tr_alignment(prev_line, curr_line)
        is_incomplete_sent = (
            prev_line.incomplete_line
            and not prev_line.ends_with_period
            or prev_line.ends_with_comma
        )
        # Logic using the line after curr. NOTE: line_parser runs twice per
        # line (once as next_line, once as curr_line); inefficient but kept.
        if idx + 1 < len(lines):
            next_line = lines[idx + 1]
            next_line_str, next_style_dict, next_text_list = (
                next_line["text"],
                next_line["style"],
                next_line["text_list"],
            )
            next_line = line_parser.Line(
                line_str=next_line_str,
                style_dict=next_style_dict,
                text_list=next_text_list,
                page_details=page_stats,
            )
            # if the last line was not a table, check if the next line is a table to avoid single tr
            if prev_line.line_type != "table_row" and not ents_aligned:
                # check if the next line is a table and matches curr_line
                next_line_tr = next_line.line_type == "table_row" or should_join_table(
                    curr_line,
                    next_line,
                    False,
                )
                if not next_line_tr and curr_line.line_type == "table_row":
                    curr_line.line_type = "para"
        # If the next line is joinable by visual stats but prev and curr are
        # not, make sure prev_line doesn't join curr_line.
        prev_not_above_next = (
            next_line and prev_line.visual_line.start_y > next_line.visual_line.start_y
        )
        next_line_join = False
        if next_line and check_layout(prev_line, next_line, prev_not_above_next):
            next_line_join = check_page_spacing(
                curr_line,
                next_line,
                page_stats["fs_and_diff_next_y"],
            )
        curr_next_visual_join = not join_font_spacing and next_line_join
        is_incomplete = (
            is_incomplete_sent
            or (join_font_spacing and not sentence_visual_end)
            or curr_line.continuing_line
        )
        has_overlap_with_min = (
            compute_overlap(
                curr_line.visual_line.start_x,
                curr_line.visual_line.end_x,
                prev_line.visual_line.start_x,
                prev_line.visual_line.end_x,
                divide_by_min=True,
            )
            > 0.7
        )
        is_below = curr_line.visual_line.start_y - prev_line.visual_line.start_y > 0
        is_visually_apart = (has_overlap_with_min and not is_below) or (
            not has_overlap_with_min and is_below
        )
        above_bold_below_not = (
            prev_line.visual_line.fw >= 600.0 and curr_line.visual_line.fw <= 400.0
        )
        has_overlap_with_max = (
            compute_overlap(
                curr_line.visual_line.start_x,
                curr_line.visual_line.end_x,
                prev_line.visual_line.start_x,
                prev_line.visual_line.end_x,
                divide_by_min=False,
            )
            > 0.3
        )
        # A short bold header line directly above non-bold text must not join.
        is_not_header_over_para = True
        if (
            above_bold_below_not
            and not has_overlap_with_max
            and prev_line.line_type == "header"
            and not prev_line.incomplete_line
        ):
            is_not_header_over_para = False
        ###########
        # List item
        if line_list_check(prev_line, curr_line, page_blocks[-1]["list_char"]):
            prev_line.line_type = "list_item"
            curr_line.line_type = "list_item"
            curr_line.is_list_item = True
            # change prev_line's committed block to a list item
            if page_blocks[-1]["block_type"] != "list_item":
                page_blocks[-1]["list_char"] = page_blocks[-1]["block_text"][0]
                page_blocks[-1]["block_text"] = page_blocks[-1]["block_text"][
                    1:
                ].lstrip()
                page_blocks[-1]["block_type"] = "list_item"
        close_text_y = (
            curr_line.visual_line.start_y
            - curr_line.visual_line.mode_fs
            - prev_line.visual_line.start_y
            - prev_line.visual_line.mode_fs
        ) <= 0
        aligned_text = curr_line.visual_line.start_x == prev_line.visual_line.start_x
        title_text = False
        if len(lines) < 10:
            # Short pages: fully-overlapping, tightly-stacked, left-aligned
            # lines are treated as title text and always joined.
            title_text = top_overlap == 1.0 and close_text_y and aligned_text
        visual_header = visual_header_check(prev_line, curr_line, same_font)
        list_item_rule = curr_line.has_list_char or (
            curr_line.numbered_line
            and not (
                (prev_line.incomplete_line and curr_line.continuing_line)
                or join_font_spacing
            )
        )
        last_2_block_tr = False
        if len(page_blocks) >= 2:
            last_block_tr = (
                page_blocks[-1]["block_type"] == "table_row"
                and page_blocks[-2]["block_type"] == "table_row"
            )
            if not last_block_tr and curr_line.line_type == "para":
                # check to join
                if prev_line.incomplete_line and curr_line.continuing_line:
                    last_2_block_tr = True
        no_space_join = prev_line.ends_with_period and curr_line.text[0] != " "
        visual_header_by_stats = visual_header_from_stats(
            prev_line,
            curr_line,
            page_stats,
        )
        header_join = False
        common_list = curr_line.has_list_char or prev_line.has_list_char
        if (
            visual_header_by_stats
            and curr_line.incomplete_line
            and same_font
            and not (prev_line.is_table_row or curr_line.is_table_row or common_list)
        ):
            header_join = True
        # NOTE(review): the mixed and/or chain below is kept verbatim; its
        # effective grouping is ((same_font and ... and not visual_header)
        # or (not check_parentheses(...) and is_not_header_over_para and
        # not no_space_join) or title_text or header_join).
        if (
            same_font
            and not should_join_table(prev_line, curr_line, ents_aligned)
            and not (curr_line.line_type == "table_row" or list_item_rule)
            and not (prev_line.line_type == "table_row" and not last_2_block_tr)
            and is_incomplete
            and not curr_next_visual_join  # is_visually_apart
            and not visual_header
            or not check_parentheses(prev_line.text)
            and is_not_header_over_para
            and not no_space_join
            or title_text
            or header_join
        ):
            # JOIN: merge curr_line into the previous block.
            if not is_visually_apart and bottom_overlap < 0.5:
                # this would signify end of paragraph
                sentence_visual_end = True
            else:
                sentence_visual_end = False
            if page_stats["n_lines"] <= 3:
                page_blocks[-1]["block_type"] = "header"
            elif (
                not prev_line.line_type == "list_item"
            ):  # and not curr_line.visual_line.is_header:
                page_blocks[-1]["block_type"] = "para"
            new_text = formatter.connect(
                prev_line.text.rstrip(),
                curr_line.text.lstrip(),
            )
            new_text_list = (
                prev_line.visual_line.text_list + curr_line.visual_line.text_list
            )
            # FIX: the joined span must cover both lines (the original took
            # max(prev.max_x, prev.max_x)).
            max_x = max(prev_line.visual_line.max_x, curr_line.visual_line.max_x)
            min_x = min(prev_line.visual_line.min_x, curr_line.visual_line.min_x)
            prev_line_type = prev_line.line_type
            page_blocks[-1]["block_text"] = new_text
            prev_start_y = prev_line.visual_line.start_y
            curr_start_y = curr_line.visual_line.start_y
            prev_end_y = prev_line.visual_line.end_y
            wrapped_page = prev_line.visual_line.wrapped_page
            # Carry the merged text/span onto curr_line, which becomes the
            # new prev_line while preserving the original line type.
            prev_line = curr_line
            prev_line.text = new_text
            prev_line.visual_line.start_y = prev_start_y
            prev_line.visual_line.text_list = new_text_list
            prev_line.line_type = prev_line_type
            prev_line.visual_line.min_x = min_x
            prev_line.visual_line.max_x = max_x
            prev_line.visual_line.wrapped_page = wrapped_page
            if curr_start_y < prev_end_y:
                prev_line.visual_line.wrapped_page = True
        # NEW BLOCK
        else:
            if not is_visually_apart and bottom_overlap < 0.5:
                # this would signify end of paragraph
                sentence_visual_end = True
            else:
                sentence_visual_end = False
            colon_rule = (
                prev_line.hit_colon and curr_line.hit_colon and prev_ents == curr_ents
            )
            # Lines whose tab counts (or join counts) intersect look tabular.
            tab_check_join = {
                prev_line.visual_line.tab_count_join,
                prev_line.visual_line.tab_count,
            } & {curr_line.visual_line.tab_count_join, curr_line.visual_line.tab_count}
            tab_check = sum(tab_check_join) > 0
            if visual_header_by_stats and prev_line.line_type != "table_row":
                page_blocks[-1]["block_type"] = "header"
            elif (
                colon_rule
                and prev_ents == 1
                and prev_line.line_type != "list_item"
                and not (prev_line.incomplete_line and curr_line.continuing_line)
            ):
                # Convert "key: value" pairs into a table row.
                new_text_list = prev_line.text.split(":")
                new_text_list = [new_text_list[0] + ":", new_text_list[1:]]
                page_blocks[-1]["block_type"] = "table_row"
                # FIX: the original used a variable annotation (":") here, a
                # silent no-op, so block_list was never updated.
                page_blocks[-1]["block_list"] = new_text_list
                if text_group_start:
                    text_group_start = False
                    text_group_start_idx = page_blocks[-1]["block_idx"]
                page_blocks[-1]["text_group_start_idx"] = text_group_start_idx
                curr_line.line_type = "table_row"
                curr_line.is_list_or_row = True
            elif (
                tab_check and ents_aligned and prev_line.line_type != "list_item"
            ) or (colon_rule and not prev_line.incomplete_line):
                page_blocks[-1]["block_type"] = "table_row"
                if text_group_start:
                    text_group_start = False
                    text_group_start_idx = page_blocks[-1]["block_idx"]
                page_blocks[-1]["text_group_start_idx"] = text_group_start_idx
                curr_line.line_type = "table_row"
            else:
                text_group_start = True
                text_group_start_idx = -1
            list_char = ""
            if curr_line.line_type == "list_item":
                list_char = curr_line.text[0]
                curr_line.text = curr_line.text[1:].lstrip()
            if curr_line.line_type == "header":
                header_block_idx = block_idx
            if (visual_header or visual_header_by_stats) and not (
                prev_line.line_type == "list_item"
                or prev_line.line_type == "numbered_list_item"
            ):
                page_blocks[-1]["block_type"] = "header"
            # Group attribute: a new visual group starts when the layout is
            # misplaced or the text goes back up the page.
            if check_layout(prev_line, curr_line, prev_above_curr) or y_diff < 0:
                group_id += 1
            block = {
                "block_idx": block_idx,
                "block_text": curr_line.text,
                "block_type": curr_line.line_type,
                "header_block_idx": header_block_idx,
                "block_group": [curr_line.visual_line.text_list],
                "text_group_start_idx": text_group_start_idx,
                "list_char": list_char,
                "group_id": group_id,
                "fs": curr_line.visual_line.start_fs,
                "x": curr_line.visual_line.start_x,
                "y": curr_line.visual_line.start_y,
                "line": curr_line,
                "block_list": curr_line.visual_line.text_list,
            }
            # This is to account for when the headers get false positive #TODO improve header code
            prev_text = page_blocks[-1]["block_text"]
            if page_blocks[-1]["block_type"] == "header" and (
                len(sent_tokenize(prev_text)) >= 2 or len(prev_text.split()) > 16
            ):
                page_blocks[-1]["block_type"] = "para"
            prev_line = curr_line
            block_idx += 1
            page_blocks.append(block)
    # not too many blocks: there may be title text missed
    if len(page_blocks) <= 2:
        for idx, block in enumerate(page_blocks):
            if "." not in block["block_text"] and len(block["block_text"].split()) < 10:
                page_blocks[idx]["block_type"] = "header"
    page_blocks = order_blocks(page_blocks)
    return page_blocks, line_set
def clean_line(line):
    """Collapse newline/tab characters to spaces and trim surrounding whitespace."""
    # Map both kinds of embedded whitespace to a plain space in a single pass.
    cleaned = line.translate(str.maketrans({"\n": " ", "\t": " "}))
    return cleaned.strip()
def fix_spaced_characters(line_text):
    """Drop all whitespace from *line_text* and re-segment it into words."""
    # Text such as "H e l l o" is first collapsed to "Hello"; the word
    # segmenter then re-inserts the most likely word boundaries.
    squeezed = re.sub(r"\s+", "", line_text)
    return su.segment(squeezed)
def connect(prev, curr):
    """Join two text fragments with exactly one separating space."""
    separator = "" if prev.endswith(" ") else " "
    return f"{prev}{separator}{curr}"
def get_numbers(line):
    """Search for a trailing, optionally $-prefixed, number in *line*.

    Returns the ``re.Match`` for a value such as ``12``, ``12.3`` or ``$.45``
    anchored at the end of the string, or ``None`` when nothing matches.
    """
    # test = re.compile(r"[0-9]+\.?[0-9]?")
    pattern = re.compile(r"\$?(\d*(\d\.?|\.\d{1,2}))$")
    return pattern.search(line)
def check_block_join(prev_block, block):
    """Decide whether two adjacent paragraph blocks should be merged."""
    if prev_block["block_type"] != "para" or block["block_type"] != "para":
        return False
    prev_text = prev_block["block_text"]
    curr_text = block["block_text"]
    if not (prev_text.strip() and curr_text.strip()):
        return False
    # Join when the first paragraph looks unfinished or the second looks like
    # a continuation, as judged by the line-parser heuristics.
    prev_line = line_parser.Line(prev_text)
    curr_line = line_parser.Line(curr_text)
    return bool(prev_line.incomplete_line or curr_line.continuing_line)
def join_blocks(page_blocks, blocks):
    """Merge the first block of *blocks* into the last block of *page_blocks*.

    The trailing block of the previous page and the leading block of the new
    page are assumed to belong together: their texts are joined with a single
    space, the first new block's text list is appended to the previous block,
    the remaining new blocks have their indices shifted down by one, and the
    (page_blocks, remaining_blocks) pair is returned.
    """
    tail = page_blocks[-1][-1]
    head = blocks[0]
    tail["block_text"] = tail["block_text"].rstrip() + " " + head["block_text"].lstrip()
    tail["block_list"].append(head["block_list"])
    page_blocks[-1][-1] = tail
    remaining = blocks[1:]
    # One block was absorbed, so every surviving index moves down a slot.
    for leftover in remaining:
        leftover["block_idx"] -= 1
    return page_blocks, remaining
|
evocodebench_data_55
|
"""Tools module"""
# Copyright (c) 2023
# Author: Hugo Delatte <delatte.hugo@gmail.com>
# License: BSD 3 clause
# Implementation derived from:
# Riskfolio-Lib, Copyright (c) 2020-2023, Dany Cajas, Licensed under BSD 3 clause.
# Statsmodels, Copyright (C) 2006, Jonathan E. Taylor, Licensed under BSD 3 clause.
from enum import auto
import numpy as np
import scipy.cluster.hierarchy as sch
import scipy.optimize as sco
import scipy.spatial.distance as scd
import scipy.special as scs
from scipy.sparse import csr_matrix
from skfolio.utils.tools import AutoEnum
__all__ = [
"NBinsMethod",
"n_bins_freedman",
"n_bins_knuth",
"is_cholesky_dec",
"assert_is_square",
"assert_is_symmetric",
"assert_is_distance",
"cov_nearest",
"cov_to_corr",
"corr_to_cov",
"commutation_matrix",
"compute_optimal_n_clusters",
"rand_weights",
"rand_weights_dirichlet",
]
class NBinsMethod(AutoEnum):
    """Enumeration of the histogram bin-count estimation methods.

    Parameters
    ----------
    FREEDMAN : str
        Freedman-Diaconis rule (see ``n_bins_freedman``).

    KNUTH : str
        Knuth's rule (see ``n_bins_knuth``).
    """

    FREEDMAN = auto()
    KNUTH = auto()
def n_bins_freedman(x: np.ndarray) -> int:
    """Compute the optimal histogram bin size using the Freedman-Diaconis rule [1]_.

    Parameters
    ----------
    x : ndarray of shape (n_observations,)
        The input array.

    Returns
    -------
    n_bins : int
        The optimal bin size (5 when the inter-quartile range is zero).

    References
    ----------
    .. [1] "On the histogram as a density estimator: L2 theory".
        Freedman & Diaconis (1981).
    """
    if x.ndim != 1:
        raise ValueError("`x` must be a 1d-array")
    n_obs = len(x)
    q1, q3 = np.percentile(x, [25, 75])
    # Freedman-Diaconis bin width: 2 * IQR / n^(1/3).
    width = 2 * (q3 - q1) / (n_obs ** (1 / 3))
    if width == 0:
        # Degenerate IQR (e.g. near-constant data): fall back to a fixed count.
        return 5
    span = np.max(x) - np.min(x)
    return int(round(max(1, np.ceil(span / width))))
def n_bins_knuth(x: np.ndarray) -> int:
    """Compute the optimal histogram bin size using Knuth's rule [1]_.

    Parameters
    ----------
    x : ndarray of shape (n_observations,)
        The input array.

    Returns
    -------
    n_bins : int
        The optimal bin size.

    References
    ----------
    .. [1] "Optimal Data-Based Binning for Histograms".
        Knuth.
    """
    x = np.sort(x)
    n_obs = len(x)

    def neg_log_posterior(m: float):
        # scipy's fmin passes a 1-element array; unwrap the candidate count.
        m = m[0]
        if m <= 0:
            return np.inf
        edges = np.linspace(x[0], x[-1], int(m) + 1)
        counts, _ = np.histogram(x, edges)
        return -(
            n_obs * np.log(m)
            + scs.gammaln(0.5 * m)
            - m * scs.gammaln(0.5)
            - scs.gammaln(n_obs + 0.5 * m)
            + np.sum(scs.gammaln(counts + 0.5))
        )

    # Seed the optimizer with the Freedman-Diaconis estimate.
    initial_guess = n_bins_freedman(x)
    best = sco.fmin(neg_log_posterior, initial_guess, disp=0)[0]
    return int(round(best))
def rand_weights_dirichlet(n: int) -> np.array:
    """Produce n random weights that sum to one, drawn from a Dirichlet
    distribution (uniform distribution over the simplex).

    Parameters
    ----------
    n : int
        Number of weights.

    Returns
    -------
    weights : ndarray of shape (n, )
        The vector of weights.
    """
    alpha = np.ones(n)
    return np.random.dirichlet(alpha)
def rand_weights(n: int, zeros: int = 0) -> np.array:
    """Produce n random weights that sum to one from a uniform draw
    (a non-uniform distribution over the simplex).

    Parameters
    ----------
    n : int
        Number of weights.

    zeros : int, default=0
        The number of weights to randomly set to zeros.

    Returns
    -------
    weights : ndarray of shape (n, )
        The vector of weights.
    """
    raw = np.random.rand(n)
    if zeros > 0:
        # Knock out `zeros` distinct positions before normalizing.
        muted = np.random.choice(n, zeros, replace=False)
        raw[muted] = 0
    return raw / sum(raw)
def is_cholesky_dec(x: np.ndarray) -> bool:
    """Return True if a Cholesky decomposition of the matrix can be computed.

    The matrix must be Hermitian (symmetric if real-valued) and positive-definite.
    No checking is performed to verify whether the matrix is Hermitian or not.

    Parameters
    ----------
    x : ndarray of shape (n, m)
        The matrix.

    Returns
    -------
    value : bool
        True if Cholesky decomposition can be applied to the matrix, False otherwise.
    """
    # Around 100 times faster than checking for positive eigenvalues with np.linalg.eigh
    try:
        np.linalg.cholesky(x)
        return True
    # Fix: use the public exception name. `np.linalg.linalg.LinAlgError` relied
    # on a private submodule alias that was removed in NumPy 2.0.
    except np.linalg.LinAlgError:
        return False
def is_positive_definite(x: np.ndarray) -> bool:
    """Return True if the matrix is positive definite.

    Parameters
    ----------
    x : ndarray of shape (n, m)
        The matrix.

    Returns
    -------
    value : bool
        True if the matrix is positive definite, False otherwise.
    """
    eigenvalues = np.linalg.eigvals(x)
    return np.all(eigenvalues > 0)
def assert_is_square(x: np.ndarray) -> None:
    """Raise a ValueError if the matrix is not square.

    Parameters
    ----------
    x : ndarray of shape (n, n)
        The matrix.

    Raises
    ------
    ValueError
        If the matrix is not square.
    """
    is_square = x.ndim == 2 and x.shape[0] == x.shape[1]
    if not is_square:
        raise ValueError("The matrix must be square")
def assert_is_symmetric(x: np.ndarray) -> None:
    """Raise a ValueError if the matrix is not symmetric.

    Parameters
    ----------
    x : ndarray of shape (n, m)
        The matrix.

    Raises
    ------
    ValueError
        If the matrix is not symmetric.
    """
    assert_is_square(x)
    # Symmetry is checked up to floating-point tolerance.
    symmetric = np.allclose(x, x.T)
    if not symmetric:
        raise ValueError("The matrix must be symmetric")
def assert_is_distance(x: np.ndarray) -> None:
    """Raise an error if the matrix is not a distance matrix.

    A distance matrix must be square, symmetric and have a (near) zero diagonal.

    Parameters
    ----------
    x : ndarray of shape (n, n)
        The matrix.

    Raises
    ------
    ValueError
        If the matrix is not a distance matrix.
    """
    assert_is_symmetric(x)
    # Self-distances must be (close to) zero; atol tolerates float noise.
    if not np.allclose(np.diag(x), np.zeros(x.shape[0]), atol=1e-5):
        raise ValueError(
            "The distance matrix must have diagonal elements close to zeros"
        )
def cov_to_corr(cov: np.ndarray) -> tuple[np.ndarray, np.ndarray]:
    """Convert a covariance matrix to a correlation matrix.

    Parameters
    ----------
    cov : ndarray of shape (n, n)
        Covariance matrix.

    Returns
    -------
    corr, std : tuple[ndarray of shape (n, n), ndarray of shape (n, )]
        Correlation matrix and standard-deviation vector.
    """
    if cov.ndim != 2:
        raise ValueError(f"`cov` must be a 2D array, got a {cov.ndim}D array")
    std = np.sqrt(np.diag(cov))
    # Divide each entry by the product of its row/column standard deviations.
    corr = cov / np.outer(std, std)
    return corr, std
def corr_to_cov(corr: np.ndarray, std: np.ndarray):
    """Convert a correlation matrix to a covariance matrix given its
    standard-deviation vector.

    Parameters
    ----------
    corr : ndarray of shape (n, n)
        Correlation matrix.

    std : ndarray of shape (n, )
        Standard-deviation vector.

    Returns
    -------
    cov : ndarray of shape (n, n)
        Covariance matrix.
    """
    if std.ndim != 1:
        raise ValueError(f"`std` must be a 1D array, got a {std.ndim}D array")
    if corr.ndim != 2:
        raise ValueError(f"`corr` must be a 2D array, got a {corr.ndim}D array")
    # Scale rows and columns back by the standard deviations.
    return corr * np.outer(std, std)
# Eigenvalue floor used by the clipping branch of `cov_nearest`: flooring with
# a value smaller than ~1e-13 can make scipy treat the result as non-PSD in
# some corner cases (see test/test_stats.py).
_CLIPPING_VALUE = 1e-13
def cov_nearest(cov: np.ndarray, higham: bool = False, higham_max_iteration: int = 100):
    """Compute the nearest covariance matrix that is positive definite and with a
    Cholesky decomposition that can be computed. The variance is left unchanged.

    First, it converts the covariance matrix to a correlation matrix.
    Then, it finds the nearest correlation matrix and converts it back to a covariance
    matrix using the initial standard deviation.

    Cholesky decomposition can fail for a symmetric positive definite (SPD) matrix due
    to floating point error and, inversely, Cholesky decomposition can succeed for a
    non-SPD matrix. Therefore, we need to test for both. We always start by testing
    for Cholesky decomposition which is significantly faster than checking for positive
    eigenvalues.

    Parameters
    ----------
    cov : ndarray of shape (n, n)
        Covariance matrix.

    higham : bool, default=False
        If this is set to True, the Higham & Nick (2002) algorithm [1]_ is used,
        otherwise the eigenvalues are clipped to a threshold above zero (1e-13).
        The default (`False`) is to use the clipping method as the Higham & Nick
        algorithm can be slow for large datasets.

    higham_max_iteration : int, default=100
        Maximum number of iterations of the Higham & Nick (2002) algorithm.
        The default value is `100`.

    Returns
    -------
    cov : ndarray
        The nearest covariance matrix.

    References
    ----------
    .. [1] "Computing the nearest correlation matrix - a problem from finance"
        IMA Journal of Numerical Analysis
        Higham & Nick (2002)
    """
    assert_is_square(cov)
    assert_is_symmetric(cov)
    # Around 100 times faster than checking eigenvalues with np.linalg.eigh
    if is_cholesky_dec(cov) and is_positive_definite(cov):
        return cov
    corr, std = cov_to_corr(cov)
    if higham:
        eps = np.finfo(np.float64).eps * 5
        # `diff` is the correction term of the alternating-projection iteration.
        diff = np.zeros(corr.shape)
        x = corr.copy()
        for _ in range(higham_max_iteration):
            x_adj = x - diff
            # Project onto the PSD cone by flooring the eigenvalues at eps.
            eig_vals, eig_vecs = np.linalg.eigh(x_adj)
            x = eig_vecs * np.maximum(eig_vals, eps) @ eig_vecs.T
            diff = x - x_adj
            # Project back onto the set of correlation matrices (unit diagonal).
            np.fill_diagonal(x, 1)
            cov = corr_to_cov(x, std)
            # Stop as soon as the restored covariance passes both checks.
            if is_cholesky_dec(cov) and is_positive_definite(cov):
                break
        else:
            # for/else: no break occurred within the iteration budget.
            raise ValueError("Unable to find the nearest positive definite matrix")
    else:
        eig_vals, eig_vecs = np.linalg.eigh(corr)
        # Clipping the eigenvalues with a value smaller than 1e-13 can cause scipy to
        # consider the matrix non-psd in some corner cases (see test/test_stats.py)
        x = eig_vecs * np.maximum(eig_vals, _CLIPPING_VALUE) @ eig_vecs.T
        # Re-normalize to a proper correlation matrix before restoring variances.
        x, _ = cov_to_corr(x)
        cov = corr_to_cov(x, std)
    return cov
def commutation_matrix(x):
    """Compute the commutation matrix of the given matrix.

    The commutation matrix K is the permutation matrix satisfying
    ``K @ vec(x) == vec(x.T)``, with ``vec`` the column-stacking operator.

    Parameters
    ----------
    x : ndarray of shape (n, m)
        The matrix.

    Returns
    -------
    K : ndarray of shape (m * n, m * n)
        The commutation matrix.
    """
    m, n = x.shape
    size = m * n
    rows = np.arange(size)
    # Column j of row i is the position of the same element after transposition
    # (Fortran-order reshape followed by a C-order ravel).
    cols = rows.reshape((m, n), order="F").ravel()
    ones = np.ones(size, dtype=np.int8)
    return csr_matrix((ones, (rows, cols)), shape=(size, size))
def compute_optimal_n_clusters(distance: np.ndarray, linkage_matrix: np.ndarray) -> int:
    r"""Compute the optimal number of clusters based on Two-Order Difference to Gap
    Statistic [1]_.

    The Two-Order Difference to Gap Statistic has been developed to improve the
    performance and stability of the Tibshiranis Gap statistic.
    It applies the two-order difference of the within-cluster dispersion to replace the
    reference null distribution in the Gap statistic.

    The number of cluster :math:`k` is determined by:

    .. math:: \begin{cases}
                \begin{aligned}
                &\max_{k} & & W_{k+2} + W_{k} - 2 W_{k+1} \\
                &\text{s.t.} & & 1 \ge c \ge max\bigl(8, \sqrt{n}\bigr) \\
                \end{aligned}
              \end{cases}

    with :math:`n` the sample size and :math:`W_{k}` the within-cluster dispersions
    defined as:

    .. math:: W_{k} = \sum_{i=1}^{k} \frac{D_{i}}{2|C_{i}|}

    where :math:`|C_{i}|` is the cardinality of cluster :math:`i` and :math:`D_{i}` its
    density defined as:

    .. math:: D_{i} = \sum_{u \in C_{i}} \sum_{v \in C_{i}} d(u,v)

    with :math:`d(u,v)` the distance between u and v.

    Parameters
    ----------
    distance : ndarray of shape (n, n)
        Distance matrix.

    linkage_matrix : ndarray of shape (n - 1, 4)
        Linkage matrix.

    Returns
    -------
    value : int
        Optimal number of clusters.

    References
    ----------
    .. [1] "Application of two-order difference to gap statistic".
        Yue, Wang & Wei (2009)
    """
    cut_tree = sch.cut_tree(linkage_matrix)
    n = cut_tree.shape[1]
    # Candidate cluster counts are capped at max(8, sqrt(n)).
    max_clusters = max(8, round(np.sqrt(n)))
    dispersion = []
    for k in range(max_clusters):
        # Column n-k-1 of cut_tree assigns each sample to one of k+1 clusters.
        level = cut_tree[:, n - k - 1]
        cluster_density = []
        for i in range(np.max(level) + 1):
            cluster_idx = np.argwhere(level == i).flatten()
            # Condensed pairwise distances within cluster i.
            cluster_dists = scd.squareform(
                distance[cluster_idx, :][:, cluster_idx], checks=False
            )
            # Singleton clusters have no pairwise distances and are skipped.
            if cluster_dists.shape[0] != 0:
                cluster_density.append(np.nan_to_num(cluster_dists.mean()))
        dispersion.append(np.sum(cluster_density))
    dispersion = np.array(dispersion)
    # Two-order difference: W_{k+2} + W_k - 2 * W_{k+1}; the last two entries
    # are dropped because np.roll wraps around.
    gaps = np.roll(dispersion, -2) + dispersion - 2 * np.roll(dispersion, -1)
    gaps = gaps[:-2]
    # k=0 represents one cluster
    k = np.argmax(gaps) + 2
    return k
|
evocodebench_data_56
|
from copy import deepcopy
from dataclasses import dataclass, field
from typing import Callable, Dict
from autorag.support import get_support_modules
@dataclass
class Module:
    # Name of the module as registered in autorag's support table.
    module_type: str
    # Keyword parameters intended for the resolved module.
    module_param: Dict
    # Resolved callable; populated by __post_init__, not by the constructor.
    module: Callable = field(init=False)

    def __post_init__(self):
        """Resolve `module_type` to a callable via the support registry."""
        self.module = get_support_modules(self.module_type)
        # NOTE(review): this guard assumes get_support_modules returns None for
        # unknown types — confirm it doesn't raise instead.
        if self.module is None:
            raise ValueError(f"Module type {self.module_type} is not supported.")

    @classmethod
    def from_dict(cls, module_dict: Dict) -> 'Module':
        """Build a Module from a dict holding 'module_type' plus its parameters.

        The input is deep-copied first, so the caller's dict is left untouched.
        """
        _module_dict = deepcopy(module_dict)
        module_type = _module_dict.pop('module_type')
        module_params = _module_dict
        return cls(module_type, module_params)
|
evocodebench_data_57
|
# -*- coding: utf-8 -*-
# Copyright (c) Facebook, Inc. and its affiliates.
"""
Common data processing utilities that are used in a
typical object detection data pipeline.
"""
import logging
import numpy as np
from typing import List, Union
import pycocotools.mask as mask_util
import torch
from PIL import Image
from detectron2.structures import (
BitMasks,
Boxes,
BoxMode,
Instances,
Keypoints,
PolygonMasks,
RotatedBoxes,
polygons_to_bitmask,
)
from detectron2.utils.file_io import PathManager
from . import transforms as T
from .catalog import MetadataCatalog
__all__ = [
"SizeMismatchError",
"convert_image_to_rgb",
"check_image_size",
"transform_proposals",
"transform_instance_annotations",
"annotations_to_instances",
"annotations_to_instances_rotated",
"build_augmentation",
"build_transform_gen",
"create_keypoint_hflip_indices",
"filter_empty_instances",
"read_image",
]
class SizeMismatchError(ValueError):
    """
    Raised when a loaded image has different width/height than its annotation.
    """
# RGB <-> YUV conversion matrices for SDTV (BT.601).
# https://en.wikipedia.org/wiki/YUV#SDTV_with_BT.601
_M_RGB2YUV = [[0.299, 0.587, 0.114], [-0.14713, -0.28886, 0.436], [0.615, -0.51499, -0.10001]]
_M_YUV2RGB = [[1.0, 0.0, 1.13983], [1.0, -0.39465, -0.58060], [1.0, 2.03211, 0.0]]

# https://www.exiv2.org/tags.html
_EXIF_ORIENT = 274  # exif 'Orientation' tag
def convert_PIL_to_numpy(image, format):
    """
    Convert PIL image to numpy array of target format.

    Args:
        image (PIL.Image): a PIL image
        format (str): the format of output image

    Returns:
        (np.ndarray): also see `read_image`
    """
    if format is not None:
        # PIL only supports RGB, so convert to RGB and flip channels over below
        if format in ["BGR", "YUV-BT.601"]:
            image = image.convert("RGB")
        else:
            image = image.convert(format)

    image = np.asarray(image)

    if format == "L":
        # PIL squeezes out the channel dimension for "L", so make it HWC
        image = np.expand_dims(image, -1)
    # handle formats not supported by PIL
    elif format == "BGR":
        # flip channels if needed
        image = image[:, :, ::-1]
    elif format == "YUV-BT.601":
        image = image / 255.0
        image = np.dot(image, np.array(_M_RGB2YUV).T)
    return image
def convert_image_to_rgb(image, format):
    """
    Convert an image from given format to RGB.

    Args:
        image (np.ndarray or Tensor): an HWC image
        format (str): the format of input image, also see `read_image`

    Returns:
        (np.ndarray): (H,W,3) RGB image in 0-255 range, can be either float or uint8
    """
    if isinstance(image, torch.Tensor):
        image = image.cpu().numpy()

    if format == "BGR":
        # Reverse the channel order: BGR -> RGB.
        image = image[:, :, [2, 1, 0]]
    elif format == "YUV-BT.601":
        image = np.dot(image, np.array(_M_YUV2RGB).T) * 255.0
    else:
        if format == "L":
            image = image[:, :, 0]
        # Round-trip through PIL to convert any other mode to RGB.
        image = np.asarray(Image.fromarray(image.astype(np.uint8), mode=format).convert("RGB"))
    return image
def _apply_exif_orientation(image):
    """
    Applies the exif orientation correctly.

    This code exists per the bug:
      https://github.com/python-pillow/Pillow/issues/3973
    with the function `ImageOps.exif_transpose`. The Pillow source raises errors with
    various methods, especially `tobytes`

    Function based on:
      https://github.com/wkentaro/labelme/blob/v4.5.4/labelme/utils/image.py#L59
      https://github.com/python-pillow/Pillow/blob/7.1.2/src/PIL/ImageOps.py#L527

    Args:
        image (PIL.Image): a PIL image

    Returns:
        (PIL.Image): the PIL image with exif orientation applied, if applicable
    """
    if not hasattr(image, "getexif"):
        return image

    # Reading EXIF can itself blow up on corrupt files; treat that as "no EXIF".
    try:
        exif = image.getexif()
    except Exception:  # https://github.com/facebookresearch/detectron2/issues/1885
        return image

    if exif is None:
        return image

    # Map each EXIF orientation code to the PIL transpose that undoes it.
    transpose_by_orientation = {
        2: Image.FLIP_LEFT_RIGHT,
        3: Image.ROTATE_180,
        4: Image.FLIP_TOP_BOTTOM,
        5: Image.TRANSPOSE,
        6: Image.ROTATE_270,
        7: Image.TRANSVERSE,
        8: Image.ROTATE_90,
    }
    method = transpose_by_orientation.get(exif.get(_EXIF_ORIENT))
    if method is None:
        return image
    return image.transpose(method)
def read_image(file_name, format=None):
    """
    Read an image into the given format.
    Will apply rotation and flipping if the image has such exif information.

    Args:
        file_name (str): image file path
        format (str): one of the supported image modes in PIL, or "BGR" or "YUV-BT.601".

    Returns:
        image (np.ndarray):
            an HWC image in the given format, which is 0-255, uint8 for
            supported image modes in PIL or "BGR"; float (0-1 for Y) for YUV-BT.601.
    """
    with PathManager.open(file_name, "rb") as f:
        image = Image.open(f)

        # work around this bug: https://github.com/python-pillow/Pillow/issues/3973
        image = _apply_exif_orientation(image)

        # NOTE: PIL decodes lazily, so the numpy conversion must happen while
        # `f` is still open — hence the return inside the `with` block.
        return convert_PIL_to_numpy(image, format)
def check_image_size(dataset_dict, image):
    """
    Validate the loaded image against the size recorded in the dataset dict and
    fill in missing "width"/"height" keys from the actual image.

    Args:
        dataset_dict (dict): a dataset dict, possibly containing
            "width", "height" and "file_name" keys. Modified in-place.
        image (np.ndarray): the loaded HWC image.

    Raises:
        SizeMismatchError: if a recorded dimension disagrees with the image.
    """
    if "width" in dataset_dict or "height" in dataset_dict:
        image_wh = (image.shape[1], image.shape[0])
        # Fix: fall back to the actual image size for a missing key, so a dict
        # carrying only one of the two dimensions is still validated instead
        # of raising KeyError (the original indexed both keys unconditionally).
        expected_wh = (
            dataset_dict.get("width", image_wh[0]),
            dataset_dict.get("height", image_wh[1]),
        )
        if not image_wh == expected_wh:
            raise SizeMismatchError(
                "Mismatched image shape{}, got {}, expect {}.".format(
                    " for image " + dataset_dict["file_name"]
                    if "file_name" in dataset_dict
                    else "",
                    image_wh,
                    expected_wh,
                )
                + " Please check the width/height in your annotation."
            )

    # To ensure bbox always remap to original image size
    if "width" not in dataset_dict:
        dataset_dict["width"] = image.shape[1]
    if "height" not in dataset_dict:
        dataset_dict["height"] = image.shape[0]
def transform_proposals(dataset_dict, image_shape, transforms, *, proposal_topk, min_box_size=0):
    """
    Apply transformations to the proposals in dataset_dict, if any.

    Args:
        dataset_dict (dict): a dict read from the dataset, possibly
            contains fields "proposal_boxes", "proposal_objectness_logits", "proposal_bbox_mode"
        image_shape (tuple): height, width
        transforms (TransformList):
        proposal_topk (int): only keep top-K scoring proposals
        min_box_size (int): proposals with either side smaller than this
            threshold are removed

    The input dict is modified in-place, with abovementioned keys removed. A new
    key "proposals" will be added. Its value is an `Instances`
    object which contains the transformed proposals in its field
    "proposal_boxes" and "objectness_logits".
    """
    if "proposal_boxes" in dataset_dict:
        # Convert proposals to absolute XYXY, apply the transforms; note the
        # `pop` calls remove the raw fields from dataset_dict as documented.
        boxes = transforms.apply_box(
            BoxMode.convert(
                dataset_dict.pop("proposal_boxes"),
                dataset_dict.pop("proposal_bbox_mode"),
                BoxMode.XYXY_ABS,
            )
        )
        boxes = Boxes(boxes)
        objectness_logits = torch.as_tensor(
            dataset_dict.pop("proposal_objectness_logits").astype("float32")
        )

        # Clip to the transformed image and drop degenerate/too-small boxes,
        # keeping the logits aligned with the surviving boxes.
        boxes.clip(image_shape)
        keep = boxes.nonempty(threshold=min_box_size)
        boxes = boxes[keep]
        objectness_logits = objectness_logits[keep]

        # NOTE(review): the top-K slice assumes proposals arrive sorted by
        # score — confirm with the proposal producer.
        proposals = Instances(image_shape)
        proposals.proposal_boxes = boxes[:proposal_topk]
        proposals.objectness_logits = objectness_logits[:proposal_topk]
        dataset_dict["proposals"] = proposals
def transform_instance_annotations(
    annotation, transforms, image_size, *, keypoint_hflip_indices=None
):
    """
    Apply transforms to box, segmentation and keypoints annotations of a single instance.

    It will use `transforms.apply_box` for the box, and
    `transforms.apply_coords` for segmentation polygons & keypoints.
    If you need anything more specially designed for each data structure,
    you'll need to implement your own version of this function or the transforms.

    Args:
        annotation (dict): dict of instance annotations for a single instance.
            It will be modified in-place.
        transforms (TransformList or list[Transform]):
        image_size (tuple): the height, width of the transformed image
        keypoint_hflip_indices (ndarray[int]): see `create_keypoint_hflip_indices`.

    Returns:
        dict:
            the same input dict with fields "bbox", "segmentation", "keypoints"
            transformed according to `transforms`.
            The "bbox_mode" field will be set to XYXY_ABS.
    """
    if isinstance(transforms, (tuple, list)):
        transforms = T.TransformList(transforms)
    # bbox is 1d (per-instance bounding box)
    bbox = BoxMode.convert(annotation["bbox"], annotation["bbox_mode"], BoxMode.XYXY_ABS)
    # clip transformed bbox to image size
    bbox = transforms.apply_box(np.array([bbox]))[0].clip(min=0)
    # image_size is (h, w); reversing (h, w, h, w) yields the per-coordinate
    # (w, h, w, h) upper bounds matching the XYXY layout.
    annotation["bbox"] = np.minimum(bbox, list(image_size + image_size)[::-1])
    annotation["bbox_mode"] = BoxMode.XYXY_ABS

    if "segmentation" in annotation:
        # each instance contains 1 or more polygons
        segm = annotation["segmentation"]
        if isinstance(segm, list):
            # polygons: flatten after transforming each (N, 2) point array
            polygons = [np.asarray(p).reshape(-1, 2) for p in segm]
            annotation["segmentation"] = [
                p.reshape(-1) for p in transforms.apply_polygons(polygons)
            ]
        elif isinstance(segm, dict):
            # RLE: decode to a bitmask, transform it as an image
            mask = mask_util.decode(segm)
            mask = transforms.apply_segmentation(mask)
            assert tuple(mask.shape[:2]) == image_size
            annotation["segmentation"] = mask
        else:
            raise ValueError(
                "Cannot transform segmentation of type '{}'!"
                "Supported types are: polygons as list[list[float] or ndarray],"
                " COCO-style RLE as a dict.".format(type(segm))
            )

    if "keypoints" in annotation:
        keypoints = transform_keypoint_annotations(
            annotation["keypoints"], transforms, image_size, keypoint_hflip_indices
        )
        annotation["keypoints"] = keypoints

    return annotation
def transform_keypoint_annotations(keypoints, transforms, image_size, keypoint_hflip_indices=None):
    """
    Transform keypoint annotations of an image.
    If a keypoint is transformed out of image boundary, it will be marked "unlabeled" (visibility=0)

    Args:
        keypoints (list[float]): Nx3 float in Detectron2's Dataset format.
            Each point is represented by (x, y, visibility).
        transforms (TransformList):
        image_size (tuple): the height, width of the transformed image
        keypoint_hflip_indices (ndarray[int]): see `create_keypoint_hflip_indices`.
            When `transforms` includes horizontal flip, will use the index
            mapping to flip keypoints.

    Returns:
        ndarray: Nx3 float64 array of transformed (x, y, visibility) keypoints.

    Raises:
        ValueError: if a flip is detected but `keypoint_hflip_indices` is
            missing or its length does not match the keypoints.
    """
    # (N*3,) -> (N, 3)
    keypoints = np.asarray(keypoints, dtype="float64").reshape(-1, 3)
    keypoints_xy = transforms.apply_coords(keypoints[:, :2])

    # Set all out-of-boundary points to "unlabeled".
    # image_size is (h, w); reversed it compares against (x, y).
    inside = (keypoints_xy >= np.array([0, 0])) & (keypoints_xy <= np.array(image_size[::-1]))
    inside = inside.all(axis=1)
    keypoints[:, :2] = keypoints_xy
    keypoints[:, 2][~inside] = 0

    # An odd number of HFlipTransforms means a net horizontal flip.
    # This assumes that HorizFlipTransform is the only one that does flip
    do_hflip = sum(isinstance(t, T.HFlipTransform) for t in transforms.transforms) % 2 == 1

    # Alternative way: check if probe points was horizontally flipped.
    # probe = np.asarray([[0.0, 0.0], [image_width, 0.0]])
    # probe_aug = transforms.apply_coords(probe.copy())
    # do_hflip = np.sign(probe[1][0] - probe[0][0]) != np.sign(probe_aug[1][0] - probe_aug[0][0])  # noqa

    # If flipped, swap each keypoint with its opposite-handed equivalent
    if do_hflip:
        if keypoint_hflip_indices is None:
            raise ValueError("Cannot flip keypoints without providing flip indices!")
        if len(keypoints) != len(keypoint_hflip_indices):
            raise ValueError(
                "Keypoint data has {} points, but metadata "
                "contains {} points!".format(len(keypoints), len(keypoint_hflip_indices))
            )
        keypoints = keypoints[np.asarray(keypoint_hflip_indices, dtype=np.int32), :]

    # Maintain COCO convention that if visibility == 0 (unlabeled), then x, y = 0
    keypoints[keypoints[:, 2] == 0] = 0
    return keypoints
def annotations_to_instances(annos, image_size, mask_format="polygon"):
    """
    Create an :class:`Instances` object used by the models,
    from instance annotations in the dataset dict.

    Args:
        annos (list[dict]): a list of instance annotations in one image, each
            element for one instance.
        image_size (tuple): height, width
        mask_format (str): "polygon" or "bitmask"; how "segmentation"
            annotations are stored in the returned Instances.

    Returns:
        Instances:
            It will contain fields "gt_boxes", "gt_classes",
            "gt_masks", "gt_keypoints", if they can be obtained from `annos`.
            This is the format that builtin models expect.

    Raises:
        ValueError: if polygon annotations are malformed or a segmentation has
            an unsupported type.
    """
    # Empty `annos` yields a (0, 4) box array so downstream code still works.
    boxes = (
        np.stack(
            [BoxMode.convert(obj["bbox"], obj["bbox_mode"], BoxMode.XYXY_ABS) for obj in annos]
        )
        if len(annos)
        else np.zeros((0, 4))
    )
    target = Instances(image_size)
    target.gt_boxes = Boxes(boxes)

    classes = [int(obj["category_id"]) for obj in annos]
    classes = torch.tensor(classes, dtype=torch.int64)
    target.gt_classes = classes

    if len(annos) and "segmentation" in annos[0]:
        segms = [obj["segmentation"] for obj in annos]
        if mask_format == "polygon":
            try:
                masks = PolygonMasks(segms)
            except ValueError as e:
                raise ValueError(
                    "Failed to use mask_format=='polygon' from the given annotations!"
                ) from e
        else:
            assert mask_format == "bitmask", mask_format
            # Normalize every supported segmentation type to a 2D binary mask.
            masks = []
            for segm in segms:
                if isinstance(segm, list):
                    # polygon
                    masks.append(polygons_to_bitmask(segm, *image_size))
                elif isinstance(segm, dict):
                    # COCO RLE
                    masks.append(mask_util.decode(segm))
                elif isinstance(segm, np.ndarray):
                    assert segm.ndim == 2, "Expect segmentation of 2 dimensions, got {}.".format(
                        segm.ndim
                    )
                    # mask array
                    masks.append(segm)
                else:
                    raise ValueError(
                        "Cannot convert segmentation of type '{}' to BitMasks!"
                        "Supported types are: polygons as list[list[float] or ndarray],"
                        " COCO-style RLE as a dict, or a binary segmentation mask "
                        " in a 2D numpy array of shape HxW.".format(type(segm))
                    )
            # torch.from_numpy does not support array with negative stride.
            masks = BitMasks(
                torch.stack([torch.from_numpy(np.ascontiguousarray(x)) for x in masks])
            )
        target.gt_masks = masks

    if len(annos) and "keypoints" in annos[0]:
        kpts = [obj.get("keypoints", []) for obj in annos]
        target.gt_keypoints = Keypoints(kpts)

    return target
def annotations_to_instances_rotated(annos, image_size):
    """
    Create an :class:`Instances` object used by the models,
    from instance annotations in the dataset dict.
    Compared to `annotations_to_instances`, this function is for rotated boxes only

    Args:
        annos (list[dict]): a list of instance annotations in one image, each
            element for one instance.
        image_size (tuple): height, width

    Returns:
        Instances:
            Containing fields "gt_boxes", "gt_classes",
            if they can be obtained from `annos`.
            This is the format that builtin models expect.
    """
    target = Instances(image_size)
    # Rotated boxes are taken as-is from the annotations (no BoxMode conversion).
    rotated = RotatedBoxes([obj["bbox"] for obj in annos])
    target.gt_boxes = rotated
    rotated.clip(image_size)
    target.gt_classes = torch.tensor(
        [obj["category_id"] for obj in annos], dtype=torch.int64
    )
    return target
def filter_empty_instances(
    instances, by_box=True, by_mask=True, box_threshold=1e-5, return_mask=False
):
    """
    Filter out empty instances in an `Instances` object.

    Args:
        instances (Instances):
        by_box (bool): whether to filter out instances with empty boxes
        by_mask (bool): whether to filter out instances with empty masks
        box_threshold (float): minimum width and height to be considered non-empty
        return_mask (bool): whether to return boolean mask of filtered instances

    Returns:
        Instances: the filtered instances.
        tensor[bool], optional: boolean mask of filtered instances
    """
    assert by_box or by_mask
    keep_masks = []
    if by_box:
        keep_masks.append(instances.gt_boxes.nonempty(threshold=box_threshold))
    if instances.has("gt_masks") and by_mask:
        keep_masks.append(instances.gt_masks.nonempty())

    # TODO: can also filter visible keypoints

    if not keep_masks:
        return instances
    # AND together all per-criterion keep masks.
    keep = keep_masks[0]
    for extra in keep_masks[1:]:
        keep = keep & extra
    if return_mask:
        return instances[keep], keep
    return instances[keep]
def create_keypoint_hflip_indices(dataset_names: Union[str, List[str]]) -> List[int]:
    """
    Args:
        dataset_names: list of dataset names

    Returns:
        list[int]: a list of size=#keypoints, storing the
        horizontally-flipped keypoint indices.
    """
    if isinstance(dataset_names, str):
        dataset_names = [dataset_names]

    check_metadata_consistency("keypoint_names", dataset_names)
    check_metadata_consistency("keypoint_flip_map", dataset_names)

    meta = MetadataCatalog.get(dataset_names[0])
    names = meta.keypoint_names
    # TODO flip -> hflip
    # Make the flip map symmetric so lookups work in both directions.
    flip_map = dict(meta.keypoint_flip_map)
    flip_map.update({v: k for k, v in flip_map.items()})
    # Keypoints without a flip partner (e.g. a nose) map to themselves.
    return [names.index(flip_map.get(name, name)) for name in names]
def gen_crop_transform_with_instance(crop_size, image_size, instance):
    """
    Generate a CropTransform so that the cropping region contains
    the center of the given instance.

    Args:
        crop_size (tuple): h, w in pixels
        image_size (tuple): h, w
        instance (dict): an annotation dict of one instance, in Detectron2's
            dataset format.

    Returns:
        T.CropTransform: a random crop window of `crop_size` covering the
            instance center.
    """
    crop_size = np.asarray(crop_size, dtype=np.int32)
    bbox = BoxMode.convert(instance["bbox"], instance["bbox_mode"], BoxMode.XYXY_ABS)
    # Instance center in (y, x) order to match the (h, w) sizes used below.
    center_yx = (bbox[1] + bbox[3]) * 0.5, (bbox[0] + bbox[2]) * 0.5
    assert (
        image_size[0] >= center_yx[0] and image_size[1] >= center_yx[1]
    ), "The annotation bounding box is outside of the image!"
    assert (
        image_size[0] >= crop_size[0] and image_size[1] >= crop_size[1]
    ), "Crop size is larger than image size!"

    # Valid top-left corners: high enough that the window still covers the
    # center, low enough that the window stays inside the image.
    min_yx = np.maximum(np.floor(center_yx).astype(np.int32) - crop_size, 0)
    max_yx = np.maximum(np.asarray(image_size, dtype=np.int32) - crop_size, 0)
    max_yx = np.minimum(max_yx, np.ceil(center_yx).astype(np.int32))

    y0 = np.random.randint(min_yx[0], max_yx[0] + 1)
    x0 = np.random.randint(min_yx[1], max_yx[1] + 1)
    return T.CropTransform(x0, y0, crop_size[1], crop_size[0])
def check_metadata_consistency(key, dataset_names):
    """
    Check that the datasets have consistent metadata.

    Args:
        key (str): a metadata key
        dataset_names (list[str]): a list of dataset names

    Raises:
        AttributeError: if the key does not exist in the metadata
        ValueError: if the given datasets do not have the same metadata values defined by key
    """
    if len(dataset_names) == 0:
        return
    logger = logging.getLogger(__name__)

    entries_per_dataset = [getattr(MetadataCatalog.get(d), key) for d in dataset_names]
    reference = entries_per_dataset[0]
    for idx, entry in enumerate(entries_per_dataset):
        if entry == reference:
            continue
        # Log both the offending value and the reference value before
        # failing, so the mismatch is easy to diagnose.
        logger.error(
            "Metadata '{}' for dataset '{}' is '{}'".format(key, dataset_names[idx], str(entry))
        )
        logger.error(
            "Metadata '{}' for dataset '{}' is '{}'".format(
                key, dataset_names[0], str(reference)
            )
        )
        raise ValueError("Datasets have different metadata '{}'!".format(key))
def build_augmentation(cfg, is_train):
    """
    Create a list of default :class:`Augmentation` from config.
    Now it includes resizing and flipping.

    Returns:
        list[Augmentation]
    """
    if is_train:
        min_size, max_size = cfg.INPUT.MIN_SIZE_TRAIN, cfg.INPUT.MAX_SIZE_TRAIN
        sample_style = cfg.INPUT.MIN_SIZE_TRAIN_SAMPLING
    else:
        min_size, max_size = cfg.INPUT.MIN_SIZE_TEST, cfg.INPUT.MAX_SIZE_TEST
        sample_style = "choice"
    augmentations = [T.ResizeShortestEdge(min_size, max_size, sample_style)]

    # Flipping is a train-time augmentation only.
    if is_train:
        flip_mode = cfg.INPUT.RANDOM_FLIP
        if flip_mode != "none":
            augmentations.append(
                T.RandomFlip(
                    horizontal=flip_mode == "horizontal",
                    vertical=flip_mode == "vertical",
                )
            )
    return augmentations
# Deprecated name kept so existing imports of `build_transform_gen` keep working.
build_transform_gen = build_augmentation
"""
Alias for backward-compatibility.
"""
|
evocodebench_data_58
|
# coding=utf-8
# Copyright 2023 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Functions for reflection directions and directional encodings."""
import math
from internal import math as math_lib
import jax.numpy as jnp
import numpy as np
def reflect(viewdirs, normals):
    """Reflect view directions about normals.

    The reflection of a vector v about a unit vector n satisfies
    dot(v, n) = dot(u, n) and dot(u, u) = dot(v, v), whose solution is
    u = 2 dot(n, v) n - v.

    Args:
      viewdirs: [..., 3] array of view directions.
      normals: [..., 3] array of normal directions (assumed to be unit vectors).

    Returns:
      [..., 3] array of reflection directions.
    """
    cos_term = jnp.sum(normals * viewdirs, axis=-1, keepdims=True)
    return 2.0 * cos_term * normals - viewdirs
def l2_normalize(x, grad_eps=jnp.finfo(jnp.float32).eps):
    """Normalize x to unit length along the last axis, with safe gradients.

    Two different denominators are used: the forward pass clamps the squared
    norm with float32 `tiny` (~1e-38) so the output is unit norm whenever x is
    not vanishingly small, while the backward pass clamps with the larger
    `grad_eps` (default float32 eps, ~1e-7) to prevent exploding gradients.

    Args:
      x: The array of values to normalize.
      grad_eps: The value to clip the squared norm by before division in the
        backward pass.

    Returns:
      A normalized array x / ||x||, normalized along the last axis.
    """
    smallest = jnp.finfo(jnp.float32).tiny
    grad_eps = jnp.maximum(smallest, grad_eps)
    sq_norm = jnp.sum(x**2, axis=-1, keepdims=True)
    fwd = x / jnp.sqrt(jnp.maximum(smallest, sq_norm))
    bwd = x / jnp.sqrt(jnp.maximum(grad_eps, sq_norm))
    # `fwd` is used in the forward pass, `bwd` only in the backward pass.
    unit = math_lib.override_gradient(fwd, bwd)
    # Degenerate (essentially zero-length) inputs map to the zero vector.
    return jnp.where(sq_norm < smallest, jnp.zeros_like(unit), unit)
def compute_weighted_mae(weights, normals, normals_gt):
    """Compute weighted mean angular error (degrees); normals assumed unit length."""
    cos_sim = (normals * normals_gt).sum(axis=-1)
    angular_err = math_lib.safe_arccos(cos_sim)
    return (180.0 / jnp.pi) * ((weights * angular_err).sum() / weights.sum())
def generalized_binomial_coeff(a, k):
    """Compute the generalized binomial coefficient "a choose k" (a may be real)."""
    # Falling factorial a * (a-1) * ... * (a-k+1); empty product is 1.0.
    falling = np.prod(a - np.arange(k))
    return falling / math.factorial(k)
def assoc_legendre_coeff(l, m, k):
    """Compute associated Legendre polynomial coefficients.

    Returns the coefficient of the cos^k(theta)*sin^m(theta) term in the
    (l, m)th associated Legendre polynomial, P_l^m(cos(theta)).

    Args:
      l: associated Legendre polynomial degree.
      m: associated Legendre polynomial order.
      k: power of cos(theta).

    Returns:
      A float, the coefficient of the term corresponding to the inputs.
    """
    # Closed-form expansion coefficient. Relies on the sibling
    # `generalized_binomial_coeff`, which accepts the (generally non-integer)
    # argument 0.5 * (l + k + m - 1).
    return (
        (-1) ** m
        * 2**l
        * math.factorial(l)
        / math.factorial(k)
        / math.factorial(l - k - m)
        * generalized_binomial_coeff(0.5 * (l + k + m - 1.0), l)
    )
def sph_harm_coeff(l, m, k):
    """Compute spherical harmonic coefficients (normalization times Legendre term)."""
    # Standard real-SH normalization factor for degree l, order m.
    norm_sq = (2.0 * l + 1.0) * math.factorial(l - m) / (4.0 * np.pi * math.factorial(l + m))
    return np.sqrt(norm_sq) * assoc_legendre_coeff(l, m, k)
def get_ml_array(deg_view):
    """Create an array of all (m, l) pairs used in the encoding.

    Row 0 holds the orders m, row 1 the degrees l; degrees are powers of two.
    """
    pairs = []
    for band in range(deg_view):
        degree = 2**band
        # Only nonnegative orders are needed; real and imaginary parts are
        # split out later, which covers the negative orders.
        pairs.extend((order, degree) for order in range(degree + 1))
    return np.array(pairs).T
def generate_ide_fn(deg_view):
    """Generate integrated directional encoding (IDE) function.

    This function returns a function that computes the integrated directional
    encoding from Equations 6-8 of arxiv.org/abs/2112.03907.

    Args:
      deg_view: number of spherical harmonics degrees to use.

    Returns:
      A function for evaluating integrated directional encoding.

    Raises:
      ValueError: if deg_view is larger than 5.
    """
    if deg_view > 5:
        raise ValueError('Only deg_view of at most 5 is numerically stable.')
    ml_array = get_ml_array(deg_view)
    l_max = 2 ** (deg_view - 1)

    # Create a matrix corresponding to ml_array holding all coefficients, which,
    # when multiplied (from the right) by the z coordinate Vandermonde matrix,
    # results in the z component of the encoding.
    # mat is precomputed once here and closed over by the returned function.
    mat = np.zeros((l_max + 1, ml_array.shape[1]))
    for i, (m, l) in enumerate(ml_array.T):
        for k in range(l - m + 1):
            mat[k, i] = sph_harm_coeff(l, m, k)

    def integrated_dir_enc_fn(xyz, kappa_inv):
        """Function returning integrated directional encoding (IDE).

        Args:
          xyz: [..., 3] array of Cartesian coordinates of directions to evaluate at.
          kappa_inv: [..., 1] reciprocal of the concentration parameter of the von
            Mises-Fisher distribution.

        Returns:
          An array with the resulting IDE.
        """
        x = xyz[Ellipsis, 0:1]
        y = xyz[Ellipsis, 1:2]
        z = xyz[Ellipsis, 2:3]

        # Compute z Vandermonde matrix (powers 0..l_max of z).
        vmz = jnp.concatenate([z**i for i in range(mat.shape[0])], axis=-1)

        # Compute x+iy Vandermonde matrix (complex powers indexed by order m).
        vmxy = jnp.concatenate([(x + 1j * y) ** m for m in ml_array[0, :]], axis=-1)

        # Get spherical harmonics.
        sph_harms = vmxy * math_lib.matmul(vmz, mat)

        # Apply attenuation function using the von Mises-Fisher distribution
        # concentration parameter, kappa: sigma = l(l+1)/2 per Eq. 8.
        sigma = 0.5 * ml_array[1, :] * (ml_array[1, :] + 1)
        ide = sph_harms * jnp.exp(-sigma * kappa_inv)

        # Split into real and imaginary parts and return
        return jnp.concatenate([jnp.real(ide), jnp.imag(ide)], axis=-1)

    return integrated_dir_enc_fn
def generate_dir_enc_fn(deg_view):
    """Generate directional encoding (DE) function.

    Args:
      deg_view: number of spherical harmonics degrees to use.

    Returns:
      A function for evaluating directional encoding.
    """
    ide_fn = generate_ide_fn(deg_view)

    def dir_enc_fn(xyz):
        """Function returning directional encoding (DE)."""
        # A zero kappa_inv disables the vMF attenuation, reducing the IDE
        # to a plain directional encoding.
        return ide_fn(xyz, jnp.zeros_like(xyz[Ellipsis, :1]))

    return dir_enc_fn
def orientation_loss(w, n, v):
    """Orientation loss on weights `w`, normals `n`, and -view directions `v`."""
    dot_nv = (n * v[Ellipsis, None, :]).sum(axis=-1)
    # Only back-facing normals (negative dot product) are penalized.
    penalty = jnp.minimum(0.0, dot_nv) ** 2
    return jnp.mean((w * penalty).sum(axis=-1))
|
evocodebench_data_59
|
import logging
from integrations.openaiwrapper import OpenAIAPIWrapper
from agents.parallel_agent_executor import ParallelAgentExecutor
from prompt_management.prompts import (
REACT_STEP_POST, REACT_STEP_PROMPT, REACT_SYSTEM_PROMPT, REACT_PLAN_PROMPT, STATIC_PRE_PROMPT, STATIC_PRE_PROMPT_PRIME, REACT_STEP_PROMPT_PRIME, REACT_STEP_POST_PRIME
)
logger = logging.getLogger()
class AgentResponse:
    """Drives one agent's ReAct-style loop (think -> act -> observe) via chat completions.

    Collaborators are all injected: `openai_wrapper` produces chat completions,
    `manager` resolves other agents for delegation, `code_execution` runs Python
    snippets found in responses, `agent` is the agent being driven, `creator`
    built this object, and `depth` is the delegation depth (child agents are
    spawned at `depth + 1`).
    """

    def __init__(self, openai_wrapper, manager, code_execution, agent, creator, depth):
        self.openai_wrapper = openai_wrapper
        self.manager = manager
        self.code_execution = code_execution
        self.agent = agent
        self.creator = creator
        self.depth = depth

    def number_to_emoji(self, number):
        """Converts a number to an emoji."""
        # NOTE(review): 0x1F1E6 is REGIONAL INDICATOR SYMBOL LETTER A, so digits
        # 0-9 map to the letter symbols A..J (flag-letter emojis), not keycap
        # digit emojis — confirm this mapping is intended.
        response = ""
        for digit in str(number):
            response += chr(0x1f1e6 + int(digit))
        return response

    def generate_response(self, input_text, dynamic_prompt, max_depth):
        """Run up to `max_depth` ReAct iterations and return the reviewed answer.

        Returns:
            tuple: (concluded output, full conversation log, whether a
            "Query Solved" marker was seen, number of iterations performed).
        """
        runtime_context = self._generate_runtime_context(dynamic_prompt)
        system_prompt = self._compose_system_prompt(runtime_context, dynamic_prompt)
        conversation_accumulator = ""
        thought_number = 0
        action_number = 0
        found_new_solution = False
        for _ in range(max_depth):
            react_prompt = self._build_react_prompt(input_text, conversation_accumulator, thought_number, action_number)
            self.agent.update_status(f"🤔 (Iteration {thought_number})")
            response = self._generate_chat_response(system_prompt, react_prompt)
            # Each iteration may execute code and/or delegate to another agent;
            # everything is appended to the running conversation log.
            conversation_accumulator, thought_number, action_number = self._process_response(
                response, conversation_accumulator, thought_number, action_number, input_text
            )
            # The model signals completion by emitting this literal marker.
            if "Query Solved" in response:
                found_new_solution = True
                break
        return self._conclude_output(conversation_accumulator, input_text), conversation_accumulator, found_new_solution, thought_number

    def _compose_system_prompt(self, runtime_context, dynamic_prompt):
        # Prime agents get a different static preamble than ordinary agents.
        pre_prompt = STATIC_PRE_PROMPT_PRIME if self.agent.is_prime else STATIC_PRE_PROMPT
        return pre_prompt + runtime_context + dynamic_prompt + "\nDELIVER THE NEXT PACKAGE."

    def _generate_runtime_context(self, dynamic_prompt):
        """Describe this agent's purpose and the agents currently available to it."""
        available_agents = self.manager.get_available_agents_for_agent(self.agent)
        available_agents_info = ', '.join([f"{agent.purpose} (depth={agent.depth})" for agent in available_agents])
        return f"Your Purpose: {dynamic_prompt}. Available agents (Feel free to invent new ones if required!): {available_agents_info}."

    def _build_react_prompt(self, input_text, conversation_accumulator, thought_number, action_number):
        """Assemble the per-iteration user prompt (question + history + next step)."""
        thought_prompt = REACT_STEP_PROMPT_PRIME if self.agent.is_prime else REACT_STEP_PROMPT
        action_prompt = REACT_STEP_POST_PRIME if self.agent.is_prime else REACT_STEP_POST
        return (
            f"Question: {input_text}\n"
            f"{conversation_accumulator}\n"
            f"Thought {thought_number}: {thought_prompt}\n"
            f"Action {action_number}: {action_prompt}"
        )

    def _generate_chat_response(self, system_prompt, react_prompt):
        """Single chat-completion call with the composed system/user messages."""
        return self.openai_wrapper.chat_completion(
            messages=[
                {"role": "system", "content": system_prompt},
                {"role": "user", "content": react_prompt}
            ]
        )

    def _process_response(self, response, conversation_accumulator, thought_number, action_number, input_text):
        """Append the response to the log and act on any code/delegation it contains."""
        updated_accumulator = self._append_response_to_accumulator(conversation_accumulator, response)
        thought_number += 1
        action_number += 1
        if self._is_python_code(response):
            exec_response = self._execute_python_code(response)
            updated_accumulator = self._append_execution_response(updated_accumulator, exec_response, thought_number)
        if self._is_agent_invocation(response):
            agent_name, updated_input_text = self._parse_agent_info(response)
            # The delegation result is already folded into the accumulator by
            # _handle_agent_delegation; the returned value is not used here.
            delegated_response, updated_accumulator = self._handle_agent_delegation(agent_name, updated_input_text, updated_accumulator, thought_number, action_number)
            action_number += 1
        return updated_accumulator, thought_number, action_number

    def _append_response_to_accumulator(self, accumulator, response):
        return accumulator + f"\n{response}"

    def _is_python_code(self, response):
        # A fenced ```python block marks executable code in the response.
        return "```python" in response

    def _execute_python_code(self, response):
        self.agent.update_status('👩💻 Coding..')
        self.agent.number_of_code_executions += 1
        return self.code_execution.execute_external_code(response)

    def _append_execution_response(self, accumulator, exec_response, thought_number):
        # NOTE(review): thought_number is accepted but unused here — confirm
        # whether the observation was meant to be numbered.
        return accumulator + f"\nObservation: Executed Python code\nOutput: {exec_response}"

    def _is_agent_invocation(self, response):
        return "Use Agent[" in response

    def _handle_agent_delegation(self, agent_name, input_text, accumulator, thought_number, action_number):
        """Delegate `input_text` to `agent_name`; self-delegation is rejected."""
        self.agent.update_active_agents(self.agent.purpose, agent_name)
        self.agent.update_status('⏳ ' + agent_name + '..')
        if agent_name == self.agent.purpose:
            # Guard against infinite self-recursion.
            accumulator += f"\nOutput {thought_number}: Unable to use Agent {agent_name}\nIt is not possible to call yourself!"
            return "", accumulator
        else:
            parallel_executor = ParallelAgentExecutor(self.manager)
            # Children run one level deeper than this agent.
            delegated_response = parallel_executor.create_and_run_agents(agent_name, self.depth + 1, input_text, self.agent)
            accumulator += f"\nOutput {thought_number}: Delegated task to Agent {agent_name}\nOutput of Agent {action_number}: {delegated_response}"
            return delegated_response, accumulator

    def _parse_agent_info(self, response):
        """Parse 'Use Agent[name: input]' into (name, input); input may be empty."""
        agent_info = response.split('Use Agent[')[1].split(']')[0]
        split_info = agent_info.split(":", 1)
        agent_name = split_info[0].strip()
        input_text = split_info[1].strip() if len(split_info) > 1 else ""
        return agent_name, input_text

    def _conclude_output(self, conversation, input_text):
        """Final review pass: ask the model to polish the whole conversation."""
        react_prompt = conversation
        react_prompt += f"\nYour designation is: {self.agent.purpose}\n"
        react_prompt += f"\nThe original question / task was: {input_text}\n"
        react_prompt += f"\nUse beautiful markdown formatting in your output, e.g. include images using \n"
        self.agent.update_status('🧐 Reviewing..')
        return self.openai_wrapper.chat_completion(
            messages=[
                {"role": "system", "content": REACT_SYSTEM_PROMPT},
                {"role": "user", "content": react_prompt}
            ]
        )
|
evocodebench_data_60
|
# -*- coding: utf-8 -*-
# Copyright (c) Facebook, Inc. and its affiliates.
"""
Common data processing utilities that are used in a
typical object detection data pipeline.
"""
import logging
import numpy as np
from typing import List, Union
import pycocotools.mask as mask_util
import torch
from PIL import Image
from detectron2.structures import (
BitMasks,
Boxes,
BoxMode,
Instances,
Keypoints,
PolygonMasks,
RotatedBoxes,
polygons_to_bitmask,
)
from detectron2.utils.file_io import PathManager
from . import transforms as T
from .catalog import MetadataCatalog
__all__ = [
"SizeMismatchError",
"convert_image_to_rgb",
"check_image_size",
"transform_proposals",
"transform_instance_annotations",
"annotations_to_instances",
"annotations_to_instances_rotated",
"build_augmentation",
"build_transform_gen",
"create_keypoint_hflip_indices",
"filter_empty_instances",
"read_image",
]
class SizeMismatchError(ValueError):
    """
    Raised when a loaded image's width/height differs from the size recorded
    in its annotation dict (see `check_image_size`).
    """
# RGB <-> YUV conversion matrices using the SDTV BT.601 coefficients.
# https://en.wikipedia.org/wiki/YUV#SDTV_with_BT.601
_M_RGB2YUV = [[0.299, 0.587, 0.114], [-0.14713, -0.28886, 0.436], [0.615, -0.51499, -0.10001]]
_M_YUV2RGB = [[1.0, 0.0, 1.13983], [1.0, -0.39465, -0.58060], [1.0, 2.03211, 0.0]]

# https://www.exiv2.org/tags.html
_EXIF_ORIENT = 274  # exif 'Orientation' tag
def convert_PIL_to_numpy(image, format):
    """
    Convert PIL image to numpy array of target format.

    Args:
        image (PIL.Image): a PIL image
        format (str): the format of output image

    Returns:
        (np.ndarray): also see `read_image`
    """
    if format is not None:
        # PIL only supports RGB, so convert to RGB and flip channels over below
        pil_mode = "RGB" if format in ["BGR", "YUV-BT.601"] else format
        image = image.convert(pil_mode)
    image = np.asarray(image)
    if format == "L":
        # PIL squeezes out the channel dimension for "L", so make it HWC
        image = np.expand_dims(image, -1)
    elif format == "BGR":
        # handle formats not supported by PIL: flip channels if needed
        image = image[:, :, ::-1]
    elif format == "YUV-BT.601":
        image = image / 255.0
        image = np.dot(image, np.array(_M_RGB2YUV).T)
    return image
def convert_image_to_rgb(image, format):
    """
    Convert an image from given format to RGB.

    Args:
        image (np.ndarray or Tensor): an HWC image
        format (str): the format of input image, also see `read_image`

    Returns:
        (np.ndarray): (H,W,3) RGB image in 0-255 range, can be either float or uint8
    """
    if isinstance(image, torch.Tensor):
        image = image.cpu().numpy()
    if format == "BGR":
        # Reorder channels back to RGB.
        image = image[:, :, [2, 1, 0]]
    elif format == "YUV-BT.601":
        image = np.dot(image, np.array(_M_YUV2RGB).T) * 255.0
    else:
        if format == "L":
            image = image[:, :, 0]
        # Round-trip through PIL so any PIL-supported mode converts to RGB.
        image = np.asarray(Image.fromarray(image.astype(np.uint8), mode=format).convert("RGB"))
    return image
def _apply_exif_orientation(image):
"""
Applies the exif orientation correctly.
This code exists per the bug:
https://github.com/python-pillow/Pillow/issues/3973
with the function `ImageOps.exif_transpose`. The Pillow source raises errors with
various methods, especially `tobytes`
Function based on:
https://github.com/wkentaro/labelme/blob/v4.5.4/labelme/utils/image.py#L59
https://github.com/python-pillow/Pillow/blob/7.1.2/src/PIL/ImageOps.py#L527
Args:
image (PIL.Image): a PIL image
Returns:
(PIL.Image): the PIL image with exif orientation applied, if applicable
"""
if not hasattr(image, "getexif"):
return image
try:
exif = image.getexif()
except Exception: # https://github.com/facebookresearch/detectron2/issues/1885
exif = None
if exif is None:
return image
orientation = exif.get(_EXIF_ORIENT)
method = {
2: Image.FLIP_LEFT_RIGHT,
3: Image.ROTATE_180,
4: Image.FLIP_TOP_BOTTOM,
5: Image.TRANSPOSE,
6: Image.ROTATE_270,
7: Image.TRANSVERSE,
8: Image.ROTATE_90,
}.get(orientation)
if method is not None:
return image.transpose(method)
return image
def read_image(file_name, format=None):
    """
    Read an image into the given format, honoring any EXIF rotation/flip.

    Args:
        file_name (str): image file path
        format (str): one of the supported image modes in PIL, or "BGR" or "YUV-BT.601".

    Returns:
        image (np.ndarray):
            an HWC image in the given format, which is 0-255, uint8 for
            supported image modes in PIL or "BGR"; float (0-1 for Y) for YUV-BT.601.
    """
    with PathManager.open(file_name, "rb") as f:
        pil_image = Image.open(f)

        # work around this bug: https://github.com/python-pillow/Pillow/issues/3973
        pil_image = _apply_exif_orientation(pil_image)
        return convert_PIL_to_numpy(pil_image, format)
def check_image_size(dataset_dict, image):
    """
    Raise an error if the image does not match the size specified in the dict;
    fill in missing "width"/"height" entries from the image itself.
    """
    if "width" in dataset_dict or "height" in dataset_dict:
        # image is HWC; compare as (w, h) against the annotated size.
        actual_wh = (image.shape[1], image.shape[0])
        expected_wh = (dataset_dict["width"], dataset_dict["height"])
        if actual_wh != expected_wh:
            file_hint = (
                " for image " + dataset_dict["file_name"]
                if "file_name" in dataset_dict
                else ""
            )
            raise SizeMismatchError(
                "Mismatched image shape{}, got {}, expect {}.".format(
                    file_hint, actual_wh, expected_wh
                )
                + " Please check the width/height in your annotation."
            )

    # To ensure bbox always remap to original image size
    dataset_dict.setdefault("width", image.shape[1])
    dataset_dict.setdefault("height", image.shape[0])
def transform_proposals(dataset_dict, image_shape, transforms, *, proposal_topk, min_box_size=0):
    """
    Apply transformations to the proposals in dataset_dict, if any.

    Args:
        dataset_dict (dict): a dict read from the dataset, possibly
            contains fields "proposal_boxes", "proposal_objectness_logits", "proposal_bbox_mode"
        image_shape (tuple): height, width
        transforms (TransformList):
        proposal_topk (int): only keep top-K scoring proposals
        min_box_size (int): proposals with either side smaller than this
            threshold are removed

    The input dict is modified in-place, with abovementioned keys removed. A new
    key "proposals" will be added. Its value is an `Instances`
    object which contains the transformed proposals in its field
    "proposal_boxes" and "objectness_logits".
    """
    if "proposal_boxes" in dataset_dict:
        # Transform proposal boxes. The three "proposal_*" keys are popped so
        # they do not linger in the dict alongside the new "proposals" entry.
        boxes = transforms.apply_box(
            BoxMode.convert(
                dataset_dict.pop("proposal_boxes"),
                dataset_dict.pop("proposal_bbox_mode"),
                BoxMode.XYXY_ABS,
            )
        )
        boxes = Boxes(boxes)
        objectness_logits = torch.as_tensor(
            dataset_dict.pop("proposal_objectness_logits").astype("float32")
        )
        # Clip to the (transformed) image, then drop degenerate boxes.
        boxes.clip(image_shape)
        keep = boxes.nonempty(threshold=min_box_size)
        boxes = boxes[keep]
        objectness_logits = objectness_logits[keep]
        proposals = Instances(image_shape)
        # NOTE(review): the [:proposal_topk] slice assumes proposals arrive
        # pre-sorted by objectness — confirm against the dataset loader.
        proposals.proposal_boxes = boxes[:proposal_topk]
        proposals.objectness_logits = objectness_logits[:proposal_topk]
        dataset_dict["proposals"] = proposals
def transform_instance_annotations(
    annotation, transforms, image_size, *, keypoint_hflip_indices=None
):
    """
    Apply transforms to box, segmentation and keypoints annotations of a single instance.

    It will use `transforms.apply_box` for the box, and
    `transforms.apply_coords` for segmentation polygons & keypoints.
    If you need anything more specially designed for each data structure,
    you'll need to implement your own version of this function or the transforms.

    Args:
        annotation (dict): dict of instance annotations for a single instance.
            It will be modified in-place.
        transforms (TransformList or list[Transform]):
        image_size (tuple): the height, width of the transformed image
        keypoint_hflip_indices (ndarray[int]): see `create_keypoint_hflip_indices`.

    Returns:
        dict:
            the same input dict with fields "bbox", "segmentation", "keypoints"
            transformed according to `transforms`.
            The "bbox_mode" field will be set to XYXY_ABS.
    """
    if isinstance(transforms, (tuple, list)):
        transforms = T.TransformList(transforms)
    # bbox is 1d (per-instance bounding box)
    bbox = BoxMode.convert(annotation["bbox"], annotation["bbox_mode"], BoxMode.XYXY_ABS)
    # clip transformed bbox to image size
    bbox = transforms.apply_box(np.array([bbox]))[0].clip(min=0)
    # (h, w) + (h, w) reversed gives the [w, h, w, h] upper bound for xyxy coords.
    annotation["bbox"] = np.minimum(bbox, list(image_size + image_size)[::-1])
    annotation["bbox_mode"] = BoxMode.XYXY_ABS

    if "segmentation" in annotation:
        # each instance contains 1 or more polygons
        segm = annotation["segmentation"]
        if isinstance(segm, list):
            # polygons: flatten (N, 2) coord arrays back to flat lists afterwards
            polygons = [np.asarray(p).reshape(-1, 2) for p in segm]
            annotation["segmentation"] = [
                p.reshape(-1) for p in transforms.apply_polygons(polygons)
            ]
        elif isinstance(segm, dict):
            # RLE: decode to a bitmask, transform, and store the mask
            mask = mask_util.decode(segm)
            mask = transforms.apply_segmentation(mask)
            assert tuple(mask.shape[:2]) == image_size
            annotation["segmentation"] = mask
        else:
            raise ValueError(
                "Cannot transform segmentation of type '{}'!"
                "Supported types are: polygons as list[list[float] or ndarray],"
                " COCO-style RLE as a dict.".format(type(segm))
            )

    if "keypoints" in annotation:
        keypoints = transform_keypoint_annotations(
            annotation["keypoints"], transforms, image_size, keypoint_hflip_indices
        )
        annotation["keypoints"] = keypoints

    return annotation
def transform_keypoint_annotations(keypoints, transforms, image_size, keypoint_hflip_indices=None):
    """
    Transform keypoint annotations of an image.
    If a keypoint is transformed out of image boundary, it will be marked "unlabeled" (visibility=0)

    Args:
        keypoints (list[float]): Nx3 float in Detectron2's Dataset format.
            Each point is represented by (x, y, visibility).
        transforms (TransformList):
        image_size (tuple): the height, width of the transformed image
        keypoint_hflip_indices (ndarray[int]): see `create_keypoint_hflip_indices`.
            When `transforms` includes horizontal flip, will use the index
            mapping to flip keypoints.

    Returns:
        ndarray: Nx3 float64 array of transformed (x, y, visibility) keypoints.
    """
    # (N*3,) -> (N, 3)
    keypoints = np.asarray(keypoints, dtype="float64").reshape(-1, 3)
    keypoints_xy = transforms.apply_coords(keypoints[:, :2])

    # Set all out-of-boundary points to "unlabeled"
    # (image_size is (h, w); reversed to (w, h) to compare against (x, y)).
    inside = (keypoints_xy >= np.array([0, 0])) & (keypoints_xy <= np.array(image_size[::-1]))
    inside = inside.all(axis=1)
    keypoints[:, :2] = keypoints_xy
    keypoints[:, 2][~inside] = 0

    # This assumes that HorizFlipTransform is the only one that does flip
    do_hflip = sum(isinstance(t, T.HFlipTransform) for t in transforms.transforms) % 2 == 1

    # Alternative way: check if probe points was horizontally flipped.
    # probe = np.asarray([[0.0, 0.0], [image_width, 0.0]])
    # probe_aug = transforms.apply_coords(probe.copy())
    # do_hflip = np.sign(probe[1][0] - probe[0][0]) != np.sign(probe_aug[1][0] - probe_aug[0][0])  # noqa

    # If flipped, swap each keypoint with its opposite-handed equivalent
    if do_hflip:
        if keypoint_hflip_indices is None:
            raise ValueError("Cannot flip keypoints without providing flip indices!")
        if len(keypoints) != len(keypoint_hflip_indices):
            raise ValueError(
                "Keypoint data has {} points, but metadata "
                "contains {} points!".format(len(keypoints), len(keypoint_hflip_indices))
            )
        keypoints = keypoints[np.asarray(keypoint_hflip_indices, dtype=np.int32), :]

    # Maintain COCO convention that if visibility == 0 (unlabeled), then x, y = 0
    keypoints[keypoints[:, 2] == 0] = 0
    return keypoints
def annotations_to_instances(annos, image_size, mask_format="polygon"):
    """
    Create an :class:`Instances` object used by the models,
    from instance annotations in the dataset dict.

    Args:
        annos (list[dict]): a list of instance annotations in one image, each
            element for one instance.
        image_size (tuple): height, width
        mask_format (str): "polygon" or "bitmask"; storage format for gt_masks.

    Returns:
        Instances:
            It will contain fields "gt_boxes", "gt_classes",
            "gt_masks", "gt_keypoints", if they can be obtained from `annos`.
            This is the format that builtin models expect.
    """
    # (0, 4) placeholder keeps downstream code working for images with no instances.
    boxes = (
        np.stack(
            [BoxMode.convert(obj["bbox"], obj["bbox_mode"], BoxMode.XYXY_ABS) for obj in annos]
        )
        if len(annos)
        else np.zeros((0, 4))
    )
    target = Instances(image_size)
    target.gt_boxes = Boxes(boxes)

    classes = [int(obj["category_id"]) for obj in annos]
    classes = torch.tensor(classes, dtype=torch.int64)
    target.gt_classes = classes

    if len(annos) and "segmentation" in annos[0]:
        segms = [obj["segmentation"] for obj in annos]
        if mask_format == "polygon":
            try:
                masks = PolygonMasks(segms)
            except ValueError as e:
                raise ValueError(
                    "Failed to use mask_format=='polygon' from the given annotations!"
                ) from e
        else:
            assert mask_format == "bitmask", mask_format
            masks = []
            for segm in segms:
                if isinstance(segm, list):
                    # polygon
                    masks.append(polygons_to_bitmask(segm, *image_size))
                elif isinstance(segm, dict):
                    # COCO RLE
                    masks.append(mask_util.decode(segm))
                elif isinstance(segm, np.ndarray):
                    assert segm.ndim == 2, "Expect segmentation of 2 dimensions, got {}.".format(
                        segm.ndim
                    )
                    # mask array
                    masks.append(segm)
                else:
                    raise ValueError(
                        "Cannot convert segmentation of type '{}' to BitMasks!"
                        "Supported types are: polygons as list[list[float] or ndarray],"
                        " COCO-style RLE as a dict, or a binary segmentation mask "
                        " in a 2D numpy array of shape HxW.".format(type(segm))
                    )
            # torch.from_numpy does not support array with negative stride.
            masks = BitMasks(
                torch.stack([torch.from_numpy(np.ascontiguousarray(x)) for x in masks])
            )
        target.gt_masks = masks

    if len(annos) and "keypoints" in annos[0]:
        # Instances without a "keypoints" entry contribute an empty list.
        kpts = [obj.get("keypoints", []) for obj in annos]
        target.gt_keypoints = Keypoints(kpts)

    return target
def annotations_to_instances_rotated(annos, image_size):
    """
    Build an :class:`Instances` holding rotated ground-truth boxes and classes.

    Compared to `annotations_to_instances`, this function is for rotated boxes only.

    Args:
        annos (list[dict]): a list of instance annotations in one image, each
            element for one instance.
        image_size (tuple): height, width

    Returns:
        Instances: with fields "gt_boxes" and "gt_classes".
    """
    target = Instances(image_size)
    gt_boxes = RotatedBoxes([obj["bbox"] for obj in annos])
    target.gt_boxes = gt_boxes
    # Clip happens in place after the field is attached.
    gt_boxes.clip(image_size)
    target.gt_classes = torch.tensor(
        [obj["category_id"] for obj in annos], dtype=torch.int64
    )
    return target
def filter_empty_instances(
    instances, by_box=True, by_mask=True, box_threshold=1e-5, return_mask=False
):
    """
    Drop instances whose box and/or mask is empty.

    Args:
        instances (Instances):
        by_box (bool): whether to filter out instances with empty boxes
        by_mask (bool): whether to filter out instances with empty masks
        box_threshold (float): minimum width and height to be considered non-empty
        return_mask (bool): whether to return boolean mask of filtered instances

    Returns:
        Instances: the filtered instances.
        tensor[bool], optional: boolean mask of filtered instances
    """
    assert by_box or by_mask
    keep_flags = []
    if by_box:
        keep_flags.append(instances.gt_boxes.nonempty(threshold=box_threshold))
    if by_mask and instances.has("gt_masks"):
        keep_flags.append(instances.gt_masks.nonempty())

    # TODO: can also filter visible keypoints

    if not keep_flags:
        return instances
    keep = keep_flags[0]
    for flags in keep_flags[1:]:
        keep = keep & flags
    return (instances[keep], keep) if return_mask else instances[keep]
def create_keypoint_hflip_indices(dataset_names: Union[str, List[str]]) -> List[int]:
    """
    Args:
        dataset_names: one dataset name or a list of dataset names

    Returns:
        list[int]: a list of size=#keypoints, storing the
        horizontally-flipped keypoint indices.
    """
    if isinstance(dataset_names, str):
        dataset_names = [dataset_names]

    check_metadata_consistency("keypoint_names", dataset_names)
    check_metadata_consistency("keypoint_flip_map", dataset_names)

    meta = MetadataCatalog.get(dataset_names[0])
    names = meta.keypoint_names
    # TODO flip -> hflip
    # Make the flip map symmetric so lookup works in both directions.
    flip_map = dict(meta.keypoint_flip_map)
    flip_map.update({dst: src for src, dst in flip_map.items()})
    return [names.index(flip_map.get(name, name)) for name in names]
def gen_crop_transform_with_instance(crop_size, image_size, instance):
    """
    Generate a CropTransform so that the cropping region contains
    the center of the given instance.

    Args:
        crop_size (tuple): h, w in pixels
        image_size (tuple): h, w
        instance (dict): an annotation dict of one instance, in Detectron2's
            dataset format.
    """
    crop_size = np.asarray(crop_size, dtype=np.int32)
    bbox = BoxMode.convert(instance["bbox"], instance["bbox_mode"], BoxMode.XYXY_ABS)
    # Instance center in (y, x) order, matching the (h, w) sizes used below.
    center_yx = (bbox[1] + bbox[3]) * 0.5, (bbox[0] + bbox[2]) * 0.5
    assert (
        image_size[0] >= center_yx[0] and image_size[1] >= center_yx[1]
    ), "The annotation bounding box is outside of the image!"
    assert (
        image_size[0] >= crop_size[0] and image_size[1] >= crop_size[1]
    ), "Crop size is larger than image size!"

    # Valid range for the crop's top-left corner: it must start early enough
    # that the crop still covers the center (min_yx) but not run past the
    # image border (max_yx).
    min_yx = np.maximum(np.floor(center_yx).astype(np.int32) - crop_size, 0)
    max_yx = np.maximum(np.asarray(image_size, dtype=np.int32) - crop_size, 0)
    max_yx = np.minimum(max_yx, np.ceil(center_yx).astype(np.int32))

    # Sample the corner uniformly; note this consumes two np.random draws
    # (y first, then x), which matters for RNG-state reproducibility.
    y0 = np.random.randint(min_yx[0], max_yx[0] + 1)
    x0 = np.random.randint(min_yx[1], max_yx[1] + 1)
    return T.CropTransform(x0, y0, crop_size[1], crop_size[0])
def check_metadata_consistency(key, dataset_names):
    """
    Check that all given datasets define the same metadata value for `key`.

    Args:
        key (str): a metadata key
        dataset_names (list[str]): a list of dataset names

    Raises:
        AttributeError: if the key does not exist in the metadata
        ValueError: if the given datasets do not have the same metadata values defined by key
    """
    if not dataset_names:
        return
    logger = logging.getLogger(__name__)

    entries = [getattr(MetadataCatalog.get(name), key) for name in dataset_names]
    reference = entries[0]
    for idx, entry in enumerate(entries):
        if entry == reference:
            continue
        # Log both the offending value and the reference before raising.
        logger.error(
            "Metadata '{}' for dataset '{}' is '{}'".format(key, dataset_names[idx], str(entry))
        )
        logger.error(
            "Metadata '{}' for dataset '{}' is '{}'".format(
                key, dataset_names[0], str(reference)
            )
        )
        raise ValueError("Datasets have different metadata '{}'!".format(key))
def build_augmentation(cfg, is_train):
    """
    Build the default list of :class:`Augmentation` from config:
    a resize-shortest-edge step, plus an optional random flip during training.

    Returns:
        list[Augmentation]
    """
    inp = cfg.INPUT
    if is_train:
        min_size, max_size = inp.MIN_SIZE_TRAIN, inp.MAX_SIZE_TRAIN
        sample_style = inp.MIN_SIZE_TRAIN_SAMPLING
    else:
        # Test-time resizing always picks deterministically from MIN_SIZE_TEST.
        min_size, max_size = inp.MIN_SIZE_TEST, inp.MAX_SIZE_TEST
        sample_style = "choice"

    augs = [T.ResizeShortestEdge(min_size, max_size, sample_style)]

    flip_mode = cfg.INPUT.RANDOM_FLIP
    if is_train and flip_mode != "none":
        augs.append(
            T.RandomFlip(
                horizontal=flip_mode == "horizontal",
                vertical=flip_mode == "vertical",
            )
        )
    return augs
# Deprecated alias: older code imports `build_transform_gen`; keep it bound to
# `build_augmentation` so those call sites keep working. Prefer the new name.
build_transform_gen = build_augmentation
"""
Alias for backward-compatibility.
"""
|
evocodebench_data_61
|
"""Datasets module."""
# Copyright (c) 2023
# Author: Hugo Delatte <delatte.hugo@gmail.com>
# License: BSD 3 clause
# Implementation derived from:
# scikit-portfolio, Copyright (c) 2022, Carlo Nicolini, Licensed under MIT Licence.
# scikit-learn, Copyright (c) 2007-2010 David Cournapeau, Fabian Pedregosa, Olivier
# Grisel Licensed under BSD 3 clause.
import gzip
import os
import shutil
import urllib.request as ur
from importlib import resources
from pathlib import Path
import joblib
import pandas as pd
DATA_MODULE = "skfolio.datasets.data"
def get_data_home(data_home: str | Path | None = None) -> str:
"""Return the path of the skfolio data directory.
This folder is used by some large dataset loaders to avoid downloading the
data several times.
By default, the data directory is set to a folder named 'skfolio_data' in the
user home folder.
Alternatively, it can be set by the 'SKFOLIO_DATA' environment
variable or programmatically by giving an explicit folder path. The '~'
symbol is expanded to the user home folder.
If the folder does not already exist, it is automatically created.
Parameters
----------
data_home : str, optional
The path to skfolio data directory. If `None`, the default path
is `~/skfolio_data`.
Returns
-------
data_home: str or path-like, optional
The path to skfolio data directory.
"""
if data_home is None:
data_home = os.environ.get("SKFOLIO_DATA", os.path.join("~", "skfolio_data"))
data_home = os.path.expanduser(data_home)
os.makedirs(data_home, exist_ok=True)
return data_home
def clear_data_home(data_home: str | Path | None = None) -> None:
    """Delete all the content of the data home cache.

    Parameters
    ----------
    data_home : str or path-like, optional
        The path to scikit-learn data directory. If `None`, the default path
        is `~/skfolio_data`.
    """
    # get_data_home resolves (and creates) the directory before removal.
    shutil.rmtree(get_data_home(data_home))
def load_gzip_compressed_csv_data(
    data_filename: str,
    data_module: str = DATA_MODULE,
    encoding="utf-8",
    datetime_index: bool = True,
) -> pd.DataFrame:
    """Load a gzip-compressed csv file with `importlib.resources`.

    1) Open resource file with `importlib.resources`
    2) Decompress csv file with `gzip.open`
    3) Load decompressed data with `pd.read_csv`

    Parameters
    ----------
    data_filename : str
        Name of gzip-compressed csv file (`'*.csv.gz'`) to be loaded from
        `data_module/data_file_name`. For example `'SPX500.csv.gz'`.

    data_module : str or module, default='skfolio.datasets.data'
        Module where data lives. The default is `'skfolio.datasets.data'`.

    encoding : str, default="utf-8"
        Name of the encoding that the gzip-decompressed file will be
        decoded with. The default is 'utf-8'.

    datetime_index: bool, default=True
        If this is set to True, the DataFrame index is converted to datetime with
        format="%Y-%m-%d".
        The default is `True`.

    Returns
    -------
    df : DataFrame of shape (n_observations, n_assets)
        DataFrame with each row representing one observation and each column
        representing the asset price of a given observation.
    """
    path = resources.files(data_module).joinpath(data_filename)
    # Use `with` for the gzip wrapper as well: the original code never closed
    # it, leaking the decompressor object until garbage collection.
    with path.open("rb") as compressed_file, gzip.open(
        compressed_file, mode="rt", encoding=encoding
    ) as csv_file:
        df = pd.read_csv(csv_file, sep=",", index_col=0)
    if datetime_index:
        df.index = pd.to_datetime(df.index, format="%Y-%m-%d")
    return df
def download_dataset(
    data_filename: str,
    data_home: str | Path | None = None,
    download_if_missing: bool = True,
) -> pd.DataFrame:
    """Download and save locally a dataset from the remote GitHub dataset folder.

    The parsed DataFrame is cached as a joblib pickle (`.pkz`) inside the data
    home folder; subsequent calls load the cache instead of downloading.

    Parameters
    ----------
    data_filename : str
        Name of gzip-compressed csv file (`'*.csv.gz'`) to be loaded from a remote
        GitHub dataset folder.

    data_home : str or path-like, optional
        Specify another download and cache folder for the datasets. By default,
        all skfolio data is stored in `~/skfolio_data` sub-folders.

    download_if_missing : bool, default=True
        If False, raise an OSError if the data is not locally available
        instead of trying to download the data from the source site.
        The default is `True`.

    Returns
    -------
    df : DataFrame of shape (n_observations, n_assets)
        DataFrame with each row representing one observation and each column
        representing the asset price of a given observation.

    Raises
    ------
    OSError
        If the data is not cached locally and `download_if_missing` is False.
    """
    url = f"https://github.com/skfolio/skfolio/raw/main/datasets/{data_filename}.csv.gz"
    data_home = get_data_home(data_home=data_home)
    filepath = os.path.join(data_home, f"{data_filename}.pkz")
    # Serve the cached, pickled DataFrame when present.
    if os.path.exists(filepath):
        return joblib.load(filepath)
    if not download_if_missing:
        raise OSError("Data not found and `download_if_missing` is False")
    archive_path = os.path.join(data_home, os.path.basename(url))
    ur.urlretrieve(url, archive_path)
    # NOTE(review): `archive_path` is a filesystem path, while
    # `load_gzip_compressed_csv_data` resolves its first argument relative to an
    # importable data module — presumably `joinpath` accepts the absolute path
    # here; confirm against `importlib.resources` Traversable semantics.
    df = load_gzip_compressed_csv_data(archive_path)
    # Cache the parsed frame, then drop the raw archive.
    joblib.dump(df, filepath, compress=6)
    os.remove(archive_path)
    return df
def load_sp500_dataset() -> pd.DataFrame:
    """Load the prices of 20 assets from the S&P 500 Index composition.

    Daily adjusted-close prices of 20 S&P 500 constituents from 1990-01-02 up
    to 2022-12-28, sourced from the Yahoo public API. The adjusted close is
    the closing price after adjustments for all applicable splits and dividend
    distributions, using multipliers that adhere to the Center for Research in
    Security Prices (CRSP) standards.

    ============== ==================
    Observations   8313
    Assets         20
    ============== ==================

    Returns
    -------
    df : DataFrame of shape (n_observations, n_assets)
        Prices DataFrame

    Examples
    --------
    >>> from skfolio.datasets import load_sp500_dataset
    >>> prices = load_sp500_dataset()
    """
    return load_gzip_compressed_csv_data("sp500_dataset.csv.gz")
def load_sp500_index() -> pd.DataFrame:
    """Load the prices of the S&P 500 Index.

    Daily adjusted-close prices of the S&P 500 Index from 1990-01-02 up to
    2022-12-28, sourced from the Yahoo public API. The adjusted close is the
    closing price after adjustments for all applicable splits and dividend
    distributions, using multipliers that adhere to the Center for Research in
    Security Prices (CRSP) standards.

    ============== ==================
    Observations   8313
    Assets         1
    ============== ==================

    Returns
    -------
    df : DataFrame of shape (n_observations, n_assets)
        Prices DataFrame

    Examples
    --------
    >>> from skfolio.datasets import load_sp500_index
    >>> prices = load_sp500_index()
    """
    return load_gzip_compressed_csv_data("sp500_index.csv.gz")
def load_factors_dataset() -> pd.DataFrame:
    """Load the prices of 5 factor ETFs.

    Daily adjusted-close prices of 5 ETFs representing common factors, from
    2014-01-02 up to 2022-12-28, sourced from the Yahoo public API.

    The factors are:

        * "MTUM": Momentum
        * "QUAL": Quality
        * "SIZE": Size
        * "VLUE": Value
        * "USMV": low volatility

    The adjusted close is the closing price after adjustments for all
    applicable splits and dividend distributions, using multipliers that
    adhere to the Center for Research in Security Prices (CRSP) standards.

    ============== ==================
    Observations   2264
    Assets         5
    ============== ==================

    Returns
    -------
    df : DataFrame of shape (n_observations, n_assets)
        Prices DataFrame

    Examples
    --------
    >>> from skfolio.datasets import load_factors_dataset
    >>> prices = load_factors_dataset()
    """
    return load_gzip_compressed_csv_data("factors_dataset.csv.gz")
def load_ftse100_dataset(data_home=None, download_if_missing=True) -> pd.DataFrame:
    """Load the prices of 64 assets from the FTSE 100 Index composition.

    Daily adjusted-close prices of 64 FTSE 100 constituents from 2000-01-04 up
    to 2023-05-31, sourced from the Yahoo public API. The adjusted close is
    the closing price after adjustments for all applicable splits and dividend
    distributions, using multipliers that adhere to the Center for Research in
    Security Prices (CRSP) standards. The data contains NaN.

    ============== ==================
    Observations   5960
    Assets         64
    ============== ==================

    Parameters
    ----------
    data_home : str, optional
        Specify another download and cache folder for the datasets.
        By default, all skfolio data is stored in `~/skfolio_data` subfolders.

    download_if_missing : bool, default=True
        If False, raise an OSError if the data is not locally available
        instead of trying to download the data from the source site.

    Returns
    -------
    df : DataFrame of shape (n_observations, n_assets)
        Prices DataFrame

    Examples
    --------
    >>> from skfolio.datasets import load_ftse100_dataset
    >>> prices = load_ftse100_dataset()
    """
    return download_dataset(
        "ftse100_dataset",
        data_home=data_home,
        download_if_missing=download_if_missing,
    )
def load_nasdaq_dataset(data_home=None, download_if_missing=True) -> pd.DataFrame:
    """Load the prices of 1455 assets from the NASDAQ Composite Index.

    Daily adjusted-close prices of 1455 NASDAQ Composite constituents from
    2018-01-02 up to 2023-05-31, sourced from the Yahoo public API. The
    adjusted close is the closing price after adjustments for all applicable
    splits and dividend distributions, using multipliers that adhere to the
    Center for Research in Security Prices (CRSP) standards.

    ============== ==================
    Observations   1362
    Assets         1455
    ============== ==================

    Parameters
    ----------
    data_home : str, optional
        Specify another download and cache folder for the datasets.
        By default, all skfolio data is stored in `~/skfolio_data` subfolders.

    download_if_missing : bool, default=True
        If False, raise an OSError if the data is not locally available
        instead of trying to download the data from the source site.

    Returns
    -------
    df : DataFrame of shape (n_observations, n_assets)
        Prices DataFrame

    Examples
    --------
    >>> from skfolio.datasets import load_nasdaq_dataset
    >>> prices = load_nasdaq_dataset()
    """
    return download_dataset(
        "nasdaq_dataset",
        data_home=data_home,
        download_if_missing=download_if_missing,
    )
|
evocodebench_data_62
|
"""Tools module"""
# Copyright (c) 2023
# Author: Hugo Delatte <delatte.hugo@gmail.com>
# License: BSD 3 clause
# Implementation derived from:
# Riskfolio-Lib, Copyright (c) 2020-2023, Dany Cajas, Licensed under BSD 3 clause.
# Statsmodels, Copyright (C) 2006, Jonathan E. Taylor, Licensed under BSD 3 clause.
from enum import auto
import numpy as np
import scipy.cluster.hierarchy as sch
import scipy.optimize as sco
import scipy.spatial.distance as scd
import scipy.special as scs
from scipy.sparse import csr_matrix
from skfolio.utils.tools import AutoEnum
__all__ = [
"NBinsMethod",
"n_bins_freedman",
"n_bins_knuth",
"is_cholesky_dec",
"assert_is_square",
"assert_is_symmetric",
"assert_is_distance",
"cov_nearest",
"cov_to_corr",
"corr_to_cov",
"commutation_matrix",
"compute_optimal_n_clusters",
"rand_weights",
"rand_weights_dirichlet",
]
class NBinsMethod(AutoEnum):
    """Enumeration of the methods available to pick the number of histogram bins.

    Parameters
    ----------
    FREEDMAN : str
        Freedman method

    KNUTH : str
        Knuth method
    """

    FREEDMAN = auto()
    KNUTH = auto()
def n_bins_freedman(x: np.ndarray) -> int:
    """Compute the optimal histogram bin size using the Freedman-Diaconis rule [1]_.

    Parameters
    ----------
    x : ndarray of shape (n_observations,)
        The input array.

    Returns
    -------
    n_bins : int
        The optimal bin size.

    References
    ----------
    .. [1] "On the histogram as a density estimator: L2 theory".
        Freedman & Diaconis (1981).
    """
    if x.ndim != 1:
        raise ValueError("`x` must be a 1d-array")
    q1, q3 = np.percentile(x, [25, 75])
    # Freedman-Diaconis bin width: 2 * IQR / cube-root of the sample size.
    width = 2 * (q3 - q1) / (len(x) ** (1 / 3))
    if width == 0:
        # Degenerate IQR (e.g. constant data): fall back to a fixed bin count.
        return 5
    raw_bins = max(1, np.ceil((np.max(x) - np.min(x)) / width))
    return int(round(raw_bins))
def n_bins_knuth(x: np.ndarray) -> int:
    """Compute the optimal histogram bin size using Knuth's rule [1]_.

    Parameters
    ----------
    x : ndarray of shape (n_observations,)
        The input array.

    Returns
    -------
    n_bins : int
        The optimal bin size.

    References
    ----------
    .. [1] "Optimal Data-Based Binning for Histograms".
        Knuth.
    """
    x = np.sort(x)
    n = len(x)

    def neg_log_posterior(params: float):
        # Negative log-posterior of the bin count under Knuth's Bayesian model.
        m = params[0]
        if m <= 0:
            return np.inf
        edges = np.linspace(x[0], x[-1], int(m) + 1)
        counts, _ = np.histogram(x, edges)
        log_p = (
            n * np.log(m)
            + scs.gammaln(0.5 * m)
            - m * scs.gammaln(0.5)
            - scs.gammaln(n + 0.5 * m)
            + np.sum(scs.gammaln(counts + 0.5))
        )
        return -log_p

    # Seed the optimizer with the Freedman-Diaconis estimate.
    start = n_bins_freedman(x)
    best = sco.fmin(neg_log_posterior, start, disp=0)[0]
    return int(round(best))
def rand_weights_dirichlet(n: int) -> np.array:
    """Produce n random weights summing to one, drawn from a flat Dirichlet
    distribution (i.e. uniformly over the simplex).

    Parameters
    ----------
    n : int
        Number of weights.

    Returns
    -------
    weights : ndarray of shape (n, )
        The vector of weights.
    """
    alpha = np.ones(n)
    return np.random.dirichlet(alpha)
def rand_weights(n: int, zeros: int = 0) -> np.array:
    """Produce n random weights summing to one, from uniform draws rescaled to
    the simplex (a non-uniform distribution over the simplex).

    Parameters
    ----------
    n : int
        Number of weights.

    zeros : int, default=0
        The number of weights to randomly set to zeros.

    Returns
    -------
    weights : ndarray of shape (n, )
        The vector of weights.
    """
    raw = np.random.rand(n)
    if zeros > 0:
        # Zero out a random subset before renormalizing.
        dropped = np.random.choice(n, zeros, replace=False)
        raw[dropped] = 0
    return raw / sum(raw)
def is_cholesky_dec(x: np.ndarray) -> bool:
    """Return True if a Cholesky decomposition of the matrix can be computed.

    The matrix must be Hermitian (symmetric if real-valued) and
    positive-definite. No checking is performed to verify whether the matrix
    is Hermitian or not.

    Parameters
    ----------
    x : ndarray of shape (n, m)
        The matrix.

    Returns
    -------
    value : bool
        True if Cholesky decomposition can be applied to the matrix, False
        otherwise.
    """
    # Around 100 times faster than checking for positive eigenvalues with
    # np.linalg.eigh.
    try:
        np.linalg.cholesky(x)
        return True
    except np.linalg.LinAlgError:
        # Fixed: `np.linalg.linalg.LinAlgError` was a private alias that was
        # removed in NumPy 2.0; `np.linalg.LinAlgError` is the public name.
        return False
def is_positive_definite(x: np.ndarray) -> bool:
    """Return True if the matrix is positive definite.

    Parameters
    ----------
    x : ndarray of shape (n, m)
        The matrix.

    Returns
    -------
    value : bool
        True if the matrix is positive definite, False otherwise.
    """
    eigenvalues = np.linalg.eigvals(x)
    return np.all(eigenvalues > 0)
def assert_is_square(x: np.ndarray) -> None:
    """Raise a ValueError when the matrix is not square.

    Parameters
    ----------
    x : ndarray of shape (n, n)
        The matrix.

    Raises
    ------
    ValueError: if the matrix is not square.
    """
    # `and` short-circuits so x.shape[1] is only read for 2-D inputs.
    square = x.ndim == 2 and x.shape[0] == x.shape[1]
    if not square:
        raise ValueError("The matrix must be square")
def assert_is_symmetric(x: np.ndarray) -> None:
    """Raise a ValueError when the matrix is not symmetric.

    Parameters
    ----------
    x : ndarray of shape (n, m)
        The matrix.

    Raises
    ------
    ValueError: if the matrix is not symmetric.
    """
    assert_is_square(x)
    symmetric = np.allclose(x, x.T)
    if not symmetric:
        raise ValueError("The matrix must be symmetric")
def assert_is_distance(x: np.ndarray) -> None:
    """Raise a ValueError when the matrix is not a distance matrix
    (symmetric with a near-zero diagonal).

    Parameters
    ----------
    x : ndarray of shape (n, n)
        The matrix.

    Raises
    ------
    ValueError: if the matrix is not a distance matrix.
    """
    assert_is_symmetric(x)
    diagonal = np.diag(x)
    if not np.allclose(diagonal, np.zeros(x.shape[0]), atol=1e-5):
        raise ValueError(
            "The distance matrix must have diagonal elements close to zeros"
        )
def cov_to_corr(cov: np.ndarray) -> tuple[np.ndarray, np.ndarray]:
    """Convert a covariance matrix to a correlation matrix.

    Parameters
    ----------
    cov : ndarray of shape (n, n)
        Covariance matrix.

    Returns
    -------
    corr, std : tuple[ndarray of shape (n, n), ndarray of shape (n, )]
        Correlation matrix and standard-deviation vector
    """
    if cov.ndim != 2:
        raise ValueError(f"`cov` must be a 2D array, got a {cov.ndim}D array")
    std_vec = np.sqrt(np.diag(cov))
    # Divide out row and column standard deviations.
    corr = cov / std_vec / std_vec[:, None]
    return corr, std_vec
def corr_to_cov(corr: np.ndarray, std: np.ndarray):
    """Convert a correlation matrix to a covariance matrix given its
    standard-deviation vector.

    Parameters
    ----------
    corr : ndarray of shape (n, n)
        Correlation matrix.

    std : ndarray of shape (n, )
        Standard-deviation vector.

    Returns
    -------
    cov : ndarray of shape (n, n)
        Covariance matrix
    """
    if std.ndim != 1:
        raise ValueError(f"`std` must be a 1D array, got a {std.ndim}D array")
    if corr.ndim != 2:
        raise ValueError(f"`corr` must be a 2D array, got a {corr.ndim}D array")
    # Scale rows and columns back by the standard deviations.
    return corr * std * std[:, None]
# Eigenvalue floor used by the clipping branch of `cov_nearest`.
_CLIPPING_VALUE = 1e-13


def cov_nearest(
    cov: np.ndarray, higham: bool = False, higham_max_iteration: int = 100
) -> np.ndarray:
    """Compute the nearest covariance matrix that is positive definite and with a
    cholesky decomposition than can be computed. The variance is left unchanged.

    First, it converts the covariance matrix to a correlation matrix.
    Then, it finds the nearest correlation matrix and converts it back to a covariance
    matrix using the initial standard deviation.

    Cholesky decomposition can fail for symmetric positive definite (SPD) matrix due
    to floating point error and inversely, Cholesky decomposition can success for
    non-SPD matrix. Therefore, we need to test for both. We always start by testing
    for Cholesky decomposition which is significantly faster than checking for positive
    eigenvalues.

    Parameters
    ----------
    cov : ndarray of shape (n, n)
        Covariance matrix.

    higham : bool, default=False
        If this is set to True, the Higham & Nick (2002) algorithm [1]_ is used,
        otherwise the eigenvalues are clipped to threshold above zeros (1e-13).
        The default (`False`) is to use the clipping method as the Higham & Nick
        algorithm can be slow for large datasets.

    higham_max_iteration : int, default=100
        Maximum number of iteration of the Higham & Nick (2002) algorithm.
        The default value is `100`.

    Returns
    -------
    cov : ndarray
        The nearest covariance matrix.

    Raises
    ------
    ValueError
        If `higham` is True and no valid matrix is found within
        `higham_max_iteration` iterations.

    References
    ----------
    .. [1] "Computing the nearest correlation matrix - a problem from finance"
        IMA Journal of Numerical Analysis
        Higham & Nick (2002)
    """
    assert_is_square(cov)
    assert_is_symmetric(cov)

    # Around 100 times faster than checking eigenvalues with np.linalg.eigh
    if is_cholesky_dec(cov) and is_positive_definite(cov):
        # Already a valid covariance matrix: return it untouched.
        return cov

    # Work in correlation space so variances are preserved on the way back.
    corr, std = cov_to_corr(cov)

    if higham:
        eps = np.finfo(np.float64).eps * 5
        diff = np.zeros(corr.shape)
        x = corr.copy()
        # Alternating-projection iteration with Dykstra's correction (`diff`).
        for _ in range(higham_max_iteration):
            x_adj = x - diff
            eig_vals, eig_vecs = np.linalg.eigh(x_adj)
            # Project onto the PSD cone by flooring the eigenvalues.
            x = eig_vecs * np.maximum(eig_vals, eps) @ eig_vecs.T
            diff = x - x_adj
            # Project back onto the set of correlation matrices (unit diagonal).
            np.fill_diagonal(x, 1)
            cov = corr_to_cov(x, std)
            if is_cholesky_dec(cov) and is_positive_definite(cov):
                break
        else:
            # for-else: only reached when the loop exhausts without `break`.
            raise ValueError("Unable to find the nearest positive definite matrix")
    else:
        eig_vals, eig_vecs = np.linalg.eigh(corr)
        # Clipping the eigenvalues with a value smaller than 1e-13 can cause scipy to
        # consider the matrix non-psd is some corner cases (see test/test_stats.py)
        x = eig_vecs * np.maximum(eig_vals, _CLIPPING_VALUE) @ eig_vecs.T
        # Renormalize to a proper correlation matrix before scaling back.
        x, _ = cov_to_corr(x)
        cov = corr_to_cov(x, std)

    return cov
def commutation_matrix(x):
    """Compute the commutation matrix of a matrix.

    The commutation matrix K satisfies K @ vec(A) == vec(A.T) for any
    matrix A with the same shape as `x` (column-major vec).

    Parameters
    ----------
    x : ndarray of shape (n, m)
        The matrix.

    Returns
    -------
    K : ndarray of shape (m * n, m * n)
        The commutation matrix.
    """
    m, n = x.shape
    size = m * n
    row_idx = np.arange(size)
    # Column indices are the Fortran-order permutation of the row indices.
    col_idx = row_idx.reshape((m, n), order="F").ravel()
    ones = np.ones(size, dtype=np.int8)
    return csr_matrix((ones, (row_idx, col_idx)), shape=(size, size))
def compute_optimal_n_clusters(distance: np.ndarray, linkage_matrix: np.ndarray) -> int:
    r"""Compute the optimal number of clusters based on Two-Order Difference to Gap
    Statistic [1]_.

    The Two-Order Difference to Gap Statistic has been developed to improve the
    performance and stability of the Tibshiranis Gap statistic.
    It applies the two-order difference of the within-cluster dispersion to replace the
    reference null distribution in the Gap statistic.

    The number of cluster :math:`k` is determined by:

    .. math:: \begin{cases}
                \begin{aligned}
                &\max_{k} & & W_{k+2} + W_{k} - 2 W_{k+1} \\
                &\text{s.t.} & & 1 \ge c \ge max\bigl(8, \sqrt{n}\bigr) \\
                \end{aligned}
              \end{cases}

    with :math:`n` the sample size and :math:`W_{k}` the within-cluster dispersions
    defined as:

    .. math:: W_{k} = \sum_{i=1}^{k} \frac{D_{i}}{2|C_{i}|}

    where :math:`|C_{i}|` is the cardinality of cluster :math:`i` and :math:`D_{i}` its
    density defined as:

    .. math:: D_{i} = \sum_{u \in C_{i}} \sum_{v \in C_{i}} d(u,v)

    with :math:`d(u,v)` the distance between u and v.

    Parameters
    ----------
    distance : ndarray of shape (n, n)
        Distance matrix.

    linkage_matrix : ndarray of shape (n - 1, 4)
        Linkage matrix.

    Returns
    -------
    value : int
        Optimal number of clusters.

    References
    ----------
    .. [1] "Application of two-order difference to gap statistic".
        Yue, Wang & Wei (2009)
    """
    cut_tree = sch.cut_tree(linkage_matrix)
    n = cut_tree.shape[1]
    # Cap the search range, as in the docstring constraint.
    max_clusters = max(8, round(np.sqrt(n)))
    dispersion = []
    for k in range(max_clusters):
        # NOTE(review): column `n - k - 1` is assumed to correspond to a cut
        # with k + 1 clusters — confirm against scipy's `cut_tree` column order.
        level = cut_tree[:, n - k - 1]
        cluster_density = []
        for i in range(np.max(level) + 1):
            cluster_idx = np.argwhere(level == i).flatten()
            # Condensed pairwise distances within cluster i.
            cluster_dists = scd.squareform(
                distance[cluster_idx, :][:, cluster_idx], checks=False
            )
            if cluster_dists.shape[0] != 0:
                cluster_density.append(np.nan_to_num(cluster_dists.mean()))
        dispersion.append(np.sum(cluster_density))
    dispersion = np.array(dispersion)
    # Two-order difference W_{k+2} + W_k - 2 W_{k+1}; the last two entries
    # wrap around after np.roll and are therefore discarded.
    gaps = np.roll(dispersion, -2) + dispersion - 2 * np.roll(dispersion, -1)
    gaps = gaps[:-2]
    # k=0 represents one cluster
    k = np.argmax(gaps) + 2
    return k
|
evocodebench_data_63
|
# Copyright (c) Facebook, Inc. and its affiliates.
import os
import sys
import tempfile
from contextlib import ExitStack, contextmanager
from copy import deepcopy
from unittest import mock
import torch
from torch import nn
# need some explicit imports due to https://github.com/pytorch/pytorch/issues/38964
import detectron2 # noqa F401
from detectron2.structures import Boxes, Instances
from detectron2.utils.env import _import_file
# Monotonic counter used to give each generated Instances class/module a
# unique name (see `_gen_instance_class` and `_import`).
_counter = 0


def _clear_jit_cache():
    """Flush torch.jit's global compilation caches (modules and free functions)."""
    # NOTE(review): relies on private torch.jit internals; verify the attribute
    # paths against the installed torch version.
    from torch.jit._recursive import concrete_type_store
    from torch.jit._state import _jit_caching_layer

    concrete_type_store.type_store.clear()  # for modules
    _jit_caching_layer.clear()  # for free functions
def _add_instances_conversion_methods(newInstances):
    """
    Add from_instances methods to the scripted Instances class.
    """
    cls_name = newInstances.__name__

    @torch.jit.unused
    def from_instances(instances: Instances):
        """
        Create scripted Instances from original Instances
        """
        scripted = newInstances(instances.image_size)
        for name, value in instances.get_fields().items():
            assert hasattr(scripted, f"_{name}"), f"No attribute named {name} in {cls_name}"
            setattr(scripted, name, deepcopy(value))
        return scripted

    newInstances.from_instances = from_instances
@contextmanager
def patch_instances(fields):
    """
    A contextmanager, under which the Instances class in detectron2 is replaced
    by a statically-typed scriptable class, defined by `fields`.
    See more in `scripting_with_instances`.
    """
    with tempfile.TemporaryDirectory(prefix="detectron2") as dir, tempfile.NamedTemporaryFile(
        mode="w", encoding="utf-8", suffix=".py", dir=dir, delete=False
    ) as f:
        try:
            # Objects that use Instances should not reuse previously-compiled
            # results in cache, because `Instances` could be a new class each time.
            _clear_jit_cache()

            # Generate the statically-typed class source, write it to the temp
            # file, and import it as a real module so torchscript can see it.
            cls_name, s = _gen_instance_module(fields)
            f.write(s)
            f.flush()
            f.close()

            module = _import(f.name)
            new_instances = getattr(module, cls_name)
            # Compile eagerly so errors surface here rather than at first use.
            _ = torch.jit.script(new_instances)
            # let torchscript think Instances was scripted already
            Instances.__torch_script_class__ = True
            # let torchscript find new_instances when looking for the jit type of Instances
            Instances._jit_override_qualname = torch._jit_internal._qualified_name(new_instances)
            _add_instances_conversion_methods(new_instances)
            yield new_instances
        finally:
            # Undo the class-level patches; AttributeError means the try block
            # failed before they were set, which is fine to ignore.
            try:
                del Instances.__torch_script_class__
                del Instances._jit_override_qualname
            except AttributeError:
                pass
            sys.modules.pop(module.__name__)
def _gen_instance_class(fields):
    """
    Generate the source code of a statically-typed, scriptable stand-in for
    `Instances` with one typed attribute per entry of `fields`.

    Args:
        fields (dict[name: type])
    """

    class _FieldType:
        # Small record holding a field's name, python type, and the string
        # annotation used in the generated source.
        def __init__(self, name, type_):
            assert isinstance(name, str), f"Field name must be str, got {name}"
            self.name = name
            self.type_ = type_
            self.annotation = f"{type_.__module__}.{type_.__name__}"

    fields = [_FieldType(k, v) for k, v in fields.items()]

    def indent(level, s):
        # Indent `s` by `level` levels of 4 spaces (for generated source).
        return " " * 4 * level + s

    lines = []

    # Each call generates a uniquely-named class so stale jit caches for a
    # previous class cannot be confused with the new one.
    global _counter
    _counter += 1

    cls_name = "ScriptedInstances{}".format(_counter)

    field_names = tuple(x.name for x in fields)
    extra_args = ", ".join([f"{f.name}: Optional[{f.annotation}] = None" for f in fields])
    lines.append(
        f"""
class {cls_name}:
    def __init__(self, image_size: Tuple[int, int], {extra_args}):
        self.image_size = image_size
        self._field_names = {field_names}
"""
    )

    for f in fields:
        lines.append(
            indent(2, f"self._{f.name} = torch.jit.annotate(Optional[{f.annotation}], {f.name})")
        )

    # One typed property (getter + setter) per field.
    for f in fields:
        lines.append(
            f"""
    @property
    def {f.name}(self) -> {f.annotation}:
        # has to use a local for type refinement
        # https://pytorch.org/docs/stable/jit_language_reference.html#optional-type-refinement
        t = self._{f.name}
        assert t is not None, "{f.name} is None and cannot be accessed!"
        return t

    @{f.name}.setter
    def {f.name}(self, value: {f.annotation}) -> None:
        self._{f.name} = value
"""
        )

    # support method `__len__`
    lines.append(
        """
    def __len__(self) -> int:
"""
    )
    for f in fields:
        lines.append(
            f"""
        t = self._{f.name}
        if t is not None:
            return len(t)
"""
        )
    lines.append(
        """
        raise NotImplementedError("Empty Instances does not support __len__!")
"""
    )

    # support method `has`
    lines.append(
        """
    def has(self, name: str) -> bool:
"""
    )
    for f in fields:
        lines.append(
            f"""
        if name == "{f.name}":
            return self._{f.name} is not None
"""
        )
    lines.append(
        """
        return False
"""
    )

    # support method `to`
    none_args = ", None" * len(fields)
    lines.append(
        f"""
    def to(self, device: torch.device) -> "{cls_name}":
        ret = {cls_name}(self.image_size{none_args})
"""
    )
    for f in fields:
        if hasattr(f.type_, "to"):
            lines.append(
                f"""
        t = self._{f.name}
        if t is not None:
            ret._{f.name} = t.to(device)
"""
            )
        else:
            # For now, ignore fields that cannot be moved to devices.
            # Maybe can support other tensor-like classes (e.g. __torch_function__)
            pass
    lines.append(
        """
        return ret
"""
    )

    # support method `getitem`
    none_args = ", None" * len(fields)
    lines.append(
        f"""
    def __getitem__(self, item) -> "{cls_name}":
        ret = {cls_name}(self.image_size{none_args})
"""
    )
    for f in fields:
        lines.append(
            f"""
        t = self._{f.name}
        if t is not None:
            ret._{f.name} = t[item]
"""
        )
    lines.append(
        """
        return ret
"""
    )

    # support method `cat`
    # this version does not contain checks that all instances have same size and fields
    none_args = ", None" * len(fields)
    lines.append(
        f"""
    def cat(self, instances: List["{cls_name}"]) -> "{cls_name}":
        ret = {cls_name}(self.image_size{none_args})
"""
    )
    for f in fields:
        lines.append(
            f"""
        t = self._{f.name}
        if t is not None:
            values: List[{f.annotation}] = [x.{f.name} for x in instances]
            if torch.jit.isinstance(t, torch.Tensor):
                ret._{f.name} = torch.cat(values, dim=0)
            else:
                ret._{f.name} = t.cat(values)
"""
        )
    lines.append(
        """
        return ret"""
    )

    # support method `get_fields()`
    lines.append(
        """
    def get_fields(self) -> Dict[str, Tensor]:
        ret = {}
"""
    )
    for f in fields:
        # Only Boxes and raw Tensors can be exported back as plain tensors;
        # other field types produce a runtime assertion in the generated code.
        if f.type_ == Boxes:
            stmt = "t.tensor"
        elif f.type_ == torch.Tensor:
            stmt = "t"
        else:
            stmt = f'assert False, "unsupported type {str(f.type_)}"'
        lines.append(
            f"""
        t = self._{f.name}
        if t is not None:
            ret["{f.name}"] = {stmt}
"""
        )
    lines.append(
        """
        return ret"""
    )
    return cls_name, os.linesep.join(lines)
def _gen_instance_module(fields):
    """Return (class name, module source) for a scripted Instances stand-in.

    The returned source prepends the imports the generated class needs to the
    class definition produced by `_gen_instance_class`.
    """
    # TODO: find a more automatic way to enable import of other classes
    s = """
from copy import deepcopy
import torch
from torch import Tensor
import typing
from typing import *
import detectron2
from detectron2.structures import Boxes, Instances
"""

    cls_name, cls_def = _gen_instance_class(fields)
    s += cls_def
    return cls_name, s
def _import(path):
    """Import the generated module file under a unique, counter-suffixed name.

    The name appends the current `_counter`, keeping it in sync with the class
    most recently generated by `_gen_instance_class`.
    """
    return _import_file(
        "{}{}".format(sys.modules[__name__].__name__, _counter), path, make_importable=True
    )
@contextmanager
def patch_builtin_len(modules=()):
    """
    Patch the builtin len() function of a few detectron2 modules
    to use __len__ instead, because __len__ does not convert values to
    integers and therefore is friendly to tracing.

    Args:
        modules (list[str]): names of extra modules to patch len(), in
            addition to those in detectron2.
    """

    def _patched_len(obj):
        return obj.__len__()

    target_modules = [
        "detectron2.modeling.roi_heads.fast_rcnn",
        "detectron2.modeling.roi_heads.mask_head",
        "detectron2.modeling.roi_heads.keypoint_head",
    ] + list(modules)
    with ExitStack() as stack:
        patched = [
            stack.enter_context(mock.patch(module_name + ".len"))
            for module_name in target_modules
        ]
        for mocked_len in patched:
            mocked_len.side_effect = _patched_len
        yield
def patch_nonscriptable_classes():
    """
    Apply patches on a few nonscriptable detectron2 classes.
    Should not have side-effects on eager usage.
    """
    # __prepare_scriptable__ can also be added to models for easier maintenance.
    # But it complicates the clean model code.

    from detectron2.modeling.backbone import ResNet, FPN

    # Due to https://github.com/pytorch/pytorch/issues/36061,
    # we change backbone to use ModuleList for scripting.
    # (note: this changes param names in state_dict)

    def prepare_resnet(self):
        # Deep-copy so the eager-mode original model is left untouched.
        ret = deepcopy(self)
        ret.stages = nn.ModuleList(ret.stages)
        for k in self.stage_names:
            delattr(ret, k)
        return ret

    ResNet.__prepare_scriptable__ = prepare_resnet

    def prepare_fpn(self):
        # Same ModuleList conversion for FPN's lateral/output convs.
        ret = deepcopy(self)
        ret.lateral_convs = nn.ModuleList(ret.lateral_convs)
        ret.output_convs = nn.ModuleList(ret.output_convs)
        for name, _ in self.named_children():
            if name.startswith("fpn_"):
                delattr(ret, name)
        return ret

    FPN.__prepare_scriptable__ = prepare_fpn

    # Annotate some attributes to be constants for the purpose of scripting,
    # even though they are not constants in eager mode.
    from detectron2.modeling.roi_heads import StandardROIHeads

    if hasattr(StandardROIHeads, "__annotations__"):
        # copy first to avoid editing annotations of base class
        StandardROIHeads.__annotations__ = deepcopy(StandardROIHeads.__annotations__)
        StandardROIHeads.__annotations__["mask_on"] = torch.jit.Final[bool]
        StandardROIHeads.__annotations__["keypoint_on"] = torch.jit.Final[bool]
# These patches are not supposed to have side-effects.
# Applied once at import time of this module.
patch_nonscriptable_classes()
@contextmanager
def freeze_training_mode(model):
    """
    A context manager that annotates the "training" attribute of every submodule
    to constant, so that the training codepath in these modules can be
    meta-compiled away. Upon exiting, the annotations are reverted.
    """
    module_classes = {type(module) for module in model.modules()}
    # __constants__ is the old way to annotate constants and not compatible
    # with __annotations__ .
    module_classes = {
        cls for cls in module_classes if not hasattr(cls, "__constants__")
    }
    for cls in module_classes:
        cls.__annotations__["training"] = torch.jit.Final[bool]
    yield
    for cls in module_classes:
        cls.__annotations__["training"] = bool
|
evocodebench_data_64
|
from typing import Any, Callable, Dict, Iterable, List
import numpy as np
from pydantic import fields
# ----- validators -----
def is_odd(cls: type, v: int, field: fields.ModelField) -> int:
    """Validate that a kernel-size value is an odd number.

    Args:
        cls (type): Class type.
        v (int): Value to check.
        field (fields.ModelField): Field descriptor.

    Raises:
        ValueError: Exception raised if number isn't odd.

    Returns:
        int: `v` sent for further processing.
    """
    even = (v % 2) == 0
    if even:
        raise ValueError(f"{cls.__name__}: {field.name} must be odd numbers.")
    return v
def is_binary(cls: type, v: np.ndarray, field: fields.ModelField) -> np.ndarray:
    """Validate that an array holds only boolean values, i.e. is binary.

    Args:
        cls (type): Class type.
        v (np.ndarray): Value to check.
        field (fields.ModelField): Field descriptor.

    Raises:
        ValueError: Exception raised if array doesn't contain bool datatypes.

    Returns:
        np.ndarray: `v` sent for further processing.
    """
    expected = np.dtype("bool")
    if v.dtype != expected:
        raise ValueError(f"{cls.__name__}: {field.name} must be binary. got dtype {v.dtype}")
    return v
def is_list_of_points(cls: type, v: np.ndarray, field: fields.ModelField) -> np.ndarray:
    """Validate that an array is a list of 2D points, i.e. has shape (_, 2).

    Args:
        cls (type): Class type.
        v (np.ndarray): Value to check.
        field (fields.ModelField): Field descriptor.

    Raises:
        ValueError: If the array is not of shape (_, 2).

    Returns:
        np.ndarray: `v` sent for further processing.
    """
    shape_ok = v.ndim == 2 and v.shape[1] == 2
    if not shape_ok:
        raise ValueError(f"{cls.__name__}: {field.name} must have shape (_, 2).")
    return v
def is_not_empty(cls: type, v: List[Any], field: fields.ModelField) -> List[Any]:
    """Validate that a list field is non-empty.

    Args:
        cls (type): Class type.
        v (List[Any]): Value to check.
        field (fields.ModelField): Field descriptor.

    Raises:
        ValueError: If the list is empty.

    Returns:
        List[Any]: `v` sent for further processing.
    """
    if not v:
        raise ValueError(f"{cls.__name__}: {field.name} list cannot be empty.")
    return v
def is_not_zero_sum(cls: type, v: Any, field: fields.ModelField) -> Any:
    """Validate that the elements of `v` do not sum to zero.

    Args:
        cls (type): Class type.
        v (Any): Value to check.
        field (fields.ModelField): Field descriptor.

    Raises:
        ValueError: Raised if `v` sums to 0.

    Returns:
        Any: `v` sent for further processing.
    """
    total = np.sum(v)
    if total == 0:
        raise ValueError(f"{cls.__name__}: {field.name} sum cannot be zero.")
    return v
def are_all_positive(cls: type, v: Any, field: fields.ModelField) -> Any:
    """Validate that a scalar, or every element of an iterable, is >= 0.

    Args:
        cls (type): Class type.
        v (Any): Value to check.
        field (fields.ModelField): Field descriptor.

    Raises:
        ValueError: Raised if any value is negative.

    Returns:
        Any: `v` sent for further processing.
    """
    if isinstance(v, Iterable):
        # `not all(>= 0)` (rather than `any(< 0)`) also rejects NaN elements.
        if not all(value >= 0 for value in v):
            raise ValueError(f"{cls.__name__}: all {field.name} must be positive. Received {v}")
    elif v < 0.0:
        raise ValueError(f"{cls.__name__}: {field.name} must be positive. Received {v}")
    return v
def to_dtype_float32(cls: type, v: np.ndarray, field: fields.ModelField) -> np.ndarray:
    """Cast the input array to dtype np.float32.

    Args:
        cls (type): Class type.
        v (np.ndarray): Value to convert.
        field (fields.ModelField): Field descriptor.

    Returns:
        np.ndarray: `v` (as float32) sent for further processing.
    """
    converted = v.astype(np.float32)
    return converted
# ----- root_validators -----
def is_valid_bbox(cls: type, values: Dict[str, float]) -> Dict[str, float]:
    """Check that the bounding box has strictly positive width and height."""
    x_ok = values["x_min"] < values["x_max"]
    y_ok = values["y_min"] < values["y_max"]
    if not (x_ok and y_ok):
        raise ValueError(
            f'{cls.__name__}: invalid bbox. x_min={values["x_min"]}, x_max={values["x_max"]},'
            f' y_min={values["y_min"]}, y_max={values["y_max"]}'
        )
    return values
# ----- parametrized validators -----
def is_array_n_dimensions(nb_dimensions: int) -> Callable:
    """Create a pydantic validator checking if an array is n-dimensional.

    Args:
        nb_dimensions (int): number of dimensions the array must have

    Returns:
        Callable: the validator.
    """
    def validator(cls: type, v: np.ndarray, field: fields.ModelField) -> np.ndarray:
        """Check if the array has the right number of dimensions."""
        # An empty 1-D array is accepted when zero dimensions are expected.
        empty_special_case = v.shape == (0,) and nb_dimensions == 0
        if len(v.shape) != nb_dimensions and not empty_special_case:
            raise ValueError(
                f"{cls.__name__}: wrong number of dimensions for {field.name}. "
                f"Expected {nb_dimensions}, got {len(v.shape)}"
            )
        return v
    return validator
# ----- parametrized root_validators -----
def are_lengths_equal(field1: str, field2: str) -> Callable:
    """Create a pydantic root validator checking that two fields have equal length.

    Args:
        field1 (str): name of the first field
        field2 (str): name of the second field

    Returns:
        Callable: the validator.
    """
    def __root_validator(cls: type, values: Dict[str, List[Any]]) -> Dict[str, List[Any]]:
        """Check if len(field1) equals len(field2)."""
        len1, len2 = len(values[field1]), len(values[field2])
        if len1 != len2:
            raise ValueError(
                f"{cls.__name__}: {field1} and {field2} length mismatch, "
                f"resp. {len1} and {len2}"
            )
        return values
    return __root_validator
def are_shapes_equal(field1: str, field2: str) -> Callable:
    """Create a pydantic root validator checking that two array fields share a shape.

    Args:
        field1 (str): name of the first field
        field2 (str): name of the second field

    Returns:
        Callable: the validator.
    """
    def __root_validator(cls: type, values: Dict[str, np.ndarray]) -> Dict[str, np.ndarray]:
        """Check if field1.shape equals field2.shape."""
        shape1, shape2 = values[field1].shape, values[field2].shape
        if shape1 != shape2:
            raise ValueError(f"{cls.__name__}: {field1} and {field2} shape mismatch.")
        return values
    return __root_validator
def are_all_shapes_equal(field1: str, field2: str) -> Callable:
    """Create a pydantic root validator comparing two lists of arrays shape-by-shape.

    The generated validator checks that the two list fields have the same length
    and that corresponding elements have identical shapes.

    Args:
        field1 (str): name of the first field
        field2 (str): name of the second field

    Returns:
        Callable: the validator.
    """
    def __root_validator(cls: type, values: Dict[str, np.ndarray]) -> Dict[str, np.ndarray]:
        """Check if len(field1) equals len(field2) and if every element have the same shape."""
        shapes_field_1 = [element.shape for element in values[field1]]
        shapes_field_2 = [element.shape for element in values[field2]]
        lengths_differ = len(values[field1]) != len(values[field2])
        if lengths_differ or shapes_field_1 != shapes_field_2:
            raise ValueError(
                f"{cls.__name__}: {field1} and {field2} shape mismatch, resp. {shapes_field_1} and {shapes_field_2}."
            )
        return values
    return __root_validator
|
evocodebench_data_65
|
from copy import deepcopy
from typing import Union, List, Dict, Tuple, Any
from autorag import embedding_models
def cast_metrics(metrics: Union[List[str], List[Dict]]) -> Tuple[List[str], List[Dict[str, Any]]]:
    """
    Turn metrics to list of metric names and parameter list.

    :param metrics: List of string or dictionary. Dictionaries must carry a
        'metric_name' key; their remaining items become that metric's params.
    :return: The list of metric names and dictionary list of metric parameters.
    :raises ValueError: If `metrics` is not a list of strings or dictionaries.
    """
    if not isinstance(metrics, list):
        raise ValueError("metrics must be a list of string or dictionary.")
    # Bug fix: an empty list previously crashed with IndexError on metrics[0].
    if not metrics:
        return [], []
    # Deep-copy so that popping 'metric_name' does not mutate the caller's data.
    metrics_copy = deepcopy(metrics)
    if isinstance(metrics_copy[0], str):
        return metrics_copy, [{} for _ in metrics_copy]
    elif isinstance(metrics_copy[0], dict):
        # Pop 'metric_name' from every dict first; the remaining items are params.
        metric_names = [metric.pop('metric_name') for metric in metrics_copy]
        metric_params = [
            dict(cast_embedding_model(key, value) for key, value in metric.items())
            for metric in metrics_copy
        ]
        return metric_names, metric_params
    else:
        raise ValueError("metrics must be a list of string or dictionary.")
def cast_embedding_model(key, value):
    """Resolve an 'embedding_model' name to its registered object; pass other pairs through."""
    if key != 'embedding_model':
        return key, value
    return key, embedding_models[value]
|
evocodebench_data_66
|
# coding=utf-8
# Copyright 2023 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tools for manipulating coordinate spaces and distances along rays."""
from internal import geopoly
from internal import math
import jax
from jax import random
import jax.numpy as jnp
import numpy as np
def contract(x):
  """Contracts points towards the origin (Eq 10 of arxiv.org/abs/2111.12077)."""
  # The clamp at 1 makes scale == 1 inside the unit ball, leaving those
  # points unchanged; points outside are pulled towards the origin.
  sq_norm = jnp.maximum(1, jnp.sum(x**2, axis=-1, keepdims=True))
  scale = (2 * jnp.sqrt(sq_norm) - 1) / sq_norm
  return scale * x
def inv_contract(z):
  """The inverse of contract()."""
  # The clamp at 1 makes the scale factor 1 inside the unit ball, so those
  # points are returned unchanged.
  sq_norm = jnp.maximum(1, jnp.sum(z**2, axis=-1, keepdims=True))
  denom = 2 * jnp.sqrt(sq_norm) - sq_norm
  return z / denom
def track_linearize(fn, mean, cov):
  """Apply function `fn` to a set of means and covariances, ala a Kalman filter.

  We can analytically transform a Gaussian parameterized by `mean` and `cov`
  with a function `fn` by linearizing `fn` around `mean`, and taking advantage
  of the fact that Covar[Ax + y] = A(Covar[x])A^T (see
  https://cs.nyu.edu/~roweis/notes/gaussid.pdf for details).

  Args:
    fn: A function that can be applied to `mean`.
    mean: a tensor of Gaussian means, where the last axis is the dimension.
    cov: a tensor of covariances, where the last two axes are the dimensions.

  Returns:
    fn_mean: the transformed means.
    fn_cov: the transformed covariances.
  """
  if len(cov.shape) != len(mean.shape) + 1:
    raise ValueError('cov must be non-diagonal')
  fn_mean, jvp = jax.linearize(fn, mean)
  # Applying the JVP over the last axis twice computes J @ cov @ J^T.
  apply_jac = jax.vmap(jvp, -1, -2)
  fn_cov = apply_jac(apply_jac(cov))
  return fn_mean, fn_cov
def track_isotropic(fn, mean, scale):
  """Apply function `fn` to a set of means and scales, ala a Kalman filter.

  This is the isotropic or scalar equivalent of track_linearize, as we're still
  linearizing a function and tracking a Gaussian through it, but the input and
  output Gaussians are all isotropic and are only represented with a single
  `scale` value (where `scale**2` is the variance of the Gaussian).

  Args:
    fn: A function that can be applied to `mean`.
    mean: a tensor of Gaussian means, where the last axis is the dimension.
    scale: a tensor of scales, with the same shape as means[..., -1], or None
      to skip scale tracking entirely.

  Returns:
    fn_mean: the transformed means.
    fn_scale: the transformed scales (None if `scale` was None).
  """
  d = mean.shape[-1]
  fn_mean, lin_fn = jax.linearize(fn, mean)
  if scale is None:
    # Bug fix: the shape check below used to run unconditionally and crashed
    # with AttributeError on `scale.shape` when scale was None, even though
    # None is an explicitly supported value.
    return fn_mean, None
  if mean.shape[:-1] != scale.shape:
    raise ValueError(
        f'mean.shape[:-1] {mean.shape}[:-1] != scale.shape {scale.shape}.'
    )
  # Compute the Jacobian of fn function at the locations of each mean.
  jac = jax.vmap(lin_fn, in_axes=-1, out_axes=-1)(
      jnp.broadcast_to(jnp.eye(d), mean.shape + (d,))
  )
  # The cube root of the determinant of the Jacobian is the geometric mean
  # of the eigenvalues of the Jacobian, which gives us the isotropic scaling
  # implied by `fn` at each mean that `scale` should be multiplied by.
  eps = jnp.finfo(jnp.float32).tiny  # Guard against an inf gradient at 0.
  abs_det = jnp.maximum(eps, jnp.abs(jnp.linalg.det(jac)))
  # Special case d == 3 for speed's sake.
  fn_scale = scale * (jnp.cbrt(abs_det) if d == 3 else abs_det ** (1 / d))
  return fn_mean, fn_scale
def contract3_isoscale(x):
  """A fast version of track_isotropic(contract, *)'s scaling for 3D inputs."""
  if x.shape[-1] != 3:
    raise ValueError(f'Inputs must be 3D, are {x.shape[-1]}D.')
  sq_norm = jnp.maximum(1, jnp.sum(x**2, axis=-1))
  # Log-space evaluation of cbrt((2 * sqrt(sq_norm) - 1) ** 2) / sq_norm.
  log_scale = (2 / 3) * jnp.log(2 * jnp.sqrt(sq_norm) - 1) - jnp.log(sq_norm)
  return jnp.exp(log_scale)
def construct_ray_warps(fn, t_near, t_far, *, fn_inv=None):
  """Construct a bijection between metric distances and normalized distances.

  See the text around Equation 11 in https://arxiv.org/abs/2111.12077 for a
  detailed explanation.

  Args:
    fn: the function to ray distances.
    t_near: a tensor of near-plane distances.
    t_far: a tensor of far-plane distances.
    fn_inv: Optional, if not None then it's used as the inverse of fn().

  Returns:
    t_to_s: a function that maps distances to normalized distances in [0, 1].
    s_to_t: the inverse of t_to_s.
  """
  if fn is None:
    fwd = lambda x: x
    inv = lambda x: x
  else:
    fwd = fn
    inv = fn_inv
    if inv is None:
      # A simple mapping from some functions to their inverse.
      inv = {
          'reciprocal': jnp.reciprocal,
          'log': jnp.exp,
          'exp': jnp.log,
          'sqrt': jnp.square,
          'square': jnp.sqrt,
      }[fn.__name__]
  s_near = fwd(t_near)
  s_far = fwd(t_far)
  # Forcibly clip t to the range of valid values, to guard against inf's.
  clip = lambda t: jnp.clip(t, t_near, t_far)

  def t_to_s(t):
    return (fwd(clip(t)) - s_near) / (s_far - s_near)

  def s_to_t(s):
    return clip(inv(s * s_far + (1 - s) * s_near))

  return t_to_s, s_to_t
def expected_sin(mean, var):
  """Compute E[sin(x)] for x ~ N(mean, var)."""
  # Larger variance attenuates the expectation towards zero.
  attenuation = jnp.exp(-0.5 * var)
  return attenuation * math.safe_sin(mean)
def integrated_pos_enc(mean, var, min_deg, max_deg):
  """Encode `x` with sinusoids scaled by 2^[min_deg, max_deg).

  Args:
    mean: tensor, the mean coordinates to be encoded
    var: tensor, the variance of the coordinates to be encoded.
    min_deg: int, the min degree of the encoding.
    max_deg: int, the max degree of the encoding.

  Returns:
    encoded: jnp.ndarray, encoded variables.
  """
  scales = 2.0 ** jnp.arange(min_deg, max_deg)
  out_shape = mean.shape[:-1] + (-1,)
  mean_scaled = jnp.reshape(mean[Ellipsis, None, :] * scales[:, None], out_shape)
  var_scaled = jnp.reshape(var[Ellipsis, None, :] * scales[:, None] ** 2, out_shape)
  # sin(x + pi/2) == cos(x), so one expected_sin call covers both phases.
  phases = jnp.concatenate([mean_scaled, mean_scaled + 0.5 * jnp.pi], axis=-1)
  variances = jnp.concatenate([var_scaled, var_scaled], axis=-1)
  return expected_sin(phases, variances)
def lift_and_diagonalize(mean, cov, basis):
  """Project `mean` and `cov` onto basis and diagonalize the projected cov."""
  projected_mean = math.matmul(mean, basis)
  # diag(B^T C B) computed without forming the full matrix product.
  projected_var = jnp.sum(basis * math.matmul(cov, basis), axis=-2)
  return projected_mean, projected_var
def pos_enc(x, min_deg, max_deg, append_identity=True):
  """The positional encoding used by the original NeRF paper."""
  scales = 2.0 ** jnp.arange(min_deg, max_deg)
  flat_shape = x.shape[:-1] + (-1,)
  scaled = jnp.reshape(x[Ellipsis, None, :] * scales[:, None], flat_shape)
  # sin at both phases yields [sin(scaled), cos(scaled)] in one call.
  # Note that we're not using safe_sin, unlike IPE.
  four_feat = jnp.sin(jnp.concatenate([scaled, scaled + 0.5 * jnp.pi], axis=-1))
  if append_identity:
    four_feat = jnp.concatenate([x, four_feat], axis=-1)
  return four_feat
def sqrtm(mat, return_eigs=False):
  """Take the matrix square root of a PSD matrix [..., d, d]."""
  eigvec, eigval = jax.lax.linalg.eigh(
      mat, symmetrize_input=False, sort_eigenvalues=False
  )
  # sqrt(M) = V diag(sqrt(L)) V^T for the eigendecomposition M = V L V^T.
  root_vals = math.safe_sqrt(eigval)[Ellipsis, None, :]
  result = math.matmul(eigvec * root_vals, jnp.moveaxis(eigvec, -2, -1))
  if return_eigs:
    return result, (eigvec, eigval)
  return result
def isotropize(cov, mode='accurate'):
  """Turn covariances into isotropic covariances with the same determinant."""
  d = cov.shape[-1]
  if d == 1:
    return cov
  if mode == 'accurate':
    log_det = jnp.linalg.slogdet(cov)[1]
    diag_val = jnp.exp(log_det / d)
    is_invalid = ~jnp.isfinite(log_det)
  elif mode == 'fast':
    det = jnp.linalg.det(cov)
    diag_val = det ** (1 / d)
    is_invalid = (det <= jnp.finfo(jnp.float32).tiny) | ~jnp.isfinite(det)
  else:
    raise ValueError(f'mode={mode} not implemented.')
  cov_iso = jnp.eye(d) * diag_val[Ellipsis, None, None]
  # Guard against NaN outputs when `det` is super small. Note that this does not
  # guard against NaN gradients!
  return jnp.where(is_invalid[Ellipsis, None, None], jnp.zeros_like(cov), cov_iso)
def construct_perp_basis(directions):
  """Construct a perpendicular basis for each 3-vector in `directions`."""
  if directions.shape[-1] != 3:
    raise ValueError(f'directions must be 3D, but is {directions.shape[-1]}D')
  # Crossing with a fixed axis [0, 0, 1] yields one perpendicular vector.
  cand_a = jnp.cross(directions, np.array([0.0, 0.0, 1.0]))
  # If `directions` is nearly parallel to [0, 0, 1] that cross-product
  # degenerates, so fall back to crossing with [1, 1, 1] instead.
  cand_b = jnp.cross(directions, np.array([1.0, 1.0, 1.0]))
  degenerate = jnp.all(jnp.abs(cand_a) < np.finfo(np.float32).eps, axis=-1)
  first = jnp.where(degenerate[Ellipsis, None], cand_b, cand_a)
  # Crossing `directions` with the first perpendicular gives the second.
  second = jnp.cross(directions, first)
  unit = lambda z: z / jnp.sqrt(jnp.sum(z**2, axis=-1, keepdims=True))
  return unit(first), unit(second)
def hexify(rng, *, origins, directions, radii, tdist):
  """Produce hexagon-shaped samples from ray segments.

  For every interval [tdist[i], tdist[i+1]] along each ray, six control points
  are placed in a hexagonal arrangement around the ray axis.

  Args:
    rng: random key, or None to use a deterministic flip/shift pattern.
    origins: ray origins.  # assumes a trailing 3-vector axis — TODO confirm
    directions: ray directions.
    radii: ray radii.
    tdist: per-ray distances delimiting the segments to be sampled.

  Returns:
    control: the multisample control points in world coordinates.
    perp_mag: the perpendicular offset magnitude of each control point.
  """
  # Construct a base set of angles, by linspacing [0, 2pi] in a specific order.
  # This is one of two orderings of angles that doesn't induce any anisotropy
  # into the sample covariance of the multisample coordinates. Any rotation and
  # mirroring along the z-axis of this ordering is also valid.
  # There exists one alternative valid ordering, which is [0, 3, 2, 5, 4, 1].
  # This seems to work less well though likely because of the strong correlation
  # between adjacent angles.
  thetas = (np.pi / 3) * np.array([0, 2, 4, 3, 5, 1])
  # Lift the angles to the size of the rays: one hexagon (6 angles) per segment.
  sz = tdist.shape[:-1] + (tdist.shape[-1] - 1, len(thetas))
  thetas = jnp.broadcast_to(thetas, sz)
  if rng is not None:
    # Randomly reverse the order of half of the hexes.
    key, rng = random.split(rng)
    flip = random.bernoulli(key, shape=sz[:-1])
    thetas = jnp.where(flip[Ellipsis, None], thetas[Ellipsis, ::-1], thetas)
    # Rotate each hex by some random amount.
    key, rng = random.split(rng)
    thetas += (2 * jnp.pi) * random.uniform(key, shape=sz[:-1])[Ellipsis, None]
  else:
    # If we're deterministic, flip and shift every other hex by 30 degrees.
    flip = jnp.arange(thetas.shape[-2]) % 2
    thetas = jnp.where(flip[Ellipsis, None], thetas[Ellipsis, ::-1], thetas)
    thetas += (flip * jnp.pi / 6)[Ellipsis, None]
  # TODO(barron): Plumb through the dx/dy frame for the original ray in the
  # image plane, to avoid the need of this.
  perp_axis1, perp_axis2 = construct_perp_basis(directions)
  # Grab each t-interval's midpoint and half-width.
  t0, t1 = tdist[Ellipsis, :-1], tdist[Ellipsis, 1:]
  s = (t0 + t1) / 2
  d = (t1 - t0) / 2
  # Compute the length along the ray for each multisample, using mip-NeRF math.
  cz = t0[Ellipsis, None] + math.safe_div(d, (d**2 + 3 * s**2))[Ellipsis, None] * (
      (t1**2 + 2 * s**2)[Ellipsis, None]
      + (3 / np.sqrt(7))
      * (np.arange(6) * (2 / 5) - 1)
      * math.safe_sqrt(((d**2 - s**2) ** 2 + 4 * s**4))[Ellipsis, None]
  )
  # Compute the offset from the ray for each multisample.
  perp_mag = jnp.sqrt(0.5) * radii[Ellipsis, None, :] * cz
  # Go from ray coordinate to world coordinates.
  cx = perp_mag * jnp.cos(thetas)
  cy = perp_mag * jnp.sin(thetas)
  control = (
      origins[Ellipsis, None, None, :]
      + perp_axis1[Ellipsis, None, None, :] * cx[Ellipsis, None]
      + perp_axis2[Ellipsis, None, None, :] * cy[Ellipsis, None]
      + directions[Ellipsis, None, None, :] * cz[Ellipsis, None]
  )
  return control, perp_mag
def unscented_transform(mean, cov, basis, axis=0):
  """Construct "sigma points" along `axis` from each mean and covariance.

  Args:
    mean: Gaussian means; the last axis is the dimension d.
    cov: Gaussian covariances with trailing shape (d, d).
    basis: the control-point construction to use. One of 'mean' (disables the
      transform), 'random_<n>', a tessellated polyhedron spec
      '{tetrahedron,icosahedron,octahedron}_<tesselation>' (3D only),
      'julier', or 'menegaz'.
    axis: the output axis along which control points are stacked.

  Returns:
    The control points ("sigma points") for each input Gaussian.

  Raises:
    ValueError: if `basis` is unknown, or a polyhedron basis is given non-3D
      input.
  """
  d = cov.shape[-1]
  mean_ex = jnp.expand_dims(mean, axis)
  if basis == 'mean':
    # This effectively disables the unscented transform.
    return mean_ex
  if basis.startswith('random_'):
    num_random = int(basis.split('_')[-1])
    # TODO(barron): use a non-fixed random seed?
    noise = random.multivariate_normal(
        random.PRNGKey(0),
        jnp.zeros_like(mean),
        cov,
        (num_random,) + mean.shape[:-1],
    )
    control = mean_ex + jnp.moveaxis(jnp.nan_to_num(noise), 0, axis)
    return control
  sqrtm_cov = sqrtm(cov)
  if any([
      basis.startswith(x) for x in ['tetrahedron', 'icosahedron', 'octahedron']
  ]):
    # Use tessellated regular polyhedra vertices (and vec(0)) as control points.
    if d != 3:
      raise ValueError(f'Input is {d}D, but polyhedra are only defined for 3D.')
    base_shape, angular_tesselation = basis.split('_')
    transform = geopoly.generate_basis(
        base_shape, int(angular_tesselation), remove_symmetries=False
    ).T
    transform1 = np.concatenate([np.zeros((d, 1)), transform], axis=-1)
    # Normalize each control direction to unit RMS magnitude.
    transform1 /= np.sqrt(np.mean(transform1**2, axis=1))[:, None]
    control = mean_ex + jnp.moveaxis(
        math.matmul(sqrtm_cov, transform1), -1, axis
    )
  elif basis == 'julier':
    # The most basic symmetric unscented transformation from the original paper,
    # which yields 2*d+1 control points.
    offsets = np.sqrt(d + 0.5) * jnp.moveaxis(sqrtm_cov, -1, axis)
    control = jnp.concatenate(
        [mean_ex, mean_ex + offsets, mean_ex - offsets], axis=axis
    )
  elif basis == 'menegaz':
    # A compact unscented transformation from
    # folk.ntnu.no/skoge/prost/proceedings/cdc-ecc-2011/data/papers/2263.pdf
    # which yields d+1 control points.
    if d == 3:
      # A hand-optimized version of the d==3 case.
      sqrtm_cov_sum = jnp.sum(sqrtm_cov, axis=-1, keepdims=True)
      offsets = jnp.concatenate(
          [-sqrtm_cov_sum, 2 * sqrtm_cov - sqrtm_cov_sum / 3], axis=-1
      )
      control = mean_ex + jnp.moveaxis(offsets, -1, axis)
    else:
      transform = np.sqrt(d + 1) * np.eye(d) + (1 - np.sqrt(d + 1)) / d
      # == sqrt((d+1)) * sqrtm(eye(d) - 1/(d+1))
      transform1 = np.concatenate([-np.ones((d, 1)), transform], axis=-1)
      control = mean_ex + jnp.moveaxis(
          math.matmul(sqrtm_cov, transform1), -1, axis
      )
  else:
    raise ValueError(f'basis={basis} not implemented.')
  return control
def compute_control_points(
    means,
    covs,
    rays,
    tdist,
    rng,
    unscented_mip_basis,
    unscented_scale_mult,
):
  """Wrapper to compute unscented control points for the MLP class.

  Args:
    means: Gaussian means of the samples.
    covs: Gaussian covariances of the samples.
    rays: the rays being sampled (required when unscented_scale_mult > 0).
    rng: random key, used by the 'hexify' basis.
    tdist: per-ray sample distances, used by the 'hexify' basis.
    unscented_mip_basis: which control-point construction to use.
    unscented_scale_mult: if > 0, also compute per-point perpendicular scales.

  Returns:
    (control, perp_mag): control points and their perpendicular magnitudes
    (perp_mag is None if unscented_scale_mult <= 0 and basis isn't 'hexify').
  """
  if unscented_mip_basis == 'hexify':
    return hexify(
        rng,
        origins=rays.origins,
        directions=rays.directions,
        radii=rays.radii,
        tdist=tdist,
    )
  # Use a normal unscented transformation.
  control = unscented_transform(
      means,
      covs,
      basis=unscented_mip_basis,
      axis=-2,
  )
  if unscented_scale_mult <= 0:
    return control, None
  if rays is None:
    # NOTE(review): SyntaxError is an unusual choice for a runtime argument
    # check (ValueError would be conventional); kept for compatibility.
    raise SyntaxError(
        'Rays are required as input if unscented_scale_mult > 0.'
    )
  # Mimic the math used by hexify to produce comparable scales: project the
  # control points back onto the ray to recover per-point distances.
  t_recon = jnp.sum(
      (control - rays.origins[Ellipsis, None, None, :])
      * rays.directions[Ellipsis, None, None, :],
      axis=-1,
  )
  perp_mag = jnp.sqrt(0.5) * rays.radii[Ellipsis, None, :] * t_recon
  return control, perp_mag
|
evocodebench_data_67
|
# coding=utf-8
# Copyright 2023 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Geometry utilities."""
from typing import Union
import chex
from internal import rigid_body
from internal import spin_math
import jax
from jax import numpy as jnp
from jax import random
import numpy as onp
import optax
_ArrayType = Union[onp.ndarray, jnp.ndarray]
def line_distance(point1, dir1, point2,
                  dir2):
  """Compute the distance between two lines in 3D.

  Note that this is the distance between lines and not line segments or rays;
  i.e., it does not consider endpoints and will compute the distance assuming
  the line extends infinitely in both directions.

  Args:
    point1: (3,) a point on the first line.
    dir1: (3,) the direction vector of the first line.
    point2: (3,) a point on the second line.
    dir2: (3,) the direction vector of the second line.

  Returns:
    The distance between the two lines.
  """
  # The skew-line formula degenerates for parallel lines, so pick the
  # point-to-line distance in that case.
  return jnp.where(
      are_lines_parallel(dir1, dir2),
      line_to_point_distance(point1, dir1, point2),
      skew_line_distance(point1, dir1, point2, dir2),
  )
def skew_line_closest_points(point1, dir1,
                             point2,
                             dir2):
  """Compute the mutually closest points of two skew lines.

  See:
    https://en.wikipedia.org/wiki/Skew_lines#Nearest_points

  Args:
    point1: a point on the first line.
    dir1: the direction vector of the first line.
    point2: a point on the second line.
    dir2: the direction vector of the second line.

  Returns:
    The point on line 1 closest to line 2, and the point on line 2 closest
    to line 1.
  """
  # Make sure direction vectors are unit.
  d1 = spin_math.normalize(dir1)
  d2 = spin_math.normalize(dir2)
  # The common perpendicular of the two lines.
  perp = jnp.cross(d1, d2)
  # Closest point on line 1: intersect it with the plane containing line 2
  # and the common perpendicular.
  plane2_normal = jnp.cross(d2, perp)
  c1 = point1 + jnp.dot(point2 - point1, plane2_normal) / jnp.dot(d1, plane2_normal) * d1
  # Symmetric construction for the closest point on line 2.
  plane1_normal = jnp.cross(d1, perp)
  c2 = point2 + jnp.dot(point1 - point2, plane1_normal) / jnp.dot(d2, plane1_normal) * d2
  return c1, c2  # pytype: disable=bad-return-type  # jax-ndarray
def skew_line_distance(point1, dir1,
                       point2, dir2):
  """Compute the shortest distance between two skew lines.

  Args:
    point1: a point on the first line.
    dir1: the direction vector of the first line.
    point2: a point on the second line.
    dir2: the direction vector of the second line.

  Returns:
    The distance between the two skew lines.
  """
  # The distance is the gap between the two mutually closest points.
  near1, near2 = skew_line_closest_points(point1, dir1, point2, dir2)
  return jnp.linalg.norm(near2 - near1)
def line_closest_point(line_point, line_dir,
                       query_point):
  """Return the closest point on the line to a point.

  Args:
    line_point: a point on the line.
    line_dir: the direction vector of the line.
    query_point: the query point.

  Returns:
    The closest point on the line to the query point.
  """
  unit_dir = spin_math.normalize(line_dir)
  # Project the offset to the query point onto the line direction.
  along = jnp.dot(query_point - line_point, unit_dir)
  return line_point + along * unit_dir
def line_to_point_distance(line_point, line_dir,
                           query_point):
  """Return the distance from point to a line.

  Args:
    line_point: a point on the line.
    line_dir: the direction vector of the line.
    query_point: the point to compute the distance to.

  Returns:
    The closest distance between the line and the point.
  """
  nearest = line_closest_point(line_point, line_dir, query_point)
  return jnp.linalg.norm(query_point - nearest)
def ray_sphere_intersection(origin,
                            direction,
                            radius = 1.0):
  """Computes the intersecting point between a ray and a sphere.

  Variables use notation from Wikipedia:
    u: direction of ray
    o: origin of ray

  References:
    https://en.wikipedia.org/wiki/Line%E2%80%93sphere_intersection

  Args:
    origin: The origin of the ray.
    direction: The direction of the ray.
    radius: The radius of the sphere.

  Returns:
    The intersecting point on the sphere.
  """
  u_dot_o = jnp.sum(direction * origin, axis=-1, keepdims=True)
  # Discriminant of the quadratic |o + t*u|^2 = r^2.
  nabla = u_dot_o**2 - (jnp.linalg.norm(origin, keepdims=True)**2 - radius**2)
  # Because this is a ray and not a line, only the positive-root solution
  # (the forward intersection) is considered.
  t = jnp.sqrt(nabla) - u_dot_o
  return origin + t * direction
def are_lines_parallel(dir1, dir2):
  """Return whether the two direction vectors point in (nearly) the same direction."""
  unit1 = spin_math.normalize(dir1)
  unit2 = spin_math.normalize(dir2)
  # NOTE: with `>= 1 - eps`, anti-parallel vectors (dot == -1) do NOT count.
  threshold = 1.0 - jnp.finfo(jnp.float32).eps
  return jnp.dot(unit1, unit2) >= threshold  # pytype: disable=bad-return-type  # jnp-type
def spherical_equirectangular_grid(
    height,
    width,
    min_elevation = 0,
    max_elevation = jnp.pi,
    min_azimuth = 0,
    max_azimuth = 2 * jnp.pi):
  """Creates an equirectangular grid (panorama) in spherical coordinates.

  Args:
    height: The height of the output grid.
    width: The width of the output grid.
    min_elevation: The minimum value for the elevation.
    max_elevation: The maximum value for the elevation.
    min_azimuth: The minimum value for the azimuth.
    max_azimuth: The maximum value for the azimuth.

  Returns:
    elevations: (height, width) An array containing the elevations.
    azimuths: (height, width) An array containing the azimuths.
  """
  elevation_samples = jnp.linspace(min_elevation, max_elevation, height)
  # endpoint=False because 0 and 2*pi denote the same azimuth, which would
  # otherwise be sampled twice.
  azimuth_samples = jnp.linspace(min_azimuth, max_azimuth, width, endpoint=False)
  azimuths, elevations = jnp.meshgrid(azimuth_samples, elevation_samples)
  return elevations, azimuths  # pytype: disable=bad-return-type  # jax-ndarray
def spherical_to_cartesian(
    r,
    theta,
    phi,
):
  """Converts spherical to cartesian coordinates.

  For more details see cartesian_to_spherical below.

  Args:
    r: (..., 1) Radius of spherical coordinate.
    theta: (..., 1) Elevation of spherical coordinate.
    phi: (..., 1) Azimuth of spherical coordinate.

  Returns:
    Cartesian coordinates of shape (..., 3) defined by x, y, z.
  """
  sin_theta = jnp.sin(theta)
  return jnp.stack(
      [
          r * sin_theta * jnp.cos(phi),
          r * sin_theta * jnp.sin(phi),
          r * jnp.cos(theta),
      ],
      axis=-1,
  )
def cartesian_to_spherical(
    cartesian_vector,
    eps = onp.float32(onp.finfo(onp.float32).tiny)
):
  """Converts cartesian to spherical coordinates.

  Uses a right-handed coordinate system where z is up and y is right. The
  spherical coordinates are defined by radius (r), inclination (theta)
  ∈ [0, π]) from fixed zenit direction (z) and azimuth (phi) ∈ [0, 2π]) from
  x-axis to y-axis.

  We are using the phyiscal coordinate system as described here:
  https://en.wikipedia.org/wiki/Spherical_coordinate_system.

  Args:
    cartesian_vector: (..., 3) Cartesian coordinates defined by (x, y, z).
    eps: Epsilon used for safe_acos.

  Returns:
    Spherical coordinates as tuple of r, elevation (theta), azimuth (phi).
  """
  x = cartesian_vector[Ellipsis, 0]
  y = cartesian_vector[Ellipsis, 1]
  z = cartesian_vector[Ellipsis, 2]
  # safe_norm keeps the radius away from zero to avoid dividing by it below.
  radius = optax.safe_norm(cartesian_vector, min_norm=eps, axis=-1)
  elevation = spin_math.safe_acos(z / radius)
  azimuth = jnp.arctan2(y, x)
  return radius, elevation, azimuth  # pytype: disable=bad-return-type  # jax-ndarray
def sample_random_points_on_sphere(key, num_points,
                                   min_radius,
                                   max_radius):
  """Sample points uniformly on sphere with random radius within bounds.

  Args:
    key: Seed for random sampling.
    num_points: Number of points to sample.
    min_radius: Minimum euclidean distance of point from center of sphere.
    max_radius: Maximum euclidean distance of point from center of sphere.

  Returns:
    Array of uniform points (N, 3) on sphere with random radius.
  """
  key_radius, key_dir, _ = random.split(key, 3)
  radii = random.uniform(
      key_radius, (num_points, 1), minval=min_radius, maxval=max_radius)
  # Normalizing a Gaussian sample gives a uniform direction on the sphere.
  unit_dirs = spin_math.normalize(random.normal(key_dir, (num_points, 3)))
  return unit_dirs * radii  # pytype: disable=bad-return-type  # jax-ndarray
def sample_points_evenly_on_sphere(num_points,):
  """Deterministically sample points on a sphere that are evenly distributed.

  Uses a generalization of the sunflower spiral to sample points that are
  distibuted evenly on a sphere.

  References:
    http://extremelearning.com.au/how-to-evenly-distribute-points-on-a-sphere-more-effectively-than-the-canonical-fibonacci-lattice/#more-3069
    https://mathoverflow.net/questions/24850/is-there-a-generalisation-of-the-sunflower-spiral-to-higher-dimensions
    https://stackoverflow.com/questions/9600801/evenly-distributing-n-points-on-a-sphere/44164075#44164075

  Args:
    num_points: The number of points to sample.

  Returns:
    (num_points, 3) The sampled points.
  """
  golden_ratio = (1 + 5**0.5) / 2
  # Half-integer indices avoid placing samples exactly at the poles.
  ks = jnp.arange(0, num_points, dtype=jnp.float32) + 0.5
  phis = jnp.pi * 2 * golden_ratio * ks
  thetas = jnp.arccos(1 - 2 * ks / num_points)
  return spherical_to_cartesian(1.0, thetas, phis)  # pytype: disable=wrong-arg-types  # jax-ndarray
def is_point_in_convex_hull(point,
                            hull_normals,
                            hull_offsets,
                            padding = 0.0):
  """Computes whether the given points are inside or outside a convex hull.

  The convex hull is defined using the normals and offsets of a facet.
  If the dot product between a point and a normal is less than the offset, then
  it is on the inner side of that facet. If this is true for all facets, then
  the point is inside the convex hull.

  References:
    http://www.qhull.org/html/index.htm
    https://docs.scipy.org/doc/scipy/reference/generated/scipy.spatial.ConvexHull.html

  Args:
    point: (..., D) An array containing the points to test.
    hull_normals: (F, D) The normals of the facets of the convex hull.
    hull_offsets: (F, D) The offsets of the facets of the convex hull.
    padding: A number to pad the convex hull by. A positive value expands the
      convex hull while a negative number shrinks it.

  Returns:
    A boolean array of shape (...,) that is True if a point is inside the hull
    and False otherwise.
  """
  batch_shape = point.shape[:-1]
  flat_points = point.reshape(-1, point.shape[-1])
  # A point is inside iff it lies on the inner side of every facet.
  facet_dots = hull_normals @ flat_points.T
  inside = (facet_dots <= -hull_offsets[:, None] + padding).all(axis=0)
  return inside.reshape(batch_shape)
def cosine_to_deg(array):
  """Converts cosine angle to degrees.

  Args:
    array: containing cosine angles (e.g. result of dot product).

  Returns:
    array with angles as degrees.
  """
  # Clip first so that float error just outside [-1, 1] doesn't produce NaNs.
  clipped = array.clip(-1, 1)
  return jnp.degrees(jnp.arccos(clipped))
# TODO(phenzler): Convert this to xnp once we have a more solid code base that
# supports xnp.
def onp_cosine_to_deg(array):
  """NumPy twin of `cosine_to_deg`: converts cosine angles to degrees.

  Args:
    array: containing cosine angles (e.g. result of dot product).

  Returns:
    array with the corresponding angles in degrees.
  """
  # Clip guards arccos against values just outside [-1, 1] from roundoff.
  clipped = array.clip(-1, 1)
  return onp.degrees(onp.arccos(clipped))
def rotation_distance(rotation_mat1,
                      rotation_mat2):
  """Computes the angle between two rotation matrices in degrees.

  Args:
    rotation_mat1: (3, 3) The first batch of rotation matrix.
    rotation_mat2: (3, 3) The second batch of rotation matrix.

  Returns:
    The angle in degrees between 0 and 180.
  """
  # Compare the rotations in axis-angle (SO(3) log) space.
  log1 = rigid_body.log_so3(rotation_mat1)
  log2 = rigid_body.log_so3(rotation_mat2)
  raw_deg = jnp.degrees(jnp.linalg.norm(log1 - log2, axis=-1))
  # Fold angles above 180 degrees back into [0, 180].
  return jnp.where(raw_deg < 180, raw_deg, 360 - raw_deg)  # pytype: disable=bad-return-type # jnp-type
def compute_bbox_from_xyza(
    xyza,
    padding,
    alpha_threshold = 0.99,
):
  """Computes a bounding box given an xyza array.

  Args:
    xyza: An array of shape (..., 4) containing the XYZ coordinates in the first
      three channels and an alpha value in the last.
    padding: A padding value to be added to all sides. Can be a scalar or
      anything broadcastable against a (3,) vector.
    alpha_threshold: The threshold at which to binarize the alpha into a mask.

  Returns:
    A bounding box of shape (2, 3) containing (min_coords, max_coords).

  Raises:
    ValueError: If no point has an alpha above `alpha_threshold`, in which
      case the bounding box is undefined.
  """
  padding = onp.array(padding)
  xyz = xyza[Ellipsis, :3]
  alpha = xyza[Ellipsis, 3]
  mask = alpha > alpha_threshold
  if not mask.any():
    # Guard against reducing an empty selection, which would otherwise raise
    # a cryptic "zero-size array to reduction operation" error from numpy.
    raise ValueError(
        f'No points with alpha > {alpha_threshold}; bounding box is undefined.')
  # Keep only the opaque points and flatten any batch dimensions.
  xyz = xyz[mask].reshape(-1, 3)
  min_coord = xyz.min(axis=0) - padding
  max_coord = xyz.max(axis=0) + padding
  return onp.stack([min_coord, max_coord], axis=0)
|
evocodebench_data_68
|
# coding=utf-8
# Copyright 2023 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Helper functions for linear splines."""
import functools
from internal import math
from internal import utils
import jax
from jax.experimental import checkify
import jax.numpy as jnp
def check_zero_endpoints(y):
  """Checks (at runtime, via checkify) that all splines start and end with 0.

  Args:
    y: (..., m) spline values; the first and last entry along the final axis
      of every spline must be exactly 0.
  """
  checkify.check(jnp.all(y[Ellipsis, 0] == 0), 'Splines must all start with 0.')
  checkify.check(jnp.all(y[Ellipsis, -1] == 0), 'Splines must all end with 0.')
def query(tq, t, v):
  """Queries the linear spline with knots (t, v) at positions tq.

  Positions outside [t[0], t[-1]] evaluate to 0 (zero extrapolation).
  """
  utils.assert_valid_linspline(t, v)
  # Zero-extrapolate on both sides, and batch over leading dimensions.
  zero_ext_interp = functools.partial(jnp.interp, left=0, right=0)
  batched_interp = jnp.vectorize(zero_ext_interp, signature='(n),(m),(m)->(n)')
  return batched_interp(tq, t, v)
def integrate(t, w):
  """Integrates the linear spline (t, w) using the trapezoid rule."""
  utils.assert_valid_linspline(t, w)
  # Sum of (average adjacent height) * (interval width) over all intervals.
  adjacent_sums = w[Ellipsis, :-1] + w[Ellipsis, 1:]
  return 0.5 * jnp.sum(adjacent_sums * jnp.diff(t), axis=-1)
def normalize(t, w, eps=jnp.finfo(jnp.float32).eps ** 2):
  """Rescales w so that the spline (t, w) integrates to 1."""
  utils.assert_valid_linspline(t, w)
  # Clamp the denominator away from zero to avoid dividing by ~0 mass.
  total_mass = jnp.maximum(eps, integrate(t, w))
  return w / total_mass[Ellipsis, None]
def insert_knot(ti, t, y):
  """Inserts knots ti into the linear spline (t, y). Assumes zero endpoints.

  Args:
    ti: positions of the new knots to insert.
    t: (..., m) existing knot positions.
    y: (..., m) existing knot values; must start and end with 0.

  Returns:
    (to, yo): the spline with the new knots merged in, sorted by position.
  """
  utils.assert_valid_linspline(t, y)
  check_zero_endpoints(y)
  # Compute the spline value at the insertion points.
  yi = query(ti, t, y)
  # Concatenate the insertion points and values onto the end of each spline.
  # Broadcasting lifts `ti`/`yi` to the batch shape of `t`/`y` first.
  ti_ex = jnp.broadcast_to(ti, t.shape[: -len(ti.shape)] + ti.shape)
  yi_ex = jnp.broadcast_to(yi, y.shape[: -len(yi.shape)] + yi.shape)
  to = jnp.concatenate([t, ti_ex], axis=-1)
  yo = jnp.concatenate([y, yi_ex], axis=-1)
  # Sort the spline according to t.
  sort_idx = jnp.argsort(to)
  to = jnp.take_along_axis(to, sort_idx, axis=-1)
  yo = jnp.take_along_axis(yo, sort_idx, axis=-1)
  return to, yo
def clamp(t, y, minval, maxval):
  """Clamp (t, y) to be zero outside of t in [minval, maxval]."""
  utils.assert_valid_linspline(t, y)
  check_zero_endpoints(y)
  # Add in extra points at and immediately above/below the min/max vals.
  # The epsilon-adjacent knots let the spline drop to zero "instantly" at the
  # clamp boundary instead of linearly interpolating down over a whole
  # existing interval.
  ti = jnp.concatenate(
      [
          math.minus_eps(minval),
          minval,
          maxval,
          math.plus_eps(maxval),
      ],
      axis=-1,
  )
  tc, yo = insert_knot(ti, t, y)
  # Zero the spline values outside of [minval, maxval].
  yc = jnp.where(tc > maxval, 0, jnp.where(tc < minval, 0, yo))
  return tc, yc
def compute_integral(t, y):
  """Integrate a linear spline into a piecewise quadratic spline.

  Returns coefficients (a, b, c) such that within interval i the integral
  evaluates as (t - t[i])**2 * a[i] + (t - t[i]) * b[i] + c[i].
  """
  utils.assert_valid_linspline(t, y)
  eps = jnp.finfo(jnp.float32).eps ** 2
  dt = jnp.diff(t)
  # Quadratic coefficient: half the slope of each linear segment (the eps
  # guard avoids division by zero on degenerate intervals).
  a = jnp.diff(y) / jnp.maximum(eps, 2 * dt)
  # Linear coefficient: the spline value at the interval's left knot.
  b = y[Ellipsis, :-1]
  # The integral has an ambiguous global offset here, which we set to 0.
  c1 = 0.5 * jnp.cumsum(dt[Ellipsis, :-1] * (y[Ellipsis, :-2] + y[Ellipsis, 1:-1]), axis=-1)
  c = jnp.concatenate([jnp.zeros_like(y[Ellipsis, :1]), c1], axis=-1)
  # This quadratic is parameterized as:
  # (t - t[i])**2 * a[i] + (t - t[i]) * b[i] + c[i]
  return a, b, c
def sorted_lookup(x, xp):
  """Looks up values `x` within the sorted reference array `xp`.

  Returns a pair (idx0, idx1) of bracketing indices into xp for each query,
  both clamped into the valid index range (repeating-boundary convention).
  """
  # jnp.searchsorted() has slightly different conventions for boundary
  # handling than the rest of this codebase.
  right_search = functools.partial(jnp.searchsorted, side='right')
  idx = jnp.vectorize(right_search, signature='(n),(m)->(m)')(xp, x)
  lo = jnp.clip(idx - 1, 0, None)
  hi = jnp.clip(idx, None, xp.shape[-1] - 1)
  return lo, hi
def interpolate_integral(tq, t, a, b, c):
  """Interpolate into the piecewise quadratic returned by compute_integral().

  Args:
    tq: query positions.
    t: step-function knot positions delimiting each quadratic piece.
    a, b, c: per-interval quadratic coefficients from compute_integral().

  Returns:
    The piecewise quadratic evaluated at each query position.
  """
  utils.assert_valid_stepfun(t, a)
  utils.assert_valid_stepfun(t, b)
  utils.assert_valid_stepfun(t, c)
  # Clip to valid inputs (assumes repeating boundaries).
  tq = jnp.clip(tq, t[Ellipsis, :1], math.minus_eps(t[Ellipsis, -1:]))
  # Lookup the quadratic coefficients corresponding to each input query.
  idx0, _ = sorted_lookup(tq, t)
  # TODO(barron): It might be faster to stack (a, c, b) during generation and
  # do a single gather.
  t0 = jnp.take_along_axis(t, idx0, axis=-1)
  a0 = jnp.take_along_axis(a, idx0, axis=-1)
  b0 = jnp.take_along_axis(b, idx0, axis=-1)
  c0 = jnp.take_along_axis(c, idx0, axis=-1)
  # Evaluate each quadratic at its local offset from the interval start.
  td = tq - t0
  v = a0 * td**2 + b0 * td + c0
  return v
def blur_stepfun(ts, ys, halfwidth):
  """Convolve a step function (ts, ys) with a box filter of size `halfwidth`.

  The result of box-filtering a step function is a piecewise linear function
  (a linspline), returned as knot positions and values (tp, yp).
  """
  utils.assert_valid_stepfun(ts, ys)
  # Blur each entire step function by a single `halfwidth` value.
  # Dilate the t-values by at least numerical epsilon in each direction.
  ts_lo = ts - halfwidth
  ts_hi = jnp.maximum(math.plus_eps(ts), ts + halfwidth)
  # The difference in adjacent `y` values (zero padded) divided by the
  # difference in adjacent `t` values.
  ys0 = jnp.concatenate(
      [jnp.zeros_like(ys[Ellipsis, :1]), ys, jnp.zeros_like(ys[Ellipsis, :1])], axis=-1
  )
  dy = jnp.diff(ys0) / (ts_hi - ts_lo)
  # When decreasing t splat a positive second derivative, and when increasing
  # t splat a negative second derivative.
  tp = jnp.concatenate([ts_lo, ts_hi], axis=-1)
  dyp = jnp.concatenate([dy, -dy], axis=-1)
  # Sort the dilated t-values and their accompanying derivative weights.
  idx = jnp.argsort(tp, axis=-1)
  tp = jnp.take_along_axis(tp, idx, axis=-1)
  dyp = jnp.take_along_axis(dyp, idx[Ellipsis, :-2], axis=-1)
  # A ramp is the double integral of a delta function, so if we double-
  # integrate these derivatives you get the sum of a bunch of trapezoids.
  yp = jnp.cumsum(jnp.diff(tp)[Ellipsis, :-1] * jnp.cumsum(dyp, axis=-1), axis=-1)
  # Add in the missing first and last endpoint values, which must be zero
  # because we assume zero padding on `ys`.
  yp = jnp.concatenate(
      [jnp.zeros_like(yp[Ellipsis, :1]), yp, jnp.zeros_like(yp[Ellipsis, -1:])], axis=-1
  )
  return tp, yp
|
evocodebench_data_69
|
from typing import Tuple, List
import pandas as pd
from autorag.nodes.retrieval import retrieval_node
@retrieval_node
def hybrid_cc(
        ids: Tuple,
        scores: Tuple,
        top_k: int,
        weights: Tuple = (0.5, 0.5)) -> Tuple[List[List[str]], List[List[float]]]:
    """
    Hybrid CC function.
    CC (convex combination) is a method to fuse multiple retrieval results.
    It is a method that first normalizes the scores of each retrieval result,
    and then combines them with the given weights.
    To use this function, you must input ids and scores as tuple.
    It differs from other retrieval modules in that it does not really execute
    retrieval, but just fuses the results of other retrieval functions.
    So you have to run more than two retrieval modules before running this function.
    And collect ids and scores result from each retrieval module.
    Make it as tuple and input it to this function.

    :param ids: The tuple of ids that you want to fuse.
        The length of this must be the same as the length of scores.
    :param scores: The retrieve scores that you want to fuse.
        The length of this must be the same as the length of ids.
    :param top_k: The number of passages to be retrieved.
    :param weights: Weight for each retrieval result.
        Default is (0.5, 0.5).
        You must set its length as the same as the length of ids and scores.
        Plus, the sum of the weights must be 1.
    :return: The tuple of ids and fused scores that fused by CC.
    """
    assert len(ids) == len(scores), "The length of ids and scores must be the same."
    assert len(ids) == len(weights), "The length of weights must be the same as the length of ids."
    assert len(ids) > 1, "You must input more than one retrieval results."
    assert top_k > 0, "top_k must be greater than 0."
    # Compare with a tolerance: valid float weights such as (0.3, 0.7) do not
    # sum to exactly 1.0 under IEEE-754 arithmetic, so a strict equality check
    # would wrongly reject them.
    assert abs(sum(weights) - 1.0) < 1e-9, "The sum of weights must be 1."
    # One row per query; one id/score column pair per retrieval run.
    id_df = pd.DataFrame({f'id_{i}': id_list for i, id_list in enumerate(ids)})
    score_df = pd.DataFrame({f'score_{i}': score_list for i, score_list in enumerate(scores)})
    df = pd.concat([id_df, score_df], axis=1)

    def cc_pure_apply(row):
        # Re-pack this query's per-run ids/scores and fuse them with cc_pure.
        ids_tuple = tuple(row[[f'id_{i}' for i in range(len(ids))]].values)
        scores_tuple = tuple(row[[f'score_{i}' for i in range(len(scores))]].values)
        return pd.Series(cc_pure(ids_tuple, scores_tuple, weights, top_k))

    df[['cc_id', 'cc_score']] = df.apply(cc_pure_apply, axis=1)
    return df['cc_id'].tolist(), df['cc_score'].tolist()
def cc_pure(ids: Tuple, scores: Tuple, weights: Tuple, top_k: int) -> Tuple[
    List[str], List[float]]:
    """Fuses retrieval runs for one query by convex combination.

    Each run's scores are min-max normalized per run, weighted, summed per
    passage id, and the top_k ids with their fused scores are returned in
    descending score order.
    """
    # One column per retrieval run, indexed by passage id; ids missing from a
    # run become NaN in that run's column.
    per_run = [pd.Series(dict(zip(run_ids, run_scores)))
               for run_ids, run_scores in zip(ids, scores)]
    table = pd.concat(per_run, axis=1)
    # Min-max normalize each run; NaNs (missing ids or degenerate runs where
    # max == min) are mapped to 0.
    norm = ((table - table.min()) / (table.max() - table.min())).fillna(0)
    norm['weighted_sum'] = norm.mul(weights).sum(axis=1)
    ranked = norm.sort_values(by='weighted_sum', ascending=False)
    top = ranked.iloc[:top_k]
    return top.index.tolist(), top['weighted_sum'].tolist()
|
evocodebench_data_70
|
# coding=utf-8
# Copyright 2023 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tools for manipulating coordinate spaces and distances along rays."""
from internal import geopoly
from internal import math
import jax
from jax import random
import jax.numpy as jnp
import numpy as np
def contract(x):
  """Contracts points towards the origin (Eq 10 of arxiv.org/abs/2111.12077)."""
  # Clamping the squared norm to 1 makes the scale exactly 1 inside |x| <= 1,
  # so points in the unit ball pass through unchanged; points outside are
  # pulled into the ball of radius 2.
  sq_norm = jnp.maximum(1, jnp.sum(x**2, axis=-1, keepdims=True))
  shrink = (2 * jnp.sqrt(sq_norm) - 1) / sq_norm
  return shrink * x
def inv_contract(z):
  """The inverse of contract()."""
  # Clamping the squared norm to 1 makes the scale exactly 1 inside |z| <= 1,
  # mirroring the clamp in contract().
  sq_norm = jnp.maximum(1, jnp.sum(z**2, axis=-1, keepdims=True))
  expand_denominator = 2 * jnp.sqrt(sq_norm) - sq_norm
  return z / expand_denominator
def track_linearize(fn, mean, cov):
  """Apply function `fn` to a set of means and covariances, ala a Kalman filter.

  We can analytically transform a Gaussian parameterized by `mean` and `cov`
  with a function `fn` by linearizing `fn` around `mean`, and taking advantage
  of the fact that Covar[Ax + y] = A(Covar[x])A^T (see
  https://cs.nyu.edu/~roweis/notes/gaussid.pdf for details).

  Args:
    fn: A function that can be applied to `mean`.
    mean: a tensor of Gaussian means, where the last axis is the dimension.
    cov: a tensor of covariances, where the last two axes are the dimensions.

  Returns:
    fn_mean: the transformed means.
    fn_cov: the transformed covariances.
  """
  if (len(mean.shape) + 1) != len(cov.shape):
    raise ValueError('cov must be non-diagonal')
  fn_mean, lin_fn = jax.linearize(fn, mean)
  # Applying the linearized map over each of the last two axes computes
  # J @ cov @ J^T without ever materializing the Jacobian J explicitly.
  fn_cov = jax.vmap(lin_fn, -1, -2)(jax.vmap(lin_fn, -1, -2)(cov))
  return fn_mean, fn_cov
def track_isotropic(fn, mean, scale):
  """Apply function `fn` to a set of means and scales, ala a Kalman filter.

  This is the isotropic or scalar equivalent of track_linearize, as we're still
  linearizing a function and tracking a Gaussian through it, but the input and
  output Gaussians are all isotropic and are only represented with a single
  `scale` value (where `scale**2` is the variance of the Gaussian).

  Args:
    fn: A function that can be applied to `mean`.
    mean: a tensor of Gaussian means, where the last axis is the dimension.
    scale: a tensor of scales, with the same shape as means[..., -1].

  Returns:
    fn_mean: the transformed means.
    fn_scale: the transformed scales.
  """
  # NOTE(review): the `scale is not None` branch below suggests None is meant
  # to be supported, but a None scale would already fail this shape check —
  # confirm intended behavior with callers.
  if mean.shape[:-1] != scale.shape:
    raise ValueError(
        f'mean.shape[:-1] {mean.shape}[:-1] != scale.shape {scale.shape}.'
    )
  d = mean.shape[-1]
  fn_mean, lin_fn = jax.linearize(fn, mean)
  if scale is not None:
    # Compute the Jacobian of fn function at the locations of each mean.
    jac = jax.vmap(lin_fn, in_axes=-1, out_axes=-1)(
        jnp.broadcast_to(jnp.eye(d), mean.shape + (d,))
    )
    # The cube root of the determinant of the Jacobian is the geometric mean
    # of the eigenvalues of the Jacobian, which gives us the isotropic scaling
    # implied by `fn` at each mean that `scale` should be multiplied by.
    eps = jnp.finfo(jnp.float32).tiny  # Guard against an inf gradient at 0.
    abs_det = jnp.maximum(eps, jnp.abs(jnp.linalg.det(jac)))
    # Special case d == 3 for speed's sake.
    fn_scale = scale * (jnp.cbrt(abs_det) if d == 3 else abs_det ** (1 / d))
  else:
    fn_scale = None
  return fn_mean, fn_scale
def contract3_isoscale(x):
  """A fast version of track_isotropic(contract, *)'s scaling for 3D inputs."""
  if x.shape[-1] != 3:
    raise ValueError(f'Inputs must be 3D, are {x.shape[-1]}D.')
  norm_sq = jnp.maximum(1, jnp.sum(x**2, axis=-1))
  # Equivalent to cbrt((2 * sqrt(norm_sq) - 1) ** 2) / norm_sq, evaluated in
  # log space.
  log_scale = (2 / 3) * jnp.log(2 * jnp.sqrt(norm_sq) - 1) - jnp.log(norm_sq)
  return jnp.exp(log_scale)
def construct_ray_warps(fn, t_near, t_far, *, fn_inv=None):
  """Construct a bijection between metric distances and normalized distances.

  See the text around Equation 11 in https://arxiv.org/abs/2111.12077 for a
  detailed explanation.

  Args:
    fn: the function to ray distances.
    t_near: a tensor of near-plane distances.
    t_far: a tensor of far-plane distances.
    fn_inv: Optional, if not None then it's used as the inverse of fn().

  Returns:
    t_to_s: a function that maps distances to normalized distances in [0, 1].
    s_to_t: the inverse of t_to_s.
  """
  if fn is None:
    # Identity warp: s is just t remapped linearly onto [0, 1].
    fn_fwd = lambda x: x
    fn_inv = lambda x: x
  else:
    fn_fwd = fn
    if fn_inv is None:
      # A simple mapping from some functions to their inverse.
      inv_mapping = {
          'reciprocal': jnp.reciprocal,
          'log': jnp.exp,
          'exp': jnp.log,
          'sqrt': jnp.square,
          'square': jnp.sqrt,
      }
      fn_inv = inv_mapping[fn.__name__]
  fn_t_near = fn_fwd(t_near)
  fn_t_far = fn_fwd(t_far)
  # Forcibly clip t to the range of valid values, to guard against inf's.
  t_clip = lambda t: jnp.clip(t, t_near, t_far)

  def t_to_s(t):
    return (fn_fwd(t_clip(t)) - fn_t_near) / (fn_t_far - fn_t_near)

  def s_to_t(s):
    return t_clip(fn_inv(s * fn_t_far + (1 - s) * fn_t_near))

  return t_to_s, s_to_t
def expected_sin(mean, var):
  """Compute the mean of sin(x), x ~ N(mean, var)."""
  # The Gaussian expectation of sin damps the amplitude by exp(-var / 2),
  # so a large variance drives the result towards zero.
  damping = jnp.exp(-0.5 * var)
  return damping * math.safe_sin(mean)
def integrated_pos_enc(mean, var, min_deg, max_deg):
  """Encode `x` with sinusoids scaled by 2^[min_deg, max_deg).

  Args:
    mean: tensor, the mean coordinates to be encoded
    var: tensor, the variance of the coordinates to be encoded.
    min_deg: int, the min degree of the encoding.
    max_deg: int, the max degree of the encoding.

  Returns:
    encoded: jnp.ndarray, encoded variables.
  """
  scales = 2.0 ** jnp.arange(min_deg, max_deg)
  shape = mean.shape[:-1] + (-1,)
  # Scale means (and variances by the squared scale), then flatten the
  # (scale, channel) axes into a single feature axis.
  scaled_mean = jnp.reshape(mean[Ellipsis, None, :] * scales[:, None], shape)
  scaled_var = jnp.reshape(var[Ellipsis, None, :] * scales[:, None] ** 2, shape)
  # Appending a quarter-period phase shift turns the sin half of the features
  # into cos, so one expected_sin call produces both halves of the encoding.
  return expected_sin(
      jnp.concatenate([scaled_mean, scaled_mean + 0.5 * jnp.pi], axis=-1),
      jnp.concatenate([scaled_var] * 2, axis=-1),
  )
def lift_and_diagonalize(mean, cov, basis):
  """Project `mean` and `cov` onto basis and diagonalize the projected cov.

  Args:
    mean: Gaussian means, projected by right-multiplication with `basis`.
    cov: full covariance matrices matching `mean`.
    basis: the basis to project onto (right-multiplied against `mean`).

  Returns:
    fn_mean: the projected means.
    fn_cov_diag: the diagonal of the projected covariances.
  """
  fn_mean = math.matmul(mean, basis)
  # Only the diagonal of basis^T @ cov @ basis is needed, so compute it
  # directly instead of forming the full projected covariance matrix.
  fn_cov_diag = jnp.sum(basis * math.matmul(cov, basis), axis=-2)
  return fn_mean, fn_cov_diag
def pos_enc(x, min_deg, max_deg, append_identity=True):
  """The positional encoding used by the original NeRF paper."""
  scales = 2.0 ** jnp.arange(min_deg, max_deg)
  flat_shape = x.shape[:-1] + (-1,)
  # Scale each channel by every frequency, then flatten (scale, channel)
  # into one feature axis of size s*c.
  scaled_x = jnp.reshape(x[Ellipsis, None, :] * scales[:, None], flat_shape)
  # A quarter-period phase shift turns sin into cos, so a single sin call
  # yields both halves of the encoding (..., s*c + s*c).
  # Note that we're not using safe_sin, unlike IPE.
  phases = jnp.concatenate([scaled_x, scaled_x + 0.5 * jnp.pi], axis=-1)
  four_feat = jnp.sin(phases)
  if append_identity:
    return jnp.concatenate([x, four_feat], axis=-1)
  return four_feat
def sqrtm(mat, return_eigs=False):
  """Take the matrix square root of a PSD matrix [..., d, d].

  Args:
    mat: the PSD matrices to take the square root of.
    return_eigs: if True, also return the (eigvec, eigval) decomposition.

  Returns:
    The matrix square roots, or (sqrtm_mat, (eigvec, eigval)) if requested.
  """
  eigvec, eigval = jax.lax.linalg.eigh(
      mat, symmetrize_input=False, sort_eigenvalues=False
  )
  # Reconstruct V * sqrt(D) * V^T. safe_sqrt presumably guards tiny negative
  # eigenvalues produced by roundoff in nominally-PSD inputs — confirm
  # against internal/math.
  scaling = math.safe_sqrt(eigval)[Ellipsis, None, :]
  sqrtm_mat = math.matmul(eigvec * scaling, jnp.moveaxis(eigvec, -2, -1))
  return (sqrtm_mat, (eigvec, eigval)) if return_eigs else sqrtm_mat
def isotropize(cov, mode='accurate'):
  """Turns covariances into isotropic covariances of equal determinant.

  Args:
    cov: (..., d, d) covariance matrices.
    mode: 'fast' (plain determinant) or 'accurate' (log-determinant, more
      stable for small determinants).

  Returns:
    Isotropic covariances with the same determinant; invalid (non-finite or
    vanishing determinant) entries are zeroed out.
  """
  dim = cov.shape[-1]
  if dim == 1:
    # A 1x1 covariance is already isotropic.
    return cov
  if mode == 'fast':
    det = jnp.linalg.det(cov)
    diag_scale = det ** (1 / dim)
    bad = (det <= jnp.finfo(jnp.float32).tiny) | ~jnp.isfinite(det)
  elif mode == 'accurate':
    _, log_det = jnp.linalg.slogdet(cov)
    diag_scale = jnp.exp(log_det / dim)
    bad = ~jnp.isfinite(log_det)
  else:
    raise ValueError(f'mode={mode} not implemented.')
  iso = jnp.eye(dim) * diag_scale[Ellipsis, None, None]
  # Guard against NaN outputs when `det` is super small. Note that this does
  # not guard against NaN gradients!
  return jnp.where(bad[Ellipsis, None, None], jnp.zeros_like(cov), iso)
def construct_perp_basis(directions):
  """Constructs two unit vectors perpendicular to each 3-vector given."""
  if directions.shape[-1] != 3:
    raise ValueError(f'directions must be 3D, but is {directions.shape[-1]}D')
  # Cross with z-hat to get a vector perpendicular to `directions`; fall back
  # to crossing with [1, 1, 1] in the degenerate case where the direction is
  # (anti)parallel to z-hat and the first cross-product vanishes.
  with_zhat = jnp.cross(directions, np.array([0.0, 0.0, 1.0]))
  fallback = jnp.cross(directions, np.array([1.0, 1.0, 1.0]))
  degenerate = jnp.all(jnp.abs(with_zhat) < np.finfo(np.float32).eps, axis=-1)
  axis1 = jnp.where(degenerate[Ellipsis, None], fallback, with_zhat)
  # A second cross product completes the perpendicular frame.
  axis2 = jnp.cross(directions, axis1)
  unit = lambda v: v / jnp.sqrt(jnp.sum(v**2, axis=-1, keepdims=True))
  return unit(axis1), unit(axis2)
def hexify(rng, *, origins, directions, radii, tdist):
  """Produce hexagon-shaped samples from ray segments.

  Args:
    rng: a random key, or None for the deterministic variant.
    origins: (..., 3) ray origins.
    directions: (..., 3) ray directions.
    radii: per-ray radii, broadcast against the sample positions — assumes
      trailing axis matches the segment layout; TODO confirm against callers.
    tdist: (..., S+1) distances delimiting S segments along each ray.

  Returns:
    control: multisample positions in world coordinates.
    perp_mag: the perpendicular offset magnitude of each multisample.
  """
  # Construct a base set of angles, by linspacing [0, 2pi] in a specific order.
  # This is one of two orderings of angles that doesn't induce any anisotropy
  # into the sample covariance of the multisample coordinates. Any rotation and
  # mirroring along the z-axis of this ordering is also valid.
  # There exists one alternative valid ordering, which is [0, 3, 2, 5, 4, 1].
  # This seems to work less well though likely because of the strong correlation
  # between adjacent angles.
  thetas = (np.pi / 3) * np.array([0, 2, 4, 3, 5, 1])
  # Lift the angles to the size of the rays.
  sz = tdist.shape[:-1] + (tdist.shape[-1] - 1, len(thetas))
  thetas = jnp.broadcast_to(thetas, sz)
  if rng is not None:
    # Randomly reverse the order of half of the hexes.
    key, rng = random.split(rng)
    flip = random.bernoulli(key, shape=sz[:-1])
    thetas = jnp.where(flip[Ellipsis, None], thetas[Ellipsis, ::-1], thetas)
    # Rotate each hex by some random amount.
    key, rng = random.split(rng)
    thetas += (2 * jnp.pi) * random.uniform(key, shape=sz[:-1])[Ellipsis, None]
  else:
    # If we're deterministic, flip and shift every other hex by 30 degrees.
    flip = jnp.arange(thetas.shape[-2]) % 2
    thetas = jnp.where(flip[Ellipsis, None], thetas[Ellipsis, ::-1], thetas)
    thetas += (flip * jnp.pi / 6)[Ellipsis, None]
  # TODO(barron): Plumb through the dx/dy frame for the original ray in the
  # image plane, to avoid the need of this.
  perp_axis1, perp_axis2 = construct_perp_basis(directions)
  # Grab each t-interval's midpoint and half-width.
  t0, t1 = tdist[Ellipsis, :-1], tdist[Ellipsis, 1:]
  s = (t0 + t1) / 2
  d = (t1 - t0) / 2
  # Compute the length along the ray for each multisample, using mip-NeRF math.
  cz = t0[Ellipsis, None] + math.safe_div(d, (d**2 + 3 * s**2))[Ellipsis, None] * (
      (t1**2 + 2 * s**2)[Ellipsis, None]
      + (3 / np.sqrt(7))
      * (np.arange(6) * (2 / 5) - 1)
      * math.safe_sqrt(((d**2 - s**2) ** 2 + 4 * s**4))[Ellipsis, None]
  )
  # Compute the offset from the ray for each multisample.
  perp_mag = jnp.sqrt(0.5) * radii[Ellipsis, None, :] * cz
  # Go from ray coordinate to world coordinates.
  cx = perp_mag * jnp.cos(thetas)
  cy = perp_mag * jnp.sin(thetas)
  control = (
      origins[Ellipsis, None, None, :]
      + perp_axis1[Ellipsis, None, None, :] * cx[Ellipsis, None]
      + perp_axis2[Ellipsis, None, None, :] * cy[Ellipsis, None]
      + directions[Ellipsis, None, None, :] * cz[Ellipsis, None]
  )
  return control, perp_mag
def unscented_transform(mean, cov, basis, axis=0):
  """Construct "sigma points" along `axis` from each mean and covariance.

  Args:
    mean: (..., d) Gaussian means.
    cov: (..., d, d) Gaussian covariances.
    basis: the sigma-point scheme, one of: 'mean' (no-op), 'random_N',
      'julier', 'menegaz', or a tessellated polyhedron spec such as
      'icosahedron_2' (3D means only).
    axis: the axis along which the control points are stacked.

  Returns:
    The control points for each input Gaussian.

  Raises:
    ValueError: if `basis` is unrecognized, or a polyhedron basis is
      requested for non-3D means.
  """
  d = cov.shape[-1]
  mean_ex = jnp.expand_dims(mean, axis)
  if basis == 'mean':
    # This effectively disables the unscented transform.
    return mean_ex
  if basis.startswith('random_'):
    num_random = int(basis.split('_')[-1])
    # TODO(barron): use a non-fixed random seed?
    noise = random.multivariate_normal(
        random.PRNGKey(0),
        jnp.zeros_like(mean),
        cov,
        (num_random,) + mean.shape[:-1],
    )
    control = mean_ex + jnp.moveaxis(jnp.nan_to_num(noise), 0, axis)
    return control
  # All remaining schemes offset the mean by columns of the covariance's
  # matrix square root, mapped through a scheme-specific transform.
  sqrtm_cov = sqrtm(cov)
  if any([
      basis.startswith(x) for x in ['tetrahedron', 'icosahedron', 'octahedron']
  ]):
    # Use tessellated regular polyhedra vertices (and vec(0)) as control points.
    if d != 3:
      raise ValueError(f'Input is {d}D, but polyhedra are only defined for 3D.')
    base_shape, angular_tesselation = basis.split('_')
    transform = geopoly.generate_basis(
        base_shape, int(angular_tesselation), remove_symmetries=False
    ).T
    transform1 = np.concatenate([np.zeros((d, 1)), transform], axis=-1)
    transform1 /= np.sqrt(np.mean(transform1**2, axis=1))[:, None]
    control = mean_ex + jnp.moveaxis(
        math.matmul(sqrtm_cov, transform1), -1, axis
    )
  elif basis == 'julier':
    # The most basic symmetric unscented transformation from the original paper,
    # which yields 2*d+1 control points.
    offsets = np.sqrt(d + 0.5) * jnp.moveaxis(sqrtm_cov, -1, axis)
    control = jnp.concatenate(
        [mean_ex, mean_ex + offsets, mean_ex - offsets], axis=axis
    )
  elif basis == 'menegaz':
    # A compact unscented transformation from
    # folk.ntnu.no/skoge/prost/proceedings/cdc-ecc-2011/data/papers/2263.pdf
    # which yields d+1 control points.
    if d == 3:
      # A hand-optimized version of the d==3 case.
      sqrtm_cov_sum = jnp.sum(sqrtm_cov, axis=-1, keepdims=True)
      offsets = jnp.concatenate(
          [-sqrtm_cov_sum, 2 * sqrtm_cov - sqrtm_cov_sum / 3], axis=-1
      )
      control = mean_ex + jnp.moveaxis(offsets, -1, axis)
    else:
      transform = np.sqrt(d + 1) * np.eye(d) + (1 - np.sqrt(d + 1)) / d
      # == sqrt((d+1)) * sqrtm(eye(d) - 1/(d+1))
      transform1 = np.concatenate([-np.ones((d, 1)), transform], axis=-1)
      control = mean_ex + jnp.moveaxis(
          math.matmul(sqrtm_cov, transform1), -1, axis
      )
  else:
    raise ValueError(f'basis={basis} not implemented.')
  return control
def compute_control_points(
    means,
    covs,
    rays,
    tdist,
    rng,
    unscented_mip_basis,
    unscented_scale_mult,
):
  """Wrapper to compute unscented control points for the MLP class.

  Args:
    means: Gaussian means, consumed by the non-hexify path.
    covs: Gaussian covariances, consumed by the non-hexify path.
    rays: the rays (origins/directions/radii); required when
      `unscented_mip_basis == 'hexify'` or when `unscented_scale_mult > 0`.
    tdist: distances delimiting the sampling intervals along each ray.
    rng: random key, may be None for deterministic behavior.
    unscented_mip_basis: 'hexify', or any basis name accepted by
      unscented_transform().
    unscented_scale_mult: if > 0, also compute a perpendicular offset
      magnitude `perp_mag` for the unscented path.

  Returns:
    control: the control point positions.
    perp_mag: perpendicular magnitudes, or None when not computed.
  """
  if unscented_mip_basis == 'hexify':
    control, perp_mag = hexify(
        rng,
        origins=rays.origins,
        directions=rays.directions,
        radii=rays.radii,
        tdist=tdist,
    )
  else:
    # Use a normal unscented transformation.
    control = unscented_transform(
        means,
        covs,
        basis=unscented_mip_basis,
        axis=-2,
    )
    if unscented_scale_mult > 0:
      if rays is None:
        raise SyntaxError(
            'Rays are required as input if unscented_scale_mult > 0.'
        )
      # Mimic the math used by hexify to produce comparable scales.
      # Recover each control point's distance along its ray by projecting
      # onto the ray direction.
      t_recon = jnp.sum(
          (control - rays.origins[Ellipsis, None, None, :])
          * rays.directions[Ellipsis, None, None, :],
          axis=-1,
      )
      perp_mag = jnp.sqrt(0.5) * rays.radii[Ellipsis, None, :] * t_recon
    else:
      perp_mag = None
  return control, perp_mag
|
evocodebench_data_71
|
"""Tools module"""
# Copyright (c) 2023
# Author: Hugo Delatte <delatte.hugo@gmail.com>
# License: BSD 3 clause
# Implementation derived from:
# scikit-learn, Copyright (c) 2007-2010 David Cournapeau, Fabian Pedregosa, Olivier
# Grisel Licensed under BSD 3 clause.
from collections.abc import Callable, Iterator
from enum import Enum
from functools import wraps
import numpy as np
import numpy.typing as npt
import pandas as pd
import sklearn as sk
import sklearn.base as skb
__all__ = [
"AutoEnum",
"cached_property_slots",
"cache_method",
"input_to_array",
"args_names",
"format_measure",
"bisection",
"safe_split",
"fit_single_estimator",
"fit_and_predict",
"deduplicate_names",
"default_asset_names",
"check_estimator",
]
GenericAlias = type(list[int])
class AutoEnum(str, Enum):
    """Base string Enum class used throughout `skfolio`."""
    @staticmethod
    def _generate_next_value_(
        name: str, start: int, count: int, last_values: any
    ) -> str:
        """Makes `auto()` produce the lowercased member name as its value."""
        return name.lower()
    @classmethod
    def has(cls, value: str) -> bool:
        """Tell whether `value` is one of the Enum member values.

        Parameters
        ----------
        value : str
            Input value.

        Returns
        -------
        x : bool
            True if the value is in the Enum, False otherwise.
        """
        return value in cls._value2member_map_
    def __repr__(self) -> str:
        """Use the member name as the representation."""
        return self.name
# noinspection PyPep8Naming
class cached_property_slots:
    """Cached property decorator for slots.

    Stores the computed value under a `_<name>` attribute rather than in the
    instance `__dict__`, so it works on classes defining `__slots__`.
    """
    def __init__(self, func):
        # The wrapped getter; names are filled in by __set_name__.
        self.func = func
        self.public_name = None
        self.private_name = None
        self.__doc__ = func.__doc__
    def __set_name__(self, owner, name):
        """Records the attribute name and derives the backing `_<name>` slot."""
        self.public_name = name
        self.private_name = f"_{name}"
    def __get__(self, instance, owner=None):
        """Returns the cached value, computing and storing it on first access."""
        if instance is None:
            # Class-level access returns the descriptor itself.
            return self
        if self.private_name is None:
            raise TypeError(
                "Cannot use cached_property instance without calling __set_name__"
                " on it."
            )
        try:
            value = getattr(instance, self.private_name)
        except AttributeError:
            # First access: compute and cache in the private attribute.
            value = self.func(instance)
            setattr(instance, self.private_name, value)
        return value
    def __set__(self, instance, owner=None):
        """Makes the property read-only."""
        raise AttributeError(
            f"'{type(instance).__name__}' object attribute '{self.public_name}' is"
            " read-only"
        )
    # Support cached_property_slots[...] in type annotations.
    __class_getitem__ = classmethod(GenericAlias)
def _make_key(args, kwds) -> int:
"""Make a cache key from optionally typed positional and keyword arguments"""
key = args
if kwds:
for item in kwds.items():
key += item
return hash(key)
def cache_method(cache_name: str) -> Callable:
    """Decorator that caches class methods results into a class dictionary.

    Parameters
    ----------
    cache_name : str
        Name of the dictionary class attribute.

    Returns
    -------
    func : Callable
        Decorating function that caches class methods.
    """
    # To avoid memory leakage and proper garbage collection, self should not be
    # part of the cache key. This is a known issue when we use
    # functools.lru_cache on class methods.
    def decorating_function(method):
        @wraps(method)
        def wrapper(self, *args, **kwargs):
            key = _make_key(args, kwargs)
            try:
                cache = getattr(self, cache_name)
            except AttributeError:
                raise AttributeError(
                    "You first need to create a dictionary class attribute named "
                    f"'{cache_name}'"
                ) from None
            if not isinstance(cache, dict):
                raise AttributeError(
                    f"'The cache named '{cache_name}' must be a "
                    f"dictionary, got {type(cache)}"
                )
            # One sub-dictionary per method name, keyed by the argument hash.
            method_cache = cache.setdefault(method.__name__, {})
            if key not in method_cache:
                method_cache[key] = method(self, *args, **kwargs)
            return method_cache[key]
        return wrapper
    return decorating_function
def args_names(func: object) -> list[str]:
    """Returns the argument names of a function, excluding `self`.

    Parameters
    ----------
    func : object
        Function.

    Returns
    -------
    args : list[str]
        The list of function arguments.
    """
    code = func.__code__
    # co_varnames lists arguments first, followed by locals; keep only args.
    positional = code.co_varnames[: code.co_argcount]
    return [arg for arg in positional if arg != "self"]
def check_estimator(
    estimator: skb.BaseEstimator | None, default: skb.BaseEstimator, check_type: any
):
    """Checks the estimator type and returns a clone of it, or the default.

    Parameters
    ----------
    estimator : BaseEstimator, optional
        Estimator.
    default : BaseEstimator
        Default estimator to return when `estimator` is `None`.
    check_type : any
        Expected type of the estimator to check against.

    Returns
    -------
    estimator : Estimator
        The checked estimator or the default.
    """
    if estimator is None:
        return default
    if isinstance(estimator, check_type):
        # Clone so that fitting never mutates the caller's estimator.
        return sk.clone(estimator)
    raise TypeError(f"Expected type {check_type}, got {type(estimator)}")
def input_to_array(
items: dict | npt.ArrayLike,
n_assets: int,
fill_value: any,
dim: int,
assets_names: np.ndarray | None,
name: str,
) -> np.ndarray:
"""Convert a collection of items (array-like or dictionary) into
a numpy array and verify its shape.
Parameters
----------
items : np.ndarray | dict | list
Items to verify and convert to array.
n_assets : int
Expected number of assets.
Used to verify the shape of the converted array.
fill_value : any
When `items` is a dictionary, elements that are not in `asset_names` are filled
with `fill_value` in the converted array.
dim : int
Dimension of the final array.
Possible values are `1` or `2`.
assets_names : ndarray, optional
Asset names used when `items` is a dictionary.
name : str
Name of the items used for error messages.
Returns
-------
values : ndarray of shape (n_assets) for dim=1 or (n_groups, n_assets) for dim=2
Converted array.
"""
if dim not in [1, 2]:
raise ValueError(f"dim must be 1 or 2, got {dim}")
if isinstance(items, dict):
if assets_names is None:
raise ValueError(
f"If `{name}` is provided as a dictionary, you must input `X` as a"
" DataFrame with assets names in columns"
)
if dim == 1:
arr = np.array([items.get(asset, fill_value) for asset in assets_names])
else:
# add assets and convert dict to ordered array
arr = {}
for asset in assets_names:
elem = items.get(asset)
if elem is None:
elem = [asset]
elif np.isscalar(elem):
elem = [asset, elem]
else:
elem = [asset, *elem]
arr[asset] = elem
arr = (
pd.DataFrame.from_dict(arr, orient="index")
.loc[assets_names]
.to_numpy()
.T
)
else:
arr = np.asarray(items)
if arr.ndim != dim:
raise ValueError(f"`{name}` must be a {dim}D array, got a {arr.ndim}D array")
if not isinstance(fill_value, str) and np.isnan(arr).any():
raise ValueError(f"`{name}` contains NaN")
if arr.shape[-1] != n_assets:
if dim == 1:
s = "(n_assets,)"
else:
s = "(n_groups, n_assets)"
raise ValueError(
f"`{name}` must be a of shape {s} with n_assets={n_assets}, "
f"got {arr.shape[0]}"
)
return arr
def format_measure(x: float, percent: bool = False) -> str:
    """Format a measure number into a user-friendly string.

    The number of decimals adapts to the magnitude of the value (between 2
    and 6 significant decimals).

    Parameters
    ----------
    x : float
        Number to format.
    percent : bool, default=False
        If this is set to True, the number is formatted in percentage.

    Returns
    -------
    formatted : str
        Formatted string.
    """
    if np.isnan(x):
        return str(x)
    # The magnitude used to pick the decimal count is taken after the
    # percentage scaling, but the raw value is what gets formatted.
    scaled, spec = (x * 100, "%") if percent else (x, "f")
    if scaled == 0:
        digits = 0
    else:
        digits = min(6, max(int(-np.log10(abs(scaled))) + 2, 2))
    return format(x, f".{digits}{spec}")
def bisection(x: list[np.ndarray]) -> Iterator[list[np.ndarray, np.ndarray]]:
    """Generator to bisect a list of array.

    Parameters
    ----------
    x : list[ndarray]
        A list of array.

    Yields
    ------
    arr : Iterator[list[ndarray, ndarray]]
        Bisected array.
    """
    for arr in x:
        size = len(arr)
        # Arrays of length <= 1 cannot be split further and are skipped.
        if size <= 1:
            continue
        half = size // 2
        yield [arr[:half], arr[half:]]
def safe_indexing(
X: npt.ArrayLike | pd.DataFrame, indices: npt.ArrayLike | None, axis: int = 0
):
"""
Return rows, items or columns of X using indices.
Parameters
----------
X : array-like
Data from which to sample rows.
indices : array-like, optional
Indices of rows or columns.
The default (`None`) is to select the entire data.
axis : int, default=0
The axis along which `X` will be sub-sampled. `axis=0` will select
rows while `axis=1` will select columns.
Returns
-------
subset :
Subset of X on axis 0.
"""
if indices is None:
return X
if hasattr(X, "iloc"):
return X.take(indices, axis=axis)
if axis == 0:
return X[indices]
return X[:, indices]
def safe_split(
    X: npt.ArrayLike,
    y: npt.ArrayLike | None = None,
    indices: np.ndarray | None = None,
    axis: int = 0,
):
    """Create subset of dataset.

    Slice X, y according to indices for cross-validation.

    Parameters
    ----------
    X : array-like
        Data to be indexed.
    y : array-like
        Data to be indexed.
    indices : ndarray of int, optional
        Rows or columns to select from X and y.
        The default (`None`) is to select the entire data.
    axis : int, default=0
        The axis along which `X` will be sub-sampled. `axis=0` will select
        rows while `axis=1` will select columns.

    Returns
    -------
    X_subset : array-like
        Indexed data.
    y_subset : array-like
        Indexed targets, or None when `y` is None.
    """
    X_subset = safe_indexing(X, indices=indices, axis=axis)
    y_subset = None if y is None else safe_indexing(y, indices=indices, axis=axis)
    return X_subset, y_subset
def fit_single_estimator(
    estimator: any,
    X: npt.ArrayLike,
    y: npt.ArrayLike | None = None,
    indices: np.ndarray | None = None,
    axis: int = 0,
):
    """Fit an estimator on an (optionally sub-sampled) dataset within a job.

    Parameters
    ----------
    estimator : estimator object implementing 'fit' and 'predict'
        The object to use to fit the data.
    X : array-like of shape (n_observations, n_assets)
        The data to fit.
    y : array-like of shape (n_observations, n_targets), optional
        The target array if provided.
    indices : ndarray of int, optional
        Rows or columns to select from X and y.
        The default (`None`) is to select the entire data.
    axis : int, default=0
        The axis along which `X` will be sub-sampled. `axis=0` will select
        rows while `axis=1` will select columns.

    Returns
    -------
    fitted_estimator : estimator
        The fitted estimator.
    """
    X_sub, y_sub = safe_split(X, y, indices=indices, axis=axis)
    estimator.fit(X_sub, y_sub)
    return estimator
def fit_and_predict(
    estimator: any,
    X: npt.ArrayLike,
    y: npt.ArrayLike | None,
    train: np.ndarray,
    test: np.ndarray | list[np.ndarray],
    fit_params: dict,
    method: str,
    column_indices: np.ndarray | None = None,
) -> npt.ArrayLike | list[npt.ArrayLike]:
    """Fit the estimator and predict values for a given dataset split.

    Parameters
    ----------
    estimator : estimator object implementing 'fit' and 'predict'
        The object to use to fit the data.
    X : array-like of shape (n_observations, n_assets)
        The data to fit.
    y : array-like of shape (n_observations, n_factors) or None
        The factor array if provided.
    train : ndarray of int of shape (n_train_observations,)
        Indices of training samples.
    test : ndarray of int of shape (n_test_samples,) or list of ndarray
        Indices of test samples or list of indices.
    fit_params : dict
        Parameters that will be passed to ``estimator.fit``.
    method : str
        Invokes the passed method name of the passed estimator.
    column_indices : ndarray, optional
        Indices of columns to select.
        The default (`None`) is to select all columns.

    Returns
    -------
    predictions : array-like or list of array-like
        The result of calling 'estimator.method' on each test set (a single
        array when `test` is an array, otherwise one result per test set).
    """
    if fit_params is None:
        fit_params = {}
    # Column selection first, then row selection for the training fold.
    X, y = safe_split(X, y, indices=column_indices, axis=1)
    X_train, y_train = safe_split(X, y, indices=train, axis=0)
    if y_train is None:
        estimator.fit(X_train, **fit_params)
    else:
        estimator.fit(X_train, y_train, **fit_params)
    predict = getattr(estimator, method)
    if isinstance(test, list):
        return [predict(safe_split(X, indices=t, axis=0)[0]) for t in test]
    X_test, _ = safe_split(X, indices=test, axis=0)
    return predict(X_test)
def default_asset_names(n_assets: int) -> np.ndarray:
    """Default asset names are `["x0", "x1", ..., "x(n_assets - 1)"]`.

    Parameters
    ----------
    n_assets : int
        Number of assets.

    Returns
    -------
    asset_names : ndarray of str
        Default assets names.
    """
    names = ["x" + str(idx) for idx in range(n_assets)]
    return np.asarray(names, dtype=object)
def deduplicate_names(names: npt.ArrayLike) -> list[str]:
    """Rename duplicated names by appending "_{duplicate_nb}" at the end.

    This function is inspired by the pandas function `_maybe_dedup_names`.

    Parameters
    ----------
    names : array-like of shape (n_names,)
        List of names.

    Returns
    -------
    names : list[str]
        Deduplicate names.
    """
    result = list(names)
    seen = {}
    for idx, label in enumerate(result):
        occurrences = seen.get(label, 0)
        # First occurrence keeps its name; later ones get a numeric suffix.
        if occurrences:
            result[idx] = f"{label}_{occurrences}"
        seen[label] = occurrences + 1
    return result
|
evocodebench_data_72
|
"""Tools module"""
# Copyright (c) 2023
# Author: Hugo Delatte <delatte.hugo@gmail.com>
# License: BSD 3 clause
# Implementation derived from:
# Riskfolio-Lib, Copyright (c) 2020-2023, Dany Cajas, Licensed under BSD 3 clause.
# Statsmodels, Copyright (C) 2006, Jonathan E. Taylor, Licensed under BSD 3 clause.
from enum import auto
import numpy as np
import scipy.cluster.hierarchy as sch
import scipy.optimize as sco
import scipy.spatial.distance as scd
import scipy.special as scs
from scipy.sparse import csr_matrix
from skfolio.utils.tools import AutoEnum
# Explicit public API of this module.
__all__ = [
    "NBinsMethod",
    "n_bins_freedman",
    "n_bins_knuth",
    "is_cholesky_dec",
    "assert_is_square",
    "assert_is_symmetric",
    "assert_is_distance",
    "cov_nearest",
    "cov_to_corr",
    "corr_to_cov",
    "commutation_matrix",
    "compute_optimal_n_clusters",
    "rand_weights",
    "rand_weights_dirichlet",
]
class NBinsMethod(AutoEnum):
    """Enumeration of the Number of Bins Methods.

    Members
    -------
    FREEDMAN : str
        Freedman-Diaconis rule (see `n_bins_freedman`).
    KNUTH : str
        Knuth's rule (see `n_bins_knuth`).
    """

    # Values are auto-assigned by AutoEnum.
    FREEDMAN = auto()
    KNUTH = auto()
def n_bins_freedman(x: np.ndarray) -> int:
    """Compute the optimal histogram bin size using the Freedman-Diaconis rule [1]_.

    Parameters
    ----------
    x : ndarray of shape (n_observations,)
        The input array.

    Returns
    -------
    n_bins : int
        The optimal bin size.

    Raises
    ------
    ValueError
        If `x` is not one-dimensional.

    References
    ----------
    .. [1] "On the histogram as a density estimator: L2 theory".
        Freedman & Diaconis (1981).
    """
    if x.ndim != 1:
        raise ValueError("`x` must be a 1d-array")
    n_obs = len(x)
    q1, q3 = np.percentile(x, [25, 75])
    # Bin width: twice the IQR divided by the cube root of the sample size.
    bin_width = 2 * (q3 - q1) / n_obs ** (1 / 3)
    # Degenerate IQR (e.g. constant data): fall back to 5 bins.
    if bin_width == 0:
        return 5
    span = np.max(x) - np.min(x)
    return int(round(max(1, np.ceil(span / bin_width))))
def n_bins_knuth(x: np.ndarray) -> int:
    """Compute the optimal histogram bin size using Knuth's rule [1]_.

    Parameters
    ----------
    x : ndarray of shape (n_observations,)
        The input array.

    Returns
    -------
    n_bins : int
        The optimal bin size.

    References
    ----------
    .. [1] "Optimal Data-Based Binning for Histograms".
        Knuth.
    """
    x = np.sort(x)
    n = len(x)

    def neg_log_posterior(m: float):
        # Negative log-posterior of the bin count under Knuth's Bayesian model;
        # minimized below with scipy's Nelder-Mead (`fmin`).
        m = m[0]
        if m <= 0:
            return np.inf
        edges = np.linspace(x[0], x[-1], int(m) + 1)
        counts, _ = np.histogram(x, edges)
        return -(
            n * np.log(m)
            + scs.gammaln(0.5 * m)
            - m * scs.gammaln(0.5)
            - scs.gammaln(n + 0.5 * m)
            + np.sum(scs.gammaln(counts + 0.5))
        )

    # Seed the optimizer with the Freedman-Diaconis estimate.
    start = n_bins_freedman(x)
    best = sco.fmin(neg_log_posterior, start, disp=0)[0]
    return int(round(best))
def rand_weights_dirichlet(n: int) -> np.array:
    """Produce n random weights that sum to one, drawn from a Dirichlet
    distribution (uniform distribution over the simplex).

    Parameters
    ----------
    n : int
        Number of weights.

    Returns
    -------
    weights : ndarray of shape (n, )
        The vector of weights.
    """
    # Flat Dirichlet: all concentration parameters equal to one.
    alphas = np.ones(n)
    return np.random.dirichlet(alphas)
def rand_weights(n: int, zeros: int = 0) -> np.array:
    """Produce n random weights that sum to one from a uniform distribution
    (non-uniform distribution over the simplex).

    Parameters
    ----------
    n : int
        Number of weights.
    zeros : int, default=0
        The number of weights to randomly set to zeros.

    Returns
    -------
    weights : ndarray of shape (n, )
        The vector of weights.
    """
    weights = np.random.rand(n)
    if zeros > 0:
        # Zero out a random subset of positions (without replacement).
        off_idx = np.random.choice(n, zeros, replace=False)
        weights[off_idx] = 0
    return weights / sum(weights)
def is_cholesky_dec(x: np.ndarray) -> bool:
    """Return True if a Cholesky decomposition can be computed.

    The matrix must be Hermitian (symmetric if real-valued) and positive-definite.
    No checking is performed to verify whether the matrix is Hermitian or not.

    Parameters
    ----------
    x : ndarray of shape (n, m)
        The matrix.

    Returns
    -------
    value : bool
        True if Cholesky decomposition can be applied to the matrix, False otherwise.
    """
    # Around 100 times faster than checking for positive eigenvalues with np.linalg.eigh
    try:
        np.linalg.cholesky(x)
        return True
    # Bug fix: catch `np.linalg.LinAlgError` directly; the old
    # `np.linalg.linalg.LinAlgError` alias was removed in NumPy 2.0.
    except np.linalg.LinAlgError:
        return False
def is_positive_definite(x: np.ndarray) -> bool:
    """Return True if the matrix is positive definite.

    Parameters
    ----------
    x : ndarray of shape (n, m)
        The matrix.

    Returns
    -------
    value : bool
        True if the matrix is positive definite, False otherwise.
    """
    # Positive definite <=> every eigenvalue is strictly positive.
    eig_vals = np.linalg.eigvals(x)
    return np.all(eig_vals > 0)
def assert_is_square(x: np.ndarray) -> None:
    """Raise an error if the matrix is not square.

    Parameters
    ----------
    x : ndarray of shape (n, n)
        The matrix.

    Raises
    ------
    ValueError: if the matrix is not square.
    """
    is_square = x.ndim == 2 and x.shape[0] == x.shape[1]
    if not is_square:
        raise ValueError("The matrix must be square")
def assert_is_symmetric(x: np.ndarray) -> None:
    """Raise an error if the matrix is not symmetric.

    Parameters
    ----------
    x : ndarray of shape (n, m)
        The matrix.

    Raises
    ------
    ValueError: if the matrix is not symmetric.
    """
    # Symmetry only makes sense for a square matrix.
    assert_is_square(x)
    symmetric = np.allclose(x, x.T)
    if not symmetric:
        raise ValueError("The matrix must be symmetric")
def assert_is_distance(x: np.ndarray) -> None:
    """Raise an error if the matrix is not a distance matrix.

    Parameters
    ----------
    x : ndarray of shape (n, n)
        The matrix.

    Raises
    ------
    ValueError: if the matrix is not a distance matrix.
    """
    assert_is_symmetric(x)
    # A distance matrix has (near-)zero self-distances on its diagonal.
    diagonal_is_zero = np.allclose(np.diag(x), np.zeros(x.shape[0]), atol=1e-5)
    if not diagonal_is_zero:
        raise ValueError(
            "The distance matrix must have diagonal elements close to zeros"
        )
def cov_to_corr(cov: np.ndarray) -> tuple[np.ndarray, np.ndarray]:
    """Convert a covariance matrix to a correlation matrix.

    Parameters
    ----------
    cov : ndarray of shape (n, n)
        Covariance matrix.

    Returns
    -------
    corr, std : tuple[ndarray of shape (n, n), ndarray of shape (n, )]
        Correlation matrix and standard-deviation vector.
    """
    if cov.ndim != 2:
        raise ValueError(f"`cov` must be a 2D array, got a {cov.ndim}D array")
    # Standard deviations are the square roots of the variances (diagonal).
    std = np.sqrt(np.diag(cov))
    # Divide each entry by sigma_j (columns) then sigma_i (rows).
    corr = (cov / std) / std[:, None]
    return corr, std
def corr_to_cov(corr: np.ndarray, std: np.ndarray):
    """Convert a correlation matrix to a covariance matrix given its
    standard-deviation vector.

    Parameters
    ----------
    corr : ndarray of shape (n, n)
        Correlation matrix.
    std : ndarray of shape (n, )
        Standard-deviation vector.

    Returns
    -------
    cov : ndarray of shape (n, n)
        Covariance matrix.
    """
    if std.ndim != 1:
        raise ValueError(f"`std` must be a 1D array, got a {std.ndim}D array")
    if corr.ndim != 2:
        raise ValueError(f"`corr` must be a 2D array, got a {corr.ndim}D array")
    # Scale each entry by sigma_j (columns) and sigma_i (rows).
    return corr * std * std[:, None]
# Smallest eigenvalue kept by `cov_nearest` when clipping; per the comment in
# that function, values below 1e-13 can make scipy treat the matrix as non-PSD.
_CLIPPING_VALUE = 1e-13
def cov_nearest(cov: np.ndarray, higham: bool = False, higham_max_iteration: int = 100):
    """Compute the nearest covariance matrix that is positive definite and with a
    cholesky decomposition than can be computed. The variance is left unchanged.

    First, it converts the covariance matrix to a correlation matrix.
    Then, it finds the nearest correlation matrix and converts it back to a covariance
    matrix using the initial standard deviation.

    Cholesky decomposition can fail for symmetric positive definite (SPD) matrix due
    to floating point error and inversely, Cholesky decomposition can success for
    non-SPD matrix. Therefore, we need to test for both. We always start by testing
    for Cholesky decomposition which is significantly faster than checking for positive
    eigenvalues.

    Parameters
    ----------
    cov : ndarray of shape (n, n)
        Covariance matrix.
    higham : bool, default=False
        If this is set to True, the Higham & Nick (2002) algorithm [1]_ is used,
        otherwise the eigenvalues are clipped to threshold above zeros (1e-13).
        The default (`False`) is to use the clipping method as the Higham & Nick
        algorithm can be slow for large datasets.
    higham_max_iteration : int, default=100
        Maximum number of iteration of the Higham & Nick (2002) algorithm.
        The default value is `100`.

    Returns
    -------
    cov : ndarray
        The nearest covariance matrix.

    Raises
    ------
    ValueError
        If the input is not square/symmetric, or if the Higham algorithm does
        not reach a positive-definite matrix within `higham_max_iteration`.

    References
    ----------
    .. [1] "Computing the nearest correlation matrix - a problem from finance"
        IMA Journal of Numerical Analysis
        Higham & Nick (2002)
    """
    assert_is_square(cov)
    assert_is_symmetric(cov)
    # Around 100 times faster than checking eigenvalues with np.linalg.eigh
    if is_cholesky_dec(cov) and is_positive_definite(cov):
        return cov
    # Work in correlation space so the original variances can be restored.
    corr, std = cov_to_corr(cov)
    if higham:
        eps = np.finfo(np.float64).eps * 5
        # `diff` carries the correction from the previous projection step
        # (Dykstra-style correction used by Higham's alternating projections).
        diff = np.zeros(corr.shape)
        x = corr.copy()
        for _ in range(higham_max_iteration):
            x_adj = x - diff
            eig_vals, eig_vecs = np.linalg.eigh(x_adj)
            # Project onto the PSD cone by flooring eigenvalues at eps.
            x = eig_vecs * np.maximum(eig_vals, eps) @ eig_vecs.T
            diff = x - x_adj
            # Project back onto the set of correlation matrices (unit diagonal).
            np.fill_diagonal(x, 1)
            cov = corr_to_cov(x, std)
            if is_cholesky_dec(cov) and is_positive_definite(cov):
                break
        else:
            # for/else: only reached when the loop exhausted all iterations.
            raise ValueError("Unable to find the nearest positive definite matrix")
    else:
        eig_vals, eig_vecs = np.linalg.eigh(corr)
        # Clipping the eigenvalues with a value smaller than 1e-13 can cause scipy to
        # consider the matrix non-psd is some corner cases (see test/test_stats.py)
        x = eig_vecs * np.maximum(eig_vals, _CLIPPING_VALUE) @ eig_vecs.T
        # Re-normalize to a proper correlation matrix, then restore variances.
        x, _ = cov_to_corr(x)
        cov = corr_to_cov(x, std)
    return cov
def commutation_matrix(x):
    """Compute the commutation matrix.

    Parameters
    ----------
    x : ndarray of shape (n, m)
        The matrix.

    Returns
    -------
    K : ndarray of shape (m * n, m * n)
        The commutation matrix (sparse CSR permutation matrix).
    """
    n_rows, n_cols = x.shape
    size = n_rows * n_cols
    row_idx = np.arange(size)
    # Column-major ("F") traversal gives the permutation mapping vec(A) to
    # vec(A.T).
    col_idx = row_idx.reshape((n_rows, n_cols), order="F").ravel()
    ones = np.ones(size, dtype=np.int8)
    return csr_matrix((ones, (row_idx, col_idx)), shape=(size, size))
def compute_optimal_n_clusters(distance: np.ndarray, linkage_matrix: np.ndarray) -> int:
    r"""Compute the optimal number of clusters based on Two-Order Difference to Gap
    Statistic [1]_.

    The Two-Order Difference to Gap Statistic has been developed to improve the
    performance and stability of the Tibshiranis Gap statistic.
    It applies the two-order difference of the within-cluster dispersion to replace the
    reference null distribution in the Gap statistic.

    The number of cluster :math:`k` is determined by:

    .. math:: \begin{cases}
                \begin{aligned}
                &\max_{k} & & W_{k+2} + W_{k} - 2 W_{k+1} \\
                &\text{s.t.} & & 1 \ge c \ge max\bigl(8, \sqrt{n}\bigr) \\
                \end{aligned}
              \end{cases}

    with :math:`n` the sample size and :math:`W_{k}` the within-cluster dispersions
    defined as:

    .. math:: W_{k} = \sum_{i=1}^{k} \frac{D_{i}}{2|C_{i}|}

    where :math:`|C_{i}|` is the cardinality of cluster :math:`i` and :math:`D_{i}` its
    density defined as:

    .. math:: D_{i} = \sum_{u \in C_{i}} \sum_{v \in C_{i}} d(u,v)

    with :math:`d(u,v)` the distance between u and v.

    Parameters
    ----------
    distance : ndarray of shape (n, n)
        Distance matrix.
    linkage_matrix : ndarray of shape (n - 1, 4)
        Linkage matrix.

    Returns
    -------
    value : int
        Optimal number of clusters.

    References
    ----------
    .. [1] "Application of two-order difference to gap statistic".
        Yue, Wang & Wei (2009)
    """
    # Column j of cut_tree holds cluster labels when the tree is cut at level j.
    cut_tree = sch.cut_tree(linkage_matrix)
    n = cut_tree.shape[1]
    max_clusters = max(8, round(np.sqrt(n)))
    dispersion = []
    for k in range(max_clusters):
        # Column n-k-1 corresponds to a cut producing k+1 clusters.
        level = cut_tree[:, n - k - 1]
        cluster_density = []
        for i in range(np.max(level) + 1):
            cluster_idx = np.argwhere(level == i).flatten()
            # Condensed pairwise distances within cluster i.
            cluster_dists = scd.squareform(
                distance[cluster_idx, :][:, cluster_idx], checks=False
            )
            # Singleton clusters yield an empty condensed form and are skipped.
            if cluster_dists.shape[0] != 0:
                cluster_density.append(np.nan_to_num(cluster_dists.mean()))
        dispersion.append(np.sum(cluster_density))
    dispersion = np.array(dispersion)
    # Second-order difference W_{k+2} + W_k - 2*W_{k+1}; the last two entries
    # have no valid forward terms and are dropped.
    gaps = np.roll(dispersion, -2) + dispersion - 2 * np.roll(dispersion, -1)
    gaps = gaps[:-2]
    # k=0 represents one cluster
    k = np.argmax(gaps) + 2
    return k
|
evocodebench_data_73
|
# coding=utf-8
# Copyright 2023 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tools for manipulating coordinate spaces and distances along rays."""
from internal import geopoly
from internal import math
import jax
from jax import random
import jax.numpy as jnp
import numpy as np
def contract(x):
  """Contracts points towards the origin (Eq 10 of arxiv.org/abs/2111.12077)."""
  # Clamping to 1 leaves points inside the unit ball unchanged (scale == 1).
  sq_norm = jnp.maximum(1, jnp.sum(x**2, axis=-1, keepdims=True))
  factor = (2 * jnp.sqrt(sq_norm) - 1) / sq_norm
  return factor * x
def inv_contract(z):
  """The inverse of contract()."""
  # Clamping to 1 leaves points inside the unit ball unchanged.
  sq_norm = jnp.maximum(1, jnp.sum(z**2, axis=-1, keepdims=True))
  denom = 2 * jnp.sqrt(sq_norm) - sq_norm
  return z / denom
def track_linearize(fn, mean, cov):
  """Apply function `fn` to a set of means and covariances, ala a Kalman filter.

  We can analytically transform a Gaussian parameterized by `mean` and `cov`
  with a function `fn` by linearizing `fn` around `mean`, and taking advantage
  of the fact that Covar[Ax + y] = A(Covar[x])A^T (see
  https://cs.nyu.edu/~roweis/notes/gaussid.pdf for details).

  Args:
    fn: A function that can be applied to `mean`.
    mean: a tensor of Gaussian means, where the last axis is the dimension.
    cov: a tensor of covariances, where the last two axes are the dimensions.

  Returns:
    fn_mean: the transformed means.
    fn_cov: the transformed covariances.
  """
  if len(cov.shape) != len(mean.shape) + 1:
    raise ValueError('cov must be non-diagonal')
  fn_mean, lin_fn = jax.linearize(fn, mean)
  # Apply the Jacobian on both sides: J @ cov @ J^T.
  apply_jac = jax.vmap(lin_fn, -1, -2)
  fn_cov = apply_jac(apply_jac(cov))
  return fn_mean, fn_cov
def track_isotropic(fn, mean, scale):
  """Apply function `fn` to a set of means and scales, ala a Kalman filter.

  This is the isotropic or scalar equivalent of track_linearize, as we're still
  linearizing a function and tracking a Gaussian through it, but the input and
  output Gaussians are all isotropic and are only represented with a single
  `scale` value (where `scale**2` is the variance of the Gaussian).

  Args:
    fn: A function that can be applied to `mean`.
    mean: a tensor of Gaussian means, where the last axis is the dimension.
    scale: a tensor of scales, with the same shape as means[..., -1], or None
      (in which case no scale is tracked and None is returned for it).

  Returns:
    fn_mean: the transformed means.
    fn_scale: the transformed scales, or None if `scale` is None.
  """
  # Bug fix: only validate shapes when a scale is actually provided. The body
  # below explicitly supports scale=None, but the unguarded check used to
  # crash on `None.shape` with an AttributeError.
  if scale is not None and mean.shape[:-1] != scale.shape:
    raise ValueError(
        f'mean.shape[:-1] {mean.shape}[:-1] != scale.shape {scale.shape}.'
    )
  d = mean.shape[-1]
  fn_mean, lin_fn = jax.linearize(fn, mean)
  if scale is not None:
    # Compute the Jacobian of fn function at the locations of each mean.
    jac = jax.vmap(lin_fn, in_axes=-1, out_axes=-1)(
        jnp.broadcast_to(jnp.eye(d), mean.shape + (d,))
    )
    # The cube root of the determinant of the Jacobian is the geometric mean
    # of the eigenvalues of the Jacobian, which gives us the isotropic scaling
    # implied by `fn` at each mean that `scale` should be multiplied by.
    eps = jnp.finfo(jnp.float32).tiny  # Guard against an inf gradient at 0.
    abs_det = jnp.maximum(eps, jnp.abs(jnp.linalg.det(jac)))
    # Special case d == 3 for speed's sake.
    fn_scale = scale * (jnp.cbrt(abs_det) if d == 3 else abs_det ** (1 / d))
  else:
    fn_scale = None
  return fn_mean, fn_scale
def contract3_isoscale(x):
  """A fast version of track_isotropic(contract, *)'s scaling for 3D inputs."""
  if x.shape[-1] != 3:
    raise ValueError(f'Inputs must be 3D, are {x.shape[-1]}D.')
  sq_norm = jnp.maximum(1, jnp.sum(x**2, axis=-1))
  # cbrt((2 * sqrt(sq_norm) - 1) ** 2) / sq_norm, computed in log space.
  log_scale = (2 / 3) * jnp.log(2 * jnp.sqrt(sq_norm) - 1) - jnp.log(sq_norm)
  return jnp.exp(log_scale)
def construct_ray_warps(fn, t_near, t_far, *, fn_inv=None):
  """Construct a bijection between metric distances and normalized distances.

  See the text around Equation 11 in https://arxiv.org/abs/2111.12077 for a
  detailed explanation.

  Args:
    fn: the function to ray distances.
    t_near: a tensor of near-plane distances.
    t_far: a tensor of far-plane distances.
    fn_inv: Optional, if not None then it's used as the inverse of fn().

  Returns:
    t_to_s: a function that maps distances to normalized distances in [0, 1].
    s_to_t: the inverse of t_to_s.
  """
  if fn is None:
    fwd = lambda x: x
    inv = lambda x: x
  else:
    fwd = fn
    if fn_inv is not None:
      inv = fn_inv
    else:
      # A simple mapping from some functions to their inverse.
      known_inverses = {
          'reciprocal': jnp.reciprocal,
          'log': jnp.exp,
          'exp': jnp.log,
          'sqrt': jnp.square,
          'square': jnp.sqrt,
      }
      inv = known_inverses[fn.__name__]
  s_near, s_far = [fwd(t) for t in (t_near, t_far)]
  # Forcibly clip t to the range of valid values, to guard against inf's.
  clamp = lambda t: jnp.clip(t, t_near, t_far)
  t_to_s = lambda t: (fwd(clamp(t)) - s_near) / (s_far - s_near)
  s_to_t = lambda s: clamp(inv(s * s_far + (1 - s) * s_near))
  return t_to_s, s_to_t
def expected_sin(mean, var):
  """Compute the mean of sin(x), x ~ N(mean, var)."""
  # Larger variance damps the expectation toward zero.
  damping = jnp.exp(-0.5 * var)
  return damping * math.safe_sin(mean)
def integrated_pos_enc(mean, var, min_deg, max_deg):
  """Encode `x` with sinusoids scaled by 2^[min_deg, max_deg).

  Args:
    mean: tensor, the mean coordinates to be encoded
    var: tensor, the variance of the coordinates to be encoded.
    min_deg: int, the min degree of the encoding.
    max_deg: int, the max degree of the encoding.

  Returns:
    encoded: jnp.ndarray, encoded variables.
  """
  freqs = 2.0 ** jnp.arange(min_deg, max_deg)
  out_shape = mean.shape[:-1] + (-1,)
  mean_scaled = jnp.reshape(mean[Ellipsis, None, :] * freqs[:, None], out_shape)
  var_scaled = jnp.reshape(var[Ellipsis, None, :] * freqs[:, None] ** 2, out_shape)
  # Evaluate sin at the scaled means and at a pi/2 phase shift (i.e. cos) in
  # one expected_sin call.
  return expected_sin(
      jnp.concatenate([mean_scaled, mean_scaled + 0.5 * jnp.pi], axis=-1),
      jnp.concatenate([var_scaled] * 2, axis=-1),
  )
def lift_and_diagonalize(mean, cov, basis):
  """Project `mean` and `cov` onto basis and diagonalize the projected cov."""
  lifted_mean = math.matmul(mean, basis)
  # Diagonal of B^T C B, computed without forming the full matrix.
  lifted_cov_diag = jnp.sum(basis * math.matmul(cov, basis), axis=-2)
  return lifted_mean, lifted_cov_diag
def pos_enc(x, min_deg, max_deg, append_identity=True):
  """The positional encoding used by the original NeRF paper."""
  freqs = 2.0 ** jnp.arange(min_deg, max_deg)
  target_shape = x.shape[:-1] + (-1,)
  scaled = jnp.reshape(x[Ellipsis, None, :] * freqs[:, None], target_shape)
  # Note that we're not using safe_sin, unlike IPE.
  # sin at the scaled values plus a pi/2 phase shift (cosines): (..., 2*s*c).
  features = jnp.sin(jnp.concatenate([scaled, scaled + 0.5 * jnp.pi], axis=-1))
  if append_identity:
    features = jnp.concatenate([x, features], axis=-1)
  return features
def sqrtm(mat, return_eigs=False):
  """Take the matrix square root of a PSD matrix [..., d, d]."""
  eigvec, eigval = jax.lax.linalg.eigh(
      mat, symmetrize_input=False, sort_eigenvalues=False
  )
  # Reconstruct V * sqrt(L) * V^T.
  scale = math.safe_sqrt(eigval)[Ellipsis, None, :]
  root = math.matmul(eigvec * scale, jnp.moveaxis(eigvec, -2, -1))
  if return_eigs:
    return root, (eigvec, eigval)
  return root
def isotropize(cov, mode='accurate'):
  """Turn covariances into isotropic covariances with the same determinant."""
  d = cov.shape[-1]
  # 1x1 covariances are already isotropic.
  if d == 1:
    return cov
  if mode == 'fast':
    det = jnp.linalg.det(cov)
    diag_val = det ** (1 / d)
    is_invalid = (det <= jnp.finfo(jnp.float32).tiny) | ~jnp.isfinite(det)
  elif mode == 'accurate':
    # Log-determinant is numerically safer than det for small values.
    log_det = jnp.linalg.slogdet(cov)[1]
    diag_val = jnp.exp(log_det / d)
    is_invalid = ~jnp.isfinite(log_det)
  else:
    raise ValueError(f'mode={mode} not implemented.')
  cov_iso = jnp.eye(d) * diag_val[Ellipsis, None, None]
  # Guard against NaN outputs when `det` is super small. Note that this does
  # not guard against NaN gradients!
  cov_iso = jnp.where(is_invalid[Ellipsis, None, None], jnp.zeros_like(cov), cov_iso)
  return cov_iso
def construct_perp_basis(directions):
  """Construct a perpendicular basis for each 3-vector in `directions`."""
  if directions.shape[-1] != 3:
    raise ValueError(f'directions must be 3D, but is {directions.shape[-1]}D')
  # First perpendicular axis: cross with z-hat, falling back to (1,1,1) in the
  # rare case where `directions` is (numerically) parallel to z-hat.
  axis_a = jnp.cross(directions, np.array([0.0, 0.0, 1.0]))
  axis_b = jnp.cross(directions, np.array([1.0, 1.0, 1.0]))
  degenerate = jnp.all(jnp.abs(axis_a) < np.finfo(np.float32).eps, axis=-1)
  first = jnp.where(degenerate[Ellipsis, None], axis_b, axis_a)
  # Second perpendicular axis completes the basis.
  second = jnp.cross(directions, first)
  unit = lambda v: v / jnp.sqrt(jnp.sum(v**2, axis=-1, keepdims=True))
  return unit(first), unit(second)
def hexify(rng, *, origins, directions, radii, tdist):
  """Produce hexagon-shaped samples from ray segments.

  Args:
    rng: a JAX PRNG key, or None for deterministic sampling.
    origins: ray origins; broadcast against `directions`.
    directions: ray directions (3-vectors; see construct_perp_basis).
    radii: per-ray radii used to scale the perpendicular offsets.
    tdist: tensor of t-interval edges along each ray; consecutive pairs define
      the segments that get sampled.

  Returns:
    control: the multisample control points in world coordinates.
    perp_mag: the perpendicular offset magnitude of each control point.
  """
  # Construct a base set of angles, by linspacing [0, 2pi] in a specific order.
  # This is one of two orderings of angles that doesn't induce any anisotropy
  # into the sample covariance of the multisample coordinates. Any rotation and
  # mirroring along the z-axis of this ordering is also valid.
  # There exists one alternative valid ordering, which is [0, 3, 2, 5, 4, 1].
  # This seems to work less well though likely because of the strong correlation
  # between adjacent angles.
  thetas = (np.pi / 3) * np.array([0, 2, 4, 3, 5, 1])
  # Lift the angles to the size of the rays.
  sz = tdist.shape[:-1] + (tdist.shape[-1] - 1, len(thetas))
  thetas = jnp.broadcast_to(thetas, sz)
  if rng is not None:
    # Randomly reverse the order of half of the hexes.
    key, rng = random.split(rng)
    flip = random.bernoulli(key, shape=sz[:-1])
    thetas = jnp.where(flip[Ellipsis, None], thetas[Ellipsis, ::-1], thetas)
    # Rotate each hex by some random amount.
    key, rng = random.split(rng)
    thetas += (2 * jnp.pi) * random.uniform(key, shape=sz[:-1])[Ellipsis, None]
  else:
    # If we're deterministic, flip and shift every other hex by 30 degrees.
    flip = jnp.arange(thetas.shape[-2]) % 2
    thetas = jnp.where(flip[Ellipsis, None], thetas[Ellipsis, ::-1], thetas)
    thetas += (flip * jnp.pi / 6)[Ellipsis, None]
  # TODO(barron): Plumb through the dx/dy frame for the original ray in the
  # image plane, to avoid the need of this.
  perp_axis1, perp_axis2 = construct_perp_basis(directions)
  # Grab each t-interval's midpoint and half-width.
  t0, t1 = tdist[Ellipsis, :-1], tdist[Ellipsis, 1:]
  s = (t0 + t1) / 2
  d = (t1 - t0) / 2
  # Compute the length along the ray for each multisample, using mip-NeRF math.
  cz = t0[Ellipsis, None] + math.safe_div(d, (d**2 + 3 * s**2))[Ellipsis, None] * (
      (t1**2 + 2 * s**2)[Ellipsis, None]
      + (3 / np.sqrt(7))
      * (np.arange(6) * (2 / 5) - 1)
      * math.safe_sqrt(((d**2 - s**2) ** 2 + 4 * s**4))[Ellipsis, None]
  )
  # Compute the offset from the ray for each multisample.
  perp_mag = jnp.sqrt(0.5) * radii[Ellipsis, None, :] * cz
  # Go from ray coordinate to world coordinates.
  cx = perp_mag * jnp.cos(thetas)
  cy = perp_mag * jnp.sin(thetas)
  control = (
      origins[Ellipsis, None, None, :]
      + perp_axis1[Ellipsis, None, None, :] * cx[Ellipsis, None]
      + perp_axis2[Ellipsis, None, None, :] * cy[Ellipsis, None]
      + directions[Ellipsis, None, None, :] * cz[Ellipsis, None]
  )
  return control, perp_mag
def unscented_transform(mean, cov, basis, axis=0):
  """Construct "sigma points" along `axis` from each mean and covariance.

  Args:
    mean: tensor of Gaussian means, last axis is the dimension.
    cov: tensor of covariances, last two axes are the dimensions.
    basis: string naming the sigma-point scheme: 'mean', 'random_<k>',
      '<polyhedron>_<tesselation>' (tetrahedron/icosahedron/octahedron, 3D
      only), 'julier', or 'menegaz'.
    axis: the axis along which control points are stacked.

  Returns:
    control: the sigma points, expanded along `axis`.

  Raises:
    ValueError: if `basis` is unrecognized, or a polyhedron basis is requested
      for non-3D input.
  """
  d = cov.shape[-1]
  mean_ex = jnp.expand_dims(mean, axis)
  if basis == 'mean':
    # This effectively disables the unscented transform.
    return mean_ex
  if basis.startswith('random_'):
    num_random = int(basis.split('_')[-1])
    # TODO(barron): use a non-fixed random seed?
    noise = random.multivariate_normal(
        random.PRNGKey(0),
        jnp.zeros_like(mean),
        cov,
        (num_random,) + mean.shape[:-1],
    )
    control = mean_ex + jnp.moveaxis(jnp.nan_to_num(noise), 0, axis)
    return control
  # All remaining schemes offset the mean by sqrtm(cov) @ transform.
  sqrtm_cov = sqrtm(cov)
  if any([
      basis.startswith(x) for x in ['tetrahedron', 'icosahedron', 'octahedron']
  ]):
    # Use tessellated regular polyhedra vertices (and vec(0)) as control points.
    if d != 3:
      raise ValueError(f'Input is {d}D, but polyhedra are only defined for 3D.')
    base_shape, angular_tesselation = basis.split('_')
    transform = geopoly.generate_basis(
        base_shape, int(angular_tesselation), remove_symmetries=False
    ).T
    transform1 = np.concatenate([np.zeros((d, 1)), transform], axis=-1)
    # Normalize columns so each has unit RMS.
    transform1 /= np.sqrt(np.mean(transform1**2, axis=1))[:, None]
    control = mean_ex + jnp.moveaxis(
        math.matmul(sqrtm_cov, transform1), -1, axis
    )
  elif basis == 'julier':
    # The most basic symmetric unscented transformation from the original paper,
    # which yields 2*d+1 control points.
    offsets = np.sqrt(d + 0.5) * jnp.moveaxis(sqrtm_cov, -1, axis)
    control = jnp.concatenate(
        [mean_ex, mean_ex + offsets, mean_ex - offsets], axis=axis
    )
  elif basis == 'menegaz':
    # A compact unscented transformation from
    # folk.ntnu.no/skoge/prost/proceedings/cdc-ecc-2011/data/papers/2263.pdf
    # which yields d+1 control points.
    if d == 3:
      # A hand-optimized version of the d==3 case.
      sqrtm_cov_sum = jnp.sum(sqrtm_cov, axis=-1, keepdims=True)
      offsets = jnp.concatenate(
          [-sqrtm_cov_sum, 2 * sqrtm_cov - sqrtm_cov_sum / 3], axis=-1
      )
      control = mean_ex + jnp.moveaxis(offsets, -1, axis)
    else:
      transform = np.sqrt(d + 1) * np.eye(d) + (1 - np.sqrt(d + 1)) / d
      # == sqrt((d+1)) * sqrtm(eye(d) - 1/(d+1))
      transform1 = np.concatenate([-np.ones((d, 1)), transform], axis=-1)
      control = mean_ex + jnp.moveaxis(
          math.matmul(sqrtm_cov, transform1), -1, axis
      )
  else:
    raise ValueError(f'basis={basis} not implemented.')
  return control
def compute_control_points(
    means,
    covs,
    rays,
    tdist,
    rng,
    unscented_mip_basis,
    unscented_scale_mult,
):
  """Wrapper to compute unscented control points for the MLP class."""
  if unscented_mip_basis == 'hexify':
    # hexify returns the (control, perp_mag) pair directly.
    return hexify(
        rng,
        origins=rays.origins,
        directions=rays.directions,
        radii=rays.radii,
        tdist=tdist,
    )
  # Use a normal unscented transformation, sigma points stacked on axis -2.
  control = unscented_transform(
      means,
      covs,
      basis=unscented_mip_basis,
      axis=-2,
  )
  if unscented_scale_mult > 0:
    if rays is None:
      raise SyntaxError(
          'Rays are required as input if unscented_scale_mult > 0.'
      )
    # Mimic the math used by hexify to produce comparable scales: project each
    # control point back onto the ray to recover its distance along the ray.
    t_recon = jnp.sum(
        (control - rays.origins[Ellipsis, None, None, :])
        * rays.directions[Ellipsis, None, None, :],
        axis=-1,
    )
    perp_mag = jnp.sqrt(0.5) * rays.radii[Ellipsis, None, :] * t_recon
  else:
    perp_mag = None
  return control, perp_mag
|
evocodebench_data_74
|
from typing import Any, Callable, Dict, Iterable, List
import numpy as np
from pydantic import fields
# ----- validators -----
def is_odd(cls: type, v: int, field: fields.ModelField) -> int:
    """Validate that a kernel-size value is an odd number.

    Args:
        cls (type): Class type.
        v (int): Value to check.
        field (fields.ModelField): Field descriptor.

    Raises:
        ValueError: If `v` is even.

    Returns:
        int: `v` sent for further processing.
    """
    if not v % 2:
        raise ValueError(f"{cls.__name__}: {field.name} must be odd numbers.")
    return v
def is_binary(cls: type, v: np.ndarray, field: fields.ModelField) -> np.ndarray:
    """Validate that an array holds only boolean values, i.e. is binary.

    Args:
        cls (type): Class type.
        v (np.ndarray): Value to check.
        field (fields.ModelField): Field descriptor.

    Raises:
        ValueError: If the array's dtype is not bool.

    Returns:
        np.ndarray: `v` sent for further processing.
    """
    if v.dtype == np.dtype("bool"):
        return v
    raise ValueError(f"{cls.__name__}: {field.name} must be binary. got dtype {v.dtype}")
def is_list_of_points(cls: type, v: np.ndarray, field: fields.ModelField) -> np.ndarray:
    """Validate that an array is a list of 2D points, i.e. has shape (_, 2).

    Args:
        cls (type): Class type.
        v (np.ndarray): Value to check.
        field (fields.ModelField): Field descriptor.

    Raises:
        ValueError: If the array is not a (_, 2) array of 2D points.

    Returns:
        np.ndarray: `v` sent for further processing.
    """
    # Short-circuit: only inspect the second axis when the array is 2D.
    if v.ndim == 2 and v.shape[1] == 2:
        return v
    raise ValueError(f"{cls.__name__}: {field.name} must have shape (_, 2).")
def is_not_empty(cls: type, v: List[Any], field: fields.ModelField) -> List[Any]:
    """Validate that a list is not empty.

    Args:
        cls (type): Class type.
        v (List[Any]): Value to check.
        field (fields.ModelField): Field descriptor.

    Raises:
        ValueError: If the list is empty.

    Returns:
        List[Any]: `v` sent for further processing.
    """
    if not len(v):
        raise ValueError(f"{cls.__name__}: {field.name} list cannot be empty.")
    return v
def is_not_zero_sum(cls: type, v: Any, field: fields.ModelField) -> Any:
    """Validate that the values do not sum to zero.

    Args:
        cls (type): Class type.
        v (Any): Value to check.
        field (fields.ModelField): Field descriptor.

    Raises:
        ValueError: If `v` sums to 0.

    Returns:
        Any: `v` sent for further processing.
    """
    total = np.sum(v)
    if total == 0:
        raise ValueError(f"{cls.__name__}: {field.name} sum cannot be zero.")
    return v
def are_all_positive(cls: type, v: Any, field: fields.ModelField) -> Any:
    """Validate that all values are positive (non-negative, zero is accepted).

    Args:
        cls (type): Class type.
        v (Any): Scalar or iterable of values to check.
        field (fields.ModelField): Field descriptor.

    Raises:
        ValueError: If any value is negative.

    Returns:
        Any: `v` sent for further processing.
    """
    if isinstance(v, Iterable):
        # `>= 0` (rather than `< 0`) so that NaN values also fail validation.
        if not all(value >= 0 for value in v):
            raise ValueError(f"{cls.__name__}: all {field.name} must be positive. Received {v}")
    elif v < 0.0:
        raise ValueError(f"{cls.__name__}: {field.name} must be positive. Received {v}")
    return v
def to_dtype_float32(cls: type, v: np.ndarray, field: fields.ModelField) -> np.ndarray:
    """Cast the input array to dtype np.float32.

    Args:
        cls (type): Class type.
        v (np.ndarray): Value to convert.
        field (fields.ModelField): Field descriptor.

    Returns:
        np.ndarray: The float32 copy of `v` sent for further processing.
    """
    converted = v.astype(np.float32)
    return converted
# ----- root_validators -----
def is_valid_bbox(cls: type, values: Dict[str, float]) -> Dict[str, float]:
    """Check that the bounding box is valid (min strictly below max on both axes)."""
    x_min, x_max = values["x_min"], values["x_max"]
    y_min, y_max = values["y_min"], values["y_max"]
    if x_min >= x_max or y_min >= y_max:
        raise ValueError(
            f'{cls.__name__}: invalid bbox. x_min={x_min}, x_max={x_max},'
            f' y_min={y_min}, y_max={y_max}'
        )
    return values
# ----- parametrized validators -----
def is_array_n_dimensions(nb_dimensions: int) -> Callable:
    """Create a pydantic validator checking if an array is n-dimensional.

    Args:
        nb_dimensions (int): number of dimensions the array must have

    Returns:
        Callable: the validator.
    """
    def validator(cls: type, v: np.ndarray, field: fields.ModelField) -> np.ndarray:
        """Check if the array has the right number of dimensions."""
        # Empty (0,) arrays are accepted as a stand-in for 0-dimensional data.
        empty_zero_dim = v.shape == (0,) and nb_dimensions == 0
        if len(v.shape) != nb_dimensions and not empty_zero_dim:
            raise ValueError(
                f"{cls.__name__}: wrong number of dimensions for {field.name}. "
                f"Expected {nb_dimensions}, got {len(v.shape)}"
            )
        return v
    return validator
# ----- parametrized root_validators -----
def are_lengths_equal(field1: str, field2: str) -> Callable:
    """Create a pydantic root validator checking that two fields have equal length.

    Args:
        field1 (str): name of the first field
        field2 (str): name of the second field

    Returns:
        Callable: the validator.
    """
    def __root_validator(cls: type, values: Dict[str, List[Any]]) -> Dict[str, List[Any]]:
        """Check if len(field1) equals len(field2)."""
        len1 = len(values[field1])
        len2 = len(values[field2])
        if len1 != len2:
            raise ValueError(
                f"{cls.__name__}: {field1} and {field2} length mismatch, "
                f"resp. {len1} and {len2}"
            )
        return values
    return __root_validator
def are_shapes_equal(field1: str, field2: str) -> Callable:
    """Create a pydantic root validator checking that two array fields share a shape.

    Args:
        field1 (str): name of the first field
        field2 (str): name of the second field

    Returns:
        Callable: the validator.
    """
    def __root_validator(cls: type, values: Dict[str, np.ndarray]) -> Dict[str, np.ndarray]:
        """Check if field1.shape equals field2.shape."""
        if values[field1].shape == values[field2].shape:
            return values
        raise ValueError(f"{cls.__name__}: {field1} and {field2} shape mismatch.")
    return __root_validator
def are_all_shapes_equal(field1: str, field2: str) -> Callable:
    """Create a pydantic root validator for two lists of arrays with matching shapes.

    The generated validator checks that the two list fields have the same
    length and that their elements have identical shapes, position by position.

    Args:
        field1 (str): name of the first field
        field2 (str): name of the second field

    Returns:
        Callable: the validator.
    """
    def __root_validator(cls: type, values: Dict[str, np.ndarray]) -> Dict[str, np.ndarray]:
        """Check if len(field1) equals len(field2) and if every element has the same shape."""
        arrays1 = values[field1]
        arrays2 = values[field2]
        shapes1 = [a.shape for a in arrays1]
        shapes2 = [a.shape for a in arrays2]
        if len(arrays1) != len(arrays2) or shapes1 != shapes2:
            raise ValueError(
                f"{cls.__name__}: {field1} and {field2} shape mismatch, resp. {shapes1} and {shapes2}."
            )
        return values
    return __root_validator
|
evocodebench_data_75
|
from __future__ import annotations
from typing import TYPE_CHECKING
if TYPE_CHECKING:
from easyvolcap.utils.egl_utils import create_opengl_context, eglContextManager # must be imported before OpenGL.GL
from easyvolcap.runners.volumetric_video_viewer import VolumetricVideoViewer
import os
import sys
import glm
import torch
import ctypes
import numpy as np
from torch import nn
from enum import Enum, auto
from types import MethodType
from typing import Dict, Union, List
from glm import vec2, vec3, vec4, mat3, mat4, mat4x3, mat2x3 # This is actually highly optimized
from easyvolcap.utils.console_utils import *
from easyvolcap.utils.base_utils import dotdict
from easyvolcap.utils.viewer_utils import Camera
from easyvolcap.utils.bound_utils import get_bounds
from easyvolcap.utils.chunk_utils import multi_gather
from easyvolcap.utils.color_utils import cm_cpu_store
from easyvolcap.utils.ray_utils import create_meshgrid
from easyvolcap.utils.depth_utils import depth_curve_fn
from easyvolcap.utils.gaussian_utils import rgb2sh0, sh02rgb
from easyvolcap.utils.nerf_utils import volume_rendering, raw2alpha
from easyvolcap.utils.data_utils import load_pts, load_mesh, to_cuda, add_batch
from easyvolcap.utils.cuda_utils import CHECK_CUDART_ERROR, FORMAT_CUDART_ERROR
from easyvolcap.utils.net_utils import typed, torch_dtype_to_numpy_dtype, load_pretrained
from easyvolcap.utils.fcds_utils import prepare_feedback_transform, get_opencv_camera_params
# fmt: off
# Environment variable messaging
# Need to export EGL_DEVICE_ID before trying to import egl
# And we need to consider the case when we're performing distributed training
# from easyvolcap.engine import cfg, args # FIXME: GLOBAL IMPORTS
if 'easyvolcap.engine' in sys.modules and \
(sys.modules['easyvolcap.engine'].args.type != 'gui' or \
sys.modules['easyvolcap.engine'].cfg.viewer_cfg.type != 'VolumetricVideoViewer'): # FIXME: GLOBAL VARIABLES
try:
from easyvolcap.utils.egl_utils import create_opengl_context, eglContextManager
except Exception as e:
log(yellow(f'Could not import EGL related modules. {type(e).__name__}: {e}'))
os.environ['PYOPENGL_PLATFORM'] = ''
def is_wsl2():
    """Return a truthy value when running under WSL2, falsy otherwise.

    Truthiness combines the presence of /etc/wsl.conf with the
    WSL_DISTRO_NAME environment variable (None when unset).
    """
    has_wsl_conf = exists("/etc/wsl.conf")
    distro_name = os.environ.get("WSL_DISTRO_NAME")
    return has_wsl_conf and distro_name
if is_wsl2():
os.environ['PYOPENGL_PLATFORM'] = 'glx'
import OpenGL.GL as gl
try:
from OpenGL.GL import shaders
except Exception as e:
print(f'WARNING: OpenGL shaders import error encountered, please install the latest PyOpenGL from github using:')
print(f'pip install git+https://github.com/mcfletch/pyopengl')
raise e
# fmt: on
def linearize_depth(d, n: float, f: float):
    """Convert a [0, 1] depth-buffer value into linear view-space depth.

    The value is first remapped from [0, 1] to NDC [-1, 1], then inverted
    through the perspective depth mapping with near plane `n` and far
    plane `f`: d == 0 yields n, d == 1 yields f.
    """
    ndc = d * 2 - 1  # 0-1 -> -1,1 (ndc -> view)
    return (2.0 * n * f) / (f + n - ndc * (f - n))
def common_opengl_options():
    """Set up the OpenGL state shared by all renderers in this module.

    Enables program-controlled point size, back-face culling, depth testing
    (LEQUAL), scissor testing and point sprites, and clears the color and
    depth buffers. Must be called with a current GL context.
    """
    # Use program point size (gl_PointSize written by the vertex shader)
    gl.glEnable(gl.GL_PROGRAM_POINT_SIZE)

    # Performs face culling
    gl.glEnable(gl.GL_CULL_FACE)
    gl.glCullFace(gl.GL_BACK)

    # Performs alpha trans testing
    # GL_ALPHA_TEST is removed from core profiles, so tolerate the GL error
    # gl.glEnable(gl.GL_ALPHA_TEST)
    try: gl.glEnable(gl.GL_ALPHA_TEST)
    except gl.GLError as e: pass

    # Performs z-buffer testing
    gl.glEnable(gl.GL_DEPTH_TEST)
    # gl.glDepthMask(gl.GL_TRUE)
    gl.glDepthFunc(gl.GL_LEQUAL)
    # gl.glDepthRange(-1.0, 1.0)
    gl.glClear(gl.GL_COLOR_BUFFER_BIT | gl.GL_DEPTH_BUFFER_BIT)

    # Enable some masking tests
    gl.glEnable(gl.GL_SCISSOR_TEST)

    # Enable this to correctly render points
    # https://community.khronos.org/t/gl-point-sprite-gone-in-3-2/59310
    # GL_POINT_SPRITE is also gone from core profiles, hence the try/except
    # gl.glEnable(gl.GL_POINT_SPRITE) # MARK: ONLY SPRITE IS WORKING FOR NOW
    try: gl.glEnable(gl.GL_POINT_SPRITE) # MARK: ONLY SPRITE IS WORKING FOR NOW
    except gl.GLError as e: pass
    # gl.glEnable(gl.GL_POINT_SMOOTH) # MARK: ONLY SPRITE IS WORKING FOR NOW

    # # Configure how we store the pixels in memory for our subsequent reading of the FBO to store the rendering into memory.
    # # The second argument specifies that our pixels will be in bytes.
    # gl.glPixelStorei(gl.GL_PACK_ALIGNMENT, 1)
def load_shader_source(file: str = 'splat.frag'):
    """Return the GLSL source text of a shader file.

    Resolution order:
      1. `file` taken literally (absolute or cwd-relative path);
      2. a bare name looked up inside this module's ./shaders directory;
      3. the previous path with the 'shaders/' segment stripped
         (fallback for flattened installs).

    Raises:
        RuntimeError: If no candidate path exists.
    """
    # Ideally we can just specify the shader name instead of a full path
    if not exists(file):
        file = f'{dirname(__file__)}/shaders/{file}'
    if not exists(file):
        file = file.replace('shaders/', '')
    if not exists(file):
        raise RuntimeError(f'Shader file: {file} does not exist')
    with open(file, 'r') as f:
        return f.read()
def use_gl_program(program: Union[shaders.ShaderProgram, dict]):
    """Bind an OpenGL shader program, compiling it first when raw sources are given.

    Accepts either an already-linked program object, or a dict carrying
    `VERT_SHADER_SRC` / `FRAG_SHADER_SRC` GLSL strings to compile and link.
    """
    if isinstance(program, dict):
        # Recompile the program if the user supplied sources
        sources = dotdict(program)
        vert = shaders.compileShader(sources.VERT_SHADER_SRC, gl.GL_VERTEX_SHADER)
        frag = shaders.compileShader(sources.FRAG_SHADER_SRC, gl.GL_FRAGMENT_SHADER)
        program = shaders.compileProgram(vert, frag)
    return gl.glUseProgram(program)
class Mesh:
    """Debug-visualization mesh rendered through raw OpenGL buffers.

    Stores vertices/faces as torch tensors and mirrors them into a
    VAO/VBO/EBO for drawing as points, lines, triangles, quads or strips.
    """

    class RenderType(Enum):
        # Enum value doubles as the number of indices per primitive
        # (see `face_size`), so the numbering is load-bearing.
        POINTS = 1
        LINES = 2
        TRIS = 3
        QUADS = 4  # TODO: Support quad loading
        STRIPS = 5

    # Helper class to render a mesh on opengl
    # This implementation should only be used for debug visualization
    # Since no differentiable mechanism will be added
    # We recommend using nvdiffrast and pytorch3d's point renderer directly if you will to optimize these structures directly
    def __init__(self,
                 verts: torch.Tensor = torch.tensor([[0, 0, 0], [0, 1, 0], [0, 0, 1]]),  # need to call update after update
                 faces: torch.Tensor = torch.tensor([[0, 1, 2]]),  # need to call update after update
                 colors: torch.Tensor = None,
                 normals: torch.Tensor = None,
                 scalars: dotdict[str, torch.Tensor] = dotdict(),  # NOTE(review): mutable default, shared across instances if mutated — confirm intended
                 render_type: RenderType = RenderType.TRIS,

                 # Misc info
                 name: str = 'mesh',
                 filename: str = '',
                 visible: bool = True,

                 # Render options
                 shade_flat: bool = False,  # smooth shading
                 point_radius: float = 0.015,
                 render_normal: bool = False,

                 # Storage options
                 store_device: str = 'cpu',
                 compute_device: str = 'cuda',
                 vert_sizes=[3, 3, 3],  # pos + color + norm, floats per vertex attribute

                 # Init options
                 est_normal_thresh: int = 100000,

                 # Ignore unused input
                 **kwargs,
                 ) -> None:
        super().__init__()
        self.name = name
        self.visible = visible
        self.render_type = render_type

        self.shade_flat = shade_flat
        self.point_radius = point_radius
        self.render_normal = render_normal

        self.store_device = store_device
        self.compute_device = compute_device
        self.vert_sizes = vert_sizes

        self.est_normal_thresh = est_normal_thresh

        # Uniform and program
        self.compile_shaders()
        self.uniforms = dotdict()  # uniform values

        # Before initialization: buffer capacities grown lazily by resize_buffers
        self.max_verts = 0
        self.max_faces = 0

        # OpenGL data: either load from disk or from the provided tensors
        if filename: self.load_from_file(filename)
        else: self.load_from_data(verts, faces, colors, normals, scalars)

    def compile_shaders(self):
        """Compile and link the mesh and point shader programs (from ./shaders)."""
        try:
            self.mesh_program = shaders.compileProgram(
                shaders.compileShader(load_shader_source('mesh.vert'), gl.GL_VERTEX_SHADER),
                shaders.compileShader(load_shader_source('mesh.frag'), gl.GL_FRAGMENT_SHADER)
            )
            self.point_program = shaders.compileProgram(
                shaders.compileShader(load_shader_source('point.vert'), gl.GL_VERTEX_SHADER),
                shaders.compileShader(load_shader_source('point.frag'), gl.GL_FRAGMENT_SHADER)
            )
        except Exception as e:
            # Unescape GLSL compiler output so the error log is readable
            print(str(e).encode('utf-8').decode('unicode_escape'))
            raise e

    @property
    def n_verts_bytes(self):
        # Size of the interleaved vertex buffer (pos+color+norm) in bytes
        return len(self.verts) * self.vert_size * self.verts.element_size()

    @property
    def n_faces_bytes(self):
        # Size of the index buffer in bytes
        return len(self.faces) * self.face_size * self.faces.element_size()

    @property
    def verts_data(self):  # a heavy copy operation
        # Interleave position/color/normal into one contiguous float32 array
        verts = torch.cat([self.verts, self.colors, self.normals], dim=-1).ravel().numpy()  # MARK: Maybe sync
        verts = np.asarray(verts, dtype=np.float32, order='C')
        return verts

    @property
    def faces_data(self):  # a heavy copy operation
        faces = self.faces.ravel().numpy()  # N, 3
        faces = np.asarray(faces, dtype=np.uint32, order='C')
        return faces

    @property
    def face_size(self):
        # Number of indices per primitive, encoded in the RenderType value
        return self.render_type.value

    @property
    def vert_size(self):
        # Floats per interleaved vertex
        return sum(self.vert_sizes)

    def load_from_file(self, filename: str = 'assets/meshes/bunny.ply'):
        """Load mesh or point-cloud data from disk and upload it to the GL buffers."""
        verts, faces, colors, normals, scalars = self.load_data_from_file(filename)
        self.load_from_data(verts, faces, colors, normals, scalars)

    def load_data_from_file(self, filename: str = 'assets/meshes/bunny.ply'):
        """Read geometry from disk; falls back to point-cloud loading when no faces exist."""
        self.name = os.path.split(filename)[-1]
        verts, faces, colors, normals, scalars = None, None, None, None, None
        verts, faces = load_mesh(filename, device=self.store_device)
        if not len(faces):
            verts, colors, normals, scalars = load_pts(filename)
            self.render_type = Mesh.RenderType.POINTS
        else:
            # Infer primitive type from the number of indices per face
            self.render_type = Mesh.RenderType(faces.shape[-1])  # use value
        return verts, faces, colors, normals, scalars

    def load_from_data(self, verts: torch.Tensor, faces: torch.Tensor, colors: torch.Tensor = None, normals: torch.Tensor = None, scalars: dotdict[str, torch.Tensor] = dotdict()):
        """Store geometry tensors, fill in missing colors/normals, and sync GL buffers."""
        # Data type conversion: only float32 and float16 are kept as-is
        verts = torch.as_tensor(verts)  # convert to tensor if input is of other types
        if verts.dtype == torch.float32:
            pass  # supports this for now
        elif verts.dtype == torch.float16:
            pass  # supports this for now
        else:
            verts = verts.type(torch.float)  # convert to float32 if input is of higher precision
        gl_dtype = gl.GL_FLOAT if verts.dtype == torch.float else gl.GL_HALF_FLOAT
        self.vert_gl_types = [gl_dtype] * len(self.vert_sizes)

        # Prepare main mesh data: vertices and faces
        self.verts = torch.as_tensor(verts, device=self.store_device)
        self.faces = torch.as_tensor(faces, device=self.store_device, dtype=torch.int32)  # NOTE: No uint32 support

        # Prepare colors and normals
        if colors is not None:
            self.colors = torch.as_tensor(colors, device=self.store_device, dtype=self.verts.dtype)
        else:
            # Default color: position normalized into the mesh's bounding box
            bounds = get_bounds(self.verts[None])[0]
            self.colors = (self.verts - bounds[0]) / (bounds[1] - bounds[0])
        if normals is not None:
            self.normals = torch.as_tensor(normals, device=self.store_device, dtype=self.verts.dtype)
        else:
            self.estimate_vertex_normals()

        # Prepare other scalars
        if scalars is not None:
            for k, v in scalars.items():
                setattr(self, k, torch.as_tensor(v, device=self.store_device, dtype=self.verts.dtype))  # is this ok?

        # Prepare OpenGL related buffer
        self.update_gl_buffers()

    def estimate_vertex_normals(self):
        """Estimate per-vertex normals with pytorch3d; skipped (normals := verts) for
        half-precision data, unsupported primitive types, or very large meshes."""
        def est_pcd_norms():
            if self.verts.dtype == torch.half:
                self.normals = self.verts
            else:
                from pytorch3d.structures import Pointclouds, Meshes
                pcd = Pointclouds([self.verts]).to(self.compute_device)
                self.normals = pcd.estimate_normals()[0].cpu().to(self.verts.dtype)  # no batch dim

        def est_tri_norms():
            if self.verts.dtype == torch.half:
                self.normals = self.verts
            else:
                from pytorch3d.structures import Pointclouds, Meshes
                mesh = Meshes([self.verts], [self.faces]).to(self.compute_device)
                self.normals = mesh.verts_normals_packed().cpu().to(self.verts.dtype)  # no batch dim

        if not len(self.verts) > self.est_normal_thresh:
            if self.render_type == Mesh.RenderType.TRIS: est_tri_norms()
            elif self.render_type == Mesh.RenderType.POINTS: est_pcd_norms()
            else:
                # log(yellow(f'Unsupported mesh type: {self.render_type} for normal estimation, skipping'))
                self.normals = self.verts
        else:
            # log(yellow(f'Number of points for mesh too large: {len(self.verts)} > {self.est_normal_thresh}, skipping normal estimation'))
            self.normals = self.verts

    def offscreen_render(self, eglctx: "eglContextManager", camera: Camera):
        """Resize the EGL surface to match the camera and render into it."""
        eglctx.resize(camera.W, camera.H)
        self.render(camera)

    def render(self, camera: Camera):
        """Draw the mesh with the program matching its render type."""
        if not self.visible: return

        # For point rendering
        if self.render_type == Mesh.RenderType.POINTS:
            gl.glUseProgram(self.point_program)
            self.use_gl_program(self.point_program)
        else:
            gl.glUseProgram(self.mesh_program)
            self.use_gl_program(self.mesh_program)

        self.upload_gl_uniforms(camera)
        gl.glBindVertexArray(self.vao)

        if self.render_type == Mesh.RenderType.POINTS:
            gl.glDrawArrays(gl.GL_POINTS, 0, len(self.verts))  # number of vertices
        elif self.render_type == Mesh.RenderType.LINES:
            gl.glBindBuffer(gl.GL_ELEMENT_ARRAY_BUFFER, self.ebo)
            gl.glDrawElements(gl.GL_LINES, len(self.faces) * self.face_size, gl.GL_UNSIGNED_INT, ctypes.c_void_p(0))  # number of indices
        elif self.render_type == Mesh.RenderType.TRIS:
            gl.glBindBuffer(gl.GL_ELEMENT_ARRAY_BUFFER, self.ebo)
            gl.glDrawElements(gl.GL_TRIANGLES, len(self.faces) * self.face_size, gl.GL_UNSIGNED_INT, ctypes.c_void_p(0))  # number of indices
        elif self.render_type == Mesh.RenderType.QUADS:
            gl.glBindBuffer(gl.GL_ELEMENT_ARRAY_BUFFER, self.ebo)
            gl.glDrawElements(gl.GL_QUADS, len(self.faces) * self.face_size, gl.GL_UNSIGNED_INT, ctypes.c_void_p(0))  # number of indices
        elif self.render_type == Mesh.RenderType.STRIPS:
            gl.glDrawArrays(gl.GL_TRIANGLE_STRIP, 0, len(self.verts))
        else:
            raise NotImplementedError

        gl.glBindVertexArray(0)

    def use_gl_program(self, program: shaders.ShaderProgram):
        """Bind `program` and cache the locations of all uniforms it exposes."""
        use_gl_program(program)
        self.uniforms.shade_flat = gl.glGetUniformLocation(program, "shade_flat")
        self.uniforms.point_radius = gl.glGetUniformLocation(program, "point_radius")
        self.uniforms.render_normal = gl.glGetUniformLocation(program, "render_normal")
        self.uniforms.H = gl.glGetUniformLocation(program, "H")
        self.uniforms.W = gl.glGetUniformLocation(program, "W")
        self.uniforms.n = gl.glGetUniformLocation(program, "n")
        self.uniforms.f = gl.glGetUniformLocation(program, "f")
        self.uniforms.P = gl.glGetUniformLocation(program, "P")
        self.uniforms.K = gl.glGetUniformLocation(program, "K")
        self.uniforms.V = gl.glGetUniformLocation(program, "V")
        self.uniforms.M = gl.glGetUniformLocation(program, "M")

    def upload_gl_uniforms(self, camera: Camera):
        """Upload camera matrices and render options to the currently bound program."""
        K = camera.gl_ixt  # hold the reference
        V = camera.gl_ext  # hold the reference
        M = glm.identity(mat4)
        P = K * V * M  # full object-to-clip transform

        gl.glUniform1i(self.uniforms.shade_flat, self.shade_flat)
        gl.glUniform1f(self.uniforms.point_radius, self.point_radius)
        gl.glUniform1i(self.uniforms.render_normal, self.render_normal)
        gl.glUniform1i(self.uniforms.H, camera.H)  # o2w
        gl.glUniform1i(self.uniforms.W, camera.W)  # o2w
        gl.glUniform1f(self.uniforms.n, camera.n)  # o2w
        gl.glUniform1f(self.uniforms.f, camera.f)  # o2w
        gl.glUniformMatrix4fv(self.uniforms.P, 1, gl.GL_FALSE, glm.value_ptr(P))  # o2clip
        gl.glUniformMatrix4fv(self.uniforms.K, 1, gl.GL_FALSE, glm.value_ptr(K))  # c2clip
        gl.glUniformMatrix4fv(self.uniforms.V, 1, gl.GL_FALSE, glm.value_ptr(V))  # w2c
        gl.glUniformMatrix4fv(self.uniforms.M, 1, gl.GL_FALSE, glm.value_ptr(M))  # o2w

    def update_gl_buffers(self):
        """Grow GL buffers if needed and copy current verts/faces into them."""
        # Might be overwritten
        self.resize_buffers(len(self.verts) if hasattr(self, 'verts') else 0,
                            len(self.faces) if hasattr(self, 'faces') else 0)  # maybe repeated

        if hasattr(self, 'verts'):
            gl.glBindBuffer(gl.GL_ARRAY_BUFFER, self.vbo)
            gl.glBufferSubData(gl.GL_ARRAY_BUFFER, 0, self.n_verts_bytes, self.verts_data)  # hold the reference
        if hasattr(self, 'faces'):
            gl.glBindBuffer(gl.GL_ELEMENT_ARRAY_BUFFER, self.ebo)
            gl.glBufferSubData(gl.GL_ELEMENT_ARRAY_BUFFER, 0, self.n_faces_bytes, self.faces_data)

    def resize_buffers(self, v: int = 0, f: int = 0):
        """Reallocate GL buffers only when the requested capacity exceeds the current one."""
        if v > self.max_verts or f > self.max_faces:
            if v > self.max_verts: self.max_verts = v
            if f > self.max_faces: self.max_faces = f
            self.init_gl_buffers(v, f)

    def init_gl_buffers(self, v: int = 0, f: int = 0):
        """(Re)create the VAO/VBO/EBO and declare the interleaved vertex layout."""
        # This will only init the corresponding buffer object
        n_verts_bytes = v * self.vert_size * self.verts.element_size() if v > 0 else self.n_verts_bytes
        n_faces_bytes = f * self.face_size * self.faces.element_size() if f > 0 else self.n_faces_bytes

        # Housekeeping: free previous GL objects before recreating them
        if hasattr(self, 'vao'):
            gl.glDeleteVertexArrays(1, [self.vao])
            gl.glDeleteBuffers(2, [self.vbo, self.ebo])

        self.vao = gl.glGenVertexArrays(1)
        self.vbo = gl.glGenBuffers(1)
        self.ebo = gl.glGenBuffers(1)

        gl.glBindVertexArray(self.vao)
        gl.glBindBuffer(gl.GL_ARRAY_BUFFER, self.vbo)
        gl.glBufferData(gl.GL_ARRAY_BUFFER, n_verts_bytes, ctypes.c_void_p(0), gl.GL_DYNAMIC_DRAW)  # NOTE: Using pointers here won't work

        # https://stackoverflow.com/questions/67195932/pyopengl-cannot-render-any-vao
        # Each attribute (pos/color/norm) lives at a growing byte offset in the stride
        cumsum = 0
        for i, (s, t) in enumerate(zip(self.vert_sizes, self.vert_gl_types)):
            gl.glVertexAttribPointer(i, s, t, gl.GL_FALSE, self.vert_size * self.verts.element_size(), ctypes.c_void_p(cumsum * self.verts.element_size()))  # we use 32 bit float
            gl.glEnableVertexAttribArray(i)
            cumsum += s

        if n_faces_bytes > 0:
            # Some implementation has no faces, we dangerously ignore ebo here, assuming they will never be used
            gl.glBindBuffer(gl.GL_ELEMENT_ARRAY_BUFFER, self.ebo)
            gl.glBufferData(gl.GL_ELEMENT_ARRAY_BUFFER, n_faces_bytes, ctypes.c_void_p(0), gl.GL_DYNAMIC_DRAW)
        gl.glBindVertexArray(0)

    def render_imgui(mesh, viewer: 'VolumetricVideoViewer', batch: dotdict):
        """Draw the per-mesh imgui control panel (name, type, toggles, delete)."""
        from imgui_bundle import imgui
        from easyvolcap.utils.imgui_utils import push_button_color, pop_button_color

        i = batch.i
        will_delete = batch.will_delete  # indices appended here are deleted by the caller
        slider_width = batch.slider_width

        imgui.push_item_width(slider_width * 0.5)
        mesh.name = imgui.input_text(f'Mesh name##{i}', mesh.name)[1]

        if imgui.begin_combo(f'Mesh type##{i}', mesh.render_type.name):
            for t in Mesh.RenderType:
                if imgui.selectable(t.name, mesh.render_type == t)[1]:
                    mesh.render_type = t  # construct enum from name
                if mesh.render_type == t:
                    imgui.set_item_default_focus()
            imgui.end_combo()
        imgui.pop_item_width()

        if hasattr(mesh, 'point_radius'):
            mesh.point_radius = imgui.slider_float(f'Point radius##{i}', mesh.point_radius, 0.0005, 3.0)[1]  # 0.1mm
        if hasattr(mesh, 'pts_per_pix'):
            mesh.pts_per_pix = imgui.slider_int('Point per pixel', mesh.pts_per_pix, 0, 60)[1]  # 0.1mm

        if hasattr(mesh, 'shade_flat'):
            push_button_color(0x55cc33ff if not mesh.shade_flat else 0x8855aaff)
            if imgui.button(f'Smooth##{i}' if not mesh.shade_flat else f' Flat ##{i}'):
                mesh.shade_flat = not mesh.shade_flat
            pop_button_color()

        if hasattr(mesh, 'render_normal'):
            imgui.same_line()
            push_button_color(0x55cc33ff if not mesh.render_normal else 0x8855aaff)
            if imgui.button(f'Color ##{i}' if not mesh.render_normal else f'Normal##{i}'):
                mesh.render_normal = not mesh.render_normal
            pop_button_color()

        if hasattr(mesh, 'visible'):
            imgui.same_line()
            push_button_color(0x55cc33ff if not mesh.visible else 0x8855aaff)
            if imgui.button(f'Show##{i}' if not mesh.visible else f'Hide##{i}'):
                mesh.visible = not mesh.visible
            pop_button_color()

        # Render the delete button
        imgui.same_line()
        push_button_color(0xff5533ff)
        if imgui.button(f'Delete##{i}'):
            will_delete.append(i)
        pop_button_color()
class Quad(Mesh):
    """Fullscreen quad backed by a texture shared between CUDA (pytorch) and OpenGL.

    The texture can be filled from a torch tensor via CUDA-GL interop
    (`copy_to_texture`) or via a slow CPU upload (`upload_to_texture`),
    and presented either by drawing the quad or blitting the FBO.
    """
    # A shared texture for CUDA (pytorch) and OpenGL
    # Could be rendererd to screen using blitting or just drawing a quad
    def __init__(self,
                 H: int = 256, W: int = 256,
                 use_quad_draw: bool = True,   # draw the textured quad instead of blitting
                 use_quad_cuda: bool = True,   # use CUDA-GL interop for device-to-device copies
                 compose: bool = False,        # alpha-compose incoming frames with the current texture
                 compose_power: float = 1.0,
                 ):  # the texture to blip
        self.use_quad_draw = use_quad_draw
        self.use_quad_cuda = use_quad_cuda
        self.vert_sizes = [3]  # only position
        self.vert_gl_types = [gl.GL_FLOAT]  # only position
        self.render_type = Mesh.RenderType.STRIPS  # remove side effects of settings _type
        self.max_verts, self.max_faces = 0, 0
        # NDC-space corners of a fullscreen triangle strip
        self.verts = torch.as_tensor([[-1., -1., 0.5],
                                      [1., -1., 0.5],
                                      [-1., 1., 0.5],
                                      [1., 1., 0.5],])
        self.update_gl_buffers()
        self.compile_shaders()

        self.max_H, self.max_W = H, W  # texture capacity; grown by resize_textures
        self.H, self.W = H, W
        self.compose = compose
        self.compose_power = compose_power

        self.init_texture()

    @property
    def n_faces_bytes(self): return 0  # the quad is index-free (triangle strip)

    def use_gl_program(self, program: shaders.ShaderProgram):
        """Bind the program and point its `tex` sampler at texture unit 0."""
        super().use_gl_program(program)
        self.uniforms.tex = gl.glGetUniformLocation(program, 'tex')
        gl.glUseProgram(self.quad_program)  # use a different program
        gl.glUniform1i(self.uniforms.tex, 0)

    def compile_shaders(self):
        """Compile and link the quad vertex/fragment shader program."""
        try:
            self.quad_program = shaders.compileProgram(
                shaders.compileShader(load_shader_source('quad.vert'), gl.GL_VERTEX_SHADER),
                shaders.compileShader(load_shader_source('quad.frag'), gl.GL_FRAGMENT_SHADER)
            )
        except Exception as e:
            # Unescape GLSL compiler output so the error log is readable
            print(str(e).encode('utf-8').decode('unicode_escape'))
            raise e

    def resize_textures(self, H: int, W: int):  # analogy to update_gl_buffers
        """Update the logical size; reallocate the texture (with 5% headroom) only on growth."""
        self.H, self.W = H, W
        if self.H > self.max_H or self.W > self.max_W:  # max got updated
            self.max_H, self.max_W = max(int(self.H * 1.05), self.max_H), max(int(self.W * 1.05), self.max_W)
            self.init_texture()

    def init_texture(self):
        """(Re)create the RGBA8 texture, its FBO, and the CUDA-GL registration."""
        if hasattr(self, 'cu_tex'):
            from cuda import cudart
            CHECK_CUDART_ERROR(cudart.cudaGraphicsUnregisterResource(self.cu_tex))

        if hasattr(self, 'fbo'):
            gl.glDeleteFramebuffers(1, [self.fbo])
            gl.glDeleteTextures(1, [self.tex])

        # Init the texture to be blit onto the screen
        self.tex = gl.glGenTextures(1)
        gl.glBindTexture(gl.GL_TEXTURE_2D, self.tex)
        gl.glTexImage2D(gl.GL_TEXTURE_2D, 0, gl.GL_RGBA8, self.max_W, self.max_H, 0, gl.GL_RGBA, gl.GL_UNSIGNED_BYTE, ctypes.c_void_p(0))
        gl.glTexParameteri(gl.GL_TEXTURE_2D, gl.GL_TEXTURE_MAG_FILTER, gl.GL_NEAREST)
        gl.glTexParameteri(gl.GL_TEXTURE_2D, gl.GL_TEXTURE_MIN_FILTER, gl.GL_NEAREST)

        # Init the framebuffer object if explicit blitting is used (slower than drawing quad)
        self.fbo = gl.glGenFramebuffers(1)
        old_fbo = gl.glGetIntegerv(gl.GL_FRAMEBUFFER_BINDING)  # restore the caller's FBO afterwards
        gl.glBindFramebuffer(gl.GL_FRAMEBUFFER, self.fbo)
        gl.glFramebufferTexture2D(gl.GL_FRAMEBUFFER, gl.GL_COLOR_ATTACHMENT0, gl.GL_TEXTURE_2D, self.tex, 0)
        gl.glBindFramebuffer(gl.GL_FRAMEBUFFER, old_fbo)

        if self.use_quad_cuda:
            from cuda import cudart
            if self.compose:
                # Both reading and writing of this resource is required
                flags = cudart.cudaGraphicsRegisterFlags.cudaGraphicsRegisterFlagsNone
            else:
                flags = cudart.cudaGraphicsRegisterFlags.cudaGraphicsRegisterFlagsWriteDiscard
            try:
                self.cu_tex = CHECK_CUDART_ERROR(cudart.cudaGraphicsGLRegisterImage(self.tex, gl.GL_TEXTURE_2D, flags))
            except RuntimeError as e:
                log(red('Failed to initialize Quad with CUDA-GL interop, will use slow upload: '), e)
                self.use_quad_cuda = False

    def copy_to_texture(self, image: torch.Tensor, x: int = 0, y: int = 0, w: int = 0, h: int = 0):
        """Copy a CUDA tensor (H, W, 3|4) uint8 into the shared texture.

        Falls back to the slow CPU path when interop is unavailable. When
        `compose` is set, alpha-blends `image` over the texture's current
        content. Pitches below are in bytes: 4 channels * element size.
        """
        if not self.use_quad_cuda:
            self.upload_to_texture(image)
            return

        if not hasattr(self, 'cu_tex'):
            self.init_texture()

        # assert self.use_quad_cuda, "Need to enable cuda-opengl interop to copy from device to device, check creation of this Quad"
        w = w or self.W
        h = h or self.H
        if image.shape[-1] == 3:
            image = torch.cat([image, image.new_ones(image.shape[:-1] + (1,)) * 255], dim=-1)  # add alpha channel

        from cuda import cudart
        kind = cudart.cudaMemcpyKind.cudaMemcpyDeviceToDevice
        CHECK_CUDART_ERROR(cudart.cudaGraphicsMapResources(1, self.cu_tex, torch.cuda.current_stream().cuda_stream))
        cu_tex_arr = CHECK_CUDART_ERROR(cudart.cudaGraphicsSubResourceGetMappedArray(self.cu_tex, 0, 0))

        if self.compose:
            """
            Blit current framebuffer to this texture (self.tex)
            Read content of this texture into a cuda buffer
            Perform alpha blending based on the frame's alpha channel
            Copy the blended image back into the texture (self.tex)
            """
            old = gl.glGetInteger(gl.GL_DRAW_FRAMEBUFFER_BINDING)
            gl.glBindFramebuffer(gl.GL_DRAW_FRAMEBUFFER, self.fbo)  # read buffer defaults to 0
            gl.glBlitFramebuffer(x, y, w, h,
                                 x, y, w, h,
                                 gl.GL_COLOR_BUFFER_BIT, gl.GL_NEAREST)  # now self.tex contains the content of the already rendered frame
            gl.glBindFramebuffer(gl.GL_DRAW_FRAMEBUFFER, old)

            buffer = torch.empty_like(image)
            CHECK_CUDART_ERROR(cudart.cudaMemcpy2DFromArrayAsync(buffer.data_ptr(),  # dst
                                                                 w * 4 * buffer.element_size(),  # dpitch
                                                                 cu_tex_arr,  # src
                                                                 x * 4 * image.element_size(),  # wOffset
                                                                 y,  # hOffset
                                                                 w * 4 * buffer.element_size(),  # width Width of matrix transfer (columns in bytes)
                                                                 h,  # height
                                                                 kind,  # kind
                                                                 torch.cuda.current_stream().cuda_stream))  # stream

            # cv2.imwrite('image.png', image.flip(0).detach().cpu().numpy()[..., [2,1,0,3]])
            alpha = image[..., -1:] / 255
            image[..., :-1] = buffer[..., :-1] * (1 - alpha ** self.compose_power) + image[..., :-1] * alpha  # storing float into int
            image[..., -1:] = buffer[..., -1:] + image[..., -1:]
            image = image.clip(0, 255)

        CHECK_CUDART_ERROR(cudart.cudaMemcpy2DToArrayAsync(cu_tex_arr,
                                                           x * 4 * image.element_size(),
                                                           y,
                                                           image.data_ptr(),
                                                           w * 4 * image.element_size(),  # differently sized
                                                           w * 4 * image.element_size(),  # rgba, should do a composition first
                                                           h,
                                                           kind,
                                                           torch.cuda.current_stream().cuda_stream))
        CHECK_CUDART_ERROR(cudart.cudaGraphicsUnmapResources(1, self.cu_tex, torch.cuda.current_stream().cuda_stream))

    def upload_to_texture(self, ptr: np.ndarray, x: int = 0, y: int = 0, w: int = 0, h: int = 0):
        """Slow CPU path: upload an RGBA uint8 array into a sub-region of the texture."""
        w = w or self.W
        h = h or self.H
        if isinstance(ptr, torch.Tensor):
            ptr = ptr.detach().cpu().numpy()  # slow sync and copy operation # MARK: SYNC

        gl.glBindTexture(gl.GL_TEXTURE_2D, self.tex)
        # NOTE(review): slices use y:h / x:w (not y:y+h / x:x+w) — equivalent only when x == y == 0; confirm intended
        gl.glTexSubImage2D(gl.GL_TEXTURE_2D, 0, x, y, w, h, gl.GL_RGBA, gl.GL_UNSIGNED_BYTE, ptr[y:h, x:w])  # to gpu, might slow down?

    @property
    def verts_data(self):  # a heavy copy operation
        # Positions only — the quad carries no colors or normals
        verts = self.verts.ravel().detach().cpu().numpy()  # MARK: Maybe sync
        verts = np.asarray(verts, dtype=np.float32, order='C')
        return verts

    def render(self, camera: Camera = None):
        """Present the texture; the camera is unused for a screen-space quad."""
        self.draw()  # no uploading needed

    def draw(self, x: int = 0, y: int = 0, w: int = 0, h: int = 0):
        """
        Upload the texture instead of the camera
        This respects the OpenGL convension of lower left corners
        """
        if not self.use_quad_draw:
            self.blit(x, y, w, h)
            return

        w = w or self.W
        h = h or self.H
        _, _, W, H = gl.glGetIntegerv(gl.GL_VIEWPORT)  # remember the full viewport to restore it
        gl.glViewport(x, y, w, h)
        gl.glScissor(x, y, w, h)  # only render in this small region of the viewport

        gl.glUseProgram(self.quad_program)  # use a different program
        gl.glActiveTexture(gl.GL_TEXTURE0)
        gl.glBindTexture(gl.GL_TEXTURE_2D, self.tex)

        gl.glBindVertexArray(self.vao)
        gl.glDrawArrays(gl.GL_TRIANGLE_STRIP, 0, len(self.verts))
        gl.glBindVertexArray(0)

        # Some house keepings
        gl.glViewport(0, 0, W, H)
        gl.glScissor(0, 0, W, H)

    def blit(self, x: int = 0, y: int = 0, w: int = 0, h: int = 0):
        """
        This respects the OpenGL convension of lower left corners
        """
        w = w or self.W
        h = h or self.H
        old = gl.glGetInteger(gl.GL_READ_FRAMEBUFFER_BINDING)
        gl.glBindFramebuffer(gl.GL_READ_FRAMEBUFFER, self.fbo)  # write buffer defaults to 0
        gl.glBlitFramebuffer(x, y, x + w, y + h,  # the height is flipped
                             x, y, x + w, y + h,  # the height is flipped
                             gl.GL_COLOR_BUFFER_BIT, gl.GL_NEAREST)
        gl.glBindFramebuffer(gl.GL_READ_FRAMEBUFFER, old)
class UQuad(Mesh):
    """
    Responsible for initializing textures with a single value
    or blitting a texture to a framebuffer (possibly better done with blit instead of quad drawing)
    Effectively clearing the texture for real, see: https://stackoverflow.com/questions/37335281/is-glcleargl-color-buffer-bit-preferred-before-a-whole-frame-buffer-overwritte
    """
    def __init__(self):
        # NOTE: deliberately does not call Mesh.__init__; only the minimal state
        # for a full-screen quad is set up here
        self.n_blit_values = 3  # number of value/use_tex/tex uniform slots expected by the shader
        self.vert_sizes = [3]  # only position
        self.vert_gl_types = [gl.GL_FLOAT]  # only position
        self.max_verts, self.max_faces = 0, 0
        # Full-screen triangle strip in NDC at z = 0.5
        self.verts = torch.as_tensor([[-1., -1., 0.5],
                                      [1., -1., 0.5],
                                      [-1., 1., 0.5],
                                      [1., 1., 0.5],])
        self.compile_shaders()
        self.uniforms = dotdict()  # uniform values
        self.use_gl_programs(self.quad_program)
        self.update_gl_buffers()

    @property
    def n_faces_bytes(self): return 0  # no index buffer for the quad

    @property
    def verts_data(self):  # a heavy copy operation
        # Flattened, C-contiguous float32 positions suitable for glBufferData
        verts = self.verts.ravel().detach().cpu().numpy()  # MARK: Maybe sync
        verts = np.asarray(verts, dtype=np.float32, order='C')
        return verts

    def use_gl_programs(self, program: shaders.ShaderProgram):
        """Cache uniform locations of `program` and bind sampler i to texture unit i."""
        for i in range(self.n_blit_values):
            self.uniforms[f'value{i}'] = gl.glGetUniformLocation(program, f'value{i}')
        for i in range(self.n_blit_values):
            self.uniforms[f'use_tex{i}'] = gl.glGetUniformLocation(program, f'use_tex{i}')
        # NOTE(review): self.program is never assigned in this class (only
        # quad_program is); presumably it is provided by Mesh — confirm,
        # otherwise this was likely meant to be gl.glUseProgram(program)
        gl.glUseProgram(self.program)  # use a different program
        for i in range(self.n_blit_values):
            self.uniforms[f'tex{i}'] = gl.glGetUniformLocation(program, f'tex{i}')
            gl.glUniform1i(self.uniforms[f'tex{i}'], i)

    def upload_gl_uniforms(self, values: List[List[float]], use_texs: List[bool]):
        """Upload per-slot clear values and texture-enable flags to the shader."""
        for i, v in enumerate(values):
            v = vec4(v)  # HACK: Hold the reference for this upload
            gl.glUniform4fv(self.uniforms[f'value{i}'], 1, glm.value_ptr(v))  # as float array
        for i, v in enumerate(use_texs):
            gl.glUniform1i(self.uniforms[f'use_tex{i}'], v)

    def compile_shaders(self):
        # Compile the pass-through quad shader pair into quad_program
        try:
            self.quad_program = shaders.compileProgram(
                shaders.compileShader(load_shader_source('uquad.vert'), gl.GL_VERTEX_SHADER),
                shaders.compileShader(load_shader_source('uquad.frag'), gl.GL_FRAGMENT_SHADER)
            )
        except Exception as e:
            # Unescape the GLSL compiler log so it prints with real newlines
            print(str(e).encode('utf-8').decode('unicode_escape'))
            raise e

    def draw(self, values: List[List[float]] = [], use_texs=[]):
        """
        This function will render 'value' to the currently bound framebuffer, up to six outputs
        """
        # NOTE: the mutable default arguments are safe here — they are never mutated
        old_prog = gl.glGetIntegerv(gl.GL_CURRENT_PROGRAM)  # save caller's program/VAO
        old_vao = gl.glGetIntegerv(gl.GL_VERTEX_ARRAY_BINDING)
        gl.glUseProgram(self.quad_program)
        self.upload_gl_uniforms(values, use_texs)  # should be a noop
        # Prepare to render to textures
        gl.glBindVertexArray(self.vao)
        gl.glDrawArrays(gl.GL_TRIANGLE_STRIP, 0, len(self.verts))  # number of vertices
        gl.glBindVertexArray(old_vao)
        gl.glUseProgram(old_prog)
class DQuad(UQuad):
    """Depth-writing variant of UQuad: draws with the depth test forced to pass."""

    def compile_shaders(self):
        # Compile the depth-quad shader pair into quad_program
        try:
            vert_stage = shaders.compileShader(load_shader_source('dquad.vert'), gl.GL_VERTEX_SHADER)
            frag_stage = shaders.compileShader(load_shader_source('dquad.frag'), gl.GL_FRAGMENT_SHADER)
            self.quad_program = shaders.compileProgram(vert_stage, frag_stage)
        except Exception as e:
            print(str(e).encode('utf-8').decode('unicode_escape'))
            raise e

    def draw(self, values: List[List[float]] = [], use_texs=[]):
        # Let every fragment through the depth test, then restore the previous mode
        prev_depth_func = gl.glGetIntegerv(gl.GL_DEPTH_FUNC)
        gl.glDepthFunc(gl.GL_ALWAYS)
        super().draw(values, use_texs)
        gl.glDepthFunc(prev_depth_func)
def hardware_rendering_framebuffer(H: int, W: int, gl_tex_dtype=gl.GL_RGBA16F):
    """
    Create an offscreen FBO of size (H, W) for multi-pass splatting with three
    color attachments (RGBA color, depth upper bound, depth lower bound) and a
    24-bit depth attachment used for hardware depth testing.

    Returns:
        (color_buffer, depth_upper, depth_lower, depth_attach, fbo) GL handles.
    """
    # Prepare for write frame buffers
    color_buffer = gl.glGenTextures(1)
    depth_upper = gl.glGenTextures(1)
    depth_lower = gl.glGenTextures(1)
    depth_attach = gl.glGenTextures(1)
    fbo = gl.glGenFramebuffers(1)  # generate 1 framebuffer, storereference in fb
    # Init the texture (call the resizing function), will simply allocate empty memory
    # The internal format describes how the texture shall be stored in the GPU. The format describes how the format of your pixel data in client memory (together with the type parameter).
    gl.glBindTexture(gl.GL_TEXTURE_2D, color_buffer)
    gl.glTexImage2D(gl.GL_TEXTURE_2D, 0, gl_tex_dtype, W, H, 0, gl.GL_RGBA, gl.GL_FLOAT, ctypes.c_void_p(0))  # 16 * 4
    gl.glTexParameteri(gl.GL_TEXTURE_2D, gl.GL_TEXTURE_MAG_FILTER, gl.GL_NEAREST)
    gl.glTexParameteri(gl.GL_TEXTURE_2D, gl.GL_TEXTURE_MIN_FILTER, gl.GL_NEAREST)
    gl.glBindTexture(gl.GL_TEXTURE_2D, depth_upper)
    gl.glTexImage2D(gl.GL_TEXTURE_2D, 0, gl.GL_R32F, W, H, 0, gl.GL_RED, gl.GL_FLOAT, ctypes.c_void_p(0))  # 32
    gl.glTexParameteri(gl.GL_TEXTURE_2D, gl.GL_TEXTURE_MAG_FILTER, gl.GL_NEAREST)
    gl.glTexParameteri(gl.GL_TEXTURE_2D, gl.GL_TEXTURE_MIN_FILTER, gl.GL_NEAREST)
    gl.glBindTexture(gl.GL_TEXTURE_2D, depth_lower)
    gl.glTexImage2D(gl.GL_TEXTURE_2D, 0, gl.GL_R32F, W, H, 0, gl.GL_RED, gl.GL_FLOAT, ctypes.c_void_p(0))  # 32
    gl.glTexParameteri(gl.GL_TEXTURE_2D, gl.GL_TEXTURE_MAG_FILTER, gl.GL_NEAREST)
    gl.glTexParameteri(gl.GL_TEXTURE_2D, gl.GL_TEXTURE_MIN_FILTER, gl.GL_NEAREST)
    gl.glBindTexture(gl.GL_TEXTURE_2D, depth_attach)
    gl.glTexImage2D(gl.GL_TEXTURE_2D, 0, gl.GL_DEPTH_COMPONENT24, W, H, 0, gl.GL_DEPTH_COMPONENT, gl.GL_UNSIGNED_INT, ctypes.c_void_p(0))  # 32
    gl.glTexParameteri(gl.GL_TEXTURE_2D, gl.GL_TEXTURE_MAG_FILTER, gl.GL_NEAREST)
    gl.glTexParameteri(gl.GL_TEXTURE_2D, gl.GL_TEXTURE_MIN_FILTER, gl.GL_NEAREST)
    # Bind texture to fbo
    gl.glBindFramebuffer(gl.GL_FRAMEBUFFER, fbo)
    gl.glFramebufferTexture2D(gl.GL_FRAMEBUFFER, gl.GL_COLOR_ATTACHMENT0, gl.GL_TEXTURE_2D, color_buffer, 0)  # location 0
    gl.glFramebufferTexture2D(gl.GL_FRAMEBUFFER, gl.GL_COLOR_ATTACHMENT1, gl.GL_TEXTURE_2D, depth_upper, 0)  # location 1
    gl.glFramebufferTexture2D(gl.GL_FRAMEBUFFER, gl.GL_COLOR_ATTACHMENT2, gl.GL_TEXTURE_2D, depth_lower, 0)  # location 2
    gl.glFramebufferTexture2D(gl.GL_FRAMEBUFFER, gl.GL_DEPTH_ATTACHMENT, gl.GL_TEXTURE_2D, depth_attach, 0)
    gl.glDrawBuffers(3, [gl.GL_COLOR_ATTACHMENT0, gl.GL_COLOR_ATTACHMENT1, gl.GL_COLOR_ATTACHMENT2])
    # Check framebuffer status
    if gl.glCheckFramebufferStatus(gl.GL_FRAMEBUFFER) != gl.GL_FRAMEBUFFER_COMPLETE:
        log(red('Framebuffer not complete, exiting...'))
        raise RuntimeError('Incomplete framebuffer')
    # Restore the original state
    gl.glBindFramebuffer(gl.GL_FRAMEBUFFER, 0)
    return color_buffer, depth_upper, depth_lower, depth_attach, fbo
def hareward_peeling_framebuffer(H: int, W: int):
    """
    Create an offscreen FBO of size (H, W) for index-based depth peeling, with
    an integer point-index attachment, a depth lower-bound attachment, and a
    24-bit depth attachment for hardware depth testing.

    NOTE: "hareward" appears to be a typo of "hardware"; the name is kept for
    backward compatibility with existing callers.

    Returns:
        (index_buffer, depth_lower, depth_attach, fbo) GL handles.
    """
    # Prepare for write frame buffers
    index_buffer = gl.glGenTextures(1)
    depth_lower = gl.glGenTextures(1)
    depth_attach = gl.glGenTextures(1)
    fbo = gl.glGenFramebuffers(1)  # generate 1 framebuffer, storereference in fb
    # Init the texture (call the resizing function), will simply allocate empty memory
    # The internal format describes how the texture shall be stored in the GPU. The format describes how the format of your pixel data in client memory (together with the type parameter).
    gl.glBindTexture(gl.GL_TEXTURE_2D, index_buffer)
    gl.glTexImage2D(gl.GL_TEXTURE_2D, 0, gl.GL_R32I, W, H, 0, gl.GL_RED_INTEGER, gl.GL_INT, ctypes.c_void_p(0))  # 32
    gl.glTexParameteri(gl.GL_TEXTURE_2D, gl.GL_TEXTURE_MAG_FILTER, gl.GL_NEAREST)
    gl.glTexParameteri(gl.GL_TEXTURE_2D, gl.GL_TEXTURE_MIN_FILTER, gl.GL_NEAREST)
    gl.glBindTexture(gl.GL_TEXTURE_2D, depth_lower)
    gl.glTexImage2D(gl.GL_TEXTURE_2D, 0, gl.GL_R32F, W, H, 0, gl.GL_RED, gl.GL_FLOAT, ctypes.c_void_p(0))  # 32
    gl.glTexParameteri(gl.GL_TEXTURE_2D, gl.GL_TEXTURE_MAG_FILTER, gl.GL_NEAREST)
    gl.glTexParameteri(gl.GL_TEXTURE_2D, gl.GL_TEXTURE_MIN_FILTER, gl.GL_NEAREST)
    gl.glBindTexture(gl.GL_TEXTURE_2D, depth_attach)
    gl.glTexImage2D(gl.GL_TEXTURE_2D, 0, gl.GL_DEPTH_COMPONENT24, W, H, 0, gl.GL_DEPTH_COMPONENT, gl.GL_FLOAT, ctypes.c_void_p(0))  # 32
    gl.glTexParameteri(gl.GL_TEXTURE_2D, gl.GL_TEXTURE_MAG_FILTER, gl.GL_NEAREST)
    gl.glTexParameteri(gl.GL_TEXTURE_2D, gl.GL_TEXTURE_MIN_FILTER, gl.GL_NEAREST)
    # Bind texture to fbo
    gl.glBindFramebuffer(gl.GL_FRAMEBUFFER, fbo)
    gl.glFramebufferTexture2D(gl.GL_FRAMEBUFFER, gl.GL_COLOR_ATTACHMENT0, gl.GL_TEXTURE_2D, index_buffer, 0)  # location 0
    gl.glFramebufferTexture2D(gl.GL_FRAMEBUFFER, gl.GL_COLOR_ATTACHMENT1, gl.GL_TEXTURE_2D, depth_lower, 0)  # location 1
    gl.glFramebufferTexture2D(gl.GL_FRAMEBUFFER, gl.GL_DEPTH_ATTACHMENT, gl.GL_TEXTURE_2D, depth_attach, 0)
    gl.glDrawBuffers(2, [gl.GL_COLOR_ATTACHMENT0, gl.GL_COLOR_ATTACHMENT1])
    # Check framebuffer status
    if gl.glCheckFramebufferStatus(gl.GL_FRAMEBUFFER) != gl.GL_FRAMEBUFFER_COMPLETE:
        log(red('Framebuffer not complete, exiting...'))
        raise RuntimeError('Incomplete framebuffer')
    # Restore the original state
    gl.glBindFramebuffer(gl.GL_FRAMEBUFFER, 0)
    return index_buffer, depth_lower, depth_attach, fbo
class Gaussian(Mesh):
    """
    Viewer mesh wrapper around a 3D Gaussian model: renders the Gaussians with
    the model's own CUDA renderer and copies the result to screen via a Quad.
    """
    def __init__(self,
                 filename: str = 'assets/meshes/zju3dv.npz',
                 gaussian_cfg: dotdict = dotdict(),  # NOTE: mutable default, shared across instances — only read here
                 quad_cfg: dotdict = dotdict(),
                 view_depth: bool = False,  # show depth or show color
                 dpt_cm: str = 'linear',
                 H: int = 1024,
                 W: int = 1024,
                 **kwargs,
                 ):
        # Import Gaussian Model
        from easyvolcap.engine.registry import call_from_cfg
        from easyvolcap.utils.gaussian_utils import GaussianModel
        # Housekeeping
        super().__init__(**kwargs)
        self.name = split(filename)[-1]
        # Init Gaussian related models, for now only the first gaussian model is supported
        if filename.endswith('.npz') or filename.endswith('.pt') or filename.endswith('.pth'):
            # Load from GaussianTSampler
            pretrained, _ = load_pretrained(filename)  # loaded model and updated path (maybe)
            pretrained = pretrained.model
            # Strip the sampler prefix so the keys match GaussianModel's state dict
            state_dict = dotdict()
            for k, v in pretrained.items():
                if k.startswith('sampler.pcds.0'):
                    state_dict[k.replace('sampler.pcds.0.', '')] = v
            # Load the parameters into the gaussian model
            self.gaussian_model: GaussianModel = call_from_cfg(GaussianModel, gaussian_cfg)  # init empty gaussian model
            self.gaussian_model.load_state_dict(state_dict)  # load the first gaussian model
            self.gaussian_model.cuda()  # move the parameters to GPU
        elif filename.endswith('.ply'):
            # Load raw GaussianModel
            # pts, rgb, norm, scalars = load_pts(filename)
            self.gaussian_model: GaussianModel = call_from_cfg(GaussianModel, gaussian_cfg)  # init empty gaussian model
            self.gaussian_model.load_ply(filename)  # load the original gaussian model
            self.gaussian_model.cuda()
        else:
            raise NotImplementedError
        # Init rendering quad (used to present the CUDA-rendered image)
        self.quad: Quad = call_from_cfg(Quad, quad_cfg, H=H, W=W)
        # Other configurations
        self.view_depth = view_depth
        self.dpt_cm = dpt_cm
        # Mesh-specific toggles make no sense for Gaussians; remove them from the GUI
        del self.shade_flat
        del self.point_radius
        del self.render_normal

    # Disabling initialization: the Gaussian model loads its own data and
    # never touches the Mesh GL buffers or shaders
    def load_from_file(self, *args, **kwargs):
        pass

    def load_from_data(self, *args, **kwargs):
        pass

    def compile_shaders(self):
        pass

    def update_gl_buffers(self):
        pass

    def resize_textures(self, H: int, W: int):
        # Only the presentation quad needs resizing
        self.quad.resize_textures(H, W)

    # The actual rendering function
    @torch.no_grad()
    def render(self, camera: Camera):
        """Render the Gaussians for `camera` and present via the quad texture."""
        # Perform actual gaussian rendering
        batch = add_batch(to_cuda(camera.to_batch()))
        rgb, acc, dpt = self.gaussian_model.render(batch)
        if self.view_depth:
            rgba = torch.cat([depth_curve_fn(dpt, cm=self.dpt_cm), acc], dim=-1)  # H, W, 4
        else:
            rgba = torch.cat([rgb, acc], dim=-1)  # H, W, 4
        # Copy rendered tensor to screen (flip to match GL's bottom-left origin)
        rgba = (rgba.clip(0, 1) * 255).type(torch.uint8).flip(0)  # transform
        self.quad.copy_to_texture(rgba)
        self.quad.render()

    def render_imgui(mesh, viewer: 'VolumetricVideoViewer', batch: dotdict):
        # NOTE: first parameter is named 'mesh' instead of 'self'; zero-argument
        # super() still resolves correctly inside the method
        super().render_imgui(viewer, batch)
        from imgui_bundle import imgui
        from easyvolcap.utils.imgui_utils import push_button_color, pop_button_color
        i = batch.i  # unique widget id for this mesh
        imgui.same_line()
        # Toggle button flips between color and depth visualization
        push_button_color(0x55cc33ff if not mesh.view_depth else 0x8855aaff)
        if imgui.button(f'Color##{i}' if not mesh.view_depth else f' Depth ##{i}'):
            mesh.view_depth = not mesh.view_depth
        pop_button_color()
class PointSplat(Gaussian, nn.Module):
    """
    Viewer mesh for a plain point cloud rendered with GaussianTSampler's radius
    splatting; reuses Gaussian's quad-presentation path but loads points/colors/
    alpha/radius from a .ply instead of a Gaussian model.
    """
    def __init__(self,
                 filename: str = 'assets/meshes/zju3dv.ply',
                 quad_cfg: dotdict = dotdict(),
                 view_depth: bool = False,  # show depth or show color
                 dpt_cm: str = 'linear',
                 H: int = 1024,
                 W: int = 1024,
                 **kwargs,
                 ):
        # Import Gaussian Model
        from easyvolcap.engine.registry import call_from_cfg
        from easyvolcap.utils.data_utils import load_pts
        from easyvolcap.utils.net_utils import make_buffer
        from easyvolcap.models.samplers.gaussiant_sampler import GaussianTSampler
        # Housekeeping
        # NOTE: super(Gaussian, self) skips Gaussian.__init__ on purpose and
        # continues the MRO after Gaussian (no Gaussian model is loaded here)
        super(Gaussian, self).__init__(**kwargs)
        self.name = split(filename)[-1]
        self.render_radius = MethodType(GaussianTSampler.render_radius, self)  # override the method
        # Init PointSplat related models, for now only the first gaussian model is supported
        if filename.endswith('.ply'):
            # Load raw GaussianModel
            pts, rgb, norms, scalars = load_pts(filename)
            occ, rad = scalars.alpha, scalars.radius
            # Registered as buffers so .cuda() below moves them together
            self.pts = make_buffer(torch.from_numpy(pts))  # N, 3
            self.rgb = make_buffer(torch.from_numpy(rgb))  # N, 3
            self.occ = make_buffer(torch.from_numpy(occ))  # N, 1
            self.rad = make_buffer(torch.from_numpy(rad))  # N, 1
        else:
            raise NotImplementedError
        # Init rendering quad
        self.quad: Quad = call_from_cfg(Quad, quad_cfg, H=H, W=W)
        self.cuda()  # move to cuda
        # Other configurations
        self.view_depth = view_depth
        self.dpt_cm = dpt_cm
        self.radius_mult = 1.0  # GUI-adjustable point radius scale
        self.alpha_mult = 1.0  # GUI-adjustable point alpha scale

    # The actual rendering function
    @torch.no_grad()
    def render(self, camera: Camera):
        """Splat the point cloud for `camera` and present via the quad texture."""
        # Perform actual gaussian rendering
        batch = add_batch(to_cuda(camera.to_batch()))
        sh0 = rgb2sh0(self.rgb[..., None])  # colors as 0th-order SH coefficients
        xyz = self.pts
        occ = (self.occ * self.alpha_mult).clip(0, 1)
        rad = self.rad * self.radius_mult
        rgb, acc, dpt = self.render_radius(*add_batch([xyz, sh0, rad, occ]), batch)
        rgb, acc, dpt = rgb[0], acc[0], dpt[0]  # drop the batch dimension
        if self.view_depth:
            rgba = torch.cat([depth_curve_fn(dpt, cm=self.dpt_cm), acc], dim=-1)  # H, W, 4
        else:
            rgba = torch.cat([rgb, acc], dim=-1)  # H, W, 4
        # Copy rendered tensor to screen (flip to match GL's bottom-left origin)
        rgba = (rgba.clip(0, 1) * 255).type(torch.uint8).flip(0)  # transform
        self.quad.copy_to_texture(rgba)
        self.quad.render()

    def render_imgui(mesh, viewer: 'VolumetricVideoViewer', batch: dotdict):
        # NOTE: first parameter is named 'mesh' instead of 'self'
        super().render_imgui(viewer, batch)
        i = batch.i  # unique widget id for this mesh
        from imgui_bundle import imgui
        mesh.radius_mult = imgui.slider_float(f'Point radius multiplier##{i}', mesh.radius_mult, 0.1, 3.0)[1]  # 0.1mm
        mesh.alpha_mult = imgui.slider_float(f'Point alpha multiplier##{i}', mesh.alpha_mult, 0.1, 3.0)[1]  # 0.1mm
class Splat(Mesh):  # FIXME: Not rendering, need to debug this
    """
    OpenGL point-splatting renderer using multi-pass depth peeling over two
    ping-pong framebuffers (read/write), each with color + depth-bound
    attachments created by hardware_rendering_framebuffer.
    """
    def __init__(self,
                 *args,
                 H: int = 512,
                 W: int = 512,
                 tex_dtype: str = torch.half,  # NOTE: annotated str but default is a torch.dtype; both are accepted below
                 pts_per_pix: int = 24,  # render less for the static background since we're only doing a demo
                 blit_last_ratio: float = 0.0,
                 volume_rendering: bool = True,
                 radii_mult_volume: float = 1.00,  # 2 / 3 is the right integration, but will leave holes, 1.0 will make it bloat, 0.85 looks visually better
                 radii_mult_solid: float = 0.85,  # 2 / 3 is the right integration, but will leave holes, 1.0 will make it bloat, 0.85 looks visually better
                 point_smooth: bool = True,
                 alpha_blending: bool = True,
                 **kwargs):
        kwargs = dotdict(kwargs)
        kwargs.vert_sizes = kwargs.get('vert_sizes', [3, 3, 1, 1])  # xyz, rgb, radius, alpha
        self.tex_dtype = getattr(torch, tex_dtype) if isinstance(tex_dtype, str) else tex_dtype
        self.gl_tex_dtype = gl.GL_RGBA16F if self.tex_dtype == torch.half else gl.GL_RGBA32F
        super().__init__(*args, **kwargs)
        self.use_gl_program(self.splat_program)
        # Depth-peeling configuration
        self.pts_per_pix = pts_per_pix
        self.blit_last_ratio = blit_last_ratio
        self.volume_rendering = volume_rendering
        self.radii_mult_volume = radii_mult_volume
        self.radii_mult_solid = radii_mult_solid
        self.point_smooth = point_smooth
        self.alpha_blending = alpha_blending
        # Texture capacity vs current logical size (textures are over-allocated)
        self.max_H, self.max_W = H, W
        self.H, self.W = H, W
        self.init_textures()
        from easyvolcap.models.samplers.gaussiant_sampler import GaussianTSampler
        self.render_radius = MethodType(GaussianTSampler.render_radius, self)  # override the method

    @property
    def verts_data(self):  # a heavy copy operation
        # Interleave xyz, rgb, radius, alpha into one flat float32 array for the VBO
        # NOTE(review): no .cpu() before .numpy(); assumes the attributes live on CPU — confirm
        verts = torch.cat([self.verts, self.colors, self.radius, self.alpha], dim=-1).ravel().numpy()  # MARK: Maybe sync
        verts = np.asarray(verts, dtype=np.float32, order='C')  # this should only be invoked once
        return verts

    def use_gl_program(self, program: shaders.ShaderProgram):
        """Bind `program` and cache the splatting-specific uniform locations."""
        super().use_gl_program(program)
        # Special controlling variables
        self.uniforms.alpha_blending = gl.glGetUniformLocation(program, f'alpha_blending')
        self.uniforms.point_smooth = gl.glGetUniformLocation(program, f'point_smooth')
        self.uniforms.radii_mult = gl.glGetUniformLocation(program, f'radii_mult')
        # Special rendering variables
        self.uniforms.pass_index = gl.glGetUniformLocation(program, f'pass_index')
        self.uniforms.read_color = gl.glGetUniformLocation(program, f'read_color')
        self.uniforms.read_upper = gl.glGetUniformLocation(program, f'read_upper')
        self.uniforms.read_lower = gl.glGetUniformLocation(program, f'read_lower')
        # Samplers read the previous pass from texture units 0, 1, 2
        gl.glUniform1i(self.uniforms.read_color, 0)
        gl.glUniform1i(self.uniforms.read_upper, 1)
        gl.glUniform1i(self.uniforms.read_lower, 2)

    def compile_shaders(self):
        # splat: multi-pass volume splatting; usplat: single-pass solid splatting
        try:
            self.splat_program = shaders.compileProgram(
                shaders.compileShader(load_shader_source('splat.vert'), gl.GL_VERTEX_SHADER),
                shaders.compileShader(load_shader_source('splat.frag'), gl.GL_FRAGMENT_SHADER)
            )
            self.usplat_program = shaders.compileProgram(
                shaders.compileShader(load_shader_source('usplat.vert'), gl.GL_VERTEX_SHADER),
                shaders.compileShader(load_shader_source('usplat.frag'), gl.GL_FRAGMENT_SHADER)
            )
        except Exception as e:
            print(str(e).encode('utf-8').decode('unicode_escape'))
            raise e

    def rasterize(self, camera: Camera = None, length: int = None):
        """Dispatch to volume (multi-pass) or solid (single-pass) rasterization."""
        if self.volume_rendering:
            return self.rasterize_volume(camera, length)
        else:
            return self.rasterize_solid(camera, length)

    def rasterize_volume(self, camera: Camera = None, length: int = None):  # some implementation requires no uploading of camera
        """
        Let's try to analyze what's happening here
        We want to:
        1. Render the front-most color to color buffer
        2. UNUSED: Render the front-most depth + some large margin to a depth upper limit buffer
        3. Render the front-most depth + some small margin to a depth lower limit buffer
        4. Switch between the render target and sampling target
        5. Use the previous rendered color, depth upper limit and lower limit as textures
        6. When current depth is smaller than the lower limit, we've already rendered this in the first pass, discard
        7. UNUSED: When current depth is larger than the upper limit, it will probabily not contribute much to final results, discard
        8. UNUSED: When the accumulated opacity reaches almost 1, subsequent rendering would not have much effect, return directly
        9. When the point coordinates falls out of bound of the current sphere, dicard (this could be optimized with finutining in rectangle)
        10. Finally, try to render the final color using the volume rendering equation (by accumulating alpha values from front to back)
        Required cleanup checklist:
        1. Before rendering the first pass, we need to clear the color and depth texture, this is not done, need to check multi-frame accumulation on this
        2. Before rendering next pass, it's also recommended to blit color and depth values from previous pass to avoid assign them in the shader
        Returns the FBO handle holding the final composited result.
        """
        front_fbo, front_color, front_upper, front_lower = self.read_fbo, self.read_color, self.read_upper, self.read_lower
        back_fbo, back_color, back_upper, back_lower = self.write_fbo, self.write_color, self.write_upper, self.write_lower
        # Only clear the output once
        gl.glBindFramebuffer(gl.GL_FRAMEBUFFER, front_fbo)  # for offscreen rendering to textures
        gl.glClearBufferfv(gl.GL_COLOR, 0, [0.0, 0.0, 0.0, 0.0])
        # gl.glClearBufferfv(gl.GL_COLOR, 1, [1e9])
        gl.glClearBufferfv(gl.GL_COLOR, 2, [0.0, 0.0, 0.0, 0.0])
        gl.glClearBufferfv(gl.GL_DEPTH, 0, [1e9])  # this is for depth testing
        # Only clear the output once
        gl.glBindFramebuffer(gl.GL_FRAMEBUFFER, back_fbo)  # for offscreen rendering to textures
        gl.glClearBufferfv(gl.GL_COLOR, 0, [0.0, 0.0, 0.0, 0.0])
        # gl.glClearBufferfv(gl.GL_COLOR, 1, [1e9])
        gl.glClearBufferfv(gl.GL_COLOR, 2, [0.0, 0.0, 0.0, 0.0])
        gl.glClearBufferfv(gl.GL_DEPTH, 0, [1e9])  # this is for depth testing
        # Prepare for the actual rendering, previous operations could rebind the vertex array
        self.use_gl_program(self.splat_program)  # TODO: Implement this with a mapping and a lazy modification
        self.upload_gl_uniforms(camera)
        gl.glBindVertexArray(self.vao)
        # The actual multi pass rendering process happens here
        for pass_index in range(self.pts_per_pix):
            # Swap buffers to render the next pass (ping-pong: last pass's write becomes this pass's read)
            front_fbo, front_color, front_upper, front_lower, back_fbo, back_color, back_upper, back_lower = \
                back_fbo, back_color, back_upper, back_lower, front_fbo, front_color, front_upper, front_lower
            # Bind the read texture and bind the write render frame buffer
            gl.glBindTextures(0, 3, [front_color, front_upper, front_lower])
            # Move content from write_fbo to screen fbo
            if pass_index > self.pts_per_pix * self.blit_last_ratio:  # no blitting almost has no effect on the rendering
                gl.glBindFramebuffer(gl.GL_READ_FRAMEBUFFER, front_fbo)
                gl.glBindFramebuffer(gl.GL_DRAW_FRAMEBUFFER, back_fbo)
                for i in range(3):
                    gl.glReadBuffer(gl.GL_COLOR_ATTACHMENT0 + i)
                    gl.glDrawBuffer(gl.GL_COLOR_ATTACHMENT0 + i)
                    gl.glBlitFramebuffer(0, 0, self.W, self.H, 0, 0, self.W, self.H, gl.GL_COLOR_BUFFER_BIT, gl.GL_NEAREST)
                gl.glDrawBuffers(3, [gl.GL_COLOR_ATTACHMENT0, gl.GL_COLOR_ATTACHMENT1, gl.GL_COLOR_ATTACHMENT2])
            # Clear depth buffer for depth testing
            gl.glBindFramebuffer(gl.GL_FRAMEBUFFER, back_fbo)  # for offscreen rendering to textures
            gl.glClearBufferfv(gl.GL_DEPTH, 0, [1e9])  # this is for depth testing
            gl.glUniform1i(self.uniforms.pass_index, pass_index)  # pass index
            # The actual drawing pass with render things out to the write_fbo
            gl.glDrawArrays(gl.GL_POINTS, 0, length if length is not None else len(self.verts))  # number of vertices
        # Restore states of things
        gl.glBindFramebuffer(gl.GL_FRAMEBUFFER, 0)
        gl.glBindVertexArray(0)
        return back_fbo

    def upload_gl_uniforms(self, camera: Camera):
        """Upload camera uniforms plus the splatting toggles and radius multiplier."""
        super().upload_gl_uniforms(camera)
        gl.glUniform1i(self.uniforms.point_smooth, self.point_smooth)
        gl.glUniform1i(self.uniforms.alpha_blending, self.alpha_blending)
        if self.volume_rendering:
            gl.glUniform1f(self.uniforms.radii_mult, self.radii_mult_volume)  # radii mult
        else:
            gl.glUniform1f(self.uniforms.radii_mult, self.radii_mult_solid)  # radii mult

    def rasterize_solid(self, camera: Camera = None, length: int = None):
        """Single-pass solid splatting; returns the FBO holding the result."""
        # Only clear the output once
        back_fbo = self.write_fbo
        gl.glBindFramebuffer(gl.GL_FRAMEBUFFER, back_fbo)  # for offscreen rendering to textures
        gl.glClearBufferfv(gl.GL_COLOR, 0, [0.0, 0.0, 0.0, 0.0])  # color
        # gl.glClearBufferfv(gl.GL_COLOR, 1, [0.0]) # depth upper
        gl.glClearBufferfv(gl.GL_COLOR, 2, [0.0, 0.0, 0.0, 0.0])  # depth lower
        gl.glClearBufferfv(gl.GL_DEPTH, 0, [1e9])  # this is for depth testing
        # Prepare for the actual rendering, previous operations could rebind the vertex array
        self.use_gl_program(self.usplat_program)
        self.upload_gl_uniforms(camera)
        gl.glUniform1i(self.uniforms.pass_index, 0)  # pass index
        gl.glBindVertexArray(self.vao)
        # The actual drawing pass with render things out to the write_fbo
        gl.glDrawArrays(gl.GL_POINTS, 0, length if length is not None else len(self.verts))  # number of vertices
        # Restore states of things
        gl.glBindFramebuffer(gl.GL_FRAMEBUFFER, 0)
        gl.glBindVertexArray(0)
        return back_fbo

    def show(self, back_fbo: int):
        """Blit the rendered color attachment of `back_fbo` onto the screen."""
        # Move content from write_fbo to screen fbo
        gl.glBindFramebuffer(gl.GL_READ_FRAMEBUFFER, back_fbo)
        gl.glBindFramebuffer(gl.GL_DRAW_FRAMEBUFFER, 0)  # render the final content onto screen
        gl.glReadBuffer(gl.GL_COLOR_ATTACHMENT0)
        gl.glBlitFramebuffer(0, 0, self.W, self.H, 0, 0, self.W, self.H, gl.GL_COLOR_BUFFER_BIT, gl.GL_NEAREST)
        gl.glBindFramebuffer(gl.GL_FRAMEBUFFER, 0)

    def render(self, camera):
        if not self.visible: return
        self.show(self.rasterize(camera))

    def resize_textures(self, H: int, W: int):  # analogy to update_gl_buffers
        """Grow the backing textures (with 5% slack) when the requested size exceeds capacity."""
        self.H, self.W = H, W
        if self.H > self.max_H or self.W > self.max_W:  # max got updated
            self.max_H, self.max_W = max(int(self.H * 1.05), self.max_H), max(int(self.W * 1.05), self.max_W)
            self.init_textures()

    def init_textures(self):
        """(Re)allocate the read/write ping-pong framebuffers at full capacity."""
        if hasattr(self, 'write_fbo'):
            # Free the previous GL objects before reallocating
            gl.glDeleteFramebuffers(2, [self.write_fbo, self.read_fbo])
            gl.glDeleteTextures(8, [self.write_color, self.write_upper, self.write_lower, self.write_attach, self.read_color, self.read_upper, self.read_lower, self.read_attach])
        self.write_color, self.write_upper, self.write_lower, self.write_attach, self.write_fbo = hardware_rendering_framebuffer(self.max_H, self.max_W, self.gl_tex_dtype)
        self.read_color, self.read_upper, self.read_lower, self.read_attach, self.read_fbo = hardware_rendering_framebuffer(self.max_H, self.max_W, self.gl_tex_dtype)
        log(f'Created texture of h, w: {self.max_H}, {self.max_W}')
class HardwareRendering(Splat):
    """
    Splat renderer with CUDA-GL interop: vertex data is written into the GL VBO
    directly from torch CUDA tensors, and the rendered color/depth textures are
    read back into torch tensors without a host round trip.
    """
    def __init__(self,
                 dtype=torch.half,
                 **kwargs,
                 ):
        # dtype of the interleaved vertex buffer shared between torch and GL
        self.dtype = getattr(torch, dtype) if isinstance(dtype, str) else dtype
        self.gl_dtype = gl.GL_HALF_FLOAT if self.dtype == torch.half else gl.GL_FLOAT
        kwargs = dotdict(kwargs)
        kwargs.blit_last_ratio = kwargs.get('blit_last_ratio', 0.90)  # only blit near the final passes
        kwargs.vert_sizes = kwargs.get('vert_sizes', [3, 3, 1, 1])
        super().__init__(**kwargs)  # verts, color, radius, alpha

    @property
    def verts_data(self):  # a heavy copy operation
        # NOTE(review): no .cpu() before .numpy(); assumes attributes live on CPU — confirm
        verts = torch.cat([self.verts, self.colors, self.radius, self.alpha], dim=-1).ravel().numpy()  # MARK: Maybe sync
        verts = np.asarray(verts, dtype=torch_dtype_to_numpy_dtype(self.dtype), order='C')  # this should only be invoked once
        return verts

    def init_gl_buffers(self, v: int = 0, f: int = 0):
        """(Re)create GL buffers and register the VBO with CUDA for direct writes."""
        from cuda import cudart
        if hasattr(self, 'cu_vbo'):
            # Release the stale CUDA handle before the VBO is reallocated
            CHECK_CUDART_ERROR(cudart.cudaGraphicsUnregisterResource(self.cu_vbo))
        super().init_gl_buffers(v, f)
        # Register vertex buffer obejct
        flags = cudart.cudaGraphicsRegisterFlags.cudaGraphicsRegisterFlagsWriteDiscard
        try:
            self.cu_vbo = CHECK_CUDART_ERROR(cudart.cudaGraphicsGLRegisterBuffer(self.vbo, flags))
        except RuntimeError as e:
            log(red(f'Your system does not support CUDA-GL interop, please use pytorch3d\'s implementation instead'))
            log(red(f'This can be done by specifying {blue("model_cfg.sampler_cfg.use_cudagl=False model_cfg.sampler_cfg.use_diffgl=False")} at the end of your command'))
            log(red(f'Note that this implementation is extremely slow, we recommend running on a native system that support the interop'))
            # raise RuntimeError(str(e) + ": This unrecoverable, please read the error message above")
            raise e

    def init_textures(self):
        """(Re)create the FBO textures and register the color/depth-lower pairs with CUDA."""
        from cuda import cudart
        if hasattr(self, 'cu_read_color'):
            # Textures are reallocated below; unregister the stale CUDA handles first
            CHECK_CUDART_ERROR(cudart.cudaGraphicsUnregisterResource(self.cu_read_color))
            CHECK_CUDART_ERROR(cudart.cudaGraphicsUnregisterResource(self.cu_write_color))
            CHECK_CUDART_ERROR(cudart.cudaGraphicsUnregisterResource(self.cu_read_lower))
            CHECK_CUDART_ERROR(cudart.cudaGraphicsUnregisterResource(self.cu_write_lower))
        super().init_textures()
        # Register image to read from (read-only from CUDA's point of view)
        flags = cudart.cudaGraphicsRegisterFlags.cudaGraphicsRegisterFlagsReadOnly
        self.cu_read_color = CHECK_CUDART_ERROR(cudart.cudaGraphicsGLRegisterImage(self.read_color, gl.GL_TEXTURE_2D, flags))
        self.cu_write_color = CHECK_CUDART_ERROR(cudart.cudaGraphicsGLRegisterImage(self.write_color, gl.GL_TEXTURE_2D, flags))
        self.cu_read_lower = CHECK_CUDART_ERROR(cudart.cudaGraphicsGLRegisterImage(self.read_lower, gl.GL_TEXTURE_2D, flags))
        self.cu_write_lower = CHECK_CUDART_ERROR(cudart.cudaGraphicsGLRegisterImage(self.write_lower, gl.GL_TEXTURE_2D, flags))

    def forward(self, xyz: torch.Tensor, rgb: torch.Tensor, rad: torch.Tensor, occ: torch.Tensor, batch: dotdict):
        """
        Renders a 3D point cloud using OpenGL and returns the rendered RGB image, accumulated alpha image, and depth map.
        Args:
            xyz (torch.Tensor): A tensor of shape (B, N, 3) containing the 3D coordinates of the points.
            rgb (torch.Tensor): A tensor of shape (B, N, 3) containing the RGB color values of the points.
            rad (torch.Tensor): A tensor of shape (B, N, 1) containing the radii of the points.
            occ (torch.Tensor): A tensor of shape (B, N, 1) containing the opacity of the points.
            batch (dotdict): A dictionary containing the camera parameters and other metadata for the batch.
        Returns:
            A tuple containing the rendered RGB image, accumulated alpha image, and depth map, all as torch.Tensors.
            The RGB image has shape (1, H, W, 3), the alpha image has shape (1, H, W, 1), and the depth map has shape (1, H, W, 1).
            The method first resizes the OpenGL texture to match the height and width of the output image. It then sets the OpenGL viewport and scissor to only render in the region of the viewport specified by the output image size.
            It concatenates the `xyz`, `rgb`, and `rad` tensors along the last dimension and flattens the result into a 1D tensor.
            The method then uploads the input data to OpenGL for rendering and performs depth peeling using OpenGL. The method uploads the camera parameters to OpenGL and renders the point cloud, saving the output buffer to the `back_fbo` attribute of the class.
            Finally, the method copies the rendered image and depth back to the CPU as torch.Tensors and reshapes them to match the output image size. The RGB image is returned with shape (1, H, W, 3), the accumulated alpha image is returned with shape (1, H, W, 1), and the depth map is returned with shape (1, H, W, 1).
        """
        from cuda import cudart
        kind = cudart.cudaMemcpyKind.cudaMemcpyDeviceToDevice
        # !: BATCH
        H, W = batch.meta.H[0].item(), batch.meta.W[0].item()
        self.resize_textures(H, W)  # maybe resize the texture
        self.resize_buffers(xyz.shape[1])  # maybe resize the buffer
        _, _, old_W, old_H = gl.glGetIntegerv(gl.GL_VIEWPORT)  # restore after rendering
        gl.glViewport(0, 0, W, H)
        gl.glScissor(0, 0, W, H)  # only render in this small region of the viewport
        # Prepare for input data: interleave into the VBO layout (xyz, rgb, rad, occ)
        data = torch.cat([xyz, rgb, rad, occ], dim=-1).type(self.dtype).ravel()
        # Upload to opengl for rendering (device-to-device copy into the mapped VBO)
        CHECK_CUDART_ERROR(cudart.cudaGraphicsMapResources(1, self.cu_vbo, torch.cuda.current_stream().cuda_stream))
        cu_vbo_ptr, cu_vbo_size = CHECK_CUDART_ERROR(cudart.cudaGraphicsResourceGetMappedPointer(self.cu_vbo))
        assert cu_vbo_size >= data.numel() * data.element_size(), f'PyTorch(CUDA) and OpenGL vertex buffer size mismatch ({data.numel() * data.element_size()} v.s. {cu_vbo_size}), CUDA side should be less than or equal to the OpenGL side'
        CHECK_CUDART_ERROR(cudart.cudaMemcpyAsync(cu_vbo_ptr,
                                                  data.data_ptr(),
                                                  data.numel() * data.element_size(),
                                                  kind,
                                                  torch.cuda.current_stream().cuda_stream))
        CHECK_CUDART_ERROR(cudart.cudaGraphicsUnmapResources(1, self.cu_vbo, torch.cuda.current_stream().cuda_stream))
        # Perform rasterization (depth peeling using OpenGL)
        if 'meta_stream' in batch.meta: batch.meta.meta_stream.synchronize()  # wait for gpu -> cpu copy to finish
        back_fbo = self.rasterize(Camera(batch=batch.meta), xyz.shape[-2])  # will upload and render, save output buffer to back_fbo
        # Copy rendered image and depth back as tensor
        cu_tex = self.cu_write_color if back_fbo == self.write_fbo else self.cu_read_color  # double buffered depth peeling
        cu_dpt = self.cu_write_lower if back_fbo == self.write_fbo else self.cu_read_lower  # double buffered depth peeling
        # Prepare the output # !: BATCH
        rgb_map = torch.empty((H, W, 4), dtype=self.tex_dtype, device='cuda')  # to hold the data from opengl
        dpt_map = torch.empty((H, W, 1), dtype=torch.float, device='cuda')  # to hold the data from opengl
        # The resources in resources may be accessed by CUDA until they are unmapped.
        # The graphics API from which resources were registered should not access any resources while they are mapped by CUDA.
        # If an application does so, the results are undefined.
        CHECK_CUDART_ERROR(cudart.cudaGraphicsMapResources(1, cu_tex, torch.cuda.current_stream().cuda_stream))
        CHECK_CUDART_ERROR(cudart.cudaGraphicsMapResources(1, cu_dpt, torch.cuda.current_stream().cuda_stream))
        cu_tex_arr = CHECK_CUDART_ERROR(cudart.cudaGraphicsSubResourceGetMappedArray(cu_tex, 0, 0))
        cu_dpt_arr = CHECK_CUDART_ERROR(cudart.cudaGraphicsSubResourceGetMappedArray(cu_dpt, 0, 0))
        CHECK_CUDART_ERROR(cudart.cudaMemcpy2DFromArrayAsync(rgb_map.data_ptr(),  # dst
                                                             W * 4 * rgb_map.element_size(),  # dpitch
                                                             cu_tex_arr,  # src
                                                             0,  # wOffset
                                                             0,  # hOffset
                                                             W * 4 * rgb_map.element_size(),  # width Width of matrix transfer (columns in bytes)
                                                             H,  # height
                                                             kind,  # kind
                                                             torch.cuda.current_stream().cuda_stream))  # stream
        CHECK_CUDART_ERROR(cudart.cudaMemcpy2DFromArrayAsync(dpt_map.data_ptr(),
                                                             W * 1 * dpt_map.element_size(),
                                                             cu_dpt_arr,
                                                             0,
                                                             0,
                                                             W * 1 * dpt_map.element_size(),
                                                             H,
                                                             kind,
                                                             torch.cuda.current_stream().cuda_stream))
        CHECK_CUDART_ERROR(cudart.cudaGraphicsUnmapResources(1, cu_tex, torch.cuda.current_stream().cuda_stream))  # MARK: SYNC
        CHECK_CUDART_ERROR(cudart.cudaGraphicsUnmapResources(1, cu_dpt, torch.cuda.current_stream().cuda_stream))  # MARK: SYNC
        # Ouput reshaping: add batch dim and flip vertically (GL bottom-left -> top-left)
        rgb_map, dpt_map = rgb_map[None].flip(1), dpt_map[None].flip(1)
        rgb_map, acc_map = rgb_map[..., :3], rgb_map[..., 3:]
        # Background pixels carry zero depth; push them to the far plane instead
        dpt_map = torch.where(dpt_map == 0, dpt_map.max(), dpt_map)
        # Some house keepings
        gl.glViewport(0, 0, old_W, old_H)
        gl.glScissor(0, 0, old_W, old_H)
        return rgb_map, acc_map, dpt_map
class HardwarePeeling(Splat):
    """
    Point splatting via hardware (OpenGL) depth peeling.

    Each of the `pts_per_pix` passes rasterizes per-pixel point *indices*
    (plus a per-pixel depth lower bound) into one of two ping-ponged FBOs.
    The collected index maps are then composited differentiably on the torch
    side (see `forward`), so only the rasterization itself is non-differentiable.

    FIX: removed a stray trailing line-continuation backslash at the end of
    `init_gl_buffers`'s last statement, which fused it with the following
    `def` line and made the class a syntax error.
    """

    def __init__(self,
                 dtype=torch.float,
                 **kwargs):
        # Allow passing dtype as a string (e.g. 'half') and resolve it on torch
        self.dtype = getattr(torch, dtype) if isinstance(dtype, str) else dtype
        self.gl_dtype = gl.GL_HALF_FLOAT if self.dtype == torch.half else gl.GL_FLOAT
        super().__init__(**kwargs,
                         blit_last_ratio=-10.0,
                         vert_sizes=[3, 1],
                         )  # verts, radius, index
        # from pytorch3d.renderer import AlphaCompositor
        # self.compositor = AlphaCompositor() # this the key to convergence, this is differentiable

    @property
    def verts_data(self):  # a heavy copy operation
        # Interleaved [xyz, radius] layout matching vert_sizes=[3, 1]
        verts = torch.cat([self.verts, self.radius], dim=-1).ravel().numpy()  # MARK: Maybe sync
        verts = np.asarray(verts, dtype=torch_dtype_to_numpy_dtype(self.dtype), order='C')  # this should only be invoked once
        return verts

    def init_gl_buffers(self, v: int = 0, f: int = 0):
        from cuda import cudart
        if hasattr(self, 'cu_vbo'):
            # The parent will reallocate the VBO, so drop the stale CUDA mapping first
            CHECK_CUDART_ERROR(cudart.cudaGraphicsUnregisterResource(self.cu_vbo))
        super().init_gl_buffers(v, f)

        # Register vertex buffer object so CUDA can upload vertex data into it
        flags = cudart.cudaGraphicsRegisterFlags.cudaGraphicsRegisterFlagsWriteDiscard
        self.cu_vbo = CHECK_CUDART_ERROR(cudart.cudaGraphicsGLRegisterBuffer(self.vbo, flags))

    def use_gl_program(self, program):
        super().use_gl_program(program)
        gl.glUseProgram(self.splat_program)  # use a different program
        # Texture units 0/1 hold the previous pass's index map and depth lower bound
        self.uniforms.read_index = gl.glGetUniformLocation(program, f'read_index')
        self.uniforms.read_lower = gl.glGetUniformLocation(program, f'read_lower')
        gl.glUniform1i(self.uniforms.read_index, 0)
        gl.glUniform1i(self.uniforms.read_lower, 1)

    def upload_gl_uniforms(self, camera: Camera):
        super().upload_gl_uniforms(camera)

    def compile_shaders(self):
        try:
            self.splat_program = shaders.compileProgram(
                shaders.compileShader(load_shader_source('idx_splat.vert'), gl.GL_VERTEX_SHADER),  # use the pass through quad shader
                shaders.compileShader(load_shader_source('idx_splat.frag'), gl.GL_FRAGMENT_SHADER)
            )
        except Exception as e:
            # Unescape the GLSL compiler log so line breaks render in the console
            print(str(e).encode('utf-8').decode('unicode_escape'))
            raise e

    def init_textures(self):
        from cuda import cudart
        if hasattr(self, 'cu_read_index'):
            # Resizing: unregister the old CUDA images before deleting the GL objects
            CHECK_CUDART_ERROR(cudart.cudaGraphicsUnregisterResource(self.cu_read_index))
            CHECK_CUDART_ERROR(cudart.cudaGraphicsUnregisterResource(self.cu_write_index))
            CHECK_CUDART_ERROR(cudart.cudaGraphicsUnregisterResource(self.cu_read_lower))
            CHECK_CUDART_ERROR(cudart.cudaGraphicsUnregisterResource(self.cu_write_lower))
        if hasattr(self, 'write_fbo'):
            gl.glDeleteFramebuffers(2, [self.write_fbo, self.read_fbo])
            gl.glDeleteTextures(6, [self.write_index, self.write_lower, self.write_attach, self.read_index, self.read_lower, self.read_attach])
        self.write_index, self.write_lower, self.write_attach, self.write_fbo = hareward_peeling_framebuffer(self.max_H, self.max_W)
        self.read_index, self.read_lower, self.read_attach, self.read_fbo = hareward_peeling_framebuffer(self.max_H, self.max_W)

        # Register image to read from
        flags = cudart.cudaGraphicsRegisterFlags.cudaGraphicsRegisterFlagsReadOnly
        self.cu_read_index = CHECK_CUDART_ERROR(cudart.cudaGraphicsGLRegisterImage(self.read_index, gl.GL_TEXTURE_2D, flags))
        self.cu_write_index = CHECK_CUDART_ERROR(cudart.cudaGraphicsGLRegisterImage(self.write_index, gl.GL_TEXTURE_2D, flags))
        self.cu_read_lower = CHECK_CUDART_ERROR(cudart.cudaGraphicsGLRegisterImage(self.read_lower, gl.GL_TEXTURE_2D, flags))
        self.cu_write_lower = CHECK_CUDART_ERROR(cudart.cudaGraphicsGLRegisterImage(self.write_lower, gl.GL_TEXTURE_2D, flags))
        log(f'Created texture of h, w: {self.max_H}, {self.max_W}')

    def rasterize_generator(self, camera: Camera = None, length: int = None):  # some implementation requires no uploading of camera
        """Yield the FBO written by each peeling pass, one pass at a time.

        The caller is expected to read the yielded framebuffer (via CUDA
        interop) before resuming the generator for the next pass.
        """
        front_fbo, front_index, front_lower = self.read_fbo, self.read_index, self.read_lower
        back_fbo, back_index, back_lower = self.write_fbo, self.write_index, self.write_lower

        # Only clear the output once
        gl.glBindFramebuffer(gl.GL_FRAMEBUFFER, front_fbo)  # for offscreen rendering to textures
        gl.glClearBufferiv(gl.GL_COLOR, 0, [-1])
        gl.glClearBufferfv(gl.GL_COLOR, 1, [0.0])
        gl.glClearBufferfv(gl.GL_DEPTH, 0, [1e9])  # this is for depth testing

        # Only clear the output once
        gl.glBindFramebuffer(gl.GL_FRAMEBUFFER, back_fbo)  # for offscreen rendering to textures
        gl.glClearBufferiv(gl.GL_COLOR, 0, [-1])
        gl.glClearBufferfv(gl.GL_COLOR, 1, [0.0])
        gl.glClearBufferfv(gl.GL_DEPTH, 0, [1e9])  # this is for depth testing

        # Prepare for the actual rendering, previous operations could rebind the vertex array
        self.use_gl_program(self.splat_program)
        self.upload_gl_uniforms(camera)
        gl.glBindVertexArray(self.vao)

        # The actual multi pass rendering process happens here
        for pass_index in range(self.pts_per_pix):
            # Swap buffers to render the next pass
            front_fbo, front_index, front_lower, back_fbo, back_index, back_lower = \
                back_fbo, back_index, back_lower, front_fbo, front_index, front_lower

            # Bind the read texture and bind the write render frame buffer
            gl.glBindTextures(0, 2, [front_index, front_lower])

            # Move content from write_fbo to screen fbo
            if pass_index > self.pts_per_pix * self.blit_last_ratio:  # no blitting almost has no effect on the rendering
                gl.glBindFramebuffer(gl.GL_READ_FRAMEBUFFER, front_fbo)
                gl.glBindFramebuffer(gl.GL_DRAW_FRAMEBUFFER, back_fbo)
                gl.glReadBuffer(gl.GL_COLOR_ATTACHMENT0 + 1)
                gl.glDrawBuffer(gl.GL_COLOR_ATTACHMENT0 + 1)
                gl.glBlitFramebuffer(0, 0, self.W, self.H, 0, 0, self.W, self.H, gl.GL_COLOR_BUFFER_BIT, gl.GL_NEAREST)
                gl.glBindFramebuffer(gl.GL_FRAMEBUFFER, back_fbo)  # for offscreen rendering to textures
                gl.glDrawBuffers(2, [gl.GL_COLOR_ATTACHMENT0, gl.GL_COLOR_ATTACHMENT1])
            else:
                # Only clear the output once
                gl.glBindFramebuffer(gl.GL_FRAMEBUFFER, back_fbo)  # for offscreen rendering to textures

            # Clear depth buffer for depth testing
            gl.glClearBufferiv(gl.GL_COLOR, 0, [-1])  # clear the indices buffer for later rendering and retrieving
            gl.glClearBufferfv(gl.GL_DEPTH, 0, [1e9])  # this is for depth testing

            # The actual drawing pass with render things out to the write_fbo
            gl.glDrawArrays(gl.GL_POINTS, 0, length if length is not None else len(self.verts))  # number of vertices
            yield back_fbo  # give the CUDA end a chance to read from this frame buffer after rendering

        # Restore states of things
        gl.glBindFramebuffer(gl.GL_FRAMEBUFFER, 0)
        gl.glBindVertexArray(0)
        return

    def forward(self,
                xyz: torch.Tensor, rgb: torch.Tensor, rad: torch.Tensor, occ: torch.Tensor,
                batch: dotdict,
                return_frags: bool = False,
                return_full: bool = False,
                ):
        """
        Get all indices from the depth peeling passes
        Compute the vertex weight here in torch(cuda)
        Use the indices to pass through a compositor
        The backward pass should only be valid on the torch side, and it should've been enough
        TODO: This function is too memory intensive
        TODO: Performing IBR is too memory intensive
        """
        # This the slow part, but not differentiable
        idx, _, _ = self.forward_idx(xyz, rad, batch)  # B, H, W, K
        msk = idx != -1  # B, H, W, K; -1 marks pixels with no point in this pass
        idx = torch.where(msk, idx, 0).long()

        # Sample things needed for computing screen space weight
        H, W, K, R, T, C = get_opencv_camera_params(batch)
        K, R, T, C = K.to(xyz.dtype), R.to(xyz.dtype), T.to(xyz.dtype), C.to(xyz.dtype)
        pix_xyz = (xyz @ R.mT + T.mT) @ K.mT  # B, P, 3
        pix_xyz_xy = pix_xyz[..., :-1] / (pix_xyz[..., -1:] + 1e-10)
        pix_rad = abs(K[..., 1, 1][..., None] * rad[..., 0] / (pix_xyz[..., -1] + 1e-10))  # z: B, 1 * B, N, world space radius

        mean_xy = multi_gather(pix_xyz_xy, idx.view(idx.shape[0], -1), dim=-2).view(*idx.shape, 2)  # B, HWK, 2 -> B, H, W, K, 2
        xy = create_meshgrid(H, W, idx.device, dtype=xyz.dtype).flip(-1)[None].expand(idx.shape[0], H, W, 2)  # create screen space xy (opencv)
        dists = (xy[..., None, :] - mean_xy).pow(2).sum(-1)  # B, H, W, K

        # Point values
        dpt = (xyz - C.mT).norm(dim=-1, keepdim=True)  # B, N, 1
        pix_occ = multi_gather(occ, idx.view(idx.shape[0], -1), dim=-2).view(*idx.shape)
        pix_rad = multi_gather(pix_rad, idx.view(idx.shape[0], -1), dim=-1).view(*idx.shape)  # -> B, H, W, K
        # Screen-space falloff: opacity decays with squared distance from the splat center
        pix_occ = pix_occ * (1 - dists / (pix_rad * pix_rad + 1e-10))  # B, H, W, K
        pix_occ = pix_occ.clip(0, 1)
        pix_occ = torch.where(msk, pix_occ, 0)

        if return_frags:
            return idx, pix_occ  # B, H, W, K

        # The actual computation
        rgb = torch.cat([rgb, occ, dpt], dim=-1)  # B, N, 3 + C
        pix_rgb = multi_gather(rgb, idx.view(idx.shape[0], -1), dim=-2).view(*idx.shape, rgb.shape[-1])  # B, H, W, K, -1
        _, rgb, _ = volume_rendering(pix_rgb, pix_occ[..., None])  # B, H, W, -1
        rgb, acc, dpt = rgb[..., :-2], rgb[..., -2:-1], rgb[..., -1:]
        dpt = dpt + (1 - acc) * dpt.max()  # only for the looks (rendered depth are already premultiplied)

        if return_full:
            return rgb, acc, dpt, idx, pix_occ
        else:
            return rgb, acc, dpt

    def forward_idx(self, xyz: torch.Tensor, rad: torch.Tensor, batch: dotdict):
        """Rasterize and return per-pixel (index, hit-mask, depth) maps of shape B, H, W, K."""
        from cuda import cudart
        kind = cudart.cudaMemcpyKind.cudaMemcpyDeviceToDevice

        # !: BATCH
        H, W = batch.meta.H[0].item(), batch.meta.W[0].item()
        self.resize_textures(H, W)  # maybe resize the texture
        self.resize_buffers(xyz.shape[1])  # maybe resize the buffer
        _, _, old_W, old_H = gl.glGetIntegerv(gl.GL_VIEWPORT)
        gl.glViewport(0, 0, W, H)
        gl.glScissor(0, 0, W, H)  # only render in this small region of the viewport

        # Prepare for input data
        data = torch.cat([xyz, rad], dim=-1).type(self.dtype).ravel()

        # Upload to opengl for rendering
        CHECK_CUDART_ERROR(cudart.cudaGraphicsMapResources(1, self.cu_vbo, torch.cuda.current_stream().cuda_stream))
        cu_vbo_ptr, cu_vbo_size = CHECK_CUDART_ERROR(cudart.cudaGraphicsResourceGetMappedPointer(self.cu_vbo))
        assert cu_vbo_size >= data.numel() * data.element_size(), f'PyTorch(CUDA) and OpenGL vertex buffer size mismatch ({data.numel() * data.element_size()} v.s. {cu_vbo_size}), CUDA side should be less than or equal to the OpenGL side'
        CHECK_CUDART_ERROR(cudart.cudaMemcpyAsync(cu_vbo_ptr,
                                                  data.data_ptr(),
                                                  data.numel() * data.element_size(),
                                                  kind,
                                                  torch.cuda.current_stream().cuda_stream))
        CHECK_CUDART_ERROR(cudart.cudaGraphicsUnmapResources(1, self.cu_vbo, torch.cuda.current_stream().cuda_stream))

        # Perform rasterization (depth peeling using OpenGL)
        if 'meta_stream' in batch.meta: batch.meta.meta_stream.synchronize()  # wait for gpu -> cpu copy to finish
        # FIXME: Strange bug occurs if batch parameter is passed in directly for the construction of Camera(batch=batch.meta)
        gen = self.rasterize_generator(Camera(batch=batch.meta), xyz.shape[-2])  # will upload and render, save output buffer to back_fbo

        ind_maps = []
        dpt_maps = []
        acc_maps = []
        for back_fbo in gen:
            # Copy rendered image and depth back as tensor
            cu_tex = self.cu_write_index if back_fbo == self.write_fbo else self.cu_read_index  # double buffered depth peeling
            cu_dpt = self.cu_write_lower if back_fbo == self.write_fbo else self.cu_read_lower  # double buffered depth peeling

            # Prepare the output # !: BATCH
            ind_map = torch.empty((H, W, 1), dtype=torch.int, device='cuda')  # to hold the data from opengl
            dpt_map = torch.empty((H, W, 1), dtype=torch.float, device='cuda')  # to hold the data from opengl

            # The resources in resources may be accessed by CUDA until they are unmapped.
            # The graphics API from which resources were registered should not access any resources while they are mapped by CUDA.
            # If an application does so, the results are undefined.
            CHECK_CUDART_ERROR(cudart.cudaGraphicsMapResources(1, cu_tex, torch.cuda.current_stream().cuda_stream))
            CHECK_CUDART_ERROR(cudart.cudaGraphicsMapResources(1, cu_dpt, torch.cuda.current_stream().cuda_stream))
            cu_tex_arr = CHECK_CUDART_ERROR(cudart.cudaGraphicsSubResourceGetMappedArray(cu_tex, 0, 0))
            cu_dpt_arr = CHECK_CUDART_ERROR(cudart.cudaGraphicsSubResourceGetMappedArray(cu_dpt, 0, 0))
            CHECK_CUDART_ERROR(cudart.cudaMemcpy2DFromArrayAsync(ind_map.data_ptr(),  # dst
                                                                 W * ind_map.shape[-1] * ind_map.element_size(),  # dpitch
                                                                 cu_tex_arr,  # src
                                                                 0,  # wOffset
                                                                 0,  # hOffset
                                                                 W * ind_map.shape[-1] * ind_map.element_size(),  # width Width of matrix transfer (columns in bytes)
                                                                 H,  # height
                                                                 kind,  # kind
                                                                 torch.cuda.current_stream().cuda_stream))  # stream
            CHECK_CUDART_ERROR(cudart.cudaMemcpy2DFromArrayAsync(dpt_map.data_ptr(),
                                                                 W * dpt_map.shape[-1] * dpt_map.element_size(),
                                                                 cu_dpt_arr,
                                                                 0,
                                                                 0,
                                                                 W * dpt_map.shape[-1] * dpt_map.element_size(),
                                                                 H,
                                                                 kind,
                                                                 torch.cuda.current_stream().cuda_stream))
            CHECK_CUDART_ERROR(cudart.cudaGraphicsUnmapResources(1, cu_tex, torch.cuda.current_stream().cuda_stream))  # MARK: SYNC
            CHECK_CUDART_ERROR(cudart.cudaGraphicsUnmapResources(1, cu_dpt, torch.cuda.current_stream().cuda_stream))  # MARK: SYNC

            # Ouput reshaping (flip vertically: OpenGL's origin is bottom-left)
            ind_map, dpt_map = ind_map[None].flip(1), dpt_map[None].flip(1)
            acc_map = ind_map != -1
            dpt_map = torch.where(dpt_map == 0, dpt_map.max(), dpt_map)
            ind_maps.append(ind_map)
            acc_maps.append(acc_map)
            dpt_maps.append(dpt_map)

        ind_map = torch.cat(ind_maps, dim=-1)  # B, H, W, K
        acc_map = torch.cat(acc_maps, dim=-1)  # B, H, W, K
        dpt_map = torch.cat(dpt_maps, dim=-1)  # B, H, W, K

        # Some house keepings
        gl.glViewport(0, 0, old_W, old_H)
        gl.glScissor(0, 0, old_W, old_H)
        return ind_map, acc_map, dpt_map
|
evocodebench_data_76
|
import re
from collections import OrderedDict
import torch
import torch.nn.functional as F
from transformers import BertConfig, GPT2Config, PretrainedConfig
from .configuration_nomic_bert import NomicBertConfig
def bert_config_to_nomic_config(bert_config: BertConfig) -> NomicBertConfig:
    """Translate a Huggingface BertConfig into an equivalent NomicBertConfig."""
    # Fields carried over directly from the BERT config
    kwargs = dict(
        vocab_size=bert_config.vocab_size,
        n_positions=bert_config.max_position_embeddings,  # No absolute position embedding
        n_embd=bert_config.hidden_size,
        n_layer=bert_config.num_hidden_layers,
        n_head=bert_config.num_attention_heads,
        n_inner=bert_config.intermediate_size,
        activation_function=bert_config.hidden_act,
        resid_pdrop=bert_config.hidden_dropout_prob,
        embd_pdrop=bert_config.hidden_dropout_prob,
        attn_pdrop=bert_config.attention_probs_dropout_prob,
        layer_norm_epsilon=bert_config.layer_norm_eps,
        initializer_range=bert_config.initializer_range,
        type_vocab_size=bert_config.type_vocab_size,
        bos_token_id=None,  # TODO: check this
        eos_token_id=None,
    )
    # Optional fields that older BERT configs may not carry; fall back to defaults
    kwargs.update(
        rotary_emb_fraction=getattr(bert_config, "rotary_emb_fraction", 0),
        qkv_proj_bias=getattr(bert_config, "qkv_proj_bias", True),
        rotary_emb_base=getattr(bert_config, "rotary_emb_base", 1000),
        rotary_emb_scale_base=getattr(bert_config, "rotary_emb_scale_base", None),
        rotary_emb_interleaved=getattr(bert_config, "rotary_emb_interleaved", False),
        mlp_fc1_bias=getattr(bert_config, "mlp_fc1_bias", True),
        mlp_fc2_bias=getattr(bert_config, "mlp_fc2_bias", True),
        use_rms_norm=getattr(bert_config, "use_rms_norm", False),
        pad_vocab_size_multiple=getattr(bert_config, "pad_vocab_to_multiple_of", 1),
        rotary_scaling_factor=getattr(bert_config, "rotary_scaling_factor", None),
    )
    # These are new arguments not in the original GPT2Config
    kwargs.update(
        prenorm=False,
        parallel_block=False,
        parallel_block_tied_norm=False,
        tie_word_embeddings=True,
        fused_dropout_add_ln=True,
        fused_bias_fc=True,
        use_flash_attn=True,
        use_xentropy=True,
        causal=False,
        dense_seq_output=True,
    )
    return NomicBertConfig(**kwargs)
def nomic_config_to_bert_config(gpt2_config: NomicBertConfig) -> BertConfig:
    """Translate a NomicBertConfig back into a Huggingface BertConfig."""
    mapped = {
        "vocab_size": gpt2_config.vocab_size,
        "hidden_size": gpt2_config.n_embd,
        "num_hidden_layers": gpt2_config.n_layer,
        "num_attention_heads": gpt2_config.n_head,
        "intermediate_size": gpt2_config.n_inner,
        "hidden_act": gpt2_config.activation_function,
        "hidden_dropout_prob": gpt2_config.resid_pdrop,
        "attention_probs_dropout_prob": gpt2_config.attn_pdrop,
        "max_position_embeddings": gpt2_config.n_positions,
        "type_vocab_size": gpt2_config.type_vocab_size,
        "initializer_range": gpt2_config.initializer_range,
        "layer_norm_eps": gpt2_config.layer_norm_epsilon,
    }
    # The following attributes do not have a direct equivalent in GPT2Config
    # and are set to commonly used defaults for BertConfig
    defaults = {
        "pad_token_id": 0,
        "position_embedding_type": "absolute",
        "use_cache": True,
    }
    return BertConfig(**mapped, **defaults)
def remap_bert_state_dict(
    state_dict,
    config: PretrainedConfig,
    remove_bert=False,
    remove_cls_weights=False,
    add_pooling_layer=False,
):
    """
    Map the state_dict of a Huggingface BERT model to be flash_attn compatible.

    Renames keys in a fixed order (prefixing, LayerNorm, layer indexing, MLP,
    attention) and fuses the separate Q/K/V projections into a single Wqkv
    (or Wq + Wkv for the last layer when config.last_layer_subset is set).

    Args:
        state_dict: checkpoint parameter mapping (name -> tensor).
        config: model config; reads num_hidden_layers, vocab_size and the
            optional last_layer_subset / pad_vocab_size_multiple attributes.
        remove_bert: strip the leading "bert." prefix from all keys at the end.
        remove_cls_weights: drop the MLM prediction-head weights entirely.
        add_pooling_layer: when False, the pooler weights are dropped.

    Returns:
        A new OrderedDict with remapped keys.
    """
    # NOTE(review): the '.' in the regex patterns below are unescaped and thus
    # match any character; harmless for these fixed key names.
    def add_bert_prefix(key):
        # prepend bert. to the key
        if key.startswith("bert.") or key.startswith("cls."):
            return key
        return f"bert.{key}"

    state_dict = OrderedDict((add_bert_prefix(k), v) for k, v in state_dict.items())

    # LayerNorm
    def key_mapping_ln_gamma_beta(key):
        # Old TF-style checkpoints name LayerNorm params gamma/beta
        key = re.sub(r"LayerNorm.gamma$", "LayerNorm.weight", key)
        key = re.sub(r"LayerNorm.beta$", "LayerNorm.bias", key)
        return key

    state_dict = OrderedDict((key_mapping_ln_gamma_beta(k), v) for k, v in state_dict.items())

    # Layers
    def key_mapping_layers(key):
        return re.sub(r"^bert.encoder.layer\.", "bert.encoder.layers.", key)

    state_dict = OrderedDict((key_mapping_layers(k), v) for k, v in state_dict.items())

    # LayerNorm
    def key_mapping_ln(key):
        key = re.sub(r"^bert.embeddings.LayerNorm.", "bert.emb_ln.", key)
        key = re.sub(
            r"^bert.encoder.layers.(\d+).attention.output.LayerNorm.(weight|bias)",
            r"bert.encoder.layers.\1.norm1.\2",
            key,
        )
        key = re.sub(
            r"^bert.encoder.layers.(\d+).output.LayerNorm.(weight|bias)",
            r"bert.encoder.layers.\1.norm2.\2",
            key,
        )
        key = re.sub(
            r"^cls.predictions.transform.LayerNorm.(weight|bias)",
            r"cls.predictions.transform.layer_norm.\1",
            key,
        )
        return key

    state_dict = OrderedDict((key_mapping_ln(k), v) for k, v in state_dict.items())

    # MLP
    def key_mapping_mlp(key):
        key = re.sub(
            r"^bert.encoder.layers.(\d+).intermediate.dense.(weight|bias)",
            r"bert.encoder.layers.\1.mlp.fc1.\2",
            key,
        )
        key = re.sub(
            r"^bert.encoder.layers.(\d+).output.dense.(weight|bias)",
            r"bert.encoder.layers.\1.mlp.fc2.\2",
            key,
        )
        return key

    state_dict = OrderedDict((key_mapping_mlp(k), v) for k, v in state_dict.items())

    # Attention: fuse the separate query/key/value projections
    last_layer_subset = getattr(config, "last_layer_subset", False)
    for d in range(config.num_hidden_layers):
        if f"bert.encoder.layers.{d}.attention.self.query.weight" not in state_dict:
            continue
        Wq = state_dict.pop(f"bert.encoder.layers.{d}.attention.self.query.weight")
        Wk = state_dict.pop(f"bert.encoder.layers.{d}.attention.self.key.weight")
        Wv = state_dict.pop(f"bert.encoder.layers.{d}.attention.self.value.weight")
        bq = state_dict.pop(f"bert.encoder.layers.{d}.attention.self.query.bias")
        bk = state_dict.pop(f"bert.encoder.layers.{d}.attention.self.key.bias")
        bv = state_dict.pop(f"bert.encoder.layers.{d}.attention.self.value.bias")
        if not (last_layer_subset and d == config.num_hidden_layers - 1):
            # Regular layer: single fused QKV projection
            state_dict[f"bert.encoder.layers.{d}.attn.Wqkv.weight"] = torch.cat([Wq, Wk, Wv], dim=0)
            state_dict[f"bert.encoder.layers.{d}.attn.Wqkv.bias"] = torch.cat([bq, bk, bv], dim=0)
        else:
            # Last layer with subset attention: separate Q and fused KV
            state_dict[f"bert.encoder.layers.{d}.attn.Wq.weight"] = Wq
            state_dict[f"bert.encoder.layers.{d}.attn.Wkv.weight"] = torch.cat([Wk, Wv], dim=0)
            state_dict[f"bert.encoder.layers.{d}.attn.Wq.bias"] = bq
            state_dict[f"bert.encoder.layers.{d}.attn.Wkv.bias"] = torch.cat([bk, bv], dim=0)

    def key_mapping_attn(key):
        return re.sub(
            r"^bert.encoder.layers.(\d+).attention.output.dense.(weight|bias)",
            r"bert.encoder.layers.\1.attn.out_proj.\2",
            key,
        )

    state_dict = OrderedDict((key_mapping_attn(k), v) for k, v in state_dict.items())

    def key_mapping_decoder_bias(key):
        return re.sub(r"^cls.predictions.bias", "cls.predictions.decoder.bias", key)

    # remove nsp weights, we don't use
    state_dict.pop("cls.seq_relationship.weight", None)
    state_dict.pop("cls.seq_relationship.bias", None)
    state_dict.pop("bert.embeddings.position_ids", None)

    state_dict = OrderedDict((key_mapping_decoder_bias(k), v) for k, v in state_dict.items())

    if remove_cls_weights:
        cls_weights = [
            "cls.predictions.decoder.bias",
            "cls.predictions.transform.dense.weight",
            "cls.predictions.transform.dense.bias",
            "cls.predictions.transform.layer_norm.weight",
            "cls.predictions.transform.layer_norm.bias",
            "cls.predictions.decoder.weight",
        ]
        for weight in cls_weights:
            state_dict.pop(weight, None)

    # Word embedding: pad the vocabulary dimension up to config.vocab_size
    pad_vocab_size_multiple = getattr(config, "pad_vocab_size_multiple", 1)
    if pad_vocab_size_multiple > 1:
        word_embeddings = state_dict["bert.embeddings.word_embeddings.weight"]
        state_dict["bert.embeddings.word_embeddings.weight"] = F.pad(
            word_embeddings, (0, 0, 0, config.vocab_size - word_embeddings.shape[0])
        )
        if not remove_cls_weights:
            decoder_weight = state_dict["cls.predictions.decoder.weight"]
            state_dict["cls.predictions.decoder.weight"] = F.pad(
                decoder_weight, (0, 0, 0, config.vocab_size - decoder_weight.shape[0])
            )
            # If the vocab was padded, we want to set the decoder bias for those padded indices to be
            # strongly negative (i.e. the decoder shouldn't predict those indices).
            # TD [2022-05-09]: I don't think it affects the MLPerf training.
            if "cls.predictions.decoder.bias" in state_dict:
                decoder_bias = state_dict["cls.predictions.decoder.bias"]
                state_dict["cls.predictions.decoder.bias"] = F.pad(
                    decoder_bias, (0, config.vocab_size - decoder_bias.shape[0]), value=-100.0
                )

    if add_pooling_layer is False:
        pooler_weights = [
            "bert.pooler.dense.weight",
            "bert.pooler.dense.bias",
        ]
        for key in pooler_weights:
            state_dict.pop(key, None)

    if remove_bert:

        def remove_bert_prefix(key):
            key = re.sub(r"^bert.", "", key)
            return key

        state_dict = OrderedDict((remove_bert_prefix(k), v) for k, v in state_dict.items())

    return state_dict
def inv_remap_state_dict(state_dict, config: PretrainedConfig):
    """
    Map the state_dict of a flash_attn model to be Huggingface BERT compatible.

    This function is meant to be the inverse of remap_state_dict.

    Args:
        state_dict: flash_attn-style checkpoint mapping ("bert."-prefixed keys,
            fused Wqkv / Wq + Wkv attention projections).
        config: model config; a GPT2Config/NomicBertConfig is first converted
            to a BertConfig.
            NOTE(review): the unpadding branch reads config.orig_vocab_size —
            assumed to be set whenever pad_vocab_size_multiple > 1; confirm
            against callers.

    Returns:
        The same (mutated) mapping with Huggingface BERT key names.
    """
    if isinstance(config, GPT2Config):
        config = nomic_config_to_bert_config(config)

    # Word embedding: undo the vocabulary padding applied by the forward remap
    pad_vocab_size_multiple = getattr(config, "pad_vocab_size_multiple", 1)
    if pad_vocab_size_multiple > 1:
        word_embeddings = state_dict["bert.embeddings.word_embeddings.weight"]
        decoder_weight = state_dict["cls.predictions.decoder.weight"]
        decoder_bias = state_dict["cls.predictions.decoder.bias"]
        # unpad embeddings
        state_dict["bert.embeddings.word_embeddings.weight"] = word_embeddings[: config.orig_vocab_size, :]
        state_dict["cls.predictions.decoder.weight"] = decoder_weight[: config.orig_vocab_size, :]
        state_dict["cls.predictions.decoder.bias"] = decoder_bias[: config.orig_vocab_size]

    # Split the fused attention projections back into separate query/key/value
    for d in range(config.num_hidden_layers):
        last_layer_subset = getattr(config, "last_layer_subset", False)
        if not last_layer_subset or d != (config.num_hidden_layers - 1):
            # Regular layer: Wqkv is [q; k; v] stacked along dim 0 in equal thirds
            Wqkv_weights = state_dict.pop(f"bert.encoder.layers.{d}.attn.Wqkv.weight")
            Wqkv_biases = state_dict.pop(f"bert.encoder.layers.{d}.attn.Wqkv.bias")
            state_dict[f"bert.encoder.layers.{d}.attention.self.query.weight"] = Wqkv_weights[
                : Wqkv_weights.shape[0] // 3, :
            ]
            state_dict[f"bert.encoder.layers.{d}.attention.self.key.weight"] = Wqkv_weights[
                Wqkv_weights.shape[0] // 3 : 2 * Wqkv_weights.shape[0] // 3, :
            ]
            state_dict[f"bert.encoder.layers.{d}.attention.self.value.weight"] = Wqkv_weights[
                2 * Wqkv_weights.shape[0] // 3 :, :
            ]
            state_dict[f"bert.encoder.layers.{d}.attention.self.query.bias"] = Wqkv_biases[: Wqkv_biases.shape[0] // 3]
            state_dict[f"bert.encoder.layers.{d}.attention.self.key.bias"] = Wqkv_biases[
                Wqkv_biases.shape[0] // 3 : 2 * Wqkv_biases.shape[0] // 3
            ]
            state_dict[f"bert.encoder.layers.{d}.attention.self.value.bias"] = Wqkv_biases[
                2 * Wqkv_biases.shape[0] // 3 :
            ]
        else:
            # Last layer with subset attention: separate Wq plus fused Wkv halves
            Wq_weight = state_dict.pop(f"bert.encoder.layers.{d}.attn.Wq.weight")
            Wkv_weights = state_dict.pop(f"bert.encoder.layers.{d}.attn.Wkv.weight")
            Wq_bias = state_dict.pop(f"bert.encoder.layers.{d}.attn.Wq.bias")
            Wkv_biases = state_dict.pop(f"bert.encoder.layers.{d}.attn.Wkv.bias")
            state_dict[f"bert.encoder.layers.{d}.attention.self.query.weight"] = Wq_weight
            state_dict[f"bert.encoder.layers.{d}.attention.self.key.weight"] = Wkv_weights[
                : Wkv_weights.shape[0] // 2, :
            ]
            state_dict[f"bert.encoder.layers.{d}.attention.self.value.weight"] = Wkv_weights[
                Wkv_weights.shape[0] // 2 :, :
            ]
            state_dict[f"bert.encoder.layers.{d}.attention.self.query.bias"] = Wq_bias
            state_dict[f"bert.encoder.layers.{d}.attention.self.key.bias"] = Wkv_biases[: Wkv_biases.shape[0] // 2]
            state_dict[f"bert.encoder.layers.{d}.attention.self.value.bias"] = Wkv_biases[Wkv_biases.shape[0] // 2 :]

    # NOTE(review): the '.' in the regex patterns below are unescaped and thus
    # match any character; harmless for these fixed key names.
    def inv_key_mapping_ln(key):
        key = re.sub(r"bert.emb_ln.", "bert.embeddings.LayerNorm.", key)
        key = re.sub(
            r"bert.encoder.layers.(\d+).norm1.(weight|bias)",
            r"bert.encoder.layers.\1.attention.output.LayerNorm.\2",
            key,
        )
        key = re.sub(
            r"bert.encoder.layers.(\d+).norm2.(weight|bias)",
            r"bert.encoder.layers.\1.output.LayerNorm.\2",
            key,
        )
        key = re.sub(
            r"cls.predictions.transform.layer_norm.(weight|bias)",
            r"cls.predictions.transform.LayerNorm.\1",
            key,
        )
        return key

    def inv_key_mapping_layers(key):
        return re.sub(r"bert.encoder.layers.", "bert.encoder.layer.", key)

    def inv_key_mapping_mlp(key):
        key = re.sub(
            r"bert.encoder.layer.(\d+).mlp.fc1.(weight|bias)",
            r"bert.encoder.layer.\1.intermediate.dense.\2",
            key,
        )
        key = re.sub(
            r"bert.encoder.layer.(\d+).mlp.fc2.(weight|bias)",
            r"bert.encoder.layer.\1.output.dense.\2",
            key,
        )
        return key

    def inv_key_mapping_attn(key):
        return re.sub(
            r"bert.encoder.layer.(\d+).attn.out_proj.(weight|bias)",
            r"bert.encoder.layer.\1.attention.output.dense.\2",
            key,
        )

    # Apply the renamings in order: LayerNorm first, then layer index style,
    # then MLP and attention output projections
    state_dict = OrderedDict((inv_key_mapping_ln(key), value) for key, value in state_dict.items())
    state_dict = OrderedDict((inv_key_mapping_layers(key), value) for key, value in state_dict.items())
    state_dict = OrderedDict((inv_key_mapping_mlp(key), value) for key, value in state_dict.items())
    state_dict = OrderedDict((inv_key_mapping_attn(key), value) for key, value in state_dict.items())

    # HF BERT keeps a duplicate of the decoder bias under cls.predictions.bias
    state_dict["cls.predictions.bias"] = state_dict["cls.predictions.decoder.bias"]

    return state_dict
|
evocodebench_data_77
|
from __future__ import annotations
from typing import TYPE_CHECKING
if TYPE_CHECKING:
from easyvolcap.utils.egl_utils import create_opengl_context, eglContextManager # must be imported before OpenGL.GL
from easyvolcap.runners.volumetric_video_viewer import VolumetricVideoViewer
import os
import sys
import glm
import torch
import ctypes
import numpy as np
from torch import nn
from enum import Enum, auto
from types import MethodType
from typing import Dict, Union, List
from glm import vec2, vec3, vec4, mat3, mat4, mat4x3, mat2x3 # This is actually highly optimized
from easyvolcap.utils.console_utils import *
from easyvolcap.utils.base_utils import dotdict
from easyvolcap.utils.viewer_utils import Camera
from easyvolcap.utils.bound_utils import get_bounds
from easyvolcap.utils.chunk_utils import multi_gather
from easyvolcap.utils.color_utils import cm_cpu_store
from easyvolcap.utils.ray_utils import create_meshgrid
from easyvolcap.utils.depth_utils import depth_curve_fn
from easyvolcap.utils.gaussian_utils import rgb2sh0, sh02rgb
from easyvolcap.utils.nerf_utils import volume_rendering, raw2alpha
from easyvolcap.utils.data_utils import load_pts, load_mesh, to_cuda, add_batch
from easyvolcap.utils.cuda_utils import CHECK_CUDART_ERROR, FORMAT_CUDART_ERROR
from easyvolcap.utils.net_utils import typed, torch_dtype_to_numpy_dtype, load_pretrained
from easyvolcap.utils.fcds_utils import prepare_feedback_transform, get_opencv_camera_params
# fmt: off
# Environment variable messaging
# Need to export EGL_DEVICE_ID before trying to import egl
# And we need to consider the case when we're performing distributed training
# from easyvolcap.engine import cfg, args # FIXME: GLOBAL IMPORTS
# Try to set up headless (EGL) rendering unless an interactive GUI viewer is
# requested; falls back to the default PyOpenGL platform on failure.
if 'easyvolcap.engine' in sys.modules and \
    (sys.modules['easyvolcap.engine'].args.type != 'gui' or \
     sys.modules['easyvolcap.engine'].cfg.viewer_cfg.type != 'VolumetricVideoViewer'):  # FIXME: GLOBAL VARIABLES
    try:
        from easyvolcap.utils.egl_utils import create_opengl_context, eglContextManager
    except Exception as e:
        # EGL unavailable (e.g. no GPU / driver support): warn and let
        # PyOpenGL pick its default platform instead
        log(yellow(f'Could not import EGL related modules. {type(e).__name__}: {e}'))
        os.environ['PYOPENGL_PLATFORM'] = ''
def is_wsl2():
    """Return True if the current environment is WSL2, False otherwise."""
    # bool(...) so the function honors its documented contract instead of
    # leaking the raw WSL_DISTRO_NAME string (or None) from os.environ.get;
    # truthiness is unchanged for existing callers
    return bool(exists("/etc/wsl.conf") and os.environ.get("WSL_DISTRO_NAME"))
# WSL2 lacks the default PyOpenGL platform; force GLX there
if is_wsl2():
    os.environ['PYOPENGL_PLATFORM'] = 'glx'

import OpenGL.GL as gl

try:
    from OpenGL.GL import shaders
except Exception as e:
    # Older PyOpenGL releases fail this import; point the user at the fix
    print(f'WARNING: OpenGL shaders import error encountered, please install the latest PyOpenGL from github using:')
    print(f'pip install git+https://github.com/mcfletch/pyopengl')
    raise e
# fmt: on
def linearize_depth(d, n: float, f: float):
    """Convert a [0, 1] depth-buffer value to linear view-space depth.

    The value is first mapped to NDC depth in [-1, 1], then inverted through
    the perspective projection defined by near plane `n` and far plane `f`.
    """
    ndc = d * 2 - 1  # 0-1 -> -1,1
    return (2.0 * n * f) / (f + n - ndc * (f - n))  # ndc -> view
def common_opengl_options():
    """Set the OpenGL state shared by all renderers in this module.

    Side-effect only: enables point-size control, culling, depth and scissor
    testing, and clears the color/depth buffers. Calls that older/newer GL
    versions reject (alpha test, point sprite) are wrapped in try/except.
    """
    # Use program point size
    gl.glEnable(gl.GL_PROGRAM_POINT_SIZE)

    # Performs face culling
    gl.glEnable(gl.GL_CULL_FACE)
    gl.glCullFace(gl.GL_BACK)

    # Performs alpha trans testing
    # gl.glEnable(gl.GL_ALPHA_TEST)
    # Deprecated in core profiles; ignore the error if unsupported
    try: gl.glEnable(gl.GL_ALPHA_TEST)
    except gl.GLError as e: pass

    # Performs z-buffer testing
    gl.glEnable(gl.GL_DEPTH_TEST)
    # gl.glDepthMask(gl.GL_TRUE)
    gl.glDepthFunc(gl.GL_LEQUAL)
    # gl.glDepthRange(-1.0, 1.0)
    gl.glClear(gl.GL_COLOR_BUFFER_BIT | gl.GL_DEPTH_BUFFER_BIT)

    # Enable some masking tests
    gl.glEnable(gl.GL_SCISSOR_TEST)

    # Enable this to correctly render points
    # https://community.khronos.org/t/gl-point-sprite-gone-in-3-2/59310
    # gl.glEnable(gl.GL_POINT_SPRITE) # MARK: ONLY SPRITE IS WORKING FOR NOW
    # Removed in GL 3.2 core; ignore the error if unsupported
    try: gl.glEnable(gl.GL_POINT_SPRITE)  # MARK: ONLY SPRITE IS WORKING FOR NOW
    except gl.GLError as e: pass
    # gl.glEnable(gl.GL_POINT_SMOOTH) # MARK: ONLY SPRITE IS WORKING FOR NOW

    # # Configure how we store the pixels in memory for our subsequent reading of the FBO to store the rendering into memory.
    # # The second argument specifies that our pixels will be in bytes.
    # gl.glPixelStorei(gl.GL_PACK_ALIGNMENT, 1)
def load_shader_source(file: str = 'splat.frag'):
    """Read and return the GLSL source for `file`.

    The path is tried as given, then under this module's ``shaders``
    directory, then with the ``shaders/`` component stripped again;
    raises RuntimeError when none of the candidates exist.
    """
    path = file
    if not exists(path):
        # Fall back to the shaders directory shipped next to this module
        path = f'{dirname(__file__)}/shaders/{path}'
    if not exists(path):
        path = path.replace('shaders/', '')
    if not exists(path):
        raise RuntimeError(f'Shader file: {path} does not exist')
    with open(path, 'r') as shader_file:
        return shader_file.read()
def use_gl_program(program: Union[shaders.ShaderProgram, dict]):
    """Bind `program` for rendering; a dict of GLSL sources is compiled first."""
    if not isinstance(program, dict):
        return gl.glUseProgram(program)
    # Recompile the program if the user supplied sources
    sources = dotdict(program)
    compiled = shaders.compileProgram(
        shaders.compileShader(sources.VERT_SHADER_SRC, gl.GL_VERTEX_SHADER),
        shaders.compileShader(sources.FRAG_SHADER_SRC, gl.GL_FRAGMENT_SHADER)
    )
    return gl.glUseProgram(compiled)
class Mesh:
class RenderType(Enum):
    # Primitive type used to interpret the face buffer; the enum value equals
    # the number of indices per primitive (except STRIPS)
    POINTS = 1
    LINES = 2
    TRIS = 3
    QUADS = 4  # TODO: Support quad loading
    STRIPS = 5

# Helper class to render a mesh on opengl
# This implementation should only be used for debug visualization
# Since no differentiable mechanism will be added
# We recommend using nvdiffrast and pytorch3d's point renderer directly if you will to optimize these structures directly
def __init__(self,
             verts: torch.Tensor = torch.tensor([[0, 0, 0], [0, 1, 0], [0, 0, 1]]),  # need to call update after update
             faces: torch.Tensor = torch.tensor([[0, 1, 2]]),  # need to call update after update
             colors: torch.Tensor = None,
             normals: torch.Tensor = None,
             scalars: dotdict[str, torch.Tensor] = dotdict(),
             render_type: RenderType = RenderType.TRIS,
             # Misc info
             name: str = 'mesh',
             filename: str = '',  # when given, overrides verts/faces by loading from disk
             visible: bool = True,
             # Render options
             shade_flat: bool = False,  # smooth shading
             point_radius: float = 0.015,
             render_normal: bool = False,
             # Storage options
             store_device: str = 'cpu',
             compute_device: str = 'cuda',
             vert_sizes=[3, 3, 3],  # pos + color + norm
             # Init options
             est_normal_thresh: int = 100000,
             # Ignore unused input
             **kwargs,
             ) -> None:
    """Create a debug-visualization mesh and set up its GL programs/buffers.

    NOTE(review): `scalars` and `vert_sizes` use mutable default arguments
    shared across instances — assumed to be used read-only; confirm.
    """
    super().__init__()
    self.name = name
    self.visible = visible
    self.render_type = render_type

    self.shade_flat = shade_flat
    self.point_radius = point_radius
    self.render_normal = render_normal

    self.store_device = store_device
    self.compute_device = compute_device
    self.vert_sizes = vert_sizes

    self.est_normal_thresh = est_normal_thresh

    # Uniform and program
    self.compile_shaders()
    self.uniforms = dotdict()  # uniform values

    # Before initialization
    self.max_verts = 0
    self.max_faces = 0

    # OpenGL data: prefer loading from file when a filename is supplied
    if filename: self.load_from_file(filename)
    else: self.load_from_data(verts, faces, colors, normals, scalars)
def compile_shaders(self):
    """Compile the mesh and point shader programs used by this object."""
    def build(vert: str, frag: str):
        # Compile one vertex + fragment shader pair into a linked program
        return shaders.compileProgram(
            shaders.compileShader(load_shader_source(vert), gl.GL_VERTEX_SHADER),
            shaders.compileShader(load_shader_source(frag), gl.GL_FRAGMENT_SHADER)
        )
    try:
        self.mesh_program = build('mesh.vert', 'mesh.frag')
        self.point_program = build('point.vert', 'point.frag')
    except Exception as e:
        # Unescape the GLSL compiler log so line breaks render in the console
        print(str(e).encode('utf-8').decode('unicode_escape'))
        raise e
@property
def n_verts_bytes(self):
return len(self.verts) * self.vert_size * self.verts.element_size()
@property
def n_faces_bytes(self):
return len(self.faces) * self.face_size * self.faces.element_size()
@property
def verts_data(self): # a heavy copy operation
verts = torch.cat([self.verts, self.colors, self.normals], dim=-1).ravel().numpy() # MARK: Maybe sync
verts = np.asarray(verts, dtype=np.float32, order='C')
return verts
@property
def faces_data(self): # a heavy copy operation
faces = self.faces.ravel().numpy() # N, 3
faces = np.asarray(faces, dtype=np.uint32, order='C')
return faces
@property
def face_size(self):
    # Number of vertex indices per primitive; the RenderType enum value
    # encodes exactly this count (e.g. TRIS -> 3), so reuse it directly
    return self.render_type.value
@property
def vert_size(self):
    # Total number of components per interleaved vertex
    # (e.g. 3 + 3 + 3 for position + color + normal)
    return sum(self.vert_sizes)
def load_from_file(self, filename: str = 'assets/meshes/bunny.ply'):
    """Load geometry from disk and upload it to the OpenGL buffers."""
    data = self.load_data_from_file(filename)
    self.load_from_data(*data)
def load_data_from_file(self, filename: str = 'assets/meshes/bunny.ply'):
    """Read geometry from ``filename`` and set ``name``/``render_type``.

    Returns a ``(verts, faces, colors, normals, scalars)`` tuple; entries
    the file does not provide stay ``None``.
    """
    self.name = os.path.split(filename)[-1]
    colors = normals = scalars = None
    verts, faces = load_mesh(filename, device=self.store_device)
    if len(faces):
        # Faces present: infer the primitive type from the face arity (use value)
        self.render_type = Mesh.RenderType(faces.shape[-1])
    else:
        # No faces: treat the file as a point cloud and reload the point extras
        verts, colors, normals, scalars = load_pts(filename)
        self.render_type = Mesh.RenderType.POINTS
    return verts, faces, colors, normals, scalars
def load_from_data(self, verts: torch.Tensor, faces: torch.Tensor, colors: torch.Tensor = None, normals: torch.Tensor = None, scalars: dotdict[str, torch.Tensor] = None):
    """Store geometry tensors on the instance and upload them to OpenGL.

    Missing colors are synthesized from normalized vertex positions; missing
    normals are estimated. ``scalars`` entries are attached as attributes.

    NOTE(review): the previous ``scalars`` default was a shared mutable
    ``dotdict()``; it is now a ``None`` sentinel with identical behavior.
    """
    if scalars is None: scalars = dotdict()

    # Data type conversion: only float32/float16 are uploaded as-is, any
    # other precision is downcast/converted to float32
    verts = torch.as_tensor(verts)  # convert to tensor if input is of other types
    if verts.dtype not in (torch.float32, torch.float16):
        verts = verts.type(torch.float)
    gl_dtype = gl.GL_FLOAT if verts.dtype == torch.float else gl.GL_HALF_FLOAT
    self.vert_gl_types = [gl_dtype] * len(self.vert_sizes)

    # Prepare main mesh data: vertices and faces
    self.verts = torch.as_tensor(verts, device=self.store_device)
    self.faces = torch.as_tensor(faces, device=self.store_device, dtype=torch.int32)  # NOTE: No uint32 support

    # Prepare colors and normals
    if colors is not None:
        self.colors = torch.as_tensor(colors, device=self.store_device, dtype=self.verts.dtype)
    else:
        # Fallback color: vertex position normalized into the mesh's bounding box
        bounds = get_bounds(self.verts[None])[0]
        self.colors = (self.verts - bounds[0]) / (bounds[1] - bounds[0])
    if normals is not None:
        self.normals = torch.as_tensor(normals, device=self.store_device, dtype=self.verts.dtype)
    else:
        self.estimate_vertex_normals()

    # Prepare other scalars, attached as plain attributes
    for k, v in scalars.items():
        setattr(self, k, torch.as_tensor(v, device=self.store_device, dtype=self.verts.dtype))  # is this ok?

    # Prepare OpenGL related buffer
    self.update_gl_buffers()
def estimate_vertex_normals(self):
def est_pcd_norms():
if self.verts.dtype == torch.half:
self.normals = self.verts
else:
from pytorch3d.structures import Pointclouds, Meshes
pcd = Pointclouds([self.verts]).to(self.compute_device)
self.normals = pcd.estimate_normals()[0].cpu().to(self.verts.dtype) # no batch dim
def est_tri_norms():
if self.verts.dtype == torch.half:
self.normals = self.verts
else:
from pytorch3d.structures import Pointclouds, Meshes
mesh = Meshes([self.verts], [self.faces]).to(self.compute_device)
self.normals = mesh.verts_normals_packed().cpu().to(self.verts.dtype) # no batch dim
if not len(self.verts) > self.est_normal_thresh:
if self.render_type == Mesh.RenderType.TRIS: est_tri_norms()
elif self.render_type == Mesh.RenderType.POINTS: est_pcd_norms()
else:
# log(yellow(f'Unsupported mesh type: {self.render_type} for normal estimation, skipping'))
self.normals = self.verts
else:
# log(yellow(f'Number of points for mesh too large: {len(self.verts)} > {self.est_normal_thresh}, skipping normal estimation'))
self.normals = self.verts
def offscreen_render(self, eglctx: "eglContextManager", camera: Camera):
    """Render into an offscreen EGL context.

    Resizes the EGL surface to the camera's resolution, then delegates to
    the regular ``render`` path.
    """
    eglctx.resize(camera.W, camera.H)
    self.render(camera)
def render(self, camera: Camera):
    """Draw this object with the given camera.

    Selects the point or mesh shader program based on ``render_type``,
    uploads camera/render uniforms, then issues the matching GL draw call.
    No-op when the object is hidden.
    """
    if not self.visible: return

    # For point rendering
    # NOTE(review): glUseProgram here looks redundant with use_gl_program
    # right below (which also binds the program) — confirm before removing
    if self.render_type == Mesh.RenderType.POINTS:
        gl.glUseProgram(self.point_program)
        self.use_gl_program(self.point_program)
    else:
        gl.glUseProgram(self.mesh_program)
        self.use_gl_program(self.mesh_program)

    self.upload_gl_uniforms(camera)  # requires the program bound above
    gl.glBindVertexArray(self.vao)

    # Indexed draws bind the EBO; array draws use the vertex order directly
    if self.render_type == Mesh.RenderType.POINTS:
        gl.glDrawArrays(gl.GL_POINTS, 0, len(self.verts))  # number of vertices
    elif self.render_type == Mesh.RenderType.LINES:
        gl.glBindBuffer(gl.GL_ELEMENT_ARRAY_BUFFER, self.ebo)
        gl.glDrawElements(gl.GL_LINES, len(self.faces) * self.face_size, gl.GL_UNSIGNED_INT, ctypes.c_void_p(0))  # number of indices
    elif self.render_type == Mesh.RenderType.TRIS:
        gl.glBindBuffer(gl.GL_ELEMENT_ARRAY_BUFFER, self.ebo)
        gl.glDrawElements(gl.GL_TRIANGLES, len(self.faces) * self.face_size, gl.GL_UNSIGNED_INT, ctypes.c_void_p(0))  # number of indices
    elif self.render_type == Mesh.RenderType.QUADS:
        gl.glBindBuffer(gl.GL_ELEMENT_ARRAY_BUFFER, self.ebo)
        gl.glDrawElements(gl.GL_QUADS, len(self.faces) * self.face_size, gl.GL_UNSIGNED_INT, ctypes.c_void_p(0))  # number of indices
    elif self.render_type == Mesh.RenderType.STRIPS:
        gl.glDrawArrays(gl.GL_TRIANGLE_STRIP, 0, len(self.verts))
    else:
        raise NotImplementedError

    gl.glBindVertexArray(0)
def use_gl_program(self, program: shaders.ShaderProgram):
    """Bind ``program`` and cache the uniform locations this mesh uploads."""
    use_gl_program(program)
    # Look up each uniform handle once per program switch; these handles are
    # consumed by upload_gl_uniforms
    for uniform in ('shade_flat', 'point_radius', 'render_normal',
                    'H', 'W', 'n', 'f', 'P', 'K', 'V', 'M'):
        self.uniforms[uniform] = gl.glGetUniformLocation(program, uniform)
def upload_gl_uniforms(self, camera: Camera):
    """Upload render options and camera matrices to the bound program.

    Uniform locations must have been cached by ``use_gl_program`` first.
    """
    K = camera.gl_ixt  # hold the reference
    V = camera.gl_ext  # hold the reference
    M = glm.identity(mat4)  # model matrix: identity, object space == world space
    P = K * V * M  # full object-to-clip transform

    gl.glUniform1i(self.uniforms.shade_flat, self.shade_flat)
    gl.glUniform1f(self.uniforms.point_radius, self.point_radius)
    gl.glUniform1i(self.uniforms.render_normal, self.render_normal)
    gl.glUniform1i(self.uniforms.H, camera.H)  # image height
    gl.glUniform1i(self.uniforms.W, camera.W)  # image width
    gl.glUniform1f(self.uniforms.n, camera.n)  # presumably the near plane — confirm against shader
    gl.glUniform1f(self.uniforms.f, camera.f)  # presumably the far plane — confirm against shader
    gl.glUniformMatrix4fv(self.uniforms.P, 1, gl.GL_FALSE, glm.value_ptr(P))  # o2clip
    gl.glUniformMatrix4fv(self.uniforms.K, 1, gl.GL_FALSE, glm.value_ptr(K))  # c2clip
    gl.glUniformMatrix4fv(self.uniforms.V, 1, gl.GL_FALSE, glm.value_ptr(V))  # w2c
    gl.glUniformMatrix4fv(self.uniforms.M, 1, gl.GL_FALSE, glm.value_ptr(M))  # o2w
def update_gl_buffers(self):
    """Upload the current verts/faces to the GPU, growing buffers if needed."""
    # Might be overwritten
    self.resize_buffers(len(self.verts) if hasattr(self, 'verts') else 0,
                        len(self.faces) if hasattr(self, 'faces') else 0)  # maybe repeated

    if hasattr(self, 'verts'):
        gl.glBindBuffer(gl.GL_ARRAY_BUFFER, self.vbo)
        gl.glBufferSubData(gl.GL_ARRAY_BUFFER, 0, self.n_verts_bytes, self.verts_data)  # hold the reference
    if hasattr(self, 'faces'):
        gl.glBindBuffer(gl.GL_ELEMENT_ARRAY_BUFFER, self.ebo)
        gl.glBufferSubData(gl.GL_ELEMENT_ARRAY_BUFFER, 0, self.n_faces_bytes, self.faces_data)
def resize_buffers(self, v: int = 0, f: int = 0):
if v > self.max_verts or f > self.max_faces:
if v > self.max_verts: self.max_verts = v
if f > self.max_faces: self.max_faces = f
self.init_gl_buffers(v, f)
def init_gl_buffers(self, v: int = 0, f: int = 0):
    """(Re)allocate the VAO/VBO/EBO for at least ``v`` verts and ``f`` faces.

    Existing GL objects are deleted first. Buffers are allocated empty
    (DYNAMIC_DRAW) and filled later by ``update_gl_buffers``.
    """
    # This will only init the corresponding buffer object
    n_verts_bytes = v * self.vert_size * self.verts.element_size() if v > 0 else self.n_verts_bytes
    n_faces_bytes = f * self.face_size * self.faces.element_size() if f > 0 else self.n_faces_bytes

    # Housekeeping: free the previous GL objects before reallocating
    if hasattr(self, 'vao'):
        gl.glDeleteVertexArrays(1, [self.vao])
        gl.glDeleteBuffers(2, [self.vbo, self.ebo])

    self.vao = gl.glGenVertexArrays(1)
    self.vbo = gl.glGenBuffers(1)
    self.ebo = gl.glGenBuffers(1)

    gl.glBindVertexArray(self.vao)
    gl.glBindBuffer(gl.GL_ARRAY_BUFFER, self.vbo)
    gl.glBufferData(gl.GL_ARRAY_BUFFER, n_verts_bytes, ctypes.c_void_p(0), gl.GL_DYNAMIC_DRAW)  # NOTE: Using pointers here won't work
    # https://stackoverflow.com/questions/67195932/pyopengl-cannot-render-any-vao

    # Describe the interleaved vertex layout: one attribute slot per entry
    # in vert_sizes, offset by the running component count
    cumsum = 0
    for i, (s, t) in enumerate(zip(self.vert_sizes, self.vert_gl_types)):
        gl.glVertexAttribPointer(i, s, t, gl.GL_FALSE, self.vert_size * self.verts.element_size(), ctypes.c_void_p(cumsum * self.verts.element_size()))  # we use 32 bit float
        gl.glEnableVertexAttribArray(i)
        cumsum += s

    if n_faces_bytes > 0:
        # Some implementation has no faces, we dangerously ignore ebo here, assuming they will never be used
        gl.glBindBuffer(gl.GL_ELEMENT_ARRAY_BUFFER, self.ebo)
        gl.glBufferData(gl.GL_ELEMENT_ARRAY_BUFFER, n_faces_bytes, ctypes.c_void_p(0), gl.GL_DYNAMIC_DRAW)
    gl.glBindVertexArray(0)
def render_imgui(mesh, viewer: 'VolumetricVideoViewer', batch: dotdict):
    """Draw the per-mesh GUI controls inside the viewer's mesh list.

    ``mesh`` is the instance (``self`` renamed). ``batch`` carries the list
    index ``i``, a shared ``will_delete`` list to which this row appends its
    index when the delete button is pressed, and the ``slider_width``.
    """
    from imgui_bundle import imgui
    from easyvolcap.utils.imgui_utils import push_button_color, pop_button_color

    i = batch.i
    will_delete = batch.will_delete
    slider_width = batch.slider_width

    imgui.push_item_width(slider_width * 0.5)
    mesh.name = imgui.input_text(f'Mesh name##{i}', mesh.name)[1]

    # Primitive type combo box
    if imgui.begin_combo(f'Mesh type##{i}', mesh.render_type.name):
        for t in Mesh.RenderType:
            if imgui.selectable(t.name, mesh.render_type == t)[1]:
                mesh.render_type = t  # construct enum from name
            if mesh.render_type == t:
                imgui.set_item_default_focus()
        imgui.end_combo()
    imgui.pop_item_width()

    # Optional sliders/toggles: only rendered when the instance has the attribute
    if hasattr(mesh, 'point_radius'):
        mesh.point_radius = imgui.slider_float(f'Point radius##{i}', mesh.point_radius, 0.0005, 3.0)[1]  # 0.1mm

    if hasattr(mesh, 'pts_per_pix'):
        mesh.pts_per_pix = imgui.slider_int('Point per pixel', mesh.pts_per_pix, 0, 60)[1]  # 0.1mm

    if hasattr(mesh, 'shade_flat'):
        push_button_color(0x55cc33ff if not mesh.shade_flat else 0x8855aaff)
        if imgui.button(f'Smooth##{i}' if not mesh.shade_flat else f' Flat ##{i}'):
            mesh.shade_flat = not mesh.shade_flat
        pop_button_color()

    if hasattr(mesh, 'render_normal'):
        imgui.same_line()
        push_button_color(0x55cc33ff if not mesh.render_normal else 0x8855aaff)
        if imgui.button(f'Color ##{i}' if not mesh.render_normal else f'Normal##{i}'):
            mesh.render_normal = not mesh.render_normal
        pop_button_color()

    if hasattr(mesh, 'visible'):
        imgui.same_line()
        push_button_color(0x55cc33ff if not mesh.visible else 0x8855aaff)
        if imgui.button(f'Show##{i}' if not mesh.visible else f'Hide##{i}'):
            mesh.visible = not mesh.visible
        pop_button_color()

    # Render the delete button
    imgui.same_line()
    push_button_color(0xff5533ff)
    if imgui.button(f'Delete##{i}'):
        will_delete.append(i)
    pop_button_color()
class Quad(Mesh):
    """A texture shared between CUDA (pytorch) and OpenGL.

    The texture can be shown on screen either by drawing a fullscreen
    triangle-strip quad that samples it (``use_quad_draw``) or by blitting
    its framebuffer. When ``use_quad_cuda`` is on, tensors are copied into
    the texture device-to-device via CUDA-GL interop; otherwise a slower
    CPU upload path is used. ``compose`` enables alpha blending of the new
    content onto the already-rendered frame.
    """
    # A shared texture for CUDA (pytorch) and OpenGL
    # Could be rendered to screen using blitting or just drawing a quad
    def __init__(self,
                 H: int = 256, W: int = 256,
                 use_quad_draw: bool = True,  # draw a textured quad (otherwise blit the FBO)
                 use_quad_cuda: bool = True,  # device-to-device copy via CUDA-GL interop
                 compose: bool = False,  # alpha-compose onto the current frame content
                 compose_power: float = 1.0,
                 ):  # the texture to blit
        # NOTE(review): deliberately does NOT call super().__init__();
        # the Mesh attributes that the inherited machinery needs are set
        # up by hand below — confirm before "fixing" this
        self.use_quad_draw = use_quad_draw
        self.use_quad_cuda = use_quad_cuda
        self.vert_sizes = [3]  # only position
        self.vert_gl_types = [gl.GL_FLOAT]  # only position
        self.render_type = Mesh.RenderType.STRIPS  # remove side effects of settings _type
        self.max_verts, self.max_faces = 0, 0
        # Fullscreen quad in clip space, drawn as a triangle strip
        self.verts = torch.as_tensor([[-1., -1., 0.5],
                                      [1., -1., 0.5],
                                      [-1., 1., 0.5],
                                      [1., 1., 0.5],])
        self.update_gl_buffers()
        self.compile_shaders()

        self.max_H, self.max_W = H, W
        self.H, self.W = H, W
        self.compose = compose
        self.compose_power = compose_power

        self.init_texture()

    @property
    def n_faces_bytes(self): return 0  # the quad is drawn as arrays, no EBO

    def use_gl_program(self, program: shaders.ShaderProgram):
        """Cache the base uniforms plus the quad's texture sampler binding."""
        super().use_gl_program(program)
        self.uniforms.tex = gl.glGetUniformLocation(program, 'tex')
        gl.glUseProgram(self.quad_program)  # use a different program
        gl.glUniform1i(self.uniforms.tex, 0)  # sampler reads texture unit 0

    def compile_shaders(self):
        """Compile the quad blitting shader into ``self.quad_program``."""
        try:
            self.quad_program = shaders.compileProgram(
                shaders.compileShader(load_shader_source('quad.vert'), gl.GL_VERTEX_SHADER),
                shaders.compileShader(load_shader_source('quad.frag'), gl.GL_FRAGMENT_SHADER)
            )
        except Exception as e:
            print(str(e).encode('utf-8').decode('unicode_escape'))
            raise e

    def resize_textures(self, H: int, W: int):  # analogy to update_gl_buffers
        """Resize the backing texture, reallocating (with 5% slack) if it grew."""
        self.H, self.W = H, W
        if self.H > self.max_H or self.W > self.max_W:  # max got updated
            self.max_H, self.max_W = max(int(self.H * 1.05), self.max_H), max(int(self.W * 1.05), self.max_W)
            self.init_texture()

    def init_texture(self):
        """(Re)create the GL texture, its FBO, and the CUDA-GL registration."""
        # Unregister/delete the previous resources first, if any
        if hasattr(self, 'cu_tex'):
            from cuda import cudart
            CHECK_CUDART_ERROR(cudart.cudaGraphicsUnregisterResource(self.cu_tex))

        if hasattr(self, 'fbo'):
            gl.glDeleteFramebuffers(1, [self.fbo])
            gl.glDeleteTextures(1, [self.tex])

        # Init the texture to be blit onto the screen
        self.tex = gl.glGenTextures(1)
        gl.glBindTexture(gl.GL_TEXTURE_2D, self.tex)
        gl.glTexImage2D(gl.GL_TEXTURE_2D, 0, gl.GL_RGBA8, self.max_W, self.max_H, 0, gl.GL_RGBA, gl.GL_UNSIGNED_BYTE, ctypes.c_void_p(0))
        gl.glTexParameteri(gl.GL_TEXTURE_2D, gl.GL_TEXTURE_MAG_FILTER, gl.GL_NEAREST)
        gl.glTexParameteri(gl.GL_TEXTURE_2D, gl.GL_TEXTURE_MIN_FILTER, gl.GL_NEAREST)

        # Init the framebuffer object if explicit blitting is used (slower than drawing quad)
        self.fbo = gl.glGenFramebuffers(1)
        old_fbo = gl.glGetIntegerv(gl.GL_FRAMEBUFFER_BINDING)
        gl.glBindFramebuffer(gl.GL_FRAMEBUFFER, self.fbo)
        gl.glFramebufferTexture2D(gl.GL_FRAMEBUFFER, gl.GL_COLOR_ATTACHMENT0, gl.GL_TEXTURE_2D, self.tex, 0)
        gl.glBindFramebuffer(gl.GL_FRAMEBUFFER, old_fbo)

        if self.use_quad_cuda:
            from cuda import cudart
            if self.compose:
                # Both reading and writing of this resource is required
                flags = cudart.cudaGraphicsRegisterFlags.cudaGraphicsRegisterFlagsNone
            else:
                flags = cudart.cudaGraphicsRegisterFlags.cudaGraphicsRegisterFlagsWriteDiscard
            try:
                self.cu_tex = CHECK_CUDART_ERROR(cudart.cudaGraphicsGLRegisterImage(self.tex, gl.GL_TEXTURE_2D, flags))
            except RuntimeError as e:
                # Interop unavailable (e.g. no CUDA device) — degrade gracefully
                log(red('Failed to initialize Quad with CUDA-GL interop, will use slow upload: '), e)
                self.use_quad_cuda = False

    def copy_to_texture(self, image: torch.Tensor, x: int = 0, y: int = 0, w: int = 0, h: int = 0):
        """Copy an RGBA (or RGB) image tensor into the texture.

        Uses CUDA-GL interop when available, otherwise falls back to the
        CPU upload path. With ``compose`` enabled, alpha-blends ``image``
        over the already-rendered frame content before writing back.
        """
        if not self.use_quad_cuda:
            self.upload_to_texture(image)
            return

        if not hasattr(self, 'cu_tex'):
            self.init_texture()

        # assert self.use_quad_cuda, "Need to enable cuda-opengl interop to copy from device to device, check creation of this Quad"
        w = w or self.W
        h = h or self.H
        if image.shape[-1] == 3:
            image = torch.cat([image, image.new_ones(image.shape[:-1] + (1,)) * 255], dim=-1)  # add alpha channel

        from cuda import cudart
        kind = cudart.cudaMemcpyKind.cudaMemcpyDeviceToDevice
        CHECK_CUDART_ERROR(cudart.cudaGraphicsMapResources(1, self.cu_tex, torch.cuda.current_stream().cuda_stream))
        cu_tex_arr = CHECK_CUDART_ERROR(cudart.cudaGraphicsSubResourceGetMappedArray(self.cu_tex, 0, 0))

        if self.compose:
            """
            Blit current framebuffer to this texture (self.tex)
            Read content of this texture into a cuda buffer
            Perform alpha blending based on the frame's alpha channel
            Copy the blended image back into the texture (self.tex)
            """
            old = gl.glGetInteger(gl.GL_DRAW_FRAMEBUFFER_BINDING)
            gl.glBindFramebuffer(gl.GL_DRAW_FRAMEBUFFER, self.fbo)  # read buffer defaults to 0
            gl.glBlitFramebuffer(x, y, w, h,
                                 x, y, w, h,
                                 gl.GL_COLOR_BUFFER_BIT, gl.GL_NEAREST)  # now self.tex contains the content of the already rendered frame
            gl.glBindFramebuffer(gl.GL_DRAW_FRAMEBUFFER, old)

            buffer = torch.empty_like(image)
            CHECK_CUDART_ERROR(cudart.cudaMemcpy2DFromArrayAsync(buffer.data_ptr(),  # dst
                                                                 w * 4 * buffer.element_size(),  # dpitch
                                                                 cu_tex_arr,  # src
                                                                 x * 4 * image.element_size(),  # wOffset
                                                                 y,  # hOffset
                                                                 w * 4 * buffer.element_size(),  # width Width of matrix transfer (columns in bytes)
                                                                 h,  # height
                                                                 kind,  # kind
                                                                 torch.cuda.current_stream().cuda_stream))  # stream

            # cv2.imwrite('image.png', image.flip(0).detach().cpu().numpy()[..., [2,1,0,3]])
            alpha = image[..., -1:] / 255
            image[..., :-1] = buffer[..., :-1] * (1 - alpha ** self.compose_power) + image[..., :-1] * alpha  # storing float into int
            image[..., -1:] = buffer[..., -1:] + image[..., -1:]
            image = image.clip(0, 255)

        CHECK_CUDART_ERROR(cudart.cudaMemcpy2DToArrayAsync(cu_tex_arr,
                                                           x * 4 * image.element_size(),
                                                           y,
                                                           image.data_ptr(),
                                                           w * 4 * image.element_size(),  # differently sized
                                                           w * 4 * image.element_size(),  # rgba, should do a composition first
                                                           h,
                                                           kind,
                                                           torch.cuda.current_stream().cuda_stream))
        CHECK_CUDART_ERROR(cudart.cudaGraphicsUnmapResources(1, self.cu_tex, torch.cuda.current_stream().cuda_stream))

    def upload_to_texture(self, ptr: np.ndarray, x: int = 0, y: int = 0, w: int = 0, h: int = 0):
        """Slow fallback: upload a host array (or tensor) via glTexSubImage2D."""
        w = w or self.W
        h = h or self.H
        if isinstance(ptr, torch.Tensor):
            ptr = ptr.detach().cpu().numpy()  # slow sync and copy operation # MARK: SYNC
        gl.glBindTexture(gl.GL_TEXTURE_2D, self.tex)
        gl.glTexSubImage2D(gl.GL_TEXTURE_2D, 0, x, y, w, h, gl.GL_RGBA, gl.GL_UNSIGNED_BYTE, ptr[y:h, x:w])  # to gpu, might slow down?

    @property
    def verts_data(self):  # a heavy copy operation
        # Quad verts are position-only (no color/normal interleaving)
        verts = self.verts.ravel().detach().cpu().numpy()  # MARK: Maybe sync
        verts = np.asarray(verts, dtype=np.float32, order='C')
        return verts

    def render(self, camera: Camera = None):
        # The camera is ignored: the quad simply displays its texture
        self.draw()  # no uploading needed

    def draw(self, x: int = 0, y: int = 0, w: int = 0, h: int = 0):
        """
        Upload the texture instead of the camera
        This respects the OpenGL convension of lower left corners
        """
        if not self.use_quad_draw:
            self.blit(x, y, w, h)
            return

        w = w or self.W
        h = h or self.H
        _, _, W, H = gl.glGetIntegerv(gl.GL_VIEWPORT)
        gl.glViewport(x, y, w, h)
        gl.glScissor(x, y, w, h)  # only render in this small region of the viewport
        gl.glUseProgram(self.quad_program)  # use a different program
        gl.glActiveTexture(gl.GL_TEXTURE0)
        gl.glBindTexture(gl.GL_TEXTURE_2D, self.tex)
        gl.glBindVertexArray(self.vao)
        gl.glDrawArrays(gl.GL_TRIANGLE_STRIP, 0, len(self.verts))
        gl.glBindVertexArray(0)

        # Some house keepings: restore the full viewport/scissor
        gl.glViewport(0, 0, W, H)
        gl.glScissor(0, 0, W, H)

    def blit(self, x: int = 0, y: int = 0, w: int = 0, h: int = 0):
        """
        This respects the OpenGL convension of lower left corners
        """
        w = w or self.W
        h = h or self.H
        old = gl.glGetInteger(gl.GL_READ_FRAMEBUFFER_BINDING)
        gl.glBindFramebuffer(gl.GL_READ_FRAMEBUFFER, self.fbo)  # write buffer defaults to 0
        gl.glBlitFramebuffer(x, y, x + w, y + h,  # the height is flipped
                             x, y, x + w, y + h,  # the height is flipped
                             gl.GL_COLOR_BUFFER_BIT, gl.GL_NEAREST)
        gl.glBindFramebuffer(gl.GL_READ_FRAMEBUFFER, old)
class UQuad(Mesh):
    """
    Responsible for initializing textures with a single value
    or blitting a texture to a framebuffer (possibly better done with blit instead of quad drawing)
    Effectively clearing the texture for real, see: https://stackoverflow.com/questions/37335281/is-glcleargl-color-buffer-bit-preferred-before-a-whole-frame-buffer-overwritte
    """
    def __init__(self):
        # NOTE(review): deliberately skips Mesh.__init__; only the fields the
        # quad drawing path needs are set up here — confirm before changing
        self.n_blit_values = 3
        self.vert_sizes = [3]  # only position
        self.vert_gl_types = [gl.GL_FLOAT]  # only position
        self.max_verts, self.max_faces = 0, 0
        # Fullscreen quad in clip space, drawn as a triangle strip
        self.verts = torch.as_tensor([[-1., -1., 0.5],
                                      [1., -1., 0.5],
                                      [-1., 1., 0.5],
                                      [1., 1., 0.5],])
        self.compile_shaders()
        self.uniforms = dotdict()  # uniform values
        self.use_gl_programs(self.quad_program)
        self.update_gl_buffers()

    @property
    def n_faces_bytes(self): return 0  # drawn as arrays, no EBO

    @property
    def verts_data(self):  # a heavy copy operation
        # Position-only vertices, flattened for the VBO
        verts = self.verts.ravel().detach().cpu().numpy()  # MARK: Maybe sync
        verts = np.asarray(verts, dtype=np.float32, order='C')
        return verts

    def use_gl_programs(self, program: shaders.ShaderProgram):
        """Cache value/use_tex uniform locations and bind the texture units.

        Binds ``tex0..texN`` samplers to texture units 0..N on ``program``.
        """
        for i in range(self.n_blit_values):
            self.uniforms[f'value{i}'] = gl.glGetUniformLocation(program, f'value{i}')
        for i in range(self.n_blit_values):
            self.uniforms[f'use_tex{i}'] = gl.glGetUniformLocation(program, f'use_tex{i}')
        # BUG FIX: this used to call gl.glUseProgram(self.program), but no
        # attribute named ``program`` is ever assigned (the compiled program
        # lives in ``quad_program``), which raised AttributeError from
        # __init__. Bind the program that was passed in instead.
        gl.glUseProgram(program)  # use a different program
        for i in range(self.n_blit_values):
            self.uniforms[f'tex{i}'] = gl.glGetUniformLocation(program, f'tex{i}')
            gl.glUniform1i(self.uniforms[f'tex{i}'], i)

    def upload_gl_uniforms(self, values: List[List[float]], use_texs: List[bool]):
        """Upload the clear values and per-attachment texture toggles."""
        for i, v in enumerate(values):
            v = vec4(v)  # HACK: Hold the reference for this upload
            gl.glUniform4fv(self.uniforms[f'value{i}'], 1, glm.value_ptr(v))  # as float array
        for i, v in enumerate(use_texs):
            gl.glUniform1i(self.uniforms[f'use_tex{i}'], v)

    def compile_shaders(self):
        """Compile the uquad shader into ``self.quad_program``."""
        try:
            self.quad_program = shaders.compileProgram(
                shaders.compileShader(load_shader_source('uquad.vert'), gl.GL_VERTEX_SHADER),
                shaders.compileShader(load_shader_source('uquad.frag'), gl.GL_FRAGMENT_SHADER)
            )
        except Exception as e:
            print(str(e).encode('utf-8').decode('unicode_escape'))
            raise e

    def draw(self, values: List[List[float]] = [], use_texs=[]):
        """
        This function will render 'value' to the currently bound framebuffer, up to six outputs
        """
        # Save and restore the caller's program/VAO around the draw
        old_prog = gl.glGetIntegerv(gl.GL_CURRENT_PROGRAM)
        old_vao = gl.glGetIntegerv(gl.GL_VERTEX_ARRAY_BINDING)
        gl.glUseProgram(self.quad_program)
        self.upload_gl_uniforms(values, use_texs)  # should be a noop

        # Prepare to render to textures
        gl.glBindVertexArray(self.vao)
        gl.glDrawArrays(gl.GL_TRIANGLE_STRIP, 0, len(self.verts))  # number of vertices
        gl.glBindVertexArray(old_vao)
        gl.glUseProgram(old_prog)
class DQuad(UQuad):
    """UQuad variant that draws with the depth test forced to always pass."""
    def compile_shaders(self):
        """Compile the dquad shader into ``self.quad_program``."""
        try:
            vert = shaders.compileShader(load_shader_source('dquad.vert'), gl.GL_VERTEX_SHADER)
            frag = shaders.compileShader(load_shader_source('dquad.frag'), gl.GL_FRAGMENT_SHADER)
            self.quad_program = shaders.compileProgram(vert, frag)
        except Exception as e:
            print(str(e).encode('utf-8').decode('unicode_escape'))
            raise e

    def draw(self, values: List[List[float]] = [], use_texs=[]):
        # Force the quad through the depth test, then restore the caller's
        # depth function afterwards
        prev_depth_func = gl.glGetIntegerv(gl.GL_DEPTH_FUNC)
        gl.glDepthFunc(gl.GL_ALWAYS)
        super().draw(values, use_texs)
        gl.glDepthFunc(prev_depth_func)
def hardware_rendering_framebuffer(H: int, W: int, gl_tex_dtype=gl.GL_RGBA16F):
    """Build the float-color + dual-depth FBO used for hardware splatting.

    Creates a color texture (``gl_tex_dtype``), two R32F depth textures
    (attachments 1 and 2) and a 24-bit depth attachment, all sized W x H
    with nearest filtering, and wires them to a new framebuffer with three
    draw buffers. Raises RuntimeError if the FBO is incomplete.

    Returns: (color_buffer, depth_upper, depth_lower, depth_attach, fbo)
    """
    def _alloc_tex2d(tex, internal_fmt, fmt, dtype):
        # Allocate an empty W x H texture with nearest mag/min filtering.
        # The internal format describes GPU-side storage; fmt/dtype describe
        # the (here: null) client-side pixel data.
        gl.glBindTexture(gl.GL_TEXTURE_2D, tex)
        gl.glTexImage2D(gl.GL_TEXTURE_2D, 0, internal_fmt, W, H, 0, fmt, dtype, ctypes.c_void_p(0))
        gl.glTexParameteri(gl.GL_TEXTURE_2D, gl.GL_TEXTURE_MAG_FILTER, gl.GL_NEAREST)
        gl.glTexParameteri(gl.GL_TEXTURE_2D, gl.GL_TEXTURE_MIN_FILTER, gl.GL_NEAREST)

    # Prepare for write frame buffers
    color_buffer = gl.glGenTextures(1)
    depth_upper = gl.glGenTextures(1)
    depth_lower = gl.glGenTextures(1)
    depth_attach = gl.glGenTextures(1)
    fbo = gl.glGenFramebuffers(1)  # generate 1 framebuffer, store reference in fbo

    # Init the textures; simply allocates empty GPU memory
    _alloc_tex2d(color_buffer, gl_tex_dtype, gl.GL_RGBA, gl.GL_FLOAT)  # 16 * 4
    _alloc_tex2d(depth_upper, gl.GL_R32F, gl.GL_RED, gl.GL_FLOAT)  # 32
    _alloc_tex2d(depth_lower, gl.GL_R32F, gl.GL_RED, gl.GL_FLOAT)  # 32
    _alloc_tex2d(depth_attach, gl.GL_DEPTH_COMPONENT24, gl.GL_DEPTH_COMPONENT, gl.GL_UNSIGNED_INT)  # 32

    # Bind texture to fbo
    gl.glBindFramebuffer(gl.GL_FRAMEBUFFER, fbo)
    gl.glFramebufferTexture2D(gl.GL_FRAMEBUFFER, gl.GL_COLOR_ATTACHMENT0, gl.GL_TEXTURE_2D, color_buffer, 0)  # location 0
    gl.glFramebufferTexture2D(gl.GL_FRAMEBUFFER, gl.GL_COLOR_ATTACHMENT1, gl.GL_TEXTURE_2D, depth_upper, 0)  # location 1
    gl.glFramebufferTexture2D(gl.GL_FRAMEBUFFER, gl.GL_COLOR_ATTACHMENT2, gl.GL_TEXTURE_2D, depth_lower, 0)  # location 2
    gl.glFramebufferTexture2D(gl.GL_FRAMEBUFFER, gl.GL_DEPTH_ATTACHMENT, gl.GL_TEXTURE_2D, depth_attach, 0)
    gl.glDrawBuffers(3, [gl.GL_COLOR_ATTACHMENT0, gl.GL_COLOR_ATTACHMENT1, gl.GL_COLOR_ATTACHMENT2])

    # Check framebuffer status
    if gl.glCheckFramebufferStatus(gl.GL_FRAMEBUFFER) != gl.GL_FRAMEBUFFER_COMPLETE:
        log(red('Framebuffer not complete, exiting...'))
        raise RuntimeError('Incomplete framebuffer')

    # Restore the original state
    gl.glBindFramebuffer(gl.GL_FRAMEBUFFER, 0)
    return color_buffer, depth_upper, depth_lower, depth_attach, fbo
# NOTE(review): "hareward" is likely a typo of "hardware", but the name is
# part of the public interface, so it is kept as-is
def hareward_peeling_framebuffer(H: int, W: int):
    """Build the index + depth FBO used for hardware depth peeling.

    Creates an R32I index texture (attachment 0), an R32F depth texture
    (attachment 1) and a 24-bit depth attachment, all sized W x H with
    nearest filtering, wired to a new framebuffer with two draw buffers.
    Raises RuntimeError if the FBO is incomplete.

    Returns: (index_buffer, depth_lower, depth_attach, fbo)
    """
    def _alloc_tex2d(tex, internal_fmt, fmt, dtype):
        # Allocate an empty W x H texture with nearest mag/min filtering.
        # The internal format describes GPU-side storage; fmt/dtype describe
        # the (here: null) client-side pixel data.
        gl.glBindTexture(gl.GL_TEXTURE_2D, tex)
        gl.glTexImage2D(gl.GL_TEXTURE_2D, 0, internal_fmt, W, H, 0, fmt, dtype, ctypes.c_void_p(0))
        gl.glTexParameteri(gl.GL_TEXTURE_2D, gl.GL_TEXTURE_MAG_FILTER, gl.GL_NEAREST)
        gl.glTexParameteri(gl.GL_TEXTURE_2D, gl.GL_TEXTURE_MIN_FILTER, gl.GL_NEAREST)

    # Prepare for write frame buffers
    index_buffer = gl.glGenTextures(1)
    depth_lower = gl.glGenTextures(1)
    depth_attach = gl.glGenTextures(1)
    fbo = gl.glGenFramebuffers(1)  # generate 1 framebuffer, store reference in fbo

    # Init the textures; simply allocates empty GPU memory
    _alloc_tex2d(index_buffer, gl.GL_R32I, gl.GL_RED_INTEGER, gl.GL_INT)  # 32
    _alloc_tex2d(depth_lower, gl.GL_R32F, gl.GL_RED, gl.GL_FLOAT)  # 32
    _alloc_tex2d(depth_attach, gl.GL_DEPTH_COMPONENT24, gl.GL_DEPTH_COMPONENT, gl.GL_FLOAT)  # 32

    # Bind texture to fbo
    gl.glBindFramebuffer(gl.GL_FRAMEBUFFER, fbo)
    gl.glFramebufferTexture2D(gl.GL_FRAMEBUFFER, gl.GL_COLOR_ATTACHMENT0, gl.GL_TEXTURE_2D, index_buffer, 0)  # location 0
    gl.glFramebufferTexture2D(gl.GL_FRAMEBUFFER, gl.GL_COLOR_ATTACHMENT1, gl.GL_TEXTURE_2D, depth_lower, 0)  # location 1
    gl.glFramebufferTexture2D(gl.GL_FRAMEBUFFER, gl.GL_DEPTH_ATTACHMENT, gl.GL_TEXTURE_2D, depth_attach, 0)
    gl.glDrawBuffers(2, [gl.GL_COLOR_ATTACHMENT0, gl.GL_COLOR_ATTACHMENT1])

    # Check framebuffer status
    if gl.glCheckFramebufferStatus(gl.GL_FRAMEBUFFER) != gl.GL_FRAMEBUFFER_COMPLETE:
        log(red('Framebuffer not complete, exiting...'))
        raise RuntimeError('Incomplete framebuffer')

    # Restore the original state
    gl.glBindFramebuffer(gl.GL_FRAMEBUFFER, 0)
    return index_buffer, depth_lower, depth_attach, fbo
class Gaussian(Mesh):
    """Viewer object that renders a pretrained Gaussian model through a Quad.

    Loads a GaussianModel from a checkpoint (``.npz``/``.pt``/``.pth`` saved
    by GaussianTSampler, or a raw ``.ply``), renders it on the GPU every
    frame, and copies the resulting RGBA image to screen via the shared
    CUDA-GL Quad texture.
    """
    def __init__(self,
                 filename: str = 'assets/meshes/zju3dv.npz',
                 gaussian_cfg: dotdict = dotdict(),
                 quad_cfg: dotdict = dotdict(),
                 view_depth: bool = False,  # show depth or show color
                 dpt_cm: str = 'linear',  # colormap used by depth_curve_fn
                 H: int = 1024,
                 W: int = 1024,
                 **kwargs,
                 ):
        # Import Gaussian Model
        from easyvolcap.engine.registry import call_from_cfg
        from easyvolcap.utils.gaussian_utils import GaussianModel

        # Housekeeping
        super().__init__(**kwargs)
        self.name = split(filename)[-1]

        # Init Gaussian related models, for now only the first gaussian model is supported
        if filename.endswith('.npz') or filename.endswith('.pt') or filename.endswith('.pth'):
            # Load from GaussianTSampler
            pretrained, _ = load_pretrained(filename)  # loaded model and updated path (maybe)
            pretrained = pretrained.model
            # Extract only the first point cloud's parameters from the sampler
            state_dict = dotdict()
            for k, v in pretrained.items():
                if k.startswith('sampler.pcds.0'):
                    state_dict[k.replace('sampler.pcds.0.', '')] = v

            # Load the parameters into the gaussian model
            self.gaussian_model: GaussianModel = call_from_cfg(GaussianModel, gaussian_cfg)  # init empty gaussian model
            self.gaussian_model.load_state_dict(state_dict)  # load the first gaussian model
            self.gaussian_model.cuda()  # move the parameters to GPU
        elif filename.endswith('.ply'):
            # Load raw GaussianModel
            # pts, rgb, norm, scalars = load_pts(filename)
            self.gaussian_model: GaussianModel = call_from_cfg(GaussianModel, gaussian_cfg)  # init empty gaussian model
            self.gaussian_model.load_ply(filename)  # load the original gaussian model
            self.gaussian_model.cuda()
        else:
            raise NotImplementedError

        # Init rendering quad (the CUDA-GL shared texture used for display)
        self.quad: Quad = call_from_cfg(Quad, quad_cfg, H=H, W=W)

        # Other configurations
        self.view_depth = view_depth
        self.dpt_cm = dpt_cm

        # Remove Mesh-only attributes so the base render_imgui skips their widgets
        del self.shade_flat
        del self.point_radius
        del self.render_normal

    # Disabling initialization: the Mesh geometry/GL-buffer machinery is unused here
    def load_from_file(self, *args, **kwargs):
        pass

    def load_from_data(self, *args, **kwargs):
        pass

    def compile_shaders(self):
        pass

    def update_gl_buffers(self):
        pass

    def resize_textures(self, H: int, W: int):
        # Delegate resizing to the display quad
        self.quad.resize_textures(H, W)

    # The actual rendering function
    @torch.no_grad()
    def render(self, camera: Camera):
        """Render the gaussians for ``camera`` and blit the result on screen."""
        # Perform actual gaussian rendering
        batch = add_batch(to_cuda(camera.to_batch()))
        rgb, acc, dpt = self.gaussian_model.render(batch)

        # Either a colormapped depth or the rendered color, with accumulated alpha
        if self.view_depth:
            rgba = torch.cat([depth_curve_fn(dpt, cm=self.dpt_cm), acc], dim=-1)  # H, W, 4
        else:
            rgba = torch.cat([rgb, acc], dim=-1)  # H, W, 4

        # Copy rendered tensor to screen; flip(0) converts to GL's bottom-left origin
        rgba = (rgba.clip(0, 1) * 255).type(torch.uint8).flip(0)  # transform
        self.quad.copy_to_texture(rgba)
        self.quad.render()

    def render_imgui(mesh, viewer: 'VolumetricVideoViewer', batch: dotdict):
        """Extend the base mesh GUI with a color/depth view toggle."""
        super().render_imgui(viewer, batch)

        from imgui_bundle import imgui
        from easyvolcap.utils.imgui_utils import push_button_color, pop_button_color
        i = batch.i

        imgui.same_line()
        push_button_color(0x55cc33ff if not mesh.view_depth else 0x8855aaff)
        if imgui.button(f'Color##{i}' if not mesh.view_depth else f' Depth ##{i}'):
            mesh.view_depth = not mesh.view_depth
        pop_button_color()
class PointSplat(Gaussian, nn.Module):
def __init__(self,
filename: str = 'assets/meshes/zju3dv.ply',
quad_cfg: dotdict = dotdict(),
view_depth: bool = False, # show depth or show color
dpt_cm: str = 'linear',
H: int = 1024,
W: int = 1024,
**kwargs,
):
# Import Gaussian Model
from easyvolcap.engine.registry import call_from_cfg
from easyvolcap.utils.data_utils import load_pts
from easyvolcap.utils.net_utils import make_buffer
from easyvolcap.models.samplers.gaussiant_sampler import GaussianTSampler
# Housekeeping
super(Gaussian, self).__init__(**kwargs)
self.name = split(filename)[-1]
self.render_radius = MethodType(GaussianTSampler.render_radius, self) # override the method
# Init PointSplat related models, for now only the first gaussian model is supported
if filename.endswith('.ply'):
# Load raw GaussianModel
pts, rgb, norms, scalars = load_pts(filename)
occ, rad = scalars.alpha, scalars.radius
self.pts = make_buffer(torch.from_numpy(pts)) # N, 3
self.rgb = make_buffer(torch.from_numpy(rgb)) # N, 3
self.occ = make_buffer(torch.from_numpy(occ)) # N, 1
self.rad = make_buffer(torch.from_numpy(rad)) # N, 1
else:
raise NotImplementedError
# Init rendering quad
self.quad: Quad = call_from_cfg(Quad, quad_cfg, H=H, W=W)
self.cuda() # move to cuda
# Other configurations
self.view_depth = view_depth
self.dpt_cm = dpt_cm
self.radius_mult = 1.0
self.alpha_mult = 1.0
# The actual rendering function
@torch.no_grad()
def render(self, camera: Camera):
# Perform actual gaussian rendering
batch = add_batch(to_cuda(camera.to_batch()))
sh0 = rgb2sh0(self.rgb[..., None])
xyz = self.pts
occ = (self.occ * self.alpha_mult).clip(0, 1)
rad = self.rad * self.radius_mult
rgb, acc, dpt = self.render_radius(*add_batch([xyz, sh0, rad, occ]), batch)
rgb, acc, dpt = rgb[0], acc[0], dpt[0]
if self.view_depth:
rgba = torch.cat([depth_curve_fn(dpt, cm=self.dpt_cm), acc], dim=-1) # H, W, 4
else:
rgba = torch.cat([rgb, acc], dim=-1) # H, W, 4
# Copy rendered tensor to screen
rgba = (rgba.clip(0, 1) * 255).type(torch.uint8).flip(0) # transform
self.quad.copy_to_texture(rgba)
self.quad.render()
def render_imgui(self, viewer: 'VolumetricVideoViewer', batch: dotdict):
    """Draw per-mesh UI controls into the viewer's imgui panel.

    Exposes sliders for the point radius and alpha multipliers used by
    `render`. The first parameter is renamed from `mesh` to the idiomatic
    `self` (call-compatible: it is always invoked as a bound method, and
    zero-argument `super()` does not depend on the parameter name).
    """
    super().render_imgui(viewer, batch)
    from imgui_bundle import imgui
    i = batch.i  # unique widget id suffix so sliders of different meshes don't collide
    self.radius_mult = imgui.slider_float(f'Point radius multiplier##{i}', self.radius_mult, 0.1, 3.0)[1]  # 0.1mm
    self.alpha_mult = imgui.slider_float(f'Point alpha multiplier##{i}', self.alpha_mult, 0.1, 3.0)[1]  # 0.1mm
class Splat(Mesh):  # FIXME: Not rendering, need to debug this
    """Point-splat renderer built on raw OpenGL multi-pass depth peeling.

    Each vertex carries position (3), color (3), radius (1) and alpha (1)
    floats (see ``vert_sizes``). Volume rendering ping-pongs between a read
    and a write framebuffer for up to ``pts_per_pix`` passes; a single-pass
    "solid" path is used when ``volume_rendering`` is disabled.
    """

    def __init__(self,
                 *args,
                 H: int = 512,
                 W: int = 512,
                 tex_dtype: Union[str, torch.dtype] = torch.half,  # texture precision; half -> GL_RGBA16F, else GL_RGBA32F
                 pts_per_pix: int = 24,  # render less for the static background since we're only doing a demo
                 blit_last_ratio: float = 0.0,  # fraction of passes after which previous results are blitted forward
                 volume_rendering: bool = True,
                 radii_mult_volume: float = 1.00,  # 2 / 3 is the right integration, but will leave holes, 1.0 will make it bloat, 0.85 looks visually better
                 radii_mult_solid: float = 0.85,  # 2 / 3 is the right integration, but will leave holes, 1.0 will make it bloat, 0.85 looks visually better
                 point_smooth: bool = True,
                 alpha_blending: bool = True,
                 **kwargs):
        kwargs = dotdict(kwargs)
        kwargs.vert_sizes = kwargs.get('vert_sizes', [3, 3, 1, 1])  # xyz (3) + rgb (3) + radius (1) + alpha (1)
        self.tex_dtype = getattr(torch, tex_dtype) if isinstance(tex_dtype, str) else tex_dtype
        self.gl_tex_dtype = gl.GL_RGBA16F if self.tex_dtype == torch.half else gl.GL_RGBA32F

        super().__init__(*args, **kwargs)
        self.use_gl_program(self.splat_program)

        self.pts_per_pix = pts_per_pix
        self.blit_last_ratio = blit_last_ratio
        self.volume_rendering = volume_rendering
        self.radii_mult_volume = radii_mult_volume
        self.radii_mult_solid = radii_mult_solid
        self.point_smooth = point_smooth
        self.alpha_blending = alpha_blending

        self.max_H, self.max_W = H, W  # allocated texture size (grown lazily by resize_textures)
        self.H, self.W = H, W  # currently used render size
        self.init_textures()

        # Borrow GaussianTSampler's radius-based rendering routine as a bound method
        from easyvolcap.models.samplers.gaussiant_sampler import GaussianTSampler
        self.render_radius = MethodType(GaussianTSampler.render_radius, self)  # override the method

    @property
    def verts_data(self):  # a heavy copy operation
        # Interleave per-vertex attributes into one contiguous float32 buffer for the VBO
        verts = torch.cat([self.verts, self.colors, self.radius, self.alpha], dim=-1).ravel().numpy()  # MARK: Maybe sync
        verts = np.asarray(verts, dtype=np.float32, order='C')  # this should only be invoked once
        return verts

    def use_gl_program(self, program: shaders.ShaderProgram):
        """Activate `program` and cache uniform locations used by the splat shaders."""
        super().use_gl_program(program)
        # Special controlling variables
        self.uniforms.alpha_blending = gl.glGetUniformLocation(program, f'alpha_blending')
        self.uniforms.point_smooth = gl.glGetUniformLocation(program, f'point_smooth')
        self.uniforms.radii_mult = gl.glGetUniformLocation(program, f'radii_mult')

        # Special rendering variables
        self.uniforms.pass_index = gl.glGetUniformLocation(program, f'pass_index')
        self.uniforms.read_color = gl.glGetUniformLocation(program, f'read_color')
        self.uniforms.read_upper = gl.glGetUniformLocation(program, f'read_upper')
        self.uniforms.read_lower = gl.glGetUniformLocation(program, f'read_lower')
        # Bind sampler uniforms to fixed texture units 0 / 1 / 2
        gl.glUniform1i(self.uniforms.read_color, 0)
        gl.glUniform1i(self.uniforms.read_upper, 1)
        gl.glUniform1i(self.uniforms.read_lower, 2)

    def compile_shaders(self):
        """Compile both the volume (splat) and solid (usplat) shader programs."""
        try:
            self.splat_program = shaders.compileProgram(
                shaders.compileShader(load_shader_source('splat.vert'), gl.GL_VERTEX_SHADER),
                shaders.compileShader(load_shader_source('splat.frag'), gl.GL_FRAGMENT_SHADER)
            )
            self.usplat_program = shaders.compileProgram(
                shaders.compileShader(load_shader_source('usplat.vert'), gl.GL_VERTEX_SHADER),
                shaders.compileShader(load_shader_source('usplat.frag'), gl.GL_FRAGMENT_SHADER)
            )
        except Exception as e:
            # Shader compile errors arrive with escaped newlines; unescape for readability
            print(str(e).encode('utf-8').decode('unicode_escape'))
            raise e

    def rasterize(self, camera: Camera = None, length: int = None):
        # Dispatch to the multi-pass volume path or the single-pass solid path
        if self.volume_rendering:
            return self.rasterize_volume(camera, length)
        else:
            return self.rasterize_solid(camera, length)

    def rasterize_volume(self, camera: Camera = None, length: int = None):  # some implementation requires no uploading of camera
        """
        Let's try to analyze what's happening here
        We want to:
            1. Render the front-most color to color buffer
            2. UNUSED: Render the front-most depth + some large margin to a depth upper limit buffer
            3. Render the front-most depth + some small margin to a depth lower limit buffer
            4. Switch between the render target and sampling target
            5. Use the previous rendered color, depth upper limit and lower limit as textures
            6. When current depth is smaller than the lower limit, we've already rendered this in the first pass, discard
            7. UNUSED: When current depth is larger than the upper limit, it will probabily not contribute much to final results, discard
            8. UNUSED: When the accumulated opacity reaches almost 1, subsequent rendering would not have much effect, return directly
            9. When the point coordinates falls out of bound of the current sphere, dicard (this could be optimized with finutining in rectangle)
            10. Finally, try to render the final color using the volume rendering equation (by accumulating alpha values from front to back)
        Required cleanup checklist:
            1. Before rendering the first pass, we need to clear the color and depth texture, this is not done, need to check multi-frame accumulation on this
            2. Before rendering next pass, it's also recommended to blit color and depth values from previous pass to avoid assign them in the shader
        """
        front_fbo, front_color, front_upper, front_lower = self.read_fbo, self.read_color, self.read_upper, self.read_lower
        back_fbo, back_color, back_upper, back_lower = self.write_fbo, self.write_color, self.write_upper, self.write_lower

        # Only clear the output once
        gl.glBindFramebuffer(gl.GL_FRAMEBUFFER, front_fbo)  # for offscreen rendering to textures
        gl.glClearBufferfv(gl.GL_COLOR, 0, [0.0, 0.0, 0.0, 0.0])
        # gl.glClearBufferfv(gl.GL_COLOR, 1, [1e9])
        gl.glClearBufferfv(gl.GL_COLOR, 2, [0.0, 0.0, 0.0, 0.0])
        gl.glClearBufferfv(gl.GL_DEPTH, 0, [1e9])  # this is for depth testing

        # Only clear the output once
        gl.glBindFramebuffer(gl.GL_FRAMEBUFFER, back_fbo)  # for offscreen rendering to textures
        gl.glClearBufferfv(gl.GL_COLOR, 0, [0.0, 0.0, 0.0, 0.0])
        # gl.glClearBufferfv(gl.GL_COLOR, 1, [1e9])
        gl.glClearBufferfv(gl.GL_COLOR, 2, [0.0, 0.0, 0.0, 0.0])
        gl.glClearBufferfv(gl.GL_DEPTH, 0, [1e9])  # this is for depth testing

        # Prepare for the actual rendering, previous operations could rebind the vertex array
        self.use_gl_program(self.splat_program)  # TODO: Implement this with a mapping and a lazy modification
        self.upload_gl_uniforms(camera)
        gl.glBindVertexArray(self.vao)

        # The actual multi pass rendering process happens here
        for pass_index in range(self.pts_per_pix):
            # Swap buffers to render the next pass
            front_fbo, front_color, front_upper, front_lower, back_fbo, back_color, back_upper, back_lower = \
                back_fbo, back_color, back_upper, back_lower, front_fbo, front_color, front_upper, front_lower

            # Bind the read texture and bind the write render frame buffer
            gl.glBindTextures(0, 3, [front_color, front_upper, front_lower])

            # Move content from write_fbo to screen fbo
            if pass_index > self.pts_per_pix * self.blit_last_ratio:  # no blitting almost has no effect on the rendering
                gl.glBindFramebuffer(gl.GL_READ_FRAMEBUFFER, front_fbo)
                gl.glBindFramebuffer(gl.GL_DRAW_FRAMEBUFFER, back_fbo)
                # NOTE(review): blit all three color attachments one by one — confirm the
                # original intent was glBlitFramebuffer inside this loop for each attachment
                for i in range(3):
                    gl.glReadBuffer(gl.GL_COLOR_ATTACHMENT0 + i)
                    gl.glDrawBuffer(gl.GL_COLOR_ATTACHMENT0 + i)
                    gl.glBlitFramebuffer(0, 0, self.W, self.H, 0, 0, self.W, self.H, gl.GL_COLOR_BUFFER_BIT, gl.GL_NEAREST)
                gl.glDrawBuffers(3, [gl.GL_COLOR_ATTACHMENT0, gl.GL_COLOR_ATTACHMENT1, gl.GL_COLOR_ATTACHMENT2])

            # Clear depth buffer for depth testing
            gl.glBindFramebuffer(gl.GL_FRAMEBUFFER, back_fbo)  # for offscreen rendering to textures
            gl.glClearBufferfv(gl.GL_DEPTH, 0, [1e9])  # this is for depth testing
            gl.glUniform1i(self.uniforms.pass_index, pass_index)  # pass index

            # The actual drawing pass with render things out to the write_fbo
            gl.glDrawArrays(gl.GL_POINTS, 0, length if length is not None else len(self.verts))  # number of vertices

        # Restore states of things
        gl.glBindFramebuffer(gl.GL_FRAMEBUFFER, 0)
        gl.glBindVertexArray(0)
        return back_fbo

    def upload_gl_uniforms(self, camera: Camera):
        """Upload camera uniforms plus the splat-specific toggles and radius multiplier."""
        super().upload_gl_uniforms(camera)
        gl.glUniform1i(self.uniforms.point_smooth, self.point_smooth)
        gl.glUniform1i(self.uniforms.alpha_blending, self.alpha_blending)

        if self.volume_rendering:
            gl.glUniform1f(self.uniforms.radii_mult, self.radii_mult_volume)  # radii mult
        else:
            gl.glUniform1f(self.uniforms.radii_mult, self.radii_mult_solid)  # radii mult

    def rasterize_solid(self, camera: Camera = None, length: int = None):
        """Single-pass (non-peeling) rendering path using the usplat program."""
        # Only clear the output once
        back_fbo = self.write_fbo
        gl.glBindFramebuffer(gl.GL_FRAMEBUFFER, back_fbo)  # for offscreen rendering to textures
        gl.glClearBufferfv(gl.GL_COLOR, 0, [0.0, 0.0, 0.0, 0.0])  # color
        # gl.glClearBufferfv(gl.GL_COLOR, 1, [0.0])  # depth upper
        gl.glClearBufferfv(gl.GL_COLOR, 2, [0.0, 0.0, 0.0, 0.0])  # depth lower
        gl.glClearBufferfv(gl.GL_DEPTH, 0, [1e9])  # this is for depth testing

        # Prepare for the actual rendering, previous operations could rebind the vertex array
        self.use_gl_program(self.usplat_program)
        self.upload_gl_uniforms(camera)
        gl.glUniform1i(self.uniforms.pass_index, 0)  # pass index
        gl.glBindVertexArray(self.vao)

        # The actual drawing pass with render things out to the write_fbo
        gl.glDrawArrays(gl.GL_POINTS, 0, length if length is not None else len(self.verts))  # number of vertices

        # Restore states of things
        gl.glBindFramebuffer(gl.GL_FRAMEBUFFER, 0)
        gl.glBindVertexArray(0)
        return back_fbo

    def show(self, back_fbo: int):
        """Blit the finished offscreen framebuffer onto the default (screen) framebuffer."""
        # Move content from write_fbo to screen fbo
        gl.glBindFramebuffer(gl.GL_READ_FRAMEBUFFER, back_fbo)
        gl.glBindFramebuffer(gl.GL_DRAW_FRAMEBUFFER, 0)  # render the final content onto screen
        gl.glReadBuffer(gl.GL_COLOR_ATTACHMENT0)
        gl.glBlitFramebuffer(0, 0, self.W, self.H, 0, 0, self.W, self.H, gl.GL_COLOR_BUFFER_BIT, gl.GL_NEAREST)
        gl.glBindFramebuffer(gl.GL_FRAMEBUFFER, 0)

    def render(self, camera):
        # Rasterize offscreen, then present; skipped entirely when not visible
        if not self.visible: return
        self.show(self.rasterize(camera))

    def resize_textures(self, H: int, W: int):  # analogy to update_gl_buffers
        """Grow the offscreen textures if the requested size exceeds the allocation (with 5% slack)."""
        self.H, self.W = H, W
        if self.H > self.max_H or self.W > self.max_W:  # max got updated
            self.max_H, self.max_W = max(int(self.H * 1.05), self.max_H), max(int(self.W * 1.05), self.max_W)
            self.init_textures()

    def init_textures(self):
        """(Re)allocate the double-buffered color/upper/lower/depth framebuffer pair."""
        if hasattr(self, 'write_fbo'):
            # Free the previous allocation before creating a larger one
            gl.glDeleteFramebuffers(2, [self.write_fbo, self.read_fbo])
            gl.glDeleteTextures(8, [self.write_color, self.write_upper, self.write_lower, self.write_attach, self.read_color, self.read_upper, self.read_lower, self.read_attach])
        self.write_color, self.write_upper, self.write_lower, self.write_attach, self.write_fbo = hardware_rendering_framebuffer(self.max_H, self.max_W, self.gl_tex_dtype)
        self.read_color, self.read_upper, self.read_lower, self.read_attach, self.read_fbo = hardware_rendering_framebuffer(self.max_H, self.max_W, self.gl_tex_dtype)
        log(f'Created texture of h, w: {self.max_H}, {self.max_W}')
class HardwareRendering(Splat):
    """CUDA-GL interop variant of :class:`Splat`.

    Vertex data is copied straight from torch (CUDA) tensors into the OpenGL
    vertex buffer, and the rendered color/depth textures are copied back into
    torch tensors, avoiding any CPU round trip.
    """

    def __init__(self,
                 dtype: Union[str, torch.dtype] = torch.half,  # vertex buffer precision for the CUDA -> GL upload
                 **kwargs,
                 ):
        self.dtype = getattr(torch, dtype) if isinstance(dtype, str) else dtype
        self.gl_dtype = gl.GL_HALF_FLOAT if self.dtype == torch.half else gl.GL_FLOAT
        kwargs = dotdict(kwargs)
        kwargs.blit_last_ratio = kwargs.get('blit_last_ratio', 0.90)  # only blit near the final passes by default
        kwargs.vert_sizes = kwargs.get('vert_sizes', [3, 3, 1, 1])
        super().__init__(**kwargs)  # verts, color, radius, alpha

    @property
    def verts_data(self):  # a heavy copy operation
        # Interleave per-vertex attributes into one contiguous buffer matching self.dtype
        verts = torch.cat([self.verts, self.colors, self.radius, self.alpha], dim=-1).ravel().numpy()  # MARK: Maybe sync
        verts = np.asarray(verts, dtype=torch_dtype_to_numpy_dtype(self.dtype), order='C')  # this should only be invoked once
        return verts

    def init_gl_buffers(self, v: int = 0, f: int = 0):
        """Create the GL buffers and (re)register the VBO with CUDA for interop."""
        from cuda import cudart
        if hasattr(self, 'cu_vbo'):
            CHECK_CUDART_ERROR(cudart.cudaGraphicsUnregisterResource(self.cu_vbo))

        super().init_gl_buffers(v, f)

        # Register vertex buffer obejct
        flags = cudart.cudaGraphicsRegisterFlags.cudaGraphicsRegisterFlagsWriteDiscard
        try:
            self.cu_vbo = CHECK_CUDART_ERROR(cudart.cudaGraphicsGLRegisterBuffer(self.vbo, flags))
        except RuntimeError as e:
            log(red(f'Your system does not support CUDA-GL interop, please use pytorch3d\'s implementation instead'))
            log(red(f'This can be done by specifying {blue("model_cfg.sampler_cfg.use_cudagl=False model_cfg.sampler_cfg.use_diffgl=False")} at the end of your command'))
            log(red(f'Note that this implementation is extremely slow, we recommend running on a native system that support the interop'))
            # raise RuntimeError(str(e) + ": This unrecoverable, please read the error message above")
            raise e

    def init_textures(self):
        """Allocate the framebuffers and register the color/lower textures with CUDA."""
        from cuda import cudart
        if hasattr(self, 'cu_read_color'):
            # Unregister the previous textures before reallocating
            CHECK_CUDART_ERROR(cudart.cudaGraphicsUnregisterResource(self.cu_read_color))
            CHECK_CUDART_ERROR(cudart.cudaGraphicsUnregisterResource(self.cu_write_color))
            CHECK_CUDART_ERROR(cudart.cudaGraphicsUnregisterResource(self.cu_read_lower))
            CHECK_CUDART_ERROR(cudart.cudaGraphicsUnregisterResource(self.cu_write_lower))

        super().init_textures()

        # Register image to read from
        flags = cudart.cudaGraphicsRegisterFlags.cudaGraphicsRegisterFlagsReadOnly
        self.cu_read_color = CHECK_CUDART_ERROR(cudart.cudaGraphicsGLRegisterImage(self.read_color, gl.GL_TEXTURE_2D, flags))
        self.cu_write_color = CHECK_CUDART_ERROR(cudart.cudaGraphicsGLRegisterImage(self.write_color, gl.GL_TEXTURE_2D, flags))
        self.cu_read_lower = CHECK_CUDART_ERROR(cudart.cudaGraphicsGLRegisterImage(self.read_lower, gl.GL_TEXTURE_2D, flags))
        self.cu_write_lower = CHECK_CUDART_ERROR(cudart.cudaGraphicsGLRegisterImage(self.write_lower, gl.GL_TEXTURE_2D, flags))

    def forward(self, xyz: torch.Tensor, rgb: torch.Tensor, rad: torch.Tensor, occ: torch.Tensor, batch: dotdict):
        """
        Renders a 3D point cloud using OpenGL and returns the rendered RGB image, accumulated alpha image, and depth map.

        Args:
            xyz (torch.Tensor): A tensor of shape (B, N, 3) containing the 3D coordinates of the points.
            rgb (torch.Tensor): A tensor of shape (B, N, 3) containing the RGB color values of the points.
            rad (torch.Tensor): A tensor of shape (B, N, 1) containing the radii of the points.
            occ (torch.Tensor): A tensor of shape (B, N, 1) containing the alpha (opacity) values of the points.
            batch (dotdict): A dictionary containing the camera parameters and other metadata for the batch.

        Returns:
            A tuple containing the rendered RGB image, accumulated alpha image, and depth map, all as torch.Tensors.
            The RGB image has shape (1, H, W, 3), the alpha image has shape (1, H, W, 1), and the depth map has shape (1, H, W, 1).

        The method first resizes the OpenGL texture to match the height and width of the output image. It then sets the OpenGL viewport and scissor to only render in the region of the viewport specified by the output image size.
        It concatenates the `xyz`, `rgb`, and `rad` tensors along the last dimension and flattens the result into a 1D tensor.
        The method then uploads the input data to OpenGL for rendering and performs depth peeling using OpenGL. The method uploads the camera parameters to OpenGL and renders the point cloud, saving the output buffer to the `back_fbo` attribute of the class.
        Finally, the method copies the rendered image and depth back to the CPU as torch.Tensors and reshapes them to match the output image size. The RGB image is returned with shape (1, H, W, 3), the accumulated alpha image is returned with shape (1, H, W, 1), and the depth map is returned with shape (1, H, W, 1).
        """
        from cuda import cudart
        kind = cudart.cudaMemcpyKind.cudaMemcpyDeviceToDevice

        # !: BATCH
        H, W = batch.meta.H[0].item(), batch.meta.W[0].item()
        self.resize_textures(H, W)  # maybe resize the texture
        self.resize_buffers(xyz.shape[1])  # maybe resize the buffer
        _, _, old_W, old_H = gl.glGetIntegerv(gl.GL_VIEWPORT)
        gl.glViewport(0, 0, W, H)
        gl.glScissor(0, 0, W, H)  # only render in this small region of the viewport

        # Prepare for input data
        data = torch.cat([xyz, rgb, rad, occ], dim=-1).type(self.dtype).ravel()

        # Upload to opengl for rendering
        CHECK_CUDART_ERROR(cudart.cudaGraphicsMapResources(1, self.cu_vbo, torch.cuda.current_stream().cuda_stream))
        cu_vbo_ptr, cu_vbo_size = CHECK_CUDART_ERROR(cudart.cudaGraphicsResourceGetMappedPointer(self.cu_vbo))
        assert cu_vbo_size >= data.numel() * data.element_size(), f'PyTorch(CUDA) and OpenGL vertex buffer size mismatch ({data.numel() * data.element_size()} v.s. {cu_vbo_size}), CUDA side should be less than or equal to the OpenGL side'
        CHECK_CUDART_ERROR(cudart.cudaMemcpyAsync(cu_vbo_ptr,
                                                  data.data_ptr(),
                                                  data.numel() * data.element_size(),
                                                  kind,
                                                  torch.cuda.current_stream().cuda_stream))
        CHECK_CUDART_ERROR(cudart.cudaGraphicsUnmapResources(1, self.cu_vbo, torch.cuda.current_stream().cuda_stream))

        # Perform rasterization (depth peeling using OpenGL)
        if 'meta_stream' in batch.meta: batch.meta.meta_stream.synchronize()  # wait for gpu -> cpu copy to finish
        back_fbo = self.rasterize(Camera(batch=batch.meta), xyz.shape[-2])  # will upload and render, save output buffer to back_fbo

        # Copy rendered image and depth back as tensor
        cu_tex = self.cu_write_color if back_fbo == self.write_fbo else self.cu_read_color  # double buffered depth peeling
        cu_dpt = self.cu_write_lower if back_fbo == self.write_fbo else self.cu_read_lower  # double buffered depth peeling

        # Prepare the output # !: BATCH
        rgb_map = torch.empty((H, W, 4), dtype=self.tex_dtype, device='cuda')  # to hold the data from opengl
        dpt_map = torch.empty((H, W, 1), dtype=torch.float, device='cuda')  # to hold the data from opengl

        # The resources in resources may be accessed by CUDA until they are unmapped.
        # The graphics API from which resources were registered should not access any resources while they are mapped by CUDA.
        # If an application does so, the results are undefined.
        CHECK_CUDART_ERROR(cudart.cudaGraphicsMapResources(1, cu_tex, torch.cuda.current_stream().cuda_stream))
        CHECK_CUDART_ERROR(cudart.cudaGraphicsMapResources(1, cu_dpt, torch.cuda.current_stream().cuda_stream))
        cu_tex_arr = CHECK_CUDART_ERROR(cudart.cudaGraphicsSubResourceGetMappedArray(cu_tex, 0, 0))
        cu_dpt_arr = CHECK_CUDART_ERROR(cudart.cudaGraphicsSubResourceGetMappedArray(cu_dpt, 0, 0))
        CHECK_CUDART_ERROR(cudart.cudaMemcpy2DFromArrayAsync(rgb_map.data_ptr(),  # dst
                                                             W * 4 * rgb_map.element_size(),  # dpitch
                                                             cu_tex_arr,  # src
                                                             0,  # wOffset
                                                             0,  # hOffset
                                                             W * 4 * rgb_map.element_size(),  # width Width of matrix transfer (columns in bytes)
                                                             H,  # height
                                                             kind,  # kind
                                                             torch.cuda.current_stream().cuda_stream))  # stream
        CHECK_CUDART_ERROR(cudart.cudaMemcpy2DFromArrayAsync(dpt_map.data_ptr(),
                                                             W * 1 * dpt_map.element_size(),
                                                             cu_dpt_arr,
                                                             0,
                                                             0,
                                                             W * 1 * dpt_map.element_size(),
                                                             H,
                                                             kind,
                                                             torch.cuda.current_stream().cuda_stream))
        CHECK_CUDART_ERROR(cudart.cudaGraphicsUnmapResources(1, cu_tex, torch.cuda.current_stream().cuda_stream))  # MARK: SYNC
        CHECK_CUDART_ERROR(cudart.cudaGraphicsUnmapResources(1, cu_dpt, torch.cuda.current_stream().cuda_stream))  # MARK: SYNC

        # Ouput reshaping
        rgb_map, dpt_map = rgb_map[None].flip(1), dpt_map[None].flip(1)  # flip vertically: GL is bottom-up
        rgb_map, acc_map = rgb_map[..., :3], rgb_map[..., 3:]
        dpt_map = torch.where(dpt_map == 0, dpt_map.max(), dpt_map)  # zero depth means background; push to far plane

        # Some house keepings
        gl.glViewport(0, 0, old_W, old_H)
        gl.glScissor(0, 0, old_W, old_H)
        return rgb_map, acc_map, dpt_map
class HardwarePeeling(Splat):
    """Index-based depth peeling with CUDA-GL interop.

    Instead of compositing colors in the shader, each peeling pass writes the
    per-pixel *point index* (and a depth lower bound) to integer textures.
    The indices are read back to torch, where screen-space weights are
    computed and composited differentiably with the volume rendering
    equation (see :meth:`forward`).
    """

    def __init__(self,
                 dtype: Union[str, torch.dtype] = torch.float,  # vertex buffer precision
                 **kwargs):
        self.dtype = getattr(torch, dtype) if isinstance(dtype, str) else dtype
        self.gl_dtype = gl.GL_HALF_FLOAT if self.dtype == torch.half else gl.GL_FLOAT
        super().__init__(**kwargs,
                         blit_last_ratio=-10.0,  # always blit depth from the previous pass
                         vert_sizes=[3, 1],
                         )  # verts, radius, index
        # from pytorch3d.renderer import AlphaCompositor
        # self.compositor = AlphaCompositor()  # this the key to convergence, this is differentiable

    @property
    def verts_data(self):  # a heavy copy operation
        # Only position and radius go to the GPU; colors are gathered on the torch side
        verts = torch.cat([self.verts, self.radius], dim=-1).ravel().numpy()  # MARK: Maybe sync
        verts = np.asarray(verts, dtype=torch_dtype_to_numpy_dtype(self.dtype), order='C')  # this should only be invoked once
        return verts

    def init_gl_buffers(self, v: int = 0, f: int = 0):
        """Create the GL buffers and (re)register the VBO with CUDA for interop."""
        from cuda import cudart
        if hasattr(self, 'cu_vbo'):
            CHECK_CUDART_ERROR(cudart.cudaGraphicsUnregisterResource(self.cu_vbo))

        super().init_gl_buffers(v, f)

        # Register vertex buffer obejct
        flags = cudart.cudaGraphicsRegisterFlags.cudaGraphicsRegisterFlagsWriteDiscard
        self.cu_vbo = CHECK_CUDART_ERROR(cudart.cudaGraphicsGLRegisterBuffer(self.vbo, flags))

    def use_gl_program(self, program: shaders.ShaderProgram):
        """Activate the index-splat program and bind its sampler uniforms to units 0 / 1."""
        super().use_gl_program(program)
        gl.glUseProgram(self.splat_program)  # use a different program
        self.uniforms.read_index = gl.glGetUniformLocation(program, f'read_index')
        self.uniforms.read_lower = gl.glGetUniformLocation(program, f'read_lower')
        gl.glUniform1i(self.uniforms.read_index, 0)
        gl.glUniform1i(self.uniforms.read_lower, 1)

    def upload_gl_uniforms(self, camera: Camera):
        # No extra uniforms beyond the base class (pass-through kept for symmetry)
        super().upload_gl_uniforms(camera)

    def compile_shaders(self):
        """Compile the index-writing splat program (idx_splat)."""
        try:
            self.splat_program = shaders.compileProgram(
                shaders.compileShader(load_shader_source('idx_splat.vert'), gl.GL_VERTEX_SHADER),  # use the pass through quad shader
                shaders.compileShader(load_shader_source('idx_splat.frag'), gl.GL_FRAGMENT_SHADER)
            )
        except Exception as e:
            # Shader compile errors arrive with escaped newlines; unescape for readability
            print(str(e).encode('utf-8').decode('unicode_escape'))
            raise e

    def init_textures(self):
        """(Re)allocate the index/lower framebuffer pair and register them with CUDA."""
        from cuda import cudart
        if hasattr(self, 'cu_read_index'):
            CHECK_CUDART_ERROR(cudart.cudaGraphicsUnregisterResource(self.cu_read_index))
            CHECK_CUDART_ERROR(cudart.cudaGraphicsUnregisterResource(self.cu_write_index))
            CHECK_CUDART_ERROR(cudart.cudaGraphicsUnregisterResource(self.cu_read_lower))
            CHECK_CUDART_ERROR(cudart.cudaGraphicsUnregisterResource(self.cu_write_lower))

        if hasattr(self, 'write_fbo'):
            gl.glDeleteFramebuffers(2, [self.write_fbo, self.read_fbo])
            gl.glDeleteTextures(6, [self.write_index, self.write_lower, self.write_attach, self.read_index, self.read_lower, self.read_attach])
        self.write_index, self.write_lower, self.write_attach, self.write_fbo = hareward_peeling_framebuffer(self.max_H, self.max_W)
        self.read_index, self.read_lower, self.read_attach, self.read_fbo = hareward_peeling_framebuffer(self.max_H, self.max_W)

        # Register image to read from
        flags = cudart.cudaGraphicsRegisterFlags.cudaGraphicsRegisterFlagsReadOnly
        self.cu_read_index = CHECK_CUDART_ERROR(cudart.cudaGraphicsGLRegisterImage(self.read_index, gl.GL_TEXTURE_2D, flags))
        self.cu_write_index = CHECK_CUDART_ERROR(cudart.cudaGraphicsGLRegisterImage(self.write_index, gl.GL_TEXTURE_2D, flags))
        self.cu_read_lower = CHECK_CUDART_ERROR(cudart.cudaGraphicsGLRegisterImage(self.read_lower, gl.GL_TEXTURE_2D, flags))
        self.cu_write_lower = CHECK_CUDART_ERROR(cudart.cudaGraphicsGLRegisterImage(self.write_lower, gl.GL_TEXTURE_2D, flags))

        log(f'Created texture of h, w: {self.max_H}, {self.max_W}')

    def rasterize_generator(self, camera: Camera = None, length: int = None):  # some implementation requires no uploading of camera
        """Generator version of depth peeling: yields the back framebuffer after every pass
        so the CUDA side can read the per-pass index map before the next pass overwrites it."""
        front_fbo, front_index, front_lower = self.read_fbo, self.read_index, self.read_lower
        back_fbo, back_index, back_lower = self.write_fbo, self.write_index, self.write_lower

        # Only clear the output once
        gl.glBindFramebuffer(gl.GL_FRAMEBUFFER, front_fbo)  # for offscreen rendering to textures
        gl.glClearBufferiv(gl.GL_COLOR, 0, [-1])  # -1 marks "no point" in the index texture
        gl.glClearBufferfv(gl.GL_COLOR, 1, [0.0])
        gl.glClearBufferfv(gl.GL_DEPTH, 0, [1e9])  # this is for depth testing

        # Only clear the output once
        gl.glBindFramebuffer(gl.GL_FRAMEBUFFER, back_fbo)  # for offscreen rendering to textures
        gl.glClearBufferiv(gl.GL_COLOR, 0, [-1])
        gl.glClearBufferfv(gl.GL_COLOR, 1, [0.0])
        gl.glClearBufferfv(gl.GL_DEPTH, 0, [1e9])  # this is for depth testing

        # Prepare for the actual rendering, previous operations could rebind the vertex array
        self.use_gl_program(self.splat_program)
        self.upload_gl_uniforms(camera)
        gl.glBindVertexArray(self.vao)

        # The actual multi pass rendering process happens here
        for pass_index in range(self.pts_per_pix):
            # Swap buffers to render the next pass
            front_fbo, front_index, front_lower, back_fbo, back_index, back_lower = \
                back_fbo, back_index, back_lower, front_fbo, front_index, front_lower

            # Bind the read texture and bind the write render frame buffer
            gl.glBindTextures(0, 2, [front_index, front_lower])

            # Move content from write_fbo to screen fbo
            if pass_index > self.pts_per_pix * self.blit_last_ratio:  # no blitting almost has no effect on the rendering
                gl.glBindFramebuffer(gl.GL_READ_FRAMEBUFFER, front_fbo)
                gl.glBindFramebuffer(gl.GL_DRAW_FRAMEBUFFER, back_fbo)
                gl.glReadBuffer(gl.GL_COLOR_ATTACHMENT0 + 1)
                gl.glDrawBuffer(gl.GL_COLOR_ATTACHMENT0 + 1)
                gl.glBlitFramebuffer(0, 0, self.W, self.H, 0, 0, self.W, self.H, gl.GL_COLOR_BUFFER_BIT, gl.GL_NEAREST)
                gl.glBindFramebuffer(gl.GL_FRAMEBUFFER, back_fbo)  # for offscreen rendering to textures
                gl.glDrawBuffers(2, [gl.GL_COLOR_ATTACHMENT0, gl.GL_COLOR_ATTACHMENT1])
            else:
                # Only clear the output once
                gl.glBindFramebuffer(gl.GL_FRAMEBUFFER, back_fbo)  # for offscreen rendering to textures

            # Clear depth buffer for depth testing
            gl.glClearBufferiv(gl.GL_COLOR, 0, [-1])  # clear the indices buffer for later rendering and retrieving
            gl.glClearBufferfv(gl.GL_DEPTH, 0, [1e9])  # this is for depth testing

            # The actual drawing pass with render things out to the write_fbo
            gl.glDrawArrays(gl.GL_POINTS, 0, length if length is not None else len(self.verts))  # number of vertices
            yield back_fbo  # give the CUDA end a chance to read from this frame buffer after rendering

        # Restore states of things
        gl.glBindFramebuffer(gl.GL_FRAMEBUFFER, 0)
        gl.glBindVertexArray(0)
        return

    def forward(self,
                xyz: torch.Tensor, rgb: torch.Tensor, rad: torch.Tensor, occ: torch.Tensor,
                batch: dotdict,
                return_frags: bool = False,
                return_full: bool = False,
                ):
        """
        Get all indices from the depth peeling passes
        Compute the vertex weight here in torch(cuda)
        Use the indices to pass through a compositor
        The backward pass should only be valid on the torch side, and it should've been enough

        TODO: This function is too memory intensive
        TODO: Performing IBR is too memory intensive
        """
        # This the slow part, but not differentiable
        idx, _, _ = self.forward_idx(xyz, rad, batch)  # B, H, W, K
        msk = idx != -1  # B, H, W, K
        idx = torch.where(msk, idx, 0).long()

        # Sample things needed for computing screen space weight
        H, W, K, R, T, C = get_opencv_camera_params(batch)
        K, R, T, C = K.to(xyz.dtype), R.to(xyz.dtype), T.to(xyz.dtype), C.to(xyz.dtype)
        pix_xyz = (xyz @ R.mT + T.mT) @ K.mT  # B, P, 3
        pix_xyz_xy = pix_xyz[..., :-1] / (pix_xyz[..., -1:] + 1e-10)
        pix_rad = abs(K[..., 1, 1][..., None] * rad[..., 0] / (pix_xyz[..., -1] + 1e-10))  # z: B, 1 * B, N, world space radius

        mean_xy = multi_gather(pix_xyz_xy, idx.view(idx.shape[0], -1), dim=-2).view(*idx.shape, 2)  # B, HWK, 2 -> B, H, W, K, 2
        xy = create_meshgrid(H, W, idx.device, dtype=xyz.dtype).flip(-1)[None].expand(idx.shape[0], H, W, 2)  # create screen space xy (opencv)
        dists = (xy[..., None, :] - mean_xy).pow(2).sum(-1)  # B, H, W, K

        # Point values
        dpt = (xyz - C.mT).norm(dim=-1, keepdim=True)  # B, N, 1
        pix_occ = multi_gather(occ, idx.view(idx.shape[0], -1), dim=-2).view(*idx.shape)
        pix_rad = multi_gather(pix_rad, idx.view(idx.shape[0], -1), dim=-1).view(*idx.shape)  # -> B, H, W, K
        pix_occ = pix_occ * (1 - dists / (pix_rad * pix_rad + 1e-10))  # B, H, W, K
        pix_occ = pix_occ.clip(0, 1)
        pix_occ = torch.where(msk, pix_occ, 0)

        if return_frags:
            return idx, pix_occ  # B, H, W, K

        # The actual computation
        rgb = torch.cat([rgb, occ, dpt], dim=-1)  # B, N, 3 + C
        pix_rgb = multi_gather(rgb, idx.view(idx.shape[0], -1), dim=-2).view(*idx.shape, rgb.shape[-1])  # B, H, W, K, -1
        _, rgb, _ = volume_rendering(pix_rgb, pix_occ[..., None])  # B, H, W, -1
        rgb, acc, dpt = rgb[..., :-2], rgb[..., -2:-1], rgb[..., -1:]
        dpt = dpt + (1 - acc) * dpt.max()  # only for the looks (rendered depth are already premultiplied)

        if return_full:
            return rgb, acc, dpt, idx, pix_occ
        else:
            return rgb, acc, dpt

    def forward_idx(self, xyz: torch.Tensor, rad: torch.Tensor, batch: dotdict):
        """Run all peeling passes and return (index, hit-mask, depth) maps of shape (1, H, W, K)."""
        from cuda import cudart
        kind = cudart.cudaMemcpyKind.cudaMemcpyDeviceToDevice

        # !: BATCH
        H, W = batch.meta.H[0].item(), batch.meta.W[0].item()
        self.resize_textures(H, W)  # maybe resize the texture
        self.resize_buffers(xyz.shape[1])  # maybe resize the buffer
        _, _, old_W, old_H = gl.glGetIntegerv(gl.GL_VIEWPORT)
        gl.glViewport(0, 0, W, H)
        gl.glScissor(0, 0, W, H)  # only render in this small region of the viewport

        # Prepare for input data
        data = torch.cat([xyz, rad], dim=-1).type(self.dtype).ravel()

        # Upload to opengl for rendering
        CHECK_CUDART_ERROR(cudart.cudaGraphicsMapResources(1, self.cu_vbo, torch.cuda.current_stream().cuda_stream))
        cu_vbo_ptr, cu_vbo_size = CHECK_CUDART_ERROR(cudart.cudaGraphicsResourceGetMappedPointer(self.cu_vbo))
        assert cu_vbo_size >= data.numel() * data.element_size(), f'PyTorch(CUDA) and OpenGL vertex buffer size mismatch ({data.numel() * data.element_size()} v.s. {cu_vbo_size}), CUDA side should be less than or equal to the OpenGL side'
        CHECK_CUDART_ERROR(cudart.cudaMemcpyAsync(cu_vbo_ptr,
                                                  data.data_ptr(),
                                                  data.numel() * data.element_size(),
                                                  kind,
                                                  torch.cuda.current_stream().cuda_stream))
        CHECK_CUDART_ERROR(cudart.cudaGraphicsUnmapResources(1, self.cu_vbo, torch.cuda.current_stream().cuda_stream))

        # Perform rasterization (depth peeling using OpenGL)
        if 'meta_stream' in batch.meta: batch.meta.meta_stream.synchronize()  # wait for gpu -> cpu copy to finish
        # FIXME: Strange bug occurs if batch parameter is passed in directly for the construction of Camera(batch=batch.meta)
        gen = self.rasterize_generator(Camera(batch=batch.meta), xyz.shape[-2])  # will upload and render, save output buffer to back_fbo

        ind_maps = []
        dpt_maps = []
        acc_maps = []
        for back_fbo in gen:
            # Copy rendered image and depth back as tensor
            cu_tex = self.cu_write_index if back_fbo == self.write_fbo else self.cu_read_index  # double buffered depth peeling
            cu_dpt = self.cu_write_lower if back_fbo == self.write_fbo else self.cu_read_lower  # double buffered depth peeling

            # Prepare the output # !: BATCH
            ind_map = torch.empty((H, W, 1), dtype=torch.int, device='cuda')  # to hold the data from opengl
            dpt_map = torch.empty((H, W, 1), dtype=torch.float, device='cuda')  # to hold the data from opengl

            # The resources in resources may be accessed by CUDA until they are unmapped.
            # The graphics API from which resources were registered should not access any resources while they are mapped by CUDA.
            # If an application does so, the results are undefined.
            CHECK_CUDART_ERROR(cudart.cudaGraphicsMapResources(1, cu_tex, torch.cuda.current_stream().cuda_stream))
            CHECK_CUDART_ERROR(cudart.cudaGraphicsMapResources(1, cu_dpt, torch.cuda.current_stream().cuda_stream))
            cu_tex_arr = CHECK_CUDART_ERROR(cudart.cudaGraphicsSubResourceGetMappedArray(cu_tex, 0, 0))
            cu_dpt_arr = CHECK_CUDART_ERROR(cudart.cudaGraphicsSubResourceGetMappedArray(cu_dpt, 0, 0))
            CHECK_CUDART_ERROR(cudart.cudaMemcpy2DFromArrayAsync(ind_map.data_ptr(),  # dst
                                                                 W * ind_map.shape[-1] * ind_map.element_size(),  # dpitch
                                                                 cu_tex_arr,  # src
                                                                 0,  # wOffset
                                                                 0,  # hOffset
                                                                 W * ind_map.shape[-1] * ind_map.element_size(),  # width Width of matrix transfer (columns in bytes)
                                                                 H,  # height
                                                                 kind,  # kind
                                                                 torch.cuda.current_stream().cuda_stream))  # stream
            CHECK_CUDART_ERROR(cudart.cudaMemcpy2DFromArrayAsync(dpt_map.data_ptr(),
                                                                 W * dpt_map.shape[-1] * dpt_map.element_size(),
                                                                 cu_dpt_arr,
                                                                 0,
                                                                 0,
                                                                 W * dpt_map.shape[-1] * dpt_map.element_size(),
                                                                 H,
                                                                 kind,
                                                                 torch.cuda.current_stream().cuda_stream))
            CHECK_CUDART_ERROR(cudart.cudaGraphicsUnmapResources(1, cu_tex, torch.cuda.current_stream().cuda_stream))  # MARK: SYNC
            CHECK_CUDART_ERROR(cudart.cudaGraphicsUnmapResources(1, cu_dpt, torch.cuda.current_stream().cuda_stream))  # MARK: SYNC

            # Ouput reshaping
            ind_map, dpt_map = ind_map[None].flip(1), dpt_map[None].flip(1)  # flip vertically: GL is bottom-up
            acc_map = ind_map != -1  # -1 means no point hit this pixel in this pass
            dpt_map = torch.where(dpt_map == 0, dpt_map.max(), dpt_map)

            ind_maps.append(ind_map)
            acc_maps.append(acc_map)
            dpt_maps.append(dpt_map)

        ind_map = torch.cat(ind_maps, dim=-1)  # B, H, W, K
        acc_map = torch.cat(acc_maps, dim=-1)  # B, H, W, K
        dpt_map = torch.cat(dpt_maps, dim=-1)  # B, H, W, K

        # Some house keepings
        gl.glViewport(0, 0, old_W, old_H)
        gl.glScissor(0, 0, old_W, old_H)
        return ind_map, acc_map, dpt_map
|
evocodebench_data_78
|
from __future__ import annotations
from typing import TYPE_CHECKING
if TYPE_CHECKING:
from easyvolcap.utils.egl_utils import create_opengl_context, eglContextManager # must be imported before OpenGL.GL
from easyvolcap.runners.volumetric_video_viewer import VolumetricVideoViewer
import os
import sys
import glm
import torch
import ctypes
import numpy as np
from torch import nn
from enum import Enum, auto
from types import MethodType
from typing import Dict, Union, List
from glm import vec2, vec3, vec4, mat3, mat4, mat4x3, mat2x3 # This is actually highly optimized
from easyvolcap.utils.console_utils import *
from easyvolcap.utils.base_utils import dotdict
from easyvolcap.utils.viewer_utils import Camera
from easyvolcap.utils.bound_utils import get_bounds
from easyvolcap.utils.chunk_utils import multi_gather
from easyvolcap.utils.color_utils import cm_cpu_store
from easyvolcap.utils.ray_utils import create_meshgrid
from easyvolcap.utils.depth_utils import depth_curve_fn
from easyvolcap.utils.gaussian_utils import rgb2sh0, sh02rgb
from easyvolcap.utils.nerf_utils import volume_rendering, raw2alpha
from easyvolcap.utils.data_utils import load_pts, load_mesh, to_cuda, add_batch
from easyvolcap.utils.cuda_utils import CHECK_CUDART_ERROR, FORMAT_CUDART_ERROR
from easyvolcap.utils.net_utils import typed, torch_dtype_to_numpy_dtype, load_pretrained
from easyvolcap.utils.fcds_utils import prepare_feedback_transform, get_opencv_camera_params
# fmt: off
# Environment variable messaging
# Need to export EGL_DEVICE_ID before trying to import egl
# And we need to consider the case when we're performing distributed training
# from easyvolcap.engine import cfg, args # FIXME: GLOBAL IMPORTS
if 'easyvolcap.engine' in sys.modules and \
(sys.modules['easyvolcap.engine'].args.type != 'gui' or \
sys.modules['easyvolcap.engine'].cfg.viewer_cfg.type != 'VolumetricVideoViewer'): # FIXME: GLOBAL VARIABLES
try:
from easyvolcap.utils.egl_utils import create_opengl_context, eglContextManager
except Exception as e:
log(yellow(f'Could not import EGL related modules. {type(e).__name__}: {e}'))
os.environ['PYOPENGL_PLATFORM'] = ''
def is_wsl2():
    """Return True if the current environment is WSL2, False otherwise.

    Detection: the WSL config file must exist AND the WSL_DISTRO_NAME
    environment variable (set by WSL for every distro) must be non-empty.
    """
    # Coerce to bool: previously this returned the raw env-var string (or
    # None) even though the docstring promises True/False; callers that
    # only test truthiness are unaffected.
    return exists("/etc/wsl.conf") and bool(os.environ.get("WSL_DISTRO_NAME"))
if is_wsl2():
os.environ['PYOPENGL_PLATFORM'] = 'glx'
import OpenGL.GL as gl
try:
from OpenGL.GL import shaders
except Exception as e:
print(f'WARNING: OpenGL shaders import error encountered, please install the latest PyOpenGL from github using:')
print(f'pip install git+https://github.com/mcfletch/pyopengl')
raise e
# fmt: on
def linearize_depth(d, n: float, f: float):
    """Convert a depth-buffer sample `d` in [0, 1] to linear view-space depth.

    `n` and `f` are the near/far plane distances: d=0 maps to n, d=1 maps to f.
    Works elementwise on scalars or tensors.
    """
    ndc = d * 2 - 1  # remap the [0, 1] depth-buffer range to [-1, 1] NDC
    # Invert the projective depth mapping: NDC -> view-space distance
    return (2.0 * n * f) / (f + n - ndc * (f - n))
def common_opengl_options():
    """Enable the OpenGL capabilities shared by the renderers in this module.

    Must be called with a current GL context. Side effects: clears the color
    and depth buffers, and enables point-size control, back-face culling,
    depth testing (LEQUAL) and scissor testing.
    """
    # Use program point size
    gl.glEnable(gl.GL_PROGRAM_POINT_SIZE)

    # Performs face culling
    gl.glEnable(gl.GL_CULL_FACE)
    gl.glCullFace(gl.GL_BACK)

    # Performs alpha trans testing
    # gl.glEnable(gl.GL_ALPHA_TEST)
    # Removed from core profiles, so swallow the error when unsupported
    try: gl.glEnable(gl.GL_ALPHA_TEST)
    except gl.GLError as e: pass

    # Performs z-buffer testing
    gl.glEnable(gl.GL_DEPTH_TEST)
    # gl.glDepthMask(gl.GL_TRUE)
    gl.glDepthFunc(gl.GL_LEQUAL)
    # gl.glDepthRange(-1.0, 1.0)
    gl.glClear(gl.GL_COLOR_BUFFER_BIT | gl.GL_DEPTH_BUFFER_BIT)

    # Enable some masking tests
    gl.glEnable(gl.GL_SCISSOR_TEST)

    # Enable this to correctly render points
    # https://community.khronos.org/t/gl-point-sprite-gone-in-3-2/59310
    # gl.glEnable(gl.GL_POINT_SPRITE) # MARK: ONLY SPRITE IS WORKING FOR NOW
    # Also removed from core profiles; ignore when unsupported
    try: gl.glEnable(gl.GL_POINT_SPRITE) # MARK: ONLY SPRITE IS WORKING FOR NOW
    except gl.GLError as e: pass
    # gl.glEnable(gl.GL_POINT_SMOOTH) # MARK: ONLY SPRITE IS WORKING FOR NOW

    # # Configure how we store the pixels in memory for our subsequent reading of the FBO to store the rendering into memory.
    # # The second argument specifies that our pixels will be in bytes.
    # gl.glPixelStorei(gl.GL_PACK_ALIGNMENT, 1)
def load_shader_source(file: str = 'splat.frag'):
    """Return the GLSL source text for `file`.

    Resolution order: the path exactly as given, then the bundled
    ``shaders/`` directory next to this module, then that candidate with
    the ``shaders/`` segment stripped out.

    Raises:
        RuntimeError: when no candidate path exists on disk.
    """
    if not exists(file):
        # Not a direct path: look inside the module's bundled shader folder
        file = f'{dirname(__file__)}/shaders/{file}'
        if not exists(file):
            # Last resort: drop the shaders/ segment from the candidate
            file = file.replace('shaders/', '')
    if not exists(file):
        raise RuntimeError(f'Shader file: {file} does not exist')
    with open(file, 'r') as f:
        return f.read()
def use_gl_program(program: Union[shaders.ShaderProgram, dict]):
    """Activate an OpenGL shader program.

    When `program` is a dict it must carry `VERT_SHADER_SRC` and
    `FRAG_SHADER_SRC` entries; the sources are compiled and linked into a
    fresh program object before binding.
    """
    if isinstance(program, dict):
        # Recompile the program from the user-supplied sources
        sources = dotdict(program)
        vert = shaders.compileShader(sources.VERT_SHADER_SRC, gl.GL_VERTEX_SHADER)
        frag = shaders.compileShader(sources.FRAG_SHADER_SRC, gl.GL_FRAGMENT_SHADER)
        program = shaders.compileProgram(vert, frag)
    return gl.glUseProgram(program)
class Mesh:
    """Debug-visualization renderer for meshes and point clouds on raw OpenGL.

    Holds geometry as torch tensors and mirrors it into a VAO/VBO/EBO so it
    can be drawn as points, lines, triangles, quads or strips. Buffers grow
    lazily via `resize_buffers`; not differentiable (see comments below).
    """

    class RenderType(Enum):
        # NOTE: the enum value doubles as the index count per primitive (see face_size)
        POINTS = 1
        LINES = 2
        TRIS = 3
        QUADS = 4  # TODO: Support quad loading
        STRIPS = 5

    # Helper class to render a mesh on opengl
    # This implementation should only be used for debug visualization
    # Since no differentiable mechanism will be added
    # We recommend using nvdiffrast and pytorch3d's point renderer directly if you will to optimize these structures directly
    def __init__(self,
                 verts: torch.Tensor = torch.tensor([[0, 0, 0], [0, 1, 0], [0, 0, 1]]),  # need to call update after update
                 faces: torch.Tensor = torch.tensor([[0, 1, 2]]),  # need to call update after update
                 colors: torch.Tensor = None,
                 normals: torch.Tensor = None,
                 scalars: dotdict[str, torch.Tensor] = dotdict(),
                 render_type: RenderType = RenderType.TRIS,

                 # Misc info
                 name: str = 'mesh',
                 filename: str = '',
                 visible: bool = True,

                 # Render options
                 shade_flat: bool = False,  # smooth shading
                 point_radius: float = 0.015,
                 render_normal: bool = False,

                 # Storage options
                 store_device: str = 'cpu',
                 compute_device: str = 'cuda',
                 vert_sizes=[3, 3, 3],  # pos + color + norm

                 # Init options
                 est_normal_thresh: int = 100000,

                 # Ignore unused input
                 **kwargs,
                 ) -> None:
        # NOTE(review): the tensor/dotdict/list defaults are mutable objects
        # shared across calls — safe only while they are never mutated in place
        super().__init__()
        self.name = name
        self.visible = visible
        self.render_type = render_type

        self.shade_flat = shade_flat
        self.point_radius = point_radius
        self.render_normal = render_normal

        self.store_device = store_device
        self.compute_device = compute_device
        self.vert_sizes = vert_sizes

        self.est_normal_thresh = est_normal_thresh

        # Uniform and program
        self.compile_shaders()
        self.uniforms = dotdict()  # uniform values

        # Before initialization
        self.max_verts = 0
        self.max_faces = 0

        # OpenGL data
        if filename: self.load_from_file(filename)
        else: self.load_from_data(verts, faces, colors, normals, scalars)

    def compile_shaders(self):
        """Compile the mesh and point shader programs from the bundled GLSL sources."""
        try:
            self.mesh_program = shaders.compileProgram(
                shaders.compileShader(load_shader_source('mesh.vert'), gl.GL_VERTEX_SHADER),
                shaders.compileShader(load_shader_source('mesh.frag'), gl.GL_FRAGMENT_SHADER)
            )
            self.point_program = shaders.compileProgram(
                shaders.compileShader(load_shader_source('point.vert'), gl.GL_VERTEX_SHADER),
                shaders.compileShader(load_shader_source('point.frag'), gl.GL_FRAGMENT_SHADER)
            )
        except Exception as e:
            # Unescape the GLSL compiler output so multi-line errors print readably
            print(str(e).encode('utf-8').decode('unicode_escape'))
            raise e

    @property
    def n_verts_bytes(self):
        # Size of the interleaved vertex buffer in bytes
        return len(self.verts) * self.vert_size * self.verts.element_size()

    @property
    def n_faces_bytes(self):
        # Size of the index buffer in bytes
        return len(self.faces) * self.face_size * self.faces.element_size()

    @property
    def verts_data(self):  # a heavy copy operation
        # Interleave position + color + normal into one contiguous float32 array
        verts = torch.cat([self.verts, self.colors, self.normals], dim=-1).ravel().numpy()  # MARK: Maybe sync
        verts = np.asarray(verts, dtype=np.float32, order='C')
        return verts

    @property
    def faces_data(self):  # a heavy copy operation
        faces = self.faces.ravel().numpy()  # N, 3
        faces = np.asarray(faces, dtype=np.uint32, order='C')
        return faces

    @property
    def face_size(self):
        # Indices per primitive; relies on RenderType values encoding the index count
        return self.render_type.value

    @property
    def vert_size(self):
        # Floats per interleaved vertex (e.g. 3 pos + 3 color + 3 normal = 9)
        return sum(self.vert_sizes)

    def load_from_file(self, filename: str = 'assets/meshes/bunny.ply'):
        """Load geometry from disk and upload it to the GL buffers."""
        verts, faces, colors, normals, scalars = self.load_data_from_file(filename)
        self.load_from_data(verts, faces, colors, normals, scalars)

    def load_data_from_file(self, filename: str = 'assets/meshes/bunny.ply'):
        """Read geometry from `filename`, falling back to point-cloud loading
        when the file has no faces. Side effects: updates `self.name` and
        `self.render_type`."""
        self.name = os.path.split(filename)[-1]
        verts, faces, colors, normals, scalars = None, None, None, None, None
        verts, faces = load_mesh(filename, device=self.store_device)
        if not len(faces):
            verts, colors, normals, scalars = load_pts(filename)
            self.render_type = Mesh.RenderType.POINTS
        else:
            self.render_type = Mesh.RenderType(faces.shape[-1])  # use value
        return verts, faces, colors, normals, scalars

    def load_from_data(self, verts: torch.Tensor, faces: torch.Tensor, colors: torch.Tensor = None, normals: torch.Tensor = None, scalars: dotdict[str, torch.Tensor] = dotdict()):
        """Normalize dtypes/devices, fill in missing colors/normals, then upload to GL."""
        # Data type conversion
        verts = torch.as_tensor(verts)  # convert to tensor if input is of other types
        if verts.dtype == torch.float32:
            pass  # supports this for now
        elif verts.dtype == torch.float16:
            pass  # supports this for now
        else:
            verts = verts.type(torch.float)  # convert to float32 if input is of higher precision
        gl_dtype = gl.GL_FLOAT if verts.dtype == torch.float else gl.GL_HALF_FLOAT
        self.vert_gl_types = [gl_dtype] * len(self.vert_sizes)

        # Prepare main mesh data: vertices and faces
        self.verts = torch.as_tensor(verts, device=self.store_device)
        self.faces = torch.as_tensor(faces, device=self.store_device, dtype=torch.int32)  # NOTE: No uint32 support

        # Prepare colors and normals
        if colors is not None:
            self.colors = torch.as_tensor(colors, device=self.store_device, dtype=self.verts.dtype)
        else:
            # Default color: position normalized into the bounding box -> [0, 1] RGB
            bounds = get_bounds(self.verts[None])[0]
            self.colors = (self.verts - bounds[0]) / (bounds[1] - bounds[0])
        if normals is not None:
            self.normals = torch.as_tensor(normals, device=self.store_device, dtype=self.verts.dtype)
        else:
            self.estimate_vertex_normals()

        # Prepare other scalars
        if scalars is not None:
            for k, v in scalars.items():
                setattr(self, k, torch.as_tensor(v, device=self.store_device, dtype=self.verts.dtype))  # is this ok?

        # Prepare OpenGL related buffer
        self.update_gl_buffers()

    def estimate_vertex_normals(self):
        """Estimate per-vertex normals with pytorch3d, or fall back to using
        positions as stand-in normals (half precision data, unsupported render
        types, or more than `est_normal_thresh` vertices)."""
        def est_pcd_norms():
            # Point-cloud normal estimation on compute_device
            if self.verts.dtype == torch.half:
                self.normals = self.verts
            else:
                from pytorch3d.structures import Pointclouds, Meshes
                pcd = Pointclouds([self.verts]).to(self.compute_device)
                self.normals = pcd.estimate_normals()[0].cpu().to(self.verts.dtype)  # no batch dim

        def est_tri_norms():
            # Triangle-mesh vertex normal computation on compute_device
            if self.verts.dtype == torch.half:
                self.normals = self.verts
            else:
                from pytorch3d.structures import Pointclouds, Meshes
                mesh = Meshes([self.verts], [self.faces]).to(self.compute_device)
                self.normals = mesh.verts_normals_packed().cpu().to(self.verts.dtype)  # no batch dim

        if not len(self.verts) > self.est_normal_thresh:
            if self.render_type == Mesh.RenderType.TRIS: est_tri_norms()
            elif self.render_type == Mesh.RenderType.POINTS: est_pcd_norms()
            else:
                # log(yellow(f'Unsupported mesh type: {self.render_type} for normal estimation, skipping'))
                self.normals = self.verts
        else:
            # log(yellow(f'Number of points for mesh too large: {len(self.verts)} > {self.est_normal_thresh}, skipping normal estimation'))
            self.normals = self.verts

    def offscreen_render(self, eglctx: "eglContextManager", camera: Camera):
        """Render into the EGL offscreen context, resizing it to the camera first."""
        eglctx.resize(camera.W, camera.H)
        self.render(camera)

    def render(self, camera: Camera):
        """Draw this mesh with the program matching its render type; no-op when invisible."""
        if not self.visible: return

        # For point rendering
        if self.render_type == Mesh.RenderType.POINTS:
            gl.glUseProgram(self.point_program)
            self.use_gl_program(self.point_program)  # NOTE(review): binds the program again and refreshes uniform locations
        else:
            gl.glUseProgram(self.mesh_program)
            self.use_gl_program(self.mesh_program)
        self.upload_gl_uniforms(camera)
        gl.glBindVertexArray(self.vao)

        if self.render_type == Mesh.RenderType.POINTS:
            gl.glDrawArrays(gl.GL_POINTS, 0, len(self.verts))  # number of vertices
        elif self.render_type == Mesh.RenderType.LINES:
            gl.glBindBuffer(gl.GL_ELEMENT_ARRAY_BUFFER, self.ebo)
            gl.glDrawElements(gl.GL_LINES, len(self.faces) * self.face_size, gl.GL_UNSIGNED_INT, ctypes.c_void_p(0))  # number of indices
        elif self.render_type == Mesh.RenderType.TRIS:
            gl.glBindBuffer(gl.GL_ELEMENT_ARRAY_BUFFER, self.ebo)
            gl.glDrawElements(gl.GL_TRIANGLES, len(self.faces) * self.face_size, gl.GL_UNSIGNED_INT, ctypes.c_void_p(0))  # number of indices
        elif self.render_type == Mesh.RenderType.QUADS:
            gl.glBindBuffer(gl.GL_ELEMENT_ARRAY_BUFFER, self.ebo)
            gl.glDrawElements(gl.GL_QUADS, len(self.faces) * self.face_size, gl.GL_UNSIGNED_INT, ctypes.c_void_p(0))  # number of indices
        elif self.render_type == Mesh.RenderType.STRIPS:
            gl.glDrawArrays(gl.GL_TRIANGLE_STRIP, 0, len(self.verts))
        else:
            raise NotImplementedError

        gl.glBindVertexArray(0)

    def use_gl_program(self, program: shaders.ShaderProgram):
        """Bind `program` and cache the uniform locations this renderer uploads."""
        use_gl_program(program)
        self.uniforms.shade_flat = gl.glGetUniformLocation(program, "shade_flat")
        self.uniforms.point_radius = gl.glGetUniformLocation(program, "point_radius")
        self.uniforms.render_normal = gl.glGetUniformLocation(program, "render_normal")
        self.uniforms.H = gl.glGetUniformLocation(program, "H")
        self.uniforms.W = gl.glGetUniformLocation(program, "W")
        self.uniforms.n = gl.glGetUniformLocation(program, "n")
        self.uniforms.f = gl.glGetUniformLocation(program, "f")
        self.uniforms.P = gl.glGetUniformLocation(program, "P")
        self.uniforms.K = gl.glGetUniformLocation(program, "K")
        self.uniforms.V = gl.glGetUniformLocation(program, "V")
        self.uniforms.M = gl.glGetUniformLocation(program, "M")

    def upload_gl_uniforms(self, camera: Camera):
        """Upload camera matrices and render options to the currently bound program."""
        K = camera.gl_ixt  # hold the reference
        V = camera.gl_ext  # hold the reference
        M = glm.identity(mat4)
        P = K * V * M  # full object -> clip transform

        gl.glUniform1i(self.uniforms.shade_flat, self.shade_flat)
        gl.glUniform1f(self.uniforms.point_radius, self.point_radius)
        gl.glUniform1i(self.uniforms.render_normal, self.render_normal)
        gl.glUniform1i(self.uniforms.H, camera.H)  # o2w
        gl.glUniform1i(self.uniforms.W, camera.W)  # o2w
        gl.glUniform1f(self.uniforms.n, camera.n)  # o2w
        gl.glUniform1f(self.uniforms.f, camera.f)  # o2w
        gl.glUniformMatrix4fv(self.uniforms.P, 1, gl.GL_FALSE, glm.value_ptr(P))  # o2clip
        gl.glUniformMatrix4fv(self.uniforms.K, 1, gl.GL_FALSE, glm.value_ptr(K))  # c2clip
        gl.glUniformMatrix4fv(self.uniforms.V, 1, gl.GL_FALSE, glm.value_ptr(V))  # w2c
        gl.glUniformMatrix4fv(self.uniforms.M, 1, gl.GL_FALSE, glm.value_ptr(M))  # o2w

    def update_gl_buffers(self):
        """Copy the current vertex/face tensors into the GL buffers, growing them if needed."""
        # Might be overwritten
        self.resize_buffers(len(self.verts) if hasattr(self, 'verts') else 0,
                            len(self.faces) if hasattr(self, 'faces') else 0)  # maybe repeated
        if hasattr(self, 'verts'):
            gl.glBindBuffer(gl.GL_ARRAY_BUFFER, self.vbo)
            gl.glBufferSubData(gl.GL_ARRAY_BUFFER, 0, self.n_verts_bytes, self.verts_data)  # hold the reference
        if hasattr(self, 'faces'):
            gl.glBindBuffer(gl.GL_ELEMENT_ARRAY_BUFFER, self.ebo)
            gl.glBufferSubData(gl.GL_ELEMENT_ARRAY_BUFFER, 0, self.n_faces_bytes, self.faces_data)

    def resize_buffers(self, v: int = 0, f: int = 0):
        """Reallocate GL buffers only when the requested counts exceed current capacity."""
        if v > self.max_verts or f > self.max_faces:
            if v > self.max_verts: self.max_verts = v
            if f > self.max_faces: self.max_faces = f
            self.init_gl_buffers(v, f)

    def init_gl_buffers(self, v: int = 0, f: int = 0):
        """(Re)create the VAO/VBO/EBO with capacity for `v` vertices and `f` faces."""
        # This will only init the corresponding buffer object
        n_verts_bytes = v * self.vert_size * self.verts.element_size() if v > 0 else self.n_verts_bytes
        n_faces_bytes = f * self.face_size * self.faces.element_size() if f > 0 else self.n_faces_bytes

        # Housekeeping
        if hasattr(self, 'vao'):
            gl.glDeleteVertexArrays(1, [self.vao])
            gl.glDeleteBuffers(2, [self.vbo, self.ebo])

        self.vao = gl.glGenVertexArrays(1)
        self.vbo = gl.glGenBuffers(1)
        self.ebo = gl.glGenBuffers(1)

        gl.glBindVertexArray(self.vao)
        gl.glBindBuffer(gl.GL_ARRAY_BUFFER, self.vbo)
        gl.glBufferData(gl.GL_ARRAY_BUFFER, n_verts_bytes, ctypes.c_void_p(0), gl.GL_DYNAMIC_DRAW)  # NOTE: Using pointers here won't work

        # https://stackoverflow.com/questions/67195932/pyopengl-cannot-render-any-vao
        cumsum = 0
        for i, (s, t) in enumerate(zip(self.vert_sizes, self.vert_gl_types)):
            # Attribute i covers `s` components starting `cumsum` components into each interleaved vertex
            gl.glVertexAttribPointer(i, s, t, gl.GL_FALSE, self.vert_size * self.verts.element_size(), ctypes.c_void_p(cumsum * self.verts.element_size()))  # we use 32 bit float
            gl.glEnableVertexAttribArray(i)
            cumsum += s

        if n_faces_bytes > 0:
            # Some implementation has no faces, we dangerously ignore ebo here, assuming they will never be used
            gl.glBindBuffer(gl.GL_ELEMENT_ARRAY_BUFFER, self.ebo)
            gl.glBufferData(gl.GL_ELEMENT_ARRAY_BUFFER, n_faces_bytes, ctypes.c_void_p(0), gl.GL_DYNAMIC_DRAW)
        gl.glBindVertexArray(0)

    def render_imgui(mesh, viewer: 'VolumetricVideoViewer', batch: dotdict):
        """Draw the per-mesh imgui control panel (name, type, radius, toggles, delete).

        NOTE: uses `mesh` in place of `self`. Appends this mesh's index to
        `batch.will_delete` when the delete button is pressed.
        """
        from imgui_bundle import imgui
        from easyvolcap.utils.imgui_utils import push_button_color, pop_button_color

        i = batch.i
        will_delete = batch.will_delete
        slider_width = batch.slider_width

        imgui.push_item_width(slider_width * 0.5)
        mesh.name = imgui.input_text(f'Mesh name##{i}', mesh.name)[1]

        if imgui.begin_combo(f'Mesh type##{i}', mesh.render_type.name):
            for t in Mesh.RenderType:
                if imgui.selectable(t.name, mesh.render_type == t)[1]:
                    mesh.render_type = t  # construct enum from name
                if mesh.render_type == t:
                    imgui.set_item_default_focus()
            imgui.end_combo()
        imgui.pop_item_width()

        if hasattr(mesh, 'point_radius'):
            mesh.point_radius = imgui.slider_float(f'Point radius##{i}', mesh.point_radius, 0.0005, 3.0)[1]  # 0.1mm

        if hasattr(mesh, 'pts_per_pix'):
            mesh.pts_per_pix = imgui.slider_int('Point per pixel', mesh.pts_per_pix, 0, 60)[1]  # 0.1mm

        if hasattr(mesh, 'shade_flat'):
            push_button_color(0x55cc33ff if not mesh.shade_flat else 0x8855aaff)
            if imgui.button(f'Smooth##{i}' if not mesh.shade_flat else f' Flat ##{i}'):
                mesh.shade_flat = not mesh.shade_flat
            pop_button_color()

        if hasattr(mesh, 'render_normal'):
            imgui.same_line()
            push_button_color(0x55cc33ff if not mesh.render_normal else 0x8855aaff)
            if imgui.button(f'Color ##{i}' if not mesh.render_normal else f'Normal##{i}'):
                mesh.render_normal = not mesh.render_normal
            pop_button_color()

        if hasattr(mesh, 'visible'):
            imgui.same_line()
            push_button_color(0x55cc33ff if not mesh.visible else 0x8855aaff)
            if imgui.button(f'Show##{i}' if not mesh.visible else f'Hide##{i}'):
                mesh.visible = not mesh.visible
            pop_button_color()

        # Render the delete button
        imgui.same_line()
        push_button_color(0xff5533ff)
        if imgui.button(f'Delete##{i}'):
            will_delete.append(i)
        pop_button_color()
class Quad(Mesh):
    """Full-screen textured quad whose texture is shared between CUDA (pytorch) and OpenGL.

    A torch-rendered image is copied into the GL texture — device-to-device via
    CUDA-GL interop when available, otherwise through a slow host upload — and
    is then shown either by drawing the quad or by framebuffer blitting.
    """

    # A shared texture for CUDA (pytorch) and OpenGL
    # Could be rendererd to screen using blitting or just drawing a quad
    def __init__(self,
                 H: int = 256, W: int = 256,
                 use_quad_draw: bool = True,
                 use_quad_cuda: bool = True,
                 compose: bool = False,
                 compose_power: float = 1.0,
                 ):  # the texture to blip
        # NOTE(review): deliberately skips Mesh.__init__ — only the buffer state
        # needed for a static position-only quad is set up here
        self.use_quad_draw = use_quad_draw
        self.use_quad_cuda = use_quad_cuda
        self.vert_sizes = [3]  # only position
        self.vert_gl_types = [gl.GL_FLOAT]  # only position
        self.render_type = Mesh.RenderType.STRIPS  # remove side effects of settings _type
        self.max_verts, self.max_faces = 0, 0
        # Screen corners in NDC, drawn as a two-triangle strip
        self.verts = torch.as_tensor([[-1., -1., 0.5],
                                      [1., -1., 0.5],
                                      [-1., 1., 0.5],
                                      [1., 1., 0.5],])
        self.update_gl_buffers()
        self.compile_shaders()

        self.max_H, self.max_W = H, W
        self.H, self.W = H, W
        self.compose = compose
        self.compose_power = compose_power

        self.init_texture()

    @property
    def n_faces_bytes(self): return 0  # drawn as a strip: no index buffer

    def use_gl_program(self, program: shaders.ShaderProgram):
        """Bind the quad program and point its `tex` sampler at texture unit 0."""
        super().use_gl_program(program)
        self.uniforms.tex = gl.glGetUniformLocation(program, 'tex')
        gl.glUseProgram(self.quad_program)  # use a different program
        gl.glUniform1i(self.uniforms.tex, 0)

    def compile_shaders(self):
        """Compile the quad-drawing shader program from the bundled GLSL sources."""
        try:
            self.quad_program = shaders.compileProgram(
                shaders.compileShader(load_shader_source('quad.vert'), gl.GL_VERTEX_SHADER),
                shaders.compileShader(load_shader_source('quad.frag'), gl.GL_FRAGMENT_SHADER)
            )
        except Exception as e:
            # Unescape the GLSL compiler output so multi-line errors print readably
            print(str(e).encode('utf-8').decode('unicode_escape'))
            raise e

    def resize_textures(self, H: int, W: int):  # analogy to update_gl_buffers
        """Update the logical size; reallocate the texture only when capacity is exceeded."""
        self.H, self.W = H, W
        if self.H > self.max_H or self.W > self.max_W:  # max got updated
            # Grow with 5% headroom to avoid reallocating on every small resize
            self.max_H, self.max_W = max(int(self.H * 1.05), self.max_H), max(int(self.W * 1.05), self.max_W)
            self.init_texture()

    def init_texture(self):
        """(Re)create the RGBA8 texture + FBO and register the texture with CUDA if enabled.

        Falls back to the slow host-upload path (`use_quad_cuda = False`) when
        CUDA-GL registration fails.
        """
        if hasattr(self, 'cu_tex'):
            # Release the previous CUDA registration before deleting GL objects
            from cuda import cudart
            CHECK_CUDART_ERROR(cudart.cudaGraphicsUnregisterResource(self.cu_tex))

        if hasattr(self, 'fbo'):
            gl.glDeleteFramebuffers(1, [self.fbo])
            gl.glDeleteTextures(1, [self.tex])

        # Init the texture to be blit onto the screen
        self.tex = gl.glGenTextures(1)
        gl.glBindTexture(gl.GL_TEXTURE_2D, self.tex)
        gl.glTexImage2D(gl.GL_TEXTURE_2D, 0, gl.GL_RGBA8, self.max_W, self.max_H, 0, gl.GL_RGBA, gl.GL_UNSIGNED_BYTE, ctypes.c_void_p(0))
        gl.glTexParameteri(gl.GL_TEXTURE_2D, gl.GL_TEXTURE_MAG_FILTER, gl.GL_NEAREST)
        gl.glTexParameteri(gl.GL_TEXTURE_2D, gl.GL_TEXTURE_MIN_FILTER, gl.GL_NEAREST)

        # Init the framebuffer object if explicit blitting is used (slower than drawing quad)
        self.fbo = gl.glGenFramebuffers(1)
        old_fbo = gl.glGetIntegerv(gl.GL_FRAMEBUFFER_BINDING)  # restore the caller's FBO afterwards
        gl.glBindFramebuffer(gl.GL_FRAMEBUFFER, self.fbo)
        gl.glFramebufferTexture2D(gl.GL_FRAMEBUFFER, gl.GL_COLOR_ATTACHMENT0, gl.GL_TEXTURE_2D, self.tex, 0)
        gl.glBindFramebuffer(gl.GL_FRAMEBUFFER, old_fbo)

        if self.use_quad_cuda:
            from cuda import cudart
            if self.compose:
                # Both reading and writing of this resource is required
                flags = cudart.cudaGraphicsRegisterFlags.cudaGraphicsRegisterFlagsNone
            else:
                flags = cudart.cudaGraphicsRegisterFlags.cudaGraphicsRegisterFlagsWriteDiscard
            try:
                self.cu_tex = CHECK_CUDART_ERROR(cudart.cudaGraphicsGLRegisterImage(self.tex, gl.GL_TEXTURE_2D, flags))
            except RuntimeError as e:
                log(red('Failed to initialize Quad with CUDA-GL interop, will use slow upload: '), e)
                self.use_quad_cuda = False

    def copy_to_texture(self, image: torch.Tensor, x: int = 0, y: int = 0, w: int = 0, h: int = 0):
        """Copy a CUDA torch image into the GL texture device-to-device.

        Falls back to `upload_to_texture` when interop is disabled. When
        `self.compose` is set, alpha-blends `image` over the current
        framebuffer contents before uploading.
        """
        # NOTE(review): image appears to be uint8 H, W, 3|4 on the CUDA device
        # (a missing alpha channel is filled with 255) — confirm at call sites
        if not self.use_quad_cuda:
            self.upload_to_texture(image)
            return

        if not hasattr(self, 'cu_tex'):
            self.init_texture()
        # assert self.use_quad_cuda, "Need to enable cuda-opengl interop to copy from device to device, check creation of this Quad"
        w = w or self.W
        h = h or self.H
        if image.shape[-1] == 3:
            image = torch.cat([image, image.new_ones(image.shape[:-1] + (1,)) * 255], dim=-1)  # add alpha channel

        from cuda import cudart
        kind = cudart.cudaMemcpyKind.cudaMemcpyDeviceToDevice

        CHECK_CUDART_ERROR(cudart.cudaGraphicsMapResources(1, self.cu_tex, torch.cuda.current_stream().cuda_stream))
        cu_tex_arr = CHECK_CUDART_ERROR(cudart.cudaGraphicsSubResourceGetMappedArray(self.cu_tex, 0, 0))

        if self.compose:
            """
            Blit current framebuffer to this texture (self.tex)
            Read content of this texture into a cuda buffer
            Perform alpha blending based on the frame's alpha channel
            Copy the blended image back into the texture (self.tex)
            """
            old = gl.glGetInteger(gl.GL_DRAW_FRAMEBUFFER_BINDING)
            gl.glBindFramebuffer(gl.GL_DRAW_FRAMEBUFFER, self.fbo)  # read buffer defaults to 0
            gl.glBlitFramebuffer(x, y, w, h,
                                 x, y, w, h,
                                 gl.GL_COLOR_BUFFER_BIT, gl.GL_NEAREST)  # now self.tex contains the content of the already rendered frame
            gl.glBindFramebuffer(gl.GL_DRAW_FRAMEBUFFER, old)

            buffer = torch.empty_like(image)
            CHECK_CUDART_ERROR(cudart.cudaMemcpy2DFromArrayAsync(buffer.data_ptr(),  # dst
                                                                 w * 4 * buffer.element_size(),  # dpitch
                                                                 cu_tex_arr,  # src
                                                                 x * 4 * image.element_size(),  # wOffset
                                                                 y,  # hOffset
                                                                 w * 4 * buffer.element_size(),  # width Width of matrix transfer (columns in bytes)
                                                                 h,  # height
                                                                 kind,  # kind
                                                                 torch.cuda.current_stream().cuda_stream))  # stream

            # cv2.imwrite('image.png', image.flip(0).detach().cpu().numpy()[..., [2,1,0,3]])
            alpha = image[..., -1:] / 255
            image[..., :-1] = buffer[..., :-1] * (1 - alpha ** self.compose_power) + image[..., :-1] * alpha  # storing float into int
            image[..., -1:] = buffer[..., -1:] + image[..., -1:]
            image = image.clip(0, 255)

        CHECK_CUDART_ERROR(cudart.cudaMemcpy2DToArrayAsync(cu_tex_arr,
                                                           x * 4 * image.element_size(),
                                                           y,
                                                           image.data_ptr(),
                                                           w * 4 * image.element_size(),  # differently sized
                                                           w * 4 * image.element_size(),  # rgba, should do a composition first
                                                           h,
                                                           kind,
                                                           torch.cuda.current_stream().cuda_stream))
        CHECK_CUDART_ERROR(cudart.cudaGraphicsUnmapResources(1, self.cu_tex, torch.cuda.current_stream().cuda_stream))

    def upload_to_texture(self, ptr: np.ndarray, x: int = 0, y: int = 0, w: int = 0, h: int = 0):
        """Slow path: upload a (sub)image to the texture through host memory."""
        w = w or self.W
        h = h or self.H
        if isinstance(ptr, torch.Tensor):
            ptr = ptr.detach().cpu().numpy()  # slow sync and copy operation # MARK: SYNC
        gl.glBindTexture(gl.GL_TEXTURE_2D, self.tex)
        gl.glTexSubImage2D(gl.GL_TEXTURE_2D, 0, x, y, w, h, gl.GL_RGBA, gl.GL_UNSIGNED_BYTE, ptr[y:h, x:w])  # to gpu, might slow down?

    @property
    def verts_data(self):  # a heavy copy operation
        # Position-only layout; no color/normal interleaving like Mesh
        verts = self.verts.ravel().detach().cpu().numpy()  # MARK: Maybe sync
        verts = np.asarray(verts, dtype=np.float32, order='C')
        return verts

    def render(self, camera: Camera = None):
        """Draw the quad; the camera is ignored since the texture is pre-rendered."""
        self.draw()  # no uploading needed

    def draw(self, x: int = 0, y: int = 0, w: int = 0, h: int = 0):
        """
        Draw the texture into the (x, y, w, h) region of the current framebuffer,
        or fall back to blitting when `use_quad_draw` is disabled.
        This respects the OpenGL convention of lower left corners.
        """
        if not self.use_quad_draw:
            self.blit(x, y, w, h)
            return

        w = w or self.W
        h = h or self.H
        _, _, W, H = gl.glGetIntegerv(gl.GL_VIEWPORT)  # remember the full viewport to restore later
        gl.glViewport(x, y, w, h)
        gl.glScissor(x, y, w, h)  # only render in this small region of the viewport

        gl.glUseProgram(self.quad_program)  # use a different program
        gl.glActiveTexture(gl.GL_TEXTURE0)
        gl.glBindTexture(gl.GL_TEXTURE_2D, self.tex)

        gl.glBindVertexArray(self.vao)
        gl.glDrawArrays(gl.GL_TRIANGLE_STRIP, 0, len(self.verts))
        gl.glBindVertexArray(0)

        # Some house keepings
        gl.glViewport(0, 0, W, H)
        gl.glScissor(0, 0, W, H)

    def blit(self, x: int = 0, y: int = 0, w: int = 0, h: int = 0):
        """
        Blit this quad's FBO into the current draw framebuffer.
        This respects the OpenGL convention of lower left corners.
        """
        w = w or self.W
        h = h or self.H
        old = gl.glGetInteger(gl.GL_READ_FRAMEBUFFER_BINDING)
        gl.glBindFramebuffer(gl.GL_READ_FRAMEBUFFER, self.fbo)  # write buffer defaults to 0
        gl.glBlitFramebuffer(x, y, x + w, y + h,  # the height is flipped
                             x, y, x + w, y + h,  # the height is flipped
                             gl.GL_COLOR_BUFFER_BIT, gl.GL_NEAREST)
        gl.glBindFramebuffer(gl.GL_READ_FRAMEBUFFER, old)
class UQuad(Mesh):
    """
    Responsible for initializing textures with a single value
    or blitting a texture to a framebuffer (possibly better done with blit instead of quad drawing)
    Effectively clearing the texture for real, see: https://stackoverflow.com/questions/37335281/is-glcleargl-color-buffer-bit-preferred-before-a-whole-frame-buffer-overwritte
    """

    def __init__(self):
        # Deliberately bypasses Mesh.__init__: only a static position-only quad
        # and the uniform-clearing program are needed
        self.n_blit_values = 3  # number of value/use_tex/tex uniform slots in the shader
        self.vert_sizes = [3]  # only position
        self.vert_gl_types = [gl.GL_FLOAT]  # only position
        self.max_verts, self.max_faces = 0, 0
        # Full-screen quad in NDC, drawn as a triangle strip
        self.verts = torch.as_tensor([[-1., -1., 0.5],
                                      [1., -1., 0.5],
                                      [-1., 1., 0.5],
                                      [1., 1., 0.5],])
        self.compile_shaders()
        self.uniforms = dotdict()  # uniform values
        self.use_gl_programs(self.quad_program)
        self.update_gl_buffers()

    @property
    def n_faces_bytes(self): return 0  # drawn as a strip: no index buffer

    @property
    def verts_data(self):  # a heavy copy operation
        verts = self.verts.ravel().detach().cpu().numpy()  # MARK: Maybe sync
        verts = np.asarray(verts, dtype=np.float32, order='C')
        return verts

    def use_gl_programs(self, program: shaders.ShaderProgram):
        """Cache the uniform locations of `program` and bind its samplers to units 0..n-1."""
        for i in range(self.n_blit_values):
            self.uniforms[f'value{i}'] = gl.glGetUniformLocation(program, f'value{i}')
        for i in range(self.n_blit_values):
            self.uniforms[f'use_tex{i}'] = gl.glGetUniformLocation(program, f'use_tex{i}')
        # FIX: was `gl.glUseProgram(self.program)` — `self.program` is never
        # assigned on this class (only `quad_program` is), so the original line
        # raised AttributeError; bind the program we were handed instead. The
        # program must be bound before the glUniform1i calls below take effect.
        gl.glUseProgram(program)
        for i in range(self.n_blit_values):
            self.uniforms[f'tex{i}'] = gl.glGetUniformLocation(program, f'tex{i}')
            gl.glUniform1i(self.uniforms[f'tex{i}'], i)  # sampler `tex{i}` reads texture unit i

    def upload_gl_uniforms(self, values: List[List[float]], use_texs: List[bool]):
        """Upload per-slot clear values and use-texture flags to the bound program."""
        for i, v in enumerate(values):
            v = vec4(v)  # HACK: Hold the reference for this upload
            gl.glUniform4fv(self.uniforms[f'value{i}'], 1, glm.value_ptr(v))  # as float array
        for i, v in enumerate(use_texs):
            gl.glUniform1i(self.uniforms[f'use_tex{i}'], v)

    def compile_shaders(self):
        """Compile the uniform-quad program from the bundled GLSL sources."""
        try:
            self.quad_program = shaders.compileProgram(
                shaders.compileShader(load_shader_source('uquad.vert'), gl.GL_VERTEX_SHADER),
                shaders.compileShader(load_shader_source('uquad.frag'), gl.GL_FRAGMENT_SHADER)
            )
        except Exception as e:
            # Unescape the GLSL compiler output so multi-line errors print readably
            print(str(e).encode('utf-8').decode('unicode_escape'))
            raise e

    def draw(self, values: List[List[float]] = [], use_texs=[]):
        """
        This function will render 'value' to the currently bound framebuffer, up to six outputs
        """
        # Save the caller's program and VAO bindings; restored after the draw
        old_prog = gl.glGetIntegerv(gl.GL_CURRENT_PROGRAM)
        old_vao = gl.glGetIntegerv(gl.GL_VERTEX_ARRAY_BINDING)

        gl.glUseProgram(self.quad_program)
        self.upload_gl_uniforms(values, use_texs)  # should be a noop

        # Prepare to render to textures
        gl.glBindVertexArray(self.vao)
        gl.glDrawArrays(gl.GL_TRIANGLE_STRIP, 0, len(self.verts))  # number of vertices
        gl.glBindVertexArray(old_vao)
        gl.glUseProgram(old_prog)
class DQuad(UQuad):
    """UQuad variant that writes depth: draws with the depth test forced to
    always pass, using the `dquad` shader pair instead of `uquad`."""

    def compile_shaders(self):
        """Compile the depth-quad program from the bundled GLSL sources."""
        try:
            vert = shaders.compileShader(load_shader_source('dquad.vert'), gl.GL_VERTEX_SHADER)
            frag = shaders.compileShader(load_shader_source('dquad.frag'), gl.GL_FRAGMENT_SHADER)
            self.quad_program = shaders.compileProgram(vert, frag)
        except Exception as e:
            # Unescape the GLSL compiler output so multi-line errors print readably
            print(str(e).encode('utf-8').decode('unicode_escape'))
            raise e

    def draw(self, values: List[List[float]] = [], use_texs=[]):
        """Draw the quad with depth testing forced to GL_ALWAYS, restoring the
        caller's depth function afterwards."""
        prev_depth_func = gl.glGetIntegerv(gl.GL_DEPTH_FUNC)
        gl.glDepthFunc(gl.GL_ALWAYS)  # every fragment passes, so depth is overwritten
        super().draw(values, use_texs)
        gl.glDepthFunc(prev_depth_func)
def hardware_rendering_framebuffer(H: int, W: int, gl_tex_dtype=gl.GL_RGBA16F):
    """Create an offscreen FBO with a color attachment, two single-channel
    float color attachments (depth_upper / depth_lower) and a real depth buffer.

    Returns:
        (color_buffer, depth_upper, depth_lower, depth_attach, fbo) GL handles.

    Raises:
        RuntimeError: when the framebuffer is incomplete.
    """
    # Prepare for write frame buffers
    color_buffer = gl.glGenTextures(1)
    depth_upper = gl.glGenTextures(1)
    depth_lower = gl.glGenTextures(1)
    depth_attach = gl.glGenTextures(1)
    fbo = gl.glGenFramebuffers(1)  # generate 1 framebuffer, storereference in fb

    # Init the texture (call the resizing function), will simply allocate empty memory
    # The internal format describes how the texture shall be stored in the GPU. The format describes how the format of your pixel data in client memory (together with the type parameter).
    gl.glBindTexture(gl.GL_TEXTURE_2D, color_buffer)
    gl.glTexImage2D(gl.GL_TEXTURE_2D, 0, gl_tex_dtype, W, H, 0, gl.GL_RGBA, gl.GL_FLOAT, ctypes.c_void_p(0))  # 16 * 4
    gl.glTexParameteri(gl.GL_TEXTURE_2D, gl.GL_TEXTURE_MAG_FILTER, gl.GL_NEAREST)
    gl.glTexParameteri(gl.GL_TEXTURE_2D, gl.GL_TEXTURE_MIN_FILTER, gl.GL_NEAREST)

    gl.glBindTexture(gl.GL_TEXTURE_2D, depth_upper)
    gl.glTexImage2D(gl.GL_TEXTURE_2D, 0, gl.GL_R32F, W, H, 0, gl.GL_RED, gl.GL_FLOAT, ctypes.c_void_p(0))  # 32
    gl.glTexParameteri(gl.GL_TEXTURE_2D, gl.GL_TEXTURE_MAG_FILTER, gl.GL_NEAREST)
    gl.glTexParameteri(gl.GL_TEXTURE_2D, gl.GL_TEXTURE_MIN_FILTER, gl.GL_NEAREST)

    gl.glBindTexture(gl.GL_TEXTURE_2D, depth_lower)
    gl.glTexImage2D(gl.GL_TEXTURE_2D, 0, gl.GL_R32F, W, H, 0, gl.GL_RED, gl.GL_FLOAT, ctypes.c_void_p(0))  # 32
    gl.glTexParameteri(gl.GL_TEXTURE_2D, gl.GL_TEXTURE_MAG_FILTER, gl.GL_NEAREST)
    gl.glTexParameteri(gl.GL_TEXTURE_2D, gl.GL_TEXTURE_MIN_FILTER, gl.GL_NEAREST)

    gl.glBindTexture(gl.GL_TEXTURE_2D, depth_attach)
    gl.glTexImage2D(gl.GL_TEXTURE_2D, 0, gl.GL_DEPTH_COMPONENT24, W, H, 0, gl.GL_DEPTH_COMPONENT, gl.GL_UNSIGNED_INT, ctypes.c_void_p(0))  # 32
    gl.glTexParameteri(gl.GL_TEXTURE_2D, gl.GL_TEXTURE_MAG_FILTER, gl.GL_NEAREST)
    gl.glTexParameteri(gl.GL_TEXTURE_2D, gl.GL_TEXTURE_MIN_FILTER, gl.GL_NEAREST)

    # Bind texture to fbo
    gl.glBindFramebuffer(gl.GL_FRAMEBUFFER, fbo)
    gl.glFramebufferTexture2D(gl.GL_FRAMEBUFFER, gl.GL_COLOR_ATTACHMENT0, gl.GL_TEXTURE_2D, color_buffer, 0)  # location 0
    gl.glFramebufferTexture2D(gl.GL_FRAMEBUFFER, gl.GL_COLOR_ATTACHMENT1, gl.GL_TEXTURE_2D, depth_upper, 0)  # location 1
    gl.glFramebufferTexture2D(gl.GL_FRAMEBUFFER, gl.GL_COLOR_ATTACHMENT2, gl.GL_TEXTURE_2D, depth_lower, 0)  # location 1
    gl.glFramebufferTexture2D(gl.GL_FRAMEBUFFER, gl.GL_DEPTH_ATTACHMENT, gl.GL_TEXTURE_2D, depth_attach, 0)
    gl.glDrawBuffers(3, [gl.GL_COLOR_ATTACHMENT0, gl.GL_COLOR_ATTACHMENT1, gl.GL_COLOR_ATTACHMENT2])

    # Check framebuffer status
    if gl.glCheckFramebufferStatus(gl.GL_FRAMEBUFFER) != gl.GL_FRAMEBUFFER_COMPLETE:
        log(red('Framebuffer not complete, exiting...'))
        raise RuntimeError('Incomplete framebuffer')

    # Restore the original state
    gl.glBindFramebuffer(gl.GL_FRAMEBUFFER, 0)
    return color_buffer, depth_upper, depth_lower, depth_attach, fbo
def hareward_peeling_framebuffer(H: int, W: int):
    """Create an offscreen framebuffer for hardware index-based depth peeling.

    Allocates three ``H x W`` textures and attaches them to a fresh FBO:

    - ``index_buffer`` (``GL_R32I``): per-pixel point index written by the
      peeling shader (cleared to -1 elsewhere to mean "no point").
    - ``depth_lower`` (``GL_R32F``): lower depth bound of the current peel.
    - ``depth_attach`` (``GL_DEPTH_COMPONENT24``): depth attachment used only
      for hardware depth testing, never sampled.

    Note: "hareward" is a long-standing typo for "hardware" kept for
    backward compatibility with existing callers.

    Args:
        H: texture height in pixels.
        W: texture width in pixels.

    Returns:
        Tuple ``(index_buffer, depth_lower, depth_attach, fbo)`` of OpenGL handles.

    Raises:
        RuntimeError: if the framebuffer is incomplete after attachment.
    """
    # Prepare for write frame buffers
    index_buffer = gl.glGenTextures(1)
    depth_lower = gl.glGenTextures(1)
    depth_attach = gl.glGenTextures(1)
    fbo = gl.glGenFramebuffers(1)  # generate 1 framebuffer, store reference in fbo

    # Init the texture (call the resizing function), will simply allocate empty memory
    # The internal format describes how the texture shall be stored in the GPU. The format describes the format of your pixel data in client memory (together with the type parameter).
    gl.glBindTexture(gl.GL_TEXTURE_2D, index_buffer)
    gl.glTexImage2D(gl.GL_TEXTURE_2D, 0, gl.GL_R32I, W, H, 0, gl.GL_RED_INTEGER, gl.GL_INT, ctypes.c_void_p(0))  # 32-bit signed integer indices
    gl.glTexParameteri(gl.GL_TEXTURE_2D, gl.GL_TEXTURE_MAG_FILTER, gl.GL_NEAREST)
    gl.glTexParameteri(gl.GL_TEXTURE_2D, gl.GL_TEXTURE_MIN_FILTER, gl.GL_NEAREST)
    gl.glBindTexture(gl.GL_TEXTURE_2D, depth_lower)
    gl.glTexImage2D(gl.GL_TEXTURE_2D, 0, gl.GL_R32F, W, H, 0, gl.GL_RED, gl.GL_FLOAT, ctypes.c_void_p(0))  # 32-bit float depth bound
    gl.glTexParameteri(gl.GL_TEXTURE_2D, gl.GL_TEXTURE_MAG_FILTER, gl.GL_NEAREST)
    gl.glTexParameteri(gl.GL_TEXTURE_2D, gl.GL_TEXTURE_MIN_FILTER, gl.GL_NEAREST)
    gl.glBindTexture(gl.GL_TEXTURE_2D, depth_attach)
    gl.glTexImage2D(gl.GL_TEXTURE_2D, 0, gl.GL_DEPTH_COMPONENT24, W, H, 0, gl.GL_DEPTH_COMPONENT, gl.GL_FLOAT, ctypes.c_void_p(0))  # 24-bit depth for the hardware depth test
    gl.glTexParameteri(gl.GL_TEXTURE_2D, gl.GL_TEXTURE_MAG_FILTER, gl.GL_NEAREST)
    gl.glTexParameteri(gl.GL_TEXTURE_2D, gl.GL_TEXTURE_MIN_FILTER, gl.GL_NEAREST)

    # Bind texture to fbo
    gl.glBindFramebuffer(gl.GL_FRAMEBUFFER, fbo)
    gl.glFramebufferTexture2D(gl.GL_FRAMEBUFFER, gl.GL_COLOR_ATTACHMENT0, gl.GL_TEXTURE_2D, index_buffer, 0)  # location 0
    gl.glFramebufferTexture2D(gl.GL_FRAMEBUFFER, gl.GL_COLOR_ATTACHMENT1, gl.GL_TEXTURE_2D, depth_lower, 0)  # location 1
    gl.glFramebufferTexture2D(gl.GL_FRAMEBUFFER, gl.GL_DEPTH_ATTACHMENT, gl.GL_TEXTURE_2D, depth_attach, 0)
    gl.glDrawBuffers(2, [gl.GL_COLOR_ATTACHMENT0, gl.GL_COLOR_ATTACHMENT1])

    # Check framebuffer status
    if gl.glCheckFramebufferStatus(gl.GL_FRAMEBUFFER) != gl.GL_FRAMEBUFFER_COMPLETE:
        log(red('Framebuffer not complete, exiting...'))
        raise RuntimeError('Incomplete framebuffer')

    # Restore the original state
    gl.glBindFramebuffer(gl.GL_FRAMEBUFFER, 0)
    return index_buffer, depth_lower, depth_attach, fbo
class Gaussian(Mesh):
    """Viewer mesh that renders a pretrained 3D Gaussian Splatting model.

    The gaussians are rasterized by ``GaussianModel.render`` on CUDA and the
    resulting RGBA image is shown through a fullscreen ``Quad``; the regular
    OpenGL vertex pipeline of ``Mesh`` is bypassed (the GL-related hooks below
    are stubbed out).
    """

    def __init__(self,
                 filename: str = 'assets/meshes/zju3dv.npz',  # checkpoint (.npz/.pt/.pth) or raw .ply
                 gaussian_cfg: dotdict = dotdict(),  # forwarded to GaussianModel's constructor
                 quad_cfg: dotdict = dotdict(),  # forwarded to the display Quad's constructor
                 view_depth: bool = False,  # show depth or show color
                 dpt_cm: str = 'linear',  # colormap used by depth_curve_fn when view_depth is on
                 H: int = 1024,
                 W: int = 1024,
                 **kwargs,
                 ):
        # Import Gaussian Model
        from easyvolcap.engine.registry import call_from_cfg
        from easyvolcap.utils.gaussian_utils import GaussianModel

        # Housekeeping
        super().__init__(**kwargs)
        self.name = split(filename)[-1]

        # Init Gaussian related models, for now only the first gaussian model is supported
        if filename.endswith('.npz') or filename.endswith('.pt') or filename.endswith('.pth'):
            # Load from GaussianTSampler
            pretrained, _ = load_pretrained(filename)  # loaded model and updated path (maybe)
            pretrained = pretrained.model
            # Strip the 'sampler.pcds.0.' prefix: only the first point cloud is kept
            state_dict = dotdict()
            for k, v in pretrained.items():
                if k.startswith('sampler.pcds.0'):
                    state_dict[k.replace('sampler.pcds.0.', '')] = v

            # Load the parameters into the gaussian model
            self.gaussian_model: GaussianModel = call_from_cfg(GaussianModel, gaussian_cfg)  # init empty gaussian model
            self.gaussian_model.load_state_dict(state_dict)  # load the first gaussian model
            self.gaussian_model.cuda()  # move the parameters to GPU
        elif filename.endswith('.ply'):
            # Load raw GaussianModel
            self.gaussian_model: GaussianModel = call_from_cfg(GaussianModel, gaussian_cfg)  # init empty gaussian model
            self.gaussian_model.load_ply(filename)  # load the original gaussian model
            self.gaussian_model.cuda()
        else:
            raise NotImplementedError

        # Init rendering quad
        self.quad: Quad = call_from_cfg(Quad, quad_cfg, H=H, W=W)

        # Other configurations
        self.view_depth = view_depth
        self.dpt_cm = dpt_cm

        # Remove Mesh-specific attributes that do not apply to gaussians
        # (presumably so the corresponding GUI controls disappear -- TODO confirm)
        del self.shade_flat
        del self.point_radius
        del self.render_normal

    # Disabling initialization: the Mesh GL pipeline is unused for gaussians
    def load_from_file(self, *args, **kwargs):
        pass

    def load_from_data(self, *args, **kwargs):
        pass

    def compile_shaders(self):
        pass

    def update_gl_buffers(self):
        pass

    def resize_textures(self, H: int, W: int):
        # Only the display quad owns textures here
        self.quad.resize_textures(H, W)

    # The actual rendering function
    @torch.no_grad()
    def render(self, camera: Camera):
        """Rasterize the gaussians for `camera` and blit the result to screen."""
        # Perform actual gaussian rendering
        batch = add_batch(to_cuda(camera.to_batch()))
        rgb, acc, dpt = self.gaussian_model.render(batch)
        if self.view_depth:
            rgba = torch.cat([depth_curve_fn(dpt, cm=self.dpt_cm), acc], dim=-1)  # H, W, 4
        else:
            rgba = torch.cat([rgb, acc], dim=-1)  # H, W, 4

        # Copy rendered tensor to screen; flip(0) converts image-space rows to GL bottom-up rows
        rgba = (rgba.clip(0, 1) * 255).type(torch.uint8).flip(0)  # transform
        self.quad.copy_to_texture(rgba)
        self.quad.render()

    # NOTE: first parameter is the instance, named `mesh` instead of `self`;
    # zero-argument super() still resolves correctly via the implicit __class__ cell
    def render_imgui(mesh, viewer: 'VolumetricVideoViewer', batch: dotdict):
        """Append a Color/Depth toggle button to the inherited imgui row."""
        super().render_imgui(viewer, batch)

        from imgui_bundle import imgui
        from easyvolcap.utils.imgui_utils import push_button_color, pop_button_color

        i = batch.i  # widget id suffix to keep imgui labels unique per mesh
        imgui.same_line()
        push_button_color(0x55cc33ff if not mesh.view_depth else 0x8855aaff)
        if imgui.button(f'Color##{i}' if not mesh.view_depth else f' Depth ##{i}'):
            mesh.view_depth = not mesh.view_depth
        pop_button_color()
class PointSplat(Gaussian, nn.Module):
    """Viewer mesh that renders a raw point cloud with per-point radius/alpha.

    Reuses the display-quad machinery of ``Gaussian`` but stores plain point
    attributes (xyz, rgb, occupancy, radius) as torch buffers and renders them
    with ``GaussianTSampler.render_radius`` instead of a gaussian model.
    """

    def __init__(self,
                 filename: str = 'assets/meshes/zju3dv.ply',  # only .ply input is supported
                 quad_cfg: dotdict = dotdict(),
                 view_depth: bool = False,  # show depth or show color
                 dpt_cm: str = 'linear',
                 H: int = 1024,
                 W: int = 1024,
                 **kwargs,
                 ):
        # Import Gaussian Model
        from easyvolcap.engine.registry import call_from_cfg
        from easyvolcap.utils.data_utils import load_pts
        from easyvolcap.utils.net_utils import make_buffer
        from easyvolcap.models.samplers.gaussiant_sampler import GaussianTSampler

        # Housekeeping
        # NOTE: super(Gaussian, self) deliberately skips Gaussian.__init__
        # (which would try to load a gaussian model) and initializes Mesh directly
        super(Gaussian, self).__init__(**kwargs)
        self.name = split(filename)[-1]
        self.render_radius = MethodType(GaussianTSampler.render_radius, self)  # override the method

        # Init PointSplat related models, for now only the first gaussian model is supported
        if filename.endswith('.ply'):
            # Load raw point attributes; alpha and radius come from named scalar channels
            pts, rgb, norms, scalars = load_pts(filename)
            occ, rad = scalars.alpha, scalars.radius
            self.pts = make_buffer(torch.from_numpy(pts))  # N, 3
            self.rgb = make_buffer(torch.from_numpy(rgb))  # N, 3
            self.occ = make_buffer(torch.from_numpy(occ))  # N, 1
            self.rad = make_buffer(torch.from_numpy(rad))  # N, 1
        else:
            raise NotImplementedError

        # Init rendering quad
        self.quad: Quad = call_from_cfg(Quad, quad_cfg, H=H, W=W)
        self.cuda()  # move registered buffers to cuda

        # Other configurations
        self.view_depth = view_depth
        self.dpt_cm = dpt_cm
        self.radius_mult = 1.0  # interactive multiplier, see render_imgui
        self.alpha_mult = 1.0  # interactive multiplier, see render_imgui

    # The actual rendering function
    @torch.no_grad()
    def render(self, camera: Camera):
        """Rasterize the point cloud for `camera` and blit the result to screen."""
        # Perform actual gaussian rendering
        batch = add_batch(to_cuda(camera.to_batch()))
        sh0 = rgb2sh0(self.rgb[..., None])  # colors as 0th-order spherical harmonics
        xyz = self.pts
        occ = (self.occ * self.alpha_mult).clip(0, 1)
        rad = self.rad * self.radius_mult
        rgb, acc, dpt = self.render_radius(*add_batch([xyz, sh0, rad, occ]), batch)
        rgb, acc, dpt = rgb[0], acc[0], dpt[0]  # drop the batch dimension
        if self.view_depth:
            rgba = torch.cat([depth_curve_fn(dpt, cm=self.dpt_cm), acc], dim=-1)  # H, W, 4
        else:
            rgba = torch.cat([rgb, acc], dim=-1)  # H, W, 4

        # Copy rendered tensor to screen; flip(0) converts image rows to GL bottom-up rows
        rgba = (rgba.clip(0, 1) * 255).type(torch.uint8).flip(0)  # transform
        self.quad.copy_to_texture(rgba)
        self.quad.render()

    # NOTE: first parameter is the instance, named `mesh` instead of `self`
    def render_imgui(mesh, viewer: 'VolumetricVideoViewer', batch: dotdict):
        """Append radius/alpha multiplier sliders to the inherited imgui row."""
        super().render_imgui(viewer, batch)

        i = batch.i  # widget id suffix to keep imgui labels unique per mesh
        from imgui_bundle import imgui
        mesh.radius_mult = imgui.slider_float(f'Point radius multiplier##{i}', mesh.radius_mult, 0.1, 3.0)[1]  # 0.1mm
        mesh.alpha_mult = imgui.slider_float(f'Point alpha multiplier##{i}', mesh.alpha_mult, 0.1, 3.0)[1]  # 0.1mm
class Splat(Mesh):  # FIXME: Not rendering, need to debug this
    """OpenGL point-splat renderer with multi-pass depth peeling.

    Each vertex carries position (3), color (3), radius (1) and alpha (1).
    ``rasterize_volume`` performs front-to-back alpha compositing by ping-ponging
    between two framebuffers (``read_*`` / ``write_*``), each holding a color
    texture, two depth-bound textures and a depth attachment; ``rasterize_solid``
    is the single-pass opaque fallback.
    """

    def __init__(self,
                 *args,
                 H: int = 512,
                 W: int = 512,
                 tex_dtype: str = torch.half,  # str name or torch.dtype of the color textures
                 pts_per_pix: int = 24,  # render less for the static background since we're only doing a demo
                 blit_last_ratio: float = 0.0,  # fraction of passes after which previous results are blitted forward
                 volume_rendering: bool = True,
                 radii_mult_volume: float = 1.00,  # 2 / 3 is the right integration, but will leave holes, 1.0 will make it bloat, 0.85 looks visually better
                 radii_mult_solid: float = 0.85,  # 2 / 3 is the right integration, but will leave holes, 1.0 will make it bloat, 0.85 looks visually better
                 point_smooth: bool = True,  # round (smoothed) points instead of squares
                 alpha_blending: bool = True,
                 **kwargs):
        kwargs = dotdict(kwargs)
        kwargs.vert_sizes = kwargs.get('vert_sizes', [3, 3, 1, 1])  # xyz, rgb, radius, alpha
        self.tex_dtype = getattr(torch, tex_dtype) if isinstance(tex_dtype, str) else tex_dtype
        self.gl_tex_dtype = gl.GL_RGBA16F if self.tex_dtype == torch.half else gl.GL_RGBA32F

        super().__init__(*args, **kwargs)
        self.use_gl_program(self.splat_program)

        self.pts_per_pix = pts_per_pix
        self.blit_last_ratio = blit_last_ratio
        self.volume_rendering = volume_rendering
        self.radii_mult_volume = radii_mult_volume
        self.radii_mult_solid = radii_mult_solid

        self.point_smooth = point_smooth
        self.alpha_blending = alpha_blending

        # max_H/max_W track the largest allocation so far; textures only grow
        self.max_H, self.max_W = H, W
        self.H, self.W = H, W
        self.init_textures()

        # Borrow the CUDA radius-splatting implementation as a bound method
        from easyvolcap.models.samplers.gaussiant_sampler import GaussianTSampler
        self.render_radius = MethodType(GaussianTSampler.render_radius, self)  # override the method

    @property
    def verts_data(self):  # a heavy copy operation
        # Interleave per-vertex attributes into one flat float32 stream for the VBO
        verts = torch.cat([self.verts, self.colors, self.radius, self.alpha], dim=-1).ravel().numpy()  # MARK: Maybe sync
        verts = np.asarray(verts, dtype=np.float32, order='C')  # this should only be invoked once
        return verts

    def use_gl_program(self, program: shaders.ShaderProgram):
        """Activate `program` and (re)query the splat-specific uniform locations."""
        super().use_gl_program(program)

        # Special controlling variables
        self.uniforms.alpha_blending = gl.glGetUniformLocation(program, f'alpha_blending')
        self.uniforms.point_smooth = gl.glGetUniformLocation(program, f'point_smooth')
        self.uniforms.radii_mult = gl.glGetUniformLocation(program, f'radii_mult')

        # Special rendering variables
        self.uniforms.pass_index = gl.glGetUniformLocation(program, f'pass_index')
        self.uniforms.read_color = gl.glGetUniformLocation(program, f'read_color')
        self.uniforms.read_upper = gl.glGetUniformLocation(program, f'read_upper')
        self.uniforms.read_lower = gl.glGetUniformLocation(program, f'read_lower')
        # Samplers are bound to fixed texture units 0/1/2
        gl.glUniform1i(self.uniforms.read_color, 0)
        gl.glUniform1i(self.uniforms.read_upper, 1)
        gl.glUniform1i(self.uniforms.read_lower, 2)

    def compile_shaders(self):
        """Compile the volume (splat) and solid (usplat) GLSL programs."""
        try:
            self.splat_program = shaders.compileProgram(
                shaders.compileShader(load_shader_source('splat.vert'), gl.GL_VERTEX_SHADER),
                shaders.compileShader(load_shader_source('splat.frag'), gl.GL_FRAGMENT_SHADER)
            )
            self.usplat_program = shaders.compileProgram(
                shaders.compileShader(load_shader_source('usplat.vert'), gl.GL_VERTEX_SHADER),
                shaders.compileShader(load_shader_source('usplat.frag'), gl.GL_FRAGMENT_SHADER)
            )
        except Exception as e:
            # GLSL errors arrive with escaped newlines; unescape for readability
            print(str(e).encode('utf-8').decode('unicode_escape'))
            raise e

    def rasterize(self, camera: Camera = None, length: int = None):
        """Dispatch to volume or solid rasterization; returns the output FBO handle."""
        if self.volume_rendering:
            return self.rasterize_volume(camera, length)
        else:
            return self.rasterize_solid(camera, length)

    def rasterize_volume(self, camera: Camera = None, length: int = None):  # some implementation requires no uploading of camera
        """
        Let's try to analyze what's happening here
        We want to:
            1. Render the front-most color to color buffer
            2. UNUSED: Render the front-most depth + some large margin to a depth upper limit buffer
            3. Render the front-most depth + some small margin to a depth lower limit buffer
            4. Switch between the render target and sampling target
            5. Use the previous rendered color, depth upper limit and lower limit as textures
            6. When current depth is smaller than the lower limit, we've already rendered this in the first pass, discard
            7. UNUSED: When current depth is larger than the upper limit, it will probabily not contribute much to final results, discard
            8. UNUSED: When the accumulated opacity reaches almost 1, subsequent rendering would not have much effect, return directly
            9. When the point coordinates falls out of bound of the current sphere, dicard (this could be optimized with finutining in rectangle)
            10. Finally, try to render the final color using the volume rendering equation (by accumulating alpha values from front to back)

        Required cleanup checklist:
            1. Before rendering the first pass, we need to clear the color and depth texture, this is not done, need to check multi-frame accumulation on this
            2. Before rendering next pass, it's also recommended to blit color and depth values from previous pass to avoid assign them in the shader

        Returns the FBO handle that holds the final composited image.
        """
        front_fbo, front_color, front_upper, front_lower = self.read_fbo, self.read_color, self.read_upper, self.read_lower
        back_fbo, back_color, back_upper, back_lower = self.write_fbo, self.write_color, self.write_upper, self.write_lower

        # Only clear the output once
        gl.glBindFramebuffer(gl.GL_FRAMEBUFFER, front_fbo)  # for offscreen rendering to textures
        gl.glClearBufferfv(gl.GL_COLOR, 0, [0.0, 0.0, 0.0, 0.0])
        # gl.glClearBufferfv(gl.GL_COLOR, 1, [1e9])
        gl.glClearBufferfv(gl.GL_COLOR, 2, [0.0, 0.0, 0.0, 0.0])
        gl.glClearBufferfv(gl.GL_DEPTH, 0, [1e9])  # this is for depth testing

        # Only clear the output once
        gl.glBindFramebuffer(gl.GL_FRAMEBUFFER, back_fbo)  # for offscreen rendering to textures
        gl.glClearBufferfv(gl.GL_COLOR, 0, [0.0, 0.0, 0.0, 0.0])
        # gl.glClearBufferfv(gl.GL_COLOR, 1, [1e9])
        gl.glClearBufferfv(gl.GL_COLOR, 2, [0.0, 0.0, 0.0, 0.0])
        gl.glClearBufferfv(gl.GL_DEPTH, 0, [1e9])  # this is for depth testing

        # Prepare for the actual rendering, previous operations could rebind the vertex array
        self.use_gl_program(self.splat_program)  # TODO: Implement this with a mapping and a lazy modification
        self.upload_gl_uniforms(camera)
        gl.glBindVertexArray(self.vao)

        # The actual multi pass rendering process happens here
        for pass_index in range(self.pts_per_pix):
            # Swap buffers to render the next pass: previous output becomes sampling input
            front_fbo, front_color, front_upper, front_lower, back_fbo, back_color, back_upper, back_lower = \
                back_fbo, back_color, back_upper, back_lower, front_fbo, front_color, front_upper, front_lower

            # Bind the read texture and bind the write render frame buffer
            gl.glBindTextures(0, 3, [front_color, front_upper, front_lower])

            # Move content from write_fbo to screen fbo
            if pass_index > self.pts_per_pix * self.blit_last_ratio:  # no blitting almost has no effect on the rendering
                gl.glBindFramebuffer(gl.GL_READ_FRAMEBUFFER, front_fbo)
                gl.glBindFramebuffer(gl.GL_DRAW_FRAMEBUFFER, back_fbo)
                for i in range(3):
                    gl.glReadBuffer(gl.GL_COLOR_ATTACHMENT0 + i)
                    gl.glDrawBuffer(gl.GL_COLOR_ATTACHMENT0 + i)
                    gl.glBlitFramebuffer(0, 0, self.W, self.H, 0, 0, self.W, self.H, gl.GL_COLOR_BUFFER_BIT, gl.GL_NEAREST)
                gl.glDrawBuffers(3, [gl.GL_COLOR_ATTACHMENT0, gl.GL_COLOR_ATTACHMENT1, gl.GL_COLOR_ATTACHMENT2])

            # Clear depth buffer for depth testing
            gl.glBindFramebuffer(gl.GL_FRAMEBUFFER, back_fbo)  # for offscreen rendering to textures
            gl.glClearBufferfv(gl.GL_DEPTH, 0, [1e9])  # this is for depth testing
            gl.glUniform1i(self.uniforms.pass_index, pass_index)  # pass index

            # The actual drawing pass with render things out to the write_fbo
            gl.glDrawArrays(gl.GL_POINTS, 0, length if length is not None else len(self.verts))  # number of vertices

        # Restore states of things
        gl.glBindFramebuffer(gl.GL_FRAMEBUFFER, 0)
        gl.glBindVertexArray(0)
        return back_fbo

    def upload_gl_uniforms(self, camera: Camera):
        """Upload camera uniforms plus the splat-specific toggles and radius multiplier."""
        super().upload_gl_uniforms(camera)
        gl.glUniform1i(self.uniforms.point_smooth, self.point_smooth)
        gl.glUniform1i(self.uniforms.alpha_blending, self.alpha_blending)

        if self.volume_rendering:
            gl.glUniform1f(self.uniforms.radii_mult, self.radii_mult_volume)  # radii mult
        else:
            gl.glUniform1f(self.uniforms.radii_mult, self.radii_mult_solid)  # radii mult

    def rasterize_solid(self, camera: Camera = None, length: int = None):
        """Single-pass opaque rendering with the usplat program; returns the output FBO."""
        # Only clear the output once
        back_fbo = self.write_fbo
        gl.glBindFramebuffer(gl.GL_FRAMEBUFFER, back_fbo)  # for offscreen rendering to textures
        gl.glClearBufferfv(gl.GL_COLOR, 0, [0.0, 0.0, 0.0, 0.0])  # color
        # gl.glClearBufferfv(gl.GL_COLOR, 1, [0.0])  # depth upper
        gl.glClearBufferfv(gl.GL_COLOR, 2, [0.0, 0.0, 0.0, 0.0])  # depth lower
        gl.glClearBufferfv(gl.GL_DEPTH, 0, [1e9])  # this is for depth testing

        # Prepare for the actual rendering, previous operations could rebind the vertex array
        self.use_gl_program(self.usplat_program)
        self.upload_gl_uniforms(camera)
        gl.glUniform1i(self.uniforms.pass_index, 0)  # pass index
        gl.glBindVertexArray(self.vao)

        # The actual drawing pass with render things out to the write_fbo
        gl.glDrawArrays(gl.GL_POINTS, 0, length if length is not None else len(self.verts))  # number of vertices

        # Restore states of things
        gl.glBindFramebuffer(gl.GL_FRAMEBUFFER, 0)
        gl.glBindVertexArray(0)
        return back_fbo

    def show(self, back_fbo: int):
        """Blit the color attachment of `back_fbo` onto the default (screen) framebuffer."""
        # Move content from write_fbo to screen fbo
        gl.glBindFramebuffer(gl.GL_READ_FRAMEBUFFER, back_fbo)
        gl.glBindFramebuffer(gl.GL_DRAW_FRAMEBUFFER, 0)  # render the final content onto screen
        gl.glReadBuffer(gl.GL_COLOR_ATTACHMENT0)
        gl.glBlitFramebuffer(0, 0, self.W, self.H, 0, 0, self.W, self.H, gl.GL_COLOR_BUFFER_BIT, gl.GL_NEAREST)
        gl.glBindFramebuffer(gl.GL_FRAMEBUFFER, 0)

    def render(self, camera):
        if not self.visible: return
        self.show(self.rasterize(camera))

    def resize_textures(self, H: int, W: int):  # analogy to update_gl_buffers
        """Grow the offscreen textures if the requested size exceeds the allocation."""
        self.H, self.W = H, W
        if self.H > self.max_H or self.W > self.max_W:  # max got updated
            # Over-allocate by 5% to avoid reallocating on every tiny resize
            self.max_H, self.max_W = max(int(self.H * 1.05), self.max_H), max(int(self.W * 1.05), self.max_W)
            self.init_textures()

    def init_textures(self):
        """(Re)create the double-buffered offscreen framebuffers at max_H x max_W."""
        if hasattr(self, 'write_fbo'):
            # Release the previous allocation before creating a new one
            gl.glDeleteFramebuffers(2, [self.write_fbo, self.read_fbo])
            gl.glDeleteTextures(8, [self.write_color, self.write_upper, self.write_lower, self.write_attach, self.read_color, self.read_upper, self.read_lower, self.read_attach])
        self.write_color, self.write_upper, self.write_lower, self.write_attach, self.write_fbo = hardware_rendering_framebuffer(self.max_H, self.max_W, self.gl_tex_dtype)
        self.read_color, self.read_upper, self.read_lower, self.read_attach, self.read_fbo = hardware_rendering_framebuffer(self.max_H, self.max_W, self.gl_tex_dtype)
        log(f'Created texture of h, w: {self.max_H}, {self.max_W}')
class HardwareRendering(Splat):
    """Splat renderer with zero-copy CUDA-OpenGL interop.

    Vertex data is written straight from torch CUDA tensors into the OpenGL
    vertex buffer, and the rendered color/depth textures are read back into
    torch tensors, all via the CUDA graphics-interop API (no host round-trip).
    """

    def __init__(self,
                 dtype=torch.half,  # str name or torch.dtype of the vertex stream
                 **kwargs,
                 ):
        self.dtype = getattr(torch, dtype) if isinstance(dtype, str) else dtype
        self.gl_dtype = gl.GL_HALF_FLOAT if self.dtype == torch.half else gl.GL_FLOAT
        kwargs = dotdict(kwargs)
        kwargs.blit_last_ratio = kwargs.get('blit_last_ratio', 0.90)  # only blit near the final passes by default
        kwargs.vert_sizes = kwargs.get('vert_sizes', [3, 3, 1, 1])
        super().__init__(**kwargs)  # verts, color, radius, alpha

    @property
    def verts_data(self):  # a heavy copy operation
        # Same layout as Splat.verts_data but cast to the configured vertex dtype
        verts = torch.cat([self.verts, self.colors, self.radius, self.alpha], dim=-1).ravel().numpy()  # MARK: Maybe sync
        verts = np.asarray(verts, dtype=torch_dtype_to_numpy_dtype(self.dtype), order='C')  # this should only be invoked once
        return verts

    def init_gl_buffers(self, v: int = 0, f: int = 0):
        """(Re)allocate GL buffers and register the VBO for CUDA write access."""
        from cuda import cudart
        if hasattr(self, 'cu_vbo'):
            # Must unregister the old mapping before the VBO is reallocated
            CHECK_CUDART_ERROR(cudart.cudaGraphicsUnregisterResource(self.cu_vbo))

        super().init_gl_buffers(v, f)

        # Register vertex buffer object
        flags = cudart.cudaGraphicsRegisterFlags.cudaGraphicsRegisterFlagsWriteDiscard
        try:
            self.cu_vbo = CHECK_CUDART_ERROR(cudart.cudaGraphicsGLRegisterBuffer(self.vbo, flags))
        except RuntimeError as e:
            log(red(f'Your system does not support CUDA-GL interop, please use pytorch3d\'s implementation instead'))
            log(red(f'This can be done by specifying {blue("model_cfg.sampler_cfg.use_cudagl=False model_cfg.sampler_cfg.use_diffgl=False")} at the end of your command'))
            log(red(f'Note that this implementation is extremely slow, we recommend running on a native system that support the interop'))
            # raise RuntimeError(str(e) + ": This unrecoverable, please read the error message above")
            raise e

    def init_textures(self):
        """(Re)create the offscreen textures and register them for CUDA read access."""
        from cuda import cudart
        if hasattr(self, 'cu_read_color'):
            # Must unregister before the underlying GL textures are deleted
            CHECK_CUDART_ERROR(cudart.cudaGraphicsUnregisterResource(self.cu_read_color))
            CHECK_CUDART_ERROR(cudart.cudaGraphicsUnregisterResource(self.cu_write_color))
            CHECK_CUDART_ERROR(cudart.cudaGraphicsUnregisterResource(self.cu_read_lower))
            CHECK_CUDART_ERROR(cudart.cudaGraphicsUnregisterResource(self.cu_write_lower))

        super().init_textures()

        # Register image to read from
        flags = cudart.cudaGraphicsRegisterFlags.cudaGraphicsRegisterFlagsReadOnly
        self.cu_read_color = CHECK_CUDART_ERROR(cudart.cudaGraphicsGLRegisterImage(self.read_color, gl.GL_TEXTURE_2D, flags))
        self.cu_write_color = CHECK_CUDART_ERROR(cudart.cudaGraphicsGLRegisterImage(self.write_color, gl.GL_TEXTURE_2D, flags))
        self.cu_read_lower = CHECK_CUDART_ERROR(cudart.cudaGraphicsGLRegisterImage(self.read_lower, gl.GL_TEXTURE_2D, flags))
        self.cu_write_lower = CHECK_CUDART_ERROR(cudart.cudaGraphicsGLRegisterImage(self.write_lower, gl.GL_TEXTURE_2D, flags))

    def forward(self, xyz: torch.Tensor, rgb: torch.Tensor, rad: torch.Tensor, occ: torch.Tensor, batch: dotdict):
        """
        Renders a 3D point cloud using OpenGL and returns the rendered RGB image, accumulated alpha image, and depth map.

        Args:
            xyz (torch.Tensor): A tensor of shape (B, N, 3) containing the 3D coordinates of the points.
            rgb (torch.Tensor): A tensor of shape (B, N, 3) containing the RGB color values of the points.
            rad (torch.Tensor): A tensor of shape (B, N, 1) containing the radii of the points.
            occ (torch.Tensor): A tensor of shape (B, N, 1) containing the opacities of the points.
            batch (dotdict): A dictionary containing the camera parameters and other metadata for the batch.

        Returns:
            A tuple containing the rendered RGB image, accumulated alpha image, and depth map, all as torch.Tensors.
            The RGB image has shape (1, H, W, 3), the alpha image has shape (1, H, W, 1), and the depth map has shape (1, H, W, 1).

        The method first resizes the OpenGL texture to match the height and width of the output image. It then sets the OpenGL viewport and scissor to only render in the region of the viewport specified by the output image size.
        It concatenates the `xyz`, `rgb`, and `rad` tensors along the last dimension and flattens the result into a 1D tensor.
        The method then uploads the input data to OpenGL for rendering and performs depth peeling using OpenGL. The method uploads the camera parameters to OpenGL and renders the point cloud, saving the output buffer to the `back_fbo` attribute of the class.
        Finally, the method copies the rendered image and depth back to the CPU as torch.Tensors and reshapes them to match the output image size. The RGB image is returned with shape (1, H, W, 3), the accumulated alpha image is returned with shape (1, H, W, 1), and the depth map is returned with shape (1, H, W, 1).
        """
        from cuda import cudart
        kind = cudart.cudaMemcpyKind.cudaMemcpyDeviceToDevice

        # !: BATCH
        H, W = batch.meta.H[0].item(), batch.meta.W[0].item()
        self.resize_textures(H, W)  # maybe resize the texture
        self.resize_buffers(xyz.shape[1])  # maybe resize the buffer
        _, _, old_W, old_H = gl.glGetIntegerv(gl.GL_VIEWPORT)  # remember viewport to restore later
        gl.glViewport(0, 0, W, H)
        gl.glScissor(0, 0, W, H)  # only render in this small region of the viewport

        # Prepare for input data: interleaved (xyz, rgb, rad, occ) vertex stream
        data = torch.cat([xyz, rgb, rad, occ], dim=-1).type(self.dtype).ravel()

        # Upload to opengl for rendering through the mapped CUDA pointer
        CHECK_CUDART_ERROR(cudart.cudaGraphicsMapResources(1, self.cu_vbo, torch.cuda.current_stream().cuda_stream))
        cu_vbo_ptr, cu_vbo_size = CHECK_CUDART_ERROR(cudart.cudaGraphicsResourceGetMappedPointer(self.cu_vbo))
        assert cu_vbo_size >= data.numel() * data.element_size(), f'PyTorch(CUDA) and OpenGL vertex buffer size mismatch ({data.numel() * data.element_size()} v.s. {cu_vbo_size}), CUDA side should be less than or equal to the OpenGL side'
        CHECK_CUDART_ERROR(cudart.cudaMemcpyAsync(cu_vbo_ptr,
                                                  data.data_ptr(),
                                                  data.numel() * data.element_size(),
                                                  kind,
                                                  torch.cuda.current_stream().cuda_stream))
        CHECK_CUDART_ERROR(cudart.cudaGraphicsUnmapResources(1, self.cu_vbo, torch.cuda.current_stream().cuda_stream))

        # Perform rasterization (depth peeling using OpenGL)
        if 'meta_stream' in batch.meta: batch.meta.meta_stream.synchronize()  # wait for gpu -> cpu copy to finish
        back_fbo = self.rasterize(Camera(batch=batch.meta), xyz.shape[-2])  # will upload and render, save output buffer to back_fbo

        # Copy rendered image and depth back as tensor
        cu_tex = self.cu_write_color if back_fbo == self.write_fbo else self.cu_read_color  # double buffered depth peeling
        cu_dpt = self.cu_write_lower if back_fbo == self.write_fbo else self.cu_read_lower  # double buffered depth peeling

        # Prepare the output # !: BATCH
        rgb_map = torch.empty((H, W, 4), dtype=self.tex_dtype, device='cuda')  # to hold the data from opengl
        dpt_map = torch.empty((H, W, 1), dtype=torch.float, device='cuda')  # to hold the data from opengl

        # The resources in resources may be accessed by CUDA until they are unmapped.
        # The graphics API from which resources were registered should not access any resources while they are mapped by CUDA.
        # If an application does so, the results are undefined.
        CHECK_CUDART_ERROR(cudart.cudaGraphicsMapResources(1, cu_tex, torch.cuda.current_stream().cuda_stream))
        CHECK_CUDART_ERROR(cudart.cudaGraphicsMapResources(1, cu_dpt, torch.cuda.current_stream().cuda_stream))
        cu_tex_arr = CHECK_CUDART_ERROR(cudart.cudaGraphicsSubResourceGetMappedArray(cu_tex, 0, 0))
        cu_dpt_arr = CHECK_CUDART_ERROR(cudart.cudaGraphicsSubResourceGetMappedArray(cu_dpt, 0, 0))
        CHECK_CUDART_ERROR(cudart.cudaMemcpy2DFromArrayAsync(rgb_map.data_ptr(),  # dst
                                                             W * 4 * rgb_map.element_size(),  # dpitch
                                                             cu_tex_arr,  # src
                                                             0,  # wOffset
                                                             0,  # hOffset
                                                             W * 4 * rgb_map.element_size(),  # width Width of matrix transfer (columns in bytes)
                                                             H,  # height
                                                             kind,  # kind
                                                             torch.cuda.current_stream().cuda_stream))  # stream
        CHECK_CUDART_ERROR(cudart.cudaMemcpy2DFromArrayAsync(dpt_map.data_ptr(),
                                                             W * 1 * dpt_map.element_size(),
                                                             cu_dpt_arr,
                                                             0,
                                                             0,
                                                             W * 1 * dpt_map.element_size(),
                                                             H,
                                                             kind,
                                                             torch.cuda.current_stream().cuda_stream))
        CHECK_CUDART_ERROR(cudart.cudaGraphicsUnmapResources(1, cu_tex, torch.cuda.current_stream().cuda_stream))  # MARK: SYNC
        CHECK_CUDART_ERROR(cudart.cudaGraphicsUnmapResources(1, cu_dpt, torch.cuda.current_stream().cuda_stream))  # MARK: SYNC

        # Ouput reshaping: flip GL bottom-up rows back to image order, split alpha out
        rgb_map, dpt_map = rgb_map[None].flip(1), dpt_map[None].flip(1)
        rgb_map, acc_map = rgb_map[..., :3], rgb_map[..., 3:]
        dpt_map = torch.where(dpt_map == 0, dpt_map.max(), dpt_map)  # background pixels get the farthest depth

        # Some house keepings: restore viewport and scissor
        gl.glViewport(0, 0, old_W, old_H)
        gl.glScissor(0, 0, old_W, old_H)
        return rgb_map, acc_map, dpt_map
class HardwarePeeling(Splat):
def __init__(self,
dtype=torch.float,
**kwargs):
self.dtype = getattr(torch, dtype) if isinstance(dtype, str) else dtype
self.gl_dtype = gl.GL_HALF_FLOAT if self.dtype == torch.half else gl.GL_FLOAT
super().__init__(**kwargs,
blit_last_ratio=-10.0,
vert_sizes=[3, 1],
) # verts, radius, index
# from pytorch3d.renderer import AlphaCompositor
# self.compositor = AlphaCompositor() # this the key to convergence, this is differentiable
@property
def verts_data(self): # a heavy copy operation
verts = torch.cat([self.verts, self.radius], dim=-1).ravel().numpy() # MARK: Maybe sync
verts = np.asarray(verts, dtype=torch_dtype_to_numpy_dtype(self.dtype), order='C') # this should only be invoked once
return verts
def init_gl_buffers(self, v: int = 0, f: int = 0):
from cuda import cudart
if hasattr(self, 'cu_vbo'):
CHECK_CUDART_ERROR(cudart.cudaGraphicsUnregisterResource(self.cu_vbo))
super().init_gl_buffers(v, f)
# Register vertex buffer obejct
flags = cudart.cudaGraphicsRegisterFlags.cudaGraphicsRegisterFlagsWriteDiscard
self.cu_vbo = CHECK_CUDART_ERROR(cudart.cudaGraphicsGLRegisterBuffer(self.vbo, flags))\
def use_gl_program(self, program):
super().use_gl_program(program)
gl.glUseProgram(self.splat_program) # use a different program
self.uniforms.read_index = gl.glGetUniformLocation(program, f'read_index')
self.uniforms.read_lower = gl.glGetUniformLocation(program, f'read_lower')
gl.glUniform1i(self.uniforms.read_index, 0)
gl.glUniform1i(self.uniforms.read_lower, 1)
    def upload_gl_uniforms(self, camera: Camera):
        # Pure passthrough: kept as an explicit extension point for
        # peeling-specific uniforms; currently identical to the parent's upload
        super().upload_gl_uniforms(camera)
def compile_shaders(self):
try:
self.splat_program = shaders.compileProgram(
shaders.compileShader(load_shader_source('idx_splat.vert'), gl.GL_VERTEX_SHADER), # use the pass through quad shader
shaders.compileShader(load_shader_source('idx_splat.frag'), gl.GL_FRAGMENT_SHADER)
)
except Exception as e:
print(str(e).encode('utf-8').decode('unicode_escape'))
raise e
    def init_textures(self):
        """(Re)create the double-buffered peeling framebuffers and register the
        index/lower-bound textures for read-only CUDA access.

        Order matters: CUDA resources must be unregistered before the GL
        textures backing them are deleted, and re-registered afterwards.
        """
        from cuda import cudart
        if hasattr(self, 'cu_read_index'):
            # Unregister stale CUDA mappings before deleting the GL textures
            CHECK_CUDART_ERROR(cudart.cudaGraphicsUnregisterResource(self.cu_read_index))
            CHECK_CUDART_ERROR(cudart.cudaGraphicsUnregisterResource(self.cu_write_index))
            CHECK_CUDART_ERROR(cudart.cudaGraphicsUnregisterResource(self.cu_read_lower))
            CHECK_CUDART_ERROR(cudart.cudaGraphicsUnregisterResource(self.cu_write_lower))

        if hasattr(self, 'write_fbo'):
            # Release the previous GL allocation (no color/attach pair here, 3 textures per FBO)
            gl.glDeleteFramebuffers(2, [self.write_fbo, self.read_fbo])
            gl.glDeleteTextures(6, [self.write_index, self.write_lower, self.write_attach, self.read_index, self.read_lower, self.read_attach])
        self.write_index, self.write_lower, self.write_attach, self.write_fbo = hareward_peeling_framebuffer(self.max_H, self.max_W)
        self.read_index, self.read_lower, self.read_attach, self.read_fbo = hareward_peeling_framebuffer(self.max_H, self.max_W)

        # Register image to read from (CUDA only ever reads these back)
        flags = cudart.cudaGraphicsRegisterFlags.cudaGraphicsRegisterFlagsReadOnly
        self.cu_read_index = CHECK_CUDART_ERROR(cudart.cudaGraphicsGLRegisterImage(self.read_index, gl.GL_TEXTURE_2D, flags))
        self.cu_write_index = CHECK_CUDART_ERROR(cudart.cudaGraphicsGLRegisterImage(self.write_index, gl.GL_TEXTURE_2D, flags))
        self.cu_read_lower = CHECK_CUDART_ERROR(cudart.cudaGraphicsGLRegisterImage(self.read_lower, gl.GL_TEXTURE_2D, flags))
        self.cu_write_lower = CHECK_CUDART_ERROR(cudart.cudaGraphicsGLRegisterImage(self.write_lower, gl.GL_TEXTURE_2D, flags))
        log(f'Created texture of h, w: {self.max_H}, {self.max_W}')
    def rasterize_generator(self, camera: Camera = None, length: int = None):  # some implementation requires no uploading of camera
        """Generator that performs one depth-peeling pass per iteration.

        Yields the FBO that holds the freshly rendered pass so the CUDA side
        can read the per-pixel point indices before the next pass overwrites
        the double buffer. Index buffers are cleared to -1 (meaning "no point").

        Args:
            camera: camera whose uniforms to upload; may be None if some other
                mechanism already uploaded them.
            length: number of vertices to draw; defaults to len(self.verts).

        Yields:
            The back (just-written) FBO handle, once per pass, pts_per_pix times.
        """
        front_fbo, front_index, front_lower = self.read_fbo, self.read_index, self.read_lower
        back_fbo, back_index, back_lower = self.write_fbo, self.write_index, self.write_lower

        # Only clear the output once
        gl.glBindFramebuffer(gl.GL_FRAMEBUFFER, front_fbo)  # for offscreen rendering to textures
        gl.glClearBufferiv(gl.GL_COLOR, 0, [-1])
        gl.glClearBufferfv(gl.GL_COLOR, 1, [0.0])
        gl.glClearBufferfv(gl.GL_DEPTH, 0, [1e9])  # this is for depth testing

        # Only clear the output once
        gl.glBindFramebuffer(gl.GL_FRAMEBUFFER, back_fbo)  # for offscreen rendering to textures
        gl.glClearBufferiv(gl.GL_COLOR, 0, [-1])
        gl.glClearBufferfv(gl.GL_COLOR, 1, [0.0])
        gl.glClearBufferfv(gl.GL_DEPTH, 0, [1e9])  # this is for depth testing

        # Prepare for the actual rendering, previous operations could rebind the vertex array
        self.use_gl_program(self.splat_program)
        self.upload_gl_uniforms(camera)
        gl.glBindVertexArray(self.vao)

        # The actual multi pass rendering process happens here
        for pass_index in range(self.pts_per_pix):
            # Swap buffers to render the next pass: previous output becomes sampling input
            front_fbo, front_index, front_lower, back_fbo, back_index, back_lower = \
                back_fbo, back_index, back_lower, front_fbo, front_index, front_lower

            # Bind the read texture and bind the write render frame buffer
            gl.glBindTextures(0, 2, [front_index, front_lower])

            # Move content from write_fbo to screen fbo
            if pass_index > self.pts_per_pix * self.blit_last_ratio:  # no blitting almost has no effect on the rendering
                # Carry the previous lower depth bound forward via a blit
                gl.glBindFramebuffer(gl.GL_READ_FRAMEBUFFER, front_fbo)
                gl.glBindFramebuffer(gl.GL_DRAW_FRAMEBUFFER, back_fbo)
                gl.glReadBuffer(gl.GL_COLOR_ATTACHMENT0 + 1)
                gl.glDrawBuffer(gl.GL_COLOR_ATTACHMENT0 + 1)
                gl.glBlitFramebuffer(0, 0, self.W, self.H, 0, 0, self.W, self.H, gl.GL_COLOR_BUFFER_BIT, gl.GL_NEAREST)
                gl.glBindFramebuffer(gl.GL_FRAMEBUFFER, back_fbo)  # for offscreen rendering to textures
                gl.glDrawBuffers(2, [gl.GL_COLOR_ATTACHMENT0, gl.GL_COLOR_ATTACHMENT1])
            else:
                # Only clear the output once
                gl.glBindFramebuffer(gl.GL_FRAMEBUFFER, back_fbo)  # for offscreen rendering to textures

            # Clear depth buffer for depth testing
            gl.glClearBufferiv(gl.GL_COLOR, 0, [-1])  # clear the indices buffer for later rendering and retrieving
            gl.glClearBufferfv(gl.GL_DEPTH, 0, [1e9])  # this is for depth testing

            # The actual drawing pass with render things out to the write_fbo
            gl.glDrawArrays(gl.GL_POINTS, 0, length if length is not None else len(self.verts))  # number of vertices
            yield back_fbo  # give the CUDA end a chance to read from this frame buffer after rendering

        # Restore states of things
        gl.glBindFramebuffer(gl.GL_FRAMEBUFFER, 0)
        gl.glBindVertexArray(0)
        return
def forward(self,
            xyz: torch.Tensor, rgb: torch.Tensor, rad: torch.Tensor, occ: torch.Tensor,
            batch: dotdict,
            return_frags: bool = False,
            return_full: bool = False,
            ):
    """
    Differentiable point splatting on top of hardware depth peeling.

    Get all indices from the depth peeling passes (OpenGL, non-differentiable),
    compute the per-fragment screen-space weights here in torch (cuda), then
    use the indices to pass the gathered fragments through a compositor.
    The backward pass is only valid on the torch side, which should've been
    enough since only the weights and colors need gradients.

    Args:
        xyz: B, N, 3 world-space point positions
        rgb: B, N, 3 per-point colors
        rad: B, N, 1 per-point world-space radii
        occ: B, N, 1 per-point opacities
        batch: camera parameters (K, R, T) and meta info used for projection
        return_frags: return the raw fragments (idx, pix_occ) instead of images
        return_full: return composited images plus the raw fragments

    Returns:
        (rgb, acc, dpt) composited maps; or (idx, pix_occ) when return_frags;
        or (rgb, acc, dpt, idx, pix_occ) when return_full

    TODO: This function is too memory intensive
    TODO: Performing IBR is too memory intensive
    """
    # This is the slow part (rasterization), but it is not differentiable
    idx, _, _ = self.forward_idx(xyz, rad, batch)  # B, H, W, K
    msk = idx != -1  # B, H, W, K; hit mask, -1 marks empty fragments
    idx = torch.where(msk, idx, 0).long()  # clamp misses to index 0 so gathers stay in range
    # Sample things needed for computing screen space weight
    H, W, K, R, T, C = get_opencv_camera_params(batch)
    K, R, T, C = K.to(xyz.dtype), R.to(xyz.dtype), T.to(xyz.dtype), C.to(xyz.dtype)
    pix_xyz = (xyz @ R.mT + T.mT) @ K.mT  # B, P, 3; project points to screen space
    pix_xyz_xy = pix_xyz[..., :-1] / (pix_xyz[..., -1:] + 1e-10)  # perspective division (eps avoids div-by-zero)
    pix_rad = abs(K[..., 1, 1][..., None] * rad[..., 0] / (pix_xyz[..., -1] + 1e-10))  # z: B, 1 * B, N, world space radius -> pixel-space radius
    # Gather per-fragment splat centers and build the per-pixel coordinate grid
    mean_xy = multi_gather(pix_xyz_xy, idx.view(idx.shape[0], -1), dim=-2).view(*idx.shape, 2)  # B, HWK, 2 -> B, H, W, K, 2
    xy = create_meshgrid(H, W, idx.device, dtype=xyz.dtype).flip(-1)[None].expand(idx.shape[0], H, W, 2)  # create screen space xy (opencv)
    dists = (xy[..., None, :] - mean_xy).pow(2).sum(-1)  # B, H, W, K; squared pixel distance to each splat center
    # Point values
    dpt = (xyz - C.mT).norm(dim=-1, keepdim=True)  # B, N, 1; distance from point to camera center
    pix_occ = multi_gather(occ, idx.view(idx.shape[0], -1), dim=-2).view(*idx.shape)
    pix_rad = multi_gather(pix_rad, idx.view(idx.shape[0], -1), dim=-1).view(*idx.shape)  # -> B, H, W, K
    # Quadratic radial falloff of opacity inside the splat footprint (eps avoids div-by-zero)
    pix_occ = pix_occ * (1 - dists / (pix_rad * pix_rad + 1e-10))  # B, H, W, K
    pix_occ = pix_occ.clip(0, 1)
    pix_occ = torch.where(msk, pix_occ, 0)  # zero out empty fragments
    if return_frags:
        return idx, pix_occ  # B, H, W, K
    # The actual computation: composite color, alpha and depth in a single pass
    rgb = torch.cat([rgb, occ, dpt], dim=-1)  # B, N, 3 + C
    pix_rgb = multi_gather(rgb, idx.view(idx.shape[0], -1), dim=-2).view(*idx.shape, rgb.shape[-1])  # B, H, W, K, -1
    _, rgb, _ = volume_rendering(pix_rgb, pix_occ[..., None])  # B, H, W, -1
    rgb, acc, dpt = rgb[..., :-2], rgb[..., -2:-1], rgb[..., -1:]
    dpt = dpt + (1 - acc) * dpt.max()  # only for the looks (rendered depth are already premultiplied)
    if return_full:
        return rgb, acc, dpt, idx, pix_occ
    else:
        return rgb, acc, dpt
def forward_idx(self, xyz: torch.Tensor, rad: torch.Tensor, batch: dotdict):
    """
    Rasterize points with depth peeling and read per-pixel indices back to torch.

    Uploads the (xyz, rad) vertex stream into the shared OpenGL vertex buffer
    through CUDA-GL interop, runs the multi-pass depth-peeling rasterizer, and
    copies every pass's index and depth textures back into CUDA tensors.

    Args:
        xyz: B, N, 3 world-space point positions
        rad: B, N, 1 world-space point radii
        batch: supplies meta.H / meta.W and the camera used for rendering

    Returns:
        ind_map: B, H, W, K int point indices (-1 for misses)
        acc_map: B, H, W, K bool hit mask (ind_map != -1)
        dpt_map: B, H, W, K float depths (zeros pushed to the max depth)
    """
    from cuda import cudart
    kind = cudart.cudaMemcpyKind.cudaMemcpyDeviceToDevice  # all copies here are GPU -> GPU
    # !: BATCH
    H, W = batch.meta.H[0].item(), batch.meta.W[0].item()
    self.resize_textures(H, W)  # maybe resize the texture
    self.resize_buffers(xyz.shape[1])  # maybe resize the buffer
    # Remember the caller's viewport so it can be restored at the end
    _, _, old_W, old_H = gl.glGetIntegerv(gl.GL_VIEWPORT)
    gl.glViewport(0, 0, W, H)
    gl.glScissor(0, 0, W, H)  # only render in this small region of the viewport
    # Prepare for input data: interleaved [xyz | rad] vertex stream, flattened
    data = torch.cat([xyz, rad], dim=-1).type(self.dtype).ravel()
    # Upload to opengl for rendering: map the GL VBO into CUDA and memcpy into it
    CHECK_CUDART_ERROR(cudart.cudaGraphicsMapResources(1, self.cu_vbo, torch.cuda.current_stream().cuda_stream))
    cu_vbo_ptr, cu_vbo_size = CHECK_CUDART_ERROR(cudart.cudaGraphicsResourceGetMappedPointer(self.cu_vbo))
    assert cu_vbo_size >= data.numel() * data.element_size(), f'PyTorch(CUDA) and OpenGL vertex buffer size mismatch ({data.numel() * data.element_size()} v.s. {cu_vbo_size}), CUDA side should be less than or equal to the OpenGL side'
    CHECK_CUDART_ERROR(cudart.cudaMemcpyAsync(cu_vbo_ptr,
                                              data.data_ptr(),
                                              data.numel() * data.element_size(),
                                              kind,
                                              torch.cuda.current_stream().cuda_stream))
    CHECK_CUDART_ERROR(cudart.cudaGraphicsUnmapResources(1, self.cu_vbo, torch.cuda.current_stream().cuda_stream))
    # Perform rasterization (depth peeling using OpenGL)
    if 'meta_stream' in batch.meta: batch.meta.meta_stream.synchronize()  # wait for gpu -> cpu copy to finish
    # FIXME: Strange bug occurs if batch parameter is passed in directly for the construction of Camera(batch=batch.meta)
    gen = self.rasterize_generator(Camera(batch=batch.meta), xyz.shape[-2])  # will upload and render, save output buffer to back_fbo
    ind_maps = []
    dpt_maps = []
    acc_maps = []
    for back_fbo in gen:
        # Copy rendered image and depth back as tensor
        cu_tex = self.cu_write_index if back_fbo == self.write_fbo else self.cu_read_index  # double buffered depth peeling
        cu_dpt = self.cu_write_lower if back_fbo == self.write_fbo else self.cu_read_lower  # double buffered depth peeling
        # Prepare the output # !: BATCH
        ind_map = torch.empty((H, W, 1), dtype=torch.int, device='cuda')  # to hold the data from opengl
        dpt_map = torch.empty((H, W, 1), dtype=torch.float, device='cuda')  # to hold the data from opengl
        # The resources in resources may be accessed by CUDA until they are unmapped.
        # The graphics API from which resources were registered should not access any resources while they are mapped by CUDA.
        # If an application does so, the results are undefined.
        CHECK_CUDART_ERROR(cudart.cudaGraphicsMapResources(1, cu_tex, torch.cuda.current_stream().cuda_stream))
        CHECK_CUDART_ERROR(cudart.cudaGraphicsMapResources(1, cu_dpt, torch.cuda.current_stream().cuda_stream))
        cu_tex_arr = CHECK_CUDART_ERROR(cudart.cudaGraphicsSubResourceGetMappedArray(cu_tex, 0, 0))
        cu_dpt_arr = CHECK_CUDART_ERROR(cudart.cudaGraphicsSubResourceGetMappedArray(cu_dpt, 0, 0))
        CHECK_CUDART_ERROR(cudart.cudaMemcpy2DFromArrayAsync(ind_map.data_ptr(),  # dst
                                                             W * ind_map.shape[-1] * ind_map.element_size(),  # dpitch
                                                             cu_tex_arr,  # src
                                                             0,  # wOffset
                                                             0,  # hOffset
                                                             W * ind_map.shape[-1] * ind_map.element_size(),  # width Width of matrix transfer (columns in bytes)
                                                             H,  # height
                                                             kind,  # kind
                                                             torch.cuda.current_stream().cuda_stream))  # stream
        CHECK_CUDART_ERROR(cudart.cudaMemcpy2DFromArrayAsync(dpt_map.data_ptr(),
                                                             W * dpt_map.shape[-1] * dpt_map.element_size(),
                                                             cu_dpt_arr,
                                                             0,
                                                             0,
                                                             W * dpt_map.shape[-1] * dpt_map.element_size(),
                                                             H,
                                                             kind,
                                                             torch.cuda.current_stream().cuda_stream))
        CHECK_CUDART_ERROR(cudart.cudaGraphicsUnmapResources(1, cu_tex, torch.cuda.current_stream().cuda_stream))  # MARK: SYNC
        CHECK_CUDART_ERROR(cudart.cudaGraphicsUnmapResources(1, cu_dpt, torch.cuda.current_stream().cuda_stream))  # MARK: SYNC
        # Output reshaping: add a batch dim and flip the vertical axis
        ind_map, dpt_map = ind_map[None].flip(1), dpt_map[None].flip(1)
        acc_map = ind_map != -1
        dpt_map = torch.where(dpt_map == 0, dpt_map.max(), dpt_map)  # push background (zero) pixels to the far depth
        ind_maps.append(ind_map)
        acc_maps.append(acc_map)
        dpt_maps.append(dpt_map)
    # Stack the per-pass maps along the last (K) dimension
    ind_map = torch.cat(ind_maps, dim=-1)  # B, H, W, K
    acc_map = torch.cat(acc_maps, dim=-1)  # B, H, W, K
    dpt_map = torch.cat(dpt_maps, dim=-1)  # B, H, W, K
    # Some house keepings: restore the caller's viewport and scissor region
    gl.glViewport(0, 0, old_W, old_H)
    gl.glScissor(0, 0, old_W, old_H)
    return ind_map, acc_map, dpt_map
|
evocodebench_data_79
|
# Feature Cloud Sequence utilities
# This files builds the components for the feature cloud sequence sampler
import torch
from typing import List, Dict, Union
from easyvolcap.utils.console_utils import *
from easyvolcap.utils.base_utils import dotdict
from easyvolcap.utils.raster_utils import get_ndc_perspective_matrix
from easyvolcap.utils.chunk_utils import multi_gather, multi_scatter
from easyvolcap.utils.math_utils import normalize_sum, affine_inverse, affine_padding
from easyvolcap.utils.net_utils import MLP
def estimate_occupancy_field(xyz: torch.Tensor, rad: torch.Tensor, occ: torch.Tensor):
    """
    Build a callable occupancy field from a point-cloud density field.

    We sample the point cloud with a ball query using the largest radius in the
    set; the per-point alpha is decreased with the squared distance to the query
    point, and when multiple points fall into the region of interest their
    alphas are summed.

    Args:
        xyz: B, N, 3 point positions
        rad: B, N, 1 per-point radii
        occ: B, N, 1 per-point opacities

    Returns:
        field(pts, K=10): callable mapping B, P, 3 query points to B, P, 1 occupancy
    """
    from pytorch3d.ops import ball_query
    max_rad = rad.max()

    def field(pts: torch.Tensor, K=10):
        # pts: B, P, 3
        sh = pts.shape
        pts = pts.view(pts.shape[0], -1, 3)
        knn = ball_query(pts, xyz, K=K, radius=max_rad, return_nn=False)
        idx, dists = knn.idx, knn.dists  # B, P, K
        msk = idx != -1  # -1 marks empty neighbor slots
        idx = torch.where(msk, idx, 0).long()
        pix_rad = multi_gather(rad[..., 0], idx.view(idx.shape[0], -1), dim=-1).view(idx.shape)  # B, P, K
        pix_occ = multi_gather(occ[..., 0], idx.view(idx.shape[0], -1), dim=-1).view(idx.shape)  # B, P, K
        # FIX: add a small epsilon so zero radii do not produce NaN/Inf (0 / 0);
        # this matches the epsilon used in the screen-space weight computation
        pix_occ = pix_occ * (1 - dists / (pix_rad * pix_rad + 1e-10))  # B, P, K
        pix_occ = torch.where(msk, pix_occ, 0)  # zero out empty slots
        pix_occ = pix_occ.clip(0, 1)
        pix_occ = pix_occ.sum(dim=-1, keepdim=True)  # B, P, 1; accumulate overlapping points
        return pix_occ.view(*sh[:-1], 1)
    return field
# @torch.jit.script
def prepare_feedback_transform(H: int, W: int, K: torch.Tensor, R: torch.Tensor, T: torch.Tensor,
                               n: torch.Tensor,
                               f: torch.Tensor,
                               xyz: torch.Tensor,
                               rgb: torch.Tensor,
                               rad: torch.Tensor):
    """
    Project points into OpenGL clip space and pack them for the feedback VBO.

    Args:
        H, W: render resolution
        K, R, T: OpenCV-style intrinsics and world-to-camera extrinsics
        n, f: near / far planes (the trailing dim is dropped for the NDC matrix)
        xyz: B, N, 3 world-space positions
        rgb: B, N, C per-point colors
        rad: B, N, 1 world-space radii

    Returns:
        Flattened tensor [pix_xyz | rgb | pix_rad] laid out for the vertex buffer.
    """
    ixt = get_ndc_perspective_matrix(K, H, W, n[..., 0], f[..., 0]).to(xyz.dtype)  # to opengl, remove last dim of n and f
    w2c = affine_padding(torch.cat([R, T], dim=-1)).to(xyz.dtype)
    c2w = affine_inverse(w2c)
    # Convert camera axes from OpenCV to OpenGL convention:
    c2w[..., 0] *= 1  # x axis kept as-is (no-op, retained for symmetry with y/z)
    c2w[..., 1] *= -1  # flip y
    c2w[..., 2] *= -1  # flip z
    ext = affine_inverse(c2w)
    # Homogeneous projection of every point into clip space
    pix_xyz = torch.cat([xyz, torch.ones_like(xyz[..., :1])], dim=-1) @ ext.mT @ ixt.mT
    pix_rad = abs(H * ixt[..., 1, 1][..., None, None] * rad / pix_xyz[..., -1:])  # z: B, 1 * B, N, world space radius -> ndc radius B, N, 1
    # Prepare data to be rendered
    data = torch.cat([pix_xyz, rgb, pix_rad], dim=-1).ravel()  # organize the data inside vbo
    return data
def matrix_to_rotation_6d(matrix: torch.Tensor) -> torch.Tensor:
    """
    Convert rotation matrices to the 6D rotation representation of Zhou et al. [1]
    by dropping the last row of each matrix. Note that the 6D representation is
    not unique.

    Args:
        matrix: batch of rotation matrices of size (*, 3, 3)

    Returns:
        6D rotation representation, of size (*, 6)

    [1] Zhou, Y., Barnes, C., Lu, J., Yang, J., & Li, H.
    On the Continuity of Rotation Representations in Neural Networks.
    IEEE Conference on Computer Vision and Pattern Recognition, 2019.
    Retrieved from http://arxiv.org/abs/1812.07035
    """
    leading_dims = matrix.size()[:-2]
    # Keep only the first two rows, then flatten them into a 6-vector
    top_two_rows = matrix[..., :2, :].clone()
    return top_two_rows.reshape(leading_dims + (6,))
@run_once
def warn_once_about_pulsar_fxfy():
    """Warn (once per process, enforced by @run_once) that pulsar averages
    differing fx/fy focal lengths, which may introduce a noticeable error."""
    log(yellow(
        "Pulsar only supports a single focal lengths. For converting OpenCV "
        "focal lengths, we average them for x and y directions. "
        "The focal lengths for x and y you provided differ by more than 1%, "
        "which means this could introduce a noticeable error."
    ))
def get_pulsar_camera_params(
    R: torch.Tensor,
    tvec: torch.Tensor,
    camera_matrix: torch.Tensor,
    image_size: torch.Tensor,
    znear: float = 0.1,
) -> torch.Tensor:
    """
    Convert OpenCV-style camera parameters into pulsar's flat camera vector.

    Args:
        R: B, 3, 3 world-to-camera rotation matrices.
        tvec: B, 3 or B, 3, 1 world-to-camera translations.
        camera_matrix: B, 3, 3 intrinsic matrices.
        image_size: B, 2 image sizes stored as (height, width).
        znear: near clipping plane distance; pulsar's focal length is derived
            from it.

    Returns:
        B, 13 tensor: [cam_pos (3), 6D rotation (6), focal_length,
        sensor_width, cx, cy].

    Raises:
        AssertionError: on non-batched inputs, wrong shapes, mismatched batch
            sizes, non-positive image sizes, or images of differing sizes.
    """
    assert len(camera_matrix.size()) == 3, "This function requires batched inputs!"
    assert len(R.size()) == 3, "This function requires batched inputs!"
    # FIX: corrected typo in the original assertion message ("reuqires")
    assert len(tvec.size()) in (2, 3), "This function requires batched inputs!"
    # Validate parameters.
    image_size_wh = image_size.to(R).flip(dims=(1,))  # (h, w) -> (w, h)
    assert torch.all(
        image_size_wh > 0
    ), "height and width must be positive but min is: %s" % (
        str(image_size_wh.min().item())
    )
    assert (
        camera_matrix.size(1) == 3 and camera_matrix.size(2) == 3
    ), "Incorrect camera matrix shape: expected 3x3 but got %dx%d" % (
        camera_matrix.size(1),
        camera_matrix.size(2),
    )
    assert (
        R.size(1) == 3 and R.size(2) == 3
    ), "Incorrect R shape: expected 3x3 but got %dx%d" % (
        R.size(1),
        R.size(2),
    )
    if len(tvec.size()) == 2:
        tvec = tvec.unsqueeze(2)
    assert (
        tvec.size(1) == 3 and tvec.size(2) == 1
    ), "Incorrect tvec shape: expected 3x1 but got %dx%d" % (
        tvec.size(1),
        tvec.size(2),
    )
    # Check batch size.
    batch_size = camera_matrix.size(0)
    assert R.size(0) == batch_size, "Expected R to have batch size %d. Has size %d." % (
        batch_size,
        R.size(0),
    )
    assert (
        tvec.size(0) == batch_size
    ), "Expected tvec to have batch size %d. Has size %d." % (
        batch_size,
        tvec.size(0),
    )
    # Check image sizes.
    image_w = image_size_wh[0, 0]
    image_h = image_size_wh[0, 1]
    assert torch.all(
        image_size_wh[:, 0] == image_w
    ), "All images in a batch must have the same width!"
    assert torch.all(
        image_size_wh[:, 1] == image_h
    ), "All images in a batch must have the same height!"
    # Focal length.
    fx = camera_matrix[:, 0, 0].unsqueeze(1)
    fy = camera_matrix[:, 1, 1].unsqueeze(1)
    # Check that we introduce less than 1% error by averaging the focal lengths.
    fx_y = fx / fy
    if torch.any(fx_y > 1.01) or torch.any(fx_y < 0.99):
        warn_once_about_pulsar_fxfy()
    f = (fx + fy) / 2
    # Normalize f into normalized device coordinates.
    focal_length_px = f / image_w
    # Transfer into focal_length and sensor_width.
    # NOTE: Using torch.tensor instead of torch.as_tensor will cause cpu gpu sync
    focal_length = torch.as_tensor([znear - 1e-5], dtype=torch.float32, device=R.device)
    focal_length = focal_length[None, :].repeat(batch_size, 1)
    sensor_width = focal_length / focal_length_px
    # Principal point.
    cx = camera_matrix[:, 0, 2].unsqueeze(1)
    cy = camera_matrix[:, 1, 2].unsqueeze(1)
    # Transfer principal point offset into centered offset.
    # NOTE(review): the x offset is negated but y is not — presumably pulsar's
    # principal-point convention; verify against the pulsar renderer docs
    cx = -(cx - image_w / 2)
    cy = cy - image_h / 2
    # Concatenate to final vector.
    param = torch.cat([focal_length, sensor_width, cx, cy], dim=1)
    R_trans = R.permute(0, 2, 1)
    cam_pos = -torch.bmm(R_trans, tvec).squeeze(2)  # camera center in world space
    cam_rot = matrix_to_rotation_6d(R_trans)
    cam_params = torch.cat([cam_pos, cam_rot, param], dim=1)
    return cam_params
def get_opencv_camera_params(batch: dotdict):
    """Extract OpenCV-convention camera parameters from a batch.

    Returns:
        H, W: image size (from the first batch element — see !: BATCH)
        K, R, T: intrinsics and world-to-camera extrinsics
        C: B, 3, 1 camera center in world space
    """
    H = batch.meta.H[0].item()  # !: BATCH
    W = batch.meta.W[0].item()  # !: BATCH
    K, R, T = batch.K, batch.R, batch.T
    C = -batch.R.mT @ batch.T  # camera center: B, 3, 1
    return H, W, K, R, T, C
def get_pytorch3d_camera_params(batch: dotdict):
    """Extract pytorch3d camera parameters from the batch input.

    pytorch3d applies R and T on the right (so R is transposed relative to the
    OpenCV format) and uses a left-up-front coordinate system instead of
    OpenCV's right-down-front, so both R and T need the axis correction.
    """
    # Camera center is convention-independent; compute it first
    C = -batch.R.mT @ batch.T  # B, 3, 1
    R = batch.R.clone()
    R[..., :2, :] *= -1  # flip the x and y rows in one go (cv -> p3d axes)
    T = (-R @ C)[..., 0]  # turn the c2w center back into a w2c translation
    R = R.mT  # applied on the right in pytorch3d (left multiply -> right multiply)
    H = batch.meta.H[0].item()  # !: BATCH
    W = batch.meta.W[0].item()  # !: BATCH
    K = get_pytorch3d_ndc_K(batch.K, H, W)
    return H, W, K, R, T, C
# TODO: Remove pcd_t and with_t semantics, this is a legacy API
def voxel_surface_down_sample(pcd: torch.Tensor, pcd_t: torch.Tensor = None, voxel_size: float = 0.01, dist_th: float = 0.025, n_points: int = 65536):
    """
    Downsample a point cloud to n_points points that lie near an estimated surface.

    Pipeline: voxelize the cloud (Open3D), run marching cubes on the occupancy
    grid to get a proxy surface mesh, keep only points within dist_th of that
    mesh, iteratively re-subsampling while the kept count exceeds n_points by
    more than 0.5%, then trim to exactly the n_points closest-to-surface points.

    Args:
        pcd: B, N, 3 input points (only batch 0 is used — see !: BATCH)
        pcd_t: unused legacy parameter (see module TODO)
        voxel_size: voxel edge length of the occupancy grid
        dist_th: maximum point-to-mesh distance to count as a surface point
        n_points: target number of output points

    Returns:
        1, n_points, 3 tensor of surface points
    """
    # !: BATCH
    # TODO: Use number of vertices for good estimation
    import open3d as o3d
    import numpy as np
    import mcubes
    from easyvolcap.utils.sample_utils import point_mesh_distance
    from pytorch3d.ops import knn_points, ball_query, sample_farthest_points
    # Convert torch tensor to Open3D PointCloud
    o3d_pcd = o3d.geometry.PointCloud()
    o3d_pcd.points = o3d.utility.Vector3dVector(pcd.view(-1, 3).detach().cpu().numpy())
    # Create VoxelGrid from PointCloud
    o3d_vox = o3d.geometry.VoxelGrid.create_from_point_cloud(o3d_pcd, voxel_size=voxel_size)
    # Extract dense grid from VoxelGrid using get_voxel
    voxels = o3d_vox.get_voxels()
    max_index = np.array([vox.grid_index for vox in voxels]).max(axis=0)  # !: for-loop
    dense_grid = np.zeros((max_index[0] + 1, max_index[1] + 1, max_index[2] + 1))
    for vox in voxels:  # !: for-loop
        dense_grid[vox.grid_index[0], vox.grid_index[1], vox.grid_index[2]] = 1
    # Use marching cubes to obtain mesh from dense grid
    vertices, triangles = mcubes.marching_cubes(dense_grid, 0.5)
    vertices = vertices * voxel_size + o3d_vox.origin  # resizing: grid indices -> world coordinates
    # Convert mesh data to torch tensors
    triangles_torch = torch.as_tensor(vertices[triangles], device=pcd.device, dtype=pcd.dtype).float()
    # Calculate distances using point_mesh_distance
    dists, _ = point_mesh_distance(pcd[0], triangles_torch)
    # Select points based on distances
    valid = (dists < dist_th).nonzero()[..., 0]
    # NOTE(review): the loop only handles the too-many case; if fewer than
    # n_points points are valid, the topk below will raise — TODO confirm intended
    while (len(valid) - n_points) / n_points > 0.005:
        # There are too many valid points, should control its number
        ratio = len(valid) / len(pcd[0])  # the ratio of valid points
        n_expected = int(n_points / ratio)  # the expected number of points before surface sampling
        pcd = random(pcd, n_points=n_expected)  # NOTE: module-level `random` subsampler, not the stdlib module
        # Calculate distances using point_mesh_distance
        dists, _ = point_mesh_distance(pcd[0], triangles_torch)
        # Select points based on distances
        valid = (dists < dist_th).nonzero()[..., 0]
    # Keep exactly the n_points points closest to the proxy surface
    _, valid = dists.topk(n_points, dim=-1, sorted=False, largest=False)
    pcd_new = torch.index_select(pcd[0], 0, valid)[None]
    return pcd_new
def filter_bounds(pcd: torch.Tensor, pcd_t: torch.Tensor = None, bounds: torch.Tensor = None):
    """Keep only the points strictly inside the axis-aligned bounds.

    Args:
        pcd: B, N, 3 points (only batch 0 drives the selection — see MARK: SYNC)
        pcd_t: unused legacy parameter
        bounds: B, 2, 3 tensor of (min, max) corners
    """
    inside = ((pcd - bounds[..., 0, :]) > 0).all(dim=-1) & ((pcd - bounds[..., 1, :]) < 0).all(dim=-1)  # B, N
    keep = inside[0].nonzero()[None]  # B, S -> B, V # MARK: SYNC
    return multi_gather(pcd, keep, dim=-2)
def duplicate(pcd: torch.Tensor, pcd_t: torch.Tensor = None, std: float = 0.005 * 0.1):
    """Double the point count by appending a Gaussian-jittered copy of the cloud.

    Args:
        pcd: B, N, 3 points
        pcd_t: unused legacy parameter
        std: standard deviation of the per-coordinate jitter

    Returns:
        B, 2N, 3 tensor: the originals followed by the jittered copies.
    """
    jittered = torch.normal(pcd, std=std)
    return torch.cat([pcd, jittered], dim=-2)
def farthest(pcd: torch.Tensor, pcd_t: torch.Tensor = None, lengths: torch.Tensor = None, n_points: int = 65536):
    """Farthest-point-sample the cloud down to n_points points.

    Args:
        pcd: B, N, 3 points; pcd_t: unused legacy parameter
        lengths: optional per-batch valid lengths forwarded to pytorch3d
    """
    from pytorch3d.ops import knn_points, ball_query, sample_farthest_points
    picked = sample_farthest_points(pcd, lengths, K=n_points)[1]  # B, K (padded)
    return multi_gather(pcd, picked)
def random(pcd: torch.Tensor, pcd_t: torch.Tensor = None, n_points: int = 65536, std: float = 0.001):
    """Randomly subsample n_points points per batch (without replacement).

    Args:
        pcd: B, N, 3 points; pcd_t, std: unused legacy parameters
    """
    perms = [torch.randperm(pcd.shape[-2], device=pcd.device)[:n_points] for _ in range(len(pcd))]
    inds = torch.stack(perms)  # B, S
    return multi_gather(pcd, inds)
def voxel_down_sample(pcd: torch.Tensor, pcd_t: torch.Tensor = None, voxel_size=0.005):
    """Voxel-grid downsample the cloud with Open3D.

    Args:
        pcd: B, N, 3 points (flattened across the batch for Open3D)
        pcd_t: unused legacy parameter
        voxel_size: voxel edge length

    Returns:
        B, M, 3 tensor on pcd's device/dtype.
    """
    import open3d as o3d
    # FIX: numpy is not imported at this module's top level (other functions in
    # this file import it locally), so bind it here instead of relying on a
    # star-import side effect
    import numpy as np
    o3d_pcd = o3d.geometry.PointCloud()
    o3d_pcd.points = o3d.utility.Vector3dVector(pcd.view(-1, 3).detach().cpu().numpy())
    o3d_pcd = o3d_pcd.voxel_down_sample(voxel_size)
    return torch.as_tensor(np.array(o3d_pcd.points)).to(pcd.device, pcd.dtype, non_blocking=True).view(pcd.shape[0], -1, 3)
def remove_outlier(pcd: torch.Tensor, pcd_t: torch.Tensor = None, K: int = 20, std_ratio=2.0, return_inds=False):  # !: BATCH
    """Statistical outlier removal via Open3D.

    Args:
        pcd: B, N, 3 points (flattened for Open3D — see !: BATCH)
        pcd_t: unused legacy parameter
        K: number of neighbors used for the statistics
        std_ratio: standard-deviation threshold ratio
        return_inds: if True, return the inlier indices (1, M) instead of points

    Returns:
        B, M, 3 inlier points, or 1, M inlier indices when return_inds.
    """
    import open3d as o3d
    # FIX: numpy is not imported at this module's top level; bind it locally
    import numpy as np
    o3d_pcd = o3d.geometry.PointCloud()
    o3d_pcd.points = o3d.utility.Vector3dVector(pcd.view(-1, 3).detach().cpu().numpy())
    cl, ind = o3d_pcd.remove_statistical_outlier(nb_neighbors=K, std_ratio=std_ratio)
    if return_inds:
        return torch.as_tensor(np.array(ind), device=pcd.device)[None]  # N,
    # Index the ORIGINAL cloud by the inlier indices (cl is unused)
    return torch.as_tensor(np.array(o3d_pcd.points)[np.array(ind)]).to(pcd.device, pcd.dtype, non_blocking=True).view(pcd.shape[0], -1, 3)
def farthest_down_sample(pcd: torch.Tensor, pcd_t: torch.Tensor = None, K: int = 65536):
    """Farthest-point downsample the cloud to K points via Open3D.

    Args:
        pcd: B, N, 3 points (flattened for Open3D)
        pcd_t: unused legacy parameter
        K: target number of points
    """
    import open3d as o3d
    # FIX: numpy is not imported at this module's top level; bind it locally
    import numpy as np
    o3d_pcd = o3d.geometry.PointCloud()
    o3d_pcd.points = o3d.utility.Vector3dVector(pcd.view(-1, 3).detach().cpu().numpy())
    o3d_pcd = o3d_pcd.farthest_point_down_sample(K)
    return torch.as_tensor(np.array(o3d_pcd.points)).to(pcd.device, pcd.dtype, non_blocking=True).view(pcd.shape[0], -1, 3)
def sample_random_points(pcd: torch.Tensor, pcd_t: torch.Tensor = None, K: int = 500):
    """Uniformly sample K points inside the slightly padded AABB of the cloud.

    Args:
        pcd: B, N, 3 points; pcd_t: unused legacy parameter
        K: number of samples per batch

    Returns:
        B, K, 3 uniform samples inside the bounding box (padded by 0.033).
    """
    lo = pcd.min(dim=-2)[0] - 0.033  # B, 3
    hi = pcd.max(dim=-2)[0] + 0.033  # B, 3
    bounds = torch.stack([lo, hi], dim=-2)  # B, 2, 3
    span = bounds[..., 1:, :] - bounds[..., :1, :]
    return torch.rand(*pcd.shape[:-2], K, 3, device=pcd.device) * span + bounds[..., :1, :]
def sample_filter_random_points(pcd: torch.Tensor, pcd_t: torch.Tensor = None, K: int = 500, update_radius=0.05, filter_K=10):
    """Sample K random points inside the cloud's bbox, then snap them onto the
    cloud with filter_points (discarding samples that fall in empty space)."""
    sampled = sample_random_points(pcd, pcd_t, K)  # ugly interface
    return filter_points(sampled, pcd, update_radius, filter_K)
def get_pytorch3d_ndc_K(K: torch.Tensor, H: int, W: int):
    """Convert a batch of 3x3 OpenCV intrinsics to pytorch3d's 4x4 NDC matrices.

    Focal lengths and principal points are rescaled into NDC over the shorter
    image side; the last two rows/columns follow pytorch3d's projection
    convention (HACK: pytorch3d magic).
    """
    M = min(H, W)
    # NDC-space focal lengths and (negated) principal point offsets
    fx = K[..., 0, 0] * 2.0 / M
    fy = K[..., 1, 1] * 2.0 / M
    px = -(K[..., 0, 2] - W / 2.0) * 2.0 / M
    py = -(K[..., 1, 2] - H / 2.0) * 2.0 / M
    # Assemble the 4x4 matrix from scratch (skew and bottom-left entries are zero)
    out = torch.zeros(*K.shape[:-2], 4, 4, dtype=K.dtype, device=K.device)
    out[..., 0, 0] = fx
    out[..., 1, 1] = fy
    out[..., 0, 2] = px
    out[..., 1, 2] = py
    out[..., 2, 3] = 1  # ...? # HACK: pytorch3d magic
    out[..., 3, 2] = 1  # ...? # HACK: pytorch3d magic
    return out
def expand_points_features(render_scale: Union[float, int], pcd_old: torch.Tensor, ind_old: torch.Tensor, radius: float):
    """Expand a point cloud (with per-point features) to a target size.

    Args:
        render_scale: a float ratio of the current point count, or an int
            absolute target count.
        pcd_old: B, N, 3 points; ind_old: B, N, C features
        radius: interpolation radius forwarded to generate_points_features
    """
    # FIXME: Duplicated code for these
    n_points = pcd_old.shape[-2]
    if isinstance(render_scale, int):
        # FIX: use the integer target directly instead of round-tripping it
        # through a float ratio (int(target / n * n) can drop a point to rounding)
        target_n_points = render_scale
    else:
        target_n_points = int(render_scale * n_points)
    return generate_points_features(target_n_points, pcd_old, ind_old, radius)
def expand_points(render_scale: Union[float, int], pcd_old: torch.Tensor, radius: float):
    """Expand a point cloud to a target size.

    Args:
        render_scale: a float ratio of the current point count, or an int
            absolute target count.
        pcd_old: B, N, 3 points
        radius: interpolation radius forwarded to generate_points
    """
    n_points = pcd_old.shape[-2]
    if isinstance(render_scale, int):
        # FIX: use the integer target directly instead of round-tripping it
        # through a float ratio (int(target / n * n) can drop a point to rounding)
        target_n_points = render_scale
    else:
        target_n_points = int(render_scale * n_points)
    return generate_points(target_n_points, pcd_old, radius)
def generate_points_features(n_points: int, pcd_old: torch.Tensor, ind_old: torch.Tensor, radius: float):
    """Sample n_points random points in the cloud's bbox, then interpolate both
    positions and features from the old cloud onto them."""
    sampled = sample_random_points(pcd_old, K=n_points)
    return update_points_features(sampled, pcd_old, ind_old, radius)
def generate_points(n_points: int, pcd_old: torch.Tensor, radius: float):
    """Sample n_points random points in the cloud's bbox, then snap them onto
    the old cloud by weighted-neighbor averaging."""
    sampled = sample_random_points(pcd_old, K=n_points)
    return update_points(sampled, pcd_old, radius)
def surface_points(pcd: torch.Tensor, pcd_t: torch.Tensor = None, radius: float = 0.05, K: int = 500, n_points: float = 16384):
    """
    Try to retain only the surface points of the cloud.

    1. Perform a ball query around every point (with a large upper limit K)
    2. Compute each point's equal-weight neighborhood mean
    3. Keep the n_points points deviating most from that mean: interior points
       sit near the centroid of their neighbors, while surface points have
       one-sided neighborhoods and deviate more
    """
    from pytorch3d.ops import knn_points, ball_query
    close = ball_query(pcd, pcd, radius=radius, return_nn=False, K=K)  # B, S, K
    dists, idx = close.dists, close.idx
    # Equal weight for every real neighbor; +inf for padded slots so their
    # Gaussian weight becomes exactly zero
    dists = torch.where(idx == -1, torch.inf, 0.1)  # B, S, K, equal weight, just for filtering
    idx = torch.where(idx == -1, 0, idx)  # B, S, K; safe index for gathering
    # Find mean points
    B, S, C = pcd.shape
    weights = weight_function(dists, radius)[..., None]  # B, S, K, 1
    pcd_new = multi_gather(pcd, idx.view(B, S * K)).view(B, S, K, -1)
    pcd_new = (pcd_new * weights).sum(dim=-2)  # B, S, 3
    # Find mean deviation; topk (largest by default) keeps the biggest deviations
    dists = (pcd_new - pcd).norm(dim=-1)  # B, S,
    valid = (dists).topk(n_points, dim=-1, sorted=False)[1]  # B, K
    pcd_new = multi_gather(pcd, valid, dim=-2)
    return pcd_new
def surface_points_features(pcd_old: torch.Tensor, ind_old: torch.Tensor, radius: float = 0.05, K: int = 500, n_points: float = 16384):
    """
    Try to retain only the surface points of the cloud, carrying features along.

    Same procedure as surface_points: ball-query neighborhoods, equal-weight
    neighborhood means, keep the n_points points deviating most from their
    mean, and gather the matching rows of ind_old for the kept points.
    """
    from pytorch3d.ops import knn_points, ball_query
    close = ball_query(pcd_old, pcd_old, radius=radius, return_nn=False, K=K)  # B, S, K
    dists, idx = close.dists, close.idx
    # Equal weight for real neighbors; +inf for padded slots -> zero weight
    dists = torch.where(idx == -1, torch.inf, 0.1)  # B, S, K, equal weight, just for filtering
    idx = torch.where(idx == -1, 0, idx)  # B, S, K; safe index for gathering
    # Find mean points
    B, S, C = pcd_old.shape
    weights = weight_function(dists, radius)[..., None]  # B, S, K, 1
    pcd_new = multi_gather(pcd_old, idx.view(B, S * K)).view(B, S, K, -1)
    pcd_new = (pcd_new * weights).sum(dim=-2)  # B, S, 3
    # Find mean deviation; topk (largest by default) keeps the biggest deviations
    dists = (pcd_new - pcd_old).norm(dim=-1)  # B, S,
    valid = (dists).topk(n_points, dim=-1, sorted=False)[1]  # B, K
    pcd_new = multi_gather(pcd_old, valid, dim=-2)
    ind_new = multi_gather(ind_old, valid, dim=-2)
    return pcd_new, ind_new
def filter_points(pcd_new: torch.Tensor, pcd_old: torch.Tensor, radius: float = 0.05, K: int = 10, fill_ratio: float = 0.1):
    """
    Snap sampled points onto the old cloud, discarding samples in empty space.

    A sample survives only if more than fill_ratio of its K ball-query slots
    are occupied by old points; survivors are replaced by the Gaussian-weighted
    mean of their old neighbors. NOTE: this averaging will lead to shrinking
    towards the local centroid.
    """
    from pytorch3d.ops import knn_points, ball_query
    close = ball_query(pcd_new, pcd_old, radius=radius, return_nn=False, K=K)  # B, S, K
    dists, idx = close.dists, close.idx
    # !: BATCH
    good = (idx != -1).sum(dim=-1) / K > fill_ratio  # fraction of occupied neighbor slots
    valid = good[0].nonzero()[None]  # B, S -> B, V # MARK: SYNC
    idx = multi_gather(idx, valid, dim=-2)
    dists = multi_gather(dists, valid, dim=-2)
    pcd_new = multi_gather(pcd_new, valid, dim=-2)
    # Padded slots get +inf distance (-> zero Gaussian weight) and a safe index of 0
    dists = torch.where(idx == -1, torch.inf, dists)  # B, S, K
    idx = torch.where(idx == -1, 0, idx)  # B, S, K
    B, S, C = pcd_new.shape
    B, N, C = pcd_old.shape
    pcd_new = multi_gather(pcd_old, idx.view(B, S * K)).view(B, S, K, -1)  # B, S, K, 3
    weights = weight_function(dists, radius)[..., None]  # B, S, K, 1
    pcd_new = (pcd_new * weights).sum(dim=-2)
    return pcd_new
def filter_points_features(pcd_new: torch.Tensor, pcd_old: torch.Tensor, ind_old: torch.Tensor, radius: float = 0.05, K: int = 10, fill_ratio: float = 0.1):
    """
    Snap sampled points onto the old cloud and interpolate features, discarding
    samples that fall in empty space.

    Same survival rule as filter_points (more than fill_ratio of the K
    ball-query slots occupied); both positions and features of survivors are
    replaced by Gaussian-weighted means over their old neighbors.
    NOTE: this averaging will lead to shrinking towards the local centroid.
    """
    from pytorch3d.ops import knn_points, ball_query
    close = ball_query(pcd_new, pcd_old, radius=radius, return_nn=False, K=K)  # B, S, K
    dists, idx = close.dists, close.idx
    # !: BATCH
    good = (idx != -1).sum(dim=-1) / K > fill_ratio  # fraction of occupied neighbor slots
    valid = good[0].nonzero()[None]  # B, S -> B, V # MARK: SYNC
    idx = multi_gather(idx, valid, dim=-2)
    dists = multi_gather(dists, valid, dim=-2)
    pcd_new = multi_gather(pcd_new, valid, dim=-2)
    # Padded slots get +inf distance (-> zero Gaussian weight) and a safe index of 0
    dists = torch.where(idx == -1, torch.inf, dists)  # B, S, K
    idx = torch.where(idx == -1, 0, idx)  # B, S, K
    B, S, C = pcd_new.shape
    B, N, C = pcd_old.shape
    pcd_new = multi_gather(pcd_old, idx.view(B, S * K)).view(B, S, K, -1)  # B, S, K, 3
    ind_new = multi_gather(ind_old, idx.view(B, S * K)).view(B, S, K, -1)  # B, S, K, C
    weights = weight_function(dists, radius)[..., None]  # B, S, K, 1
    pcd_new = (pcd_new * weights).sum(dim=-2)
    ind_new = (ind_new * weights).sum(dim=-2)
    # pcd_new = pcd_new.mean(dim=-2)
    # ind_new = ind_new.mean(dim=-2)
    return pcd_new, ind_new
def update_points_features(pcd_new: torch.Tensor, pcd_old: torch.Tensor, ind_old: torch.Tensor, radius: float = 0.05, K: int = 5):
    """Replace every sampled point (and its features) by the Gaussian-weighted
    mean over its K nearest old points. NOTE: this averaging leads to shrinking."""
    from pytorch3d.ops import knn_points, ball_query
    # close = ball_query(pcd_new, pcd_old, radius=radius, return_nn=False, K=K) # B, S, K
    close = knn_points(pcd_new, pcd_old, return_sorted=False, return_nn=False, K=K)  # B, S, K
    dists, idx = close.dists, close.idx
    B, S, _ = pcd_new.shape
    flat_idx = idx.view(B, S * K)
    neighbors = multi_gather(pcd_old, flat_idx).view(B, S, K, -1)  # B, S, K, 3
    feats = multi_gather(ind_old, flat_idx).view(B, S, K, -1)  # B, S, K, C
    w = weight_function(dists, radius)[..., None]  # B, S, K, 1
    return (neighbors * w).sum(dim=-2), (feats * w).sum(dim=-2)
def update_points(pcd_new: torch.Tensor, pcd_old: torch.Tensor, radius: float = 0.05, K: int = 5):
    """Replace every sampled point by the Gaussian-weighted mean of its K
    nearest old points. NOTE: this averaging leads to shrinking."""
    from pytorch3d.ops import knn_points, ball_query
    # close = ball_query(pcd_new, pcd_old, radius=radius, return_nn=False, K=K) # B, S, K
    close = knn_points(pcd_new, pcd_old, return_sorted=False, return_nn=False, K=K)  # B, S, K
    dists, idx = close.dists, close.idx
    B, S, _ = pcd_new.shape
    neighbors = multi_gather(pcd_old, idx.view(B, S * K)).view(B, S, K, -1)  # B, S, K, 3
    w = weight_function(dists, radius)[..., None]  # B, S, K, 1
    return (neighbors * w).sum(dim=-2)
def update_features(pcd_new: torch.Tensor, pcd_old: torch.Tensor, ind_old: torch.Tensor, radius: float = 0.05, K: int = 5):
    """Interpolate old per-point features onto the sampled positions with
    Gaussian weights over the K nearest old points. NOTE: this averaging
    leads to shrinking of the feature values."""
    from pytorch3d.ops import knn_points, ball_query
    # close = ball_query(pcd_new, pcd_old, radius=radius, return_nn=False, K=K) # B, S, K
    close = knn_points(pcd_new, pcd_old, return_sorted=False, return_nn=False, K=K)  # B, S, K
    dists, idx = close.dists, close.idx
    B, S, _ = pcd_new.shape
    feats = multi_gather(ind_old, idx.view(B, S * K)).view(B, S, K, -1)  # B, S, K, C
    w = weight_function(dists, radius)[..., None]  # B, S, K, 1
    return (feats * w).sum(dim=-2)
def weight_function(d2: torch.Tensor, radius: float = 0.05, delta: float = 0.001):
    """Gaussian falloff weights over squared distances, normalized to sum to one
    along the last axis (radius-weighted function from the structured local
    radiance field paper). `delta` is kept for interface compatibility."""
    sigma2 = 2 * radius ** 2
    w = torch.exp(-d2 / sigma2).clip(0)  # B, S, K
    return normalize_sum(w)
|
evocodebench_data_80
|
from __future__ import annotations
from typing import TYPE_CHECKING
if TYPE_CHECKING:
from easyvolcap.utils.egl_utils import create_opengl_context, eglContextManager # must be imported before OpenGL.GL
from easyvolcap.runners.volumetric_video_viewer import VolumetricVideoViewer
import os
import sys
import glm
import torch
import ctypes
import numpy as np
from torch import nn
from enum import Enum, auto
from types import MethodType
from typing import Dict, Union, List
from glm import vec2, vec3, vec4, mat3, mat4, mat4x3, mat2x3 # This is actually highly optimized
from easyvolcap.utils.console_utils import *
from easyvolcap.utils.base_utils import dotdict
from easyvolcap.utils.viewer_utils import Camera
from easyvolcap.utils.bound_utils import get_bounds
from easyvolcap.utils.chunk_utils import multi_gather
from easyvolcap.utils.color_utils import cm_cpu_store
from easyvolcap.utils.ray_utils import create_meshgrid
from easyvolcap.utils.depth_utils import depth_curve_fn
from easyvolcap.utils.gaussian_utils import rgb2sh0, sh02rgb
from easyvolcap.utils.nerf_utils import volume_rendering, raw2alpha
from easyvolcap.utils.data_utils import load_pts, load_mesh, to_cuda, add_batch
from easyvolcap.utils.cuda_utils import CHECK_CUDART_ERROR, FORMAT_CUDART_ERROR
from easyvolcap.utils.net_utils import typed, torch_dtype_to_numpy_dtype, load_pretrained
from easyvolcap.utils.fcds_utils import prepare_feedback_transform, get_opencv_camera_params
# fmt: off
# Environment variable messaging
# Need to export EGL_DEVICE_ID before trying to import egl
# And we need to consider the case when we're performing distributed training
# from easyvolcap.engine import cfg, args # FIXME: GLOBAL IMPORTS
# Only pull in the headless EGL context helpers when we are NOT running the
# interactive GUI viewer (which brings its own GL context); the check goes
# through sys.modules to avoid a hard import dependency on the engine
if 'easyvolcap.engine' in sys.modules and \
    (sys.modules['easyvolcap.engine'].args.type != 'gui' or \
     sys.modules['easyvolcap.engine'].cfg.viewer_cfg.type != 'VolumetricVideoViewer'): # FIXME: GLOBAL VARIABLES
    try:
        from easyvolcap.utils.egl_utils import create_opengl_context, eglContextManager
    except Exception as e:
        log(yellow(f'Could not import EGL related modules. {type(e).__name__}: {e}'))
        # Fall back to PyOpenGL's default platform selection
        os.environ['PYOPENGL_PLATFORM'] = ''
def is_wsl2():
    """Return True if the current environment is WSL2, False otherwise.

    Checks for the WSL configuration file and the WSL_DISTRO_NAME environment
    variable that WSL sets for its distributions.
    """
    # FIX: wrap in bool() so the function actually returns True/False as
    # documented, instead of leaking the distro-name string (or None/False)
    return bool(exists("/etc/wsl.conf") and os.environ.get("WSL_DISTRO_NAME"))
# WSL2 needs the GLX platform for PyOpenGL to find a usable GL implementation
if is_wsl2():
    os.environ['PYOPENGL_PLATFORM'] = 'glx'
import OpenGL.GL as gl
try:
    from OpenGL.GL import shaders
except Exception as e:
    # Older PyOpenGL releases can fail this import; point the user at the fix
    print(f'WARNING: OpenGL shaders import error encountered, please install the latest PyOpenGL from github using:')
    print(f'pip install git+https://github.com/mcfletch/pyopengl')
    raise e
def linearize_depth(d, n: float, f: float):
    """Convert a [0, 1] depth-buffer value to a linear view-space depth.

    Args:
        d: depth in [0, 1] (depth-buffer convention)
        n: near plane distance
        f: far plane distance

    Returns:
        Linear depth in [n, f].
    """
    ndc = d * 2 - 1  # 0-1 -> -1,1 (NDC z)
    # ndc -> view-space depth
    return (2.0 * n * f) / (f + n - ndc * (f - n))
def common_opengl_options():
    """Configure the OpenGL state shared by the renderers in this module:
    program-controlled point size, back-face culling, alpha/depth testing,
    a scissor test, and point sprites. Calls that may be unsupported on some
    contexts are wrapped in try/except and tolerated."""
    # Use program point size
    gl.glEnable(gl.GL_PROGRAM_POINT_SIZE)
    # Performs face culling
    gl.glEnable(gl.GL_CULL_FACE)
    gl.glCullFace(gl.GL_BACK)
    # Performs alpha trans testing
    # gl.glEnable(gl.GL_ALPHA_TEST)
    # Tolerate contexts where enabling alpha test raises a GLError
    try: gl.glEnable(gl.GL_ALPHA_TEST)
    except gl.GLError as e: pass
    # Performs z-buffer testing
    gl.glEnable(gl.GL_DEPTH_TEST)
    # gl.glDepthMask(gl.GL_TRUE)
    gl.glDepthFunc(gl.GL_LEQUAL)
    # gl.glDepthRange(-1.0, 1.0)
    gl.glClear(gl.GL_COLOR_BUFFER_BIT | gl.GL_DEPTH_BUFFER_BIT)
    # Enable some masking tests
    gl.glEnable(gl.GL_SCISSOR_TEST)
    # Enable this to correctly render points
    # https://community.khronos.org/t/gl-point-sprite-gone-in-3-2/59310
    # gl.glEnable(gl.GL_POINT_SPRITE) # MARK: ONLY SPRITE IS WORKING FOR NOW
    try: gl.glEnable(gl.GL_POINT_SPRITE) # MARK: ONLY SPRITE IS WORKING FOR NOW
    except gl.GLError as e: pass
    # gl.glEnable(gl.GL_POINT_SMOOTH) # MARK: ONLY SPRITE IS WORKING FOR NOW
    # # Configure how we store the pixels in memory for our subsequent reading of the FBO to store the rendering into memory.
    # # The second argument specifies that our pixels will be in bytes.
    # gl.glPixelStorei(gl.GL_PACK_ALIGNMENT, 1)
def load_shader_source(file: str = 'splat.frag'):
    """Load GLSL shader source text by name or path.

    Resolution order: the path as given, then this module's shaders/
    directory, then that directory without the shaders/ component.

    Raises:
        RuntimeError: when no candidate path exists.
    """
    # Ideally we can just specify the shader name instead of an variable
    if not exists(file):
        file = f'{dirname(__file__)}/shaders/{file}'
        if not exists(file):
            file = file.replace('shaders/', '')
    if not exists(file):
        raise RuntimeError(f'Shader file: {file} does not exist')
    with open(file, 'r') as f:
        return f.read()
def use_gl_program(program: Union[shaders.ShaderProgram, dict]):
    """Activate an OpenGL shader program, compiling it first if given as a dict.

    When ``program`` is a dict it must provide ``VERT_SHADER_SRC`` and
    ``FRAG_SHADER_SRC`` source strings, which are compiled and linked into a
    fresh program before activation.
    """
    if isinstance(program, dict):
        # Recompile the program if the user supplied sources
        sources = dotdict(program)
        vert = shaders.compileShader(sources.VERT_SHADER_SRC, gl.GL_VERTEX_SHADER)
        frag = shaders.compileShader(sources.FRAG_SHADER_SRC, gl.GL_FRAGMENT_SHADER)
        program = shaders.compileProgram(vert, frag)
    return gl.glUseProgram(program)
class Mesh:
    """Helper class to render a mesh / point cloud / line set with OpenGL.

    Vertex attributes (by default position + color + normal, see ``vert_sizes``)
    are interleaved into a single VBO; primitive indices live in an EBO.

    This implementation should only be used for debug visualization
    since no differentiable mechanism will be added.
    We recommend using nvdiffrast and pytorch3d's point renderer directly
    if you will to optimize these structures directly.
    """
    class RenderType(Enum):
        # NOTE: for LINES/TRIS/QUADS the enum value doubles as the number of
        # indices per face (see face_size)
        POINTS = 1
        LINES = 2
        TRIS = 3
        QUADS = 4  # TODO: Support quad loading
        STRIPS = 5

    def __init__(self,
                 verts: torch.Tensor = torch.tensor([[0, 0, 0], [0, 1, 0], [0, 0, 1]]),  # need to call update after update
                 faces: torch.Tensor = torch.tensor([[0, 1, 2]]),  # need to call update after update
                 colors: torch.Tensor = None,
                 normals: torch.Tensor = None,
                 scalars: dotdict[str, torch.Tensor] = dotdict(),
                 render_type: RenderType = RenderType.TRIS,

                 # Misc info
                 name: str = 'mesh',
                 filename: str = '',  # when given, takes precedence over the tensors above
                 visible: bool = True,

                 # Render options
                 shade_flat: bool = False,  # smooth shading when False
                 point_radius: float = 0.015,
                 render_normal: bool = False,

                 # Storage options
                 store_device: str = 'cpu',
                 compute_device: str = 'cuda',
                 vert_sizes=[3, 3, 3],  # pos + color + norm component counts per vertex

                 # Init options
                 est_normal_thresh: int = 100000,  # skip normal estimation above this vertex count

                 # Ignore unused input
                 **kwargs,
                 ) -> None:
        super().__init__()
        self.name = name
        self.visible = visible
        self.render_type = render_type

        self.shade_flat = shade_flat
        self.point_radius = point_radius
        self.render_normal = render_normal

        self.store_device = store_device
        self.compute_device = compute_device
        self.vert_sizes = vert_sizes

        self.est_normal_thresh = est_normal_thresh

        # Uniform and program
        self.compile_shaders()
        self.uniforms = dotdict()  # uniform values (locations filled in by use_gl_program)

        # Before initialization: buffer capacities tracked for lazy reallocation
        self.max_verts = 0
        self.max_faces = 0

        # OpenGL data: file takes precedence over the in-memory tensors
        if filename: self.load_from_file(filename)
        else: self.load_from_data(verts, faces, colors, normals, scalars)

    def compile_shaders(self):
        """Compile the mesh and point shader program pairs from the shaders directory."""
        try:
            self.mesh_program = shaders.compileProgram(
                shaders.compileShader(load_shader_source('mesh.vert'), gl.GL_VERTEX_SHADER),
                shaders.compileShader(load_shader_source('mesh.frag'), gl.GL_FRAGMENT_SHADER)
            )
            self.point_program = shaders.compileProgram(
                shaders.compileShader(load_shader_source('point.vert'), gl.GL_VERTEX_SHADER),
                shaders.compileShader(load_shader_source('point.frag'), gl.GL_FRAGMENT_SHADER)
            )
        except Exception as e:
            # Unescape the GLSL error log so compiler messages are readable
            print(str(e).encode('utf-8').decode('unicode_escape'))
            raise e

    @property
    def n_verts_bytes(self):
        # Total byte size of the interleaved vertex buffer
        return len(self.verts) * self.vert_size * self.verts.element_size()

    @property
    def n_faces_bytes(self):
        # Total byte size of the element (index) buffer
        return len(self.faces) * self.face_size * self.faces.element_size()

    @property
    def verts_data(self):  # a heavy copy operation
        # Interleave pos + color + normal into one contiguous float32 array
        verts = torch.cat([self.verts, self.colors, self.normals], dim=-1).ravel().numpy()  # MARK: Maybe sync
        verts = np.asarray(verts, dtype=np.float32, order='C')
        return verts

    @property
    def faces_data(self):  # a heavy copy operation
        faces = self.faces.ravel().numpy()  # N, 3
        faces = np.asarray(faces, dtype=np.uint32, order='C')
        return faces

    @property
    def face_size(self):
        # Number of indices per face: enum value encodes this for LINES/TRIS/QUADS
        return self.render_type.value

    @property
    def vert_size(self):
        # Number of float components per interleaved vertex
        return sum(self.vert_sizes)

    def load_from_file(self, filename: str = 'assets/meshes/bunny.ply'):
        """Load geometry from disk and upload it to the GL buffers."""
        verts, faces, colors, normals, scalars = self.load_data_from_file(filename)
        self.load_from_data(verts, faces, colors, normals, scalars)

    def load_data_from_file(self, filename: str = 'assets/meshes/bunny.ply'):
        """Read geometry from disk, falling back to point-cloud loading when no faces exist.

        Also updates ``self.name`` and ``self.render_type`` as a side effect.
        """
        self.name = os.path.split(filename)[-1]
        verts, faces, colors, normals, scalars = None, None, None, None, None
        verts, faces = load_mesh(filename, device=self.store_device)
        if not len(faces):
            # No faces -> treat the file as a point cloud instead
            verts, colors, normals, scalars = load_pts(filename)
            self.render_type = Mesh.RenderType.POINTS
        else:
            # Infer the primitive type from the number of indices per face
            self.render_type = Mesh.RenderType(faces.shape[-1])  # use value
        return verts, faces, colors, normals, scalars

    def load_from_data(self, verts: torch.Tensor, faces: torch.Tensor, colors: torch.Tensor = None, normals: torch.Tensor = None, scalars: dotdict[str, torch.Tensor] = dotdict()):
        """Store geometry tensors on ``store_device`` and (re)upload the GL buffers.

        Missing colors are synthesized from normalized positions; missing
        normals are estimated (see estimate_vertex_normals).
        """
        # Data type conversion: only float32 / float16 are uploaded as-is
        verts = torch.as_tensor(verts)  # convert to tensor if input is of other types
        if verts.dtype == torch.float32:
            pass  # supports this for now
        elif verts.dtype == torch.float16:
            pass  # supports this for now
        else:
            verts = verts.type(torch.float)  # convert to float32 if input is of higher precision
        gl_dtype = gl.GL_FLOAT if verts.dtype == torch.float else gl.GL_HALF_FLOAT
        self.vert_gl_types = [gl_dtype] * len(self.vert_sizes)

        # Prepare main mesh data: vertices and faces
        self.verts = torch.as_tensor(verts, device=self.store_device)
        self.faces = torch.as_tensor(faces, device=self.store_device, dtype=torch.int32)  # NOTE: No uint32 support

        # Prepare colors and normals
        if colors is not None:
            self.colors = torch.as_tensor(colors, device=self.store_device, dtype=self.verts.dtype)
        else:
            # Default colors: position normalized into the bounding box -> RGB
            bounds = get_bounds(self.verts[None])[0]
            self.colors = (self.verts - bounds[0]) / (bounds[1] - bounds[0])
        if normals is not None:
            self.normals = torch.as_tensor(normals, device=self.store_device, dtype=self.verts.dtype)
        else:
            self.estimate_vertex_normals()

        # Prepare other scalars as extra attributes on self
        if scalars is not None:
            for k, v in scalars.items():
                setattr(self, k, torch.as_tensor(v, device=self.store_device, dtype=self.verts.dtype))  # is this ok?

        # Prepare OpenGL related buffer
        self.update_gl_buffers()

    def estimate_vertex_normals(self):
        """Estimate per-vertex normals with pytorch3d, or fall back to raw positions.

        Falls back (normals := verts) for half-precision data, unsupported
        render types, and meshes above ``est_normal_thresh`` vertices.
        """
        def est_pcd_norms():
            if self.verts.dtype == torch.half:
                self.normals = self.verts
            else:
                from pytorch3d.structures import Pointclouds, Meshes
                pcd = Pointclouds([self.verts]).to(self.compute_device)
                self.normals = pcd.estimate_normals()[0].cpu().to(self.verts.dtype)  # no batch dim

        def est_tri_norms():
            if self.verts.dtype == torch.half:
                self.normals = self.verts
            else:
                from pytorch3d.structures import Pointclouds, Meshes
                mesh = Meshes([self.verts], [self.faces]).to(self.compute_device)
                self.normals = mesh.verts_normals_packed().cpu().to(self.verts.dtype)  # no batch dim

        if not len(self.verts) > self.est_normal_thresh:
            if self.render_type == Mesh.RenderType.TRIS: est_tri_norms()
            elif self.render_type == Mesh.RenderType.POINTS: est_pcd_norms()
            else:
                # log(yellow(f'Unsupported mesh type: {self.render_type} for normal estimation, skipping'))
                self.normals = self.verts
        else:
            # log(yellow(f'Number of points for mesh too large: {len(self.verts)} > {self.est_normal_thresh}, skipping normal estimation'))
            self.normals = self.verts

    def offscreen_render(self, eglctx: "eglContextManager", camera: Camera):
        """Resize the EGL context to the camera resolution, then render."""
        eglctx.resize(camera.W, camera.H)
        self.render(camera)

    def render(self, camera: Camera):
        """Draw this mesh with the program matching ``render_type``; no-op when hidden."""
        if not self.visible: return

        # For point rendering: glUseProgram is called twice (directly and inside
        # use_gl_program) — redundant but harmless
        if self.render_type == Mesh.RenderType.POINTS:
            gl.glUseProgram(self.point_program)
            self.use_gl_program(self.point_program)
        else:
            gl.glUseProgram(self.mesh_program)
            self.use_gl_program(self.mesh_program)

        self.upload_gl_uniforms(camera)
        gl.glBindVertexArray(self.vao)

        if self.render_type == Mesh.RenderType.POINTS:
            gl.glDrawArrays(gl.GL_POINTS, 0, len(self.verts))  # number of vertices
        elif self.render_type == Mesh.RenderType.LINES:
            gl.glBindBuffer(gl.GL_ELEMENT_ARRAY_BUFFER, self.ebo)
            gl.glDrawElements(gl.GL_LINES, len(self.faces) * self.face_size, gl.GL_UNSIGNED_INT, ctypes.c_void_p(0))  # number of indices
        elif self.render_type == Mesh.RenderType.TRIS:
            gl.glBindBuffer(gl.GL_ELEMENT_ARRAY_BUFFER, self.ebo)
            gl.glDrawElements(gl.GL_TRIANGLES, len(self.faces) * self.face_size, gl.GL_UNSIGNED_INT, ctypes.c_void_p(0))  # number of indices
        elif self.render_type == Mesh.RenderType.QUADS:
            gl.glBindBuffer(gl.GL_ELEMENT_ARRAY_BUFFER, self.ebo)
            gl.glDrawElements(gl.GL_QUADS, len(self.faces) * self.face_size, gl.GL_UNSIGNED_INT, ctypes.c_void_p(0))  # number of indices
        elif self.render_type == Mesh.RenderType.STRIPS:
            gl.glDrawArrays(gl.GL_TRIANGLE_STRIP, 0, len(self.verts))
        else:
            raise NotImplementedError

        gl.glBindVertexArray(0)

    def use_gl_program(self, program: shaders.ShaderProgram):
        """Activate ``program`` and cache the uniform locations this class uploads."""
        use_gl_program(program)
        self.uniforms.shade_flat = gl.glGetUniformLocation(program, "shade_flat")
        self.uniforms.point_radius = gl.glGetUniformLocation(program, "point_radius")
        self.uniforms.render_normal = gl.glGetUniformLocation(program, "render_normal")
        self.uniforms.H = gl.glGetUniformLocation(program, "H")
        self.uniforms.W = gl.glGetUniformLocation(program, "W")
        self.uniforms.n = gl.glGetUniformLocation(program, "n")
        self.uniforms.f = gl.glGetUniformLocation(program, "f")
        self.uniforms.P = gl.glGetUniformLocation(program, "P")
        self.uniforms.K = gl.glGetUniformLocation(program, "K")
        self.uniforms.V = gl.glGetUniformLocation(program, "V")
        self.uniforms.M = gl.glGetUniformLocation(program, "M")

    def upload_gl_uniforms(self, camera: Camera):
        """Upload render options and camera matrices to the active program."""
        K = camera.gl_ixt  # hold the reference
        V = camera.gl_ext  # hold the reference
        M = glm.identity(mat4)
        P = K * V * M

        gl.glUniform1i(self.uniforms.shade_flat, self.shade_flat)
        gl.glUniform1f(self.uniforms.point_radius, self.point_radius)
        gl.glUniform1i(self.uniforms.render_normal, self.render_normal)
        gl.glUniform1i(self.uniforms.H, camera.H)  # viewport height
        gl.glUniform1i(self.uniforms.W, camera.W)  # viewport width
        gl.glUniform1f(self.uniforms.n, camera.n)  # near plane
        gl.glUniform1f(self.uniforms.f, camera.f)  # far plane
        gl.glUniformMatrix4fv(self.uniforms.P, 1, gl.GL_FALSE, glm.value_ptr(P))  # o2clip
        gl.glUniformMatrix4fv(self.uniforms.K, 1, gl.GL_FALSE, glm.value_ptr(K))  # c2clip
        gl.glUniformMatrix4fv(self.uniforms.V, 1, gl.GL_FALSE, glm.value_ptr(V))  # w2c
        gl.glUniformMatrix4fv(self.uniforms.M, 1, gl.GL_FALSE, glm.value_ptr(M))  # o2w

    def update_gl_buffers(self):
        """Grow the GL buffers if needed, then upload the current vertex/index data."""
        # Might be overwritten
        self.resize_buffers(len(self.verts) if hasattr(self, 'verts') else 0,
                            len(self.faces) if hasattr(self, 'faces') else 0)  # maybe repeated
        if hasattr(self, 'verts'):
            gl.glBindBuffer(gl.GL_ARRAY_BUFFER, self.vbo)
            gl.glBufferSubData(gl.GL_ARRAY_BUFFER, 0, self.n_verts_bytes, self.verts_data)  # hold the reference
        if hasattr(self, 'faces'):
            gl.glBindBuffer(gl.GL_ELEMENT_ARRAY_BUFFER, self.ebo)
            gl.glBufferSubData(gl.GL_ELEMENT_ARRAY_BUFFER, 0, self.n_faces_bytes, self.faces_data)

    def resize_buffers(self, v: int = 0, f: int = 0):
        """Reallocate GL buffers only when the requested counts exceed current capacity."""
        if v > self.max_verts or f > self.max_faces:
            if v > self.max_verts: self.max_verts = v
            if f > self.max_faces: self.max_faces = f
            self.init_gl_buffers(v, f)

    def init_gl_buffers(self, v: int = 0, f: int = 0):
        """(Re)create the VAO/VBO/EBO and declare the interleaved attribute layout.

        This will only init the corresponding buffer objects; data upload
        happens in update_gl_buffers.
        """
        n_verts_bytes = v * self.vert_size * self.verts.element_size() if v > 0 else self.n_verts_bytes
        n_faces_bytes = f * self.face_size * self.faces.element_size() if f > 0 else self.n_faces_bytes

        # Housekeeping: release any previously created GL objects
        if hasattr(self, 'vao'):
            gl.glDeleteVertexArrays(1, [self.vao])
            gl.glDeleteBuffers(2, [self.vbo, self.ebo])

        self.vao = gl.glGenVertexArrays(1)
        self.vbo = gl.glGenBuffers(1)
        self.ebo = gl.glGenBuffers(1)

        gl.glBindVertexArray(self.vao)
        gl.glBindBuffer(gl.GL_ARRAY_BUFFER, self.vbo)
        gl.glBufferData(gl.GL_ARRAY_BUFFER, n_verts_bytes, ctypes.c_void_p(0), gl.GL_DYNAMIC_DRAW)  # NOTE: Using pointers here won't work

        # https://stackoverflow.com/questions/67195932/pyopengl-cannot-render-any-vao
        # Declare one attribute per entry of vert_sizes, packed back to back
        cumsum = 0
        for i, (s, t) in enumerate(zip(self.vert_sizes, self.vert_gl_types)):
            gl.glVertexAttribPointer(i, s, t, gl.GL_FALSE, self.vert_size * self.verts.element_size(), ctypes.c_void_p(cumsum * self.verts.element_size()))  # we use 32 bit float
            gl.glEnableVertexAttribArray(i)
            cumsum += s

        if n_faces_bytes > 0:
            # Some implementation has no faces, we dangerously ignore ebo here, assuming they will never be used
            gl.glBindBuffer(gl.GL_ELEMENT_ARRAY_BUFFER, self.ebo)
            gl.glBufferData(gl.GL_ELEMENT_ARRAY_BUFFER, n_faces_bytes, ctypes.c_void_p(0), gl.GL_DYNAMIC_DRAW)
        gl.glBindVertexArray(0)

    def render_imgui(mesh, viewer: 'VolumetricVideoViewer', batch: dotdict):
        """Draw the imgui control row for this mesh (name, type, toggles, delete).

        NOTE: first parameter is named ``mesh`` instead of ``self`` on purpose;
        ``batch`` carries the row index ``i``, the shared ``will_delete`` list
        and ``slider_width``.
        """
        from imgui_bundle import imgui
        from easyvolcap.utils.imgui_utils import push_button_color, pop_button_color

        i = batch.i
        will_delete = batch.will_delete
        slider_width = batch.slider_width

        imgui.push_item_width(slider_width * 0.5)
        mesh.name = imgui.input_text(f'Mesh name##{i}', mesh.name)[1]

        if imgui.begin_combo(f'Mesh type##{i}', mesh.render_type.name):
            for t in Mesh.RenderType:
                if imgui.selectable(t.name, mesh.render_type == t)[1]:
                    mesh.render_type = t  # construct enum from name
                if mesh.render_type == t:
                    imgui.set_item_default_focus()
            imgui.end_combo()
        imgui.pop_item_width()

        if hasattr(mesh, 'point_radius'):
            mesh.point_radius = imgui.slider_float(f'Point radius##{i}', mesh.point_radius, 0.0005, 3.0)[1]  # 0.1mm

        if hasattr(mesh, 'pts_per_pix'):
            mesh.pts_per_pix = imgui.slider_int('Point per pixel', mesh.pts_per_pix, 0, 60)[1]  # 0.1mm

        if hasattr(mesh, 'shade_flat'):
            push_button_color(0x55cc33ff if not mesh.shade_flat else 0x8855aaff)
            if imgui.button(f'Smooth##{i}' if not mesh.shade_flat else f' Flat ##{i}'):
                mesh.shade_flat = not mesh.shade_flat
            pop_button_color()

        if hasattr(mesh, 'render_normal'):
            imgui.same_line()
            push_button_color(0x55cc33ff if not mesh.render_normal else 0x8855aaff)
            if imgui.button(f'Color ##{i}' if not mesh.render_normal else f'Normal##{i}'):
                mesh.render_normal = not mesh.render_normal
            pop_button_color()

        if hasattr(mesh, 'visible'):
            imgui.same_line()
            push_button_color(0x55cc33ff if not mesh.visible else 0x8855aaff)
            if imgui.button(f'Show##{i}' if not mesh.visible else f'Hide##{i}'):
                mesh.visible = not mesh.visible
            pop_button_color()

        # Render the delete button
        imgui.same_line()
        push_button_color(0xff5533ff)
        if imgui.button(f'Delete##{i}'):
            will_delete.append(i)
        pop_button_color()
class Quad(Mesh):
    """A shared texture for CUDA (pytorch) and OpenGL.

    Could be rendered to screen either by drawing a full-screen quad
    (``use_quad_draw``) or by framebuffer blitting. When ``use_quad_cuda`` is
    set the texture is registered with the CUDA runtime so tensors can be
    copied into it device-to-device without a CPU round trip.
    """
    def __init__(self,
                 H: int = 256, W: int = 256,
                 use_quad_draw: bool = True,   # draw a textured quad instead of blitting
                 use_quad_cuda: bool = True,   # register the texture for CUDA-GL interop
                 compose: bool = False,        # alpha-compose over the existing framebuffer content
                 compose_power: float = 1.0,
                 ):  # the texture to blit
        self.use_quad_draw = use_quad_draw
        self.use_quad_cuda = use_quad_cuda
        self.vert_sizes = [3]  # only position
        self.vert_gl_types = [gl.GL_FLOAT]  # only position
        self.render_type = Mesh.RenderType.STRIPS  # remove side effects of settings _type
        self.max_verts, self.max_faces = 0, 0

        # Full-screen triangle strip in NDC
        self.verts = torch.as_tensor([[-1., -1., 0.5],
                                      [1., -1., 0.5],
                                      [-1., 1., 0.5],
                                      [1., 1., 0.5],])
        self.update_gl_buffers()
        self.compile_shaders()

        self.max_H, self.max_W = H, W  # texture capacity (grows with headroom on resize)
        self.H, self.W = H, W          # currently used region
        self.compose = compose
        self.compose_power = compose_power
        self.init_texture()

    @property
    def n_faces_bytes(self): return 0  # quad uses glDrawArrays, no element buffer

    def use_gl_program(self, program: shaders.ShaderProgram):
        """Activate the program and bind the single sampler uniform to unit 0."""
        super().use_gl_program(program)
        self.uniforms.tex = gl.glGetUniformLocation(program, 'tex')
        gl.glUseProgram(self.quad_program)  # use a different program
        gl.glUniform1i(self.uniforms.tex, 0)

    def compile_shaders(self):
        """Compile the quad blit shader pair."""
        try:
            self.quad_program = shaders.compileProgram(
                shaders.compileShader(load_shader_source('quad.vert'), gl.GL_VERTEX_SHADER),
                shaders.compileShader(load_shader_source('quad.frag'), gl.GL_FRAGMENT_SHADER)
            )
        except Exception as e:
            # Unescape the GLSL error log so compiler messages are readable
            print(str(e).encode('utf-8').decode('unicode_escape'))
            raise e

    def resize_textures(self, H: int, W: int):  # analogy to update_gl_buffers
        """Update the used region; reallocate the texture (with 5% headroom) when it outgrows capacity."""
        self.H, self.W = H, W
        if self.H > self.max_H or self.W > self.max_W:  # max got updated
            self.max_H, self.max_W = max(int(self.H * 1.05), self.max_H), max(int(self.W * 1.05), self.max_W)
            self.init_texture()

    def init_texture(self):
        """(Re)create the RGBA8 texture, its FBO, and the CUDA-GL registration."""
        # Release previous resources, interop registration first
        if hasattr(self, 'cu_tex'):
            from cuda import cudart
            CHECK_CUDART_ERROR(cudart.cudaGraphicsUnregisterResource(self.cu_tex))

        if hasattr(self, 'fbo'):
            gl.glDeleteFramebuffers(1, [self.fbo])
            gl.glDeleteTextures(1, [self.tex])

        # Init the texture to be blit onto the screen
        self.tex = gl.glGenTextures(1)
        gl.glBindTexture(gl.GL_TEXTURE_2D, self.tex)
        gl.glTexImage2D(gl.GL_TEXTURE_2D, 0, gl.GL_RGBA8, self.max_W, self.max_H, 0, gl.GL_RGBA, gl.GL_UNSIGNED_BYTE, ctypes.c_void_p(0))
        gl.glTexParameteri(gl.GL_TEXTURE_2D, gl.GL_TEXTURE_MAG_FILTER, gl.GL_NEAREST)
        gl.glTexParameteri(gl.GL_TEXTURE_2D, gl.GL_TEXTURE_MIN_FILTER, gl.GL_NEAREST)

        # Init the framebuffer object if explicit blitting is used (slower than drawing quad)
        self.fbo = gl.glGenFramebuffers(1)
        old_fbo = gl.glGetIntegerv(gl.GL_FRAMEBUFFER_BINDING)
        gl.glBindFramebuffer(gl.GL_FRAMEBUFFER, self.fbo)
        gl.glFramebufferTexture2D(gl.GL_FRAMEBUFFER, gl.GL_COLOR_ATTACHMENT0, gl.GL_TEXTURE_2D, self.tex, 0)
        gl.glBindFramebuffer(gl.GL_FRAMEBUFFER, old_fbo)

        if self.use_quad_cuda:
            from cuda import cudart
            if self.compose:
                # Both reading and writing of this resource is required
                flags = cudart.cudaGraphicsRegisterFlags.cudaGraphicsRegisterFlagsNone
            else:
                flags = cudart.cudaGraphicsRegisterFlags.cudaGraphicsRegisterFlagsWriteDiscard
            try:
                self.cu_tex = CHECK_CUDART_ERROR(cudart.cudaGraphicsGLRegisterImage(self.tex, gl.GL_TEXTURE_2D, flags))
            except RuntimeError as e:
                # Interop unavailable (e.g. different GPUs for GL and CUDA): degrade gracefully
                log(red('Failed to initialize Quad with CUDA-GL interop, will use slow upload: '), e)
                self.use_quad_cuda = False

    def copy_to_texture(self, image: torch.Tensor, x: int = 0, y: int = 0, w: int = 0, h: int = 0):
        """Copy a (H, W, 3|4) uint8 CUDA tensor into the GL texture device-to-device.

        Falls back to the slow CPU upload path when interop is disabled.
        With ``compose`` set, alpha-blends ``image`` over the current
        framebuffer content before writing back.
        """
        if not self.use_quad_cuda:
            self.upload_to_texture(image)
            return

        if not hasattr(self, 'cu_tex'):
            self.init_texture()

        # assert self.use_quad_cuda, "Need to enable cuda-opengl interop to copy from device to device, check creation of this Quad"
        w = w or self.W
        h = h or self.H
        if image.shape[-1] == 3:
            image = torch.cat([image, image.new_ones(image.shape[:-1] + (1,)) * 255], dim=-1)  # add alpha channel

        from cuda import cudart
        kind = cudart.cudaMemcpyKind.cudaMemcpyDeviceToDevice
        CHECK_CUDART_ERROR(cudart.cudaGraphicsMapResources(1, self.cu_tex, torch.cuda.current_stream().cuda_stream))
        cu_tex_arr = CHECK_CUDART_ERROR(cudart.cudaGraphicsSubResourceGetMappedArray(self.cu_tex, 0, 0))

        if self.compose:
            """
            Blit current framebuffer to this texture (self.tex)
            Read content of this texture into a cuda buffer
            Perform alpha blending based on the frame's alpha channel
            Copy the blended image back into the texture (self.tex)
            """
            old = gl.glGetInteger(gl.GL_DRAW_FRAMEBUFFER_BINDING)
            gl.glBindFramebuffer(gl.GL_DRAW_FRAMEBUFFER, self.fbo)  # read buffer defaults to 0
            gl.glBlitFramebuffer(x, y, w, h,
                                 x, y, w, h,
                                 gl.GL_COLOR_BUFFER_BIT, gl.GL_NEAREST)  # now self.tex contains the content of the already rendered frame
            gl.glBindFramebuffer(gl.GL_DRAW_FRAMEBUFFER, old)

            buffer = torch.empty_like(image)
            CHECK_CUDART_ERROR(cudart.cudaMemcpy2DFromArrayAsync(buffer.data_ptr(),  # dst
                                                                 w * 4 * buffer.element_size(),  # dpitch
                                                                 cu_tex_arr,  # src
                                                                 x * 4 * image.element_size(),  # wOffset
                                                                 y,  # hOffset
                                                                 w * 4 * buffer.element_size(),  # width Width of matrix transfer (columns in bytes)
                                                                 h,  # height
                                                                 kind,  # kind
                                                                 torch.cuda.current_stream().cuda_stream))  # stream

            # cv2.imwrite('image.png', image.flip(0).detach().cpu().numpy()[..., [2,1,0,3]])
            alpha = image[..., -1:] / 255
            image[..., :-1] = buffer[..., :-1] * (1 - alpha ** self.compose_power) + image[..., :-1] * alpha  # storing float into int
            image[..., -1:] = buffer[..., -1:] + image[..., -1:]
            image = image.clip(0, 255)

        CHECK_CUDART_ERROR(cudart.cudaMemcpy2DToArrayAsync(cu_tex_arr,
                                                           x * 4 * image.element_size(),
                                                           y,
                                                           image.data_ptr(),
                                                           w * 4 * image.element_size(),  # differently sized
                                                           w * 4 * image.element_size(),  # rgba, should do a composition first
                                                           h,
                                                           kind,
                                                           torch.cuda.current_stream().cuda_stream))
        CHECK_CUDART_ERROR(cudart.cudaGraphicsUnmapResources(1, self.cu_tex, torch.cuda.current_stream().cuda_stream))

    def upload_to_texture(self, ptr: np.ndarray, x: int = 0, y: int = 0, w: int = 0, h: int = 0):
        """Slow upload path: copy through host memory with glTexSubImage2D."""
        w = w or self.W
        h = h or self.H
        if isinstance(ptr, torch.Tensor):
            ptr = ptr.detach().cpu().numpy()  # slow sync and copy operation # MARK: SYNC
        gl.glBindTexture(gl.GL_TEXTURE_2D, self.tex)
        gl.glTexSubImage2D(gl.GL_TEXTURE_2D, 0, x, y, w, h, gl.GL_RGBA, gl.GL_UNSIGNED_BYTE, ptr[y:h, x:w])  # to gpu, might slow down?

    @property
    def verts_data(self):  # a heavy copy operation
        verts = self.verts.ravel().detach().cpu().numpy()  # MARK: Maybe sync
        verts = np.asarray(verts, dtype=np.float32, order='C')
        return verts

    def render(self, camera: Camera = None):
        # Camera is unused: the quad simply presents its texture
        self.draw()  # no uploading needed

    def draw(self, x: int = 0, y: int = 0, w: int = 0, h: int = 0):
        """
        Draw the texture as a textured quad (or blit when use_quad_draw is off).
        This respects the OpenGL convention of lower left corners.
        """
        if not self.use_quad_draw:
            self.blit(x, y, w, h)
            return

        w = w or self.W
        h = h or self.H
        _, _, W, H = gl.glGetIntegerv(gl.GL_VIEWPORT)  # remember full viewport for restore
        gl.glViewport(x, y, w, h)
        gl.glScissor(x, y, w, h)  # only render in this small region of the viewport
        gl.glUseProgram(self.quad_program)  # use a different program
        gl.glActiveTexture(gl.GL_TEXTURE0)
        gl.glBindTexture(gl.GL_TEXTURE_2D, self.tex)
        gl.glBindVertexArray(self.vao)
        gl.glDrawArrays(gl.GL_TRIANGLE_STRIP, 0, len(self.verts))
        gl.glBindVertexArray(0)

        # Some house keepings: restore viewport and scissor
        gl.glViewport(0, 0, W, H)
        gl.glScissor(0, 0, W, H)

    def blit(self, x: int = 0, y: int = 0, w: int = 0, h: int = 0):
        """
        Blit the texture's FBO onto the current draw framebuffer.
        This respects the OpenGL convention of lower left corners.
        """
        w = w or self.W
        h = h or self.H
        old = gl.glGetInteger(gl.GL_READ_FRAMEBUFFER_BINDING)
        gl.glBindFramebuffer(gl.GL_READ_FRAMEBUFFER, self.fbo)  # write buffer defaults to 0
        gl.glBlitFramebuffer(x, y, x + w, y + h,  # the height is flipped
                             x, y, x + w, y + h,  # the height is flipped
                             gl.GL_COLOR_BUFFER_BIT, gl.GL_NEAREST)
        gl.glBindFramebuffer(gl.GL_READ_FRAMEBUFFER, old)
class UQuad(Mesh):
    """
    Responsible for initializing textures with a single value
    or blitting a texture to a framebuffer (possibly better done with blit instead of quad drawing)
    Effectively clearing the texture for real, see: https://stackoverflow.com/questions/37335281/is-glcleargl-color-buffer-bit-preferred-before-a-whole-frame-buffer-overwritte
    """

    def __init__(self):
        self.n_blit_values = 3  # number of value / texture slots the shader exposes
        self.vert_sizes = [3]  # only position
        self.vert_gl_types = [gl.GL_FLOAT]  # only position
        self.max_verts, self.max_faces = 0, 0
        # Full-screen triangle strip in NDC
        self.verts = torch.as_tensor([[-1., -1., 0.5],
                                      [1., -1., 0.5],
                                      [-1., 1., 0.5],
                                      [1., 1., 0.5],])
        self.compile_shaders()
        self.uniforms = dotdict()  # uniform values
        self.use_gl_programs(self.quad_program)
        self.update_gl_buffers()

    @property
    def n_faces_bytes(self): return 0  # drawn with glDrawArrays, no element buffer

    @property
    def verts_data(self):  # a heavy copy operation
        verts = self.verts.ravel().detach().cpu().numpy()  # MARK: Maybe sync
        verts = np.asarray(verts, dtype=np.float32, order='C')
        return verts

    def use_gl_programs(self, program: shaders.ShaderProgram):
        """Cache uniform locations of ``program`` and bind its samplers to units 0..n-1."""
        for i in range(self.n_blit_values):
            self.uniforms[f'value{i}'] = gl.glGetUniformLocation(program, f'value{i}')
        for i in range(self.n_blit_values):
            self.uniforms[f'use_tex{i}'] = gl.glGetUniformLocation(program, f'use_tex{i}')
        # BUGFIX: was `gl.glUseProgram(self.program)` -- no `program` attribute is
        # ever defined on this class (only `quad_program`), which raised
        # AttributeError; use the method's argument instead, matching the call
        # site `self.use_gl_programs(self.quad_program)` in __init__
        gl.glUseProgram(program)  # must be active before glUniform1i below
        for i in range(self.n_blit_values):
            self.uniforms[f'tex{i}'] = gl.glGetUniformLocation(program, f'tex{i}')
            gl.glUniform1i(self.uniforms[f'tex{i}'], i)

    def upload_gl_uniforms(self, values: List[List[float]], use_texs: List[bool]):
        """Upload per-slot clear values and texture-enable flags to the active program."""
        for i, v in enumerate(values):
            v = vec4(v)  # HACK: Hold the reference for this upload
            gl.glUniform4fv(self.uniforms[f'value{i}'], 1, glm.value_ptr(v))  # as float array
        for i, v in enumerate(use_texs):
            gl.glUniform1i(self.uniforms[f'use_tex{i}'], v)

    def compile_shaders(self):
        """Compile the uquad shader pair used for value/texture blitting."""
        try:
            self.quad_program = shaders.compileProgram(
                shaders.compileShader(load_shader_source('uquad.vert'), gl.GL_VERTEX_SHADER),
                shaders.compileShader(load_shader_source('uquad.frag'), gl.GL_FRAGMENT_SHADER)
            )
        except Exception as e:
            # Unescape the GLSL error log so compiler messages are readable
            print(str(e).encode('utf-8').decode('unicode_escape'))
            raise e

    def draw(self, values: List[List[float]] = [], use_texs=[]):
        """
        This function will render 'value' to the currently bound framebuffer, up to six outputs
        """
        # Save and restore the caller's program and VAO bindings
        old_prog = gl.glGetIntegerv(gl.GL_CURRENT_PROGRAM)
        old_vao = gl.glGetIntegerv(gl.GL_VERTEX_ARRAY_BINDING)
        gl.glUseProgram(self.quad_program)
        self.upload_gl_uniforms(values, use_texs)  # should be a noop

        # Prepare to render to textures
        gl.glBindVertexArray(self.vao)
        gl.glDrawArrays(gl.GL_TRIANGLE_STRIP, 0, len(self.verts))  # number of vertices
        gl.glBindVertexArray(old_vao)
        gl.glUseProgram(old_prog)
class DQuad(UQuad):
    """A UQuad variant that draws with the depth test forced to always pass."""

    def compile_shaders(self):
        """Compile the dquad shader pair (depth-writing variant of uquad)."""
        try:
            vert = shaders.compileShader(load_shader_source('dquad.vert'), gl.GL_VERTEX_SHADER)
            frag = shaders.compileShader(load_shader_source('dquad.frag'), gl.GL_FRAGMENT_SHADER)
            self.quad_program = shaders.compileProgram(vert, frag)
        except Exception as e:
            # Unescape the GLSL error log so compiler messages are readable
            print(str(e).encode('utf-8').decode('unicode_escape'))
            raise e

    def draw(self, values: List[List[float]] = [], use_texs=[]):
        """Draw like UQuad, but with GL_ALWAYS depth func (restored afterwards)."""
        previous = gl.glGetIntegerv(gl.GL_DEPTH_FUNC)
        gl.glDepthFunc(gl.GL_ALWAYS)  # make the full-screen quad overwrite regardless of depth
        super().draw(values, use_texs)
        gl.glDepthFunc(previous)
def hardware_rendering_framebuffer(H: int, W: int, gl_tex_dtype=gl.GL_RGBA16F):
    """Create an offscreen FBO with a color attachment, two float depth
    attachments (upper/lower) and a 24-bit depth buffer.

    Returns:
        (color_buffer, depth_upper, depth_lower, depth_attach, fbo) GL handles.

    Raises:
        RuntimeError: If the framebuffer is incomplete.
    """
    # Prepare for write frame buffers
    color_buffer = gl.glGenTextures(1)
    depth_upper = gl.glGenTextures(1)
    depth_lower = gl.glGenTextures(1)
    depth_attach = gl.glGenTextures(1)
    fbo = gl.glGenFramebuffers(1)  # generate 1 framebuffer, store reference in fbo

    # Init the texture (call the resizing function), will simply allocate empty memory
    # The internal format describes how the texture shall be stored in the GPU. The format describes how the format of your pixel data in client memory (together with the type parameter).
    gl.glBindTexture(gl.GL_TEXTURE_2D, color_buffer)
    gl.glTexImage2D(gl.GL_TEXTURE_2D, 0, gl_tex_dtype, W, H, 0, gl.GL_RGBA, gl.GL_FLOAT, ctypes.c_void_p(0))  # 16 * 4
    gl.glTexParameteri(gl.GL_TEXTURE_2D, gl.GL_TEXTURE_MAG_FILTER, gl.GL_NEAREST)
    gl.glTexParameteri(gl.GL_TEXTURE_2D, gl.GL_TEXTURE_MIN_FILTER, gl.GL_NEAREST)

    gl.glBindTexture(gl.GL_TEXTURE_2D, depth_upper)
    gl.glTexImage2D(gl.GL_TEXTURE_2D, 0, gl.GL_R32F, W, H, 0, gl.GL_RED, gl.GL_FLOAT, ctypes.c_void_p(0))  # 32
    gl.glTexParameteri(gl.GL_TEXTURE_2D, gl.GL_TEXTURE_MAG_FILTER, gl.GL_NEAREST)
    gl.glTexParameteri(gl.GL_TEXTURE_2D, gl.GL_TEXTURE_MIN_FILTER, gl.GL_NEAREST)

    gl.glBindTexture(gl.GL_TEXTURE_2D, depth_lower)
    gl.glTexImage2D(gl.GL_TEXTURE_2D, 0, gl.GL_R32F, W, H, 0, gl.GL_RED, gl.GL_FLOAT, ctypes.c_void_p(0))  # 32
    gl.glTexParameteri(gl.GL_TEXTURE_2D, gl.GL_TEXTURE_MAG_FILTER, gl.GL_NEAREST)
    gl.glTexParameteri(gl.GL_TEXTURE_2D, gl.GL_TEXTURE_MIN_FILTER, gl.GL_NEAREST)

    gl.glBindTexture(gl.GL_TEXTURE_2D, depth_attach)
    gl.glTexImage2D(gl.GL_TEXTURE_2D, 0, gl.GL_DEPTH_COMPONENT24, W, H, 0, gl.GL_DEPTH_COMPONENT, gl.GL_UNSIGNED_INT, ctypes.c_void_p(0))  # 32
    gl.glTexParameteri(gl.GL_TEXTURE_2D, gl.GL_TEXTURE_MAG_FILTER, gl.GL_NEAREST)
    gl.glTexParameteri(gl.GL_TEXTURE_2D, gl.GL_TEXTURE_MIN_FILTER, gl.GL_NEAREST)

    # Bind texture to fbo
    gl.glBindFramebuffer(gl.GL_FRAMEBUFFER, fbo)
    gl.glFramebufferTexture2D(gl.GL_FRAMEBUFFER, gl.GL_COLOR_ATTACHMENT0, gl.GL_TEXTURE_2D, color_buffer, 0)  # location 0
    gl.glFramebufferTexture2D(gl.GL_FRAMEBUFFER, gl.GL_COLOR_ATTACHMENT1, gl.GL_TEXTURE_2D, depth_upper, 0)  # location 1
    gl.glFramebufferTexture2D(gl.GL_FRAMEBUFFER, gl.GL_COLOR_ATTACHMENT2, gl.GL_TEXTURE_2D, depth_lower, 0)  # location 2
    gl.glFramebufferTexture2D(gl.GL_FRAMEBUFFER, gl.GL_DEPTH_ATTACHMENT, gl.GL_TEXTURE_2D, depth_attach, 0)
    gl.glDrawBuffers(3, [gl.GL_COLOR_ATTACHMENT0, gl.GL_COLOR_ATTACHMENT1, gl.GL_COLOR_ATTACHMENT2])

    # Check framebuffer status
    if gl.glCheckFramebufferStatus(gl.GL_FRAMEBUFFER) != gl.GL_FRAMEBUFFER_COMPLETE:
        log(red('Framebuffer not complete, exiting...'))
        raise RuntimeError('Incomplete framebuffer')

    # Restore the original state (default framebuffer)
    gl.glBindFramebuffer(gl.GL_FRAMEBUFFER, 0)
    return color_buffer, depth_upper, depth_lower, depth_attach, fbo
def hareward_peeling_framebuffer(H: int, W: int):
    """Create an offscreen FBO for hardware depth peeling: an integer index
    attachment, a float depth attachment and a 24-bit depth buffer.

    NOTE(review): 'hareward' is presumably a typo for 'hardware', but the name
    is public API and is kept as-is.

    Returns:
        (index_buffer, depth_lower, depth_attach, fbo) GL handles.

    Raises:
        RuntimeError: If the framebuffer is incomplete.
    """
    # Prepare for write frame buffers
    index_buffer = gl.glGenTextures(1)
    depth_lower = gl.glGenTextures(1)
    depth_attach = gl.glGenTextures(1)
    fbo = gl.glGenFramebuffers(1)  # generate 1 framebuffer, store reference in fbo

    # Init the texture (call the resizing function), will simply allocate empty memory
    # The internal format describes how the texture shall be stored in the GPU. The format describes how the format of your pixel data in client memory (together with the type parameter).
    gl.glBindTexture(gl.GL_TEXTURE_2D, index_buffer)
    gl.glTexImage2D(gl.GL_TEXTURE_2D, 0, gl.GL_R32I, W, H, 0, gl.GL_RED_INTEGER, gl.GL_INT, ctypes.c_void_p(0))  # 32
    gl.glTexParameteri(gl.GL_TEXTURE_2D, gl.GL_TEXTURE_MAG_FILTER, gl.GL_NEAREST)
    gl.glTexParameteri(gl.GL_TEXTURE_2D, gl.GL_TEXTURE_MIN_FILTER, gl.GL_NEAREST)

    gl.glBindTexture(gl.GL_TEXTURE_2D, depth_lower)
    gl.glTexImage2D(gl.GL_TEXTURE_2D, 0, gl.GL_R32F, W, H, 0, gl.GL_RED, gl.GL_FLOAT, ctypes.c_void_p(0))  # 32
    gl.glTexParameteri(gl.GL_TEXTURE_2D, gl.GL_TEXTURE_MAG_FILTER, gl.GL_NEAREST)
    gl.glTexParameteri(gl.GL_TEXTURE_2D, gl.GL_TEXTURE_MIN_FILTER, gl.GL_NEAREST)

    gl.glBindTexture(gl.GL_TEXTURE_2D, depth_attach)
    gl.glTexImage2D(gl.GL_TEXTURE_2D, 0, gl.GL_DEPTH_COMPONENT24, W, H, 0, gl.GL_DEPTH_COMPONENT, gl.GL_FLOAT, ctypes.c_void_p(0))  # 32
    gl.glTexParameteri(gl.GL_TEXTURE_2D, gl.GL_TEXTURE_MAG_FILTER, gl.GL_NEAREST)
    gl.glTexParameteri(gl.GL_TEXTURE_2D, gl.GL_TEXTURE_MIN_FILTER, gl.GL_NEAREST)

    # Bind texture to fbo
    gl.glBindFramebuffer(gl.GL_FRAMEBUFFER, fbo)
    gl.glFramebufferTexture2D(gl.GL_FRAMEBUFFER, gl.GL_COLOR_ATTACHMENT0, gl.GL_TEXTURE_2D, index_buffer, 0)  # location 0
    gl.glFramebufferTexture2D(gl.GL_FRAMEBUFFER, gl.GL_COLOR_ATTACHMENT1, gl.GL_TEXTURE_2D, depth_lower, 0)  # location 1
    gl.glFramebufferTexture2D(gl.GL_FRAMEBUFFER, gl.GL_DEPTH_ATTACHMENT, gl.GL_TEXTURE_2D, depth_attach, 0)
    gl.glDrawBuffers(2, [gl.GL_COLOR_ATTACHMENT0, gl.GL_COLOR_ATTACHMENT1])

    # Check framebuffer status
    if gl.glCheckFramebufferStatus(gl.GL_FRAMEBUFFER) != gl.GL_FRAMEBUFFER_COMPLETE:
        log(red('Framebuffer not complete, exiting...'))
        raise RuntimeError('Incomplete framebuffer')

    # Restore the original state (default framebuffer)
    gl.glBindFramebuffer(gl.GL_FRAMEBUFFER, 0)
    return index_buffer, depth_lower, depth_attach, fbo
class Gaussian(Mesh):
def __init__(self,
filename: str = 'assets/meshes/zju3dv.npz',
gaussian_cfg: dotdict = dotdict(),
quad_cfg: dotdict = dotdict(),
view_depth: bool = False, # show depth or show color
dpt_cm: str = 'linear',
H: int = 1024,
W: int = 1024,
**kwargs,
):
# Import Gaussian Model
from easyvolcap.engine.registry import call_from_cfg
from easyvolcap.utils.gaussian_utils import GaussianModel
# Housekeeping
super().__init__(**kwargs)
self.name = split(filename)[-1]
# Init Gaussian related models, for now only the first gaussian model is supported
if filename.endswith('.npz') or filename.endswith('.pt') or filename.endswith('.pth'):
# Load from GaussianTSampler
pretrained, _ = load_pretrained(filename) # loaded model and updated path (maybe)
pretrained = pretrained.model
state_dict = dotdict()
for k, v in pretrained.items():
if k.startswith('sampler.pcds.0'):
state_dict[k.replace('sampler.pcds.0.', '')] = v
# Load the parameters into the gaussian model
self.gaussian_model: GaussianModel = call_from_cfg(GaussianModel, gaussian_cfg) # init empty gaussian model
self.gaussian_model.load_state_dict(state_dict) # load the first gaussian model
self.gaussian_model.cuda() # move the parameters to GPU
elif filename.endswith('.ply'):
# Load raw GaussianModel
# pts, rgb, norm, scalars = load_pts(filename)
self.gaussian_model: GaussianModel = call_from_cfg(GaussianModel, gaussian_cfg) # init empty gaussian model
self.gaussian_model.load_ply(filename) # load the original gaussian model
self.gaussian_model.cuda()
else:
raise NotImplementedError
# Init rendering quad
self.quad: Quad = call_from_cfg(Quad, quad_cfg, H=H, W=W)
# Other configurations
self.view_depth = view_depth
self.dpt_cm = dpt_cm
del self.shade_flat
del self.point_radius
del self.render_normal
# Disabling initialization
def load_from_file(self, *args, **kwargs):
pass
def load_from_data(self, *args, **kwargs):
pass
def compile_shaders(self):
pass
def update_gl_buffers(self):
pass
def resize_textures(self, H: int, W: int):
self.quad.resize_textures(H, W)
# The actual rendering function
@torch.no_grad()
def render(self, camera: Camera):
# Perform actual gaussian rendering
batch = add_batch(to_cuda(camera.to_batch()))
rgb, acc, dpt = self.gaussian_model.render(batch)
if self.view_depth:
rgba = torch.cat([depth_curve_fn(dpt, cm=self.dpt_cm), acc], dim=-1) # H, W, 4
else:
rgba = torch.cat([rgb, acc], dim=-1) # H, W, 4
# Copy rendered tensor to screen
rgba = (rgba.clip(0, 1) * 255).type(torch.uint8).flip(0) # transform
self.quad.copy_to_texture(rgba)
self.quad.render()
def render_imgui(mesh, viewer: 'VolumetricVideoViewer', batch: dotdict):
    """Draw a toggle button switching between color and depth visualization."""
    super().render_imgui(viewer, batch)
    from imgui_bundle import imgui
    from easyvolcap.utils.imgui_utils import push_button_color, pop_button_color
    i = batch.i
    showing_depth = mesh.view_depth
    imgui.same_line()
    # Green button while showing color, purple while showing depth
    push_button_color(0x8855aaff if showing_depth else 0x55cc33ff)
    label = f' Depth ##{i}' if showing_depth else f'Color##{i}'
    if imgui.button(label):
        mesh.view_depth = not showing_depth
    pop_button_color()
class PointSplat(Gaussian, nn.Module):
    """Viewer mesh that splats a raw point cloud with per-point radius and alpha.

    Loads a .ply whose scalar fields provide `alpha` and `radius`, keeps the
    point attributes as buffers, renders them through
    `GaussianTSampler.render_radius` (bound onto this instance), and displays
    the result on a fullscreen `Quad`.

    NOTE(review): inherits from `Gaussian` but deliberately skips its __init__
    via the `super(Gaussian, self)` call below.
    """

    def __init__(self,
                 filename: str = 'assets/meshes/zju3dv.ply',  # point cloud with alpha/radius scalars
                 quad_cfg: dotdict = dotdict(),  # forwarded to the display Quad
                 view_depth: bool = False, # show depth or show color
                 dpt_cm: str = 'linear',  # colormap passed to depth_curve_fn when showing depth
                 H: int = 1024,  # initial quad texture height
                 W: int = 1024,  # initial quad texture width
                 **kwargs,
                 ):
        # Import Gaussian Model
        from easyvolcap.engine.registry import call_from_cfg
        from easyvolcap.utils.data_utils import load_pts
        from easyvolcap.utils.net_utils import make_buffer
        from easyvolcap.models.samplers.gaussiant_sampler import GaussianTSampler
        # Housekeeping: skip Gaussian.__init__, initialize the next class in the MRO instead
        super(Gaussian, self).__init__(**kwargs)
        self.name = split(filename)[-1]
        self.render_radius = MethodType(GaussianTSampler.render_radius, self) # override the method
        # Init PointSplat related models, for now only the first gaussian model is supported
        if filename.endswith('.ply'):
            # Load raw point cloud; assumes scalar fields contain `alpha` and `radius` — verify against the asset
            pts, rgb, norms, scalars = load_pts(filename)
            occ, rad = scalars.alpha, scalars.radius
            self.pts = make_buffer(torch.from_numpy(pts)) # N, 3
            self.rgb = make_buffer(torch.from_numpy(rgb)) # N, 3
            self.occ = make_buffer(torch.from_numpy(occ)) # N, 1
            self.rad = make_buffer(torch.from_numpy(rad)) # N, 1
        else:
            raise NotImplementedError
        # Init rendering quad
        self.quad: Quad = call_from_cfg(Quad, quad_cfg, H=H, W=W)
        self.cuda() # move to cuda
        # Other configurations
        self.view_depth = view_depth
        self.dpt_cm = dpt_cm
        self.radius_mult = 1.0  # interactive radius multiplier (see render_imgui)
        self.alpha_mult = 1.0  # interactive alpha multiplier (see render_imgui)

    # The actual rendering function
    @torch.no_grad()
    def render(self, camera: Camera):
        """Render the point cloud from `camera` and display it on the quad."""
        # Perform actual point splatting
        batch = add_batch(to_cuda(camera.to_batch()))
        sh0 = rgb2sh0(self.rgb[..., None])  # colors as 0th-order SH coefficients
        xyz = self.pts
        occ = (self.occ * self.alpha_mult).clip(0, 1)  # apply interactive alpha scale, keep valid range
        rad = self.rad * self.radius_mult  # apply interactive radius scale
        rgb, acc, dpt = self.render_radius(*add_batch([xyz, sh0, rad, occ]), batch)
        rgb, acc, dpt = rgb[0], acc[0], dpt[0]  # drop the batch dimension
        if self.view_depth:
            rgba = torch.cat([depth_curve_fn(dpt, cm=self.dpt_cm), acc], dim=-1) # H, W, 4
        else:
            rgba = torch.cat([rgb, acc], dim=-1) # H, W, 4
        # Copy rendered tensor to screen (uint8, flipped vertically for OpenGL)
        rgba = (rgba.clip(0, 1) * 255).type(torch.uint8).flip(0) # transform
        self.quad.copy_to_texture(rgba)
        self.quad.render()

    def render_imgui(mesh, viewer: 'VolumetricVideoViewer', batch: dotdict):
        """Draw sliders controlling the point radius and alpha multipliers."""
        super().render_imgui(viewer, batch)
        i = batch.i  # index used to disambiguate imgui widget ids (##{i})
        from imgui_bundle import imgui
        mesh.radius_mult = imgui.slider_float(f'Point radius multiplier##{i}', mesh.radius_mult, 0.1, 3.0)[1] # 0.1mm
        mesh.alpha_mult = imgui.slider_float(f'Point alpha multiplier##{i}', mesh.alpha_mult, 0.1, 3.0)[1] # 0.1mm
class Splat(Mesh): # FIXME: Not rendering, need to debug this
    """Multi-pass point-splatting renderer built on double-buffered framebuffers.

    Points are drawn as GL_POINTS with per-vertex color, radius and alpha
    (`vert_sizes` defaults to [3, 3, 1, 1]). `rasterize_volume` performs
    multi-pass (depth-peeling style) compositing with the `splat` shader
    program, while `rasterize_solid` draws a single pass with the `usplat`
    program; `show` blits the selected framebuffer onto the screen.
    """

    def __init__(self,
                 *args,
                 H: int = 512,  # initial (and maximum) render texture height
                 W: int = 512,  # initial (and maximum) render texture width
                 tex_dtype: str = torch.half,  # torch.dtype or its string name; selects GL_RGBA16F vs GL_RGBA32F
                 pts_per_pix: int = 24, # render less for the static background since we're only doing a demo
                 blit_last_ratio: float = 0.0,  # fraction of passes after which previous results are blitted forward
                 volume_rendering: bool = True,  # choose rasterize_volume (True) or rasterize_solid (False)
                 radii_mult_volume: float = 1.00, # 2 / 3 is the right integration, but will leave holes, 1.0 will make it bloat, 0.85 looks visually better
                 radii_mult_solid: float = 0.85, # 2 / 3 is the right integration, but will leave holes, 1.0 will make it bloat, 0.85 looks visually better
                 point_smooth: bool = True,  # shader toggle uploaded as a uniform
                 alpha_blending: bool = True,  # shader toggle uploaded as a uniform
                 **kwargs):
        kwargs = dotdict(kwargs)
        kwargs.vert_sizes = kwargs.get('vert_sizes', [3, 3, 1, 1])  # xyz (3) + rgb (3) + radius (1) + alpha (1) per vertex
        # Resolve the texture dtype (accepts either a torch.dtype or its string name)
        self.tex_dtype = getattr(torch, tex_dtype) if isinstance(tex_dtype, str) else tex_dtype
        self.gl_tex_dtype = gl.GL_RGBA16F if self.tex_dtype == torch.half else gl.GL_RGBA32F
        super().__init__(*args, **kwargs)
        self.use_gl_program(self.splat_program)
        self.pts_per_pix = pts_per_pix
        self.blit_last_ratio = blit_last_ratio
        self.volume_rendering = volume_rendering
        self.radii_mult_volume = radii_mult_volume
        self.radii_mult_solid = radii_mult_solid
        self.point_smooth = point_smooth
        self.alpha_blending = alpha_blending
        self.max_H, self.max_W = H, W  # maximum texture extent allocated so far
        self.H, self.W = H, W
        self.init_textures()
        from easyvolcap.models.samplers.gaussiant_sampler import GaussianTSampler
        self.render_radius = MethodType(GaussianTSampler.render_radius, self) # override the method

    @property
    def verts_data(self): # a heavy copy operation
        """Flat float32 numpy view of [verts | colors | radius | alpha] for VBO upload."""
        verts = torch.cat([self.verts, self.colors, self.radius, self.alpha], dim=-1).ravel().numpy() # MARK: Maybe sync
        verts = np.asarray(verts, dtype=np.float32, order='C') # this should only be invoked once
        return verts

    def use_gl_program(self, program: shaders.ShaderProgram):
        """Activate `program` and (re)query the uniform locations used for splatting."""
        super().use_gl_program(program)
        # Special controlling variables
        self.uniforms.alpha_blending = gl.glGetUniformLocation(program, f'alpha_blending')
        self.uniforms.point_smooth = gl.glGetUniformLocation(program, f'point_smooth')
        self.uniforms.radii_mult = gl.glGetUniformLocation(program, f'radii_mult')
        # Special rendering variables
        self.uniforms.pass_index = gl.glGetUniformLocation(program, f'pass_index')
        self.uniforms.read_color = gl.glGetUniformLocation(program, f'read_color')
        self.uniforms.read_upper = gl.glGetUniformLocation(program, f'read_upper')
        self.uniforms.read_lower = gl.glGetUniformLocation(program, f'read_lower')
        # Bind the sampler uniforms to fixed texture units 0/1/2 (matching glBindTextures in rasterize_volume)
        gl.glUniform1i(self.uniforms.read_color, 0)
        gl.glUniform1i(self.uniforms.read_upper, 1)
        gl.glUniform1i(self.uniforms.read_lower, 2)

    def compile_shaders(self):
        """Compile and link the multi-pass (`splat`) and single-pass (`usplat`) programs."""
        try:
            self.splat_program = shaders.compileProgram(
                shaders.compileShader(load_shader_source('splat.vert'), gl.GL_VERTEX_SHADER),
                shaders.compileShader(load_shader_source('splat.frag'), gl.GL_FRAGMENT_SHADER)
            )
            self.usplat_program = shaders.compileProgram(
                shaders.compileShader(load_shader_source('usplat.vert'), gl.GL_VERTEX_SHADER),
                shaders.compileShader(load_shader_source('usplat.frag'), gl.GL_FRAGMENT_SHADER)
            )
        except Exception as e:
            # GLSL compiler errors arrive with escaped newlines; unescape for readability
            print(str(e).encode('utf-8').decode('unicode_escape'))
            raise e

    def rasterize(self, camera: Camera = None, length: int = None):
        """Dispatch to volume or solid rasterization; returns the fbo holding the result."""
        if self.volume_rendering:
            return self.rasterize_volume(camera, length)
        else:
            return self.rasterize_solid(camera, length)

    def rasterize_volume(self, camera: Camera = None, length: int = None): # some implementation requires no uploading of camera
        """
        Let's try to analyze what's happening here
        We want to:
            1. Render the front-most color to color buffer
            2. UNUSED: Render the front-most depth + some large margin to a depth upper limit buffer
            3. Render the front-most depth + some small margin to a depth lower limit buffer
            4. Switch between the render target and sampling target
            5. Use the previous rendered color, depth upper limit and lower limit as textures
            6. When current depth is smaller than the lower limit, we've already rendered this in the first pass, discard
            7. UNUSED: When current depth is larger than the upper limit, it will probably not contribute much to final results, discard
            8. UNUSED: When the accumulated opacity reaches almost 1, subsequent rendering would not have much effect, return directly
            9. When the point coordinates falls out of bound of the current sphere, discard (this could be optimized with finetuning in rectangle)
            10. Finally, try to render the final color using the volume rendering equation (by accumulating alpha values from front to back)

        Required cleanup checklist:
            1. Before rendering the first pass, we need to clear the color and depth texture, this is not done, need to check multi-frame accumulation on this
            2. Before rendering next pass, it's also recommended to blit color and depth values from previous pass to avoid assign them in the shader
        """
        front_fbo, front_color, front_upper, front_lower = self.read_fbo, self.read_color, self.read_upper, self.read_lower
        back_fbo, back_color, back_upper, back_lower = self.write_fbo, self.write_color, self.write_upper, self.write_lower
        # Only clear the output once
        gl.glBindFramebuffer(gl.GL_FRAMEBUFFER, front_fbo) # for offscreen rendering to textures
        gl.glClearBufferfv(gl.GL_COLOR, 0, [0.0, 0.0, 0.0, 0.0])
        # gl.glClearBufferfv(gl.GL_COLOR, 1, [1e9])
        gl.glClearBufferfv(gl.GL_COLOR, 2, [0.0, 0.0, 0.0, 0.0])
        gl.glClearBufferfv(gl.GL_DEPTH, 0, [1e9]) # this is for depth testing
        # Only clear the output once
        gl.glBindFramebuffer(gl.GL_FRAMEBUFFER, back_fbo) # for offscreen rendering to textures
        gl.glClearBufferfv(gl.GL_COLOR, 0, [0.0, 0.0, 0.0, 0.0])
        # gl.glClearBufferfv(gl.GL_COLOR, 1, [1e9])
        gl.glClearBufferfv(gl.GL_COLOR, 2, [0.0, 0.0, 0.0, 0.0])
        gl.glClearBufferfv(gl.GL_DEPTH, 0, [1e9]) # this is for depth testing
        # Prepare for the actual rendering, previous operations could rebind the vertex array
        self.use_gl_program(self.splat_program) # TODO: Implement this with a mapping and a lazy modification
        self.upload_gl_uniforms(camera)
        gl.glBindVertexArray(self.vao)
        # The actual multi pass rendering process happens here
        for pass_index in range(self.pts_per_pix):
            # Swap buffers to render the next pass (double-buffered read/write ping-pong)
            front_fbo, front_color, front_upper, front_lower, back_fbo, back_color, back_upper, back_lower = \
                back_fbo, back_color, back_upper, back_lower, front_fbo, front_color, front_upper, front_lower
            # Bind the read texture and bind the write render frame buffer
            gl.glBindTextures(0, 3, [front_color, front_upper, front_lower])
            # Move content from write_fbo to screen fbo
            if pass_index > self.pts_per_pix * self.blit_last_ratio: # no blitting almost has no effect on the rendering
                gl.glBindFramebuffer(gl.GL_READ_FRAMEBUFFER, front_fbo)
                gl.glBindFramebuffer(gl.GL_DRAW_FRAMEBUFFER, back_fbo)
                # Blit every color attachment of the previous pass into the current target
                for i in range(3):
                    gl.glReadBuffer(gl.GL_COLOR_ATTACHMENT0 + i)
                    gl.glDrawBuffer(gl.GL_COLOR_ATTACHMENT0 + i)
                    gl.glBlitFramebuffer(0, 0, self.W, self.H, 0, 0, self.W, self.H, gl.GL_COLOR_BUFFER_BIT, gl.GL_NEAREST)
                # Restore MRT draw buffers after the blit changed the single draw buffer
                gl.glDrawBuffers(3, [gl.GL_COLOR_ATTACHMENT0, gl.GL_COLOR_ATTACHMENT1, gl.GL_COLOR_ATTACHMENT2])
            # Clear depth buffer for depth testing
            gl.glBindFramebuffer(gl.GL_FRAMEBUFFER, back_fbo) # for offscreen rendering to textures
            gl.glClearBufferfv(gl.GL_DEPTH, 0, [1e9]) # this is for depth testing
            gl.glUniform1i(self.uniforms.pass_index, pass_index) # pass index
            # The actual drawing pass with render things out to the write_fbo
            gl.glDrawArrays(gl.GL_POINTS, 0, length if length is not None else len(self.verts)) # number of vertices
        # Restore states of things
        gl.glBindFramebuffer(gl.GL_FRAMEBUFFER, 0)
        gl.glBindVertexArray(0)
        return back_fbo

    def upload_gl_uniforms(self, camera: Camera):
        """Upload camera uniforms plus the splat-specific shader toggles."""
        super().upload_gl_uniforms(camera)
        gl.glUniform1i(self.uniforms.point_smooth, self.point_smooth)
        gl.glUniform1i(self.uniforms.alpha_blending, self.alpha_blending)
        if self.volume_rendering:
            gl.glUniform1f(self.uniforms.radii_mult, self.radii_mult_volume) # radii mult
        else:
            gl.glUniform1f(self.uniforms.radii_mult, self.radii_mult_solid) # radii mult

    def rasterize_solid(self, camera: Camera = None, length: int = None):
        """Single-pass rasterization with the `usplat` program; returns the written fbo."""
        # Only clear the output once
        back_fbo = self.write_fbo
        gl.glBindFramebuffer(gl.GL_FRAMEBUFFER, back_fbo) # for offscreen rendering to textures
        gl.glClearBufferfv(gl.GL_COLOR, 0, [0.0, 0.0, 0.0, 0.0]) # color
        # gl.glClearBufferfv(gl.GL_COLOR, 1, [0.0]) # depth upper
        gl.glClearBufferfv(gl.GL_COLOR, 2, [0.0, 0.0, 0.0, 0.0]) # depth lower
        gl.glClearBufferfv(gl.GL_DEPTH, 0, [1e9]) # this is for depth testing
        # Prepare for the actual rendering, previous operations could rebind the vertex array
        self.use_gl_program(self.usplat_program)
        self.upload_gl_uniforms(camera)
        gl.glUniform1i(self.uniforms.pass_index, 0) # pass index
        gl.glBindVertexArray(self.vao)
        # The actual drawing pass with render things out to the write_fbo
        gl.glDrawArrays(gl.GL_POINTS, 0, length if length is not None else len(self.verts)) # number of vertices
        # Restore states of things
        gl.glBindFramebuffer(gl.GL_FRAMEBUFFER, 0)
        gl.glBindVertexArray(0)
        return back_fbo

    def show(self, back_fbo: int):
        """Blit the first color attachment of `back_fbo` onto the default (screen) framebuffer."""
        # Move content from write_fbo to screen fbo
        gl.glBindFramebuffer(gl.GL_READ_FRAMEBUFFER, back_fbo)
        gl.glBindFramebuffer(gl.GL_DRAW_FRAMEBUFFER, 0) # render the final content onto screen
        gl.glReadBuffer(gl.GL_COLOR_ATTACHMENT0)
        gl.glBlitFramebuffer(0, 0, self.W, self.H, 0, 0, self.W, self.H, gl.GL_COLOR_BUFFER_BIT, gl.GL_NEAREST)
        gl.glBindFramebuffer(gl.GL_FRAMEBUFFER, 0)

    def render(self, camera):
        """Rasterize with `camera` and present the result; skipped when not visible."""
        if not self.visible: return
        self.show(self.rasterize(camera))

    def resize_textures(self, H: int, W: int): # analogy to update_gl_buffers
        """Track the new render size; re-allocate textures only when the size grows
        beyond the current maximum (with 5% slack to avoid frequent reallocation)."""
        self.H, self.W = H, W
        if self.H > self.max_H or self.W > self.max_W: # max got updated
            self.max_H, self.max_W = max(int(self.H * 1.05), self.max_H), max(int(self.W * 1.05), self.max_W)
            self.init_textures()

    def init_textures(self):
        """(Re)create both double-buffered render framebuffers at the maximum size,
        deleting the previous GL objects first when they exist."""
        if hasattr(self, 'write_fbo'):
            gl.glDeleteFramebuffers(2, [self.write_fbo, self.read_fbo])
            gl.glDeleteTextures(8, [self.write_color, self.write_upper, self.write_lower, self.write_attach, self.read_color, self.read_upper, self.read_lower, self.read_attach])
        self.write_color, self.write_upper, self.write_lower, self.write_attach, self.write_fbo = hardware_rendering_framebuffer(self.max_H, self.max_W, self.gl_tex_dtype)
        self.read_color, self.read_upper, self.read_lower, self.read_attach, self.read_fbo = hardware_rendering_framebuffer(self.max_H, self.max_W, self.gl_tex_dtype)
        log(f'Created texture of h, w: {self.max_H}, {self.max_W}')
class HardwareRendering(Splat):
    """Splat variant that uploads vertex data straight from CUDA tensors via
    CUDA-GL interop and reads the rendered color/depth textures back into
    torch tensors (see `forward`)."""

    def __init__(self,
                 dtype=torch.half,  # vertex buffer dtype: torch.dtype or its string name
                 **kwargs,
                 ):
        # Resolve the vertex buffer dtype and matching GL enum
        self.dtype = getattr(torch, dtype) if isinstance(dtype, str) else dtype
        self.gl_dtype = gl.GL_HALF_FLOAT if self.dtype == torch.half else gl.GL_FLOAT
        kwargs = dotdict(kwargs)
        kwargs.blit_last_ratio = kwargs.get('blit_last_ratio', 0.90)  # by default only blit near the last peeling passes
        kwargs.vert_sizes = kwargs.get('vert_sizes', [3, 3, 1, 1])  # xyz + rgb + radius + alpha
        super().__init__(**kwargs) # verts, color, radius, alpha

    @property
    def verts_data(self): # a heavy copy operation
        """Flat numpy view of [verts | colors | radius | alpha] in `self.dtype` for VBO upload."""
        verts = torch.cat([self.verts, self.colors, self.radius, self.alpha], dim=-1).ravel().numpy() # MARK: Maybe sync
        verts = np.asarray(verts, dtype=torch_dtype_to_numpy_dtype(self.dtype), order='C') # this should only be invoked once
        return verts

    def init_gl_buffers(self, v: int = 0, f: int = 0):
        """(Re)create the GL vertex buffers and register the VBO with CUDA for writing.

        Raises:
            RuntimeError: when the system does not support CUDA-GL interop
                (logged with a hint before re-raising).
        """
        from cuda import cudart
        # A previously registered VBO must be unregistered before the GL buffer is recreated
        if hasattr(self, 'cu_vbo'):
            CHECK_CUDART_ERROR(cudart.cudaGraphicsUnregisterResource(self.cu_vbo))
        super().init_gl_buffers(v, f)
        # Register vertex buffer object (write-only from CUDA, previous content discarded on map)
        flags = cudart.cudaGraphicsRegisterFlags.cudaGraphicsRegisterFlagsWriteDiscard
        try:
            self.cu_vbo = CHECK_CUDART_ERROR(cudart.cudaGraphicsGLRegisterBuffer(self.vbo, flags))
        except RuntimeError as e:
            log(red(f'Your system does not support CUDA-GL interop, please use pytorch3d\'s implementation instead'))
            log(red(f'This can be done by specifying {blue("model_cfg.sampler_cfg.use_cudagl=False model_cfg.sampler_cfg.use_diffgl=False")} at the end of your command'))
            log(red(f'Note that this implementation is extremely slow, we recommend running on a native system that support the interop'))
            # raise RuntimeError(str(e) + ": This unrecoverable, please read the error message above")
            raise e

    def init_textures(self):
        """(Re)create the render textures and register the color/lower textures
        with CUDA (read-only) so rendered results can be copied back to torch."""
        from cuda import cudart
        # Unregister previously registered images before the GL textures are deleted/recreated
        if hasattr(self, 'cu_read_color'):
            CHECK_CUDART_ERROR(cudart.cudaGraphicsUnregisterResource(self.cu_read_color))
            CHECK_CUDART_ERROR(cudart.cudaGraphicsUnregisterResource(self.cu_write_color))
            CHECK_CUDART_ERROR(cudart.cudaGraphicsUnregisterResource(self.cu_read_lower))
            CHECK_CUDART_ERROR(cudart.cudaGraphicsUnregisterResource(self.cu_write_lower))
        super().init_textures()
        # Register image to read from
        flags = cudart.cudaGraphicsRegisterFlags.cudaGraphicsRegisterFlagsReadOnly
        self.cu_read_color = CHECK_CUDART_ERROR(cudart.cudaGraphicsGLRegisterImage(self.read_color, gl.GL_TEXTURE_2D, flags))
        self.cu_write_color = CHECK_CUDART_ERROR(cudart.cudaGraphicsGLRegisterImage(self.write_color, gl.GL_TEXTURE_2D, flags))
        self.cu_read_lower = CHECK_CUDART_ERROR(cudart.cudaGraphicsGLRegisterImage(self.read_lower, gl.GL_TEXTURE_2D, flags))
        self.cu_write_lower = CHECK_CUDART_ERROR(cudart.cudaGraphicsGLRegisterImage(self.write_lower, gl.GL_TEXTURE_2D, flags))

    def forward(self, xyz: torch.Tensor, rgb: torch.Tensor, rad: torch.Tensor, occ: torch.Tensor, batch: dotdict):
        """
        Renders a 3D point cloud using OpenGL and returns the rendered RGB image, accumulated alpha image, and depth map.

        Args:
            xyz (torch.Tensor): A tensor of shape (B, N, 3) containing the 3D coordinates of the points.
            rgb (torch.Tensor): A tensor of shape (B, N, 3) containing the RGB color values of the points.
            rad (torch.Tensor): A tensor of shape (B, N, 1) containing the radii of the points.
            occ (torch.Tensor): A tensor of shape (B, N, 1) containing the opacities of the points.
            batch (dotdict): A dictionary containing the camera parameters and other metadata for the batch.

        Returns:
            A tuple containing the rendered RGB image, accumulated alpha image, and depth map, all as torch.Tensors.
            The RGB image has shape (1, H, W, 3), the alpha image has shape (1, H, W, 1), and the depth map has shape (1, H, W, 1).

        The method first resizes the OpenGL texture to match the height and width of the output image. It then sets the OpenGL viewport and scissor to only render in the region of the viewport specified by the output image size.
        It concatenates the `xyz`, `rgb`, and `rad` tensors along the last dimension and flattens the result into a 1D tensor.
        The method then uploads the input data to OpenGL for rendering and performs depth peeling using OpenGL. The method uploads the camera parameters to OpenGL and renders the point cloud, saving the output buffer to the `back_fbo` attribute of the class.
        Finally, the method copies the rendered image and depth back to the CPU as torch.Tensors and reshapes them to match the output image size. The RGB image is returned with shape (1, H, W, 3), the accumulated alpha image is returned with shape (1, H, W, 1), and the depth map is returned with shape (1, H, W, 1).
        """
        from cuda import cudart
        kind = cudart.cudaMemcpyKind.cudaMemcpyDeviceToDevice
        # !: BATCH
        H, W = batch.meta.H[0].item(), batch.meta.W[0].item()
        self.resize_textures(H, W) # maybe resize the texture
        self.resize_buffers(xyz.shape[1]) # maybe resize the buffer
        _, _, old_W, old_H = gl.glGetIntegerv(gl.GL_VIEWPORT)  # remember viewport for restoration below
        gl.glViewport(0, 0, W, H)
        gl.glScissor(0, 0, W, H) # only render in this small region of the viewport
        # Prepare for input data: interleave per-vertex attributes into a flat buffer
        data = torch.cat([xyz, rgb, rad, occ], dim=-1).type(self.dtype).ravel()
        # Upload to opengl for rendering: map the registered VBO, memcpy, unmap
        CHECK_CUDART_ERROR(cudart.cudaGraphicsMapResources(1, self.cu_vbo, torch.cuda.current_stream().cuda_stream))
        cu_vbo_ptr, cu_vbo_size = CHECK_CUDART_ERROR(cudart.cudaGraphicsResourceGetMappedPointer(self.cu_vbo))
        assert cu_vbo_size >= data.numel() * data.element_size(), f'PyTorch(CUDA) and OpenGL vertex buffer size mismatch ({data.numel() * data.element_size()} v.s. {cu_vbo_size}), CUDA side should be less than or equal to the OpenGL side'
        CHECK_CUDART_ERROR(cudart.cudaMemcpyAsync(cu_vbo_ptr,
                                                  data.data_ptr(),
                                                  data.numel() * data.element_size(),
                                                  kind,
                                                  torch.cuda.current_stream().cuda_stream))
        CHECK_CUDART_ERROR(cudart.cudaGraphicsUnmapResources(1, self.cu_vbo, torch.cuda.current_stream().cuda_stream))
        # Perform rasterization (depth peeling using OpenGL)
        if 'meta_stream' in batch.meta: batch.meta.meta_stream.synchronize() # wait for gpu -> cpu copy to finish
        back_fbo = self.rasterize(Camera(batch=batch.meta), xyz.shape[-2]) # will upload and render, save output buffer to back_fbo
        # Copy rendered image and depth back as tensor
        cu_tex = self.cu_write_color if back_fbo == self.write_fbo else self.cu_read_color # double buffered depth peeling
        cu_dpt = self.cu_write_lower if back_fbo == self.write_fbo else self.cu_read_lower # double buffered depth peeling
        # Prepare the output # !: BATCH
        rgb_map = torch.empty((H, W, 4), dtype=self.tex_dtype, device='cuda') # to hold the data from opengl
        dpt_map = torch.empty((H, W, 1), dtype=torch.float, device='cuda') # to hold the data from opengl
        # The resources in resources may be accessed by CUDA until they are unmapped.
        # The graphics API from which resources were registered should not access any resources while they are mapped by CUDA.
        # If an application does so, the results are undefined.
        CHECK_CUDART_ERROR(cudart.cudaGraphicsMapResources(1, cu_tex, torch.cuda.current_stream().cuda_stream))
        CHECK_CUDART_ERROR(cudart.cudaGraphicsMapResources(1, cu_dpt, torch.cuda.current_stream().cuda_stream))
        cu_tex_arr = CHECK_CUDART_ERROR(cudart.cudaGraphicsSubResourceGetMappedArray(cu_tex, 0, 0))
        cu_dpt_arr = CHECK_CUDART_ERROR(cudart.cudaGraphicsSubResourceGetMappedArray(cu_dpt, 0, 0))
        CHECK_CUDART_ERROR(cudart.cudaMemcpy2DFromArrayAsync(rgb_map.data_ptr(),  # dst
                                                             W * 4 * rgb_map.element_size(),  # dpitch
                                                             cu_tex_arr,  # src
                                                             0,  # wOffset
                                                             0,  # hOffset
                                                             W * 4 * rgb_map.element_size(),  # width Width of matrix transfer (columns in bytes)
                                                             H,  # height
                                                             kind,  # kind
                                                             torch.cuda.current_stream().cuda_stream))  # stream
        CHECK_CUDART_ERROR(cudart.cudaMemcpy2DFromArrayAsync(dpt_map.data_ptr(),
                                                             W * 1 * dpt_map.element_size(),
                                                             cu_dpt_arr,
                                                             0,
                                                             0,
                                                             W * 1 * dpt_map.element_size(),
                                                             H,
                                                             kind,
                                                             torch.cuda.current_stream().cuda_stream))
        CHECK_CUDART_ERROR(cudart.cudaGraphicsUnmapResources(1, cu_tex, torch.cuda.current_stream().cuda_stream)) # MARK: SYNC
        CHECK_CUDART_ERROR(cudart.cudaGraphicsUnmapResources(1, cu_dpt, torch.cuda.current_stream().cuda_stream)) # MARK: SYNC
        # Output reshaping: add batch dim, undo OpenGL's vertical flip, split color/alpha
        rgb_map, dpt_map = rgb_map[None].flip(1), dpt_map[None].flip(1)
        rgb_map, acc_map = rgb_map[..., :3], rgb_map[..., 3:]
        dpt_map = torch.where(dpt_map == 0, dpt_map.max(), dpt_map)  # fill background pixels with the farthest depth
        # Some house keepings: restore the previous viewport and scissor
        gl.glViewport(0, 0, old_W, old_H)
        gl.glScissor(0, 0, old_W, old_H)
        return rgb_map, acc_map, dpt_map
class HardwarePeeling(Splat):
def __init__(self,
             dtype=torch.float,
             **kwargs):
    """Depth-peeling index rasterizer setup.

    Args:
        dtype: vertex buffer dtype (torch.dtype or its name as a str).
        **kwargs: forwarded to Splat.__init__; `blit_last_ratio` and
            `vert_sizes` are overwritten with the values this class requires.
    """
    self.dtype = getattr(torch, dtype) if isinstance(dtype, str) else dtype
    self.gl_dtype = gl.GL_HALF_FLOAT if self.dtype == torch.half else gl.GL_FLOAT
    # Overwrite in kwargs instead of passing as extra keywords: previously a caller
    # that also supplied `blit_last_ratio` or `vert_sizes` would crash with
    # "got multiple values for keyword argument" (same pattern as HardwareRendering)
    kwargs = dotdict(kwargs)
    kwargs.blit_last_ratio = -10.0  # always blit previous passes (pass_index > pts_per_pix * -10 is always true)
    kwargs.vert_sizes = [3, 1]  # verts, radius
    super().__init__(**kwargs)
    # from pytorch3d.renderer import AlphaCompositor
    # self.compositor = AlphaCompositor() # this the key to convergence, this is differentiable
@property
def verts_data(self): # a heavy copy operation
    """Interleaved (position, radius) vertex buffer as a flat, C-contiguous numpy array.

    NOTE: this copies device tensors to host memory and may synchronize with CUDA.
    """
    interleaved = torch.cat([self.verts, self.radius], dim=-1)
    flat = interleaved.ravel().numpy()  # MARK: Maybe sync
    target_dtype = torch_dtype_to_numpy_dtype(self.dtype)
    return np.asarray(flat, dtype=target_dtype, order='C')  # this should only be invoked once
def init_gl_buffers(self, v: int = 0, f: int = 0):
    """(Re)create the GL vertex buffers and register the VBO with CUDA for writing.

    Args:
        v: number of vertices to reserve (passed through to the parent).
        f: number of faces to reserve (passed through to the parent).
    """
    from cuda import cudart
    # A previously registered VBO must be unregistered before the GL buffer is recreated
    if hasattr(self, 'cu_vbo'):
        CHECK_CUDART_ERROR(cudart.cudaGraphicsUnregisterResource(self.cu_vbo))
    super().init_gl_buffers(v, f)
    # Register vertex buffer object (write-only from CUDA, previous content discarded on map)
    flags = cudart.cudaGraphicsRegisterFlags.cudaGraphicsRegisterFlagsWriteDiscard
    # NOTE: removed a stray trailing line-continuation backslash here, which fused this
    # statement with the following `def` line and made the module unparsable
    self.cu_vbo = CHECK_CUDART_ERROR(cudart.cudaGraphicsGLRegisterBuffer(self.vbo, flags))
def use_gl_program(self, program):
    """Activate the splatting program and bind the texture-unit uniforms for peeling.

    `program` is expected to be `self.splat_program` (see `rasterize_generator`);
    uniform locations are queried from it after activation.
    """
    super().use_gl_program(program)
    gl.glUseProgram(self.splat_program) # use a different program
    # Locations of the previous pass's output textures (index + depth lower bound)
    self.uniforms.read_index = gl.glGetUniformLocation(program, f'read_index')
    self.uniforms.read_lower = gl.glGetUniformLocation(program, f'read_lower')
    # Bind sampler uniforms to fixed texture units 0/1 (matching glBindTextures in rasterize_generator)
    gl.glUniform1i(self.uniforms.read_index, 0)
    gl.glUniform1i(self.uniforms.read_lower, 1)
def upload_gl_uniforms(self, camera: Camera):
    """Upload camera-dependent uniforms; adds nothing beyond the parent's set."""
    super().upload_gl_uniforms(camera)
def compile_shaders(self):
    """Compile and link the index-splatting shader pair into `self.splat_program`."""
    try:
        vert = shaders.compileShader(load_shader_source('idx_splat.vert'), gl.GL_VERTEX_SHADER) # use the pass through quad shader
        frag = shaders.compileShader(load_shader_source('idx_splat.frag'), gl.GL_FRAGMENT_SHADER)
        self.splat_program = shaders.compileProgram(vert, frag)
    except Exception as e:
        # GLSL compiler errors arrive with escaped newlines; unescape for readability
        print(str(e).encode('utf-8').decode('unicode_escape'))
        raise e
def init_textures(self):
    """(Re)create the peeling framebuffers (index + depth lower bound) and register
    their textures with CUDA (read-only) so pass results can be copied to torch."""
    from cuda import cudart
    # Unregister previously registered images before the GL textures are deleted
    if hasattr(self, 'cu_read_index'):
        CHECK_CUDART_ERROR(cudart.cudaGraphicsUnregisterResource(self.cu_read_index))
        CHECK_CUDART_ERROR(cudart.cudaGraphicsUnregisterResource(self.cu_write_index))
        CHECK_CUDART_ERROR(cudart.cudaGraphicsUnregisterResource(self.cu_read_lower))
        CHECK_CUDART_ERROR(cudart.cudaGraphicsUnregisterResource(self.cu_write_lower))
    # Delete the old GL objects when re-initializing
    if hasattr(self, 'write_fbo'):
        gl.glDeleteFramebuffers(2, [self.write_fbo, self.read_fbo])
        gl.glDeleteTextures(6, [self.write_index, self.write_lower, self.write_attach, self.read_index, self.read_lower, self.read_attach])
    self.write_index, self.write_lower, self.write_attach, self.write_fbo = hareward_peeling_framebuffer(self.max_H, self.max_W)
    self.read_index, self.read_lower, self.read_attach, self.read_fbo = hareward_peeling_framebuffer(self.max_H, self.max_W)
    # Register image to read from (read-only from CUDA)
    flags = cudart.cudaGraphicsRegisterFlags.cudaGraphicsRegisterFlagsReadOnly
    self.cu_read_index = CHECK_CUDART_ERROR(cudart.cudaGraphicsGLRegisterImage(self.read_index, gl.GL_TEXTURE_2D, flags))
    self.cu_write_index = CHECK_CUDART_ERROR(cudart.cudaGraphicsGLRegisterImage(self.write_index, gl.GL_TEXTURE_2D, flags))
    self.cu_read_lower = CHECK_CUDART_ERROR(cudart.cudaGraphicsGLRegisterImage(self.read_lower, gl.GL_TEXTURE_2D, flags))
    self.cu_write_lower = CHECK_CUDART_ERROR(cudart.cudaGraphicsGLRegisterImage(self.write_lower, gl.GL_TEXTURE_2D, flags))
    log(f'Created texture of h, w: {self.max_H}, {self.max_W}')
def rasterize_generator(self, camera: Camera = None, length: int = None): # some implementation requires no uploading of camera
    """Depth-peeling pass generator: yields the fbo written by each pass.

    Each pass renders point *indices* (attachment 0, cleared to -1 for "empty")
    and a depth lower bound (attachment 1), reading the previous pass's results
    as textures. The caller is expected to copy the yielded fbo's attachments
    (e.g. via CUDA-GL interop) before advancing the generator.
    """
    front_fbo, front_index, front_lower = self.read_fbo, self.read_index, self.read_lower
    back_fbo, back_index, back_lower = self.write_fbo, self.write_index, self.write_lower
    # Only clear the output once
    gl.glBindFramebuffer(gl.GL_FRAMEBUFFER, front_fbo) # for offscreen rendering to textures
    gl.glClearBufferiv(gl.GL_COLOR, 0, [-1])  # index buffer: -1 marks "no point here"
    gl.glClearBufferfv(gl.GL_COLOR, 1, [0.0])
    gl.glClearBufferfv(gl.GL_DEPTH, 0, [1e9]) # this is for depth testing
    # Only clear the output once
    gl.glBindFramebuffer(gl.GL_FRAMEBUFFER, back_fbo) # for offscreen rendering to textures
    gl.glClearBufferiv(gl.GL_COLOR, 0, [-1])
    gl.glClearBufferfv(gl.GL_COLOR, 1, [0.0])
    gl.glClearBufferfv(gl.GL_DEPTH, 0, [1e9]) # this is for depth testing
    # Prepare for the actual rendering, previous operations could rebind the vertex array
    self.use_gl_program(self.splat_program)
    self.upload_gl_uniforms(camera)
    gl.glBindVertexArray(self.vao)
    # The actual multi pass rendering process happens here
    for pass_index in range(self.pts_per_pix):
        # Swap buffers to render the next pass (double-buffered read/write ping-pong)
        front_fbo, front_index, front_lower, back_fbo, back_index, back_lower = \
            back_fbo, back_index, back_lower, front_fbo, front_index, front_lower
        # Bind the read texture and bind the write render frame buffer
        gl.glBindTextures(0, 2, [front_index, front_lower])
        # Move content from write_fbo to screen fbo
        if pass_index > self.pts_per_pix * self.blit_last_ratio: # no blitting almost has no effect on the rendering
            gl.glBindFramebuffer(gl.GL_READ_FRAMEBUFFER, front_fbo)
            gl.glBindFramebuffer(gl.GL_DRAW_FRAMEBUFFER, back_fbo)
            # Only the depth lower bound (attachment 1) is carried over between passes
            gl.glReadBuffer(gl.GL_COLOR_ATTACHMENT0 + 1)
            gl.glDrawBuffer(gl.GL_COLOR_ATTACHMENT0 + 1)
            gl.glBlitFramebuffer(0, 0, self.W, self.H, 0, 0, self.W, self.H, gl.GL_COLOR_BUFFER_BIT, gl.GL_NEAREST)
            gl.glBindFramebuffer(gl.GL_FRAMEBUFFER, back_fbo) # for offscreen rendering to textures
            gl.glDrawBuffers(2, [gl.GL_COLOR_ATTACHMENT0, gl.GL_COLOR_ATTACHMENT1])
        else:
            # Only clear the output once
            gl.glBindFramebuffer(gl.GL_FRAMEBUFFER, back_fbo) # for offscreen rendering to textures
        # Clear depth buffer for depth testing
        gl.glClearBufferiv(gl.GL_COLOR, 0, [-1]) # clear the indices buffer for later rendering and retrieving
        gl.glClearBufferfv(gl.GL_DEPTH, 0, [1e9]) # this is for depth testing
        # The actual drawing pass with render things out to the write_fbo
        gl.glDrawArrays(gl.GL_POINTS, 0, length if length is not None else len(self.verts)) # number of vertices
        yield back_fbo # give the CUDA end a chance to read from this frame buffer after rendering
    # Restore states of things
    gl.glBindFramebuffer(gl.GL_FRAMEBUFFER, 0)
    gl.glBindVertexArray(0)
    return
def forward(self,
            xyz: torch.Tensor, rgb: torch.Tensor, rad: torch.Tensor, occ: torch.Tensor,
            batch: dotdict,
            return_frags: bool = False,
            return_full: bool = False,
            ):
    """
    Get all indices from the depth peeling passes
    Compute the vertex weight here in torch(cuda)
    Use the indices to pass through a compositor
    The backward pass should only be valid on the torch side, and it should've been enough

    Args:
        xyz: (B, N, 3) point positions
        rgb: (B, N, 3) point colors
        rad: (B, N, 1) world-space point radii
        occ: (B, N, 1) point opacities
        batch: camera parameters and metadata for this batch
        return_frags: return raw fragments (idx, pix_occ) instead of composited images
        return_full: additionally return (idx, pix_occ) after compositing

    Returns:
        (rgb, acc, dpt) composited images, or fragments depending on the flags above.

    TODO: This function is too memory intensive
    TODO: Performing IBR is too memory intensive
    """
    # This is the slow part, but not differentiable: per-pixel point indices from GL peeling
    idx, _, _ = self.forward_idx(xyz, rad, batch) # B, H, W, K
    msk = idx != -1 # B, H, W, K, valid-fragment mask (-1 marks an empty slot)
    idx = torch.where(msk, idx, 0).long()
    # Sample things needed for computing screen space weight
    H, W, K, R, T, C = get_opencv_camera_params(batch)
    K, R, T, C = K.to(xyz.dtype), R.to(xyz.dtype), T.to(xyz.dtype), C.to(xyz.dtype)
    pix_xyz = (xyz @ R.mT + T.mT) @ K.mT # B, P, 3, project points to screen space
    pix_xyz_xy = pix_xyz[..., :-1] / (pix_xyz[..., -1:] + 1e-10)  # perspective division
    pix_rad = abs(K[..., 1, 1][..., None] * rad[..., 0] / (pix_xyz[..., -1] + 1e-10)) # z: B, 1 * B, N, world space radius -> screen space radius
    # Gather screen-space centers for each fragment and squared distance to each pixel
    mean_xy = multi_gather(pix_xyz_xy, idx.view(idx.shape[0], -1), dim=-2).view(*idx.shape, 2) # B, HWK, 2 -> B, H, W, K, 2
    xy = create_meshgrid(H, W, idx.device, dtype=xyz.dtype).flip(-1)[None].expand(idx.shape[0], H, W, 2) # create screen space xy (opencv)
    dists = (xy[..., None, :] - mean_xy).pow(2).sum(-1) # B, H, W, K
    # Point values
    dpt = (xyz - C.mT).norm(dim=-1, keepdim=True) # B, N, 1, distance to the camera center
    pix_occ = multi_gather(occ, idx.view(idx.shape[0], -1), dim=-2).view(*idx.shape)
    pix_rad = multi_gather(pix_rad, idx.view(idx.shape[0], -1), dim=-1).view(*idx.shape) # -> B, H, W, K
    # Opacity falls off with squared distance from the point center; clamp and mask empty slots
    pix_occ = pix_occ * (1 - dists / (pix_rad * pix_rad + 1e-10)) # B, H, W, K
    pix_occ = pix_occ.clip(0, 1)
    pix_occ = torch.where(msk, pix_occ, 0)
    if return_frags:
        return idx, pix_occ # B, H, W, K
    # The actual computation: composite color, alpha and depth front to back
    rgb = torch.cat([rgb, occ, dpt], dim=-1) # B, N, 3 + C
    pix_rgb = multi_gather(rgb, idx.view(idx.shape[0], -1), dim=-2).view(*idx.shape, rgb.shape[-1]) # B, H, W, K, -1
    _, rgb, _ = volume_rendering(pix_rgb, pix_occ[..., None]) # B, H, W, -1
    rgb, acc, dpt = rgb[..., :-2], rgb[..., -2:-1], rgb[..., -1:]
    dpt = dpt + (1 - acc) * dpt.max() # only for the looks (rendered depth are already premultiplied)
    if return_full:
        return rgb, acc, dpt, idx, pix_occ
    else:
        return rgb, acc, dpt
def forward_idx(self, xyz: torch.Tensor, rad: torch.Tensor, batch: dotdict):
    """Rasterize the point cloud with OpenGL depth peeling and return per-pixel
    point indices, hit masks and depths.

    Args:
        xyz: point positions; assumes shape (B, N, 3) — TODO confirm, B is assumed 1 (see `!: BATCH` marks)
        rad: per-point radii uploaded alongside positions
        batch: dotdict carrying at least `meta.H`, `meta.W` and camera parameters
               consumed by `Camera(batch=batch.meta)`

    Returns:
        ind_map: (1, H, W, K) int point index per pixel per peel layer (-1 = no hit)
        acc_map: (1, H, W, K) bool hit mask (ind_map != -1)
        dpt_map: (1, H, W, K) float depth per pixel per peel layer
    """
    from cuda import cudart
    # Both the vertex buffer and the torch tensors live on the GPU, so all copies are D2D
    kind = cudart.cudaMemcpyKind.cudaMemcpyDeviceToDevice
    # !: BATCH
    H, W = batch.meta.H[0].item(), batch.meta.W[0].item()
    self.resize_textures(H, W)  # maybe resize the texture
    self.resize_buffers(xyz.shape[1])  # maybe resize the buffer
    _, _, old_W, old_H = gl.glGetIntegerv(gl.GL_VIEWPORT)  # remember viewport to restore later
    gl.glViewport(0, 0, W, H)
    gl.glScissor(0, 0, W, H)  # only render in this small region of the viewport
    # Prepare for input data: interleave position and radius, cast to the GL dtype
    data = torch.cat([xyz, rad], dim=-1).type(self.dtype).ravel()
    # Upload to opengl for rendering: map the GL vertex buffer into CUDA and memcpy into it
    CHECK_CUDART_ERROR(cudart.cudaGraphicsMapResources(1, self.cu_vbo, torch.cuda.current_stream().cuda_stream))
    cu_vbo_ptr, cu_vbo_size = CHECK_CUDART_ERROR(cudart.cudaGraphicsResourceGetMappedPointer(self.cu_vbo))
    assert cu_vbo_size >= data.numel() * data.element_size(), f'PyTorch(CUDA) and OpenGL vertex buffer size mismatch ({data.numel() * data.element_size()} v.s. {cu_vbo_size}), CUDA side should be less than or equal to the OpenGL side'
    CHECK_CUDART_ERROR(cudart.cudaMemcpyAsync(cu_vbo_ptr,
                                              data.data_ptr(),
                                              data.numel() * data.element_size(),
                                              kind,
                                              torch.cuda.current_stream().cuda_stream))
    CHECK_CUDART_ERROR(cudart.cudaGraphicsUnmapResources(1, self.cu_vbo, torch.cuda.current_stream().cuda_stream))
    # Perform rasterization (depth peeling using OpenGL)
    if 'meta_stream' in batch.meta: batch.meta.meta_stream.synchronize()  # wait for gpu -> cpu copy to finish
    # FIXME: Strange bug occurs if batch parameter is passed in directly for the construction of Camera(batch=batch.meta)
    gen = self.rasterize_generator(Camera(batch=batch.meta), xyz.shape[-2])  # will upload and render, save output buffer to back_fbo
    ind_maps = []
    dpt_maps = []
    acc_maps = []
    # Each generated back_fbo is one depth-peel layer; collect index/depth maps layer by layer
    for back_fbo in gen:
        # Copy rendered image and depth back as tensor
        cu_tex = self.cu_write_index if back_fbo == self.write_fbo else self.cu_read_index  # double buffered depth peeling
        cu_dpt = self.cu_write_lower if back_fbo == self.write_fbo else self.cu_read_lower  # double buffered depth peeling
        # Prepare the output # !: BATCH
        ind_map = torch.empty((H, W, 1), dtype=torch.int, device='cuda')  # to hold the data from opengl
        dpt_map = torch.empty((H, W, 1), dtype=torch.float, device='cuda')  # to hold the data from opengl
        # The resources in resources may be accessed by CUDA until they are unmapped.
        # The graphics API from which resources were registered should not access any resources while they are mapped by CUDA.
        # If an application does so, the results are undefined.
        CHECK_CUDART_ERROR(cudart.cudaGraphicsMapResources(1, cu_tex, torch.cuda.current_stream().cuda_stream))
        CHECK_CUDART_ERROR(cudart.cudaGraphicsMapResources(1, cu_dpt, torch.cuda.current_stream().cuda_stream))
        cu_tex_arr = CHECK_CUDART_ERROR(cudart.cudaGraphicsSubResourceGetMappedArray(cu_tex, 0, 0))
        cu_dpt_arr = CHECK_CUDART_ERROR(cudart.cudaGraphicsSubResourceGetMappedArray(cu_dpt, 0, 0))
        CHECK_CUDART_ERROR(cudart.cudaMemcpy2DFromArrayAsync(ind_map.data_ptr(),  # dst
                                                             W * ind_map.shape[-1] * ind_map.element_size(),  # dpitch
                                                             cu_tex_arr,  # src
                                                             0,  # wOffset
                                                             0,  # hOffset
                                                             W * ind_map.shape[-1] * ind_map.element_size(),  # width Width of matrix transfer (columns in bytes)
                                                             H,  # height
                                                             kind,  # kind
                                                             torch.cuda.current_stream().cuda_stream))  # stream
        CHECK_CUDART_ERROR(cudart.cudaMemcpy2DFromArrayAsync(dpt_map.data_ptr(),
                                                             W * dpt_map.shape[-1] * dpt_map.element_size(),
                                                             cu_dpt_arr,
                                                             0,
                                                             0,
                                                             W * dpt_map.shape[-1] * dpt_map.element_size(),
                                                             H,
                                                             kind,
                                                             torch.cuda.current_stream().cuda_stream))
        CHECK_CUDART_ERROR(cudart.cudaGraphicsUnmapResources(1, cu_tex, torch.cuda.current_stream().cuda_stream))  # MARK: SYNC
        CHECK_CUDART_ERROR(cudart.cudaGraphicsUnmapResources(1, cu_dpt, torch.cuda.current_stream().cuda_stream))  # MARK: SYNC
        # Output reshaping: add batch dim and flip vertically (GL origin is bottom-left)
        ind_map, dpt_map = ind_map[None].flip(1), dpt_map[None].flip(1)
        acc_map = ind_map != -1  # hit mask: -1 means no point rendered to this pixel
        dpt_map = torch.where(dpt_map == 0, dpt_map.max(), dpt_map)  # push empty pixels to the far depth
        ind_maps.append(ind_map)
        acc_maps.append(acc_map)
        dpt_maps.append(dpt_map)
    # Stack the peel layers along the last (K) dimension
    ind_map = torch.cat(ind_maps, dim=-1)  # B, H, W, K
    acc_map = torch.cat(acc_maps, dim=-1)  # B, H, W, K
    dpt_map = torch.cat(dpt_maps, dim=-1)  # B, H, W, K
    # Some house keepings: restore the caller's viewport and scissor box
    gl.glViewport(0, 0, old_W, old_H)
    gl.glScissor(0, 0, old_W, old_H)
    return ind_map, acc_map, dpt_map
|
evocodebench_data_81
|
# Feature Cloud Sequence utilities
# This files builds the components for the feature cloud sequence sampler
import torch
from typing import List, Dict, Union
from easyvolcap.utils.console_utils import *
from easyvolcap.utils.base_utils import dotdict
from easyvolcap.utils.raster_utils import get_ndc_perspective_matrix
from easyvolcap.utils.chunk_utils import multi_gather, multi_scatter
from easyvolcap.utils.math_utils import normalize_sum, affine_inverse, affine_padding
from easyvolcap.utils.net_utils import MLP
def estimate_occupancy_field(xyz: torch.Tensor, rad: torch.Tensor, occ: torch.Tensor):
    """Build a closure evaluating the soft occupancy field of a point-cloud density field.

    We sample the point cloud with a ball query for the largest radius in the set.
    The actual alpha is decreased with the distance to the closest points; if multiple
    points fall into the region of interest, the alphas are summed.

    Args:
        xyz: (B, N, 3) point positions
        rad: (B, N, 1) per-point world-space radii
        occ: (B, N, 1) per-point occupancy (alpha)

    Returns:
        field(pts, K=10): callable mapping (B, ..., 3) query points to (B, ..., 1) occupancy
    """
    from pytorch3d.ops import ball_query
    max_rad = rad.max()

    def field(pts: torch.Tensor, K=10):
        # pts: B, P, 3 (any leading shape is flattened then restored)
        sh = pts.shape
        pts = pts.view(pts.shape[0], -1, 3)
        knn = ball_query(pts, xyz, K=K, radius=max_rad, return_nn=False)
        idx, dists = knn.idx, knn.dists  # B, P, K
        msk = idx != -1  # -1 marks empty slots in the ball-query result
        idx = torch.where(msk, idx, 0).long()
        pix_rad = multi_gather(rad[..., 0], idx.view(idx.shape[0], -1), dim=-1).view(idx.shape)  # B, P, K
        pix_occ = multi_gather(occ[..., 0], idx.view(idx.shape[0], -1), dim=-1).view(idx.shape)  # B, P, K
        # FIX: add 1e-10 to guard the division for zero-radius points,
        # consistent with the screen-space weighting in the renderer path
        pix_occ = pix_occ * (1 - dists / (pix_rad * pix_rad + 1e-10))  # B, P, K
        pix_occ = torch.where(msk, pix_occ, 0)  # zero out contributions of empty slots
        pix_occ = pix_occ.clip(0, 1)
        pix_occ = pix_occ.sum(dim=-1, keepdim=True)  # accumulate over neighbors: B, P, 1
        return pix_occ.view(*sh[:-1], 1)

    return field
# @torch.jit.script
def prepare_feedback_transform(H: int, W: int, K: torch.Tensor, R: torch.Tensor, T: torch.Tensor,
                               n: torch.Tensor,
                               f: torch.Tensor,
                               xyz: torch.Tensor,
                               rgb: torch.Tensor,
                               rad: torch.Tensor):
    """Project points to OpenGL clip space and pack [xyzw, rgb, radius] into a flat VBO buffer.

    Args:
        H, W: image size in pixels
        K, R, T: OpenCV-convention intrinsics and world-to-camera extrinsics
        n, f: near/far planes; the last dim is stripped (n[..., 0], f[..., 0])
        xyz: (B, N, 3) world-space points; rgb: per-point colors; rad: (B, N, 1) world radii

    Returns:
        1-D tensor of interleaved vertex attributes ready for upload to a GL vertex buffer.
    """
    ixt = get_ndc_perspective_matrix(K, H, W, n[..., 0], f[..., 0]).to(xyz.dtype)  # to opengl, remove last dim of n and f
    w2c = affine_padding(torch.cat([R, T], dim=-1)).to(xyz.dtype)
    c2w = affine_inverse(w2c)
    # OpenCV -> OpenGL camera convention: y and z axes are negated (x kept for symmetry)
    c2w[..., 0] *= 1  # flip x
    c2w[..., 1] *= -1  # flip y
    c2w[..., 2] *= -1  # flip z
    ext = affine_inverse(c2w)
    # Homogeneous projection: world point -> camera -> clip space (w not yet divided)
    pix_xyz = torch.cat([xyz, torch.ones_like(xyz[..., :1])], dim=-1) @ ext.mT @ ixt.mT
    pix_rad = abs(H * ixt[..., 1, 1][..., None, None] * rad / pix_xyz[..., -1:])  # z: B, 1 * B, N, world space radius -> ndc radius B, N, 1
    # Prepare data to be rendered: interleave attributes per vertex
    data = torch.cat([pix_xyz, rgb, pix_rad], dim=-1).ravel()  # organize the data inside vbo
    return data
def matrix_to_rotation_6d(matrix: torch.Tensor) -> torch.Tensor:
    """
    Convert rotation matrices to the 6D rotation representation of Zhou et al. [1]
    by keeping only the first two rows. Note that the 6D representation is not unique.

    Args:
        matrix: batch of rotation matrices of size (*, 3, 3)

    Returns:
        6D rotation representation, of size (*, 6)

    [1] Zhou, Y., Barnes, C., Lu, J., Yang, J., & Li, H.
    On the Continuity of Rotation Representations in Neural Networks.
    IEEE Conference on Computer Vision and Pattern Recognition, 2019.
    Retrieved from http://arxiv.org/abs/1812.07035
    """
    leading = matrix.size()[:-2]
    top_two_rows = matrix[..., :2, :].clone()
    return top_two_rows.reshape(leading + (6,))
@run_once
def warn_once_about_pulsar_fxfy():
    """Emit a one-time warning that pulsar only supports a single focal length."""
    message = (
        "Pulsar only supports a single focal lengths. For converting OpenCV "
        "focal lengths, we average them for x and y directions. "
        "The focal lengths for x and y you provided differ by more than 1%, "
        "which means this could introduce a noticeable error."
    )
    log(yellow(message))
def get_pulsar_camera_params(
    R: torch.Tensor,
    tvec: torch.Tensor,
    camera_matrix: torch.Tensor,
    image_size: torch.Tensor,
    znear: float = 0.1,
) -> torch.Tensor:
    """Convert batched OpenCV camera parameters to pulsar's 13-D camera vector.

    Args:
        R: (B, 3, 3) world-to-camera rotation matrices
        tvec: (B, 3) or (B, 3, 1) translation vectors
        camera_matrix: (B, 3, 3) intrinsic matrices
        image_size: (B, 2) image sizes as (H, W); all images in a batch must match
        znear: near plane used to derive the pulsar focal length

    Returns:
        (B, 13) tensor: [cam_pos(3), cam_rot_6d(6), focal_length, sensor_width, cx_off, cy_off]
    """
    assert len(camera_matrix.size()) == 3, "This function requires batched inputs!"
    assert len(R.size()) == 3, "This function requires batched inputs!"
    # FIX: corrected typo "reuqires" in the error message
    assert len(tvec.size()) in (2, 3), "This function requires batched inputs!"
    # Validate parameters.
    image_size_wh = image_size.to(R).flip(dims=(1,))  # (H, W) -> (W, H)
    assert torch.all(
        image_size_wh > 0
    ), "height and width must be positive but min is: %s" % (
        str(image_size_wh.min().item())
    )
    assert (
        camera_matrix.size(1) == 3 and camera_matrix.size(2) == 3
    ), "Incorrect camera matrix shape: expected 3x3 but got %dx%d" % (
        camera_matrix.size(1),
        camera_matrix.size(2),
    )
    assert (
        R.size(1) == 3 and R.size(2) == 3
    ), "Incorrect R shape: expected 3x3 but got %dx%d" % (
        R.size(1),
        R.size(2),
    )
    if len(tvec.size()) == 2:
        tvec = tvec.unsqueeze(2)
    assert (
        tvec.size(1) == 3 and tvec.size(2) == 1
    ), "Incorrect tvec shape: expected 3x1 but got %dx%d" % (
        tvec.size(1),
        tvec.size(2),
    )
    # Check batch size.
    batch_size = camera_matrix.size(0)
    assert R.size(0) == batch_size, "Expected R to have batch size %d. Has size %d." % (
        batch_size,
        R.size(0),
    )
    assert (
        tvec.size(0) == batch_size
    ), "Expected tvec to have batch size %d. Has size %d." % (
        batch_size,
        tvec.size(0),
    )
    # Check image sizes.
    image_w = image_size_wh[0, 0]
    image_h = image_size_wh[0, 1]
    assert torch.all(
        image_size_wh[:, 0] == image_w
    ), "All images in a batch must have the same width!"
    assert torch.all(
        image_size_wh[:, 1] == image_h
    ), "All images in a batch must have the same height!"
    # Focal length.
    fx = camera_matrix[:, 0, 0].unsqueeze(1)
    fy = camera_matrix[:, 1, 1].unsqueeze(1)
    # Check that we introduce less than 1% error by averaging the focal lengths.
    fx_y = fx / fy
    if torch.any(fx_y > 1.01) or torch.any(fx_y < 0.99):
        warn_once_about_pulsar_fxfy()
    f = (fx + fy) / 2
    # Normalize f into normalized device coordinates.
    focal_length_px = f / image_w
    # Transfer into focal_length and sensor_width.
    # NOTE: Using torch.tensor instead of torch.as_tensor will cause cpu gpu sync
    focal_length = torch.as_tensor([znear - 1e-5], dtype=torch.float32, device=R.device)
    focal_length = focal_length[None, :].repeat(batch_size, 1)
    sensor_width = focal_length / focal_length_px
    # Principal point.
    cx = camera_matrix[:, 0, 2].unsqueeze(1)
    cy = camera_matrix[:, 1, 2].unsqueeze(1)
    # Transfer principal point offset into centered offset.
    cx = -(cx - image_w / 2)
    cy = cy - image_h / 2
    # Concatenate to final vector.
    param = torch.cat([focal_length, sensor_width, cx, cy], dim=1)
    R_trans = R.permute(0, 2, 1)
    cam_pos = -torch.bmm(R_trans, tvec).squeeze(2)  # camera center: -R^T t
    cam_rot = matrix_to_rotation_6d(R_trans)
    cam_params = torch.cat([cam_pos, cam_rot, param], dim=1)
    return cam_params
def get_opencv_camera_params(batch: dotdict):
H = batch.meta.H[0].item() # !: BATCH
W = batch.meta.W[0].item() # !: BATCH
K = batch.K
R = batch.R
T = batch.T
C = -batch.R.mT @ batch.T # B, 3, 1
return H, W, K, R, T, C
def get_pytorch3d_camera_params(batch: dotdict):
    """Extract pytorch3d-convention camera parameters from the batch input.

    R and T are applied on the right (so R is transposed relative to OpenCV),
    and the axis convention differs from OpenCV (cv: right-down-front,
    p3d: left-up-front), hence the sign flips applied to both R and T.
    """
    cam_center = -batch.R.mT @ batch.T  # camera center in world space: B, 3, 1
    rot = batch.R.clone()
    rot[..., :2, :] *= -1  # flip the x and y rows for the cv -> p3d convention change
    trans = (-rot @ cam_center)[..., 0]  # rebuild w2c translation for the corrected rotation
    rot = rot.mT  # right-multiplication convention used by pytorch3d
    H = batch.meta.H[0].item()  # !: BATCH
    W = batch.meta.W[0].item()  # !: BATCH
    ndc_K = get_pytorch3d_ndc_K(batch.K, H, W)
    return H, W, ndc_K, rot, trans, cam_center
# TODO: Remove pcd_t and with_t semantics, this is a legacy API
def voxel_surface_down_sample(pcd: torch.Tensor, pcd_t: torch.Tensor = None, voxel_size: float = 0.01, dist_th: float = 0.025, n_points: int = 65536):
# !: BATCH
# TODO: Use number of vertices for good estimation
import open3d as o3d
import numpy as np
import mcubes
from easyvolcap.utils.sample_utils import point_mesh_distance
from pytorch3d.ops import knn_points, ball_query, sample_farthest_points
# Convert torch tensor to Open3D PointCloud
o3d_pcd = o3d.geometry.PointCloud()
o3d_pcd.points = o3d.utility.Vector3dVector(pcd.view(-1, 3).detach().cpu().numpy())
# Create VoxelGrid from PointCloud
o3d_vox = o3d.geometry.VoxelGrid.create_from_point_cloud(o3d_pcd, voxel_size=voxel_size)
# Extract dense grid from VoxelGrid using get_voxel
voxels = o3d_vox.get_voxels()
max_index = np.array([vox.grid_index for vox in voxels]).max(axis=0) # !: for-loop
dense_grid = np.zeros((max_index[0] + 1, max_index[1] + 1, max_index[2] + 1))
for vox in voxels: # !: for-loop
dense_grid[vox.grid_index[0], vox.grid_index[1], vox.grid_index[2]] = 1
# Use marching cubes to obtain mesh from dense grid
vertices, triangles = mcubes.marching_cubes(dense_grid, 0.5)
vertices = vertices * voxel_size + o3d_vox.origin # resizing
# Convert mesh data to torch tensors
triangles_torch = torch.as_tensor(vertices[triangles], device=pcd.device, dtype=pcd.dtype).float()
# Calculate distances using point_mesh_distance
dists, _ = point_mesh_distance(pcd[0], triangles_torch)
# Select points based on distances
valid = (dists < dist_th).nonzero()[..., 0]
while (len(valid) - n_points) / n_points > 0.005:
# There are too many valid points, should control its number
ratio = len(valid) / len(pcd[0]) # the ratio of valid points
n_expected = int(n_points / ratio) # the expected number of points before surface sampling
pcd = random(pcd, n_points=n_expected)
# Calculate distances using point_mesh_distance
dists, _ = point_mesh_distance(pcd[0], triangles_torch)
# Select points based on distances
valid = (dists < dist_th).nonzero()[..., 0]
_, valid = dists.topk(n_points, dim=-1, sorted=False, largest=False)
pcd_new = torch.index_select(pcd[0], 0, valid)[None]
return pcd_new
def filter_bounds(pcd: torch.Tensor, pcd_t: torch.Tensor = None, bounds: torch.Tensor = None):
    """Keep only the points strictly inside an axis-aligned bounding box.

    bounds is (B, 2, 3): [min_corner, max_corner]. pcd_t is accepted but unused
    (legacy API). Only the first batch entry's mask is used (see MARK: SYNC).
    """
    lo, hi = bounds[..., 0, :], bounds[..., 1, :]
    inside = ((pcd - lo) > 0).all(dim=-1) & ((pcd - hi) < 0).all(dim=-1)  # mask: B, N
    keep = inside[0].nonzero()[None]  # B, S -> B, V # MARK: SYNC
    return multi_gather(pcd, keep, dim=-2)
def duplicate(pcd: torch.Tensor, pcd_t: torch.Tensor = None, std: float = 0.005 * 0.1):
    """Double the point count by appending a Gaussian-jittered copy of every point.

    pcd_t is accepted but unused (legacy API).
    """
    jittered = torch.normal(pcd, std=std)
    return torch.cat([pcd, jittered], dim=-2)
def farthest(pcd: torch.Tensor, pcd_t: torch.Tensor = None, lengths: torch.Tensor = None, n_points: int = 65536):
    """Subsample n_points via farthest point sampling (pytorch3d).

    pcd_t is accepted but unused (legacy API).
    """
    from pytorch3d.ops import knn_points, ball_query, sample_farthest_points
    _, picked = sample_farthest_points(pcd, lengths, K=n_points)  # N, K (padded)
    return multi_gather(pcd, picked)
def random(pcd: torch.Tensor, pcd_t: torch.Tensor = None, n_points: int = 65536, std: float = 0.001):
    """Subsample n_points uniformly at random (without replacement) per batch.

    pcd_t and std are accepted but unused (legacy API).
    """
    perms = [torch.randperm(pcd.shape[-2], device=pcd.device)[:n_points] for b in range(len(pcd))]
    sel = torch.stack(perms)  # B, S,
    return multi_gather(pcd, sel)
def voxel_down_sample(pcd: torch.Tensor, pcd_t: torch.Tensor = None, voxel_size=0.005):
    """Downsample a point cloud with Open3D's voxel grid filter.

    pcd_t is accepted but unused (legacy API). Returns a (B, -1, 3) tensor
    on the same device/dtype as the input.
    """
    import open3d as o3d
    import numpy as np  # FIX: np was referenced below but never imported in this module
    o3d_pcd = o3d.geometry.PointCloud()
    o3d_pcd.points = o3d.utility.Vector3dVector(pcd.view(-1, 3).detach().cpu().numpy())
    o3d_pcd = o3d_pcd.voxel_down_sample(voxel_size)
    return torch.as_tensor(np.array(o3d_pcd.points)).to(pcd.device, pcd.dtype, non_blocking=True).view(pcd.shape[0], -1, 3)
def remove_outlier(pcd: torch.Tensor, pcd_t: torch.Tensor = None, K: int = 20, std_ratio=2.0, return_inds=False):  # !: BATCH
    """Remove statistical outliers with Open3D (neighborhood-distance test).

    Args:
        K: number of neighbors used to estimate the mean distance
        std_ratio: points beyond mean + std_ratio * std are dropped
        return_inds: if True, return the surviving indices (1, N) instead of points

    pcd_t is accepted but unused (legacy API).
    """
    import open3d as o3d
    import numpy as np  # FIX: np was referenced below but never imported in this module
    o3d_pcd = o3d.geometry.PointCloud()
    o3d_pcd.points = o3d.utility.Vector3dVector(pcd.view(-1, 3).detach().cpu().numpy())
    cl, ind = o3d_pcd.remove_statistical_outlier(nb_neighbors=K, std_ratio=std_ratio)
    if return_inds:
        return torch.as_tensor(np.array(ind), device=pcd.device)[None]  # N,
    return torch.as_tensor(np.array(o3d_pcd.points)[np.array(ind)]).to(pcd.device, pcd.dtype, non_blocking=True).view(pcd.shape[0], -1, 3)
def farthest_down_sample(pcd: torch.Tensor, pcd_t: torch.Tensor = None, K: int = 65536):
    """Subsample K points using Open3D's farthest point downsampling.

    pcd_t is accepted but unused (legacy API).
    """
    import open3d as o3d
    import numpy as np  # FIX: np was referenced below but never imported in this module
    o3d_pcd = o3d.geometry.PointCloud()
    o3d_pcd.points = o3d.utility.Vector3dVector(pcd.view(-1, 3).detach().cpu().numpy())
    o3d_pcd = o3d_pcd.farthest_point_down_sample(K)
    return torch.as_tensor(np.array(o3d_pcd.points)).to(pcd.device, pcd.dtype, non_blocking=True).view(pcd.shape[0], -1, 3)
def sample_random_points(pcd: torch.Tensor, pcd_t: torch.Tensor = None, K: int = 500):
    """Sample K uniform random points inside the cloud's slightly padded bounding box.

    The box is the per-batch min/max of pcd expanded by 0.033 on every side.
    pcd_t is accepted but unused (legacy API).
    """
    lo = pcd.min(dim=-2)[0] - 0.033  # B, 3
    hi = pcd.max(dim=-2)[0] + 0.033  # B, 3
    bounds = torch.stack([lo, hi], dim=-2)  # B, 2, 3
    u = torch.rand(*pcd.shape[:-2], K, 3, device=pcd.device)
    return u * (bounds[..., 1:, :] - bounds[..., :1, :]) + bounds[..., :1, :]
def sample_filter_random_points(pcd: torch.Tensor, pcd_t: torch.Tensor = None, K: int = 500, update_radius=0.05, filter_K=10):
    """Sample K random points in the cloud's bbox, then keep and snap only those near the cloud."""
    candidates = sample_random_points(pcd, pcd_t, K)  # ugly interface
    return filter_points(candidates, pcd, update_radius, filter_K)
def get_pytorch3d_ndc_K(K: torch.Tensor, H: int, W: int):
    """Convert a 3x3 pinhole intrinsic matrix to pytorch3d's 4x4 NDC projection matrix.

    The input K is not modified; a padded copy is edited in place.
    Scaling uses min(H, W) as pytorch3d normalizes by the shorter side.
    """
    M = min(H, W)
    # Pad to 4x4 (a fresh tensor, so the caller's K stays untouched)
    out = torch.cat([K, torch.zeros_like(K[..., -1:, :])], dim=-2)
    out = torch.cat([out, torch.zeros_like(out[..., :, -1:])], dim=-1)
    # HACK: pytorch3d magic — move the perspective-divide term to the last row
    out[..., 3, 2] = 1
    out[..., 2, 2] = 0
    out[..., 2, 3] = 1
    # Zero out skew and the remaining third-row terms
    out[..., 0, 1] = 0
    out[..., 1, 0] = 0
    out[..., 2, 0] = 0
    out[..., 2, 1] = 0
    # Rescale focal lengths and recenter the principal point into NDC
    scale = 2.0 / M
    out[..., 0, 0] *= scale  # fx
    out[..., 1, 1] *= scale  # fy
    out[..., 0, 2] = (W / 2.0 - out[..., 0, 2]) * scale  # px
    out[..., 1, 2] = (H / 2.0 - out[..., 1, 2]) * scale  # py
    return out
def expand_points_features(render_scale: Union[float, int], pcd_old: torch.Tensor, ind_old: torch.Tensor, radius: float):
    """Resample the point cloud (with features) to a new size.

    Args:
        render_scale: a float multiplier on the current point count, or an int
            absolute target count.
        pcd_old: existing points; ind_old: their features; radius: blend radius.
    """
    n_points = pcd_old.shape[-2]
    if isinstance(render_scale, int):
        # FIX: an int is an absolute target count — use it directly instead of the
        # previous int -> float-ratio -> int round-trip, which could drift by one
        # point due to floating-point rounding (e.g. int(3 / 7 * 7) == 2).
        target_n_points = render_scale
    else:
        target_n_points = int(render_scale * n_points)
    return generate_points_features(target_n_points, pcd_old, ind_old, radius)
def expand_points(render_scale: Union[float, int], pcd_old: torch.Tensor, radius: float):
    """Resample the point cloud to a new size.

    Args:
        render_scale: a float multiplier on the current point count, or an int
            absolute target count.
        pcd_old: existing points; radius: blend radius for the snap step.
    """
    n_points = pcd_old.shape[-2]
    if isinstance(render_scale, int):
        # FIX: an int is an absolute target count — use it directly instead of the
        # previous int -> float-ratio -> int round-trip, which could drift by one
        # point due to floating-point rounding (e.g. int(3 / 7 * 7) == 2).
        target_n_points = render_scale
    else:
        target_n_points = int(render_scale * n_points)
    return generate_points(target_n_points, pcd_old, radius)
def generate_points_features(n_points: int, pcd_old: torch.Tensor, ind_old: torch.Tensor, radius: float):
    """Grow the cloud: sample random bbox candidates, then blend positions and features from the old cloud."""
    sampled = sample_random_points(pcd_old, K=n_points)
    return update_points_features(sampled, pcd_old, ind_old, radius)
def generate_points(n_points: int, pcd_old: torch.Tensor, radius: float):
    """Grow the cloud: sample random bbox candidates, then snap them onto the old cloud."""
    sampled = sample_random_points(pcd_old, K=n_points)
    return update_points(sampled, pcd_old, radius)
def surface_points(pcd: torch.Tensor, pcd_t: torch.Tensor = None, radius: float = 0.05, K: int = 500, n_points: int = 16384):
    """Keep the n_points points whose positions deviate most from their local neighborhood mean.

    Interior points sit close to the centroid of their ball-query neighborhood,
    surface points do not — so sorting by deviation retains the surface.
    pcd_t is accepted but unused (legacy API).
    """
    # Try to retain the surface points
    from pytorch3d.ops import knn_points, ball_query
    # 1. Perform a ball query (with a large upper limit number of points)
    # 2. Sort all points based on the number of neighbors
    close = ball_query(pcd, pcd, radius=radius, return_nn=False, K=K)  # B, S, K
    dists, idx = close.dists, close.idx
    dists = torch.where(idx == -1, torch.inf, 0.1)  # B, S, K, equal weight, just for filtering
    idx = torch.where(idx == -1, 0, idx)  # B, S, K
    # Find mean points (uniform weights over actual neighbors)
    B, S, C = pcd.shape
    weights = weight_function(dists, radius)[..., None]  # B, S, K, 1
    pcd_new = multi_gather(pcd, idx.view(B, S * K)).view(B, S, K, -1)
    pcd_new = (pcd_new * weights).sum(dim=-2)  # B, S, 3
    # Find mean deviation: distance of each point to its neighborhood centroid
    dists = (pcd_new - pcd).norm(dim=-1)  # B, S,
    valid = (dists).topk(n_points, dim=-1, sorted=False)[1]  # B, K
    pcd_new = multi_gather(pcd, valid, dim=-2)
    return pcd_new
def surface_points_features(pcd_old: torch.Tensor, ind_old: torch.Tensor, radius: float = 0.05, K: int = 500, n_points: int = 16384):
    """Like `surface_points`, but also gathers the features of the retained points.

    Interior points sit close to the centroid of their ball-query neighborhood,
    surface points do not — so sorting by deviation retains the surface.
    Returns (pcd_new, ind_new) with the top-n_points most-deviating points.
    """
    # Try to retain the surface points
    from pytorch3d.ops import knn_points, ball_query
    # 1. Perform a ball query (with a large upper limit number of points)
    # 2. Sort all points based on the number of neighbors
    close = ball_query(pcd_old, pcd_old, radius=radius, return_nn=False, K=K)  # B, S, K
    dists, idx = close.dists, close.idx
    dists = torch.where(idx == -1, torch.inf, 0.1)  # B, S, K, equal weight, just for filtering
    idx = torch.where(idx == -1, 0, idx)  # B, S, K
    # Find mean points (uniform weights over actual neighbors)
    B, S, C = pcd_old.shape
    weights = weight_function(dists, radius)[..., None]  # B, S, K, 1
    pcd_new = multi_gather(pcd_old, idx.view(B, S * K)).view(B, S, K, -1)
    pcd_new = (pcd_new * weights).sum(dim=-2)  # B, S, 3
    # Find mean deviation: distance of each point to its neighborhood centroid
    dists = (pcd_new - pcd_old).norm(dim=-1)  # B, S,
    valid = (dists).topk(n_points, dim=-1, sorted=False)[1]  # B, K
    pcd_new = multi_gather(pcd_old, valid, dim=-2)
    ind_new = multi_gather(ind_old, valid, dim=-2)
    return pcd_new, ind_new
def filter_points(pcd_new: torch.Tensor, pcd_old: torch.Tensor, radius: float = 0.05, K: int = 10, fill_ratio: float = 0.1):
    """Discard candidate points with too few old-cloud neighbors, then snap the
    survivors to a distance-weighted blend of their neighbors.

    A candidate survives if more than fill_ratio of its K ball-query slots found
    a neighbor within radius. Note this will lead to shrinking (weighted means
    pull candidates toward the old cloud).
    """
    from pytorch3d.ops import knn_points, ball_query
    close = ball_query(pcd_new, pcd_old, radius=radius, return_nn=False, K=K)  # B, S, K
    dists, idx = close.dists, close.idx
    # !: BATCH
    good = (idx != -1).sum(dim=-1) / K > fill_ratio  # fraction of filled neighbor slots
    valid = good[0].nonzero()[None]  # B, S -> B, V # MARK: SYNC
    idx = multi_gather(idx, valid, dim=-2)
    dists = multi_gather(dists, valid, dim=-2)
    pcd_new = multi_gather(pcd_new, valid, dim=-2)
    dists = torch.where(idx == -1, torch.inf, dists)  # B, S, K — inf distance zeroes the weight
    idx = torch.where(idx == -1, 0, idx)  # B, S, K — 0 is a safe gather index
    B, S, C = pcd_new.shape
    B, N, C = pcd_old.shape
    pcd_new = multi_gather(pcd_old, idx.view(B, S * K)).view(B, S, K, -1)  # B, S, K, 3
    weights = weight_function(dists, radius)[..., None]  # B, S, K, 1
    pcd_new = (pcd_new * weights).sum(dim=-2)  # weighted blend of old neighbors
    return pcd_new
def filter_points_features(pcd_new: torch.Tensor, pcd_old: torch.Tensor, ind_old: torch.Tensor, radius: float = 0.05, K: int = 10, fill_ratio: float = 0.1):
    """Like `filter_points`, but also blends features (ind_old) for the survivors.

    A candidate survives if more than fill_ratio of its K ball-query slots found
    a neighbor within radius. Note this will lead to shrinking (weighted means
    pull candidates toward the old cloud). Returns (pcd_new, ind_new).
    """
    from pytorch3d.ops import knn_points, ball_query
    close = ball_query(pcd_new, pcd_old, radius=radius, return_nn=False, K=K)  # B, S, K
    dists, idx = close.dists, close.idx
    # !: BATCH
    good = (idx != -1).sum(dim=-1) / K > fill_ratio  # fraction of filled neighbor slots
    valid = good[0].nonzero()[None]  # B, S -> B, V # MARK: SYNC
    idx = multi_gather(idx, valid, dim=-2)
    dists = multi_gather(dists, valid, dim=-2)
    pcd_new = multi_gather(pcd_new, valid, dim=-2)
    dists = torch.where(idx == -1, torch.inf, dists)  # B, S, K — inf distance zeroes the weight
    idx = torch.where(idx == -1, 0, idx)  # B, S, K — 0 is a safe gather index
    B, S, C = pcd_new.shape
    B, N, C = pcd_old.shape
    pcd_new = multi_gather(pcd_old, idx.view(B, S * K)).view(B, S, K, -1)  # B, S, K, 3
    ind_new = multi_gather(ind_old, idx.view(B, S * K)).view(B, S, K, -1)  # B, S, K, C
    weights = weight_function(dists, radius)[..., None]  # B, S, K, 1
    pcd_new = (pcd_new * weights).sum(dim=-2)
    ind_new = (ind_new * weights).sum(dim=-2)
    # pcd_new = pcd_new.mean(dim=-2)
    # ind_new = ind_new.mean(dim=-2)
    return pcd_new, ind_new
def update_points_features(pcd_new: torch.Tensor, pcd_old: torch.Tensor, ind_old: torch.Tensor, radius: float = 0.05, K: int = 5):
    """Snap each new point (and its features) to a distance-weighted KNN blend of the old cloud.

    Note this will lead to shrinking (weighted means pull points toward the cloud).
    Returns (pcd_new, ind_new).
    """
    from pytorch3d.ops import knn_points, ball_query
    # close = ball_query(pcd_new, pcd_old, radius=radius, return_nn=False, K=K) # B, S, K
    nbrs = knn_points(pcd_new, pcd_old, return_sorted=False, return_nn=False, K=K)  # B, S, K
    d2, nn_idx = nbrs.dists, nbrs.idx
    B, S, _ = pcd_new.shape
    flat_idx = nn_idx.view(B, S * K)
    gathered_pcd = multi_gather(pcd_old, flat_idx).view(B, S, K, -1)  # B, S, K, 3
    gathered_ind = multi_gather(ind_old, flat_idx).view(B, S, K, -1)  # B, S, K, C
    w = weight_function(d2, radius)[..., None]  # B, S, K, 1
    return (gathered_pcd * w).sum(dim=-2), (gathered_ind * w).sum(dim=-2)
def update_points(pcd_new: torch.Tensor, pcd_old: torch.Tensor, radius: float = 0.05, K: int = 5):
    """Snap each new point to a distance-weighted KNN blend of the old cloud.

    Note this will lead to shrinking (weighted means pull points toward the cloud).
    """
    from pytorch3d.ops import knn_points, ball_query
    # close = ball_query(pcd_new, pcd_old, radius=radius, return_nn=False, K=K) # B, S, K
    nbrs = knn_points(pcd_new, pcd_old, return_sorted=False, return_nn=False, K=K)  # B, S, K
    d2, nn_idx = nbrs.dists, nbrs.idx
    B, S, _ = pcd_new.shape
    gathered = multi_gather(pcd_old, nn_idx.view(B, S * K)).view(B, S, K, -1)  # B, S, K, 3
    w = weight_function(d2, radius)[..., None]  # B, S, K, 1
    return (gathered * w).sum(dim=-2)
def update_features(pcd_new: torch.Tensor, pcd_old: torch.Tensor, ind_old: torch.Tensor, radius: float = 0.05, K: int = 5):
    """Resample features for new points as a distance-weighted KNN blend from the old cloud.

    Note this will lead to shrinking (weighted means pull values toward the cloud).
    """
    from pytorch3d.ops import knn_points, ball_query
    # close = ball_query(pcd_new, pcd_old, radius=radius, return_nn=False, K=K) # B, S, K
    nbrs = knn_points(pcd_new, pcd_old, return_sorted=False, return_nn=False, K=K)  # B, S, K
    d2, nn_idx = nbrs.dists, nbrs.idx
    B, S, _ = pcd_new.shape
    gathered = multi_gather(ind_old, nn_idx.view(B, S * K)).view(B, S, K, -1)  # B, S, K, C
    w = weight_function(d2, radius)[..., None]  # B, S, K, 1
    return (gathered * w).sum(dim=-2)
def weight_function(d2: torch.Tensor, radius: float = 0.05, delta: float = 0.001):
    """Gaussian radial weights over squared distances, normalized to sum to one
    along the last dimension (radius-weighted function from Structured Local
    Radiance Fields). `delta` is accepted but unused.
    """
    w = torch.exp(-d2 / (2 * radius ** 2)).clip(0)  # B, S, K
    return normalize_sum(w)
|
evocodebench_data_82
|
from __future__ import annotations
from typing import TYPE_CHECKING
if TYPE_CHECKING:
from easyvolcap.utils.egl_utils import create_opengl_context, eglContextManager # must be imported before OpenGL.GL
from easyvolcap.runners.volumetric_video_viewer import VolumetricVideoViewer
import os
import sys
import glm
import torch
import ctypes
import numpy as np
from torch import nn
from enum import Enum, auto
from types import MethodType
from typing import Dict, Union, List
from glm import vec2, vec3, vec4, mat3, mat4, mat4x3, mat2x3 # This is actually highly optimized
from easyvolcap.utils.console_utils import *
from easyvolcap.utils.base_utils import dotdict
from easyvolcap.utils.viewer_utils import Camera
from easyvolcap.utils.bound_utils import get_bounds
from easyvolcap.utils.chunk_utils import multi_gather
from easyvolcap.utils.color_utils import cm_cpu_store
from easyvolcap.utils.ray_utils import create_meshgrid
from easyvolcap.utils.depth_utils import depth_curve_fn
from easyvolcap.utils.gaussian_utils import rgb2sh0, sh02rgb
from easyvolcap.utils.nerf_utils import volume_rendering, raw2alpha
from easyvolcap.utils.data_utils import load_pts, load_mesh, to_cuda, add_batch
from easyvolcap.utils.cuda_utils import CHECK_CUDART_ERROR, FORMAT_CUDART_ERROR
from easyvolcap.utils.net_utils import typed, torch_dtype_to_numpy_dtype, load_pretrained
from easyvolcap.utils.fcds_utils import prepare_feedback_transform, get_opencv_camera_params
# fmt: off
# Environment variable messaging
# Need to export EGL_DEVICE_ID before trying to import egl
# And we need to consider the case when we're performing distributed training
# from easyvolcap.engine import cfg, args # FIXME: GLOBAL IMPORTS
# Only set up an EGL context when we are NOT running the interactive GUI viewer
# (the viewer manages its own GL context); detected via the already-imported engine module.
if 'easyvolcap.engine' in sys.modules and \
    (sys.modules['easyvolcap.engine'].args.type != 'gui' or \
     sys.modules['easyvolcap.engine'].cfg.viewer_cfg.type != 'VolumetricVideoViewer'): # FIXME: GLOBAL VARIABLES
    try:
        from easyvolcap.utils.egl_utils import create_opengl_context, eglContextManager
    except Exception as e:
        # EGL unavailable: warn and clear PYOPENGL_PLATFORM so PyOpenGL falls back to its default
        log(yellow(f'Could not import EGL related modules. {type(e).__name__}: {e}'))
        os.environ['PYOPENGL_PLATFORM'] = ''
def is_wsl2():
    """Detect whether we are running under WSL2.

    Returns a truthy value (the WSL distro name) when inside WSL2, otherwise
    False/None — callers only rely on truthiness.
    """
    has_wsl_conf = exists("/etc/wsl.conf")
    return has_wsl_conf and os.environ.get("WSL_DISTRO_NAME")
# Under WSL2, force the GLX backend — presumably the default backend is broken there; TODO confirm
if is_wsl2():
    os.environ['PYOPENGL_PLATFORM'] = 'glx'
import OpenGL.GL as gl
try:
    from OpenGL.GL import shaders
except Exception as e:
    # Older PyOpenGL releases fail this import; point the user at the fixed upstream
    print(f'WARNING: OpenGL shaders import error encountered, please install the latest PyOpenGL from github using:')
    print(f'pip install git+https://github.com/mcfletch/pyopengl')
    raise e
# fmt: on
def linearize_depth(d, n: float, f: float):
    """Map a [0, 1] depth-buffer value to linear view-space depth.

    Args:
        d: depth-buffer value(s) in [0, 1]
        n: near plane distance
        f: far plane distance
    """
    ndc = d * 2 - 1  # 0-1 -> -1,1 (ndc)
    return (2.0 * n * f) / (f + n - ndc * (f - n))  # ndc -> view
def common_opengl_options():
    """Apply the baseline OpenGL state shared by the renderers in this module.

    Sets program point size, back-face culling, depth testing (LEQUAL),
    scissor testing and point-sprite rendering; clears color and depth buffers.
    Deprecated enables (ALPHA_TEST, POINT_SPRITE) are wrapped in try/except
    since core profiles raise GLError for them.
    """
    # Use program point size
    gl.glEnable(gl.GL_PROGRAM_POINT_SIZE)
    # Performs face culling
    gl.glEnable(gl.GL_CULL_FACE)
    gl.glCullFace(gl.GL_BACK)
    # Performs alpha trans testing
    # gl.glEnable(gl.GL_ALPHA_TEST)
    try: gl.glEnable(gl.GL_ALPHA_TEST)
    except gl.GLError as e: pass
    # Performs z-buffer testing
    gl.glEnable(gl.GL_DEPTH_TEST)
    # gl.glDepthMask(gl.GL_TRUE)
    gl.glDepthFunc(gl.GL_LEQUAL)
    # gl.glDepthRange(-1.0, 1.0)
    gl.glClear(gl.GL_COLOR_BUFFER_BIT | gl.GL_DEPTH_BUFFER_BIT)
    # Enable some masking tests
    gl.glEnable(gl.GL_SCISSOR_TEST)
    # Enable this to correctly render points
    # https://community.khronos.org/t/gl-point-sprite-gone-in-3-2/59310
    # gl.glEnable(gl.GL_POINT_SPRITE) # MARK: ONLY SPRITE IS WORKING FOR NOW
    try: gl.glEnable(gl.GL_POINT_SPRITE) # MARK: ONLY SPRITE IS WORKING FOR NOW
    except gl.GLError as e: pass
    # gl.glEnable(gl.GL_POINT_SMOOTH) # MARK: ONLY SPRITE IS WORKING FOR NOW
    # # Configure how we store the pixels in memory for our subsequent reading of the FBO to store the rendering into memory.
    # # The second argument specifies that our pixels will be in bytes.
    # gl.glPixelStorei(gl.GL_PACK_ALIGNMENT, 1)
def load_shader_source(file: str = 'splat.frag'):
    """Load GLSL shader source text.

    Resolution order: the path as given, then the module's `shaders/` directory,
    then the same name with the `shaders/` prefix stripped. Raises RuntimeError
    when none of the candidates exist.
    """
    candidate = file
    if not exists(candidate):
        candidate = f'{dirname(__file__)}/shaders/{candidate}'
    if not exists(candidate):
        candidate = candidate.replace('shaders/', '')
    if not exists(candidate):
        raise RuntimeError(f'Shader file: {candidate} does not exist')
    with open(candidate, 'r') as f:
        return f.read()
def use_gl_program(program: Union[shaders.ShaderProgram, dict]):
    """Bind an OpenGL shader program, compiling it first when raw sources are supplied.

    Args:
        program: a compiled ShaderProgram, or a dict with VERT_SHADER_SRC and
            FRAG_SHADER_SRC source strings to compile on the fly.
    """
    if isinstance(program, dict):
        # Recompile the program if the user supplied sources
        sources = dotdict(program)
        vert = shaders.compileShader(sources.VERT_SHADER_SRC, gl.GL_VERTEX_SHADER)
        frag = shaders.compileShader(sources.FRAG_SHADER_SRC, gl.GL_FRAGMENT_SHADER)
        program = shaders.compileProgram(vert, frag)
    return gl.glUseProgram(program)
class Mesh:
    """Debug-visualization mesh rendered through raw OpenGL buffers.

    Holds vertex/face tensors on `store_device`, interleaves position,
    color and normal into one VBO, and draws with either the point or the
    mesh shader program depending on `render_type`.
    """

    class RenderType(Enum):
        # For LINES/TRIS/QUADS the enum value doubles as the number of
        # indices per face (see `face_size`); POINTS and STRIPS are drawn
        # with glDrawArrays instead of the element buffer.
        POINTS = 1
        LINES = 2
        TRIS = 3
        QUADS = 4  # TODO: Support quad loading
        STRIPS = 5

    # Helper class to render a mesh on opengl
    # This implementation should only be used for debug visualization
    # Since no differentiable mechanism will be added
    # We recommend using nvdiffrast and pytorch3d's point renderer directly if you will to optimize these structures directly
    def __init__(self,
                 verts: torch.Tensor = torch.tensor([[0, 0, 0], [0, 1, 0], [0, 0, 1]]),  # need to call update after update
                 faces: torch.Tensor = torch.tensor([[0, 1, 2]]),  # need to call update after update
                 colors: torch.Tensor = None,
                 normals: torch.Tensor = None,
                 scalars: dotdict[str, torch.Tensor] = dotdict(),  # NOTE(review): mutable default, shared across calls — only read here, but confirm
                 render_type: RenderType = RenderType.TRIS,
                 # Misc info
                 name: str = 'mesh',
                 filename: str = '',
                 visible: bool = True,
                 # Render options
                 shade_flat: bool = False,  # smooth shading
                 point_radius: float = 0.015,
                 render_normal: bool = False,
                 # Storage options
                 store_device: str = 'cpu',
                 compute_device: str = 'cuda',
                 vert_sizes=[3, 3, 3],  # pos + color + norm
                 # Init options
                 est_normal_thresh: int = 100000,
                 # Ignore unused input
                 **kwargs,
                 ) -> None:
        super().__init__()
        self.name = name
        self.visible = visible
        self.render_type = render_type
        self.shade_flat = shade_flat
        self.point_radius = point_radius
        self.render_normal = render_normal
        self.store_device = store_device
        self.compute_device = compute_device
        self.vert_sizes = vert_sizes
        self.est_normal_thresh = est_normal_thresh
        # Uniform and program
        self.compile_shaders()
        self.uniforms = dotdict()  # uniform values
        # Before initialization
        self.max_verts = 0
        self.max_faces = 0
        # OpenGL data: either load from disk or adopt the supplied tensors
        if filename: self.load_from_file(filename)
        else: self.load_from_data(verts, faces, colors, normals, scalars)

    def compile_shaders(self):
        """Compile both the mesh and point shader programs from disk sources."""
        try:
            self.mesh_program = shaders.compileProgram(
                shaders.compileShader(load_shader_source('mesh.vert'), gl.GL_VERTEX_SHADER),
                shaders.compileShader(load_shader_source('mesh.frag'), gl.GL_FRAGMENT_SHADER)
            )
            self.point_program = shaders.compileProgram(
                shaders.compileShader(load_shader_source('point.vert'), gl.GL_VERTEX_SHADER),
                shaders.compileShader(load_shader_source('point.frag'), gl.GL_FRAGMENT_SHADER)
            )
        except Exception as e:
            # Unescape the GLSL compiler log so newlines print properly
            print(str(e).encode('utf-8').decode('unicode_escape'))
            raise e

    @property
    def n_verts_bytes(self) -> int:
        # Size of the interleaved vertex buffer (pos + color + norm) in bytes
        return len(self.verts) * self.vert_size * self.verts.element_size()

    @property
    def n_faces_bytes(self) -> int:
        # Size of the element (index) buffer in bytes
        return len(self.faces) * self.face_size * self.faces.element_size()

    @property
    def verts_data(self):  # a heavy copy operation
        # Interleave attributes into one flat float32 C-contiguous array for glBufferSubData
        # NOTE(review): assumes tensors live on CPU (`.numpy()` would fail on CUDA) — confirm store_device
        verts = torch.cat([self.verts, self.colors, self.normals], dim=-1).ravel().numpy()  # MARK: Maybe sync
        verts = np.asarray(verts, dtype=np.float32, order='C')
        return verts

    @property
    def faces_data(self):  # a heavy copy operation
        verts = None  # (no-op placeholder removed)
        faces = self.faces.ravel().numpy()  # N, 3
        faces = np.asarray(faces, dtype=np.uint32, order='C')
        return faces

    @property
    def face_size(self) -> int:
        # Number of indices per primitive, encoded in the RenderType value
        return self.render_type.value

    @property
    def vert_size(self) -> int:
        # Total floats per interleaved vertex
        return sum(self.vert_sizes)

    def load_from_file(self, filename: str = 'assets/meshes/bunny.ply'):
        """Load geometry from disk and upload it to the GL buffers."""
        verts, faces, colors, normals, scalars = self.load_data_from_file(filename)
        self.load_from_data(verts, faces, colors, normals, scalars)

    def load_data_from_file(self, filename: str = 'assets/meshes/bunny.ply'):
        """Read mesh or point-cloud data from `filename`; sets `render_type` accordingly."""
        self.name = os.path.split(filename)[-1]
        verts, faces, colors, normals, scalars = None, None, None, None, None
        verts, faces = load_mesh(filename, device=self.store_device)
        if not len(faces):
            # No faces on disk: treat as a point cloud instead
            verts, colors, normals, scalars = load_pts(filename)
            self.render_type = Mesh.RenderType.POINTS
        else:
            self.render_type = Mesh.RenderType(faces.shape[-1])  # use value
        return verts, faces, colors, normals, scalars

    def load_from_data(self, verts: torch.Tensor, faces: torch.Tensor, colors: torch.Tensor = None, normals: torch.Tensor = None, scalars: dotdict[str, torch.Tensor] = dotdict()):
        """Adopt in-memory tensors, filling in colors/normals when missing, then update GL buffers."""
        # Data type conversion
        verts = torch.as_tensor(verts)  # convert to tensor if input is of other types
        if verts.dtype == torch.float32:
            pass  # supports this for now
        elif verts.dtype == torch.float16:
            pass  # supports this for now
        else:
            verts = verts.type(torch.float)  # convert to float32 if input is of higher precision
        gl_dtype = gl.GL_FLOAT if verts.dtype == torch.float else gl.GL_HALF_FLOAT
        self.vert_gl_types = [gl_dtype] * len(self.vert_sizes)

        # Prepare main mesh data: vertices and faces
        self.verts = torch.as_tensor(verts, device=self.store_device)
        self.faces = torch.as_tensor(faces, device=self.store_device, dtype=torch.int32)  # NOTE: No uint32 support

        # Prepare colors and normals
        if colors is not None:
            self.colors = torch.as_tensor(colors, device=self.store_device, dtype=self.verts.dtype)
        else:
            # Default colors: position normalized into the bounding box (a cheap coordinate colormap)
            bounds = get_bounds(self.verts[None])[0]
            self.colors = (self.verts - bounds[0]) / (bounds[1] - bounds[0])
        if normals is not None:
            self.normals = torch.as_tensor(normals, device=self.store_device, dtype=self.verts.dtype)
        else:
            self.estimate_vertex_normals()

        # Prepare other scalars
        if scalars is not None:
            for k, v in scalars.items():
                setattr(self, k, torch.as_tensor(v, device=self.store_device, dtype=self.verts.dtype))  # is this ok?

        # Prepare OpenGL related buffer
        self.update_gl_buffers()

    def estimate_vertex_normals(self):
        """Estimate per-vertex normals with pytorch3d; falls back to using positions as normals."""
        def est_pcd_norms():
            if self.verts.dtype == torch.half:
                # pytorch3d does not handle half precision here; fall back to positions
                self.normals = self.verts
            else:
                from pytorch3d.structures import Pointclouds, Meshes
                pcd = Pointclouds([self.verts]).to(self.compute_device)
                self.normals = pcd.estimate_normals()[0].cpu().to(self.verts.dtype)  # no batch dim

        def est_tri_norms():
            if self.verts.dtype == torch.half:
                self.normals = self.verts
            else:
                from pytorch3d.structures import Pointclouds, Meshes
                mesh = Meshes([self.verts], [self.faces]).to(self.compute_device)
                self.normals = mesh.verts_normals_packed().cpu().to(self.verts.dtype)  # no batch dim

        if not len(self.verts) > self.est_normal_thresh:
            if self.render_type == Mesh.RenderType.TRIS: est_tri_norms()
            elif self.render_type == Mesh.RenderType.POINTS: est_pcd_norms()
            else:
                # log(yellow(f'Unsupported mesh type: {self.render_type} for normal estimation, skipping'))
                self.normals = self.verts
        else:
            # log(yellow(f'Number of points for mesh too large: {len(self.verts)} > {self.est_normal_thresh}, skipping normal estimation'))
            self.normals = self.verts

    def offscreen_render(self, eglctx: "eglContextManager", camera: Camera):
        """Render into the EGL offscreen context sized to the camera."""
        eglctx.resize(camera.W, camera.H)
        self.render(camera)

    def render(self, camera: Camera):
        """Draw the mesh with the program matching `render_type`; no-op when invisible."""
        if not self.visible: return

        # For point rendering
        if self.render_type == Mesh.RenderType.POINTS:
            gl.glUseProgram(self.point_program)
            self.use_gl_program(self.point_program)
        else:
            gl.glUseProgram(self.mesh_program)
            self.use_gl_program(self.mesh_program)
        self.upload_gl_uniforms(camera)
        gl.glBindVertexArray(self.vao)

        if self.render_type == Mesh.RenderType.POINTS:
            gl.glDrawArrays(gl.GL_POINTS, 0, len(self.verts))  # number of vertices
        elif self.render_type == Mesh.RenderType.LINES:
            gl.glBindBuffer(gl.GL_ELEMENT_ARRAY_BUFFER, self.ebo)
            gl.glDrawElements(gl.GL_LINES, len(self.faces) * self.face_size, gl.GL_UNSIGNED_INT, ctypes.c_void_p(0))  # number of indices
        elif self.render_type == Mesh.RenderType.TRIS:
            gl.glBindBuffer(gl.GL_ELEMENT_ARRAY_BUFFER, self.ebo)
            gl.glDrawElements(gl.GL_TRIANGLES, len(self.faces) * self.face_size, gl.GL_UNSIGNED_INT, ctypes.c_void_p(0))  # number of indices
        elif self.render_type == Mesh.RenderType.QUADS:
            gl.glBindBuffer(gl.GL_ELEMENT_ARRAY_BUFFER, self.ebo)
            gl.glDrawElements(gl.GL_QUADS, len(self.faces) * self.face_size, gl.GL_UNSIGNED_INT, ctypes.c_void_p(0))  # number of indices
        elif self.render_type == Mesh.RenderType.STRIPS:
            gl.glDrawArrays(gl.GL_TRIANGLE_STRIP, 0, len(self.verts))
        else:
            raise NotImplementedError

        gl.glBindVertexArray(0)

    def use_gl_program(self, program: shaders.ShaderProgram):
        """Activate `program` and cache the uniform locations this class uploads."""
        use_gl_program(program)
        self.uniforms.shade_flat = gl.glGetUniformLocation(program, "shade_flat")
        self.uniforms.point_radius = gl.glGetUniformLocation(program, "point_radius")
        self.uniforms.render_normal = gl.glGetUniformLocation(program, "render_normal")
        self.uniforms.H = gl.glGetUniformLocation(program, "H")
        self.uniforms.W = gl.glGetUniformLocation(program, "W")
        self.uniforms.n = gl.glGetUniformLocation(program, "n")
        self.uniforms.f = gl.glGetUniformLocation(program, "f")
        self.uniforms.P = gl.glGetUniformLocation(program, "P")
        self.uniforms.K = gl.glGetUniformLocation(program, "K")
        self.uniforms.V = gl.glGetUniformLocation(program, "V")
        self.uniforms.M = gl.glGetUniformLocation(program, "M")

    def upload_gl_uniforms(self, camera: Camera):
        """Upload render options and camera matrices to the currently bound program."""
        K = camera.gl_ixt  # hold the reference
        V = camera.gl_ext  # hold the reference
        M = glm.identity(mat4)
        P = K * V * M  # full object-to-clip transform

        gl.glUniform1i(self.uniforms.shade_flat, self.shade_flat)
        gl.glUniform1f(self.uniforms.point_radius, self.point_radius)
        gl.glUniform1i(self.uniforms.render_normal, self.render_normal)
        gl.glUniform1i(self.uniforms.H, camera.H)  # o2w
        gl.glUniform1i(self.uniforms.W, camera.W)  # o2w
        gl.glUniform1f(self.uniforms.n, camera.n)  # o2w
        gl.glUniform1f(self.uniforms.f, camera.f)  # o2w
        gl.glUniformMatrix4fv(self.uniforms.P, 1, gl.GL_FALSE, glm.value_ptr(P))  # o2clip
        gl.glUniformMatrix4fv(self.uniforms.K, 1, gl.GL_FALSE, glm.value_ptr(K))  # c2clip
        gl.glUniformMatrix4fv(self.uniforms.V, 1, gl.GL_FALSE, glm.value_ptr(V))  # w2c
        gl.glUniformMatrix4fv(self.uniforms.M, 1, gl.GL_FALSE, glm.value_ptr(M))  # o2w

    def update_gl_buffers(self):
        """Grow GL buffers if needed and re-upload the current vertex/face data."""
        # Might be overwritten
        self.resize_buffers(len(self.verts) if hasattr(self, 'verts') else 0,
                            len(self.faces) if hasattr(self, 'faces') else 0)  # maybe repeated
        if hasattr(self, 'verts'):
            gl.glBindBuffer(gl.GL_ARRAY_BUFFER, self.vbo)
            gl.glBufferSubData(gl.GL_ARRAY_BUFFER, 0, self.n_verts_bytes, self.verts_data)  # hold the reference
        if hasattr(self, 'faces'):
            gl.glBindBuffer(gl.GL_ELEMENT_ARRAY_BUFFER, self.ebo)
            gl.glBufferSubData(gl.GL_ELEMENT_ARRAY_BUFFER, 0, self.n_faces_bytes, self.faces_data)

    def resize_buffers(self, v: int = 0, f: int = 0):
        # Only reallocate when the requested counts exceed the current capacity
        if v > self.max_verts or f > self.max_faces:
            if v > self.max_verts: self.max_verts = v
            if f > self.max_faces: self.max_faces = f
            self.init_gl_buffers(v, f)

    def init_gl_buffers(self, v: int = 0, f: int = 0):
        """(Re)create the VAO/VBO/EBO with capacity for `v` vertices and `f` faces."""
        # This will only init the corresponding buffer object
        n_verts_bytes = v * self.vert_size * self.verts.element_size() if v > 0 else self.n_verts_bytes
        n_faces_bytes = f * self.face_size * self.faces.element_size() if f > 0 else self.n_faces_bytes

        # Housekeeping: release previous GL objects before regenerating
        if hasattr(self, 'vao'):
            gl.glDeleteVertexArrays(1, [self.vao])
            gl.glDeleteBuffers(2, [self.vbo, self.ebo])

        self.vao = gl.glGenVertexArrays(1)
        self.vbo = gl.glGenBuffers(1)
        self.ebo = gl.glGenBuffers(1)

        gl.glBindVertexArray(self.vao)
        gl.glBindBuffer(gl.GL_ARRAY_BUFFER, self.vbo)
        gl.glBufferData(gl.GL_ARRAY_BUFFER, n_verts_bytes, ctypes.c_void_p(0), gl.GL_DYNAMIC_DRAW)  # NOTE: Using pointers here won't work
        # https://stackoverflow.com/questions/67195932/pyopengl-cannot-render-any-vao

        # Declare one attribute per interleaved component (pos/color/norm/...)
        cumsum = 0
        for i, (s, t) in enumerate(zip(self.vert_sizes, self.vert_gl_types)):
            gl.glVertexAttribPointer(i, s, t, gl.GL_FALSE, self.vert_size * self.verts.element_size(), ctypes.c_void_p(cumsum * self.verts.element_size()))  # we use 32 bit float
            gl.glEnableVertexAttribArray(i)
            cumsum += s

        if n_faces_bytes > 0:
            # Some implementation has no faces, we dangerously ignore ebo here, assuming they will never be used
            gl.glBindBuffer(gl.GL_ELEMENT_ARRAY_BUFFER, self.ebo)
            gl.glBufferData(gl.GL_ELEMENT_ARRAY_BUFFER, n_faces_bytes, ctypes.c_void_p(0), gl.GL_DYNAMIC_DRAW)
        gl.glBindVertexArray(0)

    def render_imgui(mesh, viewer: 'VolumetricVideoViewer', batch: dotdict):
        """Draw the per-mesh imgui controls. NOTE: the first parameter is the instance (named `mesh`, not `self`)."""
        from imgui_bundle import imgui
        from easyvolcap.utils.imgui_utils import push_button_color, pop_button_color

        i = batch.i
        will_delete = batch.will_delete
        slider_width = batch.slider_width

        imgui.push_item_width(slider_width * 0.5)
        mesh.name = imgui.input_text(f'Mesh name##{i}', mesh.name)[1]

        if imgui.begin_combo(f'Mesh type##{i}', mesh.render_type.name):
            for t in Mesh.RenderType:
                if imgui.selectable(t.name, mesh.render_type == t)[1]:
                    mesh.render_type = t  # construct enum from name
                if mesh.render_type == t:
                    imgui.set_item_default_focus()
            imgui.end_combo()
        imgui.pop_item_width()

        if hasattr(mesh, 'point_radius'):
            mesh.point_radius = imgui.slider_float(f'Point radius##{i}', mesh.point_radius, 0.0005, 3.0)[1]  # 0.1mm

        if hasattr(mesh, 'pts_per_pix'):
            mesh.pts_per_pix = imgui.slider_int('Point per pixel', mesh.pts_per_pix, 0, 60)[1]  # 0.1mm

        if hasattr(mesh, 'shade_flat'):
            push_button_color(0x55cc33ff if not mesh.shade_flat else 0x8855aaff)
            if imgui.button(f'Smooth##{i}' if not mesh.shade_flat else f' Flat ##{i}'):
                mesh.shade_flat = not mesh.shade_flat
            pop_button_color()

        if hasattr(mesh, 'render_normal'):
            imgui.same_line()
            push_button_color(0x55cc33ff if not mesh.render_normal else 0x8855aaff)
            if imgui.button(f'Color ##{i}' if not mesh.render_normal else f'Normal##{i}'):
                mesh.render_normal = not mesh.render_normal
            pop_button_color()

        if hasattr(mesh, 'visible'):
            imgui.same_line()
            push_button_color(0x55cc33ff if not mesh.visible else 0x8855aaff)
            if imgui.button(f'Show##{i}' if not mesh.visible else f'Hide##{i}'):
                mesh.visible = not mesh.visible
            pop_button_color()

        # Render the delete button
        imgui.same_line()
        push_button_color(0xff5533ff)
        if imgui.button(f'Delete##{i}'):
            will_delete.append(i)
        pop_button_color()
class Quad(Mesh):
    """A texture shared between CUDA (pytorch) and OpenGL.

    Content can reach the screen either by drawing a fullscreen triangle
    strip (`draw`) or by framebuffer blitting (`blit`).
    """
    # A shared texture for CUDA (pytorch) and OpenGL
    # Could be rendererd to screen using blitting or just drawing a quad
    def __init__(self,
                 H: int = 256, W: int = 256,
                 use_quad_draw: bool = True,
                 use_quad_cuda: bool = True,
                 compose: bool = False,
                 compose_power: float = 1.0,
                 ):  # the texture to blit
        self.use_quad_draw = use_quad_draw
        self.use_quad_cuda = use_quad_cuda
        self.vert_sizes = [3]  # only position
        self.vert_gl_types = [gl.GL_FLOAT]  # only position
        self.render_type = Mesh.RenderType.STRIPS  # remove side effects of settings _type
        self.max_verts, self.max_faces = 0, 0

        # Fullscreen quad in clip space as a 4-vertex triangle strip
        self.verts = torch.as_tensor([[-1., -1., 0.5],
                                      [1., -1., 0.5],
                                      [-1., 1., 0.5],
                                      [1., 1., 0.5],])
        self.update_gl_buffers()
        self.compile_shaders()

        self.max_H, self.max_W = H, W
        self.H, self.W = H, W
        self.compose = compose
        self.compose_power = compose_power

        self.init_texture()

    @property
    def n_faces_bytes(self): return 0  # quad is drawn without an element buffer

    def use_gl_program(self, program: shaders.ShaderProgram):
        super().use_gl_program(program)
        # Bind the sampler uniform to texture unit 0
        self.uniforms.tex = gl.glGetUniformLocation(program, 'tex')
        gl.glUseProgram(self.quad_program)  # use a different program
        gl.glUniform1i(self.uniforms.tex, 0)

    def compile_shaders(self):
        try:
            self.quad_program = shaders.compileProgram(
                shaders.compileShader(load_shader_source('quad.vert'), gl.GL_VERTEX_SHADER),
                shaders.compileShader(load_shader_source('quad.frag'), gl.GL_FRAGMENT_SHADER)
            )
        except Exception as e:
            print(str(e).encode('utf-8').decode('unicode_escape'))
            raise e

    def resize_textures(self, H: int, W: int):  # analogy to update_gl_buffers
        """Grow the backing texture (with 5% slack) when the requested size exceeds capacity."""
        self.H, self.W = H, W
        if self.H > self.max_H or self.W > self.max_W:  # max got updated
            self.max_H, self.max_W = max(int(self.H * 1.05), self.max_H), max(int(self.W * 1.05), self.max_W)
            self.init_texture()

    def init_texture(self):
        """(Re)create the GL texture, its FBO, and (optionally) the CUDA-GL registration."""
        if hasattr(self, 'cu_tex'):
            from cuda import cudart
            CHECK_CUDART_ERROR(cudart.cudaGraphicsUnregisterResource(self.cu_tex))

        if hasattr(self, 'fbo'):
            gl.glDeleteFramebuffers(1, [self.fbo])
            gl.glDeleteTextures(1, [self.tex])

        # Init the texture to be blit onto the screen
        self.tex = gl.glGenTextures(1)
        gl.glBindTexture(gl.GL_TEXTURE_2D, self.tex)
        gl.glTexImage2D(gl.GL_TEXTURE_2D, 0, gl.GL_RGBA8, self.max_W, self.max_H, 0, gl.GL_RGBA, gl.GL_UNSIGNED_BYTE, ctypes.c_void_p(0))
        gl.glTexParameteri(gl.GL_TEXTURE_2D, gl.GL_TEXTURE_MAG_FILTER, gl.GL_NEAREST)
        gl.glTexParameteri(gl.GL_TEXTURE_2D, gl.GL_TEXTURE_MIN_FILTER, gl.GL_NEAREST)

        # Init the framebuffer object if explicit blitting is used (slower than drawing quad)
        self.fbo = gl.glGenFramebuffers(1)
        old_fbo = gl.glGetIntegerv(gl.GL_FRAMEBUFFER_BINDING)
        gl.glBindFramebuffer(gl.GL_FRAMEBUFFER, self.fbo)
        gl.glFramebufferTexture2D(gl.GL_FRAMEBUFFER, gl.GL_COLOR_ATTACHMENT0, gl.GL_TEXTURE_2D, self.tex, 0)
        gl.glBindFramebuffer(gl.GL_FRAMEBUFFER, old_fbo)

        if self.use_quad_cuda:
            from cuda import cudart
            if self.compose:
                # Both reading and writing of this resource is required
                flags = cudart.cudaGraphicsRegisterFlags.cudaGraphicsRegisterFlagsNone
            else:
                flags = cudart.cudaGraphicsRegisterFlags.cudaGraphicsRegisterFlagsWriteDiscard
            try:
                self.cu_tex = CHECK_CUDART_ERROR(cudart.cudaGraphicsGLRegisterImage(self.tex, gl.GL_TEXTURE_2D, flags))
            except RuntimeError as e:
                # Interop unavailable (e.g. no CUDA device for this GL context): degrade to CPU upload
                log(red('Failed to initialize Quad with CUDA-GL interop, will use slow upload: '), e)
                self.use_quad_cuda = False

    def copy_to_texture(self, image: torch.Tensor, x: int = 0, y: int = 0, w: int = 0, h: int = 0):
        """Copy a CUDA tensor into the GL texture device-to-device; optionally alpha-compose with the current frame."""
        if not self.use_quad_cuda:
            self.upload_to_texture(image)
            return

        if not hasattr(self, 'cu_tex'):
            self.init_texture()

        # assert self.use_quad_cuda, "Need to enable cuda-opengl interop to copy from device to device, check creation of this Quad"
        w = w or self.W
        h = h or self.H
        if image.shape[-1] == 3:
            image = torch.cat([image, image.new_ones(image.shape[:-1] + (1,)) * 255], dim=-1)  # add alpha channel

        from cuda import cudart
        kind = cudart.cudaMemcpyKind.cudaMemcpyDeviceToDevice

        CHECK_CUDART_ERROR(cudart.cudaGraphicsMapResources(1, self.cu_tex, torch.cuda.current_stream().cuda_stream))
        cu_tex_arr = CHECK_CUDART_ERROR(cudart.cudaGraphicsSubResourceGetMappedArray(self.cu_tex, 0, 0))

        if self.compose:
            """
            Blit current framebuffer to this texture (self.tex)
            Read content of this texture into a cuda buffer
            Perform alpha blending based on the frame's alpha channel
            Copy the blended image back into the texture (self.tex)
            """
            old = gl.glGetInteger(gl.GL_DRAW_FRAMEBUFFER_BINDING)
            gl.glBindFramebuffer(gl.GL_DRAW_FRAMEBUFFER, self.fbo)  # read buffer defaults to 0
            gl.glBlitFramebuffer(x, y, w, h,
                                 x, y, w, h,
                                 gl.GL_COLOR_BUFFER_BIT, gl.GL_NEAREST)  # now self.tex contains the content of the already rendered frame
            gl.glBindFramebuffer(gl.GL_DRAW_FRAMEBUFFER, old)

            buffer = torch.empty_like(image)
            CHECK_CUDART_ERROR(cudart.cudaMemcpy2DFromArrayAsync(buffer.data_ptr(),  # dst
                                                                 w * 4 * buffer.element_size(),  # dpitch
                                                                 cu_tex_arr,  # src
                                                                 x * 4 * image.element_size(),  # wOffset
                                                                 y,  # hOffset
                                                                 w * 4 * buffer.element_size(),  # width Width of matrix transfer (columns in bytes)
                                                                 h,  # height
                                                                 kind,  # kind
                                                                 torch.cuda.current_stream().cuda_stream))  # stream

            # cv2.imwrite('image.png', image.flip(0).detach().cpu().numpy()[..., [2,1,0,3]])
            alpha = image[..., -1:] / 255
            image[..., :-1] = buffer[..., :-1] * (1 - alpha ** self.compose_power) + image[..., :-1] * alpha  # storing float into int
            image[..., -1:] = buffer[..., -1:] + image[..., -1:]
            image = image.clip(0, 255)

        CHECK_CUDART_ERROR(cudart.cudaMemcpy2DToArrayAsync(cu_tex_arr,
                                                           x * 4 * image.element_size(),
                                                           y,
                                                           image.data_ptr(),
                                                           w * 4 * image.element_size(),  # differently sized
                                                           w * 4 * image.element_size(),  # rgba, should do a composition first
                                                           h,
                                                           kind,
                                                           torch.cuda.current_stream().cuda_stream))
        CHECK_CUDART_ERROR(cudart.cudaGraphicsUnmapResources(1, self.cu_tex, torch.cuda.current_stream().cuda_stream))

    def upload_to_texture(self, ptr: np.ndarray, x: int = 0, y: int = 0, w: int = 0, h: int = 0):
        """Slow CPU path: upload a host array region into the texture via glTexSubImage2D."""
        w = w or self.W
        h = h or self.H
        if isinstance(ptr, torch.Tensor):
            ptr = ptr.detach().cpu().numpy()  # slow sync and copy operation # MARK: SYNC
        gl.glBindTexture(gl.GL_TEXTURE_2D, self.tex)
        gl.glTexSubImage2D(gl.GL_TEXTURE_2D, 0, x, y, w, h, gl.GL_RGBA, gl.GL_UNSIGNED_BYTE, ptr[y:h, x:w])  # to gpu, might slow down?

    @property
    def verts_data(self):  # a heavy copy operation
        verts = self.verts.ravel().detach().cpu().numpy()  # MARK: Maybe sync
        verts = np.asarray(verts, dtype=np.float32, order='C')
        return verts

    def render(self, camera: Camera = None):
        # Camera is unused: the quad covers the viewport in clip space
        self.draw()  # no uploading needed

    def draw(self, x: int = 0, y: int = 0, w: int = 0, h: int = 0):
        """
        Upload the texture instead of the camera
        This respects the OpenGL convension of lower left corners
        """
        if not self.use_quad_draw:
            self.blit(x, y, w, h)
            return

        w = w or self.W
        h = h or self.H
        _, _, W, H = gl.glGetIntegerv(gl.GL_VIEWPORT)
        gl.glViewport(x, y, w, h)
        gl.glScissor(x, y, w, h)  # only render in this small region of the viewport

        gl.glUseProgram(self.quad_program)  # use a different program
        gl.glActiveTexture(gl.GL_TEXTURE0)
        gl.glBindTexture(gl.GL_TEXTURE_2D, self.tex)

        gl.glBindVertexArray(self.vao)
        gl.glDrawArrays(gl.GL_TRIANGLE_STRIP, 0, len(self.verts))
        gl.glBindVertexArray(0)

        # Some house keepings
        gl.glViewport(0, 0, W, H)
        gl.glScissor(0, 0, W, H)

    def blit(self, x: int = 0, y: int = 0, w: int = 0, h: int = 0):
        """
        This respects the OpenGL convension of lower left corners
        """
        w = w or self.W
        h = h or self.H
        old = gl.glGetInteger(gl.GL_READ_FRAMEBUFFER_BINDING)
        gl.glBindFramebuffer(gl.GL_READ_FRAMEBUFFER, self.fbo)  # write buffer defaults to 0
        gl.glBlitFramebuffer(x, y, x + w, y + h,  # the height is flipped
                             x, y, x + w, y + h,  # the height is flipped
                             gl.GL_COLOR_BUFFER_BIT, gl.GL_NEAREST)
        gl.glBindFramebuffer(gl.GL_READ_FRAMEBUFFER, old)
class UQuad(Mesh):
    """
    Responsible for initializing textures with a single value
    or blitting a texture to a framebuffer (possibly better done with blit instead of quad drawing)
    Effectively clearing the texture for real, see: https://stackoverflow.com/questions/37335281/is-glcleargl-color-buffer-bit-preferred-before-a-whole-frame-buffer-overwritte
    """

    def __init__(self):
        self.n_blit_values = 3
        self.vert_sizes = [3]  # only position
        self.vert_gl_types = [gl.GL_FLOAT]  # only position
        self.max_verts, self.max_faces = 0, 0
        # Fullscreen quad in clip space as a 4-vertex triangle strip
        self.verts = torch.as_tensor([[-1., -1., 0.5],
                                      [1., -1., 0.5],
                                      [-1., 1., 0.5],
                                      [1., 1., 0.5],])
        self.compile_shaders()
        self.uniforms = dotdict()  # uniform values
        self.use_gl_programs(self.quad_program)
        self.update_gl_buffers()

    @property
    def n_faces_bytes(self): return 0  # drawn without an element buffer

    @property
    def verts_data(self):  # a heavy copy operation
        verts = self.verts.ravel().detach().cpu().numpy()  # MARK: Maybe sync
        verts = np.asarray(verts, dtype=np.float32, order='C')
        return verts

    def use_gl_programs(self, program: shaders.ShaderProgram):
        """Cache uniform locations of `program` and bind its samplers to units 0..n-1."""
        for i in range(self.n_blit_values):
            self.uniforms[f'value{i}'] = gl.glGetUniformLocation(program, f'value{i}')
        for i in range(self.n_blit_values):
            self.uniforms[f'use_tex{i}'] = gl.glGetUniformLocation(program, f'use_tex{i}')
        # FIX: previously used the never-assigned attribute `self.program`,
        # which raised AttributeError during __init__; use the parameter instead
        gl.glUseProgram(program)  # activate before uploading sampler bindings
        for i in range(self.n_blit_values):
            self.uniforms[f'tex{i}'] = gl.glGetUniformLocation(program, f'tex{i}')
            gl.glUniform1i(self.uniforms[f'tex{i}'], i)

    def upload_gl_uniforms(self, values: List[List[float]], use_texs: List[bool]):
        """Upload per-target clear values and texture-use flags to the bound program."""
        for i, v in enumerate(values):
            v = vec4(v)  # HACK: Hold the reference for this upload
            gl.glUniform4fv(self.uniforms[f'value{i}'], 1, glm.value_ptr(v))  # as float array
        for i, v in enumerate(use_texs):
            gl.glUniform1i(self.uniforms[f'use_tex{i}'], v)

    def compile_shaders(self):
        try:
            self.quad_program = shaders.compileProgram(
                shaders.compileShader(load_shader_source('uquad.vert'), gl.GL_VERTEX_SHADER),
                shaders.compileShader(load_shader_source('uquad.frag'), gl.GL_FRAGMENT_SHADER)
            )
        except Exception as e:
            # Unescape the GLSL compiler log so newlines print properly
            print(str(e).encode('utf-8').decode('unicode_escape'))
            raise e

    def draw(self, values: List[List[float]] = [], use_texs=[]):
        """
        This function will render 'value' to the currently bound framebuffer, up to six outputs
        """
        # Preserve the caller's program and VAO bindings around the draw
        old_prog = gl.glGetIntegerv(gl.GL_CURRENT_PROGRAM)
        old_vao = gl.glGetIntegerv(gl.GL_VERTEX_ARRAY_BINDING)
        gl.glUseProgram(self.quad_program)
        self.upload_gl_uniforms(values, use_texs)  # should be a noop

        # Prepare to render to textures
        gl.glBindVertexArray(self.vao)
        gl.glDrawArrays(gl.GL_TRIANGLE_STRIP, 0, len(self.verts))  # number of vertices
        gl.glBindVertexArray(old_vao)
        gl.glUseProgram(old_prog)
class DQuad(UQuad):
    """UQuad variant backed by the 'dquad' shader pair; its draw always
    passes the depth test, restoring the previous depth function after."""

    def compile_shaders(self):
        try:
            vert_stage = shaders.compileShader(load_shader_source('dquad.vert'), gl.GL_VERTEX_SHADER)
            frag_stage = shaders.compileShader(load_shader_source('dquad.frag'), gl.GL_FRAGMENT_SHADER)
            self.quad_program = shaders.compileProgram(vert_stage, frag_stage)
        except Exception as e:
            # Unescape the GLSL compiler log so newlines print properly
            print(str(e).encode('utf-8').decode('unicode_escape'))
            raise e

    def draw(self, values: List[List[float]] = [], use_texs=[]):
        # Force the depth test to pass for this draw, then put it back
        saved_depth_func = gl.glGetIntegerv(gl.GL_DEPTH_FUNC)
        gl.glDepthFunc(gl.GL_ALWAYS)
        super().draw(values, use_texs)
        gl.glDepthFunc(saved_depth_func)
def hardware_rendering_framebuffer(H: int, W: int, gl_tex_dtype=gl.GL_RGBA16F):
    """Build an FBO with three color targets (color + two float depth planes)
    and a 24-bit depth attachment, all H x W with nearest filtering.

    Returns (color_buffer, depth_upper, depth_lower, depth_attach, fbo);
    the FBO binding is restored to 0 before returning.
    """
    def _new_texture(internal_fmt, pixel_fmt, pixel_type):
        # Allocate an empty W x H texture; data pointer 0 means no upload.
        # The internal format is the GPU-side storage; format/type describe client pixel data.
        handle = gl.glGenTextures(1)
        gl.glBindTexture(gl.GL_TEXTURE_2D, handle)
        gl.glTexImage2D(gl.GL_TEXTURE_2D, 0, internal_fmt, W, H, 0, pixel_fmt, pixel_type, ctypes.c_void_p(0))
        gl.glTexParameteri(gl.GL_TEXTURE_2D, gl.GL_TEXTURE_MAG_FILTER, gl.GL_NEAREST)
        gl.glTexParameteri(gl.GL_TEXTURE_2D, gl.GL_TEXTURE_MIN_FILTER, gl.GL_NEAREST)
        return handle

    color_buffer = _new_texture(gl_tex_dtype, gl.GL_RGBA, gl.GL_FLOAT)
    depth_upper = _new_texture(gl.GL_R32F, gl.GL_RED, gl.GL_FLOAT)
    depth_lower = _new_texture(gl.GL_R32F, gl.GL_RED, gl.GL_FLOAT)
    depth_attach = _new_texture(gl.GL_DEPTH_COMPONENT24, gl.GL_DEPTH_COMPONENT, gl.GL_UNSIGNED_INT)

    # Wire the textures into a fresh framebuffer object
    fbo = gl.glGenFramebuffers(1)
    gl.glBindFramebuffer(gl.GL_FRAMEBUFFER, fbo)
    gl.glFramebufferTexture2D(gl.GL_FRAMEBUFFER, gl.GL_COLOR_ATTACHMENT0, gl.GL_TEXTURE_2D, color_buffer, 0)  # location 0
    gl.glFramebufferTexture2D(gl.GL_FRAMEBUFFER, gl.GL_COLOR_ATTACHMENT1, gl.GL_TEXTURE_2D, depth_upper, 0)  # location 1
    gl.glFramebufferTexture2D(gl.GL_FRAMEBUFFER, gl.GL_COLOR_ATTACHMENT2, gl.GL_TEXTURE_2D, depth_lower, 0)  # location 2
    gl.glFramebufferTexture2D(gl.GL_FRAMEBUFFER, gl.GL_DEPTH_ATTACHMENT, gl.GL_TEXTURE_2D, depth_attach, 0)
    gl.glDrawBuffers(3, [gl.GL_COLOR_ATTACHMENT0, gl.GL_COLOR_ATTACHMENT1, gl.GL_COLOR_ATTACHMENT2])

    # Refuse to hand back an unusable framebuffer
    if gl.glCheckFramebufferStatus(gl.GL_FRAMEBUFFER) != gl.GL_FRAMEBUFFER_COMPLETE:
        log(red('Framebuffer not complete, exiting...'))
        raise RuntimeError('Incomplete framebuffer')

    # Restore the original state
    gl.glBindFramebuffer(gl.GL_FRAMEBUFFER, 0)
    return color_buffer, depth_upper, depth_lower, depth_attach, fbo
def hareward_peeling_framebuffer(H: int, W: int):
    """Build an FBO for depth peeling: an integer index target, a float
    lower-depth target, and a 24-bit depth attachment, all H x W.

    Returns (index_buffer, depth_lower, depth_attach, fbo); the FBO
    binding is restored to 0 before returning.
    """
    def _new_texture(internal_fmt, pixel_fmt, pixel_type):
        # Allocate an empty W x H texture; data pointer 0 means no upload.
        handle = gl.glGenTextures(1)
        gl.glBindTexture(gl.GL_TEXTURE_2D, handle)
        gl.glTexImage2D(gl.GL_TEXTURE_2D, 0, internal_fmt, W, H, 0, pixel_fmt, pixel_type, ctypes.c_void_p(0))
        gl.glTexParameteri(gl.GL_TEXTURE_2D, gl.GL_TEXTURE_MAG_FILTER, gl.GL_NEAREST)
        gl.glTexParameteri(gl.GL_TEXTURE_2D, gl.GL_TEXTURE_MIN_FILTER, gl.GL_NEAREST)
        return handle

    index_buffer = _new_texture(gl.GL_R32I, gl.GL_RED_INTEGER, gl.GL_INT)
    depth_lower = _new_texture(gl.GL_R32F, gl.GL_RED, gl.GL_FLOAT)
    depth_attach = _new_texture(gl.GL_DEPTH_COMPONENT24, gl.GL_DEPTH_COMPONENT, gl.GL_FLOAT)

    # Wire the textures into a fresh framebuffer object
    fbo = gl.glGenFramebuffers(1)
    gl.glBindFramebuffer(gl.GL_FRAMEBUFFER, fbo)
    gl.glFramebufferTexture2D(gl.GL_FRAMEBUFFER, gl.GL_COLOR_ATTACHMENT0, gl.GL_TEXTURE_2D, index_buffer, 0)  # location 0
    gl.glFramebufferTexture2D(gl.GL_FRAMEBUFFER, gl.GL_COLOR_ATTACHMENT1, gl.GL_TEXTURE_2D, depth_lower, 0)  # location 1
    gl.glFramebufferTexture2D(gl.GL_FRAMEBUFFER, gl.GL_DEPTH_ATTACHMENT, gl.GL_TEXTURE_2D, depth_attach, 0)
    gl.glDrawBuffers(2, [gl.GL_COLOR_ATTACHMENT0, gl.GL_COLOR_ATTACHMENT1])

    # Refuse to hand back an unusable framebuffer
    if gl.glCheckFramebufferStatus(gl.GL_FRAMEBUFFER) != gl.GL_FRAMEBUFFER_COMPLETE:
        log(red('Framebuffer not complete, exiting...'))
        raise RuntimeError('Incomplete framebuffer')

    # Restore the original state
    gl.glBindFramebuffer(gl.GL_FRAMEBUFFER, 0)
    return index_buffer, depth_lower, depth_attach, fbo
class Gaussian(Mesh):
    """Mesh subclass that renders a 3D Gaussian model to a shared Quad texture.

    GL mesh machinery is disabled (loaders / shaders / buffers are no-ops);
    actual rendering happens in CUDA via `GaussianModel.render` and is
    copied onto the screen through `self.quad`.
    """
    def __init__(self,
                 filename: str = 'assets/meshes/zju3dv.npz',
                 gaussian_cfg: dotdict = dotdict(),
                 quad_cfg: dotdict = dotdict(),
                 view_depth: bool = False,  # show depth or show color
                 dpt_cm: str = 'linear',
                 H: int = 1024,
                 W: int = 1024,
                 **kwargs,
                 ):
        # Import Gaussian Model
        from easyvolcap.engine.registry import call_from_cfg
        from easyvolcap.utils.gaussian_utils import GaussianModel

        # Housekeeping
        super().__init__(**kwargs)
        self.name = split(filename)[-1]

        # Init Gaussian related models, for now only the first gaussian model is supported
        if filename.endswith('.npz') or filename.endswith('.pt') or filename.endswith('.pth'):
            # Load from GaussianTSampler
            pretrained, _ = load_pretrained(filename)  # loaded model and updated path (maybe)
            pretrained = pretrained.model

            # Keep only the parameters of the first point cloud in the sampler
            state_dict = dotdict()
            for k, v in pretrained.items():
                if k.startswith('sampler.pcds.0'):
                    state_dict[k.replace('sampler.pcds.0.', '')] = v

            # Load the parameters into the gaussian model
            self.gaussian_model: GaussianModel = call_from_cfg(GaussianModel, gaussian_cfg)  # init empty gaussian model
            self.gaussian_model.load_state_dict(state_dict)  # load the first gaussian model
            self.gaussian_model.cuda()  # move the parameters to GPU
        elif filename.endswith('.ply'):
            # Load raw GaussianModel
            # pts, rgb, norm, scalars = load_pts(filename)
            self.gaussian_model: GaussianModel = call_from_cfg(GaussianModel, gaussian_cfg)  # init empty gaussian model
            self.gaussian_model.load_ply(filename)  # load the original gaussian model
            self.gaussian_model.cuda()
        else:
            raise NotImplementedError

        # Init rendering quad
        self.quad: Quad = call_from_cfg(Quad, quad_cfg, H=H, W=W)

        # Other configurations
        self.view_depth = view_depth
        self.dpt_cm = dpt_cm

        # These Mesh render options do not apply to gaussian rendering
        del self.shade_flat
        del self.point_radius
        del self.render_normal

    # Disabling initialization: the GL mesh pipeline is not used by this class
    def load_from_file(self, *args, **kwargs):
        pass

    def load_from_data(self, *args, **kwargs):
        pass

    def compile_shaders(self):
        pass

    def update_gl_buffers(self):
        pass

    def resize_textures(self, H: int, W: int):
        # Delegate to the quad that owns the backing texture
        self.quad.resize_textures(H, W)

    # The actual rendering function
    @torch.no_grad()
    def render(self, camera: Camera):
        """Render the gaussian model for `camera` and present via the quad."""
        # Perform actual gaussian rendering
        batch = add_batch(to_cuda(camera.to_batch()))
        rgb, acc, dpt = self.gaussian_model.render(batch)

        if self.view_depth:
            rgba = torch.cat([depth_curve_fn(dpt, cm=self.dpt_cm), acc], dim=-1)  # H, W, 4
        else:
            rgba = torch.cat([rgb, acc], dim=-1)  # H, W, 4

        # Copy rendered tensor to screen (uint8, vertically flipped for GL)
        rgba = (rgba.clip(0, 1) * 255).type(torch.uint8).flip(0)  # transform
        self.quad.copy_to_texture(rgba)
        self.quad.render()

    def render_imgui(mesh, viewer: 'VolumetricVideoViewer', batch: dotdict):
        """Imgui controls; first parameter is the instance (named `mesh`, not `self`)."""
        super().render_imgui(viewer, batch)

        from imgui_bundle import imgui
        from easyvolcap.utils.imgui_utils import push_button_color, pop_button_color

        i = batch.i
        imgui.same_line()
        push_button_color(0x55cc33ff if not mesh.view_depth else 0x8855aaff)
        if imgui.button(f'Color##{i}' if not mesh.view_depth else f' Depth ##{i}'):
            mesh.view_depth = not mesh.view_depth
        pop_button_color()
class PointSplat(Gaussian, nn.Module):
    """Point-splat renderer reusing Gaussian's quad presentation.

    Skips `Gaussian.__init__` (via `super(Gaussian, self)`) so no
    GaussianModel is built; instead loads raw points with per-point
    alpha/radius and borrows `GaussianTSampler.render_radius` as its
    rasterizer.
    """
    def __init__(self,
                 filename: str = 'assets/meshes/zju3dv.ply',
                 quad_cfg: dotdict = dotdict(),
                 view_depth: bool = False,  # show depth or show color
                 dpt_cm: str = 'linear',
                 H: int = 1024,
                 W: int = 1024,
                 **kwargs,
                 ):
        # Import Gaussian Model
        from easyvolcap.engine.registry import call_from_cfg
        from easyvolcap.utils.data_utils import load_pts
        from easyvolcap.utils.net_utils import make_buffer
        from easyvolcap.models.samplers.gaussiant_sampler import GaussianTSampler

        # Housekeeping: super(Gaussian, self) skips Gaussian.__init__ and runs Mesh.__init__
        super(Gaussian, self).__init__(**kwargs)
        self.name = split(filename)[-1]
        self.render_radius = MethodType(GaussianTSampler.render_radius, self)  # override the method

        # Init PointSplat related models, for now only the first gaussian model is supported
        if filename.endswith('.ply'):
            # Load raw points with per-point alpha and radius scalars
            pts, rgb, norms, scalars = load_pts(filename)
            occ, rad = scalars.alpha, scalars.radius
            self.pts = make_buffer(torch.from_numpy(pts))  # N, 3
            self.rgb = make_buffer(torch.from_numpy(rgb))  # N, 3
            self.occ = make_buffer(torch.from_numpy(occ))  # N, 1
            self.rad = make_buffer(torch.from_numpy(rad))  # N, 1
        else:
            raise NotImplementedError

        # Init rendering quad
        self.quad: Quad = call_from_cfg(Quad, quad_cfg, H=H, W=W)
        self.cuda()  # move to cuda

        # Other configurations
        self.view_depth = view_depth
        self.dpt_cm = dpt_cm
        self.radius_mult = 1.0  # interactive radius scale (see render_imgui)
        self.alpha_mult = 1.0  # interactive alpha scale (see render_imgui)

    # The actual rendering function
    @torch.no_grad()
    def render(self, camera: Camera):
        """Splat the stored points for `camera` and present via the quad."""
        # Perform actual gaussian rendering
        batch = add_batch(to_cuda(camera.to_batch()))
        sh0 = rgb2sh0(self.rgb[..., None])
        xyz = self.pts
        occ = (self.occ * self.alpha_mult).clip(0, 1)
        rad = self.rad * self.radius_mult
        rgb, acc, dpt = self.render_radius(*add_batch([xyz, sh0, rad, occ]), batch)
        rgb, acc, dpt = rgb[0], acc[0], dpt[0]  # drop the batch dim

        if self.view_depth:
            rgba = torch.cat([depth_curve_fn(dpt, cm=self.dpt_cm), acc], dim=-1)  # H, W, 4
        else:
            rgba = torch.cat([rgb, acc], dim=-1)  # H, W, 4

        # Copy rendered tensor to screen (uint8, vertically flipped for GL)
        rgba = (rgba.clip(0, 1) * 255).type(torch.uint8).flip(0)  # transform
        self.quad.copy_to_texture(rgba)
        self.quad.render()

    def render_imgui(mesh, viewer: 'VolumetricVideoViewer', batch: dotdict):
        """Imgui controls; first parameter is the instance (named `mesh`, not `self`)."""
        super().render_imgui(viewer, batch)
        i = batch.i

        from imgui_bundle import imgui
        mesh.radius_mult = imgui.slider_float(f'Point radius multiplier##{i}', mesh.radius_mult, 0.1, 3.0)[1]  # 0.1mm
        mesh.alpha_mult = imgui.slider_float(f'Point alpha multiplier##{i}', mesh.alpha_mult, 0.1, 3.0)[1]  # 0.1mm
class Splat(Mesh):  # FIXME: Not rendering, need to debug this
    """Point-splatting renderer on top of Mesh: draws GL_POINTS through a
    multi-pass depth-peeling scheme using two ping-pong framebuffers."""

    def __init__(self,
                 *args,
                 H: int = 512,
                 W: int = 512,
                 tex_dtype: str = torch.half,
                 pts_per_pix: int = 24,  # render less for the static background since we're only doing a demo
                 blit_last_ratio: float = 0.0,
                 volume_rendering: bool = True,
                 radii_mult_volume: float = 1.00,  # 2 / 3 is the right integration, but will leave holes, 1.0 will make it bloat, 0.85 looks visually better
                 radii_mult_solid: float = 0.85,  # 2 / 3 is the right integration, but will leave holes, 1.0 will make it bloat, 0.85 looks visually better
                 point_smooth: bool = True,
                 alpha_blending: bool = True,
                 **kwargs):
        kwargs = dotdict(kwargs)
        kwargs.vert_sizes = kwargs.get('vert_sizes', [3, 3, 1, 1])  # per-vertex layout: xyz, rgb, radius, alpha
        # tex_dtype may arrive as a string name (e.g. 'half') or a torch.dtype
        self.tex_dtype = getattr(torch, tex_dtype) if isinstance(tex_dtype, str) else tex_dtype
        self.gl_tex_dtype = gl.GL_RGBA16F if self.tex_dtype == torch.half else gl.GL_RGBA32F

        super().__init__(*args, **kwargs)
        self.use_gl_program(self.splat_program)

        self.pts_per_pix = pts_per_pix  # number of depth-peeling passes
        self.blit_last_ratio = blit_last_ratio  # fraction of passes after which previous output is blitted forward
        self.volume_rendering = volume_rendering  # multi-pass volume rendering vs. single-pass solid
        self.radii_mult_volume = radii_mult_volume
        self.radii_mult_solid = radii_mult_solid
        self.point_smooth = point_smooth
        self.alpha_blending = alpha_blending

        self.max_H, self.max_W = H, W  # largest texture size allocated so far
        self.H, self.W = H, W
        self.init_textures()

        from easyvolcap.models.samplers.gaussiant_sampler import GaussianTSampler
        self.render_radius = MethodType(GaussianTSampler.render_radius, self)  # override the method

    @property
    def verts_data(self):  # a heavy copy operation
        # Interleave all per-vertex attributes into one flat float32 array for VBO upload
        verts = torch.cat([self.verts, self.colors, self.radius, self.alpha], dim=-1).ravel().numpy()  # MARK: Maybe sync
        verts = np.asarray(verts, dtype=np.float32, order='C')  # this should only be invoked once
        return verts

    def use_gl_program(self, program: shaders.ShaderProgram):
        """Activate `program` and (re)query the uniform locations this renderer drives."""
        super().use_gl_program(program)
        # Special controlling variables
        self.uniforms.alpha_blending = gl.glGetUniformLocation(program, f'alpha_blending')
        self.uniforms.point_smooth = gl.glGetUniformLocation(program, f'point_smooth')
        self.uniforms.radii_mult = gl.glGetUniformLocation(program, f'radii_mult')

        # Special rendering variables
        self.uniforms.pass_index = gl.glGetUniformLocation(program, f'pass_index')
        self.uniforms.read_color = gl.glGetUniformLocation(program, f'read_color')
        self.uniforms.read_upper = gl.glGetUniformLocation(program, f'read_upper')
        self.uniforms.read_lower = gl.glGetUniformLocation(program, f'read_lower')
        # Bind the three sampler uniforms to fixed texture units 0/1/2
        gl.glUniform1i(self.uniforms.read_color, 0)
        gl.glUniform1i(self.uniforms.read_upper, 1)
        gl.glUniform1i(self.uniforms.read_lower, 2)

    def compile_shaders(self):
        """Compile the volume-splat and solid-splat (usplat) shader programs."""
        try:
            self.splat_program = shaders.compileProgram(
                shaders.compileShader(load_shader_source('splat.vert'), gl.GL_VERTEX_SHADER),
                shaders.compileShader(load_shader_source('splat.frag'), gl.GL_FRAGMENT_SHADER)
            )
            self.usplat_program = shaders.compileProgram(
                shaders.compileShader(load_shader_source('usplat.vert'), gl.GL_VERTEX_SHADER),
                shaders.compileShader(load_shader_source('usplat.frag'), gl.GL_FRAGMENT_SHADER)
            )
        except Exception as e:
            # Unescape the GLSL compiler log so it prints with real newlines
            print(str(e).encode('utf-8').decode('unicode_escape'))
            raise e

    def rasterize(self, camera: Camera = None, length: int = None):
        """Dispatch to volume (multi-pass) or solid (single-pass) rasterization."""
        if self.volume_rendering:
            return self.rasterize_volume(camera, length)
        else:
            return self.rasterize_solid(camera, length)

    def rasterize_volume(self, camera: Camera = None, length: int = None):  # some implementation requires no uploading of camera
        """
        Let's try to analyze what's happening here
        We want to:
        1. Render the front-most color to color buffer
        2. UNUSED: Render the front-most depth + some large margin to a depth upper limit buffer
        3. Render the front-most depth + some small margin to a depth lower limit buffer
        4. Switch between the render target and sampling target
        5. Use the previous rendered color, depth upper limit and lower limit as textures
        6. When current depth is smaller than the lower limit, we've already rendered this in the first pass, discard
        7. UNUSED: When current depth is larger than the upper limit, it will probabily not contribute much to final results, discard
        8. UNUSED: When the accumulated opacity reaches almost 1, subsequent rendering would not have much effect, return directly
        9. When the point coordinates falls out of bound of the current sphere, dicard (this could be optimized with finutining in rectangle)
        10. Finally, try to render the final color using the volume rendering equation (by accumulating alpha values from front to back)
        Required cleanup checklist:
        1. Before rendering the first pass, we need to clear the color and depth texture, this is not done, need to check multi-frame accumulation on this
        2. Before rendering next pass, it's also recommended to blit color and depth values from previous pass to avoid assign them in the shader

        Returns the framebuffer id holding the final composited result.
        """
        front_fbo, front_color, front_upper, front_lower = self.read_fbo, self.read_color, self.read_upper, self.read_lower
        back_fbo, back_color, back_upper, back_lower = self.write_fbo, self.write_color, self.write_upper, self.write_lower

        # Only clear the output once
        gl.glBindFramebuffer(gl.GL_FRAMEBUFFER, front_fbo)  # for offscreen rendering to textures
        gl.glClearBufferfv(gl.GL_COLOR, 0, [0.0, 0.0, 0.0, 0.0])
        # gl.glClearBufferfv(gl.GL_COLOR, 1, [1e9])
        gl.glClearBufferfv(gl.GL_COLOR, 2, [0.0, 0.0, 0.0, 0.0])
        gl.glClearBufferfv(gl.GL_DEPTH, 0, [1e9])  # this is for depth testing

        # Only clear the output once
        gl.glBindFramebuffer(gl.GL_FRAMEBUFFER, back_fbo)  # for offscreen rendering to textures
        gl.glClearBufferfv(gl.GL_COLOR, 0, [0.0, 0.0, 0.0, 0.0])
        # gl.glClearBufferfv(gl.GL_COLOR, 1, [1e9])
        gl.glClearBufferfv(gl.GL_COLOR, 2, [0.0, 0.0, 0.0, 0.0])
        gl.glClearBufferfv(gl.GL_DEPTH, 0, [1e9])  # this is for depth testing

        # Prepare for the actual rendering, previous operations could rebind the vertex array
        self.use_gl_program(self.splat_program)  # TODO: Implement this with a mapping and a lazy modification
        self.upload_gl_uniforms(camera)
        gl.glBindVertexArray(self.vao)

        # The actual multi pass rendering process happens here
        for pass_index in range(self.pts_per_pix):
            # Swap buffers to render the next pass (ping-pong between read/write FBOs)
            front_fbo, front_color, front_upper, front_lower, back_fbo, back_color, back_upper, back_lower = \
                back_fbo, back_color, back_upper, back_lower, front_fbo, front_color, front_upper, front_lower

            # Bind the read texture and bind the write render frame buffer
            gl.glBindTextures(0, 3, [front_color, front_upper, front_lower])

            # Move content from write_fbo to screen fbo
            if pass_index > self.pts_per_pix * self.blit_last_ratio:  # no blitting almost has no effect on the rendering
                gl.glBindFramebuffer(gl.GL_READ_FRAMEBUFFER, front_fbo)
                gl.glBindFramebuffer(gl.GL_DRAW_FRAMEBUFFER, back_fbo)
                for i in range(3):
                    # Blit each of the three color attachments forward individually
                    gl.glReadBuffer(gl.GL_COLOR_ATTACHMENT0 + i)
                    gl.glDrawBuffer(gl.GL_COLOR_ATTACHMENT0 + i)
                    gl.glBlitFramebuffer(0, 0, self.W, self.H, 0, 0, self.W, self.H, gl.GL_COLOR_BUFFER_BIT, gl.GL_NEAREST)
                gl.glDrawBuffers(3, [gl.GL_COLOR_ATTACHMENT0, gl.GL_COLOR_ATTACHMENT1, gl.GL_COLOR_ATTACHMENT2])

            # Clear depth buffer for depth testing
            gl.glBindFramebuffer(gl.GL_FRAMEBUFFER, back_fbo)  # for offscreen rendering to textures
            gl.glClearBufferfv(gl.GL_DEPTH, 0, [1e9])  # this is for depth testing
            gl.glUniform1i(self.uniforms.pass_index, pass_index)  # pass index

            # The actual drawing pass with render things out to the write_fbo
            gl.glDrawArrays(gl.GL_POINTS, 0, length if length is not None else len(self.verts))  # number of vertices

        # Restore states of things
        gl.glBindFramebuffer(gl.GL_FRAMEBUFFER, 0)
        gl.glBindVertexArray(0)
        return back_fbo

    def upload_gl_uniforms(self, camera: Camera):
        """Upload camera uniforms plus this renderer's extra control uniforms."""
        super().upload_gl_uniforms(camera)
        gl.glUniform1i(self.uniforms.point_smooth, self.point_smooth)
        gl.glUniform1i(self.uniforms.alpha_blending, self.alpha_blending)

        if self.volume_rendering:
            gl.glUniform1f(self.uniforms.radii_mult, self.radii_mult_volume)  # radii mult
        else:
            gl.glUniform1f(self.uniforms.radii_mult, self.radii_mult_solid)  # radii mult

    def rasterize_solid(self, camera: Camera = None, length: int = None):
        """Single-pass (non-peeled) rasterization using the usplat program."""
        # Only clear the output once
        back_fbo = self.write_fbo
        gl.glBindFramebuffer(gl.GL_FRAMEBUFFER, back_fbo)  # for offscreen rendering to textures
        gl.glClearBufferfv(gl.GL_COLOR, 0, [0.0, 0.0, 0.0, 0.0])  # color
        # gl.glClearBufferfv(gl.GL_COLOR, 1, [0.0])  # depth upper
        gl.glClearBufferfv(gl.GL_COLOR, 2, [0.0, 0.0, 0.0, 0.0])  # depth lower
        gl.glClearBufferfv(gl.GL_DEPTH, 0, [1e9])  # this is for depth testing

        # Prepare for the actual rendering, previous operations could rebind the vertex array
        self.use_gl_program(self.usplat_program)
        self.upload_gl_uniforms(camera)
        gl.glUniform1i(self.uniforms.pass_index, 0)  # pass index
        gl.glBindVertexArray(self.vao)

        # The actual drawing pass with render things out to the write_fbo
        gl.glDrawArrays(gl.GL_POINTS, 0, length if length is not None else len(self.verts))  # number of vertices

        # Restore states of things
        gl.glBindFramebuffer(gl.GL_FRAMEBUFFER, 0)
        gl.glBindVertexArray(0)
        return back_fbo

    def show(self, back_fbo: int):
        """Blit the rendered color attachment of `back_fbo` onto the default framebuffer."""
        # Move content from write_fbo to screen fbo
        gl.glBindFramebuffer(gl.GL_READ_FRAMEBUFFER, back_fbo)
        gl.glBindFramebuffer(gl.GL_DRAW_FRAMEBUFFER, 0)  # render the final content onto screen
        gl.glReadBuffer(gl.GL_COLOR_ATTACHMENT0)
        gl.glBlitFramebuffer(0, 0, self.W, self.H, 0, 0, self.W, self.H, gl.GL_COLOR_BUFFER_BIT, gl.GL_NEAREST)
        gl.glBindFramebuffer(gl.GL_FRAMEBUFFER, 0)

    def render(self, camera):
        """Rasterize and present, skipping entirely when the mesh is hidden."""
        if not self.visible: return
        self.show(self.rasterize(camera))

    def resize_textures(self, H: int, W: int):  # analogy to update_gl_buffers
        """Adjust the render size; reallocate textures (with 5% headroom) if it grew."""
        self.H, self.W = H, W
        if self.H > self.max_H or self.W > self.max_W:  # max got updated
            self.max_H, self.max_W = max(int(self.H * 1.05), self.max_H), max(int(self.W * 1.05), self.max_W)
            self.init_textures()

    def init_textures(self):
        """(Re)create the double-buffered offscreen framebuffers and their textures."""
        if hasattr(self, 'write_fbo'):
            # Free the previous generation of GL objects before reallocating
            gl.glDeleteFramebuffers(2, [self.write_fbo, self.read_fbo])
            gl.glDeleteTextures(8, [self.write_color, self.write_upper, self.write_lower, self.write_attach, self.read_color, self.read_upper, self.read_lower, self.read_attach])
        self.write_color, self.write_upper, self.write_lower, self.write_attach, self.write_fbo = hardware_rendering_framebuffer(self.max_H, self.max_W, self.gl_tex_dtype)
        self.read_color, self.read_upper, self.read_lower, self.read_attach, self.read_fbo = hardware_rendering_framebuffer(self.max_H, self.max_W, self.gl_tex_dtype)
        log(f'Created texture of h, w: {self.max_H}, {self.max_W}')
class HardwareRendering(Splat):
    """Splat renderer with CUDA-GL interop: vertex data is written from PyTorch
    tensors straight into the GL vertex buffer, and the rendered textures are
    copied back into CUDA tensors without a CPU round trip."""

    def __init__(self,
                 dtype=torch.half,
                 **kwargs,
                 ):
        # dtype may be given as a string name or a torch.dtype
        self.dtype = getattr(torch, dtype) if isinstance(dtype, str) else dtype
        self.gl_dtype = gl.GL_HALF_FLOAT if self.dtype == torch.half else gl.GL_FLOAT
        kwargs = dotdict(kwargs)
        kwargs.blit_last_ratio = kwargs.get('blit_last_ratio', 0.90)  # blit only near the last passes by default
        kwargs.vert_sizes = kwargs.get('vert_sizes', [3, 3, 1, 1])
        super().__init__(**kwargs)  # verts, color, radius, alpha

    @property
    def verts_data(self):  # a heavy copy operation
        # Interleave per-vertex attributes, matching self.dtype instead of float32
        verts = torch.cat([self.verts, self.colors, self.radius, self.alpha], dim=-1).ravel().numpy()  # MARK: Maybe sync
        verts = np.asarray(verts, dtype=torch_dtype_to_numpy_dtype(self.dtype), order='C')  # this should only be invoked once
        return verts

    def init_gl_buffers(self, v: int = 0, f: int = 0):
        """Create GL buffers and register the VBO with CUDA for direct writes."""
        from cuda import cudart
        if hasattr(self, 'cu_vbo'):
            # Unregister the stale resource before the underlying VBO is recreated
            CHECK_CUDART_ERROR(cudart.cudaGraphicsUnregisterResource(self.cu_vbo))

        super().init_gl_buffers(v, f)

        # Register vertex buffer obejct
        flags = cudart.cudaGraphicsRegisterFlags.cudaGraphicsRegisterFlagsWriteDiscard
        try:
            self.cu_vbo = CHECK_CUDART_ERROR(cudart.cudaGraphicsGLRegisterBuffer(self.vbo, flags))
        except RuntimeError as e:
            log(red(f'Your system does not support CUDA-GL interop, please use pytorch3d\'s implementation instead'))
            log(red(f'This can be done by specifying {blue("model_cfg.sampler_cfg.use_cudagl=False model_cfg.sampler_cfg.use_diffgl=False")} at the end of your command'))
            log(red(f'Note that this implementation is extremely slow, we recommend running on a native system that support the interop'))
            # raise RuntimeError(str(e) + ": This unrecoverable, please read the error message above")
            raise e

    def init_textures(self):
        """Create the offscreen textures and register the color/depth ones with CUDA."""
        from cuda import cudart
        if hasattr(self, 'cu_read_color'):
            # Unregister stale resources before the textures are reallocated
            CHECK_CUDART_ERROR(cudart.cudaGraphicsUnregisterResource(self.cu_read_color))
            CHECK_CUDART_ERROR(cudart.cudaGraphicsUnregisterResource(self.cu_write_color))
            CHECK_CUDART_ERROR(cudart.cudaGraphicsUnregisterResource(self.cu_read_lower))
            CHECK_CUDART_ERROR(cudart.cudaGraphicsUnregisterResource(self.cu_write_lower))

        super().init_textures()

        # Register image to read from
        flags = cudart.cudaGraphicsRegisterFlags.cudaGraphicsRegisterFlagsReadOnly
        self.cu_read_color = CHECK_CUDART_ERROR(cudart.cudaGraphicsGLRegisterImage(self.read_color, gl.GL_TEXTURE_2D, flags))
        self.cu_write_color = CHECK_CUDART_ERROR(cudart.cudaGraphicsGLRegisterImage(self.write_color, gl.GL_TEXTURE_2D, flags))
        self.cu_read_lower = CHECK_CUDART_ERROR(cudart.cudaGraphicsGLRegisterImage(self.read_lower, gl.GL_TEXTURE_2D, flags))
        self.cu_write_lower = CHECK_CUDART_ERROR(cudart.cudaGraphicsGLRegisterImage(self.write_lower, gl.GL_TEXTURE_2D, flags))

    def forward(self, xyz: torch.Tensor, rgb: torch.Tensor, rad: torch.Tensor, occ: torch.Tensor, batch: dotdict):
        """
        Renders a 3D point cloud using OpenGL and returns the rendered RGB image, accumulated alpha image, and depth map.

        Args:
            xyz (torch.Tensor): A tensor of shape (B, N, 3) containing the 3D coordinates of the points.
            rgb (torch.Tensor): A tensor of shape (B, N, 3) containing the RGB color values of the points.
            rad (torch.Tensor): A tensor of shape (B, N, 1) containing the radii of the points.
            occ (torch.Tensor): A tensor of shape (B, N, 1) containing the opacity values of the points.
            batch (dotdict): A dictionary containing the camera parameters and other metadata for the batch.

        Returns:
            A tuple containing the rendered RGB image, accumulated alpha image, and depth map, all as torch.Tensors.
            The RGB image has shape (1, H, W, 3), the alpha image has shape (1, H, W, 1), and the depth map has shape (1, H, W, 1).

            The method first resizes the OpenGL texture to match the height and width of the output image. It then sets the OpenGL viewport and scissor to only render in the region of the viewport specified by the output image size.
            It concatenates the `xyz`, `rgb`, and `rad` tensors along the last dimension and flattens the result into a 1D tensor.
            The method then uploads the input data to OpenGL for rendering and performs depth peeling using OpenGL. The method uploads the camera parameters to OpenGL and renders the point cloud, saving the output buffer to the `back_fbo` attribute of the class.
            Finally, the method copies the rendered image and depth back to the CPU as torch.Tensors and reshapes them to match the output image size. The RGB image is returned with shape (1, H, W, 3), the accumulated alpha image is returned with shape (1, H, W, 1), and the depth map is returned with shape (1, H, W, 1).
        """
        from cuda import cudart
        kind = cudart.cudaMemcpyKind.cudaMemcpyDeviceToDevice

        # !: BATCH
        H, W = batch.meta.H[0].item(), batch.meta.W[0].item()
        self.resize_textures(H, W)  # maybe resize the texture
        self.resize_buffers(xyz.shape[1])  # maybe resize the buffer
        _, _, old_W, old_H = gl.glGetIntegerv(gl.GL_VIEWPORT)  # remember viewport to restore later
        gl.glViewport(0, 0, W, H)
        gl.glScissor(0, 0, W, H)  # only render in this small region of the viewport

        # Prepare for input data
        data = torch.cat([xyz, rgb, rad, occ], dim=-1).type(self.dtype).ravel()

        # Upload to opengl for rendering
        CHECK_CUDART_ERROR(cudart.cudaGraphicsMapResources(1, self.cu_vbo, torch.cuda.current_stream().cuda_stream))
        cu_vbo_ptr, cu_vbo_size = CHECK_CUDART_ERROR(cudart.cudaGraphicsResourceGetMappedPointer(self.cu_vbo))
        assert cu_vbo_size >= data.numel() * data.element_size(), f'PyTorch(CUDA) and OpenGL vertex buffer size mismatch ({data.numel() * data.element_size()} v.s. {cu_vbo_size}), CUDA side should be less than or equal to the OpenGL side'
        CHECK_CUDART_ERROR(cudart.cudaMemcpyAsync(cu_vbo_ptr,
                                                  data.data_ptr(),
                                                  data.numel() * data.element_size(),
                                                  kind,
                                                  torch.cuda.current_stream().cuda_stream))
        CHECK_CUDART_ERROR(cudart.cudaGraphicsUnmapResources(1, self.cu_vbo, torch.cuda.current_stream().cuda_stream))

        # Perform rasterization (depth peeling using OpenGL)
        if 'meta_stream' in batch.meta: batch.meta.meta_stream.synchronize()  # wait for gpu -> cpu copy to finish
        back_fbo = self.rasterize(Camera(batch=batch.meta), xyz.shape[-2])  # will upload and render, save output buffer to back_fbo

        # Copy rendered image and depth back as tensor
        cu_tex = self.cu_write_color if back_fbo == self.write_fbo else self.cu_read_color  # double buffered depth peeling
        cu_dpt = self.cu_write_lower if back_fbo == self.write_fbo else self.cu_read_lower  # double buffered depth peeling

        # Prepare the output # !: BATCH
        rgb_map = torch.empty((H, W, 4), dtype=self.tex_dtype, device='cuda')  # to hold the data from opengl
        dpt_map = torch.empty((H, W, 1), dtype=torch.float, device='cuda')  # to hold the data from opengl

        # The resources in resources may be accessed by CUDA until they are unmapped.
        # The graphics API from which resources were registered should not access any resources while they are mapped by CUDA.
        # If an application does so, the results are undefined.
        CHECK_CUDART_ERROR(cudart.cudaGraphicsMapResources(1, cu_tex, torch.cuda.current_stream().cuda_stream))
        CHECK_CUDART_ERROR(cudart.cudaGraphicsMapResources(1, cu_dpt, torch.cuda.current_stream().cuda_stream))
        cu_tex_arr = CHECK_CUDART_ERROR(cudart.cudaGraphicsSubResourceGetMappedArray(cu_tex, 0, 0))
        cu_dpt_arr = CHECK_CUDART_ERROR(cudart.cudaGraphicsSubResourceGetMappedArray(cu_dpt, 0, 0))
        CHECK_CUDART_ERROR(cudart.cudaMemcpy2DFromArrayAsync(rgb_map.data_ptr(),  # dst
                                                             W * 4 * rgb_map.element_size(),  # dpitch
                                                             cu_tex_arr,  # src
                                                             0,  # wOffset
                                                             0,  # hOffset
                                                             W * 4 * rgb_map.element_size(),  # width Width of matrix transfer (columns in bytes)
                                                             H,  # height
                                                             kind,  # kind
                                                             torch.cuda.current_stream().cuda_stream))  # stream
        CHECK_CUDART_ERROR(cudart.cudaMemcpy2DFromArrayAsync(dpt_map.data_ptr(),
                                                             W * 1 * dpt_map.element_size(),
                                                             cu_dpt_arr,
                                                             0,
                                                             0,
                                                             W * 1 * dpt_map.element_size(),
                                                             H,
                                                             kind,
                                                             torch.cuda.current_stream().cuda_stream))
        CHECK_CUDART_ERROR(cudart.cudaGraphicsUnmapResources(1, cu_tex, torch.cuda.current_stream().cuda_stream))  # MARK: SYNC
        CHECK_CUDART_ERROR(cudart.cudaGraphicsUnmapResources(1, cu_dpt, torch.cuda.current_stream().cuda_stream))  # MARK: SYNC

        # Ouput reshaping
        rgb_map, dpt_map = rgb_map[None].flip(1), dpt_map[None].flip(1)  # GL's origin is bottom-left: flip vertically
        rgb_map, acc_map = rgb_map[..., :3], rgb_map[..., 3:]
        dpt_map = torch.where(dpt_map == 0, dpt_map.max(), dpt_map)  # background (0) becomes farthest depth

        # Some house keepings
        gl.glViewport(0, 0, old_W, old_H)
        gl.glScissor(0, 0, old_W, old_H)
        return rgb_map, acc_map, dpt_map
class HardwarePeeling(Splat):
    """Depth-peeling renderer that rasterizes point *indices* on the GPU via
    OpenGL, then composites colors differentiably back on the PyTorch side.

    Fix over the previous revision: `init_gl_buffers` ended with a stray
    line-continuation backslash after its final statement, which joined the
    statement with the following `def` line and made the module unparsable.
    """

    def __init__(self,
                 dtype=torch.float,
                 **kwargs):
        # dtype may be given as a string name or a torch.dtype
        self.dtype = getattr(torch, dtype) if isinstance(dtype, str) else dtype
        self.gl_dtype = gl.GL_HALF_FLOAT if self.dtype == torch.half else gl.GL_FLOAT
        super().__init__(**kwargs,
                         blit_last_ratio=-10.0,
                         vert_sizes=[3, 1],
                         )  # verts, radius, index

        # from pytorch3d.renderer import AlphaCompositor
        # self.compositor = AlphaCompositor()  # this the key to convergence, this is differentiable

    @property
    def verts_data(self):  # a heavy copy operation
        # Only positions and radii are uploaded; colors are composited in torch
        verts = torch.cat([self.verts, self.radius], dim=-1).ravel().numpy()  # MARK: Maybe sync
        verts = np.asarray(verts, dtype=torch_dtype_to_numpy_dtype(self.dtype), order='C')  # this should only be invoked once
        return verts

    def init_gl_buffers(self, v: int = 0, f: int = 0):
        """Create GL buffers and register the VBO with CUDA for direct writes."""
        from cuda import cudart
        if hasattr(self, 'cu_vbo'):
            # Unregister the stale resource before the underlying VBO is recreated
            CHECK_CUDART_ERROR(cudart.cudaGraphicsUnregisterResource(self.cu_vbo))

        super().init_gl_buffers(v, f)

        # Register vertex buffer obejct
        flags = cudart.cudaGraphicsRegisterFlags.cudaGraphicsRegisterFlagsWriteDiscard
        # NOTE: removed a stray trailing backslash here that previously joined this
        # statement with the next method definition (a syntax error)
        self.cu_vbo = CHECK_CUDART_ERROR(cudart.cudaGraphicsGLRegisterBuffer(self.vbo, flags))

    def use_gl_program(self, program):
        """Activate the index-splat program and bind its sampler uniforms."""
        super().use_gl_program(program)
        gl.glUseProgram(self.splat_program)  # use a different program
        self.uniforms.read_index = gl.glGetUniformLocation(program, f'read_index')
        self.uniforms.read_lower = gl.glGetUniformLocation(program, f'read_lower')
        # Bind the two sampler uniforms to texture units 0/1
        gl.glUniform1i(self.uniforms.read_index, 0)
        gl.glUniform1i(self.uniforms.read_lower, 1)

    def upload_gl_uniforms(self, camera: Camera):
        # No extra uniforms beyond the base class's for this renderer
        super().upload_gl_uniforms(camera)

    def compile_shaders(self):
        """Compile the index-splat shader program (only one program is needed)."""
        try:
            self.splat_program = shaders.compileProgram(
                shaders.compileShader(load_shader_source('idx_splat.vert'), gl.GL_VERTEX_SHADER),  # use the pass through quad shader
                shaders.compileShader(load_shader_source('idx_splat.frag'), gl.GL_FRAGMENT_SHADER)
            )
        except Exception as e:
            # Unescape the GLSL compiler log so it prints with real newlines
            print(str(e).encode('utf-8').decode('unicode_escape'))
            raise e

    def init_textures(self):
        """(Re)create the index/lower-depth peeling framebuffers and register them with CUDA."""
        from cuda import cudart
        if hasattr(self, 'cu_read_index'):
            # Unregister stale resources before the textures are reallocated
            CHECK_CUDART_ERROR(cudart.cudaGraphicsUnregisterResource(self.cu_read_index))
            CHECK_CUDART_ERROR(cudart.cudaGraphicsUnregisterResource(self.cu_write_index))
            CHECK_CUDART_ERROR(cudart.cudaGraphicsUnregisterResource(self.cu_read_lower))
            CHECK_CUDART_ERROR(cudart.cudaGraphicsUnregisterResource(self.cu_write_lower))
        if hasattr(self, 'write_fbo'):
            gl.glDeleteFramebuffers(2, [self.write_fbo, self.read_fbo])
            gl.glDeleteTextures(6, [self.write_index, self.write_lower, self.write_attach, self.read_index, self.read_lower, self.read_attach])
        self.write_index, self.write_lower, self.write_attach, self.write_fbo = hareward_peeling_framebuffer(self.max_H, self.max_W)
        self.read_index, self.read_lower, self.read_attach, self.read_fbo = hareward_peeling_framebuffer(self.max_H, self.max_W)

        # Register image to read from
        flags = cudart.cudaGraphicsRegisterFlags.cudaGraphicsRegisterFlagsReadOnly
        self.cu_read_index = CHECK_CUDART_ERROR(cudart.cudaGraphicsGLRegisterImage(self.read_index, gl.GL_TEXTURE_2D, flags))
        self.cu_write_index = CHECK_CUDART_ERROR(cudart.cudaGraphicsGLRegisterImage(self.write_index, gl.GL_TEXTURE_2D, flags))
        self.cu_read_lower = CHECK_CUDART_ERROR(cudart.cudaGraphicsGLRegisterImage(self.read_lower, gl.GL_TEXTURE_2D, flags))
        self.cu_write_lower = CHECK_CUDART_ERROR(cudart.cudaGraphicsGLRegisterImage(self.write_lower, gl.GL_TEXTURE_2D, flags))
        log(f'Created texture of h, w: {self.max_H}, {self.max_W}')

    def rasterize_generator(self, camera: Camera = None, length: int = None):  # some implementation requires no uploading of camera
        """Generator over depth-peeling passes; yields the back FBO after each pass
        so the CUDA side can read the rendered indices before the next pass."""
        front_fbo, front_index, front_lower = self.read_fbo, self.read_index, self.read_lower
        back_fbo, back_index, back_lower = self.write_fbo, self.write_index, self.write_lower

        # Only clear the output once
        gl.glBindFramebuffer(gl.GL_FRAMEBUFFER, front_fbo)  # for offscreen rendering to textures
        gl.glClearBufferiv(gl.GL_COLOR, 0, [-1])  # -1 marks "no point" in the index buffer
        gl.glClearBufferfv(gl.GL_COLOR, 1, [0.0])
        gl.glClearBufferfv(gl.GL_DEPTH, 0, [1e9])  # this is for depth testing

        # Only clear the output once
        gl.glBindFramebuffer(gl.GL_FRAMEBUFFER, back_fbo)  # for offscreen rendering to textures
        gl.glClearBufferiv(gl.GL_COLOR, 0, [-1])
        gl.glClearBufferfv(gl.GL_COLOR, 1, [0.0])
        gl.glClearBufferfv(gl.GL_DEPTH, 0, [1e9])  # this is for depth testing

        # Prepare for the actual rendering, previous operations could rebind the vertex array
        self.use_gl_program(self.splat_program)
        self.upload_gl_uniforms(camera)
        gl.glBindVertexArray(self.vao)

        # The actual multi pass rendering process happens here
        for pass_index in range(self.pts_per_pix):
            # Swap buffers to render the next pass (ping-pong between read/write FBOs)
            front_fbo, front_index, front_lower, back_fbo, back_index, back_lower = \
                back_fbo, back_index, back_lower, front_fbo, front_index, front_lower

            # Bind the read texture and bind the write render frame buffer
            gl.glBindTextures(0, 2, [front_index, front_lower])

            # Move content from write_fbo to screen fbo
            if pass_index > self.pts_per_pix * self.blit_last_ratio:  # no blitting almost has no effect on the rendering
                gl.glBindFramebuffer(gl.GL_READ_FRAMEBUFFER, front_fbo)
                gl.glBindFramebuffer(gl.GL_DRAW_FRAMEBUFFER, back_fbo)
                gl.glReadBuffer(gl.GL_COLOR_ATTACHMENT0 + 1)
                gl.glDrawBuffer(gl.GL_COLOR_ATTACHMENT0 + 1)
                gl.glBlitFramebuffer(0, 0, self.W, self.H, 0, 0, self.W, self.H, gl.GL_COLOR_BUFFER_BIT, gl.GL_NEAREST)
                gl.glBindFramebuffer(gl.GL_FRAMEBUFFER, back_fbo)  # for offscreen rendering to textures
                gl.glDrawBuffers(2, [gl.GL_COLOR_ATTACHMENT0, gl.GL_COLOR_ATTACHMENT1])
            else:
                # Only clear the output once
                gl.glBindFramebuffer(gl.GL_FRAMEBUFFER, back_fbo)  # for offscreen rendering to textures

            # Clear depth buffer for depth testing
            gl.glClearBufferiv(gl.GL_COLOR, 0, [-1])  # clear the indices buffer for later rendering and retrieving
            gl.glClearBufferfv(gl.GL_DEPTH, 0, [1e9])  # this is for depth testing

            # The actual drawing pass with render things out to the write_fbo
            gl.glDrawArrays(gl.GL_POINTS, 0, length if length is not None else len(self.verts))  # number of vertices
            yield back_fbo  # give the CUDA end a chance to read from this frame buffer after rendering

        # Restore states of things
        gl.glBindFramebuffer(gl.GL_FRAMEBUFFER, 0)
        gl.glBindVertexArray(0)
        return

    def forward(self,
                xyz: torch.Tensor, rgb: torch.Tensor, rad: torch.Tensor, occ: torch.Tensor,
                batch: dotdict,
                return_frags: bool = False,
                return_full: bool = False,
                ):
        """
        Get all indices from the depth peeling passes
        Compute the vertex weight here in torch(cuda)
        Use the indices to pass through a compositor
        The backward pass should only be valid on the torch side, and it should've been enough
        TODO: This function is too memory intensive
        TODO: Performing IBR is too memory intensive
        """
        # This the slow part, but not differentiable
        idx, _, _ = self.forward_idx(xyz, rad, batch)  # B, H, W, K
        msk = idx != -1  # B, H, W, K  (valid fragments; -1 means no point hit)
        idx = torch.where(msk, idx, 0).long()  # replace -1 so gathers below stay in-bounds

        # Sample things needed for computing screen space weight
        H, W, K, R, T, C = get_opencv_camera_params(batch)
        K, R, T, C = K.to(xyz.dtype), R.to(xyz.dtype), T.to(xyz.dtype), C.to(xyz.dtype)
        pix_xyz = (xyz @ R.mT + T.mT) @ K.mT  # B, P, 3  (camera projection)
        pix_xyz_xy = pix_xyz[..., :-1] / (pix_xyz[..., -1:] + 1e-10)  # perspective divide
        pix_rad = abs(K[..., 1, 1][..., None] * rad[..., 0] / (pix_xyz[..., -1] + 1e-10))  # z: B, 1 * B, N, world space radius

        mean_xy = multi_gather(pix_xyz_xy, idx.view(idx.shape[0], -1), dim=-2).view(*idx.shape, 2)  # B, HWK, 2 -> B, H, W, K, 2
        xy = create_meshgrid(H, W, idx.device, dtype=xyz.dtype).flip(-1)[None].expand(idx.shape[0], H, W, 2)  # create screen space xy (opencv)
        dists = (xy[..., None, :] - mean_xy).pow(2).sum(-1)  # B, H, W, K  (squared pixel distance to splat center)

        # Point values
        dpt = (xyz - C.mT).norm(dim=-1, keepdim=True)  # B, N, 1  (distance to camera center)
        pix_occ = multi_gather(occ, idx.view(idx.shape[0], -1), dim=-2).view(*idx.shape)
        pix_rad = multi_gather(pix_rad, idx.view(idx.shape[0], -1), dim=-1).view(*idx.shape)  # -> B, H, W, K
        pix_occ = pix_occ * (1 - dists / (pix_rad * pix_rad + 1e-10))  # B, H, W, K  (falloff with distance from center)
        pix_occ = pix_occ.clip(0, 1)
        pix_occ = torch.where(msk, pix_occ, 0)  # zero out invalid fragments

        if return_frags:
            return idx, pix_occ  # B, H, W, K

        # The actual computation: composite color, alpha and depth in one pass
        rgb = torch.cat([rgb, occ, dpt], dim=-1)  # B, N, 3 + C
        pix_rgb = multi_gather(rgb, idx.view(idx.shape[0], -1), dim=-2).view(*idx.shape, rgb.shape[-1])  # B, H, W, K, -1
        _, rgb, _ = volume_rendering(pix_rgb, pix_occ[..., None])  # B, H, W, -1
        rgb, acc, dpt = rgb[..., :-2], rgb[..., -2:-1], rgb[..., -1:]
        dpt = dpt + (1 - acc) * dpt.max()  # only for the looks (rendered depth are already premultiplied)

        if return_full:
            return rgb, acc, dpt, idx, pix_occ
        else:
            return rgb, acc, dpt

    def forward_idx(self, xyz: torch.Tensor, rad: torch.Tensor, batch: dotdict):
        """Run all peeling passes, returning per-pixel point indices, validity
        mask, and depth, each of shape (1, H, W, K)."""
        from cuda import cudart
        kind = cudart.cudaMemcpyKind.cudaMemcpyDeviceToDevice

        # !: BATCH
        H, W = batch.meta.H[0].item(), batch.meta.W[0].item()
        self.resize_textures(H, W)  # maybe resize the texture
        self.resize_buffers(xyz.shape[1])  # maybe resize the buffer
        _, _, old_W, old_H = gl.glGetIntegerv(gl.GL_VIEWPORT)  # remember viewport to restore later
        gl.glViewport(0, 0, W, H)
        gl.glScissor(0, 0, W, H)  # only render in this small region of the viewport

        # Prepare for input data
        data = torch.cat([xyz, rad], dim=-1).type(self.dtype).ravel()

        # Upload to opengl for rendering
        CHECK_CUDART_ERROR(cudart.cudaGraphicsMapResources(1, self.cu_vbo, torch.cuda.current_stream().cuda_stream))
        cu_vbo_ptr, cu_vbo_size = CHECK_CUDART_ERROR(cudart.cudaGraphicsResourceGetMappedPointer(self.cu_vbo))
        assert cu_vbo_size >= data.numel() * data.element_size(), f'PyTorch(CUDA) and OpenGL vertex buffer size mismatch ({data.numel() * data.element_size()} v.s. {cu_vbo_size}), CUDA side should be less than or equal to the OpenGL side'
        CHECK_CUDART_ERROR(cudart.cudaMemcpyAsync(cu_vbo_ptr,
                                                  data.data_ptr(),
                                                  data.numel() * data.element_size(),
                                                  kind,
                                                  torch.cuda.current_stream().cuda_stream))
        CHECK_CUDART_ERROR(cudart.cudaGraphicsUnmapResources(1, self.cu_vbo, torch.cuda.current_stream().cuda_stream))

        # Perform rasterization (depth peeling using OpenGL)
        if 'meta_stream' in batch.meta: batch.meta.meta_stream.synchronize()  # wait for gpu -> cpu copy to finish
        # FIXME: Strange bug occurs if batch parameter is passed in directly for the construction of Camera(batch=batch.meta)
        gen = self.rasterize_generator(Camera(batch=batch.meta), xyz.shape[-2])  # will upload and render, save output buffer to back_fbo

        ind_maps = []
        dpt_maps = []
        acc_maps = []
        for back_fbo in gen:
            # Copy rendered image and depth back as tensor
            cu_tex = self.cu_write_index if back_fbo == self.write_fbo else self.cu_read_index  # double buffered depth peeling
            cu_dpt = self.cu_write_lower if back_fbo == self.write_fbo else self.cu_read_lower  # double buffered depth peeling

            # Prepare the output # !: BATCH
            ind_map = torch.empty((H, W, 1), dtype=torch.int, device='cuda')  # to hold the data from opengl
            dpt_map = torch.empty((H, W, 1), dtype=torch.float, device='cuda')  # to hold the data from opengl

            # The resources in resources may be accessed by CUDA until they are unmapped.
            # The graphics API from which resources were registered should not access any resources while they are mapped by CUDA.
            # If an application does so, the results are undefined.
            CHECK_CUDART_ERROR(cudart.cudaGraphicsMapResources(1, cu_tex, torch.cuda.current_stream().cuda_stream))
            CHECK_CUDART_ERROR(cudart.cudaGraphicsMapResources(1, cu_dpt, torch.cuda.current_stream().cuda_stream))
            cu_tex_arr = CHECK_CUDART_ERROR(cudart.cudaGraphicsSubResourceGetMappedArray(cu_tex, 0, 0))
            cu_dpt_arr = CHECK_CUDART_ERROR(cudart.cudaGraphicsSubResourceGetMappedArray(cu_dpt, 0, 0))
            CHECK_CUDART_ERROR(cudart.cudaMemcpy2DFromArrayAsync(ind_map.data_ptr(),  # dst
                                                                 W * ind_map.shape[-1] * ind_map.element_size(),  # dpitch
                                                                 cu_tex_arr,  # src
                                                                 0,  # wOffset
                                                                 0,  # hOffset
                                                                 W * ind_map.shape[-1] * ind_map.element_size(),  # width Width of matrix transfer (columns in bytes)
                                                                 H,  # height
                                                                 kind,  # kind
                                                                 torch.cuda.current_stream().cuda_stream))  # stream
            CHECK_CUDART_ERROR(cudart.cudaMemcpy2DFromArrayAsync(dpt_map.data_ptr(),
                                                                 W * dpt_map.shape[-1] * dpt_map.element_size(),
                                                                 cu_dpt_arr,
                                                                 0,
                                                                 0,
                                                                 W * dpt_map.shape[-1] * dpt_map.element_size(),
                                                                 H,
                                                                 kind,
                                                                 torch.cuda.current_stream().cuda_stream))
            CHECK_CUDART_ERROR(cudart.cudaGraphicsUnmapResources(1, cu_tex, torch.cuda.current_stream().cuda_stream))  # MARK: SYNC
            CHECK_CUDART_ERROR(cudart.cudaGraphicsUnmapResources(1, cu_dpt, torch.cuda.current_stream().cuda_stream))  # MARK: SYNC

            # Ouput reshaping
            ind_map, dpt_map = ind_map[None].flip(1), dpt_map[None].flip(1)  # GL's origin is bottom-left: flip vertically
            acc_map = ind_map != -1  # valid where a point index was written
            dpt_map = torch.where(dpt_map == 0, dpt_map.max(), dpt_map)  # background (0) becomes farthest depth
            ind_maps.append(ind_map)
            acc_maps.append(acc_map)
            dpt_maps.append(dpt_map)

        ind_map = torch.cat(ind_maps, dim=-1)  # B, H, W, K
        acc_map = torch.cat(acc_maps, dim=-1)  # B, H, W, K
        dpt_map = torch.cat(dpt_maps, dim=-1)  # B, H, W, K

        # Some house keepings
        gl.glViewport(0, 0, old_W, old_H)
        gl.glScissor(0, 0, old_W, old_H)
        return ind_map, acc_map, dpt_map
|
evocodebench_data_83
|
import torch
import numpy as np
import torch.nn as nn
import torch.nn.functional as F
import torchvision.models.vgg as vgg
from collections import namedtuple
from easyvolcap.utils.prop_utils import searchsorted, matchup_channels
from enum import Enum, auto
class ElasticLossReduceType(Enum):
    # Reduction modes for per-point elastic energy; semantics inferred from the
    # names (WEIGHT: weighted reduction, MEDIAN: median reduction) - confirm at call sites.
    WEIGHT = auto()
    MEDIAN = auto()
class ImgLossType(Enum):
    """Selectable image reconstruction loss flavors."""
    PERC = auto()  # lpips
    CHARB = auto()  # presumably Charbonnier - confirm at call sites
    HUBER = auto()
    L1 = auto()
    L2 = auto()
    SSIM = auto()
class DptLossType(Enum):
    """Selectable depth supervision loss flavors."""
    SMOOTHL1 = auto()
    L1 = auto()
    L2 = auto()
    SSIMSE = auto()  # scale-and-shift invariant MSE (see ScaleAndShiftInvariantMSELoss)
    SSIMAE = auto()  # scale-and-shift invariant MAE (see ScaleAndShiftInvariantMAELoss)
    SILOG = auto()  # scale-invariant log loss (see ScaleInvariantLogLoss)
    CONTINUITY = auto()
    RANKING = auto()
# from mipnerf360
def inner_outer(t0, t1, y1):
    """Construct inner and outer measures on (t1, y1) for t0.

    Args:
        t0: query interval edges.
        t1: reference interval edges.
        y1: reference weights, one per interval of t1 (one fewer than edges).
    Returns:
        (y0_inner, y0_outer): lower and upper bounds on the mass of the
        (t1, y1) histogram falling inside each interval of t0.
    """
    # Cumulative sum of y1 with a leading zero so cy1[..., i] = sum(y1[..., :i]).
    cy1 = torch.cat([torch.zeros_like(y1[..., :1]), torch.cumsum(y1, dim=-1)], dim=-1) # 129
    # Indices into t1 bracketing each edge of t0 (project helper from prop_utils).
    idx_lo, idx_hi = searchsorted(t1, t0)
    cy1_lo = torch.take_along_dim(cy1, idx_lo, dim=-1) # 128
    cy1_hi = torch.take_along_dim(cy1, idx_hi, dim=-1)
    # Outer measure: all reference mass that could overlap the query interval.
    y0_outer = cy1_hi[..., 1:] - cy1_lo[..., :-1] # 127
    # Inner measure: mass strictly contained; zero when no full reference
    # interval fits inside the query interval.
    y0_inner = torch.where(idx_hi[..., :-1] <= idx_lo[..., 1:], cy1_lo[..., 1:] - cy1_hi[..., :-1], 0)
    return y0_inner, y0_outer
# from mipnerf360
def lossfun_outer(t: torch.Tensor, w: torch.Tensor, t_env: torch.Tensor, w_env: torch.Tensor, eps=torch.finfo(torch.float32).eps):
    """The proposal weight should be an upper envelope on the nerf weight.

    MipNeRF-360 interlevel (proposal) loss. Accepts t.shape[-1] ==
    w.shape[-1] + 1 (edges vs. interval weights). Penalizes NeRF weights `w`
    that exceed the outer measure of the proposal histogram (t_env, w_env).
    (Originally this string sat after the first statements, making it a dead
    string expression instead of the function docstring.)
    """
    t, w = matchup_channels(t, w)
    t_env, w_env = matchup_channels(t_env, w_env)
    _, w_outer = inner_outer(t, t_env, w_env)
    # We assume w_inner <= w <= w_outer. We don't penalize w_inner because it's
    # more effective to pull w_outer up than it is to push w_inner down.
    # Scaled half-quadratic loss that gives a constant gradient at w_outer = 0.
    return (w - w_outer).clip(0.).pow(2) / (w + eps)
def blur_stepfun(x, y, r):
    """Convolve a step function (knots x, values y) with a box filter of half-width r.

    Returns the new knot locations xr (sorted union of x - r and x + r) and the
    blurred, non-negative piecewise-linear values yr at those knots.
    """
    # Knots of the blurred function: every original edge shifted by +/- r.
    xr, order = torch.sort(torch.cat([x - r, x + r], dim=-1))
    # Slope change contributed by each step edge, spread over width 2r.
    padded_hi = torch.cat([y, torch.zeros_like(y[..., :1])], dim=-1)
    padded_lo = torch.cat([torch.zeros_like(y[..., :1]), y], dim=-1)
    slope = (padded_hi - padded_lo) / (2 * r)
    # Rising edges fire at x - r, falling edges at x + r; gather in sorted order.
    deltas = torch.cat([slope, -slope], dim=-1).take_along_dim(order[..., :-1], dim=-1)
    # Integrate twice: slope deltas -> slopes -> values, clamped to stay >= 0.
    widths = xr[..., 1:] - xr[..., :-1]
    yr = torch.cumsum(widths * torch.cumsum(deltas, dim=-1), dim=-1).clamp_min(0)
    yr = torch.cat([torch.zeros_like(yr[..., :1]), yr], dim=-1)
    return xr, yr
def sorted_interp_quad(x, xp, fpdf, fcdf):
    """interp in quadratic

    Piecewise-quadratic interpolation of a CDF: for each query in `x`, find its
    bracketing interval in the sorted knots `xp` and integrate the linearly
    interpolated pdf (`fpdf`) on top of the bracketing cdf value (`fcdf`).
    Both `x` and `xp` are assumed sorted along the last dimension.
    """
    # Identify the location in `xp` that corresponds to each `x`.
    # The final `True` index in `mask` is the start of the matching interval.
    mask = x[..., None, :] >= xp[..., :, None]
    def find_interval(x, return_idx=False):
        # Grab the value where `mask` switches from True to False, and vice versa.
        # This approach takes advantage of the fact that `x` is sorted.
        x0, x0_idx = torch.max(torch.where(mask, x[..., None], x[..., :1, None]), -2)
        x1, x1_idx = torch.min(torch.where(~mask, x[..., None], x[..., -1:, None]), -2)
        if return_idx:
            return x0, x1, x0_idx, x1_idx
        return x0, x1
    # CDF values and knot indices bracketing each query point.
    fcdf0, fcdf1, fcdf0_idx, fcdf1_idx = find_interval(fcdf, return_idx=True)
    # PDF values at the bracketing knots (pdf is linear within the interval).
    fpdf0 = fpdf.take_along_dim(fcdf0_idx, dim=-1)
    fpdf1 = fpdf.take_along_dim(fcdf1_idx, dim=-1)
    xp0, xp1 = find_interval(xp)
    # Fractional position of the query inside its interval, guarded against
    # zero-width intervals (nan_to_num) and clamped to [0, 1].
    offset = torch.clip(torch.nan_to_num((x - xp0) / (xp1 - xp0), 0), 0, 1)
    # Trapezoid rule on the linearly interpolated pdf yields a quadratic cdf.
    ret = fcdf0 + (x - xp0) * (fpdf0 + fpdf1 * offset + fpdf0 * (1 - offset)) / 2
    return ret
def lossfun_zip_outer(t, w, t_env, w_env, pulse_width, eps=1e-6):
    """Anti-aliased proposal (interlevel) loss.

    Blurs the NeRF histogram (t, w) with a box filter of half-width
    `pulse_width`, resamples it onto the proposal intervals (t_env, w_env) via
    piecewise-quadratic CDF interpolation, and penalizes proposal weights that
    fall below the blurred NeRF weights.
    """
    t, w = matchup_channels(t, w)
    t_env, w_env = matchup_channels(t_env, w_env)
    # Convert interval masses to densities (weight per unit length).
    w_normalize = w / torch.clamp_min(t[..., 1:] - t[..., :-1], eps)
    t_, w_ = blur_stepfun(t, w_normalize, pulse_width)
    w_ = torch.clip(w_, min=0.)
    assert (w_ >= 0.0).all()
    # piecewise linear pdf to piecewise quadratic cdf
    area = 0.5 * (w_[..., 1:] + w_[..., :-1]) * (t_[..., 1:] - t_[..., :-1])
    cdf = torch.cat([torch.zeros_like(area[..., :1]), torch.cumsum(area, dim=-1)], dim=-1)
    # query piecewise quadratic interpolation
    cdf_interp = sorted_interp_quad(t_env, t_, w_, cdf)
    # difference between adjacent interpolated values
    w_s = torch.diff(cdf_interp, dim=-1)
    # One-sided squared penalty, normalized by the proposal weight.
    return ((w_s - w_env).clip(0.).pow(2) / (w_env + eps)).mean()
def lossfun_distortion(t: torch.Tensor, w: torch.Tensor):
    # MipNeRF-360 distortion loss: encourages the ray weights to be compact in t.
    # accepts t.shape[-1] = w.shape[-1] + 1
    t, w = matchup_channels(t, w)
    """Compute iint w[i] w[j] |t[i] - t[j]| di dj."""
    # The loss incurred between all pairs of intervals.
    ut = (t[..., 1:] + t[..., :-1]) / 2  # interval midpoints
    dut = torch.abs(ut[..., :, None] - ut[..., None, :])  # pairwise midpoint distances
    loss_inter = torch.sum(w * torch.sum(w[..., None, :] * dut, dim=-1), dim=-1)
    # The loss incurred within each individual interval with itself.
    loss_intra = torch.sum(w**2 * (t[..., 1:] - t[..., :-1]), dim=-1) / 3
    return loss_inter + loss_intra
def interval_distortion(t0_lo, t0_hi, t1_lo, t1_hi):
    """Compute mean(abs(x-y); x in [t0_lo, t0_hi], y in [t1_lo, t1_hi])."""
    # Disjoint intervals: the mean absolute difference is the midpoint distance.
    mid0 = (t0_lo + t0_hi) / 2
    mid1 = (t1_lo + t1_hi) / 2
    d_disjoint = torch.abs(mid1 - mid0)
    # Overlapping intervals: closed form of the double integral.
    numer = (2 *
             (torch.minimum(t0_hi, t1_hi)**3 - torch.maximum(t0_lo, t1_lo)**3) +
             3 * (t1_hi * t0_hi * torch.abs(t1_hi - t0_hi) +
                  t1_lo * t0_lo * torch.abs(t1_lo - t0_lo) +
                  t1_hi * t0_lo * (t0_lo - t1_hi) +
                  t1_lo * t0_hi * (t1_lo - t0_hi)))
    denom = 6 * (t0_hi - t0_lo) * (t1_hi - t1_lo)
    d_overlap = numer / denom
    # Select the disjoint formula wherever the intervals do not overlap.
    are_disjoint = (t0_lo > t1_hi) | (t1_lo > t0_hi)
    return torch.where(are_disjoint, d_disjoint, d_overlap)
def anneal_loss_weight(weight: float, gamma: float, iter: int, mile: int):
    """Exponentially anneal `weight` by `gamma`, saturating once `iter` reaches `mile`."""
    progress = min(iter / mile, 1)
    return weight * gamma ** progress
def gaussian_entropy_relighting4d(albedo_pred):
    """Sum of per-channel entropies of soft (Gaussian) histograms of the predicted albedo."""
    total = 0
    for c in range(3):
        channel = albedo_pred[..., c]
        histogram = GaussianHistogram(15, 0., 1., sigma=torch.var(channel))
        counts = histogram(channel)
        if counts.sum() > 1e-6:
            probs = counts / counts.sum() + 1e-6
        else:
            # Degenerate histogram: fall back to a flat (maximum-entropy) vector.
            probs = torch.ones_like(counts)
        total = total + torch.sum(-probs * torch.log(probs))
    return total
class GaussianHistogram(nn.Module):
    """Differentiable soft histogram using Gaussian kernels at fixed bin centers.

    Args:
        bins: number of histogram bins.
        min, max: value range covered by the bins.
        sigma: kernel bandwidth as a tensor; its device places the bin centers.
    """

    def __init__(self, bins, min, max, sigma):
        super().__init__()
        self.bins = bins
        self.min = min
        self.max = max
        self.sigma = sigma
        self.delta = float(max - min) / float(bins)
        # Bin centers sit in the middle of each of the `bins` equal-width cells.
        offsets = torch.arange(bins, device=sigma.device).float() + 0.5
        self.centers = float(min) + self.delta * offsets

    def forward(self, x):
        # Signed distance of every sample to every bin center: (bins, N).
        diff = x.unsqueeze(0) - self.centers.unsqueeze(1)
        # Gaussian kernel weight per (center, sample) pair, scaled by bin width.
        weights = torch.exp(-0.5 * (diff / self.sigma)**2) / (self.sigma * np.sqrt(np.pi * 2)) * self.delta
        # Accumulate sample contributions per bin.
        return weights.sum(dim=1)
def gaussian_entropy(x: torch.Tensor, *args, **kwargs):
    """Sum of per-channel entropies of a soft histogram of `x` (see gaussian_histogram)."""
    eps = 1e-6
    hist = gaussian_histogram(x, *args, **kwargs)
    entropy = 0
    for c in range(3):
        channel_hist = hist[..., c]
        if channel_hist.sum() > eps:
            probs = channel_hist / channel_hist.sum() + eps
        else:
            # Degenerate histogram: fall back to a flat (maximum-entropy) vector.
            probs = torch.ones_like(channel_hist)
        entropy = entropy + torch.sum(-probs * torch.log(probs))
    return entropy
def gaussian_histogram(x: torch.Tensor, bins: int = 15, min: float = 0.0, max: float = 1.0):
    """Per-channel soft (Gaussian-kernel) histogram.

    Args:
        x: (..., C) samples, flattened to (N, C).
        bins: number of bins spanning [min, max].
    Returns:
        (bins, C) tensor of soft counts.
    """
    flat = x.view(-1, x.shape[-1])  # N, C
    sigma = flat.var(dim=0)  # per-channel kernel bandwidth
    delta = (max - min) / bins
    centers = min + delta * (torch.arange(bins, device=flat.device, dtype=flat.dtype) + 0.5)  # BIN
    diff = flat[None] - centers[:, None, None]  # BIN, N, C
    kernel = (-0.5 * (diff / sigma).pow(2)).exp() / (sigma * np.sqrt(np.pi * 2)) * delta  # BIN, N, C
    return kernel.sum(dim=1)  # BIN, C
def reg_diff_crit(x: torch.Tensor, iter_step: int, max_weight: float = 1e-4, ann_iter: int = 100 * 500):
    """Norm regularization with a weight annealed linearly from 0 to max_weight over ann_iter steps."""
    weight = min(iter_step, ann_iter) * max_weight / ann_iter
    return reg(x), weight
def reg_raw_crit(x: torch.Tensor, iter_step: int, max_weight: float = 1e-4, ann_iter: int = 100 * 500):
    """Penalize direction changes between the two halves of the point set in `x`.

    x: (B, 2 * N, D); the first N and last N points are compared pairwise on
    the unit sphere. Returns (loss, weight), the weight annealed linearly from
    0 to max_weight over ann_iter steps.
    """
    weight = min(iter_step, ann_iter) * max_weight / ann_iter
    n_batch, n_pts_x2, D = x.shape
    half = n_pts_x2 // 2
    norms = x.norm(dim=-1, keepdim=True)
    # Unit directions; the epsilon guards zero-length vectors.
    directions = x / (norms + 1e-8)
    # Only the angular part is penalized; the length term is intentionally disabled.
    loss = reg(directions[:, half:, :] - directions[:, :half, :])
    return loss, weight
class LossNetwork(torch.nn.Module):
    """Reference:
    https://discuss.pytorch.org/t/how-to-extract-features-of-an-image-from-a-trained-model/119/3

    Frozen VGG-19 feature extractor returning relu1/relu2 activations for
    perceptual losses. Pretrained weights are downloaded on first construction.
    """
    def __init__(self):
        super(LossNetwork, self).__init__()
        # Newer torchvision versions replaced `pretrained=` with `weights=`.
        try:
            from torchvision.models import VGG19_Weights
            self.vgg_layers = vgg.vgg19(weights=VGG19_Weights.DEFAULT).features
        except ImportError:
            self.vgg_layers = vgg.vgg19(pretrained=True).features
        # The extractor is frozen; it is only used as a fixed feature map.
        for param in self.vgg_layers.parameters():
            param.requires_grad = False
        '''
        self.layer_name_mapping = {
            '3': "relu1",
            '8': "relu2",
            '17': "relu3",
            '26': "relu4",
            '35': "relu5",
        }
        '''
        # Only the first two ReLU stages are used (cheap, low-level features).
        self.layer_name_mapping = {'3': "relu1", '8': "relu2"}
    def forward(self, x):
        # Run through VGG features, collecting relu1/relu2; stop early at layer '8'.
        output = {}
        for name, module in self.vgg_layers._modules.items():
            x = module(x)
            if name in self.layer_name_mapping:
                output[self.layer_name_mapping[name]] = x
            if name == '8':
                break
        LossOutput = namedtuple("LossOutput", ["relu1", "relu2"])
        return LossOutput(**output)
class PerceptualLoss(torch.nn.Module):
    """VGG feature-space L1 plus pixel L1 and L2 image loss.

    NOTE(review): the feature network is moved to CUDA in __init__, so this
    module requires a GPU and CUDA inputs.
    """
    def __init__(self):
        super(PerceptualLoss, self).__init__()
        self.model = LossNetwork()
        self.model.cuda()
        self.model.eval()
        self.mse_loss = torch.nn.MSELoss(reduction='mean')
        self.l1_loss = torch.nn.L1Loss(reduction='mean')
    def forward(self, x, target):
        # Only the first 3 channels (RGB) are passed through the VGG extractor.
        x_feature = self.model(x[:, 0:3, :, :])
        target_feature = self.model(target[:, 0:3, :, :])
        # Average L1 over the relu1/relu2 feature maps.
        feature_loss = (
            self.l1_loss(x_feature.relu1, target_feature.relu1) +
            self.l1_loss(x_feature.relu2, target_feature.relu2)) / 2.0
        # Pixel-space terms use all channels of the inputs.
        l1_loss = self.l1_loss(x, target)
        l2_loss = self.mse_loss(x, target)
        loss = feature_loss + l1_loss + l2_loss
        return loss
class VGGPerceptualLoss(torch.nn.Module):
    """VGG-16 perceptual loss with optional Gram-matrix style terms.

    Inputs are normalized with ImageNet statistics; non-3-channel inputs are
    repeated to RGB. `feature_layers` selects stages for L1 feature matching,
    `style_layers` selects stages for Gram-matrix matching.
    """
    def __init__(self, resize=False):
        super(VGGPerceptualLoss, self).__init__()
        blocks = []
        import torchvision
        # NOTE(review): `pretrained=` is deprecated in newer torchvision; kept as-is.
        vgg16 = torchvision.models.vgg16(pretrained=True)
        # Four feature stages of VGG-16, split at fixed layer indices.
        blocks.append(vgg16.features[:4].eval())
        blocks.append(vgg16.features[4:9].eval())
        blocks.append(vgg16.features[9:16].eval())
        blocks.append(vgg16.features[16:23].eval())
        # Frozen feature extractor.
        for bl in blocks:
            for p in bl.parameters():
                p.requires_grad = False
        self.blocks = nn.ModuleList(blocks)
        self.transform = F.interpolate
        self.resize = resize
        # ImageNet channel statistics for input normalization.
        self.register_buffer("mean", torch.tensor([0.485, 0.456, 0.406]).view(1, 3, 1, 1))
        self.register_buffer("std", torch.tensor([0.229, 0.224, 0.225]).view(1, 3, 1, 1))
    def forward(self, input, target, feature_layers=[0, 1, 2, 3], style_layers=[]):
        # Repeat to RGB; assumes a single-channel input when shape[1] != 3.
        if input.shape[1] != 3:
            input = input.repeat(1, 3, 1, 1)
            target = target.repeat(1, 3, 1, 1)
        input = (input - self.mean) / self.std
        target = (target - self.mean) / self.std
        if self.resize:
            input = self.transform(input, mode='bilinear', size=(224, 224), align_corners=False)
            target = self.transform(target, mode='bilinear', size=(224, 224), align_corners=False)
        loss = 0.0
        x = input
        y = target
        for i, block in enumerate(self.blocks):
            x = block(x)
            y = block(y)
            if i in feature_layers:
                loss += F.l1_loss(x, y)
            if i in style_layers:
                # Gram matrices capture feature co-activation statistics (style).
                act_x = x.reshape(x.shape[0], x.shape[1], -1)
                act_y = y.reshape(y.shape[0], y.shape[1], -1)
                gram_x = act_x @ act_x.permute(0, 2, 1)
                gram_y = act_y @ act_y.permute(0, 2, 1)
                loss += F.l1_loss(gram_x, gram_y)
        return loss
def eikonal(x: torch.Tensor, th=1.0) -> torch.Tensor:
    """Eikonal regularization: mean squared deviation of the last-dim norms from `th`."""
    deviation = x.norm(dim=-1) - th
    return deviation.pow(2).mean()
def sdf_mask_crit(ret, batch):
    """BCE mask supervision on SDF values with a sharpness `alpha` that doubles at fixed milestones."""
    msk_sdf = ret['msk_sdf']
    msk_label = ret['msk_label']
    milestones = [10000, 20000, 30000, 40000, 50000]
    # Starts at 50 and doubles for every milestone the current step has passed.
    alpha = 50 * 2 ** sum(batch['iter_step'] > m for m in milestones)
    # Negative SDF (inside the surface) maps to a positive logit.
    logits = -alpha * msk_sdf
    # Divide by alpha to keep the loss magnitude stable as alpha grows.
    mask_loss = F.binary_cross_entropy_with_logits(logits, msk_label) / alpha
    return mask_loss
def cross_entropy(x: torch.Tensor, y: torch.Tensor):
    """Channel-last cross entropy; `x` holds unnormalized logits, `y` target probabilities."""
    num_classes = x.shape[-1]
    logits = x.view(-1, num_classes)  # N, C
    targets = y.view(-1, y.shape[-1])  # N, C
    return F.cross_entropy(logits, targets)
def huber(x: torch.Tensor, y: torch.Tensor):
    """Mean Huber loss (default delta) between x and y."""
    return F.huber_loss(input=x, target=y, reduction='mean')
def smoothl1(x: torch.Tensor, y: torch.Tensor):
    """Mean smooth-L1 loss (default beta) between x and y."""
    return F.smooth_l1_loss(input=x, target=y)
def mse(x: torch.Tensor, y: torch.Tensor):
    """Mean squared error, computed in float32."""
    residual = x.float() - y.float()
    return residual.square().mean()
def dot(x: torch.Tensor, y: torch.Tensor):
    """Dot product of x and y along the last dimension."""
    return torch.sum(x * y, dim=-1)
def l1(x: torch.Tensor, y: torch.Tensor):
    """Channel-summed absolute difference between x and y, averaged over the rest."""
    residual = x - y
    return l1_reg(residual)
def l2(x: torch.Tensor, y: torch.Tensor):
    """Channel-summed squared difference between x and y, averaged over the rest."""
    residual = x - y
    return l2_reg(residual)
def l1_reg(x: torch.Tensor):
    """Sum of absolute values over the last dim, averaged over the remaining dims."""
    return torch.mean(torch.sum(torch.abs(x), dim=-1))
def l2_reg(x: torch.Tensor) -> torch.Tensor:
    """Sum of squares over the last dim, averaged over the remaining dims."""
    return torch.mean(torch.sum(x * x, dim=-1))
def bce_loss(x: torch.Tensor, y: torch.Tensor):
    """Mean binary cross entropy; x are probabilities in [0, 1], y the targets."""
    return F.binary_cross_entropy(input=x, target=y)
def mIoU_loss(x: torch.Tensor, y: torch.Tensor):
    """
    Compute the mean intersection-over-union loss over masked regions.
    x, y: B, N, 1
    """
    intersection = (x * y).sum(-1).sum(-1)
    union = (x + y).sum(-1).sum(-1) - intersection
    # The union is detached so gradients flow only through the intersection term.
    mean_iou = (intersection / union.detach()).mean()
    return 1 - mean_iou
def reg(x: torch.Tensor) -> torch.Tensor:
    """Mean L2 norm over the last dimension of x."""
    return torch.mean(x.norm(dim=-1))
def thresh(x: torch.Tensor, a: torch.Tensor, eps: float = 1e-8):
    """Reciprocal of the L2 loss between x and a: large when x is close to a."""
    distance = l2(x, a)
    return 1 / (distance + eps)
def elastic_crit(jac: torch.Tensor) -> torch.Tensor:
    """Compute the raw 'log_svals' type elastic energy, and
    remap it using the Geman-McClure type of robust loss.
    Args:
        jac (torch.Tensor): (B, N, 3, 3), the gradient of warpped xyz with respect to the original xyz
    Return:
        elastic_loss (torch.Tensor): (B, N),
    """
    # !: CUDA IMPLEMENTATION OF SVD IS EXTREMELY SLOW
    # old_device = jac.device
    # jac = jac.cpu()
    # svd_backward: Setting compute_uv to false in torch.svd doesn't compute singular matrices, and hence we cannot compute backward. Please use torch.svd(compute_uv=True)
    # NOTE(review): torch.svd is deprecated in favor of torch.linalg.svd/svdvals.
    _, S, _ = torch.svd(jac, compute_uv=True)  # (B, N, 3) singular values
    # S = S.to(old_device)
    # Log singular values measure local stretch/compression; clamp avoids log(0).
    log_svals = torch.log(torch.clamp(S, min=1e-6))  # (B, N, 3)
    sq_residual = torch.sum(log_svals**2, dim=-1)  # (B, N)
    # TODO: determine whether it is a good choice to compute the robust loss here
    # alpha=-2.0 selects the Geman-McClure branch of the general robust loss.
    elastic_loss = general_loss_with_squared_residual(sq_residual, alpha=-2.0, scale=0.03)
    return elastic_loss
def general_loss_with_squared_residual(squared_x, alpha, scale):
    r"""Barron's general adaptive robust loss, evaluated from a squared residual.

    Implements rho(x, \alpha, c) from "A General and Adaptive Robust Loss
    Function" (Barron, https://arxiv.org/abs/1701.03077), fusing the sqrt of
    the residual into the loss formulation.

    Args:
        squared_x: squared residuals; any shape.
        alpha: shape parameter in [-inf, inf]. Special values: -inf Welsch,
            -2 Geman-McClure, 0 Cauchy/Lorentzian, 1 Charbonnier, 2 L2. The
            gradient w.r.t. alpha is 0 at -inf, inf, 0, and 2.
        scale: scale parameter; |x| < scale behaves like an L2 bowl, beyond it
            the shape is governed by alpha.

    Returns:
        Per-element losses with the same shape as squared_x.
    """
    # https://pytorch.org/docs/stable/type_info.html
    eps = torch.tensor(torch.finfo(torch.float32).eps)
    # Promote the Python floats to tensors on the residual's device.
    alpha = torch.tensor(alpha).to(squared_x.device)
    scale = torch.tensor(scale).to(squared_x.device)
    # Residual normalized by the squared scale; reused by every branch below.
    sq = squared_x / (scale ** 2)
    # Closed forms for the special values of alpha.
    loss_two = 0.5 * sq                    # alpha == 2 (L2)
    loss_zero = log1p_safe(0.5 * sq)       # alpha == 0 (Cauchy/Lorentzian)
    loss_neginf = -torch.expm1(-0.5 * sq)  # alpha == -inf (Welsch/Leclerc)
    loss_posinf = expm1_safe(0.5 * sq)     # alpha == +inf
    # Generic branch: clamp |2 - alpha| and |alpha| away from zero before dividing.
    beta_safe = torch.maximum(eps, torch.abs(alpha - 2.))
    sign = torch.where(
        torch.greater_equal(alpha, torch.tensor(0.)), torch.ones_like(alpha),
        -torch.ones_like(alpha))
    alpha_safe = sign * torch.maximum(eps, torch.abs(alpha))
    loss_otherwise = (beta_safe / alpha_safe) * (
        torch.pow(sq / beta_safe + 1., 0.5 * alpha) - 1.)
    # Dispatch on alpha without data-dependent control flow.
    loss = torch.where(
        alpha == -torch.inf, loss_neginf,
        torch.where(
            alpha == 0, loss_zero,
            torch.where(
                alpha == 2, loss_two,
                torch.where(alpha == torch.inf, loss_posinf, loss_otherwise))))
    return scale * loss
def log1p_safe(x):
    """The same as torch.log1p(x), but clamps the input to prevent NaNs."""
    clamped = torch.clamp(x, max=3e37)
    return torch.log1p(clamped)
def expm1_safe(x):
    """The same as torch.expm1(x), but clamps the input to prevent NaNs."""
    clamped = torch.clamp(x, max=87.5)
    return torch.expm1(clamped)
def compute_plane_tv(t):
    """Total variation of a (B, C, H, W) feature plane: per-difference averages along H and W, summed and doubled."""
    batch_size, c, h, w = t.shape
    n_h = batch_size * c * (h - 1) * w  # number of vertical differences
    n_w = batch_size * c * h * (w - 1)  # number of horizontal differences
    tv_h = (t[..., 1:, :] - t[..., :h - 1, :]).square().sum()
    tv_w = (t[..., :, 1:] - t[..., :, :w - 1]).square().sum()
    # Sums run over batch and channels as well (no per-sample averaging).
    return 2 * (tv_h / n_h + tv_w / n_w)
def compute_planes_tv(embedding):
    """Sum of total-variation penalties over a list of feature planes."""
    return sum(compute_plane_tv(emb) for emb in embedding)
def compute_plane_smoothness(t):
    """Second-order smoothness penalty along the last axis of a (B, C, H, W) plane."""
    batch_size, c, h, w = t.shape
    # Convolve with a second derivative filter along the last (W) dimension,
    # which is the time axis of a time-plane. (The original comments claimed
    # dimension 2 and h-1/h-2 shapes; the slices below act on W.)
    first_difference = t[..., 1:] - t[..., :w - 1]  # [batch, c, h, w-1]
    second_difference = first_difference[..., 1:] - first_difference[..., :w - 2]  # [batch, c, h, w-2]
    # Take the L2 norm of the result
    return torch.square(second_difference).mean()
def compute_time_planes_smooth(embedding):
    """Sum of temporal smoothness penalties over a list of feature planes."""
    return sum(compute_plane_smoothness(emb) for emb in embedding)
def compute_ssim(x: torch.Tensor, y: torch.Tensor):
    """SSIM between image batches via the third-party pytorch_msssim package (imported lazily)."""
    from pytorch_msssim import ssim
    # data_range=1.0 assumes inputs in [0, 1]; window/K values are the standard SSIM constants.
    return ssim(x, y, data_range=1.0, win_size=11, win_sigma=1.5, K=(0.01, 0.03))
# from MonoSDF
def compute_scale_and_shift(prediction, target, mask):
    """Per-image least-squares scale/shift aligning prediction to target over masked pixels.

    Solves min_{s, o} sum(mask * (s * prediction + o - target)^2) per batch
    element; returns (scale, shift), zeros where the 2x2 system is singular.
    """
    # Normal equations A [s, o]^T = b with symmetric A = [[a_00, a_01], [a_01, a_11]].
    a_00 = torch.sum(mask * prediction * prediction, (1, 2))
    a_01 = torch.sum(mask * prediction, (1, 2))
    a_11 = torch.sum(mask, (1, 2))
    b_0 = torch.sum(mask * prediction * target, (1, 2))
    b_1 = torch.sum(mask * target, (1, 2))
    scale = torch.zeros_like(b_0)
    shift = torch.zeros_like(b_1)
    # Invert the 2x2 system only where the determinant is nonzero.
    det = a_00 * a_11 - a_01 * a_01
    valid = det.nonzero()
    scale[valid] = ( a_11[valid] * b_0[valid] - a_01[valid] * b_1[valid]) / det[valid]
    shift[valid] = (-a_01[valid] * b_0[valid] + a_00[valid] * b_1[valid]) / det[valid]
    return scale, shift
def reduction_batch_based(image_loss, M):
    """Average loss over all valid pixels in the batch; 0 when there are none."""
    divisor = torch.sum(M)
    if divisor == 0:
        # No valid pixels anywhere implies the summed loss is 0 as well.
        return 0
    return torch.sum(image_loss) / divisor
def reduction_image_based(image_loss, M):
    """Mean of per-image average losses; images with no valid pixels keep their raw value.

    NOTE(review): `image_loss` is normalized in place.
    """
    valid = M.nonzero()
    image_loss[valid] /= M[valid]
    return torch.mean(image_loss)
def mse_loss(prediction, target, mask, reduction=reduction_batch_based):
    """Masked per-image MSE, scaled by 1/(2M) following the MiDaS SSI-MSE convention."""
    # Number of valid pixels
    M = torch.sum(mask, (1, 2)) # (B,)
    # L2 loss
    res = prediction - target # (B, H, W)
    image_loss = torch.sum(mask * res * res, (1, 2)) # (B,)
    # The factor of 2 in the divisor implements the 1/(2M) scaling.
    return reduction(image_loss, 2 * M)
def gradient_loss(prediction, target, mask, reduction=reduction_batch_based):
    """Gradient matching term: L1 on masked spatial gradients of the residual (MiDaS-style)."""
    M = torch.sum(mask, (1, 2))  # valid pixels per image
    diff = prediction - target
    diff = torch.mul(mask, diff)  # zero out invalid pixels before differencing
    # Horizontal gradients, kept only where both neighboring pixels are valid.
    grad_x = torch.abs(diff[:, :, 1:] - diff[:, :, :-1])
    mask_x = torch.mul(mask[:, :, 1:], mask[:, :, :-1])
    grad_x = torch.mul(mask_x, grad_x)
    # Vertical gradients, same masking rule.
    grad_y = torch.abs(diff[:, 1:, :] - diff[:, :-1, :])
    mask_y = torch.mul(mask[:, 1:, :], mask[:, :-1, :])
    grad_y = torch.mul(mask_y, grad_y)
    image_loss = torch.sum(grad_x, (1, 2)) + torch.sum(grad_y, (1, 2))
    return reduction(image_loss, M)
class MSELoss(nn.Module):
    """Masked MSE over valid pixels with selectable reduction ('batch-based' or image-based)."""
    def __init__(self, reduction='batch-based'):
        super().__init__()
        # Any value other than 'batch-based' falls back to image-based reduction.
        if reduction == 'batch-based':
            self.__reduction = reduction_batch_based
        else:
            self.__reduction = reduction_image_based
    def forward(self, prediction, target, mask):
        # prediction/target/mask: (B, H, W); see mse_loss for the 1/(2M) scaling.
        return mse_loss(prediction, target, mask, reduction=self.__reduction)
class GradientLoss(nn.Module):
    """Multi-scale gradient matching loss; each scale halves the resolution by striding."""
    def __init__(self, scales=1, reduction='batch-based'):
        super().__init__()
        # Any value other than 'batch-based' falls back to image-based reduction.
        if reduction == 'batch-based':
            self.__reduction = reduction_batch_based
        else:
            self.__reduction = reduction_image_based
        self.__scales = scales
    def forward(self, prediction, target, mask):
        # Sum the gradient term over progressively coarser (strided) inputs.
        total = 0
        for scale in range(self.__scales):
            step = pow(2, scale)
            total += gradient_loss(prediction[:, ::step, ::step], target[:, ::step, ::step],
                                   mask[:, ::step, ::step], reduction=self.__reduction)
        return total
class ScaleAndShiftInvariantMSELoss(nn.Module):
    """Scale-and-shift invariant MSE (MiDaS-style): least-squares aligns the
    prediction to the target per image, then applies the masked MSE plus an
    optional multi-scale gradient regularizer weighted by `alpha`."""
    def __init__(self, alpha=0.5, scales=4, reduction='batch-based'):
        super().__init__()
        self.__data_loss = MSELoss(reduction=reduction)
        self.__regularization_loss = GradientLoss(scales=scales, reduction=reduction)
        self.__alpha = alpha
        self.__prediction_ssi = None  # cached aligned prediction from the last forward pass
    def forward(self, prediction, target, mask):
        # Deal with the channel dimension, the input dimension may have (B, C, H, W) or (B, H, W)
        if prediction.ndim == 4: prediction = prediction[:, 0] # (B, H, W)
        if target.ndim == 4: target = target[:, 0] # (B, H, W)
        if mask.ndim == 4: mask = mask[:, 0] # (B, H, W)
        # Compute scale and shift
        scale, shift = compute_scale_and_shift(prediction, target, mask)
        self.__prediction_ssi = scale.view(-1, 1, 1) * prediction + shift.view(-1, 1, 1)
        total = self.__data_loss(self.__prediction_ssi, target, mask)
        # Add regularization if needed
        if self.__alpha > 0:
            total += self.__alpha * self.__regularization_loss(self.__prediction_ssi, target, mask)
        return total
    def __get_prediction_ssi(self):
        return self.__prediction_ssi
    # Read-only access to the aligned prediction computed by the last forward pass.
    prediction_ssi = property(__get_prediction_ssi)
# from MonoSDF
def median_normalize(x, mask):
    """ Median normalize a tensor for all valid pixels.
    This operation is performed without batch dimension.
    Args:
        x (torch.Tensor): (H, W), original tensor
        mask (torch.Tensor): (H, W), mask tensor
    Return:
        y (torch.Tensor): (H, W), median normalized tensor
    """
    M = torch.sum(mask)
    # Return original tensor if there is no valid pixel
    if M == 0:
        return x
    # Compute median and scale
    t = torch.quantile(x[mask == 1], q=0.5)  # scalar
    # Scale is the mean *absolute* deviation from the median (MiDaS eq. for s(d)).
    # The original summed signed deviations, which is ~0 around the median and
    # blew the normalization up.
    s = torch.sum((x[mask == 1] - t).abs()) / M  # scalar
    if s == 0:
        # All valid pixels equal the median; avoid division by zero.
        return x - t
    # Return median normalized tensor
    return (x - t) / s
def mae_loss(prediction, target, mask, reduction=reduction_batch_based):
    """Masked per-image MAE.

    NOTE(review): the divisor keeps the 1/(2M) convention of mse_loss; for an
    L1 term the factor 2 is a scaling choice, not a mathematical necessity.
    """
    # Number of valid pixels
    M = torch.sum(mask, (1, 2)) # (B,)
    # L1 loss
    res = (prediction - target).abs() # (B, H, W)
    image_loss = torch.sum(mask * res, (1, 2)) # (B,)
    return reduction(image_loss, 2 * M)
class MAELoss(nn.Module):
    """Masked MAE over valid pixels with selectable reduction ('batch-based' or image-based)."""
    def __init__(self, reduction='batch-based'):
        super().__init__()
        # Any value other than 'batch-based' falls back to image-based reduction.
        if reduction == 'batch-based':
            self.__reduction = reduction_batch_based
        else:
            self.__reduction = reduction_image_based
    def forward(self, prediction, target, mask):
        # prediction/target/mask: (B, H, W); see mae_loss for the divisor convention.
        return mae_loss(prediction, target, mask, reduction=self.__reduction)
class ScaleAndShiftInvariantMAELoss(nn.Module):
    """Scale-and-shift invariant MAE depth loss (MonoSDF-style).

    Median-normalizes prediction and target per image over valid pixels, then
    combines a masked MAE data term with an optional multi-scale gradient
    regularizer.

    Args:
        alpha: weight of the gradient regularizer (0 disables it).
        scales: number of scales for the gradient term.
        reduction: 'batch-based' or 'image-based' pixel averaging.
    """
    def __init__(self, alpha=0.5, scales=4, reduction='batch-based'):
        super().__init__()
        self.__data_loss = MAELoss(reduction=reduction)
        self.__regularization_loss = GradientLoss(scales=scales, reduction=reduction)
        self.__alpha = alpha
    def forward(self, prediction, target, mask):
        # Deal with the channel dimension, the input dimension may have (B, C, H, W) or (B, H, W)
        if prediction.ndim == 4: prediction = prediction[:, 0] # (B, H, W)
        if target.ndim == 4: target = target[:, 0] # (B, H, W)
        if mask.ndim == 4: mask = mask[:, 0] # (B, H, W)
        # Clone before normalizing in place: the slices above are views, so
        # writing into them would silently corrupt the caller's tensors.
        prediction = prediction.clone()
        target = target.clone()
        # TODO: Maybe there is a better way to do the batching
        # But `torch.quantile` does not support multiple `dim` argument for now
        for i in range(prediction.shape[0]):
            prediction[i] = median_normalize(prediction[i], mask[i]) # (H, W)
            target[i] = median_normalize(target[i], mask[i]) # (H, W)
        # Compute the scale-and-shift invariant MAE loss
        total = self.__data_loss(prediction, target, mask)
        # Add regularization if needed
        if self.__alpha > 0:
            # Fix: the original referenced the nonexistent `self.prediction`,
            # raising AttributeError whenever alpha > 0 (the default).
            total += self.__alpha * self.__regularization_loss(prediction, target, mask)
        return total
# Modified version of Adabins repository
# https://github.com/shariqfarooq123/AdaBins/blob/0952d91e9e762be310bb4cd055cbfe2448c0ce20/loss.py#L7
class ScaleInvariantLogLoss(nn.Module):
    """Scale-invariant log (SILog) depth loss, summed over the batch."""

    def __init__(self, alpha=10.0, beta=0.15, eps=0.0):
        super().__init__()
        self.alpha = alpha
        self.beta = beta
        # The eps is added to avoid log(0) and division by zero,
        # but the network output should already be non-negative.
        self.eps = eps

    def forward(self, prediction, target, mask):
        # Accept (B, C, H, W) or (B, H, W); use the first channel.
        if prediction.ndim == 4: prediction = prediction[:, 0]
        if target.ndim == 4: target = target[:, 0]
        if mask.ndim == 4: mask = mask[:, 0]
        total = 0
        # Process images one by one (per-image variance/mean of the residuals).
        for pred_i, target_i, mask_i in zip(prediction, target, mask):
            # Log-depth residuals over valid pixels only.
            g = torch.log(pred_i[mask_i] + self.eps) - torch.log(target_i[mask_i] + self.eps)
            Dg = torch.var(g) + self.beta * torch.mean(g) ** 2
            total += self.alpha * torch.sqrt(Dg)
        return total
|
evocodebench_data_84
|
import torch
import numpy as np
import torch.nn as nn
import torch.nn.functional as F
import torchvision.models.vgg as vgg
from collections import namedtuple
from easyvolcap.utils.prop_utils import searchsorted, matchup_channels
from enum import Enum, auto
class ElasticLossReduceType(Enum):
    # Reduction modes for per-point elastic energy; semantics inferred from the
    # names (WEIGHT: weighted reduction, MEDIAN: median reduction) - confirm at call sites.
    WEIGHT = auto()
    MEDIAN = auto()
class ImgLossType(Enum):
    """Selectable image reconstruction loss flavors."""
    PERC = auto()  # lpips
    CHARB = auto()  # presumably Charbonnier - confirm at call sites
    HUBER = auto()
    L1 = auto()
    L2 = auto()
    SSIM = auto()
class DptLossType(Enum):
    """Selectable depth supervision loss flavors."""
    SMOOTHL1 = auto()
    L1 = auto()
    L2 = auto()
    SSIMSE = auto()  # scale-and-shift invariant MSE
    SSIMAE = auto()  # scale-and-shift invariant MAE
    SILOG = auto()  # scale-invariant log loss
    CONTINUITY = auto()
    RANKING = auto()
# from mipnerf360
def inner_outer(t0, t1, y1):
    """Construct inner and outer measures on (t1, y1) for t0.

    Args:
        t0: query interval edges.
        t1: reference interval edges.
        y1: reference weights, one per interval of t1 (one fewer than edges).
    Returns:
        (y0_inner, y0_outer): lower and upper bounds on the mass of the
        (t1, y1) histogram falling inside each interval of t0.
    """
    # Cumulative sum of y1 with a leading zero so cy1[..., i] = sum(y1[..., :i]).
    cy1 = torch.cat([torch.zeros_like(y1[..., :1]), torch.cumsum(y1, dim=-1)], dim=-1) # 129
    # Indices into t1 bracketing each edge of t0 (project helper from prop_utils).
    idx_lo, idx_hi = searchsorted(t1, t0)
    cy1_lo = torch.take_along_dim(cy1, idx_lo, dim=-1) # 128
    cy1_hi = torch.take_along_dim(cy1, idx_hi, dim=-1)
    # Outer measure: all reference mass that could overlap the query interval.
    y0_outer = cy1_hi[..., 1:] - cy1_lo[..., :-1] # 127
    # Inner measure: mass strictly contained; zero when no full reference
    # interval fits inside the query interval.
    y0_inner = torch.where(idx_hi[..., :-1] <= idx_lo[..., 1:], cy1_lo[..., 1:] - cy1_hi[..., :-1], 0)
    return y0_inner, y0_outer
# from mipnerf360
def lossfun_outer(t: torch.Tensor, w: torch.Tensor, t_env: torch.Tensor, w_env: torch.Tensor, eps=torch.finfo(torch.float32).eps):
    """The proposal weight should be an upper envelope on the nerf weight.

    MipNeRF-360 interlevel (proposal) loss. Accepts t.shape[-1] ==
    w.shape[-1] + 1 (edges vs. interval weights). Penalizes NeRF weights `w`
    that exceed the outer measure of the proposal histogram (t_env, w_env).
    (Originally this string sat after the first statements, making it a dead
    string expression instead of the function docstring.)
    """
    t, w = matchup_channels(t, w)
    t_env, w_env = matchup_channels(t_env, w_env)
    _, w_outer = inner_outer(t, t_env, w_env)
    # We assume w_inner <= w <= w_outer. We don't penalize w_inner because it's
    # more effective to pull w_outer up than it is to push w_inner down.
    # Scaled half-quadratic loss that gives a constant gradient at w_outer = 0.
    return (w - w_outer).clip(0.).pow(2) / (w + eps)
def blur_stepfun(x, y, r):
    """Convolve a step function (knots x, values y) with a box filter of half-width r.

    Returns the new knot locations xr (sorted union of x - r and x + r) and the
    blurred, non-negative piecewise-linear values yr at those knots.
    """
    # Knots of the blurred function: every original edge shifted by +/- r.
    xr, order = torch.sort(torch.cat([x - r, x + r], dim=-1))
    # Slope change contributed by each step edge, spread over width 2r.
    padded_hi = torch.cat([y, torch.zeros_like(y[..., :1])], dim=-1)
    padded_lo = torch.cat([torch.zeros_like(y[..., :1]), y], dim=-1)
    slope = (padded_hi - padded_lo) / (2 * r)
    # Rising edges fire at x - r, falling edges at x + r; gather in sorted order.
    deltas = torch.cat([slope, -slope], dim=-1).take_along_dim(order[..., :-1], dim=-1)
    # Integrate twice: slope deltas -> slopes -> values, clamped to stay >= 0.
    widths = xr[..., 1:] - xr[..., :-1]
    yr = torch.cumsum(widths * torch.cumsum(deltas, dim=-1), dim=-1).clamp_min(0)
    yr = torch.cat([torch.zeros_like(yr[..., :1]), yr], dim=-1)
    return xr, yr
def sorted_interp_quad(x, xp, fpdf, fcdf):
    """interp in quadratic

    Piecewise-quadratic interpolation of a CDF: for each query in `x`, find its
    bracketing interval in the sorted knots `xp` and integrate the linearly
    interpolated pdf (`fpdf`) on top of the bracketing cdf value (`fcdf`).
    Both `x` and `xp` are assumed sorted along the last dimension.
    """
    # Identify the location in `xp` that corresponds to each `x`.
    # The final `True` index in `mask` is the start of the matching interval.
    mask = x[..., None, :] >= xp[..., :, None]
    def find_interval(x, return_idx=False):
        # Grab the value where `mask` switches from True to False, and vice versa.
        # This approach takes advantage of the fact that `x` is sorted.
        x0, x0_idx = torch.max(torch.where(mask, x[..., None], x[..., :1, None]), -2)
        x1, x1_idx = torch.min(torch.where(~mask, x[..., None], x[..., -1:, None]), -2)
        if return_idx:
            return x0, x1, x0_idx, x1_idx
        return x0, x1
    # CDF values and knot indices bracketing each query point.
    fcdf0, fcdf1, fcdf0_idx, fcdf1_idx = find_interval(fcdf, return_idx=True)
    # PDF values at the bracketing knots (pdf is linear within the interval).
    fpdf0 = fpdf.take_along_dim(fcdf0_idx, dim=-1)
    fpdf1 = fpdf.take_along_dim(fcdf1_idx, dim=-1)
    xp0, xp1 = find_interval(xp)
    # Fractional position of the query inside its interval, guarded against
    # zero-width intervals (nan_to_num) and clamped to [0, 1].
    offset = torch.clip(torch.nan_to_num((x - xp0) / (xp1 - xp0), 0), 0, 1)
    # Trapezoid rule on the linearly interpolated pdf yields a quadratic cdf.
    ret = fcdf0 + (x - xp0) * (fpdf0 + fpdf1 * offset + fpdf0 * (1 - offset)) / 2
    return ret
def lossfun_zip_outer(t, w, t_env, w_env, pulse_width, eps=1e-6):
    """Anti-aliased proposal (interlevel) loss.

    Blurs the NeRF histogram (t, w) with a box filter of half-width
    `pulse_width`, resamples it onto the proposal intervals (t_env, w_env) via
    piecewise-quadratic CDF interpolation, and penalizes proposal weights that
    fall below the blurred NeRF weights.
    """
    t, w = matchup_channels(t, w)
    t_env, w_env = matchup_channels(t_env, w_env)
    # Convert interval masses to densities (weight per unit length).
    w_normalize = w / torch.clamp_min(t[..., 1:] - t[..., :-1], eps)
    t_, w_ = blur_stepfun(t, w_normalize, pulse_width)
    w_ = torch.clip(w_, min=0.)
    assert (w_ >= 0.0).all()
    # piecewise linear pdf to piecewise quadratic cdf
    area = 0.5 * (w_[..., 1:] + w_[..., :-1]) * (t_[..., 1:] - t_[..., :-1])
    cdf = torch.cat([torch.zeros_like(area[..., :1]), torch.cumsum(area, dim=-1)], dim=-1)
    # query piecewise quadratic interpolation
    cdf_interp = sorted_interp_quad(t_env, t_, w_, cdf)
    # difference between adjacent interpolated values
    w_s = torch.diff(cdf_interp, dim=-1)
    # One-sided squared penalty, normalized by the proposal weight.
    return ((w_s - w_env).clip(0.).pow(2) / (w_env + eps)).mean()
def lossfun_distortion(t: torch.Tensor, w: torch.Tensor):
    # MipNeRF-360 distortion loss: encourages the ray weights to be compact in t.
    # accepts t.shape[-1] = w.shape[-1] + 1
    t, w = matchup_channels(t, w)
    """Compute iint w[i] w[j] |t[i] - t[j]| di dj."""
    # The loss incurred between all pairs of intervals.
    ut = (t[..., 1:] + t[..., :-1]) / 2  # interval midpoints
    dut = torch.abs(ut[..., :, None] - ut[..., None, :])  # pairwise midpoint distances
    loss_inter = torch.sum(w * torch.sum(w[..., None, :] * dut, dim=-1), dim=-1)
    # The loss incurred within each individual interval with itself.
    loss_intra = torch.sum(w**2 * (t[..., 1:] - t[..., :-1]), dim=-1) / 3
    return loss_inter + loss_intra
def interval_distortion(t0_lo, t0_hi, t1_lo, t1_hi):
    """Compute mean(abs(x-y); x in [t0_lo, t0_hi], y in [t1_lo, t1_hi])."""
    # Disjoint intervals: the mean absolute difference is the midpoint distance.
    mid0 = (t0_lo + t0_hi) / 2
    mid1 = (t1_lo + t1_hi) / 2
    d_disjoint = torch.abs(mid1 - mid0)
    # Overlapping intervals: closed form of the double integral.
    numer = (2 *
             (torch.minimum(t0_hi, t1_hi)**3 - torch.maximum(t0_lo, t1_lo)**3) +
             3 * (t1_hi * t0_hi * torch.abs(t1_hi - t0_hi) +
                  t1_lo * t0_lo * torch.abs(t1_lo - t0_lo) +
                  t1_hi * t0_lo * (t0_lo - t1_hi) +
                  t1_lo * t0_hi * (t1_lo - t0_hi)))
    denom = 6 * (t0_hi - t0_lo) * (t1_hi - t1_lo)
    d_overlap = numer / denom
    # Select the disjoint formula wherever the intervals do not overlap.
    are_disjoint = (t0_lo > t1_hi) | (t1_lo > t0_hi)
    return torch.where(are_disjoint, d_disjoint, d_overlap)
def anneal_loss_weight(weight: float, gamma: float, iter: int, mile: int):
    """Exponentially anneal `weight` by `gamma`, saturating once `iter` reaches `mile`."""
    progress = min(iter / mile, 1)
    return weight * gamma ** progress
def gaussian_entropy_relighting4d(albedo_pred):
    """Sum of per-channel entropies of soft (Gaussian) histograms of the predicted albedo."""
    total = 0
    for c in range(3):
        channel = albedo_pred[..., c]
        histogram = GaussianHistogram(15, 0., 1., sigma=torch.var(channel))
        counts = histogram(channel)
        if counts.sum() > 1e-6:
            probs = counts / counts.sum() + 1e-6
        else:
            # Degenerate histogram: fall back to a flat (maximum-entropy) vector.
            probs = torch.ones_like(counts)
        total = total + torch.sum(-probs * torch.log(probs))
    return total
class GaussianHistogram(nn.Module):
    """Differentiable soft histogram using Gaussian kernels at fixed bin centers.

    Args:
        bins: number of histogram bins.
        min, max: value range covered by the bins.
        sigma: kernel bandwidth as a tensor; its device places the bin centers.
    """

    def __init__(self, bins, min, max, sigma):
        super().__init__()
        self.bins = bins
        self.min = min
        self.max = max
        self.sigma = sigma
        self.delta = float(max - min) / float(bins)
        # Bin centers sit in the middle of each of the `bins` equal-width cells.
        offsets = torch.arange(bins, device=sigma.device).float() + 0.5
        self.centers = float(min) + self.delta * offsets

    def forward(self, x):
        # Signed distance of every sample to every bin center: (bins, N).
        diff = x.unsqueeze(0) - self.centers.unsqueeze(1)
        # Gaussian kernel weight per (center, sample) pair, scaled by bin width.
        weights = torch.exp(-0.5 * (diff / self.sigma)**2) / (self.sigma * np.sqrt(np.pi * 2)) * self.delta
        # Accumulate sample contributions per bin.
        return weights.sum(dim=1)
def gaussian_entropy(x: torch.Tensor, *args, **kwargs):
    """Sum of per-channel entropies of a soft histogram of `x` (see gaussian_histogram)."""
    eps = 1e-6
    hist = gaussian_histogram(x, *args, **kwargs)
    entropy = 0
    for c in range(3):
        channel_hist = hist[..., c]
        if channel_hist.sum() > eps:
            probs = channel_hist / channel_hist.sum() + eps
        else:
            # Degenerate histogram: fall back to a flat (maximum-entropy) vector.
            probs = torch.ones_like(channel_hist)
        entropy = entropy + torch.sum(-probs * torch.log(probs))
    return entropy
def gaussian_histogram(x: torch.Tensor, bins: int = 15, min: float = 0.0, max: float = 1.0):
    """Soft (Gaussian-kernel) histogram of the channels of x.

    x is flattened to (N, C); the per-channel variance is used as the kernel
    bandwidth. Returns a (bins, C) tensor of soft bin counts."""
    flat = x.view(-1, x.shape[-1])  # (N, C)
    sigma = flat.var(dim=0)  # (C,) kernel bandwidth per channel
    delta = (max - min) / bins  # bin width
    bin_ids = torch.arange(bins, device=flat.device, dtype=flat.dtype)
    centers = min + delta * (bin_ids + 0.5)  # (bins,)
    diff = flat[None] - centers[:, None, None]  # (bins, N, C)
    weights = torch.exp(-0.5 * (diff / sigma) ** 2) / (sigma * np.sqrt(np.pi * 2)) * delta
    return weights.sum(dim=1)  # (bins, C)
def reg_diff_crit(x: torch.Tensor, iter_step: int, max_weight: float = 1e-4, ann_iter: int = 100 * 500):
    """Norm regularizer on x with a linearly annealed weight; returns (loss, weight)."""
    annealed = min(iter_step, ann_iter) * max_weight / ann_iter
    return reg(x), annealed
def reg_raw_crit(x: torch.Tensor, iter_step: int, max_weight: float = 1e-4, ann_iter: int = 100 * 500):
    """Consistency regularizer on paired raw outputs: the directions of the first
    and second halves of the points should match. Returns (loss, annealed weight)."""
    weight = min(iter_step, ann_iter) * max_weight / ann_iter
    n_batch, n_pts_x2, n_dim = x.shape
    half = n_pts_x2 // 2
    norm = x.norm(dim=-1, keepdim=True)  # vector lengths
    unit = x / (norm + 1e-8)  # directions on the unit sphere
    # loss_length = mse(length[:, n_pts:, :], length[:, :n_pts, :])
    # penalize direction mismatch between the two halves
    loss = reg(unit[:, half:, :] - unit[:, :half, :])
    return loss, weight
class LossNetwork(torch.nn.Module):
    """VGG19 feature extractor for perceptual losses, truncated after the second
    mapped activation (layer '8'). Pretrained weights are fetched by torchvision
    on construction and frozen. Reference:
    https://discuss.pytorch.org/t/how-to-extract-features-of-an-image-from-a-trained-model/119/3
    """
    def __init__(self):
        super(LossNetwork, self).__init__()
        # Newer torchvision uses the `weights` enum; fall back to the deprecated
        # `pretrained` flag on older releases
        try:
            from torchvision.models import VGG19_Weights
            self.vgg_layers = vgg.vgg19(weights=VGG19_Weights.DEFAULT).features
        except ImportError:
            self.vgg_layers = vgg.vgg19(pretrained=True).features
        # feature extraction only: freeze all VGG parameters
        for param in self.vgg_layers.parameters():
            param.requires_grad = False
        '''
        self.layer_name_mapping = {
            '3': "relu1",
            '8': "relu2",
            '17': "relu3",
            '26': "relu4",
            '35': "relu5",
        }
        '''
        # only the first two feature maps are used
        self.layer_name_mapping = {'3': "relu1", '8': "relu2"}
    def forward(self, x):
        # run the VGG trunk, collecting the mapped intermediate activations
        output = {}
        for name, module in self.vgg_layers._modules.items():
            x = module(x)
            if name in self.layer_name_mapping:
                output[self.layer_name_mapping[name]] = x
            # stop early: deeper layers are unused
            if name == '8':
                break
        LossOutput = namedtuple("LossOutput", ["relu1", "relu2"])
        return LossOutput(**output)
class PerceptualLoss(torch.nn.Module):
    """Perceptual (VGG-feature) + L1 + MSE image loss.

    NOTE: moves the feature network to CUDA in __init__, so a GPU is required."""
    def __init__(self):
        super(PerceptualLoss, self).__init__()
        self.model = LossNetwork()
        self.model.cuda()
        self.model.eval()
        self.mse_loss = torch.nn.MSELoss(reduction='mean')
        self.l1_loss = torch.nn.L1Loss(reduction='mean')
    def forward(self, x, target):
        # only the first three channels go through the feature network
        x_feature = self.model(x[:, 0:3, :, :])
        target_feature = self.model(target[:, 0:3, :, :])
        # average L1 distance between the two collected VGG feature maps
        feature_loss = (
            self.l1_loss(x_feature.relu1, target_feature.relu1) +
            self.l1_loss(x_feature.relu2, target_feature.relu2)) / 2.0
        # pixel-space terms on the full input (all channels)
        l1_loss = self.l1_loss(x, target)
        l2_loss = self.mse_loss(x, target)
        loss = feature_loss + l1_loss + l2_loss
        return loss
class VGGPerceptualLoss(torch.nn.Module):
    """VGG16 perceptual loss with optional style (Gram-matrix) terms.

    Inputs are normalized with ImageNet statistics (so they are presumably in
    [0, 1] — confirm with callers); single-channel inputs are tiled to RGB.
    Downloads pretrained VGG16 weights on construction."""
    def __init__(self, resize=False):
        super(VGGPerceptualLoss, self).__init__()
        blocks = []
        import torchvision
        vgg16 = torchvision.models.vgg16(pretrained=True)
        # four feature stages of the VGG16 trunk
        blocks.append(vgg16.features[:4].eval())
        blocks.append(vgg16.features[4:9].eval())
        blocks.append(vgg16.features[9:16].eval())
        blocks.append(vgg16.features[16:23].eval())
        # feature extraction only: freeze everything
        for bl in blocks:
            for p in bl.parameters():
                p.requires_grad = False
        self.blocks = nn.ModuleList(blocks)
        self.transform = F.interpolate
        self.resize = resize
        # ImageNet channel statistics used to normalize the inputs
        self.register_buffer("mean", torch.tensor([0.485, 0.456, 0.406]).view(1, 3, 1, 1))
        self.register_buffer("std", torch.tensor([0.229, 0.224, 0.225]).view(1, 3, 1, 1))
    def forward(self, input, target, feature_layers=[0, 1, 2, 3], style_layers=[]):
        # tile non-RGB (e.g. single-channel) inputs to three channels
        if input.shape[1] != 3:
            input = input.repeat(1, 3, 1, 1)
            target = target.repeat(1, 3, 1, 1)
        input = (input - self.mean) / self.std
        target = (target - self.mean) / self.std
        if self.resize:
            input = self.transform(input, mode='bilinear', size=(224, 224), align_corners=False)
            target = self.transform(target, mode='bilinear', size=(224, 224), align_corners=False)
        loss = 0.0
        x = input
        y = target
        for i, block in enumerate(self.blocks):
            x = block(x)
            y = block(y)
            if i in feature_layers:
                # feature-matching (perceptual) term
                loss += F.l1_loss(x, y)
            if i in style_layers:
                # style term: L1 between Gram matrices of the activations
                act_x = x.reshape(x.shape[0], x.shape[1], -1)
                act_y = y.reshape(y.shape[0], y.shape[1], -1)
                gram_x = act_x @ act_x.permute(0, 2, 1)
                gram_y = act_y @ act_y.permute(0, 2, 1)
                loss += F.l1_loss(gram_x, gram_y)
        return loss
def eikonal(x: torch.Tensor, th=1.0) -> torch.Tensor:
    """Eikonal regularizer: mean squared deviation of the last-dim norms from th."""
    deviation = x.norm(dim=-1) - th
    return deviation.pow(2).mean()
def sdf_mask_crit(ret, batch):
    # Supervise the SDF sign with the mask label: BCE-with-logits between a
    # sharpened (scaled) SDF and the binary mask
    msk_sdf = ret['msk_sdf']
    msk_label = ret['msk_label']
    alpha = 50  # initial logit sharpness
    alpha_factor = 2
    alpha_milestones = [10000, 20000, 30000, 40000, 50000]
    # double the sharpness after every passed milestone so the boundary tightens
    for milestone in alpha_milestones:
        if batch['iter_step'] > milestone:
            alpha = alpha * alpha_factor
    # negate so (presumably) negative SDF maps to mask=1 — confirm with the data convention
    msk_sdf = -alpha * msk_sdf
    # dividing by alpha keeps the loss magnitude roughly stable as alpha grows
    mask_loss = F.binary_cross_entropy_with_logits(msk_sdf, msk_label) / alpha
    return mask_loss
def cross_entropy(x: torch.Tensor, y: torch.Tensor):
    # x: unnormalized input logits (softmax is applied inside F.cross_entropy)
    # channel-last cross entropy loss: flatten everything down to (N, C) first
    x = x.view(-1, x.shape[-1])  # N, C
    y = y.view(-1, y.shape[-1])  # N, C
    return F.cross_entropy(x, y)
def huber(x: torch.Tensor, y: torch.Tensor):
    """Mean Huber loss between x and y (quadratic near zero, linear for large errors)."""
    return F.huber_loss(input=x, target=y, reduction='mean')
def smoothl1(x: torch.Tensor, y: torch.Tensor):
    """Mean smooth-L1 loss between x and y."""
    return F.smooth_l1_loss(input=x, target=y)
def mse(x: torch.Tensor, y: torch.Tensor):
    """Mean squared error between x and y, computed after casting both to float."""
    diff = x.float() - y.float()
    return diff.pow(2).mean()
def dot(x: torch.Tensor, y: torch.Tensor):
    """Channel-last dot product: elementwise product summed over the last dim."""
    return torch.sum(x * y, dim=-1)
def l1(x: torch.Tensor, y: torch.Tensor):
    """Mean channel-summed absolute error (delegates to l1_reg on the residual)."""
    residual = x - y
    return l1_reg(residual)
def l2(x: torch.Tensor, y: torch.Tensor):
    """Mean channel-summed squared error (delegates to l2_reg on the residual)."""
    residual = x - y
    return l2_reg(residual)
def l1_reg(x: torch.Tensor):
    """Mean (over leading dims) of the per-vector absolute sum along the last dim."""
    return torch.abs(x).sum(dim=-1).mean()
def l2_reg(x: torch.Tensor) -> torch.Tensor:
    """Mean (over leading dims) of the per-vector squared sum along the last dim."""
    return x.pow(2).sum(dim=-1).mean()
def bce_loss(x: torch.Tensor, y: torch.Tensor):
    """Binary cross entropy; x must already be probabilities in [0, 1]."""
    return F.binary_cross_entropy(input=x, target=y)
def mIoU_loss(x: torch.Tensor, y: torch.Tensor):
    """
    Compute the mean intersection of union loss over masked regions
    x, y: B, N, 1
    """
    inter = (x * y).sum(dim=-1).sum(dim=-1)
    union = (x + y).sum(dim=-1).sum(dim=-1) - inter
    # the denominator is detached so gradients only flow through the intersection
    iou = inter / union.detach()
    return 1 - iou.mean()
def reg(x: torch.Tensor) -> torch.Tensor:
    """Mean length (L2 norm over the last dim) of the vectors in x."""
    lengths = x.norm(dim=-1)
    return lengths.mean()
def thresh(x: torch.Tensor, a: torch.Tensor, eps: float = 1e-8):
    """Inverse of the mean squared distance between x and a; eps avoids division by zero."""
    distance = l2(x, a)
    return (distance + eps).reciprocal()
def elastic_crit(jac: torch.Tensor) -> torch.Tensor:
    """Compute the raw 'log_svals' type elastic energy, and
    remap it using the Geman-McClure type of robust loss.
    Args:
        jac (torch.Tensor): (B, N, 3, 3), the gradient of warpped xyz with respect to the original xyz
    Return:
        elastic_loss (torch.Tensor): (B, N),
    """
    # !: CUDA IMPLEMENTATION OF SVD IS EXTREMELY SLOW
    # old_device = jac.device
    # jac = jac.cpu()
    # svd_backward: Setting compute_uv to false in torch.svd doesn't compute singular matrices, and hence we cannot compute backward. Please use torch.svd(compute_uv=True)
    _, S, _ = torch.svd(jac, compute_uv=True)  # (B, N, 3) singular values
    # S = S.to(old_device)
    # clamp keeps the log finite for degenerate (near-singular) jacobians
    log_svals = torch.log(torch.clamp(S, min=1e-6))  # (B, N, 3)
    # zero iff the jacobian is a rotation (all singular values 1)
    sq_residual = torch.sum(log_svals**2, dim=-1)  # (B, N)
    # TODO: determine whether it is a good choice to compute the robust loss here
    # alpha=-2 selects the Geman-McClure member of the general robust loss
    elastic_loss = general_loss_with_squared_residual(sq_residual, alpha=-2.0, scale=0.03)
    return elastic_loss
def general_loss_with_squared_residual(squared_x, alpha, scale):
    r"""The general loss that takes a squared residual.
    This fuses the sqrt operation done to compute many residuals while preserving
    the square in the loss formulation.
    This implements the rho(x, \alpha, c) function described in "A General and
    Adaptive Robust Loss Function", Jonathan T. Barron,
    https://arxiv.org/abs/1701.03077.
    Args:
      squared_x: The residual for which the loss is being computed. x can have
        any shape, and alpha and scale will be broadcasted to match x's shape if
        necessary.
      alpha: The shape parameter of the loss (\alpha in the paper), where more
        negative values produce a loss with more robust behavior (outliers "cost"
        less), and more positive values produce a loss with less robust behavior
        (outliers are penalized more heavily). Alpha can be any value in
        [-infinity, infinity], but the gradient of the loss with respect to alpha
        is 0 at -infinity, infinity, 0, and 2. Varying alpha allows for smooth
        interpolation between several discrete robust losses:
          alpha=-Infinity: Welsch/Leclerc Loss.
          alpha=-2: Geman-McClure loss.
          alpha=0: Cauchy/Lortentzian loss.
          alpha=1: Charbonnier/pseudo-Huber loss.
          alpha=2: L2 loss.
      scale: The scale parameter of the loss. When |x| < scale, the loss is an
        L2-like quadratic bowl, and when |x| > scale the loss function takes on a
        different shape according to alpha.
    Returns:
      The losses for each element of x, in the same shape as x.
    """
    # https://pytorch.org/docs/stable/type_info.html
    # FIX: create eps on the input's device — a CPU-only eps tensor made the
    # torch.maximum calls below raise a device-mismatch error whenever
    # squared_x lived on the GPU
    eps = torch.tensor(torch.finfo(torch.float32).eps, device=squared_x.device)
    # convert the float to torch.tensor
    alpha = torch.tensor(alpha).to(squared_x.device)
    scale = torch.tensor(scale).to(squared_x.device)
    # This will be used repeatedly.
    squared_scaled_x = squared_x / (scale ** 2)
    # The loss when alpha == 2.
    loss_two = 0.5 * squared_scaled_x
    # The loss when alpha == 0.
    loss_zero = log1p_safe(0.5 * squared_scaled_x)
    # The loss when alpha == -infinity.
    loss_neginf = -torch.expm1(-0.5 * squared_scaled_x)
    # The loss when alpha == +infinity.
    loss_posinf = expm1_safe(0.5 * squared_scaled_x)
    # The loss when not in one of the above special cases.
    # Clamp |2-alpha| to be >= machine epsilon so that it's safe to divide by.
    beta_safe = torch.maximum(eps, torch.abs(alpha - 2.))
    # Clamp |alpha| to be >= machine epsilon so that it's safe to divide by.
    alpha_safe = torch.where(
        torch.greater_equal(alpha, torch.tensor(0.)), torch.ones_like(alpha),
        -torch.ones_like(alpha)) * torch.maximum(eps, torch.abs(alpha))
    loss_otherwise = (beta_safe / alpha_safe) * (
        torch.pow(squared_scaled_x / beta_safe + 1., 0.5 * alpha) - 1.)
    # Select which of the cases of the loss to return.
    loss = torch.where(
        alpha == -torch.inf, loss_neginf,
        torch.where(
            alpha == 0, loss_zero,
            torch.where(
                alpha == 2, loss_two,
                torch.where(alpha == torch.inf, loss_posinf, loss_otherwise))))
    return scale * loss
def log1p_safe(x):
    """The same as torch.log1p(x), but caps the input to prevent NaNs/infs."""
    capped = x.clamp(max=3e37)
    return torch.log1p(capped)
def expm1_safe(x):
    """The same as torch.expm1(x), but caps the input to prevent infs."""
    capped = x.clamp(max=87.5)
    return torch.expm1(capped)
def compute_plane_tv(t):
    """Total variation over the two spatial dims of a (B, C, H, W) plane,
    each direction normalized by its number of difference terms and summed (x2)."""
    n_batch, n_chan, height, width = t.shape
    n_h = n_batch * n_chan * (height - 1) * width
    n_w = n_batch * n_chan * height * (width - 1)
    tv_h = (t[..., 1:, :] - t[..., :-1, :]).square().sum()
    tv_w = (t[..., :, 1:] - t[..., :, :-1]).square().sum()
    # note: sums over batch and channels rather than averaging them
    return 2 * (tv_h / n_h + tv_w / n_w)
def compute_planes_tv(embedding):
    """Sum of per-plane total variation over a list of plane embeddings."""
    return sum(compute_plane_tv(emb) for emb in embedding)
def compute_plane_smoothness(t):
    # t: (batch, c, h, w); smoothness is measured along the LAST (w) axis —
    # for space-time planes this axis presumably holds time (TODO confirm)
    batch_size, c, h, w = t.shape
    # Convolve with a second-derivative filter along the last axis
    first_difference = t[..., 1:] - t[..., :w - 1]  # [batch, c, h, w-1]
    second_difference = first_difference[..., 1:] - first_difference[..., :w - 2]  # [batch, c, h, w-2]
    # Mean of the squared second differences
    return torch.square(second_difference).mean()
def compute_time_planes_smooth(embedding):
    """Accumulate the temporal smoothness penalty over a list of plane embeddings."""
    total = 0.
    for plane in embedding:
        total = total + compute_plane_smoothness(plane)
    return total
def compute_ssim(x: torch.Tensor, y: torch.Tensor):
    # NOTE: requires the third-party `pytorch_msssim` package (imported lazily here)
    from pytorch_msssim import ssim
    # standard SSIM settings: 11x11 Gaussian window, sigma 1.5, K1=0.01, K2=0.03,
    # inputs assumed in [0, 1] (data_range=1.0)
    return ssim(x, y, data_range=1.0, win_size=11, win_sigma=1.5, K=(0.01, 0.03))
# from MonoSDF
def compute_scale_and_shift(prediction, target, mask):
    """Per-image closed-form least squares for the scale s and shift t minimizing
    ||mask * (s * prediction + t - target)||^2 over the (H, W) dims.

    prediction, target, mask: (B, H, W). Returns (scale, shift), each (B,).
    Images with a singular normal equation get scale = shift = 0."""
    # Symmetric normal-equation matrix A = [[a_00, a_01], [a_01, a_11]]
    a_00 = (mask * prediction * prediction).sum(dim=(1, 2))
    a_01 = (mask * prediction).sum(dim=(1, 2))
    a_11 = mask.sum(dim=(1, 2))
    # Right-hand side b = [b_0, b_1]
    b_0 = (mask * prediction * target).sum(dim=(1, 2))
    b_1 = (mask * target).sum(dim=(1, 2))
    scale = torch.zeros_like(b_0)
    shift = torch.zeros_like(b_1)
    # Cramer's rule, applied only where the determinant is nonzero
    det = a_00 * a_11 - a_01 * a_01
    ok = det.nonzero()
    scale[ok] = (a_11[ok] * b_0[ok] - a_01[ok] * b_1[ok]) / det[ok]
    shift[ok] = (-a_01[ok] * b_0[ok] + a_00[ok] * b_1[ok]) / det[ok]
    return scale, shift
def reduction_batch_based(image_loss, M):
    """Average the summed per-image losses over all valid pixels of the batch.

    Returns 0 when there are no valid pixels (the loss sum is then 0 as well)."""
    total_valid = torch.sum(M)
    if total_valid == 0:
        return 0
    return torch.sum(image_loss) / total_valid
def reduction_image_based(image_loss, M):
    """Mean over images of the per-image average loss over valid pixels.

    Images with no valid pixels keep their (zero) loss sum untouched.
    NOTE: normalizes `image_loss` in place before taking the mean."""
    has_valid = M.nonzero()
    image_loss[has_valid] = image_loss[has_valid] / M[has_valid]
    return torch.mean(image_loss)
def mse_loss(prediction, target, mask, reduction=reduction_batch_based):
    """Masked per-image squared error on (B, H, W) maps, reduced by `reduction`.

    NOTE(review): the divisor passed to `reduction` is 2*M (not M), matching the
    MiDaS-style reference implementation — confirm callers expect the halved loss."""
    # Number of valid pixels
    M = torch.sum(mask, (1, 2)) # (B,)
    # L2 loss
    res = prediction - target # (B, H, W)
    image_loss = torch.sum(mask * res * res, (1, 2)) # (B,)
    return reduction(image_loss, 2 * M)
def gradient_loss(prediction, target, mask, reduction=reduction_batch_based):
    """Masked gradient-matching term (MiDaS-style): penalize differences between
    the finite differences of the masked residual in both image directions.
    A gradient only counts where both neighboring pixels are valid."""
    M = torch.sum(mask, (1, 2))
    diff = prediction - target
    diff = torch.mul(mask, diff)
    # horizontal finite differences, masked so both neighbors must be valid
    grad_x = torch.abs(diff[:, :, 1:] - diff[:, :, :-1])
    mask_x = torch.mul(mask[:, :, 1:], mask[:, :, :-1])
    grad_x = torch.mul(mask_x, grad_x)
    # vertical finite differences, same masking rule
    grad_y = torch.abs(diff[:, 1:, :] - diff[:, :-1, :])
    mask_y = torch.mul(mask[:, 1:, :], mask[:, :-1, :])
    grad_y = torch.mul(mask_y, grad_y)
    image_loss = torch.sum(grad_x, (1, 2)) + torch.sum(grad_y, (1, 2))
    return reduction(image_loss, M)
class MSELoss(nn.Module):
    """Module wrapper around the masked `mse_loss` with a selectable reduction."""
    def __init__(self, reduction='batch-based'):
        super().__init__()
        # 'batch-based' averages over all valid pixels of the batch;
        # any other value averages per image first
        if reduction == 'batch-based':
            self.__reduction = reduction_batch_based
        else:
            self.__reduction = reduction_image_based
    def forward(self, prediction, target, mask):
        return mse_loss(prediction, target, mask, reduction=self.__reduction)
class GradientLoss(nn.Module):
    """Multi-scale masked gradient-matching loss (MiDaS-style).

    At each scale the inputs are subsampled by striding with step 2**scale."""
    def __init__(self, scales=1, reduction='batch-based'):
        super().__init__()
        # 'batch-based' averages over all valid pixels of the batch;
        # any other value averages per image first
        if reduction == 'batch-based':
            self.__reduction = reduction_batch_based
        else:
            self.__reduction = reduction_image_based
        self.__scales = scales  # number of pyramid levels
    def forward(self, prediction, target, mask):
        total = 0
        for scale in range(self.__scales):
            step = pow(2, scale)
            # strided subsampling stands in for building an image pyramid
            total += gradient_loss(prediction[:, ::step, ::step], target[:, ::step, ::step],
                                   mask[:, ::step, ::step], reduction=self.__reduction)
        return total
class ScaleAndShiftInvariantMSELoss(nn.Module):
    """Scale-and-shift invariant MSE (MiDaS-style): solve a per-image least
    squares for the scale/shift aligning prediction to target, then apply the
    masked MSE plus an optional multi-scale gradient regularizer."""
    def __init__(self, alpha=0.5, scales=4, reduction='batch-based'):
        super().__init__()
        self.__data_loss = MSELoss(reduction=reduction)
        self.__regularization_loss = GradientLoss(scales=scales, reduction=reduction)
        self.__alpha = alpha  # weight of the gradient regularization term
        self.__prediction_ssi = None  # cached aligned prediction (exposed via property)
    def forward(self, prediction, target, mask):
        # Deal with the channel dimension, the input dimension may have (B, C, H, W) or (B, H, W)
        if prediction.ndim == 4: prediction = prediction[:, 0] # (B, H, W)
        if target.ndim == 4: target = target[:, 0] # (B, H, W)
        if mask.ndim == 4: mask = mask[:, 0] # (B, H, W)
        # Compute scale and shift, then align the prediction to the target
        scale, shift = compute_scale_and_shift(prediction, target, mask)
        self.__prediction_ssi = scale.view(-1, 1, 1) * prediction + shift.view(-1, 1, 1)
        total = self.__data_loss(self.__prediction_ssi, target, mask)
        # Add regularization if needed
        if self.__alpha > 0:
            total += self.__alpha * self.__regularization_loss(self.__prediction_ssi, target, mask)
        return total
    def __get_prediction_ssi(self):
        return self.__prediction_ssi
    # read-only access to the last aligned prediction
    prediction_ssi = property(__get_prediction_ssi)
# from MonoSDF
def median_normalize(x, mask):
    """ Median normalize a tensor for all valid pixels.
    This operation is performed without batch dimension.
    Args:
        x (torch.Tensor): (H, W), original tensor
        mask (torch.Tensor): (H, W), mask tensor
    Return:
        y (torch.Tensor): (H, W), median normalized tensor
    """
    num_valid = torch.sum(mask)
    # no valid pixel: nothing to normalize against, return the input untouched
    if num_valid == 0:
        return x
    valid = x[mask == 1]
    median = torch.quantile(valid, q=0.5)  # scalar
    spread = torch.sum(valid - median) / num_valid  # scalar
    # shift by the median, scale by the mean deviation
    return (x - median) / spread
def mae_loss(prediction, target, mask, reduction=reduction_batch_based):
    """Masked per-image absolute error on (B, H, W) maps, reduced by `reduction`.

    NOTE(review): divides by 2*M like the squared-error version; for L1 the usual
    divisor is M — confirm the halving is intentional."""
    # Number of valid pixels
    M = torch.sum(mask, (1, 2)) # (B,)
    # L1 loss
    res = (prediction - target).abs() # (B, H, W)
    image_loss = torch.sum(mask * res, (1, 2)) # (B,)
    return reduction(image_loss, 2 * M)
class MAELoss(nn.Module):
    """Module wrapper around the masked `mae_loss` with a selectable reduction."""
    def __init__(self, reduction='batch-based'):
        super().__init__()
        # 'batch-based' averages over all valid pixels of the batch;
        # any other value averages per image first
        if reduction == 'batch-based':
            self.__reduction = reduction_batch_based
        else:
            self.__reduction = reduction_image_based
    def forward(self, prediction, target, mask):
        return mae_loss(prediction, target, mask, reduction=self.__reduction)
class ScaleAndShiftInvariantMAELoss(nn.Module):
    """Scale-and-shift invariant MAE: median-normalize prediction and target over
    the valid pixels of each image, then apply the masked MAE plus an optional
    multi-scale gradient regularizer.

    Args:
        alpha: weight of the gradient regularization term (0 disables it)
        scales: number of scales used by the gradient regularizer
        reduction: 'batch-based' or 'image-based' averaging
    """
    def __init__(self, alpha=0.5, scales=4, reduction='batch-based'):
        super().__init__()
        self.__data_loss = MAELoss(reduction=reduction)
        self.__regularization_loss = GradientLoss(scales=scales, reduction=reduction)
        self.__alpha = alpha
    def forward(self, prediction, target, mask):
        # Deal with the channel dimension, the input dimension may have (B, C, H, W) or (B, H, W)
        if prediction.ndim == 4: prediction = prediction[:, 0] # (B, H, W)
        if target.ndim == 4: target = target[:, 0] # (B, H, W)
        if mask.ndim == 4: mask = mask[:, 0] # (B, H, W)
        # Clone before the per-image in-place writes below so the caller's
        # tensors are never mutated
        prediction = prediction.clone()
        target = target.clone()
        # TODO: Maybe there is a better way to do the batching
        # But `torch.quantile` does not support multiple `dim` argument for now
        for i in range(prediction.shape[0]):
            prediction[i] = median_normalize(prediction[i], mask[i]) # (H, W)
            target[i] = median_normalize(target[i], mask[i]) # (H, W)
        # Compute the scale-and-shift invariant MAE loss
        total = self.__data_loss(prediction, target, mask)
        # Add regularization if needed
        if self.__alpha > 0:
            # BUG FIX: was `self.prediction`, which is never defined on this module
            # (AttributeError as soon as alpha > 0); regularize the normalized prediction
            total += self.__alpha * self.__regularization_loss(prediction, target, mask)
        return total
# Modified version of Adabins repository
# https://github.com/shariqfarooq123/AdaBins/blob/0952d91e9e762be310bb4cd055cbfe2448c0ce20/loss.py#L7
class ScaleInvariantLogLoss(nn.Module):
    """Scale-invariant log (SILog) loss, after the AdaBins implementation:
    loss = sum_i alpha * sqrt(var(g_i) + beta * mean(g_i)^2), with
    g_i = log(pred_i + eps) - log(target_i + eps) over the valid pixels of image i."""
    def __init__(self, alpha=10.0, beta=0.15, eps=0.0):
        super(ScaleInvariantLogLoss, self).__init__()
        self.alpha = alpha
        self.beta = beta
        # eps guards against log(0); the network output should already be non-negative
        self.eps = eps
    def forward(self, prediction, target, mask):
        # Accept (B, C, H, W) by taking the first channel, or (B, H, W) directly
        if prediction.ndim == 4: prediction = prediction[:, 0]  # (B, H, W)
        if target.ndim == 4: target = target[:, 0]  # (B, H, W)
        if mask.ndim == 4: mask = mask[:, 0]  # (B, H, W)
        total = 0
        # per-image loop: each mask selects a different number of valid pixels
        for i in range(prediction.shape[0]):
            pred_i = prediction[i][mask[i]]
            targ_i = target[i][mask[i]]
            g = torch.log(pred_i + self.eps) - torch.log(targ_i + self.eps)  # (N,)
            Dg = torch.var(g) + self.beta * torch.mean(g).pow(2)  # scalar
            total += self.alpha * torch.sqrt(Dg)
        return total
# NOTE(review): "evocodebench_data_85" is a dataset-concatenation artifact — the
# bare "|" lines are not valid Python and everything below this marker duplicates
# the start of the same module; remove the marker and deduplicate the file.
import torch
import numpy as np
import torch.nn as nn
import torch.nn.functional as F
import torchvision.models.vgg as vgg
from collections import namedtuple
from easyvolcap.utils.prop_utils import searchsorted, matchup_channels
from enum import Enum, auto
class ElasticLossReduceType(Enum):
    # Reduction mode for the elastic energy; the names suggest importance-weighted
    # vs median reduction — see the call sites for the exact semantics
    WEIGHT = auto()
    MEDIAN = auto()
class ImgLossType(Enum):
    """Selector for the image reconstruction loss type."""
    PERC = auto() # lpips
    CHARB = auto()
    HUBER = auto()
    L1 = auto()
    L2 = auto()
    SSIM = auto()
class DptLossType(Enum):
    """Selector for the depth supervision loss type."""
    SMOOTHL1 = auto()
    L1 = auto()
    L2 = auto()
    SSIMSE = auto()  # scale-and-shift invariant MSE (see ScaleAndShiftInvariantMSELoss)
    SSIMAE = auto()  # scale-and-shift invariant MAE (see ScaleAndShiftInvariantMAELoss)
    SILOG = auto()  # scale-invariant log loss (see ScaleInvariantLogLoss)
    CONTINUITY = auto()
    RANKING = auto()
# from mipnerf360
def inner_outer(t0, t1, y1):
    """Construct inner and outer measures on (t1, y1) for t0."""
    # cumulative sum of y1 with a leading zero so cy1[..., i] = sum(y1[..., :i])
    cy1 = torch.cat([torch.zeros_like(y1[..., :1]), torch.cumsum(y1, dim=-1)], dim=-1)  # 129
    # indices of the t1 edges bracketing each t0 edge (project helper)
    idx_lo, idx_hi = searchsorted(t1, t0)
    cy1_lo = torch.take_along_dim(cy1, idx_lo, dim=-1)  # 128
    cy1_hi = torch.take_along_dim(cy1, idx_hi, dim=-1)
    # outer measure: all y1 mass that could overlap each t0 interval
    y0_outer = cy1_hi[..., 1:] - cy1_lo[..., :-1]  # 127
    # inner measure: y1 mass fully contained in the t0 interval (0 when none fits)
    y0_inner = torch.where(idx_hi[..., :-1] <= idx_lo[..., 1:], cy1_lo[..., 1:] - cy1_hi[..., :-1], 0)
    return y0_inner, y0_outer
# from mipnerf360
def lossfun_outer(t: torch.Tensor, w: torch.Tensor, t_env: torch.Tensor, w_env: torch.Tensor, eps=torch.finfo(torch.float32).eps):
    # mip-NeRF 360 interlevel (proposal) loss: penalize nerf weights w that exceed
    # the outer envelope computed from the proposal histogram (t_env, w_env)
    # accepts t.shape[-1] = w.shape[-1] + 1
    t, w = matchup_channels(t, w)
    t_env, w_env = matchup_channels(t_env, w_env)
    """The proposal weight should be an upper envelope on the nerf weight."""
    _, w_outer = inner_outer(t, t_env, w_env)
    # We assume w_inner <= w <= w_outer. We don't penalize w_inner because it's
    # more effective to pull w_outer up than it is to push w_inner down.
    # Scaled half-quadratic loss that gives a constant gradient at w_outer = 0.
    return (w - w_outer).clip(0.).pow(2) / (w + eps)
def blur_stepfun(x, y, r):
    # Blur a step function (edges x, values y) with radius r, yielding a
    # piecewise-linear function sampled at the shifted edges — TODO confirm
    # this matches the Zip-NeRF reference formulation
    xr, xr_idx = torch.sort(torch.cat([x - r, x + r], dim=-1))
    # slope introduced at each original edge (step height over the 2r window)
    y1 = (torch.cat([y, torch.zeros_like(y[..., :1])], dim=-1) -
          torch.cat([torch.zeros_like(y[..., :1]), y], dim=-1)) / (2 * r)
    # gather the +slope / -slope contributions in sorted edge order
    y2 = torch.cat([y1, -y1], dim=-1).take_along_dim(xr_idx[..., :-1], dim=-1)
    # double cumsum integrates slopes into values; clamp_min guards tiny negatives
    yr = torch.cumsum((xr[..., 1:] - xr[..., :-1]) *
                      torch.cumsum(y2, dim=-1), dim=-1).clamp_min(0)
    yr = torch.cat([torch.zeros_like(yr[..., :1]), yr], dim=-1)
    return xr, yr
def sorted_interp_quad(x, xp, fpdf, fcdf):
    """interp in quadratic: evaluate the piecewise-quadratic CDF (values fcdf,
    slopes fpdf at the knots xp) at the sorted query points x."""
    # Identify the location in `xp` that corresponds to each `x`.
    # The final `True` index in `mask` is the start of the matching interval.
    mask = x[..., None, :] >= xp[..., :, None]
    def find_interval(x, return_idx=False):
        # Grab the value where `mask` switches from True to False, and vice versa.
        # This approach takes advantage of the fact that `x` is sorted.
        x0, x0_idx = torch.max(torch.where(mask, x[..., None], x[..., :1, None]), -2)
        x1, x1_idx = torch.min(torch.where(~mask, x[..., None], x[..., -1:, None]), -2)
        if return_idx:
            return x0, x1, x0_idx, x1_idx
        return x0, x1
    fcdf0, fcdf1, fcdf0_idx, fcdf1_idx = find_interval(fcdf, return_idx=True)
    fpdf0 = fpdf.take_along_dim(fcdf0_idx, dim=-1)
    fpdf1 = fpdf.take_along_dim(fcdf1_idx, dim=-1)
    xp0, xp1 = find_interval(xp)
    # fractional position of each x inside its bracketing interval, clamped to [0, 1]
    # (nan_to_num handles zero-width intervals)
    offset = torch.clip(torch.nan_to_num((x - xp0) / (xp1 - xp0), 0), 0, 1)
    # trapezoid rule with a linearly interpolated density: quadratic in offset
    ret = fcdf0 + (x - xp0) * (fpdf0 + fpdf1 * offset + fpdf0 * (1 - offset)) / 2
    return ret
def lossfun_zip_outer(t, w, t_env, w_env, pulse_width, eps=1e-6):
    # Anti-aliased interlevel loss (cf. Zip-NeRF): blur the nerf histogram,
    # resample it onto the proposal intervals, and penalize wherever the
    # resampled nerf mass exceeds the proposal weights
    t, w = matchup_channels(t, w)
    t_env, w_env = matchup_channels(t_env, w_env)
    # convert interval weights to a density (weight per unit length)
    w_normalize = w / torch.clamp_min(t[..., 1:] - t[..., :-1], eps)
    t_, w_ = blur_stepfun(t, w_normalize, pulse_width)
    w_ = torch.clip(w_, min=0.)
    assert (w_ >= 0.0).all()
    # piecewise linear pdf to piecewise quadratic cdf
    area = 0.5 * (w_[..., 1:] + w_[..., :-1]) * (t_[..., 1:] - t_[..., :-1])
    cdf = torch.cat([torch.zeros_like(area[..., :1]), torch.cumsum(area, dim=-1)], dim=-1)
    # query piecewise quadratic interpolation
    cdf_interp = sorted_interp_quad(t_env, t_, w_, cdf)
    # difference between adjacent interpolated values
    w_s = torch.diff(cdf_interp, dim=-1)
    return ((w_s - w_env).clip(0.).pow(2) / (w_env + eps)).mean()
def lossfun_distortion(t: torch.Tensor, w: torch.Tensor):
    # mip-NeRF 360 distortion loss: encourages the weights to concentrate in
    # few, narrow intervals
    # accepts t.shape[-1] = w.shape[-1] + 1
    t, w = matchup_channels(t, w)
    """Compute iint w[i] w[j] |t[i] - t[j]| di dj."""
    # The loss incurred between all pairs of intervals.
    ut = (t[..., 1:] + t[..., :-1]) / 2  # 64, interval midpoints
    dut = torch.abs(ut[..., :, None] - ut[..., None, :])  # 64, pairwise midpoint distances
    loss_inter = torch.sum(w * torch.sum(w[..., None, :] * dut, dim=-1), dim=-1)
    # The loss incurred within each individual interval with itself.
    loss_intra = torch.sum(w**2 * (t[..., 1:] - t[..., :-1]), dim=-1) / 3
    return loss_inter + loss_intra
def interval_distortion(t0_lo, t0_hi, t1_lo, t1_hi):
    """Compute mean(abs(x-y); x in [t0_lo, t0_hi], y in [t1_lo, t1_hi])."""
    # Disjoint intervals: the expectation reduces to the midpoint distance
    mid_dist = torch.abs((t1_lo + t1_hi) / 2 - (t0_lo + t0_hi) / 2)
    # Overlapping intervals: closed-form integral of |x - y| over the rectangle
    cubic = 2 * (torch.minimum(t0_hi, t1_hi)**3 - torch.maximum(t0_lo, t1_lo)**3)
    cross = 3 * (t1_hi * t0_hi * torch.abs(t1_hi - t0_hi) +
                 t1_lo * t0_lo * torch.abs(t1_lo - t0_lo) +
                 t1_hi * t0_lo * (t0_lo - t1_hi) +
                 t1_lo * t0_hi * (t1_lo - t0_hi))
    overlap = (cubic + cross) / (6 * (t0_hi - t0_lo) * (t1_hi - t1_lo))
    # pick the disjoint form exactly when the intervals do not touch
    disjoint = (t0_lo > t1_hi) | (t1_lo > t0_hi)
    return torch.where(disjoint, mid_dist, overlap)
def anneal_loss_weight(weight: float, gamma: float, iter: int, mile: int):
    """Exponentially anneal a loss weight: weight * gamma**(iter/mile), with the
    exponent capped at 1 once `iter` reaches the milestone `mile`."""
    progress = min(iter / mile, 1)
    return weight * gamma ** progress
def gaussian_entropy_relighting4d(albedo_pred):
    # Entropy of a soft (Gaussian-kernel) histogram of each channel of the
    # predicted albedo, summed over the 3 channels (Relighting4D-style sparsity prior)
    albedo_entropy = 0
    for i in range(3):
        channel = albedo_pred[..., i]
        # per-channel variance doubles as the kernel bandwidth
        hist = GaussianHistogram(15, 0., 1., sigma=torch.var(channel))
        h = hist(channel)
        if h.sum() > 1e-6:
            # normalize to a distribution; +1e-6 keeps log finite
            h = h.div(h.sum()) + 1e-6
        else:
            # degenerate histogram: fall back to a uniform one
            h = torch.ones_like(h)
        albedo_entropy += torch.sum(-h * torch.log(h))
    return albedo_entropy
class GaussianHistogram(nn.Module):
    """Differentiable (soft) histogram using a Gaussian kernel around each bin center.

    NOTE: `sigma` must be a tensor — its `.device` is used to place the bin centers.
    `forward` appears to expect a flat 1D sample tensor (the broadcast below only
    lines up for 1D input) — TODO confirm callers flatten."""
    def __init__(self, bins, min, max, sigma):
        super(GaussianHistogram, self).__init__()
        self.bins = bins  # number of bins
        self.min = min  # lower bound of the histogram range
        self.max = max  # upper bound of the histogram range
        self.sigma = sigma  # kernel bandwidth (tensor)
        self.delta = float(max - min) / float(bins)  # bin width
        self.centers = float(min) + self.delta * (torch.arange(bins, device=sigma.device).float() + 0.5)
    def forward(self, x):
        # (bins, N) pairwise distances from samples to bin centers
        x = torch.unsqueeze(x, 0) - torch.unsqueeze(self.centers, 1)
        # Gaussian kernel response, scaled by the bin width
        x = torch.exp(-0.5 * (x / self.sigma)**2) / (self.sigma * np.sqrt(np.pi * 2)) * self.delta
        x = x.sum(dim=1)  # soft count per bin
        return x
def gaussian_entropy(x: torch.Tensor, *args, **kwargs):
    # Sum of per-channel entropies of the soft histogram of x (see gaussian_histogram)
    eps = 1e-6
    hps = 1e-9  # unused; left over from the commented-out normalization variant below
    h = gaussian_histogram(x, *args, **kwargs)
    # h = (h / (h.sum(dim=0) + hps)).clip(eps) # 3,
    # entropy = (-h * h.log()).sum(dim=0).sum(dim=0) # per channel entropy summed
    entropy = 0
    for i in range(3):
        hi = h[..., i]
        if hi.sum() > eps:
            # normalize to a distribution; +eps keeps the log finite
            hi = hi / hi.sum() + eps
        else:
            # degenerate histogram: fall back to a uniform one
            hi = torch.ones_like(hi)
        entropy += torch.sum(-hi * torch.log(hi))
    return entropy
def gaussian_histogram(x: torch.Tensor, bins: int = 15, min: float = 0.0, max: float = 1.0):
    """Soft (Gaussian-kernel) histogram of the channels of x.

    x is flattened to (N, C); the per-channel variance is used as the kernel
    bandwidth. Returns a (bins, C) tensor of soft bin counts."""
    flat = x.view(-1, x.shape[-1])  # (N, C)
    sigma = flat.var(dim=0)  # (C,) kernel bandwidth per channel
    delta = (max - min) / bins  # bin width
    bin_ids = torch.arange(bins, device=flat.device, dtype=flat.dtype)
    centers = min + delta * (bin_ids + 0.5)  # (bins,)
    diff = flat[None] - centers[:, None, None]  # (bins, N, C)
    weights = torch.exp(-0.5 * (diff / sigma) ** 2) / (sigma * np.sqrt(np.pi * 2)) * delta
    return weights.sum(dim=1)  # (bins, C)
def reg_diff_crit(x: torch.Tensor, iter_step: int, max_weight: float = 1e-4, ann_iter: int = 100 * 500):
    """Norm regularizer on x with a linearly annealed weight; returns (loss, weight)."""
    annealed = min(iter_step, ann_iter) * max_weight / ann_iter
    return reg(x), annealed
def reg_raw_crit(x: torch.Tensor, iter_step: int, max_weight: float = 1e-4, ann_iter: int = 100 * 500):
    """Consistency regularizer on paired raw outputs: the directions of the first
    and second halves of the points should match. Returns (loss, annealed weight)."""
    weight = min(iter_step, ann_iter) * max_weight / ann_iter
    n_batch, n_pts_x2, n_dim = x.shape
    half = n_pts_x2 // 2
    norm = x.norm(dim=-1, keepdim=True)  # vector lengths
    unit = x / (norm + 1e-8)  # directions on the unit sphere
    # loss_length = mse(length[:, n_pts:, :], length[:, :n_pts, :])
    # penalize direction mismatch between the two halves
    loss = reg(unit[:, half:, :] - unit[:, :half, :])
    return loss, weight
class LossNetwork(torch.nn.Module):
    """VGG19 feature extractor for perceptual losses, truncated after the second
    mapped activation (layer '8'). Pretrained weights are fetched by torchvision
    on construction and frozen. Reference:
    https://discuss.pytorch.org/t/how-to-extract-features-of-an-image-from-a-trained-model/119/3
    """
    def __init__(self):
        super(LossNetwork, self).__init__()
        # Newer torchvision uses the `weights` enum; fall back to the deprecated
        # `pretrained` flag on older releases
        try:
            from torchvision.models import VGG19_Weights
            self.vgg_layers = vgg.vgg19(weights=VGG19_Weights.DEFAULT).features
        except ImportError:
            self.vgg_layers = vgg.vgg19(pretrained=True).features
        # feature extraction only: freeze all VGG parameters
        for param in self.vgg_layers.parameters():
            param.requires_grad = False
        '''
        self.layer_name_mapping = {
            '3': "relu1",
            '8': "relu2",
            '17': "relu3",
            '26': "relu4",
            '35': "relu5",
        }
        '''
        # only the first two feature maps are used
        self.layer_name_mapping = {'3': "relu1", '8': "relu2"}
    def forward(self, x):
        # run the VGG trunk, collecting the mapped intermediate activations
        output = {}
        for name, module in self.vgg_layers._modules.items():
            x = module(x)
            if name in self.layer_name_mapping:
                output[self.layer_name_mapping[name]] = x
            # stop early: deeper layers are unused
            if name == '8':
                break
        LossOutput = namedtuple("LossOutput", ["relu1", "relu2"])
        return LossOutput(**output)
class PerceptualLoss(torch.nn.Module):
    """Perceptual (VGG-feature) + L1 + MSE image loss.

    NOTE: moves the feature network to CUDA in __init__, so a GPU is required."""
    def __init__(self):
        super(PerceptualLoss, self).__init__()
        self.model = LossNetwork()
        self.model.cuda()
        self.model.eval()
        self.mse_loss = torch.nn.MSELoss(reduction='mean')
        self.l1_loss = torch.nn.L1Loss(reduction='mean')
    def forward(self, x, target):
        # only the first three channels go through the feature network
        x_feature = self.model(x[:, 0:3, :, :])
        target_feature = self.model(target[:, 0:3, :, :])
        # average L1 distance between the two collected VGG feature maps
        feature_loss = (
            self.l1_loss(x_feature.relu1, target_feature.relu1) +
            self.l1_loss(x_feature.relu2, target_feature.relu2)) / 2.0
        # pixel-space terms on the full input (all channels)
        l1_loss = self.l1_loss(x, target)
        l2_loss = self.mse_loss(x, target)
        loss = feature_loss + l1_loss + l2_loss
        return loss
class VGGPerceptualLoss(torch.nn.Module):
    """VGG16 perceptual loss with optional style (Gram-matrix) terms.

    Inputs are normalized with ImageNet statistics (so they are presumably in
    [0, 1] — confirm with callers); single-channel inputs are tiled to RGB.
    Downloads pretrained VGG16 weights on construction."""
    def __init__(self, resize=False):
        super(VGGPerceptualLoss, self).__init__()
        blocks = []
        import torchvision
        vgg16 = torchvision.models.vgg16(pretrained=True)
        # four feature stages of the VGG16 trunk
        blocks.append(vgg16.features[:4].eval())
        blocks.append(vgg16.features[4:9].eval())
        blocks.append(vgg16.features[9:16].eval())
        blocks.append(vgg16.features[16:23].eval())
        # feature extraction only: freeze everything
        for bl in blocks:
            for p in bl.parameters():
                p.requires_grad = False
        self.blocks = nn.ModuleList(blocks)
        self.transform = F.interpolate
        self.resize = resize
        # ImageNet channel statistics used to normalize the inputs
        self.register_buffer("mean", torch.tensor([0.485, 0.456, 0.406]).view(1, 3, 1, 1))
        self.register_buffer("std", torch.tensor([0.229, 0.224, 0.225]).view(1, 3, 1, 1))
    def forward(self, input, target, feature_layers=[0, 1, 2, 3], style_layers=[]):
        # tile non-RGB (e.g. single-channel) inputs to three channels
        if input.shape[1] != 3:
            input = input.repeat(1, 3, 1, 1)
            target = target.repeat(1, 3, 1, 1)
        input = (input - self.mean) / self.std
        target = (target - self.mean) / self.std
        if self.resize:
            input = self.transform(input, mode='bilinear', size=(224, 224), align_corners=False)
            target = self.transform(target, mode='bilinear', size=(224, 224), align_corners=False)
        loss = 0.0
        x = input
        y = target
        for i, block in enumerate(self.blocks):
            x = block(x)
            y = block(y)
            if i in feature_layers:
                # feature-matching (perceptual) term
                loss += F.l1_loss(x, y)
            if i in style_layers:
                # style term: L1 between Gram matrices of the activations
                act_x = x.reshape(x.shape[0], x.shape[1], -1)
                act_y = y.reshape(y.shape[0], y.shape[1], -1)
                gram_x = act_x @ act_x.permute(0, 2, 1)
                gram_y = act_y @ act_y.permute(0, 2, 1)
                loss += F.l1_loss(gram_x, gram_y)
        return loss
def eikonal(x: torch.Tensor, th=1.0) -> torch.Tensor:
    """Eikonal regularizer: mean squared deviation of the last-dim norms from th."""
    deviation = x.norm(dim=-1) - th
    return deviation.pow(2).mean()
def sdf_mask_crit(ret, batch):
    # Supervise the SDF sign with the mask label: BCE-with-logits between a
    # sharpened (scaled) SDF and the binary mask
    msk_sdf = ret['msk_sdf']
    msk_label = ret['msk_label']
    alpha = 50  # initial logit sharpness
    alpha_factor = 2
    alpha_milestones = [10000, 20000, 30000, 40000, 50000]
    # double the sharpness after every passed milestone so the boundary tightens
    for milestone in alpha_milestones:
        if batch['iter_step'] > milestone:
            alpha = alpha * alpha_factor
    # negate so (presumably) negative SDF maps to mask=1 — confirm with the data convention
    msk_sdf = -alpha * msk_sdf
    # dividing by alpha keeps the loss magnitude roughly stable as alpha grows
    mask_loss = F.binary_cross_entropy_with_logits(msk_sdf, msk_label) / alpha
    return mask_loss
def cross_entropy(x: torch.Tensor, y: torch.Tensor):
    # x: unnormalized input logits (softmax is applied inside F.cross_entropy)
    # channel-last cross entropy loss: flatten everything down to (N, C) first
    x = x.view(-1, x.shape[-1])  # N, C
    y = y.view(-1, y.shape[-1])  # N, C
    return F.cross_entropy(x, y)
def huber(x: torch.Tensor, y: torch.Tensor):
    """Mean Huber loss between x and y (quadratic near zero, linear for large errors)."""
    return F.huber_loss(input=x, target=y, reduction='mean')
def smoothl1(x: torch.Tensor, y: torch.Tensor):
    """Mean smooth-L1 loss between x and y."""
    return F.smooth_l1_loss(input=x, target=y)
def mse(x: torch.Tensor, y: torch.Tensor):
    """Mean squared error between x and y, computed after casting both to float."""
    diff = x.float() - y.float()
    return diff.pow(2).mean()
def dot(x: torch.Tensor, y: torch.Tensor):
    """Channel-last dot product: elementwise product summed over the last dim."""
    return torch.sum(x * y, dim=-1)
def l1(x: torch.Tensor, y: torch.Tensor):
    """Mean channel-summed absolute error (delegates to l1_reg on the residual)."""
    residual = x - y
    return l1_reg(residual)
def l2(x: torch.Tensor, y: torch.Tensor):
    """Mean channel-summed squared error (delegates to l2_reg on the residual)."""
    residual = x - y
    return l2_reg(residual)
def l1_reg(x: torch.Tensor):
    """Mean (over leading dims) of the per-vector absolute sum along the last dim."""
    return torch.abs(x).sum(dim=-1).mean()
def l2_reg(x: torch.Tensor) -> torch.Tensor:
    """Mean (over leading dims) of the per-vector squared sum along the last dim."""
    return x.pow(2).sum(dim=-1).mean()
def bce_loss(x: torch.Tensor, y: torch.Tensor):
    """Binary cross entropy; x must already be probabilities in [0, 1]."""
    return F.binary_cross_entropy(input=x, target=y)
def mIoU_loss(x: torch.Tensor, y: torch.Tensor):
    """
    Compute the mean intersection of union loss over masked regions
    x, y: B, N, 1
    """
    inter = (x * y).sum(dim=-1).sum(dim=-1)
    union = (x + y).sum(dim=-1).sum(dim=-1) - inter
    # the denominator is detached so gradients only flow through the intersection
    iou = inter / union.detach()
    return 1 - iou.mean()
def reg(x: torch.Tensor) -> torch.Tensor:
    """Mean length (L2 norm over the last dim) of the vectors in x."""
    lengths = x.norm(dim=-1)
    return lengths.mean()
def thresh(x: torch.Tensor, a: torch.Tensor, eps: float = 1e-8):
    """Inverse of the mean squared distance between x and a; eps avoids division by zero."""
    distance = l2(x, a)
    return (distance + eps).reciprocal()
def elastic_crit(jac: torch.Tensor) -> torch.Tensor:
    """Compute the raw 'log_svals' type elastic energy, and
    remap it using the Geman-McClure type of robust loss.
    Args:
        jac (torch.Tensor): (B, N, 3, 3), the gradient of warpped xyz with respect to the original xyz
    Return:
        elastic_loss (torch.Tensor): (B, N),
    """
    # !: CUDA IMPLEMENTATION OF SVD IS EXTREMELY SLOW
    # old_device = jac.device
    # jac = jac.cpu()
    # svd_backward: Setting compute_uv to false in torch.svd doesn't compute singular matrices, and hence we cannot compute backward. Please use torch.svd(compute_uv=True)
    _, S, _ = torch.svd(jac, compute_uv=True)  # (B, N, 3) singular values
    # S = S.to(old_device)
    # clamp keeps the log finite for degenerate (near-singular) jacobians
    log_svals = torch.log(torch.clamp(S, min=1e-6))  # (B, N, 3)
    # zero iff the jacobian is a rotation (all singular values 1)
    sq_residual = torch.sum(log_svals**2, dim=-1)  # (B, N)
    # TODO: determine whether it is a good choice to compute the robust loss here
    # alpha=-2 selects the Geman-McClure member of the general robust loss
    elastic_loss = general_loss_with_squared_residual(sq_residual, alpha=-2.0, scale=0.03)
    return elastic_loss
def general_loss_with_squared_residual(squared_x, alpha, scale):
    r"""The general loss that takes a squared residual.
    This fuses the sqrt operation done to compute many residuals while preserving
    the square in the loss formulation.
    This implements the rho(x, \alpha, c) function described in "A General and
    Adaptive Robust Loss Function", Jonathan T. Barron,
    https://arxiv.org/abs/1701.03077.
    Args:
      squared_x: The residual for which the loss is being computed. x can have
        any shape, and alpha and scale will be broadcasted to match x's shape if
        necessary.
      alpha: The shape parameter of the loss (\alpha in the paper), where more
        negative values produce a loss with more robust behavior (outliers "cost"
        less), and more positive values produce a loss with less robust behavior
        (outliers are penalized more heavily). Alpha can be any value in
        [-infinity, infinity], but the gradient of the loss with respect to alpha
        is 0 at -infinity, infinity, 0, and 2. Varying alpha allows for smooth
        interpolation between several discrete robust losses:
          alpha=-Infinity: Welsch/Leclerc Loss.
          alpha=-2: Geman-McClure loss.
          alpha=0: Cauchy/Lortentzian loss.
          alpha=1: Charbonnier/pseudo-Huber loss.
          alpha=2: L2 loss.
      scale: The scale parameter of the loss. When |x| < scale, the loss is an
        L2-like quadratic bowl, and when |x| > scale the loss function takes on a
        different shape according to alpha.
    Returns:
      The losses for each element of x, in the same shape as x.
    """
    # https://pytorch.org/docs/stable/type_info.html
    # Machine epsilon used to make divisions below safe.
    eps = torch.tensor(torch.finfo(torch.float32).eps)
    # convert the float to torch.tensor on the residual's device
    alpha = torch.tensor(alpha).to(squared_x.device)
    scale = torch.tensor(scale).to(squared_x.device)
    # This will be used repeatedly.
    squared_scaled_x = squared_x / (scale ** 2)
    # The loss when alpha == 2.
    loss_two = 0.5 * squared_scaled_x
    # The loss when alpha == 0.
    loss_zero = log1p_safe(0.5 * squared_scaled_x)
    # The loss when alpha == -infinity.
    loss_neginf = -torch.expm1(-0.5 * squared_scaled_x)
    # The loss when alpha == +infinity.
    loss_posinf = expm1_safe(0.5 * squared_scaled_x)
    # The loss when not in one of the above special cases.
    # Clamp |2-alpha| to be >= machine epsilon so that it's safe to divide by.
    beta_safe = torch.maximum(eps, torch.abs(alpha - 2.))
    # Clamp |alpha| to be >= machine epsilon so that it's safe to divide by,
    # while preserving alpha's sign.
    alpha_safe = torch.where(
        torch.greater_equal(alpha, torch.tensor(0.)), torch.ones_like(alpha),
        -torch.ones_like(alpha)) * torch.maximum(eps, torch.abs(alpha))
    loss_otherwise = (beta_safe / alpha_safe) * (
        torch.pow(squared_scaled_x / beta_safe + 1., 0.5 * alpha) - 1.)
    # Select which of the cases of the loss to return: the special cases take
    # precedence over the generic formula.
    loss = torch.where(
        alpha == -torch.inf, loss_neginf,
        torch.where(
            alpha == 0, loss_zero,
            torch.where(
                alpha == 2, loss_two,
                torch.where(alpha == torch.inf, loss_posinf, loss_otherwise))))
    # NOTE: the result is scaled by `scale` (not `scale**2`), matching the
    # reference implementation this was ported from.
    return scale * loss
def log1p_safe(x):
    """The same as torch.log1p(x), but clamps the input to prevent NaNs/inf."""
    clamped = torch.clamp(x, max=3e37)
    return torch.log1p(clamped)
def expm1_safe(x):
    """The same as torch.expm1(x), but clamps the input to prevent NaNs/inf."""
    clamped = torch.clamp(x, max=87.5)
    return torch.expm1(clamped)
def compute_plane_tv(t):
    """Squared total-variation of a (B, C, H, W) plane, averaged per difference
    and summed over the two spatial axes (×2)."""
    batch_size, c, h, w = t.shape
    n_h = batch_size * c * (h - 1) * w  # number of vertical differences
    n_w = batch_size * c * h * (w - 1)  # number of horizontal differences
    dh = t[..., 1:, :] - t[..., :h - 1, :]
    dw = t[..., :, 1:] - t[..., :, :w - 1]
    # Sums over batch and channels rather than averaging them.
    return 2 * (dh.square().sum() / n_h + dw.square().sum() / n_w)
def compute_planes_tv(embedding):
    """Sum of compute_plane_tv over a sequence of plane tensors."""
    return sum(compute_plane_tv(emb) for emb in embedding)
def compute_plane_smoothness(t):
    # Second-difference smoothness penalty along the LAST (w) axis of a
    # (batch, c, h, w) plane.
    # NOTE(review): the original comment claimed this convolves "in the time
    # dimension which is dimension 2", but the slicing below operates on the
    # last dimension — confirm which axis is intended.
    batch_size, c, h, w = t.shape
    # First difference along the last axis -> [batch, c, h, w-1]
    first_difference = t[..., 1:] - t[..., :w - 1]
    # Second difference -> [batch, c, h, w-2]
    second_difference = first_difference[..., 1:] - first_difference[..., :w - 2]
    # Mean squared second difference (not an L2 norm, despite the old comment)
    return torch.square(second_difference).mean()
def compute_time_planes_smooth(embedding):
    """Sum of compute_plane_smoothness over a sequence of plane tensors."""
    return sum(compute_plane_smoothness(emb) for emb in embedding)
def compute_ssim(x: torch.Tensor, y: torch.Tensor):
    """SSIM between image batches x and y (data range assumed [0, 1]).

    Requires the optional third-party package `pytorch_msssim`; imported
    lazily here so the module loads without it installed.
    """
    from pytorch_msssim import ssim
    return ssim(x, y, data_range=1.0, win_size=11, win_sigma=1.5, K=(0.01, 0.03))
# from MonoSDF
def compute_scale_and_shift(prediction, target, mask):
    """Closed-form least-squares scale/shift aligning prediction to target.

    Per batch element, solves argmin_{s,o} sum(mask * (s*pred + o - target)^2)
    via the 2x2 normal equations A x = b. Returns (scale, shift); both are
    zero wherever the system is singular.
    """
    # Normal-equation matrix A = [[a00, a01], [a01, a11]]
    a00 = torch.sum(mask * prediction * prediction, (1, 2))
    a01 = torch.sum(mask * prediction, (1, 2))
    a11 = torch.sum(mask, (1, 2))
    # Right-hand side b = [b0, b1]
    b0 = torch.sum(mask * prediction * target, (1, 2))
    b1 = torch.sum(mask * target, (1, 2))
    scale = torch.zeros_like(b0)
    shift = torch.zeros_like(b1)
    det = a00 * a11 - a01 * a01
    ok = det.nonzero()  # only solve where the determinant is nonzero
    scale[ok] = (a11[ok] * b0[ok] - a01[ok] * b1[ok]) / det[ok]
    shift[ok] = (-a01[ok] * b0[ok] + a00[ok] * b1[ok]) / det[ok]
    return scale, shift
def reduction_batch_based(image_loss, M):
    """Average loss over all valid pixels of the whole batch.

    Returns 0 when no pixel in the batch is valid (avoids division by zero).
    """
    total_valid = torch.sum(M)
    if total_valid == 0:
        return 0
    return torch.sum(image_loss) / total_valid
def reduction_image_based(image_loss, M):
    """Per-image mean over valid pixels, then mean over the batch.

    Images with zero valid pixels keep their (expected-zero) loss unchanged.
    Note: `image_loss` is modified in place, matching the original behavior.
    """
    idx = M.nonzero()
    image_loss[idx] = image_loss[idx] / M[idx]
    return image_loss.mean()
def mse_loss(prediction, target, mask, reduction=reduction_batch_based):
    """Masked squared-error image loss, normalized by 2x the valid-pixel count."""
    num_valid = torch.sum(mask, (1, 2))  # (B,)
    residual = prediction - target  # (B, H, W)
    image_loss = torch.sum(mask * residual * residual, (1, 2))  # (B,)
    return reduction(image_loss, 2 * num_valid)
def gradient_loss(prediction, target, mask, reduction=reduction_batch_based):
    """Masked first-difference (image gradient) L1 loss between prediction and target."""
    num_valid = torch.sum(mask, (1, 2))
    masked_diff = torch.mul(mask, prediction - target)
    # Horizontal gradients, valid only where both neighboring pixels are valid
    dx = torch.abs(masked_diff[:, :, 1:] - masked_diff[:, :, :-1])
    valid_x = torch.mul(mask[:, :, 1:], mask[:, :, :-1])
    dx = torch.mul(valid_x, dx)
    # Vertical gradients, same masking rule
    dy = torch.abs(masked_diff[:, 1:, :] - masked_diff[:, :-1, :])
    valid_y = torch.mul(mask[:, 1:, :], mask[:, :-1, :])
    dy = torch.mul(valid_y, dy)
    image_loss = torch.sum(dx, (1, 2)) + torch.sum(dy, (1, 2))
    return reduction(image_loss, num_valid)
class MSELoss(nn.Module):
    """Masked MSE image loss with a selectable reduction strategy."""
    def __init__(self, reduction='batch-based'):
        super().__init__()
        # 'batch-based' averages over all valid pixels of the batch;
        # anything else averages per image first.
        self.__reduction = reduction_batch_based if reduction == 'batch-based' else reduction_image_based
    def forward(self, prediction, target, mask):
        return mse_loss(prediction, target, mask, reduction=self.__reduction)
class GradientLoss(nn.Module):
    """Multi-scale masked gradient loss: gradient_loss evaluated on dyadically
    subsampled images."""
    def __init__(self, scales=1, reduction='batch-based'):
        super().__init__()
        self.__reduction = reduction_batch_based if reduction == 'batch-based' else reduction_image_based
        self.__scales = scales
    def forward(self, prediction, target, mask):
        total = 0
        step = 1  # 2**scale subsampling stride
        for _ in range(self.__scales):
            total += gradient_loss(prediction[:, ::step, ::step],
                                   target[:, ::step, ::step],
                                   mask[:, ::step, ::step],
                                   reduction=self.__reduction)
            step *= 2
        return total
class ScaleAndShiftInvariantMSELoss(nn.Module):
    """Scale-and-shift-invariant MSE loss with multi-scale gradient regularization.

    The prediction is first aligned to the target by a closed-form per-image
    scale and shift; the aligned prediction is cached and exposed through the
    `prediction_ssi` property.
    """
    def __init__(self, alpha=0.5, scales=4, reduction='batch-based'):
        super().__init__()
        self.__data_loss = MSELoss(reduction=reduction)
        self.__regularization_loss = GradientLoss(scales=scales, reduction=reduction)
        self.__alpha = alpha
        self.__prediction_ssi = None  # last aligned prediction
    def forward(self, prediction, target, mask):
        # Accept (B, C, H, W) or (B, H, W); keep channel 0 only.
        if prediction.ndim == 4: prediction = prediction[:, 0]
        if target.ndim == 4: target = target[:, 0]
        if mask.ndim == 4: mask = mask[:, 0]
        # Align prediction to target via least-squares scale and shift.
        scale, shift = compute_scale_and_shift(prediction, target, mask)
        self.__prediction_ssi = scale.view(-1, 1, 1) * prediction + shift.view(-1, 1, 1)
        total = self.__data_loss(self.__prediction_ssi, target, mask)
        # Optional gradient-matching regularization.
        if self.__alpha > 0:
            total = total + self.__alpha * self.__regularization_loss(self.__prediction_ssi, target, mask)
        return total
    def __get_prediction_ssi(self):
        return self.__prediction_ssi
    prediction_ssi = property(__get_prediction_ssi)
# from MonoSDF
def median_normalize(x, mask):
    """ Median normalize a tensor over all valid pixels.

    Shifts by the median of the valid pixels and scales by their mean
    ABSOLUTE deviation from the median, as in the MiDaS/MonoSDF
    scale-and-shift-invariant alignment.
    This operation is performed without batch dimension.
    Args:
        x (torch.Tensor): (H, W), original tensor
        mask (torch.Tensor): (H, W), mask tensor (1 = valid)
    Return:
        y (torch.Tensor): (H, W), median normalized tensor
    """
    M = torch.sum(mask)
    # Return original tensor if there is no valid pixel
    if M == 0:
        return x
    valid = x[mask == 1]
    # Compute median and scale
    t = torch.quantile(valid, q=0.5)  # scalar
    # BUG FIX: the scale must be the mean *absolute* deviation; the previous
    # signed sum cancels to ~0 around the median and blows up the division.
    s = torch.sum((valid - t).abs()) / M  # scalar
    # Guard against an all-constant region where the deviation is exactly 0
    s = torch.clamp(s, min=torch.finfo(x.dtype).eps)
    # Return median normalized tensor
    return (x - t) / s
def mae_loss(prediction, target, mask, reduction=reduction_batch_based):
    """Masked absolute-error image loss, normalized by 2x the valid-pixel count
    (matching the normalization used by mse_loss above)."""
    num_valid = torch.sum(mask, (1, 2))  # (B,)
    abs_res = torch.abs(prediction - target)  # (B, H, W)
    image_loss = torch.sum(mask * abs_res, (1, 2))  # (B,)
    return reduction(image_loss, 2 * num_valid)
class MAELoss(nn.Module):
    """Masked MAE image loss with a selectable reduction strategy."""
    def __init__(self, reduction='batch-based'):
        super().__init__()
        self.__reduction = reduction_batch_based if reduction == 'batch-based' else reduction_image_based
    def forward(self, prediction, target, mask):
        return mae_loss(prediction, target, mask, reduction=self.__reduction)
class ScaleAndShiftInvariantMAELoss(nn.Module):
    """Scale-and-shift-invariant MAE loss with multi-scale gradient regularization.

    Prediction and target are median-normalized per image before the masked
    MAE is computed, making the loss invariant to per-image scale and shift.
    """
    def __init__(self, alpha=0.5, scales=4, reduction='batch-based'):
        super().__init__()
        self.__data_loss = MAELoss(reduction=reduction)
        self.__regularization_loss = GradientLoss(scales=scales, reduction=reduction)
        self.__alpha = alpha
    def forward(self, prediction, target, mask):
        # Accept (B, C, H, W) or (B, H, W); keep channel 0 only.
        if prediction.ndim == 4: prediction = prediction[:, 0]  # (B, H, W)
        if target.ndim == 4: target = target[:, 0]  # (B, H, W)
        if mask.ndim == 4: mask = mask[:, 0]  # (B, H, W)
        # Clone so the per-image normalization below does not mutate the
        # caller's tensors in place.
        prediction = prediction.clone()
        target = target.clone()
        # TODO: Maybe there is a better way to do the batching
        # But `torch.quantile` does not support multiple `dim` argument for now
        for i in range(prediction.shape[0]):
            prediction[i] = median_normalize(prediction[i], mask[i])  # (H, W)
            target[i] = median_normalize(target[i], mask[i])  # (H, W)
        # Compute the scale-and-shift invariant MAE loss
        total = self.__data_loss(prediction, target, mask)
        # Add regularization if needed.
        if self.__alpha > 0:
            # BUG FIX: was `self.prediction`, a nonexistent attribute that
            # raised AttributeError whenever alpha > 0; use the normalized
            # local `prediction` instead.
            total += self.__alpha * self.__regularization_loss(prediction, target, mask)
        return total
# Modified version of Adabins repository
# https://github.com/shariqfarooq123/AdaBins/blob/0952d91e9e762be310bb4cd055cbfe2448c0ce20/loss.py#L7
class ScaleInvariantLogLoss(nn.Module):
    """Scale-invariant log (SILog) depth loss, adapted from the AdaBins repo."""
    def __init__(self, alpha=10.0, beta=0.15, eps=0.0):
        super(ScaleInvariantLogLoss, self).__init__()
        self.alpha = alpha
        self.beta = beta
        # eps guards against log(0) and division by zero, though the network
        # output should already be non-negative.
        self.eps = eps
    def forward(self, prediction, target, mask):
        # Accept (B, C, H, W) or (B, H, W); keep channel 0 only.
        if prediction.ndim == 4: prediction = prediction[:, 0]  # (B, H, W)
        if target.ndim == 4: target = target[:, 0]  # (B, H, W)
        if mask.ndim == 4: mask = mask[:, 0]  # (B, H, W)
        total = 0
        # Per-image loop (maybe there is a better way to batch this).
        for pred_i, tgt_i, msk_i in zip(prediction, target, mask):
            # Log-ratio over valid pixels only.
            g = torch.log(pred_i[msk_i] + self.eps) - torch.log(tgt_i[msk_i] + self.eps)  # (N,)
            Dg = torch.var(g) + self.beta * torch.pow(torch.mean(g), 2)  # scalar
            total += self.alpha * torch.sqrt(Dg)
        return total
|
evocodebench_data_86
|
import torch
from typing import Tuple, Callable, List
def matchup_channels(t: torch.Tensor, w: torch.Tensor):
    """Align a bin-endpoint tensor t with a weight tensor w.

    Drops a trailing channel dim on t if present, and appends a `1.0` endpoint
    when t has only as many entries as w (so t ends up with w.shape[-1] + 1).
    """
    if t.ndim == w.ndim + 1:
        t = t[..., 0]  # drop trailing channel dimension
    if t.shape[-1] != w.shape[-1] + 1:
        pad = torch.ones_like(t[..., -1:])
        t = torch.cat([t, pad], dim=-1)
    return t, w
@torch.jit.script
def interpolate(x: torch.Tensor, xp: torch.Tensor, fp: torch.Tensor) -> torch.Tensor:
    """One-dimensional linear interpolation for monotonically increasing sample
    points.
    Returns the one-dimensional piecewise linear interpolant to a function with
    given discrete data points :math:`(xp, fp)`, evaluated at :math:`x`.
    Args:
        x: the :math:`x`-coordinates at which to evaluate the interpolated
            values.
        xp: the :math:`x`-coordinates of the data points, must be increasing.
        fp: the :math:`y`-coordinates of the data points, same length as `xp`.
    Returns:
        the interpolated values, same size as `x`.
    """
    # Broadcast a single query vector against batched sample points.
    if x.ndim == xp.ndim - 1:
        x = x[None]
    m = (fp[..., 1:] - fp[..., :-1]) / (xp[..., 1:] - xp[..., :-1] + 1e-8)  # per-segment slope (eps avoids 0/0)
    b = fp[..., :-1] - (m * xp[..., :-1])  # per-segment intercept
    # Segment index per query: count of xp values <= x, minus one (torch.ge: x[i] >= xp[i])
    indices = torch.sum(torch.ge(x[..., :, None], xp[..., None, :]), -1) - 1
    # Clamp so out-of-range queries extrapolate from the first/last segment.
    indices = torch.clamp(indices, 0, m.shape[-1] - 1)
    return m.gather(dim=-1, index=indices) * x + b.gather(dim=-1, index=indices)
@torch.jit.script
def integrate_weights(w: torch.Tensor):
    """Compute the cumulative sum of w, assuming all weight vectors sum to 1.
    The output's size on the last dimension is one greater than that of the input,
    because we're computing the integral corresponding to the endpoints of a step
    function, not the integral of the interior/bin values.
    Args:
        w: Tensor, which will be integrated along the last axis. This is assumed to
            sum to 1 along the last axis, and this function will (silently) break if
            that is not the case.
    Returns:
        cw0: Tensor, the integral of w, where cw0[..., 0] = 0 and cw0[..., -1] = 1
    """
    # Cumulative sum of the interior weights, clipped so rounding never exceeds 1.
    cw = torch.cumsum(w[..., :-1], dim=-1).clip(max=1.0)
    shape = cw.shape[:-1] + (1,)
    # Ensure that the CDF starts with exactly 0 and ends with exactly 1.
    cw0 = torch.cat([cw.new_zeros(shape), cw, cw.new_ones(shape)], dim=-1)
    return cw0
@torch.jit.script
def weighted_percentile(t: torch.Tensor, w: torch.Tensor, ps: List[float]):
    """Compute the weighted percentiles of a step function. w's must sum to 1.

    Returns a tensor shaped like t's leading dims with a trailing len(ps) axis.
    """
    t, w = matchup_channels(t, w)
    cw = integrate_weights(w)
    # We want to interpolate into the integrated weights according to `ps`.
    # Flatten leading dims so `interpolate` sees a 2D batch (vmap substitute).
    cw_mat = cw.reshape([-1, cw.shape[-1]])
    t_mat = t.reshape([-1, t.shape[-1]])
    wprctile_mat = interpolate(torch.as_tensor(ps).to(t, non_blocking=True),
                               cw_mat,
                               t_mat)
    # Restore the leading batch dims.
    wprctile = wprctile_mat.reshape(cw.shape[:-1] + (len(ps),))
    return wprctile
def s_vals_to_z_vals(s: torch.Tensor,
                     tn: torch.Tensor,
                     tf: torch.Tensor,
                     g: Callable[[torch.Tensor], torch.Tensor] = lambda x: 1 / x,
                     ig: Callable[[torch.Tensor], torch.Tensor] = lambda x: 1 / x,
                     ):
    """Map normalized ray depth s in [0, 1] to metric depth t by linearly
    interpolating in g-space between g(tn) and g(tf), then inverting with ig."""
    gn = g(tn)
    gf = g(tf)
    return ig(s * gf + (1 - s) * gn)
def z_vals_to_s_vals(t: torch.Tensor,
                     tn: torch.Tensor,
                     tf: torch.Tensor,
                     g: Callable[[torch.Tensor], torch.Tensor] = lambda x: 1 / x,
                     ):
    """Map metric ray depth t back to normalized depth s in [0, 1] via g
    (inverse of s_vals_to_z_vals)."""
    num = g(t) - g(tn)
    den = g(tf) - g(tn) + 1e-8  # eps avoids 0/0 when tn == tf
    return num / den
# Hierarchical sampling (section 5.2)
def searchsorted(a: torch.Tensor, v: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
    """Bracket each query v between consecutive entries of the sorted tensor a.

    Behaves like jnp.searchsorted but trades memory for speed by materializing
    the full (v x a) comparison matrix.

    Args:
        a: sorted reference points (last dim is the sorted axis).
        v: query points; need not be sorted. Leading dims must match/broadcast
            with a's; the last dim may differ.
    Returns:
        (idx_lo, idx_hi) with a[idx_lo] <= v < a[idx_hi]; for queries outside
        [a[0], a[-1]] both indices are clamped to the first or last index of a.
    """
    idx = torch.arange(a.shape[-1], device=a.device)
    ge = v[..., None, :] >= a[..., :, None]
    # Largest index whose a-value is <= v (falls back to index 0 below a[0]).
    lo = torch.max(torch.where(ge, idx[..., :, None], idx[..., :1, None]), -2)[0]
    # Smallest index whose a-value is > v (falls back to the last index above a[-1]).
    hi = torch.min(torch.where(~ge, idx[..., :, None], idx[..., -1:, None]), -2)[0]
    return lo, hi
def invert_cdf(u, t, w):
    """Invert the CDF defined by bins t and weights w at points u in [0, 1)."""
    # CDF at the bin endpoints (starts at 0, ends at 1).
    cdf = integrate_weights(w)
    # Linear interpolation of t against the CDF gives the inverse CDF.
    return interpolate(u, cdf, t)
def importance_sampling(t: torch.Tensor,
                        w: torch.Tensor,
                        num_samples: int,
                        perturb=True,
                        single_jitter=False,
                        ):
    """Piecewise-constant PDF sampling from a step function.

    Args:
        t: [..., num_bins + 1] sorted bin endpoint coordinates; a trailing
            channel dim or a missing last endpoint is fixed up below.
        w: [..., num_bins] bin weights, assumed to sum to 1 on the last axis.
        num_samples: number of samples to draw per ray.
        perturb: if True, jitter the stratified samples; otherwise use an even
            linspace over the CDF.
        single_jitter: if True, share one jitter per ray across all samples
            instead of jittering each sample independently.
    Returns:
        [..., num_samples] sampled coordinates.

    NOTE(review): with num_samples == 1 and perturb=True the jitter term
    divides by (num_samples - 1) == 0 — confirm callers never request a
    single perturbed sample.
    """
    if t.ndim == w.ndim + 1:
        t = t[..., 0]  # remove last dim
    # Remember the output shape, then flatten leading dims for the CDF math.
    sh = *t.shape[:-1], num_samples  # B, P, I
    t = t.reshape(-1, t.shape[-1])
    w = w.reshape(-1, w.shape[-1])
    # assuming sampling in s space: append the missing final endpoint (1.0)
    if t.shape[-1] != w.shape[-1] + 1:
        t = torch.cat([t, torch.ones_like(t[..., -1:])], dim=-1)
    # eps = torch.finfo(torch.float32).eps
    eps = 1e-8
    # Draw uniform samples.
    # `u` is in [0, 1) --- it can be zero, but it can never be 1.
    u_max = eps + (1 - eps) / num_samples
    max_jitter = (1 - u_max) / (num_samples - 1) - eps if perturb else 0
    d = 1 if single_jitter else num_samples
    u = (
        torch.linspace(0, 1 - u_max, num_samples, device=t.device, dtype=t.dtype) +
        torch.rand(t.shape[:-1] + (d,), device=t.device, dtype=t.dtype) * max_jitter
    )
    u = invert_cdf(u, t, w)
    # Restore the original leading dims.
    u = u.reshape(sh)
    return u
def weight_to_pdf(t: torch.Tensor, w: torch.Tensor, eps=torch.finfo(torch.float32).eps**2):
    """Turn a vector of weights that sums to 1 into a PDF that integrates to 1.

    Divides each weight by its bin width; widths are clamped by `eps` to avoid
    division by zero on degenerate bins.
    """
    # FIX: the docstring above was previously placed *after* the first
    # statement, where it was a no-op string, not the function's docstring.
    t, w = matchup_channels(t, w)
    return w / (t[..., 1:] - t[..., :-1]).clip(eps)
def pdf_to_weight(t: torch.Tensor, p: torch.Tensor):
    """Turn a PDF that integrates to 1 into a vector of weights that sums to 1.

    Multiplies each density value by its bin width (inverse of weight_to_pdf).
    """
    # FIX: the docstring above was previously placed *after* the first
    # statement, where it was a no-op string, not the function's docstring.
    t, p = matchup_channels(t, p)
    return p * (t[..., 1:] - t[..., :-1])
def max_dilate(t, w, dilation, domain=(-torch.inf, torch.inf)):
    """Dilate (via max-pooling) a non-negative step function.

    Each bin [t_i, t_{i+1}] is widened by `dilation` on both sides; the dilated
    value at any location is the max over all widened bins covering it.
    Returns the merged, sorted endpoint tensor and the dilated per-bin values.
    """
    # FIX: the docstring above was previously placed *after* the first
    # statement, where it was a no-op string, not the function's docstring.
    t, w = matchup_channels(t, w)
    t0 = t[..., :-1] - dilation  # widened left edges
    t1 = t[..., 1:] + dilation   # widened right edges
    # Merge original and widened endpoints into one sorted endpoint set.
    t_dilate = torch.sort(torch.cat([t, t0, t1], dim=-1), dim=-1)[0]
    t_dilate = t_dilate.clip(*domain)
    # For each new bin, take the max weight among all widened bins covering it.
    w_dilate = torch.max(
        torch.where(
            (t0[..., None, :] <= t_dilate[..., None])
            & (t1[..., None, :] > t_dilate[..., None]),
            w[..., None, :],
            0,
        ),
        dim=-1)[0][..., :-1]
    return t_dilate, w_dilate
def max_dilate_weights(t: torch.Tensor,
                       w: torch.Tensor,
                       dilation: float,
                       domain=(-torch.inf, torch.inf),
                       renormalize=False,
                       eps=torch.finfo(torch.float32).eps**2):
    """Dilate (via max-pooling) a set of weights.

    Converts weights to a density, dilates the density, converts back, and
    optionally renormalizes the result to sum to 1.
    """
    density = weight_to_pdf(t, w)
    t_dilated, density_dilated = max_dilate(t, density, dilation, domain=domain)
    w_dilated = pdf_to_weight(t_dilated, density_dilated)
    if renormalize:
        w_dilated = w_dilated / torch.sum(w_dilated, dim=-1, keepdim=True).clip(eps)
    return t_dilated, w_dilated
def anneal_weights(t: torch.Tensor,
                   w: torch.Tensor,
                   train_frac: float,
                   anneal_slope: float = 10.0,
                   eps=torch.finfo(torch.float32).eps ** 2):
    """Sharpen/soften resampling weights as a function of training progress.

    Raises the weights to the power anneal(train_frac) in log space, then
    renormalizes via softmax; zero-width bins get zero weight.
    """
    # accepts t.shape[-1] = w.shape[-1] + 1
    t, w = matchup_channels(t, w)
    # Optionally anneal the weights as a function of training iteration.
    if anneal_slope > 0:
        # Schlick's bias function, see https://arxiv.org/abs/2010.09714
        def bias(x, s): return (s * x) / ((s - 1) * x + 1)
        anneal = bias(train_frac, anneal_slope)
    else:
        anneal = 1.
    # A slightly more stable way to compute weights**anneal. If the distance
    # between adjacent intervals is zero then its weight is fixed to 0.
    logits_resample = torch.where(
        t[..., 1:] > t[..., :-1],
        anneal * torch.log(w.clip(eps)), -torch.inf)  # MARK: prone to nan
    # If all samples are -inf, softmax will produce a nan (all -torch.inf)
    w = torch.softmax(logits_resample, dim=-1)
    return w
def query(tq, t, y, outside_value=0):
    """Evaluate the step function (t, y) at locations tq.

    Queries outside the endpoint range of t return `outside_value`.
    """
    idx_lo, idx_hi = searchsorted(t, tq)
    # Pad y with outside_value so idx_lo == len(y) indexes the pad slot.
    padded = torch.cat([y, torch.full_like(y[..., :1], outside_value)], dim=-1)
    vals = torch.take_along_dim(padded, idx_lo, dim=-1)
    # idx_lo == idx_hi happens only when tq is outside [t[0], t[-1]].
    return torch.where(idx_lo == idx_hi, outside_value, vals)
|
evocodebench_data_87
|
import torch
from typing import Tuple, Callable, List
def matchup_channels(t: torch.Tensor, w: torch.Tensor):
if t.ndim == w.ndim + 1:
t = t[..., 0] # remove last dimension
if t.shape[-1] != w.shape[-1] + 1:
t = torch.cat([t, torch.ones_like(t[..., -1:])], dim=-1) # 65
return t, w
@torch.jit.script
def interpolate(x: torch.Tensor, xp: torch.Tensor, fp: torch.Tensor) -> torch.Tensor:
"""One-dimensional linear interpolation for monotonically increasing sample
points.
Returns the one-dimensional piecewise linear interpolant to a function with
given discrete data points :math:`(xp, fp)`, evaluated at :math:`x`.
Args:
x: the :math:`x`-coordinates at which to evaluate the interpolated
values.
xp: the :math:`x`-coordinates of the data points, must be increasing.
fp: the :math:`y`-coordinates of the data points, same length as `xp`.
Returns:
the interpolated values, same size as `x`.
"""
if x.ndim == xp.ndim - 1:
x = x[None]
m = (fp[..., 1:] - fp[..., :-1]) / (xp[..., 1:] - xp[..., :-1] + 1e-8) # slope
b = fp[..., :-1] - (m * xp[..., :-1])
indices = torch.sum(torch.ge(x[..., :, None], xp[..., None, :]), -1) - 1 # torch.ge: x[i] >= xp[i] ? true: false
indices = torch.clamp(indices, 0, m.shape[-1] - 1)
return m.gather(dim=-1, index=indices) * x + b.gather(dim=-1, index=indices)
@torch.jit.script
def integrate_weights(w: torch.Tensor):
"""Compute the cumulative sum of w, assuming all weight vectors sum to 1.
The output's size on the last dimension is one greater than that of the input,
because we're computing the integral corresponding to the endpoints of a step
function, not the integral of the interior/bin values.
Args:
w: Tensor, which will be integrated along the last axis. This is assumed to
sum to 1 along the last axis, and this function will (silently) break if
that is not the case.
Returns:
cw0: Tensor, the integral of w, where cw0[..., 0] = 0 and cw0[..., -1] = 1
"""
cw = torch.cumsum(w[..., :-1], dim=-1).clip(max=1.0)
shape = cw.shape[:-1] + (1,)
# Ensure that the CDF starts with exactly 0 and ends with exactly 1.
cw0 = torch.cat([cw.new_zeros(shape), cw, cw.new_ones(shape)], dim=-1)
return cw0
@torch.jit.script
def weighted_percentile(t: torch.Tensor, w: torch.Tensor, ps: List[float]):
"""Compute the weighted percentiles of a step function. w's must sum to 1."""
t, w = matchup_channels(t, w)
cw = integrate_weights(w)
# We want to interpolate into the integrated weights according to `ps`.
# Vmap fn to an arbitrary number of leading dimensions.
cw_mat = cw.reshape([-1, cw.shape[-1]])
t_mat = t.reshape([-1, t.shape[-1]])
wprctile_mat = interpolate(torch.as_tensor(ps).to(t, non_blocking=True),
cw_mat,
t_mat)
wprctile = wprctile_mat.reshape(cw.shape[:-1] + (len(ps),))
return wprctile
def s_vals_to_z_vals(s: torch.Tensor,
tn: torch.Tensor,
tf: torch.Tensor,
g: Callable[[torch.Tensor], torch.Tensor] = lambda x: 1 / x,
ig: Callable[[torch.Tensor], torch.Tensor] = lambda x: 1 / x,
):
# transfer ray depth from s space to t space (with inverse of g)
return ig(s * g(tf) + (1 - s) * g(tn))
def z_vals_to_s_vals(t: torch.Tensor,
tn: torch.Tensor,
tf: torch.Tensor,
g: Callable[[torch.Tensor], torch.Tensor] = lambda x: 1 / x,
):
# transfer ray depth from t space back to s space (with function g)
return (g(t) - g(tn)) / (g(tf) - g(tn) + 1e-8)
# Hierarchical sampling (section 5.2)
def searchsorted(a: torch.Tensor, v: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
"""Find indices where v should be inserted into a to maintain order.
This behaves like jnp.searchsorted (its second output is the same as
jnp.searchsorted's output if all elements of v are in [a[0], a[-1]]) but is
faster because it wastes memory to save some compute.
Args:
a: tensor, the sorted reference points that we are scanning to see where v
should lie.
v: tensor, the query points that we are pretending to insert into a. Does
not need to be sorted. All but the last dimensions should match or expand
to those of a, the last dimension can differ.
Returns:
(idx_lo, idx_hi), where a[idx_lo] <= v < a[idx_hi], unless v is out of the
range [a[0], a[-1]] in which case idx_lo and idx_hi are both the first or
last index of a.
"""
i = torch.arange(a.shape[-1], device=a.device) # 128
v_ge_a = v[..., None, :] >= a[..., :, None]
idx_lo = torch.max(torch.where(v_ge_a, i[..., :, None], i[..., :1, None]), -2)[0] # 128
idx_hi = torch.min(torch.where(~v_ge_a, i[..., :, None], i[..., -1:, None]), -2)[0]
return idx_lo, idx_hi
def invert_cdf(u, t, w):
"""Invert the CDF defined by (t, w) at the points specified by u in [0, 1)."""
# Compute the PDF and CDF for each weight vector.
cw = integrate_weights(w)
# Interpolate into the inverse CDF.
t_new = interpolate(u, cw, t)
return t_new
def importance_sampling(t: torch.Tensor,
w: torch.Tensor,
num_samples: int,
perturb=True,
single_jitter=False,
):
"""Piecewise-Constant PDF sampling from a step function.
Args:
rng: random number generator (or None for `linspace` sampling).
t: [..., num_bins + 1], bin endpoint coordinates (must be sorted)
w_logits: [..., num_bins], logits corresponding to bin weights
num_samples: int, the number of samples.
single_jitter: bool, if True, jitter every sample along each ray by the same
amount in the inverse CDF. Otherwise, jitter each sample independently.
deterministic_center: bool, if False, when `rng` is None return samples that
linspace the entire PDF. If True, skip the front and back of the linspace
so that the centers of each PDF interval are returned.
use_gpu_resampling: bool, If True this resamples the rays based on a
"gather" instruction, which is fast on GPUs but slow on TPUs. If False,
this resamples the rays based on brute-force searches, which is fast on
TPUs, but slow on GPUs.
Returns:
t_samples: jnp.ndarray(float32), [batch_size, num_samples].
"""
if t.ndim == w.ndim + 1:
t = t[..., 0] # remove last dim
# preparing for size change
sh = *t.shape[:-1], num_samples # B, P, I
t = t.reshape(-1, t.shape[-1])
w = w.reshape(-1, w.shape[-1])
# assuming sampling in s space
if t.shape[-1] != w.shape[-1] + 1:
t = torch.cat([t, torch.ones_like(t[..., -1:])], dim=-1)
# eps = torch.finfo(torch.float32).eps
eps = 1e-8
# Draw uniform samples.
# `u` is in [0, 1) --- it can be zero, but it can never be 1.
u_max = eps + (1 - eps) / num_samples
max_jitter = (1 - u_max) / (num_samples - 1) - eps if perturb else 0
d = 1 if single_jitter else num_samples
u = (
torch.linspace(0, 1 - u_max, num_samples, device=t.device, dtype=t.dtype) +
torch.rand(t.shape[:-1] + (d,), device=t.device, dtype=t.dtype) * max_jitter
)
u = invert_cdf(u, t, w)
# preparing for size change
u = u.reshape(sh)
return u
def weight_to_pdf(t: torch.Tensor, w: torch.Tensor, eps=torch.finfo(torch.float32).eps**2):
t, w = matchup_channels(t, w)
"""Turn a vector of weights that sums to 1 into a PDF that integrates to 1."""
return w / (t[..., 1:] - t[..., :-1]).clip(eps)
def pdf_to_weight(t: torch.Tensor, p: torch.Tensor):
t, p = matchup_channels(t, p)
"""Turn a PDF that integrates to 1 into a vector of weights that sums to 1."""
return p * (t[..., 1:] - t[..., :-1])
def max_dilate(t, w, dilation, domain=(-torch.inf, torch.inf)):
t, w = matchup_channels(t, w)
"""Dilate (via max-pooling) a non-negative step function."""
t0 = t[..., :-1] - dilation
t1 = t[..., 1:] + dilation
t_dilate = torch.sort(torch.cat([t, t0, t1], dim=-1), dim=-1)[0]
t_dilate = t_dilate.clip(*domain)
w_dilate = torch.max(
torch.where(
(t0[..., None, :] <= t_dilate[..., None])
& (t1[..., None, :] > t_dilate[..., None]),
w[..., None, :],
0,
),
dim=-1)[0][..., :-1]
return t_dilate, w_dilate
def max_dilate_weights(t: torch.Tensor,
w: torch.Tensor,
dilation: float,
domain=(-torch.inf, torch.inf),
renormalize=False,
eps=torch.finfo(torch.float32).eps**2):
"""Dilate (via max-pooling) a set of weights."""
p = weight_to_pdf(t, w)
t_dilate, p_dilate = max_dilate(t, p, dilation, domain=domain)
w_dilate = pdf_to_weight(t_dilate, p_dilate)
if renormalize:
w_dilate /= torch.sum(w_dilate, dim=-1, keepdim=True).clip(eps)
return t_dilate, w_dilate
def anneal_weights(t: torch.Tensor,
w: torch.Tensor,
train_frac: float,
anneal_slope: float = 10.0,
eps=torch.finfo(torch.float32).eps ** 2):
# accepts t.shape[-1] = w.shape[-1] + 1
t, w = matchup_channels(t, w)
# Optionally anneal the weights as a function of training iteration.
if anneal_slope > 0:
# Schlick's bias function, see https://arxiv.org/abs/2010.09714
def bias(x, s): return (s * x) / ((s - 1) * x + 1)
anneal = bias(train_frac, anneal_slope)
else:
anneal = 1.
# A slightly more stable way to compute weights**anneal. If the distance
# between adjacent intervals is zero then its weight is fixed to 0.
logits_resample = torch.where(
t[..., 1:] > t[..., :-1],
anneal * torch.log(w.clip(eps)), -torch.inf) # MARK: prone to nan
# If all samples are -inf, softmax will produce a nan (all -torch.inf)
w = torch.softmax(logits_resample, dim=-1)
return w
def query(tq, t, y, outside_value=0):
"""Look up the values of the step function (t, y) at locations tq."""
idx_lo, idx_hi = searchsorted(t, tq)
yq = torch.where(idx_lo == idx_hi, outside_value,
torch.take_along_dim(torch.cat([y, torch.full_like(y[..., :1], outside_value)], dim=-1), idx_lo, dim=-1)) # ?
return yq
|
evocodebench_data_88
|
import torch
from typing import Tuple, Callable, List
def matchup_channels(t: torch.Tensor, w: torch.Tensor):
if t.ndim == w.ndim + 1:
t = t[..., 0] # remove last dimension
if t.shape[-1] != w.shape[-1] + 1:
t = torch.cat([t, torch.ones_like(t[..., -1:])], dim=-1) # 65
return t, w
@torch.jit.script
def interpolate(x: torch.Tensor, xp: torch.Tensor, fp: torch.Tensor) -> torch.Tensor:
"""One-dimensional linear interpolation for monotonically increasing sample
points.
Returns the one-dimensional piecewise linear interpolant to a function with
given discrete data points :math:`(xp, fp)`, evaluated at :math:`x`.
Args:
x: the :math:`x`-coordinates at which to evaluate the interpolated
values.
xp: the :math:`x`-coordinates of the data points, must be increasing.
fp: the :math:`y`-coordinates of the data points, same length as `xp`.
Returns:
the interpolated values, same size as `x`.
"""
if x.ndim == xp.ndim - 1:
x = x[None]
m = (fp[..., 1:] - fp[..., :-1]) / (xp[..., 1:] - xp[..., :-1] + 1e-8) # slope
b = fp[..., :-1] - (m * xp[..., :-1])
indices = torch.sum(torch.ge(x[..., :, None], xp[..., None, :]), -1) - 1 # torch.ge: x[i] >= xp[i] ? true: false
indices = torch.clamp(indices, 0, m.shape[-1] - 1)
return m.gather(dim=-1, index=indices) * x + b.gather(dim=-1, index=indices)
@torch.jit.script
def integrate_weights(w: torch.Tensor):
"""Compute the cumulative sum of w, assuming all weight vectors sum to 1.
The output's size on the last dimension is one greater than that of the input,
because we're computing the integral corresponding to the endpoints of a step
function, not the integral of the interior/bin values.
Args:
w: Tensor, which will be integrated along the last axis. This is assumed to
sum to 1 along the last axis, and this function will (silently) break if
that is not the case.
Returns:
cw0: Tensor, the integral of w, where cw0[..., 0] = 0 and cw0[..., -1] = 1
"""
cw = torch.cumsum(w[..., :-1], dim=-1).clip(max=1.0)
shape = cw.shape[:-1] + (1,)
# Ensure that the CDF starts with exactly 0 and ends with exactly 1.
cw0 = torch.cat([cw.new_zeros(shape), cw, cw.new_ones(shape)], dim=-1)
return cw0
@torch.jit.script
def weighted_percentile(t: torch.Tensor, w: torch.Tensor, ps: List[float]):
    """Compute the weighted percentiles of a step function. w's must sum to 1."""
    t, w = matchup_channels(t, w)
    cw = integrate_weights(w)  # CDF endpoints, shape [..., n + 1]
    # We want to interpolate into the integrated weights according to `ps`.
    # Vmap fn to an arbitrary number of leading dimensions: flatten to 2D,
    # interpolate, then restore the batch shape.
    cw_mat = cw.reshape([-1, cw.shape[-1]])
    t_mat = t.reshape([-1, t.shape[-1]])
    # Invert the CDF: find the t-value whose cumulative weight equals each p.
    wprctile_mat = interpolate(torch.as_tensor(ps).to(t, non_blocking=True),
                               cw_mat,
                               t_mat)
    wprctile = wprctile_mat.reshape(cw.shape[:-1] + (len(ps),))
    return wprctile
def s_vals_to_z_vals(s: torch.Tensor,
                     tn: torch.Tensor,
                     tf: torch.Tensor,
                     g: Callable[[torch.Tensor], torch.Tensor] = lambda x: 1 / x,
                     ig: Callable[[torch.Tensor], torch.Tensor] = lambda x: 1 / x,
                     ):
    """Map normalized ray depth `s` back to metric depth `t`.

    Blends g(tn) and g(tf) linearly by `s`, then applies `ig` (the inverse
    of `g`) to land back in t-space.
    """
    far_term = s * g(tf)
    near_term = (1 - s) * g(tn)
    return ig(far_term + near_term)
def z_vals_to_s_vals(t: torch.Tensor,
                     tn: torch.Tensor,
                     tf: torch.Tensor,
                     g: Callable[[torch.Tensor], torch.Tensor] = lambda x: 1 / x,
                     ):
    """Map metric ray depth `t` to normalized depth `s`.

    Expresses g(t) as the fraction of the way from g(tn) to g(tf);
    the 1e-8 term keeps the division finite when g(tf) == g(tn).
    """
    g_t = g(t)
    g_near = g(tn)
    g_far = g(tf)
    return (g_t - g_near) / (g_far - g_near + 1e-8)
# Hierarchical sampling (section 5.2)
def searchsorted(a: torch.Tensor, v: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
    """Find indices where v should be inserted into a to maintain order.
    This behaves like jnp.searchsorted (its second output is the same as
    jnp.searchsorted's output if all elements of v are in [a[0], a[-1]]) but is
    faster because it wastes memory to save some compute.
    Args:
        a: tensor, the sorted reference points that we are scanning to see where v
            should lie.
        v: tensor, the query points that we are pretending to insert into a. Does
            not need to be sorted. All but the last dimensions should match or expand
            to those of a, the last dimension can differ.
    Returns:
        (idx_lo, idx_hi), where a[idx_lo] <= v < a[idx_hi], unless v is out of the
        range [a[0], a[-1]] in which case idx_lo and idx_hi are both the first or
        last index of a.
    """
    i = torch.arange(a.shape[-1], device=a.device)  # candidate indices 0..n-1
    # Pairwise table: v_ge_a[..., j, k] == (v[..., k] >= a[..., j]).
    v_ge_a = v[..., None, :] >= a[..., :, None]
    # Largest index whose a-value is <= v (falls back to index 0 when v < a[0]).
    idx_lo = torch.max(torch.where(v_ge_a, i[..., :, None], i[..., :1, None]), -2)[0]
    # Smallest index whose a-value is > v (falls back to n-1 when v >= a[-1]).
    idx_hi = torch.min(torch.where(~v_ge_a, i[..., :, None], i[..., -1:, None]), -2)[0]
    return idx_lo, idx_hi
def invert_cdf(u, t, w):
    """Invert the CDF defined by (t, w) at the points specified by u in [0, 1)."""
    # Compute the CDF endpoints for each weight vector (cw[..., 0]=0, cw[..., -1]=1).
    cw = integrate_weights(w)
    # Interpolate into the inverse CDF: treat cw as x-coordinates and t as
    # y-coordinates, so each u maps to the depth whose cumulative weight is u.
    t_new = interpolate(u, cw, t)
    return t_new
def importance_sampling(t: torch.Tensor,
                        w: torch.Tensor,
                        num_samples: int,
                        perturb=True,
                        single_jitter=False,
                        ):
    """Piecewise-constant PDF sampling from a step function (inverse-CDF sampling).
    Args:
        t: [..., num_bins + 1] bin endpoint coordinates (must be sorted); an
            extra trailing channel dimension, if present, is squeezed off.
        w: [..., num_bins], weight of each bin (assumed to sum to 1).
        num_samples: int, the number of samples to draw per ray.
        perturb: bool, if True jitter the stratified samples within their
            strata; if False the samples are a deterministic linspace over the CDF.
        single_jitter: bool, if True, jitter every sample along each ray by the
            same amount in the inverse CDF. Otherwise, jitter each sample
            independently.
    Returns:
        [..., num_samples] sampled positions in the same space as `t`.
    """
    if t.ndim == w.ndim + 1:
        t = t[..., 0]  # remove trailing channel dim
    # Remember the leading batch shape, then flatten to [N, bins] for sampling.
    sh = *t.shape[:-1], num_samples  # B, P, I
    t = t.reshape(-1, t.shape[-1])
    w = w.reshape(-1, w.shape[-1])
    # If t holds as many entries as w (bin centers, not edges), append 1.0 as
    # the far edge — assumes sampling in normalized s space ending at 1.
    if t.shape[-1] != w.shape[-1] + 1:
        t = torch.cat([t, torch.ones_like(t[..., -1:])], dim=-1)
    # eps = torch.finfo(torch.float32).eps
    eps = 1e-8
    # Draw uniform samples.
    # `u` is in [0, 1) --- it can be zero, but it can never be 1.
    u_max = eps + (1 - eps) / num_samples
    max_jitter = (1 - u_max) / (num_samples - 1) - eps if perturb else 0
    d = 1 if single_jitter else num_samples  # jitter dims: shared (1) or per-sample
    u = (
        torch.linspace(0, 1 - u_max, num_samples, device=t.device, dtype=t.dtype) +
        torch.rand(t.shape[:-1] + (d,), device=t.device, dtype=t.dtype) * max_jitter
    )
    # Push the stratified uniforms through the inverse CDF of (t, w).
    u = invert_cdf(u, t, w)
    # Restore the original leading batch shape.
    u = u.reshape(sh)
    return u
def weight_to_pdf(t: torch.Tensor, w: torch.Tensor, eps=torch.finfo(torch.float32).eps**2):
    """Turn a vector of weights that sums to 1 into a PDF that integrates to 1.

    Args:
        t: bin edges; matchup_channels ensures t.shape[-1] == w.shape[-1] + 1.
        w: per-bin weights, assumed to sum to 1 along the last axis.
        eps: lower bound applied to bin widths to avoid division by zero.

    Returns:
        Per-bin density: w divided by the (clipped) bin widths.
    """
    # Fix: the original placed this docstring AFTER the first statement, where
    # it was a dead string expression rather than a docstring.
    t, w = matchup_channels(t, w)
    return w / (t[..., 1:] - t[..., :-1]).clip(eps)
def pdf_to_weight(t: torch.Tensor, p: torch.Tensor):
    """Turn a PDF that integrates to 1 into a vector of weights that sums to 1.

    Args:
        t: bin edges; matchup_channels ensures t.shape[-1] == p.shape[-1] + 1.
        p: per-bin density values.

    Returns:
        Per-bin weights: p multiplied by the bin widths.
    """
    # Fix: the original placed this docstring AFTER the first statement, where
    # it was a dead string expression rather than a docstring.
    t, p = matchup_channels(t, p)
    return p * (t[..., 1:] - t[..., :-1])
def max_dilate(t, w, dilation, domain=(-torch.inf, torch.inf)):
    """Dilate (via max-pooling) a non-negative step function.

    Args:
        t: bin edges (matched up so t.shape[-1] == w.shape[-1] + 1).
        w: non-negative step values per bin.
        dilation: amount by which every bin is widened on each side.
        domain: (lo, hi) clamp applied to the dilated edge positions.

    Returns:
        (t_dilate, w_dilate): the sorted union of original and shifted edges,
        and for each resulting bin the max of all dilated bins covering it.
    """
    # Fix: the original placed this docstring AFTER the first statement, where
    # it was a dead string expression rather than a docstring.
    t, w = matchup_channels(t, w)
    t0 = t[..., :-1] - dilation  # left edges pushed left
    t1 = t[..., 1:] + dilation  # right edges pushed right
    # Candidate breakpoints: original edges plus both dilated edge sets.
    t_dilate = torch.sort(torch.cat([t, t0, t1], dim=-1), dim=-1)[0]
    t_dilate = t_dilate.clip(*domain)
    # For each new left edge, max-pool the weight over every dilated interval
    # [t0_i, t1_i) containing it; drop the final edge to get per-bin values.
    w_dilate = torch.max(
        torch.where(
            (t0[..., None, :] <= t_dilate[..., None])
            & (t1[..., None, :] > t_dilate[..., None]),
            w[..., None, :],
            0,
        ),
        dim=-1)[0][..., :-1]
    return t_dilate, w_dilate
def max_dilate_weights(t: torch.Tensor,
                       w: torch.Tensor,
                       dilation: float,
                       domain=(-torch.inf, torch.inf),
                       renormalize=False,
                       eps=torch.finfo(torch.float32).eps**2):
    """Dilate (via max-pooling) a set of weights."""
    # Convert weights to densities so the dilation operates on the step function.
    p = weight_to_pdf(t, w)
    t_dilate, p_dilate = max_dilate(t, p, dilation, domain=domain)
    # Convert back from densities to weights over the new bins.
    w_dilate = pdf_to_weight(t_dilate, p_dilate)
    if renormalize:
        # Restore sum-to-1 along the last axis; eps guards all-zero rows.
        w_dilate /= torch.sum(w_dilate, dim=-1, keepdim=True).clip(eps)
    return t_dilate, w_dilate
def anneal_weights(t: torch.Tensor,
                   w: torch.Tensor,
                   train_frac: float,
                   anneal_slope: float = 10.0,
                   eps=torch.finfo(torch.float32).eps ** 2):
    """Sharpen (anneal) a weight distribution as training progresses.

    Accepts t.shape[-1] == w.shape[-1] + 1 (handled by matchup_channels).
    Returns renormalized weights proportional to w**anneal per bin.
    """
    t, w = matchup_channels(t, w)
    # Optionally anneal the weights as a function of training iteration.
    if anneal_slope > 0:
        # Schlick's bias function, see https://arxiv.org/abs/2010.09714
        def bias(x, s): return (s * x) / ((s - 1) * x + 1)
        anneal = bias(train_frac, anneal_slope)
    else:
        anneal = 1.
    # A slightly more stable way to compute weights**anneal. If the distance
    # between adjacent intervals is zero then its weight is fixed to 0.
    logits_resample = torch.where(
        t[..., 1:] > t[..., :-1],
        anneal * torch.log(w.clip(eps)), -torch.inf)  # MARK: prone to nan
    # If all samples are -inf, softmax will produce a nan (all -torch.inf)
    w = torch.softmax(logits_resample, dim=-1)
    return w
def query(tq, t, y, outside_value=0):
    """Look up the values of the step function (t, y) at locations tq.

    Queries outside [t[0], t[-1]] (where idx_lo == idx_hi) yield `outside_value`.
    """
    idx_lo, idx_hi = searchsorted(t, tq)
    # Pad y with one outside_value so the largest possible idx_lo still indexes
    # a defined slot; in-range queries gather the bin value y[idx_lo].
    yq = torch.where(idx_lo == idx_hi, outside_value,
                     torch.take_along_dim(torch.cat([y, torch.full_like(y[..., :1], outside_value)], dim=-1), idx_lo, dim=-1))
    return yq
|
evocodebench_data_89
|
import torch
from typing import Tuple, Callable, List
def matchup_channels(t: torch.Tensor, w: torch.Tensor):
    """Align bin-edge tensor `t` with weight tensor `w`.

    After this call t.shape[-1] == w.shape[-1] + 1 and both share the same
    rank: a trailing extra dimension on `t` is dropped, and if `t` holds as
    many entries as `w` a column of ones is appended as the far edge.
    """
    has_extra_dim = t.ndim == w.ndim + 1
    if has_extra_dim:
        t = t[..., 0]  # collapse the trailing dimension
    needs_far_edge = t.shape[-1] != w.shape[-1] + 1
    if needs_far_edge:
        far_edge = torch.ones_like(t[..., -1:])
        t = torch.cat([t, far_edge], dim=-1)
    return t, w
@torch.jit.script
def interpolate(x: torch.Tensor, xp: torch.Tensor, fp: torch.Tensor) -> torch.Tensor:
    """One-dimensional linear interpolation for monotonically increasing sample
    points.
    Returns the one-dimensional piecewise linear interpolant to a function with
    given discrete data points :math:`(xp, fp)`, evaluated at :math:`x`.
    Args:
        x: the :math:`x`-coordinates at which to evaluate the interpolated
            values.
        xp: the :math:`x`-coordinates of the data points, must be increasing.
        fp: the :math:`y`-coordinates of the data points, same length as `xp`.
    Returns:
        the interpolated values, same size as `x`.
    """
    # Add a leading dimension so x broadcasts against batched xp/fp below.
    if x.ndim == xp.ndim - 1:
        x = x[None]
    m = (fp[..., 1:] - fp[..., :-1]) / (xp[..., 1:] - xp[..., :-1] + 1e-8)  # per-segment slope; 1e-8 guards zero-width segments
    b = fp[..., :-1] - (m * xp[..., :-1])  # per-segment intercept
    # Segment index of each query: count of xp entries that x is >= of, minus one.
    indices = torch.sum(torch.ge(x[..., :, None], xp[..., None, :]), -1) - 1  # torch.ge: x[i] >= xp[i] ? true: false
    # Clamp so queries outside [xp[0], xp[-1]] extrapolate from the first/last segment.
    indices = torch.clamp(indices, 0, m.shape[-1] - 1)
    return m.gather(dim=-1, index=indices) * x + b.gather(dim=-1, index=indices)
@torch.jit.script
def integrate_weights(w: torch.Tensor):
    """Compute the cumulative sum of w, assuming all weight vectors sum to 1.
    The output's size on the last dimension is one greater than that of the input,
    because we're computing the integral corresponding to the endpoints of a step
    function, not the integral of the interior/bin values.
    Args:
        w: Tensor, which will be integrated along the last axis. This is assumed to
            sum to 1 along the last axis, and this function will (silently) break if
            that is not the case.
    Returns:
        cw0: Tensor, the integral of w, where cw0[..., 0] = 0 and cw0[..., -1] = 1
    """
    # Partial sums over all but the last bin; clip guards float accumulation past 1.
    cw = torch.cumsum(w[..., :-1], dim=-1).clip(max=1.0)
    shape = cw.shape[:-1] + (1,)
    # Ensure that the CDF starts with exactly 0 and ends with exactly 1.
    cw0 = torch.cat([cw.new_zeros(shape), cw, cw.new_ones(shape)], dim=-1)
    return cw0
@torch.jit.script
def weighted_percentile(t: torch.Tensor, w: torch.Tensor, ps: List[float]):
    """Compute the weighted percentiles of a step function. w's must sum to 1."""
    t, w = matchup_channels(t, w)
    cw = integrate_weights(w)  # CDF endpoints, shape [..., n + 1]
    # We want to interpolate into the integrated weights according to `ps`.
    # Vmap fn to an arbitrary number of leading dimensions: flatten to 2D,
    # interpolate, then restore the batch shape.
    cw_mat = cw.reshape([-1, cw.shape[-1]])
    t_mat = t.reshape([-1, t.shape[-1]])
    # Invert the CDF: find the t-value whose cumulative weight equals each p.
    wprctile_mat = interpolate(torch.as_tensor(ps).to(t, non_blocking=True),
                               cw_mat,
                               t_mat)
    wprctile = wprctile_mat.reshape(cw.shape[:-1] + (len(ps),))
    return wprctile
def s_vals_to_z_vals(s: torch.Tensor,
                     tn: torch.Tensor,
                     tf: torch.Tensor,
                     g: Callable[[torch.Tensor], torch.Tensor] = lambda x: 1 / x,
                     ig: Callable[[torch.Tensor], torch.Tensor] = lambda x: 1 / x,
                     ):
    """Map normalized ray depth `s` back to metric depth `t`.

    Blends g(tn) and g(tf) linearly by `s`, then applies `ig` (the inverse
    of `g`) to land back in t-space.
    """
    far_term = s * g(tf)
    near_term = (1 - s) * g(tn)
    return ig(far_term + near_term)
def z_vals_to_s_vals(t: torch.Tensor,
                     tn: torch.Tensor,
                     tf: torch.Tensor,
                     g: Callable[[torch.Tensor], torch.Tensor] = lambda x: 1 / x,
                     ):
    """Map metric ray depth `t` to normalized depth `s`.

    Expresses g(t) as the fraction of the way from g(tn) to g(tf);
    the 1e-8 term keeps the division finite when g(tf) == g(tn).
    """
    g_t = g(t)
    g_near = g(tn)
    g_far = g(tf)
    return (g_t - g_near) / (g_far - g_near + 1e-8)
# Hierarchical sampling (section 5.2)
def searchsorted(a: torch.Tensor, v: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
    """Find indices where v should be inserted into a to maintain order.
    This behaves like jnp.searchsorted (its second output is the same as
    jnp.searchsorted's output if all elements of v are in [a[0], a[-1]]) but is
    faster because it wastes memory to save some compute.
    Args:
        a: tensor, the sorted reference points that we are scanning to see where v
            should lie.
        v: tensor, the query points that we are pretending to insert into a. Does
            not need to be sorted. All but the last dimensions should match or expand
            to those of a, the last dimension can differ.
    Returns:
        (idx_lo, idx_hi), where a[idx_lo] <= v < a[idx_hi], unless v is out of the
        range [a[0], a[-1]] in which case idx_lo and idx_hi are both the first or
        last index of a.
    """
    i = torch.arange(a.shape[-1], device=a.device)  # candidate indices 0..n-1
    # Pairwise table: v_ge_a[..., j, k] == (v[..., k] >= a[..., j]).
    v_ge_a = v[..., None, :] >= a[..., :, None]
    # Largest index whose a-value is <= v (falls back to index 0 when v < a[0]).
    idx_lo = torch.max(torch.where(v_ge_a, i[..., :, None], i[..., :1, None]), -2)[0]
    # Smallest index whose a-value is > v (falls back to n-1 when v >= a[-1]).
    idx_hi = torch.min(torch.where(~v_ge_a, i[..., :, None], i[..., -1:, None]), -2)[0]
    return idx_lo, idx_hi
def invert_cdf(u, t, w):
    """Invert the CDF defined by (t, w) at the points specified by u in [0, 1)."""
    # Compute the CDF endpoints for each weight vector (cw[..., 0]=0, cw[..., -1]=1).
    cw = integrate_weights(w)
    # Interpolate into the inverse CDF: treat cw as x-coordinates and t as
    # y-coordinates, so each u maps to the depth whose cumulative weight is u.
    t_new = interpolate(u, cw, t)
    return t_new
def importance_sampling(t: torch.Tensor,
                        w: torch.Tensor,
                        num_samples: int,
                        perturb=True,
                        single_jitter=False,
                        ):
    """Piecewise-constant PDF sampling from a step function (inverse-CDF sampling).
    Args:
        t: [..., num_bins + 1] bin endpoint coordinates (must be sorted); an
            extra trailing channel dimension, if present, is squeezed off.
        w: [..., num_bins], weight of each bin (assumed to sum to 1).
        num_samples: int, the number of samples to draw per ray.
        perturb: bool, if True jitter the stratified samples within their
            strata; if False the samples are a deterministic linspace over the CDF.
        single_jitter: bool, if True, jitter every sample along each ray by the
            same amount in the inverse CDF. Otherwise, jitter each sample
            independently.
    Returns:
        [..., num_samples] sampled positions in the same space as `t`.
    """
    if t.ndim == w.ndim + 1:
        t = t[..., 0]  # remove trailing channel dim
    # Remember the leading batch shape, then flatten to [N, bins] for sampling.
    sh = *t.shape[:-1], num_samples  # B, P, I
    t = t.reshape(-1, t.shape[-1])
    w = w.reshape(-1, w.shape[-1])
    # If t holds as many entries as w (bin centers, not edges), append 1.0 as
    # the far edge — assumes sampling in normalized s space ending at 1.
    if t.shape[-1] != w.shape[-1] + 1:
        t = torch.cat([t, torch.ones_like(t[..., -1:])], dim=-1)
    # eps = torch.finfo(torch.float32).eps
    eps = 1e-8
    # Draw uniform samples.
    # `u` is in [0, 1) --- it can be zero, but it can never be 1.
    u_max = eps + (1 - eps) / num_samples
    max_jitter = (1 - u_max) / (num_samples - 1) - eps if perturb else 0
    d = 1 if single_jitter else num_samples  # jitter dims: shared (1) or per-sample
    u = (
        torch.linspace(0, 1 - u_max, num_samples, device=t.device, dtype=t.dtype) +
        torch.rand(t.shape[:-1] + (d,), device=t.device, dtype=t.dtype) * max_jitter
    )
    # Push the stratified uniforms through the inverse CDF of (t, w).
    u = invert_cdf(u, t, w)
    # Restore the original leading batch shape.
    u = u.reshape(sh)
    return u
def weight_to_pdf(t: torch.Tensor, w: torch.Tensor, eps=torch.finfo(torch.float32).eps**2):
    """Turn a vector of weights that sums to 1 into a PDF that integrates to 1.

    Args:
        t: bin edges; matchup_channels ensures t.shape[-1] == w.shape[-1] + 1.
        w: per-bin weights, assumed to sum to 1 along the last axis.
        eps: lower bound applied to bin widths to avoid division by zero.

    Returns:
        Per-bin density: w divided by the (clipped) bin widths.
    """
    # Fix: the original placed this docstring AFTER the first statement, where
    # it was a dead string expression rather than a docstring.
    t, w = matchup_channels(t, w)
    return w / (t[..., 1:] - t[..., :-1]).clip(eps)
def pdf_to_weight(t: torch.Tensor, p: torch.Tensor):
    """Turn a PDF that integrates to 1 into a vector of weights that sums to 1.

    Args:
        t: bin edges; matchup_channels ensures t.shape[-1] == p.shape[-1] + 1.
        p: per-bin density values.

    Returns:
        Per-bin weights: p multiplied by the bin widths.
    """
    # Fix: the original placed this docstring AFTER the first statement, where
    # it was a dead string expression rather than a docstring.
    t, p = matchup_channels(t, p)
    return p * (t[..., 1:] - t[..., :-1])
def max_dilate(t, w, dilation, domain=(-torch.inf, torch.inf)):
    """Dilate (via max-pooling) a non-negative step function.

    Args:
        t: bin edges (matched up so t.shape[-1] == w.shape[-1] + 1).
        w: non-negative step values per bin.
        dilation: amount by which every bin is widened on each side.
        domain: (lo, hi) clamp applied to the dilated edge positions.

    Returns:
        (t_dilate, w_dilate): the sorted union of original and shifted edges,
        and for each resulting bin the max of all dilated bins covering it.
    """
    # Fix: the original placed this docstring AFTER the first statement, where
    # it was a dead string expression rather than a docstring.
    t, w = matchup_channels(t, w)
    t0 = t[..., :-1] - dilation  # left edges pushed left
    t1 = t[..., 1:] + dilation  # right edges pushed right
    # Candidate breakpoints: original edges plus both dilated edge sets.
    t_dilate = torch.sort(torch.cat([t, t0, t1], dim=-1), dim=-1)[0]
    t_dilate = t_dilate.clip(*domain)
    # For each new left edge, max-pool the weight over every dilated interval
    # [t0_i, t1_i) containing it; drop the final edge to get per-bin values.
    w_dilate = torch.max(
        torch.where(
            (t0[..., None, :] <= t_dilate[..., None])
            & (t1[..., None, :] > t_dilate[..., None]),
            w[..., None, :],
            0,
        ),
        dim=-1)[0][..., :-1]
    return t_dilate, w_dilate
def max_dilate_weights(t: torch.Tensor,
                       w: torch.Tensor,
                       dilation: float,
                       domain=(-torch.inf, torch.inf),
                       renormalize=False,
                       eps=torch.finfo(torch.float32).eps**2):
    """Dilate (via max-pooling) a set of weights."""
    # Convert weights to densities so the dilation operates on the step function.
    p = weight_to_pdf(t, w)
    t_dilate, p_dilate = max_dilate(t, p, dilation, domain=domain)
    # Convert back from densities to weights over the new bins.
    w_dilate = pdf_to_weight(t_dilate, p_dilate)
    if renormalize:
        # Restore sum-to-1 along the last axis; eps guards all-zero rows.
        w_dilate /= torch.sum(w_dilate, dim=-1, keepdim=True).clip(eps)
    return t_dilate, w_dilate
def anneal_weights(t: torch.Tensor,
                   w: torch.Tensor,
                   train_frac: float,
                   anneal_slope: float = 10.0,
                   eps=torch.finfo(torch.float32).eps ** 2):
    """Sharpen (anneal) a weight distribution as training progresses.

    Accepts t.shape[-1] == w.shape[-1] + 1 (handled by matchup_channels).
    Returns renormalized weights proportional to w**anneal per bin.
    """
    t, w = matchup_channels(t, w)
    # Optionally anneal the weights as a function of training iteration.
    if anneal_slope > 0:
        # Schlick's bias function, see https://arxiv.org/abs/2010.09714
        def bias(x, s): return (s * x) / ((s - 1) * x + 1)
        anneal = bias(train_frac, anneal_slope)
    else:
        anneal = 1.
    # A slightly more stable way to compute weights**anneal. If the distance
    # between adjacent intervals is zero then its weight is fixed to 0.
    logits_resample = torch.where(
        t[..., 1:] > t[..., :-1],
        anneal * torch.log(w.clip(eps)), -torch.inf)  # MARK: prone to nan
    # If all samples are -inf, softmax will produce a nan (all -torch.inf)
    w = torch.softmax(logits_resample, dim=-1)
    return w
def query(tq, t, y, outside_value=0):
    """Look up the values of the step function (t, y) at locations tq.

    Queries outside [t[0], t[-1]] (where idx_lo == idx_hi) yield `outside_value`.
    """
    idx_lo, idx_hi = searchsorted(t, tq)
    # Pad y with one outside_value so the largest possible idx_lo still indexes
    # a defined slot; in-range queries gather the bin value y[idx_lo].
    yq = torch.where(idx_lo == idx_hi, outside_value,
                     torch.take_along_dim(torch.cat([y, torch.full_like(y[..., :1], outside_value)], dim=-1), idx_lo, dim=-1))
    return yq
|
evocodebench_data_90
|
import torch
from typing import Tuple, Callable, List
def matchup_channels(t: torch.Tensor, w: torch.Tensor):
    """Align bin-edge tensor `t` with weight tensor `w`.

    After this call t.shape[-1] == w.shape[-1] + 1 and both share the same
    rank: a trailing extra dimension on `t` is dropped, and if `t` holds as
    many entries as `w` a column of ones is appended as the far edge.
    """
    has_extra_dim = t.ndim == w.ndim + 1
    if has_extra_dim:
        t = t[..., 0]  # collapse the trailing dimension
    needs_far_edge = t.shape[-1] != w.shape[-1] + 1
    if needs_far_edge:
        far_edge = torch.ones_like(t[..., -1:])
        t = torch.cat([t, far_edge], dim=-1)
    return t, w
@torch.jit.script
def interpolate(x: torch.Tensor, xp: torch.Tensor, fp: torch.Tensor) -> torch.Tensor:
    """One-dimensional linear interpolation for monotonically increasing sample
    points.
    Returns the one-dimensional piecewise linear interpolant to a function with
    given discrete data points :math:`(xp, fp)`, evaluated at :math:`x`.
    Args:
        x: the :math:`x`-coordinates at which to evaluate the interpolated
            values.
        xp: the :math:`x`-coordinates of the data points, must be increasing.
        fp: the :math:`y`-coordinates of the data points, same length as `xp`.
    Returns:
        the interpolated values, same size as `x`.
    """
    # Add a leading dimension so x broadcasts against batched xp/fp below.
    if x.ndim == xp.ndim - 1:
        x = x[None]
    m = (fp[..., 1:] - fp[..., :-1]) / (xp[..., 1:] - xp[..., :-1] + 1e-8)  # per-segment slope; 1e-8 guards zero-width segments
    b = fp[..., :-1] - (m * xp[..., :-1])  # per-segment intercept
    # Segment index of each query: count of xp entries that x is >= of, minus one.
    indices = torch.sum(torch.ge(x[..., :, None], xp[..., None, :]), -1) - 1  # torch.ge: x[i] >= xp[i] ? true: false
    # Clamp so queries outside [xp[0], xp[-1]] extrapolate from the first/last segment.
    indices = torch.clamp(indices, 0, m.shape[-1] - 1)
    return m.gather(dim=-1, index=indices) * x + b.gather(dim=-1, index=indices)
@torch.jit.script
def integrate_weights(w: torch.Tensor):
    """Compute the cumulative sum of w, assuming all weight vectors sum to 1.
    The output's size on the last dimension is one greater than that of the input,
    because we're computing the integral corresponding to the endpoints of a step
    function, not the integral of the interior/bin values.
    Args:
        w: Tensor, which will be integrated along the last axis. This is assumed to
            sum to 1 along the last axis, and this function will (silently) break if
            that is not the case.
    Returns:
        cw0: Tensor, the integral of w, where cw0[..., 0] = 0 and cw0[..., -1] = 1
    """
    # Partial sums over all but the last bin; clip guards float accumulation past 1.
    cw = torch.cumsum(w[..., :-1], dim=-1).clip(max=1.0)
    shape = cw.shape[:-1] + (1,)
    # Ensure that the CDF starts with exactly 0 and ends with exactly 1.
    cw0 = torch.cat([cw.new_zeros(shape), cw, cw.new_ones(shape)], dim=-1)
    return cw0
@torch.jit.script
def weighted_percentile(t: torch.Tensor, w: torch.Tensor, ps: List[float]):
    """Compute the weighted percentiles of a step function. w's must sum to 1."""
    t, w = matchup_channels(t, w)
    cw = integrate_weights(w)  # CDF endpoints, shape [..., n + 1]
    # We want to interpolate into the integrated weights according to `ps`.
    # Vmap fn to an arbitrary number of leading dimensions: flatten to 2D,
    # interpolate, then restore the batch shape.
    cw_mat = cw.reshape([-1, cw.shape[-1]])
    t_mat = t.reshape([-1, t.shape[-1]])
    # Invert the CDF: find the t-value whose cumulative weight equals each p.
    wprctile_mat = interpolate(torch.as_tensor(ps).to(t, non_blocking=True),
                               cw_mat,
                               t_mat)
    wprctile = wprctile_mat.reshape(cw.shape[:-1] + (len(ps),))
    return wprctile
def s_vals_to_z_vals(s: torch.Tensor,
                     tn: torch.Tensor,
                     tf: torch.Tensor,
                     g: Callable[[torch.Tensor], torch.Tensor] = lambda x: 1 / x,
                     ig: Callable[[torch.Tensor], torch.Tensor] = lambda x: 1 / x,
                     ):
    """Map normalized ray depth `s` back to metric depth `t`.

    Blends g(tn) and g(tf) linearly by `s`, then applies `ig` (the inverse
    of `g`) to land back in t-space.
    """
    far_term = s * g(tf)
    near_term = (1 - s) * g(tn)
    return ig(far_term + near_term)
def z_vals_to_s_vals(t: torch.Tensor,
                     tn: torch.Tensor,
                     tf: torch.Tensor,
                     g: Callable[[torch.Tensor], torch.Tensor] = lambda x: 1 / x,
                     ):
    """Map metric ray depth `t` to normalized depth `s`.

    Expresses g(t) as the fraction of the way from g(tn) to g(tf);
    the 1e-8 term keeps the division finite when g(tf) == g(tn).
    """
    g_t = g(t)
    g_near = g(tn)
    g_far = g(tf)
    return (g_t - g_near) / (g_far - g_near + 1e-8)
# Hierarchical sampling (section 5.2)
def searchsorted(a: torch.Tensor, v: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
    """Find indices where v should be inserted into a to maintain order.
    This behaves like jnp.searchsorted (its second output is the same as
    jnp.searchsorted's output if all elements of v are in [a[0], a[-1]]) but is
    faster because it wastes memory to save some compute.
    Args:
        a: tensor, the sorted reference points that we are scanning to see where v
            should lie.
        v: tensor, the query points that we are pretending to insert into a. Does
            not need to be sorted. All but the last dimensions should match or expand
            to those of a, the last dimension can differ.
    Returns:
        (idx_lo, idx_hi), where a[idx_lo] <= v < a[idx_hi], unless v is out of the
        range [a[0], a[-1]] in which case idx_lo and idx_hi are both the first or
        last index of a.
    """
    i = torch.arange(a.shape[-1], device=a.device)  # candidate indices 0..n-1
    # Pairwise table: v_ge_a[..., j, k] == (v[..., k] >= a[..., j]).
    v_ge_a = v[..., None, :] >= a[..., :, None]
    # Largest index whose a-value is <= v (falls back to index 0 when v < a[0]).
    idx_lo = torch.max(torch.where(v_ge_a, i[..., :, None], i[..., :1, None]), -2)[0]
    # Smallest index whose a-value is > v (falls back to n-1 when v >= a[-1]).
    idx_hi = torch.min(torch.where(~v_ge_a, i[..., :, None], i[..., -1:, None]), -2)[0]
    return idx_lo, idx_hi
def invert_cdf(u, t, w):
    """Invert the CDF defined by (t, w) at the points specified by u in [0, 1)."""
    # Compute the CDF endpoints for each weight vector (cw[..., 0]=0, cw[..., -1]=1).
    cw = integrate_weights(w)
    # Interpolate into the inverse CDF: treat cw as x-coordinates and t as
    # y-coordinates, so each u maps to the depth whose cumulative weight is u.
    t_new = interpolate(u, cw, t)
    return t_new
def importance_sampling(t: torch.Tensor,
                        w: torch.Tensor,
                        num_samples: int,
                        perturb=True,
                        single_jitter=False,
                        ):
    """Piecewise-constant PDF sampling from a step function (inverse-CDF sampling).
    Args:
        t: [..., num_bins + 1] bin endpoint coordinates (must be sorted); an
            extra trailing channel dimension, if present, is squeezed off.
        w: [..., num_bins], weight of each bin (assumed to sum to 1).
        num_samples: int, the number of samples to draw per ray.
        perturb: bool, if True jitter the stratified samples within their
            strata; if False the samples are a deterministic linspace over the CDF.
        single_jitter: bool, if True, jitter every sample along each ray by the
            same amount in the inverse CDF. Otherwise, jitter each sample
            independently.
    Returns:
        [..., num_samples] sampled positions in the same space as `t`.
    """
    if t.ndim == w.ndim + 1:
        t = t[..., 0]  # remove trailing channel dim
    # Remember the leading batch shape, then flatten to [N, bins] for sampling.
    sh = *t.shape[:-1], num_samples  # B, P, I
    t = t.reshape(-1, t.shape[-1])
    w = w.reshape(-1, w.shape[-1])
    # If t holds as many entries as w (bin centers, not edges), append 1.0 as
    # the far edge — assumes sampling in normalized s space ending at 1.
    if t.shape[-1] != w.shape[-1] + 1:
        t = torch.cat([t, torch.ones_like(t[..., -1:])], dim=-1)
    # eps = torch.finfo(torch.float32).eps
    eps = 1e-8
    # Draw uniform samples.
    # `u` is in [0, 1) --- it can be zero, but it can never be 1.
    u_max = eps + (1 - eps) / num_samples
    max_jitter = (1 - u_max) / (num_samples - 1) - eps if perturb else 0
    d = 1 if single_jitter else num_samples  # jitter dims: shared (1) or per-sample
    u = (
        torch.linspace(0, 1 - u_max, num_samples, device=t.device, dtype=t.dtype) +
        torch.rand(t.shape[:-1] + (d,), device=t.device, dtype=t.dtype) * max_jitter
    )
    # Push the stratified uniforms through the inverse CDF of (t, w).
    u = invert_cdf(u, t, w)
    # Restore the original leading batch shape.
    u = u.reshape(sh)
    return u
def weight_to_pdf(t: torch.Tensor, w: torch.Tensor, eps=torch.finfo(torch.float32).eps**2):
    """Turn a vector of weights that sums to 1 into a PDF that integrates to 1.

    Args:
        t: bin edges; matchup_channels ensures t.shape[-1] == w.shape[-1] + 1.
        w: per-bin weights, assumed to sum to 1 along the last axis.
        eps: lower bound applied to bin widths to avoid division by zero.

    Returns:
        Per-bin density: w divided by the (clipped) bin widths.
    """
    # Fix: the original placed this docstring AFTER the first statement, where
    # it was a dead string expression rather than a docstring.
    t, w = matchup_channels(t, w)
    return w / (t[..., 1:] - t[..., :-1]).clip(eps)
def pdf_to_weight(t: torch.Tensor, p: torch.Tensor):
    """Turn a PDF that integrates to 1 into a vector of weights that sums to 1.

    Args:
        t: bin edges; matchup_channels ensures t.shape[-1] == p.shape[-1] + 1.
        p: per-bin density values.

    Returns:
        Per-bin weights: p multiplied by the bin widths.
    """
    # Fix: the original placed this docstring AFTER the first statement, where
    # it was a dead string expression rather than a docstring.
    t, p = matchup_channels(t, p)
    return p * (t[..., 1:] - t[..., :-1])
def max_dilate(t, w, dilation, domain=(-torch.inf, torch.inf)):
    """Dilate (via max-pooling) a non-negative step function.

    Args:
        t: bin edges (matched up so t.shape[-1] == w.shape[-1] + 1).
        w: non-negative step values per bin.
        dilation: amount by which every bin is widened on each side.
        domain: (lo, hi) clamp applied to the dilated edge positions.

    Returns:
        (t_dilate, w_dilate): the sorted union of original and shifted edges,
        and for each resulting bin the max of all dilated bins covering it.
    """
    # Fix: the original placed this docstring AFTER the first statement, where
    # it was a dead string expression rather than a docstring.
    t, w = matchup_channels(t, w)
    t0 = t[..., :-1] - dilation  # left edges pushed left
    t1 = t[..., 1:] + dilation  # right edges pushed right
    # Candidate breakpoints: original edges plus both dilated edge sets.
    t_dilate = torch.sort(torch.cat([t, t0, t1], dim=-1), dim=-1)[0]
    t_dilate = t_dilate.clip(*domain)
    # For each new left edge, max-pool the weight over every dilated interval
    # [t0_i, t1_i) containing it; drop the final edge to get per-bin values.
    w_dilate = torch.max(
        torch.where(
            (t0[..., None, :] <= t_dilate[..., None])
            & (t1[..., None, :] > t_dilate[..., None]),
            w[..., None, :],
            0,
        ),
        dim=-1)[0][..., :-1]
    return t_dilate, w_dilate
def max_dilate_weights(t: torch.Tensor,
                       w: torch.Tensor,
                       dilation: float,
                       domain=(-torch.inf, torch.inf),
                       renormalize=False,
                       eps=torch.finfo(torch.float32).eps**2):
    """Dilate (via max-pooling) a set of weights."""
    # Convert weights to densities so the dilation operates on the step function.
    p = weight_to_pdf(t, w)
    t_dilate, p_dilate = max_dilate(t, p, dilation, domain=domain)
    # Convert back from densities to weights over the new bins.
    w_dilate = pdf_to_weight(t_dilate, p_dilate)
    if renormalize:
        # Restore sum-to-1 along the last axis; eps guards all-zero rows.
        w_dilate /= torch.sum(w_dilate, dim=-1, keepdim=True).clip(eps)
    return t_dilate, w_dilate
def anneal_weights(t: torch.Tensor,
                   w: torch.Tensor,
                   train_frac: float,
                   anneal_slope: float = 10.0,
                   eps=torch.finfo(torch.float32).eps ** 2):
    """Sharpen (anneal) a weight distribution as training progresses.

    Accepts t.shape[-1] == w.shape[-1] + 1 (handled by matchup_channels).
    Returns renormalized weights proportional to w**anneal per bin.
    """
    t, w = matchup_channels(t, w)
    # Optionally anneal the weights as a function of training iteration.
    if anneal_slope > 0:
        # Schlick's bias function, see https://arxiv.org/abs/2010.09714
        def bias(x, s): return (s * x) / ((s - 1) * x + 1)
        anneal = bias(train_frac, anneal_slope)
    else:
        anneal = 1.
    # A slightly more stable way to compute weights**anneal. If the distance
    # between adjacent intervals is zero then its weight is fixed to 0.
    logits_resample = torch.where(
        t[..., 1:] > t[..., :-1],
        anneal * torch.log(w.clip(eps)), -torch.inf)  # MARK: prone to nan
    # If all samples are -inf, softmax will produce a nan (all -torch.inf)
    w = torch.softmax(logits_resample, dim=-1)
    return w
def query(tq, t, y, outside_value=0):
    """Look up the values of the step function (t, y) at locations tq.

    Queries outside [t[0], t[-1]] (where idx_lo == idx_hi) yield `outside_value`.
    """
    idx_lo, idx_hi = searchsorted(t, tq)
    # Pad y with one outside_value so the largest possible idx_lo still indexes
    # a defined slot; in-range queries gather the bin value y[idx_lo].
    yq = torch.where(idx_lo == idx_hi, outside_value,
                     torch.take_along_dim(torch.cat([y, torch.full_like(y[..., :1], outside_value)], dim=-1), idx_lo, dim=-1))
    return yq
|
evocodebench_data_91
|
import os
import re
import cv2
import h5py
import torch
import struct
import asyncio
import subprocess
import numpy as np
from PIL import Image
from io import BytesIO
from typing import overload
from functools import lru_cache
# from imgaug import augmenters as iaa
from typing import Tuple, Union, List, Dict
from torch.nn import functional as F
from torch.utils.data._utils.pin_memory import pin_memory
from torch.utils.data._utils.collate import default_collate, default_convert
from easyvolcap.utils.parallel_utils import parallel_execution
from easyvolcap.utils.base_utils import dotdict
from easyvolcap.utils.console_utils import *
from enum import Enum, auto
# Copied from enerf (maybe was in turn copied from dtu)
def read_pickle(name):
    """Load a latin1-encoded pickle file (Python-2 compatible) and return its contents."""
    import pickle
    with open(name, 'rb') as handle:
        return pickle.load(handle, encoding='latin1')
def read_cam_file(filename):
    """Parse an MVSNet-style camera text file.

    The file layout is fixed: a 4x4 extrinsic matrix on lines 1-4, a 3x3
    intrinsic matrix on lines 7-9, and depth_min as the first token of
    line 11 (0-indexed).

    Returns:
        (intrinsics, extrinsics, depth_min) as float32 arrays and a float.
    """
    with open(filename) as f:
        lines = [line.rstrip() for line in f.readlines()]
    # extrinsics: lines [1, 5), parsed as a flat float list then reshaped 4x4
    extrinsics = np.fromstring(' '.join(lines[1:5]), dtype=np.float32, sep=' ').reshape((4, 4))
    # intrinsics: lines [7, 10), reshaped 3x3
    intrinsics = np.fromstring(' '.join(lines[7:10]), dtype=np.float32, sep=' ').reshape((3, 3))
    # depth_min: first token of line 11
    depth_min = float(lines[11].split()[0])
    return intrinsics, extrinsics, depth_min
def read_pmn_cam_file(filename):
    """Parse an MVSNet-style camera text file that also carries depth_max.

    Same fixed layout as read_cam_file, except line 11 holds two tokens:
    depth_min and depth_max.

    Returns:
        (intrinsics, extrinsics, depth_min, depth_max).
    """
    with open(filename) as f:
        lines = [line.rstrip() for line in f.readlines()]
    # extrinsics: lines [1, 5), parsed as a flat float list then reshaped 4x4
    extrinsics = np.fromstring(' '.join(lines[1:5]), dtype=np.float32, sep=' ').reshape((4, 4))
    # intrinsics: lines [7, 10), reshaped 3x3
    intrinsics = np.fromstring(' '.join(lines[7:10]), dtype=np.float32, sep=' ').reshape((3, 3))
    # depth range: first two tokens of line 11
    tokens = lines[11].split()
    depth_min = float(tokens[0])
    depth_max = float(tokens[1])
    return intrinsics, extrinsics, depth_min, depth_max
def read_pfm(filename):
file = open(filename, 'rb')
color = None
width = None
height = None
scale = None
endian = None
header = file.readline().decode('utf-8').rstrip()
if header == 'PF':
color = True
elif header == 'Pf':
color = False
else:
raise Exception('Not a PFM file.')
dim_match = re.match(r'^(\d+)\s(\d+)\s$', file.readline().decode('utf-8'))
if dim_match:
width, height = map(int, dim_match.groups())
else:
raise Exception('Malformed PFM header.')
scale = float(file.readline().rstrip())
if scale < 0: # little-endian
endian = '<'
scale = -scale
else:
endian = '>' # big-endian
data = np.fromfile(file, endian + 'f')
shape = (height, width, 3) if color else (height, width)
data = np.reshape(data, shape)
data = np.flipud(data)
file.close()
return data, scale
def generate_video(result_str: str,
                   output: str,
                   fps: int = 30,
                   crf: int = 17,
                   cqv: int = 19,
                   lookahead: int = 20,
                   hwaccel: str = 'cuda',
                   preset: str = 'p7',
                   tag: str = 'hvc1',
                   vcodec: str = 'hevc_nvenc',
                   pix_fmt: str = 'yuv420p', # chrome friendly
                   ):
    """Encode a glob of image frames into a video using ffmpeg (NVENC defaults).

    Args:
        result_str: glob pattern of input frames (fed to -i with -pattern_type glob).
        output: output video path; returned unchanged.
        fps, crf, cqv, lookahead, preset, tag, vcodec, pix_fmt: ffmpeg encoder knobs.
        hwaccel: hardware acceleration backend (default 'cuda').

    NOTE(review): the list mixes ints and strings and embeds quotes in the
    -vf value — presumably `run` (from console_utils) stringifies and joins
    for a shell; confirm, since with shell=False the quotes would reach
    ffmpeg literally.
    """
    cmd = [
        'ffmpeg',
        '-hwaccel', hwaccel,
        '-hide_banner',
        '-loglevel', 'error',
        '-framerate', fps,
        '-f', 'image2',
        '-pattern_type', 'glob',
        '-nostdin', # otherwise you cannot chain commands together
        '-y',
        '-r', fps,
        '-i', result_str,
        '-c:v', vcodec,
        '-preset', preset,
        '-cq:v', cqv,
        '-rc:v', 'vbr',
        '-tag:v', tag,
        '-crf', crf,
        '-pix_fmt', pix_fmt,
        '-rc-lookahead', lookahead,
        '-vf', '"pad=ceil(iw/2)*2:ceil(ih/2)*2"', # avoid yuv420p odd number bug
        output,
    ]
    run(cmd)
    return output
def numpy_to_video(numpy_array: np.ndarray,
                   output_filename: str,
                   fps: float = 30.0,
                   crf: int = 18,
                   cqv: int = 19,
                   lookahead: int = 20,
                   preset='veryslow',
                   vcodec='libx265',
                   ):
    """
    Convert a numpy array (T, H, W, C) to a video using ffmpeg.
    Frames are streamed as raw rgb24 bytes through ffmpeg's stdin.
    Parameters:
    - numpy_array: array (or sequence of (H, W, 3) frames) to be converted.
    - output_filename: the filename of the output video (directories are created).
    - fps: frame rate for the video.
    - crf/cqv/lookahead/preset/vcodec: ffmpeg encoder knobs.
    """
    # Accept either a single (T, H, W, C) array or a list of (H, W, C) frames.
    if isinstance(numpy_array, np.ndarray):
        T, H, W, C = numpy_array.shape
    else:
        T = len(numpy_array)
        H, W, C = numpy_array[0].shape
    assert C == 3, "Expected 3 channels!"
    cmd = [
        'ffmpeg',
        '-hwaccel', 'cuda',
        '-v', 'quiet', '-stats',
        '-y',  # Overwrite output file if it exists
        '-f', 'rawvideo',
        '-vcodec', 'rawvideo',
        '-s', f'{W}x{H}',  # Size of one frame
        '-pix_fmt', 'rgb24',
        '-r', fps,  # Frame rate
        '-i', '-',  # Read from pipe
        '-an',  # No audio
        '-vcodec', vcodec,
        '-preset', preset,
        '-cq:v', cqv,
        '-crf', crf,
        '-rc-lookahead', lookahead,
        '-rc:v', 'vbr',
        '-tag:v', 'hvc1',
        output_filename
    ]
    os.makedirs(dirname(output_filename), exist_ok=True)
    # map(str, ...) stringifies the int/float knobs before exec.
    process = subprocess.Popen(map(str, cmd), stdin=subprocess.PIPE)
    # process.communicate(input=numpy_array.tobytes())
    # Stream frame by frame to bound peak memory instead of one huge buffer.
    for frame in numpy_array:
        process.stdin.write(frame.tobytes())
        # process.stdin.flush()
    process.stdin.close()
    process.communicate()
def get_video_dimensions(input_filename):
    """
    Extract the width and height of a video using ffprobe.
    Parameters:
    - input_filename: The filename of the input video.
    Returns:
    - width and height of the video as ints.
    """
    cmd = [
        'ffprobe',
        '-v', 'error',
        '-select_streams', 'v:0',  # first video stream only
        '-show_entries', 'stream=width,height',
        '-of', 'csv=s=x:p=0',  # prints "WIDTHxHEIGHT"
        input_filename
    ]
    pipe = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    out, _ = pipe.communicate()
    # Parse the single "WxH" line emitted by the csv output format.
    width, height = map(int, out.decode('utf-8').strip().split('x'))
    return width, height
def video_to_numpy(input_filename, hwaccel='cuda', vcodec='hevc_cuvid'):
    """
    Convert a video file to a numpy array (T, H, W, C) using ffmpeg.
    Parameters:
    - input_filename: the filename of the input video.
    - hwaccel: hardware decode backend, or 'none' to skip the flag.
    - vcodec: decoder to force, or 'none' to let ffmpeg pick.
    Returns:
    - uint8 numpy array of decoded rgb24 frames.
    """
    W, H = get_video_dimensions(input_filename)
    cmd = [
        'ffmpeg',
    ]
    if hwaccel != 'none':
        cmd += ['-hwaccel', hwaccel,]
    cmd += [
        '-v', 'quiet', '-stats',
    ]
    if vcodec != 'none':
        cmd += ['-vcodec', vcodec,]
    cmd += [
        '-i', input_filename,
        '-f', 'image2pipe',
        '-pix_fmt', 'rgb24',
        '-vcodec', 'rawvideo',
        '-'
    ]
    pipe = subprocess.Popen(map(str, cmd), stdout=subprocess.PIPE, stderr=subprocess.PIPE, bufsize=10**8)
    raw_data, _ = pipe.communicate()
    # Convert the raw data to numpy array and reshape
    video_np = np.frombuffer(raw_data, dtype=np.uint8)
    # Some hardware decoders emit frames padded to even dimensions; try the
    # padded shape first and crop, falling back to the exact size.
    H2, W2 = (H + 1) // 2 * 2, (W + 1) // 2 * 2
    try:
        video_np = video_np.reshape(-1, H2, W2, 3)[:, :H, :W, :]
    except ValueError as e:
        video_np = video_np.reshape(-1, H, W, 3)
    return video_np
class Visualization(Enum):
    """Enumerates the visualization output types a renderer can produce."""
    # Universal visualization
    RENDER = auto()  # plain rgb render output
    SURFACE = auto()  # surface position (similar to depth)
    DEFORM = auto()  # deformation magnitude (as in correspondence?)
    DEPTH = auto()  # needs a little bit extra computation
    ALPHA = auto()  # occupancy (rendered volume density)
    NORMAL = auto()  # needs extra computation
    FEATURE = auto()  # embedder results
    SEMANTIC = auto()  # semantic nerf related
    SRCINPS = auto()  # source input images for image based rendering
    # Jacobian related
    JACOBIAN = auto()
    # Relighting related
    ENVMAP = auto()
    ALBEDO = auto()
    SHADING = auto()
    ROUGHNESS = auto()
    # Geometry related output
    MESH = auto()
    POINT = auto()
    VOLUME = auto()
class DataSplit(Enum):
    """Dataset split selector: training, testing or validation."""
    TRAIN = auto()
    TEST = auto()
    VAL = auto()
def variance_of_laplacian(image: np.ndarray):
    """Sharpness metric: variance of the Laplacian response (higher = sharper)."""
    # Collapse color images to a single luminance channel first
    if image.ndim == 3 and image.shape[-1] > 1:
        image = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)
    # Rescale [0, 1] floats to 8-bit before filtering
    if image.dtype == np.float32 or image.dtype == np.float64:
        image = (image * 255).astype(np.uint8)
    response = cv2.Laplacian(image, cv2.CV_64F)
    return response.var()
def alpha2sdf(alpha, beta, dists=0.005):
    """Invert a rendered opacity back to an SDF-like value given the beta scale and step size."""
    density = -np.log(1 - alpha) / dists  # opacity -> volume density over the step
    return beta * np.log(2 * beta * density)
def h5_to_dotdict(h5: h5py.File) -> dotdict:
    """Recursively load an h5py file/group into a dotdict of in-memory numpy arrays."""
    out = dotdict()
    for key in h5.keys():
        item = h5[key]
        # Groups recurse; datasets are fully materialized as numpy arrays
        out[key] = h5_to_dotdict(item) if isinstance(item, h5py.Group) else item[:]
    return out
def h5_to_list_of_dotdict(h5: h5py.File) -> list:
    """Load each top-level group of an h5 file as a dotdict, with a progress bar."""
    out = []
    for key in tqdm(h5):
        out.append(h5_to_dotdict(h5[key]))
    return out
def to_h5py(value, h5: h5py.File, key: str = None, compression: str = 'gzip'):
    """Recursively write tensors / arrays / lists / dicts into an h5py file or group."""
    if isinstance(value, torch.Tensor):
        value = value.detach().cpu().numpy()  # tensors are stored as numpy datasets
    if isinstance(value, np.ndarray):
        h5.create_dataset(str(key), data=value, compression=compression)
    elif isinstance(value, list):
        if key is not None:
            h5 = h5.create_group(str(key))  # a list becomes a group keyed by index
        for idx, item in enumerate(value):
            to_h5py(item, h5, idx)
    elif isinstance(value, dict):
        if key is not None:
            h5 = h5.create_group(str(key))
        for name, item in value.items():
            to_h5py(item, h5, name)
    else:
        raise NotImplementedError(f'unsupported type to write to h5: {type(value)}')
def export_h5(batch: dotdict, filename):
    """Serialize a (possibly nested) batch into the h5 file at `filename`."""
    with h5py.File(filename, 'w') as h5:
        to_h5py(batch, h5)
def load_h5(filename):
    """Read an entire h5 file back into a nested dotdict of numpy arrays."""
    with h5py.File(filename, 'r') as h5:
        # Datasets are materialized inside the context, so closing the file is safe
        return h5_to_dotdict(h5)
def merge_faces(faces, *args):
    # Adapted from trimesh: select one attribute (e.g. UV) index per vertex
    """
    Textured meshes reference vertex positions (`v`) and, with the same face
    topology, texture (`vt`) or sometimes normal (`vn`) indices. Vertices with
    differing attribute indices are not really the "same" vertex, but for a
    simple data model we keep one attribute index per vertex instead of
    unmerging.

    Parameters
    -------------
    faces : (n, d) int
      References vertex indices
    *args : (n, d) int
      Parallel index arrays (usually UV or normal indices)

    Returns
    -------------
    new_faces : (m, d) int
      Faces for the masked vertices (unaltered here)
    mask_v : (p,) int
      Mask to apply to the vertex array
    mask_* : (p,) int
      One mask per extra argument selecting matching attribute rows
    """
    # Faces are left untouched; only per-vertex attribute masks are derived
    max_idx = faces.max()
    out = [faces, np.arange(max_idx + 1)]
    for attr in args:
        # Record which attribute row each vertex maps to, per face corner.
        # Assignments may conflict since vertices are not unmerged.
        corner_masks = np.zeros((3, max_idx + 1), dtype=np.int64)
        for row, face_col, attr_col in zip(range(3), faces.T, attr.T):
            corner_masks[row][face_col] = attr_col
        # Use the per-vertex median across the three corners as the most
        # representative attribute index (float median, cast back to int);
        # a column diff + sort would also work but this is fast enough
        out.append(np.median(corner_masks, axis=0).astype(np.int64))
    return out
def get_mesh(verts: torch.Tensor, faces: torch.Tensor, uv: torch.Tensor = None, img: torch.Tensor = None, colors: torch.Tensor = None, normals: torch.Tensor = None, filename: str = "default.ply"):
    """
    Build a trimesh.Trimesh from tensors, with optional vertex/face colors,
    vertex normals and a UV-mapped texture image.

    When `colors` is None, face normals remapped to [0, 1] are used as a
    pseudo-color. `filename` is only used to name the texture material.
    """
    from trimesh import Trimesh
    from trimesh.visual import TextureVisuals
    from trimesh.visual.material import PBRMaterial, SimpleMaterial
    from easyvolcap.utils.mesh_utils import face_normals, loop_subdivision
    verts, faces = to_numpy([verts, faces])
    verts = verts.reshape(-1, 3)
    faces = faces.reshape(-1, 3)
    # MARK: used process=False here to preserve vertex order
    mesh = Trimesh(verts, faces, process=False)
    if colors is None:
        # colors = verts
        colors = face_normals(torch.from_numpy(verts), torch.from_numpy(faces).long()) * 0.5 + 0.5  # normals remapped to [0, 1]
    colors = to_numpy(colors)
    colors = colors.reshape(-1, 3)
    # Append an opaque alpha channel and quantize to uint8 RGBA
    colors = (np.concatenate([colors, np.ones([*colors.shape[:-1], 1])], axis=-1) * 255).astype(np.uint8)
    # Assign per-vertex or per-face colors depending on which count matches
    if len(verts) == len(colors):
        mesh.visual.vertex_colors = colors
    elif len(faces) == len(colors):
        mesh.visual.face_colors = colors
    if normals is not None:
        normals = to_numpy(normals)
        mesh.vertex_normals = normals
    if uv is not None:
        uv = to_numpy(uv)
        uv = uv.reshape(-1, 2)
        img = to_numpy(img)
        img = img.reshape(*img.shape[-3:])
        img = Image.fromarray(np.uint8(img * 255))  # assumes img is float in [0, 1]
        mat = SimpleMaterial(
            image=img,
            diffuse=(0.8, 0.8, 0.8),
            ambient=(1.0, 1.0, 1.0),
        )
        mat.name = os.path.splitext(os.path.split(filename)[1])[0]  # material named after the output file
        texture = TextureVisuals(uv=uv, material=mat)
        mesh.visual = texture
    return mesh
def get_tensor_mesh_data(verts: torch.Tensor, faces: torch.Tensor, uv: torch.Tensor = None, img: torch.Tensor = None, uvfaces: torch.Tensor = None):
    """Flatten mesh components into the tensor shapes pytorch3d expects."""
    # pytorch3d wants a tensor
    verts, faces, uv, img, uvfaces = to_tensor([verts, faces, uv, img, uvfaces])
    # textures = TexturesUV(img, uvfaces, uv)
    # meshes = Meshes(verts, faces, textures)
    return (
        verts.reshape(-1, 3),
        faces.reshape(-1, 3),
        uv.reshape(-1, 2),
        img.reshape(img.shape[-3:]),
        uvfaces.reshape(-1, 3),
    )
def export_npz(batch: dotdict, filename: str):
    """Save a batch to a compressed .npz file (thin alias of export_dotdict)."""
    # NOTE: the annotation used to read `filename: struct` (the stdlib module),
    # an obvious typo for `str`
    export_dotdict(batch, filename)
def export_dotdict(batch: dotdict, filename: str):
    """Convert all leaves of `batch` to numpy and write them to a compressed .npz."""
    # NOTE: the annotation used to read `filename: struct` (the stdlib module),
    # an obvious typo for `str`
    batch = to_numpy(batch)
    np.savez_compressed(filename, **batch)
def load_mesh(filename: str, device='cuda', load_uv=False, load_aux=False, backend='pytorch3d'):
    """
    Load a mesh from .npz / .ply / .obj.

    Parameters:
    - filename: mesh file path
    - device: torch device for the returned tensors (pytorch3d backend only)
    - load_uv: also return uv coordinates and uv faces
    - load_aux: also return the pytorch3d aux structure (populated only for .obj)
    - backend: 'pytorch3d' (default) or 'trimesh' (returns numpy verts/faces)

    Returns:
    - (v, f), (v, f, vm, fm) or (v, f, vm, fm, aux) depending on the flags.
    """
    from pytorch3d.io import load_ply, load_obj
    if backend == 'trimesh':
        import trimesh
        mesh: trimesh.Trimesh = trimesh.load(filename)
        return mesh.vertices, mesh.faces
    # BUGFIX: aux is only assigned on the .obj path; initialize it so that
    # load_aux=True on other formats returns None instead of NameError
    vm, fm, aux = None, None, None
    if filename.endswith('.npz'):
        mesh = np.load(filename)
        v = torch.from_numpy(mesh['verts'])
        f = torch.from_numpy(mesh['faces'])
        if load_uv:
            # NOTE(review): reads 'uvs' while export_mesh writes 'uv' — confirm the writer
            vm = torch.from_numpy(mesh['uvs'])
            fm = torch.from_numpy(mesh['uvfaces'])
    else:
        if filename.endswith('.ply'):
            v, f = load_ply(filename)
        elif filename.endswith('.obj'):
            v, faces_attr, aux = load_obj(filename)
            f = faces_attr.verts_idx
            if load_uv:
                vm = aux.verts_uvs
                fm = faces_attr.textures_idx
        else:
            # include the offending path in the error (was a literal placeholder)
            raise NotImplementedError(f'Unrecognized input format for: {filename}')
    v = v.to(device, non_blocking=True).contiguous()
    f = f.to(device, non_blocking=True).contiguous()
    if load_uv:
        vm = vm.to(device, non_blocking=True).contiguous()
        fm = fm.to(device, non_blocking=True).contiguous()
    if load_uv:
        if load_aux:
            return v, f, vm, fm, aux
        else:
            return v, f, vm, fm
    else:
        return v, f
def load_pts(filename: str):
    """Load a point cloud file; returns (verts, colors, norms, scalars), colors/norms may be None."""
    from pyntcloud import PyntCloud
    cloud = PyntCloud.from_file(filename)
    points = cloud.points
    verts = cloud.xyz

    def gather(names):
        return np.stack([np.asarray(points[n]) for n in names], axis=-1)

    # Colors may be stored as 'red/green/blue' or 'r/g/b'; normalize to float [0, 1]
    if all(c in points for c in ('red', 'green', 'blue')):
        colors = (gather(('red', 'green', 'blue')) / 255).astype(np.float32)
    elif all(c in points for c in ('r', 'g', 'b')):
        colors = (gather(('r', 'g', 'b')) / 255).astype(np.float32)
    else:
        colors = None
    if all(c in points for c in ('nx', 'ny', 'nz')):
        norms = gather(('nx', 'ny', 'nz'))
    else:
        norms = None
    # if 'alpha' in cloud.points:
    #     cloud.points['alpha'] = cloud.points['alpha'] / 255
    # All remaining columns become per-point scalars with a trailing singleton dim
    reserved = ['x', 'y', 'z', 'red', 'green', 'blue', 'r', 'g', 'b', 'nx', 'ny', 'nz']
    scalars = dotdict({k: np.asarray(points[k])[..., None] for k in points if k not in reserved})
    return verts, colors, norms, scalars
def export_pts(pts: torch.Tensor, color: torch.Tensor = None, normal: torch.Tensor = None, scalars: dotdict = dotdict(), filename: str = "default.ply"):
    """Write a point cloud (optional color, normal and scalar channels) to disk via PyntCloud."""
    from pandas import DataFrame
    from pyntcloud import PyntCloud
    columns = dotdict()
    xyz = to_numpy(pts)  # always blocking?
    xyz = xyz.reshape(-1, 3)
    columns.x = xyz[:, 0].astype(np.float32)
    columns.y = xyz[:, 1].astype(np.float32)
    columns.z = xyz[:, 2].astype(np.float32)
    if color is not None:
        rgb = to_numpy(color).reshape(-1, 3)
    else:
        rgb = xyz  # fall back to coordinates as a pseudo-color
    columns.red = (rgb[:, 0] * 255).astype(np.uint8)
    columns.green = (rgb[:, 1] * 255).astype(np.uint8)
    columns.blue = (rgb[:, 2] * 255).astype(np.uint8)
    # if 'alpha' in scalars:
    #     data.alpha = (scalars.alpha * 255).astype(np.uint8)
    if normal is not None:
        n = to_numpy(normal)
        n = n / (np.linalg.norm(n, axis=-1, keepdims=True) + 1e-13)  # renormalize defensively
        n = n.reshape(-1, 3)
        columns.nx = n[:, 0].astype(np.float32)
        columns.ny = n[:, 1].astype(np.float32)
        columns.nz = n[:, 2].astype(np.float32)
    if scalars is not None:
        for name, value in to_numpy(scalars).items():
            columns[name] = value.reshape(-1, 1)[:, 0]
    cloud = PyntCloud(DataFrame(columns))  # construct the data
    out_dir = dirname(filename)
    if out_dir: os.makedirs(out_dir, exist_ok=True)
    return cloud.to_file(filename)
def export_lines(verts: torch.Tensor, lines: torch.Tensor, color: torch.Tensor = None, filename: str = 'default.ply'):
    """
    Write a line set to a binary-little-endian PLY file.

    Parameters:
    - verts: (..., 3) line endpoints
    - lines: (..., 2) index pairs into verts
    - color: (..., 3) per-vertex color in [0, 1]; defaults to the vertex positions
    - filename: output .ply path
    """
    if color is None:
        color = verts  # reuse positions as a pseudo-color
    verts, lines, color = to_numpy([verts, lines, color])  # always blocking?
    # BUGFIX: accept any floating dtype, not just float32 — float64 colors
    # previously reached struct.pack unconverted and raised
    if np.issubdtype(color.dtype, np.floating):
        color = (color * 255).astype(np.uint8)
    verts = verts.reshape(-1, 3)
    lines = lines.reshape(-1, 2)
    color = color.reshape(-1, 3)
    # Create the output directory, consistent with the other exporters
    if dirname(filename):
        os.makedirs(dirname(filename), exist_ok=True)
    # Write to PLY
    with open(filename, 'wb') as f:
        # PLY header
        f.write(b"ply\n")
        f.write(b"format binary_little_endian 1.0\n")
        f.write(f"element vertex {len(verts)}\n".encode())
        f.write(b"property float x\n")
        f.write(b"property float y\n")
        f.write(b"property float z\n")
        f.write(b"property uchar red\n")
        f.write(b"property uchar green\n")
        f.write(b"property uchar blue\n")
        f.write(f"element edge {len(lines)}\n".encode())
        f.write(b"property int vertex1\n")
        f.write(b"property int vertex2\n")
        f.write(b"end_header\n")
        # Write vertices and colors
        for v, c in zip(verts, color):
            f.write(struct.pack('fffBBB', v[0], v[1], v[2], c[0], c[1], c[2]))
        # Write line indices
        for l in lines:
            f.write(struct.pack('ii', l[0], l[1]))
def export_camera(c2w: torch.Tensor, ixt: torch.Tensor = None, col: torch.Tensor = torch.tensor([50, 50, 200]), axis_size=0.10, filename: str = 'default.ply'):
    """
    Export camera pose(s) as a colored wireframe frustum plus RGB axis gizmos,
    written to a PLY line file via export_lines.

    c2w: camera-to-world transform(s); only the first 3 rows are used
    ixt: optional intrinsics used to scale the frustum by aspect and focal length
    col: color of the frustum edges
    axis_size: base gizmo size (rescaled by focal / 1000 when ixt is given)
    """
    verts = []
    lines = []
    rgbs = []
    def add_line(p0: torch.Tensor, p1: torch.Tensor, col: torch.Tensor):
        # Append one line segment (per batch element) with a broadcast color
        # Add a and b vertices
        verts.append(p0)  # N, M, 3
        verts.append(p1)  # N, M, 3
        sh = p0.shape[:-1]
        # Add the vertex colors
        col = torch.broadcast_to(col, sh + (3,))
        rgbs.append(col)
        rgbs.append(col)
        # Add the line indices, offset into the flattened vertex list
        new = p0.numel() // 3  # number of new elements
        curr = new * (len(verts) - 2)  # assume all previous elements are of the same size
        start = torch.arange(curr, curr + new)
        end = torch.arange(curr + new, curr + new * 2)
        line = torch.stack([start, end], dim=-1)  # NM, 2
        line = line.view(sh + (2,))
        lines.append(line)
    c2w = c2w[..., :3, :]
    p = c2w[..., 3]  # camera center (translation column of c2w)
    # Frustum proportions derived from intrinsics, with fallbacks when absent
    if ixt is None: aspect = 1.0
    else: aspect = ixt[..., 0, 0][..., None] / ixt[..., 1, 1][..., None]
    if ixt is None: focal = 1000
    else: focal = (ixt[..., 0, 0][..., None] + ixt[..., 1, 1][..., None]) / 2
    axis_size = focal * axis_size / 1000
    xs = axis_size * aspect
    ys = axis_size
    zs = axis_size * aspect * 2
    # Four corners of the frustum's far plane in world space
    a = p + xs * c2w[..., 0] + ys * c2w[..., 1] + zs * c2w[..., 2]
    b = p - xs * c2w[..., 0] + ys * c2w[..., 1] + zs * c2w[..., 2]
    c = p - xs * c2w[..., 0] - ys * c2w[..., 1] + zs * c2w[..., 2]
    d = p + xs * c2w[..., 0] - ys * c2w[..., 1] + zs * c2w[..., 2]
    # RGB gizmo for the camera's local x / y / z axes
    add_line(p, p + axis_size * c2w[..., 0], torch.tensor([255, 64, 64]))
    add_line(p, p + axis_size * c2w[..., 1], torch.tensor([64, 255, 64]))
    add_line(p, p + axis_size * c2w[..., 2], torch.tensor([64, 64, 255]))
    # Frustum edges: center to corners, then the far-plane rectangle
    add_line(p, a, col)
    add_line(p, b, col)
    add_line(p, c, col)
    add_line(p, d, col)
    add_line(a, b, col)
    add_line(b, c, col)
    add_line(c, d, col)
    add_line(d, a, col)
    verts = torch.stack(verts)
    lines = torch.stack(lines)
    rgbs = torch.stack(rgbs)
    export_lines(verts, lines, rgbs, filename=filename)
def export_mesh(verts: torch.Tensor, faces: torch.Tensor, uv: torch.Tensor = None, img: torch.Tensor = None, uvfaces: torch.Tensor = None, colors: torch.Tensor = None, normals: torch.Tensor = None, filename: str = "default.ply", subdivision=0):
    """
    Export a (possibly textured) mesh to .npz, .ply or .obj.

    - .npz: saves all non-None components as compressed arrays
    - .ply/.obj without uvfaces: goes through trimesh (get_mesh)
    - .ply/.obj with uvfaces: goes through pytorch3d's save_obj
    Raises NotImplementedError for any other extension.
    """
    if dirname(filename): os.makedirs(dirname(filename), exist_ok=True)
    if subdivision > 0:
        from easyvolcap.utils.mesh_utils import face_normals, loop_subdivision
        verts, faces = loop_subdivision(verts, faces, subdivision)
    if filename.endswith('.npz'):
        # plain dict literal replaces the old collect_args(**kwargs) indirection
        kwargs = dict(verts=verts, faces=faces, uv=uv, img=img, uvfaces=uvfaces, colors=colors, normals=normals)
        ret = dotdict({k: v for k, v in kwargs.items() if v is not None})  # drop missing components
        export_dotdict(ret, filename)
    elif filename.endswith('.ply') or filename.endswith('.obj'):
        if uvfaces is None:
            mesh = get_mesh(verts, faces, uv, img, colors, normals, filename)
            mesh.export(filename)
        else:
            from pytorch3d.io import save_obj
            verts, faces, uv, img, uvfaces = get_tensor_mesh_data(verts, faces, uv, img, uvfaces)
            save_obj(filename, verts, faces, verts_uvs=uv, faces_uvs=uvfaces, texture_map=img)
    else:
        # include the offending path in the error (was a literal placeholder)
        raise NotImplementedError(f'Unrecognized input format for: {filename}')
def export_pynt_pts_alone(pts, color=None, filename="default.ply"):
    """
    Write a bare point cloud (optionally colored) to disk via PyntCloud.

    - pts: (N, 3) numpy array or torch tensor
    - color: (N, 3) values already in [0, 255]; defaults to positions * 255
    """
    import pandas as pd
    from pyntcloud import PyntCloud
    data = {}
    pts = pts if isinstance(pts, np.ndarray) else pts.detach().cpu().numpy()
    pts = pts.reshape(-1, 3)
    data['x'] = pts[:, 0].astype(np.float32)
    data['y'] = pts[:, 1].astype(np.float32)
    data['z'] = pts[:, 2].astype(np.float32)
    if color is not None:
        color = color if isinstance(color, np.ndarray) else color.detach().cpu().numpy()
        color = color.reshape(-1, 3)
        data['red'] = color[:, 0].astype(np.uint8)
        data['green'] = color[:, 1].astype(np.uint8)
        data['blue'] = color[:, 2].astype(np.uint8)
    else:
        data['red'] = (pts[:, 0] * 255).astype(np.uint8)
        data['green'] = (pts[:, 1] * 255).astype(np.uint8)
        data['blue'] = (pts[:, 2] * 255).astype(np.uint8)
    df = pd.DataFrame(data)
    cloud = PyntCloud(df)  # construct the data
    # BUGFIX: `dirname = dirname(filename)` made `dirname` a local and raised
    # UnboundLocalError on the call; use a distinct local name
    out_dir = dirname(filename)
    if out_dir: os.makedirs(out_dir, exist_ok=True)
    return cloud.to_file(filename)
def export_o3d_pts(pts: torch.Tensor, filename: str = "default.ply"):
    """Write a bare point cloud to disk via open3d."""
    import open3d as o3d
    xyz = to_numpy(pts).reshape(-1, 3)
    pcd = o3d.geometry.PointCloud()
    pcd.points = o3d.utility.Vector3dVector(xyz)
    return o3d.io.write_point_cloud(filename, pcd)
def export_o3d_pcd(pts: torch.Tensor, rgb: torch.Tensor, normal: torch.Tensor, filename="default.ply"):
    """Write a colored point cloud with normals to disk via open3d."""
    import open3d as o3d
    pts, rgb, normal = to_numpy([pts, rgb, normal])
    pcd = o3d.geometry.PointCloud()
    pcd.points = o3d.utility.Vector3dVector(pts.reshape(-1, 3))
    pcd.colors = o3d.utility.Vector3dVector(rgb.reshape(-1, 3))
    pcd.normals = o3d.utility.Vector3dVector(normal.reshape(-1, 3))
    return o3d.io.write_point_cloud(filename, pcd)
def export_pcd(pts: torch.Tensor, rgb: torch.Tensor, occ: torch.Tensor, filename="default.ply"):
    """Write a point cloud with float color + alpha (plus duplicate scalar fields) via PyntCloud."""
    import pandas as pd
    from pyntcloud import PyntCloud
    pts, rgb, occ = to_numpy([pts, rgb, occ])
    pts = pts.reshape(-1, 3)
    rgb = rgb.reshape(-1, 3)
    occ = occ.reshape(-1, 1)
    # MARK: CloudCompare bad, set first to 0, last to 1 (forces full scalar range)
    for i in range(3):
        rgb[0, i] = 0
        rgb[-1, i] = 1
    occ[0, 0] = 0
    occ[-1, 0] = 1
    data = dotdict()
    data.x = pts[:, 0]
    data.y = pts[:, 1]
    data.z = pts[:, 2]
    # TODO: maybe, for compability, save color as uint?
    # currently saving as float number from [0, 1]
    data.red = rgb[:, 0]
    data.green = rgb[:, 1]
    data.blue = rgb[:, 2]
    data.alpha = occ[:, 0]
    # MARK: We're saving extra scalars for loading in CloudCompare
    # can't assign same property to multiple fields
    data.r = rgb[:, 0]
    data.g = rgb[:, 1]
    data.b = rgb[:, 2]
    data.a = occ[:, 0]
    df = pd.DataFrame(data)
    cloud = PyntCloud(df)  # construct the data
    # BUGFIX: `dirname = dirname(filename)` made `dirname` a local and raised
    # UnboundLocalError on the call; use a distinct local name
    out_dir = dirname(filename)
    if out_dir: os.makedirs(out_dir, exist_ok=True)
    return cloud.to_file(filename)
def load_rgb_image(img_path) -> np.ndarray:
    """Read an image from disk and return it in RGB channel order (cv2 loads BGR)."""
    # return cv2.imread(img_path, cv2.IMREAD_COLOR)[..., ::-1].copy()  # removing the stride (for conversion to tensor)
    bgr = cv2.imread(img_path, cv2.IMREAD_COLOR)
    return bgr[..., [2, 1, 0]]  # fancy indexing avoids a negative stride
def load_unchanged_image(img_path) -> np.ndarray:
    """Read an image from disk without any dtype or channel conversion."""
    return cv2.imread(img_path, cv2.IMREAD_UNCHANGED)
def load_npz(index, folder):
    """Load `{folder}/{index}.npz` into a dotdict of arrays."""
    archive = np.load(os.path.join(folder, f"{index}.npz"))
    return dotdict({**archive})
def load_dotdict(path):
    """Load an .npz archive from `path` into a dotdict."""
    return dotdict({**np.load(path)})
def start_save_npz(index, dir, param: dict, remove_batch=True):
    """Schedule an asynchronous npz save on the running event loop and return the task."""
    return asyncio.create_task(async_save_npz(index, dir, param, remove_batch))
async def async_save_npz(index, dir, param: dict, remove_batch=True):
    """Async wrapper around save_npz (the actual write remains synchronous)."""
    log(f"Trying to save: {index}")
    save_npz(index, dir, param, remove_batch)
def save_img(index, dir, img: torch.Tensor, remove_batch=True, remap=False, flip=False):
    """
    Save image(s) to `dir` as png.

    - remap: scale values by 255 and cast to uint8 before writing
    - flip: reverse the channel order (e.g. RGB <-> BGR)
    - remove_batch: treat the leading dim as a batch and write one file per item
    """
    img = to_numpy(img)
    if remap:
        img *= 255
        img = img.astype(np.uint8)
    if flip:
        img = img[..., ::-1]
    if not remove_batch:
        cv2.imwrite(os.path.join(dir, f"{index}.png"), img)
        return
    n_batch = img.shape[0]
    for b in range(n_batch):
        # flatten the (index, batch) pair into one global file index
        cv2.imwrite(os.path.join(dir, f"{index*n_batch + b}.png"), img[b])
def save_npz(index, dir, param: dict, remove_batch=False):
    """Save a dict of arrays to compressed npz file(s), optionally splitting the batch dim."""
    param = to_numpy(param)
    if remove_batch:
        n_batch = param[next(iter(param))].shape[0]  # batch size taken from the first entry
        for b in range(n_batch):
            single = {k: v[b] for k, v in param.items()}
            np.savez_compressed(os.path.join(dir, f"{index*n_batch + b}.npz"), **single)
    else:
        np.savez_compressed(os.path.join(dir, f"{index}.npz"), **param)
def to_cuda(batch, device="cuda", ignore_list: bool = False) -> torch.Tensor:
    """
    Recursively move a nested structure of tensors to `device`.

    - dict values are converted (except the 'meta' entry, which is kept as-is)
    - lists/tuples are converted element-wise unless ignore_list is True, in
      which case they are collated into a single tensor. BUGFIX: the flag was
      previously accepted but never honored, unlike its siblings
      to_tensor / to_cpu / to_numpy.
    - anything else is wrapped with torch.as_tensor on the target device
    """
    if isinstance(batch, (tuple, list)) and not ignore_list:
        batch = [to_cuda(b, device, ignore_list) for b in batch]
    elif isinstance(batch, dict):
        batch = dotdict({k: (to_cuda(v, device, ignore_list) if k != "meta" else v) for k, v in batch.items()})
    elif isinstance(batch, torch.Tensor):
        batch = batch.to(device, non_blocking=True)
    else:  # numpy and others
        batch = torch.as_tensor(batch, device=device)
    return batch
def to_x_if(batch, x: str, cond):
    """
    Like to_x, but leaf tensors/arrays are only converted when cond(x) is truthy.

    NOTE(review): list and dict elements are delegated to to_x, which ignores
    `cond` entirely — confirm whether nested structures should honor it too.
    """
    if isinstance(batch, (tuple, list)):
        batch = [to_x(b, x) for b in batch]
    elif isinstance(batch, dict):
        batch = dotdict({k: to_x(v, x) for k, v in batch.items()})
    elif isinstance(batch, torch.Tensor):
        if cond(x):
            batch = batch.to(x, non_blocking=True)
    elif isinstance(batch, np.ndarray):  # numpy and others
        if cond(x):
            batch = torch.as_tensor(batch).to(x, non_blocking=True)
    else:
        pass  # do nothing here, used for typed in to_x for methods
    # FIXME: Incosistent behavior here, might lead to undebuggable bugs
    return batch
def to_x(batch, x: str) -> Union[torch.Tensor, dotdict[str, torch.Tensor]]:
    """Recursively move/cast a nested structure to device or dtype `x`."""
    if isinstance(batch, (tuple, list)):
        return [to_x(b, x) for b in batch]
    if isinstance(batch, dict):
        return dotdict({k: to_x(v, x) for k, v in batch.items()})
    if isinstance(batch, torch.Tensor):
        return batch.to(x, non_blocking=True)
    if isinstance(batch, np.ndarray):  # numpy and others
        return torch.as_tensor(batch).to(x, non_blocking=True)
    # FIXME: Incosistent behavior here, might lead to undebuggable bugs
    return batch  # left untouched, used for typed in to_x for methods
def to_tensor(batch, ignore_list: bool = False) -> Union[torch.Tensor, dotdict[str, torch.Tensor]]:
    """Recursively convert a nested structure to torch tensors (lists stay lists unless ignore_list)."""
    if isinstance(batch, (tuple, list)) and not ignore_list:
        return [to_tensor(b, ignore_list) for b in batch]
    if isinstance(batch, dict):
        return dotdict({k: to_tensor(v, ignore_list) for k, v in batch.items()})
    if isinstance(batch, torch.Tensor):
        return batch
    return torch.as_tensor(batch)  # numpy and others
def to_list(batch, non_blocking=False) -> Union[List, Dict, np.ndarray]:  # almost always exporting, should block
    """Recursively convert tensors and numpy arrays in a nested structure to plain Python lists."""
    if isinstance(batch, (tuple, list)):
        batch = [to_list(b, non_blocking) for b in batch]
    elif isinstance(batch, dict):
        batch = dotdict({k: to_list(v, non_blocking) for k, v in batch.items()})
    elif isinstance(batch, torch.Tensor):
        batch = batch.detach().to('cpu', non_blocking=non_blocking).numpy().tolist()
    elif isinstance(batch, np.ndarray):
        # BUGFIX: this branch previously re-tested torch.Tensor and was
        # unreachable, leaving numpy arrays unconverted
        batch = batch.tolist()
    else:  # others, keep as is
        pass
    return batch
def to_cpu(batch, non_blocking=False, ignore_list: bool = False) -> torch.Tensor:
    """Recursively move a nested structure of tensors to the CPU (other leaves go through as_tensor)."""
    if isinstance(batch, (tuple, list)) and not ignore_list:
        return [to_cpu(b, non_blocking, ignore_list) for b in batch]
    if isinstance(batch, dict):
        return dotdict({k: to_cpu(v, non_blocking, ignore_list) for k, v in batch.items()})
    if isinstance(batch, torch.Tensor):
        return batch.detach().to('cpu', non_blocking=non_blocking)
    return torch.as_tensor(batch, device="cpu")  # numpy and others
def to_numpy(batch, non_blocking=False, ignore_list: bool = False) -> Union[List, Dict, np.ndarray]:  # almost always exporting, should block
    """Recursively convert a nested structure of tensors to numpy arrays."""
    if isinstance(batch, (tuple, list)) and not ignore_list:
        return [to_numpy(b, non_blocking, ignore_list) for b in batch]
    if isinstance(batch, dict):
        return dotdict({k: to_numpy(v, non_blocking, ignore_list) for k, v in batch.items()})
    if isinstance(batch, torch.Tensor):
        return batch.detach().to('cpu', non_blocking=non_blocking).numpy()
    return np.asarray(batch)  # numpy and others
def remove_batch(batch) -> Union[torch.Tensor, np.ndarray]:
    """Recursively strip the leading batch dimension (index [0]) from a nested structure."""
    if isinstance(batch, (tuple, list)):
        return [remove_batch(b) for b in batch]
    if isinstance(batch, dict):
        return dotdict({k: remove_batch(v) for k, v in batch.items()})
    if isinstance(batch, (torch.Tensor, np.ndarray)):  # numpy and others
        return batch[0]
    return torch.as_tensor(batch)[0]
def add_batch(batch) -> Union[torch.Tensor, np.ndarray]:
    """Recursively prepend a singleton batch dimension ([None]) to a nested structure."""
    if isinstance(batch, (tuple, list)):
        return [add_batch(b) for b in batch]
    if isinstance(batch, dict):
        return dotdict({k: add_batch(v) for k, v in batch.items()})
    if isinstance(batch, (torch.Tensor, np.ndarray)):  # numpy and others
        return batch[None]
    return torch.as_tensor(batch)[None]
def add_iter(batch, iter, total) -> Union[torch.Tensor, np.ndarray]:
    """Attach the current iteration and the training fraction (iter / total) to the batch."""
    batch = add_scalar(batch, iter, name="iter")
    return add_scalar(batch, iter / total, name="frac")  # training fraction and current iteration
def add_scalar(batch, value, name) -> Union[torch.Tensor, np.ndarray]:
    """
    Store `value` (as a 0-dim tensor) under `name` in the batch and in batch['meta'].

    Lists/tuples are handled recursively; contained dicts are mutated in place.
    BUGFIX: previously raised KeyError when 'meta' was absent — the meta dict
    is now created on demand.
    """
    if isinstance(batch, (tuple, list)):
        for b in batch:
            add_scalar(b, value, name)
    if isinstance(batch, dict):
        batch[name] = torch.tensor(value)
        if 'meta' not in batch:
            batch['meta'] = dotdict()
        batch['meta'][name] = torch.tensor(value)
    return batch
def get_voxel_grid_and_update_bounds(voxel_size: Union[List, np.ndarray], bounds: Union[List, np.ndarray]):
    """
    Build a regular voxel grid covering `bounds` with spacing `voxel_size`,
    and return the actual bounds spanned by the sampled grid.

    The recomputed bounds matter because F.grid_sample (align_corners=True)
    interprets the volume relative to its corner samples: unless the requested
    bounds are exactly divisible by the voxel size in every direction, the
    grid's true extent differs from the requested one.
    # TODO: is it possible to somehow get rid of this book-keeping step

    Returns:
    - pts: (Nx, Ny, Nz, 3) float32 grid coordinates
    - bounds: (2, 3) float32 actual [min, max] spanned by pts
    """
    if isinstance(voxel_size, List):
        voxel_size = np.array(voxel_size)
        bounds = np.array(bounds)
    # voxel_size: e.g. [0.005, 0.005, 0.005]; bounds: requested [min, max] box
    axes = [
        np.arange(bounds[0, i], bounds[1, i] + voxel_size[i] / 2, voxel_size[i])
        for i in range(3)
    ]
    pts = np.stack(np.meshgrid(*axes, indexing='ij'), axis=-1).astype(np.float32)
    bounds = np.stack([pts[0, 0, 0], pts[-1, -1, -1]], axis=0).astype(np.float32)
    return pts, bounds
def get_rigid_transform(pose: np.ndarray, joints: np.ndarray, parents: np.ndarray):
    """
    Numpy convenience wrapper around the no-batch rigid-transform routine
    from blend_utils; converts inputs to tensors and results back to numpy.

    pose: N, 3
    joints: N, 3
    parents: N
    """
    from easyvolcap.utils.blend_utils import get_rigid_transform_nobatch as net_get_rigid_transform
    pose, joints, parents = default_convert([pose, joints, parents])
    J, A = net_get_rigid_transform(pose, joints, parents)
    J, A = to_numpy([J, A])
    return J, A
def get_bounds(xyz, padding=0.05):
    """Axis-aligned bounding box of a point set, expanded by `padding` on every side."""
    lo = np.min(xyz, axis=0) - padding
    hi = np.max(xyz, axis=0) + padding
    return np.stack([lo, hi], axis=0)
def load_image_file(img_path: str, ratio=1.0):
    """
    Load an image from disk as a float32 array normalized to [0, 1], optionally
    downscaled by `ratio`.

    JPEGs go through PIL with draft-mode decoding (fast approximate downscale),
    falling back to cv2.resize when the draft size does not match; other
    formats go through cv2 with a BGR->RGB swap.
    """
    if img_path.endswith('.jpg') or img_path.endswith('.JPG') or img_path.endswith('.jpeg') or img_path.endswith('.JPEG'):
        im = Image.open(img_path)
        w, h = im.width, im.height
        draft = im.draft('RGB', (int(w * ratio), int(h * ratio)))  # fast approximate JPEG downscale
        img = np.asarray(im)
        if np.issubdtype(img.dtype, np.integer):
            img = img.astype(np.float32) / np.iinfo(img.dtype).max  # normalize
        # NOTE(review): `and` binds tighter than `or` here, so a mismatched
        # draft triggers a resize even when ratio == 1.0 — confirm whether
        # `ratio != 1.0 and (...)` was intended
        if ratio != 1.0 and \
                draft is None or \
                draft is not None and \
                (draft[1][2] != int(w * ratio) or
                 draft[1][3] != int(h * ratio)):
            img = cv2.resize(img, (int(w * ratio), int(h * ratio)), interpolation=cv2.INTER_AREA)
            if img.ndim == 2:  # MARK: cv.resize will discard the last dimension of mask images
                img = img[..., None]
        return img
    else:
        img = cv2.imread(img_path, cv2.IMREAD_UNCHANGED)
        if img.ndim >= 3 and img.shape[-1] >= 3:
            img[..., :3] = img[..., [2, 1, 0]]  # BGR to RGB
        if np.issubdtype(img.dtype, np.integer):
            img = img.astype(np.float32) / np.iinfo(img.dtype).max  # normalize
        if ratio != 1.0:
            height, width = img.shape[:2]
            img = cv2.resize(img, (int(width * ratio), int(height * ratio)), interpolation=cv2.INTER_AREA)
            if img.ndim == 2:  # MARK: cv.resize will discard the last dimension of mask images
                img = img[..., None]
        return img
def load_depth(depth_file: str):
    """
    Load a depth map from .npy, .pfm, .hdr or .exr into an (H, W, 1) array.

    Raises NotImplementedError for any other extension.
    """
    if depth_file.endswith('.npy'):
        depth = np.load(depth_file)[..., None]  # H, W, 1
    elif depth_file.endswith('.pfm'):
        depth, scale = read_pfm(depth_file)
        depth = depth / scale  # undo the pfm scale factor
        if depth.ndim == 2:
            depth = depth[..., None]  # H, W, 1
        depth = depth[..., :1]
    elif depth_file.endswith('.hdr') or depth_file.endswith('.exr'):
        if depth_file.endswith('.exr'):
            # ... https://github.com/opencv/opencv/issues/21326
            # NOTE(review): setting this after cv2 has been imported may be too late — confirm
            os.environ["OPENCV_IO_ENABLE_OPENEXR"] = "1"
        depth = load_image(depth_file)
        depth = depth[..., :1]  # keep only the first channel
    else:
        raise NotImplementedError
    return depth  # H, W, 1
def load_image(path: Union[str, np.ndarray], ratio: float = 1.0):
    """
    Load an image either from a file path or from an in-memory encoded buffer.

    Parameters:
    - path: filename string, or a numpy byte buffer of encoded image data
    - ratio: optional downscale ratio applied on load (annotation fixed: was `int`)

    Raises NotImplementedError for any other input type.
    """
    if isinstance(path, str):
        return load_image_file(path, ratio)
    elif isinstance(path, np.ndarray):
        return load_image_from_bytes(path, ratio)
    else:
        raise NotImplementedError('Unsupported overloading')  # typo fix: previously said 'Supported'
def load_unchanged(img_path: str, ratio=1.0):
    """
    Load an image without dtype normalization, optionally downscaled by `ratio`.

    JPEGs come through PIL (already RGB, with draft-mode downscale); other
    formats come through cv2 with a BGR->RGB swap of the first three channels.
    """
    if img_path.endswith('.jpg') or img_path.endswith('.JPG') or img_path.endswith('.jpeg') or img_path.endswith('.JPEG'):
        im = Image.open(img_path)
        w, h = im.width, im.height
        draft = im.draft('RGB', (int(w * ratio), int(h * ratio)))
        img = np.asarray(im).copy()  # avoid writing error and already in RGB instead of BGR
        # NOTE(review): same operator-precedence caveat as load_image_file —
        # the `and` binds tighter than the `or`; confirm intent
        if ratio != 1.0 and \
                draft is None or \
                draft is not None and \
                (draft[1][2] != int(w * ratio) or \
                 draft[1][3] != int(h * ratio)):
            img = cv2.resize(img, (int(w * ratio), int(h * ratio)), interpolation=cv2.INTER_AREA)
            if img.ndim == 2:  # MARK: cv.resize will discard the last dimension of mask images
                img = img[..., None]
        return img
    else:
        img = cv2.imread(img_path, cv2.IMREAD_UNCHANGED)
        if img.shape[-1] >= 3:
            img[..., :3] = img[..., [2, 1, 0]]  # BGR to RGB
        if ratio != 1.0:
            height, width = img.shape[:2]
            img = cv2.resize(img, (int(width * ratio), int(height * ratio)), interpolation=cv2.INTER_AREA)
            if img.ndim == 2:  # MARK: cv.resize will discard the last dimension of mask images
                img = img[..., None]
        return img
def load_mask(msk_path: str, ratio=1.0):
    """
    Load single-channel binary mask

    Returns a uint8 (H, W, 1) array with values in {0, 1}: the stored range is
    rescaled to [0, 255] and thresholded at 128. Optionally downscaled by
    `ratio` with nearest-neighbor interpolation.
    """
    if msk_path.endswith('.jpg') or msk_path.endswith('.JPG') or msk_path.endswith('.jpeg') or msk_path.endswith('.JPEG'):
        msk = Image.open(msk_path)
        w, h = msk.width, msk.height
        draft = msk.draft('L', (int(w * ratio), int(h * ratio)))
        msk = np.asarray(msk).astype(int)  # read the actual file content from drafted disk
        msk = msk * 255 / msk.max()  # if max already 255, do nothing; NOTE(review): divides by zero for an all-black mask
        msk = msk[..., None] > 128  # make it binary
        msk = msk.astype(np.uint8)
        # NOTE(review): same operator-precedence caveat as load_image_file
        if ratio != 1.0 and \
                draft is None or \
                draft is not None and \
                (draft[1][2] != int(w * ratio) or
                 draft[1][3] != int(h * ratio)):
            msk = cv2.resize(msk.astype(np.uint8), (int(w * ratio), int(h * ratio)), interpolation=cv2.INTER_NEAREST)[..., None]
        return msk
    else:
        msk = cv2.imread(msk_path, cv2.IMREAD_GRAYSCALE).astype(int)  # BGR to GRAY
        msk = msk * 255 / msk.max()  # if max already 255, do nothing
        msk = msk[..., None] > 128  # make it binary
        msk = msk.astype(np.uint8)
        if ratio != 1.0:
            height, width = msk.shape[:2]
            msk = cv2.resize(msk.astype(np.uint8), (int(width * ratio), int(height * ratio)), interpolation=cv2.INTER_NEAREST)[..., None]
            # WTF: https://stackoverflow.com/questions/68502581/image-channel-missing-after-resizing-image-with-opencv
        return msk
def save_unchanged(img_path: str, img: np.ndarray, quality=100, compression=6):
    """
    Save an image without dtype conversion (RGB channels are swapped to BGR for cv2).

    `quality` applies to jpg, `compression` to png; .hdr files are written as-is.
    """
    if img.shape[-1] >= 3:
        img = img.copy()  # BUGFIX: swap on a copy instead of mutating the caller's array
        img[..., :3] = img[..., [2, 1, 0]]
    if dirname(img_path):
        # BUGFIX: directory creation used to happen after the .hdr early return,
        # so writing .hdr into a new directory failed
        os.makedirs(dirname(img_path), exist_ok=True)
    if img_path.endswith('.hdr'):
        return cv2.imwrite(img_path, img)  # nothing to say about hdr
    return cv2.imwrite(img_path, img, [cv2.IMWRITE_JPEG_QUALITY, quality, cv2.IMWRITE_PNG_COMPRESSION, compression])
def save_image(img_path: str, img: np.ndarray, jpeg_quality=75, png_compression=9, save_dtype=np.uint8):
    """
    Save an image (numpy array or torch tensor) to disk, handling dtype,
    channel order and per-format conversion.

    - 4D inputs are tiled along y; CHW inputs are transposed to HWC
    - integer inputs are normalized to [0, 1] floats first
    - RGB(A) channels are swapped to BGR(A) for cv2
    - .png is quantized to `save_dtype`, .jpg to uint8 (color only),
      .hdr keeps float color, .exr enables the OpenEXR backend
    """
    if isinstance(img, torch.Tensor): img = img.detach().cpu().numpy()  # convert to numpy arrays
    if img.ndim == 4: img = np.concatenate(img, axis=0)  # merge into one image along y axis
    if img.ndim == 2: img = img[..., None]  # append last dim
    # Heuristic CHW -> HWC transpose for 3- or 4-channel inputs
    if img.shape[0] < img.shape[-1] and (img.shape[0] == 3 or img.shape[0] == 4): img = np.transpose(img, (1, 2, 0))
    if np.issubdtype(img.dtype, np.integer):
        img = img / np.iinfo(img.dtype).max  # to float
    if img.shape[-1] >= 3:
        if not img.flags['WRITEABLE']:
            img = img.copy()  # avoid assignment only inputs
        img[..., :3] = img[..., [2, 1, 0]]  # RGB -> BGR for cv2
    if dirname(img_path):
        os.makedirs(dirname(img_path), exist_ok=True)
    if img_path.endswith('.png'):
        dtype_max = np.iinfo(save_dtype).max  # renamed from `max` to stop shadowing the builtin
        img = (img * dtype_max).clip(0, dtype_max).astype(save_dtype)
    elif img_path.endswith('.jpg'):
        img = img[..., :3]  # only color
        img = (img * 255).clip(0, 255).astype(np.uint8)
    elif img_path.endswith('.hdr'):
        img = img[..., :3]  # only color
    elif img_path.endswith('.exr'):
        # ... https://github.com/opencv/opencv/issues/21326
        # NOTE(review): setting this after cv2 has been imported may be too late — confirm
        os.environ["OPENCV_IO_ENABLE_OPENEXR"] = "1"
    else:
        # should we try to discard alpha channel here?
        # exr could store alpha channel
        pass  # no transformation for other unspecified file formats
    # log(f'Writing image to: {img_path}')
    return cv2.imwrite(img_path, img, [cv2.IMWRITE_JPEG_QUALITY, jpeg_quality,
                                       cv2.IMWRITE_PNG_COMPRESSION, png_compression,
                                       cv2.IMWRITE_EXR_COMPRESSION, cv2.IMWRITE_EXR_COMPRESSION_PIZ])
def save_mask(msk_path: str, msk: np.ndarray, quality=75, compression=9):
    """Save a binary/float mask (scaled by 255) as a single-channel image."""
    out_dir = dirname(msk_path)
    if out_dir:
        os.makedirs(out_dir, exist_ok=True)
    if msk.ndim == 2:
        msk = msk[..., None]
    return cv2.imwrite(msk_path, msk[..., 0] * 255, [cv2.IMWRITE_JPEG_QUALITY, quality,
                                                     cv2.IMWRITE_PNG_COMPRESSION, compression,
                                                     cv2.IMWRITE_EXR_COMPRESSION, cv2.IMWRITE_EXR_COMPRESSION_PIZ])
def list_to_numpy(x: list):
    """Stack a list of HWC images into a single BCHW numpy array."""
    return np.stack(x).transpose(0, 3, 1, 2)
def numpy_to_list(x: np.ndarray):
    """Split a BCHW array into a list of HWC arrays."""
    return list(x.transpose(0, 2, 3, 1))
def list_to_tensor(x: list, device='cuda'):
    """Convert a list of HWC numpy arrays into a BCHW tensor on `device`."""
    batched = list_to_numpy(x)
    return torch.from_numpy(batched).to(device, non_blocking=True)
def tensor_to_list(x: torch.Tensor):
    """Convert a BCHW tensor into a list of HWC numpy arrays."""
    return numpy_to_list(x.detach().cpu().numpy())
def project(xyz, K, RT):
    """Project world-space points to pixel coordinates.

    Args:
        xyz: (N, 3) points in world space.
        K: (3, 3) camera intrinsics.
        RT: (3, 4) world-to-camera extrinsics [R|t].

    Returns:
        (N, 2) pixel coordinates.
    """
    cam = xyz @ RT[:, :3].T + RT[:, 3:].T
    pix = cam @ K.T
    return pix[:, :2] / pix[:, 2:]
def unproject(depth, K, R, T):
    """Lift a depth map back into world-space 3D points.

    Each pixel's homogeneous coordinate [x, y, 1] is scaled by its depth
    value, back-projected through K, then moved to world space via R, T.

    Args:
        depth: (H, W) depth map.
        K: (3, 3) intrinsics; R: (3, 3) rotation; T: (3, 1) translation.

    Returns:
        (H, W, 3) world-space points.
    """
    H, W = depth.shape
    xs, ys = np.meshgrid(np.arange(W, dtype=np.float32),
                         np.arange(H, dtype=np.float32),
                         indexing='xy')
    homo = np.stack([xs, ys, np.ones_like(xs)], axis=2)
    cam = (homo * depth[..., None]) @ np.linalg.inv(K).T
    return (cam - T.ravel()) @ R
def read_mask_by_img_path(data_root: str, img_path: str, erode_dilate_edge: bool = False, mask: str = '') -> np.ndarray:
    """Locate and load the segmentation mask paired with `img_path`.

    When `mask` is given, it replaces the 'images' component of the path;
    otherwise a list of known mask folder layouts is tried in order
    ('mask', 'mask_cihp', 'merged_mask', 'rvm', 'bgmt', ...). If nothing is
    found, an all-ones mask of the image's spatial size is returned.

    Args:
        data_root: dataset root directory.
        img_path: image path relative to `data_root` (expected under 'images').
        erode_dilate_edge: if True, mark the mask's boundary band via
            fill_mask_edge_with so the uncertain edge can be ignored downstream.
        mask: optional directory name substituted for 'images'.

    Returns:
        np.ndarray: single-channel uint8 mask.
    """
    def read_mask_file(path):
        # Load and collapse a possibly multi-channel mask to one channel
        msk = load_mask(path).astype(np.uint8)
        if len(msk.shape) == 3:
            msk = msk[..., 0]
        return msk
    if mask:
        # User-specified mask directory: try verbatim, then with a .png
        # appended, then with the extension replaced by .png
        msk_path = os.path.join(data_root, img_path.replace('images', mask))
        if not os.path.exists(msk_path):
            msk_path = os.path.join(data_root, img_path.replace('images', mask)) + '.png'
        if not os.path.exists(msk_path):
            msk_path = os.path.join(data_root, img_path.replace('images', mask))[:-4] + '.png'
        if not os.path.exists(msk_path):
            log(f'warning: defined mask path {msk_path} does not exist', 'yellow')
    else:
        # Fall through the known mask folder conventions, in priority order
        msk_path = os.path.join(data_root, 'mask', img_path)[:-4] + '.png'
        if not os.path.exists(msk_path):
            msk_path = os.path.join(data_root, 'mask', img_path)[:-4] + '.png'
        if not os.path.exists(msk_path):
            msk_path = os.path.join(data_root, 'mask_cihp', img_path)[:-4] + '.png'
        if not os.path.exists(msk_path):
            msk_path = os.path.join(data_root, img_path.replace('images', 'merged_mask'))[:-4] + '.png'
        if not os.path.exists(msk_path):
            msk_path = os.path.join(data_root, img_path.replace('images', 'rvm'))[:-4] + '.png'
        if not os.path.exists(msk_path):
            msk_path = os.path.join(data_root, img_path.replace('images', 'rvm'))[:-4] + '.jpg'
        if not os.path.exists(msk_path):
            msk_path = os.path.join(data_root, img_path.replace('images', 'mask'))[:-4] + '.png'
        if not os.path.exists(msk_path):  # background matte v2
            msk_path = os.path.join(data_root, img_path.replace('images', 'bgmt'))[:-4] + '.png'
        if not os.path.exists(msk_path):
            msk_path = os.path.join(data_root, img_path.replace('images', 'mask'))[:-4] + '.jpg'
        if not os.path.exists(msk_path):
            # Nothing found: fall back to an all-ones mask sized like the image
            log(f'cannot find mask file: {msk_path}, using all ones', 'yellow')
            img = load_unchanged_image(os.path.join(data_root, img_path))
            msk = np.ones_like(img[:, :, 0]).astype(np.uint8)
            return msk
    msk = read_mask_file(msk_path)
    # erode edge inconsistence when evaluating and training
    if erode_dilate_edge:  # eroding edge on matte might erode the actual human
        msk = fill_mask_edge_with(msk)
    return msk
def fill_mask_edge_with(msk, border=5, value=100):
    """Mark the uncertain boundary band of a mask with `value`.

    The band is the difference between dilated and eroded copies of the mask;
    pixels inside it are set to `value` so losses can treat them specially.
    NOTE(review): assumes a binary mask with values in {0, 1} — confirm.
    """
    out = msk.copy()
    kernel = np.ones((border, border), np.uint8)
    eroded = cv2.erode(out.copy(), kernel)
    dilated = cv2.dilate(out.copy(), kernel)
    out[(dilated - eroded) == 1] = value
    return out
def get_rays_within_bounds_rendering(H, W, K, R, T, bounds):
    """Generate per-pixel rays plus near/far distances and an in-box mask.

    Returns ray_o, ray_d shaped (H, W, 3) and near, far, mask_at_box
    shaped (H, W).
    """
    ray_o, ray_d = get_rays(H, W, K, R, T)
    ray_o = ray_o.reshape(-1, 3).astype(np.float32)
    ray_d = ray_d.reshape(-1, 3).astype(np.float32)
    near, far, mask_at_box = get_full_near_far(bounds, ray_o, ray_d)
    return (ray_o.reshape(H, W, 3),
            ray_d.reshape(H, W, 3),
            near.reshape(H, W),
            far.reshape(H, W),
            mask_at_box.reshape(H, W))
def get_rays(H, W, K, R, T, subpixel=False):
    """Compute per-pixel ray origins and directions for a pinhole camera.

    Thin numpy wrapper around easyvolcap's tensor implementation.

    Args:
        H, W: image height and width.
        K: (3, 3) intrinsics.
        R: (3, 3) world-to-camera rotation.
        T: (3, 1) world-to-camera translation.
        subpixel: accepted for caller compatibility (full_sample_ray passes a
            sixth argument, which the previous 5-parameter signature rejected
            with a TypeError). NOTE(review): subpixel jitter is currently not
            forwarded to the underlying implementation — confirm whether the
            delegated get_rays should receive it.

    Returns:
        ray_o, ray_d: (H, W, 3) numpy arrays of ray origins and directions.
    """
    from easyvolcap.utils.ray_utils import get_rays
    K, R, T = to_tensor([K, R, T])
    ray_o, ray_d = get_rays(H, W, K, R, T)
    ray_o, ray_d = to_numpy([ray_o, ray_d])
    return ray_o, ray_d
def get_near_far(bounds, ray_o, ray_d) -> Tuple[np.ndarray, np.ndarray]:
    """Per-ray near/far intersection distances with an axis-aligned box.

    Delegates to easyvolcap's tensor implementation, converting inputs to
    tensors (no copy) and results back to numpy.
    """
    from easyvolcap.utils.ray_utils import get_near_far_aabb
    bounds, ray_o, ray_d = to_tensor([bounds, ray_o, ray_d])  # no copy
    near, far = get_near_far_aabb(bounds, ray_o, ray_d)
    near, far = to_numpy([near, far])
    return near, far
def get_full_near_far(bounds, ray_o, ray_d):
    """Intersect every ray with a 3D axis-aligned box (slab method).

    Note: only the first ray's origin (`ray_o[:1]`) is used, matching camera
    rays that all share one origin.

    Returns:
        near, far: entry/exit distances in units of the unnormalized ray
            direction length.
        mask_at_box: boolean mask of rays that actually hit the box.
    """
    norm_d = np.linalg.norm(ray_d, axis=-1, keepdims=True)
    viewdir = ray_d / norm_d
    # Nudge near-zero direction components away from zero to avoid divide blow-ups
    viewdir[(viewdir < 1e-5) & (viewdir > -1e-10)] = 1e-5
    viewdir[(viewdir > -1e-5) & (viewdir < 1e-10)] = -1e-5
    t_lo = (bounds[:1] - ray_o[:1]) / viewdir
    t_hi = (bounds[1:2] - ray_o[:1]) / viewdir
    near = np.minimum(t_lo, t_hi).max(axis=-1)
    far = np.maximum(t_lo, t_hi).min(axis=-1)
    mask_at_box = near < far
    return near / norm_d[..., 0], far / norm_d[..., 0], mask_at_box
def full_sample_ray(img, msk, K, R, T, bounds, split='train', subpixel=False):
    """Generate one ray per pixel with its rgb, near/far and in-box mask.

    Returns flattened (H*W, ...) float32 arrays: rgb, ray_o, ray_d, near,
    far, plus the (H*W, 2) pixel coordinates used and the (H, W) in-box mask.
    """
    H, W = img.shape[:2]
    # NOTE(review): get_rays defined above accepts only (H, W, K, R, T);
    # passing the extra `subpixel` argument would raise TypeError — confirm
    # the intended get_rays signature.
    ray_o, ray_d = get_rays(H, W, K, R, T, subpixel)
    near, far, mask_at_box = get_full_near_far(bounds, ray_o, ray_d)
    # Zero out mask pixels whose rays miss the bounding box
    msk = msk * mask_at_box
    coords = np.argwhere(np.ones_like(mask_at_box))  # every pixel
    ray_o = ray_o[coords[:, 0], coords[:, 1]].astype(np.float32)
    ray_d = ray_d[coords[:, 0], coords[:, 1]].astype(np.float32)
    near = near[coords[:, 0], coords[:, 1]].astype(np.float32)
    far = far[coords[:, 0], coords[:, 1]].astype(np.float32)
    rgb = img[coords[:, 0], coords[:, 1]].astype(np.float32)
    return rgb, ray_o, ray_d, near, far, coords, mask_at_box
def affine_inverse(m: np.ndarray):
    """Invert an affine transform matrix via easyvolcap's torch helper."""
    import torch
    from easyvolcap.utils.math_utils import affine_inverse as affine_inverse_torch
    tensor = torch.from_numpy(m)
    return affine_inverse_torch(tensor).numpy()
def load_image_from_bytes(buffer: np.ndarray, ratio=1.0, normalize=False, decode_flag=cv2.IMREAD_UNCHANGED):
    """Decode an encoded image buffer into an HWC numpy array.

    Args:
        buffer: raw encoded bytes (BytesIO, bytes, memoryview, torch tensor
            or uint8 numpy array).
        ratio: optional uniform resize factor applied after decoding.
        normalize: if True, swap BGR -> RGB and scale to [0, 1] float32.
        decode_flag: cv2.imdecode flag.
    """
    def _normalize(image):
        # torch is significantly faster than numpy for this conversion
        image = torch.from_numpy(image)
        if image.ndim >= 3 and image.shape[-1] >= 3:
            image[..., :3] = image[..., [2, 1, 0]]
        image = image / torch.iinfo(image.dtype).max
        return image.float().numpy()
    if isinstance(buffer, BytesIO):
        buffer = buffer.getvalue()  # slow? copy?
    if isinstance(buffer, (memoryview, bytes)):
        buffer = np.frombuffer(buffer, np.uint8)
    if isinstance(buffer, torch.Tensor):
        buffer = buffer.numpy()
    buffer = buffer.astype(np.uint8)
    image: np.ndarray = cv2.imdecode(buffer, decode_flag)  # MARK: 10-15ms
    if image.ndim == 2:
        image = image[..., None]
    if normalize:
        image = _normalize(image)  # MARK: 3ms
    if ratio != 1.0:
        H, W = image.shape[:2]
        image = cv2.resize(image, (int(W * ratio), int(H * ratio)), interpolation=cv2.INTER_AREA)
    return image
def as_torch_func(func):
    """Wrap a numpy-based function so it accepts and returns torch tensors."""
    def wrapper(*args, **kwargs):
        np_args = to_numpy(args)
        np_kwargs = to_numpy(kwargs)
        return to_tensor(func(*np_args, **np_kwargs))
    return wrapper
def as_numpy_func(func):
    """Wrap a torch-based function so it accepts and returns numpy arrays."""
    def wrapper(*args, **kwargs):
        t_args = to_tensor(args)
        t_kwargs = to_tensor(kwargs)
        return to_numpy(func(*t_args, **t_kwargs))
    return wrapper
def load_image_bytes(im: str):
    """Read a file's raw bytes; enables OpenEXR decoding for .exr paths."""
    if im.endswith('.exr'):
        os.environ["OPENCV_IO_ENABLE_OPENEXR"] = "1"
    with open(im, "rb") as handle:
        return handle.read()
class UnstructuredTensors(torch.Tensor):
    # Flat storage for variable-length tensors: contents are concatenated into
    # one contiguous 1D tensor, with per-element lengths/offsets kept so that
    # indexing recovers the original pieces as views.
    # https://github.com/pytorch/pytorch/issues/13246#issuecomment-617140519
    # https://github.com/pytorch/pytorch/issues/69893
    @staticmethod
    def __new__(cls, bytes: Union[List[np.ndarray], List[torch.Tensor], np.ndarray], **kwargs):
        """Create an UnstructuredTensors from a list of variable-length arrays.

        The inputs are flattened into one contiguous 1D tensor; per-element
        lengths and cumulative offsets are recorded so `__getitem__` can
        return the i-th original tensor as a slice of the flat storage.

        Args:
            bytes: list of arrays/tensors (possibly of differing lengths),
                or a numpy array of such elements; passing an existing
                UnstructuredTensors returns it unchanged.

        Returns:
            UnstructuredTensors: the flattened container.
        """
        if isinstance(bytes, UnstructuredTensors):
            return bytes
        # Prepare the bytes array
        if isinstance(bytes, np.ndarray):
            bytes = [b for b in bytes]
        if bytes[0].dtype == object:
            bytes = [b.astype(np.uint8) for b in bytes]
        bytes = to_tensor(bytes)  # now, every element is a list
        dtype = torch.uint8
        if len(bytes):
            dtype = bytes[0].dtype
        # Create an empty tensor of the right dtype
        self = torch.Tensor.__new__(cls).to(dtype)
        # Point the new tensor's storage at the flattened contents; remember
        # per-element lengths and their exclusive cumulative sums (offsets)
        self.set_(torch.cat(bytes))  # flatten # sum(N)
        self.lengths = torch.as_tensor([len(b) for b in bytes], dtype=torch.int32)  # N,
        self.cumsums = torch.cat([torch.as_tensor([0]), torch.cumsum(self.lengths, dim=0)[:-1]])
        return self
    @property
    def is_unstructured(self): return hasattr(self, 'lengths')  # plain tensors lack the metadata
    def __getitem__(self, index: int):
        """Return the index-th stored tensor as a view into the flat storage.

        Uses the recorded offsets/lengths to slice the underlying buffer;
        falls back to plain tensor indexing when no metadata is present.
        """
        if self.is_unstructured:
            return torch.Tensor.__getitem__(self, slice(self.cumsums[index], self.cumsums[index] + self.lengths[index]))
        else:
            return super().__getitem__(index)
    def __len__(self):
        # Number of stored tensors, not the flat element count
        if self.is_unstructured:
            return len(self.lengths)
        else:
            return super().__len__()
    def clone(self, *args, **kwargs):
        # Default clone would lose the lengths/cumsums metadata
        if self.is_unstructured:
            return UnstructuredTensors([self[i] for i in range(len(self.lengths))])  # manual cloning with copy and reconstruction
        else:
            return super().clone(*args, **kwargs)
def load_ims_bytes_from_disk(ims: np.ndarray, desc="Loading image bytes from disk"):
    """Read raw bytes for every path in `ims` in parallel, preserving shape."""
    shape = ims.shape
    flat = ims.ravel()
    loaded = parallel_execution(list(flat), action=load_image_bytes, desc=desc, print_progress=True)
    return np.asarray(loaded).reshape(shape)
def load_resize_undist_im_bytes(imp: str,
                                K: np.ndarray,
                                D: np.ndarray,
                                ratio: Union[float, List[int]] = 1.0,
                                center_crop_size: List[int] = [-1, -1],
                                encode_ext='.jpg',
                                decode_flag=cv2.IMREAD_UNCHANGED,
                                dist_opt_K: bool = False,
                                jpeg_quality: int = 100,
                                png_compression: int = 6
                                ):
    """Load an image, undistort, optionally resize and center-crop, re-encode.

    Args:
        imp: image path on disk.
        K: (3, 3) intrinsics; the returned K reflects resize/crop adjustments.
        D: distortion coefficients for cv2.undistort.
        ratio: float scale factor, or [H, W] target size.
        center_crop_size: [cH, cW] center crop; disabled when the first entry <= 0.
        encode_ext: output encoding extension ('.jpg' or '.png').
        dist_opt_K: if True, undistort with an optimal new camera matrix.
        jpeg_quality, png_compression: encoder settings.

    Returns:
        (encoded_bytes, K, H, W) describing the output image.
    """
    # Load image -> undistort -> resize -> (crop) -> save to bytes (jpeg)
    img = load_image_from_bytes(load_image_bytes(imp), decode_flag=decode_flag)[..., :3]  # cv2 decoding (fast)
    oH, oW = img.shape[:2]
    # Output size defaults to the original and is updated by the branches
    # below (replaces the previous fragile `'H' not in locals()` check)
    H, W = oH, oW
    if dist_opt_K:
        newCameraMatrix, _ = cv2.getOptimalNewCameraMatrix(K, D, (oW, oH), 0, (oW, oH))
        img = cv2.undistort(img, K, D, newCameraMatrix=newCameraMatrix)
        K = newCameraMatrix
    else:
        img = cv2.undistort(img, K, D)
    # Maybe update image size
    if not (isinstance(ratio, float) and ratio == 1.0):
        if isinstance(ratio, float):
            H, W = int(oH * ratio), int(oW * ratio)
        else:
            H, W = ratio  # ratio is actually the target image size
        rH, rW = H / oH, W / oW
        K = K.copy()
        K[0:1] = K[0:1] * rW  # K[0, 0] *= rW
        K[1:2] = K[1:2] * rH  # K[1, 1] *= rH
        img = cv2.resize(img, (W, H), interpolation=cv2.INTER_AREA)  # H, W, 3, uint8
    # Crop the image and intrinsic matrix if specified
    if center_crop_size[0] > 0:
        img, K, H, W = center_crop_img_ixt(img, K, H, W, center_crop_size)
    _, buffer = cv2.imencode(encode_ext, img, [cv2.IMWRITE_JPEG_QUALITY, jpeg_quality, cv2.IMWRITE_PNG_COMPRESSION, png_compression])
    return buffer, K, H, W
def center_crop_img_ixt(img: np.ndarray, K: np.ndarray, H: int, W: int,
                        center_crop_size: Union[int, List[int]]):
    """Center-crop an image and shift the intrinsics' principal point.

    Note: K is modified in place.

    Returns:
        (cropped image, adjusted K, crop height, crop width)
    """
    if isinstance(center_crop_size, int):
        cH = cW = center_crop_size
    else:
        cH, cW = center_crop_size
    # Top/left and bottom/right margins for each axis
    top, left = int((H - cH) * 0.5), int((W - cW) * 0.5)
    bottom, right = H - cH - top, W - cW - left
    # Crop the image (skip an axis when no margin exists on it)
    if top != 0:
        img = img[top:-bottom, :]
    if left != 0:
        img = img[:, left:-right]
    # Shift the principal point by the removed margins
    if top != 0:
        K[1, 2] -= top
    if left != 0:
        K[0, 2] -= left
    return img, K, cH, cW
def load_resize_undist_ims_bytes(ims: np.ndarray,
                                 Ks: np.ndarray,
                                 Ds: np.ndarray,
                                 ratio: Union[float, List[int], List[float]] = 1.0,
                                 center_crop_size: List[int] = [-1, -1],
                                 desc="Loading image bytes from disk",
                                 **kwargs):
    """Load, resize and undistort a V x N grid of images in parallel.

    Returns the encoded bytes (flat object array) plus per-image Ks, Hs, Ws
    reshaped back to the input grid shape.
    """
    sh = ims.shape  # V, N
    n = np.prod(sh)
    flat_ims = list(ims.reshape(n))
    flat_Ks = list(Ks.reshape(n, 3, 3))
    flat_Ds = list(Ds.reshape(n, 1, 5))
    if isinstance(ratio, list) and len(ratio) and isinstance(ratio[0], float):
        # Per-view ratios: broadcast across frames, then flatten to per-image
        ratio = list(np.broadcast_to(np.asarray(ratio)[:, None], sh).reshape(n))
    elif isinstance(ratio, list):
        ratio = np.asarray(ratio)  # avoid expansion in parallel execution
    if isinstance(center_crop_size, list):
        center_crop_size = np.asarray(center_crop_size)  # avoid expansion
    out = parallel_execution(flat_ims, flat_Ks, flat_Ds, ratio, center_crop_size,
                             action=load_resize_undist_im_bytes,
                             desc=desc, print_progress=True,
                             **kwargs,
                             )
    ims_bytes, Ks, Hs, Ws = zip(*out)
    ims_bytes = np.asarray(ims_bytes, dtype=object)
    Ks = np.asarray(Ks).reshape(sh + (3, 3))  # should all be the same?
    Hs = np.asarray(Hs).reshape(sh)  # should all be the same?
    Ws = np.asarray(Ws).reshape(sh)  # should all be the same?
    return ims_bytes, Ks, Hs, Ws
def decode_crop_fill_im_bytes(im_bytes: BytesIO,
                              mk_bytes: BytesIO,
                              K: np.ndarray,
                              R: np.ndarray,
                              T: np.ndarray,
                              bounds: np.ndarray,
                              encode_ext=['.jpg', '.jpg'],
                              decode_flag=cv2.IMREAD_UNCHANGED,
                              jpeg_quality: int = 100,
                              png_compression: int = 6,
                              **kwargs):
    """Decode an image/mask pair, crop it to the projected 3D bounds and then
    to the mask's bounding rect, black out the background, and re-encode.

    Returns (im_bytes, mk_bytes, K, h, w, x, y): re-encoded streams, the
    shifted intrinsics, the final crop size and its accumulated offset.
    NOTE(review): K is modified in place — pass a copy if the caller's
    intrinsics must be preserved.
    """
    # im_bytes: a series of jpeg bytes for the image
    # mk_bytes: a series of jpeg bytes for the mask
    # K: 3, 3 intrinsics matrix
    # Use load_image_from_bytes to decode and update jpeg streams
    img = load_image_from_bytes(im_bytes, decode_flag=decode_flag)  # H, W, 3
    msk = load_image_from_bytes(mk_bytes, decode_flag=decode_flag)  # H, W, 3
    # Crop both mask and the image using bbox's 2D projection
    H, W, _ = img.shape
    from easyvolcap.utils.bound_utils import get_bound_2d_bound
    bx, by, bw, bh = as_numpy_func(get_bound_2d_bound)(bounds, K, R, T, H, W)
    img = img[by:by + bh, bx:bx + bw]
    msk = msk[by:by + bh, bx:bx + bw]
    # Crop the image using the bounding rect of the mask (threshold at 128)
    mx, my, mw, mh = cv2.boundingRect((msk > 128).astype(np.uint8))  # array data type = 0 is not supported
    img = img[my:my + mh, mx:mx + mw]
    msk = msk[my:my + mh, mx:mx + mw]
    # Update the final size and intrinsics
    x, y, w, h = bx + mx, by + my, mw, mh  # w and h will always be the smaller one, xy will be accumulated
    K[0, 2] -= x  # shift principal point by the accumulated crop offset (in place)
    K[1, 2] -= y
    # Fill the image with black (premultiply by mask)
    img = (img * (msk / 255)).clip(0, 255).astype(np.uint8)  # fill with black, indexing starts at the front
    # Reencode the videos and masks
    if isinstance(encode_ext, str): encode_ext = [encode_ext] * 2  # '.jpg' -> ['.jpg', '.jpg']
    im_bytes = cv2.imencode(encode_ext[0], img, [cv2.IMWRITE_JPEG_QUALITY, jpeg_quality, cv2.IMWRITE_PNG_COMPRESSION, png_compression])[1]  # is_sucess, bytes_array
    mk_bytes = cv2.imencode(encode_ext[1], msk, [cv2.IMWRITE_JPEG_QUALITY, jpeg_quality, cv2.IMWRITE_PNG_COMPRESSION, png_compression])[1]  # is_sucess, bytes_array
    return im_bytes, mk_bytes, K, h, w, x, y
def decode_crop_fill_ims_bytes(ims_bytes: np.ndarray, mks_bytes: np.ndarray, Ks: np.ndarray, Rs: np.ndarray, Ts: np.ndarray, bounds: np.ndarray,
                               desc="Cropping images using mask", **kwargs):
    """Batched decode_crop_fill_im_bytes over a V x N grid of encoded pairs."""
    sh = Ks.shape[:2]  # V, N
    n = np.prod(sh)
    flat_Ks = list(Ks.reshape(n, 3, 3))
    flat_Rs = list(Rs.reshape(n, 3, 3))
    flat_Ts = list(Ts.reshape(n, 3, 1))
    flat_bounds = list(bounds.reshape(n, 2, 3))
    out = parallel_execution(list(ims_bytes), list(mks_bytes), flat_Ks, flat_Rs, flat_Ts, flat_bounds,
                             action=decode_crop_fill_im_bytes,
                             desc=desc, print_progress=True,
                             **kwargs,
                             )
    ims_bytes, mks_bytes, Ks, Hs, Ws, xs, ys = zip(*out)
    ims_bytes = np.asarray(ims_bytes, dtype=object)
    mks_bytes = np.asarray(mks_bytes, dtype=object)
    Ks = np.asarray(Ks).reshape(sh + (3, 3))  # should all be the same?
    Hs = np.asarray(Hs).reshape(sh)  # should all be the same?
    Ws = np.asarray(Ws).reshape(sh)  # should all be the same?
    xs = np.asarray(xs).reshape(sh)  # should all be the same?
    ys = np.asarray(ys).reshape(sh)  # should all be the same?
    return ims_bytes, mks_bytes, Ks, Hs, Ws, xs, ys
def decode_fill_im_bytes(im_bytes: BytesIO,
                         mk_bytes: BytesIO,
                         encode_ext='.jpg',
                         decode_flag=cv2.IMREAD_UNCHANGED,
                         jpeg_quality: int = 100,
                         png_compression: int = 6,
                         **kwargs):
    """Decode an image/mask pair, premultiply the image by the mask so the
    background goes black, and re-encode the image."""
    img = load_image_from_bytes(im_bytes, decode_flag=decode_flag)  # H, W, 3
    msk = load_image_from_bytes(mk_bytes, decode_flag=decode_flag)  # H, W, 3
    filled = (img * (msk / 255)).clip(0, 255).astype(np.uint8)  # fill with black
    encode_flags = [cv2.IMWRITE_JPEG_QUALITY, jpeg_quality, cv2.IMWRITE_PNG_COMPRESSION, png_compression]
    return cv2.imencode(encode_ext, filled, encode_flags)[1]
def decode_fill_ims_bytes(ims_bytes: np.ndarray,
                          mks_bytes: np.ndarray,
                          desc="Filling images using mask",
                          **kwargs):
    """Batched decode_fill_im_bytes over a grid of encoded images/masks."""
    sh = ims_bytes.shape  # V, N
    n = np.prod(sh)
    flat_ims = list(ims_bytes.reshape(n))
    flat_mks = list(mks_bytes.reshape(n))
    filled = parallel_execution(flat_ims, flat_mks,
                                action=decode_fill_im_bytes,
                                desc=desc, print_progress=True,
                                **kwargs,
                                )
    return np.asarray(filled, dtype=object).reshape(sh)
def batch_rodrigues(poses):
    """Convert a batch of axis-angle vectors (N, 3) to rotations (N, 3, 3).

    Uses the Rodrigues formula R = I + sin(t) K + (1 - cos(t)) K^2 where K is
    the skew-symmetric (cross-product) matrix of the unit axis; a small
    epsilon is added before the norm to avoid dividing by zero.
    """
    N = poses.shape[0]
    angle = np.linalg.norm(poses + 1e-8, axis=1, keepdims=True)
    axis = poses / angle
    sin, cos = np.sin(angle)[:, None], np.cos(angle)[:, None]
    rx, ry, rz = np.split(axis, 3, axis=1)
    zeros = np.zeros([N, 1])
    K = np.concatenate([zeros, -rz, ry,
                        rz, zeros, -rx,
                        -ry, rx, zeros], axis=1).reshape([N, 3, 3])
    eye = np.eye(3)[None]
    return (eye + sin * K + (1 - cos) * np.matmul(K, K)).astype(np.float32)
def get_rigid_transformation_and_joints(poses, joints, parents):
    """Forward kinematics: per-bone world transforms plus posed joint positions.

    Args:
        poses: n_bones x 3 axis-angle rotation per bone.
        joints: n_bones x 3 rest-pose joint locations.
        parents: n_bones parent indices (assumed topologically ordered).

    Returns:
        transforms: n_bones x 4 x 4 float32 transforms mapping rest-pose
            (vertex-space) coordinates to posed coordinates.
        pose_joints: n_bones x 3 posed joint positions.
    """
    n_bones = len(joints)
    rot_mats = batch_rodrigues(poses)
    # Obtain the relative joints (each joint's offset from its parent)
    rel_joints = joints.copy()
    rel_joints[1:] -= joints[parents[1:]]
    # Create the transformation matrix: rotation first, then the relative
    # translation, padded to homogeneous 4x4
    transforms_mat = np.concatenate([rot_mats, rel_joints[..., None]], axis=2)
    padding = np.zeros([n_bones, 1, 4])
    padding[..., 3] = 1
    transforms_mat = np.concatenate([transforms_mat, padding], axis=1)
    # Compose each bone with its parent chain (standard forward kinematics);
    # the results are world transforms including displacement
    transform_chain = [transforms_mat[0]]
    for i in range(1, parents.shape[0]):  # assuming parents are in topological order
        curr_res = np.dot(transform_chain[parents[i]], transforms_mat[i])
        transform_chain.append(curr_res)
    transforms = np.stack(transform_chain, axis=0)
    # Fold the inverse rest-pose translation into each transform so it maps
    # vertex-space (rest-pose) coordinates directly; this is equivalent to
    # transforms @ translate(-joint). The broadcast-sum below is effectively
    # a batched matrix-vector product.
    joints_vector = np.concatenate([joints, np.zeros([n_bones, 1])], axis=1)
    rot_joints = np.sum(transforms * joints_vector[:, None], axis=2)  # This is effectively matmul
    transforms[..., 3] = transforms[..., 3] - rot_joints  # add in the translation, we should translate first
    joints_points = np.concatenate([joints, np.ones([n_bones, 1])], axis=1)
    pose_joints = np.sum(transforms * joints_points[:, None], axis=2)  # This is effectively matmul
    transforms = transforms.astype(np.float32)
    return transforms, pose_joints[:, :3]
def get_rigid_transformation(poses, joints, parents):
    """Return only the per-bone rigid transforms.

    Convenience wrapper around get_rigid_transformation_and_joints that
    discards the posed joint positions.

    poses: n_bones x 3, joints: n_bones x 3, parents: n_bones
    """
    transforms, _ = get_rigid_transformation_and_joints(poses, joints, parents)
    return transforms
def padding_bbox_HW(bbox, h, w, padding=10):
    """Pad a pixel-space bbox, enforce a max 1.5 aspect ratio, clip to image.

    The original code assigned `padding = 10` but then used the literal 10,
    leaving the variable dead; the pad amount is now a real parameter with
    the same default, so existing callers are unaffected.

    Args:
        bbox: (2, 2) array [[min_x, min_y], [max_x, max_y]]; modified in place.
        h, w: image height and width used for clipping.
        padding: uniform padding added on every side.

    Returns:
        The adjusted bbox (the same array as the input).
    """
    bbox[0] = bbox[0] - padding
    bbox[1] = bbox[1] + padding
    height = bbox[1, 1] - bbox[0, 1]
    width = bbox[1, 0] - bbox[0, 0]
    # a magic number of pytorch3d: keep height/width within a 1.5 ratio
    ratio = 1.5
    if height / width > ratio:
        min_size = int(height / ratio)
        if width < min_size:
            pad = (min_size - width) // 2
            bbox[0, 0] = bbox[0, 0] - pad
            bbox[1, 0] = bbox[1, 0] + pad
    if width / height > ratio:
        min_size = int(width / ratio)
        if height < min_size:
            pad = (min_size - height) // 2
            bbox[0, 1] = bbox[0, 1] - pad
            bbox[1, 1] = bbox[1, 1] + pad
    bbox[:, 0] = np.clip(bbox[:, 0], a_min=0, a_max=w - 1)
    bbox[:, 1] = np.clip(bbox[:, 1], a_min=0, a_max=h - 1)
    return bbox
def padding_bbox(bbox, img):
    """Pad `bbox` using the image's own height and width for clipping."""
    H, W = img.shape[:2]
    return padding_bbox_HW(bbox, H, W)
def get_crop_box(H, W, K, ref_msk):
    """Compute a padded crop bbox from a reference mask and adjust K for it."""
    x, y, w, h = cv2.boundingRect(ref_msk)
    bbox = padding_bbox_HW(np.array([[x, y], [x + w, y + h]]), H, W)
    # revise the intrinsic camera matrix for the crop offset
    K = K.copy()
    K[0, 2] -= bbox[0, 0]
    K[1, 2] -= bbox[0, 1]
    return K.astype(np.float32), bbox
def crop_image_msk(img, msk, K, ref_msk):
    """Crop img/msk to the padded bbox of ref_msk, pad each side up past the
    next multiple of 8, and shift the intrinsics accordingly."""
    x, y, w, h = cv2.boundingRect(ref_msk)
    bbox = padding_bbox(np.array([[x, y], [x + w, y + h]]), img)
    crop = img[bbox[0, 1]:bbox[1, 1], bbox[0, 0]:bbox[1, 0]]
    crop_msk = msk[bbox[0, 1]:bbox[1, 1], bbox[0, 0]:bbox[1, 0]]
    # Round each side up to the next multiple of 8 (stride alignment)
    align = 8
    height = (crop.shape[0] | (align - 1)) + 1
    width = (crop.shape[1] | (align - 1)) + 1
    # align image: zero-pad to the aligned canvas
    aligned_image = np.zeros([height, width, 3])
    aligned_image[:crop.shape[0], :crop.shape[1]] = crop
    aligned_image = aligned_image.astype(np.float32)
    # align mask: zero-pad and binarize (only exact 1 survives)
    aligned_msk = np.zeros([height, width])
    aligned_msk[:crop.shape[0], :crop.shape[1]] = crop_msk
    aligned_msk = (aligned_msk == 1).astype(np.uint8)
    # revise the intrinsic camera matrix for the crop offset
    K = K.copy()
    K[0, 2] = K[0, 2] - bbox[0, 0]
    K[1, 2] = K[1, 2] - bbox[0, 1]
    K = K.astype(np.float32)
    return aligned_image, aligned_msk, K, bbox
def random_crop_image(img, msk, K, min_size, max_size):
    """Take a random square crop centered on a valid mask pixel.

    The crop size is drawn uniformly from [min_size, max_size) (clamped to
    the image), rounded up past the next multiple of 8, and clamped back to
    the image borders; the principal point of K is shifted to match.

    Bug fix: the size-randomization guard was inverted (`max_size < min_size`
    instead of `max_size > min_size`), which made np.random.randint raise
    whenever it was reached and disabled randomization otherwise.

    Note: there's an assumption that `msk` is always inside `mask_at_box`,
    so sampling centers from `msk == 1` always yields valid crops.

    Returns:
        (cropped img, cropped msk, adjusted float32 K)
    """
    H, W = img.shape[:2]
    min_HW = min(H, W)
    min_HW = min(min_HW, max_size)
    max_size = min_HW
    if max_size > min_size:  # fixed: was `max_size < min_size`
        H_size = np.random.randint(min_size, max_size)
    else:
        H_size = min_size
    W_size = H_size
    # Round the crop size up past the next multiple of 8 (stride alignment)
    align = 8
    H_size = (H_size | (align - 1)) + 1
    W_size = (W_size | (align - 1)) + 1
    # Randomly select a crop center among valid mask pixels
    coords = np.argwhere(msk == 1)
    center_xy = coords[np.random.randint(0, len(coords))][[1, 0]]
    min_x, min_y = center_xy[0] - W_size // 2, center_xy[1] - H_size // 2
    max_x, max_y = min_x + W_size, min_y + H_size
    # Clamp the crop window back inside the image
    if min_x < 0:
        min_x, max_x = 0, W_size
    if max_x > W:
        min_x, max_x = W - W_size, W
    if min_y < 0:
        min_y, max_y = 0, H_size
    if max_y > H:
        min_y, max_y = H - H_size, H
    # crop image and mask
    begin_x, begin_y = min_x, min_y
    img = img[begin_y:begin_y + H_size, begin_x:begin_x + W_size]
    msk = msk[begin_y:begin_y + H_size, begin_x:begin_x + W_size]
    # revise the intrinsic camera matrix for the crop offset
    K = K.copy()
    K[0, 2] = K[0, 2] - begin_x
    K[1, 2] = K[1, 2] - begin_y
    K = K.astype(np.float32)
    return img, msk, K
def get_bound_corners(bounds):
    """Return the 8 corners (8, 3) of an AABB given as a (2, 3) min/max pair.

    Corner i corresponds to choosing min/max per axis with bit layout
    i = 4*x + 2*y + z (z varies fastest).
    """
    corners = [[bounds[x, 0], bounds[y, 1], bounds[z, 2]]
               for x in (0, 1) for y in (0, 1) for z in (0, 1)]
    return np.asarray(corners, dtype=np.float32)
def get_bound_2d_mask(bounds, K, RT, H, W):
    """Rasterize the 2D silhouette of a 3D AABB into an H x W uint8 mask.

    Each of the box's six faces is projected with (K, RT) and filled; the
    union of the filled quads forms the mask. Corner index layout comes from
    get_bound_corners (i = 4*x + 2*y + z over min/max choices per axis).
    """
    corners_3d = get_bound_corners(bounds)
    corners_2d = project(corners_3d, K, RT)
    corners_2d = np.round(corners_2d).astype(int)
    mask = np.zeros((H, W), dtype=np.uint8)
    # Fill one quad per box face; fillPoly closes polygons automatically
    cv2.fillPoly(mask, [corners_2d[[0, 1, 3, 2, 0]]], 1)  # x = min face
    # NOTE(review): the trailing index 5 below (instead of 4) looks like a
    # typo, but the filled region is unchanged since fillPoly closes the loop
    cv2.fillPoly(mask, [corners_2d[[4, 5, 7, 6, 5]]], 1)  # x = max face
    cv2.fillPoly(mask, [corners_2d[[0, 1, 5, 4, 0]]], 1)  # y = min face
    cv2.fillPoly(mask, [corners_2d[[2, 3, 7, 6, 2]]], 1)  # y = max face
    cv2.fillPoly(mask, [corners_2d[[0, 2, 6, 4, 0]]], 1)  # z = min face
    cv2.fillPoly(mask, [corners_2d[[1, 3, 7, 5, 1]]], 1)  # z = max face
    return mask
def get_bounds(xyz, box_padding=0.05):
    """Axis-aligned bounds (2, 3) of a point cloud, padded on every side."""
    lower = xyz.min(axis=0) - box_padding
    upper = xyz.max(axis=0) + box_padding
    return np.stack([lower, upper]).astype(np.float32)
def crop_mask_edge(msk):
    """Mark a 10px boundary band of the mask with the ignore value 100.

    This was a verbatim duplicate of fill_mask_edge_with's erode/dilate band
    logic with border=10, value=100; delegate so the logic lives in one place.
    """
    return fill_mask_edge_with(msk, border=10, value=100)
def adjust_hsv(img, saturation, brightness, contrast):
    """Scale saturation and brightness in HSV space, then apply contrast.

    All channels are clamped to 255 after each scaling step; the result is a
    uint8 RGB image.
    """
    hsv = cv2.cvtColor(img, cv2.COLOR_RGB2HSV).astype(np.float32)
    hsv[..., 1] = np.minimum(hsv[..., 1] * saturation, 255)
    hsv[..., 2] = np.minimum(hsv[..., 2] * brightness, 255)
    rgb = cv2.cvtColor(hsv.astype(np.uint8), cv2.COLOR_HSV2RGB)
    scaled = np.minimum(rgb.astype(np.float32) * contrast, 255)
    return scaled.astype(np.uint8)
# ----- evocodebench_data_92: chunk separator (original stray text was not valid Python) -----
import math
import torch
from typing import Callable, Tuple
from easyvolcap.utils.console_utils import *
def chunkify(chunk_size=1024,
             key='ray_o',
             pos=0,
             dim=-2,
             merge_dims: bool = False,
             ignore_mismatch: bool = False,  # ignore mismatch in batch dims
             print_progress: bool = False,
             move_to_cpu: bool = False,
             batch_key: str = 'batch',
             inds_key: str = 'chunkify_sample',
             ):
    """Decorator factory: run the wrapped function on chunks of its tensor
    arguments along `dim` and concatenate the results.

    A "pivot" argument (keyword `key`, or positional index `pos`) supplies
    the shape used to size the chunks. Every tensor argument is sliced with
    the same index range; non-tensor arguments pass through unchanged.
    Returned tensors (possibly nested in dicts/lists/tuples) are concatenated
    back along the chunked dimension.

    Args:
        chunk_size: target number of elements per chunk along `dim`.
        key / pos: keyword name / positional index of the pivot argument.
        dim: dimension to chunk along (tested with dim == -2 or dim == 1).
        merge_dims: flatten everything between the batch dim and `dim` first.
        ignore_mismatch: skip restoring the original leading batch shape.
        print_progress: show a progress bar over chunks.
        move_to_cpu: move each chunk's result to CPU before merging.
        batch_key / inds_key: when a batch object is passed as keyword
            `batch_key`, the current chunk's [start, end] range is written to
            its metadata under `inds_key` so callees can chunk side data.
    """
    from easyvolcap.utils.data_utils import to_cpu, to_cuda, to_numpy, to_tensor  # keep global imports clean
    # will fail if dim == -1, currently only tested on dim == -2 or dim == 1
    # will select a key element from the argments: either by keyword `key` or position `pos`
    # then, depending on whether user wants to merge other dimensions, will select the dim to chunkify according to `dim`
    def merge_ret(ret, x: torch.Tensor, sh: torch.Size, nn_dim: int):
        # Merge ret list based on return type (single tensor or dict/list?);
        # leaf values of the chunkified function should all be tensors
        if len(ret) and isinstance(ret[0], torch.Tensor):
            # Stop recursion: concatenate along the chunked dimension
            ret = torch.cat(ret, dim=nn_dim)
            if ignore_mismatch:
                ret = ret
            else:
                # Restore the original leading shape only when the chunked
                # dim's size was preserved by the wrapped function
                ret = ret.view(*sh, *ret.shape[nn_dim + 1:]) if x.shape[nn_dim] == ret.shape[nn_dim] else ret
        elif len(ret) and isinstance(ret[0], dict):
            dict_type = type(ret[0])
            # Start recursion: merge each key across chunks
            ret = {k: merge_ret([v[k] for v in ret], x, sh, nn_dim) for k in ret[0].keys()}
            ret = dict_type(ret)
        elif len(ret) and (isinstance(ret[0], list) or isinstance(ret[0], tuple)):
            list_type = type(ret[0])
            # Start recursion: merge each position across chunks
            ret = [merge_ret([v[i] for v in ret], x, sh, nn_dim) for i in range(len(ret[0]))]
            ret = list_type(ret)
        else:
            raise RuntimeError(f'Unsupported return type to batchify: {type(ret[0])}, or got empty return value')
        return ret
    def wrapper(decoder: Callable[[torch.Tensor], torch.Tensor]):
        def decode(*args, **kwargs):
            # Prepare pivot args (find shape information from this arg)
            if key in kwargs:
                x: torch.Tensor = kwargs[key]
            else:
                x: torch.Tensor = args[pos]
                args = [*args]
            sh = x.shape[:dim + 1]  # record original shape up until the chunkified dim
            nn_dim = len(sh) - 1  # make dim a non-negative number (i.e. -2 to 1?)
            # Prepare all tensor arguments by filtering with isinstance
            tensor_args = [v for v in args if isinstance(v, torch.Tensor)]
            tensor_kwargs = {k: v for k, v in kwargs.items() if isinstance(v, torch.Tensor)}
            other_args = [v for v in args if not isinstance(v, torch.Tensor)]
            other_kwargs = {k: v for k, v in kwargs.items() if not isinstance(v, torch.Tensor)}
            # Merge all dims except first batch dim up until the actual chunkify dimension
            if merge_dims:
                x = x.view(x.shape[0], -1, *x.shape[nn_dim + 1:])
                tensor_args = [v.view(v.shape[0], -1, *v.shape[nn_dim + 1:]) for v in tensor_args]
                tensor_kwargs = {k: v.view(v.shape[0], -1, *v.shape[nn_dim + 1:]) for k, v in tensor_kwargs.items()}
                nn_dim = 1  # will always be 1 in this situation
            # Running the actual batchified forward pass
            ret = []
            total_size = x.shape[nn_dim]
            # We need to update chunk size so that almost all chunk has a decent amount of queries
            actual_size = math.ceil(total_size / math.ceil(total_size / chunk_size)) if total_size else chunk_size  # this value should be smaller than the actual chunk_size specified
            if print_progress: pbar = tqdm(total=total_size, back=3)  # log previous frame; NOTE(review): `back` is not a stock tqdm kwarg — presumably the console_utils wrapper, confirm
            for i in range(0, total_size, actual_size):
                # nn_dim should be used if there's multiplication involved
                chunk_args = [v[(slice(None),) * nn_dim + (slice(i, i + actual_size), )] for v in tensor_args]
                chunk_kwargs = {k: v[(slice(None),) * nn_dim + (slice(i, i + actual_size), )] for k, v in tensor_kwargs.items()}
                # Other components can use this to perform manual trunking
                if batch_key in other_kwargs: other_kwargs[batch_key].meta[inds_key] = [i, i + actual_size]
                result = decoder(*chunk_args, *other_args, **chunk_kwargs, **other_kwargs)
                result = to_cpu(result, non_blocking=True) if move_to_cpu else result
                ret.append(result)
                if print_progress: pbar.update(min(i + actual_size, total_size) - i)
            if print_progress: pbar.close()  # manual close necessary!
            if not len(ret):
                # Brute-forcely go through the network with empty input
                log(f'zero length tensor detected in chunkify, are the camera parameters correct?', 'red')
                i = 0
                chunk_args = [v[(slice(None),) * nn_dim + (slice(i, i + actual_size), )] for v in tensor_args]
                chunk_kwargs = {k: v[(slice(None),) * nn_dim + (slice(i, i + actual_size), )] for k, v in tensor_kwargs.items()}
                result = decoder(*chunk_args, *other_args, **chunk_kwargs, **other_kwargs)
                result = to_cpu(result, non_blocking=True) if move_to_cpu else result
                ret.append(result)
            return merge_ret(ret, x, sh, nn_dim)
        return decode
    return wrapper
def key_cache(key: Callable):
    """Single-slot memoization keyed by `hash(key(*args, **kwargs))`.

    Unlike lru_cache, only the most recent result is kept; the wrapped
    function is re-invoked whenever the key hash changes.
    """
    def key_cache_wrapper(func: Callable):
        state = {'hash': None, 'result': None}
        def func_wrapper(*args, **kwargs):
            current = hash(key(*args, **kwargs))
            if current != state['hash']:
                state['result'] = func(*args, **kwargs)
                state['hash'] = current
            return state['result']
        return func_wrapper
    return key_cache_wrapper
def batch_aware_indexing(mask: torch.Tensor, metric: torch.Tensor = None, dim=-1) -> Tuple[torch.Tensor, torch.Tensor, int]:  # MARK: SYNC
    """Per-batch top-S selection where ``mask`` is set.

    S is the largest per-slice count of True entries over all batch dims, so
    every slice returns the same number of (value, index) pairs; slices with
    fewer hits are padded with arbitrary low-metric entries. The ``.max()``
    readback forces a GPU->CPU sync.
    """
    mask = mask if mask.dtype == torch.bool else mask.bool()
    metric = mask.int() if metric is None else metric
    if metric.dtype == torch.bool:
        metric = metric.int()
    # Maximum number of selected entries across all other (batch) dimensions
    S = int(mask.sum(dim=dim).max())
    # topk with sorted=False: only ordering within the top-S set is unspecified
    valid, inds = metric.topk(S, dim=dim, sorted=False)
    return valid, inds, S
def multi_indexing(indices: torch.Tensor, shape: torch.Size, dim=-2):
    """Broadcast ``indices`` to ``shape`` for use with gather/scatter.

    The index is first padded with trailing singleton dims up to the target
    rank, then expanded to ``shape`` with ``dim`` left free (-1), i.e. kept
    at the index's own size along the gathered dimension.
    """
    target = list(shape)
    # Append singleton dims until the index matches the target rank
    indices = indices.reshape(*indices.shape, *((1,) * (len(target) - indices.ndim)))
    target[dim] = -1
    return indices.expand(*target)
def multi_gather(values: torch.Tensor, indices: torch.Tensor, dim=-2):
    """Batch-aware gather along ``dim`` of ``values``.

    Example: values (B, P, 3) indexed with (B, N) -> (B, N, 3). The index is
    broadcast over the trailing attribute dims before the gather.
    """
    expanded = multi_indexing(indices, values.shape, dim)
    return values.gather(dim, expanded)
def multi_scatter(target: torch.Tensor, indices: torch.Tensor, values: torch.Tensor, dim=-2):
    """Out-of-place inverse of multi_gather: write ``values`` rows into ``target``."""
    expanded = multi_indexing(indices, values.shape, dim)
    return target.scatter(dim, expanded, values)
def multi_scatter_(target: torch.Tensor, indices: torch.Tensor, values: torch.Tensor, dim=-2):
    """In-place variant of multi_scatter (mutates and returns ``target``)."""
    expanded = multi_indexing(indices, values.shape, dim)
    return target.scatter_(dim, expanded, values)
def multi_gather_tris(v: torch.Tensor, f: torch.Tensor, dim=-2) -> torch.Tensor:
    # Gather per-triangle vertex attributes: for each face in f, fetch its three
    # vertex rows from v (batch-aware), e.g. v: B, P, 3 and f: F, 3 -> B, F, 3, 3
    # Broadcast faces over the batch dimension when v carries one extra leading dim
    if v.ndim == (f.ndim + 1): f = f[None].expand(v.shape[0], *f.shape)
    # assert verts.shape[0] == faces.shape[0]
    shape = torch.tensor(v.shape)
    # Trailing attribute dims of v behind the gathered dim, re-appended after the flat gather.
    # NOTE(review): the flipped slice yields those dims in reversed order; this is
    # correct for exactly one trailing dim (the usual B, P, C case) — verify for more.
    remainder = shape.flip(0)[:(len(shape) - dim - 1) % len(shape)]
    # Flatten (F, 3) -> (F*3,), gather the vertex rows, then unflatten to (F, 3, ...)
    return multi_gather(v, f.view(*f.shape[:-2], -1), dim=dim).view(*f.shape, *remainder)  # B, F, 3, 3
def linear_indexing(indices: torch.Tensor, shape: torch.Size, dim=0):
    """Broadcast a 1D index tensor to ``shape`` with ``dim`` left free.

    The index is reshaped to (1, ..., N, ..., 1) with N at position ``dim``
    and expanded to the remaining dims of ``shape``.
    """
    assert indices.ndim == 1
    target = list(shape)
    axis = dim % len(target)  # normalize a possibly-negative dim
    view = [1] * len(target)
    view[axis] = -1
    target[axis] = -1
    return indices.view(view).expand(target)
def linear_gather(values: torch.Tensor, indices: torch.Tensor, dim=0):
    """Gather along ``dim`` using a 1D index broadcast over all other dims."""
    expanded = linear_indexing(indices, values.shape, dim)
    return values.gather(dim, expanded)
def linear_scatter(target: torch.Tensor, indices: torch.Tensor, values: torch.Tensor, dim=0):
    """Out-of-place scatter of ``values`` into ``target`` along ``dim`` via a 1D index."""
    expanded = linear_indexing(indices, values.shape, dim)
    return target.scatter(dim, expanded, values)
def linear_scatter_(target: torch.Tensor, indices: torch.Tensor, values: torch.Tensor, dim=0):
    """In-place variant of linear_scatter (mutates and returns ``target``)."""
    expanded = linear_indexing(indices, values.shape, dim)
    return target.scatter_(dim, expanded, values)
def merge01(x: torch.Tensor):
    """Flatten the first two dimensions into one: (A, B, ...) -> (A*B, ...)."""
    first, second, *rest = x.shape
    return x.reshape(first * second, *rest)
def scatter0(target: torch.Tensor, inds: torch.Tensor, value: torch.Tensor):
    """Out-of-place scatter of rows into ``target`` along dim 0 (Surface, 3 -> B * S, 3)."""
    index = expand_at_the_back(target, inds)
    return target.scatter(0, index, value)
def gather0(target: torch.Tensor, inds: torch.Tensor):
    """Gather rows from ``target`` along dim 0 (B * S, 3 -> Surface, 3)."""
    index = expand_at_the_back(target, inds)
    return target.gather(0, index)
def expand_at_the_back(target: torch.Tensor, inds: torch.Tensor):
    """Pad ``inds`` with trailing singleton dims and expand to target's trailing shape.

    A 1D index of length N becomes (N, *target.shape[1:]) without copying.
    """
    inds = inds.reshape(*inds.shape, *((1,) * (target.ndim - 1)))
    return inds.expand(-1, *target.shape[1:])
def expand0(x: torch.Tensor, B: int):
    """Prepend a batch dimension of size B (a broadcast view, no copy)."""
    return x.unsqueeze(0).expand(B, *x.shape)
def expand1(x: torch.Tensor, P: int):
    """Insert a dimension of size P right after the batch dim (broadcast view)."""
    return x.unsqueeze(1).expand(-1, P, *x.shape[1:])
def nonzero0(condition: torch.Tensor):
    """Indices along dim 0 where ``condition`` is truthy. MARK: triggers a GPU->CPU sync."""
    indices, *_ = condition.nonzero(as_tuple=True)
    return indices
|
evocodebench_data_93
|
import os
import re
import cv2
import h5py
import torch
import struct
import asyncio
import subprocess
import numpy as np
from PIL import Image
from io import BytesIO
from typing import overload
from functools import lru_cache
# from imgaug import augmenters as iaa
from typing import Tuple, Union, List, Dict
from torch.nn import functional as F
from torch.utils.data._utils.pin_memory import pin_memory
from torch.utils.data._utils.collate import default_collate, default_convert
from easyvolcap.utils.parallel_utils import parallel_execution
from easyvolcap.utils.base_utils import dotdict
from easyvolcap.utils.console_utils import *
from enum import Enum, auto
# Copied from enerf (maybe was in turn copied from dtu)
def read_pickle(name):
    """Load a pickle file with latin1 encoding (python2-era checkpoint compatibility)."""
    import pickle
    with open(name, 'rb') as fh:
        return pickle.load(fh, encoding='latin1')
def read_cam_file(filename):
    """Parse an MVSNet-style cam file.

    Layout: extrinsic 4x4 on lines [1, 5), intrinsic 3x3 on lines [7, 10),
    depth_min as the first token of line 11.
    Returns (intrinsics, extrinsics, depth_min).
    """
    with open(filename) as fh:
        lines = [ln.rstrip() for ln in fh]
    extrinsics = np.fromstring(' '.join(lines[1:5]), dtype=np.float32, sep=' ').reshape(4, 4)
    intrinsics = np.fromstring(' '.join(lines[7:10]), dtype=np.float32, sep=' ').reshape(3, 3)
    depth_min = float(lines[11].split()[0])
    return intrinsics, extrinsics, depth_min
def read_pmn_cam_file(filename):
    """Like read_cam_file, additionally parsing depth_max (second token of line 11).

    Returns (intrinsics, extrinsics, depth_min, depth_max).
    """
    with open(filename) as fh:
        lines = [ln.rstrip() for ln in fh]
    extrinsics = np.fromstring(' '.join(lines[1:5]), dtype=np.float32, sep=' ').reshape(4, 4)
    intrinsics = np.fromstring(' '.join(lines[7:10]), dtype=np.float32, sep=' ').reshape(3, 3)
    depth_min, depth_max = (float(tok) for tok in lines[11].split()[:2])
    return intrinsics, extrinsics, depth_min, depth_max
def read_pfm(filename):
    """Read a PFM (portable float map) image.

    Header: 'PF' (3-channel color) or 'Pf' (grayscale), then 'W H', then a
    scale whose sign encodes endianness (negative = little-endian). Rows are
    stored bottom-up, so the data is flipped vertically before returning.

    Returns:
        (data, scale): float32 ndarray of shape (H, W[, 3]) and the positive scale.

    Raises:
        Exception: when the header magic or dimensions are malformed.

    Fix over the original: the file is opened with a context manager, so the
    handle is no longer leaked when an exception is raised mid-parse.
    """
    with open(filename, 'rb') as file:
        header = file.readline().decode('utf-8').rstrip()
        if header == 'PF':
            color = True
        elif header == 'Pf':
            color = False
        else:
            raise Exception('Not a PFM file.')
        dim_match = re.match(r'^(\d+)\s(\d+)\s$', file.readline().decode('utf-8'))
        if dim_match:
            width, height = map(int, dim_match.groups())
        else:
            raise Exception('Malformed PFM header.')
        scale = float(file.readline().rstrip())
        if scale < 0:  # negative scale marks little-endian data
            endian = '<'
            scale = -scale
        else:
            endian = '>'  # big-endian
        data = np.fromfile(file, endian + 'f')
    shape = (height, width, 3) if color else (height, width)
    data = np.reshape(data, shape)
    data = np.flipud(data)  # PFM stores rows bottom-up
    return data, scale
def generate_video(result_str: str,
                   output: str,
                   fps: int = 30,
                   crf: int = 17,
                   cqv: int = 19,
                   lookahead: int = 20,
                   hwaccel: str = 'cuda',
                   preset: str = 'p7',
                   tag: str = 'hvc1',
                   vcodec: str = 'hevc_nvenc',
                   pix_fmt: str = 'yuv420p',  # chrome friendly
                   ):
    """Encode a glob of images (``result_str``) into ``output`` with ffmpeg.

    Defaults target NVENC HEVC with CUDA acceleration; returns ``output``.
    Both -cq:v and -crf are passed; which one takes effect depends on the codec.
    """
    cmd = [
        'ffmpeg',
        '-hwaccel', hwaccel,
        '-hide_banner',
        '-loglevel', 'error',
        '-framerate', fps,
        '-f', 'image2',
        '-pattern_type', 'glob',
        '-nostdin',  # otherwise you cannot chain commands together
        '-y',
        '-r', fps,
        '-i', result_str,
        '-c:v', vcodec,
        '-preset', preset,
        '-cq:v', cqv,
        '-rc:v', 'vbr',
        '-tag:v', tag,
        '-crf', crf,
        '-pix_fmt', pix_fmt,
        # NOTE(review): the embedded quotes only work if `run` joins cmd into a
        # shell command line; with an argv list ffmpeg would receive literal
        # quote characters in the filter — confirm against `run`'s implementation.
        '-rc-lookahead', lookahead,
        '-vf', '"pad=ceil(iw/2)*2:ceil(ih/2)*2"',  # avoid yuv420p odd number bug
        output,
    ]
    run(cmd)
    return output
def numpy_to_video(numpy_array: np.ndarray,
                   output_filename: str,
                   fps: float = 30.0,
                   crf: int = 18,
                   cqv: int = 19,
                   lookahead: int = 20,
                   preset='veryslow',
                   vcodec='libx265',
                   ):
    """
    Convert frames of shape (T, H, W, 3) to a video by piping raw rgb24
    frames into ffmpeg's stdin.

    Parameters:
    - numpy_array: frames to encode; a stacked ndarray or a sequence of (H, W, 3) arrays.
    - output_filename: output video path (parent directories are created).
    - fps: frame rate for the video.
    - crf / cqv / lookahead / preset / vcodec: encoder settings forwarded to ffmpeg.
      NOTE(review): both -cq:v and -crf are supplied; which one wins depends on
      the selected codec — confirm for vcodec != libx265.
    """
    # Accept either a stacked array or a list of per-frame arrays
    if isinstance(numpy_array, np.ndarray):
        T, H, W, C = numpy_array.shape
    else:
        T = len(numpy_array)
        H, W, C = numpy_array[0].shape
    assert C == 3, "Expected 3 channels!"
    cmd = [
        'ffmpeg',
        '-hwaccel', 'cuda',
        '-v', 'quiet', '-stats',
        '-y',  # Overwrite output file if it exists
        '-f', 'rawvideo',
        '-vcodec', 'rawvideo',
        '-s', f'{W}x{H}',  # Size of one frame
        '-pix_fmt', 'rgb24',
        '-r', fps,  # Frame rate
        '-i', '-',  # Read from pipe
        '-an',  # No audio
        '-vcodec', vcodec,
        '-preset', preset,
        '-cq:v', cqv,
        '-crf', crf,
        '-rc-lookahead', lookahead,
        '-rc:v', 'vbr',
        '-tag:v', 'hvc1',
        output_filename
    ]
    os.makedirs(dirname(output_filename), exist_ok=True)
    # map(str, ...): numeric options (fps, crf, ...) are stored as ints/floats
    process = subprocess.Popen(map(str, cmd), stdin=subprocess.PIPE)
    # process.communicate(input=numpy_array.tobytes())
    # Stream frame by frame to bound peak memory usage
    for frame in numpy_array:
        process.stdin.write(frame.tobytes())
    # process.stdin.flush()
    process.stdin.close()
    process.communicate()  # wait for the encoder to finish
def get_video_dimensions(input_filename):
    """
    Extract the width and height of a video using ffprobe.
    Parameters:
    - input_filename: The filename of the input video.
    Returns:
    - width and height of the video.
    """
    cmd = [
        'ffprobe',
        '-v', 'error',
        '-select_streams', 'v:0',  # first video stream only
        '-show_entries', 'stream=width,height',
        '-of', 'csv=s=x:p=0',  # emit "WxH" with no section prefix
        input_filename
    ]
    proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    out, _ = proc.communicate()
    width, height = (int(token) for token in out.decode('utf-8').strip().split('x'))
    return width, height
def video_to_numpy(input_filename, hwaccel='cuda', vcodec='hevc_cuvid'):
    """
    Convert a video file to a numpy array (T, H, W, C) using ffmpeg.
    Parameters:
    - input_filename: The filename of the input video.
    - hwaccel / vcodec: decoding backend; pass 'none' to omit either flag.
    Returns:
    - uint8 rgb24 numpy array representing the video.
    """
    W, H = get_video_dimensions(input_filename)
    cmd = [
        'ffmpeg',
    ]
    if hwaccel != 'none':
        cmd += ['-hwaccel', hwaccel,]
    cmd += [
        '-v', 'quiet', '-stats',
    ]
    if vcodec != 'none':
        cmd += ['-vcodec', vcodec,]
    cmd += [
        '-i', input_filename,
        '-f', 'image2pipe',
        '-pix_fmt', 'rgb24',
        '-vcodec', 'rawvideo',
        '-'
    ]
    # The entire raw stream is buffered into memory at once
    pipe = subprocess.Popen(map(str, cmd), stdout=subprocess.PIPE, stderr=subprocess.PIPE, bufsize=10**8)
    raw_data, _ = pipe.communicate()
    # Convert the raw data to numpy array and reshape
    video_np = np.frombuffer(raw_data, dtype=np.uint8)
    # Dimensions rounded up to even — presumably some decoders pad frames to
    # even sizes; the exact-size reshape below handles the unpadded case.
    H2, W2 = (H + 1) // 2 * 2, (W + 1) // 2 * 2
    try:
        video_np = video_np.reshape(-1, H2, W2, 3)[:, :H, :W, :]
    except ValueError as e:
        video_np = video_np.reshape(-1, H, W, 3)
    return video_np
class Visualization(Enum):
    """Tags selecting which quantity a renderer should output."""
    # Universal visualization
    RENDER = auto()  # plain rgb render output
    SURFACE = auto()  # surface position (similar to depth)
    DEFORM = auto()  # deformation magnitude (as in correspondence?)
    DEPTH = auto()  # needs a little bit extra computation
    ALPHA = auto()  # occupancy (rendered volume density)
    NORMAL = auto()  # needs extra computation
    FEATURE = auto()  # embedder results
    SEMANTIC = auto()  # semantic nerf related
    SRCINPS = auto()  # source input images for image-based rendering
    # jacobian related
    JACOBIAN = auto()
    # Relighting related
    ENVMAP = auto()
    ALBEDO = auto()
    SHADING = auto()
    ROUGHNESS = auto()
    # Geometry related output
    MESH = auto()
    POINT = auto()
    VOLUME = auto()
class DataSplit(Enum):
    """Dataset split selector (training / testing / validation)."""
    TRAIN = auto()
    TEST = auto()
    VAL = auto()
def variance_of_laplacian(image: np.ndarray):
    """Sharpness metric: variance of the Laplacian of the (grayscale, uint8) image.

    Multi-channel inputs are converted RGB -> gray; float images (assumed in
    [0, 1]) are quantized to uint8 first.
    """
    if image.ndim == 3 and image.shape[-1] > 1:
        image = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)
    if image.dtype in (np.float32, np.float64):
        image = (image * 255).astype(np.uint8)
    return cv2.Laplacian(image, cv2.CV_64F).var()
def alpha2sdf(alpha, beta, dists=0.005):
    """Invert rendered alpha back to an SDF value under a Laplace-style density.

    First recovers density from alpha over a step of length ``dists``, then
    maps it through ``beta * log(2 * beta * density)``.
    """
    density = -np.log(1 - alpha) / dists
    return beta * np.log(2 * beta * density)
def h5_to_dotdict(h5: h5py.File) -> dotdict:
    """Recursively load an h5py group into a dotdict; datasets become numpy arrays."""
    out = dotdict()
    for key in h5.keys():
        node = h5[key]
        out[key] = h5_to_dotdict(node) if isinstance(node, h5py.Group) else node[:]
    return out
def h5_to_list_of_dotdict(h5: h5py.File) -> list:
    """Load every top-level group of an h5 file as a dotdict, with a progress bar."""
    items = []
    for key in tqdm(h5):
        items.append(h5_to_dotdict(h5[key]))
    return items
def to_h5py(value, h5: h5py.File, key: str = None, compression: str = 'gzip'):
    """Recursively write tensors / arrays / lists / dicts into an h5 file.

    Tensors are converted to numpy; lists and dicts become groups (list items
    keyed by their position); arrays become compressed datasets.
    """
    if isinstance(value, torch.Tensor):
        value = value.detach().cpu().numpy()
    if isinstance(value, np.ndarray):
        h5.create_dataset(str(key), data=value, compression=compression)
    elif isinstance(value, (list, dict)):
        # A None key means "write into the current group" (top-level call)
        group = h5 if key is None else h5.create_group(str(key))
        pairs = enumerate(value) if isinstance(value, list) else value.items()
        for k, v in pairs:
            to_h5py(v, group, k)
    else:
        raise NotImplementedError(f'unsupported type to write to h5: {type(value)}')
def export_h5(batch: dotdict, filename):
    """Write a nested dotdict of tensors/arrays to an hdf5 file at ``filename``."""
    with h5py.File(filename, 'w') as fh:
        to_h5py(batch, fh)
def load_h5(filename):
    """Read an hdf5 file fully into memory as a nested dotdict of numpy arrays."""
    with h5py.File(filename, 'r') as fh:
        return h5_to_dotdict(fh)
def merge_faces(faces, *args):
    # Copied-from-trimesh helper: pick one attribute (e.g. UV) index per vertex
    """
    Resolve per-corner attribute indices (UVs, normals) down to one index per vertex.

    Textured meshes reference vertices (`v`) and, per face corner, separate
    attribute indices (`vt`, `vn`). This keeps the faces as-is and, for each
    attribute array, votes (via a median over the three corner slots) for a
    single attribute index per vertex.

    Parameters
    -------------
    faces : (n, d) int
        Vertex indices per face.
    *args : (n, d) int
        Parallel attribute index arrays (UV indices, normal indices, ...).

    Returns
    -------------
    [faces, mask_v, mask_*...]
        The unmodified faces, an identity vertex mask (arange), and one
        per-vertex attribute mask per argument.
    """
    result = [faces]
    # Highest vertex index referenced by the faces
    max_idx = faces.max()
    # Identity mask over the vertices
    result.append(np.arange(max_idx + 1))
    for arg in args:
        # One row per face-corner slot; conflicting assignments simply overwrite
        masks = np.zeros((3, max_idx + 1), dtype=np.int64)
        for slot in range(3):
            masks[slot][faces.T[slot]] = arg.T[slot]
        # Vote for the most representative attribute index per vertex
        # (float median, truncated back to int — cheap and good enough)
        result.append(np.median(masks, axis=0).astype(np.int64))
    return result
def get_mesh(verts: torch.Tensor, faces: torch.Tensor, uv: torch.Tensor = None, img: torch.Tensor = None, colors: torch.Tensor = None, normals: torch.Tensor = None, filename: str = "default.ply"):
    """Build a trimesh.Trimesh from tensors, with optional colors, normals and UV texture.

    When no colors are given, per-face normals remapped from [-1, 1] to [0, 1]
    are used. ``filename`` only names the material of textured meshes — nothing
    is written to disk here.
    """
    from trimesh import Trimesh
    from trimesh.visual import TextureVisuals
    from trimesh.visual.material import PBRMaterial, SimpleMaterial
    from easyvolcap.utils.mesh_utils import face_normals, loop_subdivision
    verts, faces = to_numpy([verts, faces])
    verts = verts.reshape(-1, 3)
    faces = faces.reshape(-1, 3)
    # MARK: used process=False here to preserve vertex order
    mesh = Trimesh(verts, faces, process=False)
    if colors is None:
        # colors = verts
        # Default coloring: face normals remapped from [-1, 1] to [0, 1]
        colors = face_normals(torch.from_numpy(verts), torch.from_numpy(faces).long()) * 0.5 + 0.5
    colors = to_numpy(colors)
    colors = colors.reshape(-1, 3)
    # Append an opaque alpha channel and quantize to 8-bit RGBA
    colors = (np.concatenate([colors, np.ones([*colors.shape[:-1], 1])], axis=-1) * 255).astype(np.uint8)
    # Attach as vertex or face colors, whichever count matches
    if len(verts) == len(colors):
        mesh.visual.vertex_colors = colors
    elif len(faces) == len(colors):
        mesh.visual.face_colors = colors
    if normals is not None:
        normals = to_numpy(normals)
        mesh.vertex_normals = normals
    if uv is not None:
        # Textured path: build a SimpleMaterial from the image (assumed in [0, 1])
        uv = to_numpy(uv)
        uv = uv.reshape(-1, 2)
        img = to_numpy(img)
        img = img.reshape(*img.shape[-3:])
        img = Image.fromarray(np.uint8(img * 255))
        mat = SimpleMaterial(
            image=img,
            diffuse=(0.8, 0.8, 0.8),
            ambient=(1.0, 1.0, 1.0),
        )
        # Material takes its name from the target file's basename
        mat.name = os.path.splitext(os.path.split(filename)[1])[0]
        texture = TextureVisuals(uv=uv, material=mat)
        mesh.visual = texture
    return mesh
def get_tensor_mesh_data(verts: torch.Tensor, faces: torch.Tensor, uv: torch.Tensor = None, img: torch.Tensor = None, uvfaces: torch.Tensor = None):
    """Normalize mesh buffers to flat tensors ready for pytorch3d-style export.

    Returns (verts (-1, 3), faces (-1, 3), uv (-1, 2), img (H, W, C), uvfaces (-1, 3)).
    """
    # pytorch3d wants tensors, so convert everything first
    verts, faces, uv, img, uvfaces = to_tensor([verts, faces, uv, img, uvfaces])
    return (
        verts.reshape(-1, 3),
        faces.reshape(-1, 3),
        uv.reshape(-1, 2),
        img.reshape(img.shape[-3:]),
        uvfaces.reshape(-1, 3),
    )
def export_npz(batch: dotdict, filename: str):
    """Save a (possibly nested) dotdict of tensors/arrays as a compressed .npz file.

    Thin alias of export_dotdict, kept for backward compatibility.
    Fix: the ``filename`` annotation mistakenly used the ``struct`` module as a
    type; it is a path string.
    """
    export_dotdict(batch, filename)
def export_dotdict(batch: dotdict, filename: str):
    """Convert every tensor in ``batch`` to numpy and save as a compressed .npz.

    Fix: the ``filename`` annotation mistakenly used the ``struct`` module as a
    type; it is a path string.
    """
    batch = to_numpy(batch)
    np.savez_compressed(filename, **batch)
def load_mesh(filename: str, device='cuda', load_uv=False, load_aux=False, backend='pytorch3d'):
    """Load a mesh from .npz / .ply / .obj (or anything trimesh supports).

    Args:
        filename: mesh path; the extension selects the loader.
        device: target device for the returned tensors.
        load_uv: also return UV coordinates (vm) and UV faces (fm).
        load_aux: additionally return pytorch3d's aux structure (obj only).
        backend: 'trimesh' short-circuits and returns (vertices, faces) arrays.

    Returns:
        (v, f), or (v, f, vm, fm) when load_uv, plus aux when load_aux.

    Raises:
        NotImplementedError: for unrecognized file extensions.

    Fixes over the original: the error message now actually interpolates the
    filename (the f-string had no placeholder), and ``aux`` is initialized so
    the npz + load_uv + load_aux path no longer hits a NameError.
    """
    from pytorch3d.io import load_ply, load_obj
    if backend == 'trimesh':
        import trimesh
        mesh: trimesh.Trimesh = trimesh.load(filename)
        return mesh.vertices, mesh.faces
    vm, fm, aux = None, None, None  # aux stays None unless an .obj is loaded
    if filename.endswith('.npz'):
        mesh = np.load(filename)
        v = torch.from_numpy(mesh['verts'])
        f = torch.from_numpy(mesh['faces'])
        if load_uv:
            vm = torch.from_numpy(mesh['uvs'])
            fm = torch.from_numpy(mesh['uvfaces'])
    else:
        if filename.endswith('.ply'):
            v, f = load_ply(filename)
        elif filename.endswith('.obj'):
            v, faces_attr, aux = load_obj(filename)
            f = faces_attr.verts_idx
            if load_uv:
                vm = aux.verts_uvs
                fm = faces_attr.textures_idx
        else:
            raise NotImplementedError(f'Unrecognized input format for: {filename}')
    v = v.to(device, non_blocking=True).contiguous()
    f = f.to(device, non_blocking=True).contiguous()
    if load_uv:
        vm = vm.to(device, non_blocking=True).contiguous()
        fm = fm.to(device, non_blocking=True).contiguous()
    if load_uv:
        if load_aux:
            return v, f, vm, fm, aux
        else:
            return v, f, vm, fm
    else:
        return v, f
def load_pts(filename: str):
    """Load a point cloud via pyntcloud.

    Returns (verts, colors, norms, scalars): positions (N, 3); colors in
    [0, 1] float32 from red/green/blue or r/g/b columns (None if absent);
    normals from nx/ny/nz (None if absent); and every remaining column as a
    (N, 1) array inside a dotdict.
    """
    from pyntcloud import PyntCloud
    cloud = PyntCloud.from_file(filename)
    points = cloud.points
    verts = cloud.xyz

    def stack3(a, b, c):
        # Stack three named columns into an (N, 3) array
        return np.stack([np.asarray(points[a]), np.asarray(points[b]), np.asarray(points[c])], axis=-1)

    if 'red' in points and 'green' in points and 'blue' in points:
        colors = (stack3('red', 'green', 'blue') / 255).astype(np.float32)
    elif 'r' in points and 'g' in points and 'b' in points:
        colors = (stack3('r', 'g', 'b') / 255).astype(np.float32)
    else:
        colors = None

    if 'nx' in points and 'ny' in points and 'nz' in points:
        norms = stack3('nx', 'ny', 'nz')
    else:
        norms = None

    # if 'alpha' in cloud.points:
    #     cloud.points['alpha'] = cloud.points['alpha'] / 255
    reserved = ['x', 'y', 'z', 'red', 'green', 'blue', 'r', 'g', 'b', 'nx', 'ny', 'nz']
    # One extra trailing dimension is added to every scalar channel
    scalars = dotdict({k: np.asarray(points[k])[..., None] for k in points if k not in reserved})
    return verts, colors, norms, scalars
def export_pts(pts: torch.Tensor, color: torch.Tensor = None, normal: torch.Tensor = None, scalars: dotdict = dotdict(), filename: str = "default.ply"):
    """Export a point cloud (optional colors, normals, scalar channels) via pyntcloud.

    Colors are assumed in [0, 1] and quantized to uint8; when omitted the point
    coordinates themselves are reused as colors. Normals are re-normalized.
    NOTE(review): ``scalars`` is a mutable default argument — safe only while
    nothing ever mutates it in place.
    """
    from pandas import DataFrame
    from pyntcloud import PyntCloud
    data = dotdict()
    pts = to_numpy(pts)  # always blocking?
    pts = pts.reshape(-1, 3)
    data.x = pts[:, 0].astype(np.float32)
    data.y = pts[:, 1].astype(np.float32)
    data.z = pts[:, 2].astype(np.float32)
    if color is not None:
        color = to_numpy(color)
        color = color.reshape(-1, 3)
        data.red = (color[:, 0] * 255).astype(np.uint8)
        data.green = (color[:, 1] * 255).astype(np.uint8)
        data.blue = (color[:, 2] * 255).astype(np.uint8)
    else:
        # Fall back to coordinates-as-colors (assumes roughly [0, 1] positions)
        data.red = (pts[:, 0] * 255).astype(np.uint8)
        data.green = (pts[:, 1] * 255).astype(np.uint8)
        data.blue = (pts[:, 2] * 255).astype(np.uint8)
    # if 'alpha' in scalars:
    #     data.alpha = (scalars.alpha * 255).astype(np.uint8)
    if normal is not None:
        normal = to_numpy(normal)
        # Re-normalize, with an epsilon guarding zero-length normals
        normal = normal / (np.linalg.norm(normal, axis=-1, keepdims=True) + 1e-13)
        normal = normal.reshape(-1, 3)
        data.nx = normal[:, 0].astype(np.float32)
        data.ny = normal[:, 1].astype(np.float32)
        data.nz = normal[:, 2].astype(np.float32)
    if scalars is not None:
        scalars = to_numpy(scalars)
        for k, v in scalars.items():
            v = v.reshape(-1, 1)
            data[k] = v[:, 0]
    df = DataFrame(data)
    cloud = PyntCloud(df)  # construct the data
    dir = dirname(filename)
    if dir: os.makedirs(dir, exist_ok=True)
    return cloud.to_file(filename)
def export_lines(verts: torch.Tensor, lines: torch.Tensor, color: torch.Tensor = None, filename: str = 'default.ply'):
    """Write line segments to a binary little-endian PLY file.

    Args:
        verts: point positions, reshaped to (-1, 3).
        lines: index pairs into verts, reshaped to (-1, 2).
        color: per-vertex colors; float32 values are treated as [0, 1] and
            quantized to uint8; defaults to the vertex positions themselves.
        filename: output path (parent directory must already exist).
    """
    if color is None:
        color = verts
    verts, lines, color = to_numpy([verts, lines, color])  # always blocking?
    # Only float32 colors are remapped; other dtypes are written as-is
    if color.dtype == np.float32:
        color = (color * 255).astype(np.uint8)
    verts = verts.reshape(-1, 3)
    lines = lines.reshape(-1, 2)
    color = color.reshape(-1, 3)
    # Write to PLY
    with open(filename, 'wb') as f:
        # PLY header
        f.write(b"ply\n")
        f.write(b"format binary_little_endian 1.0\n")
        f.write(f"element vertex {len(verts)}\n".encode())
        f.write(b"property float x\n")
        f.write(b"property float y\n")
        f.write(b"property float z\n")
        f.write(b"property uchar red\n")
        f.write(b"property uchar green\n")
        f.write(b"property uchar blue\n")
        f.write(f"element edge {len(lines)}\n".encode())
        f.write(b"property int vertex1\n")
        f.write(b"property int vertex2\n")
        f.write(b"end_header\n")
        # Write vertices and colors
        # NOTE(review): struct.pack uses native byte order ('fffBBB', not '<fffBBB')
        # while the header declares little-endian — fine on x86, verify elsewhere.
        for v, c in zip(verts, color):
            f.write(struct.pack('fffBBB', v[0], v[1], v[2], c[0], c[1], c[2]))
        # Write lines
        for l in lines:
            f.write(struct.pack('ii', l[0], l[1]))
def export_camera(c2w: torch.Tensor, ixt: torch.Tensor = None, col: torch.Tensor = torch.tensor([50, 50, 200]), axis_size=0.10, filename: str = 'default.ply'):
    """Export camera frusta as colored line segments in a PLY file.

    For each camera-to-world matrix (..., 3/4, 4) this draws the three local
    axes (colored RGB for XYZ) plus a pyramid-shaped frustum in ``col``; the
    frustum size/aspect follow the intrinsics ``ixt`` when provided.
    """
    verts = []
    lines = []
    rgbs = []
    def add_line(p0: torch.Tensor, p1: torch.Tensor, col: torch.Tensor):
        # Add a and b vertices
        verts.append(p0)  # N, M, 3
        verts.append(p1)  # N, M, 3
        sh = p0.shape[:-1]
        # Add the vertex colors
        col = torch.broadcast_to(col, sh + (3,))
        rgbs.append(col)
        rgbs.append(col)
        # Add the edge indices into the eventually-stacked vertex list
        new = p0.numel() // 3  # number of new elements
        curr = new * (len(verts) - 2)  # assume all previous elements are of the same size
        start = torch.arange(curr, curr + new)
        end = torch.arange(curr + new, curr + new * 2)
        line = torch.stack([start, end], dim=-1)  # NM, 2
        line = line.view(sh + (2,))
        lines.append(line)
    c2w = c2w[..., :3, :]
    p = c2w[..., 3]  # camera center (translation column)
    # Frustum aspect and size follow the intrinsics when provided
    if ixt is None: aspect = 1.0
    else: aspect = ixt[..., 0, 0][..., None] / ixt[..., 1, 1][..., None]
    if ixt is None: focal = 1000
    else: focal = (ixt[..., 0, 0][..., None] + ixt[..., 1, 1][..., None]) / 2
    axis_size = focal * axis_size / 1000
    xs = axis_size * aspect
    ys = axis_size
    zs = axis_size * aspect * 2
    # Four frustum corners in front of the camera
    a = p + xs * c2w[..., 0] + ys * c2w[..., 1] + zs * c2w[..., 2]
    b = p - xs * c2w[..., 0] + ys * c2w[..., 1] + zs * c2w[..., 2]
    c = p - xs * c2w[..., 0] - ys * c2w[..., 1] + zs * c2w[..., 2]
    d = p + xs * c2w[..., 0] - ys * c2w[..., 1] + zs * c2w[..., 2]
    # Local axes: x red, y green, z blue
    add_line(p, p + axis_size * c2w[..., 0], torch.tensor([255, 64, 64]))
    add_line(p, p + axis_size * c2w[..., 1], torch.tensor([64, 255, 64]))
    add_line(p, p + axis_size * c2w[..., 2], torch.tensor([64, 64, 255]))
    # Frustum edges: apex to each corner, then the far rectangle
    add_line(p, a, col)
    add_line(p, b, col)
    add_line(p, c, col)
    add_line(p, d, col)
    add_line(a, b, col)
    add_line(b, c, col)
    add_line(c, d, col)
    add_line(d, a, col)
    verts = torch.stack(verts)
    lines = torch.stack(lines)
    rgbs = torch.stack(rgbs)
    export_lines(verts, lines, rgbs, filename=filename)
def export_mesh(verts: torch.Tensor, faces: torch.Tensor, uv: torch.Tensor = None, img: torch.Tensor = None, uvfaces: torch.Tensor = None, colors: torch.Tensor = None, normals: torch.Tensor = None, filename: str = "default.ply", subdivision=0):
    """Export a mesh to .npz / .ply / .obj, optionally loop-subdividing it first.

    .npz stores the raw non-None buffers; .ply/.obj go through trimesh, or
    pytorch3d's save_obj when per-corner UV indices (``uvfaces``) are given.
    """
    if dirname(filename): os.makedirs(dirname(filename), exist_ok=True)
    if subdivision > 0:
        from easyvolcap.utils.mesh_utils import face_normals, loop_subdivision
        verts, faces = loop_subdivision(verts, faces, subdivision)
    if filename.endswith('.npz'):
        # Trick to collect all named buffers as a dict without listing them twice
        def collect_args(**kwargs): return kwargs
        kwargs = collect_args(verts=verts, faces=faces, uv=uv, img=img, uvfaces=uvfaces, colors=colors, normals=normals)
        ret = dotdict({k: v for k, v in kwargs.items() if v is not None})
        export_dotdict(ret, filename)
    elif filename.endswith('.ply') or filename.endswith('.obj'):
        if uvfaces is None:
            mesh = get_mesh(verts, faces, uv, img, colors, normals, filename)
            mesh.export(filename)
        else:
            from pytorch3d.io import save_obj
            verts, faces, uv, img, uvfaces = get_tensor_mesh_data(verts, faces, uv, img, uvfaces)
            save_obj(filename, verts, faces, verts_uvs=uv, faces_uvs=uvfaces, texture_map=img)
    else:
        # NOTE(review): this f-string has no placeholder; it likely should
        # interpolate the offending filename — confirm and fix upstream.
        raise NotImplementedError(f'Unrecognized input format for: (unknown)')
def export_pynt_pts_alone(pts, color=None, filename="default.ply"):
    """Export a bare point cloud to ``filename`` via pyntcloud.

    Args:
        pts: (N, 3) positions, ndarray or tensor.
        color: optional per-point uint8-like colors; when omitted the point
            coordinates (assumed roughly in [0, 1]) are quantized and reused.
        filename: output path; parent directories are created on demand.

    Fix over the original: ``dirname = dirname(filename)`` shadowed the
    imported ``dirname`` helper with a local variable, which made Python treat
    the call as a reference-before-assignment and raise UnboundLocalError on
    every invocation. An explicit os.path call with a distinct name is used.
    """
    import pandas as pd
    from pyntcloud import PyntCloud
    data = {}
    pts = pts if isinstance(pts, np.ndarray) else pts.detach().cpu().numpy()
    pts = pts.reshape(-1, 3)
    data['x'] = pts[:, 0].astype(np.float32)
    data['y'] = pts[:, 1].astype(np.float32)
    data['z'] = pts[:, 2].astype(np.float32)
    if color is not None:
        color = color if isinstance(color, np.ndarray) else color.detach().cpu().numpy()
        color = color.reshape(-1, 3)
        data['red'] = color[:, 0].astype(np.uint8)
        data['green'] = color[:, 1].astype(np.uint8)
        data['blue'] = color[:, 2].astype(np.uint8)
    else:
        data['red'] = (pts[:, 0] * 255).astype(np.uint8)
        data['green'] = (pts[:, 1] * 255).astype(np.uint8)
        data['blue'] = (pts[:, 2] * 255).astype(np.uint8)
    df = pd.DataFrame(data)
    cloud = PyntCloud(df)  # construct the data
    out_dir = os.path.dirname(filename)
    if out_dir: os.makedirs(out_dir, exist_ok=True)
    return cloud.to_file(filename)
def export_o3d_pts(pts: torch.Tensor, filename: str = "default.ply"):
    """Export bare points to a point-cloud file through open3d."""
    import open3d as o3d
    cloud = o3d.geometry.PointCloud()
    cloud.points = o3d.utility.Vector3dVector(to_numpy(pts).reshape(-1, 3))
    return o3d.io.write_point_cloud(filename, cloud)
def export_o3d_pcd(pts: torch.Tensor, rgb: torch.Tensor, normal: torch.Tensor, filename="default.ply"):
    """Export points together with colors and normals through open3d."""
    import open3d as o3d
    pts, rgb, normal = to_numpy([pts, rgb, normal])
    cloud = o3d.geometry.PointCloud()
    cloud.points = o3d.utility.Vector3dVector(pts.reshape(-1, 3))
    cloud.colors = o3d.utility.Vector3dVector(rgb.reshape(-1, 3))
    cloud.normals = o3d.utility.Vector3dVector(normal.reshape(-1, 3))
    return o3d.io.write_point_cloud(filename, cloud)
def export_pcd(pts: torch.Tensor, rgb: torch.Tensor, occ: torch.Tensor, filename="default.ply"):
    """Export a point cloud with per-point color and opacity to a PLY file.

    Colors/alpha are stored as floats in [0, 1]; duplicate r/g/b/a scalar
    fields are written alongside red/green/blue/alpha so CloudCompare can
    display them as scalar channels.

    Fix over the original: ``dirname = dirname(filename)`` shadowed the
    imported ``dirname`` helper with a local of the same name, raising
    UnboundLocalError on every call; replaced with an explicit os.path call.
    """
    import pandas as pd
    from pyntcloud import PyntCloud
    pts, rgb, occ = to_numpy([pts, rgb, occ])
    pts = pts.reshape(-1, 3)
    rgb = rgb.reshape(-1, 3)
    occ = occ.reshape(-1, 1)
    # MARK: CloudCompare bad, set first to 0, last to 1 (anchors the value range)
    for i in range(3):
        rgb[0, i] = 0
        rgb[-1, i] = 1
    occ[0, 0] = 0
    occ[-1, 0] = 1
    data = dotdict()
    data.x = pts[:, 0]
    data.y = pts[:, 1]
    data.z = pts[:, 2]
    # TODO: maybe, for compability, save color as uint?
    # currently saving as float number from [0, 1]
    data.red = rgb[:, 0]
    data.green = rgb[:, 1]
    data.blue = rgb[:, 2]
    data.alpha = occ[:, 0]
    # MARK: We're saving extra scalars for loading in CloudCompare
    # can't assign same property to multiple fields
    data.r = rgb[:, 0]
    data.g = rgb[:, 1]
    data.b = rgb[:, 2]
    data.a = occ[:, 0]
    df = pd.DataFrame(data)
    cloud = PyntCloud(df)  # construct the data
    out_dir = os.path.dirname(filename)
    if out_dir: os.makedirs(out_dir, exist_ok=True)
    return cloud.to_file(filename)
def load_rgb_image(img_path) -> np.ndarray:
    """Read an image from disk and return it in RGB channel order (cv2 reads BGR)."""
    # cv2.imread(img_path, cv2.IMREAD_COLOR)[..., ::-1].copy() would also work;
    # fancy indexing avoids the negative stride for later tensor conversion
    bgr = cv2.imread(img_path, cv2.IMREAD_COLOR)
    return bgr[..., [2, 1, 0]]
def load_unchanged_image(img_path) -> np.ndarray:
    """Read an image preserving its dtype and channel count (IMREAD_UNCHANGED)."""
    return cv2.imread(img_path, cv2.IMREAD_UNCHANGED)
def load_npz(index, folder):
    """Load ``{folder}/{index}.npz`` into a dotdict keyed by array name."""
    archive = np.load(os.path.join(folder, f"{index}.npz"))
    return dotdict({key: archive[key] for key in archive.files})
def load_dotdict(path):
    """Load a .npz archive at ``path`` as a dotdict keyed by array name."""
    archive = np.load(path)
    return dotdict({key: archive[key] for key in archive.files})
def start_save_npz(index, dir, param: dict, remove_batch=True):
    """Schedule an asynchronous npz save on the running event loop; returns the Task."""
    coro = async_save_npz(index, dir, param, remove_batch)
    return asyncio.create_task(coro)
async def async_save_npz(index, dir, param: dict, remove_batch=True):
    """Coroutine wrapper around save_npz (the save itself still blocks the loop)."""
    log(f"Trying to save: {index}")
    save_npz(index, dir, param, remove_batch)
def save_img(index, dir, img: torch.Tensor, remove_batch=True, remap=False, flip=False):
    """Write image(s) into ``dir`` as PNG files via OpenCV.

    Args:
        index: base index used to name the output file(s).
        dir: output directory (must already exist).
        img: image data; a leading batch dim is expected when remove_batch.
        remove_batch: write each batch slice as its own file, numbered
            ``index * n_batch + b``.
        remap: scale [0, 1] floats up to uint8.
        flip: reverse the channel order (RGB <-> BGR for cv2).
    """
    img = to_numpy(img)
    if remap:
        # NOTE(review): in-place scaling; if to_numpy returned a view sharing
        # memory with the caller's array this mutates it — confirm callers.
        img *= 255
        img = img.astype(np.uint8)
    if flip:
        img = img[..., ::-1]
    if remove_batch:
        n_batch = img.shape[0]
        for b in range(n_batch):
            # Global numbering so consecutive batches don't collide
            file_path = os.path.join(dir, f"{index*n_batch + b}.png")
            im = img[b]
            cv2.imwrite(file_path, im)
    else:
        file_path = os.path.join(dir, f"{index}.png")
        cv2.imwrite(file_path, img)
def save_npz(index, dir, param: dict, remove_batch=False):
    """Save a dict of arrays as compressed .npz, optionally splitting the batch dim.

    With remove_batch, each batch slice b goes to ``{index * n_batch + b}.npz``
    so consecutive batches produce a contiguous numbering.
    """
    param = to_numpy(param)
    if remove_batch:
        # Batch size taken from the first entry; all entries are assumed aligned
        n_batch = param[next(iter(param))].shape[0]
        for b in range(n_batch):
            single = {k: v[b] for k, v in param.items()}
            np.savez_compressed(os.path.join(dir, f"{index*n_batch + b}.npz"), **single)
    else:
        np.savez_compressed(os.path.join(dir, f"{index}.npz"), **param)
def to_cuda(batch, device="cuda", ignore_list: bool = False) -> torch.Tensor:
    """Recursively move every leaf to ``device``; dict entries keyed "meta" are left untouched."""
    if isinstance(batch, (tuple, list)):
        return [to_cuda(b, device, ignore_list) for b in batch]
    if isinstance(batch, dict):
        return dotdict({k: (v if k == "meta" else to_cuda(v, device, ignore_list)) for k, v in batch.items()})
    if isinstance(batch, torch.Tensor):
        return batch.to(device, non_blocking=True)
    # numpy arrays and plain Python values become tensors on the target device
    return torch.as_tensor(batch, device=device)
def to_x_if(batch, x: str, cond):
    """Conditionally move/cast leaf tensors (and numpy arrays) to device/dtype ``x``.

    ``cond`` is a predicate gating the conversion of tensor/array leaves.
    """
    if isinstance(batch, (tuple, list)):
        # NOTE(review): recursion goes through to_x, not to_x_if, so `cond` is
        # dropped for values nested inside containers — confirm this is intended.
        batch = [to_x(b, x) for b in batch]
    elif isinstance(batch, dict):
        batch = dotdict({k: to_x(v, x) for k, v in batch.items()})
    elif isinstance(batch, torch.Tensor):
        # NOTE(review): cond receives the *target* x, not the tensor, so it is
        # constant for the whole call — verify against actual callers.
        if cond(x):
            batch = batch.to(x, non_blocking=True)
    elif isinstance(batch, np.ndarray):  # numpy and others
        if cond(x):
            batch = torch.as_tensor(batch).to(x, non_blocking=True)
    else:
        pass  # do nothing here, used for typed in to_x for methods
    # FIXME: Incosistent behavior here, might lead to undebuggable bugs
    return batch
def to_x(batch, x: str) -> Union[torch.Tensor, dotdict[str, torch.Tensor]]:
    """Recursively move/cast tensors (and numpy arrays) to device or dtype ``x``.

    Unrecognized leaves pass through unchanged (also used for typed methods).
    """
    if isinstance(batch, (tuple, list)):
        return [to_x(b, x) for b in batch]
    if isinstance(batch, dict):
        return dotdict({k: to_x(v, x) for k, v in batch.items()})
    if isinstance(batch, torch.Tensor):
        return batch.to(x, non_blocking=True)
    if isinstance(batch, np.ndarray):
        return torch.as_tensor(batch).to(x, non_blocking=True)
    # FIXME: Incosistent behavior here, might lead to undebuggable bugs
    return batch
def to_tensor(batch, ignore_list: bool = False) -> Union[torch.Tensor, dotdict[str, torch.Tensor]]:
    """Recursively convert nested containers of arrays/values into torch tensors.

    Existing tensors are returned as-is; lists are left alone when ignore_list.
    """
    if isinstance(batch, (tuple, list)) and not ignore_list:
        return [to_tensor(b, ignore_list) for b in batch]
    if isinstance(batch, dict):
        return dotdict({k: to_tensor(v, ignore_list) for k, v in batch.items()})
    if isinstance(batch, torch.Tensor):
        return batch
    # numpy arrays and plain values
    return torch.as_tensor(batch)
def to_list(batch, non_blocking=False) -> Union[List, Dict, np.ndarray]:  # almost always exporting, should block
    """Recursively convert tensors and numpy arrays into plain Python lists.

    Tensors are detached and copied to CPU first; numpy arrays use ``tolist``;
    any other leaf passes through unchanged.

    Fix: the second branch previously re-tested ``torch.Tensor`` (unreachable
    copy-paste), so numpy arrays fell through to the no-op default and were
    returned unconverted; it now handles ``np.ndarray`` as clearly intended.
    """
    if isinstance(batch, (tuple, list)):
        batch = [to_list(b, non_blocking) for b in batch]
    elif isinstance(batch, dict):
        batch = dotdict({k: to_list(v, non_blocking) for k, v in batch.items()})
    elif isinstance(batch, torch.Tensor):
        batch = batch.detach().to('cpu', non_blocking=non_blocking).numpy().tolist()
    elif isinstance(batch, np.ndarray):
        batch = batch.tolist()
    else:  # others, keep as is
        pass
    return batch
def to_cpu(batch, non_blocking=False, ignore_list: bool = False) -> torch.Tensor:
    """Recursively move every leaf of `batch` onto cpu torch tensors."""
    if isinstance(batch, (tuple, list)) and not ignore_list:
        return [to_cpu(item, non_blocking, ignore_list) for item in batch]
    if isinstance(batch, dict):
        return dotdict({key: to_cpu(val, non_blocking, ignore_list) for key, val in batch.items()})
    if isinstance(batch, torch.Tensor):
        return batch.detach().to('cpu', non_blocking=non_blocking)
    # numpy arrays and scalars: construct the cpu tensor directly
    return torch.as_tensor(batch, device="cpu")
def to_numpy(batch, non_blocking=False, ignore_list: bool = False) -> Union[List, Dict, np.ndarray]:  # almost always exporting, should block
    """Recursively convert every leaf of `batch` to a numpy array."""
    if isinstance(batch, (tuple, list)) and not ignore_list:
        return [to_numpy(item, non_blocking, ignore_list) for item in batch]
    if isinstance(batch, dict):
        return dotdict({key: to_numpy(val, non_blocking, ignore_list) for key, val in batch.items()})
    if isinstance(batch, torch.Tensor):
        return batch.detach().to('cpu', non_blocking=non_blocking).numpy()
    # numpy arrays pass through np.asarray unchanged; scalars get wrapped
    return np.asarray(batch)
def remove_batch(batch) -> Union[torch.Tensor, np.ndarray]:
    """Recursively strip the leading batch dimension from every leaf."""
    if isinstance(batch, (tuple, list)):
        return [remove_batch(item) for item in batch]
    if isinstance(batch, dict):
        return dotdict({key: remove_batch(val) for key, val in batch.items()})
    if isinstance(batch, (torch.Tensor, np.ndarray)):  # numpy and others
        return batch[0]
    # scalars / other sequences: go through torch first, then index
    return torch.as_tensor(batch)[0]
def add_batch(batch) -> Union[torch.Tensor, np.ndarray]:
    """Recursively add a leading batch dimension to every leaf."""
    if isinstance(batch, (tuple, list)):
        return [add_batch(item) for item in batch]
    if isinstance(batch, dict):
        return dotdict({key: add_batch(val) for key, val in batch.items()})
    if isinstance(batch, (torch.Tensor, np.ndarray)):  # numpy and others
        return batch[None]
    # scalars / other sequences: go through torch first, then expand
    return torch.as_tensor(batch)[None]
def add_iter(batch, iter, total) -> Union[torch.Tensor, np.ndarray]:
    """Record the current iteration and the training fraction on `batch`."""
    batch = add_scalar(batch, iter, name="iter")  # raw iteration counter
    return add_scalar(batch, iter / total, name="frac")  # fraction of training completed
def add_scalar(batch, value, name) -> Union[torch.Tensor, np.ndarray]:
    """Store `value` as a tensor under `name` in `batch` and its 'meta' dict."""
    if isinstance(batch, (tuple, list)):
        for item in batch:
            add_scalar(item, value, name)  # recurse for side effects only
    if isinstance(batch, dict):
        # NOTE(review): assumes batch['meta'] already exists for dict batches
        batch[name] = torch.tensor(value)
        batch['meta'][name] = torch.tensor(value)
    return batch
def get_voxel_grid_and_update_bounds(voxel_size: Union[List, np.ndarray], bounds: Union[List, np.ndarray]):
    """Sample a regular voxel grid over `bounds` and return the covered bounds.

    Args:
        voxel_size: 3 per-axis voxel sizes, e.g. [0.005, 0.005, 0.005].
        bounds: 2, 3 initial axis-aligned bounds.
    Returns:
        pts: X, Y, Z, 3 float32 grid of voxel centers.
        bounds: 2, 3 float32 bounds spanned by the actual grid.

    FIX: the inputs are now always converted with np.asarray — previously
    `bounds` was converted only when `voxel_size` happened to be a python
    list (and the check used `isinstance(..., typing.List)`), so ndarray
    voxel sizes with list bounds crashed on the 2D indexing below.
    """
    # now here's the problem
    # 1. if you want the voxel size to be accurate, your bounds need to be changed along with this sampling process
    #    since F.grid_sample treats the bounds based on align_corners=True or not:
    #    the actual bound on the sampled tpose blend weight is determined by the actual sampling voxels,
    #    not the bound used to produce the voxels; THEY DO NOT LINE UP UNLESS the bounds are divisible by the voxel size in every direction
    # TODO: is it possible to somehow get rid of this book-keeping step
    voxel_size = np.asarray(voxel_size)  # no-op when already an ndarray
    bounds = np.asarray(bounds)
    # Half a voxel of slack on the stop value so the far face is included
    x = np.arange(bounds[0, 0], bounds[1, 0] + voxel_size[0] / 2, voxel_size[0])
    y = np.arange(bounds[0, 1], bounds[1, 1] + voxel_size[1] / 2, voxel_size[1])
    z = np.arange(bounds[0, 2], bounds[1, 2] + voxel_size[2] / 2, voxel_size[2])
    pts = np.stack(np.meshgrid(x, y, z, indexing='ij'), axis=-1).astype(np.float32)
    # The corrected bounds are the first and last sampled grid points
    bounds = np.stack([pts[0, 0, 0], pts[-1, -1, -1]], axis=0).astype(np.float32)
    return pts, bounds
def get_rigid_transform(pose: np.ndarray, joints: np.ndarray, parents: np.ndarray):
    """Numpy wrapper around the torch rigid-transform computation.

    pose: N, 3 axis-angle per joint; joints: N, 3 rest positions; parents: N.
    Returns (J, A) as numpy arrays.
    """
    from easyvolcap.utils.blend_utils import get_rigid_transform_nobatch as net_get_rigid_transform
    pose, joints, parents = default_convert([pose, joints, parents])  # numpy -> torch
    J, A = net_get_rigid_transform(pose, joints, parents)
    J, A = to_numpy([J, A])  # torch -> numpy for the caller
    return J, A
def get_bounds(xyz, padding=0.05):
    """Axis-aligned bounding box of the points `xyz` (N, 3), padded on all sides."""
    lower = np.min(xyz, axis=0) - padding
    upper = np.max(xyz, axis=0) + padding
    return np.stack([lower, upper], axis=0)  # 2, 3
def load_image_file(img_path: str, ratio=1.0):
    """Load an image from disk, normalizing integer pixels to float32 in [0, 1].

    JPEG files are decoded through PIL with `Image.draft` so the decoder can
    produce a downscaled image cheaply; other formats go through cv2 with a
    BGR -> RGB swap. `ratio` rescales the result with INTER_AREA.
    Returns an H, W, C array (channel dim kept even for single-channel masks).
    """
    if img_path.endswith('.jpg') or img_path.endswith('.JPG') or img_path.endswith('.jpeg') or img_path.endswith('.JPEG'):
        im = Image.open(img_path)
        w, h = im.width, im.height
        # Ask the JPEG decoder for a cheap (possibly downscaled) decode
        # NOTE(review): draft[1] is indexed below, so this assumes draft returns
        # a (mode, box) tuple -- confirm against the Pillow version in use
        draft = im.draft('RGB', (int(w * ratio), int(h * ratio)))
        img = np.asarray(im)
        if np.issubdtype(img.dtype, np.integer):
            img = img.astype(np.float32) / np.iinfo(img.dtype).max  # normalize
        # NOTE(review): `and` binds tighter than `or`, so the draft-size clause
        # on the right can trigger a resize even when ratio == 1.0
        if ratio != 1.0 and \
                draft is None or \
                draft is not None and \
                (draft[1][2] != int(w * ratio) or
                 draft[1][3] != int(h * ratio)):
            img = cv2.resize(img, (int(w * ratio), int(h * ratio)), interpolation=cv2.INTER_AREA)
            if img.ndim == 2:  # MARK: cv.resize will discard the last dimension of mask images
                img = img[..., None]
        return img
    else:
        img = cv2.imread(img_path, cv2.IMREAD_UNCHANGED)
        if img.ndim >= 3 and img.shape[-1] >= 3:
            img[..., :3] = img[..., [2, 1, 0]]  # BGR to RGB
        if np.issubdtype(img.dtype, np.integer):
            img = img.astype(np.float32) / np.iinfo(img.dtype).max  # normalize
        if ratio != 1.0:
            height, width = img.shape[:2]
            img = cv2.resize(img, (int(width * ratio), int(height * ratio)), interpolation=cv2.INTER_AREA)
            if img.ndim == 2:  # MARK: cv.resize will discard the last dimension of mask images
                img = img[..., None]
        return img
def load_depth(depth_file: str):
    """Load a depth map from .npy, .pfm, .hdr or .exr as an H, W, 1 array."""
    if depth_file.endswith('.npy'):
        return np.load(depth_file)[..., None]  # H, W, 1
    if depth_file.endswith('.pfm'):
        depth, scale = read_pfm(depth_file)
        depth = depth / scale  # PFM stores a scale factor alongside the data
        if depth.ndim == 2:
            depth = depth[..., None]  # H, W, 1
        return depth[..., :1]
    if depth_file.endswith('.hdr') or depth_file.endswith('.exr'):
        if depth_file.endswith('.exr'):
            # ... https://github.com/opencv/opencv/issues/21326
            os.environ["OPENCV_IO_ENABLE_OPENEXR"] = "1"
        return load_image(depth_file)[..., :1]  # keep only the first channel
    raise NotImplementedError
def load_image(path: Union[str, np.ndarray], ratio: int = 1.0):
    """Load an image either from a file path or from an encoded byte buffer.

    Args:
        path: filesystem path, or a numpy uint8 buffer of encoded bytes.
        ratio: optional resize ratio applied after decoding.
    Raises:
        NotImplementedError: for any other input type.
    """
    if isinstance(path, str):
        return load_image_file(path, ratio)
    elif isinstance(path, np.ndarray):
        return load_image_from_bytes(path, ratio)
    else:
        # FIX: the message used to read 'Supported overloading', inverting its meaning
        raise NotImplementedError(f'Unsupported overloading: {type(path)}')
def load_unchanged(img_path: str, ratio=1.0):
    """Load an image without dtype normalization (values keep their raw range).

    JPEGs go through PIL (already RGB); other formats through cv2 with the
    first three channels swapped from BGR to RGB. `ratio` optionally resizes.
    """
    if img_path.endswith('.jpg') or img_path.endswith('.JPG') or img_path.endswith('.jpeg') or img_path.endswith('.JPEG'):
        im = Image.open(img_path)
        w, h = im.width, im.height
        # NOTE(review): draft[1] is indexed below; assumes a (mode, box) tuple --
        # confirm against the Pillow version in use
        draft = im.draft('RGB', (int(w * ratio), int(h * ratio)))
        img = np.asarray(im).copy()  # avoid writing error and already in RGB instead of BGR
        # NOTE(review): `and` binds tighter than `or`; the draft-size clause can
        # trigger a resize even when ratio == 1.0
        if ratio != 1.0 and \
                draft is None or \
                draft is not None and \
                (draft[1][2] != int(w * ratio) or \
                 draft[1][3] != int(h * ratio)):
            img = cv2.resize(img, (int(w * ratio), int(h * ratio)), interpolation=cv2.INTER_AREA)
            if img.ndim == 2:  # MARK: cv.resize will discard the last dimension of mask images
                img = img[..., None]
        return img
    else:
        img = cv2.imread(img_path, cv2.IMREAD_UNCHANGED)
        if img.shape[-1] >= 3:
            img[..., :3] = img[..., [2, 1, 0]]  # BGR -> RGB, mutates the loaded array
        if ratio != 1.0:
            height, width = img.shape[:2]
            img = cv2.resize(img, (int(width * ratio), int(height * ratio)), interpolation=cv2.INTER_AREA)
            if img.ndim == 2:  # MARK: cv.resize will discard the last dimension of mask images
                img = img[..., None]
        return img
def load_mask(msk_path: str, ratio=1.0):
    """
    Load single-channel binary mask

    Returns an H, W, 1 uint8 array with values in {0, 1}: the raw mask is
    rescaled so its maximum maps to 255, then thresholded at 128.
    NOTE(review): an all-zero mask file would divide by zero in msk.max().
    """
    if msk_path.endswith('.jpg') or msk_path.endswith('.JPG') or msk_path.endswith('.jpeg') or msk_path.endswith('.JPEG'):
        msk = Image.open(msk_path)
        w, h = msk.width, msk.height
        # Ask the JPEG decoder for a cheap (possibly downscaled) grayscale decode
        draft = msk.draft('L', (int(w * ratio), int(h * ratio)))
        msk = np.asarray(msk).astype(int)  # read the actual file content from drafted disk
        msk = msk * 255 / msk.max()  # if max already 255, do nothing
        msk = msk[..., None] > 128  # make it binary
        msk = msk.astype(np.uint8)
        # NOTE(review): `and` binds tighter than `or`; the draft-size clause can
        # trigger a resize even when ratio == 1.0. Assumes draft is a (mode, box) tuple.
        if ratio != 1.0 and \
                draft is None or \
                draft is not None and \
                (draft[1][2] != int(w * ratio) or
                 draft[1][3] != int(h * ratio)):
            msk = cv2.resize(msk.astype(np.uint8), (int(w * ratio), int(h * ratio)), interpolation=cv2.INTER_NEAREST)[..., None]
        return msk
    else:
        msk = cv2.imread(msk_path, cv2.IMREAD_GRAYSCALE).astype(int)  # BGR to GRAY
        msk = msk * 255 / msk.max()  # if max already 255, do nothing
        msk = msk[..., None] > 128  # make it binary
        msk = msk.astype(np.uint8)
        if ratio != 1.0:
            height, width = msk.shape[:2]
            msk = cv2.resize(msk.astype(np.uint8), (int(width * ratio), int(height * ratio)), interpolation=cv2.INTER_NEAREST)[..., None]
            # WTF: https://stackoverflow.com/questions/68502581/image-channel-missing-after-resizing-image-with-opencv
        return msk
def save_unchanged(img_path: str, img: np.ndarray, quality=100, compression=6):
    """Save `img` to disk without normalization (RGB is swapped to BGR for cv2).

    FIX: the channel swap used to be written back into the caller's array,
    silently corrupting it; we now swap on a copy (mirrors save_image's
    copy-before-swap behavior).
    Returns cv2.imwrite's success flag.
    """
    if img.shape[-1] >= 3:
        img = img.copy()  # FIX: don't mutate the caller's array with the swap below
        img[..., :3] = img[..., [2, 1, 0]]  # RGB -> BGR for cv2
    if img_path.endswith('.hdr'):
        return cv2.imwrite(img_path, img)  # nothing to say about hdr
    if dirname(img_path):
        os.makedirs(dirname(img_path), exist_ok=True)
    return cv2.imwrite(img_path, img, [cv2.IMWRITE_JPEG_QUALITY, quality, cv2.IMWRITE_PNG_COMPRESSION, compression])
def save_image(img_path: str, img: np.ndarray, jpeg_quality=75, png_compression=9, save_dtype=np.uint8):
    """Save an image (tensor or array, float or integer) to disk via cv2.

    Handles torch tensors (moved to cpu), 4D batches (concatenated along y),
    missing channel dims, CHW layouts (heuristically transposed to HWC),
    integer inputs (normalized to float), and per-extension quantization.
    Returns cv2.imwrite's success flag.
    """
    if isinstance(img, torch.Tensor): img = img.detach().cpu().numpy()  # convert to numpy arrays
    if img.ndim == 4: img = np.concatenate(img, axis=0)  # merge into one image along y axis
    if img.ndim == 2: img = img[..., None]  # append last dim
    # Heuristic CHW -> HWC transpose for 3- or 4-channel-first layouts
    if img.shape[0] < img.shape[-1] and (img.shape[0] == 3 or img.shape[0] == 4): img = np.transpose(img, (1, 2, 0))
    if np.issubdtype(img.dtype, np.integer):
        img = img / np.iinfo(img.dtype).max  # to float
    if img.shape[-1] >= 3:
        if not img.flags['WRITEABLE']:
            img = img.copy()  # avoid assignment only inputs
        img[..., :3] = img[..., [2, 1, 0]]  # RGB -> BGR for cv2
    if dirname(img_path):
        os.makedirs(dirname(img_path), exist_ok=True)
    if img_path.endswith('.png'):
        max = np.iinfo(save_dtype).max  # NOTE(review): shadows the builtin `max`
        img = (img * max).clip(0, max).astype(save_dtype)
    elif img_path.endswith('.jpg'):
        img = img[..., :3]  # only color
        img = (img * 255).clip(0, 255).astype(np.uint8)
    elif img_path.endswith('.hdr'):
        img = img[..., :3]  # only color
    elif img_path.endswith('.exr'):
        # ... https://github.com/opencv/opencv/issues/21326
        os.environ["OPENCV_IO_ENABLE_OPENEXR"] = "1"
    else:
        # should we try to discard alpha channel here?
        # exr could store alpha channel
        pass  # no transformation for other unspecified file formats
    # log(f'Writing image to: {img_path}')
    # breakpoint()
    return cv2.imwrite(img_path, img, [cv2.IMWRITE_JPEG_QUALITY, jpeg_quality,
                                       cv2.IMWRITE_PNG_COMPRESSION, png_compression,
                                       cv2.IMWRITE_EXR_COMPRESSION, cv2.IMWRITE_EXR_COMPRESSION_PIZ])
def save_mask(msk_path: str, msk: np.ndarray, quality=75, compression=9):
    """Write a binary {0, 1} mask to disk, scaled to 0/255."""
    if dirname(msk_path):
        os.makedirs(dirname(msk_path), exist_ok=True)
    if msk.ndim == 2:
        msk = msk[..., None]  # ensure a channel dim before indexing below
    flags = [cv2.IMWRITE_JPEG_QUALITY, quality,
             cv2.IMWRITE_PNG_COMPRESSION, compression,
             cv2.IMWRITE_EXR_COMPRESSION, cv2.IMWRITE_EXR_COMPRESSION_PIZ]
    return cv2.imwrite(msk_path, msk[..., 0] * 255, flags)
def list_to_numpy(x: list):
    """Stack a list of H, W, C arrays into a single B, C, H, W array."""
    return np.stack(x).transpose(0, 3, 1, 2)
def numpy_to_list(x: np.ndarray):
    """Split a B, C, H, W array into a list of H, W, C arrays."""
    return [y for y in x.transpose(0, 2, 3, 1)]
def list_to_tensor(x: list, device='cuda'):
    """Convert a list of H, W, C numpy arrays to a B, C, H, W tensor on `device`."""
    return torch.from_numpy(list_to_numpy(x)).to(device, non_blocking=True)
def tensor_to_list(x: torch.Tensor):
    """Convert a B, C, H, W tensor to a list of H, W, C numpy arrays."""
    return numpy_to_list(x.detach().cpu().numpy())
def project(xyz, K, RT):
    """Project world points into pixel coordinates.

    xyz: [N, 3] world points; K: [3, 3] intrinsics; RT: [3, 4] extrinsics.
    Returns: [N, 2] pixel coordinates.
    """
    cam = np.dot(xyz, RT[:, :3].T) + RT[:, 3:].T  # world -> camera
    pix = np.dot(cam, K.T)  # camera -> homogeneous pixels
    return pix[:, :2] / pix[:, 2:]  # perspective divide
def unproject(depth, K, R, T):
    """Back-project a depth map (H, W) into world-space points (H, W, 3)."""
    H, W = depth.shape
    u, v = np.meshgrid(np.arange(W, dtype=np.float32),
                       np.arange(H, dtype=np.float32),
                       indexing='xy')
    homo = np.stack([u, v, np.ones_like(u)], axis=2)  # homogeneous pixel coords
    cam = np.dot(homo * depth[..., None], np.linalg.inv(K).T)  # pixels -> camera space
    return np.dot(cam - T.ravel(), R)  # camera -> world
def read_mask_by_img_path(data_root: str, img_path: str, erode_dilate_edge: bool = False, mask: str = '') -> np.ndarray:
    """Find and load the mask matching `img_path`, trying conventional folders.

    If `mask` names a directory, only that replacement of 'images' is tried
    (with .png fallbacks); otherwise a fixed list of mask folders (mask,
    mask_cihp, merged_mask, rvm, bgmt, ...) is probed in priority order,
    falling back to an all-ones mask when nothing exists on disk.
    """
    def read_mask_file(path):
        # Load as uint8 and drop a trailing channel dimension if present
        msk = load_mask(path).astype(np.uint8)
        if len(msk.shape) == 3:
            msk = msk[..., 0]
        return msk
    if mask:
        # Explicit mask folder: try as-is, with an appended .png, with a swapped .png
        msk_path = os.path.join(data_root, img_path.replace('images', mask))
        if not os.path.exists(msk_path):
            msk_path = os.path.join(data_root, img_path.replace('images', mask)) + '.png'
        if not os.path.exists(msk_path):
            msk_path = os.path.join(data_root, img_path.replace('images', mask))[:-4] + '.png'
        if not os.path.exists(msk_path):
            log(f'warning: defined mask path {msk_path} does not exist', 'yellow')
    else:
        # Probe the conventional mask directories in priority order
        msk_path = os.path.join(data_root, 'mask', img_path)[:-4] + '.png'
        if not os.path.exists(msk_path):
            # NOTE(review): this candidate is identical to the one just above (redundant probe)
            msk_path = os.path.join(data_root, 'mask', img_path)[:-4] + '.png'
        if not os.path.exists(msk_path):
            msk_path = os.path.join(data_root, 'mask_cihp', img_path)[:-4] + '.png'
        if not os.path.exists(msk_path):
            msk_path = os.path.join(data_root, img_path.replace('images', 'merged_mask'))[:-4] + '.png'
        if not os.path.exists(msk_path):
            msk_path = os.path.join(data_root, img_path.replace('images', 'rvm'))[:-4] + '.png'
        if not os.path.exists(msk_path):
            msk_path = os.path.join(data_root, img_path.replace('images', 'rvm'))[:-4] + '.jpg'
        if not os.path.exists(msk_path):
            msk_path = os.path.join(data_root, img_path.replace('images', 'mask'))[:-4] + '.png'
        if not os.path.exists(msk_path):  # background matte v2
            msk_path = os.path.join(data_root, img_path.replace('images', 'bgmt'))[:-4] + '.png'
        if not os.path.exists(msk_path):
            msk_path = os.path.join(data_root, img_path.replace('images', 'mask'))[:-4] + '.jpg'
        if not os.path.exists(msk_path):
            # Nothing found: warn and return an all-ones mask shaped like the image
            log(f'cannot find mask file: {msk_path}, using all ones', 'yellow')
            img = load_unchanged_image(os.path.join(data_root, img_path))
            msk = np.ones_like(img[:, :, 0]).astype(np.uint8)
            return msk
    msk = read_mask_file(msk_path)
    # erode edge inconsistence when evaluating and training
    if erode_dilate_edge:  # eroding edge on matte might erode the actual human
        msk = fill_mask_edge_with(msk)
    return msk
def fill_mask_edge_with(msk, border=5, value=100):
    """Paint the mask's boundary band (dilate minus erode) with `value`."""
    out = msk.copy()
    kernel = np.ones((border, border), np.uint8)
    eroded = cv2.erode(out.copy(), kernel)
    dilated = cv2.dilate(out.copy(), kernel)
    out[(dilated - eroded) == 1] = value  # NOTE: assumes a binary 0/1 mask
    return out
def get_rays_within_bounds_rendering(H, W, K, R, T, bounds):
    """Full-image rays plus per-pixel near/far and in-box mask for `bounds`."""
    ray_o, ray_d = get_rays(H, W, K, R, T)
    flat_o = ray_o.reshape(-1, 3).astype(np.float32)
    flat_d = ray_d.reshape(-1, 3).astype(np.float32)
    near, far, mask_at_box = get_full_near_far(bounds, flat_o, flat_d)
    # Restore per-pixel layout for everything
    return (flat_o.reshape(H, W, 3),
            flat_d.reshape(H, W, 3),
            near.reshape(H, W),
            far.reshape(H, W),
            mask_at_box.reshape(H, W))
def get_rays(H, W, K, R, T):
    """Per-pixel ray origins and directions, delegated to the torch implementation.

    Returns (ray_o, ray_d) as numpy arrays of shape H, W, 3.
    """
    # Delegate to the torch version in ray_utils; the older numpy
    # implementation that used to live here (commented out) has been removed
    from easyvolcap.utils.ray_utils import get_rays
    K, R, T = to_tensor([K, R, T])  # numpy -> torch
    ray_o, ray_d = get_rays(H, W, K, R, T)
    ray_o, ray_d = to_numpy([ray_o, ray_d])  # torch -> numpy
    return ray_o, ray_d
def get_near_far(bounds, ray_o, ray_d) -> Tuple[np.ndarray, np.ndarray]:
    """Ray/AABB intersection distances, delegated to the torch implementation.

    Returns (near, far) as numpy arrays for rays (ray_o, ray_d) against the
    axis-aligned box `bounds`.
    """
    from easyvolcap.utils.ray_utils import get_near_far_aabb
    bounds, ray_o, ray_d = to_tensor([bounds, ray_o, ray_d])  # no copy
    near, far = get_near_far_aabb(bounds, ray_o, ray_d)
    near, far = to_numpy([near, far])  # back to numpy for the caller
    return near, far
def get_full_near_far(bounds, ray_o, ray_d):
    """calculate intersections with 3d bounding box

    bounds: 2, 3 axis-aligned box corners; ray_o / ray_d: N, 3 (unnormalized).
    Returns per-ray (near, far) in units of the unnormalized direction plus a
    boolean mask of rays that actually cross the box.
    NOTE(review): uses ray_o[:1], i.e. assumes all rays share a single origin
    (one pinhole camera) -- confirm for non-camera callers.
    """
    norm_d = np.linalg.norm(ray_d, axis=-1, keepdims=True)
    viewdir = ray_d / norm_d
    # Clamp near-zero direction components to avoid division blow-ups
    # NOTE(review): the 1e-5 vs 1e-10 thresholds are asymmetric -- confirm intent
    viewdir[(viewdir < 1e-5) & (viewdir > -1e-10)] = 1e-5
    viewdir[(viewdir > -1e-5) & (viewdir < 1e-10)] = -1e-5
    tmin = (bounds[:1] - ray_o[:1]) / viewdir  # distances to the min-corner slabs
    tmax = (bounds[1:2] - ray_o[:1]) / viewdir  # distances to the max-corner slabs
    t1 = np.minimum(tmin, tmax)
    t2 = np.maximum(tmin, tmax)
    near = np.max(t1, axis=-1)  # last entering plane
    far = np.min(t2, axis=-1)  # first exiting plane
    mask_at_box = near < far  # slab test: the ray crosses the box iff near < far
    near = near / norm_d[..., 0]  # rescale to the unnormalized ray parameter
    far = far / norm_d[..., 0]
    return near, far, mask_at_box
def full_sample_ray(img, msk, K, R, T, bounds, split='train', subpixel=False):
    """Generate a ray for every pixel of `img`, with near/far against `bounds`.

    Returns (rgb, ray_o, ray_d, near, far, coords, mask_at_box), all flattened
    over the H*W pixels in row-major order.

    FIX: the local get_rays only accepts (H, W, K, R, T); passing `subpixel`
    through raised an unconditional TypeError.
    """
    H, W = img.shape[:2]
    # FIX: do not forward subpixel -- get_rays has no such parameter
    # NOTE(review): subpixel jitter is currently unsupported by the delegate
    ray_o, ray_d = get_rays(H, W, K, R, T)
    near, far, mask_at_box = get_full_near_far(bounds, ray_o, ray_d)
    msk = msk * mask_at_box  # zero the mask outside the bounding box
    coords = np.argwhere(np.ones_like(mask_at_box))  # every pixel, row-major
    ray_o = ray_o[coords[:, 0], coords[:, 1]].astype(np.float32)
    ray_d = ray_d[coords[:, 0], coords[:, 1]].astype(np.float32)
    near = near[coords[:, 0], coords[:, 1]].astype(np.float32)
    far = far[coords[:, 0], coords[:, 1]].astype(np.float32)
    rgb = img[coords[:, 0], coords[:, 1]].astype(np.float32)
    return rgb, ray_o, ray_d, near, far, coords, mask_at_box
def affine_inverse(m: np.ndarray):
    """Invert an affine transform given as a numpy array (torch-backed)."""
    import torch
    from easyvolcap.utils.math_utils import affine_inverse as _affine_inverse
    inv = _affine_inverse(torch.from_numpy(m))
    return inv.numpy()
def load_image_from_bytes(buffer: np.ndarray, ratio=1.0, normalize=False, decode_flag=cv2.IMREAD_UNCHANGED):
    """Decode an encoded image from an in-memory buffer via cv2.

    Accepts BytesIO, bytes/memoryview, torch tensors or numpy uint8 arrays.
    Optionally normalizes integer pixels to float32 in [0, 1] (with a
    BGR -> RGB swap) and resizes by `ratio` with INTER_AREA.
    Returns an H, W, C array.
    """
    # from nvjpeg import NvJpeg
    # if not hasattr(load_image_from_bytes, 'nj'):
    #     load_image_from_bytes.nj = NvJpeg()
    # nj: NvJpeg = load_image_from_bytes.nj
    def normalize_image(image):
        image = torch.from_numpy(image)  # pytorch is significantly faster than np
        if image.ndim >= 3 and image.shape[-1] >= 3:
            # NOTE(review): this in-place write goes through memory shared with
            # the decoded numpy array, mutating it as a side effect
            image[..., :3] = image[..., [2, 1, 0]]
        image = image / torch.iinfo(image.dtype).max
        image = image.float()
        return image.numpy()
    # Normalize the container type down to a uint8 numpy array for cv2
    if isinstance(buffer, BytesIO):
        buffer = buffer.getvalue()  # slow? copy?
    if isinstance(buffer, memoryview) or isinstance(buffer, bytes):
        buffer = np.frombuffer(buffer, np.uint8)
    if isinstance(buffer, torch.Tensor):
        buffer = buffer.numpy()
    buffer = buffer.astype(np.uint8)
    image: np.ndarray = cv2.imdecode(buffer, decode_flag)  # MARK: 10-15ms
    # image: np.ndarray = nj.decode(np.frombuffer(buffer, np.uint8))  # MARK: 10-15ms
    # if decode_flag == cv2.IMREAD_GRAYSCALE:
    #     image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    if image.ndim == 2:
        image = image[..., None]  # keep an explicit channel dim for grayscale/masks
    if normalize:
        image = normalize_image(image)  # MARK: 3ms
    height, width = image.shape[:2]
    if ratio != 1.0:
        image = cv2.resize(image, (int(width * ratio), int(height * ratio)), interpolation=cv2.INTER_AREA)
    return image
def as_torch_func(func):
    """Wrap a numpy function so it accepts and returns torch tensors."""
    def wrapper(*args, **kwargs):
        out = func(*to_numpy(args), **to_numpy(kwargs))
        return to_tensor(out)
    return wrapper
def as_numpy_func(func):
    """Wrap a torch function so it accepts and returns numpy arrays."""
    def wrapper(*args, **kwargs):
        out = func(*to_tensor(args), **to_tensor(kwargs))
        return to_numpy(out)
    return wrapper
def load_image_bytes(im: str):
    """Read the raw encoded bytes of an image file from disk."""
    if im.endswith('.exr'):
        # cv2 needs this env var set before OpenEXR decoding, see opencv/opencv#21326
        os.environ["OPENCV_IO_ENABLE_OPENEXR"] = "1"
    with open(im, "rb") as fh:
        return fh.read()
class UnstructuredTensors(torch.Tensor):
    # A flat tensor that stores a ragged collection of 1D tensors back-to-back,
    # recording per-element lengths and start offsets so items stay indexable.
    # https://github.com/pytorch/pytorch/issues/13246#issuecomment-617140519
    # https://github.com/pytorch/pytorch/issues/69893
    @staticmethod
    def __new__(cls, bytes: Union[List[np.ndarray], List[torch.Tensor], np.ndarray], **kwargs):
        """
        Creates a new UnstructuredTensors object from the given bytes.
        Args:
        - bytes (Union[List[np.ndarray], List[torch.Tensor], np.ndarray]): The bytes to create the object from.
        Returns:
        - self (UnstructuredTensors): The new UnstructuredTensors object.
        """
        if isinstance(bytes, UnstructuredTensors):
            return bytes  # already flattened, reuse as-is
        # Prepare the bytes array
        if isinstance(bytes, np.ndarray):
            bytes = [b for b in bytes]  # iterate the outer dimension into a list
        if bytes[0].dtype == object:
            bytes = [b.astype(np.uint8) for b in bytes]  # object arrays hold raw byte buffers
        bytes = to_tensor(bytes)  # now, every element is a list
        dtype = torch.uint8
        if len(bytes):
            dtype = bytes[0].dtype  # inherit the element dtype when available
        # Create an empty tensor
        self = torch.Tensor.__new__(cls).to(dtype)
        # Remember accessing related configs
        self.set_(torch.cat(bytes))  # flatten # sum(N)
        self.lengths = torch.as_tensor([len(b) for b in bytes], dtype=torch.int32)  # N,
        # Start offset of each element inside the flat storage
        self.cumsums = torch.cat([torch.as_tensor([0]), torch.cumsum(self.lengths, dim=0)[:-1]])
        return self
    @property
    def is_unstructured(self): return hasattr(self, 'lengths')  # plain tensors lack the bookkeeping
    def __getitem__(self, index: int):
        """
        Return the `index`-th stored element as a view into the flat storage,
        using the recorded offset and length. Non-ragged tensors fall back to
        normal tensor indexing, which lets this type masquerade as a tensor.
        """
        if self.is_unstructured:
            return torch.Tensor.__getitem__(self, slice(self.cumsums[index], self.cumsums[index] + self.lengths[index]))
        else:
            return super().__getitem__(index)
    def __len__(self):
        # Number of stored elements, not the flat storage length
        if self.is_unstructured:
            return len(self.lengths)
        else:
            return super().__len__()
    def clone(self, *args, **kwargs):
        if self.is_unstructured:
            # manual cloning with copy and reconstruction
            return UnstructuredTensors([self[i] for i in range(len(self.lengths))])
        else:
            return super().clone(*args, **kwargs)
def load_ims_bytes_from_disk(ims: np.ndarray, desc="Loading image bytes from disk"):
    """Read raw bytes for every image path in `ims` (in parallel), keeping its shape."""
    shape = ims.shape
    flat = ims.ravel()
    loaded = parallel_execution(list(flat), action=load_image_bytes, desc=desc, print_progress=True)
    return np.asarray(loaded).reshape(shape)  # reorganize shapes
def load_resize_undist_im_bytes(imp: str,
                                K: np.ndarray,
                                D: np.ndarray,
                                ratio: Union[float, List[int]] = 1.0,
                                center_crop_size: List[int] = [-1, -1],
                                encode_ext='.jpg',
                                decode_flag=cv2.IMREAD_UNCHANGED,
                                dist_opt_K: bool = False,
                                jpeg_quality: int = 100,
                                png_compression: int = 6
                                ):
    """Load one image -> undistort -> optionally resize -> optionally center
    crop -> re-encode, returning (encoded_bytes, updated_K, H, W).

    FIX: H and W are now initialized up front instead of being probed with
    `'H' not in locals()`; previously a center crop requested at ratio == 1.0
    hit a NameError because H, W were never assigned before the crop call.
    """
    # Load image and keep only the color channels
    img = load_image_from_bytes(load_image_bytes(imp), decode_flag=decode_flag)[..., :3]  # cv2 decoding (fast)
    oH, oW = img.shape[:2]
    H, W = oH, oW  # FIX: track the output size explicitly from the start
    if dist_opt_K:
        # Undistort onto an optimal new intrinsic matrix
        newCameraMatrix, _ = cv2.getOptimalNewCameraMatrix(K, D, (oW, oH), 0, (oW, oH))
        img = cv2.undistort(img, K, D, newCameraMatrix=newCameraMatrix)
        K = newCameraMatrix
    else:
        img = cv2.undistort(img, K, D)
    # Maybe update image size
    if not (isinstance(ratio, float) and ratio == 1.0):
        if isinstance(ratio, float):
            H, W = int(oH * ratio), int(oW * ratio)
        else:
            H, W = ratio  # ratio is actually the target image size
        rH, rW = H / oH, W / oW
        K = K.copy()  # scale intrinsics without touching the caller's matrix
        K[0:1] = K[0:1] * rW  # K[0, 0] *= rW
        K[1:2] = K[1:2] * rH  # K[1, 1] *= rH
        img = cv2.resize(img, (W, H), interpolation=cv2.INTER_AREA)  # H, W, 3, uint8
    # Crop the image and intrinsic matrix if specified
    if center_crop_size[0] > 0:
        img, K, H, W = center_crop_img_ixt(img, K, H, W, center_crop_size)
    is_success, buffer = cv2.imencode(encode_ext, img, [cv2.IMWRITE_JPEG_QUALITY, jpeg_quality, cv2.IMWRITE_PNG_COMPRESSION, png_compression])
    return buffer, K, H, W
def center_crop_img_ixt(img: np.ndarray, K: np.ndarray, H: int, W: int,
                        center_crop_size: Union[int, List[int]]):
    """Center-crop `img` to `center_crop_size` and shift the principal point.

    Args:
        img: H, W, C image; K: 3, 3 intrinsics; H, W: current size.
        center_crop_size: target size (int for square, or [cH, cW]).
    Returns:
        (cropped_img, updated_K, cH, cW).

    FIX: K is copied before modification; previously the caller's intrinsics
    matrix was mutated in place even though an updated K is also returned.
    """
    # Parse the original size and the target crop size
    oH, oW = H, W
    if isinstance(center_crop_size, int): cH, cW = center_crop_size, center_crop_size
    else: cH, cW = center_crop_size
    # Compute left and right crop size for height and width respectively
    hlc, wlc = int((oH - cH) * 0.5), int((oW - cW) * 0.5)
    hrc, wrc = oH - cH - hlc, oW - cW - wlc
    # Crop the image
    if hlc != 0: img = img[hlc:-hrc, :]
    if wlc != 0: img = img[:, wlc:-wrc]
    # Shift the principal point on a copy of the intrinsic matrix
    K = K.copy()  # FIX: do not mutate the caller's K
    if hlc != 0: K[1, 2] -= hlc
    if wlc != 0: K[0, 2] -= wlc
    return img, K, cH, cW
def load_resize_undist_ims_bytes(ims: np.ndarray,
                                 Ks: np.ndarray,
                                 Ds: np.ndarray,
                                 ratio: Union[float, List[int], List[float]] = 1.0,
                                 center_crop_size: List[int] = [-1, -1],
                                 desc="Loading image bytes from disk",
                                 **kwargs):
    """Load, undistort, resize and re-encode a V x N grid of images in parallel.

    Flattens the V, N inputs for parallel_execution and restores the leading
    shape on the way out. Returns (ims_bytes, Ks, Hs, Ws).
    """
    sh = ims.shape  # V, N
    # Ks = np.broadcast_to(Ks[:, None], (sh + (3, 3)))
    # Ds = np.broadcast_to(Ds[:, None], (sh + (1, 5)))
    ims = ims.reshape((np.prod(sh)))  # flatten for parallel execution
    # from easyvolcap.utils.dist_utils import get_rank
    # if not get_rank(): __import__('easyvolcap.utils.console_utils', fromlist=['debugger']).debugger()
    # else:
    #     while 1: pass
    Ks = Ks.reshape((np.prod(sh), 3, 3))
    Ds = Ds.reshape((np.prod(sh), 1, 5))
    ims = list(ims)
    Ks = list(Ks)
    Ds = list(Ds)  # only convert outer most dim to list
    if isinstance(ratio, list) and len(ratio) and isinstance(ratio[0], float):
        # Per-view float ratios: broadcast over frames, flatten alongside the paths
        ratio = np.broadcast_to(np.asarray(ratio)[:, None], sh)  # V, N
        ratio = ratio.reshape((np.prod(sh)))
        ratio = list(ratio)
    elif isinstance(ratio, list):
        ratio = np.asarray(ratio)  # avoid expansion in parallel execution
    if isinstance(center_crop_size, list):
        center_crop_size = np.asarray(center_crop_size)  # avoid expansion
    # Should we batch these instead of loading?
    out = parallel_execution(ims, Ks, Ds, ratio, center_crop_size,
                             action=load_resize_undist_im_bytes,
                             desc=desc, print_progress=True,
                             **kwargs,
                             )
    ims_bytes, Ks, Hs, Ws = zip(*out)  # is this OK?
    ims_bytes, Ks, Hs, Ws = np.asarray(ims_bytes, dtype=object), np.asarray(Ks), np.asarray(Hs), np.asarray(Ws)
    # ims_bytes = ims_bytes.reshape(sh)  # numpy array of bytesio
    Hs = Hs.reshape(sh)  # should all be the same?
    Ws = Ws.reshape(sh)  # should all be the same?
    Ks = Ks.reshape(sh + (3, 3))  # should all be the same?
    return ims_bytes, Ks, Hs, Ws
def decode_crop_fill_im_bytes(im_bytes: BytesIO,
                              mk_bytes: BytesIO,
                              K: np.ndarray,
                              R: np.ndarray,
                              T: np.ndarray,
                              bounds: np.ndarray,
                              encode_ext=['.jpg', '.jpg'],
                              decode_flag=cv2.IMREAD_UNCHANGED,
                              jpeg_quality: int = 100,
                              png_compression: int = 6,
                              **kwargs):
    """Decode an image/mask pair, crop to the 3D bounds' 2D projection and the
    mask's bounding rect, premultiply by the mask, and re-encode both.

    Returns (im_bytes, mk_bytes, K, h, w, x, y) where x, y is the accumulated
    crop offset. NOTE(review): K is modified in place -- the caller must not
    reuse the matrix it passed in.
    """
    # im_bytes: a series of jpeg bytes for the image
    # mk_bytes: a series of jpeg bytes for the mask
    # K: 3, 3 intrinsics matrix
    # Use load_image_from_bytes to decode and update jpeg streams
    img = load_image_from_bytes(im_bytes, decode_flag=decode_flag)  # H, W, 3
    msk = load_image_from_bytes(mk_bytes, decode_flag=decode_flag)  # H, W, 3
    # Crop both mask and the image using bbox's 2D projection
    H, W, _ = img.shape
    from easyvolcap.utils.bound_utils import get_bound_2d_bound
    bx, by, bw, bh = as_numpy_func(get_bound_2d_bound)(bounds, K, R, T, H, W)
    img = img[by:by + bh, bx:bx + bw]
    msk = msk[by:by + bh, bx:bx + bw]
    # Crop the image using the bounding rect of the mask
    mx, my, mw, mh = cv2.boundingRect((msk > 128).astype(np.uint8))  # array data type = 0 is not supported
    img = img[my:my + mh, mx:mx + mw]
    msk = msk[my:my + mh, mx:mx + mw]
    # Update the final size and intrinsics
    x, y, w, h = bx + mx, by + my, mw, mh  # w and h will always be the smaller one, xy will be accumulated
    K[0, 2] -= x
    K[1, 2] -= y
    # Fill the image with black (premultiply by mask)
    img = (img * (msk / 255)).clip(0, 255).astype(np.uint8)  # fill with black, indexing starts at the front
    # Reencode the videos and masks
    if isinstance(encode_ext, str): encode_ext = [encode_ext] * 2  # '.jpg' -> ['.jpg', '.jpg']
    im_bytes = cv2.imencode(encode_ext[0], img, [cv2.IMWRITE_JPEG_QUALITY, jpeg_quality, cv2.IMWRITE_PNG_COMPRESSION, png_compression])[1]  # is_sucess, bytes_array
    mk_bytes = cv2.imencode(encode_ext[1], msk, [cv2.IMWRITE_JPEG_QUALITY, jpeg_quality, cv2.IMWRITE_PNG_COMPRESSION, png_compression])[1]  # is_sucess, bytes_array
    return im_bytes, mk_bytes, K, h, w, x, y
def decode_crop_fill_ims_bytes(ims_bytes: np.ndarray, mks_bytes: np.ndarray, Ks: np.ndarray, Rs: np.ndarray, Ts: np.ndarray, bounds: np.ndarray,
                               desc="Cropping images using mask", **kwargs):
    """Run decode_crop_fill_im_bytes over a flattened V*N batch in parallel,
    then restore the leading V, N shape on every returned array."""
    sh = Ks.shape[:2]  # V, N
    n = np.prod(sh)
    flat_Ks = Ks.reshape((n, 3, 3))
    flat_Rs = Rs.reshape((n, 3, 3))
    flat_Ts = Ts.reshape((n, 3, 1))
    flat_bounds = bounds.reshape((n, 2, 3))
    out = parallel_execution(list(ims_bytes), list(mks_bytes), list(flat_Ks), list(flat_Rs), list(flat_Ts), list(flat_bounds),
                             action=decode_crop_fill_im_bytes,
                             desc=desc, print_progress=True,
                             **kwargs,
                             )
    ims_bytes, mks_bytes, Ks, Hs, Ws, xs, ys = zip(*out)
    # Encoded buffers stay as object arrays; numeric outputs regain the V, N shape
    ims_bytes = np.asarray(ims_bytes, dtype=object)
    mks_bytes = np.asarray(mks_bytes, dtype=object)
    Ks = np.asarray(Ks).reshape(sh + (3, 3))
    Hs = np.asarray(Hs).reshape(sh)
    Ws = np.asarray(Ws).reshape(sh)
    xs = np.asarray(xs).reshape(sh)
    ys = np.asarray(ys).reshape(sh)
    return ims_bytes, mks_bytes, Ks, Hs, Ws, xs, ys
def decode_fill_im_bytes(im_bytes: BytesIO,
                         mk_bytes: BytesIO,
                         encode_ext='.jpg',
                         decode_flag=cv2.IMREAD_UNCHANGED,
                         jpeg_quality: int = 100,
                         png_compression: int = 6,
                         **kwargs):
    """Decode an image and its mask, premultiply by the mask, and re-encode."""
    img = load_image_from_bytes(im_bytes, decode_flag=decode_flag)  # H, W, 3
    msk = load_image_from_bytes(mk_bytes, decode_flag=decode_flag)  # H, W, 3
    filled = (img * (msk / 255)).clip(0, 255).astype(np.uint8)  # black background
    flags = [cv2.IMWRITE_JPEG_QUALITY, jpeg_quality, cv2.IMWRITE_PNG_COMPRESSION, png_compression]
    return cv2.imencode(encode_ext, filled, flags)[1]  # (is_success, bytes)[1]
def decode_fill_ims_bytes(ims_bytes: np.ndarray,
                          mks_bytes: np.ndarray,
                          desc="Filling images using mask",
                          **kwargs):
    """Apply decode_fill_im_bytes to every image/mask pair, preserving the shape."""
    sh = ims_bytes.shape  # V, N
    flat_ims = ims_bytes.reshape((np.prod(sh)))
    flat_mks = mks_bytes.reshape((np.prod(sh)))
    filled = parallel_execution(list(flat_ims), list(flat_mks),
                                action=decode_fill_im_bytes,
                                desc=desc, print_progress=True,
                                **kwargs,
                                )
    return np.asarray(filled, dtype=object).reshape(sh)
def batch_rodrigues(poses):
    """Convert axis-angle vectors (N, 3) to rotation matrices (N, 3, 3).

    Rodrigues formula: R = I + sin(a) K + (1 - cos(a)) K^2 where K is the
    skew-symmetric cross-product matrix of the (normalized) rotation axis.
    """
    n = poses.shape[0]
    angle = np.linalg.norm(poses + 1e-8, axis=1, keepdims=True)  # epsilon avoids 0-division
    axis = poses / angle
    cos = np.cos(angle)[:, None]
    sin = np.sin(angle)[:, None]
    rx, ry, rz = np.split(axis, 3, axis=1)
    zeros = np.zeros([n, 1])
    # Skew-symmetric cross-product matrices, one per pose
    K = np.concatenate([zeros, -rz, ry, rz, zeros, -rx, -ry, rx, zeros],
                       axis=1).reshape([n, 3, 3])
    R = np.eye(3)[None] + sin * K + (1 - cos) * np.matmul(K, K)
    return R.astype(np.float32)
def get_rigid_transformation_and_joints(poses, joints, parents):
    """
    poses: n_bones x 3
    joints: n_bones x 3
    parents: n_bones

    Forward kinematics: builds per-bone 4x4 world transforms from axis-angle
    poses and the rest-pose joint tree, then converts them into rest-to-posed
    rigid transforms. Returns (transforms: n_bones x 4 x 4 float32,
    posed_joints: n_bones x 3).
    """
    n_bones = len(joints)
    rot_mats = batch_rodrigues(poses)
    # Obtain the relative joints (each joint expressed relative to its parent)
    rel_joints = joints.copy()
    rel_joints[1:] -= joints[parents[1:]]
    # Create the transformation matrix
    # First rotate then transform
    transforms_mat = np.concatenate([rot_mats, rel_joints[..., None]], axis=2)
    padding = np.zeros([n_bones, 1, 4])
    padding[..., 3] = 1  # homogeneous bottom row [0, 0, 0, 1]
    transforms_mat = np.concatenate([transforms_mat, padding], axis=1)
    # Rotate each part
    # But this is a world transformation, with displacement...?
    transform_chain = [transforms_mat[0]]
    for i in range(1, parents.shape[0]):  # assuming parents are in topological order
        curr_res = np.dot(transform_chain[parents[i]], transforms_mat[i])  # THEY'RE RIGHT, LEARN FORWARD KINEMATICS
        transform_chain.append(curr_res)
    transforms = np.stack(transform_chain, axis=0)
    # Obtain the rigid transformation
    # AND THIS WEIRD STUFF IS TRYING TO MOVE VERTEX FROM VERTEX COORDINATES TO JOINT COORDINATES
    # AND THIS IS THE CORRECT IMPLEMENTATION...
    # THIS IS JUST TOO CLEVER...
    # These three lines are effectively doing: transforms = transforms * (negative translation matrix for all joints)
    joints_vector = np.concatenate([joints, np.zeros([n_bones, 1])], axis=1)
    rot_joints = np.sum(transforms * joints_vector[:, None], axis=2)  # This is effectively matmul
    transforms[..., 3] = transforms[..., 3] - rot_joints  # add in the translation, we should translate first
    # Posed joint positions: apply the final transforms to the rest joints
    joints_points = np.concatenate([joints, np.ones([n_bones, 1])], axis=1)
    pose_joints = np.sum(transforms * joints_points[:, None], axis=2)  # This is effectively matmul
    transforms = transforms.astype(np.float32)
    return transforms, pose_joints[:, :3]
def get_rigid_transformation(poses, joints, parents):
    """Convenience wrapper: forward kinematics returning only the transforms.

    poses: n_bones x 3
    joints: n_bones x 3
    parents: n_bones
    """
    transforms, _ = get_rigid_transformation_and_joints(poses, joints, parents)
    return transforms
def padding_bbox_HW(bbox, h, w, padding=10):
    """Pad a pixel-space bounding box in place and clamp it to the image.

    bbox: integer array [[x_min, y_min], [x_max, y_max]]; modified in place.
    h, w: image height and width used for clamping.
    padding: base padding in pixels applied on every side. Defaults to 10,
        the value previously hard-coded (the old code declared a ``padding``
        variable but then used the literal 10 — now it is a real parameter).

    After the base padding, the box is widened (or heightened) so that the
    long/short side ratio stays within 1.5, a requirement of pytorch3d.
    Returns the adjusted bbox.
    """
    bbox[0] = bbox[0] - padding
    bbox[1] = bbox[1] + padding
    height = bbox[1, 1] - bbox[0, 1]
    width = bbox[1, 0] - bbox[0, 0]
    # a magic number of pytorch3d
    ratio = 1.5
    if height / width > ratio:
        # Too tall: widen symmetrically until width >= height / ratio
        min_size = int(height / ratio)
        if width < min_size:
            extra = (min_size - width) // 2
            bbox[0, 0] = bbox[0, 0] - extra
            bbox[1, 0] = bbox[1, 0] + extra
    if width / height > ratio:
        # Too wide: heighten symmetrically until height >= width / ratio
        min_size = int(width / ratio)
        if height < min_size:
            extra = (min_size - height) // 2
            bbox[0, 1] = bbox[0, 1] - extra
            bbox[1, 1] = bbox[1, 1] + extra
    # Clamp to valid pixel coordinates
    bbox[:, 0] = np.clip(bbox[:, 0], a_min=0, a_max=w - 1)
    bbox[:, 1] = np.clip(bbox[:, 1], a_min=0, a_max=h - 1)
    return bbox
def padding_bbox(bbox, img):
    """Pad ``bbox`` using the height and width of ``img`` (see padding_bbox_HW)."""
    h, w = img.shape[:2]
    return padding_bbox_HW(bbox, h, w)
def get_crop_box(H, W, K, ref_msk):
    """Compute a padded crop box around ``ref_msk`` and shift the intrinsics.

    Returns (K, bbox) where K is a float32 copy of the intrinsics with the
    principal point translated into crop coordinates, and bbox is the padded
    [[x_min, y_min], [x_max, y_max]] box clamped to (H, W).
    """
    x, y, w, h = cv2.boundingRect(ref_msk)
    box = np.array([[x, y], [x + w, y + h]])
    box = padding_bbox_HW(box, H, W)
    # Shift the principal point by the crop origin so projection stays
    # consistent inside the crop
    K = K.copy()
    K[0, 2] = K[0, 2] - box[0, 0]
    K[1, 2] = K[1, 2] - box[0, 1]
    K = K.astype(np.float32)
    return K, box
def crop_image_msk(img, msk, K, ref_msk):
    """Crop ``img``/``msk`` to the padded bounding box of ``ref_msk`` and pad the
    crop up to a size aligned to 8 pixels, updating the intrinsics accordingly.

    Returns (aligned_image float32, aligned_msk uint8 in {0, 1}, K float32, bbox).
    """
    x, y, w, h = cv2.boundingRect(ref_msk)
    bbox = np.array([[x, y], [x + w, y + h]])
    bbox = padding_bbox(bbox, img)
    crop = img[bbox[0, 1]:bbox[1, 1], bbox[0, 0]:bbox[1, 0]]
    crop_msk = msk[bbox[0, 1]:bbox[1, 1], bbox[0, 0]:bbox[1, 0]]
    # calculate the shape
    shape = crop.shape
    x = 8
    # Round the crop size up to a multiple of 8. NOTE(review): (n | 7) + 1 also
    # bumps exact multiples of 8 up by 8 (e.g. 8 -> 16) — presumably harmless
    # for the downstream consumer, but confirm if exact alignment matters.
    height = (crop.shape[0] | (x - 1)) + 1
    width = (crop.shape[1] | (x - 1)) + 1
    # align image: paste the crop into a zero canvas of the aligned size
    aligned_image = np.zeros([height, width, 3])
    aligned_image[:shape[0], :shape[1]] = crop
    aligned_image = aligned_image.astype(np.float32)
    # align mask the same way; binarize so only label 1 survives
    aligned_msk = np.zeros([height, width])
    aligned_msk[:shape[0], :shape[1]] = crop_msk
    aligned_msk = (aligned_msk == 1).astype(np.uint8)
    # revise the intrinsic camera matrix: shift the principal point by the
    # crop origin so projection stays consistent in crop coordinates
    K = K.copy()
    K[0, 2] = K[0, 2] - bbox[0, 0]
    K[1, 2] = K[1, 2] - bbox[0, 1]
    K = K.astype(np.float32)
    return aligned_image, aligned_msk, K, bbox
def random_crop_image(img, msk, K, min_size, max_size):
    """Randomly crop an 8-aligned square region centered on a valid mask pixel.

    img: H x W x C image; msk: H x W mask (must contain at least one pixel == 1,
    since the crop center is sampled from it); K: 3 x 3 intrinsics.
    min_size, max_size: bounds for the sampled crop size (before 8-alignment).

    Returns (img_crop, msk_crop, K) with the principal point shifted into crop
    coordinates. The crop is clamped to stay inside the image.
    """
    # sometimes we sample regions with no valid pixel at all, this can be problematic for the training loop
    # there's an assumption that the `msk` is always inside `mask_at_box`
    # thus, if we're sampling inside the `msk`, we'll always be getting the correct results
    H, W = img.shape[:2]
    min_HW = min(H, W)
    min_HW = min(min_HW, max_size)
    max_size = min_HW
    # min_size = int(min(min_size, 0.8 * min_HW))
    if min_size < max_size:
        # Sample a crop size in [min_size, max_size). BUGFIX: the original
        # comparison was inverted (`max_size < min_size`), which made
        # np.random.randint raise ValueError (low > high) whenever it ran and
        # left the random branch effectively unreachable.
        H_size = np.random.randint(min_size, max_size)
    else:
        H_size = min_size
    W_size = H_size
    x = 8
    # Align the crop size up to a multiple of 8
    H_size = (H_size | (x - 1)) + 1
    W_size = (W_size | (x - 1)) + 1
    # randomly select begin_x and begin_y by picking a mask pixel as the center
    coords = np.argwhere(msk == 1)
    center_xy = coords[np.random.randint(0, len(coords))][[1, 0]]
    min_x, min_y = center_xy[0] - W_size // 2, center_xy[1] - H_size // 2
    max_x, max_y = min_x + W_size, min_y + H_size
    # Clamp the window so it stays fully inside the image
    if min_x < 0:
        min_x, max_x = 0, W_size
    if max_x > W:
        min_x, max_x = W - W_size, W
    if min_y < 0:
        min_y, max_y = 0, H_size
    if max_y > H:
        min_y, max_y = H - H_size, H
    # crop image and mask
    begin_x, begin_y = min_x, min_y
    img = img[begin_y:begin_y + H_size, begin_x:begin_x + W_size]
    msk = msk[begin_y:begin_y + H_size, begin_x:begin_x + W_size]
    # revise the intrinsic camera matrix: shift the principal point by the crop origin
    K = K.copy()
    K[0, 2] = K[0, 2] - begin_x
    K[1, 2] = K[1, 2] - begin_y
    K = K.astype(np.float32)
    return img, msk, K
def get_bound_corners(bounds):
    """Return the 8 corners of an axis-aligned box as an (8, 3) float32 array.

    bounds: [[min_x, min_y, min_z], [max_x, max_y, max_z]]. Corner order is
    binary (x slowest, z fastest): index bit 2 selects x, bit 1 y, bit 0 z.
    """
    lo, hi = bounds[0], bounds[1]
    corners = [
        (xv, yv, zv)
        for xv in (lo[0], hi[0])
        for yv in (lo[1], hi[1])
        for zv in (lo[2], hi[2])
    ]
    return np.asarray(corners, dtype=np.float32)
def get_bound_2d_mask(bounds, K, RT, H, W):
    """Rasterize the 2D silhouette of a 3D axis-aligned bounding box.

    Projects the 8 box corners with intrinsics K and extrinsics RT, then fills
    all six faces into an H x W uint8 mask (1 inside, 0 outside).
    """
    corners_3d = get_bound_corners(bounds)
    corners_2d = project(corners_3d, K, RT)
    corners_2d = np.round(corners_2d).astype(int)
    mask = np.zeros((H, W), dtype=np.uint8)
    # Fill the six faces; each index list traces one face and returns to its
    # starting corner (corner index bits: 2 -> x, 1 -> y, 0 -> z).
    cv2.fillPoly(mask, [corners_2d[[0, 1, 3, 2, 0]]], 1)  # x = min face
    # BUGFIX: was [4, 5, 7, 6, 5] — the face closed on corner 5 instead of
    # returning to its starting corner 4, tracing a self-intersecting polygon.
    cv2.fillPoly(mask, [corners_2d[[4, 5, 7, 6, 4]]], 1)  # x = max face
    cv2.fillPoly(mask, [corners_2d[[0, 1, 5, 4, 0]]], 1)  # y = min face
    cv2.fillPoly(mask, [corners_2d[[2, 3, 7, 6, 2]]], 1)  # y = max face
    cv2.fillPoly(mask, [corners_2d[[0, 2, 6, 4, 0]]], 1)  # z = min face
    cv2.fillPoly(mask, [corners_2d[[1, 3, 7, 5, 1]]], 1)  # z = max face
    return mask
def get_bounds(xyz, box_padding=0.05):
    """Axis-aligned bounding box of a point set, padded on every side.

    xyz: N x 3 points. Returns a (2, 3) float32 array [[min], [max]].
    """
    lower = np.min(xyz, axis=0) - box_padding
    upper = np.max(xyz, axis=0) + box_padding
    return np.stack([lower, upper], axis=0).astype(np.float32)
def crop_mask_edge(msk):
    """Relabel a 10-pixel band around the mask boundary with the value 100.

    Pixels where a 10x10 dilation and erosion disagree (i.e. the boundary
    band) are set to 100; everything else is returned unchanged.
    """
    out = msk.copy()
    size = 10
    kernel = np.ones((size, size), np.uint8)
    eroded = cv2.erode(out.copy(), kernel)
    dilated = cv2.dilate(out.copy(), kernel)
    out[(dilated - eroded) == 1] = 100
    return out
def adjust_hsv(img, saturation, brightness, contrast):
    """Color-jitter an RGB uint8 image.

    Scales saturation and brightness (value) in HSV space, then contrast in
    RGB space; every channel is clipped at 255 after each scaling step.
    """
    hsv = cv2.cvtColor(img, cv2.COLOR_RGB2HSV).astype(np.float32)
    hsv[..., 1] = np.minimum(hsv[..., 1] * saturation, 255)
    hsv[..., 2] = np.minimum(hsv[..., 2] * brightness, 255)
    rgb = cv2.cvtColor(hsv.astype(np.uint8), cv2.COLOR_HSV2RGB)
    scaled = np.minimum(rgb.astype(np.float32) * contrast, 255)
    return scaled.astype(np.uint8)
|
evocodebench_data_94
|
from __future__ import annotations
from typing import TYPE_CHECKING
if TYPE_CHECKING:
from imgui_bundle import imgui
from easyvolcap.runners.volumetric_video_viewer import VolumetricVideoViewer
import glm
import torch
import numpy as np
from os.path import join
from scipy import interpolate
from copy import copy, deepcopy
from glm import vec2, vec3, vec4, mat3, mat4, mat4x3, mat2x3 # This is actually highly optimized
from easyvolcap.utils.console_utils import *
from easyvolcap.utils.base_utils import dotdict
from easyvolcap.utils.easy_utils import read_camera, write_camera
from easyvolcap.utils.math_utils import normalize, affine_inverse
from easyvolcap.utils.data_utils import to_numpy, to_tensor, to_cuda, to_list
from easyvolcap.utils.cam_utils import gen_cubic_spline_interp_func, gen_linear_interp_func
def debug_project(proj: mat4, a: vec3, aa: "imgui.ImVec2"):
    """Project world-space point ``a`` through ``proj`` into screen space.

    Writes the perspective-divided x/y into ``aa`` and returns True; returns
    False (leaving ``aa`` untouched) when the point is behind or too close to
    the camera (w <= 0.02).
    """
    # proj: 4, 4
    # a: 3,
    clip: vec4 = proj @ vec4(a, 1.0)  # 4, 4 @ 4 = 4
    if clip.w <= 0.02:  # depth should be positive to save upfront
        return False
    aa.x, aa.y = clip.x / clip.w, clip.y / clip.w
    return True
def add_debug_line(proj: mat4, a: vec3, b: vec3, col: np.uint32 = 0xffffffff, thickness: float = 2.0):
    """Draw a world-space line segment onto the imgui background draw list.

    proj: 4x4 world-to-screen matrix; a, b: segment endpoints in world space.
    The segment is skipped entirely if either endpoint fails projection.
    """
    from imgui_bundle import imgui
    from easyvolcap.utils.imgui_utils import col2imu32
    draw_list: imgui.ImDrawList = imgui.get_background_draw_list()
    pa, pb = imgui.ImVec2(), imgui.ImVec2()
    if not debug_project(proj, a, pa):
        return
    if not debug_project(proj, b, pb):
        return
    draw_list.add_line(pa, pb, col2imu32(col), thickness)
def add_debug_text(proj: mat4, a: vec3, text: str, col: np.uint32 = 0xffffffff):
    """Draw ``text`` at the screen-space projection of world point ``a``.

    proj: 4x4 world-to-screen matrix. Nothing is drawn when the point fails
    projection (behind the camera).
    """
    from imgui_bundle import imgui
    from easyvolcap.utils.imgui_utils import col2imu32
    draw_list: imgui.ImDrawList = imgui.get_background_draw_list()
    pos = imgui.ImVec2()
    if not debug_project(proj, a, pos):
        return
    draw_list.add_text(pos, col2imu32(col), text)
def add_debug_text_2d(aa: "imgui.ImVec2", text: str, col: np.uint32 = 0xff4040ff):
    """Draw ``text`` at a screen-space position on the imgui background draw list."""
    from imgui_bundle import imgui
    from easyvolcap.utils.imgui_utils import col2imu32
    imgui.get_background_draw_list().add_text(aa, col2imu32(col), text)
def visualize_axes(proj: mat4, a: vec3, b: vec3, thickness=3.0, name: str = None):  # bounds in world coordinates
    """Draw a coordinate-axes gizmo from origin ``a`` towards extents ``b``.

    X is drawn red, Y green, Z blue; the axis letters are placed slightly past
    each axis tip. ``proj`` is the world-to-screen matrix; ``name``, if given,
    labels the origin.
    """
    # Axis letters, offset a little past the respective axis tip
    add_debug_text(proj, vec3(b.x + 0.025, a.y, a.z + 0.045), 'x', 0xccccccff)
    add_debug_text(proj, vec3(a.x, b.y + 0.025, a.z + 0.045), 'y', 0xccccccff)
    add_debug_text(proj, vec3(a.x, a.y, b.z + 0.025 + 0.045), 'z', 0xccccccff)
    # The three axis lines: X red, Y green, Z blue (RGBA colors)
    add_debug_line(proj, vec3(a.x, a.y, a.z), vec3(b.x, a.y, a.z), 0xff4040ff, thickness=thickness)
    add_debug_line(proj, vec3(a.x, a.y, a.z), vec3(a.x, b.y, a.z), 0x40ff40ff, thickness=thickness)
    add_debug_line(proj, vec3(a.x, a.y, a.z), vec3(a.x, a.y, b.z), 0x4040ffff, thickness=thickness)
    if name is not None: add_debug_text(proj, a + vec3(0.045), str(name), 0xccccccff)  # maybe mark the cameras
def visualize_cube(proj: mat4, a: vec3, b: vec3, thickness=3.0, name: str = None):  # bounds in world coordinates
    """Draw the 12 edges of the axis-aligned box spanning ``a`` to ``b``.

    The three edges leaving corner ``a`` are colored like axes (X red, Y green,
    Z blue); the remaining nine edges are white. ``proj`` is the world-to-screen
    matrix; ``name``, if given, labels corner ``a``.
    """
    add_debug_line(proj, vec3(a.x, a.y, a.z), vec3(b.x, a.y, a.z), 0xff4040ff, thickness=thickness)  # X
    add_debug_line(proj, vec3(a.x, b.y, a.z), vec3(b.x, b.y, a.z), 0xffffffff, thickness=thickness)
    add_debug_line(proj, vec3(a.x, a.y, b.z), vec3(b.x, a.y, b.z), 0xffffffff, thickness=thickness)
    add_debug_line(proj, vec3(a.x, b.y, b.z), vec3(b.x, b.y, b.z), 0xffffffff, thickness=thickness)
    add_debug_line(proj, vec3(a.x, a.y, a.z), vec3(a.x, b.y, a.z), 0x40ff40ff, thickness=thickness)  # Y
    add_debug_line(proj, vec3(b.x, a.y, a.z), vec3(b.x, b.y, a.z), 0xffffffff, thickness=thickness)
    add_debug_line(proj, vec3(a.x, a.y, b.z), vec3(a.x, b.y, b.z), 0xffffffff, thickness=thickness)
    add_debug_line(proj, vec3(b.x, a.y, b.z), vec3(b.x, b.y, b.z), 0xffffffff, thickness=thickness)
    add_debug_line(proj, vec3(a.x, a.y, a.z), vec3(a.x, a.y, b.z), 0x4040ffff, thickness=thickness)  # Z
    add_debug_line(proj, vec3(b.x, a.y, a.z), vec3(b.x, a.y, b.z), 0xffffffff, thickness=thickness)
    add_debug_line(proj, vec3(a.x, b.y, a.z), vec3(a.x, b.y, b.z), 0xffffffff, thickness=thickness)
    add_debug_line(proj, vec3(b.x, b.y, a.z), vec3(b.x, b.y, b.z), 0xffffffff, thickness=thickness)
    # BUGFIX: label color was 0xffcccccc — byte-swapped relative to the
    # 0xccccccff label color used by visualize_axes and visualize_cameras.
    if name is not None: add_debug_text(proj, a + vec3(0.045), str(name), 0xccccccff)  # maybe mark the cameras
def visualize_cameras(proj: mat4, ixt: mat3, c2w: mat4x3, axis_size: float = 0.10, col: np.uint32 = 0x80ffffff, thickness: float = 2.0, name: str = None):
    """Draw a camera frustum wireframe plus its local axes.

    proj: 4x4 world-to-screen matrix; ixt: 3x3 intrinsics; c2w: camera-to-world
    matrix whose columns c2w[0..2] are the camera's local axes and c2w[3] is
    the camera center. The gizmo scales with the focal length via axis_size.
    """
    p = c2w[3]  # third row (corresponding to 3rd column)
    focal = (ixt[0, 0] + ixt[1, 1]) / 2  # mean of fx and fy
    axis_size = focal * axis_size / 1000  # scale the gizmo with the focal length
    aspect = ixt[0, 0] / ixt[1, 1]
    xs = axis_size * aspect
    ys = axis_size
    zs = axis_size * aspect * 2  # frustum depth
    # Four corners of the frustum's far plane, offset along the camera axes
    a = p + xs * c2w[0] + ys * c2w[1] + zs * c2w[2]
    b = p - xs * c2w[0] + ys * c2w[1] + zs * c2w[2]
    c = p - xs * c2w[0] - ys * c2w[1] + zs * c2w[2]
    d = p + xs * c2w[0] - ys * c2w[1] + zs * c2w[2]
    # Edges from the camera center to the four corners
    add_debug_line(proj, p, a, col, thickness)
    add_debug_line(proj, p, b, col, thickness)
    add_debug_line(proj, p, c, col, thickness)
    add_debug_line(proj, p, d, col, thickness)
    # The far-plane rectangle
    add_debug_line(proj, a, b, col, thickness)
    add_debug_line(proj, b, c, col, thickness)
    add_debug_line(proj, c, d, col, thickness)
    add_debug_line(proj, d, a, col, thickness)
    # Local axes at the camera center: X red, Y green, Z blue
    add_debug_line(proj, p, p + axis_size * c2w[0], 0xff4040ff, thickness)
    add_debug_line(proj, p, p + axis_size * c2w[1], 0x40ff40ff, thickness)
    add_debug_line(proj, p, p + axis_size * c2w[2], 0x4040ffff, thickness)
    if name is not None: add_debug_text(proj, p, str(name), 0xccccccff)  # maybe mark the cameras
class CameraPath:
    """Model (in the MVC sense) of the EVC GUI camera path: an ordered list of
    keyframe Cameras with spline interpolation between them, plus imgui-based
    visualization and easymocap-style (extri.yml / intri.yml) import/export.
    """
    # This is the Model in the EVC gui designs
    # Basic a list of cameras with interpolations
    # Use the underlying Camera class as backbone
    # Will export to a sequence of cameras extri.yml and intri.yml
    # Will support keyframes based manipulations:
    # 1. Adding current view as key frame
    # 2. Jumping to previous keyframe (resize window as well?) (toggle edit state?) (or just has a replace button)
    # - Snap to current keyframe?
    # - Editing would be much better (just a button to replace the selected keyframe)
    # 3. Toggle playing animation of this keyframe (supports some degress of control)
    # 4. Export the animation as a pair of extri.yml and intri.yml
    # 5. imguizmo control of the created camera in the list (translation, rotation etc)
    def __init__(self,
                 playing: bool = False,
                 playing_time: float = 0.5,
                 playing_speed: float = 0.0005,
                 n_render_views: int = 100,
                 render_plots: bool = True,
                 # Visualization related
                 visible: bool = True,
                 name: str = 'camera_path',
                 filename: str = '',
                 plot_thickness: float = 8.0,
                 camera_thickness: float = 6.0,
                 plot_color: int = 0x80ff80ff,
                 camera_color: int = 0x80ffffff,
                 camera_axis_size: float = 0.10,
                 **kwargs,
                 ) -> None:
        self.keyframes: List[Camera] = []  # orders matter
        self.playing_time = playing_time  # range: 0-1
        self.playing = playing  # is this playing? update cam if it is
        self.playing_speed = playing_speed  # faster interpolation time
        self.n_render_views = n_render_views
        self.render_plots = render_plots
        # Private
        self.cursor_index = -1  # the camera to edit
        self.periodic = True
        # Visualization
        self.name = name
        self.visible = visible
        self.plot_thickness = plot_thickness
        self.camera_thickness = camera_thickness
        self.plot_color = plot_color
        self.camera_color = camera_color
        self.camera_axis_size = camera_axis_size
        if filename:
            self.load_keyframes(filename)
    def __len__(self):
        """Number of keyframes in the path."""
        return len(self.keyframes)
    @property
    def loop_interp(self):
        """Whether interpolation treats the path as a closed loop."""
        return self.periodic
    @loop_interp.setter
    def loop_interp(self, v: bool):
        changed = self.periodic != v
        self.periodic = v
        if changed: self.update()  # only perform heavy operation after change
    @property
    def selected(self):
        """Index of the currently selected (edited) keyframe, -1 if none."""
        return self.cursor_index
    @selected.setter
    def selected(self, v: int):
        # Out-of-range indices are silently ignored; negative v wraps via range()
        if v >= len(self): return
        if not len(self): self.cursor_index = -1; return
        self.cursor_index = range(len(self))[v]
        # Keep the playhead in sync with the selection
        denom = (len(self) - 1)
        if denom: self.playing_time = self.cursor_index / denom  # 1 means last frame
        else: self.playing_time = 0.5
    def replace(self, camera: Camera):
        """Overwrite the selected keyframe with a copy of ``camera``."""
        self.keyframes[self.selected] = deepcopy(camera)
        self.update()
    def insert(self, camera: Camera):
        """Insert a copy of ``camera`` right after the selected keyframe and select it."""
        self.keyframes = self.keyframes[:self.selected + 1] + [deepcopy(camera)] + self.keyframes[self.selected + 1:]
        self.selected = self.selected + 1
        self.update()
    def delete(self, index: int):
        """Remove the keyframe at ``index`` and move the selection back one.

        NOTE(review): the selection is decremented regardless of whether the
        deleted index was before or after it — confirm this is intended.
        """
        del self.keyframes[index]
        self.selected = self.selected - 1  # go back one
        self.update()
    def clear(self):
        """Drop all keyframes and reset the selection."""
        self.keyframes.clear()
        self.selected = -1
    def update(self):
        """Rebuild the interpolation functions from the current keyframes.

        No-op with fewer than 4 keyframes (cubic splines need at least 4).
        """
        # MARK: HEAVY
        K = len(self.keyframes)
        if K <= 3: return
        # Prepare for linear and extrinsic parameters
        ks = np.asarray([c.K.to_list() for c in self.keyframes]).transpose(0, 2, 1).reshape(K, -1)  # 9
        hs = np.asarray([c.H for c in self.keyframes]).reshape(K, -1)
        ws = np.asarray([c.W for c in self.keyframes]).reshape(K, -1)
        ns = np.asarray([c.n for c in self.keyframes]).reshape(K, -1)
        fs = np.asarray([c.f for c in self.keyframes]).reshape(K, -1)
        ts = np.asarray([c.t for c in self.keyframes]).reshape(K, -1)
        vs = np.asarray([c.v for c in self.keyframes]).reshape(K, -1)
        bs = np.asarray([c.bounds.to_list() for c in self.keyframes]).reshape(K, -1)  # 6
        lins = np.concatenate([ks, hs, ws, ns, fs, ts, vs, bs], axis=-1)  # K, D
        c2ws = np.asarray([c.c2w.to_list() for c in self.keyframes]).transpose(0, 2, 1)  # K, 3, 4
        # Recompute interpolation parameters
        self.lin_func = gen_linear_interp_func(lins, smoothing_term=0.0 if self.periodic else 10.0)  # smoothness: 0 -> period, >0 -> non-period, -1 orbit (not here)
        self.c2w_func = gen_cubic_spline_interp_func(c2ws, smoothing_term=0.0 if self.periodic else 10.0)
    def interp(self, us: float, **kwargs):
        """Evaluate the path at normalized time ``us`` in [0, 1].

        Returns the tuple (H, W, K, R, T, n, f, t, v, bounds), or None when
        there are fewer than 4 keyframes.
        """
        K = len(self.keyframes)
        if K <= 3: return
        # MARK: HEAVY?
        # Actual interpolation
        lin = self.lin_func(us)
        c2w = self.c2w_func(us)
        # Extract linear parameters
        K = torch.as_tensor(lin[:9]).view(3, 3)  # need a transpose
        H = int(lin[9])
        W = int(lin[10])
        n = torch.as_tensor(lin[11], dtype=torch.float)
        f = torch.as_tensor(lin[12], dtype=torch.float)
        t = torch.as_tensor(lin[13], dtype=torch.float)
        v = torch.as_tensor(lin[14], dtype=torch.float)
        bounds = torch.as_tensor(lin[15:]).view(2, 3)  # no need for transpose
        # Extract splined parameters
        w2c = affine_inverse(torch.as_tensor(c2w))  # already float32
        R = w2c[:3, :3]
        T = w2c[:3, 3:]
        return H, W, K, R, T, n, f, t, v, bounds
    def export_keyframes(self, path: str):
        """Write the keyframes to ``path`` as easymocap intri.yml / extri.yml."""
        # Store keyframes to path
        cameras = {f'{i:06d}': k.to_easymocap() for i, k in enumerate(self.keyframes)}
        write_camera(cameras, path)  # without extri.yml, only dirname
        log(yellow(f'Keyframes saved to: {blue(path)}'))
    def load_keyframes(self, path: str):
        """Load keyframes from the intri.yml / extri.yml pair in ``path``."""
        cameras = read_camera(join(path, 'intri.yml'), join(path, 'extri.yml'))
        cameras = dotdict({k: cameras[k] for k in sorted(cameras.keys())})  # assuming dict is ordered (python 3.7+)
        self.keyframes = [Camera().from_easymocap(cam) for cam in cameras.values()]
        self.name = path
        self.update()
    def export_interps(self, path: str):
        """Write ``n_render_views`` interpolated cameras to ``path``.

        NOTE(review): interp() returns a tuple (H, W, K, ...), not a Camera,
        so `.to_easymocap()` here would raise AttributeError — confirm whether
        interp was meant to return a Camera for this call site.
        """
        # Store interpolations (animation) to path
        us = np.linspace(0, 1, self.n_render_views, dtype=np.float32)
        cameras = dotdict()
        for i, u in enumerate(tqdm(us, desc='Exporting interpolated cameras')):
            cameras[f'{i:06d}'] = self.interp(u).to_easymocap()
        write_camera(cameras, path)  # without extri.yml, only dirname
        log(yellow(f'Interpolated cameras saved to: {blue(path)}'))
    def render_imgui(self, viewer: 'VolumetricVideoViewer', batch: dotdict):
        """Render the imgui editing widgets for this path, then draw it."""
        # from easyvolcap.utils.gl_utils import Mesh
        # Mesh.render_imgui(self, viewer, batch)
        from imgui_bundle import imgui
        from easyvolcap.utils.imgui_utils import push_button_color, pop_button_color, col2rgba, col2vec4, vec42col, list2col, col2imu32
        i = batch.i
        will_delete = batch.will_delete
        slider_width = batch.slider_width
        imgui.push_item_width(slider_width * 0.5)
        self.name = imgui.input_text(f'Mesh name##{i}', self.name)[1]
        self.n_render_views = imgui.slider_int(f'Plot samples##{i}', self.n_render_views, 0, 3000)[1]
        self.plot_thickness = imgui.slider_float(f'Plot thickness##{i}', self.plot_thickness, 0.01, 10.0)[1]
        self.camera_thickness = imgui.slider_float(f'Camera thickness##{i}', self.camera_thickness, 0.01, 10.0)[1]
        self.camera_axis_size = imgui.slider_float(f'Camera axis size##{i}', self.camera_axis_size, 0.01, 1.0)[1]
        self.plot_color = list2col(imgui.color_edit4(f'Plot color##{i}', col2vec4(self.plot_color), flags=imgui.ColorEditFlags_.no_inputs.value)[1])
        self.camera_color = list2col(imgui.color_edit4(f'Camera color##{i}', col2vec4(self.camera_color), flags=imgui.ColorEditFlags_.no_inputs.value)[1])
        # Toggle buttons for plot rendering and visibility
        push_button_color(0x55cc33ff if not self.render_plots else 0x8855aaff)
        if imgui.button(f'No Plot##{i}' if not self.render_plots else f' Plot ##{i}'):
            self.render_plots = not self.render_plots
        pop_button_color()
        imgui.same_line()
        push_button_color(0x55cc33ff if not self.visible else 0x8855aaff)
        if imgui.button(f'Show##{i}' if not self.visible else f'Hide##{i}'):
            self.visible = not self.visible
        pop_button_color()
        # Render the delete button
        imgui.same_line()
        push_button_color(0xff5533ff)
        if imgui.button(f'Delete##{i}'):
            will_delete.append(i)
        pop_button_color()
        # The actual rendering
        self.draw(viewer.camera)
    def draw(self, camera: Camera):
        """Draw the keyframe camera gizmos and the interpolated path polyline."""
        # The actual rendering starts here, the camera paths are considered GUI elements for eaiser management
        # This rendering pattern is extremly slow and hard on the CPU, but whatever for now, just visualization
        if not self.visible: return
        if not len(self): return
        proj = camera.w2p  # 3, 4
        # Render cameras
        for i, cam in enumerate(self.keyframes):
            ixt = cam.ixt
            c2w = cam.c2w
            c2w = mat4x3(c2w)  # vis cam only supports this
            # Add to imgui rendering list
            visualize_cameras(proj, ixt, c2w, col=self.camera_color, thickness=self.camera_thickness, axis_size=self.camera_axis_size)
        # Render the interpolated path as consecutive line segments
        if self.render_plots and len(self) >= 4:
            us = np.linspace(0, 1, self.n_render_views, dtype=np.float32)
            c2ws = self.c2w_func(us)
            cs = c2ws[..., :3, 3]  # N, 3 camera centers along the path
            for i, c in enumerate(cs):
                if i == 0:
                    p = c  # previous
                    continue
                add_debug_line(proj, vec3(*p), vec3(*c), col=self.plot_color, thickness=self.plot_thickness)
                p = c
    def render(self, camera: Camera):
        """Intentionally empty: GL rendering hook kept for interface compatibility."""
        pass
class Camera:
    """Helper class to manage camera parameters (intrinsics, extrinsics, bounds)
    plus GUI interaction state (dragging, panning, simple physics for movement).

    Matrices are stored as PyGLM types, which are column-major: e.g. K[2, 0]
    addresses column 2, row 0 — i.e. the cx entry of the intrinsics.
    """
    def __init__(self,
                 H: int = 512,
                 W: int = 512,
                 K: torch.Tensor = torch.tensor([[512.0, 0.0, 256], [0.0, 512.0, 256.0], [0.0, 0.0, 1.0]]),  # intrinsics
                 R: torch.Tensor = torch.tensor([[-0.9977766275405884, 0.06664637476205826, 0.0], [0.004728599451482296, 0.07079283893108368, -0.9974799156188965], [-0.0664784237742424, -0.9952622056007385, -0.07095059007406235]]),  # extrinsics
                 T: torch.Tensor = torch.tensor([[-2.059340476989746e-5], [2.5779008865356445e-6], [-3.000047445297241]]),  # extrinsics
                 n: float = 0.002,  # bounds limit
                 f: float = 100,  # bounds limit
                 t: float = 0.0,  # temporal dimension (implemented as a float instead of int)
                 v: float = 0.0,  # view dimension (implemented as a float instead of int)
                 bounds: torch.Tensor = torch.tensor([[-1.0, -1.0, -1.0], [1.0, 1.0, 1.0]]),  # bounding box
                 # camera update hyperparameters
                 origin: torch.Tensor = torch.tensor([0.0, 0.0, 0.0]),
                 world_up: torch.Tensor = torch.tensor([0.0, 0.0, 1.0]),
                 movement_speed: float = 1.0,  # gui movement speed
                 movement_force: float = 1.0,  # include some physiscs
                 drag_coeff_mult: float = 1.0,  # include some physiscs
                 constant_drag: float = 1.0,
                 mass: float = 0.1,
                 moment_of_inertia: float = 0.1,
                 movement_torque: float = 1.0,
                 angular_friction: float = 2.0,
                 constant_torque: float = 1.0,
                 min_interval: float = 0.0334,  # simulate at at least 30 fps
                 pause_physics: bool = False,
                 batch: dotdict = None,  # will ignore all other inputs
                 string: str = None,  # will ignore all other inputs
                 **kwargs,
                 ) -> None:
        # Batch (network input parameters)
        if string is None:
            if batch is None:
                batch = dotdict()
                batch.H, batch.W, batch.K, batch.R, batch.T, batch.n, batch.f, batch.t, batch.v, batch.bounds = H, W, K, R, T, n, f, t, v, bounds
            self.from_batch(batch)
            # Other configurables
            self.origin = vec3(*origin)
            # self.origin = self.center # rotate about center
            self.world_up = vec3(*world_up)
            self.movement_speed = movement_speed
            # self.front = self.front  # will trigger an update
        else:
            self.from_string(string)
        # Internal states to facilitate camera position change
        self.is_dragging = False  # rotation
        self.about_origin = False  # about origin rotation
        self.is_panning = False  # translation
        self.lock_fx_fy = True
        self.drag_start = vec2(0.0)
        # Internal states to facilitate moving with mass (simple physics sim)
        self.mass = mass
        self.force = vec3(0.0)
        self.speed = vec3(0.0)  # no movement
        self.acc = vec3(0.0)
        self.drag_coeff_mult = drag_coeff_mult
        self.movement_force = movement_force
        self.constant_drag = constant_drag
        self.pause_physics = pause_physics
        self.min_interval = min_interval
        self.torque = vec3(0.0)
        self.moment_of_inertia = moment_of_inertia
        self.angular_speed = vec3(0.0)  # relative angular speed on three euler angles
        self.angular_acc = vec3(0.0)
        self.angular_friction = angular_friction
        self.constant_torque = constant_torque
        self.movement_torque = movement_torque
    def step(self, interval: float):
        """Advance the movement/rotation physics simulation by ``interval`` seconds."""
        if self.pause_physics: return
        # Cap the interval to make the simulation more stable
        interval = min(interval, self.min_interval)
        # Compute the drag force
        speed2 = glm.dot(self.speed, self.speed)
        if speed2 > 1.0:
            # Drag at opposite direction of movement, quadratic in speed
            drag = -speed2 * (self.speed / speed2) * self.drag_coeff_mult
        elif speed2 > 0:
            # Constant drag if speed is below a threshold to make it stop faster
            drag = -self.constant_drag * self.speed
        else:
            drag = vec3(0.0)
        # Compute acceleration and final speed (F = ma)
        self.acc = (self.force + drag) / self.mass
        self.speed += self.acc * interval
        # Compute displacement in this interval
        speed2 = glm.dot(self.speed, self.speed)
        if speed2 > 0:
            # Move in the plane defined by right / world_up (trapezoidal integration)
            direction = mat3(self.right, -glm.normalize(glm.cross(self.right, self.world_up)), self.world_up)
            movement = direction @ (self.speed - self.acc * interval / 2) * interval
            self.center += movement
        # Compute rotation change
        # Compute the drag torque
        speed2 = glm.dot(self.angular_speed, self.angular_speed)
        if speed2 > 0.1:
            # Drag at opposite direction of movement
            drag = -speed2 * (self.angular_speed / speed2) * self.angular_friction
        elif speed2 > 0.0:
            # Constant drag if speed is below a threshold to make it stop faster
            drag = -self.constant_torque * self.angular_speed
        else:
            drag = vec3(0.0)
        # Compute angular acceleration and final angular speed
        self.angular_acc = (self.torque + drag) / self.moment_of_inertia
        self.angular_speed += self.angular_acc * interval
        # Angular movement direction
        delta = self.angular_speed * interval  # about x, y and z axis (euler angle)
        # Limit look up/down so the view cannot flip over world_up
        dot = glm.dot(self.world_up, self.front)
        self.drag_ymin = -np.arccos(-dot) + 0.01  # drag up, look down
        self.drag_ymax = np.pi + self.drag_ymin - 0.02  # remove the 0.01 of drag_ymin
        # Rotate about euler angle
        EPS = 1e-7
        if abs(delta.x) > EPS or abs(delta.y) > EPS or abs(delta.z) > EPS:
            m = mat4(1.0)
            m = glm.rotate(m, np.clip(delta.x, self.drag_ymin, self.drag_ymax), self.right)
            m = glm.rotate(m, delta.y, -self.world_up)
            m = glm.rotate(m, delta.z, self.front)
            center = self.center
            self.front = m @ self.front  # might overshoot and will update center
            self.center = center  # restore the center (front setter moves it)
    @property
    def w2p(self):
        """World-to-projection matrix: intrinsics (lifted to 4x4) @ world-to-camera."""
        ixt = mat4(self.ixt)
        ixt[3, 3] = 0
        ixt[2, 3] = 1  # copy z into w for the perspective divide (column-major indexing)
        return ixt @ self.ext  # w2c -> c2p = w2p
    @property
    def V(self): return self.c2w
    @property
    def ixt(self): return self.K
    @property
    def gl_ext(self):
        """OpenGL-convention view matrix: OpenCV c2w with y and z axes flipped, inverted."""
        gl_c2w = self.c2w
        gl_c2w[0] *= 1  # do not flip x
        gl_c2w[1] *= -1  # flip y
        gl_c2w[2] *= -1  # flip z
        gl_ext = glm.affineInverse(gl_c2w)
        return gl_ext  # use original opencv ext since we've taken care of the intrinsics in gl_ixt
    @property
    def gl_ixt(self):
        """OpenGL projection matrix built from the pinhole intrinsics and n/f clip planes."""
        # Construct opengl camera matrix with projection & clipping
        # https://fruty.io/2019/08/29/augmented-reality-with-opencv-and-opengl-the-tricky-projection-matrix/
        # https://gist.github.com/davegreenwood/3a32d779f81f08dce32f3bb423672191
        # fmt: off
        gl_ixt = mat4(
                      2 * self.fx / self.W,                          0,                                       0,  0,
                       2 * self.s / self.W,       2 * self.fy / self.H,                                       0,  0,
                1 - 2 * (self.cx / self.W), 2 * (self.cy / self.H) - 1, (self.f + self.n) / (self.n - self.f), -1,
                                         0,                          0,   2 * self.f * self.n / (self.n - self.f),  0,
        )
        # fmt: on
        return gl_ixt
    @property
    def ext(self): return self.w2c
    @property
    def w2c(self):
        """World-to-camera 4x4 matrix assembled from R and T."""
        w2c = mat4(self.R)
        w2c[3] = vec4(*self.T, 1.0)
        return w2c
    @property
    def c2w(self):
        """Camera-to-world 4x4 matrix (inverse of w2c)."""
        return glm.affineInverse(self.w2c)
    @property
    def right(self) -> vec3: return vec3(self.R[0, 0], self.R[1, 0], self.R[2, 0])  # c2w R, 0 -> 3,
    @property
    def down(self) -> vec3: return vec3(self.R[0, 1], self.R[1, 1], self.R[2, 1])  # c2w R, 1 -> 3,
    @property
    def front(self) -> vec3: return vec3(self.R[0, 2], self.R[1, 2], self.R[2, 2])  # c2w R, 2 -> 3,
    @front.setter
    def front(self, v: vec3):
        """Set the viewing direction and re-orthogonalize right/down against world_up."""
        front = v  # the last row of R
        self.R[0, 2], self.R[1, 2], self.R[2, 2] = front.x, front.y, front.z
        right = glm.normalize(glm.cross(self.front, self.world_up))  # right
        self.R[0, 0], self.R[1, 0], self.R[2, 0] = right.x, right.y, right.z
        down = glm.cross(self.front, self.right)  # down
        self.R[0, 1], self.R[1, 1], self.R[2, 1] = down.x, down.y, down.z
    @property
    def center(self):
        """Camera center in world coordinates: -R^T @ T."""
        return -glm.transpose(self.R) @ self.T  # 3,
    @center.setter
    def center(self, v: vec3):
        self.T = -self.R @ v  # 3, 1
    @property
    def s(self): return self.K[1, 0]
    @s.setter
    def s(self, s): self.K[1, 0] = s
    @property
    def fx(self): return self.K[0, 0]
    @fx.setter
    def fx(self, v: float):
        # Clamp the focal length to a sane range
        v = min(v, 1e5)
        v = max(v, 1e-3)
        if self.lock_fx_fy:
            # Scale fy proportionally to preserve the aspect ratio
            self.K[1, 1] = v / self.K[0, 0] * self.K[1, 1]
        self.K[0, 0] = v
    @property
    def fy(self): return self.K[1, 1]
    @fy.setter
    def fy(self, v: float):
        if self.lock_fx_fy:
            self.K[0, 0] = v / self.K[1, 1] * self.K[0, 0]
        self.K[1, 1] = v
    @property
    def cx(self): return self.K[2, 0]
    @cx.setter
    def cx(self, v: float):
        self.K[2, 0] = v
    @property
    def cy(self): return self.K[2, 1]
    @cy.setter
    def cy(self, v: float):
        self.K[2, 1] = v
    def begin_dragging(self,
                       x: float, y: float,
                       is_panning: bool,
                       about_origin: bool,
                       ):
        """Start a mouse drag at (x, y); pan vs rotate and pivot mode are flags."""
        self.is_dragging = True
        self.is_panning = is_panning
        self.about_origin = about_origin
        self.drag_start = vec2([x, y])
    def end_dragging(self):
        """Stop the current mouse drag."""
        self.is_dragging = False
    def update_dragging(self, x: float, y: float):
        """Apply an incremental mouse drag to pan or rotate the camera."""
        if not self.is_dragging:
            return
        current = vec2(x, y)
        delta = current - self.drag_start
        delta /= max(self.H, self.W)  # normalize by the larger screen dimension
        delta *= -1
        self.drag_start = vec2([x, y])
        # Record the camera frame at the start of this incremental step
        self.drag_start_front = self.front  # a recording
        self.drag_start_down = self.down
        self.drag_start_right = self.right
        self.drag_start_center = self.center
        self.drag_start_origin = self.origin
        self.drag_start_world_up = self.world_up
        # Need to find the max or min delta y to align with world_up
        dot = glm.dot(self.world_up, self.front)
        self.drag_ymin = -np.arccos(-dot) + 0.01  # drag up, look down
        self.drag_ymax = np.pi + self.drag_ymin - 0.02  # remove the 0.01 of drag_ymin
        if self.is_panning:
            # Translate in the image plane (right/down directions)
            delta *= self.movement_speed
            center_delta = delta[0] * self.drag_start_right + delta[1] * self.drag_start_down
            self.center = self.drag_start_center + center_delta
            if self.about_origin:
                self.origin = self.drag_start_origin + center_delta
        else:
            # Rotate: yaw about world_up, pitch about the starting right axis
            m = mat4(1.0)
            m = glm.rotate(m, delta.x % 2 * np.pi, self.world_up)
            m = glm.rotate(m, np.clip(delta.y, self.drag_ymin, self.drag_ymax), self.drag_start_right)
            self.front = m @ self.drag_start_front  # might overshoot
            if self.about_origin:
                # Orbit the camera center around the origin
                self.center = -m @ (self.origin - self.drag_start_center) + self.origin
    def move(self, x_offset: float, y_offset: float):
        """Dolly the camera along its viewing direction (e.g. from scroll input)."""
        speed_factor = 1e-1
        movement = y_offset * speed_factor
        movement = movement * self.front * self.movement_speed
        self.center += movement
        if self.is_dragging:
            self.drag_start_center += movement
    def to_batch(self):
        """Serialize the camera to a dotdict of torch tensors (network input format)."""
        meta = dotdict()
        meta.H = torch.as_tensor(self.H)
        meta.W = torch.as_tensor(self.W)
        # .mT converts the column-major glm layout back to row-major tensors
        meta.K = torch.as_tensor(self.K.to_list(), dtype=torch.float).mT
        meta.R = torch.as_tensor(self.R.to_list(), dtype=torch.float).mT
        meta.T = torch.as_tensor(self.T.to_list(), dtype=torch.float)[..., None]
        meta.n = torch.as_tensor(self.n, dtype=torch.float)
        meta.f = torch.as_tensor(self.f, dtype=torch.float)
        meta.t = torch.as_tensor(self.t, dtype=torch.float)
        meta.v = torch.as_tensor(self.v, dtype=torch.float)
        meta.bounds = torch.as_tensor(self.bounds.to_list(), dtype=torch.float)  # no transpose for bounds
        # GUI related elements
        meta.mass = torch.as_tensor(self.mass, dtype=torch.float)
        meta.moment_of_inertia = torch.as_tensor(self.moment_of_inertia, dtype=torch.float)
        meta.movement_force = torch.as_tensor(self.movement_force, dtype=torch.float)
        meta.movement_torque = torch.as_tensor(self.movement_torque, dtype=torch.float)
        meta.movement_speed = torch.as_tensor(self.movement_speed, dtype=torch.float)
        meta.origin = torch.as_tensor(self.origin.to_list(), dtype=torch.float)
        meta.world_up = torch.as_tensor(self.world_up.to_list(), dtype=torch.float)
        batch = dotdict()
        batch.update(meta)
        batch.meta.update(meta)
        return batch
    def to_easymocap(self):
        """Serialize to an easymocap-style dict of numpy arrays."""
        batch = self.to_batch()
        camera = to_numpy(batch)
        return camera
    def from_easymocap(self, camera: dict):
        """Load parameters from an easymocap-style dict; returns self."""
        batch = to_tensor(camera)
        self.from_batch(batch)
        return self
    def to_string(self) -> str:
        """Serialize the camera to a JSON string."""
        batch = to_list(self.to_batch().meta)
        return json.dumps(batch)
    def from_string(self, string: str):
        """Load camera parameters from a JSON string produced by to_string."""
        batch = to_tensor(dotdict(json.loads(string)), ignore_list=True)
        self.from_batch(batch)
    def from_batch(self, batch: dotdict):
        """Load camera parameters from a batch dotdict; returns self."""
        H, W, K, R, T, n, f, t, v, bounds = batch.H, batch.W, batch.K, batch.R, batch.T, batch.n, batch.f, batch.t, batch.v, batch.bounds
        # Batch (network input parameters); .mT re-orders row-major tensors into
        # glm's column-major constructor order
        self.H = int(H)
        self.W = int(W)
        self.K = mat3(*K.mT.ravel())
        self.R = mat3(*R.mT.ravel())
        self.T = vec3(*T.ravel())  # 3,
        self.n = float(n)
        self.f = float(f)
        self.t = float(t)
        self.v = float(v)
        self.bounds = mat2x3(*bounds.ravel())  # 2, 3
        if 'mass' in batch: self.mass = float(batch.mass)
        if 'moment_of_inertia' in batch: self.moment_of_inertia = float(batch.moment_of_inertia)
        if 'movement_force' in batch: self.movement_force = float(batch.movement_force)
        if 'movement_torque' in batch: self.movement_torque = float(batch.movement_torque)
        if 'movement_speed' in batch: self.movement_speed = float(batch.movement_speed)
        if 'origin' in batch: self.origin = vec3(*batch.origin.ravel())  # 3,
        if 'world_up' in batch: self.world_up = vec3(*batch.world_up.ravel())  # 3,
        return self
    def custom_pose(self, R: torch.Tensor, T: torch.Tensor, K: torch.Tensor):
        """Directly overwrite the extrinsics from row-major tensors (K currently ignored)."""
        # self.K = mat3(*K.mT.ravel())
        self.R = mat3(*R.mT.ravel())
        self.T = vec3(*T.ravel())
|
evocodebench_data_95
|
from agents.agent_serializer import AgentSerializer
from integrations.memoize import memoize_to_sqlite
from integrations.sqlite_agent_persistence import SQLiteAgentPersistence
class AgentPersistenceManager:
    """Thin facade over SQLiteAgentPersistence for saving and loading agents."""

    def __init__(self, db_filename="agents.db"):
        self.persistence = SQLiteAgentPersistence(db_filename)

    def remove_agent(self, agent):
        """
        Remove an agent from the database.
        """
        self.persistence.remove_agent(agent.id)

    def save_agent(self, agent):
        """
        Serialize and save the agent state if it is a working agent and not a prime agent.
        """
        if not agent.is_working_agent() or agent.is_prime_agent():
            return
        self.persistence.save_agent(AgentSerializer.serialize(agent))

    def load_agent(self, purpose, agent_lifecycle, openai_wrapper):
        """
        Load an agent with the given purpose from the database.
        """
        stored = self.persistence.fetch_agent(purpose)
        if not stored:
            return None
        return AgentSerializer.from_dict(stored, agent_lifecycle, openai_wrapper)

    def load_all_agents(self, agent_lifecycle, openai_wrapper):
        """
        Load all agents from the database.
        """
        maybe_agents = (self.load_agent(purpose, agent_lifecycle, openai_wrapper)
                        for purpose in self.persistence.load_all_purposes())
        return [agent for agent in maybe_agents if agent]
|
evocodebench_data_96
|
import logging
import numpy as np
from typing import List, Tuple, Optional
from sklearn.metrics.pairwise import cosine_similarity
from integrations.openaiwrapper import OpenAIAPIWrapper
logger = logging.getLogger()
class Agent:
    """Minimal agent record: a purpose string plus a lazily cached embedding."""

    def __init__(self, purpose: str):
        # The embedding is computed on demand elsewhere and cached here.
        self.purpose_embedding = None
        self.purpose = purpose
class AgentSimilarity:
    """Embedding-based similarity queries over a pool of agents."""

    def __init__(self, openai_wrapper: OpenAIAPIWrapper, agents: List[Agent]):
        """
        Initializes the AgentSimilarity object.

        :param openai_wrapper: Instance of OpenAIAPIWrapper to interact with OpenAI API.
        :param agents: List of Agent objects.
        """
        self.openai_wrapper = openai_wrapper
        self.agents = agents

    def get_embedding(self, text: str) -> np.ndarray:
        """
        Retrieves the embedding for a given text.

        :param text: Text to get embedding for.
        :return: Embedding as a numpy array.
        :raises ValueError: If the API call fails or the response is malformed.
        """
        try:
            response = self.openai_wrapper.get_embedding(text)
        except Exception as e:
            logger.exception(f"Error retrieving embedding: {e}")
            # Chain the cause so the original traceback is preserved.
            raise ValueError(f"Error retrieving embedding: {e}") from e
        # Validate OUTSIDE the try block: the original raised ValueError inside
        # it and immediately caught, double-logged, and re-wrapped its own
        # exception.
        if 'data' in response and len(response['data']) > 0 and 'embedding' in response['data'][0]:
            return np.array(response['data'][0]['embedding'])
        logger.error("Invalid response format")
        raise ValueError("Invalid response format")

    def _ensure_embedding(self, agent: Agent) -> np.ndarray:
        """Return the agent's purpose embedding, computing and caching it once."""
        if agent.purpose_embedding is None:
            agent.purpose_embedding = self.get_embedding(agent.purpose)
        return agent.purpose_embedding

    def calculate_similarity_threshold(self) -> float:
        """
        Calculates the 98th percentile of pairwise purpose similarity.

        :return: 98th percentile of pairwise similarities, or 0.999 when there
                 are fewer than 250 agents (too little data for a stable
                 percentile).
        """
        try:
            embeddings = [self._ensure_embedding(agent) for agent in self.agents]
            if len(embeddings) < 250:
                return 0.999
            # O(n^2) pairwise pass; only reached once the pool holds >= 250
            # agents, so it runs rarely.
            similarities = [
                cosine_similarity([e1], [e2])[0][0]
                for i, e1 in enumerate(embeddings)
                for e2 in embeddings[i + 1:]
            ]
            return np.percentile(similarities, 98) if similarities else 0.999
        except Exception as e:
            logger.exception(f"Error calculating similarity threshold: {e}")
            raise ValueError(f"Error calculating similarity threshold: {e}") from e

    def find_closest_agent(self, purpose_embedding: np.ndarray) -> Tuple[Optional[Agent], float]:
        """
        Finds the closest agent based on the given purpose embedding.

        :param purpose_embedding: The embedding of the purpose to find the closest agent for.
        :return: Tuple of the closest agent (None when the pool is empty) and
                 the highest similarity score (-inf when the pool is empty).
        """
        closest_agent: Optional[Agent] = None
        highest_similarity: float = -np.inf
        try:
            for agent in self.agents:
                similarity = cosine_similarity([self._ensure_embedding(agent)], [purpose_embedding])[0][0]
                if similarity > highest_similarity:
                    highest_similarity = similarity
                    closest_agent = agent
            return closest_agent, highest_similarity
        except Exception as e:
            logger.exception(f"Error finding closest agent: {e}")
            raise ValueError(f"Error finding closest agent: {e}") from e
|
evocodebench_data_97
|
import logging
from typing import List
from agents.microagent import MicroAgent
from integrations.openaiwrapper import OpenAIAPIWrapper
from agents.agent_similarity import AgentSimilarity
from agents.agent_persistence_manager import AgentPersistenceManager
from numpy import ndarray
from prompt_management.prompts import (
PRIME_PROMPT, PRIME_NAME,
PROMPT_ENGINEERING_SYSTEM_PROMPT,
PROMPT_ENGINEERING_TEMPLATE, EXAMPLES
)
logger = logging.getLogger()
DEFAULT_MAX_AGENTS = 2000
PRIME_AGENT_WEIGHT = 25
class AgentLifecycle:
    """Owns the pool of MicroAgents: creation, reuse, eviction, persistence."""

    def __init__(self, openai_wrapper: OpenAIAPIWrapper, agent_persistence_manager: AgentPersistenceManager, max_agents: int = DEFAULT_MAX_AGENTS):
        self.agents: List[MicroAgent] = []
        self.openai_wrapper = openai_wrapper
        self.agent_persistence = agent_persistence_manager
        self.max_agents = max_agents

    def stop_all_agents(self) -> None:
        """Stops all agents."""
        for agent in self.agents:
            agent.stop()

    def reset_all_agents(self) -> None:
        """Resets all agents."""
        for agent in self.agents:
            agent.reset()

    def cleanup_agents(self):
        """Remove all agents with stopped == True in a single pass."""
        self.agents = [agent for agent in self.agents if not agent.stopped]

    def create_prime_agent(self) -> None:
        """Creates the prime agent and adds it to the agent list."""
        prime_agent = MicroAgent(
            PRIME_PROMPT, PRIME_NAME, 0, self,
            self.openai_wrapper, PRIME_AGENT_WEIGHT, True, True
        )
        self.agents.append(prime_agent)

    def add_agent(self, agent: MicroAgent) -> None:
        """Adds an agent to the list of agents."""
        self.agents.append(agent)

    def get_available_agents_for_agent(self, agent) -> List[MicroAgent]:
        """Returns working, non-bootstrap agents whose parent is the given agent.

        Bug fix: the original removed items from the result list while
        iterating over it (and shadowed the ``agent`` parameter), which skips
        elements and can return agents with the wrong parent. A single
        comprehension applies all three filters correctly.
        """
        agent_id = agent.id
        return [
            candidate for candidate in self.agents
            if candidate.purpose != "Bootstrap Agent"
            and candidate.working_agent
            and candidate.parent_id == agent_id
        ]

    def get_or_create_agent(self, purpose: str, depth: int, sample_input: str, force_new: bool = False, parent_agent=None) -> MicroAgent:
        """
        Retrieves or creates an agent based on the given purpose.

        Unless ``force_new`` is True, an existing agent whose purpose is at
        least as similar as the dynamically computed threshold is reused (its
        usage count is incremented); otherwise a new agent is created.
        """
        if not force_new:
            agent_similarity = AgentSimilarity(self.openai_wrapper, self.agents)
            purpose_embedding = agent_similarity.get_embedding(purpose)
            closest_agent, highest_similarity = agent_similarity.find_closest_agent(purpose_embedding)
            similarity_threshold = agent_similarity.calculate_similarity_threshold()
            if highest_similarity >= similarity_threshold:
                closest_agent.usage_count += 1
                return closest_agent
        return self._create_and_add_agent(purpose, depth, sample_input, parent_agent=parent_agent)

    def _create_and_add_agent(self, purpose: str, depth: int, sample_input: str, parent_agent=None) -> MicroAgent:
        """Helper method to create and add a new agent, evicting if at capacity."""
        if len(self.agents) >= self.max_agents:
            self._remove_least_used_agent()
        new_agent = MicroAgent(self._generate_llm_prompt(purpose, sample_input), purpose, depth, self, self.openai_wrapper, parent=parent_agent)
        new_agent.usage_count = 1
        self.agents.append(new_agent)
        return new_agent

    def _remove_least_used_agent(self):
        """Removes the agent with the lowest usage count."""
        least_used_agent = min(self.agents, key=lambda agent: agent.usage_count)
        self.agents.remove(least_used_agent)

    def save_agent(self, agent: MicroAgent) -> None:
        """Saves the given agent, logging and re-raising on failure."""
        try:
            self.agent_persistence.save_agent(agent)
        except Exception as e:
            logger.exception(f"Error in saving agent: {e}")
            raise

    def remove_agent(self, agent: MicroAgent) -> None:
        """Removes the given agent, logging and re-raising on failure."""
        try:
            self.agent_persistence.remove_agent(agent)
        except Exception as e:
            # Fixed copy-pasted log message that previously said "saving".
            logger.exception(f"Error in removing agent: {e}")
            raise

    def _generate_llm_prompt(self, goal: str, sample_input: str) -> str:
        """
        Generates a prompt for the LLM based on the given goal and sample input.

        Returns an empty string on failure so agent creation can still proceed.
        """
        messages = [
            {"role": "system", "content": PROMPT_ENGINEERING_SYSTEM_PROMPT},
            {"role": "user", "content": PROMPT_ENGINEERING_TEMPLATE.format(goal=goal, sample_input=sample_input, examples=EXAMPLES)}
        ]
        try:
            return self.openai_wrapper.chat_completion(messages=messages)
        except Exception as e:
            logger.exception(f"Error generating LLM prompt: {e}")
            return ""
|
evocodebench_data_98
|
from agents.agent_serializer import AgentSerializer
from integrations.memoize import memoize_to_sqlite
from integrations.sqlite_agent_persistence import SQLiteAgentPersistence
class AgentPersistenceManager:
    """Facade over the SQLite-backed agent store: save, load, and remove."""

    def __init__(self, db_filename="agents.db"):
        self.persistence = SQLiteAgentPersistence(db_filename)

    def remove_agent(self, agent):
        """
        Remove an agent from the database.
        """
        self.persistence.remove_agent(agent.id)

    def save_agent(self, agent):
        """
        Serialize and save the agent state if it is a working agent and not a prime agent.
        """
        if not agent.is_working_agent():
            return
        if agent.is_prime_agent():
            return
        self.persistence.save_agent(AgentSerializer.serialize(agent))

    def load_agent(self, purpose, agent_lifecycle, openai_wrapper):
        """
        Load an agent with the given purpose from the database.
        """
        stored = self.persistence.fetch_agent(purpose)
        if not stored:
            return None
        return AgentSerializer.from_dict(stored, agent_lifecycle, openai_wrapper)

    def load_all_agents(self, agent_lifecycle, openai_wrapper):
        """
        Load all agents from the database.
        """
        loaded = (
            self.load_agent(purpose, agent_lifecycle, openai_wrapper)
            for purpose in self.persistence.load_all_purposes()
        )
        return [agent for agent in loaded if agent]
|
evocodebench_data_99
|
from agents.agent_serializer import AgentSerializer
from integrations.memoize import memoize_to_sqlite
from integrations.sqlite_agent_persistence import SQLiteAgentPersistence
class AgentPersistenceManager:
    """Facade over the SQLite-backed agent store: save, load, and remove."""

    def __init__(self, db_filename="agents.db"):
        self.persistence = SQLiteAgentPersistence(db_filename)

    def remove_agent(self, agent):
        """
        Remove an agent from the database.
        """
        self.persistence.remove_agent(agent.id)

    def save_agent(self, agent):
        """
        Serialize and save the agent state if it is a working agent and not a prime agent.
        """
        if not agent.is_working_agent():
            return
        if agent.is_prime_agent():
            return
        self.persistence.save_agent(AgentSerializer.serialize(agent))

    def load_agent(self, purpose, agent_lifecycle, openai_wrapper):
        """
        Load an agent with the given purpose from the database.
        """
        stored = self.persistence.fetch_agent(purpose)
        if not stored:
            return None
        return AgentSerializer.from_dict(stored, agent_lifecycle, openai_wrapper)

    def load_all_agents(self, agent_lifecycle, openai_wrapper):
        """
        Load all agents from the database.
        """
        loaded = (
            self.load_agent(purpose, agent_lifecycle, openai_wrapper)
            for purpose in self.persistence.load_all_purposes()
        )
        return [agent for agent in loaded if agent]
|
evocodebench_data_100
|
import logging
from typing import List
from agents.microagent import MicroAgent
from integrations.openaiwrapper import OpenAIAPIWrapper
from agents.agent_similarity import AgentSimilarity
from agents.agent_persistence_manager import AgentPersistenceManager
from numpy import ndarray
from prompt_management.prompts import (
PRIME_PROMPT, PRIME_NAME,
PROMPT_ENGINEERING_SYSTEM_PROMPT,
PROMPT_ENGINEERING_TEMPLATE, EXAMPLES
)
logger = logging.getLogger()
DEFAULT_MAX_AGENTS = 2000
PRIME_AGENT_WEIGHT = 25
class AgentLifecycle:
    """Owns the pool of MicroAgents: creation, reuse, eviction, persistence."""

    def __init__(self, openai_wrapper: OpenAIAPIWrapper, agent_persistence_manager: AgentPersistenceManager, max_agents: int = DEFAULT_MAX_AGENTS):
        self.agents: List[MicroAgent] = []
        self.openai_wrapper = openai_wrapper
        self.agent_persistence = agent_persistence_manager
        self.max_agents = max_agents

    def stop_all_agents(self) -> None:
        """Stops all agents."""
        for agent in self.agents:
            agent.stop()

    def reset_all_agents(self) -> None:
        """Resets all agents."""
        for agent in self.agents:
            agent.reset()

    def cleanup_agents(self):
        """Remove all agents with stopped == True in a single pass."""
        self.agents = [agent for agent in self.agents if not agent.stopped]

    def create_prime_agent(self) -> None:
        """Creates the prime agent and adds it to the agent list."""
        prime_agent = MicroAgent(
            PRIME_PROMPT, PRIME_NAME, 0, self,
            self.openai_wrapper, PRIME_AGENT_WEIGHT, True, True
        )
        self.agents.append(prime_agent)

    def add_agent(self, agent: MicroAgent) -> None:
        """Adds an agent to the list of agents."""
        self.agents.append(agent)

    def get_available_agents_for_agent(self, agent) -> List[MicroAgent]:
        """Returns working, non-bootstrap agents whose parent is the given agent.

        Bug fix: the original removed items from the result list while
        iterating over it (and shadowed the ``agent`` parameter), which skips
        elements and can return agents with the wrong parent. A single
        comprehension applies all three filters correctly.
        """
        agent_id = agent.id
        return [
            candidate for candidate in self.agents
            if candidate.purpose != "Bootstrap Agent"
            and candidate.working_agent
            and candidate.parent_id == agent_id
        ]

    def get_or_create_agent(self, purpose: str, depth: int, sample_input: str, force_new: bool = False, parent_agent=None) -> MicroAgent:
        """
        Retrieves or creates an agent based on the given purpose.

        Unless ``force_new`` is True, an existing agent whose purpose is at
        least as similar as the dynamically computed threshold is reused (its
        usage count is incremented); otherwise a new agent is created.
        """
        if not force_new:
            agent_similarity = AgentSimilarity(self.openai_wrapper, self.agents)
            purpose_embedding = agent_similarity.get_embedding(purpose)
            closest_agent, highest_similarity = agent_similarity.find_closest_agent(purpose_embedding)
            similarity_threshold = agent_similarity.calculate_similarity_threshold()
            if highest_similarity >= similarity_threshold:
                closest_agent.usage_count += 1
                return closest_agent
        return self._create_and_add_agent(purpose, depth, sample_input, parent_agent=parent_agent)

    def _create_and_add_agent(self, purpose: str, depth: int, sample_input: str, parent_agent=None) -> MicroAgent:
        """Helper method to create and add a new agent, evicting if at capacity."""
        if len(self.agents) >= self.max_agents:
            self._remove_least_used_agent()
        new_agent = MicroAgent(self._generate_llm_prompt(purpose, sample_input), purpose, depth, self, self.openai_wrapper, parent=parent_agent)
        new_agent.usage_count = 1
        self.agents.append(new_agent)
        return new_agent

    def _remove_least_used_agent(self):
        """Removes the agent with the lowest usage count."""
        least_used_agent = min(self.agents, key=lambda agent: agent.usage_count)
        self.agents.remove(least_used_agent)

    def save_agent(self, agent: MicroAgent) -> None:
        """Saves the given agent, logging and re-raising on failure."""
        try:
            self.agent_persistence.save_agent(agent)
        except Exception as e:
            logger.exception(f"Error in saving agent: {e}")
            raise

    def remove_agent(self, agent: MicroAgent) -> None:
        """Removes the given agent, logging and re-raising on failure."""
        try:
            self.agent_persistence.remove_agent(agent)
        except Exception as e:
            # Fixed copy-pasted log message that previously said "saving".
            logger.exception(f"Error in removing agent: {e}")
            raise

    def _generate_llm_prompt(self, goal: str, sample_input: str) -> str:
        """
        Generates a prompt for the LLM based on the given goal and sample input.

        Returns an empty string on failure so agent creation can still proceed.
        """
        messages = [
            {"role": "system", "content": PROMPT_ENGINEERING_SYSTEM_PROMPT},
            {"role": "user", "content": PROMPT_ENGINEERING_TEMPLATE.format(goal=goal, sample_input=sample_input, examples=EXAMPLES)}
        ]
        try:
            return self.openai_wrapper.chat_completion(messages=messages)
        except Exception as e:
            logger.exception(f"Error generating LLM prompt: {e}")
            return ""
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.