| python_code | repo_name | file_path |
|---|---|---|
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
|
modulus-main
|
modulus/datapipes/benchmarks/__init__.py
|
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import numpy as np
import torch
import warp as wp
from dataclasses import dataclass
from typing import Union, Tuple, Dict
from ..datapipe import Datapipe
from ..meta import DatapipeMetaData
from .kernels.initialization import init_uniform_random_2d
from .kernels.finite_volume import (
euler_primitive_to_conserved_batched_2d,
euler_conserved_to_primitive_batched_2d,
euler_extrapolation_batched_2d,
euler_get_flux_batched_2d,
euler_apply_flux_batched_2d,
initialize_kelvin_helmoltz_batched_2d,
)
Tensor = torch.Tensor
# TODO unsure if better to remove this
wp.init()
@dataclass
class MetaData(DatapipeMetaData):
name: str = "KelvinHelmholtz2D"
# Optimization
auto_device: bool = True
cuda_graphs: bool = True
# Parallel
ddp_sharding: bool = False
class KelvinHelmholtz2D(Datapipe):
"""Kelvin-Helmholtz instability benchmark problem datapipe.
This datapipe continuously generates samples with random initial conditions. All samples
are generated on the fly and are meant to serve as a benchmark problem for testing
data-driven models. Initial conditions are given in the form of small perturbations. The
solution is obtained using a GPU-enabled Finite Volume Method.
Parameters
----------
resolution : int, optional
Resolution to run simulation at, by default 512
batch_size : int, optional
Batch size of simulations, by default 16
seq_length : int, optional
Sequence length of output samples, by default 8
nr_perturbation_freq : int, optional
Number of frequencies to use for generating random initial perturbations, by default 5
perturbation_range : float, optional
Range to use for random perturbations. This value will be the max amplitude of the
initial perturbation, by default 0.1
nr_snapshots : int, optional
Number of simulation snapshots to generate. This controls how long the
simulation is run, by default 256
iteration_per_snapshot : int, optional
Number of finite volume steps to take between each snapshot. Each step size is
fixed as the smallest possible value that satisfies the Courant-Friedrichs-Lewy
condition, by default 32
gamma : float, optional
Heat capacity ratio, by default 5.0/3.0
normaliser : Union[Dict[str, Tuple[float, float]], None], optional
Dictionary with keys `density`, `velocity`, and `pressure`. The values for these keys are two floats corresponding to mean and std `(mean, std)`.
device : Union[str, torch.device], optional
Device for the datapipe to place data on, by default "cuda"
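Example
-------
A minimal usage sketch (assumes NVIDIA Warp and a CUDA-capable device are available):
>>> datapipe = KelvinHelmholtz2D(resolution=64, batch_size=2)  # doctest: +SKIP
>>> sample = next(iter(datapipe))  # doctest: +SKIP
>>> sample["density"].shape  # doctest: +SKIP
torch.Size([2, 8, 1, 64, 64])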
"""
def __init__(
self,
resolution: int = 512,
batch_size: int = 16,
seq_length: int = 8,
nr_perturbation_freq: int = 5,
perturbation_range: float = 0.1,
nr_snapshots: int = 256,
iteration_per_snapshot: int = 32,
gamma: float = 5.0 / 3.0,
normaliser: Union[Dict[str, Tuple[float, float]], None] = None,
device: Union[str, torch.device] = "cuda",
):
super().__init__(meta=MetaData())
# simulation params
self.resolution = resolution
self.batch_size = batch_size
self.seq_length = seq_length
self.nr_perturbation_freq = nr_perturbation_freq
self.perturbation_range = perturbation_range
self.nr_snapshots = nr_snapshots
self.iteration_per_snapshot = iteration_per_snapshot
self.gamma = gamma
self.courant_fac = 0.4 # hard set
self.normaliser = normaliser
# check normaliser keys
if self.normaliser is not None:
if not {"density", "velocity", "pressure"}.issubset(
set(self.normaliser.keys())
):
raise ValueError(
"normaliser need to have keys `density`, `velocity` and `pressure` with mean and std"
)
# Set up device for warp, warp has same naming convention as torch.
if isinstance(device, torch.device):
device = str(device)
self.device = device
# spatial dims
self.dx = 1.0 / resolution
self.dt = (
self.courant_fac * self.dx / (np.sqrt(self.gamma * 5.0) + 2.0)
)  # fixed to a conservative step size satisfying the CFL condition
self.vol = self.dx**2
self.dim = (self.batch_size, self.resolution, self.resolution)
# allocate array for initial freq perturbation
self.w = wp.zeros(
(self.batch_size, self.nr_perturbation_freq),
dtype=float,
device=self.device,
)
# allocate conservation quantities
self.mass = wp.zeros(self.dim, dtype=float, device=self.device)
self.mom = wp.zeros(self.dim, dtype=wp.vec2, device=self.device)
self.e = wp.zeros(self.dim, dtype=float, device=self.device)
# allocate primitive quantities
self.rho = wp.zeros(self.dim, dtype=float, device=self.device)
self.vel = wp.zeros(self.dim, dtype=wp.vec2, device=self.device)
self.p = wp.zeros(self.dim, dtype=float, device=self.device)
# allocate flux values for computation
self.mass_flux_x = wp.zeros(self.dim, dtype=float, device=self.device)
self.mass_flux_y = wp.zeros(self.dim, dtype=float, device=self.device)
self.mom_flux_x = wp.zeros(self.dim, dtype=wp.vec2, device=self.device)
self.mom_flux_y = wp.zeros(self.dim, dtype=wp.vec2, device=self.device)
self.e_flux_x = wp.zeros(self.dim, dtype=float, device=self.device)
self.e_flux_y = wp.zeros(self.dim, dtype=float, device=self.device)
# allocate extrapolation values for computation
self.rho_xl = wp.zeros(self.dim, dtype=float, device=self.device)
self.rho_xr = wp.zeros(self.dim, dtype=float, device=self.device)
self.rho_yl = wp.zeros(self.dim, dtype=float, device=self.device)
self.rho_yr = wp.zeros(self.dim, dtype=float, device=self.device)
self.vel_xl = wp.zeros(self.dim, dtype=wp.vec2, device=self.device)
self.vel_xr = wp.zeros(self.dim, dtype=wp.vec2, device=self.device)
self.vel_yl = wp.zeros(self.dim, dtype=wp.vec2, device=self.device)
self.vel_yr = wp.zeros(self.dim, dtype=wp.vec2, device=self.device)
self.p_xl = wp.zeros(self.dim, dtype=float, device=self.device)
self.p_xr = wp.zeros(self.dim, dtype=float, device=self.device)
self.p_yl = wp.zeros(self.dim, dtype=float, device=self.device)
self.p_yr = wp.zeros(self.dim, dtype=float, device=self.device)
# allocate arrays for storing results
self.seq_rho = [
wp.zeros(self.dim, dtype=float, device=self.device)
for _ in range(self.nr_snapshots)
]
self.seq_vel = [
wp.zeros(self.dim, dtype=wp.vec2, device=self.device)
for _ in range(self.nr_snapshots)
]
self.seq_p = [
wp.zeros(self.dim, dtype=float, device=self.device)
for _ in range(self.nr_snapshots)
]
self.output_rho = None
self.output_vel = None
self.output_p = None
def initialize_batch(self) -> None:
"""Initializes arrays for new batch of simulations"""
# initialize random Fourier freq
seed = np.random.randint(np.iinfo(np.uint64).max, dtype=np.uint64)
wp.launch(
init_uniform_random_2d,
dim=[self.batch_size, self.nr_perturbation_freq],
inputs=[self.w, -self.perturbation_range, self.perturbation_range, seed],
device=self.device,
)
# initialize fields
wp.launch(
initialize_kelvin_helmoltz_batched_2d,
dim=self.dim,
inputs=[
self.rho,
self.vel,
self.p,
self.w,
0.05 / np.sqrt(2.0),
self.dim[1],
self.dim[2],
self.nr_perturbation_freq,
],
device=self.device,
)
wp.launch(
euler_primitive_to_conserved_batched_2d,
dim=self.dim,
inputs=[
self.rho,
self.vel,
self.p,
self.mass,
self.mom,
self.e,
self.gamma,
self.vol,
self.dim[1],
self.dim[2],
],
device=self.device,
)
def generate_batch(self) -> None:
"""Solve for new batch of simulations"""
# initialize tensors with random coef
self.initialize_batch()
# run solver
for s in range(self.nr_snapshots):
# save fields for this snapshot
wp.copy(self.seq_rho[s], self.rho)
wp.copy(self.seq_vel[s], self.vel)
wp.copy(self.seq_p[s], self.p)
# iterations
for i in range(self.iteration_per_snapshot):
# compute primitives
wp.launch(
euler_conserved_to_primitive_batched_2d,
dim=self.dim,
inputs=[
self.mass,
self.mom,
self.e,
self.rho,
self.vel,
self.p,
self.gamma,
self.vol,
self.dim[1],
self.dim[2],
],
device=self.device,
)
# compute extrapolations to faces
wp.launch(
euler_extrapolation_batched_2d,
dim=self.dim,
inputs=[
self.rho,
self.vel,
self.p,
self.rho_xl,
self.rho_xr,
self.rho_yl,
self.rho_yr,
self.vel_xl,
self.vel_xr,
self.vel_yl,
self.vel_yr,
self.p_xl,
self.p_xr,
self.p_yl,
self.p_yr,
self.gamma,
self.dx,
self.dt,
self.dim[1],
self.dim[2],
],
device=self.device,
)
# compute fluxes
wp.launch(
euler_get_flux_batched_2d,
dim=self.dim,
inputs=[
self.rho_xl,
self.rho_xr,
self.rho_yl,
self.rho_yr,
self.vel_xl,
self.vel_xr,
self.vel_yl,
self.vel_yr,
self.p_xl,
self.p_xr,
self.p_yl,
self.p_yr,
self.mass_flux_x,
self.mass_flux_y,
self.mom_flux_x,
self.mom_flux_y,
self.e_flux_x,
self.e_flux_y,
self.gamma,
self.dim[1],
self.dim[2],
],
device=self.device,
)
# apply fluxes
wp.launch(
euler_apply_flux_batched_2d,
dim=self.dim,
inputs=[
self.mass_flux_x,
self.mass_flux_y,
self.mom_flux_x,
self.mom_flux_y,
self.e_flux_x,
self.e_flux_y,
self.mass,
self.mom,
self.e,
self.dx,
self.dt,
self.dim[1],
self.dim[2],
],
device=self.device,
)
def __iter__(self) -> Dict[str, Tensor]:
"""
Yields
------
Dict[str, Tensor]
Infinite iterator that yields a dictionary with keys `density`, `velocity` and
`pressure`, each a batch of timeseries of size
[batch, seq_length, channels, resolution, resolution]
"""
# infinite generator
while True:
# run simulation
self.generate_batch()
# return all samples generated before rerunning simulation
batch_ind = [
np.arange(self.nr_snapshots - self.seq_length)
for _ in range(self.batch_size)
]
for b_ind in batch_ind:
np.random.shuffle(b_ind)
for bb in range(self.nr_snapshots - self.seq_length):
# run over batch to gather samples
batched_seq_rho = []
batched_seq_vel = []
batched_seq_p = []
for b in range(self.batch_size):
# gather seq from each batch
seq_rho = []
seq_vel = []
seq_p = []
for s in range(self.seq_length):
# get variables
rho = wp.to_torch(self.seq_rho[batch_ind[b][bb] + s])[b]
vel = wp.to_torch(self.seq_vel[batch_ind[b][bb] + s])[b]
p = wp.to_torch(self.seq_p[batch_ind[b][bb] + s])[b]
# add channels
rho = torch.unsqueeze(rho, 0)
vel = torch.permute(vel, (2, 0, 1))
p = torch.unsqueeze(p, 0)
# normalize values
if self.normaliser is not None:
rho = (
rho - self.normaliser["density"][0]
) / self.normaliser["density"][1]
vel = (
vel - self.normaliser["velocity"][0]
) / self.normaliser["velocity"][1]
p = (p - self.normaliser["pressure"][0]) / self.normaliser[
"pressure"
][1]
# store for producing seq
seq_rho.append(rho)
seq_vel.append(vel)
seq_p.append(p)
# concat seq
batched_seq_rho.append(torch.stack(seq_rho, axis=0))
batched_seq_vel.append(torch.stack(seq_vel, axis=0))
batched_seq_p.append(torch.stack(seq_p, axis=0))
# CUDA graphs static copies
if self.output_rho is None:
# concat batches
self.output_rho = torch.stack(batched_seq_rho, axis=0)
self.output_vel = torch.stack(batched_seq_vel, axis=0)
self.output_p = torch.stack(batched_seq_p, axis=0)
else:
self.output_rho.data.copy_(torch.stack(batched_seq_rho, axis=0))
self.output_vel.data.copy_(torch.stack(batched_seq_vel, axis=0))
self.output_p.data.copy_(torch.stack(batched_seq_p, axis=0))
yield {
"density": self.output_rho,
"velocity": self.output_vel,
"pressure": self.output_p,
}
def __len__(self):
return sys.maxsize
|
modulus-main
|
modulus/datapipes/benchmarks/kelvin_helmholtz.py
|
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import numpy as np
import torch
import warp as wp
from dataclasses import dataclass
from typing import Union, Tuple, Dict
from ..datapipe import Datapipe
from ..meta import DatapipeMetaData
from .kernels.initialization import init_uniform_random_4d
from .kernels.finite_difference import (
darcy_mgrid_jacobi_iterative_batched_2d,
mgrid_inf_residual_batched_2d,
)
from .kernels.utils import (
fourier_to_array_batched_2d,
threshold_3d,
bilinear_upsample_batched_2d,
)
Tensor = torch.Tensor
# TODO unsure if better to remove this. Keeping this in for now
wp.init()
@dataclass
class MetaData(DatapipeMetaData):
name: str = "Darcy2D"
# Optimization
auto_device: bool = True
cuda_graphs: bool = True
# Parallel
ddp_sharding: bool = False
class Darcy2D(Datapipe):
"""2D Darcy flow benchmark problem datapipe.
This datapipe continuously generates solutions to the 2D Darcy equation with variable
permeability. All samples are generated on the fly and are meant to serve as a
benchmark problem for testing data-driven models. Permeability is drawn from a random
Fourier series and thresholded to give a piecewise constant function. The solution is
obtained using a GPU-enabled multi-grid Jacobi iterative method.
Parameters
----------
resolution : int, optional
Resolution to run simulation at, by default 256
batch_size : int, optional
Batch size of simulations, by default 64
nr_permeability_freq : int, optional
Number of frequencies to use for generating random permeability. Higher values
will give higher frequency permeability fields, by default 5
max_permeability : float, optional
Max permeability, by default 2.0
min_permeability : float, optional
Min permeability, by default 0.5
max_iterations : int, optional
Maximum iterations to use for each multi-grid, by default 30000
convergence_threshold : float, optional
Solver L-Infinity convergence threshold, by default 1e-6
iterations_per_convergence_check : int, optional
Number of Jacobi iterations to run before checking convergence, by default 1000
nr_multigrids : int, optional
Number of multi-grid levels, by default 4
normaliser : Union[Dict[str, Tuple[float, float]], None], optional
Dictionary with keys `permeability` and `darcy`. The values for these keys are two floats corresponding to mean and std `(mean, std)`.
device : Union[str, torch.device], optional
Device for the datapipe to place data on, by default "cuda"
Raises
------
ValueError
Incompatible multi-grid and resolution settings
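Example
-------
A minimal usage sketch (assumes NVIDIA Warp and a CUDA-capable device are available):
>>> datapipe = Darcy2D(resolution=64, batch_size=4)  # doctest: +SKIP
>>> sample = next(iter(datapipe))  # doctest: +SKIP
>>> sample["permeability"].shape  # doctest: +SKIP
torch.Size([4, 1, 64, 64])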
"""
def __init__(
self,
resolution: int = 256,
batch_size: int = 64,
nr_permeability_freq: int = 5,
max_permeability: float = 2.0,
min_permeability: float = 0.5,
max_iterations: int = 30000,
convergence_threshold: float = 1e-6,
iterations_per_convergence_check: int = 1000,
nr_multigrids: int = 4,
normaliser: Union[Dict[str, Tuple[float, float]], None] = None,
device: Union[str, torch.device] = "cuda",
):
super().__init__(meta=MetaData())
# simulation params
self.resolution = resolution
self.batch_size = batch_size
self.nr_permeability_freq = nr_permeability_freq
self.max_permeability = max_permeability
self.min_permeability = min_permeability
self.max_iterations = max_iterations
self.convergence_threshold = convergence_threshold
self.iterations_per_convergence_check = iterations_per_convergence_check
self.nr_multigrids = nr_multigrids
self.normaliser = normaliser
# check normaliser keys
if self.normaliser is not None:
if not {"permeability", "darcy"}.issubset(set(self.normaliser.keys())):
raise ValueError(
"normaliser need to have keys permeability and darcy with mean and std"
)
# Set up device for warp, warp has same naming convention as torch.
if isinstance(device, torch.device):
device = str(device)
self.device = device
# spatial dims
self.dx = 1.0 / (self.resolution + 1) # pad edges by 1 for multi-grid
self.dim = (self.batch_size, self.resolution + 1, self.resolution + 1)
self.fourier_dim = (
4,
self.batch_size,
self.nr_permeability_freq,
self.nr_permeability_freq,
)
# assert resolution is compatible with multi-grid method
if (resolution % 2 ** (nr_multigrids - 1)) != 0:
raise ValueError("Resolution is incompatible with number of sub grids.")
# allocate arrays for constructing dataset
self.darcy0 = wp.zeros(self.dim, dtype=float, device=self.device)
self.darcy1 = wp.zeros(self.dim, dtype=float, device=self.device)
self.permeability = wp.zeros(self.dim, dtype=float, device=self.device)
self.rand_fourier = wp.zeros(self.fourier_dim, dtype=float, device=self.device)
self.inf_residual = wp.zeros([1], dtype=float, device=self.device)
# Output tensors
self.output_k = None
self.output_p = None
def initialize_batch(self) -> None:
"""Initializes arrays for new batch of simulations"""
# initialize permeability
self.permeability.zero_()
seed = np.random.randint(np.iinfo(np.uint64).max, dtype=np.uint64)
wp.launch(
kernel=init_uniform_random_4d,
dim=self.fourier_dim,
inputs=[self.rand_fourier, -1.0, 1.0, seed],
device=self.device,
)
wp.launch(
kernel=fourier_to_array_batched_2d,
dim=self.dim,
inputs=[
self.permeability,
self.rand_fourier,
self.nr_permeability_freq,
self.resolution,
self.resolution,
],
device=self.device,
)
wp.launch(
kernel=threshold_3d,
dim=self.dim,
inputs=[
self.permeability,
0.0,
self.min_permeability,
self.max_permeability,
],
device=self.device,
)
# zero darcy arrays
self.darcy0.zero_()
self.darcy1.zero_()
def generate_batch(self) -> None:
"""Solve for new batch of simulations"""
# initialize tensors with random permeability
self.initialize_batch()
# run solver
for res in range(self.nr_multigrids):
# calculate grid reduction factor and reduced dim
grid_reduction_factor = 2 ** (self.nr_multigrids - res - 1)
if grid_reduction_factor > 1:
multigrid_dim = tuple(
[self.batch_size] + 2 * [(self.resolution) // grid_reduction_factor]
)
else:
multigrid_dim = self.dim
# run until the maximum number of iterations is reached
for k in range(
self.max_iterations // self.iterations_per_convergence_check
):
# run jacobi iterations
for s in range(self.iterations_per_convergence_check):
# iterate solver
wp.launch(
kernel=darcy_mgrid_jacobi_iterative_batched_2d,
dim=multigrid_dim,
inputs=[
self.darcy0,
self.darcy1,
self.permeability,
1.0,
self.dim[1],
self.dim[2],
self.dx,
grid_reduction_factor,
],
device=self.device,
)
# swap buffers
(self.darcy0, self.darcy1) = (self.darcy1, self.darcy0)
# compute residual
self.inf_residual.zero_()
wp.launch(
kernel=mgrid_inf_residual_batched_2d,
dim=multigrid_dim,
inputs=[
self.darcy0,
self.darcy1,
self.inf_residual,
grid_reduction_factor,
],
device=self.device,
)
normalized_inf_residual = self.inf_residual.numpy()[0]
# check if converged
if normalized_inf_residual < (
self.convergence_threshold * grid_reduction_factor
):
break
# upsample to higher resolution
if grid_reduction_factor > 1:
wp.launch(
kernel=bilinear_upsample_batched_2d,
dim=self.dim,
inputs=[
self.darcy0,
self.dim[1],
self.dim[2],
grid_reduction_factor,
],
device=self.device,
)
def __iter__(self) -> Dict[str, Tensor]:
"""
Yields
------
Dict[str, Tensor]
Infinite iterator that yields a dictionary with keys `permeability` and
`darcy`, each a batch of fields of size [batch, 1, resolution, resolution]
"""
# infinite generator
while True:
# run simulation
self.generate_batch()
# convert warp arrays to pytorch
permeability = wp.to_torch(self.permeability)
darcy = wp.to_torch(self.darcy0)
# add channel dims
permeability = torch.unsqueeze(permeability, axis=1)
darcy = torch.unsqueeze(darcy, axis=1)
# crop edges by 1 from multi-grid TODO messy
permeability = permeability[:, :, : self.resolution, : self.resolution]
darcy = darcy[:, :, : self.resolution, : self.resolution]
# normalize values
if self.normaliser is not None:
permeability = (
permeability - self.normaliser["permeability"][0]
) / self.normaliser["permeability"][1]
darcy = (darcy - self.normaliser["darcy"][0]) / self.normaliser[
"darcy"
][1]
# CUDA graphs static copies
if self.output_k is None:
self.output_k = permeability
self.output_p = darcy
else:
self.output_k.data.copy_(permeability)
self.output_p.data.copy_(darcy)
yield {"permeability": self.output_k, "darcy": self.output_p}
def __len__(self):
return sys.maxsize
|
modulus-main
|
modulus/datapipes/benchmarks/darcy.py
|
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
|
modulus-main
|
modulus/datapipes/benchmarks/kernels/__init__.py
|
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
try:
import warp as wp
except ImportError:
print(
"""NVIDIA WARP is required for this datapipe. This package is under the
NVIDIA Source Code License (NVSCL). To install use:
pip install warp-lang
"""
)
raise SystemExit(1)
@wp.kernel
def init_uniform_random_2d(
array: wp.array2d(dtype=float),
min_value: float,
max_value: float,
external_seed: int,
): # pragma: no cover
"""Initialize 2d array with uniform random values
Parameters
----------
array : wp.array2d
Array to initialize
min_value : float
Min random value
max_value : float
Max random value
external_seed : int
External seed to use
"""
i, j = wp.tid()
state = wp.rand_init(external_seed, wp.tid())
array[i, j] = wp.randf(state, min_value, max_value)
@wp.kernel
def init_uniform_random_4d(
array: wp.array4d(dtype=float),
min_value: float,
max_value: float,
external_seed: int,
): # pragma: no cover
"""Initialize 4d array with uniform random values
Parameters
----------
array : wp.array4d
Array to initialize
min_value : float
Min random value
max_value : float
Max random value
external_seed : int
External seed to use
"""
b, i, j, k = wp.tid()
state = wp.rand_init(external_seed, wp.tid())
array[b, i, j, k] = wp.randf(state, min_value, max_value)
|
modulus-main
|
modulus/datapipes/benchmarks/kernels/initialization.py
|
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
try:
import warp as wp
except ImportError:
print(
"""NVIDIA WARP is required for this datapipe. This package is under the
NVIDIA Source Code License (NVSCL). To install use:
pip install warp-lang
"""
)
raise SystemExit(1)
from .indexing import index_zero_edges_batched_2d
@wp.kernel
def bilinear_upsample_batched_2d(
array: wp.array3d(dtype=float), lx: int, ly: int, grid_reduction_factor: int
): # pragma: no cover
"""Bilinear upsampling from batch 2d array
Parameters
----------
array : wp.array3d
Array to perform upsampling on
lx : int
Grid size X
ly : int
Grid size Y
grid_reduction_factor : int
Grid reduction factor for multi-grid
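Notes
-----
Upsamples in place: values at the coarse-grid points (every
``grid_reduction_factor``-th cell) are interpolated bilinearly into the
intermediate fine-grid cells, with out-of-domain neighbors treated as zero.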
"""
# get index
b, x, y = wp.tid()
# get four neighbors coordinates
x_0 = x - (x + 1) % grid_reduction_factor
x_1 = x + (x + 1) % grid_reduction_factor
y_0 = y - (y + 1) % grid_reduction_factor
y_1 = y + (y + 1) % grid_reduction_factor
# simple linear upsampling
d_0_0 = index_zero_edges_batched_2d(array, b, x_0, y_0, lx, ly)
d_1_0 = index_zero_edges_batched_2d(array, b, x_1, y_0, lx, ly)
d_0_1 = index_zero_edges_batched_2d(array, b, x_0, y_1, lx, ly)
d_1_1 = index_zero_edges_batched_2d(array, b, x_1, y_1, lx, ly)
# get relative distance
rel_x = wp.float32(x - x_0) / wp.float32(grid_reduction_factor)
rel_y = wp.float32(y - y_0) / wp.float32(grid_reduction_factor)
# interpolation in x direction
d_x_0 = (1.0 - rel_x) * d_0_0 + rel_x * d_1_0
d_x_1 = (1.0 - rel_x) * d_0_1 + rel_x * d_1_1
# interpolation in y direction
d = (1.0 - rel_y) * d_x_0 + rel_y * d_x_1
# set interpolation
array[b, x, y] = d
@wp.kernel
def threshold_3d(
array: wp.array3d(dtype=float), threshold: float, min_value: float, max_value: float
): # pragma: no cover
"""Threshold 3d array by value. Values bellow threshold will be `min_value` and those above will be `max_value`.
Parameters
----------
array : wp.array3d
Array to apply threshold on
threshold : float
Threshold value
min_value : float
Value to set if below threshold
max_value : float
Value to set if above threshold
"""
i, j, k = wp.tid()
if array[i, j, k] < threshold:
array[i, j, k] = min_value
else:
array[i, j, k] = max_value
@wp.kernel
def fourier_to_array_batched_2d(
array: wp.array3d(dtype=float),
fourier: wp.array4d(dtype=float),
nr_freq: int,
lx: int,
ly: int,
): # pragma: no cover
"""Array of Fourier amplitudes to batched 2d spatial array
Parameters
----------
array : wp.array3d
Spatial array
fourier : wp.array4d
Array of Fourier amplitudes
nr_freq : int
Number of frequencies in Fourier array
lx : int
Grid size x
ly : int
Grid size y
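Notes
-----
Evaluates a truncated 2D Fourier series at each grid point: with
rx = 2*pi*x/lx and ry = 2*pi*y/ly, the sum over i, j < nr_freq of
fourier[0]*sin(i*rx)*sin(j*ry) + fourier[1]*cos(i*rx)*sin(j*ry)
+ fourier[2]*sin(i*rx)*cos(j*ry) + fourier[3]*cos(i*rx)*cos(j*ry),
scaled by 1 / nr_freq**2, is accumulated into the array with atomic adds.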
"""
b, x, y = wp.tid()
dx = 6.28318 / wp.float32(lx)
dy = 6.28318 / wp.float32(ly)
rx = dx * wp.float32(x)
ry = dy * wp.float32(y)
for i in range(nr_freq):
for j in range(nr_freq):
ri = wp.float32(i)
rj = wp.float32(j)
ss = fourier[0, b, i, j] * wp.sin(ri * rx) * wp.sin(rj * ry)
cs = fourier[1, b, i, j] * wp.cos(ri * rx) * wp.sin(rj * ry)
sc = fourier[2, b, i, j] * wp.sin(ri * rx) * wp.cos(rj * ry)
cc = fourier[3, b, i, j] * wp.cos(ri * rx) * wp.cos(rj * ry)
wp.atomic_add(
array, b, x, y, 1.0 / (wp.float32(nr_freq) ** 2.0) * (ss + cs + sc + cc)
)
|
modulus-main
|
modulus/datapipes/benchmarks/kernels/utils.py
|
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
try:
import warp as wp
except ImportError:
print(
"""NVIDIA WARP is required for this datapipe. This package is under the
NVIDIA Source Code License (NVSCL). To install use:
pip install warp-lang
"""
)
raise SystemExit(1)
from .indexing import (
index_periodic_edges_batched_2d,
index_vec2_periodic_edges_batched_2d,
)
@wp.func
def extrapolate_to_face_2d(
f: float, f_dx: float, f_dy: float, dx: float
): # pragma: no cover
"""Extrapolate cell values to edges of face
Parameters
----------
f : float
Cell value
f_dx : float
X derivative of cell value
f_dy : float
Y derivative of cell value
dx : float
Cell size
Returns
-------
wp.vec4
(value on left x, value on right x, value left y, value right y)
"""
f_xl = f - f_dx * (dx / 2.0)
f_xr = f + f_dx * (dx / 2.0)
f_yl = f - f_dy * (dx / 2.0)
f_yr = f + f_dy * (dx / 2.0)
return wp.vec4(f_xl, f_xr, f_yl, f_yr)
@wp.func
def apply_flux_2d(
f: float,
flux_f_xl_dx: float,
flux_f_xr_dx: float,
flux_f_yl_dy: float,
flux_f_yr_dy: float,
dx: float,
dt: float,
): # pragma: no cover
"""Apply flux to cell
Parameters
----------
f : float
Cell value
flux_f_xl_dx : float
Left x flux
flux_f_xr_dx : float
Right x flux
flux_f_yl_dy : float
Left y flux
flux_f_yr_dy : float
Right y flux
dx : float
Cell size
dt : float
Time step size
Returns
-------
float
Cell value with added flux
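Notes
-----
Conservative finite-volume update; returns
f + dt * dx * (flux_f_xr_dx - flux_f_xl_dx + flux_f_yr_dy - flux_f_yl_dy).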
"""
f += -dt * dx * flux_f_xl_dx
f += dt * dx * flux_f_xr_dx
f += -dt * dx * flux_f_yl_dy
f += dt * dx * flux_f_yr_dy
return f
@wp.func
def apply_flux_vec2_2d(
f: wp.vec2,
flux_f_xl_dx: wp.vec2,
flux_f_xr_dx: wp.vec2,
flux_f_yl_dy: wp.vec2,
flux_f_yr_dy: wp.vec2,
dx: float,
dt: float,
): # pragma: no cover
"""Apply flux on cell with vector value
Parameters
----------
f : wp.vec2
Cell vector value
flux_f_xl_dx : wp.vec2
Vector flux in x left
flux_f_xr_dx : wp.vec2
Vector flux in x right
flux_f_yl_dy : wp.vec2
Vector flux in y left
flux_f_yr_dy : wp.vec2
Vector flux in y right
dx : float
Cell size
dt : float
Time step size
Returns
-------
wp.vec2
Vector cell value with added flux
"""
f += -dt * dx * flux_f_xl_dx
f += dt * dx * flux_f_xr_dx
f += -dt * dx * flux_f_yl_dy
f += dt * dx * flux_f_yr_dy
return f
@wp.func
def euler_flux_2d(
rho_l: float,
rho_r: float,
vx_l: float,
vx_r: float,
vy_l: float,
vy_r: float,
p_l: float,
p_r: float,
gamma: float,
): # pragma: no cover
"""Compute Euler flux
Parameters
----------
rho_l : float
Density left
rho_r : float
Density right
vx_l : float
X velocity left
vx_r : float
X velocity right
vy_l : float
Y velocity left
vy_r : float
Y velocity right
p_l : float
Pressure left
p_r : float
Pressure right
gamma : float
Heat capacity ratio
Returns
-------
wp.vec4
Vector containing mass, momentum x, momentum y, and energy flux.
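Notes
-----
This is a Rusanov (local Lax-Friedrichs) type flux: the physical flux is
evaluated at the arithmetic mean of the two face states, and a stabilizing
diffusion term c / 2 * (q_l - q_r) is subtracted, where c is the larger of
the two local wavespeeds |vx| + sqrt(gamma * p / rho).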
"""
# get energies
e_l = p_l / (gamma - 1.0) + 0.5 * rho_l * (vx_l * vx_l + vy_l * vy_l)
e_r = p_r / (gamma - 1.0) + 0.5 * rho_r * (vx_r * vx_r + vy_r * vy_r)
# averaged states
rho_ave = 0.5 * (rho_l + rho_r)
momx_ave = 0.5 * (rho_l * vx_l + rho_r * vx_r)
momy_ave = 0.5 * (rho_l * vy_l + rho_r * vy_r)
e_ave = 0.5 * (e_l + e_r)
p_ave = (gamma - 1.0) * (
e_ave - 0.5 * (momx_ave * momx_ave + momy_ave * momy_ave) / rho_ave
)
# compute fluxes
flux_mass = momx_ave
flux_momx = momx_ave * momx_ave / rho_ave + p_ave
flux_momy = momx_ave * momy_ave / rho_ave
flux_e = (e_ave + p_ave) * momx_ave / rho_ave
# compute wavespeed
c_l = wp.sqrt(gamma * p_l / rho_l) + wp.abs(vx_l)
c_r = wp.sqrt(gamma * p_r / rho_r) + wp.abs(vx_r)
c = wp.max(c_l, c_r)
# add stabilizing diffusion term
flux_mass -= c * 0.5 * (rho_l - rho_r)
flux_momx -= c * 0.5 * (rho_l * vx_l - rho_r * vx_r)
flux_momy -= c * 0.5 * (rho_l * vy_l - rho_r * vy_r)
flux_e -= c * 0.5 * (e_l - e_r)
return wp.vec4(flux_mass, flux_momx, flux_momy, flux_e)
@wp.kernel
def euler_primitive_to_conserved_batched_2d(
rho: wp.array3d(dtype=float),
vel: wp.array3d(dtype=wp.vec2),
p: wp.array3d(dtype=float),
mass: wp.array3d(dtype=float),
mom: wp.array3d(dtype=wp.vec2),
e: wp.array3d(dtype=float),
gamma: float,
vol: float,
lx: int,
ly: int,
): # pragma: no cover
"""Primitive Euler to conserved values
Parameters
----------
rho : wp.array3d
Density
vel : wp.array3d
Velocity
p : wp.array3d
Pressure
mass : wp.array3d
Mass
mom : wp.array3d
Momentum
e : wp.array3d
Energy
gamma : float
Heat capacity ratio
vol : float
Volume of cell
lx : int
Grid size x dim
ly : int
Grid size y dim
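Notes
-----
Uses the ideal-gas relations: mass = rho * vol, mom = rho * vel * vol and
e = (p / (gamma - 1.0) + 0.5 * rho * |vel|^2) * vol.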
"""
# get index
b, i, j = wp.tid()
# get primitive values
rho_i_j = index_periodic_edges_batched_2d(rho, b, i, j, lx, ly)
vel_i_j = index_vec2_periodic_edges_batched_2d(vel, b, i, j, lx, ly)
p_i_j = index_periodic_edges_batched_2d(p, b, i, j, lx, ly)
# compute conserved values
mass_i_j = rho_i_j * vol
mom_i_j = vel_i_j * rho_i_j * vol
e_i_j = (
p_i_j / (gamma - 1.0)
+ 0.5 * rho_i_j * (vel_i_j[0] * vel_i_j[0] + vel_i_j[1] * vel_i_j[1])
) * vol
# set values
mass[b, i, j] = mass_i_j
mom[b, i, j] = mom_i_j
e[b, i, j] = e_i_j
@wp.kernel
def euler_conserved_to_primitive_batched_2d(
mass: wp.array3d(dtype=float),
mom: wp.array3d(dtype=wp.vec2),
e: wp.array3d(dtype=float),
rho: wp.array3d(dtype=float),
vel: wp.array3d(dtype=wp.vec2),
p: wp.array3d(dtype=float),
gamma: float,
vol: float,
lx: int,
ly: int,
): # pragma: no cover
"""Conserved Euler to primitive value
Parameters
----------
mass : wp.array3d
Mass
mom : wp.array3d
Momentum
e : wp.array3d
Energy
rho : wp.array3d
Density
vel : wp.array3d
Velocity
p : wp.array3d
Pressure
gamma : float
Heat capacity ratio
vol : float
Cell volume
lx : int
Grid size X dim
ly : int
Grid size Y dim
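Notes
-----
Inverts the ideal-gas relations: rho = mass / vol, vel = mom / mass and
p = (gamma - 1.0) * (e / vol - 0.5 * rho * |vel|^2).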
"""
# get index
b, i, j = wp.tid()
# get conserved values
mass_i_j = index_periodic_edges_batched_2d(mass, b, i, j, lx, ly)
mom_i_j = index_vec2_periodic_edges_batched_2d(mom, b, i, j, lx, ly)
e_i_j = index_periodic_edges_batched_2d(e, b, i, j, lx, ly)
# get primitive values
rho_i_j = mass_i_j / vol
vel_i_j = mom_i_j / rho_i_j / vol
p_i_j = (
e_i_j / vol
- 0.5 * rho_i_j * (vel_i_j[0] * vel_i_j[0] + vel_i_j[1] * vel_i_j[1])
) * (gamma - 1.0)
# set values
rho[b, i, j] = rho_i_j
vel[b, i, j] = vel_i_j
p[b, i, j] = p_i_j
@wp.kernel
def euler_extrapolation_batched_2d(
rho: wp.array3d(dtype=float),
vel: wp.array3d(dtype=wp.vec2),
p: wp.array3d(dtype=float),
rho_xl: wp.array3d(dtype=float),
rho_xr: wp.array3d(dtype=float),
rho_yl: wp.array3d(dtype=float),
rho_yr: wp.array3d(dtype=float),
vel_xl: wp.array3d(dtype=wp.vec2),
vel_xr: wp.array3d(dtype=wp.vec2),
vel_yl: wp.array3d(dtype=wp.vec2),
vel_yr: wp.array3d(dtype=wp.vec2),
p_xl: wp.array3d(dtype=float),
p_xr: wp.array3d(dtype=float),
p_yl: wp.array3d(dtype=float),
p_yr: wp.array3d(dtype=float),
gamma: float,
dx: float,
dt: float,
lx: int,
ly: int,
): # pragma: no cover
"""Extrapolate Euler values to edges
Parameters
----------
rho : wp.array3d
Density
vel : wp.array3d
Velocity
p : wp.array3d
Pressure
rho_xl : wp.array3d
Density x left
rho_xr : wp.array3d
Density x right
rho_yl : wp.array3d
Density y left
rho_yr : wp.array3d
Density y right
vel_xl : wp.array3d
Velocity x left
vel_xr : wp.array3d
Velocity x right
vel_yl : wp.array3d
Velocity y left
vel_yr : wp.array3d
Velocity y right
p_xl : wp.array3d
Pressure x left
p_xr : wp.array3d
Pressure x right
p_yl : wp.array3d
Pressure y left
p_yr : wp.array3d
Pressure y right
gamma : float
Heat capacity ratio
dx : float
Cell size
dt : float
Time step size
lx : int
Grid size x
ly : int
Grid size y
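Notes
-----
A MUSCL-Hancock style predictor: primitive gradients are computed with
central differences, the state is advanced a half time step using the
primitive-variable form of the Euler equations, and the predicted values
are then extrapolated linearly to the four cell faces.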
"""
# get index
b, i, j = wp.tid()
# volume
vol = dx * dx
# get rho stencil
rho_1_1 = index_periodic_edges_batched_2d(rho, b, i, j, lx, ly)
rho_2_1 = index_periodic_edges_batched_2d(rho, b, i + 1, j, lx, ly)
rho_1_2 = index_periodic_edges_batched_2d(rho, b, i, j + 1, lx, ly)
rho_0_1 = index_periodic_edges_batched_2d(rho, b, i - 1, j, lx, ly)
rho_1_0 = index_periodic_edges_batched_2d(rho, b, i, j - 1, lx, ly)
# get momentum stencil
vel_1_1 = index_vec2_periodic_edges_batched_2d(vel, b, i, j, lx, ly)
vel_2_1 = index_vec2_periodic_edges_batched_2d(vel, b, i + 1, j, lx, ly)
vel_1_2 = index_vec2_periodic_edges_batched_2d(vel, b, i, j + 1, lx, ly)
vel_0_1 = index_vec2_periodic_edges_batched_2d(vel, b, i - 1, j, lx, ly)
vel_1_0 = index_vec2_periodic_edges_batched_2d(vel, b, i, j - 1, lx, ly)
# get energy stencil
p_1_1 = index_periodic_edges_batched_2d(p, b, i, j, lx, ly)
p_2_1 = index_periodic_edges_batched_2d(p, b, i + 1, j, lx, ly)
p_1_2 = index_periodic_edges_batched_2d(p, b, i, j + 1, lx, ly)
p_0_1 = index_periodic_edges_batched_2d(p, b, i - 1, j, lx, ly)
p_1_0 = index_periodic_edges_batched_2d(p, b, i, j - 1, lx, ly)
# compute density grad
rho_dx = (rho_2_1 - rho_0_1) / (2.0 * dx)
rho_dy = (rho_1_2 - rho_1_0) / (2.0 * dx)
# compute velocity grad
vel_dx = (vel_2_1 - vel_0_1) / (2.0 * dx)
vel_dy = (vel_1_2 - vel_1_0) / (2.0 * dx)
# compute pressure grad
p_dx = (p_2_1 - p_0_1) / (2.0 * dx)
p_dy = (p_1_2 - p_1_0) / (2.0 * dx)
# extrapolate half time step density
rho_prime = rho_1_1 - 0.5 * dt * (
vel_1_1[0] * rho_dx
+ rho_1_1 * vel_dx[0]
+ vel_1_1[1] * rho_dy
+ rho_1_1 * vel_dy[1]
)
vx_prime = vel_1_1[0] - 0.5 * dt * (
vel_1_1[0] * vel_dx[0] + vel_1_1[1] * vel_dy[0] + (1.0 / rho_1_1) * p_dx
)
vy_prime = vel_1_1[1] - 0.5 * dt * (
vel_1_1[0] * vel_dx[1] + vel_1_1[1] * vel_dy[1] + (1.0 / rho_1_1) * p_dy
)
p_prime = p_1_1 - 0.5 * dt * (
gamma * p_1_1 * (vel_dx[0] + vel_dy[1]) + vel_1_1[0] * p_dx + vel_1_1[1] * p_dy
)
# extrapolate in space to face centers
rho_space_extra = extrapolate_to_face_2d(rho_prime, rho_dx, rho_dy, dx)
vx_space_extra = extrapolate_to_face_2d(vx_prime, vel_dx[0], vel_dy[0], dx)
vy_space_extra = extrapolate_to_face_2d(vy_prime, vel_dx[1], vel_dy[1], dx)
p_space_extra = extrapolate_to_face_2d(p_prime, p_dx, p_dy, dx)
# store values
rho_xl[b, i, j] = rho_space_extra[0]
rho_xr[b, i, j] = rho_space_extra[1]
rho_yl[b, i, j] = rho_space_extra[2]
rho_yr[b, i, j] = rho_space_extra[3]
vel_xl[b, i, j] = wp.vec2(vx_space_extra[0], vy_space_extra[0])
vel_xr[b, i, j] = wp.vec2(vx_space_extra[1], vy_space_extra[1])
vel_yl[b, i, j] = wp.vec2(vx_space_extra[2], vy_space_extra[2])
vel_yr[b, i, j] = wp.vec2(vx_space_extra[3], vy_space_extra[3])
p_xl[b, i, j] = p_space_extra[0]
p_xr[b, i, j] = p_space_extra[1]
p_yl[b, i, j] = p_space_extra[2]
p_yr[b, i, j] = p_space_extra[3]
@wp.kernel
def euler_get_flux_batched_2d(
rho_xl: wp.array3d(dtype=float),
rho_xr: wp.array3d(dtype=float),
rho_yl: wp.array3d(dtype=float),
rho_yr: wp.array3d(dtype=float),
vel_xl: wp.array3d(dtype=wp.vec2),
vel_xr: wp.array3d(dtype=wp.vec2),
vel_yl: wp.array3d(dtype=wp.vec2),
vel_yr: wp.array3d(dtype=wp.vec2),
p_xl: wp.array3d(dtype=float),
p_xr: wp.array3d(dtype=float),
p_yl: wp.array3d(dtype=float),
p_yr: wp.array3d(dtype=float),
mass_flux_x: wp.array3d(dtype=float),
mass_flux_y: wp.array3d(dtype=float),
mom_flux_x: wp.array3d(dtype=wp.vec2),
mom_flux_y: wp.array3d(dtype=wp.vec2),
e_flux_x: wp.array3d(dtype=float),
e_flux_y: wp.array3d(dtype=float),
gamma: float,
lx: int,
ly: int,
): # pragma: no cover
"""Use extrapolated Euler values to compute fluxes
Parameters
----------
rho_xl : wp.array3d
Density x left
rho_xr : wp.array3d
Density x right
rho_yl : wp.array3d
Density y left
rho_yr : wp.array3d
Density y right
vel_xl : wp.array3d
Velocity x left
vel_xr : wp.array3d
Velocity x right
vel_yl : wp.array3d
Velocity y left
vel_yr : wp.array3d
Velocity y right
p_xl : wp.array3d
Pressure x left
p_xr : wp.array3d
Pressure x right
p_yl : wp.array3d
Pressure y left
p_yr : wp.array3d
Pressure y right
mass_flux_x : wp.array3d
Mass flux x
mass_flux_y : wp.array3d
Mass flux y
mom_flux_x : wp.array3d
Momentum flux x
mom_flux_y : wp.array3d
Momentum flux y
e_flux_x : wp.array3d
Energy flux x
e_flux_y : wp.array3d
Energy flux y
gamma : float
Heat capacity ratio
lx : int
Grid size x
ly : int
Grid size y
"""
# get index
b, i, j = wp.tid()
# get space extrapolation for faces
rho_xl_1 = index_periodic_edges_batched_2d(rho_xl, b, i + 1, j, lx, ly)
rho_xr_0 = index_periodic_edges_batched_2d(rho_xr, b, i, j, lx, ly)
rho_yl_1 = index_periodic_edges_batched_2d(rho_yl, b, i, j + 1, lx, ly)
rho_yr_0 = index_periodic_edges_batched_2d(rho_yr, b, i, j, lx, ly)
vel_xl_1 = index_vec2_periodic_edges_batched_2d(vel_xl, b, i + 1, j, lx, ly)
vel_xr_0 = index_vec2_periodic_edges_batched_2d(vel_xr, b, i, j, lx, ly)
vel_yl_1 = index_vec2_periodic_edges_batched_2d(vel_yl, b, i, j + 1, lx, ly)
vel_yr_0 = index_vec2_periodic_edges_batched_2d(vel_yr, b, i, j, lx, ly)
p_xl_1 = index_periodic_edges_batched_2d(p_xl, b, i + 1, j, lx, ly)
p_xr_0 = index_periodic_edges_batched_2d(p_xr, b, i, j, lx, ly)
p_yl_1 = index_periodic_edges_batched_2d(p_yl, b, i, j + 1, lx, ly)
p_yr_0 = index_periodic_edges_batched_2d(p_yr, b, i, j, lx, ly)
# compute fluxes
flux_x = euler_flux_2d(
rho_xl_1,
rho_xr_0,
vel_xl_1[0],
vel_xr_0[0],
vel_xl_1[1],
vel_xr_0[1],
p_xl_1,
p_xr_0,
gamma,
)
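# y-direction flux: vx/vy are passed swapped so euler_flux_2d treats y as
# the normal direction; the momentum components are swapped back below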
flux_y = euler_flux_2d(
rho_yl_1,
rho_yr_0,
vel_yl_1[1],
vel_yr_0[1],
vel_yl_1[0],
vel_yr_0[0],
p_yl_1,
p_yr_0,
gamma,
)
# set values
mass_flux_x[b, i, j] = flux_x[0]
mass_flux_y[b, i, j] = flux_y[0]
mom_flux_x[b, i, j] = wp.vec2(flux_x[1], flux_x[2])
mom_flux_y[b, i, j] = wp.vec2(flux_y[2], flux_y[1])
e_flux_x[b, i, j] = flux_x[3]
e_flux_y[b, i, j] = flux_y[3]
@wp.kernel
def euler_apply_flux_batched_2d(
mass_flux_x: wp.array3d(dtype=float),
mass_flux_y: wp.array3d(dtype=float),
mom_flux_x: wp.array3d(dtype=wp.vec2),
mom_flux_y: wp.array3d(dtype=wp.vec2),
e_flux_x: wp.array3d(dtype=float),
e_flux_y: wp.array3d(dtype=float),
mass: wp.array3d(dtype=float),
mom: wp.array3d(dtype=wp.vec2),
e: wp.array3d(dtype=float),
dx: float,
dt: float,
lx: int,
ly: int,
): # pragma: no cover
"""Apply fluxes to Euler values
Parameters
----------
mass_flux_x : wp.array3d
Mass flux X
mass_flux_y : wp.array3d
Mass flux Y
mom_flux_x : wp.array3d
Momentum flux X
mom_flux_y : wp.array3d
Momentum flux Y
e_flux_x : wp.array3d
Energy flux X
e_flux_y : wp.array3d
Energy flux Y
mass : wp.array3d
Mass
mom : wp.array3d
Momentum
e : wp.array3d
Energy
dx : float
Cell size
dt : float
Time step size
lx : int
Grid size x
ly : int
Grid size y
"""
# get index
b, i, j = wp.tid()
# get new mass
mass_1 = index_periodic_edges_batched_2d(mass, b, i, j, lx, ly)
mass_flux_x_1 = index_periodic_edges_batched_2d(mass_flux_x, b, i, j, lx, ly)
mass_flux_x_0 = index_periodic_edges_batched_2d(mass_flux_x, b, i - 1, j, lx, ly)
mass_flux_y_1 = index_periodic_edges_batched_2d(mass_flux_y, b, i, j, lx, ly)
mass_flux_y_0 = index_periodic_edges_batched_2d(mass_flux_y, b, i, j - 1, lx, ly)
new_mass = apply_flux_2d(
mass_1, mass_flux_x_1, mass_flux_x_0, mass_flux_y_1, mass_flux_y_0, dx, dt
)
# get new mom
mom_1 = index_vec2_periodic_edges_batched_2d(mom, b, i, j, lx, ly)
mom_flux_x_1 = index_vec2_periodic_edges_batched_2d(mom_flux_x, b, i, j, lx, ly)
mom_flux_x_0 = index_vec2_periodic_edges_batched_2d(mom_flux_x, b, i - 1, j, lx, ly)
mom_flux_y_1 = index_vec2_periodic_edges_batched_2d(mom_flux_y, b, i, j, lx, ly)
mom_flux_y_0 = index_vec2_periodic_edges_batched_2d(mom_flux_y, b, i, j - 1, lx, ly)
new_mom = apply_flux_vec2_2d(
mom_1, mom_flux_x_1, mom_flux_x_0, mom_flux_y_1, mom_flux_y_0, dx, dt
)
# get new energy
e_1 = index_periodic_edges_batched_2d(e, b, i, j, lx, ly)
e_flux_x_1 = index_periodic_edges_batched_2d(e_flux_x, b, i, j, lx, ly)
e_flux_x_0 = index_periodic_edges_batched_2d(e_flux_x, b, i - 1, j, lx, ly)
e_flux_y_1 = index_periodic_edges_batched_2d(e_flux_y, b, i, j, lx, ly)
e_flux_y_0 = index_periodic_edges_batched_2d(e_flux_y, b, i, j - 1, lx, ly)
new_e = apply_flux_2d(e_1, e_flux_x_1, e_flux_x_0, e_flux_y_1, e_flux_y_0, dx, dt)
# set values
mass[b, i, j] = new_mass
mom[b, i, j] = new_mom
e[b, i, j] = new_e
@wp.kernel
def initialize_kelvin_helmoltz_batched_2d(
rho: wp.array3d(dtype=float),
vel: wp.array3d(dtype=wp.vec2),
p: wp.array3d(dtype=float),
w: wp.array2d(dtype=float),
sigma: float,
lx: float,
ly: float,
nr_freq: int,
): # pragma: no cover
"""Initialize state for Kelvin Helmoltz Instability
Parameters
----------
rho : wp.array3d
Density
vel : wp.array3d
Velocity
p : wp.array3d
Pressure
w : wp.array2d
Perturbation frequency amplitude
sigma : float
Perturbation sigma
lx : float
Grid size x
ly : float
Grid size y
nr_freq : int
Number of frequencies in perturbation
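Notes
-----
Sets up two opposing horizontal streams (ux = +/-0.5) with density 2.0 inside
the band |y - 0.5| < 0.25 and 1.0 outside, uniform pressure 2.5, and a random
sinusoidal vertical velocity perturbation concentrated at the shear layers
y = 0.25 and y = 0.75.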
"""
# get cell coords
b, i, j = wp.tid()
x = wp.float(i) / wp.float(lx)
y = wp.float(j) / wp.float(ly)
# initial flow bands
if wp.abs(y - 0.5) < 0.25:
ux = 0.5
r = 2.0
else:
ux = -0.5
r = 1.0
# perturbation
uy = wp.float32(0.0)
for f in range(nr_freq):
ff = wp.float32(f + 1)
uy += (
ff
* w[b, f]
* wp.sin(4.0 * 3.14159 * x * ff)
* (
wp.exp(-(y - 0.25) * (y - 0.25) / (2.0 * sigma * sigma))
+ wp.exp(-(y - 0.75) * (y - 0.75) / (2.0 * sigma * sigma))
)
)
u = wp.vec2(ux, uy)
# set values
rho[b, i, j] = r
vel[b, i, j] = u
p[b, i, j] = 2.5
|
modulus-main
|
modulus/datapipes/benchmarks/kernels/finite_volume.py
|
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
try:
import warp as wp
except ImportError:
print(
"""NVIDIA WARP is required for this datapipe. This package is under the
NVIDIA Source Code License (NVSCL). To install use:
pip install warp-lang
"""
)
raise SystemExit(1)
# TODO bug in warp mod function
@wp.func
def _mod_int(x: int, length: int): # pragma: no cover
"""Mod int
Parameters
----------
x : int
Int to mod
length : int
Mod by value
Returns
-------
int
Mod of x
"""
if x < 0:
return x + length
elif x > length - 1:
return x - length
return x
@wp.func
def index_zero_edges_batched_2d(
array: wp.array3d(dtype=float), b: int, x: int, y: int, lx: int, ly: int
): # pragma: no cover
"""Index batched 2d array with zero on edges
Parameters
----------
array : wp.array3d
Array to index
b : int
Batch index
x : int
X index
y : int
Y index
lx : int
Grid size x
ly : int
Grid size y
Returns
-------
float
Array value
"""
if x == -1:
return 0.0
elif x == lx:
return 0.0
elif y == -1:
return 0.0
elif y == ly:
return 0.0
else:
return array[b, x, y]
@wp.func
def index_clamped_edges_batched_2d(
array: wp.array3d(dtype=float), b: int, x: int, y: int, lx: int, ly: int
): # pragma: no cover
"""Index batched 2d array with edges clamped to same value
Parameters
----------
array : wp.array3d
Array to index
b : int
Batch index
x : int
X index
y : int
Y index
lx : int
Grid size x
ly : int
Grid size y
Returns
-------
float
Array value
"""
x = wp.clamp(x, 0, lx - 1)
y = wp.clamp(y, 0, ly - 1)
return array[b, x, y]
@wp.func
def index_periodic_edges_batched_2d(
array: wp.array3d(dtype=float), b: int, x: int, y: int, lx: int, ly: int
): # pragma: no cover
"""Index batched 2d array with periodic edges
Parameters
----------
array : wp.array3d
Array to index
b : int
Batch index
x : int
X index
y : int
Y index
lx : int
Grid size x
ly : int
Grid size y
Returns
-------
float
Array value
"""
x = _mod_int(x, lx)
y = _mod_int(y, ly)
return array[b, x, y]
@wp.func
def index_vec2_periodic_edges_batched_2d(
vec: wp.array3d(dtype=wp.vec2), b: int, x: int, y: int, lx: int, ly: int
): # pragma: no cover
"""Index batched 2d array of wp.vec2 with periodic edges
Parameters
----------
vec : wp.array3d
Array to index
b : int
Batch index
x : int
X index
y : int
Y index
lx : int
Grid size x
ly : int
Grid size y
Returns
-------
wp.vec2
Vector value
"""
x = _mod_int(x, lx)
y = _mod_int(y, ly)
return vec[b, x, y]
|
modulus-main
|
modulus/datapipes/benchmarks/kernels/indexing.py
|
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
try:
import warp as wp
except ImportError:
print(
"""NVIDIA WARP is required for this datapipe. This package is under the
NVIDIA Source Code License (NVSCL). To install use:
pip install warp-lang
"""
)
raise SystemExit(1)
from .indexing import index_zero_edges_batched_2d, index_clamped_edges_batched_2d
@wp.kernel
def darcy_mgrid_jacobi_iterative_batched_2d(
darcy0: wp.array3d(dtype=float),
darcy1: wp.array3d(dtype=float),
permeability: wp.array3d(dtype=float),
source: float,
lx: int,
ly: int,
dx: float,
mgrid_reduction_factor: int,
): # pragma: no cover
"""Mult-grid jacobi step for Darcy equation.
Parameters
----------
darcy0 : wp.array3d
Darcy solution previous step
darcy1 : wp.array3d
Darcy solution for next step
permeability : wp.array3d
Permeability field for Darcy equation
source : float
Source value for Darcy equation
lx : int
Length of domain in x dim
ly : int
Length of domain in y dim
dx : float
Grid cell size
mgrid_reduction_factor : int
Grid reduction factor of the multi-grid level currently running
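Notes
-----
One Jacobi relaxation step for the variable-permeability Darcy problem
div(permeability * grad(darcy)) + source = 0, discretized with central
differences on a grid coarsened by ``mgrid_reduction_factor``; the caller
swaps the ``darcy0`` / ``darcy1`` buffers after each step.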
"""
# get index
b, x, y = wp.tid()
# update index from grid reduction factor
gx = mgrid_reduction_factor * x + (mgrid_reduction_factor - 1)
gy = mgrid_reduction_factor * y + (mgrid_reduction_factor - 1)
gdx = dx * wp.float32(mgrid_reduction_factor)
# compute darcy stencil
d_1_1 = index_zero_edges_batched_2d(darcy0, b, gx, gy, lx, ly)
d_0_1 = index_zero_edges_batched_2d(
darcy0, b, gx - mgrid_reduction_factor, gy, lx, ly
)
d_2_1 = index_zero_edges_batched_2d(
darcy0, b, gx + mgrid_reduction_factor, gy, lx, ly
)
d_1_0 = index_zero_edges_batched_2d(
darcy0, b, gx, gy - mgrid_reduction_factor, lx, ly
)
d_1_2 = index_zero_edges_batched_2d(
darcy0, b, gx, gy + mgrid_reduction_factor, lx, ly
)
# compute permeability stencil
p_1_1 = index_clamped_edges_batched_2d(permeability, b, gx, gy, lx, ly)
p_0_1 = index_clamped_edges_batched_2d(
permeability, b, gx - mgrid_reduction_factor, gy, lx, ly
)
p_2_1 = index_clamped_edges_batched_2d(
permeability, b, gx + mgrid_reduction_factor, gy, lx, ly
)
p_1_0 = index_clamped_edges_batched_2d(
permeability, b, gx, gy - mgrid_reduction_factor, lx, ly
)
p_1_2 = index_clamped_edges_batched_2d(
permeability, b, gx, gy + mgrid_reduction_factor, lx, ly
)
# compute terms
dx_squared = gdx * gdx
t_1 = p_1_1 * (d_0_1 + d_2_1 + d_1_0 + d_1_2) / dx_squared
t_2 = ((p_2_1 - p_0_1) * (d_2_1 - d_0_1)) / (2.0 * gdx)
t_3 = ((p_1_2 - p_1_0) * (d_1_2 - d_1_0)) / (2.0 * gdx)
# jacobi iterative method
d_star = (t_1 + t_2 + t_3 + source) / (p_1_1 * 4.0 / dx_squared)
# buffers get swapped each iteration
darcy1[b, gx, gy] = d_star
@wp.kernel
def mgrid_inf_residual_batched_2d(
phi0: wp.array3d(dtype=float),
phi1: wp.array3d(dtype=float),
inf_res: wp.array(dtype=float),
mgrid_reduction_factor: int,
): # pragma: no cover
"""Infinity norm for checking multi-grid solutions.
Parameters
----------
phi0 : wp.array3d
Previous solution
phi1 : wp.array3d
Current solution
inf_res : wp.array
Array to hold infinity norm value in
mgrid_reduction_factor : int
Grid reduction factor of the multi-grid level currently running
"""
b, x, y = wp.tid()
gx = mgrid_reduction_factor * x + (mgrid_reduction_factor - 1)
gy = mgrid_reduction_factor * y + (mgrid_reduction_factor - 1)
wp.atomic_max(inf_res, 0, wp.abs(phi0[b, gx, gy] - phi1[b, gx, gy]))
|
modulus-main
|
modulus/datapipes/benchmarks/kernels/finite_difference.py
|
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import h5py
import numpy as np
import torch
try:
import nvidia.dali as dali
import nvidia.dali.plugin.pytorch as dali_pth
except ImportError:
raise ImportError(
"DALI dataset requires NVIDIA DALI package to be installed. "
+ "The package can be installed at:\n"
+ "https://docs.nvidia.com/deeplearning/dali/user-guide/docs/installation.html"
)
from dataclasses import dataclass
from typing import Iterable, List, Union, Tuple
from pathlib import Path
from torch.utils.data import Dataset
from ..datapipe import Datapipe
from ..meta import DatapipeMetaData
Tensor = torch.Tensor
@dataclass
class MetaData(DatapipeMetaData):
name: str = "ERA5HDF5"
# Optimization
auto_device: bool = True
cuda_graphs: bool = True
# Parallel
ddp_sharding: bool = True
class ERA5HDF5Datapipe(Datapipe):
"""ERA5 DALI data pipeline for HDF5 files
Parameters
----------
data_dir : str
Directory where ERA5 data is stored
stats_dir : Union[str, None], optional
Directory to data statistic numpy files for normalization, if None, no normalization
will be used, by default None
channels : Union[List[int], None], optional
Defines which ERA5 variables to load, if None will use all in HDF5 file, by default None
batch_size : int, optional
Batch size, by default 1
stride : int, optional
Number of steps between input and output variables. For example, if the dataset
contains data at every 6 hours, stride 1 gives a 6 hour delta-t and
stride 2 gives a 12 hour delta-t, by default 1
num_steps : int, optional
Number of timesteps included in the output variables, by default 1
patch_size : Union[Tuple[int, int], int, None], optional
If specified, crops input and output variables so image dimensions are
divisible by patch_size, by default None
num_samples_per_year : int, optional
Number of samples randomly taken from each year. If None, all will be used, by default None
shuffle : bool, optional
Shuffle dataset, by default True
num_workers : int, optional
Number of workers, by default 1
device: Union[str, torch.device], optional
Device for DALI pipeline to run on, by default cuda
process_rank : int, optional
Rank ID of local process, by default 0
world_size : int, optional
Number of training processes, by default 1
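Example
-------
A minimal usage sketch; the paths below are hypothetical and assume yearly
HDF5 files (e.g. ``1980.h5``) containing a ``fields`` array of shape
[num_samples, num_channels, height, width]:
>>> datapipe = ERA5HDF5Datapipe(
...     data_dir="/data/era5/train",
...     stats_dir="/data/era5/stats",
... )  # doctest: +SKIP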
"""
def __init__(
self,
data_dir: str,
stats_dir: Union[str, None] = None,
channels: Union[List[int], None] = None,
batch_size: int = 1,
num_steps: int = 1,
stride: int = 1,
patch_size: Union[Tuple[int, int], int, None] = None,
num_samples_per_year: Union[int, None] = None,
shuffle: bool = True,
num_workers: int = 1,
device: Union[str, torch.device] = "cuda",
process_rank: int = 0,
world_size: int = 1,
):
super().__init__(meta=MetaData())
self.batch_size = batch_size
self.num_workers = num_workers
self.shuffle = shuffle
self.data_dir = Path(data_dir)
self.stats_dir = Path(stats_dir) if stats_dir is not None else None
self.channels = channels
self.stride = stride
self.num_steps = num_steps
self.num_samples_per_year = num_samples_per_year
self.process_rank = process_rank
self.world_size = world_size
if isinstance(patch_size, int):
patch_size = (patch_size, patch_size)
self.patch_size = patch_size
# Set up device, needed for pipeline
if isinstance(device, str):
device = torch.device(device)
# Need a index id if cuda
if device.type == "cuda" and device.index == None:
device = torch.device("cuda:0")
self.device = device
# check root directory exists
if not self.data_dir.is_dir():
raise IOError(f"Error, data directory {self.data_dir} does not exist")
if self.stats_dir is not None and not self.stats_dir.is_dir():
raise IOError(f"Error, stats directory {self.stats_dir} does not exist")
self.parse_dataset_files()
self.load_statistics()
self.pipe = self._create_pipeline()
def parse_dataset_files(self) -> None:
"""Parses the data directory for valid HDF5 files and determines training samples
Raises
------
ValueError
If the specified channels or number of samples per year are not valid
"""
# get all input data files
self.data_paths = sorted(self.data_dir.glob("????.h5"))
for data_path in self.data_paths:
self.logger.info(f"ERA5 file found: {data_path}")
self.n_years = len(self.data_paths)
self.logger.info(f"Number of years: {self.n_years}")
# get total number of examples and image shape from the first file,
# assuming other files have exactly the same format.
self.logger.info(f"Getting file stats from {self.data_paths[0]}")
with h5py.File(self.data_paths[0], "r") as f:
# truncate the dataset to avoid out-of-range sampling
data_samples_per_year = f["fields"].shape[0] - self.num_steps * self.stride
self.img_shape = f["fields"].shape[2:]
# If channels not provided, use all of them
if self.channels is None:
self.channels = [i for i in range(f["fields"].shape[1])]
# If num_samples_per_year is None, use all samples
if self.num_samples_per_year is None:
self.num_samples_per_year = data_samples_per_year
# Adjust image shape if patch_size defined
if self.patch_size is not None:
self.img_shape = [
s - s % self.patch_size[i] for i, s in enumerate(self.img_shape)
]
self.logger.info(f"Input image shape: {self.img_shape}")
# Get total length
self.total_length = self.n_years * self.num_samples_per_year
self.length = self.total_length
# Sanity checks
if max(self.channels) >= f["fields"].shape[1]:
raise ValueError(
f"Provided channel has indexes greater than the number \
of fields {f['fields'].shape[1]}"
)
if self.num_samples_per_year > data_samples_per_year:
raise ValueError(
f"num_samples_per_year ({self.num_samples_per_year}) > number of \
samples available ({data_samples_per_year})!"
)
self.logger.info(f"Number of samples/year: {self.num_samples_per_year}")
self.logger.info(f"Number of channels available: {f['fields'].shape[1]}")
def load_statistics(self) -> None:
"""Loads ERA5 statistics from pre-computed numpy files
The statistics files should be named global_means.npy and global_stds.npy, have
a shape of [1, C, 1, 1], and be located in the stats_dir.
Raises
------
IOError
If mean or std numpy files are not found
AssertionError
If loaded numpy arrays are not of correct size
"""
# If no stats dir we just skip loading the stats
if self.stats_dir is None:
self.mu = None
self.sd = None
return
# load normalisation values
mean_stat_file = self.stats_dir / Path("global_means.npy")
std_stat_file = self.stats_dir / Path("global_stds.npy")
if not mean_stat_file.exists():
raise IOError(f"Mean statistics file {mean_stat_file} not found")
if not std_stat_file.exists():
raise IOError(f"Std statistics file {std_stat_file} not found")
# has shape [1, C, 1, 1]
self.mu = np.load(str(mean_stat_file))[:, self.channels]
# has shape [1, C, 1, 1]
self.sd = np.load(str(std_stat_file))[:, self.channels]
if not self.mu.shape == self.sd.shape == (1, len(self.channels), 1, 1):
raise AssertionError("Error, normalisation arrays have wrong shape")
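# NOTE (editor's sketch, not part of the original file): the stats files
# consumed above can be produced offline. A minimal, hypothetical example
# assuming a `fields` array of shape [N, C, H, W]:
#
#   mu = fields.mean(axis=(0, 2, 3), keepdims=True)  # -> [1, C, 1, 1]
#   sd = fields.std(axis=(0, 2, 3), keepdims=True)   # -> [1, C, 1, 1]
#   np.save("global_means.npy", mu)
#   np.save("global_stds.npy", sd)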
def _create_pipeline(self) -> dali.Pipeline:
"""Create DALI pipeline
Returns
-------
dali.Pipeline
HDF5 DALI pipeline
"""
pipe = dali.Pipeline(
batch_size=self.batch_size,
num_threads=2,
prefetch_queue_depth=2,
py_num_workers=self.num_workers,
device_id=self.device.index,
py_start_method="spawn",
)
with pipe:
source = ERA5DaliExternalSource(
data_paths=self.data_paths,
num_samples=self.total_length,
channels=self.channels,
stride=self.stride,
num_steps=self.num_steps,
num_samples_per_year=self.num_samples_per_year,
batch_size=self.batch_size,
shuffle=self.shuffle,
process_rank=self.process_rank,
world_size=self.world_size,
)
# Update length of dataset
self.length = len(source) // self.batch_size
# Read current batch.
invar, outvar = dali.fn.external_source(
source,
num_outputs=2,
parallel=True,
batch=False,
)
if self.device.type == "cuda":
# Move tensors to GPU as external_source won't do that.
invar = invar.gpu()
outvar = outvar.gpu()
# Crop.
h, w = self.img_shape
invar = invar[:, :h, :w]
outvar = outvar[:, :, :h, :w]
# Standardize.
if self.stats_dir is not None:
invar = dali.fn.normalize(invar, mean=self.mu[0], stddev=self.sd[0])
outvar = dali.fn.normalize(outvar, mean=self.mu, stddev=self.sd)
# Set outputs.
pipe.set_outputs(invar, outvar)
return pipe
def __iter__(self):
# Reset the pipeline before creating an iterator to enable epochs.
self.pipe.reset()
# Create DALI PyTorch iterator.
return dali_pth.DALIGenericIterator([self.pipe], ["invar", "outvar"])
def __len__(self):
return self.length
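# --- Usage sketch (editorial addition, hedged) ---
# The paths and channel choices below are hypothetical placeholders; the only
# assumption is the documented layout of yearly HDF5 files (e.g. 1980.h5)
# holding a "fields" dataset of shape [N, C, H, W].
#
#   datapipe = ERA5HDF5Datapipe(
#       data_dir="/data/era5/train",   # hypothetical path
#       stats_dir="/data/era5/stats",  # hypothetical path
#       channels=[0, 1],
#       batch_size=2,
#       num_steps=1,
#       stride=1,
#   )
#   for batch in datapipe:
#       invar = batch[0]["invar"]    # [B, C, H, W]
#       outvar = batch[0]["outvar"]  # [B, T, C, H, W]
#       break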
class ERA5DaliExternalSource:
"""DALI Source for lazy-loading the HDF5 ERA5 files
Parameters
----------
data_paths : Iterable[str]
Directory where ERA5 data is stored
num_samples : int
Total number of training samples
channels : Iterable[int]
List representing which ERA5 variables to load
stride : int
Number of steps between input and output variables
num_steps : int
Number of timesteps included in the output variables
num_samples_per_year : int
Number of samples randomly taken from each year
batch_size : int, optional
Batch size, by default 1
shuffle : bool, optional
Shuffle dataset, by default True
process_rank : int, optional
Rank ID of local process, by default 0
world_size : int, optional
Number of training processes, by default 1
Note
----
For more information about DALI external source operator:
https://docs.nvidia.com/deeplearning/dali/archives/dali_1_13_0/user-guide/docs/examples/general/data_loading/parallel_external_source.html
"""
def __init__(
self,
data_paths: Iterable[str],
num_samples: int,
channels: Iterable[int],
num_steps: int,
stride: int,
num_samples_per_year: int,
batch_size: int = 1,
shuffle: bool = True,
process_rank: int = 0,
world_size: int = 1,
):
self.data_paths = list(data_paths)
# Will be populated later once each worker starts running in its own process.
self.data_files = None
self.num_samples = num_samples
self.chans = list(channels)
self.num_steps = num_steps
self.stride = stride
self.num_samples_per_year = num_samples_per_year
self.batch_size = batch_size
self.shuffle = shuffle
self.last_epoch = None
self.indices = np.arange(num_samples)
# Shard from indices if running in parallel
self.indices = np.array_split(self.indices, world_size)[process_rank]
# Get number of full batches, ignore possible last incomplete batch for now.
# Also, DALI external source does not support incomplete batches in parallel mode.
self.num_batches = len(self.indices) // self.batch_size
def __call__(self, sample_info: dali.types.SampleInfo) -> Tuple[Tensor, Tensor]:
if sample_info.iteration >= self.num_batches:
raise StopIteration()
if self.data_files is None:
# This will be called once per worker. Workers are persistent,
# so there is no need to explicitly close the files - this will be done
# when corresponding pipeline/dataset is destroyed.
self.data_files = [h5py.File(path, "r") for path in self.data_paths]
# Shuffle before the next epoch starts.
if self.shuffle and sample_info.epoch_idx != self.last_epoch:
# All workers use the same rng seed so the resulting
# indices are the same across workers.
np.random.default_rng(seed=sample_info.epoch_idx).shuffle(self.indices)
self.last_epoch = sample_info.epoch_idx
# Get local indices from global index.
idx = self.indices[sample_info.idx_in_epoch]
year_idx = idx // self.num_samples_per_year
in_idx = idx % self.num_samples_per_year
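# Example (hypothetical numbers): with num_samples_per_year=1460, a global
# index of 1500 maps to year_idx=1 and in_idx=40, i.e. the 41st snapshot
# of the second yearly file.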
data = self.data_files[year_idx]["fields"]
# Has [C,H,W] shape.
invar = data[in_idx, self.chans]
# Has [T,C,H,W] shape.
outvar = np.empty((self.num_steps,) + invar.shape, dtype=invar.dtype)
for i in range(self.num_steps):
out_idx = in_idx + (i + 1) * self.stride
outvar[i] = data[out_idx, self.chans]
return invar, outvar
def __len__(self):
return len(self.indices)
|
modulus-main
|
modulus/datapipes/climate/era5_hdf5.py
|
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
|
modulus-main
|
modulus/datapipes/climate/era5_netcdf.py
|
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .era5_hdf5 import ERA5HDF5Datapipe
|
modulus-main
|
modulus/datapipes/climate/__init__.py
|
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import types
import math
import torch
from torch.utils.data import DataLoader
# distributed stuff
import torch.distributed as dist
from modulus.utils.sfno.distributed import comm
def init_distributed_io(params): # pragma: no cover
"""
Initialize distributed IO
"""
# set up sharding
if dist.is_initialized():
# this should always be safe now that data comm is orthogonal to
# model comms
params.data_num_shards = comm.get_size("data")
params.data_shard_id = comm.get_rank("data")
params.io_grid = [1, comm.get_size("h"), comm.get_size("w")]
params.io_rank = [0, comm.get_rank("h"), comm.get_rank("w")]
else:
params.data_num_shards = 1
params.data_shard_id = 0
params.io_grid = [1, 1, 1]
params.io_rank = [0, 0, 0]
return
# define IO grid:
params.io_grid = [1, 1, 1] if not hasattr(params, "io_grid") else params.io_grid
# to simplify, the number of total IO ranks has to be 1 or equal to the model parallel size
num_io_ranks = math.prod(params.io_grid)
assert (num_io_ranks == 1) or (num_io_ranks == comm.get_size("spatial"))
assert (params.io_grid[1] == comm.get_size("h")) or (params.io_grid[1] == 1)
assert (params.io_grid[2] == comm.get_size("w")) or (params.io_grid[2] == 1)
# get io ranks: mp_rank = x_coord + params.io_grid[0] * (ycoord + params.io_grid[1] * zcoord)
mp_rank = comm.get_rank("model")
params.io_rank = [0, 0, 0]
if params.io_grid[1] > 1:
params.io_rank[1] = comm.get_rank("h")
if params.io_grid[2] > 1:
params.io_rank[2] = comm.get_rank("w")
return
def get_dataloader(
params, files_pattern, device, train=True, final_eval=False
): # pragma: no cover
"""
Get the dataloader
"""
init_distributed_io(params)
if params.get("data_type", "not zarr") == "zarr":
from utils.dataloaders import zarr_helper as zarr
return zarr.get_data_loader(params, files_pattern, train)
elif params.get("multifiles", False):
from utils.dataloaders.data_loader_multifiles import (
MultifilesDataset as MultifilesDataset2D,
)
# multifiles dataset
dataset = MultifilesDataset2D(params, files_pattern, train)
sampler = (
DistributedSampler(
dataset,
shuffle=train,
num_replicas=params.data_num_shards,
rank=params.data_shard_id,
)
if (params.data_num_shards > 1)
else None
)
dataloader = DataLoader(
dataset,
batch_size=int(params.batch_size),
num_workers=params.num_data_workers,
shuffle=False, # (sampler is None),
sampler=sampler if train else None,
drop_last=True,
pin_memory=torch.cuda.is_available(),
)
elif params.enable_synthetic_data:
from modulus.datapipes.climate.sfno.dataloaders.data_loader_dummy import (
DummyLoader,
)
dataloader = DummyLoader(params, files_pattern, train, device)
dataset = types.SimpleNamespace(
in_channels=dataloader.in_channels,
out_channels=dataloader.out_channels,
img_shape_x=dataloader.img_shape_x,
img_shape_y=dataloader.img_shape_y,
img_crop_shape_x=dataloader.img_crop_shape_x,
img_crop_shape_y=dataloader.img_crop_shape_y,
img_crop_offset_x=dataloader.img_crop_offset_x,
img_crop_offset_y=dataloader.img_crop_offset_y,
img_local_shape_x=dataloader.img_local_shape_x,
img_local_shape_y=dataloader.img_local_shape_y,
img_local_offset_x=dataloader.img_local_offset_x,
img_local_offset_y=dataloader.img_local_offset_y,
)
# not needed in the non-multifiles case
sampler = None
else:
from modulus.datapipes.climate.sfno.dataloaders.data_loader_dali_2d import (
ERA5DaliESDataloader as ERA5DaliESDataloader2D,
)
dataloader = ERA5DaliESDataloader2D(
params, files_pattern, train, final_eval=final_eval
)
dataset = types.SimpleNamespace(
in_channels=dataloader.in_channels,
out_channels=dataloader.out_channels,
img_shape_x=dataloader.img_shape_x,
img_shape_y=dataloader.img_shape_y,
img_crop_shape_x=dataloader.img_crop_shape_x,
img_crop_shape_y=dataloader.img_crop_shape_y,
img_crop_offset_x=dataloader.img_crop_offset_x,
img_crop_offset_y=dataloader.img_crop_offset_y,
img_local_shape_x=dataloader.img_local_shape_x,
img_local_shape_y=dataloader.img_local_shape_y,
img_local_offset_x=dataloader.img_local_offset_x,
img_local_offset_y=dataloader.img_local_offset_y,
)
if params.enable_benchy and train:
from benchy.torch import BenchmarkGenericIteratorWrapper
dataloader = BenchmarkGenericIteratorWrapper(dataloader, params.batch_size)
# not needed in the non-multifiles case
sampler = None
if train:
return dataloader, dataset, sampler
else:
return dataloader, dataset
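# --- Usage sketch (editorial addition, hedged) ---
# `params` is the usual attribute-style config object; the path below is a
# hypothetical placeholder and only illustrates the call signature above.
#
#   dataloader, dataset, sampler = get_dataloader(
#       params,
#       "/data/era5/train",            # hypothetical files_pattern
#       device=torch.device("cuda:0"),
#       train=True,
#   )
#   for batch in dataloader:
#       ...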
|
modulus-main
|
modulus/datapipes/climate/sfno/dataloader.py
|
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Protocol
import xarray
from dataclasses import dataclass
import torch
from torch.utils.data import Dataset, DataLoader
from torch.utils.data.distributed import DistributedSampler
from typing import Any
import numpy as np
class Params(Protocol):
"""A protocol with the required input parameters
Useful for typechecking or editor autocompletion.
"""
in_channels: Any
out_channels: Any
batch_size: int
global_means_path: str
global_stds_path: str
@dataclass
class Metadata:
"""Image metadata required to initialize the model"""
img_shape_x: int
img_shape_y: int
in_channels: Any
out_channels: Any
img_crop_shape_x: int
img_crop_shape_y: int
img_crop_offset_x: int
img_crop_offset_y: int
img_local_shape_x: int
img_local_shape_y: int
img_local_offset_x: int
img_local_offset_y: int
def get_data_loader(
params: Params, files_pattern: str, train: bool
): # pragma: no cover
"""Matches interface used in trainer.py:Trainer"""
ds = xarray.open_zarr(files_pattern)
dataset = _xarray_to_dataset(params, ds, train=train)
# shape is (1, channel, 1, 1)
mean = np.load(params.global_means_path)
std = np.load(params.global_stds_path)
assert mean.shape == (1, len(ds.channel), 1, 1), mean.shape
assert not np.any(np.isnan(mean)), np.ravel(mean)
assert std.shape == (1, len(ds.channel), 1, 1), std.shape
assert not np.any(np.isnan(std)), np.ravel(std)
def reset_pipeline(): # pragma: no cover
"""reset the pipeline to the beginning of the dataset"""
pass
def get_output_normalization(): # pragma: no cover
"""return the mean and std of the output channels"""
return mean[:, params.out_channels], std[:, params.out_channels]
def get_input_normalization(): # pragma: no cover
"""return the mean and std of the input channels"""
return mean[:, params.in_channels], std[:, params.in_channels]
def center(args): # pragma: no cover
"""normalize the input and output channels"""
x, y = args
xmean = mean[0, params.in_channels]
xstd = std[0, params.in_channels]
ymean = mean[0, params.out_channels]
ystd = std[0, params.out_channels]
return (x - xmean) / xstd, (y - ymean) / ystd
dataset = Map(dataset, center)
sampler = (
DistributedSampler(
dataset,
shuffle=train,
num_replicas=params.data_num_shards,
rank=params.data_shard_id,
)
if (params.data_num_shards > 1)
else None
)
dataloader = DataLoader(
dataset,
batch_size=int(params.batch_size),
num_workers=params.num_data_workers,
shuffle=False,
sampler=sampler if train else None,
drop_last=True,
pin_memory=torch.cuda.is_available(),
)
dataloader.get_output_normalization = get_output_normalization
dataloader.get_input_normalization = get_input_normalization
dataloader.reset_pipeline = reset_pipeline
shape = ds.fields.shape
nlon = shape[-1]
nlat = shape[-2]
metadata = Metadata(
img_shape_y=nlon,
img_shape_x=nlat,
in_channels=params.in_channels,
out_channels=params.out_channels,
img_crop_shape_x=nlat,
img_crop_shape_y=nlon,
img_crop_offset_x=0,
img_crop_offset_y=0,
img_local_shape_x=nlat,
img_local_shape_y=nlon,
img_local_offset_x=0,
img_local_offset_y=0,
)
if train:
return dataloader, metadata, sampler
else:
return dataloader, metadata
def _xarray_to_dataset(
params: Params, ds: xarray.Dataset, train: bool
): # pragma: no cover
year = ds.time.dt.year
if train:
mask = (year <= 2015) & (year >= 1980)
ds = ds.sel(time=mask)
else:
mask = (2015 < year) & (year <= 2017)
ds = ds.sel(time=mask)
return XarrayDataset(ds.fields, params.in_channels, params.out_channels)
class Map(Dataset):
"""Map"""
def __init__(self, data, func): # pragma: no cover
self.data = data
self.func = func
def __getitem__(self, i): # pragma: no cover
return self.func(self.data[i])
def __len__(self): # pragma: no cover
return len(self.data)
@dataclass
class XarrayDataset(Dataset):
"""A dataset that wraps an xarray.Dataset"""
data: xarray.DataArray
in_channels: Any = slice(None)
out_channels: Any = slice(None)
def _to_array(self, x): # pragma: no cover
return x.values
def __getitem__(self, i): # pragma: no cover
input_ = self.data.isel(time=i, channel=self.in_channels)
target = self.data.isel(time=i + 1, channel=self.out_channels)
x = self._to_array(input_)
y = self._to_array(target)
return x, y
def __len__(self): # pragma: no cover
times = self.data.time
if len(times) > 1:
return len(times) - 1
else:
return 0
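# --- Usage sketch (editorial addition, hedged) ---
# A minimal, hypothetical driver for the zarr path; `params` must provide the
# attributes declared in the Params protocol above plus the sharding fields
# used by get_data_loader (data_num_shards, data_shard_id, num_data_workers).
#
#   dataloader, metadata, sampler = get_data_loader(
#       params, "/data/era5.zarr", train=True  # hypothetical zarr store
#   )
#   x, y = next(iter(dataloader))  # normalized (input, target) pair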
|
modulus-main
|
modulus/datapipes/climate/sfno/dataloaders/zarr_helper.py
|
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
import numpy as np
# distributed stuff
from modulus.utils.sfno.distributed import comm
# DALI stuff
from nvidia.dali.pipeline import Pipeline
import nvidia.dali.fn as fn
import nvidia.dali.types as dali_types
from nvidia.dali.plugin.pytorch import DALIGenericIterator, LastBatchPolicy
# es helper
import modulus.datapipes.climate.sfno.dataloaders.dali_es_helper_2d as esh
class ERA5DaliESDataloader(object):
"""
ERA5 dataloader using DALI
"""
def get_pipeline(self): # pragma: no cover
"""
Get DALI pipeline
"""
pipeline = Pipeline(
batch_size=self.batchsize,
num_threads=2,
device_id=self.device_index,
py_num_workers=self.num_data_workers,
py_start_method="spawn",
seed=self.global_seed,
)
img_shape_x = self.img_shape_x
img_shape_y = self.img_shape_y
in_channels = self.in_channels
out_channels = self.out_channels
with pipeline:
# get input and target
data = fn.external_source(
source=self.extsource,
num_outputs=4 if self.add_zenith else 2,
layout=["FCHW", "FCHW", "FCHW", "FCHW"]
if self.add_zenith
else ["FCHW", "FCHW"],
batch=False,
no_copy=True,
parallel=True,
prefetch_queue_depth=self.num_data_workers,
)
if self.add_zenith:
inp, tar, izen, tzen = data
else:
inp, tar = data
# upload to GPU
inp = inp.gpu()
tar = tar.gpu()
if self.add_zenith:
izen = izen.gpu()
tzen = tzen.gpu()
# pad if requested:
if (self.extsource.read_pad[0] > 0) or (self.extsource.read_pad[1] > 0):
padded_shape = [self.img_local_shape_x, self.img_local_shape_y]
inp = fn.pad(inp, axis_names="HW", shape=padded_shape, device="gpu")
tar = fn.pad(tar, axis_names="HW", shape=padded_shape, device="gpu")
if self.add_zenith:
izen = fn.pad(
izen, axis_names="HW", shape=padded_shape, device="gpu"
)
tzen = fn.pad(
tzen, axis_names="HW", shape=padded_shape, device="gpu"
)
# roll if requested
if self.train and self.roll:
shift = fn.random.uniform(
device="cpu", dtype=dali_types.INT32, range=[0, img_shape_y]
)
inp = fn.cat(
inp[:, :, :, shift:], inp[:, :, :, :shift], device="gpu", axis=3
)
tar = fn.cat(
tar[:, :, :, shift:], tar[:, :, :, :shift], device="gpu", axis=3
)
if self.add_zenith:
izen = fn.cat(
izen[:, :, :, shift:],
izen[:, :, :, :shift],
device="gpu",
axis=3,
)
tzen = fn.cat(
tzen[:, :, :, shift:],
tzen[:, :, :, :shift],
device="gpu",
axis=3,
)
# normalize if requested
if self.normalize:
inp = fn.normalize(
inp,
device="gpu",
axis_names=self.norm_channels,
batch=self.norm_batch,
mean=self.in_bias,
stddev=self.in_scale,
)
tar = fn.normalize(
tar,
device="gpu",
axis_names=self.norm_channels,
batch=self.norm_batch,
mean=self.out_bias,
stddev=self.out_scale,
)
# add noise if requested
if self.add_noise:
inp = fn.noise.gaussian(
inp, device="gpu", stddev=self.noise_std, seed=self.local_seed
)
# add zenith angle if requested
if self.add_zenith:
pipeline.set_outputs(inp, tar, izen, tzen)
else:
pipeline.set_outputs(inp, tar)
return pipeline
def __init__(
self, params, location, train, seed=333, final_eval=False
): # pragma: no cover
self.num_data_workers = params.num_data_workers
self.host_prefetch_buffers = params["host_prefetch_buffers"]
self.device_index = torch.cuda.current_device()
self.batchsize = int(params.batch_size)
# set up seeds
# this one is the same on all ranks
self.global_seed = seed
# this one is the same for all ranks of the same model
model_id = comm.get_world_rank() // comm.get_size("model")
self.model_seed = self.global_seed + model_id
# this seed is supposed to be different for every rank
self.local_seed = self.global_seed + comm.get_world_rank()
# we need to copy those
self.location = location
self.train = train
self.dt = params.dt
self.n_history = params.n_history
self.n_future = params.n_future if train else params.valid_autoreg_steps
self.in_channels = params.in_channels
self.out_channels = params.out_channels
self.add_noise = params.add_noise if train else False
self.noise_std = params.noise_std
self.add_zenith = params.add_zenith if hasattr(params, "add_zenith") else False
self.timestep_hours = (
params.timestep_hours if hasattr(params, "timestep_hours") else 6
)
if train:
self.n_samples = (
params.n_train_samples if hasattr(params, "n_train_samples") else None
)
self.n_samples_per_epoch = (
params.n_train_samples_per_epoch
if hasattr(params, "n_train_samples_per_epoch")
else None
)
else:
self.n_samples = (
params.n_eval_samples if hasattr(params, "n_eval_samples") else None
)
self.n_samples_per_epoch = (
params.n_eval_samples_per_epoch
if hasattr(params, "n_eval_samples_per_epoch")
else None
)
if final_eval:
self.n_samples = None
self.n_samples_per_epoch = None
# by default we normalize over space
self.norm_channels = "FHW"
self.norm_batch = False
if hasattr(params, "normalization_mode"):
split = params.normalization_mode.split("-")
self.norm_mode = split[0]
if len(split) > 1:
self.norm_channels = split[1]
if "B" in self.norm_channels:
self.norm_batch = True
self.norm_channels = self.norm_channels.replace("B", "")
else:
self.norm_mode = "offline"
# set sharding
self.num_shards = params.data_num_shards
self.shard_id = params.data_shard_id
# get cropping:
crop_size = [
params.crop_size_x if hasattr(params, "crop_size_x") else None,
params.crop_size_y if hasattr(params, "crop_size_y") else None,
]
crop_anchor = [
params.crop_anchor_x if hasattr(params, "crop_anchor_x") else 0,
params.crop_anchor_y if hasattr(params, "crop_anchor_y") else 0,
]
# get the image sizes
self.extsource = esh.GeneralES(
self.location,
max_samples=self.n_samples,
samples_per_epoch=self.n_samples_per_epoch,
train=self.train,
batch_size=self.batchsize,
dt=self.dt,
n_history=self.n_history,
n_future=self.n_future,
in_channels=self.in_channels,
out_channels=self.out_channels,
crop_size=crop_size,
crop_anchor=crop_anchor,
num_shards=self.num_shards,
shard_id=self.shard_id,
io_grid=params.io_grid,
io_rank=params.io_rank,
device_id=self.device_index,
truncate_old=True,
zenith_angle=self.add_zenith,
enable_logging=params.log_to_screen,
seed=333,
is_parallel=True,
host_prefetch_buffers=self.host_prefetch_buffers,
timestep_hours=self.timestep_hours,
)
# some image properties
self.img_shape_x = self.extsource.img_shape[0]
self.img_shape_y = self.extsource.img_shape[1]
self.img_crop_shape_x = self.extsource.crop_size[0]
self.img_crop_shape_y = self.extsource.crop_size[1]
self.img_crop_offset_x = self.extsource.crop_anchor[0]
self.img_crop_offset_y = self.extsource.crop_anchor[1]
self.img_local_shape_x = (
self.extsource.read_shape[0] + self.extsource.read_pad[0]
)
self.img_local_shape_y = (
self.extsource.read_shape[1] + self.extsource.read_pad[1]
)
self.img_local_offset_x = self.extsource.read_anchor[0]
self.img_local_offset_y = self.extsource.read_anchor[1]
self.img_local_pad_x = self.extsource.read_pad[0]
self.img_local_pad_y = self.extsource.read_pad[1]
# num steps
self.num_steps_per_epoch = self.extsource.num_steps_per_epoch
# load stats
self.normalize = True
self.roll = params.roll
# in
if self.norm_mode == "offline":
if params.normalization == "minmax":
mins = np.load(params.min_path)[:, self.in_channels]
maxes = np.load(params.max_path)[:, self.in_channels]
self.in_bias = mins
self.in_scale = maxes - mins
elif params.normalization == "zscore":
means = np.load(params.global_means_path)[:, self.in_channels]
stds = np.load(params.global_stds_path)[:, self.in_channels]
self.in_bias = means
self.in_scale = stds
elif params.normalization == "mixed":
means = np.load(params.global_means_path)[:, self.in_channels]
stds = np.load(params.global_stds_path)[:, self.in_channels]
self.in_bias = means
self.in_scale = stds
for i, c in enumerate(self.in_channels):
if params.channel_names[c][0] == "r":
self.in_bias[:, i] = 0.0
self.in_scale[:, i] = 150.0
elif params.normalization == "none":
N_in_channels = len(self.in_channels)
self.in_bias = np.zeros((1, N_in_channels, 1, 1))
self.in_scale = np.ones((1, N_in_channels, 1, 1))
# out
if params.normalization == "minmax":
mins = np.load(params.min_path)[:, self.out_channels]
maxes = np.load(params.max_path)[:, self.out_channels]
self.out_bias = mins
self.out_scale = maxes - mins
elif params.normalization == "zscore":
means = np.load(params.global_means_path)[:, self.out_channels]
stds = np.load(params.global_stds_path)[:, self.out_channels]
self.out_bias = means
self.out_scale = stds
elif params.normalization == "mixed":
means = np.load(params.global_means_path)[:, self.out_channels]
stds = np.load(params.global_stds_path)[:, self.out_channels]
self.out_bias = means
self.out_scale = stds
for o, c in enumerate(self.out_channels):
if params.channel_names[c][0] == "r": # replace with regex
self.out_bias[:, o] = 0.0
self.out_scale[:, o] = 150.0
elif params.normalization == "none":
N_out_channels = len(self.out_channels)
self.out_bias = np.zeros((1, N_out_channels, 1, 1))
self.out_scale = np.ones((1, N_out_channels, 1, 1))
# reformat the biases
if self.norm_channels == "FHW":
in_shape = (1, len(self.in_channels), 1, 1)
out_shape = (1, len(self.out_channels), 1, 1)
else:
in_shape = (1, *self.in_bias.shape)
out_shape = (1, *self.out_bias.shape)
self.in_bias = np.reshape(self.in_bias, in_shape)
self.in_scale = np.reshape(self.in_scale, in_shape)
self.out_bias = np.reshape(self.out_bias, out_shape)
self.out_scale = np.reshape(self.out_scale, out_shape)
else:
# in case of online normalization,
# we do not need to set it here
self.in_bias = None
self.in_scale = None
self.out_bias = None
self.out_scale = None
# create pipeline
self.pipeline = self.get_pipeline()
self.pipeline.start_py_workers()
self.pipeline.build()
# create iterator
outnames = ["inp", "tar"]
if self.add_zenith:
outnames += ["izen", "tzen"]
self.iterator = DALIGenericIterator(
[self.pipeline],
outnames,
auto_reset=True,
size=-1,
last_batch_policy=LastBatchPolicy.DROP,
prepare_first_batch=True,
)
def get_input_normalization(self): # pragma: no cover
"""Returns the input normalization parameters"""
if self.norm_mode == "offline":
return self.in_bias, self.in_scale
else:
return 0.0, 1.0
def get_output_normalization(self): # pragma: no cover
"""Returns the output normalization parameters"""
if self.norm_mode == "offline":
return self.out_bias, self.out_scale
else:
return 0.0, 1.0
def reset_pipeline(self): # pragma: no cover
"""Resets the pipeline"""
self.pipeline.reset()
self.iterator.reset()
def __len__(self): # pragma: no cover
return self.num_steps_per_epoch
def __iter__(self): # pragma: no cover
# self.iterator.reset()
for token in self.iterator:
inp = token[0]["inp"]
tar = token[0]["tar"]
if self.add_zenith:
izen = token[0]["izen"]
tzen = token[0]["tzen"]
if self.host_prefetch_buffers:
result = (
inp.to(torch.cuda.current_device()),
tar.to(torch.cuda.current_device()),
izen.to(torch.cuda.current_device()),
tzen.to(torch.cuda.current_device()),
)
else:
result = inp, tar, izen, tzen
else:
if self.host_prefetch_buffers:
result = inp.to(torch.cuda.current_device()), tar.to(
torch.cuda.current_device()
)
else:
result = inp, tar
yield result
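# --- Usage sketch (editorial addition, hedged) ---
# The loader is normally built through get_dataloader(); iterating it yields
# (inp, tar) or (inp, tar, izen, tzen) depending on add_zenith. Per-sample
# layout is FCHW (frame, channel, height, width), batched by DALI.
#
#   loader = ERA5DaliESDataloader(params, "/data/era5/train", train=True)
#   for inp, tar in loader:  # assumes params.add_zenith is False
#       ...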
|
modulus-main
|
modulus/datapipes/climate/sfno/dataloaders/data_loader_dali_2d.py
|
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import glob
import random
import numpy as np
from torch.utils.data import Dataset
import h5py
# import cv2
from modulus.utils.sfno.img_utils import reshape_fields
class MultifilesDataset(Dataset):
"""
Dataset class for loading data from multiple files
"""
def __init__(self, params, location, train): # pragma: no cover
self.params = params
self.location = location
self.train = train
self.dt = params.dt
self.n_history = params.n_history
self.in_channels = np.array(params.in_channels)
self.out_channels = np.array(params.out_channels)
self.n_in_channels = len(self.in_channels)
self.n_out_channels = len(self.out_channels)
self.crop_size_x = params.crop_size_x
self.crop_size_y = params.crop_size_y
self.roll = params.roll
self._get_files_stats()
def _get_files_stats(self): # pragma: no cover
self.files_paths = glob.glob(self.location + "/*.h5")
self.files_paths.sort()
self.n_years = len(self.files_paths)
with h5py.File(self.files_paths[0], "r") as _f:
logging.info("Getting file stats from {}".format(self.files_paths[0]))
self.n_samples_per_year = _f["fields"].shape[0]
# original image shape (before padding)
self.img_shape_x = _f["fields"].shape[2]
self.img_shape_y = _f["fields"].shape[3]
self.img_crop_shape_x = self.img_shape_x
self.img_crop_shape_y = self.img_shape_y
# set these for compatibility with the distributed dataloader. Doesn't support distributed mode as of now
self.img_local_offset_x = 0
self.img_local_offset_y = 0
self.img_local_shape_x = self.img_shape_x
self.img_local_shape_y = self.img_shape_y
self.n_samples_total = self.n_years * self.n_samples_per_year
self.files = [None for _ in range(self.n_years)]
logging.info("Number of samples per year: {}".format(self.n_samples_per_year))
logging.info(
"Found data at path {}. Number of examples: {}. Image Shape: {} x {} x {}".format(
self.location,
self.n_samples_total,
self.img_shape_x,
self.img_shape_y,
self.n_in_channels,
)
)
logging.info("Delta t: {} hours".format(6 * self.dt))
logging.info(
"Including {} hours of past history in training at a frequency of {} hours".format(
6 * self.dt * self.n_history, 6 * self.dt
)
)
def _open_file(self, year_idx): # pragma: no cover
_file = h5py.File(self.files_paths[year_idx], "r")
self.files[year_idx] = _file["fields"]
def __len__(self): # pragma: no cover
return self.n_samples_total
def __getitem__(self, global_idx): # pragma: no cover
year_idx = int(global_idx / self.n_samples_per_year) # which year we are on
local_idx = int(
global_idx % self.n_samples_per_year
) # which sample in that year we are on - determines indices for centering
# open image file
if self.files[year_idx] is None:
self._open_file(year_idx)
# if we are not at least self.dt*n_history timesteps into the prediction
if local_idx < self.dt * self.n_history:
local_idx += self.dt * self.n_history
# if we are on the last image in a year predict identity, else predict next timestep
step = 0 if local_idx >= self.n_samples_per_year - self.dt else self.dt
if self.train and self.roll:
y_roll = random.randint(0, self.img_shape_y)
else:
y_roll = 0
if self.train and (self.crop_size_x or self.crop_size_y):
rnd_x = random.randint(0, self.img_shape_x - self.crop_size_x)
rnd_y = random.randint(0, self.img_shape_y - self.crop_size_y)
else:
rnd_x = 0
rnd_y = 0
return reshape_fields(
self.files[year_idx][
(local_idx - self.dt * self.n_history) : (local_idx + 1) : self.dt,
self.in_channels,
],
"inp",
self.crop_size_x,
self.crop_size_y,
rnd_x,
rnd_y,
self.params,
y_roll,
self.train,
), reshape_fields(
self.files[year_idx][local_idx + step, self.out_channels],
"tar",
self.crop_size_x,
self.crop_size_y,
rnd_x,
rnd_y,
self.params,
y_roll,
self.train,
)
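# --- Usage sketch (editorial addition, hedged) ---
# The dataset plugs into a standard torch DataLoader; params/location below
# are hypothetical placeholders.
#
#   from torch.utils.data import DataLoader
#   dataset = MultifilesDataset(params, "/data/era5/train", train=True)
#   loader = DataLoader(dataset, batch_size=int(params.batch_size), shuffle=True)
#   inp, tar = next(iter(loader))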
|
modulus-main
|
modulus/datapipes/climate/sfno/dataloaders/data_loader_multifiles.py
|
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import glob
import numpy as np
import cupy as cp
import cupyx as cpx
import h5py
import zarr
import logging
from itertools import groupby, accumulate
import operator
from bisect import bisect_right
# for nvtx annotation
import torch
# we need this for the zenith angle feature
import datetime
from modulus.utils.sfno.zenith_angle import cos_zenith_angle
class GeneralES(object):
"""Dali helper class"""
def _get_slices(self, lst): # pragma: no cover
for a, b in groupby(enumerate(lst), lambda pair: pair[1] - pair[0]):
b = list(b)
yield slice(b[0][1], b[-1][1] + 1)
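# Worked example (editorial note): contiguous channel indexes collapse into
# slices so reads can use bulk hyperslab selections, e.g.
#   list(self._get_slices([0, 1, 2, 5, 6])) -> [slice(0, 3), slice(5, 7)]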
# very important: the seed has to be constant across all workers, otherwise the shuffled index permutations diverge between them:
def __init__(
self,
location,
max_samples,
samples_per_epoch,
train,
batch_size,
dt,
n_history,
n_future,
in_channels,
out_channels,
crop_size,
crop_anchor,
num_shards,
shard_id,
io_grid,
io_rank,
device_id=0,
truncate_old=True,
enable_logging=True,
zenith_angle=True,
seed=333,
is_parallel=True,
host_prefetch_buffers=False,
timestep_hours=6,
): # pragma: no cover
self.batch_size = batch_size
self.location = location
self.max_samples = max_samples
self.n_samples_per_epoch = samples_per_epoch
self.truncate_old = truncate_old
self.train = train
self.dt = dt
self.n_history = n_history
self.n_future = n_future
self.in_channels = in_channels
self.out_channels = out_channels
self.n_in_channels = len(in_channels)
self.n_out_channels = len(out_channels)
self.crop_size = crop_size
self.crop_anchor = crop_anchor
self.base_seed = seed
self.num_shards = num_shards
self.device_id = device_id
self.shard_id = shard_id
self.is_parallel = is_parallel
self.host_prefetch_buffers = host_prefetch_buffers
self.zenith_angle = zenith_angle
self.timestep_hours = timestep_hours
# set the read slices
# we do not support channel parallelism yet
assert io_grid[0] == 1
self.io_grid = io_grid[1:]
self.io_rank = io_rank[1:]
# parse the files
self._get_files_stats(enable_logging)
self.shuffle = True if train else False
# convert in_channels to list of slices:
self.in_channels_slices = list(self._get_slices(self.in_channels))
self.out_channels_slices = list(self._get_slices(self.out_channels))
# we need some additional static fields in this case
if self.zenith_angle:
longitude = np.linspace(0, 360, self.img_shape[1], endpoint=False)
latitude = np.linspace(90, -90, self.img_shape[0])
self.lon_grid, self.lat_grid = np.meshgrid(longitude, latitude)
self.lat_grid_local = self.lat_grid[
self.read_anchor[0] : self.read_anchor[0] + self.read_shape[0],
self.read_anchor[1] : self.read_anchor[1] + self.read_shape[1],
]
self.lon_grid_local = self.lon_grid[
self.read_anchor[0] : self.read_anchor[0] + self.read_shape[0],
self.read_anchor[1] : self.read_anchor[1] + self.read_shape[1],
]
# these things we want to read from a descriptor file ultimately:
self.dt_samples = 6
# HDF5 routines
def _get_stats_h5(self, enable_logging): # pragma: no cover
with h5py.File(self.files_paths[0], "r") as _f:
if enable_logging:
logging.info("Getting file stats from {}".format(self.files_paths[0]))
# original image shape (before padding)
self.img_shape = _f["fields"].shape[
2:4
] # - 1 #just get rid of one of the pixels
self.total_channels = _f["fields"].shape[1]
# get all sample counts
self.n_samples_year = []
for filename in self.files_paths:
with h5py.File(filename, "r") as _f:
self.n_samples_year.append(_f["fields"].shape[0])
return
def _get_year_h5(self, year_idx): # pragma: no cover
self.files[year_idx] = h5py.File(self.files_paths[year_idx], "r")
self.dsets[year_idx] = self.files[year_idx]["fields"]
return
def _get_data_h5(
self, inp, tar, dset, local_idx, start_x, end_x, start_y, end_y
): # pragma: no cover
off = 0
for slice_in in self.in_channels_slices:
start = off
end = start + (slice_in.stop - slice_in.start)
# inp[:, start:end, ...] = dset[(local_idx-self.dt*self.n_history):(local_idx+1):self.dt, slice_in, start_x:end_x, start_y:end_y]
dset.read_direct(
inp,
np.s_[
(local_idx - self.dt * self.n_history) : (local_idx + 1) : self.dt,
slice_in,
start_x:end_x,
start_y:end_y,
],
np.s_[:, start:end, ...],
)
off = end
off = 0
for slice_out in self.out_channels_slices:
start = off
end = start + (slice_out.stop - slice_out.start)
# tar[:, start:end, ...] = dset[(local_idx + self.dt):(local_idx + self.dt * (self.n_future + 1) + 1):self.dt, slice_out, start_x:end_x, start_y:end_y]
dset.read_direct(
tar,
np.s_[
(local_idx + self.dt) : (
local_idx + self.dt * (self.n_future + 1) + 1
) : self.dt,
slice_out,
start_x:end_x,
start_y:end_y,
],
np.s_[:, start:end, ...],
)
off = end
return inp, tar
# zarr functions
def _get_stats_zarr(self, enable_logging): # pragma: no cover
with zarr.convenience.open(self.files_paths[0], "r") as _f:
if enable_logging:
logging.info("Getting file stats from {}".format(self.files_paths[0]))
# original image shape (before padding)
self.img_shape = _f["/fields"].shape[
2:4
] # - 1 #just get rid of one of the pixels
self.total_channels = _f["/fields"].shape[1]
self.n_samples_year = []
for filename in self.files_paths:
with zarr.convenience.open(filename, "r") as _f:
self.n_samples_year.append(_f["/fields"].shape[0])
return
def _get_year_zarr(self, year_idx): # pragma: no cover
self.files[year_idx] = zarr.convenience.open(self.files_paths[year_idx], "r")
self.dsets[year_idx] = self.files[year_idx]["/fields"]
return
def _get_data_zarr(
self, inp, tar, dset, local_idx, start_x, end_x, start_y, end_y
): # pragma: no cover
off = 0
for slice_in in self.in_channels_slices:
start = off
end = start + (slice_in.stop - slice_in.start)
inp[:, start:end, ...] = dset[
(local_idx - self.dt * self.n_history) : (local_idx + 1) : self.dt,
slice_in,
start_x:end_x,
start_y:end_y,
]
off = end
off = 0
for slice_out in self.out_channels_slices:
start = off
end = start + (slice_out.stop - slice_out.start)
tar[:, start:end, ...] = dset[
(local_idx + self.dt) : (
local_idx + self.dt * (self.n_future + 1) + 1
) : self.dt,
slice_out,
start_x:end_x,
start_y:end_y,
]
off = end
return inp, tar
def _get_files_stats(self, enable_logging): # pragma: no cover
# check for hdf5 files
self.files_paths = []
self.location = (
[self.location] if not isinstance(self.location, list) else self.location
)
for location in self.location:
self.files_paths = self.files_paths + glob.glob(
os.path.join(location, "????.h5")
)
self.file_format = "h5"
# # TODO: probably requires fix to re-enable zarr
# if not self.files_paths:
# self.files_paths = glob.glob(os.path.join(self.location, "*.zarr"))
# self.file_format = "zarr"
if not self.files_paths:
raise IOError(
f"Error, the specified file path {self.location} does neither container h5 nor zarr files."
)
self.files_paths.sort()
# extract the years from filenames
self.years = [
int(os.path.splitext(os.path.basename(x))[0][-4:]) for x in self.files_paths
]
# get stats
self.n_years = len(self.files_paths)
# get stats from first file
if self.file_format == "h5":
self._get_stats_h5(enable_logging)
else:
self._get_stats_zarr(enable_logging)
# determine local read size:
# sanitize the crops first
if self.crop_size[0] is None:
self.crop_size[0] = self.img_shape[0]
if self.crop_size[1] is None:
self.crop_size[1] = self.img_shape[1]
assert self.crop_anchor[0] + self.crop_size[0] <= self.img_shape[0]
assert self.crop_anchor[1] + self.crop_size[1] <= self.img_shape[1]
# for x
read_shape_x = (self.crop_size[0] + self.io_grid[0] - 1) // self.io_grid[0]
read_anchor_x = self.crop_anchor[0] + read_shape_x * self.io_rank[0]
read_shape_x = min(read_shape_x, self.img_shape[0] - read_anchor_x)
# for y
read_shape_y = (self.crop_size[1] + self.io_grid[1] - 1) // self.io_grid[1]
read_anchor_y = self.crop_anchor[1] + read_shape_y * self.io_rank[1]
read_shape_y = min(read_shape_y, self.img_shape[1] - read_anchor_y)
self.read_anchor = [read_anchor_x, read_anchor_y]
self.read_shape = [read_shape_x, read_shape_y]
# compute padding
read_pad_x = (self.crop_size[0] + self.io_grid[0] - 1) // self.io_grid[
0
] - read_shape_x
read_pad_y = (self.crop_size[1] + self.io_grid[1] - 1) // self.io_grid[
1
] - read_shape_y
self.read_pad = [read_pad_x, read_pad_y]
# do some sample indexing gymnastics
self.year_offsets = list(accumulate(self.n_samples_year, operator.add))[:-1]
self.year_offsets.insert(0, 0)
self.n_samples_available = sum(self.n_samples_year)
if self.max_samples is not None:
self.n_samples_total = min(self.n_samples_available, self.max_samples)
else:
self.n_samples_total = self.n_samples_available
# do the sharding
self.n_samples_shard = self.n_samples_total // self.num_shards
if self.truncate_old:
self.n_samples_offset = self.n_samples_available - self.n_samples_total
else:
self.n_samples_offset = 0
# number of steps per epoch
self.num_steps_per_cycle = self.n_samples_shard // self.batch_size
if self.n_samples_per_epoch is None:
self.n_samples_per_epoch = self.n_samples_total
self.num_steps_per_epoch = self.n_samples_per_epoch // (
self.batch_size * self.num_shards
)
# we need those here
self.num_samples_per_cycle_shard = self.num_steps_per_cycle * self.batch_size
self.num_samples_per_epoch_shard = self.num_steps_per_epoch * self.batch_size
# prepare file lists
self.files = [None for _ in range(self.n_years)]
self.dsets = [None for _ in range(self.n_years)]
if enable_logging:
logging.info(
"Average number of samples per year: {:.1f}".format(
float(self.n_samples_total) / float(self.n_years)
)
)
logging.info(
"Found data at path {}. Number of examples: {}. Full image Shape: {} x {} x {}. Read Shape: {} x {} x {}".format(
self.location,
self.n_samples_available,
self.img_shape[0],
self.img_shape[1],
self.total_channels,
self.read_shape[0],
self.read_shape[1],
self.n_in_channels,
)
)
logging.info(
"Using {} from the total number of available samples with {} samples per epoch (corresponds to {} steps for {} shards with local batch size {})".format(
self.n_samples_total,
self.n_samples_per_epoch,
self.num_steps_per_epoch,
self.num_shards,
self.batch_size,
)
)
logging.info("Delta t: {} hours".format(self.timestep_hours * self.dt))
logging.info(
"Including {} hours of past history in training at a frequency of {} hours".format(
self.timestep_hours * self.dt * self.n_history,
self.timestep_hours * self.dt,
)
)
logging.info(
"Including {} hours of future targets in training at a frequency of {} hours".format(
self.timestep_hours * self.dt * self.n_future,
self.timestep_hours * self.dt,
)
)
# some state variables
self.last_cycle_epoch = None
self.index_permutation = None
# prepare buffers for double buffering
if not self.is_parallel:
self._init_buffers()
def _init_double_buff_host(self, n_tsteps, n_channels): # pragma: no cover
buffs = [
np.zeros(
(
n_tsteps,
n_channels,
self.read_shape[0],
self.read_shape[1],
),
dtype=np.float32,
),
np.zeros(
(
n_tsteps,
n_channels,
self.read_shape[0],
self.read_shape[1],
),
dtype=np.float32,
),
]
return buffs
def _init_double_buff_gpu(self, n_tsteps, n_channels): # pragma: no cover
buffs = [
cpx.zeros_pinned(
(
n_tsteps,
n_channels,
self.read_shape[0],
self.read_shape[1],
),
dtype=np.float32,
),
cpx.zeros_pinned(
(
n_tsteps,
n_channels,
self.read_shape[0],
self.read_shape[1],
),
dtype=np.float32,
),
]
return buffs
def _init_buffers(self): # pragma: no cover
# set device
self.device = cp.cuda.Device(self.device_id)
self.device.use()
self.current_buffer = 0
if self.host_prefetch_buffers:
self.inp_buffs = self._init_double_buff_host(
self.n_history + 1, self.n_in_channels
)
self.tar_buffs = self._init_double_buff_host(
self.n_future + 1, self.n_out_channels
)
else:
self.inp_buffs = self._init_double_buff_gpu(
self.n_history + 1, self.n_in_channels
)
self.tar_buffs = self._init_double_buff_gpu(
self.n_future + 1, self.n_out_channels
)
if self.zenith_angle:
if self.host_prefetch_buffers:
self.zen_inp_buffs = self._init_double_buff_host(self.n_history + 1, 1)
self.zen_tar_buffs = self._init_double_buff_host(self.n_future + 1, 1)
else:
self.zen_inp_buffs = self._init_double_buff_gpu(self.n_history + 1, 1)
self.zen_tar_buffs = self._init_double_buff_gpu(self.n_future + 1, 1)
return
def _compute_zenith_angle(
self, zen_inp, zen_tar, local_idx, year_idx
): # pragma: no cover
"""Computes solar zenith angle"""
# compute hours into the year
year = self.years[year_idx]
jan_01_epoch = datetime.datetime(year, 1, 1, 0, 0, 0)
# zenith angle for input
inp_times = np.asarray(
[
jan_01_epoch + datetime.timedelta(hours=idx * self.timestep_hours)
for idx in range(
local_idx - self.dt * self.n_history, local_idx + 1, self.dt
)
]
)
cos_zenith_inp = np.asarray(
[
np.expand_dims(
cos_zenith_angle(
inp_time, self.lon_grid_local, self.lat_grid_local
).astype(np.float32),
axis=0,
)
for inp_time in inp_times
]
)
zen_inp[...] = cos_zenith_inp[...]
# zenith angle for target
tar_times = np.asarray(
[
jan_01_epoch + datetime.timedelta(hours=idx * self.timestep_hours)
for idx in range(
local_idx + self.dt,
local_idx + self.dt * (self.n_future + 1) + 1,
self.dt,
)
]
)
cos_zenith_tar = np.asarray(
[
np.expand_dims(
cos_zenith_angle(
tar_time, self.lon_grid_local, self.lat_grid_local
).astype(np.float32),
axis=0,
)
for tar_time in tar_times
]
)
zen_tar[...] = cos_zenith_tar[...]
return
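# Example (hypothetical numbers): with dt=1, n_history=1, n_future=0,
# timestep_hours=6 and local_idx=4, inp_times covers hours 18 and 24 after
# Jan 1 of that year, and tar_times is the single stamp at hour 30.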
def __getstate__(self): # pragma: no cover
return self.__dict__.copy()
def __setstate__(self, state): # pragma: no cover
self.__dict__.update(state)
if self.file_format == "h5":
self.get_year_handle = self._get_year_h5
self.get_data_handle = self._get_data_h5
else:
self.get_year_handle = self._get_year_zarr
self.get_data_handle = self._get_data_zarr
if self.is_parallel:
self._init_buffers()
def __len__(self): # pragma: no cover
return self.n_samples_shard
def __del__(self): # pragma: no cover
for f in self.files:
if f is not None:
f.close()
def __call__(self, sample_info): # pragma: no cover
# compute global iteration index:
global_sample_idx = (
sample_info.idx_in_epoch
+ sample_info.epoch_idx * self.num_samples_per_epoch_shard
)
cycle_sample_idx = global_sample_idx % self.num_samples_per_cycle_shard
cycle_epoch_idx = global_sample_idx // self.num_samples_per_cycle_shard
# print(f'{"TRAIN" if self.train else "VALIDATION"} ITER INFO:', sample_info.idx_in_epoch, self.num_samples_per_epoch_shard)
# check if epoch is done
if sample_info.iteration >= self.num_steps_per_epoch:
# print(f'{"TRAIN" if self.train else "VALIDATION"} END OF EPOCH TRIGGERED FOR', sample_info.idx_in_epoch, self.num_samples_per_epoch_shard, sample_info.iteration, self.num_steps_per_epoch)
raise StopIteration
torch.cuda.nvtx.range_push("GeneralES:__call__")
# shuffle the data and shard
if cycle_epoch_idx != self.last_cycle_epoch:
self.last_cycle_epoch = cycle_epoch_idx
# generate a unique seed and permutation:
rng = np.random.default_rng(seed=self.base_seed + cycle_epoch_idx)
if self.shuffle:
self.index_permutation = self.n_samples_offset + rng.permutation(
self.n_samples_total
)
else:
self.index_permutation = self.n_samples_offset + np.arange(
self.n_samples_total
)
# shard the data
start = self.n_samples_shard * self.shard_id
end = start + self.n_samples_shard
self.index_permutation = self.index_permutation[start:end]
# determine local and sample idx
sample_idx = self.index_permutation[cycle_sample_idx]
year_idx = (
bisect_right(self.year_offsets, sample_idx) - 1
) # subtract 1 because we do 0-based indexing
local_idx = sample_idx - self.year_offsets[year_idx]
# if we are not at least self.dt*n_history timesteps into the prediction
if local_idx < self.dt * self.n_history:
local_idx += self.dt * self.n_history
if local_idx >= (self.n_samples_year[year_idx] - self.dt * (self.n_future + 1)):
local_idx = (
self.n_samples_year[year_idx] - self.dt * (self.n_future + 1) - 1
)
if self.files[year_idx] is None:
self.get_year_handle(year_idx)
# handles to buffers
inp = self.inp_buffs[self.current_buffer]
tar = self.tar_buffs[self.current_buffer]
if self.zenith_angle:
zen_inp = self.zen_inp_buffs[self.current_buffer]
zen_tar = self.zen_tar_buffs[self.current_buffer]
self.current_buffer = (self.current_buffer + 1) % 2
# do the read
dset = self.dsets[year_idx]
# load slice of data:
start_x = self.read_anchor[0]
end_x = start_x + self.read_shape[0]
start_y = self.read_anchor[1]
end_y = start_y + self.read_shape[1]
# read data
inp, tar = self.get_data_handle(
inp, tar, dset, local_idx, start_x, end_x, start_y, end_y
)
# get time grid
if self.zenith_angle:
self._compute_zenith_angle(zen_inp, zen_tar, local_idx, year_idx)
result = inp, tar, zen_inp, zen_tar
else:
result = inp, tar
torch.cuda.nvtx.range_pop()
return result
|
modulus-main
|
modulus/datapipes/climate/sfno/dataloaders/dali_es_helper_2d.py
|
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import glob
import torch
import numpy as np
import h5py
# distributed stuff
from modulus.utils.sfno.distributed import comm
class DummyLoader(object):
"""
Dummy data loader for testing purposes
"""
def __init__(self, params, location, train, device): # pragma: no cover
self.params = params
self.location = location
self.train = train
self.dt = params.dt
self.batch_size = int(params.batch_size)
self.n_history = params.n_history
self.n_future = params.n_future if train else params.valid_autoreg_steps
self.in_channels = params.in_channels
self.out_channels = params.out_channels
self.n_in_channels = len(self.in_channels)
self.n_out_channels = len(self.out_channels)
self.roll = params.roll
self.device = device
self.io_grid = params.io_grid[1:]
self.io_rank = params.io_rank[1:]
if train:
self.n_samples_per_epoch = (
params.n_train_samples_per_epoch
if hasattr(params, "n_train_samples_per_epoch")
else None
)
else:
self.n_samples_per_epoch = (
params.n_eval_samples_per_epoch
if hasattr(params, "n_eval_samples_per_epoch")
else None
)
# get cropping:
self.img_crop_shape_x = (
params.crop_size_x if hasattr(params, "crop_size_x") else None
)
self.img_crop_shape_y = (
params.crop_size_y if hasattr(params, "crop_size_y") else None
)
self.img_crop_offset_x = (
params.crop_anchor_x if hasattr(params, "crop_anchor_x") else 0
)
self.img_crop_offset_y = (
params.crop_anchor_y if hasattr(params, "crop_anchor_y") else 0
)
self._get_files_stats()
# zenith angle yes or no?
self.add_zenith = self.params.add_zenith
if self.add_zenith:
self.zen_dummy = torch.zeros(
(
self.batch_size,
self.n_history + 1,
1,
self.img_local_shape_x,
self.img_local_shape_y,
),
dtype=torch.float32,
device=self.device,
)
def _get_files_stats(self): # pragma: no cover
self.files_paths = glob.glob(self.location + "/*.h5")
if not self.files_paths:
logging.info(
"No input files found, specifying dataset properties from parameter inputs"
)
self.n_years = self.params.n_years
self.n_samples_per_year = self.params.n_samples_per_year
self.img_shape_x = self.params.img_shape_x
self.img_shape_y = self.params.img_shape_y
else:
self.files_paths.sort()
self.n_years = len(self.files_paths)
with h5py.File(self.files_paths[0], "r") as _f:
logging.info("Getting file stats from {}".format(self.files_paths[0]))
self.n_samples_per_year = _f["fields"].shape[0]
# original image shape (before padding)
self.img_shape_x = _f["fields"].shape[2]
self.img_shape_y = _f["fields"].shape[3]
# determine local read size:
# sanitize the crops first
if self.img_crop_shape_x is None:
self.img_crop_shape_x = self.img_shape_x
if self.img_crop_shape_y is None:
self.img_crop_shape_y = self.img_shape_y
assert self.img_crop_offset_x + self.img_crop_shape_x <= self.img_shape_x
assert self.img_crop_offset_y + self.img_crop_shape_y <= self.img_shape_y
# for x
read_shape_x = (self.img_crop_shape_x + self.io_grid[0] - 1) // self.io_grid[0]
read_anchor_x = self.img_crop_offset_x + read_shape_x * self.io_rank[0]
read_shape_x = min(read_shape_x, self.img_shape_x - read_anchor_x)
# for y
read_shape_y = (self.img_crop_shape_y + self.io_grid[1] - 1) // self.io_grid[1]
read_anchor_y = self.img_crop_offset_y + read_shape_y * self.io_rank[1]
read_shape_y = min(read_shape_y, self.img_shape_y - read_anchor_y)
# compute padding
self.img_local_pad_x = (
self.img_crop_shape_x + self.io_grid[0] - 1
) // self.io_grid[0] - read_shape_x
self.img_local_pad_y = (
self.img_crop_shape_y + self.io_grid[1] - 1
) // self.io_grid[1] - read_shape_y
self.img_local_offset_x = read_anchor_x
self.img_local_offset_y = read_anchor_y
self.img_local_shape_x = read_shape_x + self.img_local_pad_x
self.img_local_shape_y = read_shape_y + self.img_local_pad_y
# sharding
self.n_samples_total = (
self.n_samples_per_epoch
if self.n_samples_per_epoch is not None
else self.n_years * self.n_samples_per_year
)
self.n_samples_shard = self.n_samples_total // comm.get_size("data")
# channels
self.n_in_channels_local = self.n_in_channels
self.n_out_channels_local = self.n_out_channels
self.files = [None for _ in range(self.n_years)]
logging.info("Number of samples per year: {}".format(self.n_samples_per_year))
logging.info(
"Found data at path {}. Number of examples: {}. Image Shape: {} x {} x {}".format(
self.location,
self.n_samples_total,
self.img_shape_x,
self.img_shape_y,
self.n_in_channels_local,
)
)
logging.info(
"Including {} hours of past history in training at a frequency of {} hours".format(
6 * self.dt * self.n_history, 6 * self.dt
)
)
logging.info("WARNING: using dummy data")
# create tensors for dummy data
self.device = torch.device(f"cuda:{comm.get_local_rank()}")
self.inp = torch.zeros(
(
self.batch_size,
self.n_history + 1,
self.n_in_channels,
self.img_local_shape_x,
self.img_local_shape_y,
),
dtype=torch.float32,
device=self.device,
)
self.tar = torch.zeros(
(
self.batch_size,
self.n_future + 1,
self.n_out_channels_local,
self.img_local_shape_x,
self.img_local_shape_y,
),
dtype=torch.float32,
device=self.device,
)
# initialize output
self.inp.uniform_()
self.tar.uniform_()
self.in_bias = np.zeros((1, self.n_in_channels, 1, 1)).astype(np.float32)
self.in_scale = np.ones((1, self.n_in_channels, 1, 1)).astype(np.float32)
self.out_bias = np.zeros((1, self.n_out_channels_local, 1, 1)).astype(
np.float32
)
self.out_scale = np.ones((1, self.n_out_channels_local, 1, 1)).astype(
np.float32
)
def get_input_normalization(self): # pragma: no cover
"""Returns the input normalization parameters"""
return self.in_bias, self.in_scale
def get_output_normalization(self): # pragma: no cover
"""Returns the output normalization parameters"""
return self.out_bias, self.out_scale
def __len__(self): # pragma: no cover
return self.n_samples_shard
def __iter__(self): # pragma: no cover
self.sample_idx = 0
return self
def __next__(self): # pragma: no cover
if self.sample_idx < self.n_samples_shard:
self.sample_idx += 1
if self.add_zenith:
return self.inp, self.tar, self.zen_dummy, self.zen_dummy
else:
return self.inp, self.tar
else:
raise StopIteration()
|
modulus-main
|
modulus/datapipes/climate/sfno/dataloaders/data_loader_dummy.py
|
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
def pytest_addoption(parser):
parser.addoption(
"--multigpu", action="store_true", default=False, help="run multigpu tests"
)
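# Typical invocations (illustrative): `pytest --multigpu -m multigpu` runs only
# the multi-GPU tests, while a plain `pytest` run collects but skips them.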
def pytest_configure(config):
config.addinivalue_line("markers", "multigpu: mark test as multigpu to run")
def pytest_collection_modifyitems(config, items):
if not config.getoption("--multigpu") and not config.getoption("-m"):
skip_multigpu = pytest.mark.skip(reason="need --multigpu option to run")
for item in items:
if "multigpu" in item.keywords:
item.add_marker(skip_multigpu)
|
modulus-main
|
test/conftest.py
|
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# from modulus.models.mlp import FullyConnected
# import torch
# from modulus.module.derivatives import DerivWrapper
# net = FullyConnected(
# in_features=2,
# out_features=2,
# )
# p = net(torch.ones(1000, 2))
# print(p)
# net = DerivWrapper(
# net,
# input_keys=["x", "y"],
# output_keys=["u", "v"],
# deriv_keys=["u__x", "v__y", "u__x__y"],
# )
# input_dict = {"x": torch.ones(1000, 1), "y": torch.ones(1000, 1)}
# p = net(input_dict)
# print(p["u"][0])
|
modulus-main
|
test/derivs_test.py
|
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
import pytest
@pytest.mark.multigpu
def test_multi_gpu():
num_gpus = torch.cuda.device_count()
assert num_gpus > 1, "Not enough GPUs available for test"
for i in range(num_gpus):
with torch.cuda.device(i):
tensor = torch.tensor([1.0, 2.0, 3.0], device=f"cuda:{i}")
assert tensor.sum() == 6.0
if __name__ == "__main__":
pytest.main([__file__])
|
modulus-main
|
test/test_multi_gpu_sample.py
|
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
import torch
import numpy as np
from modulus.metrics.general.mse import mse, rmse
from modulus.metrics.climate.acc import acc
import modulus.metrics.climate.reduction as clim_red
import modulus.metrics.climate.efi as efi
import modulus.metrics.general.reduction as gen_red
import modulus.metrics.general.histogram as hist
Tensor = torch.Tensor
@pytest.fixture
def test_data(channels=2, img_shape=(721, 1440)):
# create dummy data
time_means = (
np.pi / 2 * np.ones((channels, img_shape[0], img_shape[1]), dtype=np.float32)
)
# Set lat/lon in terms of degrees (for use with _compute_lat_weights)
x = np.linspace(-180, 180, img_shape[1], dtype=np.float32)
y = np.linspace(-90, 90, img_shape[0], dtype=np.float32)
xv, yv = np.meshgrid(x, y)
pred_tensor_np = np.cos(2 * np.pi * yv / (180))
targ_tensor_np = np.cos(np.pi * yv / (180))
return channels, x, y, pred_tensor_np, targ_tensor_np, time_means
@pytest.mark.parametrize("device", ["cuda:0", "cpu"])
def test_climate_acc_mse(test_data, device, rtol: float = 1e-3, atol: float = 1e-3):
channels, lon, lat, pred_tensor_np, targ_tensor_np, time_means = test_data
lat = torch.from_numpy(lat).to(device)
lon = torch.from_numpy(lon).to(device)
pred_tensor = torch.from_numpy(pred_tensor_np).expand(channels, -1, -1).to(device)
targ_tensor = torch.from_numpy(targ_tensor_np).expand(channels, -1, -1).to(device)
means_tensor = torch.from_numpy(time_means).to(device)
    # Independent of the time means, the ACC score for cos(2*x) and cos(x) is 1/8 π sqrt(15/(32 - 3 π^2))
    # or about 0.98355. For derivation, note that the lat weight gives an extra factor of cos(x)/2 and
    # p1 = int[ (cos(x) - y - E[cos(x)-y]) * (cos(2x) - y - E[cos(2x)-y]) cos(x)/2 ] = pi/24
    # p2 = int[ (cos(2x) - y - E[cos(2x) - y])^2 cos(x)/2 ] = 16/45
    # p3 = int[ (cos(x) - y - E[cos(x) - y])^2 cos(x)/2 ] = 2/3 - pi^2/16 (here E[.] denotes mean)
    # and acc = p1 / sqrt(p2 * p3) = 1/8 π sqrt(15/(32 - 3 π^2))
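    # Illustrative sanity check that the closed form above matches the 0.9836
    # constant used below (plain numpy; independent of the acc implementation):
    expected_acc = np.pi / 8 * np.sqrt(15.0 / (32.0 - 3.0 * np.pi**2))
    assert abs(expected_acc - 0.9836) < 5e-4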
acc_ = acc(pred_tensor, targ_tensor, means_tensor, lat)
assert torch.allclose(
acc_,
0.9836 * torch.ones(channels).to(device),
rtol=rtol,
atol=atol,
)
    # int( (cos(x)^2 - cos(2x)^2)^2 )dx over x = 0...pi equals pi/4,
    # so the MSE should be (pi/4) / pi = 0.25
error = mse(pred_tensor**2, targ_tensor**2, dim=(1, 2))
rerror = rmse(pred_tensor**2, targ_tensor**2, dim=(1, 2))
assert torch.allclose(
error,
0.25 * torch.ones([1], dtype=torch.float32, device=device),
rtol=rtol,
atol=atol,
)
assert torch.allclose(
rerror,
np.sqrt(0.25) * torch.ones([1], dtype=torch.float32, device=device),
rtol=rtol,
atol=atol,
)
@pytest.mark.parametrize("device", ["cuda:0", "cpu"])
def test_climate_reductions(test_data, device, rtol: float = 1e-3, atol: float = 1e-3):
channels, lon, lat, pred_tensor_np, targ_tensor_np, time_means = test_data
pred_tensor = torch.from_numpy(pred_tensor_np).expand(channels, -1, -1).to(device)
lat = torch.from_numpy(lat).to(device)
weights = clim_red._compute_lat_weights(lat)
# Check main class
ws = gen_red.WeightedStatistic(weights)
# Check that it normalizes
assert torch.allclose(
torch.sum(ws.weights),
torch.ones([1], dtype=torch.float32, device=device),
rtol=rtol,
atol=atol,
)
# Check when weights are 1 dimensional
weights = weights.flatten()
wm = gen_red.WeightedMean(weights)
our_weighted_mean = wm(pred_tensor, dim=1)
np_weighted_mean = np.average(pred_tensor.cpu(), weights=weights.cpu(), axis=1)
assert torch.allclose(
our_weighted_mean,
torch.from_numpy(np_weighted_mean).to(device),
rtol=rtol,
atol=atol,
)
# Check when weights are same shape as pred_tensor
weights = weights.unsqueeze(0)
weights = weights.unsqueeze(-1)
wm = gen_red.WeightedMean(weights)
our_weighted_mean = wm(pred_tensor, dim=1)
np_weighted_mean = np.average(
pred_tensor.cpu(), weights=weights.flatten().cpu(), axis=1
)
assert torch.allclose(
our_weighted_mean,
torch.from_numpy(np_weighted_mean).to(device),
rtol=rtol,
atol=atol,
)
# Check zonal mean == our_weighted_mean
zonal_mean = clim_red.zonal_mean(pred_tensor, lat, dim=1)
assert torch.allclose(
our_weighted_mean,
zonal_mean,
rtol=rtol,
atol=atol,
)
# Test variance
# Check when weights are 1 dimensional
weights = weights.flatten()
wv = clim_red.WeightedVariance(weights)
our_weighted_var = wv(pred_tensor, dim=1)
np_weighted_mean = np.average(pred_tensor.cpu(), weights=weights.cpu(), axis=1)
np_weighted_var = np.average(
(pred_tensor.cpu() - np_weighted_mean[:, None, ...]) ** 2,
weights=weights.cpu(),
axis=1,
)
assert torch.allclose(
our_weighted_var,
torch.from_numpy(np_weighted_var).to(device),
rtol=rtol,
atol=atol,
)
# Check zonal var == our_weighted_var
zonal_var = clim_red.zonal_var(pred_tensor, lat, dim=1)
zonal_std = clim_red.zonal_var(pred_tensor, lat, dim=1, std=True)
assert torch.allclose(
our_weighted_var,
zonal_var,
rtol=rtol,
atol=atol,
)
assert torch.allclose(
torch.sqrt(our_weighted_var),
zonal_std,
rtol=rtol,
atol=atol,
)
# Check global means and vars
global_mean = clim_red.global_mean(pred_tensor, lat)
assert torch.allclose(
torch.mean(our_weighted_mean, dim=-1),
global_mean,
rtol=rtol,
atol=atol,
)
    # Global variance of cos(2x) should be
    # int[ (cos(2x) - E[cos(2x)])^2 * cos(x)/2 ] dx
    # = int[ (cos(2x) - 1/3)^2 * cos(x)/2 ] dx
    # = 16/45
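    # Illustrative cross-check of the 16/45 value with a plain Riemann sum
    # (independent of the modulus implementation):
    th = np.linspace(-np.pi / 2, np.pi / 2, 200_001)
    wgt = np.cos(th) / 2
    g_check = np.cos(2 * th)
    gm_check = np.sum(g_check * wgt) / np.sum(wgt)
    var_check = np.sum((g_check - gm_check) ** 2 * wgt) / np.sum(wgt)
    assert abs(var_check - 16.0 / 45.0) < 1e-3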
global_var = clim_red.global_var(pred_tensor, lat)
global_std = clim_red.global_var(pred_tensor, lat, std=True)
assert torch.allclose(
16 / 45 * torch.ones([1], device=device),
global_var,
rtol=rtol,
atol=atol,
)
assert torch.allclose(
4 / 3 / np.sqrt(5) * torch.ones([1], device=device),
global_std,
rtol=rtol,
atol=atol,
)
@pytest.mark.parametrize("device", ["cuda:0", "cpu"])
def test_climate_efi(test_data, device, rtol: float = 1e-1, atol: float = 1e-1):
one = torch.ones((1, 1), dtype=torch.float32, device=device)
bin_edges = hist.linspace(-10 * one, 10 * one, 30)
bin_mids = 0.5 * bin_edges[1:] + 0.5 * bin_edges[:-1]
clim_mean = torch.zeros((1, 1), dtype=torch.float32, device=device)
clim_std = torch.ones((1, 1), dtype=torch.float32, device=device)
# Test normal pdf and cdf
_, test_counts = hist.histogram(
torch.randn(1_000_000, 1, 1, dtype=torch.float32, device=device), bins=bin_edges
)
test_pdf = test_counts / torch.trapz(test_counts, bin_mids, dim=0)
test_cdf = torch.cumsum(
test_counts / torch.sum(test_counts, dim=0, keepdims=True), dim=0
)
clim_pdf = hist.normal_pdf(clim_mean, clim_std, bin_edges, grid="right")
clim_cdf = hist.normal_cdf(clim_mean, clim_std, bin_edges, grid="right")
assert torch.allclose(
torch.trapz((clim_pdf - test_pdf) ** 2, bin_mids, dim=0),
0.0 * one,
rtol=rtol,
atol=atol,
)
assert torch.allclose(
torch.trapz((clim_cdf - test_cdf) ** 2, bin_mids, dim=0),
0.0 * one,
rtol=rtol,
atol=atol,
)
x = torch.randn((1_000_000, 1, 1), dtype=torch.float32, device=device)
_, cdf = hist.cdf(x, bins=bin_edges)
e = efi.efi(cdf, bin_edges, clim_mean, clim_std)
assert torch.allclose(e, 0.0 * one, rtol=rtol, atol=atol)
x = 2.0 + 2.0 * torch.randn((1_000_000, 1, 1), dtype=torch.float32, device=device)
_, cdf = hist.cdf(x, bins=bin_edges)
e1 = efi.efi(cdf, bin_edges, clim_mean, clim_std)
assert torch.all(torch.ge(e1, 0.0 * one))
x = 0.1 * torch.randn((1_000_000, 1, 1), dtype=torch.float32, device=device)
_, cdf = hist.cdf(x, bins=bin_edges)
e2 = efi.efi(cdf, bin_edges, clim_mean, clim_std)
assert torch.allclose(e2, 0.0 * one, rtol=rtol, atol=atol)
ne = efi.normalized_entropy(test_pdf, bin_edges, clim_mean, clim_std)
assert torch.allclose(ne, 0.0 * one, rtol=rtol, atol=atol)
|
modulus-main
|
test/metrics/test_metrics_climate.py
|
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
import torch
import torch.distributed as dist
import numpy as np
import os
import modulus.metrics.general.histogram as hist
import modulus.metrics.general.ensemble_metrics as em
import modulus.metrics.general.crps as crps
import modulus.metrics.general.wasserstein as w
import modulus.metrics.general.calibration as cal
import modulus.metrics.general.entropy as ent
from modulus.distributed.manager import DistributedManager
Tensor = torch.Tensor
def get_disagreements(inputs, bins, counts, test):
"""
Utility for testing disagreements in the bin counts.
"""
sum_counts = torch.sum(counts, dim=0)
disagreements = torch.nonzero(sum_counts != test, as_tuple=True)
print("Disagreements: ", str(disagreements))
number_of_disagree = len(disagreements[0])
for i in range(number_of_disagree):
ind = [disagreements[0][i], disagreements[1][i], disagreements[2][i]]
print("Ind", ind)
print(
"Input ",
inputs[:, disagreements[0][i], disagreements[1][i], disagreements[2][i]],
)
print(
"Bins ",
bins[:, disagreements[0][i], disagreements[1][i], disagreements[2][i]],
)
print(
"Counts",
counts[:, disagreements[0][i], disagreements[1][i], disagreements[2][i]],
)
trueh = torch.histogram(
inputs[:, disagreements[0][i], disagreements[1][i], disagreements[2][i]],
bins[:, disagreements[0][i], disagreements[1][i], disagreements[2][i]],
)
print("True counts", trueh)
@pytest.mark.parametrize("device", ["cpu", "cuda:0"])
@pytest.mark.parametrize("input_shape", [(1, 72, 144), (1, 360, 720)])
def test_histogram(device, input_shape, rtol: float = 1e-3, atol: float = 1e-3):
DistributedManager._shared_state = {}
if (device == "cuda:0") and (not dist.is_initialized()):
os.environ["MASTER_ADDR"] = "localhost"
os.environ["MASTER_PORT"] = "12345"
os.environ["RANK"] = "0"
os.environ["WORLD_SIZE"] = "1"
DistributedManager.setup()
manager = DistributedManager()
dist.init_process_group(
"nccl", rank=manager.rank, world_size=manager.world_size
)
x = torch.randn([10, *input_shape], device=device)
y = torch.randn([5, *input_shape], device=device)
# Test linspace
start = torch.zeros(input_shape, device=device)
end = torch.ones(input_shape, device=device)
l = hist.linspace(start, end, 10)
assert l.shape[0] == 11
l_np = np.linspace(start.cpu(), end.cpu(), 11)
assert torch.allclose(
l,
torch.from_numpy(l_np).to(device),
rtol=rtol,
atol=atol,
)
# Test histogram correctness
xx = x[:, 0, 0, 0]
xx_np = xx.cpu().numpy()
bins, counts = hist.histogram(xx, bins=10)
counts_np, bins_np = np.histogram(xx_np, bins=10)
assert torch.allclose(
bins,
torch.from_numpy(bins_np).to(device),
rtol=rtol,
atol=atol,
)
assert torch.allclose(
counts,
torch.from_numpy(counts_np).to(device),
rtol=rtol,
atol=atol,
)
# Test low and high memory bin counts
bins = l
counts = torch.zeros([10, *input_shape], device=device)
counts_low_counts = hist._low_memory_bin_reduction_counts(x, bins, counts, 10)
counts_high_counts = hist._high_memory_bin_reduction_counts(x, bins, counts, 10)
counts_low_cdf = hist._low_memory_bin_reduction_cdf(x, bins, counts, 10)
counts_high_cdf = hist._high_memory_bin_reduction_cdf(x, bins, counts, 10)
assert torch.allclose(
counts_low_counts,
counts_high_counts,
rtol=rtol,
atol=atol,
)
assert torch.allclose(
counts_low_cdf,
counts_high_cdf,
rtol=rtol,
atol=atol,
)
binsx, countsx = hist.histogram(x, bins=10, verbose=True)
assert torch.allclose(
torch.sum(countsx, dim=0),
10 * torch.ones([1], dtype=torch.int64, device=device),
rtol=rtol,
atol=atol,
), get_disagreements(
x, binsx, countsx, 10 * torch.ones([1], dtype=torch.int64, device=device)
)
binsxy, countsxy = hist.histogram(x, y, bins=5)
assert torch.allclose(
torch.sum(countsxy, dim=0),
15 * torch.ones([1], dtype=torch.int64, device=device),
rtol=rtol,
atol=atol,
), get_disagreements(
y,
binsxy,
countsxy - countsx,
5 * torch.ones([1], dtype=torch.int64, device=device),
)
binsxy, countsxy = hist.histogram(x, y, bins=binsx)
assert torch.allclose(
torch.sum(countsxy, dim=0),
15 * torch.ones([1], dtype=torch.int64, device=device),
rtol=rtol,
atol=atol,
), get_disagreements(
y, binsxy, countsxy, 15 * torch.ones([1], dtype=torch.int64, device=device)
)
H = hist.Histogram(input_shape, bins=10, device=device)
binsx, countsx = H(x)
assert torch.allclose(
torch.sum(countsx, dim=0),
10 * torch.ones([1], dtype=torch.int64, device=device),
rtol=rtol,
atol=atol,
), get_disagreements(
x, binsx, countsx, 10 * torch.ones([1], dtype=torch.int64, device=device)
)
binsxy, countsxy = H.update(y)
if binsxy.shape[0] != binsx.shape[0]:
dbins = binsx[1, 0, 0, 0] - binsx[0, 0, 0, 0]
ind = torch.isclose(
binsxy[:, 0, 0, 0], binsx[0, 0, 0, 0], rtol=0.1 * dbins, atol=1e-3
).nonzero(as_tuple=True)[0]
new_counts = countsxy[ind : ind + 10] - countsx
else:
new_counts = countsxy - countsx
assert torch.allclose(
torch.sum(countsxy, dim=0),
15 * torch.ones([1], dtype=torch.int64, device=device),
rtol=rtol,
atol=atol,
), get_disagreements(
y, binsxy, new_counts, 5 * torch.ones([1], dtype=torch.int64, device=device)
)
_, pdf = H.finalize()
_, cdf = H.finalize(cdf=True)
assert torch.allclose(
cdf[-1],
torch.ones([1], dtype=torch.float32, device=device),
rtol=rtol,
atol=atol,
)
assert torch.allclose(
torch.sum(pdf, dim=0),
torch.ones([1], dtype=torch.float32, device=device),
rtol=rtol,
atol=atol,
)
if (device == "cuda:0") and (not dist.is_initialized()):
DistributedManager.cleanup()
@pytest.mark.parametrize("device", ["cuda:0", "cpu"])
def test_crps(device, rtol: float = 1e-3, atol: float = 1e-3):
# Uses eq (5) from Gneiting et al. https://doi.org/10.1175/MWR2904.1
# crps(N(0, 1), 0.0) = 2 / sqrt(2*pi) - 1/sqrt(pi) ~= 0.23...
x = torch.randn((1_000_000, 1), device=device, dtype=torch.float32)
y = torch.zeros((1,), device=device, dtype=torch.float32)
# Test pure crps
c = crps.crps(x, y, bins=1_000)
true_crps = (np.sqrt(2) - 1.0) / np.sqrt(np.pi)
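    # Note: (sqrt(2) - 1)/sqrt(pi) is the same quantity as the
    # 2/sqrt(2*pi) - 1/sqrt(pi) form quoted above (~0.2337).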
assert torch.allclose(
c,
true_crps * torch.ones([1], dtype=torch.float32, device=device),
rtol=rtol,
atol=atol,
)
# Test when input is numpy array
c = crps.crps(x, y.cpu().numpy(), bins=1_000)
assert torch.allclose(
c,
true_crps * torch.ones([1], dtype=torch.float32, device=device),
rtol=rtol,
atol=atol,
)
# Test Gaussian CRPS
mm = torch.zeros([1], dtype=torch.float32, device=device)
vv = torch.ones([1], dtype=torch.float32, device=device)
gaussian_crps = crps._crps_gaussian(mm, vv, y)
assert torch.allclose(
gaussian_crps,
true_crps * torch.ones([1], dtype=torch.float32, device=device),
rtol=rtol,
atol=atol,
)
gaussian_crps = crps._crps_gaussian(mm, vv, y.cpu().numpy())
assert torch.allclose(
gaussian_crps,
true_crps * torch.ones([1], dtype=torch.float32, device=device),
rtol=rtol,
atol=atol,
)
# Test from counts
binsx, countsx = hist.histogram(x, bins=1_000)
assert torch.allclose(
torch.sum(countsx, dim=0),
1_000_000 * torch.ones([1], dtype=torch.int64, device=device),
rtol=rtol,
atol=atol,
)
c = crps._crps_from_counts(binsx, countsx, y)
assert torch.allclose(
c,
true_crps * torch.ones([1], dtype=torch.float32, device=device),
rtol=rtol,
atol=atol,
)
# Counts, numpy
c = crps._crps_from_counts(binsx, countsx, y.cpu().numpy())
assert torch.allclose(
c,
true_crps * torch.ones([1], dtype=torch.float32, device=device),
rtol=rtol,
atol=atol,
)
# Test from cdf
binsx, cdfx = hist.cdf(x, bins=1_000)
assert torch.allclose(
cdfx[-1],
torch.ones([1], dtype=torch.float32, device=device),
rtol=rtol,
atol=atol,
)
c = crps._crps_from_cdf(binsx, cdfx, y)
assert torch.allclose(
c,
true_crps * torch.ones([1], dtype=torch.float32, device=device),
rtol=rtol,
atol=atol,
)
assert torch.allclose(
w.wasserstein(binsx, cdfx, cdfx),
torch.zeros([1], dtype=torch.float32, device=device),
rtol=rtol,
atol=atol,
)
@pytest.mark.parametrize("device", ["cuda:0", "cpu"])
def test_means_var(device, rtol: float = 1e-3, atol: float = 1e-3):
DistributedManager._shared_state = {}
if (device == "cuda:0") and (not dist.is_initialized()):
os.environ["MASTER_ADDR"] = "localhost"
os.environ["MASTER_PORT"] = "12345"
os.environ["RANK"] = "0"
os.environ["WORLD_SIZE"] = "1"
DistributedManager.setup()
manager = DistributedManager()
        # Test raises an error, since the process group is not initialized
with pytest.raises(RuntimeError) as e_info:
em.EnsembleMetrics((1, 72, 144), device=device)
dist.init_process_group(
"nccl", rank=manager.rank, world_size=manager.world_size
)
ens_metric = em.EnsembleMetrics((1, 72, 144), device=device)
with pytest.raises(NotImplementedError) as e_info:
ens_metric.__call__()
with pytest.raises(NotImplementedError) as e_info:
ens_metric.update()
with pytest.raises(NotImplementedError) as e_info:
ens_metric.finalize()
x = torch.randn((10, 1, 72, 144), device=device)
y = torch.randn((5, 1, 72, 144), device=device)
M = em.Mean((1, 72, 144), device=device)
meanx = M(x)
assert torch.allclose(meanx, torch.mean(x, dim=0))
meanxy = M.update(y)
assert torch.allclose(
meanxy, torch.mean(torch.cat((x, y), dim=0), dim=0), rtol=rtol, atol=atol
)
assert torch.allclose(meanxy, M.finalize(), rtol=rtol, atol=atol)
# Test _update_mean utility
_sumxy, _n = em._update_mean(meanx * 10, 10, y, batch_dim=0)
assert torch.allclose(meanxy, _sumxy / _n, rtol=rtol, atol=atol)
# Test with flattened y
_sumxy, _n = em._update_mean(meanx * 10, 10, y[0], batch_dim=None)
_sumxy, _n = em._update_mean(_sumxy, _n, y[1:], batch_dim=0)
assert torch.allclose(meanxy, _sumxy / _n, rtol=rtol, atol=atol)
V = em.Variance((1, 72, 144), device=device)
varx = V(x)
assert torch.allclose(varx, torch.var(x, dim=0))
varxy = V.update(y)
assert torch.allclose(
varxy, torch.var(torch.cat((x, y), dim=0), dim=0), rtol=rtol, atol=atol
)
varxy = V.finalize()
assert torch.allclose(
varxy, torch.var(torch.cat((x, y), dim=0), dim=0), rtol=rtol, atol=atol
)
stdxy = V.finalize(std=True)
assert torch.allclose(
stdxy, torch.std(torch.cat((x, y), dim=0), dim=0), rtol=rtol, atol=atol
)
# Test _update_var utility function
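    # _update_var implements a streaming variance update: it carries the running
    # sum and the running sum of squared deviations ((n - 1) times the variance),
    # so new batches fold in without revisiting earlier samples.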
_sumxy, _sum2xy, _n = em._update_var(10 * meanx, 9 * varx, 10, y, batch_dim=0)
assert _n == 15
assert torch.allclose(varxy, _sum2xy / (_n - 1.0), rtol=rtol, atol=atol)
# Test with flattened array
# Test with flattened y
_sumxy, _sum2xy, _n = em._update_var(10 * meanx, 9 * varx, 10, y[0], batch_dim=None)
assert _n == 11
assert torch.allclose(
_sumxy / _n,
torch.mean(torch.cat((x, y[0][None, ...]), dim=0), dim=0),
rtol=rtol,
atol=atol,
)
assert torch.allclose(
_sum2xy / (_n - 1.0),
torch.var(torch.cat((x, y[0][None, ...]), dim=0), dim=0),
rtol=rtol,
atol=atol,
)
_sumxy, _sum2xy, _n = em._update_var(_sumxy, _sum2xy, _n, y[1:], batch_dim=0)
assert torch.allclose(varxy, _sum2xy / (_n - 1.0), rtol=rtol, atol=atol)
@pytest.mark.parametrize("device", ["cuda:0", "cpu"])
def test_calibration(device, rtol: float = 1e-2, atol: float = 1e-2):
x = torch.randn((10_000, 30, 30), device=device, dtype=torch.float32)
y = torch.randn((30, 30), device=device, dtype=torch.float32)
bin_edges, bin_counts = hist.histogram(x, bins=30)
# Test getting rank from histogram
ranks = cal.find_rank(bin_edges, bin_counts, y)
assert ranks.shape == y.shape
assert torch.all(torch.le(ranks, 1.0))
assert torch.all(torch.ge(ranks, 0.0))
# Test getting rank from histogram (numpy)
y = np.random.randn(30, 30)
ranks_np = cal.find_rank(bin_edges, bin_counts, y)
assert ranks_np.shape == y.shape
assert torch.all(torch.le(ranks_np, 1.0))
assert torch.all(torch.ge(ranks_np, 0.0))
ranks = ranks.flatten()
rank_bin_edges = torch.linspace(0, 1, 11).to(device)
rank_bin_edges, rank_counts = hist.histogram(ranks, bins=rank_bin_edges)
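    # For a calibrated ensemble, observation ranks are uniform on [0, 1], so the
    # rank probability score (deviation from a flat rank histogram) should be
    # near zero for this matched Gaussian data.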
rps = cal._rank_probability_score_from_counts(rank_bin_edges, rank_counts)
assert rps > 0.0
assert rps < 1.0
assert torch.allclose(
rps, torch.zeros([1], device=device, dtype=torch.float32), rtol=rtol, atol=atol
)
rps = cal.rank_probability_score(ranks)
assert rps > 0.0
assert rps < 1.0
assert torch.allclose(
rps, torch.zeros([1], device=device, dtype=torch.float32), rtol=rtol, atol=atol
)
num_obs = 1000
x = torch.randn((1_000, num_obs, 10, 10), device=device, dtype=torch.float32)
bin_edges, bin_counts = hist.histogram(x, bins=20)
obs = torch.randn((num_obs, 10, 10), device=device, dtype=torch.float32)
ranks = cal.find_rank(bin_edges, bin_counts, obs)
assert ranks.shape == (num_obs, 10, 10)
rps = cal.rank_probability_score(ranks)
assert rps.shape == (10, 10)
assert torch.allclose(
rps, torch.zeros([1], device=device, dtype=torch.float32), rtol=rtol, atol=atol
)
@pytest.mark.parametrize("device", ["cuda:0", "cpu"])
def test_entropy(device, rtol: float = 1e-2, atol: float = 1e-2):
one = torch.ones([1], device=device, dtype=torch.float32)
x = torch.randn((100_000, 10, 10), device=device, dtype=torch.float32)
bin_edges, bin_counts = hist.histogram(x, bins=30)
entropy = ent._entropy_from_counts(bin_counts, bin_edges, normalized=False)
assert entropy.shape == (10, 10)
assert torch.allclose(
entropy, (0.5 + 0.5 * np.log(2 * np.pi)) * one, atol=atol, rtol=rtol
)
entropy = ent._entropy_from_counts(bin_counts, bin_edges, normalized=True)
assert torch.all(torch.le(entropy, one))
assert torch.all(torch.ge(entropy, 0.0 * one))
# Test Maximum Entropy
x = torch.rand((100_000, 10, 10), device=device, dtype=torch.float32)
bin_edges, bin_counts = hist.histogram(x, bins=30)
entropy = ent._entropy_from_counts(bin_counts, bin_edges, normalized=True)
assert entropy.shape == (10, 10)
assert torch.allclose(entropy, one, rtol=rtol, atol=atol)
# Test Relative Entropy
x = torch.randn((100_000, 10, 10), device=device, dtype=torch.float32)
bin_edges, x_bin_counts = hist.histogram(x, bins=30)
    x1 = torch.randn((100_000, 10, 10), device=device, dtype=torch.float32)
    _, x1_bin_counts = hist.histogram(x1, bins=bin_edges)
    x2 = 0.1 * torch.randn((100_000, 10, 10), device=device, dtype=torch.float32)
    _, x2_bin_counts = hist.histogram(x2, bins=bin_edges)
rel_ent_1 = ent._relative_entropy_from_counts(
x_bin_counts, x1_bin_counts, bin_edges
)
rel_ent_2 = ent._relative_entropy_from_counts(
x_bin_counts, x2_bin_counts, bin_edges
)
assert torch.all(torch.le(rel_ent_1, rel_ent_2))
    assert torch.allclose(rel_ent_1, 0.0 * one, rtol=rtol, atol=atol)
assert torch.all(torch.ge(rel_ent_2, 0.0 * one))
|
modulus-main
|
test/metrics/test_metrics_general.py
|
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from modulus.distributed import DistributedManager
from modulus.distributed import gather_loss
# TODO: Need to figure out how to test parallel set up
def test_gather_loss():
os.environ["MASTER_ADDR"] = "localhost"
os.environ["MASTER_PORT"] = "12345"
os.environ["RANK"] = "0"
os.environ["WORLD_SIZE"] = "1"
# Reset class state
DistributedManager._shared_state = {}
DistributedManager.initialize()
manager = DistributedManager()
assert manager.is_initialized()
loss = gather_loss(1.0, dst_rank=0, mean=True)
assert loss == 1.0
del os.environ["RANK"]
del os.environ["WORLD_SIZE"]
|
modulus-main
|
test/distributed/test_utils.py
|
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import pytest
import torch
from modulus.distributed import DistributedManager
# TODO: Need to figure out how to test parallel set up
def test_manager():
os.environ["MASTER_ADDR"] = "localhost"
os.environ["MASTER_PORT"] = "12345"
os.environ["RANK"] = "0"
os.environ["WORLD_SIZE"] = "1"
# Reset class state
DistributedManager._shared_state = {}
DistributedManager.initialize()
print(DistributedManager())
manager = DistributedManager()
assert manager.is_initialized()
assert not manager.distributed, "Manager should be in serial mode"
assert manager.rank == 0
assert manager.world_size == 1
assert manager.local_rank == 0
del os.environ["RANK"]
del os.environ["WORLD_SIZE"]
def test_manager_slurm():
# Test distributed manager with Slurm variables
os.environ["MASTER_ADDR"] = "localhost"
os.environ["MASTER_PORT"] = "12345"
os.environ["SLURM_PROCID"] = "0"
os.environ["SLURM_NPROCS"] = "1"
os.environ["SLURM_LOCALID"] = "0"
os.environ["SLURM_LAUNCH_NODE_IPADDR"] = "localhost"
# Reset class state
DistributedManager._shared_state = {}
DistributedManager.initialize()
manager = DistributedManager()
assert manager.is_initialized()
assert manager.rank == 0
assert manager.world_size == 1
assert manager.local_rank == 0
DistributedManager._shared_state = {}
del os.environ["SLURM_PROCID"]
del os.environ["SLURM_NPROCS"]
del os.environ["SLURM_LOCALID"]
del os.environ["SLURM_LAUNCH_NODE_IPADDR"]
def test_manager_ompi():
# Test distributed manager with openMPI variables
os.environ["MASTER_ADDR"] = "localhost"
os.environ["MASTER_PORT"] = "12345"
os.environ["OMPI_COMM_WORLD_RANK"] = "0"
os.environ["OMPI_COMM_WORLD_SIZE"] = "1"
os.environ["OMPI_COMM_WORLD_LOCAL_RANK"] = "0"
# Reset class state
DistributedManager._shared_state = {}
DistributedManager.initialize()
manager = DistributedManager()
assert manager.is_initialized()
assert manager.rank == 0
assert manager.world_size == 1
assert manager.local_rank == 0
DistributedManager._shared_state = {}
del os.environ["OMPI_COMM_WORLD_RANK"]
del os.environ["OMPI_COMM_WORLD_SIZE"]
del os.environ["OMPI_COMM_WORLD_LOCAL_RANK"]
def test_manager_specified_initialization():
# PyTorch env vars
os.environ["MASTER_ADDR"] = "localhost"
os.environ["MASTER_PORT"] = "12345"
os.environ["RANK"] = "0"
os.environ["WORLD_SIZE"] = "1"
os.environ["LOCAL_RANK"] = "0"
# SLURM env vars
os.environ["SLURM_PROCID"] = "0"
os.environ["SLURM_NPROCS"] = "1"
os.environ["SLURM_LOCALID"] = "0"
os.environ["SLURM_LAUNCH_NODE_IPADDR"] = "localhost"
# OpenMPI env vars
os.environ["OMPI_COMM_WORLD_RANK"] = "0"
os.environ["OMPI_COMM_WORLD_SIZE"] = "1"
os.environ["OMPI_COMM_WORLD_LOCAL_RANK"] = "0"
# Test SLURM initialization
os.environ["MODULUS_DISTRIBUTED_INITIALIZATION_METHOD"] = "SLURM"
DistributedManager._shared_state = {}
DistributedManager.initialize()
manager = DistributedManager()
assert manager.is_initialized()
assert manager._initialization_method == "slurm"
assert not manager.distributed, "Manager should be in serial mode"
assert manager.rank == 0
assert manager.world_size == 1
assert manager.local_rank == 0
# Test OpenMPI initialization
os.environ["MODULUS_DISTRIBUTED_INITIALIZATION_METHOD"] = "OPENMPI"
DistributedManager._shared_state = {}
DistributedManager.initialize()
manager = DistributedManager()
assert manager.is_initialized()
assert manager._initialization_method == "openmpi"
assert not manager.distributed, "Manager should be in serial mode"
assert manager.rank == 0
assert manager.world_size == 1
assert manager.local_rank == 0
del os.environ["RANK"]
del os.environ["WORLD_SIZE"]
def test_manager_singleton():
# Test distributed manager singleton functions as expected
os.environ["MASTER_ADDR"] = "localhost"
os.environ["MASTER_PORT"] = "45678"
os.environ["RANK"] = "0"
os.environ["WORLD_SIZE"] = "1"
# Reset class state
DistributedManager._shared_state = {}
DistributedManager.initialize()
manager_1 = DistributedManager()
manager_1.broadcast_buffers = True
manager_1.find_unused_parameters = True
manager_2 = DistributedManager()
# Compare attributes
assert manager_1.rank == manager_2.rank
assert manager_1.world_size == manager_2.world_size
assert manager_1.local_rank == manager_2.local_rank
assert manager_1.device == manager_2.device
assert manager_1.distributed == manager_2.distributed
assert manager_1.cuda == manager_2.cuda
assert manager_1.group_names == manager_2.group_names
assert manager_1.group() == manager_2.group()
assert manager_1.group_size() == manager_2.group_size()
assert manager_1.group_rank() == manager_2.group_rank()
assert manager_1.group_name() == manager_2.group_name()
assert manager_1.broadcast_buffers == manager_2.broadcast_buffers
assert manager_1.find_unused_parameters == manager_2.find_unused_parameters
DistributedManager._shared_state = {}
def run_process_groups(rank, model_parallel_size, verbose):
os.environ["RANK"] = f"{rank}"
os.environ["WORLD_SIZE"] = f"{model_parallel_size}"
os.environ["MASTER_ADDR"] = "localhost"
os.environ["MASTER_PORT"] = str(12355)
DistributedManager._shared_state = {}
DistributedManager.initialize()
# Create model parallel process group
DistributedManager.create_process_subgroup(
"model_parallel", int(model_parallel_size), verbose=verbose
)
# Create data parallel process group for DDP allreduce
DistributedManager.create_orthogonal_process_group(
"data_parallel", "model_parallel", verbose=verbose
)
manager = DistributedManager()
assert manager.rank == rank
assert manager.rank == manager.group_rank(name="model_parallel")
assert 0 == manager.group_rank(name="data_parallel")
@pytest.mark.multigpu
def test_process_groups():
num_gpus = torch.cuda.device_count()
assert num_gpus == 2, "Not enough GPUs available for test"
model_parallel_size = 2
verbose = False # Change to True for debug
torch.multiprocessing.spawn(
run_process_groups,
args=(model_parallel_size, verbose),
nprocs=model_parallel_size,
start_method="spawn",
)
if __name__ == "__main__":
pytest.main([__file__])
|
modulus-main
|
test/distributed/test_manager.py
|
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import pytest
import torch
from modulus.distributed import DistributedManager
from modulus.distributed.autograd import (
all_gather_v,
gather_v,
scatter_v,
indexed_all_to_all_v,
)
def run_test_scatter_v(rank, world_size):
os.environ["RANK"] = f"{rank}"
os.environ["WORLD_SIZE"] = f"{world_size}"
os.environ["MASTER_ADDR"] = "localhost"
os.environ["MASTER_PORT"] = str(12355)
DistributedManager.initialize()
manager = DistributedManager()
assert manager.is_initialized()
tensor_dim = 4
sizes = [r + 2 for r in range(world_size)]
tensor = torch.arange(world_size, device=f"cuda:{rank}", dtype=torch.float32) + 1
tensor = tensor.view(-1, 1).expand(-1, tensor_dim).contiguous()
tensor = tensor.repeat_interleave(
repeats=torch.tensor(sizes, device=f"cuda:{rank}"), dim=0
)
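    # The source tensor stacks sizes[r] rows holding the value (r + 1) for each
    # rank r; scatter_v should hand rank r exactly its (r + 1)-valued block,
    # which expected_tensor below checks.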
tensor.requires_grad_(True)
scattered_tensor = scatter_v(tensor, sizes, dim=0, src=0, group=None)
expected_tensor = torch.ones(
(sizes[rank], tensor_dim), device=f"cuda:{rank}", dtype=torch.float32
) * (rank + 1)
assert torch.allclose(expected_tensor, scattered_tensor)
grad_out = torch.ones_like(scattered_tensor) * (-1)
scattered_tensor.backward(gradient=grad_out)
if rank == 0:
expected_grad = torch.ones_like(tensor) * (-1)
assert torch.allclose(tensor.grad, expected_grad)
del os.environ["RANK"]
del os.environ["WORLD_SIZE"]
del os.environ["MASTER_ADDR"]
del os.environ["MASTER_PORT"]
DistributedManager.cleanup()
def run_test_gather_v(rank, world_size):
os.environ["RANK"] = f"{rank}"
os.environ["WORLD_SIZE"] = f"{world_size}"
os.environ["MASTER_ADDR"] = "localhost"
os.environ["MASTER_PORT"] = str(12355)
DistributedManager.initialize()
manager = DistributedManager()
assert manager.is_initialized()
tensor_dim = 4
tensor = (rank + 1) * torch.ones(
(rank + 2, tensor_dim), device=f"cuda:{rank}", dtype=torch.float32
)
tensor.requires_grad_(True)
sizes = [r + 2 for r in range(world_size)]
gathered_tensor = gather_v(tensor, sizes, dim=0, dst=0, group=None)
if rank == 0:
expected_tensor = (
torch.arange(world_size, device="cuda:0", dtype=torch.float32) + 1
)
expected_tensor = (
expected_tensor.view(-1, 1).expand(-1, tensor_dim).contiguous()
)
expected_tensor = expected_tensor.repeat_interleave(
repeats=torch.tensor(sizes, device="cuda:0"), dim=0
)
assert torch.allclose(expected_tensor, gathered_tensor)
grad_out = torch.ones_like(gathered_tensor) * (-1)
gathered_tensor.backward(gradient=grad_out)
expected_grad = torch.ones_like(tensor) * (-1)
assert torch.allclose(tensor.grad, expected_grad)
del os.environ["RANK"]
del os.environ["WORLD_SIZE"]
del os.environ["MASTER_ADDR"]
del os.environ["MASTER_PORT"]
DistributedManager.cleanup()
def run_test_all_gather_v(rank, world_size):
os.environ["RANK"] = f"{rank}"
os.environ["WORLD_SIZE"] = f"{world_size}"
os.environ["MASTER_ADDR"] = "localhost"
os.environ["MASTER_PORT"] = str(12355)
DistributedManager.initialize()
manager = DistributedManager()
assert manager.is_initialized()
tensor_dim = 4
tensor = (rank + 1) * torch.ones(
(rank + 2, tensor_dim), device=f"cuda:{rank}", dtype=torch.float32
)
tensor.requires_grad_(True)
sizes = [r + 2 for r in range(world_size)]
gathered_tensor = all_gather_v(tensor, sizes, dim=0, group=None)
expected_tensor = (
torch.arange(world_size, device=f"cuda:{rank}", dtype=torch.float32) + 1
)
expected_tensor = expected_tensor.view(-1, 1).expand(-1, tensor_dim).contiguous()
expected_tensor = expected_tensor.repeat_interleave(
repeats=torch.tensor(sizes, device=f"cuda:{rank}"), dim=0
)
assert torch.allclose(expected_tensor, gathered_tensor)
grad_out = torch.ones_like(gathered_tensor) * (-1)
gathered_tensor.backward(gradient=grad_out)
expected_grad = torch.ones_like(tensor) * (-1) * world_size
assert torch.allclose(tensor.grad, expected_grad)
del os.environ["RANK"]
del os.environ["WORLD_SIZE"]
del os.environ["MASTER_ADDR"]
del os.environ["MASTER_PORT"]
DistributedManager.cleanup()
def run_test_indexed_all_to_all_v(rank, world_size):
os.environ["RANK"] = f"{rank}"
os.environ["WORLD_SIZE"] = f"{world_size}"
os.environ["MASTER_ADDR"] = "localhost"
os.environ["MASTER_PORT"] = str(12355)
DistributedManager.initialize()
manager = DistributedManager()
assert manager.is_initialized()
    # this test case is not ideal as it is quite similar to the non-indexed case;
    # however, it is a first step towards testing correctness in general
tensor_dim = 4
tensor = torch.arange(1, world_size + 1, device=f"cuda:{rank}", dtype=torch.float32)
tensor = tensor.view(-1, 1).expand(-1, tensor_dim).contiguous()
tensor = tensor.repeat_interleave(repeats=rank + 1, dim=0)
tensor.requires_grad_(True)
sizes = [[r + 1 for _ in range(world_size)] for r in range(world_size)]
indices = [
torch.nonzero(tensor[:, 0] == (r + 1)).view(-1) for r in range(world_size)
]
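    # Setup above: rank r holds (r + 1) copies of each value 1..world_size,
    # sizes[r][p] says rank r sends (r + 1) rows to rank p, and indices[p]
    # picks out the rows whose value equals p + 1, i.e. those destined for rank p.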
gathered_tensor = indexed_all_to_all_v(
tensor, indices, sizes, dim=0, use_fp32=True, group=None
)
expected_size_along_dim = sum([sizes[r][rank] for r in range(world_size)])
expected_tensor = torch.ones(
(expected_size_along_dim, tensor_dim),
device=f"cuda:{rank}",
dtype=torch.float32,
) * (rank + 1)
assert torch.allclose(expected_tensor, gathered_tensor)
grad_out = torch.ones_like(gathered_tensor) * (-1)
gathered_tensor.backward(gradient=grad_out)
expected_grad = torch.ones_like(tensor) * (-1)
assert torch.allclose(tensor.grad, expected_grad)
del os.environ["RANK"]
del os.environ["WORLD_SIZE"]
del os.environ["MASTER_ADDR"]
del os.environ["MASTER_PORT"]
DistributedManager.cleanup()
def run_test_autograd_prim(func):
num_gpus = torch.cuda.device_count()
assert num_gpus >= 2, "Not enough GPUs available for test"
world_size = 2
torch.multiprocessing.spawn(
func,
args=(world_size,),
nprocs=world_size,
start_method="spawn",
)
@pytest.mark.multigpu
def test_scatter_v():
run_test_autograd_prim(run_test_scatter_v)
@pytest.mark.multigpu
def test_gather_v():
run_test_autograd_prim(run_test_gather_v)
@pytest.mark.multigpu
def test_all_gather_v():
run_test_autograd_prim(run_test_all_gather_v)
@pytest.mark.multigpu
def test_indexed_all_to_all_v_v():
run_test_autograd_prim(run_test_indexed_all_to_all_v)
if __name__ == "__main__":
pytest.main([__file__])
|
modulus-main
|
test/distributed/test_autograd.py
|
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import pytest
import torch
import torch.nn as nn
try:
    import onnxruntime as ort
except ImportError:
    ort = None
from pathlib import Path
from modulus.models.mlp import FullyConnected
from modulus.deploy.onnx import export_to_onnx_stream, run_onnx_inference
Tensor = torch.Tensor
logger = logging.getLogger(__name__)
# TODO(akamenev): remove once the bug below is fixed.
# Version "1.14.0" is the custom local build where the bug is fixed.
def check_ort_version():
if ort is None:
return pytest.mark.skipif(
True,
reason="Proper ONNX runtime is not installed. 'pip install onnxruntime onnxruntime_gpu'",
)
elif ort.__version__ != "1.15.1":
return pytest.mark.skipif(
True,
reason="Must install custom ORT 1.15.1. Other versions do not work \
due to bug in IRFFT: https://github.com/microsoft/onnxruntime/issues/13236",
)
else:
return pytest.mark.skipif(False, reason="")
@pytest.fixture(params=["modulus", "pytorch"])
def model(request) -> str:
# Create fully-connected NN to test exporting
if request.param == "modulus":
# Modulus version with meta data
model = FullyConnected(
in_features=32,
out_features=8,
num_layers=1,
layer_size=8,
)
else:
# PyTorch version
model = nn.Sequential(
nn.Linear(32, 8),
nn.ReLU(),
nn.Linear(8, 8),
)
return model
@check_ort_version()
@pytest.mark.parametrize("device", ["cuda:0", "cpu"])
def test_onnx_bytestream(device, model, rtol: float = 1e-3, atol: float = 1e-3):
"""Test Modulus' export onnx stream function is consistent with file saving"""
model = model.to(device)
bsize = 8
invar = torch.randn(bsize, 32).to(device)
outvar = model(invar)
onnx_name = "model.onnx"
# Run ONNX using standard export to file approach
model = model.eval().cpu()
onnx_in_args = invar.cpu()
torch.onnx.export(
model.cpu(),
onnx_in_args,
onnx_name,
operator_export_type=torch.onnx.OperatorExportTypes.ONNX,
opset_version=15,
verbose=False,
)
outvar_ort_file = run_onnx_inference(onnx_name, invar, device=device)
assert len(outvar_ort_file) == 1
outvar_ort_file = torch.Tensor(outvar_ort_file[0]).to(device)
# Run ONNX using built in stream util in Modulus
onnx_stream = export_to_onnx_stream(model, invar, verbose=False)
outvar_ort = run_onnx_inference(onnx_stream, invar, device=device)
assert len(outvar_ort) == 1
outvar_ort = torch.Tensor(outvar_ort[0]).to(device)
# Delete onnx model file
Path(onnx_name).unlink(missing_ok=False)
assert torch.allclose(outvar, outvar_ort_file, rtol, atol)
assert torch.allclose(outvar, outvar_ort, rtol, atol)
|
modulus-main
|
test/deploy/test_onnx_utils.py
|
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import pytest
import torch
import torch.fft
import torch.nn as nn
import torch.onnx
import torch.onnx.utils
import modulus.models.layers.fft as fft
try:
    import onnxruntime as ort
except ImportError:
    ort = None
from typing import Tuple
from modulus.deploy.onnx import export_to_onnx_stream, run_onnx_inference
Tensor = torch.Tensor
logger = logging.getLogger(__name__)
# TODO(akamenev): remove once the bug below is fixed.
# Version "1.14.0" is the custom local build where the bug is fixed.
def check_ort_version():
if ort is None:
return pytest.mark.skipif(
True,
reason="Proper ONNX runtime is not installed. 'pip install onnxruntime onnxruntime_gpu'",
)
elif ort.__version__ != "1.15.1":
return pytest.mark.skipif(
True,
reason="Must install custom ORT 1.15.1. Other versions do not work \
due to bug in IRFFT: https://github.com/microsoft/onnxruntime/issues/13236",
)
else:
return pytest.mark.skipif(False, reason="")
@pytest.fixture
def test_data() -> Tensor:
# Simple input with 3 signals which contain non-zero DC, real and imaginary parts.
# fmt: off
x = torch.tensor([
[1.0, 0.0, -1.0, 0.0],
[2.0, 0.0, 2.0, 0.0],
[0.0, -1.0, 0.0, 1.0]
])
# fmt: on
# Return as NHW.
return x.unsqueeze(0)
@pytest.fixture(params=[1, 2])
def test_data_2(request, test_data: Tensor) -> Tensor:
num_c = request.param
# To NHWC with identical channels.
return test_data.tile(1, num_c, 1, 1).permute(0, 2, 3, 1)
@pytest.fixture(params=["forward", "backward", "ortho"])
def norm(request) -> str:
return request.param
@pytest.mark.parametrize("dft_dim", [-1, 1])
def test_rfft_onnx_op(
test_data: Tensor, norm: str, dft_dim: int, rtol: float = 1e-5, atol: float = 1e-5
):
"""Test RFFT onnx forward operation is consistent with torch rfft"""
# Swap last dim with requested, if needed.
x = test_data.transpose(-1, dft_dim)
y_expected = torch.fft.rfft(x, dim=dft_dim, norm=norm)
y_actual = fft.rfft(x, dim=dft_dim, norm=norm)
assert torch.allclose(y_actual, y_expected, rtol, atol)
@check_ort_version()
@pytest.mark.parametrize("dft_dim", [-1, 1])
def test_rfft_ort_op(
test_data: Tensor, norm: str, dft_dim: int, rtol: float = 1e-5, atol: float = 1e-5
):
"""Test RFFT onnx runtime operation is consistent with torch rfft"""
x = test_data.transpose(-1, dft_dim)
class CustomRfft(nn.Module):
def forward(self, x):
return fft.rfft(x, dim=dft_dim, norm=norm)
model = CustomRfft()
output = model(x)
onnx_model = export_to_onnx_stream(model, x)
output_ort = run_onnx_inference(onnx_model, (x,))
assert len(output_ort) == 1
output_onnx = torch.Tensor(output_ort[0])
output_onnx = torch.view_as_complex(output_onnx)
assert torch.allclose(output, output_onnx, rtol, atol)
@pytest.mark.parametrize("dft_dim", [(-2, -1), (1, 2)])
def test_rfft2_onnx_op(
test_data_2: Tensor,
norm: str,
dft_dim: Tuple[int],
rtol: float = 1e-5,
atol: float = 1e-5,
):
"""Test 2D RFFT onnx forward operation is consistent with torch rfft2"""
x = test_data_2
# Swap dims from right to left.
x = x.transpose(2, dft_dim[-1]).transpose(1, dft_dim[-2])
y_expected = torch.fft.rfft2(x, dim=dft_dim, norm=norm)
y_actual = fft.rfft2(x, dim=dft_dim, norm=norm)
assert torch.allclose(y_actual, y_expected, rtol, atol)
@check_ort_version()
@pytest.mark.parametrize("dft_dim", [(-2, -1), (1, 2)])
def test_rfft2_ort_op(
test_data_2: Tensor,
norm: str,
dft_dim: Tuple[int],
rtol: float = 1e-5,
atol: float = 1e-5,
):
"""Test 2D RFFT onnx runtime operation is consistent with torch rfft2"""
x = test_data_2
x = x.transpose(2, dft_dim[-1]).transpose(1, dft_dim[-2])
class CustomRfft2(nn.Module):
def forward(self, x):
return fft.rfft2(x, dim=dft_dim, norm=norm)
model = CustomRfft2()
output = model(x)
onnx_model = export_to_onnx_stream(model, x)
output_ort = run_onnx_inference(onnx_model, (x,))
assert len(output_ort) == 1
output_onnx = torch.Tensor(output_ort[0])
output_onnx = torch.view_as_complex(output_onnx)
assert torch.allclose(output, output_onnx, rtol, atol)
@pytest.mark.parametrize("dft_dim", [-1, 1])
def test_irfft_onnx_op(
test_data: Tensor, norm: str, dft_dim: int, rtol: float = 1e-5, atol: float = 1e-5
):
"""Test IRFFT onnx forward operation is consistent with torch irfft"""
x = test_data.transpose(-1, dft_dim)
y = fft.rfft(x, dim=dft_dim, norm=norm)
x_actual = fft.irfft(y, dim=dft_dim, norm=norm)
assert torch.allclose(x_actual, x, rtol, atol)
@check_ort_version()
@pytest.mark.parametrize("dft_dim", [-1, 1])
def test_irfft_ort_op(
test_data: Tensor, norm: str, dft_dim: int, rtol: float = 1e-5, atol: float = 1e-5
):
"""Test IRFFT onnx runtime operation is consistent with torch irfft"""
x = test_data.transpose(-1, dft_dim)
x = fft.rfft(x, dim=dft_dim, norm=norm)
class CustomIrfft(nn.Module):
def forward(self, y):
return fft.irfft(y, dim=dft_dim, norm=norm)
model = CustomIrfft()
output = model(x)
x0 = torch.view_as_real(x)
onnx_model = export_to_onnx_stream(model, x0)
output_ort = run_onnx_inference(onnx_model, (x0,))
assert len(output_ort) == 1
output_onnx = torch.Tensor(output_ort[0])
assert torch.allclose(output, output_onnx, rtol, atol)
@pytest.mark.parametrize("dft_dim", [(-2, -1), (1, 2)])
def test_irfft2_onnx_op(
test_data_2: Tensor,
norm: str,
dft_dim: Tuple[int],
rtol: float = 1e-5,
atol: float = 1e-5,
):
"""Test 2D IRFFT onnx forward operation is consistent with torch irfft2"""
x = test_data_2
x = x.transpose(2, dft_dim[-1]).transpose(1, dft_dim[-2])
y = fft.rfft2(x, dim=dft_dim, norm=norm)
x_actual = fft.irfft2(y, dim=dft_dim, norm=norm)
assert torch.allclose(x_actual, x, rtol, atol)
@check_ort_version()
@pytest.mark.parametrize("dft_dim", [(-2, -1), (1, 2)])
def test_irfft2_ort_op(
test_data_2: Tensor,
norm: str,
dft_dim: Tuple[int],
rtol: float = 1e-5,
atol: float = 1e-5,
):
"""Test 2D IRFFT onnx runtime operation is consistent with torch irfft2"""
x = test_data_2
x = x.transpose(2, dft_dim[-1]).transpose(1, dft_dim[-2])
x = fft.rfft2(x, dim=dft_dim, norm=norm)
class CustomIrfft(nn.Module):
def forward(self, y):
return fft.irfft2(y, dim=dft_dim, norm=norm)
model = CustomIrfft()
output = model(x)
x0 = torch.view_as_real(x)
onnx_model = export_to_onnx_stream(model, x0)
output_ort = run_onnx_inference(onnx_model, (x0,))
assert len(output_ort) == 1
output_onnx = torch.Tensor(output_ort[0])
assert torch.allclose(output, output_onnx, rtol, atol)
@check_ort_version()
def test_roundtrip_ort(test_data_2: Tensor, rtol: float = 1e-5, atol: float = 1e-5):
"""Tests model with rfft2 and irfft2 combined in ORT session"""
x = test_data_2
class Roundtrip(nn.Module):
def forward(self, x):
y = fft.rfft2(x, dim=(1, 2), norm="backward")
return fft.irfft2(y, dim=(1, 2), norm="backward")
model = Roundtrip()
output = model(x)
onnx_model = export_to_onnx_stream(model, x)
output_ort = run_onnx_inference(onnx_model, (x,))
assert len(output_ort) == 1
output_onnx = torch.Tensor(output_ort[0])
assert torch.allclose(output, output_onnx, rtol, atol)
@check_ort_version()
def test_complex_ort_op(test_data: Tensor, rtol: float = 1e-5, atol: float = 1e-5):
"""Test ONNX compatible complex operations"""
x = test_data
class ComplexOps(nn.Module):
def forward(self, x):
res = fft.view_as_complex(x)
return fft.real(res), fft.imag(res)
# Stack along last dimension to get the tensor that mimics complex numbers.
x_cpl = torch.stack((x, 2 * x), dim=-1)
# Convert to PyTorch Complex dtype to get expected values.
output = torch.view_as_complex(x_cpl)
# Export to ONNX and run inference.
model = ComplexOps()
onnx_model = export_to_onnx_stream(model, x_cpl)
ort_outputs = run_onnx_inference(onnx_model, (x_cpl,))
assert len(ort_outputs) == 2
output_onnx_real = torch.Tensor(ort_outputs[0])
output_onnx_imag = torch.Tensor(ort_outputs[1])
assert torch.allclose(output.real, output_onnx_real, rtol, atol)
assert torch.allclose(output.imag, output_onnx_imag, rtol, atol)
def test_onnx_rfft_checks(test_data: Tensor):
"""ONNX rfft error checks work, padding is not supported for ONNX RFFT"""
# Should test multiple dims, but this is good enough
itest_data = torch.stack([test_data, test_data], dim=-1)
    # The original try/raise/except pattern here was self-defeating: the manual
    # AssertionError was caught by the same except clause, so the first three
    # checks could never fail. pytest.raises expresses the intended checks.
    with pytest.raises(AssertionError):
        fft._rfft_onnx(test_data, [-1, -1], dim=(-2, -1), norm="backward")
    with pytest.raises(AssertionError):
        fft._irfft_onnx(itest_data, [-1, -1], dim=(-2, -1), norm="backward")
    with pytest.raises(AssertionError):
        # size list longer than the RFFT dims should also error
        fft._rfft_onnx(test_data, [-1, -1, -1], dim=(-2, -1), norm="backward")
    # Padding is not supported for ONNX RFFT/IRFFT and should raise RuntimeError
    with pytest.raises(RuntimeError):
        fft._rfft_onnx(test_data, [16, 16], dim=(-2, -1), norm="backward")
    with pytest.raises(RuntimeError):
        fft._irfft_onnx(itest_data, [16, None], dim=(-2, -1), norm="backward")
|
modulus-main
|
test/deploy/test_onnx_fft.py
|
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import hashlib
from pathlib import Path
from modulus.utils import filesystem
def calculate_checksum(file_path):
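    """Compute the SHA-256 checksum of a file, streaming it in 8 KiB chunks."""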
sha256 = hashlib.sha256()
with open(file_path, "rb") as f:
while True:
data = f.read(8192)
if not data:
break
sha256.update(data)
calculated_checksum = sha256.hexdigest()
return calculated_checksum
def test_package(tmp_path: Path):
string = "hello"
afile = tmp_path / "a.txt"
afile.write_text(string)
path = "file://" + tmp_path.as_posix()
package = filesystem.Package(path, seperator="/")
path = package.get("a.txt")
with open(path) as f:
ans = f.read()
assert ans == string
def test_http_package():
test_url = "http://raw.githubusercontent.com/NVIDIA/modulus/main/docs/img"
package = filesystem.Package(test_url, seperator="/")
path = package.get("modulus-pipes.jpg")
known_checksum = "e075b2836d03f7971f754354807dcdca51a7875c8297cb161557946736d1f7fc"
assert calculate_checksum(path) == known_checksum
|
modulus-main
|
test/utils/test_filesystem.py
|
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import pytest
import torch
import torch.nn as nn
from modulus.models.mlp import FullyConnected
from modulus.utils import StaticCaptureTraining, StaticCaptureEvaluateNoGrad
from modulus.utils.capture import _StaticCapture
try:
    from apex import optimizers
except ImportError:
    optimizers = None
Tensor = torch.Tensor
logger = logging.getLogger(__name__)
@pytest.fixture
def model():
return FullyConnected(2, 64, 2)
@pytest.fixture
def model2():
return FullyConnected(2, 32, 2)
@pytest.fixture
def logger():
logger = logging.getLogger("launch")
formatter = logging.Formatter(
"[%(asctime)s - %(name)s - %(levelname)s] %(message)s", datefmt="%H:%M:%S"
)
streamhandler = logging.StreamHandler()
streamhandler.setFormatter(formatter)
streamhandler.setLevel(logging.INFO)
logger.addHandler(streamhandler)
return logger
@pytest.mark.parametrize(
"optim_type, device",
[("pytorch", "cuda:0"), ("apex", "cuda:0"), ("pytorch", "cpu")],
)
@pytest.mark.parametrize("use_graphs", [True, False])
@pytest.mark.parametrize(
"use_amp, amp_type",
[(True, torch.float16), (True, torch.bfloat16), (False, torch.float16)],
)
def test_capture_training(
model,
logger,
device,
optim_type,
use_graphs,
use_amp,
amp_type,
):
model = model.to(device)
input = torch.rand(8, 2).to(device)
output = torch.rand(8, 2).to(device)
# Set up optimizer
if optim_type == "pytorch":
optim = torch.optim.Adam(model.parameters(), lr=0.001)
else:
if optimizers:
optim = optimizers.FusedAdam(model.parameters(), lr=0.001)
else:
logger.warn("Apex not installed, skipping fused Adam tests")
return
# Create training step function with optimization wrapper
@StaticCaptureTraining(
model=model,
optim=optim,
logger=logger,
use_graphs=use_graphs,
use_amp=use_amp,
cuda_graph_warmup=1,
amp_type=amp_type,
)
def training_step(invar, outvar):
predvar = model(invar)
loss = torch.sum(torch.pow(predvar - outvar, 2))
return loss
# Sample training loop
for i in range(3):
loss = training_step(input, output)
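        # copy_ refreshes the data in place; CUDA graph replay requires the same static input tensor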
input.copy_(torch.rand(8, 2).to(device))
assert loss > 0, "MSE loss should always be larger than zero"
# Test control via meta data
model.meta.cuda_graphs = use_graphs
model.meta.amp_gpu = use_amp
model.meta.amp_cpu = use_amp
# Create training step function with optimization wrapper
@StaticCaptureTraining(
model=model,
optim=optim,
logger=logger,
cuda_graph_warmup=1,
)
def training_step(invar, outvar):
predvar = model(invar)
loss = torch.sum(torch.pow(predvar - outvar, 2))
return loss
# Sample training loop
for i in range(3):
loss = training_step(input, output)
input.copy_(torch.rand(8, 2).to(device))
assert loss > 0, "MSE loss should always be larger than zero"
@pytest.mark.parametrize("device", ["cuda:0", "cpu"])
@pytest.mark.parametrize("use_graphs", [True, False])
@pytest.mark.parametrize(
"use_amp, amp_type",
[(True, torch.float16), (True, torch.bfloat16), (False, torch.float16)],
)
def test_capture_evaluate(
model,
logger,
device,
use_graphs,
use_amp,
amp_type,
):
model = model.to(device)
input = torch.rand(8, 2).to(device)
# Create eval step function with optimization wrapper
@StaticCaptureEvaluateNoGrad(
model=model,
logger=logger,
use_graphs=use_graphs,
use_amp=use_amp,
cuda_graph_warmup=1,
amp_type=amp_type,
)
def eval_step(invar):
predvar = model(invar)
return predvar
# Sample eval loop
for i in range(3):
predvar = eval_step(input)
input.copy_(torch.rand(8, 2).to(device))
assert predvar.shape == torch.Size((8, 2))
def test_capture_errors():
# Test fail cases when capture should error
model = torch.nn.Sequential(
nn.Linear(2, 16),
nn.ReLU(),
nn.Linear(16, 2),
)
try:
StaticCaptureEvaluateNoGrad(model=model)
raise AssertionError(
"Static capture should error if model is not Modulus.Module"
)
except ValueError:
pass
@pytest.mark.parametrize("device", ["cuda:0", "cpu"])
def test_capture_scaler_checkpointing(model, model2, device):
# Testing the class variables of AMP grad scaler for checkpointing
#
model = model.to(device)
model2 = model2.to(device)
optim = torch.optim.Adam(model.parameters(), lr=0.001)
optim2 = torch.optim.Adam(model2.parameters(), lr=0.001)
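    # Start from a clean class-level scaler registry so other tests cannot leak state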
_StaticCapture.reset_state()
    # Test if it can ignore invalid scaler state dicts
capture1 = StaticCaptureTraining(model=model, optim=optim)
capture2 = StaticCaptureTraining(model=model2, optim=optim2)
state_dict = _StaticCapture.state_dict().copy()
# Reset state
del capture1
del capture2
_StaticCapture.reset_state()
# Load state dict
_StaticCapture.load_state_dict(state_dict)
capture1 = StaticCaptureTraining(model=model, optim=optim)
capture2 = StaticCaptureTraining(model=model2, optim=optim2)
assert state_dict == _StaticCapture.state_dict()
@pytest.mark.parametrize("device", ["cuda:0"])
def test_capture_scaler_checkpointing_ordering(model, model2, device):
# Testing the class variables of AMP grad scaler for checkpointing
#
model = model.to(device)
model2 = model2.to(device)
optim = torch.optim.Adam(model.parameters(), lr=0.001)
optim2 = torch.optim.Adam(model2.parameters(), lr=0.001)
_StaticCapture.reset_state()
# Hard code some non-default attributes for testing
capture1a = StaticCaptureTraining(model=model, optim=optim, label="capture1")
capture1a.scaler._init_scale = 2.0
capture1a.scaler._growth_factor = 1.0
capture2a = StaticCaptureTraining(model=model2, optim=optim2, label="capture2")
capture2a.scaler._init_scale = 3.0
capture2a.scaler._growth_factor = 4.0
state_dict = _StaticCapture.state_dict().copy()
# Reset state
_StaticCapture.reset_state()
# Create new captures and make sure they are not the same
# Change instantiation order
capture2b = StaticCaptureTraining(model=model2, optim=optim2, label="capture2")
capture1b = StaticCaptureTraining(model=model, optim=optim, label="capture1")
assert not capture1a.scaler.state_dict() == capture1b.scaler.state_dict()
assert not capture2a.scaler.state_dict() == capture2b.scaler.state_dict()
# Load state dict
_StaticCapture.load_state_dict(state_dict)
    # Compare against the restored scaler state
assert capture1a.scaler.state_dict() == capture1b.scaler.state_dict()
assert capture2a.scaler.state_dict() == capture2b.scaler.state_dict()
|
modulus-main
|
test/utils/test_capture.py
|
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from io import StringIO
from contextlib import redirect_stdout
from modulus.utils.sfno.logging_utils import config_logger, disable_logging
def test_disable_logging():
log_buffer = StringIO()
with redirect_stdout(log_buffer):
config_logger()
with disable_logging():
logging.info("This message should not appear")
log_content = log_buffer.getvalue()
assert (
"This message should not appear" not in log_content
), "Disabled log message found in log_content"
|
modulus-main
|
test/utils/sfno/test_logging.py
|
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import torch
import numpy as np
from modulus.utils.sfno.img_utils import PeriodicPad2d, reshape_fields
def test_PeriodicPad2d():
pad_width = 1
pad = PeriodicPad2d(pad_width)
# Create a tensor with shape (batch_size, channels, height, width) = (1, 1, 3, 3)
tensor = torch.Tensor([[[[1, 2, 3], [4, 5, 6], [7, 8, 9]]]])
padded_tensor = pad(tensor)
# Check if padding is correctly applied
assert padded_tensor.shape == torch.Size(
[1, 1, 5, 5]
), "Padding was not applied correctly"
def test_reshape_fields():
# Define a class to mock the params
class MockParams:
pass
# Create a mock params object
params = MockParams()
params.in_channels = [0, 1]
params.out_channels = [0, 1]
params.min_path = "min_mock.npy"
params.max_path = "max_mock.npy"
params.global_means_path = "global_means_mock.npy"
params.global_stds_path = "global_stds_mock.npy"
params.normalization = None
params.add_grid = False
params.gridtype = None
params.n_grid_channels = None
params.roll = False
# Create mock npy files
np.save(params.min_path, np.zeros((1, 2)))
np.save(params.max_path, np.ones((1, 2)))
np.save(params.global_means_path, np.zeros((1, 2)))
np.save(params.global_stds_path, np.ones((1, 2)))
# Create a numpy array for the test
img = np.ones((2, 2, 3, 3)) # shape (n_history+1, c, h, w)
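    # History and channel dims flatten together: (2 * 2, 3, 3) -> (4, 3, 3)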
# Call the function under test
reshaped_img = reshape_fields(
img, "inp", None, None, 0, 0, params, 0, False, normalize=False
)
# Check if the output shape is as expected
assert reshaped_img.shape == torch.Size(
[4, 3, 3]
), "reshape_fields did not return the expected shape"
# Remove mock npy files
os.remove(params.min_path)
os.remove(params.max_path)
os.remove(params.global_means_path)
os.remove(params.global_stds_path)
|
modulus-main
|
test/utils/sfno/test_img_utils.py
|
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from torch import nn
from torch.optim import lr_scheduler as lrs
from modulus.utils.sfno.warmup_scheduler import WarmupScheduler
def test_warmup_scheduler():
"""test warmup scheduler"""
param = nn.Parameter(torch.zeros((10), dtype=torch.float))
opt = torch.optim.Adam([param], lr=0.5)
start_lr = 0.01
num_warmup = 10
num_steps = 20
main_scheduler = lrs.CosineAnnealingLR(opt, num_steps, eta_min=0)
scheduler = WarmupScheduler(main_scheduler, num_warmup, start_lr)
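    # Warm up for num_warmup steps, then the cosine schedule decays the LR to zero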
for epoch in range(num_steps + num_warmup):
scheduler.step()
sd = scheduler.state_dict()
scheduler.load_state_dict(sd)
assert torch.allclose(torch.tensor(scheduler.get_last_lr()[0]), torch.tensor(0.0))
|
modulus-main
|
test/utils/sfno/test_warmup_scheduler.py
|
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
from modulus.utils.sfno.YParams import ParamsBase, YParams
def test_ParamsBase():
p = ParamsBase()
p["foo"] = "bar"
assert p["foo"] == "bar"
assert p.foo == "bar"
assert p.get("foo") == "bar"
assert p.get("not_existing_key", "default_value") == "default_value"
assert "foo" in p
assert p.to_dict() == {"foo": "bar"}
def test_ParamsBase_from_json(tmp_path):
d = {"foo": "bar", "baz": 123}
p = tmp_path / "params.json"
p.write_text(json.dumps(d))
params = ParamsBase.from_json(p)
assert params["foo"] == "bar"
assert params["baz"] == 123
def test_YParams(tmp_path):
yaml_content = """
config1:
foo: bar
baz: 123
"""
p = tmp_path / "params.yaml"
p.write_text(yaml_content)
params = YParams(p, "config1")
assert params["foo"] == "bar"
assert params["baz"] == 123
assert params._yaml_filename == p
assert params._config_name == "config1"
|
modulus-main
|
test/utils/sfno/test_yparams.py
|
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
import pytest
from modulus.utils.graphcast.graph_utils import latlon2xyz, xyz2latlon
@pytest.mark.parametrize("latlon", [[-27.0, 48.0], [0, 0], [62.0, -45.0]])
def test_coordinate_transform(latlon):
"""Test coordinate transformation from latlon to xyz and back."""
latlon = torch.tensor([latlon], dtype=torch.float)
xyz = latlon2xyz(latlon)
latlon_recovered = xyz2latlon(xyz)
assert torch.allclose(
latlon, latlon_recovered
), f"coordinate transformation failed, {latlon} != {latlon_recovered}"
|
modulus-main
|
test/utils/graphcast/test_coordinate_transform.py
|
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from modulus.utils.graphcast.loss import (
CustomCellAreaWeightedLossFunction,
CellAreaWeightedLossFunction,
)
def test_loss():
"""Tests if the custom loss function is equivalent to the default loss function."""
pred1 = torch.rand(1, 2, 721, 1440, device="cuda")
target1 = torch.rand(1, 2, 721, 1440, device="cuda")
area = torch.rand(721, 1440, device="cuda")
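    # 721 x 1440 corresponds to the 0.25-degree global lat-lon grid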
default_loss = CellAreaWeightedLossFunction(area)
custom_loss = CustomCellAreaWeightedLossFunction(area)
pred2 = pred1.clone().detach()
target2 = target1.clone().detach()
pred1.requires_grad_()
pred2.requires_grad_()
loss1 = default_loss(pred1, target1)
loss1.backward()
grad1 = pred1.grad
loss2 = custom_loss(pred2, target2)
loss2.backward()
grad2 = pred2.grad
atol = 1.0e-7
loss_diff = torch.abs(loss1 - loss2)
loss_diff_msg = (
f"loss diff - min/max/mean: {loss_diff.min()} / "
f"{loss_diff.max()} / {loss_diff.mean()}"
)
grad_diff = torch.abs(grad1 - grad2)
grad_diff_msg = (
f"grad diff - min/max/mean: {grad_diff.min()} / "
f"{grad_diff.max()} / {grad_diff.mean()}"
)
assert torch.allclose(loss1, loss2, atol=atol), loss_diff_msg + " for loss"
assert torch.allclose(grad1, grad2, atol=atol), grad_diff_msg + " for gradient"
|
modulus-main
|
test/utils/graphcast/test_loss.py
|
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
import torch.nn as nn
import pytest
import random
import modulus.models.layers as layers
class SpectralConv4d(nn.Module):
"""Spectral 4D layer from https://github.com/gegewen/nested-fno/blob/main/FNO4D.py"""
def __init__(self, in_channels, out_channels, modes1, modes2, modes3, modes4):
        """
        4D Fourier layer. It does FFT, linear transform, and Inverse FFT.
        """
        super().__init__()
self.in_channels = in_channels
self.out_channels = out_channels
self.modes1 = (
modes1 # Number of Fourier modes to multiply, at most floor(N/2) + 1
)
self.modes2 = modes2
self.modes3 = modes3
self.modes4 = modes4
self.scale = 1 / (in_channels * out_channels)
self.weights1 = nn.Parameter(
self.scale
* torch.rand(
in_channels,
out_channels,
self.modes1,
self.modes2,
self.modes3,
self.modes4,
dtype=torch.cfloat,
)
)
self.weights2 = nn.Parameter(
self.scale
* torch.rand(
in_channels,
out_channels,
self.modes1,
self.modes2,
self.modes3,
self.modes4,
dtype=torch.cfloat,
)
)
self.weights3 = nn.Parameter(
self.scale
* torch.rand(
in_channels,
out_channels,
self.modes1,
self.modes2,
self.modes3,
self.modes4,
dtype=torch.cfloat,
)
)
self.weights4 = nn.Parameter(
self.scale
* torch.rand(
in_channels,
out_channels,
self.modes1,
self.modes2,
self.modes3,
self.modes4,
dtype=torch.cfloat,
)
)
self.weights5 = nn.Parameter(
self.scale
* torch.rand(
in_channels,
out_channels,
self.modes1,
self.modes2,
self.modes3,
self.modes4,
dtype=torch.cfloat,
)
)
self.weights6 = nn.Parameter(
self.scale
* torch.rand(
in_channels,
out_channels,
self.modes1,
self.modes2,
self.modes3,
self.modes4,
dtype=torch.cfloat,
)
)
self.weights7 = nn.Parameter(
self.scale
* torch.rand(
in_channels,
out_channels,
self.modes1,
self.modes2,
self.modes3,
self.modes4,
dtype=torch.cfloat,
)
)
self.weights8 = nn.Parameter(
self.scale
* torch.rand(
in_channels,
out_channels,
self.modes1,
self.modes2,
self.modes3,
self.modes4,
dtype=torch.cfloat,
)
)
# Complex multiplication
def compl_mul4d(self, input, weights):
        # (batch, in_channel, x, y, z, t), (in_channel, out_channel, x, y, z, t) -> (batch, out_channel, x, y, z, t)
return torch.einsum("bixyzt,ioxyzt->boxyzt", input, weights)
def forward(self, x):
batchsize = x.shape[0]
        # Compute Fourier coefficients up to a factor of e^(- something constant)
x_ft = torch.fft.rfftn(x, dim=[-4, -3, -2, -1])
# Multiply relevant Fourier modes
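        # The last FFT dim keeps only non-negative frequencies, so the retained low modes
        # form 2^3 = 8 corner blocks over the other three dims, one weight tensor per block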
out_ft = torch.zeros(
batchsize,
self.out_channels,
x.size(-4),
x.size(-3),
x.size(-2),
x.size(-1) // 2 + 1,
dtype=torch.cfloat,
device=x.device,
)
out_ft[
:, :, : self.modes1, : self.modes2, : self.modes3, : self.modes4
] = self.compl_mul4d(
x_ft[:, :, : self.modes1, : self.modes2, : self.modes3, : self.modes4],
self.weights1,
)
out_ft[
:, :, -self.modes1 :, : self.modes2, : self.modes3, : self.modes4
] = self.compl_mul4d(
x_ft[:, :, -self.modes1 :, : self.modes2, : self.modes3, : self.modes4],
self.weights2,
)
out_ft[
:, :, : self.modes1, -self.modes2 :, : self.modes3, : self.modes4
] = self.compl_mul4d(
x_ft[:, :, : self.modes1, -self.modes2 :, : self.modes3, : self.modes4],
self.weights3,
)
out_ft[
:, :, : self.modes1, : self.modes2, -self.modes3 :, : self.modes4
] = self.compl_mul4d(
x_ft[:, :, : self.modes1, : self.modes2, -self.modes3 :, : self.modes4],
self.weights4,
)
out_ft[
:, :, -self.modes1 :, -self.modes2 :, : self.modes3, : self.modes4
] = self.compl_mul4d(
x_ft[:, :, -self.modes1 :, -self.modes2 :, : self.modes3, : self.modes4],
self.weights5,
)
out_ft[
:, :, -self.modes1 :, : self.modes2, -self.modes3 :, : self.modes4
] = self.compl_mul4d(
x_ft[:, :, -self.modes1 :, : self.modes2, -self.modes3 :, : self.modes4],
self.weights6,
)
out_ft[
:, :, : self.modes1, -self.modes2 :, -self.modes3 :, : self.modes4
] = self.compl_mul4d(
x_ft[:, :, : self.modes1, -self.modes2 :, -self.modes3 :, : self.modes4],
self.weights7,
)
out_ft[
:, :, -self.modes1 :, -self.modes2 :, -self.modes3 :, : self.modes4
] = self.compl_mul4d(
x_ft[:, :, -self.modes1 :, -self.modes2 :, -self.modes3 :, : self.modes4],
self.weights8,
)
# Return to physical space
x = torch.fft.irfftn(out_ft, s=(x.size(-4), x.size(-3), x.size(-2), x.size(-1)))
return x
@pytest.mark.parametrize("device", ["cuda:0", "cpu"])
@pytest.mark.parametrize("dimension", [1, 2, 3])
def test_conv_nd(device, dimension):
"""compare output of ConvNdKernel1Layer with that of layer for specfic n_dim"""
bsize = 8
in_channels = 4
out_channels = 2
tens_size = 16
conv_nd = layers.ConvNdKernel1Layer(in_channels, out_channels).to(device)
ini_w, ini_b = random.uniform(0, 1), random.uniform(0, 1)
if dimension == 1:
invar = torch.randn(bsize, in_channels, tens_size).to(device)
comp_nn = nn.Conv1d(in_channels, out_channels, kernel_size=1, bias=True).to(
device
)
elif dimension == 2:
invar = torch.randn(bsize, in_channels, tens_size, tens_size).to(device)
comp_nn = nn.Conv2d(in_channels, out_channels, kernel_size=1, bias=True).to(
device
)
elif dimension == 3:
invar = torch.randn(bsize, in_channels, tens_size, tens_size, tens_size).to(
device
)
comp_nn = nn.Conv3d(in_channels, out_channels, kernel_size=1, bias=True).to(
device
)
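    # Constant weight/bias init makes both layers numerically identical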
nn.init.constant_(conv_nd.conv.bias, ini_b)
nn.init.constant_(conv_nd.conv.weight, ini_w)
nn.init.constant_(comp_nn.bias, ini_b)
nn.init.constant_(comp_nn.weight, ini_w)
with torch.no_grad():
assert torch.allclose(
conv_nd(invar), comp_nn(invar), rtol=1e-06, atol=1e-06
), f"ConvNdKernel1Layer output not identical to that of layer specific for {dim}d fields :("
@pytest.mark.parametrize("device", ["cuda:0", "cpu"])
@pytest.mark.parametrize("dimension", [1, 2, 3])
def test_conv_ndfc(device, dimension):
"""compare output of ConvNdFCLayer with that of layer for specfic n_dim"""
bsize = 8
in_channels = 4
out_channels = 2
tens_size = 16
conv_nd = layers.ConvNdFCLayer(in_channels, out_channels).to(device)
if dimension == 1:
invar = torch.randn(bsize, in_channels, tens_size).to(device)
comp_nn = layers.Conv1dFCLayer(in_channels, out_channels).to(device)
elif dimension == 2:
invar = torch.randn(bsize, in_channels, tens_size, tens_size).to(device)
comp_nn = layers.Conv2dFCLayer(in_channels, out_channels).to(device)
elif dimension == 3:
invar = torch.randn(bsize, in_channels, tens_size, tens_size, tens_size).to(
device
)
comp_nn = layers.Conv3dFCLayer(in_channels, out_channels).to(device)
# initialise weights, biases
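    # Seeding before each reset makes both modules draw identical random parameters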
torch.manual_seed(0)
conv_nd.reset_parameters()
torch.manual_seed(0)
comp_nn.reset_parameters()
with torch.no_grad():
assert torch.allclose(
conv_nd(invar), comp_nn(invar), rtol=1e-06, atol=1e-06
), f"ConvNdFCLayer output not identical to that of layer specific for {dim}d fields :("
@pytest.mark.parametrize("device", ["cuda:0", "cpu"])
def test_spec_conv_4d(device):
"""compare output of SpectralConv4d with that of layer used in literature."""
bsize = 8
in_channels = 8
out_channels = 4
tens_size = 16
fno_modes = 6
torch.manual_seed(0)
spec_conv_orig = SpectralConv4d(
in_channels, out_channels, fno_modes, fno_modes, fno_modes, fno_modes
).to(device)
torch.manual_seed(0)
spec_conv_modulus = layers.SpectralConv4d(
in_channels, out_channels, fno_modes, fno_modes, fno_modes, fno_modes
).to(device)
invar = torch.randn(
bsize, in_channels, tens_size, tens_size, tens_size, tens_size
).to(device)
with torch.no_grad():
assert torch.allclose(
spec_conv_orig(invar), spec_conv_modulus(invar), rtol=1e-06, atol=1e-06
), f"SpectralConv4d output not identical to that of refrence layer"
|
modulus-main
|
test/models/test_nd_conv_layers.py
|
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
import pytest
import random
from modulus.models.srrn import SRResNet
from . import common
@pytest.mark.parametrize("device", ["cuda:0", "cpu"])
def test_super_res_net_forward(device):
"""Test super_res_net forward pass"""
torch.manual_seed(0)
# Construct super_res_net model
model_3d = SRResNet(
in_channels=1,
out_channels=1,
).to(device)
bsize = 8
invar = torch.randn(bsize, 1, 4, 4, 4).to(device)
assert common.validate_forward_accuracy(model_3d, (invar,), atol=1e-3)
@pytest.mark.parametrize("device", ["cuda:0", "cpu"])
def test_super_res_net_constructor(device):
"""Test super_res_net constructor options"""
# Define dictionary of constructor args
in_channels = [random.randint(1, 3), random.randint(1, 3)]
arg_list = [
{
"in_channels": in_channels[0],
"out_channels": in_channels[0],
"large_kernel_size": 7,
"small_kernel_size": 3,
"conv_layer_size": 4,
"n_resid_blocks": 3,
"scaling_factor": 2,
},
{
"in_channels": in_channels[1],
"out_channels": in_channels[1],
"large_kernel_size": 7,
"small_kernel_size": 3,
"conv_layer_size": 3,
"n_resid_blocks": 4,
"scaling_factor": 2,
},
]
for i, kw_args in enumerate(arg_list):
# Construct FC model
model = SRResNet(**kw_args).to(device)
bsize = random.randint(1, 16)
invar = torch.randn(bsize, in_channels[i], 8, 8, 8).to(device)
outvar = model(invar)
assert outvar.shape == (bsize, kw_args["out_channels"], 16, 16, 16)
@pytest.mark.parametrize("device", ["cuda:0", "cpu"])
def test_super_res_net_optims(device):
"""Test super_res_net optimizations"""
def setup_model():
"""Set up fresh model and inputs for each optim test"""
# Construct super_res_net model
model = SRResNet(in_channels=2, out_channels=2, scaling_factor=2).to(device)
bsize = 4
invar = torch.randn(bsize, 2, 8, 8, 8).to(device)
return model, invar
# Ideally always check graphs first
model, invar = setup_model()
assert common.validate_cuda_graphs(model, (invar,))
# Check JIT
model, invar = setup_model()
assert common.validate_jit(model, (invar,))
# Check AMP
model, invar = setup_model()
assert common.validate_amp(model, (invar,))
# Check Combo
model, invar = setup_model()
assert common.validate_combo_optims(model, (invar,))
@pytest.mark.parametrize("device", ["cuda:0", "cpu"])
def test_super_res_net_checkpoint(device):
"""Test super_res_net checkpoint save/load"""
# Construct super_res_net model
model_1 = SRResNet(
in_channels=2, out_channels=2, n_resid_blocks=3, scaling_factor=2
).to(device)
model_2 = SRResNet(
in_channels=2, out_channels=2, n_resid_blocks=3, scaling_factor=2
).to(device)
bsize = random.randint(1, 4)
invar = torch.randn(bsize, 2, 8, 8, 8).to(device)
assert common.validate_checkpoint(model_1, model_2, (invar,))
@pytest.mark.parametrize("device", ["cuda:0", "cpu"])
def test_super_res_net_deploy(device):
"""Test super_res_net deployment support"""
# Construct super_res_net model
model = SRResNet(
in_channels=1, out_channels=1, n_resid_blocks=4, scaling_factor=2
).to(device)
bsize = random.randint(1, 8)
invar = torch.randn(bsize, 1, 8, 8, 8).to(device)
assert common.validate_onnx_export(model, (invar,))
assert common.validate_onnx_runtime(model, (invar,))
|
modulus-main
|
test/models/test_super_res_net.py
|
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
import pytest
import random
from modulus.models.mlp import FullyConnected
from . import common
@pytest.mark.parametrize("device", ["cuda:0", "cpu"])
def test_fully_connected_forward(device):
"""Test fully-connected forward pass"""
torch.manual_seed(0)
# Construct FC model
model = FullyConnected(
in_features=32,
out_features=8,
num_layers=1,
layer_size=8,
).to(device)
bsize = 8
invar = torch.randn(bsize, 32).to(device)
assert common.validate_forward_accuracy(model, (invar,))
@pytest.mark.parametrize("device", ["cuda:0", "cpu"])
def test_fully_connected_constructor(device):
"""Test fully-connected constructor options"""
# Define dictionary of constructor args
arg_list = [
{
"in_features": random.randint(1, 16),
"out_features": random.randint(1, 16),
"layer_size": 16,
"num_layers": 2,
"skip_connections": False,
"adaptive_activations": False,
"weight_norm": False,
},
{
"in_features": random.randint(1, 16),
"out_features": random.randint(1, 16),
"layer_size": 16,
"num_layers": 4,
"activation_fn": ["relu", "silu"],
"skip_connections": True,
"adaptive_activations": True,
"weight_norm": True,
},
]
for kw_args in arg_list:
# Construct FC model
model = FullyConnected(**kw_args).to(device)
bsize = random.randint(1, 16)
invar = torch.randn(bsize, kw_args["in_features"]).to(device)
outvar = model(invar)
assert outvar.shape == (bsize, kw_args["out_features"])
@pytest.mark.parametrize("device", ["cuda:0", "cpu"])
def test_fully_connected_optims(device):
"""Test fully-connected optimizations"""
def setup_model():
"""Set up fresh model and inputs for each optim test"""
# Construct FC model
model = FullyConnected(
in_features=32,
out_features=8,
num_layers=1,
layer_size=8,
).to(device)
bsize = random.randint(1, 16)
invar = torch.randn(bsize, 32).to(device)
return model, invar
# Ideally always check graphs first
model, invar = setup_model()
assert common.validate_cuda_graphs(model, (invar,))
# Check JIT
model, invar = setup_model()
assert common.validate_jit(model, (invar,))
# Check AMP
model, invar = setup_model()
assert common.validate_amp(model, (invar,))
# Check Combo
model, invar = setup_model()
assert common.validate_combo_optims(model, (invar,))
@pytest.mark.parametrize("device", ["cuda:0", "cpu"])
def test_fully_connected_checkpoint(device):
"""Test fully-connected checkpoint save/load"""
# Construct FC model
model_1 = FullyConnected(
in_features=4,
out_features=4,
num_layers=2,
layer_size=8,
).to(device)
model_2 = FullyConnected(
in_features=4,
out_features=4,
num_layers=2,
layer_size=8,
).to(device)
bsize = random.randint(1, 16)
invar = torch.randn(bsize, 4).to(device)
assert common.validate_checkpoint(model_1, model_2, (invar,))
@common.check_ort_version()
@pytest.mark.parametrize("device", ["cuda:0", "cpu"])
def test_fully_connected_deploy(device):
"""Test fully-connected deployment support"""
    # Construct FC model
model = FullyConnected(
in_features=4,
out_features=4,
num_layers=2,
layer_size=8,
).to(device)
bsize = random.randint(1, 4)
invar = torch.randn(bsize, 4).to(device)
assert common.validate_onnx_export(model, (invar,))
assert common.validate_onnx_runtime(model, (invar,))
|
modulus-main
|
test/models/test_fully_connected.py
|
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
import pytest
import random
import numpy as np
from modulus.models.dlwp import DLWP
from . import common
@pytest.mark.parametrize("device", ["cuda:0", "cpu"])
def test_dlwp_forward(device):
"""Test DLWP forward pass"""
torch.manual_seed(0)
# Construct model
model = DLWP(
nr_input_channels=2,
nr_output_channels=2,
nr_initial_channels=64,
activation_fn="leaky_relu",
depth=2,
clamp_activation=(None, 10.0),
).to(device)
bsize = 4
invar = torch.randn(bsize, 2, 6, 64, 64).to(device)
assert common.validate_forward_accuracy(
model, (invar,), file_name=f"dlwp_output.pth", atol=1e-3
)
@pytest.mark.parametrize("device", ["cuda:0", "cpu"])
@pytest.mark.parametrize("nr_input_channels", [2, 4])
@pytest.mark.parametrize("nr_output_channels", [2, 4])
@pytest.mark.parametrize("nr_initial_channels", [32, 64])
@pytest.mark.parametrize("depth", [2, 3, 4])
def test_dlwp_constructor(
device, nr_input_channels, nr_output_channels, nr_initial_channels, depth
):
"""Test DLWP constructor options"""
# Construct model
model = DLWP(
nr_input_channels=nr_input_channels,
nr_output_channels=nr_output_channels,
nr_initial_channels=nr_initial_channels,
depth=depth,
).to(device)
bsize = random.randint(1, 4)
invar = torch.randn(bsize, nr_input_channels, 6, 128, 128).to(device)
outvar = model(invar)
assert outvar.shape == (bsize, nr_output_channels, *invar.shape[2:])
@pytest.mark.parametrize("device", ["cuda:0", "cpu"])
def test_dlwp_optims(device):
"""Test DLWP optimizations"""
def setup_model():
"""Setups up fresh DLWP model and inputs for each optim test"""
model = DLWP(
nr_input_channels=2,
nr_output_channels=2,
).to(device)
bsize = random.randint(1, 5)
invar = torch.randn(bsize, 2, 6, 16, 16).to(device)
return model, invar
# Ideally always check graphs first
model, invar = setup_model()
assert common.validate_cuda_graphs(model, (invar,))
# Check JIT
model, invar = setup_model()
assert common.validate_jit(model, (invar,))
# Check AMP
model, invar = setup_model()
assert common.validate_amp(model, (invar,))
# Check Combo
model, invar = setup_model()
assert common.validate_combo_optims(model, (invar,))
@pytest.mark.parametrize("device", ["cuda:0", "cpu"])
def test_dlwp_checkpoint(device):
"""Test DLWP checkpoint save/load"""
# Construct DLWP models
model_1 = DLWP(
nr_input_channels=2,
nr_output_channels=2,
).to(device)
model_2 = DLWP(
nr_input_channels=2,
nr_output_channels=2,
).to(device)
bsize = random.randint(1, 2)
invar = torch.randn(bsize, 2, 6, 64, 64).to(device)
assert common.validate_checkpoint(model_1, model_2, (invar,))
@common.check_ort_version()
@pytest.mark.parametrize("device", ["cuda:0", "cpu"])
def test_dlwp_deploy(device):
"""Test DLWP deployment support"""
# Construct DLWP model
model = DLWP(
nr_input_channels=2,
nr_output_channels=2,
).to(device)
bsize = random.randint(1, 2)
invar = torch.randn(bsize, 2, 6, 64, 64).to(device)
assert common.validate_onnx_export(model, (invar,))
assert common.validate_onnx_runtime(model, (invar,))
def test_dlwp_implementation():
"""Test DLWP implementation compared to publication"""
model = DLWP(16, 12, 64, depth=2)
model_parameters = filter(lambda p: p.requires_grad, model.parameters())
params = sum([np.prod(p.size()) for p in model_parameters])
assert params == 2676376
|
modulus-main
|
test/models/test_dlwp.py
|
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import fsspec
from modulus.utils.sfno.YParams import ParamsBase
from modulus.models.fcn_mip_plugin import sfno, graphcast_34ch, _CosZenWrapper, dlwp
from modulus.utils.filesystem import Package
from modulus.models.sfno.sfnonet import SphericalFourierNeuralOperatorNet
from modulus.models.dlwp import DLWP
from modulus.models.graphcast.graph_cast_net import GraphCastNet
from pathlib import Path
import numpy as np
import datetime
import torch
import json
import shutil
import os
import pytest
def _copy_directory(src, dst):
if not os.path.exists(dst):
os.makedirs(dst)
for item in os.listdir(src):
s = os.path.join(src, item)
d = os.path.join(dst, item)
if os.path.isdir(s):
_copy_directory(s, d)
else:
shutil.copy2(s, d)
def save_ddp_checkpoint(model, check_point_path, del_device_buffer=False):
"""Save checkpoint with similar structure to the training checkpoints
The keys are prefixed with "module."
"""
model_state = {f"module.{k}": v for k, v in model.state_dict().items()}
if del_device_buffer:
# This buffer is not present in some trained model checkpoints
del model_state["module.device_buffer"]
checkpoint = {"model_state": model_state}
torch.save(checkpoint, check_point_path)
def save_checkpoint(model, check_point_path, del_device_buffer=False):
"""Save checkpoint with similar structure to the training checkpoints"""
model_state = model.state_dict()
if del_device_buffer:
        # This buffer is not present in some trained model checkpoints;
        # keys are unprefixed here, unlike the DDP variant above
        del model_state["device_buffer"]
torch.save(model_state, check_point_path)
def save_untrained_sfno(path):
config = {
"N_in_channels": 2,
"N_out_channels": 1,
"img_shape_x": 4,
"img_shape_y": 5,
"scale_factor": 1,
"num_layers": 2,
"num_blocks": 2,
"embed_dim": 2,
"nettype": "sfno",
"add_zenith": True,
}
params = ParamsBase()
params.update_params(config)
model = SphericalFourierNeuralOperatorNet(params)
config_path = path / "config.json"
with config_path.open("w") as f:
json.dump(params.to_dict(), f)
check_point_path = path / "weights.tar"
save_ddp_checkpoint(model, check_point_path, del_device_buffer=True)
url = f"file://{path.as_posix()}"
package = Package(url, seperator="/")
return package
def test_sfno(tmp_path):
package = save_untrained_sfno(tmp_path)
model = sfno(package, pretrained=True)
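    # One data channel; the plugin appends a computed cosine-zenith channel to reach N_in_channels=2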
x = torch.ones(1, 1, model.model.h, model.model.w)
time = datetime.datetime(2018, 1, 1)
with torch.no_grad():
out = model(x, time=time)
assert out.shape == x.shape
def save_untrained_dlwp(path):
config = {
"nr_input_channels": 18,
"nr_output_channels": 14,
}
model = DLWP(
nr_input_channels=config["nr_input_channels"],
nr_output_channels=config["nr_output_channels"],
)
config_path = path / "config.json"
with config_path.open("w") as f:
json.dump(config, f)
check_point_path = path / "weights.pt"
save_checkpoint(model, check_point_path, del_device_buffer=False)
url = f"file://{path.as_posix()}"
package = Package(url, seperator="/")
return package
@pytest.mark.parametrize("batch_size", [1, 4])
@pytest.mark.parametrize("device", ["cpu", "cuda"])
def test_dlwp(tmp_path, batch_size, device):
package = save_untrained_dlwp(tmp_path)
source_dir = "/data/nfs/modulus-data/plugin_data/dlwp/"
_copy_directory(source_dir, tmp_path)
model = dlwp(package, pretrained=True).to(device)
x = torch.ones(batch_size, 2, 7, 721, 1440).to(device)
time = datetime.datetime(2018, 1, 1)
with torch.no_grad():
out = model(x, time)
assert out.shape == x.shape
def save_untrained_graphcast(path):
icosphere_path = path / "icospheres.json"
config = {
"meshgraph_path": icosphere_path.as_posix(),
"static_dataset_path": None,
"input_dim_grid_nodes": 2,
"input_dim_mesh_nodes": 3,
"input_dim_edges": 4,
"output_dim_grid_nodes": 2,
"processor_layers": 3,
"hidden_dim": 2,
"do_concat_trick": True,
}
model = GraphCastNet(**config)
config_path = path / "config.json"
with config_path.open("w") as f:
json.dump(config, f)
check_point_path = path / "weights.tar"
save_ddp_checkpoint(model, check_point_path, del_device_buffer=False)
url = f"file://{path.as_posix()}"
package = Package(url, seperator="/")
return package
def test_graphcast(tmp_path):
source_dir = "/data/nfs/modulus-data/plugin_data/graphcast/"
_copy_directory(source_dir, tmp_path)
package = save_untrained_graphcast(
tmp_path
    )  # the package must be created after icospheres.json is copied
model = graphcast_34ch(package, pretrained=False)
x = torch.randn(1, 34, 721, 1440).to("cuda")
with torch.no_grad():
out = model(x)
assert out.shape == x.shape
@pytest.mark.parametrize("batch_size", [1, 2])
def test__CosZenWrapper(batch_size):
    class Identity(torch.nn.Module):
        def forward(self, x):
            return x
    model = Identity()
nx, ny = (3, 4)
lat = np.arange(nx)
lon = np.arange(ny)
x = torch.ones((batch_size, 1, nx, ny))
time = datetime.datetime(2018, 1, 1)
wrapper = _CosZenWrapper(model, lon, lat)
wrapper(x, time=time)
|
modulus-main
|
test/models/test_fcn_mip_plugin.py
|
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
|
modulus-main
|
test/models/__init__.py
|
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
import pytest
import random
from modulus.models.afno import AFNO
from . import common
@pytest.mark.parametrize("device", ["cuda:0", "cpu"])
def test_afno_forward(device):
"""Test AFNO forward pass"""
torch.manual_seed(0)
model = AFNO(
img_size=(32, 32),
in_channels=2,
out_channels=1,
patch_size=(8, 8),
embed_dim=16,
depth=2,
num_blocks=2,
).to(device)
bsize = 2
invar = torch.randn(bsize, 2, 32, 32).to(device)
# Check output size
assert common.validate_forward_accuracy(model, (invar,))
@pytest.mark.parametrize("device", ["cuda:0", "cpu"])
def test_afno_constructor(device):
"""Test AFNO constructor options"""
# Define dictionary of constructor args
arg_list = [
{
"img_size": (32, 32),
"in_channels": random.randint(1, 4),
"out_channels": random.randint(1, 4),
"patch_size": (8, 8),
"embed_dim": 4,
"depth": 2,
"num_blocks": 2,
},
{
"img_size": (8, 16),
"in_channels": random.randint(1, 4),
"out_channels": random.randint(1, 4),
"patch_size": (4, 4),
"embed_dim": 6,
"depth": 4,
"mlp_ratio": 2.0,
"drop_rate": 0.1,
"num_blocks": 1,
"sparsity_threshold": 0.05,
"hard_thresholding_fraction": 0.9,
},
]
for kw_args in arg_list:
# Construct FC model
model = AFNO(**kw_args).to(device)
bsize = random.randint(1, 16)
invar = torch.randn(
bsize,
kw_args["in_channels"],
kw_args["img_size"][0],
kw_args["img_size"][1],
).to(device)
outvar = model(invar)
assert outvar.shape == (
bsize,
kw_args["out_channels"],
kw_args["img_size"][0],
kw_args["img_size"][1],
)
# Also test failure case
try:
model = AFNO(
img_size=(32, 32),
in_channels=2,
out_channels=1,
patch_size=(8, 8),
embed_dim=7,
depth=1,
num_blocks=4,
).to(device)
raise AssertionError("Failed to error for invalid embed and block number")
except AssertionError:
pass
@pytest.mark.parametrize("device", ["cuda:0", "cpu"])
def test_afno_optims(device):
"""Test AFNO optimizations"""
def setup_model():
"""Setups up fresh AFNO model and inputs for each optim test"""
model = AFNO(
img_size=(32, 32),
in_channels=2,
out_channels=2,
patch_size=(8, 8),
embed_dim=16,
depth=2,
num_blocks=2,
).to(device)
bsize = random.randint(1, 5)
invar = torch.randn(bsize, 2, 32, 32).to(device)
return model, invar
# Ideally always check graphs first
model, invar = setup_model()
assert common.validate_cuda_graphs(model, (invar,))
# Check JIT
model, invar = setup_model()
assert common.validate_jit(model, (invar,))
# Check AMP
model, invar = setup_model()
assert common.validate_amp(model, (invar,))
# Check Combo
model, invar = setup_model()
assert common.validate_combo_optims(model, (invar,))
@pytest.mark.parametrize("device", ["cuda:0", "cpu"])
def test_afno_checkpoint(device):
"""Test AFNO checkpoint save/load"""
# Construct AFNO models
model_1 = AFNO(
img_size=(32, 32),
in_channels=2,
out_channels=2,
patch_size=(8, 8),
embed_dim=8,
depth=2,
num_blocks=2,
).to(device)
model_2 = AFNO(
img_size=(32, 32),
in_channels=2,
out_channels=2,
patch_size=(8, 8),
embed_dim=8,
depth=2,
num_blocks=2,
).to(device)
bsize = random.randint(1, 5)
invar = torch.randn(bsize, 2, 32, 32).to(device)
assert common.validate_checkpoint(model_1, model_2, (invar,))
@common.check_ort_version()
@pytest.mark.parametrize("device", ["cuda:0", "cpu"])
def test_afno_deploy(device):
"""Test AFNO deployment support"""
# Construct AFNO model
model = AFNO(
img_size=(16, 16),
in_channels=2,
out_channels=2,
patch_size=(8, 8),
embed_dim=4,
depth=1, # Small depth for onnx export speed
num_blocks=2,
).to(device)
bsize = random.randint(1, 5)
invar = torch.randn(bsize, 2, 16, 16).to(device)
assert common.validate_onnx_export(model, (invar,))
assert common.validate_onnx_runtime(model, (invar,))
|
modulus-main
|
test/models/test_afno.py
|
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
import pytest
import random
from dataclasses import dataclass
from modulus.models.module import Module, ModelMetaData
from modulus.registry import ModelRegistry
from . import common
registry = ModelRegistry()
class CustomModel(torch.nn.Module):
"""Custom User Model"""
def __init__(self, in_features, out_features):
super().__init__()
self.linear = torch.nn.Linear(in_features, out_features)
def forward(self, x):
return self.linear(x)
@dataclass
class CustomMetaData(ModelMetaData):
"""Custom User Metadata for Model"""
name: str = "FullyConnected"
# Optimization
jit: bool = True
cuda_graphs: bool = True
amp: bool = True
torch_fx: bool = True
# Inference
onnx: bool = True
onnx_runtime: bool = True
# Physics informed
func_torch: bool = True
auto_grad: bool = True
@pytest.mark.parametrize("device", ["cuda:0", "cpu"])
def test_from_torch_forward(device):
"""Test forward pass from PyTorch"""
torch.manual_seed(0)
# Construct CustomModulusModel
CustomModulusModel = Module.from_torch(CustomModel, CustomMetaData())
model = CustomModulusModel(in_features=32, out_features=8).to(device)
bsize = 8
invar = torch.randn(bsize, 32).to(device)
model(invar)
registry.__clear_registry__()
@pytest.mark.parametrize("device", ["cuda:0", "cpu"])
def test_from_torch_constructor(device):
"""Test constructor from PyTorch"""
CustomModulusModel = Module.from_torch(CustomModel, CustomMetaData())
model = CustomModulusModel(in_features=8, out_features=4).to(device)
assert isinstance(model, Module)
registry.__clear_registry__()
@pytest.mark.parametrize("device", ["cuda:0", "cpu"])
def test_from_torch_optims(device):
"""Test optimizations from PyTorch"""
def setup_model():
"""Set up fresh model and inputs for each optim test"""
# Construct CustomModulusModel
CustomModulusModel = Module.from_torch(CustomModel, CustomMetaData())
model = CustomModulusModel(in_features=32, out_features=8).to(device)
bsize = random.randint(1, 16)
invar = torch.randn(bsize, 32).to(device)
return model, invar
# Ideally always check graphs first
model, invar = setup_model()
assert common.validate_cuda_graphs(model, (invar,))
registry.__clear_registry__()
# Check JIT
model, invar = setup_model()
assert common.validate_jit(model, (invar,))
registry.__clear_registry__()
# Check AMP
model, invar = setup_model()
assert common.validate_amp(model, (invar,))
registry.__clear_registry__()
# Check Combo
model, invar = setup_model()
assert common.validate_combo_optims(model, (invar,))
registry.__clear_registry__()
@pytest.mark.parametrize("device", ["cuda:0", "cpu"])
def test_from_torch_checkpoint(device):
"""Test checkpoint save/load from PyTorch"""
# Construct CustomModulusModel
CustomModulusModel = Module.from_torch(CustomModel, CustomMetaData())
model_1 = CustomModulusModel(in_features=4, out_features=4).to(device)
model_2 = CustomModulusModel(in_features=4, out_features=4).to(device)
bsize = random.randint(1, 16)
invar = torch.randn(bsize, 4).to(device)
assert common.validate_checkpoint(model_1, model_2, (invar,))
registry.__clear_registry__()
@common.check_ort_version()
@pytest.mark.parametrize("device", ["cuda:0", "cpu"])
def test_from_torch_deploy(device):
"""Test deployment support from PyTorch"""
# Construct CustomModulusModel
CustomModulusModel = Module.from_torch(CustomModel, CustomMetaData())
model = CustomModulusModel(in_features=4, out_features=4).to(device)
bsize = random.randint(1, 4)
invar = torch.randn(bsize, 4).to(device)
assert common.validate_onnx_export(model, (invar,))
assert common.validate_onnx_runtime(model, (invar,))
registry.__clear_registry__()
|
modulus-main
|
test/models/test_from_torch.py
|
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
import pytest
import random
from modulus.models.layers.activations import Identity, Stan, SquarePlus
from modulus.models.layers.fused_silu import (
FusedSiLU,
FusedSiLU_deriv_1,
FusedSiLU_deriv_2,
FusedSiLU_deriv_3,
)
from . import common
@pytest.mark.parametrize("device", ["cuda:0", "cpu"])
def test_activation_identity(device):
"""Test identity function in layers"""
func = Identity().to(device)
# Random tensor of random size
tensor_dim = random.randint(1, 5)
tensor_size = torch.randint(low=1, high=8, size=(tensor_dim,)).tolist()
invar = torch.randn(*tensor_size, device=device)
outvar = func(invar)
assert common.compare_output(invar, outvar)
@pytest.mark.parametrize("device", ["cuda:0", "cpu"])
def test_activation_stan(device):
"""Test Stan function in layers"""
func = Stan(out_features=2).to(device)
# Doc string example handles accuracy
bsize = random.randint(1, 8)
invar = torch.randn(bsize, 2).to(device)
outvar = func(invar)
# Learnable param should be 1.0 init
tarvar = (invar + 1) * torch.tanh(invar)
assert common.compare_output(tarvar, outvar)
# Also test failure case
try:
func = Stan(out_features=random.randint(1, 3)).to(device)
invar = torch.randn(2, 4).to(device)
outvar = func(invar)
assert False, "Failed to error for invalid input feature dimension"
except ValueError:
pass
@pytest.mark.parametrize("device", ["cuda:0", "cpu"])
def test_activation_squareplus(device):
"""Test square plus function in layers"""
func = SquarePlus().to(device)
func.b = 0
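    # With b = 0, SquarePlus (x + sqrt(x^2 + b)) / 2 reduces to ReLU, so ones map to ones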
# Ones tensor of random size
tensor_dim = random.randint(1, 3)
tensor_size = torch.randint(low=1, high=4, size=(tensor_dim,)).tolist()
invar = torch.ones(*tensor_size, device=device)
outvar = func(invar)
assert common.compare_output(torch.ones_like(invar), outvar)
@pytest.mark.parametrize("device", ["cuda:0"])
def test_activation_fused_silu(device):
"""Test fused SiLU implementation"""
input = torch.randn(20, 20, dtype=torch.double, requires_grad=True, device=device)
assert torch.autograd.gradcheck(
FusedSiLU.apply, input, eps=1e-6, atol=1e-4
), "Failed FusedSiLU autograd check"
    assert torch.autograd.gradcheck(
        FusedSiLU_deriv_1.apply, input, eps=1e-6, atol=1e-4
    ), "Failed FusedSiLU_deriv_1 autograd check"
    assert torch.autograd.gradcheck(
        FusedSiLU_deriv_2.apply, input, eps=1e-6, atol=1e-4
    ), "Failed FusedSiLU_deriv_2 autograd check"
    assert torch.autograd.gradcheck(
        FusedSiLU_deriv_3.apply, input, eps=1e-6, atol=1e-4
    ), "Failed FusedSiLU_deriv_3 autograd check"
|
modulus-main
|
test/models/test_layers_activations.py
|
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
import pytest
import random
from modulus.models.mlp import FullyConnected
from modulus.models.fno import FNO
from . import common
@pytest.mark.parametrize("device", ["cuda:0", "cpu"])
@pytest.mark.parametrize("dimension", [1, 2, 3, 4])
def test_fno_forward(device, dimension):
"""Test FNO forward pass"""
torch.manual_seed(0)
# Construct FNO model
model = FNO(
in_channels=2,
out_channels=2,
decoder_layers=1,
decoder_layer_size=8,
dimension=dimension,
latent_channels=32,
num_fno_layers=4,
num_fno_modes=4,
padding=0,
).to(device)
bsize = 4
if dimension == 1:
invar = torch.randn(bsize, 2, 16).to(device)
elif dimension == 2:
invar = torch.randn(bsize, 2, 16, 16).to(device)
elif dimension == 3:
invar = torch.randn(bsize, 2, 16, 16, 16).to(device)
else:
invar = torch.randn(bsize, 2, 16, 16, 16, 16).to(device)
assert common.validate_forward_accuracy(
model, (invar,), file_name=f"fno{dimension}d_output.pth", atol=1e-3
)
@pytest.mark.parametrize("device", ["cuda:0", "cpu"])
def test_fno_constructor(device):
"""Test FNO constructor options"""
out_features = random.randint(1, 8)
# Define dictionary of constructor args
arg_list = []
for dimension in [1, 2, 3, 4]:
arg_list.append(
{
"in_channels": random.randint(1, 4),
"out_channels": out_features,
"decoder_layers": 1,
"decoder_layer_size": 8,
"dimension": dimension,
"latent_channels": 32,
"num_fno_layers": 2,
"num_fno_modes": 4,
"padding": 4,
"coord_features": False,
}
)
for kw_args in arg_list:
# Construct FC model
model = FNO(**kw_args).to(device)
bsize = random.randint(1, 4)
if kw_args["dimension"] == 1:
invar = torch.randn(bsize, kw_args["in_channels"], 8).to(device)
elif kw_args["dimension"] == 2:
invar = torch.randn(bsize, kw_args["in_channels"], 8, 8).to(device)
elif kw_args["dimension"] == 3:
invar = torch.randn(bsize, kw_args["in_channels"], 8, 8, 8).to(device)
else:
invar = torch.randn(bsize, kw_args["in_channels"], 8, 8, 8, 8).to(device)
outvar = model(invar)
assert outvar.shape == (bsize, out_features, *invar.shape[2:])
# Also test failure case
try:
model = FNO(
in_channels=2,
out_channels=2,
decoder_layers=1,
decoder_layer_size=8,
dimension=5,
latent_channels=32,
num_fno_layers=4,
num_fno_modes=4,
padding=0,
).to(device)
raise AssertionError("Failed to error for invalid dimension")
except NotImplementedError:
pass
@pytest.mark.parametrize("device", ["cuda:0", "cpu"])
@pytest.mark.parametrize("dimension", [1, 2, 3, 4])
def test_fno_optims(device, dimension):
"""Test FNO optimizations"""
def setup_model():
"""Setups up fresh FNO model and inputs for each optim test"""
model = FNO(
in_channels=2,
out_channels=2,
decoder_layers=1,
decoder_layer_size=8,
dimension=dimension,
latent_channels=4,
num_fno_layers=4,
num_fno_modes=4,
padding=0,
).to(device)
bsize = random.randint(1, 5)
if dimension == 1:
invar = torch.randn(bsize, 2, 8).to(device)
elif dimension == 2:
invar = torch.randn(bsize, 2, 8, 8).to(device)
elif dimension == 3:
invar = torch.randn(bsize, 2, 8, 8, 8).to(device)
else:
invar = torch.randn(bsize, 2, 8, 8, 8, 8).to(device)
return model, invar
# Ideally always check graphs first
model, invar = setup_model()
assert common.validate_cuda_graphs(model, (invar,))
# Check JIT
model, invar = setup_model()
assert common.validate_jit(model, (invar,))
# Check AMP
model, invar = setup_model()
assert common.validate_amp(model, (invar,))
# Check Combo
model, invar = setup_model()
assert common.validate_combo_optims(model, (invar,))
@pytest.mark.parametrize("device", ["cuda:0", "cpu"])
@pytest.mark.parametrize("dimension", [1, 2, 3, 4])
def test_fno_checkpoint(device, dimension):
"""Test FNO checkpoint save/load"""
# Construct FNO models
model_1 = FNO(
in_channels=2,
out_channels=2,
decoder_layers=2,
decoder_layer_size=8,
dimension=dimension,
latent_channels=4,
num_fno_layers=2,
num_fno_modes=2,
padding=0,
).to(device)
model_2 = FNO(
in_channels=2,
out_channels=2,
decoder_layers=2,
decoder_layer_size=8,
dimension=dimension,
latent_channels=4,
num_fno_layers=2,
num_fno_modes=2,
padding=0,
).to(device)
bsize = random.randint(1, 2)
if dimension == 1:
invar = torch.randn(bsize, 2, 8).to(device)
elif dimension == 2:
invar = torch.randn(bsize, 2, 8, 8).to(device)
elif dimension == 3:
invar = torch.randn(bsize, 2, 8, 8, 8).to(device)
else:
invar = torch.randn(bsize, 2, 8, 8, 8, 8).to(device)
assert common.validate_checkpoint(model_1, model_2, (invar,))
@common.check_ort_version()
@pytest.mark.parametrize("device", ["cuda:0", "cpu"])
@pytest.mark.parametrize("dimension", [1, 2, 3, 4])
def test_fnodeploy(device, dimension):
"""Test FNO deployment support"""
    # Construct FNO model
model = FNO(
in_channels=2,
out_channels=2,
decoder_layers=2,
decoder_layer_size=8,
dimension=dimension,
latent_channels=4,
num_fno_layers=2,
num_fno_modes=2,
padding=0,
).to(device)
    bsize = random.randint(1, 2)
if dimension == 1:
invar = torch.randn(bsize, 2, 4).to(device)
elif dimension == 2:
invar = torch.randn(bsize, 2, 4, 4).to(device)
elif dimension == 3:
invar = torch.randn(bsize, 2, 4, 4, 4).to(device)
else:
invar = torch.randn(bsize, 2, 4, 4, 4, 4).to(device)
assert common.validate_onnx_export(model, (invar,))
assert common.validate_onnx_runtime(model, (invar,))
|
modulus-main
|
test/models/test_fno.py
|
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
import pytest
import random
from modulus.models.rnn.rnn_one2many import One2ManyRNN
from modulus.models.rnn.rnn_seq2seq import Seq2SeqRNN
from . import common
@pytest.mark.parametrize("device", ["cuda:0", "cpu"])
@pytest.mark.parametrize("dimension", [2, 3])
def test_conv_rnn_one2many_forward(device, dimension):
"""Test model forward pass"""
torch.manual_seed(0)
# Construct model
model = One2ManyRNN(
input_channels=1,
dimension=dimension,
nr_latent_channels=8,
activation_fn="relu",
nr_downsamples=2,
nr_tsteps=4,
).to(device)
bsize = 2
if dimension == 2:
invar = torch.randn(bsize, 1, 1, 8, 8).to(device)
elif dimension == 3:
invar = torch.randn(bsize, 1, 1, 8, 8, 8).to(device)
else:
print("Dimension not supported")
assert common.validate_forward_accuracy(
model,
(invar,),
file_name=f"conv_rnn_one2many_{dimension}d_output.pth",
atol=1e-4,
)
@pytest.mark.parametrize("device", ["cuda:0", "cpu"])
@pytest.mark.parametrize("dimension", [2, 3])
def test_conv_rnn_one2many_checkpoint(device, dimension):
"""Test model checkpoint save/load"""
# Construct the RNN models
model_1 = One2ManyRNN(
input_channels=1,
dimension=dimension,
nr_latent_channels=4,
activation_fn="relu",
nr_downsamples=2,
nr_tsteps=8,
).to(device)
model_2 = One2ManyRNN(
input_channels=1,
dimension=dimension,
nr_latent_channels=4,
activation_fn="relu",
nr_downsamples=2,
nr_tsteps=8,
).to(device)
bsize = random.randint(1, 2)
if dimension == 2:
invar = torch.randn(bsize, 1, 1, 8, 8).to(device)
elif dimension == 3:
invar = torch.randn(bsize, 1, 1, 8, 8, 8).to(device)
else:
print("Dimension not supported")
assert common.validate_checkpoint(model_1, model_2, (invar,))
@pytest.mark.parametrize("device", ["cuda:0", "cpu"])
@pytest.mark.parametrize("dimension", [2, 3])
def test_conv_rnn_one2many_optimizations(device, dimension):
"""Test model optimizations"""
def setup_model():
"Sets up fresh model for each optimization test"
model = One2ManyRNN(
input_channels=1,
dimension=dimension,
nr_latent_channels=8,
activation_fn="relu",
nr_downsamples=2,
nr_tsteps=2,
).to(device)
bsize = random.randint(1, 2)
if dimension == 2:
invar = torch.randn(bsize, 1, 1, 8, 8).to(device)
elif dimension == 3:
invar = torch.randn(bsize, 1, 1, 8, 8, 8).to(device)
else:
print("Dimension not supported")
return model, invar
# Check AMP
model, invar = setup_model()
assert common.validate_amp(model, (invar,))
@pytest.mark.parametrize("device", ["cuda:0", "cpu"])
def test_conv_rnn_one2many_constructor(device):
"""Test model constructor"""
# Define dictionary of constructor args
arg_list = []
for dimension in [2, 3]:
arg_list.append(
{
"input_channels": 1,
"dimension": dimension,
"nr_latent_channels": random.randint(4, 8),
"activation_fn": "relu",
"nr_downsamples": random.randint(2, 3),
"nr_tsteps": random.randint(8, 16),
}
)
for kw_args in arg_list:
# Construct model
model = One2ManyRNN(**kw_args).to(device)
bsize = random.randint(1, 4)
if kw_args["dimension"] == 2:
invar = torch.randn(bsize, kw_args["input_channels"], 1, 8, 8).to(device)
else:
invar = torch.randn(bsize, kw_args["input_channels"], 1, 8, 8, 8).to(device)
outvar = model(invar)
assert outvar.shape == (
bsize,
kw_args["input_channels"],
kw_args["nr_tsteps"],
*invar.shape[3:],
)
# Also test failure case
try:
model = One2ManyRNN(
input_channels=1,
dimension=4,
nr_latent_channels=32,
activation_fn="relu",
nr_downsamples=2,
nr_tsteps=2,
).to(device)
raise AssertionError("Failed to error for invalid dimension")
except ValueError:
pass
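# Hedged note on the shapes exercised above: One2ManyRNN consumes a single input
# frame shaped (batch, channels, 1, *spatial) and unrolls nr_tsteps predictions,
# yielding (batch, channels, nr_tsteps, *spatial). Seq2SeqRNN (tested below) instead
# consumes a full input sequence of nr_tsteps frames with the same output shape.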
@pytest.mark.parametrize("device", ["cuda:0", "cpu"])
@pytest.mark.parametrize("dimension", [2, 3])
def test_conv_rnn_seq2seq_forward(device, dimension):
"""Test model forward pass"""
torch.manual_seed(0)
# Construct model
model = Seq2SeqRNN(
input_channels=1,
dimension=dimension,
nr_latent_channels=4,
activation_fn="relu",
nr_downsamples=2,
nr_tsteps=8,
).to(device)
bsize = 2
if dimension == 2:
invar = torch.randn(bsize, 1, 8, 8, 8).to(device)
elif dimension == 3:
invar = torch.randn(bsize, 1, 8, 8, 8, 8).to(device)
else:
print("Dimension not supported")
assert common.validate_forward_accuracy(
model,
(invar,),
file_name=f"conv_rnn_seq2seq_{dimension}d_output.pth",
atol=1e-4,
)
@pytest.mark.parametrize("device", ["cuda:0", "cpu"])
@pytest.mark.parametrize("dimension", [2, 3])
def test_conv_rnn_seq2seq_checkpoint(device, dimension):
"""Test model checkpoint save/load"""
# Construct the RNN models
model_1 = Seq2SeqRNN(
input_channels=1,
dimension=dimension,
nr_latent_channels=8,
activation_fn="relu",
nr_downsamples=2,
nr_tsteps=8,
).to(device)
model_2 = Seq2SeqRNN(
input_channels=1,
dimension=dimension,
nr_latent_channels=8,
activation_fn="relu",
nr_downsamples=2,
nr_tsteps=8,
).to(device)
bsize = random.randint(1, 2)
if dimension == 2:
invar = torch.randn(bsize, 1, 8, 8, 8).to(device)
elif dimension == 3:
invar = torch.randn(bsize, 1, 8, 8, 8, 8).to(device)
else:
print("Dimension not supported")
assert common.validate_checkpoint(model_1, model_2, (invar,))
@pytest.mark.parametrize("device", ["cuda:0", "cpu"])
@pytest.mark.parametrize("dimension", [2, 3])
def test_conv_rnn_seq2seq_optimizations(device, dimension):
"""Test model optimizations"""
def setup_model():
"Sets up fresh model for each optimization test"
model = Seq2SeqRNN(
input_channels=1,
dimension=dimension,
nr_latent_channels=4,
activation_fn="relu",
nr_downsamples=2,
nr_tsteps=2,
).to(device)
bsize = random.randint(1, 2)
if dimension == 2:
invar = torch.randn(bsize, 1, 2, 8, 8).to(device)
elif dimension == 3:
invar = torch.randn(bsize, 1, 2, 8, 8, 8).to(device)
else:
print("Dimension not supported")
return model, invar
# Check AMP
model, invar = setup_model()
assert common.validate_amp(model, (invar,))
@pytest.mark.parametrize("device", ["cuda:0", "cpu"])
def test_conv_rnn_seq2seq_constructor(device):
"""Test model constructor"""
# Define dictionary of constructor args
arg_list = []
for dimension in [2, 3]:
arg_list.append(
{
"input_channels": 1,
"dimension": dimension,
"nr_latent_channels": random.randint(4, 8),
"activation_fn": "relu",
"nr_downsamples": random.randint(2, 3),
"nr_tsteps": random.randint(2, 4),
}
)
for kw_args in arg_list:
# Construct model
        model = Seq2SeqRNN(**kw_args).to(device)
bsize = random.randint(1, 4)
if kw_args["dimension"] == 2:
invar = torch.randn(
bsize, kw_args["input_channels"], kw_args["nr_tsteps"], 16, 16
).to(device)
else:
invar = torch.randn(
bsize, kw_args["input_channels"], kw_args["nr_tsteps"], 16, 16, 16
).to(device)
outvar = model(invar)
assert outvar.shape == (
bsize,
kw_args["input_channels"],
kw_args["nr_tsteps"],
*invar.shape[3:],
)
# Also test failure case
try:
model = Seq2SeqRNN(
input_channels=1,
dimension=4,
nr_latent_channels=4,
activation_fn="relu",
nr_downsamples=2,
nr_tsteps=2,
).to(device)
raise AssertionError("Failed to error for invalid dimension")
except ValueError:
pass
|
modulus-main
|
test/models/test_rnn.py
|
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
import pytest
from modulus.models.rnn.layers import (
_ConvLayer,
_TransposeConvLayer,
_ConvResidualBlock,
)
@pytest.mark.parametrize("activation_fn", [torch.nn.ReLU(), torch.nn.Identity()])
@pytest.mark.parametrize("stride", [1, 3])
@pytest.mark.parametrize("dimension", [1, 2, 3])
def test_conv_layer(activation_fn, stride, dimension):
"""Test conv layer"""
in_channels = 16
out_channels = 16
kernel_size = 3
layer = _ConvLayer(
in_channels=in_channels,
out_channels=out_channels,
kernel_size=kernel_size,
stride=stride,
dimension=dimension,
activation_fn=activation_fn,
)
bsize = 2
fig_size = 18
input_size = (bsize, in_channels) + dimension * (fig_size,)
invar = torch.randn(size=input_size)
outvar = layer(invar)
if stride == 1:
size_out = fig_size
else:
        size_out = (fig_size - 1 * (kernel_size - 1) - 1) // stride + 1
assert outvar.shape == (bsize, out_channels) + dimension * (size_out,)
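# Worked instance of the convolution output-size formula asserted above (a sketch,
# assuming dilation 1 and no padding, as in the defaults): with fig_size=18,
# kernel_size=3, stride=3, (18 - 1*(3 - 1) - 1) // 3 + 1 = 6. The helper below is
# illustrative only and is not collected by pytest.
def _example_conv_output_size(fig_size=18, kernel_size=3, stride=3, dilation=1):
    """Standard Conv{1,2,3}d output-size formula (no padding)."""
    return (fig_size - dilation * (kernel_size - 1) - 1) // stride + 1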
@pytest.mark.parametrize("activation_fn", [torch.nn.ReLU(), torch.nn.Identity()])
@pytest.mark.parametrize("stride", [1, 3])
@pytest.mark.parametrize("dimension", [1, 2, 3])
def test_transconv_layer(activation_fn, stride, dimension):
"""Test transpose conv layer"""
in_channels = 16
out_channels = 16
kernel_size = 3
layer = _TransposeConvLayer(
in_channels=in_channels,
out_channels=out_channels,
kernel_size=kernel_size,
stride=stride,
dimension=dimension,
activation_fn=activation_fn,
)
bsize = 2
fig_size = 18
input_size = (bsize, in_channels) + dimension * (fig_size,)
invar = torch.randn(size=input_size)
outvar = layer(invar)
if stride == 1:
size_out = fig_size
else:
size_out = (fig_size - 1) * stride + 1 * (kernel_size - 1) + 1
assert outvar.shape == (bsize, out_channels) + dimension * (size_out,)
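# Worked instance of the transposed-convolution output-size formula asserted above
# (a sketch, assuming dilation 1, no padding, no output padding): with fig_size=18,
# kernel_size=3, stride=3, (18 - 1) * 3 + 1*(3 - 1) + 1 = 54. Illustrative only.
def _example_transposed_conv_output_size(fig_size=18, kernel_size=3, stride=3, dilation=1):
    """Standard ConvTranspose{1,2,3}d output-size formula (no padding)."""
    return (fig_size - 1) * stride + dilation * (kernel_size - 1) + 1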
@pytest.mark.parametrize("activation_fn", [torch.nn.ReLU(), torch.nn.Identity()])
@pytest.mark.parametrize("begin_activation_fn", [True, False])
@pytest.mark.parametrize("gated", [True, False])
@pytest.mark.parametrize("layer_normalization", [True, False])
@pytest.mark.parametrize("dimension", [1, 2, 3])
def test_residual_block_layer(
activation_fn,
begin_activation_fn,
gated,
layer_normalization,
dimension,
):
"""Test residual block"""
stride = 1
in_channels = 16
out_channels = 16
# Just test constructor
layer = _ConvResidualBlock(
in_channels=in_channels,
out_channels=out_channels,
dimension=dimension,
gated=gated,
layer_normalization=layer_normalization,
activation_fn=activation_fn,
begin_activation_fn=begin_activation_fn,
stride=stride,
)
bsize = 2
fig_size = 18
input_size = (bsize, in_channels) + dimension * (fig_size,)
invar = torch.randn(size=input_size)
outvar = layer(invar)
size_out = fig_size
assert outvar.shape == (bsize, out_channels) + dimension * (size_out,)
|
modulus-main
|
test/models/test_rnn_layers.py
|
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
import pytest
import random
from modulus.models.pix2pix import Pix2Pix
from . import common
@pytest.mark.parametrize("device", ["cuda:0", "cpu"])
def test_pix2pix_forward(device):
"""Test pix2pix forward pass"""
torch.manual_seed(0)
# Construct pix2pix model
model_3d = Pix2Pix(
in_channels=1,
out_channels=1,
dimension=3,
conv_layer_size=8,
n_downsampling=3,
n_upsampling=3,
n_blocks=3,
).to(device)
bsize = 8
invar = torch.randn(bsize, 1, 16, 16, 16).to(device)
assert common.validate_forward_accuracy(model_3d, (invar,))
@pytest.mark.parametrize("device", ["cuda:0", "cpu"])
def test_pix2pix_constructor(device):
"""Test pix2pix constructor options"""
# Define dictionary of constructor args
arg_list = []
for dimension in [1, 2, 3]:
for padding_type in ["reflect", "replicate", "zero"]:
arg_list += [
{
"in_channels": random.randint(1, 3),
"out_channels": random.randint(1, 3),
"dimension": dimension,
"conv_layer_size": 16,
"n_downsampling": 3,
"n_upsampling": 3,
"n_blocks": 2,
"padding_type": padding_type,
},
{
"in_channels": random.randint(1, 3),
"out_channels": random.randint(1, 3),
"dimension": dimension,
"conv_layer_size": 8,
"n_downsampling": 2,
"n_upsampling": 2,
"n_blocks": 3,
"batch_norm": True,
"padding_type": padding_type,
},
]
for i, kw_args in enumerate(arg_list):
        # Construct Pix2Pix model
model = Pix2Pix(**kw_args).to(device)
bsize = random.randint(1, 16)
if kw_args["dimension"] == 1:
invar = torch.randn(bsize, kw_args["in_channels"], 16).to(device)
elif kw_args["dimension"] == 2:
invar = torch.randn(bsize, kw_args["in_channels"], 16, 16).to(device)
else:
invar = torch.randn(bsize, kw_args["in_channels"], 16, 16, 16).to(device)
outvar = model(invar)
assert outvar.shape == (bsize, kw_args["out_channels"], *invar.shape[2:])
# Also test failure case
try:
model = Pix2Pix(
in_channels=1,
out_channels=1,
dimension=4,
conv_layer_size=8,
n_downsampling=3,
n_upsampling=3,
n_blocks=3,
).to(device)
raise AssertionError("Failed to error for invalid dimension")
except NotImplementedError:
pass
@pytest.mark.parametrize("device", ["cuda:0", "cpu"])
def test_pix2pix_optims(device):
"""Test pix2pix optimizations"""
def setup_model():
"""Set up fresh model and inputs for each optim test"""
# Construct pix2pix model
model = Pix2Pix(
in_channels=2,
out_channels=2,
dimension=1,
conv_layer_size=2,
n_downsampling=2,
n_upsampling=2,
n_blocks=3,
).to(device)
bsize = 4
invar = torch.randn(bsize, 2, 16).to(device)
return model, invar
# Ideally always check graphs first
model, invar = setup_model()
assert common.validate_cuda_graphs(model, (invar,))
# Check JIT
model, invar = setup_model()
assert common.validate_jit(model, (invar,))
# Check AMP
model, invar = setup_model()
assert common.validate_amp(model, (invar,))
# Check Combo
model, invar = setup_model()
assert common.validate_combo_optims(model, (invar,))
@pytest.mark.parametrize("device", ["cuda:0", "cpu"])
def test_pix2pix_checkpoint(device):
"""Test pix2pix checkpoint save/load"""
# Construct pix2pix model
model_1 = Pix2Pix(
in_channels=2,
out_channels=2,
dimension=2,
conv_layer_size=4,
n_downsampling=2,
n_upsampling=2,
n_blocks=2,
).to(device)
model_2 = Pix2Pix(
in_channels=2,
out_channels=2,
dimension=2,
conv_layer_size=4,
n_downsampling=2,
n_upsampling=2,
n_blocks=2,
).to(device)
bsize = random.randint(1, 4)
invar = torch.randn(bsize, 2, 16, 16).to(device)
assert common.validate_checkpoint(model_1, model_2, (invar,))
@pytest.mark.parametrize("device", ["cuda:0", "cpu"])
def test_pix2pix_deploy(device):
"""Test pix2pix deployment support"""
# Construct pix2pix model
model = Pix2Pix(
in_channels=2,
out_channels=2,
dimension=3,
conv_layer_size=8,
n_downsampling=2,
n_upsampling=2,
n_blocks=2,
).to(device)
bsize = random.randint(1, 8)
invar = torch.randn(bsize, 2, 32, 32, 32).to(device)
assert common.validate_onnx_export(model, (invar,))
assert common.validate_onnx_runtime(model, (invar,))
@pytest.mark.parametrize("device", ["cuda:0", "cpu"])
@pytest.mark.parametrize("upsample", [1, 2])
def test_pix2pix_upsample(device, upsample):
"""Test pix2pix upsampling functionality"""
# Construct pix2pix model
model = Pix2Pix(
in_channels=2,
out_channels=2,
dimension=2,
conv_layer_size=8,
n_downsampling=2,
n_upsampling=(2 + upsample),
n_blocks=2,
).to(device)
bsize = random.randint(1, 4)
invar = torch.randn(bsize, 2, 8, 8).to(device)
outvar = model(invar)
assert outvar.shape == (bsize, 2, 8 * 2 ** (upsample), 8 * 2 ** (upsample))
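# Worked shape arithmetic for the assertion above (a sketch, assuming each
# downsampling stage halves and each upsampling stage doubles the spatial size):
# with an 8x8 input, n_downsampling=2 and n_upsampling=2+upsample, the net scale
# is 2 ** upsample, e.g. upsample=2 gives 8 * 2**2 = 32 per spatial dimension.
def _example_pix2pix_output_size(in_size=8, upsample=2):
    """Illustrative only: net spatial scale of the tested configuration."""
    return in_size * 2**upsample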
|
modulus-main
|
test/models/test_pix2pix.py
|
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
import torch
from modulus.models import Module
from modulus.registry import ModelRegistry
class MockModel(Module):
def __init__(self, layer_size=16):
super().__init__()
self.layer_size = layer_size
self.layer = torch.nn.Linear(layer_size, layer_size)
def forward(self, x):
return self.layer(x)
@pytest.mark.parametrize("device", ["cuda:0", "cpu"])
def test_register_and_factory(device):
# Register the MockModel
registry = ModelRegistry()
registry.register(MockModel, "mock_model")
# Use factory to get the MockModel
RetrievedModel = registry.factory("mock_model")
# Check if the retrieved model is the same as the one registered
assert RetrievedModel == MockModel
# Check forward pass of RetrievedModel
layer_size = 16
invar = torch.randn(1, layer_size).to(device)
model = RetrievedModel(layer_size=layer_size).to(device)
outvar = model(invar)
assert outvar.shape == invar.shape
assert outvar.device == invar.device
print(registry.list_models())
registry.__clear_registry__()
|
modulus-main
|
test/models/test_model_factory.py
|
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
import pytest
import random
from modulus.models.layers import WeightNormLinear
@pytest.mark.parametrize("device", ["cuda:0", "cpu"])
def test_weight_norm(device):
"""Test weight norm"""
in_features = random.randint(1, 8)
out_features = random.randint(1, 8)
# Construct FC model
wnorm = WeightNormLinear(
in_features=in_features,
out_features=out_features,
bias=True,
).to(device)
bsize = random.randint(1, 4)
invar = torch.randn(bsize, in_features).to(device)
outvar = wnorm(invar)
assert outvar.shape == (bsize, out_features)
assert (
wnorm.extra_repr()
== f"in_features={in_features}, out_features={out_features}, bias={True}"
)
wnorm = WeightNormLinear(
in_features=in_features,
out_features=out_features,
bias=False,
).to(device)
wnorm.reset_parameters()
assert wnorm.bias is None
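# A minimal sketch of the weight-norm reparameterization such a layer is typically
# built on, w = g * v / ||v|| (per output row). The axis convention below is an
# assumption for illustration, not necessarily WeightNormLinear's exact internals.
def _example_weight_norm(v: torch.Tensor, g: torch.Tensor) -> torch.Tensor:
    """Illustrative only: v has shape (out, in), g has shape (out, 1)."""
    return g * v / v.norm(dim=1, keepdim=True)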
|
modulus-main
|
test/models/test_layers_weightnorm.py
|
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
import pkg_resources
@pytest.mark.parametrize(
"model_name",
[
"AFNO",
"DLWP",
"FNO",
"GraphCastNet",
"MeshGraphNet",
"FullyConnected",
"Pix2Pix",
"One2ManyRNN",
#'SphericalFourierNeuralOperatorNet',
"SRResNet",
],
)
def test_model_entry_points(model_name):
"""Test model entry points"""
# Get all the models exposed by the package
models = {
entry_point.name: entry_point
for entry_point in pkg_resources.iter_entry_points("modulus.models")
}
# Assert that the model is among them
assert model_name in models
# Try loading the model
try:
model = models[model_name].load()
except Exception as e:
pytest.fail(f"Failed to load {model_name}: {e}")
|
modulus-main
|
test/models/test_entrypoints.py
|
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
import torch.nn.functional as F
from torch import nn
from modulus.models.sfno.activations import ComplexReLU, ComplexActivation
def test_ComplexReLU_cartesian():
relu = ComplexReLU(mode="cartesian")
z = torch.randn(2, 2, dtype=torch.cfloat)
output = relu(z)
assert torch.allclose(output.real, F.relu(z.real)) and torch.allclose(
output.imag, F.relu(z.imag)
)
def test_ComplexReLU_real():
relu = ComplexReLU(mode="real")
z = torch.randn(2, 2, dtype=torch.cfloat)
output = relu(z)
assert torch.allclose(output.real, F.relu(z.real)) and torch.allclose(
output.imag, z.imag
)
def test_ComplexActivation_cartesian():
activation = ComplexActivation(nn.ReLU(), mode="cartesian")
z = torch.randn(2, 2, dtype=torch.cfloat)
output = activation(z)
assert torch.allclose(output.real, F.relu(z.real)) and torch.allclose(
output.imag, F.relu(z.imag)
)
def test_ComplexActivation_modulus():
activation = ComplexActivation(nn.ReLU(), mode="modulus")
z = torch.randn(2, 2, dtype=torch.cfloat)
output = activation(z)
assert torch.allclose(output.abs(), F.relu(z.abs()))
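# Hedged sketch of the two modes exercised above: "cartesian" applies the activation
# to the real and imaginary parts independently, while "modulus" applies it to |z|
# and keeps the phase. Exact library internals (e.g., a learnable bias on the
# magnitude) may differ; this helper is illustrative only.
def _example_complex_modes(z: torch.Tensor):
    cartesian = torch.complex(F.relu(z.real), F.relu(z.imag))
    modulus_mode = F.relu(z.abs()) * torch.exp(1j * z.angle())
    return cartesian, modulus_mode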
|
modulus-main
|
test/models/sfno/test_activations.py
|
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
import numpy as np
def fix_random_seeds():
"""Fix random seeds for reproducibility"""
np.random.seed(0)
torch.manual_seed(0)
torch.cuda.manual_seed(0)
|
modulus-main
|
test/models/sfno/utils.py
|
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys, os
script_path = os.path.abspath(__file__)
sys.path.append(os.path.join(os.path.dirname(script_path), ".."))
import common
import pytest
import torch
from utils import fix_random_seeds
from modulus.models.sfno.sfnonet import SphericalFourierNeuralOperatorNet
@pytest.mark.parametrize("device", ["cuda:0", "cpu"])
@pytest.mark.parametrize("checkpointing", [0, 2])
def test_sfno_forward(device, checkpointing):
"""Test sfno forward pass with & without checkpointing"""
in_chans = 2
h, w = 8, 16
params = {}
fix_random_seeds()
x = torch.randn(1, in_chans, h, w)
x = x.to(device)
# Construct sfno model
model = SphericalFourierNeuralOperatorNet(
params,
inp_shape=(h, w),
scale_factor=4,
in_chans=in_chans,
out_chans=in_chans,
embed_dim=16,
num_layers=2,
encoder_layers=1,
spectral_layers=2,
checkpointing=checkpointing,
).to(device)
assert common.validate_forward_accuracy(model, (x,), rtol=1e-3)
@pytest.mark.parametrize("device", ["cuda:0", "cpu"])
@pytest.mark.parametrize(
"filter_type, operator_type, use_mlp, activation_function, pos_embed, \
normalization_layer, use_complex_kernels, factorization, separable, \
complex_network",
[
(
"non-linear",
"diagonal",
True,
"relu",
"direct",
"instance_norm",
True,
None,
False,
True,
),
(
"linear",
"diagonal",
False,
"gelu",
"frequency",
"instance_norm",
True,
"dense",
True,
False,
),
(
"non-linear",
"diagonal",
False,
"silu",
"none",
"none",
False,
"cp",
False,
True,
),
],
)
def test_sfno_constructor(
device,
filter_type,
operator_type,
use_mlp,
activation_function,
pos_embed,
normalization_layer,
use_complex_kernels,
factorization,
separable,
complex_network,
):
"""Test sfno constructor options"""
# Define dictionary of constructor args
in_chans = 2
h, w = 8, 16
batch_size = 2
    arg_list = [
        {
            "params": {},
            "inp_shape": (h, w),
            "scale_factor": 4,
            "in_chans": in_chans,
            "out_chans": in_chans,
            "embed_dim": 16,
            "num_layers": 2,
            "encoder_layers": 1,
            "spectral_layers": 2,
            "checkpointing": 0,
            "filter_type": filter_type,
            "operator_type": operator_type,
            "use_mlp": use_mlp,
            "activation_function": activation_function,
            "pos_embed": pos_embed,
            "normalization_layer": normalization_layer,
            "use_complex_kernels": use_complex_kernels,
            "factorization": factorization,
            "separable": separable,
            "complex_network": complex_network,
        },
    ]
for kw_args in arg_list:
# Construct sfno model
model = SphericalFourierNeuralOperatorNet(**kw_args).to(device)
fix_random_seeds()
x = torch.randn(batch_size, in_chans, h, w)
x = x.to(device)
outvar = model(x)
assert outvar.shape == (batch_size, in_chans, h, w)
@pytest.mark.parametrize("device", ["cuda:0", "cpu"])
def test_sfno_optims(device):
"""Test sfno optimizations"""
def setup_model():
"""Set up fresh model and inputs for each optim test"""
in_chans = 2
h, w = 8, 16
fix_random_seeds()
x = torch.randn(1, in_chans, h, w)
x = x.to(device)
model_kwds = {
"params": {},
"inp_shape": (h, w),
"scale_factor": 4,
"in_chans": in_chans,
"out_chans": in_chans,
"embed_dim": 16,
"num_layers": 2,
"encoder_layers": 1,
"spectral_layers": 1,
"checkpointing": 0,
}
# Construct SFNO model
model = SphericalFourierNeuralOperatorNet(**model_kwds).to(device)
return model, (x,)
    # Ideally always check graphs first
model, invar = setup_model()
assert common.validate_cuda_graphs(model, (*invar,))
    # Check JIT
model, invar = setup_model()
assert common.validate_jit(model, (*invar,))
    # Check AMP
model, invar = setup_model()
assert common.validate_amp(model, (*invar,))
    # Check Combo
model, invar = setup_model()
assert common.validate_combo_optims(model, (*invar,))
@pytest.mark.parametrize("device", ["cuda:0", "cpu"])
def test_sfno_checkpoint(device):
"""Test sfno checkpoint save/load"""
in_chans = 4
h, w = 8, 16
fix_random_seeds()
x = torch.randn(1, in_chans, h, w)
x = x.to(device)
model_kwds = {
"params": {},
"inp_shape": (h, w),
"scale_factor": 3,
"in_chans": in_chans,
"out_chans": in_chans,
"embed_dim": 16,
"num_layers": 4,
"encoder_layers": 1,
"spectral_layers": 3,
"checkpointing": 0,
}
# Construct sfno model
model_1 = SphericalFourierNeuralOperatorNet(**model_kwds).to(device)
model_2 = SphericalFourierNeuralOperatorNet(**model_kwds).to(device)
assert common.validate_checkpoint(
model_1,
model_2,
(x,),
)
@common.check_ort_version()
@pytest.mark.parametrize("device", ["cuda:0", "cpu"])
def test_sfno_deploy(device):
"""Test sfno deployment support"""
in_chans = 3
h, w = 8, 16
fix_random_seeds()
x = torch.randn(1, in_chans, h, w)
x = x.to(device)
model_kwds = {
"params": {},
"inp_shape": (h, w),
"scale_factor": 3,
"in_chans": in_chans,
"out_chans": in_chans,
"embed_dim": 16,
"num_layers": 4,
"encoder_layers": 1,
"spectral_layers": 3,
"checkpointing": 0,
}
# Construct SFNO model
model = SphericalFourierNeuralOperatorNet(**model_kwds).to(device)
assert common.validate_onnx_export(model, x)
assert common.validate_onnx_runtime(model, x)
|
modulus-main
|
test/models/sfno/test_sfno.py
|
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import modulus
import torch
import sys
from pathlib import Path
from typing import Tuple, Union
from contextlib import nullcontext
from .utils import dummy_loss_fn, compare_output
Tensor = torch.Tensor
logger = logging.getLogger("__name__")
def save_output(output: Union[Tensor, Tuple[Tensor, ...]], file_name: Path):
"""Saves output of model to file
Parameters
----------
output : Union[Tensor, Tuple[Tensor, ...]]
        Output from network model
file_name : Path
File path
Raises
------
IOError
If file path has a parent directory that does not exist
ValueError
If model outputs are larger than 10mb
"""
if not file_name.parent.is_dir():
raise IOError(
f"Folder path, {file_name.parent}, for output accuracy data not found"
)
# Check size of outputs
output_size = 0
for out_tensor in output:
out_tensor = out_tensor.detach().contiguous().cpu()
output_size += out_tensor.element_size() * out_tensor.nelement()
if output_size > 10**7:
raise ValueError(
"Outputs are greater than 10mb which is too large for this test"
)
output_dict = {i: data.detach().contiguous().cpu() for i, data in enumerate(output)}
torch.save(output_dict, file_name)
@torch.no_grad()
def validate_forward_accuracy(
model: modulus.Module,
in_args: Tuple[Tensor] = (),
rtol: float = 1e-3,
atol: float = 1e-3,
file_name: Union[str, None] = None,
) -> bool:
"""Validates the accuracy of a model's forward pass with a reference output
    The provided model should be initialized with a set seed, otherwise this test will
    likely fail. If the reference output tensor file cannot be found, a new one will be
    created but this test will error. Run the test again to pass it, and be sure to
    commit the output tensor file if you have contributed a new model.
Parameters
----------
model : modulus.Module
Modulus module
in_args : Tuple[Tensor], optional
Input arguments, by default ()
rtol : float, optional
Relative tolerance of error allowed, by default 1e-3
atol : float, optional
Absolute tolerance of error allowed, by default 1e-3
file_name : Union[str, None], optional
Override the default file name of the stored target output, by default None
Returns
-------
bool
Test passed
Raises
------
IOError
Target output tensor file for this model was not found
"""
    # Perform a forward pass of the model
output = model.forward(*in_args)
# Always use tuples for this comparison / saving
if isinstance(output, Tensor):
output = (output,)
# File name / path
# Output files should live in test/models/data
if file_name is None:
file_name = model.meta.name + "_output.pth"
file_name = (
Path(__file__).parents[1].resolve() / Path("data") / Path(file_name.lower())
)
# If file does not exist, we will create it then error
# Model should then reproduce it on next pytest run
if not file_name.exists():
save_output(output, file_name)
raise IOError(
f"Output check file {str(file_name)} wasn't found so one was created. Please re-run the test."
)
# Load tensor dictionary and check
else:
tensor_dict = torch.load(str(file_name))
output_target = tuple(
[value.to(model.device) for value in tensor_dict.values()]
)
return compare_output(output, output_target, rtol, atol)
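# Hedged usage sketch (mirrors the model tests in this suite; the model class,
# device, and input shape are placeholders):
#
#     torch.manual_seed(0)
#     model = SomeModulusModel().to(device)
#     assert validate_forward_accuracy(model, (torch.randn(1, 2, 8).to(device),))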
|
modulus-main
|
test/models/common/fwdaccuracy.py
|
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import modulus
import torch
from typing import Tuple
from contextlib import nullcontext
from .utils import dummy_loss_fn, compare_output
Tensor = torch.Tensor
logger = logging.getLogger("__name__")
@torch.no_grad()
def validate_jit(
model: modulus.Module,
in_args: Tuple[Tensor] = (),
rtol: float = 1e-5,
atol: float = 1e-5,
) -> bool:
"""Check network's JIT compatibility
This test checks if JIT works on the provided neural network.
JIT compilation is checked as well as making sure the original
and JIT model produce the same output.
Parameters
----------
model : modulus.Module
Modulus module
in_args : Tuple[Tensor], optional
Input arguments, by default ()
rtol : float, optional
Relative tolerance of error allowed, by default 1e-5
atol : float, optional
Absolute tolerance of error allowed, by default 1e-5
Returns
-------
bool
Test passed
Note
----
JIT must be turned on in the model's meta data for this test to run.
"""
if not model.meta.jit:
logger.warning("Model not marked as supporting JIT, skipping")
return True
output = model.forward(*in_args)
jit_model = torch.jit.script(model)
output_jit = jit_model(*in_args)
return compare_output(output, output_jit, rtol, atol)
def validate_cuda_graphs(
model: modulus.Module,
in_args: Tuple[Tensor] = (),
rtol: float = 1e-5,
atol: float = 1e-5,
warmup_length: int = 3,
) -> bool:
"""Check network's CUDA graphs compatibility
This test checks if CUDA graphs works on the provided neural network.
CUDA graph callable compiling is checked as well as making sure the original
and CUDA graph model produce the same output.
Parameters
----------
model : modulus.Module
Modulus module
in_args : Tuple[Tensor], optional
Input arguments, keywords not supported, by default ()
rtol : float, optional
Relative tolerance of error allowed, by default 1e-5
atol : float, optional
Absolute tolerance of error allowed, by default 1e-5
warmup_length: int, optional
Number of warm-up iterations when making graph callable
Returns
-------
bool
Test passed
Note
----
    CUDA graphs must be turned on in the model's meta data for this test to run.
Note
----
    PyTorch's graph for the model and inputs must be completely clear! Meaning if you
    have any inputs / outputs / parameters that are not detached, this will cause an error.
"""
if not model.meta.cuda_graphs:
logger.warning("Model not marked as supporting CUDA graphs, skipping")
return True
if str(model.device) == "cpu":
logger.warning("Model on CPU, skipping cuda graph test.")
return True
# Regular forward pass
with torch.no_grad():
output = model.forward(*in_args)
# Create callable
graph_module = torch.cuda.make_graphed_callables(
model, sample_args=in_args, num_warmup_iters=warmup_length
)
output_graph = graph_module(*in_args)
return compare_output(output, output_graph, rtol, atol)
def validate_amp(
model: modulus.Module,
in_args: Tuple[Tensor] = (),
iterations: int = 3,
) -> bool:
"""Check network's AMP compatibility
This test checks if AMP works on the provided neural network.
Parameters
----------
model : modulus.Module
Modulus module
in_args : Tuple[Tensor], optional
Input arguments, keywords not supported, by default ()
iterations: int, optional
Number of iterations to test AMP with
Returns
-------
bool
Test passed
Note
----
AMP must be turned on in the model's meta data for this test to run.
"""
if not model.meta.amp_cpu and str(model.device) == "cpu":
logger.warning("Model not marked as supporting AMP CPU, skipping")
return True
elif not model.meta.amp_gpu:
logger.warning("Model not marked as supporting AMP GPU, skipping")
return True
    # Only bfloat16 is supported on CPU; gradient scaling is also disabled there
if str(model.device) == "cpu":
amp_device = "cpu"
amp_dtype = torch.bfloat16
scaler = torch.cuda.amp.GradScaler(enabled=False)
else:
amp_device = "cuda"
amp_dtype = torch.float16
scaler = torch.cuda.amp.GradScaler(enabled=True)
optimizer = torch.optim.Adam(model.parameters(), lr=1e-3)
for i in range(iterations):
with torch.autocast(amp_device, enabled=True, dtype=amp_dtype):
optimizer.zero_grad()
output = model.forward(*in_args)
loss = dummy_loss_fn(output)
        # Backward call (scaled if on GPU)
scaler.scale(loss).backward()
scaler.step(optimizer)
scaler.update()
return True
def validate_torch_fx() -> bool:
"""TODO"""
return True
def validate_combo_optims(
model: modulus.Module,
in_args: Tuple[Tensor] = (),
iterations: int = 2,
warmup_length: int = 11,
) -> bool:
"""Tests all model supported optimizations together
This test will dynamically change what optimizations are used based on the model's
meta data. This test should be regarded as the final. The goal is to just run and
clear the method without errors.
Parameters
----------
model : modulus.Module
Modulus module
in_args : Tuple[Tensor], optional
Input arguments, keywords not supported, by default ()
iterations : int, optional
Number of training iterations, by default 2
warmup_length : int, optional
        Number of warm-up iterations before CUDA graph recording, by default 11
Returns
-------
bool
Test passed
Note
----
    JIT will likely be phased out with Torch FX, which will take priority in the future.
"""
# Override warm up length with iterations if no cuda graphs
warmup_length = warmup_length if model.meta.cuda_graphs else iterations
optimizer = torch.optim.Adam(model.parameters(), lr=1e-3)
    # Only bfloat16 is supported on CPU; gradient scaling is also disabled there
if str(model.device) == "cpu":
amp_enabled = model.meta.amp_cpu
cuda_graphs_enabled = False
amp_device = "cpu"
amp_dtype = torch.bfloat16
scaler = torch.cuda.amp.GradScaler(enabled=False) # Always false on CPU
else:
amp_enabled = model.meta.amp_gpu
cuda_graphs_enabled = model.meta.cuda_graphs
amp_device = "cuda"
amp_dtype = torch.float16
scaler = torch.cuda.amp.GradScaler(enabled=amp_enabled)
    # TorchScript: keep the scripted model separate since it doesn't carry meta data
if model.meta.jit:
fwd_model = torch.jit.script(model)
else:
fwd_model = model
    def forward_step(in_args):
"""Mini-forward function to capture in cuda graph if needed"""
# Test AMP
# This is a conditional context statement: https://stackoverflow.com/a/34798330
with torch.autocast(
amp_device, enabled=True, dtype=amp_dtype
) if model.meta.amp else nullcontext():
optimizer.zero_grad()
output = fwd_model(*in_args)
loss = dummy_loss_fn(output)
scaler.scale(loss).backward()
# Warmup stream (if cuda graphs)
with torch.cuda.stream(
torch.cuda.Stream()
) if cuda_graphs_enabled else nullcontext():
for i in range(warmup_length):
            forward_step(in_args)
scaler.step(optimizer)
scaler.update()
# Test Cuda graphs
if cuda_graphs_enabled:
# Record cuda graphs
g = torch.cuda.CUDAGraph()
optimizer.zero_grad(set_to_none=True)
with torch.cuda.graph(g):
            forward_step(in_args)
# Optimizer step outside for AMP support
scaler.step(optimizer)
scaler.update()
# Replay graph
for i in range(iterations):
g.replay()
scaler.step(optimizer)
scaler.update()
return True
|
modulus-main
|
test/models/common/optimization.py
|
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .optimization import (
validate_jit,
validate_amp,
validate_cuda_graphs,
validate_combo_optims,
)
from .checkpoints import validate_checkpoint
from .fwdaccuracy import validate_forward_accuracy
from .inference import validate_onnx_export, validate_onnx_runtime, check_ort_version
from .utils import compare_output
|
modulus-main
|
test/models/common/__init__.py
|
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import modulus
import torch
from typing import Tuple
from pathlib import Path
from .utils import compare_output
Tensor = torch.Tensor
logger = logging.getLogger("__name__")
@torch.no_grad()
def validate_checkpoint(
model_1: modulus.Module,
model_2: modulus.Module,
in_args: Tuple[Tensor] = (),
rtol: float = 1e-5,
atol: float = 1e-5,
) -> bool:
"""Check network's checkpoint safely saves and loads the state of the model
This test will check if a model's state is fully saved in its checkpoint. Two
    separately initialized models should be provided. One model will load the other's
checkpoint and produce the same output.
Parameters
----------
model_1 : modulus.Module
Modulus model to save checkpoint from
model_2 : modulus.Module
Modulus model to load checkpoint to
in_args : Tuple[Tensor], optional
Input arguments, by default ()
rtol : float, optional
Relative tolerance of error allowed, by default 1e-5
atol : float, optional
Absolute tolerance of error allowed, by default 1e-5
Returns
-------
bool
Test passed
"""
# First check fail safes of save/load functions
try:
model_1.save("folder_does_not_exist/checkpoint.mdlus")
except IOError:
pass
try:
model_1.load("does_not_exist.mdlus")
except IOError:
pass
# Now test forward passes
output_1 = model_1.forward(*in_args)
output_2 = model_2.forward(*in_args)
# Model outputs should initially be different
assert not compare_output(
output_1, output_2, rtol, atol
), "Model outputs should initially be different"
# Save checkpoint from model 1 and load it into model 2
model_1.save("checkpoint.mdlus")
model_2.load("checkpoint.mdlus")
# Forward with loaded checkpoint
output_2 = model_2.forward(*in_args)
loaded_checkpoint = compare_output(output_1, output_2, rtol, atol)
# Restore checkpoint with from_checkpoint, checks initialization of model directly from checkpoint
model_2 = modulus.Module.from_checkpoint("checkpoint.mdlus").to(model_1.device)
output_2 = model_2.forward(*in_args)
restored_checkpoint = compare_output(output_1, output_2, rtol, atol)
# Delete checkpoint file (it should exist!)
Path("checkpoint.mdlus").unlink(missing_ok=False)
return loaded_checkpoint and restored_checkpoint
|
modulus-main
|
test/models/common/checkpoints.py
|
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import torch
from typing import Tuple, Union
Tensor = torch.Tensor
logger = logging.getLogger("__name__")
def dummy_loss_fn(data: Union[Tensor, Tuple[Tensor, ...]]):
"""Trivial summation loss for testing"""
# Output of tensor
if isinstance(data, torch.Tensor):
loss = data.sum()
# Output of tuple of tensors
elif isinstance(data, tuple):
        # Loop through tuple of outputs and accumulate the loss
        loss = 0
        for data_tensor in data:
            # Sum each tensor output into the total loss
            if isinstance(data_tensor, Tensor):
                loss = loss + data_tensor.sum()
else:
logger.error(
"Model returned invalid type for unit test, should be Tensor or Tuple[Tensor]"
)
loss = None
return loss
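# Hedged usage sketch: the optimization checks use this loss to drive a backward
# pass through arbitrary model outputs. Illustrative only; `model` and `in_args`
# are placeholders.
def _example_dummy_loss_step(model, in_args):
    output = model(*in_args)
    loss = dummy_loss_fn(output)
    loss.backward()
    return loss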
def compare_output(
output_1: Union[Tensor, Tuple[Tensor, ...]],
output_2: Union[Tensor, Tuple[Tensor, ...]],
rtol: float = 1e-5,
atol: float = 1e-5,
) -> bool:
"""Compares model outputs and returns if they are the same
Parameters
----------
output_1 : Union[Tensor, Tuple[Tensor, ...]]
Output one
output_2 : Union[Tensor, Tuple[Tensor, ...]]
Output two
rtol : float, optional
Relative tolerance of error allowed, by default 1e-5
atol : float, optional
Absolute tolerance of error allowed, by default 1e-5
Returns
-------
bool
If outputs are the same
"""
# Output of tensor
if isinstance(output_1, Tensor):
return torch.allclose(output_1, output_2, rtol, atol)
# Output of tuple of tensors
elif isinstance(output_1, tuple):
# Loop through tuple of outputs
for i, (out_1, out_2) in enumerate(zip(output_1, output_2)):
# If tensor use allclose
if isinstance(out_1, Tensor):
if not torch.allclose(out_1, out_2, rtol, atol):
logger.warning(f"Failed comparison between outputs {i}")
logger.warning(
f"Max Difference: {torch.amax(torch.abs(out_1 - out_2))}"
)
logger.warning(f"Difference: {out_1 - out_2}")
return False
            # Otherwise assume primitive
else:
if not out_1 == out_2:
return False
# Unsupported output type
else:
logger.error(
"Model returned invalid type for unit test, should be Tensor or Tuple[Tensor]"
)
return False
return True
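# Hedged examples of the comparison semantics (values are illustrative only):
#
#     >>> compare_output(torch.ones(2), torch.ones(2))
#     True
#     >>> compare_output((torch.ones(2), 3), (torch.ones(2), 3))
#     True
#     >>> compare_output(torch.zeros(2), torch.ones(2))
#     False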
|
modulus-main
|
test/models/common/utils.py
|
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import modulus
import torch
import onnx
import pytest
try:
import onnxruntime as ort
except ImportError:
ort = None
from typing import Tuple
from pathlib import Path
from modulus.deploy.onnx import export_to_onnx_stream, run_onnx_inference
from .utils import compare_output
Tensor = torch.Tensor
logger = logging.getLogger("__name__")
def check_ort_version():
if ort is None:
return pytest.mark.skipif(
True,
reason="Proper ONNX runtime is not installed. 'pip install onnxruntime onnxruntime_gpu'",
)
elif ort.__version__ != "1.15.1":
return pytest.mark.skipif(
True,
reason="Must install custom ORT 1.15.1. Other versions do not work \
due to bug in IRFFT: https://github.com/microsoft/onnxruntime/issues/13236",
)
else:
return pytest.mark.skipif(False, reason="")
@torch.no_grad()
def validate_onnx_export(
model: modulus.Module,
in_args: Tuple[Tensor] = (),
) -> bool:
"""Check network's ONNX export works
    This just saves an ONNX export, loads it back into Python, and makes sure it is valid.
Parameters
----------
    model : modulus.Module
        Modulus model to export
in_args : Tuple[Tensor], optional
Input arguments, by default ()
Returns
-------
bool
Test passed
Note
----
ONNX must be turned on in the model's meta data for this test to run.
"""
if not model.meta.onnx_cpu and str(model.device) == "cpu":
logger.warning("Model not marked as supporting ONNX CPU, skipping")
return True
elif not model.meta.onnx_gpu:
logger.warning("Model not marked as supporting ONNX GPU, skipping")
return True
onnx_name = "model.onnx"
device = model.device
# Turn on eval mode for model and move it to CPU for export
model = model.eval().cpu()
onnx_in_args = tuple([arg.cpu() for arg in in_args])
torch.onnx.export(
model.cpu(),
onnx_in_args,
onnx_name,
operator_export_type=torch.onnx.OperatorExportTypes.ONNX,
opset_version=15,
verbose=False,
)
# Load back into python from file
onnx_model = onnx.load(onnx_name)
model = model.to(device)
# Check that the model is well formed
try:
onnx.checker.check_model(onnx_model)
        # Delete ONNX file (it should exist!)
Path(onnx_name).unlink(missing_ok=False)
return True
except onnx.checker.ValidationError as e:
logger.error("Loaded ONNX model is not well formed: %s" % e)
        # Delete ONNX file (it should exist!)
Path(onnx_name).unlink(missing_ok=False)
return False
@torch.no_grad()
def validate_onnx_runtime(
model: modulus.Module,
in_args: Tuple[Tensor, ...] = (),
rtol: float = 1e-3,
atol: float = 1e-3,
) -> bool:
"""Check network's ONNX export is consistent with PyTorch forward pass using onnxruntime
    This test will check to make sure that ONNX can export a model. It will then execute
    a forward pass of the provided PyTorch model as well as the ONNX version using an
    onnxruntime session. It will then check that the outputs are the same.
Parameters
----------
    model : modulus.Module
        Modulus model to export and run
in_args : Tuple[Tensor], optional
Input arguments, by default ()
rtol : float, optional
Relative tolerance of error allowed, by default 1e-3
atol : float, optional
Absolute tolerance of error allowed, by default 1e-3
Returns
-------
bool
Test passed
Note
----
ONNX runtime must be turned on in the model's meta data for this test to run.
"""
if ort is None:
logger.warning("ONNX runtime not installed, skipping")
return True
if not model.meta.onnx_runtime:
logger.warning("Model not marked as supporting ONNX runtime, skipping")
return True
if not model.meta.onnx_cpu and str(model.device) == "cpu":
logger.warning("Model not marked as supporting ONNX CPU, skipping")
return True
elif not model.meta.onnx_gpu:
logger.warning("Model not marked as supporting ONNX GPU, skipping")
return True
# Now test regular forward pass
output = model.forward(*in_args)
if isinstance(output, Tensor):
output = (output,)
# Test ONNX forward
device = model.device
onnx_model = export_to_onnx_stream(model, in_args)
output_ort = run_onnx_inference(onnx_model, in_args, device=device)
output_ort = tuple(output.to(device) for output in output_ort)
    # Compare PyTorch and ONNX runtime outputs
return compare_output(output, output_ort, rtol, atol)
|
modulus-main
|
test/models/common/inference.py
|
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
import numpy as np
from utils import get_icosphere_path, fix_random_seeds
from modulus.models.graphcast.graph_cast_net import GraphCastNet
def test_cugraphops(num_channels=2, res_h=21, res_w=10):
"""Test cugraphops"""
icosphere_path = get_icosphere_path()
# Fix random seeds
fix_random_seeds()
# Random input
x = torch.randn(1, num_channels, res_h, res_w, device="cuda")
x_dgl = x.clone().detach()
for concat_trick in [False, True]:
for recomp_act in [False, True]:
# Fix random seeds
torch.manual_seed(0)
torch.cuda.manual_seed(0)
np.random.seed(0)
model = GraphCastNet(
meshgraph_path=icosphere_path,
static_dataset_path=None,
input_res=(res_h, res_w),
input_dim_grid_nodes=num_channels,
input_dim_mesh_nodes=3,
input_dim_edges=4,
output_dim_grid_nodes=num_channels,
processor_layers=3,
hidden_dim=4,
do_concat_trick=concat_trick,
use_cugraphops_decoder=True,
use_cugraphops_encoder=True,
use_cugraphops_processor=True,
recompute_activation=recomp_act,
).to("cuda")
# Fix random seeds again
fix_random_seeds()
model_dgl = GraphCastNet(
meshgraph_path=icosphere_path,
static_dataset_path=None,
input_res=(res_h, res_w),
input_dim_grid_nodes=num_channels,
input_dim_mesh_nodes=3,
input_dim_edges=4,
output_dim_grid_nodes=num_channels,
processor_layers=3,
hidden_dim=4,
do_concat_trick=concat_trick,
use_cugraphops_decoder=False,
use_cugraphops_encoder=False,
use_cugraphops_processor=False,
recompute_activation=False,
).to("cuda")
# Forward pass without checkpointing
x.requires_grad_()
y_pred = model(x)
loss = y_pred.sum()
loss.backward()
x_grad = x.grad
x_dgl.requires_grad_()
y_pred_dgl = model_dgl(x_dgl)
loss_dgl = y_pred_dgl.sum()
loss_dgl.backward()
x_grad_dgl = x_dgl.grad
# Check that the results are the same
assert torch.allclose(
y_pred_dgl, y_pred, atol=1.0e-6
), "testing DGL against cugraph-ops: outputs do not match!"
assert torch.allclose(
x_grad_dgl, x_grad, atol=1.0e-4, rtol=1.0e-3
), "testing DGL against cugraph-ops: gradients do not match!"
if __name__ == "__main__":
test_cugraphops()
|
modulus-main
|
test/models/graphcast/test_cugraphops.py
|
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys, os
script_path = os.path.abspath(__file__)
sys.path.append(os.path.join(os.path.dirname(script_path), ".."))
import pytest
from utils import fix_random_seeds, create_random_input
import common
from utils import get_icosphere_path
from modulus.models.graphcast.graph_cast_net import GraphCastNet
icosphere_path = get_icosphere_path()
@pytest.mark.parametrize("device", ["cuda:0", "cpu"])
def test_graphcast_forward(device, num_channels=2, res_h=10, res_w=20):
"""Test graphcast forward pass"""
model_kwds = {
"meshgraph_path": icosphere_path,
"static_dataset_path": None,
"input_res": (res_h, res_w),
"input_dim_grid_nodes": num_channels,
"input_dim_mesh_nodes": 3,
"input_dim_edges": 4,
"output_dim_grid_nodes": num_channels,
"processor_layers": 3,
"hidden_dim": 4,
"do_concat_trick": True,
}
fix_random_seeds()
x = create_random_input(model_kwds["input_res"], model_kwds["input_dim_grid_nodes"])
x = x.to(device)
# Construct graphcast model
model = GraphCastNet(**model_kwds).to(device)
assert common.validate_forward_accuracy(model, (x,), rtol=1e-2)
@pytest.mark.parametrize("device", ["cuda:0", "cpu"])
def test_graphcast_constructor(
device, num_channels_1=2, num_channels_2=3, res_h=10, res_w=20
):
"""Test graphcast constructor options"""
# Define dictionary of constructor args
arg_list = [
{
"meshgraph_path": icosphere_path,
"static_dataset_path": None,
"input_res": (res_h, res_w),
"input_dim_grid_nodes": num_channels_1,
"input_dim_mesh_nodes": 3,
"input_dim_edges": 4,
"output_dim_grid_nodes": num_channels_1,
"processor_layers": 3,
"hidden_dim": 4,
"do_concat_trick": True,
},
{
"meshgraph_path": icosphere_path,
"static_dataset_path": None,
"input_res": (res_h, res_w),
"input_dim_grid_nodes": num_channels_2,
"input_dim_mesh_nodes": 3,
"input_dim_edges": 4,
"output_dim_grid_nodes": num_channels_2,
"processor_layers": 4,
"hidden_dim": 8,
"do_concat_trick": False,
},
]
for kw_args in arg_list:
# Construct GraphCast model
model = GraphCastNet(**kw_args).to(device)
x = create_random_input(
kw_args["input_res"], kw_args["input_dim_grid_nodes"]
).to(device)
outvar = model(x)
assert outvar.shape == (
1,
kw_args["output_dim_grid_nodes"],
*kw_args["input_res"],
)
@pytest.mark.parametrize("device", ["cuda:0", "cpu"])
def test_GraphCast_optims(device, num_channels=2, res_h=10, res_w=20):
"""Test GraphCast optimizations"""
def setup_model():
"""Set up fresh model and inputs for each optim test"""
model_kwds = {
"meshgraph_path": icosphere_path,
"static_dataset_path": None,
"input_res": (res_h, res_w),
"input_dim_grid_nodes": num_channels,
"input_dim_mesh_nodes": 3,
"input_dim_edges": 4,
"output_dim_grid_nodes": num_channels,
"processor_layers": 3,
"hidden_dim": 2,
"do_concat_trick": True,
}
fix_random_seeds()
x = create_random_input(
model_kwds["input_res"], model_kwds["input_dim_grid_nodes"]
)
x = x.to(device)
# Construct GraphCast model
model = GraphCastNet(**model_kwds).to(device)
return model, (x,)
# Ideally always check graphs first
model, invar = setup_model()
assert common.validate_cuda_graphs(model, (*invar,))
# Check JIT
model, invar = setup_model()
assert common.validate_jit(model, (*invar,))
# Check AMP
model, invar = setup_model()
assert common.validate_amp(model, (*invar,))
# Check Combo
model, invar = setup_model()
assert common.validate_combo_optims(model, (*invar,))
@pytest.mark.parametrize("device", ["cuda:0", "cpu"])
def test_graphcast_checkpoint(device, num_channels=2, res_h=10, res_w=20):
"""Test GraphCast checkpoint save/load"""
model_kwds = {
"meshgraph_path": icosphere_path,
"static_dataset_path": None,
"input_res": (res_h, res_w),
"input_dim_grid_nodes": num_channels,
"input_dim_mesh_nodes": 3,
"input_dim_edges": 4,
"output_dim_grid_nodes": num_channels,
"processor_layers": 3,
"hidden_dim": 2,
"do_concat_trick": True,
}
# Construct GraphCast model
model_1 = GraphCastNet(**model_kwds).to(device)
model_2 = GraphCastNet(**model_kwds).to(device)
x = create_random_input(model_kwds["input_res"], model_kwds["input_dim_grid_nodes"])
x = x.to(device)
assert common.validate_checkpoint(
model_1,
model_2,
(x,),
)
@common.check_ort_version()
@pytest.mark.parametrize("device", ["cuda:0", "cpu"])
def test_GraphCast_deploy(device, num_channels=2, res_h=10, res_w=20):
"""Test GraphCast deployment support"""
model_kwds = {
"meshgraph_path": icosphere_path,
"static_dataset_path": None,
"input_res": (res_h, res_w),
"input_dim_grid_nodes": num_channels,
"input_dim_mesh_nodes": 3,
"input_dim_edges": 4,
"output_dim_grid_nodes": num_channels,
"processor_layers": 3,
"hidden_dim": 2,
"do_concat_trick": True,
}
# Construct GraphCast model
model = GraphCastNet(**model_kwds).to(device)
x = create_random_input(model_kwds["input_res"], model_kwds["input_dim_grid_nodes"])
x = x.to(device)
assert common.validate_onnx_export(model, x)
assert common.validate_onnx_runtime(model, x)
|
modulus-main
|
test/models/graphcast/test_graphcast.py
|
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import dgl
import torch
import sys, os
import numpy as np
def fix_random_seeds():
"""Fix random seeds for reproducibility"""
dgl.seed(0)
np.random.seed(0)
torch.manual_seed(0)
torch.cuda.manual_seed(0)
def create_random_input(input_res, dim):
"""Create random input for testing"""
return torch.randn(1, dim, *input_res)
def get_icosphere_path():
"""Get path to icosphere mesh"""
script_path = os.path.abspath(__file__)
icosphere_path = os.path.join(os.path.dirname(script_path), "icospheres.json")
return icosphere_path
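# Example usage (a minimal sketch mirroring the tests in this directory):
#
#     fix_random_seeds()
#     x = create_random_input((10, 20), dim=2)  # tensor of shape (1, 2, 10, 20)
#     icosphere_path = get_icosphere_path()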
|
modulus-main
|
test/models/graphcast/utils.py
|
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
import pytest
from utils import fix_random_seeds, create_random_input, get_icosphere_path
from modulus.models.graphcast.graph_cast_net import GraphCastNet
@pytest.mark.parametrize("device", ["cuda:0", "cpu"])
def test_grad_checkpointing(device, num_channels=2, res_h=15, res_w=15):
"""Test gradient checkpointing"""
icosphere_path = get_icosphere_path()
# constants
model_kwds = {
"meshgraph_path": icosphere_path,
"static_dataset_path": None,
"input_res": (res_h, res_w),
"input_dim_grid_nodes": num_channels,
"input_dim_mesh_nodes": 3,
"input_dim_edges": 4,
"output_dim_grid_nodes": num_channels,
"processor_layers": 3,
"hidden_dim": 4,
"do_concat_trick": True,
}
num_steps = 2
# Fix random seeds
fix_random_seeds()
# Random input
x = create_random_input(
model_kwds["input_res"], model_kwds["input_dim_grid_nodes"]
).to(device)
# Instantiate the model
model = GraphCastNet(**model_kwds).to(device)
# Set gradient checkpointing
model.set_checkpoint_model(False)
model.set_checkpoint_encoder(True)
model.set_checkpoint_processor(2)
model.set_checkpoint_decoder(True)
# Forward pass with checkpointing
y_pred_checkpointed = x
for i in range(num_steps):
y_pred_checkpointed = model(y_pred_checkpointed)
# dummy loss
loss = y_pred_checkpointed.sum()
# compute gradients
loss.backward()
computed_grads_checkpointed = {}
for name, param in model.named_parameters():
computed_grads_checkpointed[name] = param.grad.clone()
# Fix random seeds
fix_random_seeds()
# Random input
x = create_random_input(
model_kwds["input_res"], model_kwds["input_dim_grid_nodes"]
).to(device)
# Instantiate the model
model = GraphCastNet(**model_kwds).to(device)
# Set gradient checkpointing
model.set_checkpoint_model(False)
model.set_checkpoint_encoder(False)
model.set_checkpoint_processor(1)
model.set_checkpoint_decoder(False)
# Forward pass without checkpointing
y_pred = x
for i in range(num_steps):
y_pred = model(y_pred)
# dummy loss
loss = y_pred.sum()
# compute gradients
loss.backward()
computed_grads = {}
for name, param in model.named_parameters():
computed_grads[name] = param.grad.clone()
# Compare the gradients
for name in computed_grads:
assert torch.allclose(
computed_grads_checkpointed[name], computed_grads[name]
), "Gradients do not match. Checkpointing failed!"
# Check that the results are the same
assert torch.allclose(
y_pred_checkpointed, y_pred
), "Outputs do not match. Checkpointing failed!"
|
modulus-main
|
test/models/graphcast/test_grad_checkpointing.py
|
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
import numpy as np
from utils import get_icosphere_path, fix_random_seeds
from modulus.models.graphcast.graph_cast_net import GraphCastNet
icosphere_path = get_icosphere_path()
def test_concat_trick(num_channels=2, res_h=11, res_w=20):
"""Test concat trick"""
# Fix random seeds
fix_random_seeds()
# Random input
device = "cuda"
x = torch.rand(1, num_channels, res_h, res_w, device=device)
x_ct = x.clone().detach()
for recomp_act in [False, True]:
# Fix random seeds
torch.manual_seed(0)
torch.cuda.manual_seed(0)
np.random.seed(0)
# Instantiate the model
model = GraphCastNet(
meshgraph_path=icosphere_path,
static_dataset_path=None,
input_res=(res_h, res_w),
input_dim_grid_nodes=num_channels,
input_dim_mesh_nodes=3,
input_dim_edges=4,
output_dim_grid_nodes=num_channels,
processor_layers=3,
hidden_dim=4,
do_concat_trick=False,
recompute_activation=False,
).to("cuda")
# Fix random seeds again
fix_random_seeds()
# Instantiate the model with concat trick enabled
model_ct = GraphCastNet(
meshgraph_path=icosphere_path,
static_dataset_path=None,
input_res=(res_h, res_w),
input_dim_grid_nodes=num_channels,
input_dim_mesh_nodes=3,
input_dim_edges=4,
output_dim_grid_nodes=num_channels,
processor_layers=3,
hidden_dim=4,
do_concat_trick=True,
recompute_activation=recomp_act,
).to(device)
# Forward pass without checkpointing
x.requires_grad_()
y_pred = model(x)
loss = y_pred.sum()
loss.backward()
x_grad = x.grad
x_ct.requires_grad_()
y_pred_ct = model_ct(x_ct)
loss_ct = y_pred_ct.sum()
loss_ct.backward()
x_grad_ct = x_ct.grad
# Check that the results are the same
# tolerances quite large on GPU
assert torch.allclose(
y_pred_ct,
y_pred,
atol=5.0e-3,
), "Concat trick failed, outputs do not match!"
assert torch.allclose(
x_grad_ct,
x_grad,
atol=1.0e-2,
), "Concat trick failed, gradients do not match!"
if __name__ == "__main__":
test_concat_trick()
|
modulus-main
|
test/models/graphcast/test_concat_trick.py
|
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
import pytest
import random
import dgl
import numpy as np
import os, sys
script_path = os.path.abspath(__file__)
sys.path.append(os.path.join(os.path.dirname(script_path), ".."))
from modulus.models.meshgraphnet import MeshGraphNet
import common
@pytest.mark.parametrize("device", ["cuda:0", "cpu"])
def test_meshgraphnet_forward(device):
"""Test mehsgraphnet forward pass"""
torch.manual_seed(0)
dgl.seed(0)
np.random.seed(0)
# Construct MGN model
model = MeshGraphNet(
input_dim_nodes=4,
input_dim_edges=3,
output_dim=2,
).to(device)
bsize = 2
num_nodes, num_edges = 20, 10
# NOTE dgl's random graph generator does not behave consistently even after fixing dgl's random seed.
# Instead, numpy adj matrices are created in COO format and are then converted to dgl graphs.
graphs = []
for _ in range(bsize):
src = torch.tensor([np.random.randint(num_nodes) for _ in range(num_edges)])
dst = torch.tensor([np.random.randint(num_nodes) for _ in range(num_edges)])
graphs.append(dgl.graph((src, dst)).to(device))
graph = dgl.batch(graphs)
node_features = torch.randn(graph.num_nodes(), 4).to(device)
edge_features = torch.randn(graph.num_edges(), 3).to(device)
assert common.validate_forward_accuracy(
model, (node_features, edge_features, graph)
)
@pytest.mark.parametrize("device", ["cuda:0", "cpu"])
def test_meshgraphnet_constructor(device):
"""Test meshgraphnet constructor options"""
# Define dictionary of constructor args
arg_list = [
{
"input_dim_nodes": random.randint(1, 10),
"input_dim_edges": random.randint(1, 4),
"output_dim": random.randint(1, 10),
"processor_size": random.randint(1, 15),
"num_layers_node_processor": 2,
"num_layers_edge_processor": 2,
"hidden_dim_node_encoder": 256,
"num_layers_node_encoder": 2,
"hidden_dim_edge_encoder": 256,
"num_layers_edge_encoder": 2,
"hidden_dim_node_decoder": 256,
"num_layers_node_decoder": 2,
},
{
"input_dim_nodes": random.randint(1, 5),
"input_dim_edges": random.randint(1, 8),
"output_dim": random.randint(1, 5),
"processor_size": random.randint(1, 15),
"num_layers_node_processor": 1,
"num_layers_edge_processor": 1,
"hidden_dim_node_encoder": 128,
"num_layers_node_encoder": 1,
"hidden_dim_edge_encoder": 128,
"num_layers_edge_encoder": 1,
"hidden_dim_node_decoder": 128,
"num_layers_node_decoder": 1,
},
]
for kw_args in arg_list:
# Construct meshgraphnet model
model = MeshGraphNet(**kw_args).to(device)
bsize = random.randint(1, 16)
num_nodes, num_edges = random.randint(10, 25), random.randint(10, 20)
graph = dgl.batch(
[dgl.rand_graph(num_nodes, num_edges).to(device) for _ in range(bsize)]
)
node_features = torch.randn(bsize * num_nodes, kw_args["input_dim_nodes"]).to(
device
)
edge_features = torch.randn(bsize * num_edges, kw_args["input_dim_edges"]).to(
device
)
outvar = model(node_features, edge_features, graph)
assert outvar.shape == (bsize * num_nodes, kw_args["output_dim"])
@pytest.mark.parametrize("device", ["cuda:0", "cpu"])
def test_meshgraphnet_optims(device):
"""Test meshgraphnet optimizations"""
def setup_model():
"""Set up fresh model and inputs for each optim test"""
# Construct MGN model
model = MeshGraphNet(
input_dim_nodes=2,
input_dim_edges=2,
output_dim=2,
).to(device)
bsize = random.randint(1, 8)
num_nodes, num_edges = random.randint(15, 30), random.randint(15, 25)
graph = dgl.batch(
[dgl.rand_graph(num_nodes, num_edges).to(device) for _ in range(bsize)]
)
node_features = torch.randn(bsize * num_nodes, 2).to(device)
edge_features = torch.randn(bsize * num_edges, 2).to(device)
return model, [node_features, edge_features, graph]
# Ideally always check graphs first
model, invar = setup_model()
assert common.validate_cuda_graphs(model, (*invar,))
# Check JIT
model, invar = setup_model()
assert common.validate_jit(model, (*invar,))
# Check AMP
model, invar = setup_model()
assert common.validate_amp(model, (*invar,))
# Check Combo
model, invar = setup_model()
assert common.validate_combo_optims(model, (*invar,))
@pytest.mark.parametrize("device", ["cuda:0", "cpu"])
def test_meshgraphnet_checkpoint(device):
"""Test meshgraphnet checkpoint save/load"""
# Construct MGN model
model_1 = MeshGraphNet(
input_dim_nodes=4,
input_dim_edges=3,
output_dim=4,
).to(device)
model_2 = MeshGraphNet(
input_dim_nodes=4,
input_dim_edges=3,
output_dim=4,
).to(device)
bsize = random.randint(1, 8)
num_nodes, num_edges = random.randint(5, 15), random.randint(10, 25)
graph = dgl.batch(
[dgl.rand_graph(num_nodes, num_edges).to(device) for _ in range(bsize)]
)
node_features = torch.randn(bsize * num_nodes, 4).to(device)
edge_features = torch.randn(bsize * num_edges, 3).to(device)
assert common.validate_checkpoint(
model_1,
model_2,
(
node_features,
edge_features,
graph,
),
)
@common.check_ort_version()
@pytest.mark.parametrize("device", ["cuda:0", "cpu"])
def test_meshgraphnet_deploy(device):
"""Test mesh-graph net deployment support"""
# Construct MGN model
model = MeshGraphNet(
input_dim_nodes=4,
input_dim_edges=3,
output_dim=4,
).to(device)
bsize = random.randint(1, 8)
num_nodes, num_edges = random.randint(5, 10), random.randint(10, 15)
graph = dgl.batch(
[dgl.rand_graph(num_nodes, num_edges).to(device) for _ in range(bsize)]
)
node_features = torch.randn(bsize * num_nodes, 4).to(device)
edge_features = torch.randn(bsize * num_edges, 3).to(device)
invar = (
node_features,
edge_features,
graph,
)
assert common.validate_onnx_export(model, invar)
assert common.validate_onnx_runtime(model, invar)
|
modulus-main
|
test/models/meshgraphnet/test_meshgraphnet.py
|
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A script to check that copyright headers exists"""
import argparse
import itertools
import re
import sys
import json
from datetime import datetime
from pathlib import Path
def get_top_comments(_data):
"""
Get all lines where comments should exist
"""
lines_to_extract = []
for i, line in enumerate(_data):
# If empty line, skip
if line in ["", "\n", "\r", "\r\n"]:
continue
# If it is a comment line, we should get it
if line.startswith("#"):
lines_to_extract.append(i)
# Assume all copyright headers occur before any import or from statements
# and not enclosed in a comment block
elif "import" in line:
break
elif "from" in line:
break
comments = []
for line in lines_to_extract:
comments.append(_data[line])
return comments
def main():
with open(Path(__file__).parent.resolve() / Path("config.json")) as f:
config = json.loads(f.read())
print(f"License check config:")
print(json.dumps(config, sort_keys=True, indent=4))
current_year = int(datetime.today().year)
starting_year = 2023
python_header_path = Path(__file__).parent.resolve() / Path(
config["copyright_file"]
)
working_path = Path(__file__).parent.resolve() / Path(config["dir"])
exts = config["include-ext"]
with open(python_header_path, "r", encoding="utf-8") as original:
pyheader = original.read().split("\n")
pyheader_lines = len(pyheader)
# Build list of files to check
exclude_paths = [
(Path(__file__).parent / Path(path)).resolve().rglob("*")
for path in config["exclude-dir"]
]
all_exclude_paths = itertools.chain.from_iterable(exclude_paths)
exclude_filenames = [p for p in all_exclude_paths if p.suffix in exts]
filenames = [p for p in working_path.resolve().rglob("*") if p.suffix in exts]
filenames = [
filename for filename in filenames if filename not in exclude_filenames
]
problematic_files = []
gpl_files = []
for filename in filenames:
with open(str(filename), "r", encoding="utf-8") as original:
data = original.readlines()
data = get_top_comments(data)
if data and "# ignore_header_test" in data[0]:
continue
if len(data) < pyheader_lines - 1:
print(f"{filename} has less header lines than the copyright template")
problematic_files.append(filename)
continue
found = False
for i, line in enumerate(data):
if re.search(re.compile("Copyright.*NVIDIA.*", re.IGNORECASE), line):
found = True
# Check 1st line manually
year_good = False
for year in range(starting_year, current_year + 1):
year_line = pyheader[0].format(CURRENT_YEAR=year)
if year_line in data[i]:
year_good = True
break
year_line_aff = year_line.split(".")
year_line_aff = (
year_line_aff[0] + " & AFFILIATES." + year_line_aff[1]
)
if year_line_aff in data[i]:
year_good = True
break
if not year_good:
problematic_files.append(filename)
print(f"{filename} had an error with the year")
break
# while "opyright" in data[i]:
# i += 1
# for j in range(1, pyheader_lines):
# if pyheader[j] not in data[i + j - 1]:
# problematic_files.append(filename)
# print(f"{filename} missed the line: {pyheader[j]}")
# break
if found:
break
if not found:
print(f"{filename} did not match the regex: `Copyright.*NVIDIA.*`")
problematic_files.append(filename)
# test if GPL license exists
for lines in data:
if "gpl" in lines.lower():
gpl_files.append(filename)
break
if len(problematic_files) > 0:
print(
"test_header.py found the following files that might not have a copyright header:"
)
for _file in problematic_files:
print(_file)
if len(gpl_files) > 0:
print("test_header.py found the following files that might have GPL copyright:")
for _file in gpl_files:
print(_file)
assert len(problematic_files) == 0, "header test failed!"
assert len(gpl_files) == 0, "found gpl license, header test failed!"
print("Success: File headers look good!")
if __name__ == "__main__":
main()
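# For reference, the config.json consumed above might look like the following.
# This is an illustrative sketch only; the keys match the reads in main(), but
# the values are hypothetical:
#
# {
#     "copyright_file": "copyright.txt",
#     "dir": "../../modulus",
#     "include-ext": [".py"],
#     "exclude-dir": ["../../external"]
# }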
|
modulus-main
|
test/ci_tests/header_check.py
|
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
import torch
from typing import Tuple
from modulus.datapipes.benchmarks.kelvin_helmholtz import KelvinHelmholtz2D
from . import common
Tensor = torch.Tensor
@pytest.mark.parametrize("device", ["cuda:0", "cpu"])
def test_kelvin_helmholtz_2d_constructor(device):
# construct data pipe
datapipe = KelvinHelmholtz2D(
resolution=32,
batch_size=1,
seq_length=2,
nr_perturbation_freq=5,
perturbation_range=0.1,
nr_snapshots=4,
iteration_per_snapshot=8,
gamma=5.0 / 3.0,
normaliser={"density": (0, 1), "velocity": (0, 1), "pressure": (0, 1)},
device=device,
)
# check that datapipe is iterable
assert common.check_datapipe_iterable(datapipe)
@pytest.mark.parametrize("device", ["cuda:0", "cpu"])
def test_kelvin_helmholtz_2d_device(device):
# construct data pipe
datapipe = KelvinHelmholtz2D(
resolution=32,
batch_size=1,
seq_length=2,
nr_perturbation_freq=5,
perturbation_range=0.1,
nr_snapshots=4,
iteration_per_snapshot=32,
gamma=5.0 / 3.0,
normaliser={"density": (0, 1), "velocity": (0, 1), "pressure": (0, 1)},
device=device,
)
# check that samples are on the correct device
for data in datapipe:
assert common.check_datapipe_device(data["density"], device)
assert common.check_datapipe_device(data["velocity"], device)
assert common.check_datapipe_device(data["pressure"], device)
break
@pytest.mark.parametrize("resolution", [32, 64])
@pytest.mark.parametrize("batch_size", [1, 2, 3])
@pytest.mark.parametrize("seq_length", [2, 3])
@pytest.mark.parametrize("device", ["cuda:0", "cpu"])
def test_kelvin_helmholtz_2d_shape(resolution, batch_size, seq_length, device):
# construct data pipe
datapipe = KelvinHelmholtz2D(
resolution=resolution,
batch_size=batch_size,
seq_length=seq_length,
nr_perturbation_freq=5,
perturbation_range=0.1,
nr_snapshots=4,
iteration_per_snapshot=8,
gamma=5.0 / 3.0,
normaliser={"density": (0, 1), "velocity": (0, 1), "pressure": (0, 1)},
device=device,
)
# test single sample
for data in datapipe:
rho = data["density"]
vel = data["velocity"]
p = data["pressure"]
# check batch size
assert common.check_batch_size([rho, vel, p], batch_size)
# check sequence length
assert common.check_seq_length([rho, vel, p], seq_length, axis=1)
# check channels
assert common.check_channels([rho, p], 1, axis=2)
assert common.check_channels(vel, 2, axis=2)
# check grid dims
assert common.check_grid([rho, vel, p], (resolution, resolution), axis=(3, 4))
break
@pytest.mark.parametrize("device", ["cuda:0"])
def test_kelvin_helmholtz_cudagraphs(device):
# Preprocess function to convert dataloader output into Tuple of tensors
def input_fn(data) -> Tuple[Tensor, ...]:
return (data["density"], data["velocity"], data["pressure"])
# construct data pipe
datapipe = KelvinHelmholtz2D(
resolution=32,
batch_size=1,
seq_length=2,
nr_perturbation_freq=5,
perturbation_range=0.1,
nr_snapshots=4,
iteration_per_snapshot=8,
gamma=5.0 / 3.0,
normaliser={"density": (0, 1), "velocity": (0, 1), "pressure": (0, 1)},
device=device,
)
assert common.check_cuda_graphs(datapipe, input_fn)
|
modulus-main
|
test/datapipes/test_kelvin_helmholtz.py
|
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
import torch
from typing import Tuple
from modulus.experimental.datapipes.climate import ClimateHDF5Datapipe
from . import common
Tensor = torch.Tensor
@pytest.fixture
def data_dir():
return "/data/nfs/modulus-data/datasets/hdf5/test/"
@pytest.fixture
def stats_dir():
return "/data/nfs/modulus-data/datasets/hdf5/stats/"
@pytest.fixture
def lsm_filename():
return "/data/nfs/modulus-data/datasets/hdf5/static/land_sea_mask.nc"
@pytest.fixture
def geopotential_filename():
return "/data/nfs/modulus-data/datasets/hdf5/static/geopotential.nc"
@pytest.mark.parametrize("device", ["cuda:0", "cpu"])
def test_climate_hdf5_constructor(
data_dir, stats_dir, lsm_filename, geopotential_filename, device
):
# construct data pipe
datapipe = ClimateHDF5Datapipe(
data_dir=data_dir,
stats_dir=stats_dir,
channels=None,
stride=1,
dt=1,
start_year=2018,
num_steps=1,
patch_size=8,
num_samples_per_year=None,
batch_size=1,
lsm_filename=lsm_filename,
geopotential_filename=geopotential_filename,
use_cos_zenith=True,
use_latlon=True,
num_workers=1,
shuffle=False,
device=torch.device(device),
)
# check that datapipe is iterable
assert common.check_datapipe_iterable(datapipe)
# check for failure from invalid dir
try:
# init datapipe with empty path
# if datapipe throws an IO error then this should pass
datapipe = ClimateHDF5Datapipe(
data_dir="/null_path",
stats_dir=stats_dir,
channels=None,
stride=1,
dt=1,
start_year=2018,
num_steps=1,
patch_size=8,
num_samples_per_year=None,
batch_size=1,
lsm_filename=lsm_filename,
geopotential_filename=geopotential_filename,
use_cos_zenith=True,
use_latlon=True,
num_workers=1,
shuffle=False,
device=torch.device(device),
)
raise IOError("Failed to raise error given null data path")
except IOError:
pass
# check for failure from invalid dir
try:
# init datapipe with empty path
# if datapipe throws an IO error then this should pass
datapipe = ClimateHDF5Datapipe(
data_dir=data_dir,
stats_dir="/null_path",
channels=None,
stride=1,
dt=1,
start_year=2018,
num_steps=1,
patch_size=8,
num_samples_per_year=None,
batch_size=1,
lsm_filename=lsm_filename,
geopotential_filename=geopotential_filename,
use_cos_zenith=True,
use_latlon=True,
num_workers=1,
shuffle=False,
device=torch.device(device),
)
raise IOError("Failed to raise error given null stats path")
except IOError:
pass
# check for failure from invalid num_samples_per_year
try:
datapipe = ClimateHDF5Datapipe(
data_dir=data_dir,
stats_dir=stats_dir,
channels=None,
stride=1,
dt=1,
start_year=2018,
num_steps=1,
patch_size=8,
num_samples_per_year=100,
batch_size=1,
lsm_filename=lsm_filename,
geopotential_filename=geopotential_filename,
use_cos_zenith=True,
use_latlon=True,
num_workers=1,
shuffle=False,
device=torch.device(device),
)
raise ValueError("Failed to raise error given invalid num_samples_per_year")
except ValueError:
pass
# check invalid channel
try:
datapipe = ClimateHDF5Datapipe(
data_dir=data_dir,
stats_dir=stats_dir,
channels=[20],
stride=1,
dt=1,
start_year=2018,
num_steps=1,
patch_size=8,
num_samples_per_year=None,
batch_size=1,
lsm_filename=lsm_filename,
geopotential_filename=geopotential_filename,
use_cos_zenith=True,
use_latlon=True,
num_workers=1,
shuffle=False,
device=torch.device(device),
)
raise ValueError("Failed to raise error given invalid channel id")
except ValueError:
pass
@pytest.mark.parametrize("device", ["cuda:0", "cpu"])
def test_climate_hdf5_device(
data_dir, stats_dir, lsm_filename, geopotential_filename, device
):
# construct data pipe
datapipe = ClimateHDF5Datapipe(
data_dir=data_dir,
stats_dir=stats_dir,
channels=None,
stride=1,
dt=1,
start_year=2018,
num_steps=1,
patch_size=8,
num_samples_per_year=None,
batch_size=1,
lsm_filename=lsm_filename,
geopotential_filename=geopotential_filename,
use_cos_zenith=True,
use_latlon=True,
num_workers=1,
shuffle=False,
device=torch.device(device),
)
# test single sample
for data in datapipe:
common.check_datapipe_device(data[0]["state_seq"], device)
common.check_datapipe_device(data[0]["timestamps"], device)
common.check_datapipe_device(data[0]["land_sea_mask"], device)
common.check_datapipe_device(data[0]["geopotential"], device)
common.check_datapipe_device(data[0]["latlon"], device)
common.check_datapipe_device(data[0]["cos_latlon"], device)
common.check_datapipe_device(data[0]["cos_zenith"], device)
break
@pytest.mark.parametrize("data_channels", [[0, 1]])
@pytest.mark.parametrize("num_steps", [2])
@pytest.mark.parametrize("patch_size", [None])
@pytest.mark.parametrize("batch_size", [1, 2, 3])
@pytest.mark.parametrize("device", ["cuda:0", "cpu"])
def test_climate_hdf5_shape(
data_dir,
stats_dir,
lsm_filename,
geopotential_filename,
data_channels,
num_steps,
patch_size,
batch_size,
device,
):
# construct data pipe
datapipe = ClimateHDF5Datapipe(
data_dir=data_dir,
stats_dir=stats_dir,
channels=data_channels,
stride=1,
dt=1,
start_year=2018,
num_steps=num_steps,
patch_size=patch_size,
num_samples_per_year=None,
batch_size=batch_size,
lsm_filename=lsm_filename,
geopotential_filename=geopotential_filename,
use_cos_zenith=True,
use_latlon=True,
num_workers=1,
shuffle=False,
device=torch.device(device),
)
# test single sample
for data in datapipe:
state_seq = data[0]["state_seq"]
timestamps = data[0]["timestamps"]
land_sea_mask = data[0]["land_sea_mask"]
geopotential = data[0]["geopotential"]
latlon = data[0]["latlon"]
cos_latlon = data[0]["cos_latlon"]
cos_zenith = data[0]["cos_zenith"]
# check batch size
assert common.check_batch_size(
[
state_seq,
timestamps,
land_sea_mask,
geopotential,
latlon,
cos_latlon,
cos_zenith,
],
batch_size,
)
# check seq length
assert common.check_seq_length([state_seq, timestamps, cos_zenith], num_steps)
# check channels
if data_channels is None:
nr_channels = 3
else:
nr_channels = len(data_channels)
assert common.check_channels(state_seq, nr_channels, axis=2)
# check grid dims
if patch_size is None:
patch_size = (721, 1440)
assert common.check_grid([state_seq, cos_zenith], patch_size, axis=(3, 4))
assert common.check_grid(
[land_sea_mask, geopotential, latlon, cos_latlon], patch_size, axis=(2, 3)
)
break
@pytest.mark.parametrize("num_steps", [1, 2])
@pytest.mark.parametrize("stride", [1, 3])
@pytest.mark.parametrize("device", ["cuda:0", "cpu"])
def test_climate_hdf5_sequence(
data_dir, stats_dir, lsm_filename, geopotential_filename, num_steps, stride, device
):
# construct data pipe
datapipe = ClimateHDF5Datapipe(
data_dir=data_dir,
stats_dir=stats_dir,
channels=None,
stride=stride,
dt=1,
start_year=2018,
num_steps=num_steps,
patch_size=None,
num_samples_per_year=None,
batch_size=1,
lsm_filename=lsm_filename,
geopotential_filename=geopotential_filename,
use_cos_zenith=True,
use_latlon=True,
num_workers=1,
shuffle=False,
device=torch.device(device),
)
# get single sample
# TODO generalize tests for sequence type datapipes
for data in datapipe:
state_seq = data[0]["state_seq"]
break
# check if tensor has correct sequence
assert common.check_sequence(
state_seq, start_index=0, step_size=stride, seq_length=num_steps, axis=1
)
@pytest.mark.parametrize("shuffle", [True, False])
@pytest.mark.parametrize("stride", [1, 3])
@pytest.mark.parametrize("device", ["cuda:0", "cpu"])
def test_climate_hdf5_shuffle(
data_dir, stats_dir, lsm_filename, geopotential_filename, shuffle, stride, device
):
# construct data pipe
datapipe = ClimateHDF5Datapipe(
data_dir=data_dir,
stats_dir=stats_dir,
channels=None,
stride=stride,
dt=1,
start_year=2018,
num_steps=2,
patch_size=None,
num_samples_per_year=None,
batch_size=1,
lsm_filename=lsm_filename,
geopotential_filename=geopotential_filename,
use_cos_zenith=True,
use_latlon=True,
num_workers=1,
shuffle=shuffle,
device=torch.device(device),
)
# get all samples
# TODO generalize this
tensors = []
for data in datapipe:
tensors.append(data[0]["state_seq"])
# check sample order
assert common.check_shuffle(tensors, shuffle, stride, 8)
@pytest.mark.parametrize("device", ["cuda:0"])
def test_climate_hdf5_cudagraphs(
data_dir, stats_dir, lsm_filename, geopotential_filename, device
):
# Preprocess function to convert dataloader output into Tuple of tensors
def input_fn(data) -> Tensor:
return data[0]["state_seq"]
# construct data pipe
datapipe = ClimateHDF5Datapipe(
data_dir=data_dir,
stats_dir=stats_dir,
channels=None,
stride=1,
dt=1,
start_year=2018,
num_steps=1,
patch_size=None,
num_samples_per_year=None,
batch_size=1,
lsm_filename=lsm_filename,
geopotential_filename=geopotential_filename,
use_cos_zenith=True,
use_latlon=True,
num_workers=1,
shuffle=False,
device=torch.device(device),
)
assert common.check_cuda_graphs(datapipe, input_fn)
|
modulus-main
|
test/datapipes/test_climate_hdf5.py
|
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
import torch
from typing import Tuple
from modulus.datapipes.gnn.ahmed_body_dataset import AhmedBodyDataset
from . import common
Tensor = torch.Tensor
@pytest.fixture
def data_dir():
return "/data/nfs/modulus-data/datasets/ahmed_body/"
@pytest.mark.parametrize("device", ["cuda:0", "cpu"])
def test_ahmed_body_constructor(data_dir, device):
# construct dataset
dataset = AhmedBodyDataset(
data_dir=data_dir,
split="train",
num_samples=2,
compute_drag=True,
)
# check that dataset is iterable
assert common.check_datapipe_iterable(dataset)
# check for failure from invalid dir
try:
# init dataset with empty path
# if dataset throws an IO error then this should pass
dataset = AhmedBodyDataset(
data_dir="/null_path",
split="train",
num_samples=2,
compute_drag=True,
)
raise IOError("Failed to raise error given null data path")
except IOError:
pass
# check invalid split
try:
# if dataset throws an IO error then this should pass
dataset = AhmedBodyDataset(
data_dir=data_dir,
invar_keys=[
"pos",
"normals",
"velocity",
"reynolds_number",
"length",
"width",
"height",
"ground_clearance",
"slant_angle",
"fillet_radius",
],
split="valid",
num_samples=2,
compute_drag=True,
)
raise IOError("Failed to raise error given invalid split")
except IOError:
pass
|
modulus-main
|
test/datapipes/test_ahmed_body.py
|
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
|
modulus-main
|
test/datapipes/__init__.py
|
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
import torch
from typing import Tuple
from modulus.datapipes.benchmarks.darcy import Darcy2D
from . import common
Tensor = torch.Tensor
@pytest.mark.parametrize("device", ["cuda:0", "cpu"])
def test_darcy_2d_constructor(device):
# construct data pipe
datapipe = Darcy2D(
resolution=64,
batch_size=1,
nr_permeability_freq=5,
max_permeability=2.0,
min_permeability=0.5,
max_iterations=300,
convergence_threshold=1e-4,
iterations_per_convergence_check=5,
nr_multigrids=4,
normaliser={"permeability": (0.0, 1.0), "darcy": (0.0, 1.0)},
device=device,
)
# check that datapipe is iterable
assert common.check_datapipe_iterable(datapipe)
@pytest.mark.parametrize("device", ["cuda:0", "cpu"])
def test_darcy_2d_device(device):
# construct data pipe
datapipe = Darcy2D(
resolution=64,
batch_size=1,
nr_permeability_freq=5,
max_permeability=2.0,
min_permeability=0.5,
max_iterations=300,
convergence_threshold=1e-4,
iterations_per_convergence_check=5,
nr_multigrids=4,
normaliser={"permeability": (0.0, 1.0), "darcy": (0.0, 1.0)},
device=device,
)
# check that samples are on the correct device
for data in datapipe:
assert common.check_datapipe_device(data["permeability"], device)
assert common.check_datapipe_device(data["darcy"], device)
break
@pytest.mark.parametrize("resolution", [128, 64])
@pytest.mark.parametrize("batch_size", [1, 2, 3])
@pytest.mark.parametrize("device", ["cuda:0", "cpu"])
def test_darcy_2d_shape(resolution, batch_size, device):
# construct data pipe
datapipe = Darcy2D(
resolution=resolution,
batch_size=batch_size,
nr_permeability_freq=5,
max_permeability=2.0,
min_permeability=0.5,
max_iterations=300,
convergence_threshold=1e-4,
iterations_per_convergence_check=5,
nr_multigrids=3,
normaliser={"permeability": (0.0, 1.0), "darcy": (0.0, 1.0)},
device=device,
)
# test single sample
for data in datapipe:
permeability = data["permeability"]
darcy = data["darcy"]
# check batch size
assert common.check_batch_size([permeability, darcy], batch_size)
# check channels
assert common.check_channels([permeability, darcy], 1, axis=1)
# check grid dims
assert common.check_grid(
[permeability, darcy], (resolution, resolution), axis=(2, 3)
)
break
@pytest.mark.parametrize("device", ["cuda:0"])
def test_darcy_cudagraphs(device):
# Preprocess function to convert dataloader output into Tuple of tensors
def input_fn(data) -> Tuple[Tensor, ...]:
return (data["permeability"], data["darcy"])
# construct data pipe
datapipe = Darcy2D(
resolution=64,
batch_size=1,
nr_permeability_freq=5,
max_permeability=2.0,
min_permeability=0.5,
max_iterations=300,
convergence_threshold=1e-4,
iterations_per_convergence_check=5,
nr_multigrids=4,
normaliser={"permeability": (0.0, 1.0), "darcy": (0.0, 1.0)},
device=device,
)
assert common.check_cuda_graphs(datapipe, input_fn)
|
modulus-main
|
test/datapipes/test_darcy.py
|
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
import torch
from typing import Tuple
from modulus.datapipes.climate import ERA5HDF5Datapipe
from . import common
Tensor = torch.Tensor
@pytest.fixture
def data_dir():
return "/data/nfs/modulus-data/datasets/hdf5/test/"
@pytest.fixture
def stats_dir():
return "/data/nfs/modulus-data/datasets/hdf5/stats/"
@pytest.mark.parametrize("device", ["cuda:0", "cpu"])
def test_era5_hdf5_constructor(data_dir, stats_dir, device):
# construct data pipe
datapipe = ERA5HDF5Datapipe(
data_dir=data_dir,
stats_dir=None,
channels=None,
stride=1,
num_steps=1,
patch_size=8,
num_samples_per_year=None,
batch_size=1,
num_workers=1,
shuffle=False,
device=torch.device(device),
)
# check that datapipe is iterable
assert common.check_datapipe_iterable(datapipe)
# check for failure from invalid dir
try:
# init datapipe with empty path
# if datapipe throws an IO error then this should pass
datapipe = ERA5HDF5Datapipe(
data_dir="/null_path",
stats_dir="/null_path",
channels=None,
stride=1,
num_steps=1,
patch_size=None,
num_samples_per_year=1,
batch_size=1,
num_workers=1,
shuffle=False,
device=device,
)
raise IOError("Failed to raise error given null data path")
except IOError:
pass
# check for failure from invalid dir
try:
# init datapipe with empty path
# if datapipe throws an IO error then this should pass
datapipe = ERA5HDF5Datapipe(
data_dir=data_dir,
stats_dir="/null_path",
channels=None,
stride=1,
num_steps=1,
patch_size=None,
num_samples_per_year=1,
batch_size=1,
num_workers=1,
shuffle=False,
device=device,
)
raise IOError("Failed to raise error given null stats path")
except IOError:
pass
# check for failure from invalid num_samples_per_year
try:
datapipe = ERA5HDF5Datapipe(
data_dir=data_dir,
stats_dir=stats_dir,
channels=None,
stride=1,
num_steps=1,
patch_size=None,
num_samples_per_year=100,
batch_size=1,
num_workers=1,
shuffle=False,
device=device,
)
raise ValueError("Failed to raise error given invalid num_samples_per_year")
except ValueError:
pass
# check invalid channel
try:
datapipe = ERA5HDF5Datapipe(
data_dir=data_dir,
stats_dir=stats_dir,
channels=[20],
stride=1,
num_steps=1,
patch_size=None,
num_samples_per_year=1,
batch_size=1,
num_workers=1,
shuffle=False,
device=device,
)
raise ValueError("Failed to raise error given invalid channel id")
except ValueError:
pass
@pytest.mark.parametrize("device", ["cuda:0", "cpu"])
def test_era5_hdf5_device(data_dir, stats_dir, device):
# construct data pipe
datapipe = ERA5HDF5Datapipe(
data_dir=data_dir,
stats_dir=stats_dir,
channels=None,
stride=1,
num_steps=1,
patch_size=None,
num_samples_per_year=None,
batch_size=1,
num_workers=1,
shuffle=False,
device=device,
)
# test single sample
for data in datapipe:
common.check_datapipe_device(data[0]["invar"], device)
common.check_datapipe_device(data[0]["outvar"], device)
break
@pytest.mark.parametrize("data_channels", [[0, 1]])
@pytest.mark.parametrize("num_steps", [2])
@pytest.mark.parametrize("patch_size", [None])
@pytest.mark.parametrize("batch_size", [1, 2, 3])
@pytest.mark.parametrize("device", ["cuda:0", "cpu"])
def test_era5_hdf5_shape(
data_dir, stats_dir, data_channels, num_steps, patch_size, batch_size, device
):
# construct data pipe
datapipe = ERA5HDF5Datapipe(
data_dir=data_dir,
stats_dir=stats_dir,
channels=data_channels,
stride=1,
num_steps=num_steps,
patch_size=patch_size,
num_samples_per_year=None,
batch_size=batch_size,
num_workers=1,
shuffle=False,
device=device,
)
# test single sample
for data in datapipe:
input = data[0]["invar"]
output = data[0]["outvar"]
# check batch size
assert common.check_batch_size([input, output], batch_size)
# check seq length
assert common.check_seq_length(output, num_steps)
# check channels
if data_channels is None:
nr_channels = 3
else:
nr_channels = len(data_channels)
assert common.check_channels(input, nr_channels, axis=1)
assert common.check_channels(output, nr_channels, axis=2)
# check grid dims
if patch_size is None:
patch_size = (721, 1440)
assert common.check_grid(input, patch_size, axis=(2, 3))
assert common.check_grid(output, patch_size, axis=(3, 4))
break
@pytest.mark.parametrize("num_steps", [1, 2])
@pytest.mark.parametrize("stride", [1, 3])
@pytest.mark.parametrize("device", ["cuda:0", "cpu"])
def test_era5_hdf5_sequence(data_dir, stats_dir, num_steps, stride, device):
# construct data pipe
datapipe = ERA5HDF5Datapipe(
data_dir=data_dir,
stats_dir=stats_dir,
channels=None,
stride=stride,
num_steps=num_steps,
patch_size=None,
num_samples_per_year=None,
batch_size=1,
num_workers=1,
shuffle=False,
device=device,
)
# get single sample
# TODO generalize tests for sequence type datapipes
for data in datapipe:
output = data[0]["outvar"]
break
# check if tensor has correct sequence
assert common.check_sequence(
output, start_index=stride, step_size=stride, seq_length=num_steps, axis=1
)
@pytest.mark.parametrize("shuffle", [True, False])
@pytest.mark.parametrize("stride", [1, 3])
@pytest.mark.parametrize("device", ["cuda:0", "cpu"])
def test_era5_hdf5_shuffle(data_dir, stats_dir, shuffle, stride, device):
# construct data pipe
datapipe = ERA5HDF5Datapipe(
data_dir=data_dir,
stats_dir=stats_dir,
channels=None,
stride=stride,
num_steps=1,
patch_size=None,
num_samples_per_year=None,
batch_size=1,
num_workers=1,
shuffle=shuffle,
device=device,
)
# get all samples
# TODO generalize this
tensors = []
for data in datapipe:
tensors.append(data[0]["invar"])
# check sample order
assert common.check_shuffle(tensors, shuffle, stride, 8)
@pytest.mark.parametrize("device", ["cuda:0"])
def test_era5_hdf5_cudagraphs(data_dir, stats_dir, device):
# Preprocess function to convert dataloader output into Tuple of tensors
def input_fn(data) -> Tuple[Tensor, ...]:
return (data[0]["invar"], data[0]["outvar"])
# construct data pipe
datapipe = ERA5HDF5Datapipe(
data_dir=data_dir,
stats_dir=stats_dir,
channels=None,
stride=1,
num_steps=1,
patch_size=None,
num_samples_per_year=None,
batch_size=1,
num_workers=1,
device=device,
)
assert common.check_cuda_graphs(datapipe, input_fn)
|
modulus-main
|
test/datapipes/test_era5_hdf5.py
|
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
from unittest.mock import MagicMock
from modulus.datapipes.climate.sfno.dataloaders.data_loader_dummy import DummyLoader
def test_dummyloader():
# Create a mock object for params
params = MagicMock()
params.dt = 1
params.batch_size = 1
params.n_history = 1
params.n_future = 1
params.in_channels = [1]
params.out_channels = [1]
params.roll = True
params.io_grid = [1, 1, 1]
params.io_rank = [1, 1, 1]
params.crop_size_x = 1
params.crop_anchor_x = 0
params.img_shape_x = 1
params.crop_size_y = 1
params.crop_anchor_y = 0
params.img_shape_y = 1
params.n_years = 1
params.n_samples_per_year = 1
# Define dummy arguments
location = "none"
train = True
device = "cuda:0"
# Create an instance of the class
dummyloader = DummyLoader(params, location, train, device)
# Check if the object is initialized correctly
assert dummyloader.dt == params.dt
assert dummyloader.batch_size == params.batch_size
assert dummyloader.n_history == params.n_history
assert dummyloader.n_future == params.n_future
assert dummyloader.in_channels == params.in_channels
assert dummyloader.out_channels == params.out_channels
assert dummyloader.roll == params.roll
assert dummyloader.io_grid == params.io_grid[1:]
assert dummyloader.io_rank == params.io_rank[1:]
assert dummyloader.location == location
assert dummyloader.train == train
# assert dummyloader.device == device
# Test the __len__ method
assert len(dummyloader) == 1
# Test the __iter__ method
assert iter(dummyloader) == dummyloader
# Test the get_input_normalization method
in_bias, in_scale = dummyloader.get_input_normalization()
assert in_bias.shape == (1, len(params.in_channels), 1, 1)
assert in_scale.shape == (1, len(params.in_channels), 1, 1)
# Test the get_output_normalization method
out_bias, out_scale = dummyloader.get_output_normalization()
assert out_bias.shape == (1, dummyloader.n_out_channels_local, 1, 1)
assert out_scale.shape == (1, dummyloader.n_out_channels_local, 1, 1)
|
modulus-main
|
test/datapipes/sfno/test_data_loader_dummy.py
|
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import modulus
import torch
import logging
from typing import Tuple, Union, Callable
Tensor = torch.Tensor
logger = logging.getLogger("__name__")
def dummy_loss_fn(data: Union[Tensor, Tuple[Tensor, ...]]):
"""Trivial summation loss for testing"""
# Output of tensor
if isinstance(data, torch.Tensor):
loss = data.sum()
# Output of tuple of tensors
elif isinstance(data, tuple):
# Loop through tuple of outputs
loss = 0
for data_tensor in data:
# If tensor, accumulate its sum
if isinstance(data_tensor, Tensor):
loss = loss + data_tensor.sum()
else:
logger.error(
"Model returned invalid type for unit test, should be Tensor or Tuple[Tensor]"
)
loss = None
return loss
class MiniNetwork(torch.nn.Module):
"""Mini network with one parameter for testing cuda graph support of data pipes"""
def __init__(self):
super().__init__()
self.param = torch.nn.Parameter(torch.randn(1))
def forward(self, inputs: Tuple[Tensor, ...]) -> Tuple[Tensor, ...]:
output = tuple(self.param * invar for invar in inputs)
return output
def check_cuda_graphs(
datapipe: "modulus.Datapipe",
input_fn: Union[Callable, None] = None,
iterations: int = 5,
warmup_length: int = 3,
) -> bool:
"""Tests if a datapipe is compatable with cuda graphs
Parameters
----------
datapipe : modulus.Datapipe
Modulus data pipe to test
input_fn : Union[Callable, None], optional
Input pre-processing function to produce a tuple of tensors for model inputs, by default None
iterations : int, optional
Number of training iterations, by default 5
warmup_length : int, optional
Number of warm-up iterations before CUDA graph recording, by default 3
Returns
-------
bool
Test passed
Note
----
A torch module that accepts a tuple of tensors is used for testing cuda graphs with
the provided datapipe. If the datapipe does not provide a tuple of tensors by default,
one should use the `input_fn` to preprocess a batch to that form.
"""
if not datapipe.meta.cuda_graphs:
logger.warn("Datapipe does not support cuda graphs, skipping")
return True
model = MiniNetwork().cuda()
optimizer = torch.optim.Adam(model.parameters(), lr=1e-3)
def forward(in_args):
optimizer.zero_grad()
output = model(in_args)
loss = dummy_loss_fn(output)
loss.backward()
# Warmup stream (if cuda graphs)
warmup_stream = torch.cuda.Stream()
with torch.cuda.stream(warmup_stream):
for _ in range(warmup_length):
inputs = next(iter(datapipe))
if input_fn:
inputs = input_fn(inputs)
forward(inputs)
optimizer.step()
# Record and replay cuda graphs
g = torch.cuda.CUDAGraph()
optimizer.zero_grad(set_to_none=True)
for i in range(iterations):
inputs = next(iter(datapipe))
if input_fn:
inputs = input_fn(inputs)
if i == 0: # Record
with torch.cuda.graph(g):
forward(inputs)
else: # Replay
g.replay()
# Optimizer step outside for AMP support
optimizer.step()
return True
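# Example usage (a minimal sketch; ``Darcy2D`` stands in for any Modulus
# datapipe, and the lambda converts its dict samples to a tuple of tensors):
#
#     datapipe = Darcy2D(resolution=64, batch_size=1, device="cuda:0")
#     assert check_cuda_graphs(datapipe, lambda d: (d["permeability"], d["darcy"]))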
|
modulus-main
|
test/datapipes/common/cuda_graphs.py
|
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import modulus
import torch
import logging
logger = logging.getLogger("__name__")
def check_datapipe_iterable(
datapipe: "modulus.Datapipe", nr_iterations: int = 3
) -> bool:
"""Checks if datapipe is iterable
Parameters
----------
datapipe : modulus.Datapipe
datapipe to check if iterable
nr_iterations : int
number of iterations to check datapipe iterable
Returns
-------
bool
Test passed
"""
# Check if datapipe is iterable
try:
for i, data in enumerate(datapipe):
if i >= nr_iterations:
break
assert len(datapipe) > 0  # even if infinite, len should return an int
return True
except Exception:
logger.warning("Datapipe is not iterable")
return False
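# Example usage (a minimal sketch): any datapipe, finite or infinite, should
# iterate for a few samples and report a positive length:
#
#     assert check_datapipe_iterable(datapipe, nr_iterations=3)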
|
modulus-main
|
test/datapipes/common/iterator_check.py
|
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import modulus
import torch
import logging
from typing import Tuple, Union
Tensor = torch.Tensor
logger = logging.getLogger("__name__")
def check_datapipe_device(sample: Tensor, device: Union[str, torch.device]) -> bool:
"""Checks if datapipe loads sample to correct device
Parameters
----------
sample : Tensor
Torch tensor to check device on.
device : Union[str, torch.device]
expected device the sample should be loaded onto
Returns
-------
bool
Test passed
"""
if isinstance(device, str):
device = torch.device(device)
# Need an index id if cuda
if device.type == "cuda" and device.index is None:
device = torch.device("cuda:0")
# Check if sample is on correct device
if sample.device != device:
logger.warning(f"Datapipe loading sample on incorrect device")
logger.warning(f"Expected Device: {type(device)}")
logger.warning(f"Device: {type(sample.device)}")
return False
return True
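def _example_device_check():
    """Illustrative usage, not part of the original file: a bare "cuda"
    device string is resolved to "cuda:0" before comparison."""
    assert check_datapipe_device(torch.zeros(4), "cpu")
    if torch.cuda.is_available():
        assert check_datapipe_device(torch.zeros(4).cuda(), "cuda")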
|
modulus-main
|
test/datapipes/common/device.py
|
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import modulus
import torch
import logging
from typing import Tuple, Union
Tensor = torch.Tensor
logger = logging.getLogger(__name__)
def check_sequence(
tensor: Tensor, start_index: int, step_size: int, seq_length: int, axis: int = 0
) -> bool:
"""Checks if tensor has correct sequence. The tensor is expected to have a dimension that represents the sequence. Indexing this dimension should give a tensor of constant ints with the correct sequence number.
Parameters
----------
tensor : Tensor
tensor to check sequence on.
start_index : int
expected value of first tensor in sequence
step_size : int
step size in sequence
seq_length : int
expected sequence length
axis : int
axis of tensor to check sequence on
Returns
-------
bool
Test passed
"""
# convert tensors to int list
tensor_tags = [
int(tensor.select(axis, i).flatten()[0]) for i in range(tensor.shape[axis])
]
# correct seq
correct_seq = [step_size * i + start_index for i in range(seq_length)]
    # check if seq matches expected
    if correct_seq != tensor_tags:
        logger.warning("Sequence does not match expected")
logger.warning(f"Expected Sequence: {correct_seq}")
logger.warning(f"Sequence order: {tensor_tags}")
return False
return True
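def _example_sequence_check():
    """Illustrative usage, not part of the original file: a [4, 2, 2] tensor
    whose slices along axis 0 hold the constant tags 3, 5, 7, 9."""
    tags = torch.arange(3, 11, 2)
    tensor = tags.view(-1, 1, 1).expand(-1, 2, 2)
    assert check_sequence(tensor, start_index=3, step_size=2, seq_length=4)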
|
modulus-main
|
test/datapipes/common/sequence.py
|
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .shape_check import check_batch_size, check_seq_length, check_channels, check_grid
from .iterator_check import check_datapipe_iterable
from .shuffle import check_shuffle
from .sequence import check_sequence
from .device import check_datapipe_device
from .cuda_graphs import check_cuda_graphs
|
modulus-main
|
test/datapipes/common/__init__.py
|
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import modulus
import torch
import logging
from typing import Tuple, Union
Tensor = torch.Tensor
logger = logging.getLogger(__name__)
def check_shuffle(
tensors: Tuple[Tensor, ...], shuffle: bool, stride: int, correct_length: int
) -> bool:
"""Checks if list of tensors is shuffled or not
Parameters
----------
tensors : Tuple[Tensor, ...]
tuple of tensors. Each tensor is expected to be constant and have an int value
        corresponding to its place in the dataset.
stride : int
stride of the datapipe
shuffle : bool
if the list of tensors is expected to be shuffled or not
correct_length: int
Expected number of tensors
Returns
-------
bool
Test passed
"""
# convert tensors to int list
tensor_tags = [int(t.flatten()[0]) for t in tensors]
# check if number of samples has correct length
    if correct_length - stride != len(tensor_tags):
        logger.warning("Number of samples does not match expected")
        logger.warning(f"Expected Number of Samples: {correct_length - stride}")
        logger.warning(f"Number of Samples: {len(tensor_tags)}")
return False
expected_tags = list(range(correct_length - stride))
# check if shuffle is false
if not shuffle:
if tensor_tags != expected_tags:
logger.warning(f"Shuffle is set to False however samples are not in order")
logger.warning(f"Expected order: {expected_tags}")
logger.warning(f"Sample order: {tensor_tags}")
return False
# check if shuffle is True
if shuffle:
if sorted(tensor_tags) != expected_tags:
            logger.warning(
                "Shuffle is set to True but sorted samples don't match expected"
            )
logger.warning(f"Expected order: {expected_tags}")
logger.warning(f"Sorted Sample order: {sorted(tensor_tags)}")
return False
return True
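def _example_shuffle_check():
    """Illustrative usage, not part of the original file: with stride=0 the
    helper expects exactly `correct_length` constant-tagged tensors, in order
    when shuffle=False and as a permutation of 0..N-1 when shuffle=True."""
    ordered = tuple(torch.full((2, 2), i) for i in range(5))
    assert check_shuffle(ordered, shuffle=False, stride=0, correct_length=5)
    permuted = (ordered[3], ordered[0], ordered[4], ordered[1], ordered[2])
    assert check_shuffle(permuted, shuffle=True, stride=0, correct_length=5)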
|
modulus-main
|
test/datapipes/common/shuffle.py
|
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import modulus
import torch
import logging
from typing import Tuple, Union
Tensor = torch.Tensor
logger = logging.getLogger(__name__)
def check_batch_size(
tensors: Union[Tensor, Tuple[Tensor, ...]], batch_size: int
) -> bool:
"""Checks if tensor has correct batch size
Parameters
----------
tensors : Union[Tensor, Tuple[Tensor, ...]]
tensors to check
batch_size : int
correct batch size
Returns
-------
bool
Test passed
"""
if isinstance(tensors, Tensor):
tensors = (tensors,)
# Check batch size for each tensor
for t in tensors:
t_batch_size = t.shape[0]
if t_batch_size != batch_size:
logger.warning(f"Batch size incorrect for tesnor")
logger.warning(f"Expected Batch Size: {batch_size}")
logger.warning(f"Tensor Batch Size: {t_batch_size}")
return False
return True
def check_seq_length(
tensors: Union[Tensor, Tuple[Tensor, ...]], seq_length: int, axis: int = 1
) -> bool:
"""Checks if tensor has correct seq length
Parameters
----------
tensors : Union[Tensor, Tuple[Tensor, ...]]
tensors to check
seq_length : int
correct seq length
axis : int
        axis to check seq length on, default is 1
Returns
-------
bool
Test passed
"""
if isinstance(tensors, Tensor):
tensors = (tensors,)
# Check seq length for each tensor
for t in tensors:
t_seq_length = t.shape[axis]
if t_seq_length != seq_length:
logger.warning(f"Sequence length incorrect for tesnor")
logger.warning(f"Expected Sequence Length: {seq_length}")
logger.warning(f"Tensor Sequence Length: {t_seq_length}")
return False
return True
def check_channels(
tensors: Union[Tensor, Tuple[Tensor, ...]], channels: int, axis: int = 1
) -> bool:
"""Checks if tensor has correct channels
Parameters
----------
tensors : Union[Tensor, Tuple[Tensor, ...]]
tensors to check
channels : int
correct number of channels
axis : int
axis to check channels on, default is 1
Returns
-------
bool
Test passed
"""
if isinstance(tensors, Tensor):
tensors = (tensors,)
# Check channels for each tensor
for t in tensors:
t_channels = t.shape[axis]
if t_channels != channels:
logger.warning(f"Number of channels incorrect for tesnor")
logger.warning(f"Expected Channels: {channels}")
logger.warning(f"Tensor Channels: {t_channels}")
return False
return True
def check_grid(
tensors: Union[Tensor, Tuple[Tensor, ...]],
grid: Tuple[int, int],
axis: Tuple[int, int] = (2, 3),
) -> bool:
"""Checks if tensor has correct grid dimension
Parameters
----------
tensors : Union[Tensor, Tuple[Tensor, ...]]
tensors to check
grid : Tuple[int, int]
correct grid dimension
axis : Tuple[int, int]
axis to check grid dimension on, default is 2, 3
Returns
-------
bool
Test passed
"""
if isinstance(tensors, Tensor):
tensors = (tensors,)
# Check channels for each tensor
for t in tensors:
t_grid = (t.shape[axis[0]], t.shape[axis[1]])
if t_grid != grid:
logger.warning(f"Grid dimension incorrect for tesnor")
logger.warning(f"Expected Grid: {grid}")
logger.warning(f"Tensor Channels: {t_grid}")
return False
return True
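def _example_shape_checks():
    """Illustrative usage, not part of the original file: an NCHW batch and a
    [N, T, C, H, W] sequence checked with all four helpers."""
    batch = torch.zeros(16, 3, 64, 32)
    assert check_batch_size(batch, 16)
    assert check_channels(batch, 3, axis=1)
    assert check_grid(batch, (64, 32), axis=(2, 3))
    seq = torch.zeros(16, 8, 3, 64, 32)
    assert check_seq_length(seq, 8, axis=1)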
|
modulus-main
|
test/datapipes/common/shape_check.py
|
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Configuration file for the Sphinx documentation builder.
#
# For the full list of built-in configuration values, see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- Project information -----------------------------------------------------
# https://www.sphinx-doc.org/en/master/usage/configuration.html#project-information
import os
import sphinx_rtd_theme
from modulus import __version__ as version
project = "NVIDIA Modulus"
copyright = "2023, NVIDIA Modulus Team"
author = "NVIDIA Modulus Team"
release = version
# -- General configuration ---------------------------------------------------
# https://www.sphinx-doc.org/en/master/usage/configuration.html#general-configuration
extensions = [
"recommonmark",
"sphinx.ext.mathjax",
"sphinx.ext.todo",
"sphinx.ext.autosectionlabel",
"sphinx.ext.autodoc",
"sphinx.ext.napoleon",
"sphinx.ext.viewcode",
"sphinx.ext.autosummary",
"nbsphinx",
]
source_suffix = {".rst": "restructuredtext", ".md": "markdown"}
pdf_documents = [
("index", "rst2pdf", "Sample rst2pdf doc", "Your Name"),
]
napoleon_custom_sections = ["Variable Shape"]
# -- Options for HTML output -------------------------------------------------
# HTML theme options
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
html_theme = "sphinx_rtd_theme"
html_theme_options = {
"logo_only": True,
"display_version": True,
"prev_next_buttons_location": "bottom",
"style_external_links": False,
"style_nav_header_background": "#000000",
# Toc options
"collapse_navigation": False,
"sticky_navigation": False,
# 'navigation_depth': 10,
"sidebarwidth": 12,
"includehidden": True,
"titles_only": False,
}
# Additional html options
html_static_path = ["_static"]
html_css_files = [
"css/nvidia_styles.css",
]
html_js_files = ["js/pk_scripts.js"]
# html_last_updated_fmt = ''
# Additional sphinx switches
math_number_all = True
todo_include_todos = True
numfig = True
_PREAMBLE = r"""
\usepackage{amsmath}
\usepackage{esint}
\usepackage{mathtools}
\usepackage{stmaryrd}
"""
latex_elements = {
"preamble": _PREAMBLE,
# other settings go here
}
latex_preamble = [
(
"\\usepackage{amssymb}",
"\\usepackage{amsmath}",
"\\usepackage{amsxtra}",
"\\usepackage{bm}",
"\\usepackage{esint}",
"\\usepackage{mathtools}",
"\\usepackage{stmaryrd}",
),
]
autosectionlabel_maxdepth = 1
templates_path = ["_templates"]
exclude_patterns = [
"_build",
"Thumbs.db",
".DS_Store",
"README.md",
"CONTRIBUTING.md",
"LICENSE.txt",
]
source_suffix = {".rst": "restructuredtext", ".md": "markdown"}
pdf_documents = [
("index", "rst2pdf", "Sample rst2pdf doc", "Your Name"),
]
napoleon_custom_sections = ["Variable Shape"]
|
modulus-main
|
docs/conf.py
|
# Copyright (c) 2020-2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import nvidia.dali.fn as fn
import nvidia.dali.types as types
import time
from nvidia.dali.pipeline import pipeline_def
parser = argparse.ArgumentParser(description='DALI HW decoder benchmark')
parser.add_argument('-b', dest='batch_size', help='batch size', default=1, type=int)
parser.add_argument('-d', dest='device_id', help='device id', default=0, type=int)
parser.add_argument('-g', dest='device', choices=['gpu', 'cpu'],
help='device to use', default='gpu',
type=str)
parser.add_argument('-w', dest='warmup_iterations', help='warmup iterations', default=0, type=int)
parser.add_argument('-t', dest='total_images', help='total images', default=100, type=int)
parser.add_argument('-j', dest='num_threads', help='num_threads', default=1, type=int)
parser.add_argument('-i', dest='images_dir', help='images dir')
parser.add_argument('-p', dest='pipeline', choices=['decoder', 'rn50'],
help='pipeline to test', default='decoder',
type=str)
parser.add_argument('--width_hint', dest="width_hint", default=0, type=int)
parser.add_argument('--height_hint', dest="height_hint", default=0, type=int)
parser.add_argument('--hw_load', dest='hw_load',
help='HW decoder workload (e.g. 0.66 means 66% of the batch)', default=0.75,
type=float)
args = parser.parse_args()
@pipeline_def(batch_size=args.batch_size,
num_threads=args.num_threads,
device_id=args.device_id,
seed=0)
def DecoderPipeline():
device = 'mixed' if args.device == 'gpu' else 'cpu'
jpegs, _ = fn.readers.file(file_root=args.images_dir)
images = fn.decoders.image(jpegs, device=device, output_type=types.RGB,
hw_decoder_load=args.hw_load, preallocate_width_hint=args.width_hint,
preallocate_height_hint=args.height_hint)
return images
@pipeline_def(batch_size=args.batch_size,
num_threads=args.num_threads,
device_id=args.device_id,
seed=0)
def RN50Pipeline():
device = 'mixed' if args.device == 'gpu' else 'cpu'
jpegs, _ = fn.readers.file(file_root=args.images_dir)
images = fn.decoders.image_random_crop(jpegs, device=device, output_type=types.RGB,
hw_decoder_load=args.hw_load,
preallocate_width_hint=args.width_hint,
preallocate_height_hint=args.height_hint)
images = fn.resize(images, resize_x=224, resize_y=224)
layout = types.NCHW
out_type = types.FLOAT16
coin_flip = fn.random.coin_flip(probability=0.5)
images = fn.crop_mirror_normalize(
images,
dtype=out_type,
output_layout=layout,
crop=(224, 224),
mean=[0.485 * 255, 0.456 * 255, 0.406 * 255],
std=[0.229 * 255, 0.224 * 255, 0.225 * 255],
mirror=coin_flip)
return images
if args.pipeline == 'decoder':
pipe = DecoderPipeline()
elif args.pipeline == 'rn50':
pipe = RN50Pipeline()
else:
raise RuntimeError('Unsupported pipeline')
pipe.build()
for iteration in range(args.warmup_iterations):
output = pipe.run()
print('Warmup finished')
start = time.time()
test_iterations = args.total_images // args.batch_size
print('Test iterations: ', test_iterations)
for iteration in range(test_iterations):
output = pipe.run()
end = time.time()
total_time = end - start
print(test_iterations * args.batch_size / total_time, 'fps')
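# Illustrative invocation, not part of the original script: decode 1000 JPEGs
# with batch size 8, 4 CPU threads and 66% of each batch routed to the HW
# decoder. The images directory is whatever fn.readers.file accepts.
#
#   python hw_decoder_bench.py -i /path/to/images -b 8 -j 4 -t 1000 \
#       -w 10 --hw_load 0.66 -p decoder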
|
DALI-main
|
tools/hw_decoder_bench.py
|
#!/usr/bin/env python
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This is a tool to generate a stub (empty) implementation of a C API header.
# It matches functions following the pattern:
# DLL_PUBLIC rettype func_name(A a, B b, ...);
# also including linebreaks and extra spaces.
#
# Usage:
# ./stubgen.py path/to/header.h > stub.c
import re
import sys
COPYRIGHT_NOTICE = """
// Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// THIS IS A STUB IMPLEMENTATION FILE ONLY MEANT FOR BUILD
// PURPOSES.
"""
def stubgen(header_filepath, out_file=sys.stdout):
header_text = ""
with open(header_filepath, "r") as file:
header_text = file.read()
print(COPYRIGHT_NOTICE, file=out_file)
print("#include \"{}\"\n\n".format(header_filepath), file=out_file)
FUNCTION_DECL_PATTERN = r"DLL_PUBLIC[\s]+(.*)[\s]+(.*)\(([^\)]*?)\);"
for entry in re.finditer(FUNCTION_DECL_PATTERN, header_text):
ret_type = entry.group(1)
func_name = entry.group(2)
args = entry.group(3)
print('{} {}({}) {{\n}}\n\n'.format(ret_type, func_name, args), file=out_file)
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser(
description='Produces an empty stub implementation of a C header')
parser.add_argument('header_filepath', metavar='header', type=str,
help='Path to the header file')
parser.add_argument('--output', metavar='output', type=str, help='Path to the output file')
args = parser.parse_args()
f = open(args.output, 'w+') if args.output is not None else sys.stdout
stubgen(args.header_filepath, out_file=f)
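# Illustrative input/output, not part of the original tool: given a header
# line such as
#     DLL_PUBLIC int daliFoo(int a, float b);
# the FUNCTION_DECL_PATTERN above captures ("int", "daliFoo", "int a, float b")
# and the emitted stub is an empty definition with the same signature:
#     int daliFoo(int a, float b) {
#     }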
|
DALI-main
|
tools/stubgen.py
|
#!/usr/bin/python3
# Copyright (c) 2021-2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import subprocess
import sys
import tarfile
import time
from shutil import which
class IndexCreator:
"""Reads `Webdataset` data format, and creates index file
that enables random access.
Example usage:
----------
>> with IndexCreator('data/test.tar','data/test.idx') as ic:
>> ic.create_index()
>> !ls data/
test.tar test.idx
Parameters
----------
uri : str
Path to the archive file.
idx_path : str
Path to the index file, that will be created/overwritten.
"""
tar_block_size = 512
index_file_version = "v1.2"
def __init__(self, uri, idx_path, verbose=True):
self.uri = uri
self.idx_path = idx_path
self.fidx = open(self.idx_path, "w")
self.verbose = verbose
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, exc_traceback):
self.close()
def open(self):
"""Opens the archive and index files and sets their read heads to 0."""
if self.fidx.closed:
self.fidx = open(self.idx_path, "w")
else:
self.fidx.seek(0)
def close(self):
"""Closes the archive and index files."""
if not self.fidx.closed:
self.fidx.close()
def reset(self):
"""Resets the archive and index files."""
self.close()
self.open()
@staticmethod
def split_name(filepath):
"""Splits the webdataset into the basename and the extension"""
dot_pos = filepath.find(".", filepath.rfind("/") + 1)
return filepath[:dot_pos], filepath[dot_pos + 1:]
def _get_data_tar(self):
"""Retreives the data about the offset, name and size of each component
using the gnu tar utility, while also filtering out non-file entries"""
tar_blocks_proc = subprocess.Popen(
["tar", "--list", "--block-num", "--file", self.uri],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
)
tar_types_sizes_proc = subprocess.Popen(
["tar", "--verbose", "--list", "--file", self.uri],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
)
tar_blocks = tar_blocks_proc.communicate()[0].split(b"\n") # block <n>: <filepath>
tar_types_sizes = tar_types_sizes_proc.communicate()[0].split(
b"\n"
) # <type>... <size> <date> <name>
# Extracting
for blocks_line, types_sizes_line in zip(tar_blocks, tar_types_sizes):
if not blocks_line or not types_sizes_line:
continue
name = str(blocks_line[blocks_line.find(b":") + 2:], "ascii")
entry_type = types_sizes_line[0:1]
if entry_type != b"-":
continue
offset = int(blocks_line[blocks_line.find(b"block") + 6:
blocks_line.find(b":")])
# according to https://www.loc.gov/preservation/digital/formats/fdd/fdd000531.shtml#:~:text=A%20tar%20(tape%20archive)%20file,are%20not%20compressed%20archive%20files. # noqa: E501, W505
# each data record is preceded by 512-byte header. `tar --list --block-num --file`
# return the position (counted in 512-byte blocks) of the header for a given entry.
# So the size of the header needs to be added to get the data offset
offset = (offset + 1) * 512
size = types_sizes_line[: -len(name)]
size = size[: size.rfind(b"-") - 8] # "... <size> 20yy-mm-...."
size = int(size[size.rfind(b" "):])
yield offset, name, size
def _get_data_tarfile(self):
"""Retreives the data about the offset, name and size of each component
using the tarfile module, while also filtering out non-file entries
Intended as a fallback for the gnu tar version (since it is much slower)"""
print(
"Warning: tar utility not found. Falling back to tarfile."
+ " Processing will most likely take much longer",
file=sys.stderr,
)
farchive = tarfile.open(self.uri)
for member in iter(farchive):
if member.type != tarfile.REGTYPE:
continue
offset = farchive.fileobj.tell()
yield offset, member.name, member.size
def create_index(self):
"""Creates the index file from a tar archive"""
self.reset()
pre_time = time.time()
counter = 0
report_step = 100000
if self.verbose:
print(f"time: {time.time() - pre_time:.2f} count: {counter} stage: collect")
# Aggregates extensions in samples
aggregated_data = []
last_basename = None
for offset, name, size in (
self._get_data_tar() if which("tar") is not None else self._get_data_tarfile()
):
if counter % report_step == 0 and counter > 0:
cur_time = time.time()
if self.verbose:
print(f"time: {cur_time - pre_time:.2f} count: {counter} stage: collect")
counter += 1
basename, extension = IndexCreator.split_name(name)
# check for the files starting with a dot (hidden files)
if not basename or basename.endswith("/"):
continue
if last_basename != basename:
aggregated_data.append([(extension, offset, size, name)])
last_basename = basename
else:
aggregated_data[-1].append((extension, offset, size, name))
if not aggregated_data:
raise ValueError("Webdataset Tar File empty")
# Constructs the index file out of the aggregated extensions
self.fidx.write(f"{IndexCreator.index_file_version} {len(aggregated_data)}\n")
for bundle in aggregated_data:
if counter % report_step == 0:
cur_time = time.time()
if self.verbose:
print(f"time: {cur_time - pre_time:.2f} count: {counter} stage: index")
self.fidx.write(" ".join(map(lambda component: " ".join(map(str, component)), bundle)))
self.fidx.write("\n")
counter += 1
cur_time = time.time()
if self.verbose:
print(f"time: {cur_time - pre_time:.2f} count: {counter} stage: done")
def parse_args():
parser = argparse.ArgumentParser(
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
description="Creates a webdataset index file for the use with the `fn.readers.webdataset`.",
)
parser.add_argument("archive", help="path to .tar file.")
parser.add_argument(
"index",
help="path to index file",
nargs="?",
)
args = parser.parse_args()
if args.index is None:
args.index = args.archive[: args.archive.find(".", args.archive.rfind("/") + 2)] + ".idx"
args.archive = os.path.abspath(args.archive)
args.index = os.path.abspath(args.index)
return args
def main():
args = parse_args()
creator = IndexCreator(args.archive, args.index)
creator.create_index()
creator.close()
if __name__ == "__main__":
main()
|
DALI-main
|
tools/wds2idx.py
|
#!/usr/bin/env python
# Copyright (c) 2019-2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import subprocess
from sys import argv
def get_list_elm_match(value, elms):
""" Check if any element in the elms list matches the value """
return any(e in value for e in elms)
def check_ldd_out(lib, linked_lib, bundled_lib_names, allowed_libs):
# Gather all libs that may be linked with 'lib' and don't need to be bundled
# Entries from 'lib' key in allowed_libs should cover all 'lib*' libs
# Empty key is used for all libs
allowed_libs_to_check = []
for k in allowed_libs.keys():
if k in lib:
allowed_libs_to_check += allowed_libs[k]
return linked_lib in bundled_lib_names or get_list_elm_match(linked_lib, allowed_libs_to_check)
def main():
allowed_libs = {"": ["linux-vdso.so.1",
"libm.so.6",
"libpthread.so.0",
"libc.so.6",
"/lib64/ld-linux",
"/lib/ld-linux",
"libdl.so.2",
"librt.so.1",
"libstdc++.so.6",
"libgcc_s.so.1",
"libasan.so",
"liblsan.so",
"libubsan.so",
"libtsan.so"
]}
bundled_libs = argv[1:]
# Gather all names of bundled libs without path
bundled_lib_names = []
for lib in bundled_libs:
beg = lib.rfind('/')
bundled_lib_names.append(lib[beg + 1:])
print("Checking bundled libs linkage:")
for lib_path, lib_name in zip(bundled_libs, bundled_lib_names):
print(f"- {lib_name}")
ldd = subprocess.Popen(["ldd", lib_path], stdout=subprocess.PIPE)
for lib in ldd.stdout:
lib = lib.decode().strip('\t').strip('\n')
linked_lib = lib.split()[0]
if not check_ldd_out(lib_name, linked_lib, bundled_lib_names, allowed_libs):
print(f"Library: '{linked_lib}' should be bundled in whl "
f"or removed from the dynamic link dependency")
exit(1)
print("-> OK")
if __name__ == '__main__':
main()
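# Illustrative invocation, not part of the original tool: pass every shared
# library that is meant to be bundled in the wheel; the script exits non-zero
# on the first linked dependency that is neither bundled nor allow-listed.
#
#   python test_bundled_libs.py /whl/libdali.so /whl/libdali_operators.so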
|
DALI-main
|
tools/test_bundled_libs.py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import os
import time
import ctypes
from mxnet.base import _LIB
from mxnet.base import check_call
import mxnet as mx
import argparse
class IndexCreator(mx.recordio.MXRecordIO):
"""Reads `RecordIO` data format, and creates index file
that enables random access.
Example usage:
----------
>> creator = IndexCreator('data/test.rec','data/test.idx')
    >> creator.create_index()
    >> creator.close()
>> !ls data/
test.rec test.idx
Parameters
----------
uri : str
Path to the record file.
idx_path : str
Path to the index file, that will be created/overwritten.
key_type : type
Data type for keys (optional, default = int).
"""
def __init__(self, uri, idx_path, key_type=int):
self.key_type = key_type
self.fidx = None
self.idx_path = idx_path
super(IndexCreator, self).__init__(uri, 'r')
def open(self):
super(IndexCreator, self).open()
self.fidx = open(self.idx_path, 'w')
def close(self):
"""Closes the record and index files."""
if not self.is_open:
return
super(IndexCreator, self).close()
self.fidx.close()
def tell(self):
"""Returns the current position of read head.
"""
pos = ctypes.c_size_t()
check_call(_LIB.MXRecordIOReaderTell(self.handle, ctypes.byref(pos)))
return pos.value
def create_index(self):
"""Creates the index file from open record file
"""
self.reset()
counter = 0
pre_time = time.time()
while True:
if counter % 1000 == 0:
cur_time = time.time()
print('time:', cur_time - pre_time, ' count:', counter)
pos = self.tell()
cont = self.read()
if cont is None:
break
key = self.key_type(counter)
self.fidx.write('%s\t%d\n' % (str(key), pos))
counter = counter + 1
def parse_args():
parser = argparse.ArgumentParser(
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
description='Create an index file from .rec file')
parser.add_argument('record', help='path to .rec file.')
parser.add_argument('index', help='path to index file.')
args = parser.parse_args()
args.record = os.path.abspath(args.record)
args.index = os.path.abspath(args.index)
return args
def main():
args = parse_args()
creator = IndexCreator(args.record, args.index)
creator.create_index()
creator.close()
if __name__ == '__main__':
main()
|
DALI-main
|
tools/rec2idx.py
|
#!/usr/bin/env python
# Copyright (c) 2020-2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import glob
import itertools
import re
import argparse
import sys
import subprocess
# Linter script, that calls cpplint.py, specifically for DALI repo.
# This will be called in `make lint` cmake target
# Q: How to configure git hook for pre-push linter check?
# A: Create a file `.git/hooks/pre-push`:
#
# #!/bin/sh
# DALI_ROOT_DIR=$(git rev-parse --show-toplevel)
# python $DALI_ROOT_DIR/tools/lint.py $DALI_ROOT_DIR --nproc=10
# ret=$?
# if [ $ret -ne 0 ]; then
# exit 1
# fi
# exit 0
# Specifies, which files are to be excluded
# These filters are regexes, not typical unix-like path specification
negative_filters = [
".*operators/reader/nvdecoder/nvcuvid.h",
".*operators/reader/nvdecoder/cuviddec.h",
".*operators/reader/loader/video/nvdecode/*",
".*operators/sequence/optical_flow/optical_flow_impl/nvOpticalFlowCuda.h",
".*operators/sequence/optical_flow/optical_flow_impl/nvOpticalFlowCommon.h",
".*python/dummy.cu"
]
def negative_filtering(patterns: list, file_list):
"""
Patterns shall be a list of regex patterns
"""
if len(patterns) == 0:
return file_list
prog = re.compile(patterns.pop())
it = (i for i in file_list if not prog.search(i))
return negative_filtering(patterns, it)
def gather_files(path: str, patterns: list, antipatterns: list):
"""
Gather files, based on `path`, that match `patterns` unix-like specification
and do not match `antipatterns` regexes
"""
curr_path = os.getcwd()
os.chdir(path)
positive_iterators = [glob.iglob(os.path.join('**', pattern), recursive=True) for pattern in
patterns]
linted_files = itertools.chain(*positive_iterators)
linted_files = (os.path.join(path, file) for file in linted_files)
linted_files = negative_filtering(antipatterns.copy(), linted_files)
ret = list(linted_files)
os.chdir(curr_path)
return ret
def gen_cmd(dali_root_dir, file_list, process_includes=False):
"""
Command for calling cpplint.py
"""
if not file_list:
return ["true"]
cmd = ["python",
os.path.join(dali_root_dir, "third_party", "cpplint.py"),
"--quiet",
"--linelength=100",
"--headers=h,cuh",
"--root=" + os.path.join(dali_root_dir, "include" if process_includes else "")]
cmd.extend(file_list)
return cmd
def lint(dali_root_dir, file_list, process_includes, n_subproc):
"""
n_subprocesses: how many subprocesses to use for linter processing
Returns: 0 if lint passed, 1 otherwise
"""
if len(file_list) == 0:
return 0
cmds = []
diff = int(len(file_list) / n_subproc)
for process_idx in range(n_subproc - 1):
cmds.append(gen_cmd(dali_root_dir=dali_root_dir,
file_list=file_list[process_idx * diff: (process_idx + 1) * diff],
process_includes=process_includes))
cmds.append(gen_cmd(dali_root_dir=dali_root_dir,
file_list=file_list[(n_subproc - 1) * diff:],
process_includes=process_includes))
subprocesses = [subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE) for cmd in
cmds]
success = True
for subproc in subprocesses:
stdout, stderr = subproc.communicate()
success *= not bool(subproc.poll())
if len(stderr) > 0:
print(stderr.decode("utf-8"))
return 0 if success else 1
def main(dali_root_dir, n_subproc=1, file_list=None):
cc_files = gather_files(
os.path.join(dali_root_dir, "dali"),
["*.cc", "*.h", "*.cu", "*.cuh"] if file_list is None else file_list,
negative_filters)
inc_files = gather_files(
os.path.join(dali_root_dir, "include"),
["*.h", "*.cuh", "*.inc", "*.inl"] if file_list is None else file_list,
negative_filters)
tf_plugin_files = gather_files(
os.path.join(dali_root_dir, "dali_tf_plugin"),
["*.cc", "*.h", "*.cu", "*.cuh"] if file_list is None else file_list,
negative_filters)
cc_code = lint(dali_root_dir=dali_root_dir, file_list=cc_files, process_includes=False,
n_subproc=n_subproc)
inc_code = lint(dali_root_dir=dali_root_dir, file_list=inc_files, process_includes=True,
n_subproc=n_subproc)
tf_plugin_code = lint(dali_root_dir=dali_root_dir, file_list=tf_plugin_files,
process_includes=False, n_subproc=n_subproc)
if cc_code != 0 or inc_code != 0 or tf_plugin_code != 0:
sys.exit(1)
sys.exit(0)
if __name__ == '__main__':
parser = argparse.ArgumentParser(
description="Run linter check for DALI files. Gather all code-files "
"(h, cuh, cc, cu, inc, inl, py) and perform linter check on them.")
parser.add_argument('dali_root_path', type=str,
help='Root path of DALI repository '
'(pointed directory should contain `.git` folder)')
parser.add_argument('--nproc', type=int, default=1,
help='Number of processes to spawn for linter verification')
parser.add_argument('--file-list', nargs='*',
help='List of files. This overrides the default scenario')
args = parser.parse_args()
assert args.nproc > 0
main(str(args.dali_root_path), args.nproc, file_list=args.file_list)
|
DALI-main
|
tools/lint.py
|
# Copyright 2020 The TensorFlow Runtime Authors
# Copyright (c) 2020-2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Generates dynamic loading stubs for functions in CUDA and HIP APIs."""
from __future__ import absolute_import
from __future__ import print_function
import argparse
import re
import json
import clang.cindex
def function_header(return_type, name, args):
args_expr = []
for arg_type, arg_name in args:
# handle arrays and function (or array of) pointer and reference to an array differently
# as well
# int[], int (*)(), int (*[])(), int (&)[5]
match = re.search(r'\[|\)', arg_type)
if match:
pos = match.span()[0]
print(arg_type[:pos])
args_expr.append(f"{arg_type[:pos]} {arg_name}{arg_type[pos:]}")
else:
args_expr.append(f"{arg_type} {arg_name}")
arg_str = ", ".join(args_expr)
ret = f"{return_type} {name}({arg_str})"
return ret
def main():
parser = argparse.ArgumentParser(
description='Generate dynamic loading stubs for CUDA and HIP APIs.')
parser.add_argument('--unique_prefix', default="", type=str,
help='Unique prefix for used in the stub')
parser.add_argument(
'input', nargs='?', type=argparse.FileType('r'))
parser.add_argument(
'output', nargs='?', type=argparse.FileType('w'))
parser.add_argument(
'header', nargs='?', type=str, default=None)
parser.add_argument(
'extra_args', nargs='*', type=str, default=None)
args = parser.parse_args()
config = json.load(args.input)
function_impl = """
{return_type} %s {1}NotFound({2}) {{
return {not_found_error};
}}
{0} {{
using FuncPtr = {return_type} (%s *)({2});
static auto func_ptr = reinterpret_cast<FuncPtr>(LOAD_SYMBOL_FUNC("{1}")) ?
reinterpret_cast<FuncPtr>(LOAD_SYMBOL_FUNC("{1}")) :
{1}NotFound;
return func_ptr({3});
}}\n""" % (config['calling_conv'], config['calling_conv'])
prolog = """
void *{0}LoadSymbol(const char *name);
#define LOAD_SYMBOL_FUNC {0}##LoadSymbol
"""
index = clang.cindex.Index.create()
header = args.header
extra_args = args.extra_args
translation_unit = index.parse(header, args=extra_args)
for diag in translation_unit.diagnostics:
if diag.severity in [diag.Warning, diag.Fatal]:
raise Exception(str(diag))
for extra_i in config['extra_include']:
args.output.write('#include {}\n'.format(extra_i))
args.output.write(prolog.format(args.unique_prefix))
all_definition = set()
all_declaration = set()
for cursor in translation_unit.cursor.get_children():
if cursor.is_definition():
all_definition.add(cursor.spelling)
if cursor.kind == clang.cindex.CursorKind.FUNCTION_DECL:
all_declaration.add(cursor.spelling)
for cursor in translation_unit.cursor.get_children():
if cursor.kind != clang.cindex.CursorKind.FUNCTION_DECL:
continue
function_name = cursor.spelling
# make sure that we deal only with functions with no definition
if function_name not in config['functions'] or function_name in all_definition or \
function_name not in all_declaration:
continue
# make sure that we deal with every function only once
all_declaration.remove(function_name)
arg_types = [arg.type.spelling for arg in cursor.get_arguments()]
arg_names = [arg.spelling for arg in cursor.get_arguments()]
return_type = config['functions'][function_name].get(
'return_type', config['return_type'])
not_found_error = config['functions'][function_name].get(
'not_found_error', config['not_found_error'])
header = function_header(return_type, function_name, zip(arg_types, arg_names))
implementation = function_impl.format(header, function_name, ', '.join(arg_types),
', '.join(arg_names), return_type=return_type,
not_found_error=not_found_error)
args.output.write(implementation)
if __name__ == '__main__':
main()
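# Illustrative output, not part of the original tool: for a hypothetical config
# with calling_conv "CUDAAPI" and not_found_error "CUDA_ERROR_NOT_FOUND", a
# declaration "CUresult cuInit(unsigned int Flags);" expands roughly to:
#
#   CUresult CUDAAPI cuInitNotFound(unsigned int) {
#     return CUDA_ERROR_NOT_FOUND;
#   }
#   CUresult cuInit(unsigned int Flags) {
#     using FuncPtr = CUresult (CUDAAPI *)(unsigned int);
#     static auto func_ptr = reinterpret_cast<FuncPtr>(LOAD_SYMBOL_FUNC("cuInit")) ?
#         reinterpret_cast<FuncPtr>(LOAD_SYMBOL_FUNC("cuInit")) :
#         cuInitNotFound;
#     return func_ptr(Flags);
#   }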
|
DALI-main
|
tools/stub_generator/stub_codegen.py
|
#!/usr/bin/env python
import argparse
import sys
# use packaging from PIP as it is always present on system we are testing on
from pip._vendor.packaging.version import parse
import urllib.parse
try:
import pip._internal.utils.compatibility_tags as p
except ImportError:
try:
import pip._internal.pep425tags as p
except ImportError:
import pip.pep425tags as p
from urllib.request import urlopen, HTTPError, Request, URLError
PYTHON_VERSION = ".".join([str(x) for x in sys.version_info[0:2]])
class PckgVer():
"""Class that holds a version string accompanied with maximum and minimum python version that
this version should support. If python falls beyond version bounds it evaluates to the
empty string
Parameters
----------
`ver`: str
Version that is housed by object of this class
`python_max_ver` : str, optional, default = None
Maximum python version supported by this package. If empty there is no upper bound
`python_min_ver`: str, optional, default = None
        Minimum python version supported by this package. If empty there is no lower bound
    `alias`: str, optional, default = None
Alternative name that should be used during installation instead of general package name
`dependencies` : list of str, optional, default = None
List of packages in ["name==version", ] format that should be installed together with
a given package
"""
def __init__(self, ver, python_min_ver=None, python_max_ver=None, alias=None,
dependencies=None):
self.ver = ver
self.python_min_ver = python_min_ver
self.python_max_ver = python_max_ver
self.name_alias = alias
self.dependent_packages = dependencies
def __bool__(self):
return (not self.python_min_ver or
parse(PYTHON_VERSION) >= parse(self.python_min_ver)) and \
(not self.python_max_ver or parse(PYTHON_VERSION) <= parse(self.python_max_ver))
def __repr__(self):
if self:
return self.ver
else:
return ""
@property
def alias(self):
return self.name_alias
@property
def dependencies(self):
return self.dependent_packages
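# Illustrative behaviour, not part of the original file: a PckgVer prints its
# version only when the running interpreter is inside the declared bounds.
# On Python 3.8:
#
#   str(PckgVer("1.11.0", python_min_ver="3.7"))   # -> "1.11.0"
#   str(PckgVer("1.10.0", python_max_ver="3.6"))   # -> ""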
class BasePackage():
"""Class describing basic methods that package should provide
Parameters
----------
`key`: str
Name this package should be queried for
`versions`: str or PckgVer class object
List of versions this package is available for
`name`: str, optional, default = None
Name of this package used during installation. If empty it is the same as the key
"""
def __init__(self, key, versions, name=None):
self.key = key
if not name:
name = key
self.name = name
self.versions = versions
def clamp_index(self, idx, cuda_version=None):
"""Clamps index to range 0 - num_of_packages - 1
Parameters
----------
        `idx`: int
Index to clamp
`cuda_version`: str, optional, default = None
Cuda version used for a given index
"""
if idx < 0 or idx >= self.get_num_of_version(cuda_version):
idx = 0
return idx
@staticmethod
def get_alias(version):
"""Obtains alias for given version if exists. Otherwise return None
Parameters
----------
`version`: str or PckgVer
Package version
"""
return getattr(version, "alias", None)
def get_dependencies(self, cuda_version=None, idx=None):
"""Obtains dependant packages list if exists. Otherwise return None
Parameters
----------
`version`: str or PckgVer
Package version
"""
version = self.get_version(idx, cuda_version)
return getattr(version, "dependencies", None)
def get_name(self, cuda_version=None, idx=None):
"""Retrives package name.
Parameters
----------
`cuda_version`: str, optional, default = None
Cuda version used for this query
`idx`: int
            Index of the name to retrieve in case a specific version has a different alias
"""
name = BasePackage.get_alias(self.get_version(idx, cuda_version))
if name is None:
name = self.name
return name
def get_uninstall_names(self, cuda_version=None):
"""Retrives package name/s used to uninstall it.
Parameters
----------
`cuda_version`: str, optional, default = None
Cuda version used for this query
"""
version = self.get_all_versions(cuda_version)
uninstall_names = [self.get_name(cuda_version)]
for v in version:
name = BasePackage.get_alias(v)
if name is not None:
uninstall_names.append(name)
# merge into one string
return " ".join(uninstall_names)
def filter_versions(self, versions):
"""Retrieves only compatible versions of this package from provided `versions` list
Parameters
----------
`versions`: list
List of versions to be checked. All versions that evaluate to True are returned
"""
# no need to convert PckgVer to string, it is done by get_install_string when printed
return [v for v in versions if v]
def get_version(self, idx, cuda_version=None):
"""Get versions at a given index, compatible with provided cuda_version
Parameters
----------
`idx`: int
            Index of version to retrieve. If index is beyond 0-num_of_versions-1 range
it is clamped to it
`cuda_version`: str, optional, default = None
Cuda version used for this query
"""
if idx is None:
idx = 0
idx = self.clamp_index(idx, cuda_version)
return self.get_all_versions(cuda_version)[idx]
def get_all_versions(self, cuda_version=None):
"""Get all versions compatible with provided cuda_version
Parameters
----------
`cuda_version`: str, optional, default = None
Cuda version used for this query
"""
raise NotImplementedError
def get_num_of_version(self, cuda_version=None):
"""Obtains the number of available versions for given cuda_version
Parameters
----------
`cuda_version`: str, optional, default = None
Cuda version used for this query
"""
return len(self.get_all_versions(cuda_version))
def get_install_string(self, idx, cuda_version=None):
"""Obtains installation string that pip should accept for version at
a given index with a given cuda_version
Parameters
----------
`idx`: int
            Index of version to retrieve. If index is beyond 0-num_of_versions-1 range
it is clamped to it
`cuda_version`: str, optional, default = None
Cuda version used for this query
"""
version = self.get_version(idx, cuda_version)
op = "" if str(version)[0] in ("<", ">", "=") else "=="
pkg_cmd = f"{self.get_name(cuda_version, idx)}{op}{version}"
deps_cmd = self.get_dependencies(cuda_version, idx)
if deps_cmd is not None:
pkg_cmd = " ".join([pkg_cmd] + deps_cmd)
return pkg_cmd
def get_all_install_strings(self, cuda_version=None):
"""Gets all installation string that pip should accept for a given
cuda version. Providing all of them to pip won't work, but each of
them should be a valid pip argument
Parameters
----------
`cuda_version`: str, optional, default = None
Cuda version used for this query
"""
ret = []
for i in range(self.get_num_of_version(cuda_version)):
ret.append(self.get_install_string(i, cuda_version))
return " ".join(ret)
def get_extra_index(self, cuda_version):
"""Gets a extra url index for pip for given cuda version.
Parameters
----------
`cuda_version`: str
Cuda version used for this query
"""
return ""
def get_links_index(self, cuda_version):
"""Gets a url with direct links to artifacts for pip for given cuda version.
Parameters
----------
`cuda_version`: str
Cuda version used for this query
"""
return ""
class PlainPackage(BasePackage):
"""Class describing a simple package with a key/name and a list of versions.
Cuda version is irrelevant for this package
Parameters
----------
`key`: str
Name this package should be queried for
`versions`: str or PckgVer class object
List of versions this package is available for
    `name`: str, optional, default = None
Name of this package used during installation. If empty it is the same as the key
"""
def __init__(self, key, versions, name=None):
super(PlainPackage, self).__init__(key, versions, name)
def get_all_versions(self, cuda_version=None):
return self.filter_versions(self.versions)
class CudaPackage(BasePackage):
"""Class describing a cuda package with a key/name and a dictionary where the key
is a cuda version and value is the list of versions supported.
Parameters
----------
`key`: str
Name this package should be queried for
`versions`: dict or PckgVer class object
        Dictionary, where the key is a cuda version and value is the list of versions supported
    `name`: str, optional, default = None
Name of this package used during installation. If empty it is the same as the key.
If it includes `{cuda_v}` it is replaced by the cuda_version when queried
"""
def __init__(self, key, versions, name=None):
super(CudaPackage, self).__init__(key, versions, name)
if not isinstance(versions, dict):
raise TypeError("versions argument should by dict type [cuda_version :"
"list_of_versions")
def get_name(self, cuda_version=None, idx=None):
cuda_version = self.max_cuda_version(cuda_version)
name = super().get_name(cuda_version, idx)
return name.format(cuda_v=cuda_version)
def get_all_versions(self, cuda_version):
cuda_version = self.max_cuda_version(cuda_version)
return self.filter_versions(self.versions.get(cuda_version, []))
def max_cuda_version(self, cuda_version):
"""Gets a compatible, available cuda version to one asked for.
If there is no cuda version in the version list that matches the one provided,
the cuda version that is not higher is used 10.2 -> 10, 9.2 -> 9
Parameters
----------
`cuda_version`: str
Cuda version used for this query
"""
max_cuda = None
for ver in sorted(self.versions.keys(), key=int):
if int(ver) <= int(cuda_version):
max_cuda = ver
return max_cuda
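# Illustrative behaviour, not part of the original file: with versions keyed
# as {"90": [...], "110": [...]}, max_cuda_version("113") returns "110" and
# max_cuda_version("89") returns None.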
class CudaPackageExtraIndex(CudaPackage):
"""Class describing a cuda package with a key/name and a dictionary where the key
is a cuda version and value is the list of versions supported.
Parameters
----------
`key`: str
Name this package should be queried for
`versions`: dict or PckgVer class object
        Dictionary, where the key is a cuda version and value is the list of versions supported
`name`: str, optional, default = None
Name of this package used during installation. If empty it is the same as the key.
If it includes `{cuda_v}` it is replaced by the cuda_version when queried
`extra_index`: str, optional, default = ""
Extra url used as pep 503 compatible repository to obtain listed packages
`links_index`: str, optional, default = ""
A URL or path to an html file with direct links to archives
"""
def __init__(self, key, versions, name=None, extra_index="", links_index=""):
super(CudaPackageExtraIndex, self).__init__(key, versions, name)
if not isinstance(versions, dict):
raise TypeError("versions argument should be a dictionary"
" {cuda_version_str : list_of_versions}")
self.extra_index = extra_index
self.links_index = links_index
def get_extra_index(self, cuda_version):
"""Gets a extra url index for pip for a given cuda version.
Parameters
----------
`cuda_version`: str
Cuda version used for this query
"""
cuda_version = self.max_cuda_version(cuda_version)
return self.extra_index.format(cuda_v=cuda_version)
def get_links_index(self, cuda_version):
"""Gets a url with direct links to artifacts for pip for given cuda version.
Parameters
----------
`cuda_version`: str
Cuda version used for this query
"""
cuda_version = self.max_cuda_version(cuda_version)
return self.links_index.format(cuda_v=cuda_version)
class CudaHttpPackage(CudaPackage):
"""Class describing a cuda package with a key/name and a dictionary where the key
is a cuda version and value is the list of directly accessible http links
When it asked for a package version it checks compatible platform tags and provides
a download link to a compatible package
Parameters
----------
`key`: str
Name this package should be queried for
`versions`: str or PckgVer class object
        Dictionary, where the key is a cuda version and value is the list
of directly accessible http links. `{platform}` inside the link is replaced by the
compatible platform tag provided by pip
    `name`: str, optional, default = None
Name of this package used during installation. If empty it is the same as the key.
If it includes `{cuda_v}` it is replaced by the cuda_version when queried
"""
def __init__(self, key, versions, name=None):
super(CudaHttpPackage, self).__init__(key, versions, name)
def get_all_versions(self, cuda_version):
cuda_version = self.max_cuda_version(cuda_version)
ret = []
for v in self.versions[cuda_version]:
vers = self.get_pyvers_name(v, cuda_version)
if vers != "":
ret.append(vers)
return ret
def get_install_string(self, idx, cuda_version=None):
return "{version}".format(version=self.get_version(idx, cuda_version))
def test_request(self, url):
"""Checks if a provided url is available
Parameters
----------
`url`: str
Package url to be tested.
"""
url = url.split("://")
url[-1] = urllib.parse.quote(url[-1])
url = "://".join(url)
request = Request(url)
request.get_method = lambda: 'HEAD'
attempts = 3
while attempts:
try:
_ = urlopen(request, timeout=100)
return url
except HTTPError:
return None
except URLError:
attempts -= 1
if attempts == 0:
raise
print("Cannot reach {}, attempts left {}".format(url, attempts))
def get_pyvers_name(self, url, cuda_version):
"""Checks if a provided url is available for a given cuda version
It checks what package is available and is compatible with the available platforms
returned by the pip
Parameters
----------
`url`: str
Package url to be tested. `{cuda_v}` is replaced by cuda_version and `{platform}`
by the platform tag
`cuda_version`: str
Cuda version used for this query
"""
if isinstance(p.get_supported()[0], tuple):
# old PIP returns tuple
for py_ver in [(x, y, z) for (x, y, z) in p.get_supported()
if y != 'none' and 'any' not in y]:
py_ver = "-".join(py_ver)
ret = self.test_request(url.format(platform=py_ver, cuda_v=cuda_version))
if ret:
return ret
else:
# new PIP returns object
for py_ver in [tag for tag in p.get_supported()
if tag.abi != 'none' and tag.platform != 'any']:
py_ver = str(py_ver)
ret = self.test_request(url.format(platform=py_ver, cuda_v=cuda_version))
if ret:
return ret
return ""
all_packages = [PlainPackage("numpy", [">=1.17,<1.24"]),
PlainPackage("opencv-python", [PckgVer("4.5.4.60", dependencies=["numpy<1.24"])]),
CudaPackage("cupy",
{"113": [PckgVer("9.6.0", python_max_ver="3.6",
dependencies=["numpy<1.24"]),
PckgVer("10.0.0", python_min_ver="3.7",
dependencies=["numpy<1.24"])]},
"cupy-cuda{cuda_v}"),
CudaPackage("mxnet",
{"113": [PckgVer("1.9.1", dependencies=["numpy<1.24"])]},
"mxnet-cu{cuda_v}"),
CudaPackage("tensorflow-gpu",
{"110": [
PckgVer("2.12.1", python_min_ver="3.8", alias="tensorflow",
dependencies=["protobuf<4", "numpy<1.24",
"urllib3<2.0", "typing_extensions<4.6"]),
PckgVer("2.13.0", python_min_ver="3.8", alias="tensorflow",
dependencies=["protobuf<4", "numpy<1.24",
"urllib3<2.0", "typing_extensions<4.6"])]}),
CudaPackageExtraIndex("torch",
# use the older Torch just for python 3.6
{"113": [PckgVer("1.10.0", python_max_ver="3.6",
dependencies=["numpy<1.24"]),
PckgVer("1.11.0", python_min_ver="3.7",
dependencies=["numpy<1.24"])]},
extra_index="https://download.pytorch.org/whl/cu{cuda_v}/"),
CudaPackageExtraIndex("torchvision",
# use the older Torch just for python 3.6
{"113": [PckgVer("0.11.0", python_max_ver="3.6",
dependencies=["numpy<1.24"]),
PckgVer("0.12.0", python_min_ver="3.7",
dependencies=["numpy<1.24"])]},
extra_index="https://download.pytorch.org/whl/cu{cuda_v}/"),
CudaPackageExtraIndex("paddlepaddle-gpu",
{"110": [PckgVer("2.4.1.post117",
dependencies=["protobuf<4", "numpy<1.24"])]},
links_index="https://www.paddlepaddle.org.cn/"
"whl/linux/mkl/avx/stable.html"),
CudaPackageExtraIndex("jax", # name used in our test script, see the mxnet case
{"113": [PckgVer("0.4.11",
python_min_ver="3.8",
dependencies=["jaxlib"])]},
# name used during installation
name="jax[cuda{cuda_v[0]}{cuda_v[1]}_local]",
links_index=("https://storage.googleapis.com/"
"jax-releases/jax_cuda_releases.html")),
CudaPackage("numba",
{"110": [
PckgVer("0.57.0", python_min_ver="3.8",
dependencies=["numpy<1.24"]),
PckgVer("0.56.0", python_min_ver="3.7", python_max_ver="3.7",
dependencies=["numpy<1.24"]),
PckgVer("0.53.1", python_max_ver="3.6")]})
]
all_packages_keys = [pckg.key for pckg in all_packages]
parser = argparse.ArgumentParser(description='Env setup helper')
parser.add_argument('--list', '-l', help='list configs', action='store_true', default=False)
parser.add_argument('--num', '-n', help='return number of all configurations possible',
action='store_true', default=False)
parser.add_argument('--install', '-i', dest='install', type=int, help="get Nth configuration",
default=-1)
parser.add_argument('--all', '-a', dest='getall', action='store_true',
help='return packages in all versions')
parser.add_argument('--remove', '-r', dest='remove', help="list packages to remove",
action='store_true', default=False)
parser.add_argument('--cuda', dest='cuda', default="90", help="CUDA version to use")
parser.add_argument('--use', '-u', dest='use', default=[],
help="provide only packages from this list", nargs='*')
parser.add_argument('--extra_index', '-e', dest='extra_index',
help="return relevant extra indices list for pip", action='store_true',
default=False)
parser.add_argument('--links_index', '-k', dest='links_index',
help="return relevant link indices list for pip", action='store_true',
default=False)
args = parser.parse_args()
def print_configs(cuda_version):
"""Prints all available configurations"""
for pckg in all_packages:
print("{}:".format(pckg.get_name(cuda_version)))
for v in pckg.get_all_versions(cuda_version):
alias = BasePackage.get_alias(v)
if alias is not None:
op = "" if str(v)[0] in ("<", ">", "=") else "=="
v = f"{alias}{op}{v}"
print("\t{}".format(v))
def cal_num_of_configs(packages, cuda_version):
"""Calculates how many different version configurations are available for given
packages and cuda version"""
ret = 1
for pckg in all_packages:
if pckg.key in packages:
ret *= pckg.get_num_of_version(cuda_version)
return ret
def for_all_pckg(packages, fun, add_additional_packages=True):
"""Iterates over all packages, executes a function. Returns all function results as a list"""
ret = []
for pckg in all_packages:
if pckg.key in packages:
ret.append(fun(pckg))
additional = []
if add_additional_packages:
# add all remaining used packages with default versions
additional = [v for v in packages if v not in all_packages_keys]
return ret + additional
def get_remove_string(packages, cuda_version):
"""Creates pip remove string for given cuda version and package list"""
    # Remove only those packages whose versions we want to change
ret = for_all_pckg(packages, lambda pckg: pckg.get_uninstall_names(cuda_version))
return " ".join(ret)
def get_all_strings(packages, cuda_version):
"""Prints all available configurations for given package list and cuda version"""
ret = for_all_pckg(packages, lambda pckg: pckg.get_all_install_strings(cuda_version))
return " ".join(ret)
def get_install_string(idx, packages, cuda_version):
"""Creates pip install string for given cuda version, variant number and package list"""
ret = for_all_pckg(packages, lambda pckg: pckg.get_install_string(idx, cuda_version))
    # remaining used packages were already appended with default versions by for_all_pckg
return " ".join(ret)
def get_extra_indices(packages, cuda_version):
"""Get all extra indices for given packages"""
    ret = for_all_pckg(packages, lambda pckg: pckg.get_extra_index(cuda_version),
                       add_additional_packages=False)
    return " ".join(ret)
def get_links_indices(packages, cuda_version):
"""Get all urls with direct links for given packages"""
    ret = for_all_pckg(packages, lambda pckg: pckg.get_links_index(cuda_version),
                       add_additional_packages=False)
    return " ".join(ret)
def main():
global args
if args.list:
print_configs(args.cuda)
elif args.num:
print(cal_num_of_configs(args.use, args.cuda) - 1)
elif args.remove:
print(get_remove_string(args.use, args.cuda))
elif args.getall:
print(get_all_strings(args.use, args.cuda))
elif args.install >= 0:
print(get_install_string(args.install, args.use, args.cuda))
elif args.extra_index:
print(get_extra_indices(args.use, args.cuda))
elif args.links_index:
print(get_links_indices(args.use, args.cuda))
if __name__ == "__main__":
main()
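# Illustrative invocations (hypothetical package selection; flags as defined above):
#   python setup_packages.py -n -u numpy cupy --cuda 113    # index of the last config
#   python setup_packages.py -i 0 -u numpy cupy --cuda 113  # pip install string for config 0
#   python setup_packages.py -r -u numpy cupy --cuda 113    # pip uninstall string
#   python setup_packages.py -e -u torch --cuda 113         # extra pip index URLs, if any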
|
DALI-main
|
qa/setup_packages.py
|
import sys
from nose.core import run_exit
import collections
import nose.case
import nose.inspector
import nose.loader
import nose.suite
import nose.plugins.attrib
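# Python 3.10 removed deprecated aliases such as collections.Callable (available from
# collections.abc since 3.3), while nose 1.x still references them. Point the affected
# nose submodules at collections.abc so they keep working on newer interpreters.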
if sys.version_info >= (3, 10) and not hasattr(collections, "Callable"):
nose.case.collections = collections.abc
nose.inspector.collections = collections.abc
nose.loader.collections = collections.abc
nose.suite.collections = collections.abc
nose.plugins.attrib.collections = collections.abc
if sys.argv[0].endswith('__main__.py'):
sys.argv[0] = '%s -m nose_wrapper' % sys.executable
run_exit()
|
DALI-main
|
qa/nose_wrapper/__main__.py
|
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import jax
import jax.numpy as jnp
import numpy as np
import logging as log
import argparse
from nvidia.dali import pipeline_def
from nvidia.dali.backend import TensorGPU
import nvidia.dali.types as types
import nvidia.dali.plugin.jax as dax
from jax.sharding import Mesh
from jax.sharding import PartitionSpec
from jax.sharding import PositionalSharding
from jax.sharding import NamedSharding
def get_dali_tensor_gpu(value, shape, dtype, device_id=0) -> TensorGPU:
"""Helper function to create DALI TensorGPU.
    Args:
        value : Value to fill the tensor with.
        shape : Shape for the tensor.
        dtype : Data type for the tensor.
        device_id : Id of the GPU device to create the tensor on, by default 0.
    Returns:
        TensorGPU: DALI TensorGPU with provided shape and dtype filled
        with provided value.
    """
@pipeline_def(num_threads=1, batch_size=1)
def dali_pipeline():
values = types.Constant(value=np.full(shape, value, dtype), device='gpu')
return values
pipe = dali_pipeline(device_id=device_id)
pipe.build()
dali_output = pipe.run()
return dali_output[0][0]
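# Minimal usage sketch (assumes a visible GPU and DALI built with JAX support):
#   tensor = get_dali_tensor_gpu(value=7, shape=(1,), dtype=np.int32)
#   array = dax.integration._to_jax_array(tensor)  # -> jnp.array([7]) on GPU 0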
def print_devices(process_id):
log.info(f"Local devices = {jax.local_device_count()}, "
f"global devices = {jax.device_count()}")
log.info("All devices: ")
print_devices_details(jax.devices(), process_id)
log.info("Local devices:")
print_devices_details(jax.local_devices(), process_id)
def print_devices_details(devices_list, process_id):
for device in devices_list:
log.info(f"Id = {device.id}, platform = {device.platform}, "
f"process_id = {device.process_index}, kind = {device.device_kind}")
def run_distributed_sharding_test(sharding, process_id):
    """Builds per-device DALI shards and assembles them into a global sharded jax.Array."""
    log.info(f"Sharding: {sharding}")
dali_local_shards = []
    for device_id, device in enumerate(jax.local_devices()):
        current_shard = dax.integration._to_jax_array(
            get_dali_tensor_gpu(process_id, (1,), np.int32, device_id))
        assert current_shard.device() == device
        dali_local_shards.append(current_shard)
dali_sharded_array = jax.make_array_from_single_device_arrays(
shape=(jax.device_count(),), sharding=sharding, arrays=dali_local_shards)
assert len(dali_sharded_array.device_buffers) == jax.local_device_count()
    for shard_id, buffer in enumerate(dali_sharded_array.device_buffers):
        assert buffer == jnp.array([process_id])
        assert buffer.device() == jax.local_devices()[shard_id]
def test_positional_sharding_workflow(process_id):
sharding = PositionalSharding(jax.devices())
    run_distributed_sharding_test(sharding=sharding, process_id=process_id)
log.info("Passed positional sharding workflow test")
def test_named_sharding_workflow(process_id):
    mesh = Mesh(jax.devices(), axis_names=('device',))
sharding = NamedSharding(mesh, PartitionSpec('device'))
    run_distributed_sharding_test(sharding=sharding, process_id=process_id)
log.info("Passed named sharding workflow test")
def run_multiprocess_workflow(process_id=0, cluster_size=1):
jax.distributed.initialize(
coordinator_address="localhost:12321",
num_processes=cluster_size,
process_id=process_id)
log.basicConfig(
format=f"PID {process_id}: %(message)s", level=log.INFO)
print_devices(process_id=process_id)
test_positional_sharding_workflow(process_id=process_id)
test_named_sharding_workflow(process_id=process_id)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--size", type=int, required=True)
args = parser.parse_args()
run_multiprocess_workflow(process_id=0, cluster_size=args.size)
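# Illustrative two-process launch on a single node (the client script in this test
# directory, shown below, connects with a non-zero --id):
#   python jax_server.py --size 2 &
#   python jax_client.py --id 1 --size 2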
|
DALI-main
|
qa/TL3_JAX_multiprocess/jax_server.py
|
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from jax_server import run_multiprocess_workflow
import argparse
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--id", type=int, required=True)
parser.add_argument("--size", type=int, required=True)
args = parser.parse_args()
    assert args.id != 0, "Client cannot have process id == 0"
run_multiprocess_workflow(process_id=args.id, cluster_size=args.size)
|
DALI-main
|
qa/TL3_JAX_multiprocess/jax_client.py
|
from nvidia.dali import backend as b
import nvidia.dali.ops as ops
import nvidia.dali.fn as fn
import nvidia.dali.plugin.pytorch
import nvidia.dali.plugin.numba.experimental
import sys
# Dictionary with modules that can have registered Ops
ops_modules = {
'nvidia.dali.ops': nvidia.dali.ops,
'nvidia.dali.plugin.pytorch': nvidia.dali.plugin.pytorch,
'nvidia.dali.plugin.numba.experimental': nvidia.dali.plugin.numba.experimental
}
# Some operators might have a different module for the fn wrapper
module_mapping = {
'nvidia.dali.plugin.pytorch' : 'nvidia.dali.plugin.pytorch.fn',
'nvidia.dali.plugin.numba.experimental' : 'nvidia.dali.plugin.numba.fn.experimental'
}
# Remove ops not available in the fn API
removed_ops = [
'Compose'
]
cpu_ops = ops.cpu_ops()
gpu_ops = ops.gpu_ops()
mix_ops = ops.mixed_ops()
all_ops = cpu_ops.union(gpu_ops).union(mix_ops)
link_formatter = ':meth:`{op} <{module}.{op}>`'
def to_fn_name(full_op_name):
tokens = full_op_name.split('.')
tokens[-1] = fn._to_snake_case(tokens[-1])
return '.'.join(tokens)
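# e.g. to_fn_name("decoders.ImageDecoder") -> "decoders.image_decoder" (hypothetical name,
# assuming fn._to_snake_case does the usual CamelCase -> snake_case conversion)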
def to_fn_module(module_name):
if module_name in module_mapping:
return module_mapping[module_name]
else:
return module_name.replace('.ops', '.fn')
def name_sort(op_name):
_, module, name = ops._process_op_name(op_name)
return '.'.join(module + [name.upper()])
def longest_fn_string():
longest_str = ""
for op in sorted(all_ops, key=name_sort):
fn_string = ""
op_full_name, submodule, op_name = ops._process_op_name(op)
for (module_name, module) in ops_modules.items():
m = module
for part in submodule:
m = getattr(m, part, None)
if m is None:
break
if m is not None and hasattr(m, op_name):
fn_string = link_formatter.format(op = to_fn_name(op_full_name), module = to_fn_module(module_name))
if len(fn_string) > len(longest_str):
longest_str = fn_string
return longest_str
op_name_max_len = len(longest_fn_string())
name_bar = op_name_max_len * '='
def fn_to_op_table(out_filename):
    formatter = '{:{c}<{op_name_max_len}} {:{c}<{op_name_max_len}}\n'
    doc_table = ''
    doc_table += formatter.format('', '', op_name_max_len=op_name_max_len, c='=')
    doc_table += formatter.format('Function (fn.*)', 'Operator Object (ops.*)', op_name_max_len=op_name_max_len, c=' ')
    doc_table += formatter.format('', '', op_name_max_len=op_name_max_len, c='=')
for op in sorted(all_ops, key=name_sort):
op_full_name, submodule, op_name = ops._process_op_name(op)
schema = b.TryGetSchema(op)
if schema:
if schema.IsDocHidden():
continue
for (module_name, module) in ops_modules.items():
m = module
for part in submodule:
m = getattr(m, part, None)
if m is None:
break
if m is not None and hasattr(m, op_name):
op_string = link_formatter.format(op = op_full_name, module = module_name)
fn_string = link_formatter.format(op = to_fn_name(op_full_name), module = to_fn_module(module_name))
if op_name in removed_ops:
fn_string = "N/A"
                    op_doc = formatter.format(fn_string, op_string, op_name_max_len=op_name_max_len, c=' ')
doc_table += op_doc
    doc_table += formatter.format('', '', op_name_max_len=op_name_max_len, c='=')
with open(out_filename, 'w') as f:
f.write(doc_table)
def operations_table_str(ops_to_process):
    formatter = '{:{c}<{op_name_max_len}} {:{c}^48} {:{c}<150}\n'
    doc_table = ''
    doc_table += '\n.. currentmodule:: nvidia.dali.fn\n\n'
    doc_table += formatter.format('', '', '', op_name_max_len=op_name_max_len, c='=')
    doc_table += formatter.format('Function', 'Device support', 'Short description', op_name_max_len=op_name_max_len, c=' ')
    doc_table += formatter.format('', '', '', op_name_max_len=op_name_max_len, c='=')
for op in sorted(ops_to_process, key=name_sort):
op_full_name, submodule, op_name = ops._process_op_name(op)
if op_name in removed_ops:
continue
schema = b.TryGetSchema(op)
short_descr = ''
devices = []
if op in cpu_ops:
devices += ['CPU']
if op in mix_ops:
devices += ['Mixed']
if op in gpu_ops:
devices += ['GPU']
devices_str = ', '.join(devices)
if schema:
if schema.IsDocHidden():
continue
full_doc = schema.Dox()
else:
full_doc = eval('ops.' + op).__doc__
short_descr = full_doc.split("\n\n")[0].replace('\n', ' ').replace("::", '.')
for (module_name, module) in ops_modules.items():
m = module
for part in submodule:
m = getattr(m, part, None)
if m is None:
break
if m is not None and hasattr(m, op_name):
fn_string = link_formatter.format(op = to_fn_name(op_full_name), module = to_fn_module(module_name))
                op_doc = formatter.format(fn_string, devices_str, short_descr, op_name_max_len=op_name_max_len, c=' ')
doc_table += op_doc
    doc_table += formatter.format('', '', '', op_name_max_len=op_name_max_len, c='=')
return doc_table
def operations_table(out_filename):
doc_table = operations_table_str(all_ops)
with open(out_filename, 'w') as f:
f.write(doc_table)
if __name__ == "__main__":
    assert 2 <= len(sys.argv) <= 3
operations_table(sys.argv[1])
if len(sys.argv) == 3:
fn_to_op_table(sys.argv[2])
|
DALI-main
|
docs/operations_table.py
|
#!/usr/bin/python3
from pathlib import Path
def _parse_entry(entry):
"""Wrap in DocEntry object if it the entry was just a string"""
if isinstance(entry, str):
return doc_entry(entry)
else:
return entry
class Doc:
def __init__(self, title, underline_char, options, entries):
self.title = title
self.underline_char = underline_char
if self.underline_char is not None and len(self.underline_char) != 1:
raise ValueError(
f"Expected only 1 character for `underline_char`, got {self.underline_char}.")
if not isinstance(options, list):
self.options = [options]
else:
self.options = options
        self.entries = [_parse_entry(entry) for entry in entries]
def get_title(self):
if self.underline_char is None:
return f".. title:: {self.title}\n"
else:
return f"{self.title}\n{self.underline_char * len(self.title)}\n"
class DocEntry:
def __init__(self, name, operator_refs):
self.name = name
if operator_refs is not None:
if isinstance(operator_refs, list):
for elem in operator_refs:
if not isinstance(elem, OpReference):
raise TypeError(
"Expected a single op_reference or a list of them to be provided")
self.operator_refs = operator_refs
elif not isinstance(operator_refs, OpReference):
raise TypeError("Expected a single op_reference or a list of them to be provided")
else:
# Single OpReference, normalize to list
self.operator_refs = [operator_refs]
else:
# or just keep it as None
self.operator_refs = None
# If we need to recurse over this entry
        self.python_index = name.endswith(".py")
def name_to_sphinx(self):
if self.name.endswith(".py"):
return str(Path(self.name).with_suffix(".rst"))
return self.name
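    # name_to_sphinx() example (hypothetical path): "operations/index.py" -> "operations/index.rst";
    # names that do not end with ".py" pass through unchanged.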
class OpReference:
def __init__(self, operator, docstring, order=None):
self.operator = operator
self.docstring = docstring
self.order = 1000000 if order is None else order
def doc(title, underline_char=None, options=":maxdepth: 2", entries=[]):
"""Main entry point for index.py file that replaces a standard index.rst file.
The contents of this doc will be used to generate corresponding index.rst
Parameters
----------
title : str
Either a title used within `..title::` directive or if underline_char is present,
the underline_char will be used to do the sphinx header by placing
it len(title) times under the title.
underline_char : str, optional
If provided, do not generate a `..title::` section but a header with specified underline
options : str or list[str]
List of options like `:maxdepth:` for the toctree.
entries : list[str or doc_entry(...)]
Toctree of subpages, can be either represented by regular strings or by
`doc_entry()` that allows to put the reference from operator to given notebook.
Entries come in three form:
* a path to Python index file, for example: "operations/index.py" must lead to another
file with `doc()` section to be processed recursively.
* any other string representing path that doesn't end with `.py` - they will be inserted
as is. No extension also supported with the same behaviour as regular Sphinx.
Python processing stops here.
* an doc_entry() - allows to provide optional reference.
"""
global doc_return_value
doc_return_value = Doc(title, underline_char, options, entries)
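# Illustrative index.py content consumed via _obtain_doc() (hypothetical paths):
#   doc(title="Examples", underline_char="=",
#       entries=["intro.ipynb",
#                doc_entry("resize.ipynb",
#                          op_reference("fn.resize", "Tutorial describing resize"))])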
def doc_entry(name, operator_refs=None):
"""Place given notebook or doc page in the toctree and optionally add a reference from operator
documentation to that notebook or page.
Parameters
----------
name : str
Name of jupyter notebook or rst file, must contain proper extension.
operator_refs : OpReference or List[OpReference], optional
Optional reference, defined by `op_reference()` call, by default None
"""
return DocEntry(name, operator_refs)
def op_reference(operator, docstring, order=None):
"""Add a reference from operator to this notebook with specified docstring.
Parameters
----------
operator : str
Name of operator without nvidia.dali prefix, for example fn.resize or fn.gaussian_blur
docstring : str
Text that would appear in the see also block for given link.
order : int, optional
The order in which this entry should appear - lower values appear on top
"""
return OpReference(operator, docstring, order)
def _obtain_doc(py_file):
"""Extract the doc() definition from index.py file"""
with open(py_file, 'r') as f:
doc_file = f.read()
exec(doc_file)
return doc_return_value
def _collect_references(base_path, entry_name, operator_refs, result_dict):
if operator_refs is None:
return
for op_ref in operator_refs:
        if op_ref.operator not in result_dict:
result_dict[op_ref.operator] = []
result_dict[op_ref.operator].append(
(op_ref.docstring, str((base_path / entry_name).with_suffix(".html")), op_ref))
def _document_examples(path, result_dict=None):
    # Avoid a mutable default argument - it would accumulate results across calls
    if result_dict is None:
        result_dict = {}
    if not path.endswith(".py"):
        raise ValueError(f"Expected a path to Python index file (ending with '.py'), got {path}")
rst_file = Path(path).with_suffix(".rst")
doc_contents = _obtain_doc(path)
tab = " " * 3
with open(rst_file, "w") as f:
f.write(doc_contents.get_title())
f.write("\n")
f.write(f".. toctree::\n")
for option in doc_contents.options:
f.write(f"{tab}{option}\n")
f.write("\n")
for entry in doc_contents.entries:
f.write(f"{tab}{entry.name_to_sphinx()}\n")
canonical_path = Path(path)
base_path = canonical_path.parent
for entry in doc_contents.entries:
_collect_references(base_path, entry.name_to_sphinx(), entry.operator_refs, result_dict)
# For Python index files do the recursion on the actual value stored in entry.name
if entry.python_index:
_document_examples(str(base_path / entry.name), result_dict)
return result_dict
def document_examples(path):
"""Main api entry point, for given path to top-level index.py file containing doc() defintion
will generate a dictionary mapping operator/module to the list of referenced examples.
Parameters
----------
path : str
Path to Python index file (with .py extension)
Returns
-------
Dict
Mapping from fn.operator or fn.module to list of example references
"""
    # use names that do not shadow the built-in `dict` and `str`
    references = _document_examples(path)
    for key in references:
        entries = sorted(references[key], key=lambda entry: entry[2].order)
        references[key] = [(desc, url) for (desc, url, _) in entries]
    return references
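# Usage sketch (hypothetical paths and result):
#   references = document_examples("examples/index.py")
#   references.get("fn.resize")  # e.g. [("Tutorial describing resize", "examples/resize.html")]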
|
DALI-main
|
docs/doc_index.py
|
# -*- coding: utf-8 -*-
#
# Configuration file for the Sphinx documentation builder.
#
# This file does only contain a selection of the most common options. For a
# full list see the documentation:
# http://www.sphinx-doc.org/en/master/config
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# sys.path.insert(0, os.path.abspath('..'))
import os
import sys
import sphinx_rtd_theme
from sphinx.ext.autodoc.mock import mock
from sphinx.ext.autodoc import between, ClassDocumenter, AttributeDocumenter
from sphinx.util import inspect
from builtins import str
from enum import Enum
import re
import subprocess
from pathlib import Path
from datetime import date
# -- Project information -----------------------------------------------------
project = u'NVIDIA DALI'
copyright = u'2018-{}, NVIDIA Corporation'.format(date.today().year)
author = u'NVIDIA Corporation'
version_long = u'0.0.0'
with open("../VERSION") as f:
version_long = f.readline()
version_short = re.match(r'^[\d]+\.[\d]+', version_long).group(0)
git_sha = os.getenv("GIT_SHA")
if not git_sha:
try:
git_sha = subprocess.check_output(["git", "log", "--pretty=format:'%h'", "-n1"]).decode('ascii').replace("'","").strip()
    except Exception:
git_sha = u'0000000'
git_sha = git_sha[:7] if len(git_sha) > 7 else git_sha
version = str(version_long + u"-" + git_sha)
# The full version, including alpha/beta/rc tags
release = str(version_long)
# Use a predefined path as a place for all the automatically generated docs pages
generated_path = Path("./operations")
generated_path.mkdir(exist_ok=True)
# generate table of supported operators and their devices
# mock torch and numba, which are required by the doc-generation modules imported below
with mock(["torch", "numba"]):
sys.path.insert(0, os.path.abspath('./'))
import operations_table
operations_table.operations_table(generated_path / "fn_table")
operations_table.fn_to_op_table(generated_path / "fn_to_op_table")
import doc_index
references = doc_index.document_examples('examples/index.py')
import autodoc_submodules
autodoc_submodules.op_autodoc(generated_path / "op_autodoc")
autodoc_submodules.fn_autodoc(generated_path / "fn_autodoc", generated_path, references)
# Uncomment to keep warnings in the output. Useful for verbose build and output debugging.
# keep_warnings = True
# hack: version is used for html creation, so put the version picker
# link here as well:
option_on = " selected"
option_off = ""
if "dev" in version_long:
release_opt = option_off
main_opt = option_on
option_nr = 1
html_baseurl = "https://docs.nvidia.com/deeplearning/dali/main-user-guide/docs/"
else:
release_opt = option_on
main_opt = option_off
option_nr = 0
html_baseurl = "https://docs.nvidia.com/deeplearning/dali/user-guide/docs/"
version = version + """<br/>
Version select: <select onChange="window.location.href = this.value" onFocus="this.selectedIndex = {0}">
<option value="https://docs.nvidia.com/deeplearning/dali/user-guide/docs/index.html"{1}>Current release</option>
<option value="https://docs.nvidia.com/deeplearning/dali/main-user-guide/docs/index.html"{2}>main (unstable)</option>
<option value="https://docs.nvidia.com/deeplearning/dali/archives/index.html">Older releases</option>
</select>""".format(option_nr, release_opt, main_opt)
# -- General configuration ---------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.mathjax',
'sphinx.ext.napoleon',
'sphinx.ext.ifconfig',
'sphinx.ext.extlinks',
'IPython.sphinxext.ipython_console_highlighting',
'nbsphinx',
'sphinx.ext.intersphinx',
'sphinx.ext.autosectionlabel'
]
# https://stackoverflow.com/questions/67473396/shorten-display-format-of-python-type-annotations-in-sphinx
autodoc_typehints_format = 'short'
python_use_unqualified_type_names = True
autodoc_typehints = 'none'
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The main toctree document.
main_doc = 'index'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path .
exclude_patterns = [u'_build', 'Thumbs.db', '.DS_Store', '**.ipynb_checkpoints']
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# Mock some of the dependencies for building docs. The tf-plugin doc checks the tf version before
# loading, so we do not mock tensorflow and do not need to extend the logic there.
autodoc_mock_imports = ['paddle', 'torch', 'torchvision']
# -- Options for MathJax -----------------------------------------------------
# Configure the MathJax to use SVG rendering as a default instead of the CHTML one.
# Apparently, this is how MathJax is supposed to be configured based on their converter
# https://mathjax.github.io/MathJax-demos-web/convert-configuration/convert-configuration.html
# The import is crucial; in version two it was apparently enough to set
# `jax: ["input/TeX", "output/SVG"]` in the config.
# We need it because the newer version of MathJax tries to render some vertical and horizontal
# lines with less than 1 pixel, which don't show up in Firefox in some cases:
# * https://github.com/mathjax/MathJax/issues/2795
# * https://bugzilla.mozilla.org/show_bug.cgi?id=1741887
# The bug happens only with the CHTML renderer.
mathjax_path = "https://cdn.jsdelivr.net/npm/mathjax@3/es5/tex-svg.js"
mathjax_config = {
'loader': {
'load': ['output/svg']
},
'ignoreHtmlClass': 'tex2jax_ignore',
'processHtmlClass': 'tex2jax_process'
}
# -- Options for Napoleon ----------------------------------------------------
napoleon_custom_sections = ['Supported backends']
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'sphinx_rtd_theme'
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
html_theme_options = {
'canonical_url': 'https://docs.nvidia.com/deeplearning/dali/user-guide/docs/index.html',
'collapse_navigation': False,
'display_version': True,
'logo_only': False,
}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Download favicon and set it (the variable `html_favicon`) for this project.
# It must be relative path.
favicon_rel_path = "nvidia.ico"
subprocess.call(["wget", "-O", favicon_rel_path, "https://docs.nvidia.com/images/nvidia.ico"])
html_favicon = favicon_rel_path
subprocess.call(["wget", "-O", "dali.png", "https://raw.githubusercontent.com/NVIDIA/DALI/main/dali.png"])
# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# The default sidebars (for documents that don't match any pattern) are
# defined by theme itself. Builtin themes are using these templates by
# default: ``['localtoc.html', 'relations.html', 'sourcelink.html',
# 'searchbox.html']``.
#
# html_sidebars = {}
# -- Options for HTMLHelp output ---------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'NVIDIADALIdoc'
# -- Options for LaTeX output ------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(main_doc, 'NVIDIADALI.tex', u'NVIDIA DALI Documentation',
u'NVIDIA Corporation', 'manual'),
]
# -- Options for manual page output ------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(main_doc, 'nvidiadali', u'NVIDIA DALI Documentation',
[author], 1)
]
# -- Options for Texinfo output ----------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(main_doc, 'NVIDIADALI', u'NVIDIA DALI Documentation',
author, 'NVIDIADALI', 'One line description of project.',
'Miscellaneous'),
]
# -- Extension configuration -------------------------------------------------
extlinks = {
'issue': ('https://github.com/NVIDIA/DALI/issues/%s', 'issue %s'),
'fileref': ('https://github.com/NVIDIA/DALI/tree/' +
(git_sha if git_sha != u'0000000' else "main") + '/%s', '%s'),
}
from typing import (
Any, Callable, Dict, Iterator, List, Optional, Sequence, Set, Tuple, Type, TypeVar, Union
)
from typing import get_type_hints
_dali_enums = ["DALIDataType", "DALIInterpType", "DALIImageType", "PipelineAPIType"]
count_unique_visitor_script = os.getenv("ADD_NVIDIA_VISITS_COUNTING_SCRIPT")
html_context = {
'nvidia_analytics_id': count_unique_visitor_script
}
class EnumDocumenter(ClassDocumenter):
# Register as .. autoenum::
objtype = 'enum'
# Produce .. py:class:: fields in the RST doc
directivetype = 'class'
def __init__(self, *args):
super().__init__(*args)
@classmethod
def can_document_member(cls, member, membername, isattr, parent):
"""Verify that we handle only the registered DALI enums. Pybind doesn't subclass Enum class,
so we need an explicit list.
"""
return membername in _dali_enums and isinstance(parent, ClassDocumenter)
def filter_members(self, members, want_all):
"""After self.get_object_members() obtained all members, this function filters only
the ones we're interested in.
We can do the sorting here based on the values, and pass through in self.sort_members()
"""
# Since pybind11 https://github.com/pybind/pybind11/pull/2739 there is an extra `value` member
# returned by get_object_members(). Here we are filtering the list, to keep only enum members
filtered = [member for member in members if member[0] in self.object.__members__.keys()]
filtered = super().filter_members(filtered, want_all)
# sort by the actual value of enum - this is a tuple of (name, value, boolean)
def get_member_value(member_desc):
_, member_value, _ = member_desc
if isinstance(member_value, Enum):
return member_value.value
else:
return int(member_value)
filtered.sort(key = get_member_value)
return filtered
def sort_members(self, documenters, order):
"""Ignore the order. Here we have access only to documenters that carry the name
and not the object. We need to sort based on the enum values and we do it in
self.filter_members()
"""
return documenters
class EnumAttributeDocumenter(AttributeDocumenter):
# Give us higher priority over Sphinx native AttributeDocumenter which is 10, or 11 in case
# of more specialized attributes.
priority = 12
@classmethod
def can_document_member(cls, member, membername, isattr, parent):
"""Run only for the Enums supported by DALI
"""
return isinstance(parent, EnumDocumenter)
def add_directive_header(self, sig):
"""Greatly simplified AttributeDocumenter.add_directive_header()
as we know we're dealing with only specific enums here, we can append a line of doc
with just their value.
"""
super(AttributeDocumenter, self).add_directive_header(sig)
def setup(app):
if count_unique_visitor_script:
app.add_js_file(count_unique_visitor_script)
app.add_js_file('redirect.js')
# Register a sphinx.ext.autodoc.between listener to ignore everything
# between lines that contain the word <SPHINX_IGNORE>
app.connect('autodoc-process-docstring', between('^.*<SPHINX_IGNORE>.*$', exclude=True))
app.add_autodocumenter(EnumDocumenter)
app.add_autodocumenter(EnumAttributeDocumenter)
return app
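# With EnumDocumenter registered above, RST sources can document the listed DALI enums
# with the autoenum directive (illustrative):
#   .. autoenum:: nvidia.dali.types.DALIDataType
#      :members: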
|
DALI-main
|
docs/conf.py
|
from nvidia.dali import backend as b
import nvidia.dali.ops as ops
import nvidia.dali.plugin.pytorch
import nvidia.dali.plugin.numba
import inspect
import sys
from pathlib import Path
import operations_table
# Dictionary with modules that can have registered Ops
ops_modules = {
'nvidia.dali.ops': nvidia.dali.ops,
'nvidia.dali.plugin.numba.experimental': nvidia.dali.plugin.numba.experimental,
}
exclude_ops_members = {
'nvidia.dali.ops': ["PythonFunctionBase"]
}
fn_modules = {
'nvidia.dali.fn': nvidia.dali.fn,
'nvidia.dali.plugin.pytorch.fn': nvidia.dali.plugin.pytorch.fn,
'nvidia.dali.plugin.numba.fn.experimental': nvidia.dali.plugin.numba.fn.experimental,
}
exclude_fn_members = {
}
mod_additional_doc = {
    'nvidia.dali.fn.transforms': "All operators in this module support only the CPU device, as they are meant " +
    "to be provided as an input to named keyword operator arguments. For more details, check the relevant " +
    ":ref:`pipeline documentation section<Processing Graph Structure>`."
}
def get_modules(top_modules):
modules = []
for module in sys.modules.keys():
for doc_module in top_modules:
if module.startswith(doc_module) and not module.endswith('hidden'):
modules += [module]
return sorted(modules)
def get_functions(module):
"""Get all function names (so DALI API operators) from given DALI module without private
or hidden members. No nested modules would be reported."""
result = []
# Take all public members of given module
public_members = list(filter(lambda x: not str(x).startswith("_"), dir(module)))
for member_name in public_members:
member = getattr(module, member_name)
# Just user-defined functions
if inspect.isfunction(member) and not member.__module__.endswith("hidden"):
result.append(member_name)
return result
def get_schema_names(module, functions):
return [getattr(sys.modules[module], fun)._schema_name for fun in functions]
def op_autodoc(out_filename):
s = ""
for module in get_modules(ops_modules):
s += module + "\n"
s += "~" * len(module) + "\n"
normalize_mod = module.replace("nvidia.dali.ops", "nvidia.dali.fn")
        if normalize_mod in mod_additional_doc:
            s += mod_additional_doc[normalize_mod] + "\n" + "\n"
s += ".. automodule:: {}\n".format(module)
s += " :members:\n"
s += " :special-members: __call__\n"
if module in exclude_ops_members:
excluded = exclude_ops_members[module]
s += " :exclude-members: {}\n".format(", ".join(excluded))
s += "\n\n"
with open(out_filename, 'w') as f:
f.write(s)
def get_references(name, references):
"""Generate section with references for given operator or module"""
    name = name[12:]  # remove the "nvidia.dali." prefix (12 characters)
result = ""
if name in references:
result += ".. seealso::\n"
for desc, url in references[name]:
result += f" * `{desc} <../{url}>`_\n"
return result
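# The emitted block renders as, e.g. (illustrative):
#   .. seealso::
#      * `Tutorial describing resize <../examples/resize.html>`_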
def single_fun_file(full_name, references):
"""Generate stub page for documentation of given function from fn api.
"""
result = f"{full_name}\n"
result += "-" * len(full_name) + "\n\n"
result += f".. autofunction:: {full_name}\n\n"
result += get_references(full_name, references)
return result
def single_module_file(module, funs_in_module, references):
"""Generate stub page for documentation of given module
"""
result = f"{module}\n"
result += "~" * len(module) + "\n\n"
    if module in mod_additional_doc:
        result += mod_additional_doc[module] + "\n\n"
result += get_references(module, references)
result += "\n"
result += f"The following table lists all operations available in ``{module}`` module:\n"
result += operations_table.operations_table_str(get_schema_names(module, funs_in_module))
result += "\n\n"
result += ".. toctree::\n :hidden:\n\n"
for fun in funs_in_module:
if module in exclude_fn_members and fun in exclude_fn_members[module]:
continue
full_name = f"{module}.{fun}"
result += f" {full_name}\n"
return result
def fn_autodoc(out_filename, generated_path, references):
all_modules_str = ".. toctree::\n :hidden:\n\n"
all_modules = get_modules(fn_modules)
for module in all_modules:
dali_module = sys.modules[module]
# Take all public members of given module
funs_in_module = get_functions(dali_module)
if len(funs_in_module) == 0:
continue
        # As the top-level file is included from a directory above generated_path,
        # we need to provide the relative path to the per-module files.
        # The rest is within the same directory, so there is no need for that.
all_modules_str += f" {generated_path / module}\n"
single_module_str = single_module_file(module, funs_in_module, references)
with open(generated_path / (module + ".rst"), 'w') as module_file:
module_file.write(single_module_str)
for fun in funs_in_module:
full_name = f"{module}.{fun}"
if module in exclude_fn_members and fun in exclude_fn_members[module]:
continue
with open(generated_path / (full_name + ".rst"), "w") as function_file:
single_file_str = single_fun_file(full_name, references)
function_file.write(single_file_str)
with open(out_filename, 'w') as f:
f.write(all_modules_str)
|
DALI-main
|
docs/autodoc_submodules.py
|
doc(title="Examples and Tutorials",
entries=[
doc_entry("general/data_loading/index.py", [
op_reference(
"fn.readers",
"Examples covering how to load data using DALI readers and external source operator"
),
op_reference(
"fn.external_source",
"Examples covering how to load data using external source operator or dedicated \
DALI readers operators"),
]),
"operations_index.py",
"use_cases/index.py",
"other_index.py",
])
|
DALI-main
|
docs/examples/index.py
|
doc(title="Other",
underline_char="=",
entries=[
doc_entry("general/multigpu.ipynb", [
op_reference("fn.readers.file", "Reading the data in the multi-GPU setup."),
op_reference("fn.readers.caffe", "Reading the data in the multi-GPU setup."),
op_reference("fn.readers.caffe2", "Reading the data in the multi-GPU setup."),
op_reference("fn.readers.coco", "Reading the data in the multi-GPU setup."),
op_reference("fn.readers.mxnet", "Reading the data in the multi-GPU setup."),
op_reference("fn.readers.nemo_asr", "Reading the data in the multi-GPU setup."),
op_reference("fn.readers.numpy", "Reading the data in the multi-GPU setup."),
op_reference("fn.readers.sequence", "Reading the data in the multi-GPU setup."),
op_reference("fn.readers.tfrecord", "Reading the data in the multi-GPU setup."),
op_reference("fn.readers.video", "Reading the data in the multi-GPU setup."),
op_reference("fn.readers.video_resize", "Reading the data in the multi-GPU setup."),
op_reference("fn.readers.webdataset", "Reading the data in the multi-GPU setup."),
]),
doc_entry("general/conditionals.ipynb"),
doc_entry("custom_operations/index.py"),
doc_entry("advanced/serialization.ipynb"),
doc_entry("legacy_getting_started.ipynb"),
doc_entry("general/debug_mode.ipynb"),
])
|
DALI-main
|
docs/examples/other_index.py
|