python_code
stringlengths 0
679k
| repo_name
stringlengths 9
41
| file_path
stringlengths 6
149
|
|---|---|---|
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
|
modulus-main
|
modulus/experimental/__init__.py
|
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
|
modulus-main
|
modulus/experimental/datapipes/__init__.py
|
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .climate_hdf5 import ClimateHDF5Datapipe
|
modulus-main
|
modulus/experimental/datapipes/climate/__init__.py
|
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import h5py
import numpy as np
import torch
from datetime import datetime, timedelta
import netCDF4 as nc
try:
import nvidia.dali as dali
import nvidia.dali.plugin.pytorch as dali_pth
except ImportError:
raise ImportError(
"DALI dataset requires NVIDIA DALI package to be installed. "
+ "The package can be installed at:\n"
+ "https://docs.nvidia.com/deeplearning/dali/user-guide/docs/installation.html"
)
from dataclasses import dataclass
from typing import Iterable, List, Union, Tuple
from pathlib import Path
from torch.utils.data import Dataset
from modulus.datapipes.datapipe import Datapipe
from modulus.datapipes.meta import DatapipeMetaData
from modulus.experimental.datapipes.climate.utils.zenith_angle import cos_zenith_angle
Tensor = torch.Tensor
@dataclass
class MetaData(DatapipeMetaData):
    """Metadata describing the ClimateHDF5 datapipe's capabilities."""

    # Datapipe identifier used by the Modulus registry/logging.
    name: str = "ClimateHDF5"
    # Optimization
    # Pipeline may be moved automatically to the active device.
    auto_device: bool = True
    # Pipeline outputs are safe to capture inside CUDA graphs.
    cuda_graphs: bool = True
    # Parallel
    # Dataset is sharded across ranks for DDP training.
    ddp_sharding: bool = True
class ClimateHDF5Datapipe(Datapipe):
    """
    A Climate DALI data pipeline for HDF5 files. This pipeline loads data from
    HDF5 files, which can include latitude, longitude, cosine of the solar zenith
    angle, geopotential, and land sea mask if specified. Additionally, it normalizes
    the data if a statistics file is provided. The pipeline returns a dictionary
    with the following structure:

    - `state_seq`: Tensor of shape (batch_size, num_steps, num_channels, height,
      width). This sequence is drawn from the HDF5 file and normalized if a
      statistics file is provided.
    - `timestamps`: Tensor of shape (batch_size, num_steps), containing
      timestamps for each timestep in the sequence.
    - `land_sea_mask`: Tensor of shape (batch_size, 1, height, width),
      containing the land sea mask if a path to a land sea mask file is
      provided.
    - `geopotential`: Tensor of shape (batch_size, 1, height, width), containing
      geopotential if a path to a geopotential file is provided.
    - `latlon`: Tensor of shape (batch_size, 2, height, width), containing
      latitude and longitude meshgrid if specified.
    - `cos_latlon`: Tensor of shape (batch_size, 3, height, width), containing
      `[cos(lat), sin(lon), cos(lon)]` if specified. This is required by many
      neural climate models.
    - `cos_zenith`: Tensor of shape (batch_size, num_steps, 1, height, width),
      containing the cosine of the solar zenith angle if specified.

    To use this data pipeline, your data directory must be structured as
    follows:

    ```
    ├── data_dir
    │   ├── 1980.h5
    │   ├── 1981.h5
    │   ├── 1982.h5
    │   ├── ...
    │   └── 2020.h5
    └── stats_dir
        ├── global_means.npy
        └── global_stds.npy
    ```

    The HDF5 files should contain the following variable
    with the corresponding name:

    - `fields`: Tensor of shape (num_timesteps, num_channels, height, width),
      containing climate data. The order of the channels should match the order
      of the channels in the statistics files. The statistics files should be
      `.npy` files with the shape (1, num_channels, 1, 1).

    This pipeline assumes the HDF5 files have no metadata, such as timestamps.
    Because of this, it's important to specify the `dt` parameter and the
    `start_year` parameter so that the pipeline can compute the correct
    timestamps for each timestep. These timestamps are then used to compute the
    cosine of the solar zenith angle, if specified.

    Parameters
    ----------
    data_dir : str
        Directory where climate data is stored
    stats_dir : Union[str, None], optional
        Directory to data statistic numpy files for normalization, if None, no normalization
        will be used, by default None
    channels : Union[List[int], None], optional
        Defines which climate variables to load, if None will use all in HDF5 file, by default None
    batch_size : int, optional
        Batch size, by default 1
    stride : int, optional
        Number of steps between input and output variables. For example, if the dataset
        contains data at every 6 hours, a stride 1 = 6 hour delta t and
        stride 2 = 12 hours delta t, by default 1
    dt : float, optional
        Time in hours between each timestep in the dataset, by default 6 hr
    start_year : int, optional
        Start year of dataset, by default 1980
    num_steps : int, optional
        Number of timesteps to return, by default 2 (1 for input, 1 for output)
    lsm_filename : str, optional
        Path to land sea mask file, by default None
    geopotential_filename : str, optional
        Path to geopotential file, by default None
    use_latlon : bool, optional
        Include latitude and longitude meshgrid, by default False
    use_cos_zenith : bool, optional
        Include cosine of the solar zenith angle, by default False. If True then latitude and longitude
        will also be computed.
    latlon_lower_bound : Tuple[float, float], optional
        Lower bound of latitude and longitude, by default (-90, -180)
    patch_size : Union[Tuple[int, int], int, None], optional
        If specified, crops input and output variables so image dimensions are
        divisible by patch_size, by default None
    num_samples_per_year : int, optional
        Number of samples randomly taken from each year. If None, all will be use, by default None
    shuffle : bool, optional
        Shuffle dataset, by default True
    num_workers : int, optional
        Number of workers, by default 1
    device: Union[str, torch.device], optional
        Device for DALI pipeline to run on, by default cuda
    process_rank : int, optional
        Rank ID of local process, by default 0
    world_size : int, optional
        Number of training processes, by default 1
    """

    def __init__(
        self,
        data_dir: str,
        stats_dir: Union[str, None] = None,
        channels: Union[List[int], None] = None,
        batch_size: int = 1,
        stride: int = 1,
        dt: float = 6.0,
        start_year: int = 1980,
        num_steps: int = 2,
        lsm_filename: str = None,
        geopotential_filename: str = None,
        use_latlon: bool = False,
        use_cos_zenith: bool = False,
        latlon_lower_bound: Tuple[float, float] = (-90, -180),
        patch_size: Union[Tuple[int, int], int, None] = None,
        num_samples_per_year: Union[int, None] = None,
        shuffle: bool = True,
        num_workers: int = 1,
        device: Union[str, torch.device] = "cuda",
        process_rank: int = 0,
        world_size: int = 1,
    ):
        super().__init__(meta=MetaData())
        self.batch_size = batch_size
        self.num_workers = num_workers
        self.shuffle = shuffle
        self.data_dir = Path(data_dir)
        self.stats_dir = Path(stats_dir) if not stats_dir is None else None
        self.channels = channels
        self.stride = stride
        self.dt = dt
        self.start_year = start_year
        self.num_steps = num_steps
        self.lsm_filename = lsm_filename
        self.geopotential_filename = geopotential_filename
        # Cosine-zenith computation needs the lat/lon meshgrid, so force it on.
        if use_cos_zenith:
            use_latlon = True
        self.use_latlon = use_latlon
        self.use_cos_zenith = use_cos_zenith
        self.latlon_lower_bound = latlon_lower_bound
        self.process_rank = process_rank
        self.world_size = world_size
        # A scalar patch size applies to both spatial dimensions.
        if isinstance(patch_size, int):
            patch_size = (patch_size, patch_size)
        self.patch_size = patch_size
        self.num_samples_per_year = num_samples_per_year
        # Determine outputs of pipeline; order here must match the order in
        # which outputs are appended in _create_pipeline.
        self.pipe_outputs = ["state_seq", "timestamps"]
        if self.lsm_filename is not None:
            self.pipe_outputs.append("land_sea_mask")
        if self.geopotential_filename is not None:
            self.pipe_outputs.append("geopotential")
        if self.use_latlon:
            self.pipe_outputs.append("latlon")
            self.pipe_outputs.append("cos_latlon")
        if self.use_cos_zenith:
            self.pipe_outputs.append("cos_zenith")
        # Set up device, needed for pipeline
        if isinstance(device, str):
            device = torch.device(device)
        # Need a index id if cuda
        if device.type == "cuda" and device.index == None:
            device = torch.device("cuda:0")
        self.device = device
        # check root directory exists
        if not self.data_dir.is_dir():
            raise IOError(f"Error, data directory {self.data_dir} does not exist")
        if not self.stats_dir is None and not self.stats_dir.is_dir():
            raise IOError(f"Error, stats directory {self.stats_dir} does not exist")
        if self.stats_dir is None:
            self.logger.warning(
                "Warning, no stats directory specified, this will result in no normalisation"
            )
        # Load all data files and statistics
        self._parse_dataset_files()
        self._load_statistics()
        if self.lsm_filename is not None:
            self._load_land_sea_mask()
        if self.geopotential_filename is not None:
            self._load_geopotential()
        if self.use_latlon:
            self._load_latlon()
        # Create pipeline
        self.pipe = self._create_pipeline()

    def _parse_dataset_files(self) -> None:
        """Parses the data directory for valid HDF5 files and determines training samples

        Raises
        ------
        ValueError
            In channels specified or number of samples per year is not valid
        """
        # get all input data files
        self.data_paths = sorted(self.data_dir.glob("*.h5"))
        for data_path in self.data_paths:
            self.logger.info(f"Climate file found: {data_path}")
        self.n_years = len(self.data_paths)
        self.logger.info(f"Number of years: {self.n_years}")
        # get total number of examples and image shape from the first file,
        # assuming other files have exactly the same format.
        self.logger.info(f"Getting file stats from {self.data_paths[0]}")
        with h5py.File(self.data_paths[0], "r") as f:
            # truncate the dataset to avoid out-of-range sampling: the last
            # (num_steps - 1) * stride frames cannot start a full sequence
            data_samples_per_year = (
                f["fields"].shape[0] - (self.num_steps - 1) * self.stride
            )
            self.data_shape = f["fields"].shape[2:]
            # If channels not provided, use all of them
            if self.channels is None:
                self.channels = [i for i in range(f["fields"].shape[1])]
            # If num_samples_per_year use all
            if self.num_samples_per_year is None:
                self.num_samples_per_year = data_samples_per_year
            # Adjust image shape if patch_size defined: crop so each spatial
            # dimension is divisible by the corresponding patch dimension
            if self.patch_size is not None:
                self.cropped_data_shape = [
                    s - s % self.patch_size[i] for i, s in enumerate(self.data_shape)
                ]
            else:
                self.cropped_data_shape = self.data_shape
            self.logger.info(f"Input data shape: {self.cropped_data_shape}")
            # Get total length
            self.total_length = self.n_years * self.num_samples_per_year
            # Sanity checks
            if max(self.channels) >= f["fields"].shape[1]:
                raise ValueError(
                    f"Provided channel has indexes greater than the number \
                    of fields {f['fields'].shape[1]}"
                )
            if self.num_samples_per_year > data_samples_per_year:
                raise ValueError(
                    f"num_samples_per_year ({self.num_samples_per_year}) > number of \
                    samples available ({data_samples_per_year})!"
                )
            self.logger.info(f"Number of samples/year: {self.num_samples_per_year}")
            self.logger.info(f"Number of channels available: {f['fields'].shape[1]}")

    def _load_statistics(self) -> None:
        """Loads climate statistics from pre-computed numpy files

        The statistic files should be of name global_means.npy and global_std.npy with
        a shape of [1, C, 1, 1] located in the stat_dir.

        Raises
        ------
        IOError
            If mean or std numpy files are not found
        AssertionError
            If loaded numpy arrays are not of correct size
        """
        # If no stats dir we just skip loading the stats
        if self.stats_dir is None:
            self.mu = None
            # NOTE(review): this branch sets `self.std` but the loading branch
            # below (and _create_pipeline) use `self.sd` — looks like a naming
            # inconsistency; harmless today because `self.sd` is only read when
            # stats_dir is not None, but worth confirming/unifying.
            self.std = None
            return
        # load normalisation values
        mean_stat_file = self.stats_dir / Path("global_means.npy")
        std_stat_file = self.stats_dir / Path("global_stds.npy")
        if not mean_stat_file.exists():
            raise IOError(f"Mean statistics file {mean_stat_file} not found")
        if not std_stat_file.exists():
            raise IOError(f"Std statistics file {std_stat_file} not found")
        # has shape [1, C, 1, 1]; select only the requested channels
        self.mu = np.load(str(mean_stat_file))[:, self.channels]
        # has shape [1, C, 1, 1]
        self.sd = np.load(str(std_stat_file))[:, self.channels]
        if not self.mu.shape == self.sd.shape == (1, len(self.channels), 1, 1):
            raise AssertionError("Error, normalisation arrays have wrong shape")

    def _load_land_sea_mask(self) -> None:
        """Load land-sea mask from netCDF file."""
        ds = nc.Dataset(self.lsm_filename)
        lsm = np.array(ds["lsm"]).astype(np.float32)
        lsm = np.flip(
            lsm, axis=1
        )  # flip latitude axis, TODO hacky fix and we should get this from the file
        assert (
            lsm.shape[1:] == self.data_shape
        ), f"Land-sea mask shape {lsm.shape} does not match data shape {self.data_shape}"
        # Crop to the patch-size-aligned shape used by the pipeline.
        lsm = lsm[:, : self.cropped_data_shape[0], : self.cropped_data_shape[1]]
        # Bake into the DALI graph as a constant node.
        self.lsm = dali.types.Constant(lsm)

    def _load_geopotential(self, normalize: bool = True) -> None:
        """Get geopotential from netCDF file.

        Parameters
        ----------
        normalize : bool, optional
            Standardize the field to zero mean / unit variance, by default True
        """
        ds = nc.Dataset(self.geopotential_filename)
        geop = np.array(ds["z"]).astype(np.float32)
        geop = np.flip(
            geop, axis=1
        )  # flip latitude axis, TODO hacky fix and we should get this from the file
        assert (
            geop.shape[1:] == self.data_shape
        ), f"Geopotential shape {geop.shape} does not match data shape {self.data_shape}"
        # Crop to the patch-size-aligned shape used by the pipeline.
        geop = geop[:, : self.cropped_data_shape[0], : self.cropped_data_shape[1]]
        if normalize:
            geop = (geop - geop.mean()) / geop.std()
        # Bake into the DALI graph as a constant node.
        self.geopotential = dali.types.Constant(geop)

    def _load_latlon(self) -> None:
        """Load latitude and longitude coordinates from data shape and compute cos/sin versions."""
        # get latitudes and longitudes from data shape
        lat = np.linspace(
            self.latlon_lower_bound[0],
            self.latlon_lower_bound[0] + 180,
            self.cropped_data_shape[0],
        ).astype(np.float32)
        # Longitude is periodic: sample one extra point and drop the first so
        # the wrap-around meridian (lower bound == lower bound + 360) is not
        # duplicated in the grid.
        lon = np.linspace(
            self.latlon_lower_bound[1],
            self.latlon_lower_bound[1] + 360,
            self.cropped_data_shape[1] + 1,
        ).astype(np.float32)[1:]
        lat, lon = np.meshgrid(lat, lon, indexing="ij")
        self.latlon = dali.types.Constant(np.stack((lat, lon), axis=0))
        # cos/sin latitudes and longitudes
        cos_lat = np.cos(np.deg2rad(lat))
        sin_lon = np.sin(np.deg2rad(lon))
        cos_lon = np.cos(np.deg2rad(lon))
        self.cos_latlon = dali.types.Constant(
            np.stack((cos_lat, sin_lon, cos_lon), axis=0)
        )

    def _create_pipeline(self) -> dali.Pipeline:
        """Create DALI pipeline

        Returns
        -------
        dali.Pipeline
            HDF5 DALI pipeline
        """
        pipe = dali.Pipeline(
            batch_size=self.batch_size,
            num_threads=2,
            prefetch_queue_depth=2,
            py_num_workers=self.num_workers,
            device_id=self.device.index,
            # "spawn" is required for parallel external_source workers
            py_start_method="spawn",
        )
        with pipe:
            # HDF5 source
            source = ClimateDaliExternalSource(
                data_paths=self.data_paths,
                num_samples=self.total_length,
                channels=self.channels,
                stride=self.stride,
                dt=self.dt,
                start_year=self.start_year,
                num_steps=self.num_steps,
                num_samples_per_year=self.num_samples_per_year,
                batch_size=self.batch_size,
                shuffle=self.shuffle,
                process_rank=self.process_rank,
                world_size=self.world_size,
            )
            # Update length of dataset (per-rank shard, in batches)
            self.total_length = len(source) // self.batch_size
            # Read current batch
            state_seq, timestamps = dali.fn.external_source(
                source,
                num_outputs=2,
                parallel=True,
                batch=False,
            )
            # Crop
            h, w = self.cropped_data_shape
            state_seq = state_seq[:, :, :h, :w]
            # Normalize
            if not self.stats_dir is None:
                state_seq = dali.fn.normalize(state_seq, mean=self.mu, stddev=self.sd)
            # Make output list; order must match self.pipe_outputs
            outputs = [state_seq, timestamps]
            # Get static inputs
            if self.lsm_filename is not None:
                outputs.append(self.lsm)
            if self.geopotential_filename is not None:
                outputs.append(self.geopotential)
            if self.use_latlon:
                outputs.append(self.latlon)
                outputs.append(self.cos_latlon)
            # Get cosine zenith angle
            if self.use_cos_zenith:
                cos_zenith = cos_zenith_angle(timestamps, latlon=self.latlon)
                outputs.append(cos_zenith)
            if self.device.type == "cuda":
                # Move tensors to GPU as external_source won't do that
                outputs = [o.gpu() for o in outputs]
            # Set outputs
            pipe.set_outputs(*outputs)
        return pipe

    def __iter__(self):
        # Reset the pipeline before creating an iterator to enable epochs.
        self.pipe.reset()
        # Create DALI PyTorch iterator.
        return dali_pth.DALIGenericIterator([self.pipe], self.pipe_outputs)

    def __len__(self):
        # Number of full batches in this rank's shard.
        return self.total_length
class ClimateDaliExternalSource:
    """DALI Source for lazy-loading the HDF5 climate files

    Parameters
    ----------
    data_paths : Iterable[str]
        Directory where climate data is stored
    num_samples : int
        Total number of training samples
    channels : Iterable[int]
        List representing which climate variables to load
    num_steps : int
        Number of timesteps to load
    stride : int
        Number of steps between input and output variables
    dt : float
        Time in hours between consecutive timesteps in the dataset
    start_year : int
        Year of the first HDF5 file; used to reconstruct timestamps
    num_samples_per_year : int
        Number of samples randomly taken from each year
    batch_size : int, optional
        Batch size, by default 1
    shuffle : bool, optional
        Shuffle dataset, by default True
    process_rank : int, optional
        Rank ID of local process, by default 0
    world_size : int, optional
        Number of training processes, by default 1

    Note
    ----
    For more information about DALI external source operator:
    https://docs.nvidia.com/deeplearning/dali/archives/dali_1_13_0/user-guide/docs/examples/general/data_loading/parallel_external_source.html
    """

    def __init__(
        self,
        data_paths: Iterable[str],
        num_samples: int,
        channels: Iterable[int],
        num_steps: int,
        stride: int,
        dt: float,
        start_year: int,
        num_samples_per_year: int,
        batch_size: int = 1,
        shuffle: bool = True,
        process_rank: int = 0,
        world_size: int = 1,
    ):
        self.data_paths = list(data_paths)
        # Will be populated later once each worker starts running in its own process.
        self.data_files = None
        self.num_samples = num_samples
        self.chans = list(channels)
        self.num_steps = num_steps
        self.stride = stride
        self.dt = dt
        self.start_year = start_year
        self.num_samples_per_year = num_samples_per_year
        self.batch_size = batch_size
        self.shuffle = shuffle
        # Tracks the last epoch for which indices were shuffled.
        self.last_epoch = None
        self.indices = np.arange(num_samples)
        # Shard from indices if running in parallel
        self.indices = np.array_split(self.indices, world_size)[process_rank]
        # Get number of full batches, ignore possible last incomplete batch for now.
        # Also, DALI external source does not support incomplete batches in parallel mode.
        self.num_batches = len(self.indices) // self.batch_size

    def __call__(
        self, sample_info: dali.types.SampleInfo
    ) -> Tuple[np.ndarray, np.ndarray]:
        """Return one (state_seq, timestamps) sample for the requested index."""
        # Signal end of epoch once all full batches have been produced.
        if sample_info.iteration >= self.num_batches:
            raise StopIteration()
        if self.data_files is None:
            # This will be called once per worker. Workers are persistent,
            # so there is no need to explicitly close the files - this will be done
            # when corresponding pipeline/dataset is destroyed
            self.data_files = [h5py.File(path, "r") for path in self.data_paths]
        # Shuffle before the next epoch starts
        if self.shuffle and sample_info.epoch_idx != self.last_epoch:
            # All workers use the same rng seed so the resulting
            # indices are the same across workers
            np.random.default_rng(seed=sample_info.epoch_idx).shuffle(self.indices)
            self.last_epoch = sample_info.epoch_idx
        # Get local indices from global index
        # TODO: This is very hacky, but it works for now
        idx = self.indices[sample_info.idx_in_epoch]
        year_idx = idx // self.num_samples_per_year
        in_idx = idx % self.num_samples_per_year
        # Get data for the current year
        data = self.data_files[year_idx]["fields"]
        # Load sequence of input variables
        state_seq = np.empty(
            (self.num_steps, len(self.chans)) + data.shape[2:], dtype=data.dtype
        )
        for i in range(self.num_steps):
            ind = in_idx + i * self.stride
            state_seq[i] = data[ind, self.chans]
        # Load sequence of timestamps
        year = self.start_year + year_idx
        # NOTE(review): datetime(...).timestamp() on a naive datetime is
        # interpreted in the *local* timezone; downstream zenith-angle code
        # presumably expects UTC — confirm and consider tzinfo=timezone.utc.
        timestamps = np.array(
            [
                (
                    datetime(year, 1, 1)
                    + timedelta(hours=int(in_idx) * self.dt)
                    + timedelta(hours=i * self.stride * self.dt)
                ).timestamp()
                for i in range(self.num_steps)
            ]
        ).astype(np.float32)
        return state_seq, timestamps

    def __len__(self):
        # Length of this rank's shard (in samples, not batches).
        return len(self.indices)
|
modulus-main
|
modulus/experimental/datapipes/climate/climate_hdf5.py
|
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
|
modulus-main
|
modulus/experimental/datapipes/climate/utils/__init__.py
|
# ignore_header_test
# climt/LICENSE
# @mcgibbon
# BSD License
# Copyright (c) 2016, Rodrigo Caballero
# All rights reserved.
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice, this
# list of conditions and the following disclaimer in the documentation and/or
# other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from this
# software without specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
# INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
# OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
# OF THE POSSIBILITY OF SUCH DAMAGE.
import datetime
import numpy as np
from typing import Union, TypeVar
import datetime
try:
import nvidia.dali as dali
import nvidia.dali.plugin.pytorch as dali_pth
except ImportError:
raise ImportError(
"DALI dataset requires NVIDIA DALI package to be installed. "
+ "The package can be installed at:\n"
+ "https://docs.nvidia.com/deeplearning/dali/user-guide/docs/installation.html"
)
# Degrees-to-radians conversion factor.
RAD_PER_DEG = np.pi / 180.0
# POSIX timestamp of the J2000.0 reference epoch (2000-01-01 12:00).
# NOTE(review): .timestamp() on a naive datetime uses the local timezone,
# so this constant is machine-timezone dependent — presumably intended as
# UTC; confirm.
DATETIME_2000 = datetime.datetime(2000, 1, 1, 12, 0, 0).timestamp()


def _dali_mod(a, b):
    """Floored floating-point modulo (a mod b) built from DALI graph ops,
    since the DALI expression API has no modulo operator."""
    return a - b * dali.math.floor(a / b)
def cos_zenith_angle(
    time: dali.types.DALIDataType,
    latlon: dali.types.DALIDataType,
):
    """
    Dali datapipe for computing Cosine of sun-zenith angle for lon, lat at time (UTC).

    Parameters
    ----------
    time : dali.types.DALIDataType
        Time in seconds since 2000-01-01 12:00:00 UTC. Shape `(seq_length,)`.
    latlon : dali.types.DALIDataType
        Latitude and longitude in degrees. Shape `(2, nr_lat, nr_lon)`.

    Returns
    -------
    dali.types.DALIDataType
        Cosine of sun-zenith angle. Shape `(seq_length, 1, nr_lat, nr_lon)`.
    """
    # Broadcast lat/lon to (1, 1, nr_lat, nr_lon) and convert to radians.
    lat = latlon[dali.newaxis, 0:1, :, :] * RAD_PER_DEG
    lon = latlon[dali.newaxis, 1:2, :, :] * RAD_PER_DEG
    # Broadcast time to (seq_length, 1, 1, 1) so ops broadcast over the grid.
    time = time[:, dali.newaxis, dali.newaxis, dali.newaxis]
    return _star_cos_zenith(time, lat, lon)
def _days_from_2000(model_time):  # pragma: no cover
    """Get the days since year 2000."""
    # return (model_time - DATETIME_2000) / (24.0 * 3600.0)
    # NOTE(review): the extra 3600 s (one hour) offset relative to the
    # commented-out original looks like a deliberate correction (possibly
    # compensating a timezone/epoch shift elsewhere) — confirm before touching.
    return (3600 + model_time - DATETIME_2000) / (24.0 * 3600.0)
def _greenwich_mean_sidereal_time(model_time):
    """
    Greenwich mean sidereal time, in radians.

    Parameters
    ----------
    model_time
        Time in seconds since 2000-01-01 12:00:00 UTC (DALI node or scalar).

    Returns
    -------
    GMST in radians, wrapped to [0, 2*pi).

    Reference:
    The AIAA 2006 implementation:
    http://www.celestrak.com/publications/AIAA/2006-6753/
    """
    jul_centuries = _days_from_2000(model_time) / 36525.0
    # GMST in seconds of time:
    # 67310.54841 + (876600 h + 8640184.812866 s) T + 0.093104 T^2 - 6.2e-6 T^3
    # BUGFIX: the cubic coefficient was written as `6.2 * 10e-6` (= 6.2e-5),
    # ten times the AIAA value of 6.2e-6. The T^3 term is tiny, so the error
    # was negligible in practice, but the constant now matches the reference.
    theta = 67310.54841 + jul_centuries * (
        876600 * 3600
        + 8640184.812866
        + jul_centuries * (0.093104 - jul_centuries * 6.2e-6)
    )
    # Convert seconds of time to radians (240 s of time = 1 degree) and wrap.
    theta_radians = _dali_mod((theta / 240.0) * RAD_PER_DEG, 2 * np.pi)
    return theta_radians
def _local_mean_sidereal_time(model_time, longitude):
    """
    Local mean sidereal time, in radians; ``longitude`` must be in radians.

    Ref:
    http://www.setileague.org/askdr/lmst.htm
    """
    # LMST is simply GMST shifted by the observer's longitude.
    gmst = _greenwich_mean_sidereal_time(model_time)
    return gmst + longitude
def _sun_ecliptic_longitude(model_time):
    """
    Ecliptic longitude of the sun, in radians.

    Reference:
    http://www.geoastro.de/elevaz/basics/meeus.htm
    """
    julian_centuries = _days_from_2000(model_time) / 36525.0
    # mean anomaly calculation (degrees -> radians)
    mean_anomaly = (
        357.52910
        + 35999.05030 * julian_centuries
        - 0.0001559 * julian_centuries * julian_centuries
        - 0.00000048 * julian_centuries * julian_centuries * julian_centuries
    ) * RAD_PER_DEG
    # mean longitude
    mean_longitude = (
        280.46645 + 36000.76983 * julian_centuries + 0.0003032 * (julian_centuries**2)
    ) * RAD_PER_DEG
    # equation-of-center correction to the mean longitude
    d_l = (
        (1.914600 - 0.004817 * julian_centuries - 0.000014 * (julian_centuries**2))
        * dali.math.sin(mean_anomaly)
        + (0.019993 - 0.000101 * julian_centuries) * dali.math.sin(2 * mean_anomaly)
        + 0.000290 * dali.math.sin(3 * mean_anomaly)
    ) * RAD_PER_DEG
    # true longitude
    return mean_longitude + d_l
def _obliquity_star(julian_centuries):
    """
    Return the obliquity of the ecliptic, in radians.

    Uses the 5th-order polynomial from
    https://en.wikipedia.org/wiki/Ecliptic#Obliquity_of_the_ecliptic
    """
    # Base value 23 deg 26' 21.406" expressed in degrees.
    base_deg = 23.0 + 26.0 / 60 + 21.406 / 3600.0
    # Secular correction polynomial, in arcseconds.
    correction_arcsec = (
        46.836769 * julian_centuries
        - 0.0001831 * (julian_centuries**2)
        + 0.00200340 * (julian_centuries**3)
        - 0.576e-6 * (julian_centuries**4)
        - 4.34e-8 * (julian_centuries**5)
    )
    return (base_deg - correction_arcsec / 3600.0) * RAD_PER_DEG
def _right_ascension_declination(model_time):
    """
    Right ascension and declination of the sun, both in radians.

    Returns
    -------
    (right_ascension, declination) tuple of DALI nodes.
    """
    julian_centuries = _days_from_2000(model_time) / 36525.0
    eps = _obliquity_star(julian_centuries)
    eclon = _sun_ecliptic_longitude(model_time)
    # Unit vector of the sun in equatorial coordinates (ecliptic latitude ~ 0).
    x = dali.math.cos(eclon)
    y = dali.math.cos(eps) * dali.math.sin(eclon)
    z = dali.math.sin(eps) * dali.math.sin(eclon)
    # Projection onto the equatorial plane.
    r = dali.math.sqrt(1.0 - z * z)
    # sun declination
    declination = dali.math.atan2(z, r)
    # right ascension (half-angle form avoids quadrant issues)
    right_ascension = 2 * dali.math.atan2(y, (x + r))
    return right_ascension, declination
def _local_hour_angle(model_time, longitude, right_ascension):
    """
    Hour angle at ``model_time`` for the given longitude and right ascension;
    ``longitude`` is in radians.

    Ref:
    https://en.wikipedia.org/wiki/Hour_angle#Relation_with_the_right_ascension
    """
    lmst = _local_mean_sidereal_time(model_time, longitude)
    return lmst - right_ascension
def _star_cos_zenith(model_time, lat, lon):
    """
    Return cosine of star zenith angle; ``lon``/``lat`` in radians.

    Ref:
    Azimuth:
    https://en.wikipedia.org/wiki/Solar_azimuth_angle#Formulas
    Zenith:
    https://en.wikipedia.org/wiki/Solar_zenith_angle
    """
    ra, dec = _right_ascension_declination(model_time)
    h_angle = _local_hour_angle(model_time, lon, ra)
    # cos(theta_z) = sin(lat) sin(dec) + cos(lat) cos(dec) cos(hour angle)
    cosine_zenith = dali.math.sin(lat) * dali.math.sin(dec) + dali.math.cos(
        lat
    ) * dali.math.cos(dec) * dali.math.cos(h_angle)
    return cosine_zenith
|
modulus-main
|
modulus/experimental/datapipes/climate/utils/zenith_angle.py
|
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .manager import DistributedManager
from .utils import gather_loss
|
modulus-main
|
modulus/distributed/__init__.py
|
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
import torch.nn.functional as F
import torch.distributed as dist
from typing import Optional, List
from .manager import DistributedManager
from .utils import (
gather_v_wrapper,
scatter_v_wrapper,
all_gather_v_wrapper,
all_reduce_v_wrapper,
indexed_all_to_all_v_wrapper,
indexed_all_to_all_v_wrapper_bwd,
)
class AllGatherVAutograd(torch.autograd.Function):
    """
    Autograd wrapper for a distributed AllGatherV primitive.

    Conceptually there is a single global tensor split along a given
    dimension into variable-sized chunks, one per rank. The forward pass
    gathers every rank's local chunk so that each rank ends up holding the
    full global tensor; it is intended for tensor-parallel settings where
    gradients must flow through the collective. The backward pass performs
    an AllReduceV: each rank extracts its own chunk of the incoming global
    gradient and sums the per-rank contributions.
    """

    @staticmethod
    def forward(
        ctx,
        tensor: torch.Tensor,
        sizes: List[int],
        dim: int = 0,
        use_fp32: bool = True,
        group: Optional[dist.ProcessGroup] = None,
    ) -> torch.Tensor:
        """forward pass of the Distributed AllGatherV primitive"""
        # Stash the collective parameters needed to mirror the op in backward.
        ctx.sizes = sizes
        ctx.group = group
        ctx.dim = dim
        ctx.use_fp32 = use_fp32
        return all_gather_v_wrapper(tensor, sizes, dim=dim, group=group)

    @staticmethod
    def backward(ctx, grad_output: torch.Tensor):
        """backward pass of the of the Distributed AllGatherV primitive"""
        if not ctx.needs_input_grad[0]:
            # Only the input tensor is differentiable; sizes/dim/use_fp32/group
            # never receive gradients.
            return None, None, None, None, None
        grad_tensor = all_reduce_v_wrapper(
            grad_output,
            ctx.sizes,
            dim=ctx.dim,
            use_fp32=ctx.use_fp32,
            group=ctx.group,
        )
        return grad_tensor, None, None, None, None
class GatherVAutograd(torch.autograd.Function):
    """
    Autograd Wrapper for a distributed GatherV primitive.
    It is based on the idea of a single global tensor which is distributed
    along a specified dimension into chunks of variable size.
    This primitive assumes such a distributed tensor and gathers all
    local tensors from each rank into the full global tensor valid
    on the specified destination rank. It is intended to be used in
    tensor-parallel settings on tensors which require gradients to
    be passed through.
    The backward pass corresponds to a straightforward
    ScatterV primitive distributing the global gradient from the
    specified destination rank to all the other ranks.
    """

    @staticmethod
    def forward(
        ctx,
        tensor: torch.Tensor,
        sizes: List[int],
        dim: int = 0,
        dst: int = 0,
        group: Optional[dist.ProcessGroup] = None,
    ) -> torch.Tensor:
        """forward pass of the distributed GatherV primitive"""
        gathered_tensor = gather_v_wrapper(tensor, sizes, dim=dim, dst=dst, group=group)
        # Save collective parameters so backward can scatter from `dst`.
        ctx.sizes = sizes
        ctx.dim = dim
        ctx.dst = dst
        ctx.group = group
        return gathered_tensor

    @staticmethod
    def backward(
        ctx,
        grad_output: torch.Tensor,
    ) -> torch.Tensor:
        """backward pass of the Distributed GatherV primitive"""
        grad_tensor = None
        needs_grad = ctx.needs_input_grad[0]
        if needs_grad:
            # Scatter the global gradient from the destination rank back to
            # each rank's local chunk (src of the scatter == dst of the gather).
            grad_tensor = scatter_v_wrapper(
                grad_output, ctx.sizes, dim=ctx.dim, src=ctx.dst, group=ctx.group
            )
        # Non-tensor arguments (sizes, dim, dst, group) get no gradient.
        return grad_tensor, None, None, None, None
class ScatterVAutograd(torch.autograd.Function):
    """
    Autograd Wrapper for Distributed ScatterV. It is based
    on the idea of a single global tensor which is distributed along
    a specified dimension into chunks of variable size.
    This primitive scatters the global tensor from a specified source rank
    into local chunks onto each other rank. It is intended to be used in
    tensor-parallel settings on tensors which require gradients to
    be passed through.
    The backward pass corresponds to a GatherV primitive
    gathering local gradients from all the other ranks into a single
    global gradient on the specified source rank.
    """
    @staticmethod
    def forward(
        ctx,
        tensor: torch.Tensor,
        sizes: List[int],
        dim: int = 0,
        src: int = 0,
        # FIX: was `group=Optional[dist.ProcessGroup]`, which made the
        # typing object itself the default *value*; now a proper
        # annotation with a None default, consistent with the siblings.
        group: Optional[dist.ProcessGroup] = None,
    ) -> torch.Tensor:
        """forward pass of the Distributed ScatterV primitive"""
        scattered_tensor = scatter_v_wrapper(
            tensor, sizes, dim=dim, src=src, group=group
        )
        # only communication metadata is needed by backward; the input
        # tensor itself is intentionally not saved (avoids holding the
        # full global tensor in memory for the whole backward pass)
        ctx.sizes = sizes
        ctx.dim = dim
        ctx.src = src
        ctx.group = group
        return scattered_tensor
    @staticmethod
    def backward(ctx, grad_output: torch.Tensor) -> torch.Tensor:
        """backward pass of the Distributed ScatterV primitive"""
        grad_tensor = None
        if ctx.needs_input_grad[0]:
            # gather local gradients into the global gradient on `src`
            grad_tensor = gather_v_wrapper(
                grad_output, ctx.sizes, dim=ctx.dim, dst=ctx.src, group=ctx.group
            )
        return grad_tensor, None, None, None, None
class IndexedAllToAllVAutograd(torch.autograd.Function):
    """
    Autograd Wrapper for an Indexed AllToAllV primitive. It is based on the
    idea of a single global tensor which is distributed along a
    specified dimension into chunks of variable size.
    This primitive assumes a set of indices into this dimension which indicate
    the corresponding slices sent to each other rank forming an indexed version
    of an AllToAllV primitive. It is intended to be used in tensor-parallel settings
    on tensors which require gradients to be passed through.
    The backward pass more or less corresponds to the same operation as in the forward
    pass but with reversed roles and does an additional reduction of gathered gradients
    so that each rank finally will compute the overall gradient on its local tensor partition.
    """
    @staticmethod
    def forward(
        ctx,
        tensor: torch.Tensor,
        indices: List[torch.Tensor],
        sizes: List[List[int]],
        use_fp32: bool = True,
        dim: int = 0,
        group: Optional[dist.ProcessGroup] = None,
    ) -> torch.Tensor:
        """forward pass of the Distributed IndexedAlltoAllV primitive"""
        tensor_to_recv = indexed_all_to_all_v_wrapper(
            tensor,
            indices,
            sizes,
            dim=dim,
            group=group,
        )
        # metadata needed to reverse the exchange in the backward pass
        ctx.sizes = sizes
        ctx.use_fp32 = use_fp32
        ctx.group = group
        ctx.tensor_size_along_dim = tensor.size(dim)
        ctx.indices = indices
        ctx.dim = dim
        return tensor_to_recv
    @staticmethod
    def backward(
        ctx,
        grad_output: torch.Tensor,
    ) -> torch.Tensor:
        """backward pass of the Distributed IndexedAlltoAllV primitive"""
        grad_tensor = None
        if ctx.needs_input_grad[0]:
            grad_tensor = indexed_all_to_all_v_wrapper_bwd(
                grad_output,
                ctx.indices,
                ctx.sizes,
                tensor_size_along_dim=ctx.tensor_size_along_dim,
                use_fp32=ctx.use_fp32,
                dim=ctx.dim,
                group=ctx.group,
            )
        # FIX: forward takes 6 inputs (tensor, indices, sizes, use_fp32,
        # dim, group) so backward must return exactly 6 gradients; the
        # original returned 7 values (one trailing None too many).
        return grad_tensor, None, None, None, None, None
def all_gather_v(
    tensor: torch.Tensor,
    sizes: List[int],
    dim: int = 0,
    use_fp32: bool = True,
    group: Optional[dist.ProcessGroup] = None,
) -> torch.Tensor:
    """
    Differentiable distributed AllGatherV.

    A single logical global tensor is distributed along ``dim`` in
    variable-sized chunks, one per rank. This call gathers all local
    chunks into the full global tensor on every rank. The backward pass
    performs an AllReduceV: each rank receives its chunk of the gradient
    from every other rank and sums the contributions.

    Parameters
    ----------
    tensor : torch.Tensor
        local tensor on each rank
    sizes : List[int]
        chunk size along ``dim`` for every rank, identical on all ranks
    dim : int, optional
        dimension along which the global tensor is distributed, by default 0
    use_fp32 : bool, optional
        whether the gradient reduction in the backward pass runs in FP32,
        by default True
    group : Optional[dist.ProcessGroup], optional
        process group over which the global tensor is shared, by default None

    Returns
    -------
    torch.Tensor
        full global tensor, valid on each rank
    """
    return AllGatherVAutograd.apply(tensor, sizes, dim, use_fp32, group)
def gather_v(
    tensor: torch.Tensor,
    sizes: List[int],
    dim: int = 0,
    dst: int = 0,
    group: Optional[dist.ProcessGroup] = None,
) -> torch.Tensor:
    """
    Differentiable distributed GatherV.

    A single logical global tensor is distributed along ``dim`` in
    variable-sized chunks, one per rank. This call gathers all local
    chunks into the full global tensor on rank ``dst``. The backward
    pass is a ScatterV distributing the global gradient from ``dst``
    back to every rank.

    Parameters
    ----------
    tensor : torch.Tensor
        local tensor on each rank
    sizes : List[int]
        chunk size along ``dim`` for every rank, identical on all ranks
    dim : int, optional
        dimension along which the global tensor is distributed, by default 0
    dst : int, optional
        rank holding the full global tensor after the call, by default 0
    group : Optional[dist.ProcessGroup], optional
        process group over which the global tensor is shared, by default None

    Returns
    -------
    torch.Tensor
        full global tensor, valid on rank ``dst``
    """
    return GatherVAutograd.apply(tensor, sizes, dim, dst, group)
def scatter_v(
    tensor: torch.Tensor,
    sizes: List[int],
    dim: int = 0,
    src: int = 0,
    group: Optional[dist.ProcessGroup] = None,
) -> torch.Tensor:
    """
    Differentiable distributed ScatterV.

    A single logical global tensor is distributed along ``dim`` in
    variable-sized chunks, one per rank. This call scatters the global
    tensor from rank ``src`` into local chunks on every rank. The
    backward pass is a GatherV collecting the local gradients from all
    ranks into a single global gradient on ``src``.

    Parameters
    ----------
    tensor : torch.Tensor
        global tensor, valid on rank ``src``
    sizes : List[int]
        chunk size along ``dim`` for every rank, identical on all ranks
    dim : int, optional
        dimension along which the global tensor is distributed, by default 0
    src : int, optional
        rank holding the original full global tensor, by default 0
    group : Optional[dist.ProcessGroup], optional
        process group over which the global tensor is shared, by default None

    Returns
    -------
    torch.Tensor
        corresponding local part of the global tensor on each rank
    """
    return ScatterVAutograd.apply(tensor, sizes, dim, src, group)
def indexed_all_to_all_v(
    tensor: torch.Tensor,
    indices: List[torch.Tensor],
    sizes: List[List[int]],
    use_fp32: bool = True,
    dim: int = 0,
    group: Optional[dist.ProcessGroup] = None,
) -> torch.Tensor:
    """
    Differentiable indexed distributed AllToAllV.

    A single logical global tensor is distributed along ``dim`` in
    variable-sized chunks, one per rank. Per-rank index lists select
    which slices of the local chunk are sent to each other rank, forming
    an indexed AllToAllV. The backward pass runs the same exchange with
    sender and receiver roles swapped, then reduces the gathered
    gradients so every rank obtains the full gradient on its local
    tensor partition.

    Parameters
    ----------
    tensor : torch.Tensor
        local part of the global tensor on each rank
    indices : List[torch.Tensor]
        per-destination-rank index tensors of the slices this rank sends
    sizes : List[List[int]]
        number of indices each rank sends to each other rank, identical
        on all ranks; e.g. ``sizes[0][3]`` is how many slices rank 0
        sends to rank 3
    use_fp32 : bool, optional
        whether the gradient reduction in the backward pass runs in FP32,
        by default True
    dim : int
        dimension along which the global tensor is distributed, by default 0
    group : Optional[dist.ProcessGroup], optional
        process group over which the global tensor is shared, by default None

    Returns
    -------
    torch.Tensor
        local result of the exchange for this rank
    """
    return IndexedAllToAllVAutograd.apply(
        tensor,
        indices,
        sizes,
        use_fp32,
        dim,
        group,
    )
|
modulus-main
|
modulus/distributed/autograd.py
|
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# TODO this also needs more docstrings
import torch
import torch.nn.functional as F
import torch.distributed as dist
from typing import List, Optional
from .manager import DistributedManager
def get_memory_format(tensor):
    """Return ``tensor``'s memory format.

    ``torch.channels_last`` when the tensor is contiguous in channels-last
    layout, otherwise ``torch.contiguous_format``.
    """
    is_channels_last = tensor.is_contiguous(memory_format=torch.channels_last)
    return torch.channels_last if is_channels_last else torch.contiguous_format
def pad_helper(tensor, dim, new_size, mode="zero"):
    """Pad ``tensor`` at the end of dimension ``dim`` up to ``new_size``.

    With ``mode="conj"`` the padded region is filled with the flipped
    complex conjugate of the entries following the first element along
    ``dim``; any other mode leaves the padding as zeros.
    """
    ndim = tensor.ndim
    dim = (dim + ndim) % ndim  # normalize negative dims
    orig_size = tensor.shape[dim]
    pad_amount = new_size - orig_size
    # F.pad takes (before, after) pairs starting from the LAST dimension;
    # only the "after" slot belonging to `dim` is non-zero here
    pad_spec = [0] * (2 * (ndim - dim))
    pad_spec[1] = pad_amount
    padded = F.pad(tensor, pad_spec, mode="constant", value=0.0)
    if mode == "conj":
        dst = [
            slice(orig_size, new_size) if idx == dim else slice(0, size)
            for idx, size in enumerate(tensor.shape)
        ]
        src = [
            slice(1, pad_amount + 1) if idx == dim else slice(0, size)
            for idx, size in enumerate(tensor.shape)
        ]
        padded[dst] = torch.flip(torch.conj(padded[src]), dims=[dim])
    return padded
def truncate_helper(tensor, dim, new_size):
    """Return the first ``new_size`` entries of ``tensor`` along ``dim``,
    made contiguous in the input's memory format."""
    fmt = get_memory_format(tensor)
    ndim = tensor.ndim
    dim = (dim + ndim) % ndim  # normalize negative dims
    selector = tuple(
        slice(0, new_size) if idx == dim else slice(0, size)
        for idx, size in enumerate(tensor.shape)
    )
    return tensor[selector].contiguous(memory_format=fmt)
def split_tensor_along_dim(tensor, dim, num_chunks):
    """Split ``tensor`` into ``num_chunks`` equally-sized parts along ``dim``.

    Parameters
    ----------
    tensor : torch.Tensor
        tensor to split
    dim : int
        dimension along which to split; must be a valid dimension of ``tensor``
    num_chunks : int
        number of chunks; must evenly divide ``tensor.shape[dim]``

    Returns
    -------
    Tuple[torch.Tensor, ...]
        views of ``tensor``, each of size ``tensor.shape[dim] // num_chunks``
        along ``dim``
    """
    assert (
        dim < tensor.dim()
    ), f"Error, tensor dimension is {tensor.dim()} which cannot be split along {dim}"
    # FIX: corrected typo "numnber" -> "number" in the assertion message
    assert (
        tensor.shape[dim] % num_chunks == 0
    ), f"Error, cannot split dim {dim} evenly. Dim size is \
{tensor.shape[dim]} and requested number of splits is {num_chunks}"
    chunk_size = tensor.shape[dim] // num_chunks
    return torch.split(tensor, chunk_size, dim=dim)
@torch.no_grad()
def gather_loss(loss: float, dst_rank: int = 0, mean: bool = True):
    """Gathers loss from all processes to one for logging

    Parameters
    ----------
    loss : float
        loss value
    dst_rank : int, Optional
        destination rank to gather to, by default 0
    mean : bool, Optional
        Calculate the mean of the losses gathered, by default True

    Returns
    -------
    Optional[float]
        summed (and optionally averaged) loss on rank ``dst_rank``;
        ``None`` on every other rank

    Raises
    ------
    Exception
        If DistributedManager has yet to be initialized
    """
    if not DistributedManager.is_initialized():
        raise Exception(
            "Distributed manager should be initialized when using gather_loss"
        )
    distmng = DistributedManager()
    # wrap the python float in a 1-element tensor for the collective call
    loss = torch.Tensor([loss])
    # For serial runs, just return the current loss!
    if distmng.world_size == 1:
        return float(loss)
    # Gather using PyTorch distributed function
    # (the receive-buffer list is only needed on the destination rank)
    gather_list = None
    if distmng.rank == dst_rank:
        gather_list = [
            torch.zeros(1).to(distmng.device) for i in range(distmng.world_size)
        ]
    dist.gather(loss.to(distmng.device), gather_list, dst_rank)
    # Return loss if dst_rank, None otherwise
    if distmng.rank == dst_rank:
        loss = torch.sum(torch.cat(gather_list))
        if mean:
            loss = loss / distmng.world_size
        return float(loss.cpu())
    else:
        return None
# distributed primitives
def _transpose(tensor, dim0, dim1, group=None, async_op=False):
    """Perform distributed transpose of tensor to switch sharding dimension

    Splits ``tensor`` evenly along ``dim0`` (assumes divisibility by the
    group size) and exchanges the chunks between all ranks of ``group``
    via an all_to_all.

    NOTE(review): ``dim1`` is never used in this implementation —
    presumably the concatenation along the new sharding dimension happens
    at the caller; confirm intended usage.

    Returns the list of received chunks and the all_to_all request handle
    (``None`` unless ``async_op`` is set).
    """
    # get input format
    input_format = get_memory_format(tensor)
    # get comm params
    comm_size = dist.get_world_size(group=group)
    # split and local transposition
    split_size = tensor.shape[dim0] // comm_size
    x_send = [
        y.contiguous(memory_format=input_format)
        for y in torch.split(tensor, split_size, dim=dim0)
    ]
    x_recv = [torch.empty_like(x_send[0]) for _ in range(comm_size)]
    # global transposition
    req = dist.all_to_all(x_recv, x_send, group=group, async_op=async_op)
    return x_recv, req
def _reduce(input_, use_fp32=True, group=None):
    """All-reduce the input tensor across model parallel group.

    With ``use_fp32`` the reduction runs in FP32 and the result is cast
    back to the input dtype (better accuracy for reduced-precision
    inputs). The reduction happens in place on ``input_`` for the
    non-FP32 path.
    """
    # Bypass the function if we are using only 1 GPU.
    if dist.get_world_size(group=group) == 1:
        return input_
    # All-reduce.
    if use_fp32:
        # upcast, reduce, then cast back to the original dtype
        dtype = input_.dtype
        inputf_ = input_.float()
        dist.all_reduce(inputf_, group=group)
        input_ = inputf_.to(dtype)
    else:
        dist.all_reduce(input_, group=group)
    return input_
def _split(input_, dim_, group=None):
    """Split the tensor along dimension ``dim_`` and keep the slice
    corresponding to this rank.

    (The original docstring said "last dimension"; the split is actually
    performed along ``dim_``.) Requires ``input_.shape[dim_]`` to be
    divisible by the group size (asserted in ``split_tensor_along_dim``).
    """
    # get input format
    input_format = get_memory_format(input_)
    # Bypass the function if we are using only 1 GPU.
    comm_size = dist.get_world_size(group=group)
    if comm_size == 1:
        return input_
    # Split along last dimension.
    input_list = split_tensor_along_dim(input_, dim_, comm_size)
    # Note: torch.split does not create contiguous tensors by default.
    rank = dist.get_rank(group=group)
    output = input_list[rank].contiguous(memory_format=input_format)
    return output
def _gather(input_, dim_, group=None):
    """Gather tensors and concatenate along the specified dimension.

    All ranks must pass tensors of identical shape (plain ``all_gather``);
    for variable chunk sizes see ``all_gather_v_wrapper``.
    """
    # get input format
    input_format = get_memory_format(input_)
    comm_size = dist.get_world_size(group=group)
    # Bypass the function if we are using only 1 GPU.
    if comm_size == 1:
        return input_
    # sanity checks
    assert (
        dim_ < input_.dim()
    ), f"Error, cannot gather along {dim_} for tensor with {input_.dim()} dimensions."
    # Size and dimension.
    comm_rank = dist.get_rank(group=group)
    # placing the local tensor into its own slot avoids an extra copy
    tensor_list = [torch.empty_like(input_) for _ in range(comm_size)]
    tensor_list[comm_rank] = input_
    dist.all_gather(tensor_list, input_, group=group)
    # Note: torch.cat already creates a contiguous tensor.
    output = torch.cat(tensor_list, dim=dim_).contiguous(memory_format=input_format)
    return output
def all_gather_v_wrapper(
    tensor: torch.Tensor,
    sizes: List[int],
    dim: int = 0,
    group: Optional[dist.ProcessGroup] = None,
) -> torch.Tensor:
    """
    Implements a distributed AllGatherV primitive. It is based
    on the idea of a single global tensor which is distributed along
    a specified dimension into chunks of variable size.
    This primitive gathers all local tensors from each rank into the
    full global tensor onto each rank.

    Parameters
    ----------
    tensor : "torch.Tensor"
        local tensor on each rank
    sizes : List[int]
        list of the sizes of each chunk on each rank along distributed dimension,
        valid and set on each rank
    dim : int, optional
        dimension along which global tensor is distributed, by default 0
    group : Optional[dist.ProcessGroup], optional
        process group along which global tensor is shared, by default None

    Returns
    -------
    torch.Tensor
        full global tensor, valid on each rank
    """
    comm_size = dist.get_world_size(group=group)
    rank = dist.get_rank(group=group)  # NOTE(review): unused in this routine
    assert len(sizes) == comm_size
    assert dim < tensor.dim()
    # single-rank group: nothing to gather
    if comm_size == 1:
        return tensor
    # pre-allocate one receive buffer per rank with that rank's chunk
    # size along the distributed dimension
    tensor_shape = list(tensor.shape)
    tensor_list = [None] * comm_size
    for src in range(comm_size):
        tensor_shape[dim] = sizes[src]
        tensor_list[src] = torch.empty(
            tensor_shape,
            dtype=tensor.dtype,
            device=tensor.device,
        )
    dist.all_gather(tensor_list, tensor, group=group)
    # concatenation along `dim` reconstructs the global tensor
    output = torch.cat(tensor_list, dim=dim)
    return output
def all_reduce_v_wrapper(
    tensor: torch.Tensor,
    sizes: List[int],
    dim: int = 0,
    use_fp32: bool = True,
    group: Optional[dist.ProcessGroup] = None,
) -> torch.Tensor:
    """
    Implements a distributed AllReduceV primitive. It is based
    on the idea of a single global tensor which can be distributed
    along a specified dimension into chunks of variable size.
    This primitive assumes different global tensors of the same shape on each
    rank. It then re-distributes chunks of all these tensors such that each rank
    receives all corresponding parts of a global tensor. Each rank then sums up
    the chunks after receiving it. By design, this primitive thus implements the
    backward pass of the "all_gather_v" primitive. In this case, the result would
    be a single global gradient tensor distributed onto different ranks.

    Parameters
    ----------
    tensor : torch.Tensor
        global tensor on each rank (different one on each rank)
    sizes : List[int]
        list of the sizes of each chunk on each rank along distributed dimension,
        valid and set on each rank
    dim : int, optional
        dimension along which global tensor is distributed, by default 0
    use_fp32 : bool, optional
        flag to specify FP32 precision for the reduction, by default True
    group : Optional[dist.ProcessGroup], optional
        process group along which global tensor is shared, by default None

    Returns
    -------
    torch.Tensor
        local tensor, i.e. result of reduction of all corresponding chunks
        from all global tensors for each rank separately
    """
    comm_size = dist.get_world_size(group=group)
    rank = dist.get_rank(group=group)
    assert len(sizes) == comm_size
    assert dim < tensor.dim()
    # NOTE: removed an unused `global_size = sum(sizes)` local
    # each rank receives `comm_size` copies of its own chunk
    tensor_shape = list(tensor.shape)
    tensor_shape[dim] = sizes[rank]
    tmp = [
        torch.empty(
            tensor_shape,
            dtype=tensor.dtype,
            device=tensor.device,
        )
        for _ in range(comm_size)
    ]
    scatter_list = list(torch.split(tensor, sizes, dim=dim))
    dist.all_to_all(tmp, scatter_list, group=group)
    # stack received chunks on a fresh trailing dim so they can be summed
    stack_dim = tensor.dim()
    tmp = torch.stack(tmp, dim=stack_dim)
    if use_fp32:
        # cast to float before sum and return float, then cast back
        output = tmp.sum(dim=stack_dim, dtype=torch.float32)
        output = output.to(dtype=tensor.dtype)
    else:
        # else: just do sum in native dtype
        output = tmp.sum(dim=stack_dim)
    return output
def gather_v_wrapper(
    tensor: torch.Tensor,
    sizes: List[int],
    dim: int = 0,
    dst: int = 0,
    group: Optional[dist.ProcessGroup] = None,
) -> torch.Tensor:
    """
    Implements a distributed GatherV primitive. It is based
    on the idea of a single global tensor which is distributed along
    a specified dimension into chunks of variable size.
    This primitive assumes such a distributed tensor and gathers all
    local tensors from each rank into the full global tensor valid
    on the specified destination rank.

    Parameters
    ----------
    tensor : torch.Tensor
        local tensor on each rank
    sizes : List[int]
        list of the sizes of each chunk on each rank along distributed dimension,
        valid and set on each rank
    dim : int, optional
        dimension along which global tensor is distributed, by default 0
    dst : int, optional
        destination rank which contains the full global tensor after the operation, by default 0
    group : Optional[dist.ProcessGroup], optional
        process group along which global tensor is shared, by default None

    Returns
    -------
    torch.Tensor
        full global tensor, valid on destination rank
    """
    comm_size = dist.get_world_size(group=group)
    rank = dist.get_rank(group=group)
    assert len(sizes) == comm_size
    assert 0 <= dst < comm_size
    assert dim < tensor.dim()
    assert tensor.size(dim) == sizes[rank]
    if comm_size == 1:
        return tensor
    # receive buffers sized per-rank along the distributed dimension
    gather_list = [None] * comm_size
    tensor_shape = list(tensor.shape)
    for r in range(comm_size):
        tensor_shape[dim] = sizes[r]
        gather_list[r] = torch.empty(
            tensor_shape,
            dtype=tensor.dtype,
            device=tensor.device,
        )
    # dist.scatter doesn't support tensors of different shape
    # so this implementation is using explicit send/recv combinations
    if rank == dst:
        # post all receives first, then wait, to avoid serializing ranks
        req_list = [None] * comm_size
        for r in range(comm_size):
            if r == dst:
                gather_list[r] = tensor
            else:
                req_list[r] = dist.irecv(gather_list[r], src=r, group=group)
        for r in range(comm_size):
            if r != dst:
                req_list[r].wait()
    else:
        req = dist.isend(tensor, dst=dst, group=group)
        req.wait()
    # NOTE: on ranks != dst the buffers are never filled, so the returned
    # tensor has the global shape but undefined content there; it is only
    # valid on the destination rank (see docstring)
    output = torch.cat(gather_list, dim=dim)
    return output
def scatter_v_wrapper(
    tensor: torch.Tensor,
    sizes: List[int],
    dim: int = 0,
    src: int = 0,
    group: Optional[dist.ProcessGroup] = None,
) -> torch.Tensor:
    """
    Implements a distributed ScatterV primitive. It is based
    on the idea of a single global tensor which is distributed along
    a specified dimension into chunks of variable size.
    This primitive scatters the global tensor from a specified source rank
    into local chunks onto each other rank.

    Parameters
    ----------
    tensor : torch.Tensor
        global tensor, valid on source rank
    sizes : List[int]
        list of the sizes of each chunk on each rank along distributed dimension,
        valid and set each rank
    dim : int, optional
        dimension along which global tensor is distributed, by default 0
    src : int, optional
        source rank of primitive, i.e. rank of original full global tensor, by default 0
    group : Optional[dist.ProcessGroup], optional
        process group along which global tensor is shared, by default None

    Returns
    -------
    torch.Tensor
        corresponding local part of the global tensor on each rank
    """
    comm_size = dist.get_world_size(group=group)
    rank = dist.get_rank(group=group)
    assert len(sizes) == comm_size
    assert 0 <= src < comm_size
    assert dim < tensor.dim()
    # receive buffer sized to this rank's chunk
    tensor_shape = list(tensor.shape)
    tensor_shape[dim] = sizes[rank]
    output = torch.empty(
        tensor_shape,
        dtype=tensor.dtype,
        device=tensor.device,
    )
    # dist.scatter doesn't support tensors of different shape
    # so this implementation is using explicit send/recv combinations
    scatter_list = None
    if rank == src:
        # post all sends first, then wait, to avoid serializing ranks
        scatter_list = torch.split(tensor, sizes, dim=dim)
        req_list = [None] * comm_size
        for r in range(comm_size):
            tensor_to_scatter_to_r = scatter_list[r]
            if r == src:
                # local chunk needs no communication
                output = tensor_to_scatter_to_r
            else:
                req_list[r] = dist.isend(tensor_to_scatter_to_r, dst=r, group=group)
        for r in range(comm_size):
            if r != src:
                req_list[r].wait()
    else:
        req = dist.irecv(output, src=src, group=group)
        req.wait()
    return output
def indexed_all_to_all_v_wrapper(
    tensor: torch.Tensor,
    indices: List[torch.Tensor],
    sizes: List[List[int]],
    dim: int = 0,
    group: Optional[dist.ProcessGroup] = None,
) -> torch.Tensor:
    """
    Implements an indexed version of a distributed AllToAllV
    primitive. It is based on the idea of a single global tensor which
    is distributed along a specified dimension into chunks of variable size.
    This primitive assumes a set of indices into this dimension which indicate
    the corresponding slices sent to each other rank forming an indexed version
    of an AllToAllV primitive.

    Parameters
    ----------
    tensor : torch.Tensor
        local part of global tensor on each rank
    indices : List[torch.Tensor]
        list of indices on each rank of slices being sent to
        each other rank from this rank
    sizes : List[List[int]]
        number of indices each rank sends to each other rank,
        valid and set on each rank, e.g. sizes[0][3] corresponds
        to the number of slices rank 0 sends to rank 3
    dim : int
        dimension along which global tensor is distributed, by default 0
    group : Optional[dist.ProcessGroup], optional
        process group along which global tensor is shared, by default None

    Returns
    -------
    torch.Tensor
        local result of primitive corresponding to indexed global tensor
    """
    comm_size = dist.get_world_size(group=group)
    rank = dist.get_rank(group=group)
    assert len(sizes) == comm_size
    assert len(sizes[rank]) == comm_size
    assert len(indices) == comm_size
    assert dim < tensor.dim()
    # concatenate per-destination index lists and pull out all outgoing
    # slices in one index_select
    indices = torch.cat(indices, dim=0)
    tensor_to_send = torch.index_select(tensor, dim=dim, index=indices)
    # one ScatterV per source rank; every rank must execute these calls
    # in the same order (r = 0 .. comm_size-1)
    recv_list = [None] * comm_size
    for r in range(comm_size):
        recv_list[r] = scatter_v_wrapper(
            tensor_to_send,
            sizes=sizes[r],
            src=r,
            dim=dim,
            group=group,
        )
    tensor_to_recv = torch.cat(recv_list, dim=dim)
    return tensor_to_recv
def indexed_all_to_all_v_wrapper_bwd(
    tensor: torch.Tensor,
    indices: List[torch.Tensor],
    sizes: List[List[int]],
    tensor_size_along_dim: int,
    use_fp32: bool = True,
    dim: int = 0,
    group: Optional[dist.ProcessGroup] = None,
) -> torch.Tensor:
    """
    Implements the backward pass to the indexed version of a distributed
    AllToAllV primitive.

    Parameters
    ----------
    tensor : torch.Tensor
        local tensor, i.e. gradient on resulting tensor from forward pass
    indices : List[torch.Tensor]
        list of indices on each rank of slices being sent to
        each other rank from this rank
    sizes : List[List[int]]
        list of the sizes of each chunk on each rank along distributed dimension,
        valid and set on each rank
    tensor_size_along_dim : int
        size of original local tensor along specified dimension,
        i.e. from the corresponding forward pass
    use_fp32 : bool, optional
        flag to specify FP32 precision, by default True
    dim : int, optional
        dimension along with global tensor is distributed, by default 0
    group : Optional[dist.ProcessGroup], optional
        process group along which global tensor is shared, by default None

    Returns
    -------
    torch.Tensor
        result of primitive corresponding to indexed global tensor
    """
    comm_size = dist.get_world_size(group=group)
    rank = dist.get_rank(group=group)
    assert len(sizes) == comm_size
    assert len(sizes[rank]) == comm_size
    assert len(indices) == comm_size
    assert dim < tensor.dim()
    # flattened list of the indices this rank originally sent out;
    # used below to accumulate the returned gradients in place
    indices = torch.cat(indices, dim=0)
    tensor_shape = list(tensor.shape)
    # scatter gradients, roles reversed compared to forward pass
    # (every rank must execute the ScatterV calls in the same order)
    recv_sizes = [sizes[r][rank] for r in range(comm_size)]
    recv_list = [None] * comm_size
    for r in range(comm_size):
        recv_list[r] = scatter_v_wrapper(
            tensor, recv_sizes, dim=dim, src=r, group=group
        )
    tensor_to_recv = torch.cat(recv_list, dim=dim)
    # sum up gathered gradients and taking
    # care of precision handling as specified
    # by boolean flag
    tensor_shape[dim] = tensor_size_along_dim
    if use_fp32:
        out = torch.zeros(
            tensor_shape,
            dtype=torch.float32,
            device=tensor.device,
        )
        tensor_to_recv = tensor_to_recv.to(dtype=torch.float32)
    else:
        out = torch.zeros(
            tensor_shape,
            dtype=tensor.dtype,
            device=tensor.device,
        )
    # index_add_ sums contributions for slices that were sent out
    # multiple times (same index to several ranks)
    out.index_add_(source=tensor_to_recv, index=indices, dim=dim)
    if use_fp32:
        out = out.to(tensor.dtype)
    return out
|
modulus-main
|
modulus/distributed/utils.py
|
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
import torch.distributed as dist
from typing import Optional
import os
import numpy as np
from warnings import warn
class DistributedManager(object):
"""Distributed Manager for setting up distributed training enviroment.
This is a singleton that creates a persistance class instance for storing parallel
environment information through out the life time of the program. This should be
used to help set up Distributed Data Parallel and parallel datapipes.
Note
----
One should call `DistributedManager.initialize()` prior to constructing a manager
object
Example
-------
>>> DistributedManager.initialize()
>>> manager = DistributedManager()
>>> manager.rank
0
>>> manager.world_size
1
"""
_shared_state = {}
    def __new__(cls):
        """Create an instance sharing the class-wide singleton state."""
        obj = super(DistributedManager, cls).__new__(cls)
        # Borg pattern: every instance shares the same __dict__, so all
        # DistributedManager() objects observe identical state
        obj.__dict__ = cls._shared_state
        # Set the defaults (only on first construction; later instances
        # see the already-populated shared state and skip these)
        if not hasattr(obj, "_rank"):
            obj._rank = 0
        if not hasattr(obj, "_world_size"):
            obj._world_size = 1
        if not hasattr(obj, "_local_rank"):
            obj._local_rank = 0
        if not hasattr(obj, "_distributed"):
            obj._distributed = False
        if not hasattr(obj, "_device"):
            obj._device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
        if not hasattr(obj, "_cuda"):
            obj._cuda = torch.cuda.is_available()
        if not hasattr(obj, "_broadcast_buffers"):
            obj._broadcast_buffers = False
        if not hasattr(obj, "_find_unused_parameters"):
            obj._find_unused_parameters = False
        if not hasattr(obj, "_initialization_method"):
            obj._initialization_method = "None"
        if not hasattr(obj, "_groups"):
            obj._groups = {}
        if not hasattr(obj, "_group_ranks"):
            obj._group_ranks = {}
        if not hasattr(obj, "_group_names"):
            obj._group_names = {}
        return obj
    @property
    def rank(self):
        """Global rank of this process (0-based, across all processes)."""
        return self._rank
    @property
    def local_rank(self):
        """Rank of this process on its local machine (0-based)."""
        return self._local_rank
    @property
    def world_size(self):
        """Total number of processes in the distributed environment."""
        return self._world_size
    @property
    def device(self):
        """``torch.device`` assigned to this process."""
        return self._device
    @property
    def distributed(self):
        """Whether a distributed environment has been set up."""
        return self._distributed
    @property
    def cuda(self):
        """Whether CUDA was available when the manager was created."""
        return self._cuda
    @property
    def group_names(self):
        """
        Returns a view of the names of all named process groups created
        (a ``dict_keys`` view over the internal group registry)
        """
        return self._groups.keys()
def group(self, name=None):
"""
Returns a process group with the given name
If name is None, group is also None indicating the default process group
If named group does not exist, returns None also
"""
if name in self._groups.keys():
return self._groups[name]
else:
return None
    def group_size(self, name=None):
        """
        Returns the size of named process group

        ``None`` returns the world size.
        """
        if name is None:
            return self._world_size
        # NOTE(review): an unknown name yields group=None below, i.e. the
        # size of the default (world) group is returned — confirm intent
        group = self.group(name)
        return dist.get_world_size(group=group)
    def group_rank(self, name=None):
        """
        Returns the rank in named process group

        ``None`` returns the global rank; an unknown name returns 0.
        """
        if name is None:
            return self._rank
        group = self.group(name)
        if group is None:
            # unknown group name: fall back to rank 0
            return 0
        else:
            return dist.get_rank(group=group)
    def group_name(self, group=None):
        """
        Returns the name of process group

        ``None`` maps to ``None`` (the default group); raises ``KeyError``
        for a group object that was not registered by this manager.
        """
        if group is None:
            return None
        return self._group_names[group]
    @property
    def broadcast_buffers(self):
        """Value forwarded to ``broadcast_buffers`` in PyTorch DDP."""
        return self._broadcast_buffers
    @broadcast_buffers.setter
    def broadcast_buffers(self, broadcast: bool):
        """Setter for broadcast_buffers"""
        self._broadcast_buffers = broadcast
    @property
    def find_unused_parameters(self):
        """Value forwarded to ``find_unused_parameters`` in PyTorch DDP."""
        return self._find_unused_parameters
    @find_unused_parameters.setter
    def find_unused_parameters(self, find_params: bool):
        """Setter for find_unused_parameters

        Warns when enabled, since it adds overhead to every DDP step.
        """
        if find_params:
            warn(
                "Setting `find_unused_parameters` in DDP to true, "
                "use only if necessary."
            )
        self._find_unused_parameters = find_params
    def __str__(self):
        """Human-readable summary of rank, world size, init method and device."""
        output = (
            f"Initialized process {self.rank} of {self.world_size} using "
            f"method '{self._initialization_method}'. Device set to {str(self.device)}"
        )
        return output
@classmethod
def is_initialized(cls) -> bool:
"""If manager singleton has been initialized"""
return len(cls._shared_state) > 0
@staticmethod
def get_available_backend():
"""Get communication backend"""
if torch.cuda.is_available() and torch.distributed.is_nccl_available():
return "nccl"
else:
return "gloo"
@staticmethod
def initialize_env():
"""Setup method using generic initialization"""
rank = int(os.environ.get("RANK"))
world_size = int(os.environ.get("WORLD_SIZE"))
if "LOCAL_RANK" in os.environ:
local_rank = int(os.environ.get("LOCAL_RANK"))
else:
local_rank = rank % torch.cuda.device_count()
# Read env variables
addr = os.environ.get("MASTER_ADDR")
port = os.environ.get("MASTER_PORT")
DistributedManager.setup(
rank=rank,
world_size=world_size,
local_rank=local_rank,
addr=addr,
port=port,
backend=DistributedManager.get_available_backend(),
)
    @staticmethod
    def initialize_open_mpi(addr, port):
        """Setup method using OpenMPI initialization

        Parameters
        ----------
        addr : str
            Master node address for the TCP rendezvous.
        port : str
            Master node port.
        """
        # OpenMPI exports rank/size/local-rank directly; raises TypeError
        # if the variables are absent (int(None)).
        rank = int(os.environ.get("OMPI_COMM_WORLD_RANK"))
        world_size = int(os.environ.get("OMPI_COMM_WORLD_SIZE"))
        local_rank = int(os.environ.get("OMPI_COMM_WORLD_LOCAL_RANK"))
        DistributedManager.setup(
            rank=rank,
            world_size=world_size,
            local_rank=local_rank,
            addr=addr,
            port=port,
            backend=DistributedManager.get_available_backend(),
            method="openmpi",
        )
    @staticmethod
    def initialize_slurm(port):
        """Setup method using SLURM initialization

        Parameters
        ----------
        port : str
            Master node port; the address comes from SLURM itself.
        """
        rank = int(os.environ.get("SLURM_PROCID"))
        world_size = int(os.environ.get("SLURM_NPROCS"))
        local_rank = int(os.environ.get("SLURM_LOCALID"))
        # SLURM exposes the launch node's IP, used as the rendezvous address.
        addr = os.environ.get("SLURM_LAUNCH_NODE_IPADDR")
        DistributedManager.setup(
            rank=rank,
            world_size=world_size,
            local_rank=local_rank,
            addr=addr,
            port=port,
            backend=DistributedManager.get_available_backend(),
            method="slurm",
        )
@staticmethod
def initialize():
"""
Initialize distributed manager
Current supported initialization methods are:
`ENV`: PyTorch environment variable initialization
https://pytorch.org/docs/stable/distributed.html#environment-variable-initialization
`SLURM`: Initialization on SLURM systems.
Uses `SLURM_PROCID`, `SLURM_NPROCS`, `SLURM_LOCALID` and
`SLURM_LAUNCH_NODE_IPADDR` environment variables.
`OPENMPI`: Initialization for OpenMPI launchers.
Uses `OMPI_COMM_WORLD_RANK`, `OMPI_COMM_WORLD_SIZE` and
`OMPI_COMM_WORLD_LOCAL_RANK` environment variables.
Initialization by default is done using the first valid method in the order
listed above. Initialization method can also be explicitly controlled using the
`MODULUS_DISTRIBUTED_INITIALIZATION_METHOD` environment variable and setting it
to one of the options above.
"""
if DistributedManager.is_initialized():
warn("Distributed manager is already intialized")
return
addr = os.getenv("MASTER_ADDR", "localhost")
port = os.getenv("MASTER_PORT", "12355")
# https://pytorch.org/docs/master/notes/cuda.html#id5
os.environ["NCCL_ASYNC_ERROR_HANDLING"] = "0"
initialization_method = os.getenv("MODULUS_DISTRIBUTED_INITIALIZATION_METHOD")
if initialization_method is None:
try:
DistributedManager.initialize_env()
except:
if "SLURM_PROCID" in os.environ:
DistributedManager.initialize_slurm(port)
elif "OMPI_COMM_WORLD_RANK" in os.environ:
DistributedManager.initialize_open_mpi(addr, port)
elif initialization_method == "ENV":
DistributedManager.initialize_env()
elif initialization_method == "SLURM":
DistributedManager.initialize_slurm(port)
elif initialization_method == "OPENMPI":
DistributedManager.initialize_open_mpi(addr, port)
else:
raise RuntimeError(
"Unknown initialization method "
f"{initialization_method}. "
"Supported values for "
"MODULUS_DISTRIBUTED_INITIALIZATION_METHOD are "
"ENV, SLURM and OPENMPI"
)
# Set per rank numpy random seed for data sampling
np.random.seed(seed=DistributedManager().rank)
    @staticmethod
    def setup(
        rank=0,
        world_size=1,
        local_rank=None,
        addr="localhost",
        port="12355",
        backend="nccl",
        method="env",
    ):
        """Set up PyTorch distributed process group and update manager attributes.

        Writes MASTER_ADDR/MASTER_PORT into the environment, initializes the
        default process group when world_size > 1, and records rank/device
        state on the singleton.
        """
        os.environ["MASTER_ADDR"] = addr
        os.environ["MASTER_PORT"] = str(port)
        manager = DistributedManager()
        manager._distributed = (world_size > 1) and torch.distributed.is_available()
        if manager._distributed:
            # Update rank and world_size if using distributed
            manager._rank = rank
            manager._world_size = world_size
            if local_rank is None:
                # Round-robin assignment of processes to visible GPUs.
                manager._local_rank = rank % torch.cuda.device_count()
            else:
                manager._local_rank = local_rank
            # Setup distributed process group
            # time.sleep(1)
            dist.init_process_group(
                backend, rank=manager.rank, world_size=manager.world_size
            )
        manager._device = torch.device(
            f"cuda:{manager.local_rank}" if torch.cuda.is_available() else "cpu"
        )
        # Needed for cuda graphs
        if torch.cuda.is_available():
            torch.cuda.set_device(manager.local_rank)
        manager._initialization_method = method
        # Set device for this process and empty cache to optimize memory usage
        # NOTE(review): `torch.cuda.device(...)` builds a context manager that
        # is never entered, so this line appears to be a no-op — the actual
        # device selection happens via `torch.cuda.set_device` above. Confirm.
        torch.cuda.device(manager.device)
        torch.cuda.empty_cache()
@staticmethod
def create_process_subgroup(
name: str, size: int, group_name: Optional[str] = None, verbose: bool = False
):
"""
Create a process subgroup of a parent process group. This must be a collective
call by all processes participating in this application.
Parameters
----------
name : str
Name of the process subgroup to be created.
size : int
Size of the process subgroup to be created. This must be an integer factor of
the parent group's size.
group_name : Optional[str]
Name of the parent process group, optional. If None, the default process group
will be used. Default None.
verbose : bool
Print out ranks of each created process group, default False.
"""
manager = DistributedManager()
if not manager.distributed:
return None
assert name not in manager._groups, f"Group with name {name} already exists"
# Get parent group's params
group = manager._group[group_name] if group_name else None
group_size = dist.get_world_size(group=group)
num_groups = manager.world_size // group_size
# Get number of sub-groups per parent group
assert (
group_size % size == 0
), f"Cannot divide group size {group_size} evenly into subgroups of size {size}"
num_subgroups = group_size // size
# Create all the sub-groups
# Note: all ranks in the job need to create all sub-groups in
# the same order even if a rank is not part of a sub-group
manager._group_ranks[name] = []
for g in range(num_groups):
for i in range(num_subgroups):
# Get global ranks that are part of this sub-group
start = i * size
end = start + size
if group_name:
ranks = manager._group_ranks[group_name][g][start:end]
else:
ranks = list(range(start, end))
# Create sub-group and keep track of ranks
tmp_group = dist.new_group(ranks=ranks)
manager._group_ranks[name].append(ranks)
if manager.rank in ranks:
# Set group in manager only if this rank is part of the group
manager._groups[name] = tmp_group
manager._group_names[tmp_group] = name
if verbose and manager.rank == 0:
print(f"Process group '{name}':")
for grp in manager._group_ranks[name]:
print(" ", grp)
    @staticmethod
    def create_orthogonal_process_group(
        name: str, group_name: str, verbose: bool = False
    ):
        """
        Create a process group that is orthogonal to the specified process group.

        This is a collective call: every rank must execute it, creating the
        groups in the same order, even ranks not belonging to a given group.

        Parameters
        ----------
        name : str
            Name of the process group to be created.
        group_name : str
            Name of the existing process group.
        verbose : bool
            Print out ranks of each created process group, default False.
        """
        manager = DistributedManager()
        if not manager.distributed:
            return None
        assert (
            group_name in manager._groups
        ), f"Group with name {group_name} does not exist"
        assert name not in manager._groups, f"Group with name {name} already exists"
        group_ranks = manager._group_ranks[group_name]
        # Transpose the rank grid: column i of the existing groups becomes
        # orthogonal group i.
        orthogonal_ranks = [list(i) for i in zip(*group_ranks)]
        for ranks in orthogonal_ranks:
            tmp_group = dist.new_group(ranks=ranks)
            if manager.rank in ranks:
                # Set group in manager only if this rank is part of the group
                manager._groups[name] = tmp_group
                manager._group_names[tmp_group] = name
        manager._group_ranks[name] = orthogonal_ranks
        if verbose and manager.rank == 0:
            print(f"Process group '{name}':")
            for grp in manager._group_ranks[name]:
                print("    ", grp)
    @staticmethod
    def cleanup():
        """Clean up distributed group and singleton.

        Destroys the default process group and wipes the shared Borg state so
        a subsequent ``initialize`` starts fresh.
        """
        dist.destroy_process_group()
        DistributedManager._shared_state = {}
|
modulus-main
|
modulus/distributed/manager.py
|
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
|
modulus-main
|
modulus/deploy/__init__.py
|
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .utils import export_to_onnx_stream, run_onnx_inference
|
modulus-main
|
modulus/deploy/onnx/__init__.py
|
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import io
import logging
import torch
import torch.nn as nn
try:
import onnxruntime as ort
except:
ort = None
from typing import Tuple, Union
Tensor = torch.Tensor
logger = logging.getLogger("__name__")
def check_ort_install(func):
    """Decorator to check if ONNX runtime is installed.

    Raises
    ------
    ModuleNotFoundError
        At call time, if ``onnxruntime`` could not be imported.
    """

    def _wrapper_ort_install(*args, **kwargs):
        if ort is None:
            raise ModuleNotFoundError(
                "ONNXRuntime is not installed. 'pip install \
                onnxruntime onnxruntime_gpu'"
            )
        # Fix: the original invoked `func(*args, **kwargs)` twice (first
        # result discarded), duplicating any side effects and doubling the
        # cost of every wrapped call. Call exactly once.
        return func(*args, **kwargs)

    return _wrapper_ort_install
def export_to_onnx_stream(
model: nn.Module,
invars: Union[Tensor, Tuple[Tensor, ...]],
verbose: bool = False,
) -> bytes:
"""Exports PyTorch model to byte stream instead of a file
Parameters
----------
model : nn.Module
PyTorch model to export
invars : Union[Tensor, Tuple[Tensor,...]]
Input tensor(s)
verbose : bool, optional
Print out a human-readable representation of the model, by default False
Returns
-------
bytes
ONNX model byte stream
Note
----
Exporting a ONNX model while training when using CUDA graphs will likely break things.
Because model must be copied to the CPU and back for export.
Note
----
ONNX exporting can take a longer time when using custom ONNX functions.
"""
# Move inputs to CPU for ONNX export
if isinstance(invars, Tensor):
invars = (invars.detach().cpu(),)
else:
invars = tuple([invar.detach().cpu() for invar in invars])
# Use model's device if provided (Modulus modules have this)
if hasattr(model, "device"):
model_device = model.device
elif len(list(model.parameters())) > 0:
model_device = next(model.parameters()).device
else:
model_device = "cpu"
with io.BytesIO() as onnx_model:
# Export to ONNX.
torch.onnx.export(
model.cpu(),
invars,
onnx_model,
operator_export_type=torch.onnx.OperatorExportTypes.ONNX,
opset_version=15,
verbose=verbose,
)
# Move model back to original device
model.to(model_device)
return onnx_model.getvalue()
@check_ort_install
def get_ort_session(
    model: Union[bytes, str],
    device: torch.device = "cuda",
):
    """Create a ORT session for performing inference of an onnx model

    Parameters
    ----------
    model : Union[bytes, str]
        ONNX model byte string or file name/path
    device : torch.device, optional
        Device to run ORT, by default "cuda"

    Returns
    -------
    ort.InferenceSession
        ONNX runtime session
    """
    # CPU provider is always present as a fallback; CUDA is prepended when
    # a cuda device is requested.
    providers = ["CPUExecutionProvider"]
    if "cuda" in str(device):
        providers.insert(0, "CUDAExecutionProvider")
    # Must run on GPU as Rfft is currently implemented only for GPU.
    return ort.InferenceSession(model, providers=providers)
@check_ort_install
def run_onnx_inference(
    model: Union[bytes, str],
    invars: Union[Tensor, Tuple[Tensor, ...]],
    device: torch.device = "cuda",
) -> Tuple[Tensor]:
    """Runs ONNX model in ORT session

    Parameters
    ----------
    model : Union[bytes, str]
        ONNX model byte string or file name/path
    invars : Union[Tensor, Tuple[Tensor,...]]
        Input tensors
    device : torch.device, optional
        Device to run ORT, by default "cuda"

    Returns
    -------
    Tuple[Tensor]
        Tuple of output tensors on CPU
    """
    session = get_ort_session(model, device)
    if isinstance(invars, Tensor):
        invars = (invars,)
    # Pair session input names with supplied tensors, converting to numpy.
    feed = {
        meta.name: tensor.detach().cpu().numpy()
        for meta, tensor in zip(session.get_inputs(), invars)
    }
    raw_outputs = session.run(None, feed)
    # Convert ORT numpy outputs back into torch tensors.
    return tuple(torch.Tensor(value) for value in raw_outputs)
|
modulus-main
|
modulus/deploy/onnx/utils.py
|
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
|
modulus-main
|
modulus/deploy/trt/__init__.py
|
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
|
modulus-main
|
modulus/deploy/triton/__init__.py
|
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import builtins
import hashlib
import logging
import os
import urllib.parse
import urllib.request
from typing import List

import fsspec
import fsspec.implementations.cached
import requests
import s3fs
logger = logging.getLogger(__name__)
# Cache directory for downloaded remote files; overridable via env var.
try:
    LOCAL_CACHE = os.environ["LOCAL_CACHE"]
except KeyError:
    # Fall back to a per-user cache dir; assumes HOME is set (POSIX).
    LOCAL_CACHE = os.environ["HOME"] + "/.cache/modulus"
def _cache_fs(fs):
    """Wrap *fs* in an fsspec caching filesystem backed by LOCAL_CACHE."""
    return fsspec.implementations.cached.CachingFileSystem(
        fs=fs, cache_storage=LOCAL_CACHE
    )
def _get_fs(path):
    """Return the fsspec filesystem implementation appropriate for *path*."""
    if not path.startswith("s3://"):
        return fsspec.filesystem("file")
    # NOTE(review): endpoint is hard-coded to NVIDIA's internal PBSS store.
    return s3fs.S3FileSystem(client_kwargs=dict(endpoint_url="https://pbss.s8k.io"))
def _download_cached(path: str, recursive: bool = False) -> str:
    """Return a local path for *path*, downloading remote items into LOCAL_CACHE.

    ``s3://`` and ``http://`` paths are fetched once and cached under a
    sha256-derived filename; ``file://`` and plain local paths are returned
    without copying.
    """
    # Cache key hashes the full remote path, not the file contents.
    sha = hashlib.sha256(path.encode())
    filename = sha.hexdigest()
    try:
        os.makedirs(LOCAL_CACHE, exist_ok=True)
    except PermissionError as error:
        logger.error(
            "Failed to create cache folder, check permissions or set a cache"
            + " location using the LOCAL_CACHE enviroment variable"
        )
        raise error
    except OSError as error:
        logger.error(
            "Failed to create cache folder, set a cache"
            + " location using the LOCAL_CACHE enviroment variable"
        )
        raise error
    cache_path = os.path.join(LOCAL_CACHE, filename)
    url = urllib.parse.urlparse(path)
    # TODO watch for race condition here
    if not os.path.exists(cache_path):
        logger.debug("Downloading %s to cache: %s", path, cache_path)
        if path.startswith("s3://"):
            fs = _get_fs(path)
            fs.get(path, cache_path, recursive=recursive)
        elif url.scheme == "http":
            # urllib.request.urlretrieve(path, cache_path)
            # TODO: Check if this supports directory fetches
            # NOTE(review): "https" URLs do not match this branch and fall
            # through to the final `else`, returned unmodified — confirm.
            response = requests.get(path, stream=True, timeout=5)
            with open(cache_path, "wb") as output:
                for chunk in response.iter_content(chunk_size=8192):
                    if chunk:
                        output.write(chunk)
        elif url.scheme == "file":
            # Strip the file:// prefix and return the underlying local path.
            path = os.path.join(url.netloc, url.path)
            return path
        else:
            # Plain local path (or unhandled scheme): nothing to download.
            return path
    else:
        logger.debug("Opening from cache: %s", cache_path)
    return cache_path
class Package:
    """A package

    Represents a potentially remote directory tree
    """

    def __init__(self, root: str, seperator: str):
        # NOTE: the parameter spelling "seperator" (sic) is public API.
        self.root = root
        self.seperator = seperator

    def get(self, path: str, recursive: bool = False) -> str:
        """Get a local path to the item at ``path``

        ``path`` might be a remote file, in which case it is downloaded to a
        local cache at $LOCAL_CACHE or $HOME/.cache/modulus first.
        """
        return _download_cached(self._fullpath(path), recursive=recursive)

    def _fullpath(self, path):
        """Join ``path`` onto the package root with the configured separator."""
        return "".join((self.root, self.seperator, path))
|
modulus-main
|
modulus/utils/filesystem.py
|
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .capture import (
StaticCaptureTraining,
StaticCaptureEvaluateNoGrad,
)
|
modulus-main
|
modulus/utils/__init__.py
|
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import time
import functools
import torch
import logging
from logging import Logger
from typing import Union, Any, Callable, NewType, Dict, Optional
from contextlib import nullcontext
from modulus.distributed import DistributedManager
import modulus
# Documentary type aliases used in the StaticCapture signatures below.
float16 = NewType("float16", torch.float16)
bfloat16 = NewType("bfloat16", torch.bfloat16)
optim = NewType("optim", torch.optim)
class _StaticCapture(object):
    """Base class for StaticCapture decorator.

    This class should not be used, rather StaticCaptureTraining and StaticCaptureEvaluate
    should be used instead for training and evaluation functions.
    """

    # Grad scaler and checkpoint class variables use for checkpoint saving and loading
    # Since an instance of Static capture does not exist for checkpoint functions
    # one must use class functions to access state dicts
    _amp_scalers = {}
    _amp_scaler_checkpoints = {}
    _logger = logging.getLogger("capture")

    def __new__(cls, *args, **kwargs):
        # Alias the class-level registries onto the instance so methods can
        # use `self.` access while still sharing state across all captures.
        obj = super(_StaticCapture, cls).__new__(cls)
        obj.amp_scalers = cls._amp_scalers
        obj.amp_scaler_checkpoints = cls._amp_scaler_checkpoints
        obj.logger = cls._logger
        return obj

    def __init__(
        self,
        model: "modulus.Module",
        optim: Union[optim, None] = None,
        logger: Union[Logger, None] = None,
        use_graphs: bool = True,
        use_autocast: bool = True,
        use_gradscaler: bool = True,
        cuda_graph_warmup: int = 11,
        amp_type: Union[float16, bfloat16] = torch.float16,
        label: Optional[str] = None,
    ):
        self.logger = logger if logger else self.logger
        # Checkpoint label (used for gradscaler)
        self.label = label if label else f"scaler_{len(self.amp_scalers.keys())}"
        # DDP fix: unwrap the underlying Modulus module from a DDP wrapper
        if not isinstance(model, modulus.models.Module) and hasattr(model, "module"):
            model = model.module
        if not isinstance(model, modulus.models.Module):
            self.logger.error("Model not a Modulus Module!")
            raise ValueError("Model not a Modulus Module!")
        self.model = model
        self.optim = optim
        self.eval = False
        self.no_grad = False
        # Set up toggles for optimizations
        assert (
            amp_type == torch.float16 or amp_type == torch.bfloat16
        ), "AMP type must be torch.float16 or torch.bfloat16"
        # CUDA device
        if "cuda" in str(self.model.device):
            # CUDA graphs: only enabled if the model's meta data supports it
            if use_graphs and not self.model.meta.cuda_graphs:
                self.logger.warning(
                    f"Model {model.meta.name} does not support CUDA graphs, turning off"
                )
                use_graphs = False
            self.cuda_graphs_enabled = use_graphs
            # AMP GPU
            if not self.model.meta.amp_gpu:
                self.logger.warning(
                    f"Model {model.meta.name} does not support AMP on GPUs, turning off"
                )
                use_autocast = False
                use_gradscaler = False
            self.use_gradscaler = use_gradscaler
            self.use_autocast = use_autocast
            self.amp_device = "cuda"
            # Check if bfloat16 is suppored on the GPU
            if amp_type == torch.bfloat16 and not torch.cuda.is_bf16_supported():
                self.logger.warning(
                    "Current CUDA device does not support bfloat16, falling back to float16"
                )
                amp_type = torch.float16
            self.amp_dtype = amp_type
            # Gradient Scaler: only meaningful for float16 autocast
            scaler_enabled = self.use_gradscaler and amp_type == torch.float16
            self.scaler = self._init_amp_scaler(scaler_enabled, self.logger)
            # Side stream used for graph warmup / capture
            self.replay_stream = torch.cuda.Stream(self.model.device)
        # CPU device
        else:
            self.cuda_graphs_enabled = False
            # AMP CPU
            if use_autocast and not self.model.meta.amp_cpu:
                self.logger.warning(
                    f"Model {model.meta.name} does not support AMP on CPUs, turning off"
                )
                use_autocast = False
            self.use_autocast = use_autocast
            self.amp_device = "cpu"
            # Only bfloat16 is supported on CPUs
            # https://pytorch.org/docs/stable/amp.html#cpu-op-specific-behavior
            if amp_type == torch.float16 and use_autocast:
                self.logger.warning(
                    "torch.float16 not supported for CPU AMP, switching to torch.bfloat16"
                )
                amp_type = torch.bfloat16
            self.amp_dtype = torch.bfloat16
            # Gradient Scaler (not enabled)
            self.scaler = self._init_amp_scaler(False, self.logger)
            self.replay_stream = None
        if self.cuda_graphs_enabled:
            self.graph = torch.cuda.CUDAGraph()
        self.output = None
        self.iteration = 0
        self.cuda_graph_warmup = cuda_graph_warmup  # Default for DDP = 11

    def __call__(self, fn: Callable) -> Callable:
        self.function = fn

        @functools.wraps(fn)
        def decorated(*args: Any, **kwds: Any) -> Any:
            """Training step decorator function"""
            with torch.no_grad() if self.no_grad else nullcontext():
                if self.cuda_graphs_enabled:
                    self._cuda_graph_forward(*args, **kwds)
                else:
                    self._zero_grads()
                    self.output = self._amp_forward(*args, **kwds)
                if not self.eval:
                    # Update model parameters
                    self.scaler.step(self.optim)
                    self.scaler.update()
            return self.output

        return decorated

    def _cuda_graph_forward(self, *args: Any, **kwargs: Any) -> Any:
        """Forward training step with CUDA graphs

        Returns
        -------
        Any
            Output of neural network forward
        """
        # Graph warm up: run eagerly on a side stream until warmup completes
        if self.iteration < self.cuda_graph_warmup:
            self.replay_stream.wait_stream(torch.cuda.current_stream())
            self._zero_grads()
            with torch.cuda.stream(self.replay_stream):
                output = self._amp_forward(*args, **kwargs)
                self.output = output.detach()
            torch.cuda.current_stream().wait_stream(self.replay_stream)
        # CUDA Graphs
        else:
            # Graph record (first post-warmup iteration only)
            if self.iteration == self.cuda_graph_warmup:
                self.logger.warning(f"Recording graph of '{self.function.__name__}'")
                self._zero_grads()
                torch.cuda.synchronize()
                if DistributedManager().distributed:
                    torch.distributed.barrier()
                    # TODO: temporary workaround till this issue is fixed:
                    # https://github.com/pytorch/pytorch/pull/104487#issuecomment-1638665876
                    delay = os.environ.get("MODULUS_CUDA_GRAPH_CAPTURE_DELAY", "10")
                    time.sleep(int(delay))
                with torch.cuda.graph(self.graph):
                    output = self._amp_forward(*args, **kwargs)
                    self.output = output.detach()
            # Graph replay
            self.graph.replay()
        self.iteration += 1
        return self.output

    def _zero_grads(self):
        """Zero gradients

        Default to `set_to_none` since this will in general have lower memory
        footprint, and can modestly improve performance.

        Note
        ----
        Zeroing gradients can potentially cause an invalid CUDA memory access in another
        graph. However if your graph involves gradients, you much set your gradients to none.
        If there is already a graph recorded that includes these gradients, this will error.
        Use the `NoGrad` version of capture to avoid this issue for inferencers / validators.
        """
        # Skip zeroing if no grad is being used
        if self.no_grad:
            return
        try:
            self.optim.zero_grad(set_to_none=True)
        except (AttributeError, TypeError):
            # Fix: was a bare `except:`. AttributeError covers optim=None
            # (eval captures); TypeError covers apex-style optimizers whose
            # zero_grad does not accept `set_to_none`.
            if self.optim:
                self.optim.zero_grad()
        # For apex optim support and eval mode (need to reset model grads)
        self.model.zero_grad(set_to_none=True)

    def _amp_forward(self, *args, **kwargs) -> Any:
        """Compute loss and gradients (if training) with AMP

        Returns
        -------
        Any
            Output of neural network forward
        """
        with torch.autocast(
            self.amp_device, enabled=self.use_autocast, dtype=self.amp_dtype
        ):
            output = self.function(*args, **kwargs)
        if not self.eval:
            # In training mode output should be the loss
            self.scaler.scale(output).backward()
        return output

    def _init_amp_scaler(
        self, scaler_enabled: bool, logger: Logger
    ) -> torch.cuda.amp.GradScaler:
        # Create gradient scaler
        scaler = torch.cuda.amp.GradScaler(enabled=scaler_enabled)
        # Store scaler in class variable so checkpoints can find it
        self.amp_scalers[self.label] = scaler
        # Consistency fix: use the instance logger (as every sibling call
        # does) instead of the root `logging` module.
        self.logger.debug(f"Created gradient scaler {self.label}")
        # If our checkpoint dictionary has weights for this scaler lets load
        if self.label in self.amp_scaler_checkpoints:
            try:
                scaler.load_state_dict(self.amp_scaler_checkpoints[self.label])
                del self.amp_scaler_checkpoints[self.label]
                self.logger.info(f"Loaded grad scaler state dictionary {self.label}.")
            except Exception as e:
                self.logger.error(
                    f"Failed to load grad scaler {self.label} state dict from saved "
                    + "checkpoints. Did you switch the ordering of declared static captures?"
                )
                raise ValueError(e)
        return scaler

    @classmethod
    def state_dict(cls) -> Dict[str, Any]:
        """Class method for accsessing the StaticCapture state dictionary.
        Use this in a training checkpoint function.

        Returns
        -------
        Dict[str, Any]
            Dictionary of states to save for file
        """
        scaler_states = {}
        for key, value in cls._amp_scalers.items():
            scaler_states[key] = value.state_dict()
        return scaler_states

    @classmethod
    def load_state_dict(cls, state_dict: Dict[str, Any]) -> None:
        """Class method for loading a StaticCapture state dictionary.
        Use this in a training checkpoint function.

        Parameters
        ----------
        state_dict : Dict[str, Any]
            Dictionary of states previously produced by ``state_dict``.
        """
        scaler_states = {}
        for key, value in state_dict.items():
            # If scaler has been created already load the weights
            if key in cls._amp_scalers:
                try:
                    cls._amp_scalers[key].load_state_dict(value)
                    cls._logger.info(f"Loaded grad scaler state dictionary {key}.")
                except Exception as e:
                    cls._logger.error(
                        f"Failed to load grad scaler state dict with id {key}."
                        + " Something went wrong!"
                    )
                    raise ValueError(e)
            # Otherwise store in checkpoints for later use
            else:
                cls._amp_scaler_checkpoints[key] = value

    @classmethod
    def reset_state(cls):
        """Clear all registered scalers and pending checkpoint entries."""
        cls._amp_scalers = {}
        cls._amp_scaler_checkpoints = {}
class StaticCaptureTraining(_StaticCapture):
    """A performance optimization decorator for PyTorch training functions.

    This class should be initialized as a decorator on a function that computes the
    forward pass of the neural network and loss function. The user should only call the
    defind training step function. This will apply optimizations including: AMP and
    Cuda Graphs.

    Parameters
    ----------
    model : modulus.models.Module
        Modulus Model
    optim : torch.optim
        Optimizer
    logger : Union[Logger, None], optional
        Modulus Launch Logger, by default None
    use_graphs : bool, optional
        Toggle CUDA graphs if supported by model, by default True
    use_amp : bool, optional
        Toggle AMP if supported by mode, by default True
    cuda_graph_warmup : int, optional
        Number of warmup steps for cuda graphs, by default 11
    amp_type : Union[float16, bfloat16], optional
        Auto casting type for AMP, by default torch.float16
    label : Optional[str, None], optional
        Static capture checkpoint label, by default None

    Raises
    ------
    ValueError
        If the model provided is not a modulus.models.Module. I.e. has no meta data.

    Example
    -------
    >>> # Create model
    >>> model = modulus.models.mlp.FullyConnected(2, 64, 2)
    >>> input = torch.rand(8, 2)
    >>> output = torch.rand(8, 2)
    >>> # Create optimizer
    >>> optim = torch.optim.Adam(model.parameters(), lr=0.001)
    >>> # Create training step function with optimization wrapper
    >>> @StaticCaptureTraining(model=model, optim=optim)
    ... def training_step(model, invar, outvar):
    ...     predvar = model(invar)
    ...     loss = torch.sum(torch.pow(predvar - outvar, 2))
    ...     return loss
    ...
    >>> # Sample training loop
    >>> for i in range(3):
    ...     loss = training_step(model, input, output)
    ...

    Note
    ----
    Static captures must be checkpointed when training using the `state_dict()` if AMP
    is being used with gradient scaler. By default, this requires static captures to be
    instantiated in the same order as when they were checkpointed. The label parameter
    can be used to relax/circumvent this ordering requirement.

    Note
    ----
    Capturing multiple cuda graphs in a single program can lead to potential invalid CUDA
    memory access errors on some systems. Prioritize capturing training graphs when this
    occurs.
    """

    def __init__(
        self,
        model: "modulus.Module",
        optim: torch.optim,
        logger: Union[Logger, None] = None,
        use_graphs: bool = True,
        use_amp: bool = True,
        cuda_graph_warmup: int = 11,
        amp_type: Union[float16, bfloat16] = torch.float16,
        label: Optional[str] = None,
    ):
        # `use_amp` drives both autocast and the gradient scaler in the base
        # class (hence it is passed twice below).
        super().__init__(
            model,
            optim,
            logger,
            use_graphs,
            use_amp,
            use_amp,
            cuda_graph_warmup,
            amp_type,
            label,
        )
class StaticCaptureEvaluateNoGrad(_StaticCapture):
    """A performance optimization decorator for PyTorch no grad evaluation.

    This class should be initialized as a decorator on a function that computes run the
    forward pass of the model that does not require gradient calculations. This is the
    recommended method to use for inference and validation methods.

    Parameters
    ----------
    model : modulus.models.Module
        Modulus Model
    logger : Union[Logger, None], optional
        Modulus Launch Logger, by default None
    use_graphs : bool, optional
        Toggle CUDA graphs if supported by model, by default True
    use_amp : bool, optional
        Toggle AMP if supported by mode, by default True
    cuda_graph_warmup : int, optional
        Number of warmup steps for cuda graphs, by default 11
    amp_type : Union[float16, bfloat16], optional
        Auto casting type for AMP, by default torch.float16
    label : Optional[str, None], optional
        Static capture checkpoint label, by default None

    Raises
    ------
    ValueError
        If the model provided is not a modulus.models.Module. I.e. has no meta data.

    Example
    -------
    >>> # Create model
    >>> model = modulus.models.mlp.FullyConnected(2, 64, 2)
    >>> input = torch.rand(8, 2)
    >>> # Create evaluate function with optimization wrapper
    >>> @StaticCaptureEvaluateNoGrad(model=model)
    ... def eval_step(model, invar):
    ...     predvar = model(invar)
    ...     return predvar
    ...
    >>> output = eval_step(model, input)
    >>> output.size()
    torch.Size([8, 2])

    Note
    ----
    Capturing multiple cuda graphs in a single program can lead to potential invalid CUDA
    memory access errors on some systems. Prioritize capturing training graphs when this
    occurs.
    """

    def __init__(
        self,
        model: "modulus.Module",
        logger: Union[Logger, None] = None,
        use_graphs: bool = True,
        use_amp: bool = True,
        cuda_graph_warmup: int = 11,
        amp_type: Union[float16, bfloat16] = torch.float16,
        label: Optional[str] = None,
    ):
        # No optimizer and no gradient scaler (third positional False) since
        # this capture never performs a backward pass.
        super().__init__(
            model,
            None,
            logger,
            use_graphs,
            use_amp,
            False,
            cuda_graph_warmup,
            amp_type,
            label,
        )
        self.eval = True  # No optimizer/scaler calls
        self.no_grad = True  # No grad context and no grad zeroing
|
modulus-main
|
modulus/utils/capture.py
|
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
# we need those here:
from modulus.utils.sfno.metrics.weighted_acc_rmse import (
Quadrature,
lat_torch,
latitude_weighting_factor_torch,
l1_torch_local,
l1_torch_distributed,
weighted_rmse_torch_local,
weighted_rmse_torch_distributed,
weighted_acc_torch_local,
weighted_acc_torch_distributed,
)
# distributed computing stuff
from modulus.utils.sfno.distributed import comm
import torch.distributed as dist
from modulus.utils.sfno.distributed.mappings import gather_from_parallel_region
class MetricsHandler:
    """
    A class that handles metrics for model training and validation. It calculates
    metrics such as root mean square error (RMSE) and anomaly correlation
    coefficient (ACC) for specified variables.
    """

    # TODO not to hardcode channel indices
    def __init__(
        self,
        params,
        mult,
        clim,
        device,
        rmse_var_names=["u10m", "t2m", "u500", "z500", "r500"],
        acc_vars_names=["u10m", "t2m", "u500", "z500", "r500"],
        acc_auc_var_names=["u10m", "t2m", "u500", "z500", "r500"],
    ):  # pragma: no cover
        """Set up metric buffers, latitude weighting and distributed handles.

        Parameters
        ----------
        params : object
            Run configuration (channel names, image crop geometry, flags).
        mult : torch.Tensor
            Per-channel multiplier applied to the final RMSE values.
        clim : torch.Tensor
            Climatology used to compute ACC anomalies.
        device : torch.device
            Device on which metric buffers live.
        rmse_var_names, acc_vars_names, acc_auc_var_names : list of str
            Variables to report; entries absent from ``params.channel_names``
            are silently dropped. (The default lists are never mutated.)
        """
        self.device = device
        self.log_to_screen = params.log_to_screen
        self.log_to_wandb = params.log_to_wandb
        self.ifs_acc_path = params.ifs_acc_path
        self.channel_names = params.channel_names

        # dedicated stream used later for async device->host metric copies
        self.stream = torch.cuda.Stream()

        # select the vars which are actually present
        rmse_var_names = [x for x in rmse_var_names if x in self.channel_names]
        acc_vars_names = [x for x in acc_vars_names if x in self.channel_names]
        acc_auc_var_names = [x for x in acc_auc_var_names if x in self.channel_names]

        # now create an inverse mapping: variable name -> channel index
        rmse_vars = {
            var_name: self.channel_names.index(var_name) for var_name in rmse_var_names
        }
        acc_vars = {
            var_name: self.channel_names.index(var_name) for var_name in acc_vars_names
        }
        acc_auc_vars = {
            var_name: self.channel_names.index(var_name)
            for var_name in acc_auc_var_names
        }
        self.rmse_vars = rmse_vars
        self.acc_vars = acc_vars
        self.acc_auc_vars = acc_auc_vars

        self.split_data_channels = params.split_data_channels

        # get some stats: make data shared with tensor from the class
        self.mult = mult.to(self.device)

        # how many steps to run in acc curve
        self.valid_autoreg_steps = params.valid_autoreg_steps

        # quadrature used to integrate the ACC curve over autoregressive steps
        self.simpquad = Quadrature(
            self.valid_autoreg_steps,
            1.0 / float(self.valid_autoreg_steps + 1),
            self.device,
        )

        # climatology for autoregressive ACC (add batch dim)
        clim = torch.unsqueeze(clim, 0)
        self.clim = clim.to(self.device, dtype=torch.float32)

        matmul_comm_size = comm.get_size("matmul")

        # get global and local output channels
        self.N_out_channels = params.N_out_channels
        if self.split_data_channels:
            # ceil-divide channels across matmul-parallel ranks
            self.out_channels_local = (
                params.N_out_channels + matmul_comm_size - 1
            ) // matmul_comm_size

            # split channel-wise climatology by matmul parallel rank
            mprank = comm.get_rank("matmul")
            self.clim = torch.split(self.clim, self.out_channels_local, dim=1)[
                mprank
            ].contiguous()
        else:
            self.out_channels_local = params.N_out_channels

        # compute latitude weighting factor
        lat_full = lat_torch(
            torch.arange(start=0, end=params.img_crop_shape_x, device=self.device),
            params.img_crop_shape_x,
        )
        lat_norm = torch.sum(torch.cos(torch.deg2rad(lat_full)))
        # using the global weighting factor for now as we are gathering the
        # spatially distributed data before computing metrics
        lwf = latitude_weighting_factor_torch(lat_full, params.img_shape_x, lat_norm)[
            params.img_crop_offset_x : params.img_crop_offset_x
            + params.img_crop_shape_x
        ]
        self.latitude_weights = torch.reshape(lwf, (1, 1, -1, 1))

        self.img_shape = (params.img_crop_shape_x, params.img_crop_shape_y)

        # which metric compute handles do we need to use:
        if params.multigrid_mode == "distributed":
            self.metric_correction_factor = 1.0 / float(comm.get_size("matmul"))
            self.l1_handle = l1_torch_distributed
            self.weighted_rmse_handle = weighted_rmse_torch_distributed
            self.weighted_acc_handle = weighted_acc_torch_distributed
        else:
            self.metric_correction_factor = 1.0
            self.l1_handle = l1_torch_local
            self.weighted_rmse_handle = weighted_rmse_torch_local
            self.weighted_acc_handle = weighted_acc_torch_local

        # gather spatially-split inputs only when h/w model parallelism is on
        self.do_gather_input = False
        if comm.get_size("h") * comm.get_size("w") > 1:
            self.do_gather_input = True

    @torch.jit.ignore
    def _gather_input(self, x: torch.Tensor) -> torch.Tensor:  # pragma: no cover
        """helper that gathers data from spatially distributed regions"""
        # combine data: first along h, then along w
        xh = gather_from_parallel_region(x, -2, "h")
        xw = gather_from_parallel_region(xh, -1, "w")
        # crop away any padding introduced by the parallel split
        x = xw[:, :, : self.img_shape[0], : self.img_shape[1]]
        return x

    def initialize_buffers(self):  # pragma: no cover
        """Initialize buffers for the validation metrics"""
        # packed buffer: [loss, l1, step count] — views below alias its storage
        self.valid_buffer = torch.zeros((3), dtype=torch.float32, device=self.device)
        self.valid_loss = self.valid_buffer[0].view(-1)
        self.valid_l1 = self.valid_buffer[1].view(-1)
        self.valid_steps = self.valid_buffer[2].view(-1)

        # per-channel RMSE accumulator
        self.valid_weighted_rmse = torch.zeros(
            (self.out_channels_local), dtype=torch.float32, device=self.device
        )
        # ACC per channel and autoregressive step, plus per-step sample counts
        self.acc_curve = torch.zeros(
            (self.out_channels_local, self.valid_autoreg_steps + 1),
            dtype=torch.float32,
            device=self.device,
        )
        self.acc_counter = torch.zeros(
            (self.valid_autoreg_steps + 1), dtype=torch.float32, device=self.device
        )

        # create pinned CPU copies for async device->host transfers
        self.valid_buffer_cpu = torch.zeros(
            (3), dtype=torch.float32, device="cpu"
        ).pin_memory()
        self.valid_weighted_rmse_cpu = torch.zeros(
            (self.out_channels_local), dtype=torch.float32, device="cpu"
        ).pin_memory()
        self.acc_curve_cpu = torch.zeros(
            (self.out_channels_local, self.valid_autoreg_steps + 1),
            dtype=torch.float32,
            device="cpu",
        ).pin_memory()
        self.acc_auc_cpu = torch.zeros(
            (self.out_channels_local), dtype=torch.float32, device="cpu"
        ).pin_memory()

    def zero_buffers(self):  # pragma: no cover
        """Helper that zeros out buffers"""
        with torch.inference_mode():
            with torch.no_grad():
                self.valid_buffer.fill_(0)
                self.valid_weighted_rmse.fill_(0)
                self.acc_curve.fill_(0)
                self.acc_counter.fill_(0)
        return

    def update(self, prediction, target, loss, idt):  # pragma: no cover
        """Updates the validation metrics with the given prediction and target.

        ``idt`` is the autoregressive step index; loss/L1/RMSE are accumulated
        only on the first step, the ACC curve on every step.
        """
        if self.do_gather_input:
            prediction = self._gather_input(prediction)
            target = self._gather_input(target)

        # store values for rmse:
        rmse_prediction = prediction
        rmse_target = target

        # accumulate ACC of the climatology anomalies at this step
        self.acc_curve[:, idt] += (
            self.weighted_acc_handle(
                prediction - self.clim, target - self.clim, self.latitude_weights
            )
            * self.metric_correction_factor
        )
        self.acc_counter[idt] += 1

        if idt == 0:
            self.valid_steps += 1.0
            self.valid_loss += loss
            self.valid_l1 += (
                self.l1_handle(prediction, target) * self.metric_correction_factor
            )
            self.valid_weighted_rmse += (
                self.weighted_rmse_handle(
                    rmse_prediction, rmse_target, self.latitude_weights
                )
                * self.metric_correction_factor
            )
        return

    def finalize(self, final_inference=False):  # pragma: no cover
        """
        Finalizes the validation metrics after a validation run. It gathers the metrics
        across different processes, computes the final metrics, and prepares the logs.

        Returns the log dict, plus the full ACC curve when ``final_inference`` is set.
        """
        # make sure all ranks are done writing their local buffers
        if dist.is_initialized():
            dist.barrier(device_ids=[self.device.index])

        with torch.no_grad():
            valid_steps_local = int(self.valid_steps.item())

            if dist.is_initialized():
                dist.all_reduce(
                    self.valid_buffer,
                    op=dist.ReduceOp.SUM,
                    group=comm.get_group("data"),
                )
                dist.all_reduce(
                    self.valid_weighted_rmse,
                    op=dist.ReduceOp.SUM,
                    group=comm.get_group("data"),
                )
                dist.all_reduce(
                    self.acc_curve, op=dist.ReduceOp.SUM, group=comm.get_group("data")
                )
                dist.all_reduce(
                    self.acc_counter, op=dist.ReduceOp.SUM, group=comm.get_group("data")
                )

            # gather from matmul parallel ranks
            if self.split_data_channels:
                # gather rmse
                valid_weighted_rmse_list = torch.split(
                    torch.zeros(
                        (self.N_out_channels), dtype=torch.float32, device=self.device
                    ),
                    self.out_channels_local,
                    dim=0,
                )
                valid_weighted_rmse_list = [
                    x.contiguous() for x in valid_weighted_rmse_list
                ]
                valid_weighted_rmse_list[
                    comm.get_rank("matmul")
                ] = self.valid_weighted_rmse
                dist.all_gather(
                    valid_weighted_rmse_list,
                    self.valid_weighted_rmse,
                    group=comm.get_group("matmul"),
                )
                self.valid_weighted_rmse = torch.cat(valid_weighted_rmse_list, dim=0)

                # we need to reduce the l1 loss as well, since this is not encoded
                # in the loss obj.
                # BUGFIX: this previously referenced the undefined bare name
                # `valid_l1`, raising a NameError whenever this path executed.
                dist.all_reduce(
                    self.valid_l1, op=dist.ReduceOp.AVG, group=comm.get_group("matmul")
                )

                # gather acc curves
                acc_curve_list = torch.split(
                    torch.zeros(
                        (self.N_out_channels, self.valid_autoreg_steps + 1),
                        dtype=torch.float32,
                        device=self.device,
                    ),
                    self.out_channels_local,
                    dim=0,
                )
                acc_curve_list = [x.contiguous() for x in acc_curve_list]
                acc_curve_list[comm.get_rank("matmul")] = self.acc_curve
                dist.all_gather(
                    acc_curve_list, self.acc_curve, group=comm.get_group("matmul")
                )
                self.acc_curve = torch.cat(acc_curve_list, dim=0)

            # divide by number of steps (element 2 of the packed buffer)
            self.valid_buffer[0:2] = self.valid_buffer[0:2] / self.valid_buffer[2]
            self.valid_weighted_rmse = (
                self.mult * self.valid_weighted_rmse / self.valid_buffer[2]
            )

            # average autoregressive ACC values by their per-step sample counts
            self.acc_curve /= self.acc_counter

            # compute area under the ACC curve
            acc_auc = self.simpquad(self.acc_curve, dim=1)

            # copy buffers to cpu: sync copy stream on the compute stream first
            self.stream.wait_stream(torch.cuda.current_stream())
            # schedule async copies into pinned host buffers
            with torch.cuda.stream(self.stream):
                self.valid_buffer_cpu.copy_(self.valid_buffer, non_blocking=True)
                self.valid_weighted_rmse_cpu.copy_(
                    self.valid_weighted_rmse, non_blocking=True
                )
                self.acc_curve_cpu.copy_(self.acc_curve, non_blocking=True)
                self.acc_auc_cpu.copy_(acc_auc, non_blocking=True)
            # wait for the copies to land before touching the host buffers
            self.stream.synchronize()

        # prepare logs with the minimum content
        valid_buffer_arr = self.valid_buffer_cpu.numpy()
        logs = {
            "base": {
                "validation steps": valid_steps_local,
                "validation loss": valid_buffer_arr[0],
                "validation L1": valid_buffer_arr[1],
            },
            "metrics": {},
        }

        valid_weighted_rmse_arr = self.valid_weighted_rmse_cpu.numpy()
        for var_name, var_idx in self.rmse_vars.items():
            logs["metrics"]["validation " + var_name] = valid_weighted_rmse_arr[
                var_idx
            ]

        acc_curve_arr = self.acc_curve_cpu.numpy()
        for var_name, var_idx in self.acc_vars.items():
            logs["metrics"]["ACC time " + var_name] = acc_curve_arr[
                var_idx, self.valid_autoreg_steps
            ]

        acc_auc_arr = self.acc_auc_cpu.numpy()
        for var_name, var_idx in self.acc_auc_vars.items():
            logs["metrics"]["ACC AUC " + var_name] = acc_auc_arr[var_idx]

        self.logs = logs

        if final_inference:
            return logs, self.acc_curve
        else:
            return logs
|
modulus-main
|
modulus/utils/sfno/metric.py
|
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Optional, Tuple
import numpy as np
import torch
from torch import nn
import torch.nn.functional as F
from modulus.utils.sfno.distributed import comm
from modulus.utils.sfno.distributed.mappings import gather_from_parallel_region
import torch_harmonics as harmonics
from torch_harmonics.quadrature import clenshaw_curtiss_weights
class LossHandler(nn.Module):
    """
    Wrapper class that will handle computing losses.

    The loss to use is selected by parsing ``params.loss`` as a sequence of
    space-separated prefixes, consumed left to right:
    ``[pole-masked] [weighted] [absolute] [squared] [temp-std] <base loss>``
    where the base loss is one of ``l2``, ``l1``, ``geometric l2``,
    ``geometric l1``, ``geometric h1``.
    """

    def __init__(self, params, img_size=(720, 1440), d=2):  # pragma: no cover
        # NOTE: `img_size` and `d` are currently unused; the image shape is
        # taken from `params` below.
        super(LossHandler, self).__init__()

        self.rank = comm.get_rank("matmul")
        self.n_future = params.n_future

        # TODO: allow for crop offset, otherwise the weighting will not be correct
        self.img_shape = (params.img_crop_shape_x, params.img_crop_shape_y)

        loss_type = self.loss_type = params.loss

        # "pole-masked" is 11 chars; [12:] also drops the following separator
        if loss_type[:11] == "pole-masked":
            pole_mask = 1
            loss_type = loss_type[12:]
        else:
            pole_mask = 0

        if loss_type[:8] == "weighted":
            if params.channel_weights == "auto":
                # heuristic per-channel weights by variable kind / pressure level
                channel_weights = torch.ones(params.N_out_channels, dtype=torch.float32)
                for c, chn in enumerate(params.channel_names):
                    if chn in ["u10m", "v10m", "u100m", "v100m", "sp", "msl", "tcwv"]:
                        channel_weights[c] = 0.1
                    elif chn in ["t2m", "2d"]:
                        channel_weights[c] = 1.0
                    elif chn[0] in ["z", "u", "v", "t", "r", "q"]:
                        # e.g. "z500" -> pressure level 500
                        pressure_level = float(chn[1:])
                        channel_weights[c] = 0.001 * pressure_level
                    else:
                        channel_weights[c] = 0.01
            else:
                channel_weights = torch.Tensor(params.channel_weights).float()
            loss_type = loss_type[9:]
        else:
            channel_weights = torch.ones(params.N_out_channels, dtype=torch.float32)

        # renormalize the weights to one
        channel_weights = channel_weights.reshape(1, -1, 1, 1)
        channel_weights = channel_weights / torch.sum(channel_weights)

        if loss_type[:8] == "absolute":
            absolute = True
            loss_type = loss_type[9:]
        else:
            absolute = False

        if loss_type[:7] == "squared":
            squared = True
            loss_type = loss_type[8:]
        else:
            squared = False

        if loss_type[:8] == "temp-std":
            # rescale channel weights by global std / temporal-difference std
            eps = 1e-6
            global_stds = torch.from_numpy(np.load(params.global_stds_path)).reshape(
                1, -1, 1, 1
            )[:, params.in_channels]
            time_diff_stds = torch.from_numpy(
                np.load(params.time_diff_stds_path)
            ).reshape(1, -1, 1, 1)[:, params.in_channels]
            time_var_weights = global_stds / (time_diff_stds + eps)
            # time_var_weights = 1 / (time_diff_stds+eps)
            if squared:
                time_var_weights = time_var_weights**2
            channel_weights = channel_weights * time_var_weights
            loss_type = loss_type[9:]

        self.register_buffer("channel_weights", channel_weights)

        # TODO: clean this up and replace it with string parsing to set the parameters
        if loss_type == "l2":
            self.loss_obj = GeometricLpLoss(
                self.img_shape,
                p=2,
                absolute=absolute,
                pole_mask=pole_mask,
                jacobian="flat",
            )
        elif loss_type == "l1":
            self.loss_obj = GeometricLpLoss(
                self.img_shape,
                p=1,
                absolute=absolute,
                pole_mask=pole_mask,
                jacobian="flat",
            )
        elif loss_type == "geometric l2":
            self.loss_obj = GeometricLpLoss(
                self.img_shape,
                p=2,
                absolute=absolute,
                squared=squared,
                pole_mask=pole_mask,
            )
        elif loss_type == "geometric l1":
            self.loss_obj = GeometricLpLoss(
                self.img_shape, p=1, absolute=absolute, pole_mask=pole_mask
            )
        elif loss_type == "geometric h1":
            self.loss_obj = GeometricH1Loss(
                self.img_shape, absolute=absolute, squared=squared
            )
        else:
            raise ValueError(f"Unknown loss function: {loss_type}")

        # weighting factor for the case of multistep training
        # TODO change hardcoded weighting
        multistep_weight = torch.arange(1, self.n_future + 2, dtype=torch.float32)
        multistep_weight = multistep_weight / torch.sum(multistep_weight)
        multistep_weight = multistep_weight.reshape(-1, 1, 1, 1)
        self.register_buffer("multistep_weight", multistep_weight)

        # # decide whether to gather the input
        self.do_gather_input = False
        if comm.get_size("h") * comm.get_size("w") > 1:
            self.do_gather_input = True

    @torch.jit.ignore
    def _gather_input(self, x: torch.Tensor) -> torch.Tensor:  # pragma: no cover
        """Gather spatially-distributed data (h then w) and crop to img_shape."""
        # combine data
        xh = gather_from_parallel_region(x, -2, "h")
        xw = gather_from_parallel_region(xh, -1, "w")
        # crop
        x = xw[:, :, : self.img_shape[0], : self.img_shape[1]]
        return x

    def is_distributed(self):  # pragma: no cover
        """Returns whether the loss is distributed or not (always False)"""
        return False

    def forward(
        self, prd: torch.Tensor, tar: torch.Tensor, inp: torch.Tensor
    ):  # pragma: no cover
        # NOTE: `inp` is accepted but not used by this loss.
        if self.do_gather_input:
            prd = self._gather_input(prd)
            tar = self._gather_input(tar)

        # NOTE(review): no code path in this file ever sets `self.minmax`;
        # this branch appears to be an optional external override — confirm.
        if hasattr(self, "minmax"):
            chw = torch.ones_like(self.channel_weights)
            chw = chw / torch.sum(chw)
            chw += self.channel_weights.abs() / torch.sum(self.channel_weights.abs())
        else:
            chw = self.channel_weights

        if self.training:
            # fold the multistep weighting into the channel weights
            chw = (chw * self.multistep_weight).reshape(1, -1, 1, 1)
        else:
            chw = chw

        return self.loss_obj(prd, tar, chw)
# double check if polar optimization has an effect - we use 5 here by default
class GeometricLpLoss(nn.Module):
    """Geometric Lp loss on a lat/lon grid.

    Integrates ``|prd - tar|**p`` against a quadrature weight over the sphere
    (or a flat rectangle when ``jacobian="flat"``), optionally normalized by
    the target norm (relative loss) and weighted per channel.

    Parameters
    ----------
    img_size : Tuple[int, int]
        (n_lat, n_lon) grid shape.
    p : float, optional
        Order of the norm, by default 2.0.
    size_average : bool, optional
        Average instead of sum when reducing, by default False.
    reduction : bool, optional
        Reduce to a scalar, by default True.
    absolute : bool, optional
        Absolute (True) vs relative (False) loss, by default False.
    squared : bool, optional
        Skip the final 1/p root, by default False.
    pole_mask : int, optional
        Number of rows to mask out at each pole, by default 0.
    jacobian : str, optional
        "s2" for the sin(theta) sphere jacobian, anything else for flat.
    quadrature_rule : str, optional
        "naive" or "clenshaw-curtiss".
    """

    def __init__(
        self,
        img_size: Tuple[int, int],
        p: Optional[float] = 2.0,
        size_average: Optional[bool] = False,
        reduction: Optional[bool] = True,
        absolute: Optional[bool] = False,
        squared: Optional[bool] = False,
        pole_mask: Optional[int] = 0,
        jacobian: Optional[str] = "s2",
        quadrature_rule: Optional[str] = "naive",
    ):  # pragma: no cover
        super(GeometricLpLoss, self).__init__()
        self.p = p
        self.img_size = img_size
        self.reduction = reduction
        self.size_average = size_average
        self.absolute = absolute
        self.squared = squared
        self.pole_mask = pole_mask

        if jacobian == "s2":
            # sphere surface element ~ sin(theta)
            jacobian = torch.sin(
                torch.linspace(0, torch.pi, self.img_size[0])
            ).unsqueeze(1)
        else:
            jacobian = torch.ones(self.img_size[0], 1)

        if quadrature_rule == "naive":
            dtheta = torch.pi / self.img_size[0]
            dlambda = 2 * torch.pi / self.img_size[1]
            dA = dlambda * dtheta
            quad_weight = dA * jacobian
        elif quadrature_rule == "clenshaw-curtiss":
            # previously the returned nodes were bound to an unused `cost`
            # local and `w` was converted to a tensor twice; cleaned up.
            _, weights = clenshaw_curtiss_weights(self.img_size[0], -1, 1)
            dlambda = 2 * torch.pi / self.img_size[1]
            quad_weight = dlambda * torch.from_numpy(weights).unsqueeze(-1)
        else:
            raise ValueError(f"Unknown quadrature rule {quadrature_rule}")

        self.register_buffer("quad_weight", quad_weight)

    def abs(
        self, prd: torch.Tensor, tar: torch.Tensor, chw: torch.Tensor
    ):  # pragma: no cover
        """Computes the absolute loss"""
        num_examples = prd.size()[0]
        if self.pole_mask:
            # exclude `pole_mask` rows at each pole from the integral
            all_norms = torch.sum(
                torch.abs(
                    prd[..., self.pole_mask : -self.pole_mask, :]
                    - tar[..., self.pole_mask : -self.pole_mask, :]
                )
                ** self.p
                * self.quad_weight[..., self.pole_mask : -self.pole_mask, :],
                dim=(-2, -1),
            )
        else:
            all_norms = torch.sum(
                torch.abs(prd - tar) ** self.p * self.quad_weight,
                dim=(-2, -1),
            )

        all_norms = all_norms.reshape(num_examples, -1).sum()

        if not self.squared:
            all_norms = all_norms ** (1 / self.p)

        # apply channel weighting
        all_norms = chw.reshape(1, -1) * all_norms

        if self.reduction:
            if self.size_average:
                return torch.mean(all_norms)
            else:
                return torch.sum(all_norms)

        return all_norms

    def rel(
        self,
        prd: torch.Tensor,
        tar: torch.Tensor,
        chw: torch.Tensor,
        mask: Optional[torch.Tensor] = None,
    ):  # pragma: no cover
        """Computes the relative loss (difference norm over target norm)"""
        num_examples = prd.size()[0]
        if self.pole_mask:
            diff_norms = torch.sum(
                torch.abs(
                    prd[..., self.pole_mask : -self.pole_mask, :]
                    - tar[..., self.pole_mask : -self.pole_mask, :]
                )
                ** self.p
                * self.quad_weight[..., self.pole_mask : -self.pole_mask, :],
                dim=(-2, -1),
            )
        else:
            diff_norms = torch.sum(
                torch.abs(prd - tar) ** self.p * self.quad_weight, dim=(-2, -1)
            )
        diff_norms = diff_norms.reshape(num_examples, -1)

        # note: target norm is always taken over the full (unmasked) grid
        tar_norms = torch.sum(torch.abs(tar) ** self.p * self.quad_weight, dim=(-2, -1))
        tar_norms = tar_norms.reshape(num_examples, -1)

        if not self.squared:
            diff_norms = diff_norms ** (1 / self.p)
            tar_norms = tar_norms ** (1 / self.p)

        # setup return value
        retval = chw.reshape(1, -1) * (diff_norms / tar_norms)
        if mask is not None:
            retval = retval * mask

        if self.reduction:
            if self.size_average:
                if mask is None:
                    retval = torch.mean(retval)
                else:
                    retval = torch.sum(retval) / torch.sum(mask)
            else:
                retval = torch.sum(retval)

        return retval

    def forward(
        self,
        prd: torch.Tensor,
        tar: torch.Tensor,
        chw: torch.Tensor,
        mask: Optional[torch.Tensor] = None,
    ):  # pragma: no cover
        """Dispatch to the absolute or relative loss; mask only affects rel."""
        if self.absolute:
            loss = self.abs(prd, tar, chw)
        else:
            loss = self.rel(prd, tar, chw, mask)

        return loss
# double check if polar optimization has an effect - we use 5 here by default
class GeometricH1Loss(nn.Module):
    """Geometric H1 loss.

    Computes a blend of an L2-type and an H1-type norm of ``prd - tar`` in
    spherical-harmonic space: the H1 part weights each degree l by l*(l+1).
    ``alpha`` interpolates between the two parts.

    NOTE: the ``p`` argument is accepted for signature parity with
    GeometricLpLoss but is not used by this loss.
    """

    def __init__(
        self,
        img_size: Tuple[int, int],
        p: Optional[float] = 2.0,
        size_average: Optional[bool] = False,
        reduction: Optional[bool] = True,
        absolute: Optional[bool] = False,
        squared: Optional[bool] = False,
        alpha: Optional[float] = 0.5,
    ):  # pragma: no cover
        super(GeometricH1Loss, self).__init__()
        self.reduction = reduction
        self.size_average = size_average
        self.absolute = absolute
        self.squared = squared
        self.alpha = alpha

        # real spherical harmonic transform on an equiangular grid
        self.sht = harmonics.RealSHT(*img_size, grid="equiangular").float()
        # H1 weights: l*(l+1) per spherical-harmonic degree l
        h1_weights = torch.arange(self.sht.lmax).float()
        h1_weights = h1_weights * (h1_weights + 1)
        self.register_buffer("h1_weights", h1_weights)

    def abs(self, prd: torch.Tensor, tar: torch.Tensor):  # pragma: no cover
        """Computes the absolute loss"""
        num_examples = prd.size()[0]

        # squared magnitudes of the SHT coefficients of the difference
        coeffs = torch.view_as_real(self.sht(prd - tar))
        coeffs = coeffs[..., 0] ** 2 + coeffs[..., 1] ** 2
        # m=0 counted once, m>0 modes twice (real transform stores m>=0 only)
        norm2 = coeffs[..., :, 0] + 2 * torch.sum(coeffs[..., :, 1:], dim=-1)
        l2_norm2 = norm2.reshape(num_examples, -1).sum(dim=-1)
        h1_norm2 = (norm2 * self.h1_weights).reshape(num_examples, -1).sum(dim=-1)

        if not self.squared:
            all_norms = self.alpha * torch.sqrt(l2_norm2) + (
                1 - self.alpha
            ) * torch.sqrt(h1_norm2)
        else:
            all_norms = self.alpha * l2_norm2 + (1 - self.alpha) * h1_norm2

        if self.reduction:
            if self.size_average:
                return torch.mean(all_norms)
            else:
                return torch.sum(all_norms)

        return all_norms

    def rel(
        self, prd: torch.Tensor, tar: torch.Tensor, mask: Optional[torch.Tensor] = None
    ):  # pragma: no cover
        """Computes the relative loss (difference norm over target norm)"""
        num_examples = prd.size()[0]

        coeffs = torch.view_as_real(self.sht(prd - tar))
        coeffs = coeffs[..., 0] ** 2 + coeffs[..., 1] ** 2
        norm2 = coeffs[..., :, 0] + 2 * torch.sum(coeffs[..., :, 1:], dim=-1)
        l2_norm2 = norm2.reshape(num_examples, -1).sum(dim=-1)
        h1_norm2 = (norm2 * self.h1_weights).reshape(num_examples, -1).sum(dim=-1)

        # same spectral norms for the target, used as the normalizer
        tar_coeffs = torch.view_as_real(self.sht(tar))
        tar_coeffs = tar_coeffs[..., 0] ** 2 + tar_coeffs[..., 1] ** 2
        tar_norm2 = tar_coeffs[..., :, 0] + 2 * torch.sum(
            tar_coeffs[..., :, 1:], dim=-1
        )
        tar_l2_norm2 = tar_norm2.reshape(num_examples, -1).sum(dim=-1)
        tar_h1_norm2 = (
            (tar_norm2 * self.h1_weights).reshape(num_examples, -1).sum(dim=-1)
        )

        if not self.squared:
            diff_norms = self.alpha * torch.sqrt(l2_norm2) + (
                1 - self.alpha
            ) * torch.sqrt(h1_norm2)
            tar_norms = self.alpha * torch.sqrt(tar_l2_norm2) + (
                1 - self.alpha
            ) * torch.sqrt(tar_h1_norm2)
        else:
            diff_norms = self.alpha * l2_norm2 + (1 - self.alpha) * h1_norm2
            tar_norms = self.alpha * tar_l2_norm2 + (1 - self.alpha) * tar_h1_norm2

        # setup return value
        retval = diff_norms / tar_norms
        if mask is not None:
            retval = retval * mask

        if self.reduction:
            if self.size_average:
                if mask is None:
                    retval = torch.mean(retval)
                else:
                    retval = torch.sum(retval) / torch.sum(mask)
            else:
                retval = torch.sum(retval)

        return retval

    def forward(
        self, prd: torch.Tensor, tar: torch.Tensor, mask: Optional[torch.Tensor] = None
    ):  # pragma: no cover
        # NOTE: unlike GeometricLpLoss, this loss takes no channel weights.
        if self.absolute:
            loss = self.abs(prd, tar)
        else:
            loss = self.rel(prd, tar, mask)

        return loss
|
modulus-main
|
modulus/utils/sfno/loss.py
|
# ignore_header_test
# climt/LICENSE
# @mcgibbon
# BSD License
# Copyright (c) 2016, Rodrigo Caballero
# All rights reserved.
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice, this
# list of conditions and the following disclaimer in the documentation and/or
# other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from this
# software without specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
# INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
# OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
# OF THE POSSIBILITY OF SUCH DAMAGE.
import datetime
import numpy as np
from typing import Union, TypeVar
# helper type
dtype = np.float32
T = TypeVar("T", np.ndarray, float)
def _ensure_units_of_degrees(da): # pragma: no cover
"""Ensure that the units of the DataArray are in degrees."""
units = da.attrs.get("units", "").lower()
if "rad" in units:
return np.rad2deg(da).assign_attrs(units="degrees")
else:
return da
def cos_zenith_angle(
    time: Union[T, datetime.datetime],
    lon: T,
    lat: T,
) -> T:  # pragma: no cover
    """
    Cosine of sun-zenith angle for lon, lat at time (UTC).
    If DataArrays are provided for the lat and lon arguments, their units will
    be assumed to be in degrees, unless they have a units attribute that
    contains "rad"; in that case they will automatically be converted to having
    units of degrees.
    Args:
        time: time in UTC
        lon: float or np.ndarray in degrees (E/W)
        lat: float or np.ndarray in degrees (N/S)
    Returns:
        float, np.ndarray
    Example:
    --------
    >>> model_time = datetime.datetime(2002, 1, 1, 12, 0, 0)
    >>> angle = cos_zenith_angle(model_time, lat=360, lon=120)
    >>> abs(angle - -0.447817277) < 1e-6
    True
    """
    # convert the angular inputs to radians before delegating
    return _star_cos_zenith(
        time,
        np.deg2rad(lon, dtype=dtype),
        np.deg2rad(lat, dtype=dtype),
    )
def _days_from_2000(model_time): # pragma: no cover
"""Get the days since year 2000.
Example:
--------
>>> model_time = datetime.datetime(2002, 1, 1, 12, 0, 0)
>>> _days_from_2000(model_time)
731.0
"""
date_type = type(np.asarray(model_time).ravel()[0])
if date_type not in [datetime.datetime]:
raise ValueError(
f"model_time has an invalid date type. It must be "
f"datetime.datetime. Got {date_type}."
)
return _total_days(model_time - date_type(2000, 1, 1, 12, 0))
def _total_days(time_diff): # pragma: no cover
"""
Total time in units of days
"""
return np.asarray(time_diff).astype("timedelta64[us]") / np.timedelta64(1, "D")
def _greenwich_mean_sidereal_time(model_time):  # pragma: no cover
    """
    Greenwich mean sidereal time, in radians.
    Reference:
        The AIAA 2006 implementation:
        http://www.celestrak.com/publications/AIAA/2006-6753/
    Example:
    --------
    >>> model_time = datetime.datetime(2002, 1, 1, 12, 0, 0)
    >>> g_time = _greenwich_mean_sidereal_time(model_time)
    >>> abs(g_time - 4.903831411) < 1e-8
    True
    """
    jul_centuries = _days_from_2000(model_time) / 36525.0
    # GMST polynomial (seconds of time), AIAA 2006-6753 / Vallado:
    #   67310.54841 + (876600h*3600 + 8640184.812866)*T + 0.093104*T^2 - 6.2e-6*T^3
    # BUGFIX: the cubic coefficient was written "6.2 * 10e-6" (= 6.2e-5), a
    # factor-of-ten typo relative to the published -6.2e-6 coefficient. The
    # numerical impact is far below the doctest tolerance above.
    theta = 67310.54841 + jul_centuries * (
        876600 * 3600
        + 8640184.812866
        + jul_centuries * (0.093104 - jul_centuries * 6.2e-6)
    )
    # convert seconds of time to degrees (divide by 240) and wrap to [0, 2*pi)
    theta_radians = np.deg2rad(theta / 240.0) % (2 * np.pi)
    return theta_radians
def _local_mean_sidereal_time(model_time, longitude):  # pragma: no cover
    """
    Local mean sidereal time. requires longitude in radians.
    Ref:
        http://www.setileague.org/askdr/lmst.htm
    Example:
    --------
    >>> model_time = datetime.datetime(2002, 1, 1, 12, 0, 0)
    >>> l_time = _local_mean_sidereal_time(model_time, np.deg2rad(90))
    >>> abs(l_time - 6.474627737) < 1e-8
    True
    """
    # LMST is simply GMST shifted by the (east-positive) longitude
    gmst = _greenwich_mean_sidereal_time(model_time)
    return gmst + longitude
def _sun_ecliptic_longitude(model_time):  # pragma: no cover
    """
    Ecliptic longitude of the sun.
    Reference:
        http://www.geoastro.de/elevaz/basics/meeus.htm
    Example:
    --------
    >>> model_time = datetime.datetime(2002, 1, 1, 12, 0, 0)
    >>> lon = _sun_ecliptic_longitude(model_time)
    >>> abs(lon - 17.469114444) < 1e-8
    True
    """
    jc = _days_from_2000(model_time) / 36525.0
    # mean anomaly of the sun (radians)
    mean_anom = np.deg2rad(
        357.52910
        + 35999.05030 * jc
        - 0.0001559 * jc * jc
        - 0.00000048 * jc * jc * jc
    )
    # mean longitude of the sun (radians)
    mean_lon = np.deg2rad(
        280.46645 + 36000.76983 * jc + 0.0003032 * (jc**2)
    )
    # equation-of-center correction
    center = np.deg2rad(
        (1.914600 - 0.004817 * jc - 0.000014 * (jc**2)) * np.sin(mean_anom)
        + (0.019993 - 0.000101 * jc) * np.sin(2 * mean_anom)
        + 0.000290 * np.sin(3 * mean_anom)
    )
    # true longitude = mean longitude + equation of center
    return mean_lon + center
def _obliquity_star(julian_centuries): # pragma: no cover
"""
return obliquity of the sun
Use 5th order equation from
https://en.wikipedia.org/wiki/Ecliptic#Obliquity_of_the_ecliptic
Example:
--------
>>> model_time = datetime.datetime(2002, 1, 1, 12, 0, 0)
>>> julian_centuries = _days_from_2000(model_time) / 36525.0
>>> obl = _obliquity_star(julian_centuries)
>>> abs(obl - 0.409088056) < 1e-8
True
"""
return np.deg2rad(
23.0
+ 26.0 / 60
+ 21.406 / 3600.0
- (
46.836769 * julian_centuries
- 0.0001831 * (julian_centuries**2)
+ 0.00200340 * (julian_centuries**3)
- 0.576e-6 * (julian_centuries**4)
- 4.34e-8 * (julian_centuries**5)
)
/ 3600.0
)
def _right_ascension_declination(model_time):  # pragma: no cover
    """
    Right ascension and declination of the sun.
    Ref:
        http://www.geoastro.de/elevaz/basics/meeus.htm
    Example:
    --------
    >>> model_time = datetime.datetime(2002, 1, 1, 12, 0, 0)
    >>> out1, out2 = _right_ascension_declination(model_time)
    >>> abs(out1 - -1.363787213) < 1e-8
    True
    >>> abs(out2 - -0.401270126) < 1e-8
    True
    """
    jc = _days_from_2000(model_time) / 36525.0
    obliquity = _obliquity_star(jc)
    ecl_lon = _sun_ecliptic_longitude(model_time)
    # unit direction of the sun rotated into equatorial coordinates
    x = np.cos(ecl_lon)
    y = np.cos(obliquity) * np.sin(ecl_lon)
    z = np.sin(obliquity) * np.sin(ecl_lon)
    r = np.sqrt(1.0 - z * z)
    # sun declination
    declination = np.arctan2(z, r)
    # right ascension (half-angle form avoids quadrant issues)
    right_ascension = 2 * np.arctan2(y, (x + r))
    return right_ascension, declination
def _local_hour_angle(model_time, longitude, right_ascension):  # pragma: no cover
    """
    Hour angle at model_time for the given longitude and right_ascension
    longitude in radians
    Ref:
        https://en.wikipedia.org/wiki/Hour_angle#Relation_with_the_right_ascension
    """
    # hour angle = local sidereal time minus right ascension
    lmst = _local_mean_sidereal_time(model_time, longitude)
    return lmst - right_ascension
def _star_cos_zenith(model_time, lon, lat):  # pragma: no cover
    """
    Return cosine of star zenith angle
    lon,lat in radians
    Ref:
        Azimuth:
            https://en.wikipedia.org/wiki/Solar_azimuth_angle#Formulas
        Zenith:
            https://en.wikipedia.org/wiki/Solar_zenith_angle
    """
    ra, dec = _right_ascension_declination(model_time)
    hour_angle = _local_hour_angle(model_time, lon, ra)
    # spherical law of cosines for the solar zenith angle
    return np.sin(lat) * np.sin(dec) + np.cos(lat) * np.cos(dec) * np.cos(
        hour_angle
    )
|
modulus-main
|
modulus/utils/sfno/zenith_angle.py
|
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from torch import nn
from torch.optim import lr_scheduler as lrs
from typing import List
class WarmupScheduler(lrs._LRScheduler):  # pragma: no cover
    """Scheduler with linear warmup.

    Wraps an existing scheduler: for the first ``num_warmup_steps`` steps the
    per-group learning rate ramps linearly from ``start_lr`` towards the
    wrapped scheduler's base learning rate; afterwards ``step`` is delegated
    to the wrapped scheduler.
    """

    def __init__(self, scheduler, num_warmup_steps, start_lr):
        """
        Parameters
        ----------
        scheduler : torch.optim.lr_scheduler._LRScheduler
            Scheduler to hand over to after warmup.
        num_warmup_steps : int
            Number of linear-warmup steps.
        start_lr : float or List[float]
            Starting learning rate(s); a scalar is wrapped into a list.
        """
        self.scheduler = scheduler
        self.num_warmup_steps = num_warmup_steps
        if not isinstance(start_lr, List):
            self.start_lrs = [start_lr]
        else:
            self.start_lrs = start_lr
        self.steps = 0
        # this is hacky but I don't see a better way of doing that
        self.end_lrs = self.scheduler.base_lrs
        # seed the optimizer with the warmup starting LRs
        for lr, group in zip(self.start_lrs, self.scheduler.optimizer.param_groups):
            group["lr"] = lr
        # self.scheduler.base_lrs = [group['initial_lr'] for group in self.scheduler.optimizer.param_groups]
        # init warmup scheduler:
        def linwarm(step, max_steps, slr, elr):
            # linear ramp of the LambdaLR multiplier from slr/elr up to 1.0
            if step <= max_steps:
                t = step / float(max_steps)
                res = t + (1 - t) * slr / elr
            else:
                res = 1.0
            return res

        # BUGFIX: bind slr/elr as lambda default arguments. The previous
        # ``lambda x: linwarm(x, ..., slr, elr)`` captured the comprehension
        # variables by reference (late-binding closure), so every param group
        # used the LAST (slr, elr) pair.
        self.warmup_scheduler = lrs.LambdaLR(
            self.scheduler.optimizer,
            lr_lambda=[
                lambda x, slr=slr, elr=elr: linwarm(
                    x, self.num_warmup_steps, slr, elr
                )
                for slr, elr in zip(self.start_lrs, self.end_lrs)
            ],
        )
        self._last_lr = [
            group["lr"] for group in self.warmup_scheduler.optimizer.param_groups
        ]

    def step(self):  # pragma: no cover
        """Scheduler step: warmup scheduler until warmup ends, then delegate."""
        shandle = (
            self.scheduler
            if self.steps >= self.num_warmup_steps
            else self.warmup_scheduler
        )
        shandle.step()
        self.steps += 1
        self._last_lr = [group["lr"] for group in shandle.optimizer.param_groups]

    def state_dict(self):  # pragma: no cover
        """Returns the scheduler's state dict."""
        state_dict = {
            key: value
            for key, value in self.__dict__.items()
            if key not in ("optimizer", "_schedulers")
        }
        state_dict["warmup_scheduler"] = self.warmup_scheduler.state_dict()
        state_dict["scheduler"] = self.scheduler.state_dict()
        return state_dict

    def load_state_dict(self, state_dict):  # pragma: no cover
        """Load the scheduler's state dict."""
        warmup_scheduler = state_dict.pop("warmup_scheduler")
        scheduler = state_dict.pop("scheduler")
        self.__dict__.update(state_dict)
        # Restore state_dict keys in order to prevent side effects
        # https://github.com/pytorch/pytorch/issues/32756
        state_dict["warmup_scheduler"] = warmup_scheduler
        state_dict["scheduler"] = scheduler
        self.warmup_scheduler.load_state_dict(warmup_scheduler)
        self.scheduler.load_state_dict(scheduler)
|
modulus-main
|
modulus/utils/sfno/warmup_scheduler.py
|
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ruamel.yaml import YAML
import logging
import json
class ParamsBase:
    """Dict-like parameter container.

    Items are mirrored as attributes, and attributes assigned after
    construction are tracked so :meth:`to_dict` can report them.
    """

    def __init__(self):
        self._original_attrs = None
        self.params = {}
        self._original_attrs = list(self.__dict__)

    def __getitem__(self, key):
        return self.params[key]

    def __setitem__(self, key, val):
        self.params[key] = val
        self.__setattr__(key, val)

    def __contains__(self, key):
        return key in self.params

    def get(self, key, default=None):
        """Return the value for ``key``, preferring attribute lookup."""
        if hasattr(self, key):
            return getattr(self, key)
        return self.params.get(key, default)

    def to_dict(self):
        """Merge stored params with attributes added since construction."""
        extra = {}
        for name, value in vars(self).items():
            if name not in self._original_attrs:
                extra[name] = value
        return {**self.params, **extra}

    @staticmethod
    def from_json(path: str) -> "ParamsBase":
        """Build a ``ParamsBase`` from the json file at ``path``."""
        with open(path) as fh:
            config = json.load(fh)
        instance = ParamsBase()
        instance.update_params(config)
        return instance

    def update_params(self, config):
        """Absorb ``config`` items; the string ``"None"`` maps to ``None``."""
        for name, value in config.items():
            if value == "None":
                value = None
            self.params[name] = value
            self.__setattr__(name, value)
class YParams(ParamsBase):
    """Open parameters stored with ``config_name`` in the yaml file ``yaml_filename``"""

    def __init__(self, yaml_filename, config_name, print_params=False):
        super().__init__()
        self._yaml_filename = yaml_filename
        self._config_name = config_name
        if print_params:
            print("------------------ Configuration ------------------")

        with open(yaml_filename) as _file:
            config = YAML().load(_file)[config_name]
        self.update_params(config)
        if print_params:
            for key, val in config.items():
                print(key, val)
            print("---------------------------------------------------")

    def log(self):
        """Log the parameters to the console"""
        logging.info("------------------ Configuration ------------------")
        logging.info("Configuration file: " + str(self._yaml_filename))
        logging.info("Configuration name: " + str(self._config_name))
        for key, val in self.to_dict().items():
            logging.info(str(key) + " " + str(val))
        logging.info("---------------------------------------------------")
|
modulus-main
|
modulus/utils/sfno/YParams.py
|
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import logging
# shared log-record layout for all handlers configured by this module
_format = "%(asctime)s - %(name)s - %(levelname)s - %(message)s"


def config_logger(log_level=logging.INFO):  # pragma: no cover
    """Apply the module-wide record format to the root logger.

    Parameters
    ----------
    log_level : int, optional
        Verbosity threshold, by default ``logging.INFO``.
    """
    logging.basicConfig(level=log_level, format=_format)
def log_to_file(
    logger_name=None, log_level=logging.INFO, log_filename="tensorflow.log"
):  # pragma: no cover
    """
    Attach a file handler writing to ``log_filename`` at the given log level.

    Parameters
    ----------
    logger_name : str, optional
        Logger to attach to; the root logger when ``None``.
    log_level : int, optional
        Level set on the file handler, by default ``logging.INFO``.
    log_filename : str, optional
        Destination file; missing parent directories are created.
    """
    # BUGFIX: for a bare filename (e.g. the default "tensorflow.log"),
    # os.path.dirname() returns "" and os.makedirs("") raises
    # FileNotFoundError — only create directories when there is a dirname.
    # exist_ok=True also removes the exists-then-make race.
    log_dir = os.path.dirname(log_filename)
    if log_dir:
        os.makedirs(log_dir, exist_ok=True)

    if logger_name is not None:
        log = logging.getLogger(logger_name)
    else:
        log = logging.getLogger()

    fh = logging.FileHandler(log_filename)
    fh.setLevel(log_level)
    fh.setFormatter(logging.Formatter(_format))
    log.addHandler(fh)
def log_versions():  # pragma: no cover
    """
    Log the versions of git and torch.
    """
    # imported lazily so the module itself does not require torch
    import torch

    logging.info("--------------- Versions ---------------")
    logging.info("Torch: " + str(torch.__version__))
    logging.info("----------------------------------------")
class disable_logging(object):
    """
    Context manager that temporarily disables logging at or below ``level``.

    Logging is suppressed on ``__enter__`` and restored on ``__exit__``.
    """

    def __init__(self, level=logging.ERROR):  # pragma: no cover
        """Remember the level up to which logging will be disabled."""
        # BUGFIX: previously logging.disable() was called here, so merely
        # constructing the object (without entering the context) silenced
        # logging; the call is deferred to __enter__.
        self.level = level

    def __enter__(self):  # pragma: no cover
        """Disable logging and enter the context."""
        logging.disable(level=self.level)
        return self

    def __exit__(self, type, value, traceback):  # pragma: no cover
        """Exit the context manager and enable logging."""
        logging.disable(level=logging.NOTSET)
|
modulus-main
|
modulus/utils/sfno/logging_utils.py
|
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
class PeriodicPad2d(nn.Module):
    """
    pad longitudinal (left-right) circular
    and pad latitude (top-bottom) with zeros
    """

    def __init__(self, pad_width):  # pragma: no cover
        super(PeriodicPad2d, self).__init__()
        self.pad_width = pad_width

    def forward(self, x):  # pragma: no cover
        # wrap the longitude (last) dimension around circularly
        padded = F.pad(x, (self.pad_width, self.pad_width, 0, 0), mode="circular")
        # zero-fill above and below in latitude (second-to-last dimension)
        padded = F.pad(
            padded, (0, 0, self.pad_width, self.pad_width), mode="constant", value=0
        )
        return padded
def reshape_fields(
    img,
    inp_or_tar,
    crop_size_x,
    crop_size_y,
    rnd_x,
    rnd_y,
    params,
    y_roll,
    train,
    normalize=True,
):  # pragma: no cover
    """
    Takes in np array of size (n_history+1, c, h, w) and returns torch tensor
    of size ((n_channels*(n_history+1), crop_size_x, crop_size_y)

    Parameters
    ----------
    img : np.ndarray
        Fields of shape (n_history+1, c, h, w); a 3D array gets a leading axis.
    inp_or_tar : str
        "inp" selects params.in_channels; anything else selects params.out_channels.
    crop_size_x, crop_size_y : int or None
        Crop extents; ``None`` means the full image extent.
    rnd_x, rnd_y : int
        Crop offsets, applied only when ``train`` and a crop size is set.
    params : object
        Config carrying channel lists, normalization stat file paths, and the
        normalization/add_grid/gridtype/roll options.
    y_roll : int
        Longitudinal roll applied when ``params.roll`` is set.
    train : bool
        Enables the random crop.
    normalize : bool, optional
        Apply min-max or z-score normalization in place, by default True.
    """
    if len(np.shape(img)) == 3:
        img = np.expand_dims(img, 0)

    # NOTE(review): hard-coded crop to 720 latitude rows (drops the last row,
    # presumably of a 721-row 0.25-degree ERA5 grid) — confirm for other grids.
    img = img[:, :, 0:720]  # remove last pixel
    n_history = np.shape(img)[0] - 1
    img_shape_x = np.shape(img)[-2]
    img_shape_y = np.shape(img)[-1]
    n_channels = np.shape(img)[1]  # this will either be N_in_channels or N_out_channels
    channels = params.in_channels if inp_or_tar == "inp" else params.out_channels
    # normalization statistics are (re)loaded from disk on every call
    mins = np.load(params.min_path)[:, channels]
    maxs = np.load(params.max_path)[:, channels]
    means = np.load(params.global_means_path)[:, channels]
    stds = np.load(params.global_stds_path)[:, channels]
    if crop_size_x == None:
        crop_size_x = img_shape_x
    if crop_size_y == None:
        crop_size_y = img_shape_y

    if normalize:
        # NOTE: in-place ops — the caller's array is mutated
        if params.normalization == "minmax":
            img -= mins
            img /= maxs - mins
        elif params.normalization == "zscore":
            img -= means
            img /= stds

    if params.add_grid:
        if inp_or_tar == "inp":
            if params.gridtype == "linear":
                assert (
                    params.n_grid_channels == 2
                ), "n_grid_channels must be set to 2 for gridtype linear"
                x = np.meshgrid(np.linspace(-1, 1, img_shape_x))
                y = np.meshgrid(np.linspace(-1, 1, img_shape_y))
                grid_x, grid_y = np.meshgrid(y, x)
                # NOTE(review): unlike the sinusoidal branch below, no leading
                # axis is added here before the concatenate — verify both
                # branches yield a (1, n_grid_channels, h, w) grid.
                grid = np.stack((grid_x, grid_y), axis=0)
            elif params.gridtype == "sinusoidal":
                assert (
                    params.n_grid_channels == 4
                ), "n_grid_channels must be set to 4 for gridtype sinusoidal"
                x1 = np.meshgrid(np.sin(np.linspace(0, 2 * np.pi, img_shape_x)))
                x2 = np.meshgrid(np.cos(np.linspace(0, 2 * np.pi, img_shape_x)))
                y1 = np.meshgrid(np.sin(np.linspace(0, 2 * np.pi, img_shape_y)))
                y2 = np.meshgrid(np.cos(np.linspace(0, 2 * np.pi, img_shape_y)))
                grid_x1, grid_y1 = np.meshgrid(y1, x1)
                grid_x2, grid_y2 = np.meshgrid(y2, x2)
                grid = np.expand_dims(
                    np.stack((grid_x1, grid_y1, grid_x2, grid_y2), axis=0), axis=0
                )
            img = np.concatenate((img, grid), axis=1)

    if params.roll:
        # shift in longitude (data augmentation)
        img = np.roll(img, y_roll, axis=-1)

    if train and (crop_size_x or crop_size_y):
        img = img[:, :, rnd_x : rnd_x + crop_size_x, rnd_y : rnd_y + crop_size_y]

    # flatten history into the channel dimension for inputs
    if inp_or_tar == "inp":
        img = np.reshape(img, (n_channels * (n_history + 1), crop_size_x, crop_size_y))
    elif inp_or_tar == "tar":
        img = np.reshape(img, (n_channels, crop_size_x, crop_size_y))

    return torch.as_tensor(img)
|
modulus-main
|
modulus/utils/sfno/img_utils.py
|
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
from modulus.utils.sfno import logging_utils
logging_utils.config_logger()
import torch
def unlog_tp(x, eps=1e-5):  # pragma: no cover
    """Invert the log-precipitation transform (numpy version)."""
    # inverse of log(1 + x/eps): eps * (exp(x) - 1)
    return (np.exp(x) - 1) * eps
def unlog_tp_torch(x, eps=1e-5):  # pragma: no cover
    """Invert the log-precipitation transform (torch version)."""
    # inverse of log(1 + x/eps): eps * (exp(x) - 1)
    return (torch.exp(x) - 1) * eps
def mean(x, axis=None):  # pragma: no cover
    """Calculates the spatial mean of ``x`` along ``axis`` (all axes by default)."""
    total = np.sum(x, axis)
    count = np.size(x, axis)
    return total / count
def lat_np(j, num_lat):  # pragma: no cover
    """Latitude in degrees for row index ``j``: 90 at j=0, -90 at j=num_lat-1."""
    return 90 - j * 180 / (num_lat - 1)
def weighted_acc(pred, target, weighted=True):  # pragma: no cover
    """
    Takes in arrays of size [1, num_lat, num_long] and returns latitude-weighted
    correlation
    """
    # promote 2D fields to [1, h, w]
    if pred.ndim == 2:
        pred = pred[np.newaxis, ...]
    if target.ndim == 2:
        target = target[np.newaxis, ...]

    num_lat = pred.shape[1]
    lat_idx = np.arange(0, num_lat)
    s = np.sum(np.cos(np.pi / 180 * lat_np(lat_idx, num_lat)))
    if weighted:
        weight = np.expand_dims(latitude_weighting_factor(lat_idx, num_lat, s), -1)
    else:
        weight = 1
    numerator = (weight * pred * target).sum()
    denominator = np.sqrt(
        (weight * pred * pred).sum() * (weight * target * target).sum()
    )
    return numerator / denominator
def weighted_acc_masked(pred, target, weighted=True, maskarray=1):  # pragma: no cover
    """
    Takes in arrays of size [1, num_lat, num_long] and returns masked latitude-weighted
    correlation

    Parameters
    ----------
    pred, target : np.ndarray
        Fields of shape [1, num_lat, num_long]; 2D inputs get a leading axis.
    weighted : bool, optional
        Apply cosine-latitude weighting when True.
    maskarray : np.ndarray or int, optional
        Multiplicative mask (1 keeps every point).
    """
    # takes in shape [1, num_lat, num_long]
    if len(pred.shape) == 2:
        pred = np.expand_dims(pred, 0)
    if len(target.shape) == 2:
        target = np.expand_dims(target, 0)

    num_lat = np.shape(pred)[1]
    num_long = np.shape(target)[2]
    # NOTE: in-place anomaly removal mutates the caller's arrays
    pred -= mean(pred)
    target -= mean(target)
    # BUGFIX: the original called ``lat`` here, which in this file is the
    # torch.jit.script tensor kernel and fails on numpy input; use ``lat_np``
    # (as the unmasked ``weighted_acc`` above does).
    s = np.sum(np.cos(np.pi / 180 * lat_np(np.arange(0, num_lat), num_lat)))
    weight = (
        np.expand_dims(latitude_weighting_factor(np.arange(0, num_lat), num_lat, s), -1)
        if weighted
        else 1
    )
    r = (maskarray * weight * pred * target).sum() / np.sqrt(
        (maskarray * weight * pred * pred).sum()
        * (maskarray * weight * target * target).sum()
    )
    return r
def weighted_rmse(pred, target):  # pragma: no cover
    """
    Calculates the latitude-weighted rmse
    """
    # promote 2D fields to [1, h, w]
    if pred.ndim == 2:
        pred = pred[np.newaxis, ...]
    if target.ndim == 2:
        target = target[np.newaxis, ...]

    num_lat = pred.shape[1]
    num_long = target.shape[2]
    lat_idx = np.arange(0, num_lat)
    s = np.sum(np.cos(np.pi / 180 * lat_np(lat_idx, num_lat)))
    weight = np.expand_dims(
        latitude_weighting_factor(lat_idx, num_lat, s), -1
    )
    total = np.sum(np.dot(weight.T, (pred[0] - target[0]) ** 2))
    return np.sqrt(1 / num_lat * 1 / num_long * total)
def latitude_weighting_factor(j, num_lat, s):  # pragma: no cover
    """Cosine-latitude weighting factor for row ``j``, normalized by ``s``."""
    return num_lat * np.cos(np.pi / 180.0 * lat_np(j, num_lat)) / s
def top_quantiles_error(pred, target):  # pragma: no cover
    """
    Mean prediction-minus-target error over the top (log-spaced) quantiles.
    """
    if pred.ndim == 2:
        pred = pred[np.newaxis, ...]
    if target.ndim == 2:
        target = target[np.newaxis, ...]
    n_quantiles = 100
    exp_low, exp_high = 5, 0.1
    # quantile levels approaching 1 on a logarithmic scale
    qtile = 1.0 - np.logspace(-exp_low, -exp_high, num=n_quantiles)
    quant_tar = np.quantile(target, q=qtile, axis=(1, 2))
    quant_pred = np.quantile(pred, q=qtile, axis=(1, 2))
    return np.mean(quant_pred - quant_tar, axis=0)
# torch (TorchScript) counterparts of the numpy helpers above
@torch.jit.script
def lat(j: torch.Tensor, num_lat: int) -> torch.Tensor:  # pragma: no cover
    """Latitude in degrees for row index ``j``: 90 at j=0, -90 at j=num_lat-1."""
    return 90.0 - j * 180.0 / float(num_lat - 1)
@torch.jit.script
def latitude_weighting_factor_torch(
    j: torch.Tensor, num_lat: int, s: torch.Tensor
) -> torch.Tensor:  # pragma: no cover
    """Cosine-latitude weighting factor for row ``j``, normalized by ``s``."""
    # NOTE(review): pi is approximated as 3.1416 throughout this file
    return num_lat * torch.cos(3.1416 / 180.0 * lat(j, num_lat)) / s
@torch.jit.script
def weighted_rmse_torch_channels(
    pred: torch.Tensor, target: torch.Tensor
) -> torch.Tensor:  # pragma: no cover
    """Calculates the latitude-weighted rmse for each channel.

    Inputs are [n, c, h, w]; returns [n, c].
    """
    # takes in arrays of size [n, c, h, w] and returns latitude-weighted rmse for each chann
    num_lat = pred.shape[2]
    # num_long = target.shape[2]
    lat_t = torch.arange(start=0, end=num_lat, device=pred.device)
    # normalization constant: sum of cosine-latitude over all rows
    s = torch.sum(torch.cos(3.1416 / 180.0 * lat(lat_t, num_lat)))
    weight = torch.reshape(
        latitude_weighting_factor_torch(lat_t, num_lat, s), (1, 1, -1, 1)
    )
    result = torch.sqrt(torch.mean(weight * (pred - target) ** 2.0, dim=(-1, -2)))
    return result
@torch.jit.script
def weighted_rmse_torch(
    pred: torch.Tensor, target: torch.Tensor
) -> torch.Tensor:  # pragma: no cover
    """Calculates the latitude-weighted rmse, averaged over the batch dim."""
    result = weighted_rmse_torch_channels(pred, target)
    return torch.mean(result, dim=0)
@torch.jit.script
def weighted_acc_masked_torch_channels(
    pred: torch.Tensor, target: torch.Tensor, maskarray: torch.Tensor
) -> torch.Tensor:  # pragma: no cover
    """Takes in arrays of size [n, c, h, w] and returns latitude-weighted ACC
    restricted by the multiplicative ``maskarray``."""
    num_lat = pred.shape[2]
    lat_t = torch.arange(start=0, end=num_lat, device=pred.device)
    # normalization constant: sum of cosine-latitude over all rows
    s = torch.sum(torch.cos(3.1416 / 180.0 * lat(lat_t, num_lat)))
    weight = torch.reshape(
        latitude_weighting_factor_torch(lat_t, num_lat, s), (1, 1, -1, 1)
    )
    # masked, weighted pattern correlation over the spatial dims
    result = torch.sum(maskarray * weight * pred * target, dim=(-1, -2)) / torch.sqrt(
        torch.sum(maskarray * weight * pred * pred, dim=(-1, -2))
        * torch.sum(maskarray * weight * target * target, dim=(-1, -2))
    )
    return result
@torch.jit.script
def weighted_acc_torch_channels(
    pred: torch.Tensor, target: torch.Tensor
) -> torch.Tensor:  # pragma: no cover
    """Takes in arrays of size [n, c, h, w] and returns latitude-weighted ACC
    per sample and channel ([n, c])."""
    num_lat = pred.shape[2]
    # num_long = target.shape[2]
    lat_t = torch.arange(start=0, end=num_lat, device=pred.device)
    # normalization constant: sum of cosine-latitude over all rows
    s = torch.sum(torch.cos(3.1416 / 180.0 * lat(lat_t, num_lat)))
    weight = torch.reshape(
        latitude_weighting_factor_torch(lat_t, num_lat, s), (1, 1, -1, 1)
    )
    # weighted pattern correlation over the spatial dims
    result = torch.sum(weight * pred * target, dim=(-1, -2)) / torch.sqrt(
        torch.sum(weight * pred * pred, dim=(-1, -2))
        * torch.sum(weight * target * target, dim=(-1, -2))
    )
    return result
@torch.jit.script
def weighted_acc_torch(
    pred: torch.Tensor, target: torch.Tensor
) -> torch.Tensor:  # pragma: no cover
    """Calculates the latitude-weighted ACC, averaged over the batch dim."""
    result = weighted_acc_torch_channels(pred, target)
    return torch.mean(result, dim=0)
@torch.jit.script
def unweighted_acc_torch_channels(
    pred: torch.Tensor, target: torch.Tensor
) -> torch.Tensor:  # pragma: no cover
    """Per-sample, per-channel ACC without latitude weighting."""
    covariance = torch.sum(pred * target, dim=(-1, -2))
    pred_norm = torch.sum(pred * pred, dim=(-1, -2))
    target_norm = torch.sum(target * target, dim=(-1, -2))
    return covariance / torch.sqrt(pred_norm * target_norm)
@torch.jit.script
def unweighted_acc_torch(
    pred: torch.Tensor, target: torch.Tensor
) -> torch.Tensor:  # pragma: no cover
    """Calculates the ACC without weighting, averaged over the batch dim."""
    result = unweighted_acc_torch_channels(pred, target)
    return torch.mean(result, dim=0)
@torch.jit.script
def top_quantiles_error_torch(
    pred: torch.Tensor, target: torch.Tensor
) -> torch.Tensor:  # pragma: no cover
    """Mean prediction-minus-target error over the top (log-spaced) quantiles."""
    qs = 100
    qlim = 3
    qcut = 0.1
    n, c, h, w = pred.size()
    # quantile levels approaching 1 on a logarithmic scale
    qtile = 1.0 - torch.logspace(-qlim, -qcut, steps=qs, device=pred.device)
    flat_tar = target.view(n, c, h * w)
    flat_pred = pred.view(n, c, h * w)
    quant_tar = torch.quantile(flat_tar, q=qtile, dim=-1)
    quant_pred = torch.quantile(flat_pred, q=qtile, dim=-1)
    return torch.mean(quant_pred - quant_tar, dim=0)
|
modulus-main
|
modulus/utils/sfno/metrics/weighted_acc_rmse_inf.py
|
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
from modulus.utils.sfno import logging_utils
logging_utils.config_logger()
import torch
import torch.nn as nn
# distributed stuff
from modulus.utils.sfno.distributed import comm
from modulus.utils.sfno.distributed.mappings import reduce_from_parallel_region
def mean(x, axis=None):  # pragma: no cover
    """Calculates the spatial mean of ``x`` along ``axis`` (all axes by default)."""
    total = np.sum(x, axis)
    count = np.size(x, axis)
    return total / count
def lat_np(j, num_lat):  # pragma: no cover
    """Latitude in degrees for row index ``j``: 90 at j=0, -90 at j=num_lat-1."""
    return 90 - j * 180 / (num_lat - 1)
def weighted_acc(pred, target, weighted=True):  # pragma: no cover
    """takes in arrays of size [1, h, w] and returns latitude-weighted correlation"""
    if pred.ndim == 2:
        pred = pred[np.newaxis, ...]
    if target.ndim == 2:
        target = target[np.newaxis, ...]

    num_lat = pred.shape[1]
    # remove spatial means first (correlate anomalies); mutates the inputs
    pred -= mean(pred)
    target -= mean(target)
    lat_idx = np.arange(0, num_lat)
    s = np.sum(np.cos(np.pi / 180 * lat_np(lat_idx, num_lat)))
    if weighted:
        weight = np.expand_dims(latitude_weighting_factor(lat_idx, num_lat, s), -1)
    else:
        weight = 1
    numerator = (weight * pred * target).sum()
    denominator = np.sqrt(
        (weight * pred * pred).sum() * (weight * target * target).sum()
    )
    return numerator / denominator
def weighted_rmse(pred, target):  # pragma: no cover
    """takes in arrays of size [1, h, w] and returns latitude-weighted rmse"""
    if pred.ndim == 2:
        pred = pred[np.newaxis, ...]
    if target.ndim == 2:
        target = target[np.newaxis, ...]

    num_lat = pred.shape[1]
    num_long = target.shape[2]
    lat_idx = np.arange(0, num_lat)
    s = np.sum(np.cos(np.pi / 180 * lat_np(lat_idx, num_lat)))
    weight = np.expand_dims(
        latitude_weighting_factor(lat_idx, num_lat, s), -1
    )
    total = np.sum(np.dot(weight.T, (pred[0] - target[0]) ** 2))
    return np.sqrt(1 / num_lat * 1 / num_long * total)
def latitude_weighting_factor(j, num_lat, s):  # pragma: no cover
    """Cosine-latitude weighting factor for row ``j``, clamped at zero.

    BUGFIX: ``np.clip`` requires both bounds — the original passed only
    ``a_min``, which raises ``TypeError: clip() missing required argument
    'a_max'`` on classic numpy; ``a_max=None`` means "no upper bound".
    """
    return np.clip(
        num_lat * np.cos(np.pi / 180.0 * lat_np(j, num_lat)) / s,
        a_min=0.0,
        a_max=None,
    )
# torch version for rmse comp
@torch.jit.script
def l1_torch_local(
    pred: torch.Tensor, target: torch.Tensor
) -> torch.Tensor:  # pragma: no cover
    """Mean absolute error between ``pred`` and ``target`` (single process)."""
    loss = nn.functional.l1_loss(pred, target)
    return loss
def l1_torch_distributed(
    pred: torch.Tensor, target: torch.Tensor
) -> torch.Tensor:  # pragma: no cover
    """Mean absolute error, averaged over the matmul model-parallel group."""
    local_loss = nn.functional.l1_loss(pred, target)
    group_size = float(comm.get_size("matmul"))
    return reduce_from_parallel_region(local_loss, "matmul") / group_size
@torch.jit.script
def lat_torch(j: torch.Tensor, num_lat: int) -> torch.Tensor:  # pragma: no cover
    """Latitude in degrees for row index ``j``: 90 at j=0, -90 at j=num_lat-1."""
    return 90.0 - j * 180.0 / float(num_lat - 1)
@torch.jit.script
def latitude_weighting_factor_torch(
    lat: torch.Tensor, num_lat: int, norm: torch.Tensor
) -> torch.Tensor:  # pragma: no cover
    """Cosine-latitude weighting factor, normalized by ``norm`` and clamped at zero."""
    factor = num_lat * torch.cos(torch.deg2rad(lat)) / norm
    return torch.clamp(factor, min=0.0)
@torch.jit.script
def weighted_rmse_torch_kernel(
    pred: torch.Tensor, target: torch.Tensor, weight: torch.Tensor
) -> torch.Tensor:  # pragma: no cover
    """Weighted mean-squared error over the two spatial dims (no sqrt here)."""
    diff = pred - target
    return torch.mean(weight * diff * diff, dim=(-1, -2))
@torch.jit.script
def weighted_rmse_torch_local(
    pred: torch.Tensor, target: torch.Tensor, weight: torch.Tensor
) -> torch.Tensor:  # pragma: no cover
    """Calculates the weighted rmse between prediction and target in a local setting."""
    # compute the weighted mean-squared error per sample/channel
    res = weighted_rmse_torch_kernel(pred, target, weight)
    # take the sqrt, then average over batches
    res = torch.mean(torch.sqrt(res), dim=0)
    return res
def weighted_rmse_torch_distributed(
    pred: torch.Tensor, target: torch.Tensor, weight: torch.Tensor
) -> torch.Tensor:  # pragma: no cover
    """Weighted RMSE with a model-parallel (matmul group) mean reduction."""
    local_mse = weighted_rmse_torch_kernel(pred, target, weight)
    # model-parallel mean over the matmul communicator
    group_size = float(comm.get_size("matmul"))
    reduced = reduce_from_parallel_region(local_mse, "matmul") / group_size
    # sqrt, then average over the batch dimension
    return torch.mean(torch.sqrt(reduced), dim=0)
# FIXME: needs to be adopted like above
@torch.jit.script
def weighted_acc_torch_kernel(
    pred: torch.Tensor, target: torch.Tensor, weight: torch.Tensor
):  # pragma: no cover
    """Weighted covariance and variances over the spatial dims of [n, c, h, w]."""
    covariance = torch.sum(weight * pred * target, dim=(-1, -2))
    pred_var = torch.sum(weight * torch.square(pred), dim=(-1, -2))
    target_var = torch.sum(weight * torch.square(target), dim=(-1, -2))
    return covariance, pred_var, target_var
@torch.jit.script
def weighted_acc_torch_local(
    pred: torch.Tensor, target: torch.Tensor, weight: torch.Tensor
) -> torch.Tensor:  # pragma: no cover
    """Calculates the weighted acc between prediction and target in a local
    setting."""
    # eps guards against division by zero for constant fields
    eps = 1e-6
    cov, var1, var2 = weighted_acc_torch_kernel(pred, target, weight)
    res = cov / (torch.sqrt(var1 * var2) + eps)
    # average over batches
    res = torch.mean(res, dim=0)
    return res
def weighted_acc_torch_local_no_reduction(
    pred: torch.Tensor, target: torch.Tensor, weight: torch.Tensor
) -> torch.Tensor:  # pragma: no cover
    """Weighted ACC per sample/channel, without the batch average."""
    # eps guards against division by zero for constant fields
    eps = 1e-6
    covariance, pred_var, target_var = weighted_acc_torch_kernel(pred, target, weight)
    return covariance / (torch.sqrt(pred_var * target_var) + eps)
def weighted_acc_torch_distributed(
    pred: torch.Tensor, target: torch.Tensor, weight: torch.Tensor
) -> torch.Tensor:  # pragma: no cover
    """Weighted ACC with model-parallel (matmul group) reductions."""
    eps = 1e-6
    covariance, pred_var, target_var = weighted_acc_torch_kernel(pred, target, weight)
    # sum the partial statistics across the matmul communicator
    covariance = reduce_from_parallel_region(covariance, "matmul")
    pred_var = reduce_from_parallel_region(pred_var, "matmul")
    target_var = reduce_from_parallel_region(target_var, "matmul")
    correlation = covariance / (torch.sqrt(pred_var * target_var) + eps)
    # average over the batch dimension
    return torch.mean(correlation, dim=0)
class SimpsonQuadrature(nn.Module):
    """Composite Simpson's-rule quadrature over a uniform grid (even intervals only)."""

    def __init__(self, num_intervals, interval_width, device):  # pragma: no cover
        super(SimpsonQuadrature, self).__init__()

        if num_intervals % 2 != 0:
            raise NotImplementedError(
                "Error, please specify an even number of intervals"
            )

        # accumulate the 1-4-1 stencil over each pair of intervals
        weights = [0.0] * (num_intervals + 1)
        for pair in range(num_intervals // 2):
            weights[2 * pair] += 1.0
            weights[2 * pair + 1] += 4.0
            weights[2 * pair + 2] += 1.0
        self.weights = torch.tensor(weights, dtype=torch.float32, device=device)
        self.weights *= interval_width / 3.0

    def forward(self, x, dim=1):  # pragma: no cover
        """Integrate ``x`` along ``dim`` as a weighted sum."""
        # reshape the weights so they broadcast along `dim`
        shape = [1] * x.dim()
        shape[dim] = -1
        return torch.sum(x * torch.reshape(self.weights, shape), dim=dim)
class TrapezoidQuadrature(nn.Module):
    """Trapezoidal-rule quadrature over a uniform grid."""

    def __init__(self, num_intervals, interval_width, device):  # pragma: no cover
        super(TrapezoidQuadrature, self).__init__()
        # interior nodes get the full interval width, endpoints half of it
        weights = [interval_width] * (num_intervals + 1)
        weights[0] = weights[0] * 0.5
        weights[-1] = weights[-1] * 0.5
        self.weights = torch.tensor(weights, dtype=torch.float32, device=device)

    def forward(self, x, dim=1):  # pragma: no cover
        """Integrate ``x`` along ``dim`` as a weighted sum."""
        shape = [1] * x.dim()
        shape[dim] = -1
        return torch.sum(x * torch.reshape(self.weights, shape), dim=dim)
class Quadrature(nn.Module):
    """Numerical integration: Simpson's rule for an even number of intervals,
    trapezoidal rule otherwise."""

    def __init__(self, num_intervals, interval_width, device):  # pragma: no cover
        super(Quadrature, self).__init__()
        quad_cls = (
            SimpsonQuadrature if num_intervals % 2 == 0 else TrapezoidQuadrature
        )
        self.quad = quad_cls(num_intervals, interval_width, device)

    def forward(self, x, dim=1):  # pragma: no cover
        """Delegate integration to the selected rule."""
        return self.quad(x, dim)
|
modulus-main
|
modulus/utils/sfno/metrics/weighted_acc_rmse.py
|
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import logging
from modulus.utils.sfno.logging_utils import disable_logging
import math
import torch
import torch.distributed as dist
import datetime as dt
from typing import Union
import numpy as np
# Communicator registry, populated by ``init`` below.
_COMM_LIST = []  # process groups, indexed by communicator id
_COMM_NAMES = {}  # communicator name -> index into _COMM_LIST


# world comm
def get_size(comm_id: Union[str, int]) -> int:  # pragma: no cover
    """Return the number of ranks in the given communicator (1 if unavailable)."""
    if isinstance(comm_id, int):
        cid = comm_id
    else:
        # unknown names map past the end of the list and fall back below
        cid = _COMM_NAMES.get(comm_id, len(_COMM_LIST))
    if dist.is_initialized() and cid < len(_COMM_LIST):
        return dist.get_world_size(group=_COMM_LIST[cid])
    return 1
def get_rank(comm_id: Union[str, int]) -> int:  # pragma: no cover
    """Return this process's rank in the given communicator (0 if unavailable)."""
    if isinstance(comm_id, int):
        cid = comm_id
    else:
        # unknown names map past the end of the list and fall back below
        cid = _COMM_NAMES.get(comm_id, len(_COMM_LIST))
    if dist.is_initialized() and cid < len(_COMM_LIST):
        return dist.get_rank(group=_COMM_LIST[cid])
    return 0
def get_group(comm_id: Union[str, int]) -> int:  # pragma: no cover
    """Return the process group of the given communicator, raising if missing."""
    if isinstance(comm_id, int):
        cid = comm_id
    else:
        cid = _COMM_NAMES.get(comm_id, len(_COMM_LIST))
    if dist.is_initialized() and cid < len(_COMM_LIST):
        return _COMM_LIST[cid]
    raise IndexError(f"Error, comm with id {comm_id} not available.")
# specialized routines for world comms
def get_world_size():  # pragma: no cover
    """Return the global world size (1 when torch.distributed is not initialized)."""
    return dist.get_world_size() if dist.is_initialized() else 1
def get_world_rank():  # pragma: no cover
    """Return the global rank (0 when torch.distributed is not initialized)."""
    return dist.get_rank() if dist.is_initialized() else 0
def get_local_rank():  # pragma: no cover
    """Returns the local (per-node) rank of the current process."""
    # NOTE: the original gated a LOCAL_RANK env-var lookup behind ``and
    # False``, making that branch unreachable (debug leftover); it has been
    # removed without changing behavior. The env var is deliberately ignored.
    if not dist.is_initialized():
        return 0
    num_gpu = int(os.getenv("NGPU_PER_NODE", torch.cuda.device_count()))
    return get_world_rank() % num_gpu
def get_names():  # pragma: no cover
    """Return the mapping of communicator name -> communicator id."""
    return _COMM_NAMES
def is_distributed(name: str):  # pragma: no cover
    """Return True if a communicator with ``name`` has been registered."""
    return name in _COMM_NAMES
# get
def init(params, verbose=False):  # pragma: no cover
    """Initialize torch.distributed and build all model-/data-parallel communicators.

    Parameters
    ----------
    params : object
        Configuration object. Reads ``wireup_info`` ("env" or "mpi"),
        ``wireup_store`` ("file" or "tcp"), ``log_to_screen`` and the
        optional ``model_parallel_sizes`` / ``model_parallel_names``
        attributes; writes ``params["model_parallel_size"]``.
    verbose : bool
        If True, rank 0 prints the rank layout of every communicator created.
    """
    # set up global and local communicator
    if params.wireup_info == "env":
        world_size = int(os.getenv("WORLD_SIZE", 1))
        world_rank = int(os.getenv("RANK", 0))
        if os.getenv("WORLD_RANK") is not None:
            # use WORLD_RANK if available for backwards compatibility
            world_rank = int(os.getenv("WORLD_RANK"))
        port = int(os.getenv("MASTER_PORT", 0))
        master_address = os.getenv("MASTER_ADDR")
        if os.getenv("MASTER_ADDRESS") is not None:
            # use MASTER_ADDRESS if available for backwards compatibility.
            # BUGFIX: this is a hostname/IP string; it was previously cast to
            # int, which raised a ValueError whenever the variable was set.
            master_address = os.getenv("MASTER_ADDRESS")
    elif params.wireup_info == "mpi":
        import socket

        try:
            from mpi4py import MPI
        except ImportError as err:
            # BUGFIX: previously a Warning object was constructed but never
            # raised or emitted, and execution then crashed with a NameError
            # on MPI; fail loudly instead (also fixes the "mip4py" typo).
            raise ImportError(
                'mpi4py is not installed. Please install it using pip install "mpi4py>=3.1.4"'
            ) from err
        mpi_comm = MPI.COMM_WORLD.Dup()
        world_size = mpi_comm.Get_size()
        world_rank = mpi_comm.Get_rank()
        my_host = socket.gethostname()
        port = 29500
        master_address = None
        if world_rank == 0:
            # rank 0 resolves its own address and broadcasts it to all ranks
            master_address_info = socket.getaddrinfo(
                my_host, port, family=socket.AF_INET, proto=socket.IPPROTO_TCP
            )
            master_address = master_address_info[0][-1][0]
        master_address = mpi_comm.bcast(master_address, root=0)
        os.environ["MASTER_ADDRESS"] = master_address
        os.environ["MASTER_PORT"] = str(port)
    else:
        raise ValueError(f"Error, wireup-info {params.wireup_info} not supported")
    # set local rank to 0 if env var not available
    local_rank = int(os.getenv("LOCAL_RANK", 0))

    if world_size > 1:
        with disable_logging():
            if params.wireup_store == "file":
                wireup_file_path = os.getenv("WIREUP_FILE_PATH")
                wireup_store = dist.FileStore(wireup_file_path, world_size)
            elif params.wireup_store == "tcp":
                # create tcp store
                wireup_store = dist.TCPStore(
                    host_name=master_address,
                    port=port,
                    world_size=world_size,
                    is_master=(world_rank == 0),
                    timeout=dt.timedelta(seconds=900),
                )
            else:
                wireup_store = None

            # initialize process groups
            dist.init_process_group(
                backend="nccl",
                rank=world_rank,
                world_size=world_size,
                store=wireup_store,
            )

            # get sizes from the initialized backend
            world_size = get_world_size()
            world_rank = get_world_rank()
            local_rank = get_local_rank()

    # do individual wireup for model parallel comms:
    if hasattr(params, "model_parallel_sizes"):
        model_parallel_sizes = params.model_parallel_sizes
    else:
        model_parallel_sizes = [1]

    if hasattr(params, "model_parallel_names"):
        model_parallel_names = params.model_parallel_names
    else:
        model_parallel_names = ["model"]
    assert len(model_parallel_names) == len(
        model_parallel_sizes
    ), "Please specify names for your communicators"
    model_parallel_size = math.prod(model_parallel_sizes)
    params["model_parallel_size"] = model_parallel_size

    assert (
        world_size % model_parallel_size == 0
    ), "Error, please make sure that the product of model parallel ranks evenly divides the total number of ranks"

    # we set this to be orthogonal to the MP groups
    # we can play tricks with the ddp_group later, in case if all the weights are shared
    data_parallel_size = world_size // model_parallel_size

    # create orthogonal communicators first
    global _COMM_LIST
    global _COMM_NAMES

    if params.log_to_screen:
        logging.info("Starting Wireup")

    if world_size > 1:
        # set up the strides: model_grid enumerates the ranks of one model
        # parallel instance, reshaped to the (reversed) parallel sizes
        model_grid = np.reshape(
            np.arange(0, model_parallel_size), model_parallel_sizes[::-1]
        )
        # cyclic permutation that rotates the innermost axis outwards each step
        perm = np.roll(np.arange(0, len(model_parallel_sizes)), 1).tolist()

        ranks_lookup = {}

        comm_count = 0
        for mpname in model_parallel_names:
            base_group = np.reshape(model_grid, (-1, model_grid.shape[-1]))
            model_groups = []
            for goffset in range(0, world_size, model_parallel_size):
                model_groups += sorted((goffset + base_group).tolist())

            if verbose and world_rank == 0:
                print(f"Creating comm groups for id {mpname}: {model_groups}")

            for grp in model_groups:
                if len(grp) > 1:
                    tmp_group = dist.new_group(ranks=grp)
                    if world_rank in grp:
                        _COMM_LIST.append(tmp_group)
                        _COMM_NAMES[mpname] = comm_count
                        comm_count += 1
            ranks_lookup[mpname] = model_groups

            # go for the next step
            model_grid = np.transpose(model_grid, perm)

        def merge_comms(comm_count, ranks_lookup, comm_name_1, comm_name_2, merge_name):
            """Create a meta communicator spanning two existing communicators.

            Reuses an existing group when one of the two inputs is trivial
            (size 1), otherwise fuses their rank lists transitively.
            """
            if (get_size(comm_name_1) == 1) and (get_size(comm_name_2) > 1):
                if verbose and world_rank == 0:
                    print(
                        f"Creating comm groups for id {merge_name}: {ranks_lookup[comm_name_2]}"
                    )
                _COMM_LIST.append(get_group(comm_name_2))
                _COMM_NAMES[merge_name] = comm_count
                comm_count += 1
            elif (get_size(comm_name_1) > 1) and (get_size(comm_name_2) == 1):
                if verbose and world_rank == 0:
                    print(
                        f"Creating comm groups for id {merge_name}: {ranks_lookup[comm_name_1]}"
                    )
                _COMM_LIST.append(get_group(comm_name_1))
                _COMM_NAMES[merge_name] = comm_count
                comm_count += 1
            elif (get_size(comm_name_1) > 1) and (get_size(comm_name_2) > 1):

                def merge_ranks(list1, list2):
                    """Transitively merge overlapping rank groups from two lists."""
                    coll = list1 + list2
                    pooled = [set(subList) for subList in coll]
                    merging = True
                    while merging:
                        merging = False
                        for i, group in enumerate(pooled):
                            merged = next(
                                (g for g in pooled[i + 1 :] if g.intersection(group)),
                                None,
                            )
                            if not merged:
                                continue
                            group.update(merged)
                            pooled.remove(merged)
                            merging = True
                    return [list(x) for x in pooled]

                model_groups = merge_ranks(
                    ranks_lookup[comm_name_1], ranks_lookup[comm_name_2]
                )
                if verbose and world_rank == 0:
                    print(f"Creating comm groups for id {merge_name}: {model_groups}")

                for grp in model_groups:
                    tmp_group = dist.new_group(ranks=grp)
                    if world_rank in grp:
                        _COMM_LIST.append(tmp_group)
                        _COMM_NAMES[merge_name] = comm_count
                        comm_count += 1

            return comm_count

        # merge spatial
        comm_count = merge_comms(comm_count, ranks_lookup, "h", "w", "spatial")
        # merge matmul
        comm_count = merge_comms(comm_count, ranks_lookup, "fin", "fout", "matmul")

        # now the data and model comm:
        model_groups = np.reshape(
            np.arange(0, world_size), (-1, model_parallel_size)
        ).tolist()
        for grp in model_groups:
            if len(grp) > 1:
                tmp_group = dist.new_group(ranks=grp)
                if world_rank in grp:
                    _COMM_LIST.append(tmp_group)
                    _COMM_NAMES["model"] = comm_count
                    comm_count += 1

        if data_parallel_size == world_size:
            if verbose and world_rank == 0:
                print(
                    f"Creating comm groups for id data: {[list(range(0, world_size))]}"
                )
            # the data comm spans all ranks; None denotes the default group
            _COMM_LIST.append(None)
            _COMM_NAMES["data"] = comm_count
        else:
            data_groups = [sorted(list(i)) for i in zip(*model_groups)]
            if verbose and world_rank == 0:
                print(f"Creating comm groups for id data: {data_groups}")
            for grp in data_groups:
                tmp_group = dist.new_group(ranks=grp)
                if world_rank in grp:
                    _COMM_LIST.append(tmp_group)
                    _COMM_NAMES["data"] = comm_count

    if params.log_to_screen:
        logging.info("Finished Wireup")

    return
|
modulus-main
|
modulus/utils/sfno/distributed/comm.py
|
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
|
modulus-main
|
modulus/utils/sfno/distributed/__init__.py
|
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
import torch.distributed as dist
import torch.nn as nn
import torch.nn.functional as F

from modulus.utils.sfno.distributed import comm

# matmul parallel
from modulus.utils.sfno.distributed.mappings import copy_to_parallel_region
from modulus.utils.sfno.distributed.mappings import reduce_from_parallel_region
from modulus.utils.sfno.distributed.mappings import scatter_to_parallel_region
from modulus.utils.sfno.distributed.mappings import gather_from_parallel_region

from modulus.utils.sfno.distributed.helpers import _transpose
from modulus.models.sfno.initialization import trunc_normal_
class distributed_transpose_w(torch.autograd.Function):  # pragma: no cover
    """Autograd-aware distributed transpose over the "w" communicator.

    ``dim`` is a pair ``(src, dst)``: forward moves data from ``src`` to
    ``dst``; backward performs the inverse transpose.
    """

    @staticmethod
    def forward(ctx, x, dim):  # pragma: no cover
        src_dim, dst_dim = dim[0], dim[1]
        pieces, _ = _transpose(x, src_dim, dst_dim, group=comm.get_group("w"))
        ctx.dim = dim
        return torch.cat(pieces, dim=dst_dim)

    @staticmethod
    def backward(ctx, go):  # pragma: no cover
        src_dim, dst_dim = ctx.dim
        pieces, _ = _transpose(go, dst_dim, src_dim, group=comm.get_group("w"))
        return torch.cat(pieces, dim=src_dim), None
class distributed_transpose_h(torch.autograd.Function):
    """Autograd-aware distributed transpose over the "h" communicator.

    ``dim`` is a pair ``(src, dst)``: forward moves data from ``src`` to
    ``dst``; backward performs the inverse transpose.
    """

    @staticmethod
    def forward(ctx, x, dim):  # pragma: no cover
        src_dim, dst_dim = dim[0], dim[1]
        pieces, _ = _transpose(x, src_dim, dst_dim, group=comm.get_group("h"))
        ctx.dim = dim
        return torch.cat(pieces, dim=dst_dim)

    @staticmethod
    def backward(ctx, go):  # pragma: no cover
        src_dim, dst_dim = ctx.dim
        pieces, _ = _transpose(go, dst_dim, src_dim, group=comm.get_group("h"))
        return torch.cat(pieces, dim=src_dim), None
class DistributedRealFFT2(nn.Module):
    """
    Distributed forward real 2D FFT, wrapped to mimic the SHT interface.

    Input is spatially split across the "h" and "w" communicators; the module
    transposes data so each 1D FFT is computed over a locally-contiguous
    dimension, truncating/padding frequency dims so they split evenly.
    """

    def __init__(self, nlat, nlon, lmax=None, mmax=None):  # pragma: no cover
        super(DistributedRealFFT2, self).__init__()

        # get the comms grid:
        self.comm_size_h = comm.get_size("h")
        self.comm_size_w = comm.get_size("w")
        self.comm_rank_w = comm.get_rank("w")

        # dimensions (lmax/mmax default to the untruncated sizes)
        self.nlat = nlat
        self.nlon = nlon
        self.lmax = lmax or self.nlat
        self.mmax = mmax or self.nlon // 2 + 1

        # frequency paddings: round each frequency dim up to a multiple of
        # the corresponding comm size so it can be split evenly
        ldist = (self.lmax + self.comm_size_h - 1) // self.comm_size_h
        self.lpad = ldist * self.comm_size_h - self.lmax
        mdist = (self.mmax + self.comm_size_w - 1) // self.comm_size_w
        self.mpad = mdist * self.comm_size_w - self.mmax

    def forward(self, x: torch.Tensor) -> torch.Tensor:  # pragma: no cover
        # we need to ensure that we can split the channels evenly
        # (assumes x is (batch, chan, h_local, w_local) — TODO confirm)
        assert x.shape[1] % self.comm_size_h == 0
        assert x.shape[1] % self.comm_size_w == 0

        # h and w is split. First we make w local by transposing into channel dim
        if self.comm_size_w > 1:
            xt = distributed_transpose_w.apply(x, (1, -1))
        else:
            xt = x

        # do first FFT (real-to-complex along the now-local w/lon dim)
        xtf = torch.fft.rfft(xt, n=self.nlon, dim=-1, norm="ortho")

        # truncate
        xtft = xtf[..., : self.mmax]

        # pad the dim to allow for splitting
        xtfp = F.pad(xtft, [0, self.mpad], mode="constant")

        # transpose: after this, m is split and c is local
        if self.comm_size_w > 1:
            y = distributed_transpose_w.apply(xtfp, (-1, 1))
        else:
            y = xtfp

        # transpose: after this, c is split and h is local
        if self.comm_size_h > 1:
            yt = distributed_transpose_h.apply(y, (1, -2))
        else:
            yt = y

        # the input data might be padded, make sure to truncate to nlat:
        # ytt = yt[..., :self.nlat, :]

        # do second FFT (complex, along the now-local h/lat dim):
        yo = torch.fft.fft(yt, n=self.nlat, dim=-2, norm="ortho")

        # pad if required, truncation is implicit
        yop = F.pad(yo, [0, 0, 0, self.lpad], mode="constant")

        # transpose: after this, l is split and c is local
        if self.comm_size_h > 1:
            y = distributed_transpose_h.apply(yop, (-2, 1))
        else:
            y = yop

        return y
class DistributedInverseRealFFT2(nn.Module):
    """
    Distributed inverse real 2D FFT, wrapped to mimic the SHT interface.

    Mirrors ``DistributedRealFFT2``: frequency data split over "h"/"w" is
    transposed so each inverse 1D FFT runs over a locally-contiguous dim,
    with spatial dims padded so they split evenly on the way back.
    """

    def __init__(self, nlat, nlon, lmax=None, mmax=None):  # pragma: no cover
        super(DistributedInverseRealFFT2, self).__init__()

        # get the comms grid:
        self.comm_size_h = comm.get_size("h")
        self.comm_size_w = comm.get_size("w")
        self.comm_rank_w = comm.get_rank("w")

        # dimensions
        self.nlat = nlat
        self.nlon = nlon
        self.lmax = lmax or self.nlat
        self.mmax = mmax or self.nlon // 2 + 1

        # spatial paddings: round spatial dims up to multiples of comm sizes
        latdist = (self.nlat + self.comm_size_h - 1) // self.comm_size_h
        self.latpad = latdist * self.comm_size_h - self.nlat
        londist = (self.nlon + self.comm_size_w - 1) // self.comm_size_w
        self.lonpad = londist * self.comm_size_w - self.nlon

        # frequency paddings
        ldist = (self.lmax + self.comm_size_h - 1) // self.comm_size_h
        self.lpad = ldist * self.comm_size_h - self.lmax
        mdist = (self.mmax + self.comm_size_w - 1) // self.comm_size_w
        self.mpad = mdist * self.comm_size_w - self.mmax

    def forward(self, x: torch.Tensor) -> torch.Tensor:  # pragma: no cover
        # we need to ensure that we can split the channels evenly
        assert x.shape[1] % self.comm_size_h == 0
        assert x.shape[1] % self.comm_size_w == 0

        # transpose: after that, channels are split, l is local:
        if self.comm_size_h > 1:
            xt = distributed_transpose_h.apply(x, (1, -2))
        else:
            xt = x

        # truncate (drop the split-padding added by the forward transform)
        xtt = xt[..., : self.lmax, :]

        # do first fft (complex inverse along the l/lat dim)
        xf = torch.fft.ifft(xtt, n=self.nlat, dim=-2, norm="ortho")

        # transpose: after this, l is split and channels are local
        xfp = F.pad(xf, [0, 0, 0, self.latpad])
        if self.comm_size_h > 1:
            y = distributed_transpose_h.apply(xfp, (-2, 1))
        else:
            y = xfp

        # transpose: after this, channels are split and m is local
        if self.comm_size_w > 1:
            yt = distributed_transpose_w.apply(y, (1, -1))
        else:
            yt = y

        # truncate
        ytt = yt[..., : self.mmax]

        # apply the inverse (real) FFT
        x = torch.fft.irfft(ytt, n=self.nlon, dim=-1, norm="ortho")

        # pad before we transpose back
        xp = F.pad(x, [0, self.lonpad])

        # transpose: after this, m is split and channels are local
        if self.comm_size_w > 1:
            out = distributed_transpose_w.apply(xp, (-1, 1))
        else:
            out = xp

        return out
class _DistMatmulHelper(torch.autograd.Function):
    """Distributed matrix multiply helper.

    Forward: pointwise conv followed by an all-reduce over the input-feature
    comm group. Backward: transposed conv for the input grad (reduced over
    the output-feature group), plus local weight/bias grads.

    NOTE(review): relies on the module-level name ``dist``
    (torch.distributed) — verify the module imports it.
    """

    @staticmethod
    def forward(
        ctx, X, weight, bias, inp_group_name, out_group_name
    ):  # pragma: no cover
        # store some variables for backward
        ctx.save_for_backward(X, weight, bias)
        ctx.out_group_name = out_group_name

        # matrix multiplication (pointwise conv, bias added after the reduce)
        xconv = F.conv2d(X, weight, bias=None)

        # reduce partial sums across the input-feature parallel group
        if comm.get_size(inp_group_name) > 1:
            dist.all_reduce(xconv, group=comm.get_group(inp_group_name))

        # add bias
        if bias is not None:
            xconvbias = xconv + bias
        else:
            xconvbias = xconv

        return xconvbias

    @staticmethod
    def backward(ctx, grad_out):  # pragma: no cover
        X, weight, bias = ctx.saved_tensors
        gname = ctx.out_group_name

        # do the bwd pass on dgrad
        grad_input = F.conv_transpose2d(grad_out, weight, bias=None)

        # reduce across nodes asynchronously; overlapped with wgrad below
        if comm.get_size(gname) > 1:
            dgrad_handle = dist.all_reduce(
                grad_input, group=comm.get_group(gname), async_op=True
            )

        # weight grad
        grad_weight = F.conv2d(
            X.transpose(0, 1), grad_out.transpose(0, 1), bias=None
        ).transpose(0, 1)

        if bias is not None:
            grad_bias = torch.sum(grad_out, dim=(0, 2, 3), keepdim=True)
        else:
            grad_bias = None

        # wait for the async dgrad reduction before returning
        if comm.get_size(gname) > 1:
            dgrad_handle.wait()

        return grad_input, grad_weight, grad_bias, None, None
class DistributedMatmul(nn.Module):
    """Distributed matrix multiply implemented as a pointwise (1x1) conv.

    The weight is sharded along both input features (``comm_inp_name``) and
    output features (``comm_out_name``); the forward pass reduces partial
    sums over the input-feature group.
    """

    def __init__(
        self,
        inp_dim,
        out_dim,
        kernel_size=1,
        comm_inp_name="fin",
        comm_out_name="fout",
        bias=True,
    ):  # pragma: no cover
        super(DistributedMatmul, self).__init__()

        # get sizes
        self.comm_inp_name = comm_inp_name
        self.comm_out_name = comm_out_name
        comm_inp_size = comm.get_size(self.comm_inp_name)
        comm_out_size = comm.get_size(self.comm_out_name)

        # split: both feature dims must divide evenly across their comm groups
        assert (
            kernel_size == 1
        ), "Error, only pointwise operations are currently supported"
        assert (
            inp_dim % comm_inp_size == 0
        ), f"Error, the size of input feature dim ({inp_dim}) has to be evenly divisible by the input feature comm dim ({comm_inp_size})"
        assert (
            out_dim % comm_out_size == 0
        ), f"Error, the size of output feature dim ({out_dim}) has to be evenly divisible by the output feature comm dim ({comm_out_size})"

        # compute reduced dims (per-rank shard sizes)
        inp_dim_local = inp_dim // comm_inp_size
        out_dim_local = out_dim // comm_out_size

        # parameters (annotations used by sync_params / gradient handling)
        self.weight = nn.Parameter(
            torch.ones(out_dim_local, inp_dim_local, kernel_size, kernel_size)
        )
        self.weight.is_shared_mp = ["spatial"]
        self.weight.sharded_dims_mp = [
            self.comm_out_name,
            self.comm_inp_name,
            None,
            None,
        ]
        if bias:
            self.bias = nn.Parameter(torch.ones(1, out_dim_local, 1, 1))
            self.bias.is_shared_mp = ["spatial"]
            self.bias.sharded_dims_mp = [None, self.comm_out_name, None, None]

        # init weights
        self._init_weights()

    def _init_weights(self):  # pragma: no cover
        # truncated-normal weights, zero bias
        trunc_normal_(self.weight, std=0.02)
        if hasattr(self, "bias"):
            nn.init.constant_(self.bias, 0.0)

    def forward(self, x):  # pragma: no cover
        # copy to output-parallel region, local matmul, reduce partial sums
        # over the input-feature group, then add the (already local) bias
        x_cp = copy_to_parallel_region(x, self.comm_out_name)
        x_loc = F.conv2d(x_cp, self.weight, bias=None)
        x_out = reduce_from_parallel_region(x_loc, self.comm_inp_name)
        if hasattr(self, "bias"):
            x_out = x_out + self.bias
        return x_out
# distributed encoder/decoder
class DistributedEncoderDecoder(nn.Module):
    """Distributed Encoder/Decoder: a stack of distributed pointwise matmuls.

    Hidden layers alternate input/output comm parallelism (names are swapped
    each layer), so the final output parallelism depends on ``num_layers``
    parity; the actual output comm name is exposed as ``self.comm_out_name``.
    """

    def __init__(
        self,
        num_layers,
        input_dim,
        output_dim,
        hidden_dim,
        act,
        comm_inp_name="fin",
        comm_out_name="fout",
    ):  # pragma: no cover
        super(DistributedEncoderDecoder, self).__init__()

        # get comms
        # NOTE(review): these two sizes are never used below — possibly stale
        comm_inp_size = comm.get_size(comm_inp_name)
        comm_out_size = comm.get_size(comm_out_name)

        # get list of modules
        encoder_modules = []
        current_dim = input_dim
        comm_inp_name_tmp = comm_inp_name
        comm_out_name_tmp = comm_out_name
        for i in range(num_layers - 1):
            encoder_modules.append(
                DistributedMatmul(
                    current_dim,
                    hidden_dim,
                    1,
                    comm_inp_name=comm_inp_name_tmp,
                    comm_out_name=comm_out_name_tmp,
                    bias=True,
                )
            )
            encoder_modules.append(act())
            current_dim = hidden_dim
            # swap parallelism direction for the next layer
            comm_inp_name_tmp, comm_out_name_tmp = (
                comm_out_name_tmp,
                comm_inp_name_tmp,
            )

        # final layer (no bias)
        encoder_modules.append(
            DistributedMatmul(
                current_dim,
                output_dim,
                1,
                comm_inp_name=comm_inp_name_tmp,
                comm_out_name=comm_out_name_tmp,
                bias=False,
            )
        )

        # create fwd sequence
        self.fwd = nn.Sequential(*encoder_modules)

        # store the comm names for in and out so that they can be queried
        self.comm_inp_name = comm_inp_name
        self.comm_out_name = comm_out_name_tmp

    def forward(self, x):  # pragma: no cover
        return self.fwd(x)
# more complicated layers
class DistributedMLP(nn.Module):
    """Distributed two-layer MLP built from distributed pointwise matmuls.

    fc1 maps the ``comm_inp_name``-parallel input to ``comm_hidden_name``
    parallelism and fc2 maps it back, so the output is distributed like the
    input. Dropout (if enabled) is applied after each matmul.

    Parameters
    ----------
    in_features : int
        Global (unsplit) input feature count.
    hidden_features, out_features : int, optional
        Global widths; both default to ``in_features``.
    output_bias : bool
        Whether fc2 carries a bias.
    drop_rate : float
        Dropout probability; 0 disables dropout entirely.
    checkpointing : bool
        If True, use activation checkpointing in forward.
    """

    def __init__(
        self,
        in_features,
        hidden_features=None,
        out_features=None,
        output_bias=True,
        comm_inp_name="fin",
        comm_hidden_name="fout",
        act_layer=nn.GELU,
        drop_rate=0.0,
        checkpointing=False,
    ):  # pragma: no cover
        super(DistributedMLP, self).__init__()
        self.checkpointing = checkpointing
        out_features = out_features or in_features
        hidden_features = hidden_features or in_features

        self.fc1 = DistributedMatmul(
            in_features,
            hidden_features,
            1,
            comm_inp_name=comm_inp_name,
            comm_out_name=comm_hidden_name,
            bias=True,
        )

        self.fc2 = DistributedMatmul(
            hidden_features,
            out_features,
            1,
            comm_inp_name=comm_hidden_name,
            comm_out_name=comm_inp_name,
            bias=output_bias,
        )

        self.act = act_layer()
        # BUGFIX: previously `nn.Dropout(drop)` referenced the undefined name
        # `drop`, raising a NameError whenever drop_rate > 0.
        self.drop = nn.Dropout(drop_rate) if drop_rate > 0.0 else nn.Identity()

    def fwd(self, x):  # pragma: no cover
        """Plain (non-checkpointed) forward pass."""
        # first layer
        x = self.fc1(x)
        x = self.act(x)
        x = self.drop(x)
        # second layer
        x = self.fc2(x)
        x = self.drop(x)
        return x

    @torch.jit.ignore
    def _checkpoint_forward(self, x):  # pragma: no cover
        """Forward with activation checkpointing to trade compute for memory."""
        # BUGFIX: `checkpoint` was never imported in this module; the local
        # import keeps the module importable regardless of the top-level list.
        from torch.utils.checkpoint import checkpoint

        return checkpoint(self.fwd, x)

    def forward(self, x):  # pragma: no cover
        if self.checkpointing:
            return self._checkpoint_forward(x)
        else:
            return self.fwd(x)
class DistributedPatchEmbed(nn.Module):
    """Distributed patch embedding layer.

    Splits the patched W dimension across the "spatial" comm group and,
    optionally, the embedding channels across the "matmul" group. The conv
    weights are replicated (shared) across spatial ranks.
    """

    def __init__(
        self,
        img_size=(224, 224),
        patch_size=(16, 16),
        in_chans=3,
        embed_dim=768,
        input_is_matmul_parallel=False,
        output_is_matmul_parallel=True,
    ):  # pragma: no cover
        super(DistributedPatchEmbed, self).__init__()

        # store params
        self.input_parallel = input_is_matmul_parallel
        self.output_parallel = output_is_matmul_parallel

        # get comm sizes:
        matmul_comm_size = comm.get_size("matmul")
        spatial_comm_size = comm.get_size("spatial")

        # compute parameters
        assert (
            img_size[1] // patch_size[1]
        ) % spatial_comm_size == 0, (
            "Error, make sure that the spatial comm size evenly divides patched W"
        )
        # num_patches counts only this rank's share of the W patches
        num_patches = ((img_size[1] // patch_size[1]) // spatial_comm_size) * (
            img_size[0] // patch_size[0]
        )
        # local image size: full H, W split across spatial ranks
        self.img_size = (img_size[0], img_size[1] // spatial_comm_size)
        self.patch_size = patch_size
        self.num_patches = num_patches

        # get effective embedding size:
        if self.output_parallel:
            assert (
                embed_dim % matmul_comm_size == 0
            ), "Error, the embed_dim needs to be divisible by matmul_parallel_size"
            out_chans_local = embed_dim // matmul_comm_size
        else:
            out_chans_local = embed_dim

        # the weights of this layer is shared across spatial parallel ranks
        self.proj = nn.Conv2d(
            in_chans, out_chans_local, kernel_size=patch_size, stride=patch_size
        )

        # make sure we reduce them across rank
        self.proj.weight.is_shared_mp = ["spatial"]
        self.proj.bias.is_shared_mp = ["spatial"]

    def forward(self, x):  # pragma: no cover
        # if the input is channel-sharded, make channels local first
        if self.input_parallel:
            x = gather_from_parallel_region(x, 1, "matmul")

        # mark the tensor for gradient reduction over the matmul group
        if self.output_parallel:
            x = copy_to_parallel_region(x, "matmul")

        B, C, H, W = x.shape
        assert (
            H == self.img_size[0] and W == self.img_size[1]
        ), f"Input image size ({H}*{W}) doesn't match model ({self.img_size[0]}*{self.img_size[1]})."
        # new: B, C, H*W
        x = self.proj(x).flatten(2)
        return x
@torch.jit.script
def compl_mul_add_fwd(
    a: torch.Tensor, b: torch.Tensor, c: torch.Tensor
) -> torch.Tensor:  # pragma: no cover
    """Complex multiply-add on real-valued tensors whose trailing dim holds (re, im).

    Computes einsum("bkixy,kio->bkoxy") in complex arithmetic expanded over
    the real/imag components, then adds the bias ``c``.
    """
    tmp = torch.einsum("bkixys,kiot->stbkoxy", a, b)
    # (x + iy)(u + iv) = (xu - yv) + i(yu + xv)
    real_part = tmp[0, 0, ...] - tmp[1, 1, ...]
    imag_part = tmp[1, 0, ...] + tmp[0, 1, ...]
    return torch.stack([real_part, imag_part], dim=-1) + c
@torch.jit.script
def compl_mul_add_fwd_c(
    a: torch.Tensor, b: torch.Tensor, c: torch.Tensor
) -> torch.Tensor:  # pragma: no cover
    """Complex multiply-add using native complex dtypes.

    Views the (re, im) trailing dims as complex numbers, contracts with
    einsum("bkixy,kio->bkoxy"), adds the complex bias and returns the
    real-valued (re, im) representation.
    """
    prod = torch.einsum(
        "bkixy,kio->bkoxy", torch.view_as_complex(a), torch.view_as_complex(b)
    )
    return torch.view_as_real(prod + torch.view_as_complex(c))
class DistributedAFNO2Dv2(nn.Module):
    """Distributed Adaptive Fourier Neural Operator (AFNO) 2D block, v2.

    Applies an FFT, a blockwise complex two-layer MLP on the kept frequency
    modes, soft-shrinkage sparsification, an inverse FFT and a residual
    connection. Blocks are sharded over the "matmul" comm group; weights are
    replicated over the "spatial" group.
    """

    def __init__(
        self,
        hidden_size,
        num_blocks=8,
        sparsity_threshold=0.01,
        hard_thresholding_fraction=1,
        hidden_size_factor=1,
        input_is_matmul_parallel=False,
        output_is_matmul_parallel=False,
        use_complex_kernels=False,
    ):  # pragma: no cover
        """Distributed AFNO2Dv2"""
        super(DistributedAFNO2Dv2, self).__init__()
        assert (
            hidden_size % num_blocks == 0
        ), f"hidden_size {hidden_size} should be divisble by num_blocks {num_blocks}"

        # get comm sizes:
        matmul_comm_size = comm.get_size("matmul")
        self.spatial_comm_size = comm.get_size("spatial")

        # select fft function handles
        # NOTE(review): distributed_rfft2 / distributed_irfft2 are not defined
        # in this module — confirm they are imported elsewhere, otherwise the
        # spatially-parallel path raises a NameError.
        if self.spatial_comm_size > 1:
            self.fft_handle = distributed_rfft2.apply
            self.ifft_handle = distributed_irfft2.apply
        else:
            self.fft_handle = torch.fft.rfft2
            self.ifft_handle = torch.fft.irfft2

        self.hidden_size = hidden_size
        self.sparsity_threshold = sparsity_threshold
        self.num_blocks = num_blocks
        assert (
            self.num_blocks % matmul_comm_size == 0
        ), "Error, num_blocks needs to be divisible by matmul_parallel_size"
        # per-rank block count and per-block channel width
        self.num_blocks_local = self.num_blocks // matmul_comm_size
        self.block_size = self.hidden_size // self.num_blocks
        self.hard_thresholding_fraction = hard_thresholding_fraction
        self.hidden_size_factor = hidden_size_factor
        self.scale = 0.02
        self.mult_handle = (
            compl_mul_add_fwd_c if use_complex_kernels else compl_mul_add_fwd
        )

        # model paralellism flags: whether in/out channels are already sharded
        self.input_is_matmul_parallel = input_is_matmul_parallel
        self.output_is_matmul_parallel = output_is_matmul_parallel

        # new
        # these weights need to be synced across all spatial ranks!
        self.w1 = nn.Parameter(
            self.scale
            * torch.randn(
                self.num_blocks_local,
                self.block_size,
                self.block_size * self.hidden_size_factor,
                2,
            )
        )
        self.b1 = nn.Parameter(
            self.scale
            * torch.randn(
                self.num_blocks_local,
                self.block_size * self.hidden_size_factor,
                1,
                1,
                2,
            )
        )
        self.w2 = nn.Parameter(
            self.scale
            * torch.randn(
                self.num_blocks_local,
                self.block_size * self.hidden_size_factor,
                self.block_size,
                2,
            )
        )
        self.b2 = nn.Parameter(
            self.scale * torch.randn(self.num_blocks_local, self.block_size, 1, 1, 2)
        )

        # make sure we reduce them across rank
        self.w1.is_shared_mp = ["spatial"]
        self.b1.is_shared_mp = ["spatial"]
        self.w2.is_shared_mp = ["spatial"]
        self.b2.is_shared_mp = ["spatial"]

    def forward(self, x):  # pragma: no cover
        if not self.input_is_matmul_parallel:
            # distribute data (shard channels over the matmul group)
            x = scatter_to_parallel_region(x, 1, "matmul")

        # bias (residual connection added back after the spectral MLP)
        bias = x

        dtype = x.dtype
        x = x.float()
        # assumes x is (batch, chan_local, h, w_local) — TODO confirm
        B, C, H, W_local = x.shape
        total_modes = H // 2 + 1
        kept_modes = int(total_modes * self.hard_thresholding_fraction)
        H_local = H // self.spatial_comm_size
        W = W_local * self.spatial_comm_size

        x = self.fft_handle(x, (H, W), (-2, -1), "ortho")
        x = x.view(B, self.num_blocks_local, self.block_size, H_local, W // 2 + 1)

        # new
        x = torch.view_as_real(x)
        o2 = torch.zeros(x.shape, device=x.device)

        # two-layer complex MLP on the kept low-frequency modes only
        o1 = F.relu(
            self.mult_handle(
                x[
                    :,
                    :,
                    :,
                    total_modes - kept_modes : total_modes + kept_modes,
                    :kept_modes,
                    :,
                ],
                self.w1,
                self.b1,
            )
        )
        o2[
            :, :, :, total_modes - kept_modes : total_modes + kept_modes, :kept_modes, :
        ] = self.mult_handle(o1, self.w2, self.b2)

        # finalize: sparsify, back to complex, inverse FFT, residual add
        x = F.softshrink(o2, lambd=self.sparsity_threshold)
        x = torch.view_as_complex(x)
        x = x.reshape(B, C, H_local, W // 2 + 1)
        x = self.ifft_handle(x, (H, W), (-2, -1), "ortho")
        x = x.type(dtype) + bias

        # gather
        if not self.output_is_matmul_parallel:
            x = gather_from_parallel_region(x, 1, "matmul")

        return x
|
modulus-main
|
modulus/utils/sfno/distributed/layers.py
|
# ignore_header_test
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
import torch.nn.functional as F
import torch.distributed as dist
from modulus.utils.sfno.distributed import comm
from torch._utils import _flatten_dense_tensors
def get_memory_format(tensor):  # pragma: no cover
    """Return the memory format of a tensor: channels_last if it is
    contiguous in that layout, otherwise the default contiguous format."""
    is_cl = tensor.is_contiguous(memory_format=torch.channels_last)
    return torch.channels_last if is_cl else torch.contiguous_format
def gather_uneven(tensor, dim, comm_name):  # pragma: no cover
    """Gather tensors with differing sizes along ``dim`` from all ranks of a
    communicator and concatenate them (all ranks receive the full result).

    Returns the input unchanged when the communicator has a single rank.
    """
    if comm.get_size(comm_name) == 1:
        return tensor

    # gather dims: first exchange each rank's size along `dim`
    dim_tensor = torch.tensor(
        [tensor.shape[dim]], dtype=torch.int, device=tensor.device
    )
    dim_list = [torch.empty_like(dim_tensor) for _ in range(comm.get_size(comm_name))]
    dim_list[comm.get_rank(comm_name)] = dim_tensor
    dist.all_gather(dim_list, dim_tensor, group=comm.get_group(comm_name))

    # gather tensor: allocate per-rank receive buffers with the right sizes
    gathered_shape = list(tensor.shape)
    tensor_list = []
    for rshape in dim_list:
        gathered_shape[dim] = rshape.item()
        tensor_list.append(
            torch.empty(gathered_shape, dtype=tensor.dtype, device=tensor.device)
        )
    tensor_list[comm.get_rank(comm_name)] = tensor
    dist.all_gather(tensor_list, tensor, group=comm.get_group(comm_name))

    # concatenate
    result = torch.cat(tensor_list, dim=dim)

    return result
def sync_params(model, mode="broadcast"): # pragma: no cover
"""Helper routine to ensure shared weights are the same after initialization"""
with torch.no_grad():
# distributed sync step
for param in model.parameters():
if not hasattr(param, "is_shared_mp"):
param.is_shared_mp = ["model"]
for comm_group in param.is_shared_mp:
if comm.get_size(comm_group) > 1:
if mode == "broadcast":
tlist = [
torch.empty_like(param)
for x in range(comm.get_size(comm_group))
]
tlist[comm.get_rank(comm_group)] = param
# gather all weights in the comm group
dist.all_gather(tlist, param, group=comm.get_group(comm_group))
# use weight of rank 0
# important to use copy here otherwise the handle gets detaches from the optimizer
param.copy_(tlist[0])
elif mode == "mean":
# coalesced = _flatten_dense_tensors(param)
dist.all_reduce(
param,
op=dist.ReduceOp.AVG,
group=comm.get_group(comm_group),
async_op=False,
)
# param.copy_(coalesced)
else:
raise ValueError(f"Unknown weight synchronization mode {mode}")
def pad_helper(tensor, dim, new_size, mode="zero"): # pragma: no cover
"""Helper routine to pad a tensor along a given dimension"""
ndim = tensor.ndim
dim = (dim + ndim) % ndim
ndim_pad = ndim - dim
output_shape = [0 for _ in range(2 * ndim_pad)]
orig_size = tensor.shape[dim]
output_shape[1] = new_size - orig_size
tensor_pad = F.pad(tensor, output_shape, mode="constant", value=0.0)
if mode == "conj":
lhs_slice = [
slice(0, x) if idx != dim else slice(orig_size, new_size)
for idx, x in enumerate(tensor.shape)
]
rhs_slice = [
slice(0, x) if idx != dim else slice(1, output_shape[1] + 1)
for idx, x in enumerate(tensor.shape)
]
tensor_pad[lhs_slice] = torch.flip(
torch.conj(tensor_pad[rhs_slice]), dims=[dim]
)
return tensor_pad
def truncate_helper(tensor, dim, new_size):  # pragma: no cover
    """Truncate ``tensor`` to ``new_size`` along ``dim``, preserving the
    input's memory format (channels_last or contiguous)."""
    # determine the memory format (inlined equivalent of get_memory_format)
    if tensor.is_contiguous(memory_format=torch.channels_last):
        input_format = torch.channels_last
    else:
        input_format = torch.contiguous_format

    dim = dim % tensor.ndim  # normalize negative dims
    selector = [slice(None)] * tensor.ndim
    selector[dim] = slice(0, new_size)
    return tensor[tuple(selector)].contiguous(memory_format=input_format)
def split_tensor_along_dim(tensor, dim, num_chunks):  # pragma: no cover
    """Split ``tensor`` into ``num_chunks`` equal pieces along ``dim``.

    Returns
    -------
    tuple of torch.Tensor
        The chunks produced by ``torch.split`` (views, not copies).

    Raises
    ------
    ValueError
        If ``dim`` is out of range or the dimension size is not evenly
        divisible by ``num_chunks``. (Previously plain ``assert``s, which
        are stripped under ``python -O``; also fixes the "numnber" typo.)
    """
    if dim >= tensor.dim():
        raise ValueError(
            f"Error, tensor dimension is {tensor.dim()} which cannot be split along {dim}"
        )
    if tensor.shape[dim] % num_chunks != 0:
        raise ValueError(
            f"Error, cannot split dim {dim} evenly. Dim size is "
            f"{tensor.shape[dim]} and requested number of splits is {num_chunks}"
        )
    chunk_size = tensor.shape[dim] // num_chunks
    return torch.split(tensor, chunk_size, dim=dim)
# distributed primitives
def _transpose(tensor, dim0, dim1, group=None, async_op=False):  # pragma: no cover
    """All-to-all transpose of a tensor across a model parallel group.

    Splits ``tensor`` along ``dim0`` into one chunk per rank and exchanges
    the chunks; the caller concatenates the received list along ``dim1``.
    Returns ``(recv_list, request_handle)``; the handle is None unless
    ``async_op`` is True.

    NOTE(review): ``tensor.shape[dim0]`` is assumed to be evenly divisible
    by the group size (floor division below) — confirm with callers.
    """
    # get input format
    input_format = get_memory_format(tensor)

    # get comm params
    comm_size = dist.get_world_size(group=group)

    # split and local transposition
    split_size = tensor.shape[dim0] // comm_size
    x_send = [
        y.contiguous(memory_format=input_format)
        for y in torch.split(tensor, split_size, dim=dim0)
    ]
    x_recv = [torch.empty_like(x_send[0]) for _ in range(comm_size)]

    # global transposition
    req = dist.all_to_all(x_recv, x_send, group=group, async_op=async_op)

    return x_recv, req
def _reduce(input_, use_fp32=True, group=None):  # pragma: no cover
    """All-reduce the input tensor across a model parallel group.

    When ``use_fp32`` is True, the reduction is carried out in float32 for
    numerical stability and the result is cast back to the original dtype.
    """
    # Bypass the function if we are using only 1 GPU.
    if dist.get_world_size(group=group) == 1:
        return input_

    # All-reduce.
    if use_fp32:
        dtype = input_.dtype
        inputf_ = input_.float()
        dist.all_reduce(inputf_, group=group)
        input_ = inputf_.to(dtype)
    else:
        dist.all_reduce(input_, group=group)

    return input_
def _split(input_, dim_, group=None):  # pragma: no cover
    """Split the tensor along ``dim_`` and keep the slice for this rank,
    preserving the input's memory format."""
    # get input format
    input_format = get_memory_format(input_)

    # Bypass the function if we are using only 1 GPU.
    comm_size = dist.get_world_size(group=group)
    if comm_size == 1:
        return input_

    # Split along last dimension.
    input_list = split_tensor_along_dim(input_, dim_, comm_size)

    # Note: torch.split does not create contiguous tensors by default.
    rank = dist.get_rank(group=group)
    output = input_list[rank].contiguous(memory_format=input_format)

    return output
def _gather(input_, dim_, group=None):  # pragma: no cover
    """Gather tensors from all ranks and concatenate along ``dim_``,
    preserving the input's memory format."""
    # get input format
    input_format = get_memory_format(input_)

    comm_size = dist.get_world_size(group=group)
    # Bypass the function if we are using only 1 GPU.
    if comm_size == 1:
        return input_

    # sanity checks
    assert (
        dim_ < input_.dim()
    ), f"Error, cannot gather along {dim_} for tensor with {input_.dim()} dimensions."

    # Size and dimension.
    comm_rank = dist.get_rank(group=group)

    input_ = input_.contiguous(memory_format=input_format)
    tensor_list = [torch.empty_like(input_) for _ in range(comm_size)]
    tensor_list[comm_rank] = input_
    dist.all_gather(tensor_list, input_, group=group)

    # Note: torch.cat already creates a contiguous tensor.
    output = torch.cat(tensor_list, dim=dim_).contiguous(memory_format=input_format)

    return output
|
modulus-main
|
modulus/utils/sfno/distributed/helpers.py
|
# ignore_header_test
# coding=utf-8
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
import torch.distributed as dist
from torch.nn.parallel import DistributedDataParallel
from modulus.utils.sfno.distributed import comm
# torch utils
from torch._utils import _flatten_dense_tensors, _unflatten_dense_tensors
# helper functions
from modulus.utils.sfno.distributed.helpers import _reduce
from modulus.utils.sfno.distributed.helpers import _split
from modulus.utils.sfno.distributed.helpers import _gather
# generalized
class _CopyToParallelRegion(torch.autograd.Function):
    """Identity in the forward pass; all-reduces gradients in backward."""

    @staticmethod
    def symbolic(graph, input_, comm_id_):  # pragma: no cover
        """ONNX symbolic: a plain pass-through."""
        return input_

    @staticmethod
    def forward(ctx, input_, comm_id_):  # pragma: no cover
        """Remember the communicator id and forward the tensor unchanged."""
        ctx.comm_id = comm_id_
        return input_

    @staticmethod
    def backward(ctx, grad_output):  # pragma: no cover
        """Sum gradients over the parallel region (no-op when not distributed)."""
        if not comm.is_distributed(ctx.comm_id):
            return grad_output, None
        return _reduce(grad_output, group=comm.get_group(ctx.comm_id)), None
class _ReduceFromParallelRegion(torch.autograd.Function):
    """All-reduce in the forward pass; identity gradient in backward."""

    @staticmethod
    def symbolic(graph, input_, comm_id_):  # pragma: no cover
        """ONNX symbolic: mirrors the forward all-reduce."""
        if not comm.is_distributed(comm_id_):
            return input_
        return _reduce(input_, group=comm.get_group(comm_id_))

    @staticmethod
    def forward(ctx, input_, comm_id_):  # pragma: no cover
        """All-reduce ``input_`` over the communicator (no-op when not distributed)."""
        if not comm.is_distributed(comm_id_):
            return input_
        return _reduce(input_, group=comm.get_group(comm_id_))

    @staticmethod
    def backward(ctx, grad_output):  # pragma: no cover
        """The gradient of an all-reduce is the identity on each rank."""
        return grad_output, None
class _ScatterToParallelRegion(torch.autograd.Function):
    """Split the input along a dimension and keep only this rank's chunk."""

    @staticmethod
    def symbolic(graph, input_, dim_, comm_id_):  # pragma: no cover
        """ONNX symbolic: split and keep the local chunk."""
        return _split(input_, dim_, group=comm.get_group(comm_id_))

    @staticmethod
    def forward(ctx, input_, dim_, comm_id_):  # pragma: no cover
        """Keep only this rank's slice along ``dim_`` (no-op when not distributed)."""
        ctx.dim = dim_
        ctx.comm_id = comm_id_
        if not comm.is_distributed(comm_id_):
            return input_
        return _split(input_, dim_, group=comm.get_group(comm_id_))

    @staticmethod
    def backward(ctx, grad_output):  # pragma: no cover
        """The gradient of a scatter is a gather of the per-rank gradients."""
        if not comm.is_distributed(ctx.comm_id):
            return grad_output, None, None
        gathered = _gather(grad_output, ctx.dim, group=comm.get_group(ctx.comm_id))
        return gathered, None, None
class _GatherFromParallelRegion(torch.autograd.Function):
    """Gather chunks from the parallel region and concatenate them."""

    @staticmethod
    def symbolic(graph, input_, dim_, comm_id_):  # pragma: no cover
        """ONNX symbolic: mirrors the forward gather."""
        if not comm.is_distributed(comm_id_):
            return input_
        return _gather(input_, dim_, group=comm.get_group(comm_id_))

    @staticmethod
    def forward(ctx, input_, dim_, comm_id_):  # pragma: no cover
        """Gather along ``dim_`` across the communicator (no-op when not distributed)."""
        ctx.dim = dim_
        ctx.comm_id = comm_id_
        if not comm.is_distributed(comm_id_):
            return input_
        return _gather(input_, dim_, group=comm.get_group(comm_id_))

    @staticmethod
    def backward(ctx, grad_output):  # pragma: no cover
        """The gradient of a gather is a split back to the per-rank chunks."""
        if not comm.is_distributed(ctx.comm_id):
            return grad_output, None, None
        chunk = _split(grad_output, ctx.dim, group=comm.get_group(ctx.comm_id))
        return chunk, None, None
# -----------------
# Helper functions.
# -----------------
# matmul parallel
def copy_to_parallel_region(input_, comm_name):  # pragma: no cover
    """Autograd-aware copy of ``input_`` into the parallel region ``comm_name``."""
    op = _CopyToParallelRegion.apply
    return op(input_, comm_name)
def reduce_from_parallel_region(input_, comm_name):  # pragma: no cover
    """Autograd-aware all-reduce of ``input_`` over the region ``comm_name``."""
    op = _ReduceFromParallelRegion.apply
    return op(input_, comm_name)
def scatter_to_parallel_region(input_, dim, comm_name):  # pragma: no cover
    """Autograd-aware scatter of ``input_`` along ``dim`` over region ``comm_name``."""
    op = _ScatterToParallelRegion.apply
    return op(input_, dim, comm_name)
def gather_from_parallel_region(input_, dim, comm_name):  # pragma: no cover
    """Autograd-aware gather of ``input_`` along ``dim`` over region ``comm_name``."""
    op = _GatherFromParallelRegion.apply
    return op(input_, dim, comm_name)
# handler for additional gradient reductions
# helper for gradient reduction across channel parallel ranks
def init_gradient_reduction_hooks(
    model,
    device_ids,
    output_device,
    bucket_cap_mb=25,
    broadcast_buffers=True,
    find_unused_parameters=False,
    gradient_as_bucket_view=True,
    static_graph=False,
):  # pragma: no cover
    """
    Initialize gradient reduction hooks for a given model.

    Wraps ``model`` in ``DistributedDataParallel``. When model parallelism is
    active and only *some* parameters are shared across model-parallel ranks,
    a custom DDP communication hook is registered that, after the
    data-parallel average, additionally sums the gradients of shared
    parameters over their respective communicator groups.

    Parameters
    ----------
    model : torch.nn.Module
        Model whose gradients should be reduced.
    device_ids, output_device, bucket_cap_mb, broadcast_buffers,
    find_unused_parameters, gradient_as_bucket_view, static_graph
        Forwarded to ``DistributedDataParallel``.

    Returns
    -------
    torch.nn.Module
        The (possibly DDP-wrapped) model.
    """
    # early exit if we are not in a distributed setting:
    if not dist.is_initialized():
        return model

    # set this to false in init and then find out if we can use it:
    need_hooks = False
    ddp_group = comm.get_group("data")

    # this is the trivial case
    if comm.get_size("model") == 1:
        # the simple case, we can just continue then
        ddp_group = None
    else:
        # count parameters and reduction groups
        num_parameters_total = 0
        num_parameters_shared_model = 0
        for param in model.parameters():
            # if it does not have any annotation, we assume it is shared
            # between all model ranks
            if not hasattr(param, "is_shared_mp"):
                param.is_shared_mp = ["model"]
            num_parameters_total += 1
            if "model" in param.is_shared_mp:
                num_parameters_shared_model += 1

        # if all parameters are shared between all model ranks, plain DDP over
        # all ranks suffices
        if num_parameters_shared_model == num_parameters_total:
            ddp_group = None
            # register some pre-multiply reduction hooks: DDP averages over
            # all ranks, so shared parameters must be scaled back up by the
            # model-parallel group size
            print(
                "Setting up gradient hooks to account for shared parameter multiplicity"
            )
            for param in model.parameters():
                param.register_hook(lambda grad: grad * float(comm.get_size("model")))
        else:
            ddp_group = comm.get_group("data")
            broadcast_buffers = False
            need_hooks = True

    # we can set up DDP and exit here
    print("Setting up DDP communication hooks")
    model = DistributedDataParallel(
        model,
        device_ids=device_ids,
        output_device=output_device,
        bucket_cap_mb=bucket_cap_mb,
        broadcast_buffers=broadcast_buffers,
        find_unused_parameters=find_unused_parameters,
        gradient_as_bucket_view=gradient_as_bucket_view,
        static_graph=static_graph,
        process_group=ddp_group,
    )
    if not need_hooks:
        return model

    print("Setting up custom communication hooks")

    # define comm hook:
    def reduction_comm_hook(
        state: object, bucket: dist.GradBucket
    ) -> torch.futures.Future[torch.Tensor]:  # pragma: no cover
        """Average over data-parallel ranks, then sum shared-parameter grads."""
        # allreduce everything first:
        buff = bucket.buffer()
        # get future for allreduce
        fut = dist.all_reduce(
            buff, op=dist.ReduceOp.AVG, group=comm.get_group("data"), async_op=True
        ).get_future()

        # get grads for shared weights
        params = bucket.parameters()

        def grad_reduction(fut, grads, group):
            """reduce remaining gradients"""
            coalesced = _flatten_dense_tensors(grads)
            dist.all_reduce(
                coalesced,
                op=dist.ReduceOp.SUM,
                group=comm.get_group(group),
                async_op=False,
            )
            for buf, synced in zip(grads, _unflatten_dense_tensors(coalesced, grads)):
                buf.copy_(synced)
            return bucket.buffer()

        for group in comm.get_names():
            if group == "data":
                continue
            grads = [p.grad.data for p in params if group in p.is_shared_mp]
            if not grads:
                continue
            # append the new reduction functions.
            # BUG FIX: bind `grads` and `group` as default arguments — Python
            # closures are late-binding, so without this every queued callback
            # would see the values from the *last* loop iteration.
            fut = fut.then(
                lambda f, grads=grads, group=group: grad_reduction(
                    f, grads=grads, group=group
                )
            )

        return fut

    # register model comm hook
    model.register_comm_hook(state=None, hook=reduction_comm_hook)

    return model
|
modulus-main
|
modulus/utils/sfno/distributed/mappings.py
|
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
import torch.nn as nn
from torch.cuda import amp
from typing import Tuple
# for spatial model-parallelism
from modulus.utils.sfno.distributed import comm
from modulus.utils.sfno.distributed.mappings import (
gather_from_parallel_region,
copy_to_parallel_region,
)
class DistributedInstanceNorm2d(nn.Module):  # pragma: no cover
    """
    Computes a distributed instance norm using Welford's online algorithm

    Each rank computes statistics over its local spatial shard; the partial
    means/variances are then combined across the "spatial" communicator,
    either by gathering the full field first ("naive") or by merging the
    per-rank moments with Welford's update ("welford", the default).

    Parameters
    ----------
    num_features : int
        Number of channels; only used to size the affine parameters.
    eps : float, optional
        Constant added to the variance before the square root, by default 1e-05.
    affine : bool, optional
        If True, apply a learnable per-channel scale and shift, by default False.
    device, dtype : optional
        Accepted for signature compatibility; not used by this implementation.
    """

    def __init__(
        self, num_features, eps=1e-05, affine=False, device=None, dtype=None
    ):  # pragma: no cover
        """Set up normalization constants and optional affine parameters."""
        super(DistributedInstanceNorm2d, self).__init__()
        self.eps = eps
        self.affine = affine
        if self.affine:
            self.weight = nn.Parameter(torch.ones(num_features))
            self.bias = nn.Parameter(torch.zeros(num_features))
            # mark the affine parameters as replicated across spatial ranks
            # (consumed by the gradient-reduction setup)
            self.weight.is_shared_mp = ["spatial"]
            self.bias.is_shared_mp = ["spatial"]

        # which reduction strategy forward() uses: "naive" or "welford"
        self.gather_mode = "welford"

    @torch.jit.ignore
    def _gather_hw(self, x: torch.Tensor) -> torch.Tensor:  # pragma: no cover
        # gather the data over the spatial communicator: first along H ("h"),
        # then along W ("w"), reconstructing the full spatial field
        xh = gather_from_parallel_region(x, -2, "h")
        xw = gather_from_parallel_region(xh, -1, "w")
        return xw

    @torch.jit.ignore
    def _gather_spatial(self, x: torch.Tensor) -> torch.Tensor:  # pragma: no cover
        # gather the data over the combined "spatial" communicator
        xs = gather_from_parallel_region(x, -1, "spatial")
        return xs

    def _stats_naive(
        self, x: torch.Tensor
    ) -> Tuple[torch.Tensor, torch.Tensor]:  # pragma: no cover
        """Computes the statistics in the naive way by first gathering the tensors and then computing them"""
        x = self._gather_hw(x)
        var, mean = torch.var_mean(x, dim=(-2, -1), unbiased=False, keepdim=True)
        return var, mean

    def _stats_welford(
        self, x: torch.Tensor
    ) -> Tuple[torch.Tensor, torch.Tensor]:  # pragma: no cover
        """Computes the statistics locally, then uses the Welford online algorithm to reduce them"""
        var, mean = torch.var_mean(x, dim=(-2, -1), unbiased=False, keepdim=False)
        # workaround to not use shapes, as otherwise cuda graphs won't work
        count = torch.ones_like(x[0, 0], requires_grad=False)
        count = torch.sum(count, dim=(-2, -1), keepdim=False)

        # collect the per-rank partial statistics; last dim indexes the rank
        vars = self._gather_spatial(var.unsqueeze(-1))
        means = self._gather_spatial(mean.unsqueeze(-1))
        counts = self._gather_spatial(count.unsqueeze(-1))

        # m2 = sum of squared deviations = var * n (biased variance was used)
        m2s = vars * counts
        mean = means[..., 0]
        m2 = m2s[..., 0]
        count = counts[..., 0]

        # use Welford's algorithm to accumulate them into a single mean and variance
        for i in range(1, comm.get_size("spatial")):
            delta = means[..., i] - mean
            m2 = (
                m2
                + m2s[..., i]
                + delta**2 * count * counts[..., i] / (count + counts[..., i])
            )
            if i == 1:
                # explicit weighted mean; algebraically equivalent to the
                # incremental update in the else-branch
                mean = (mean * count + means[..., i] * counts[..., i]) / (
                    count + counts[..., i]
                )
            else:
                mean = mean + delta * counts[..., i] / (count + counts[..., i])
            # update the current count
            count = count + counts[..., i]

        var = m2 / count
        # reshape to broadcast over (N, C, H, W) in forward()
        var = var.reshape(1, -1, 1, 1)
        mean = mean.reshape(1, -1, 1, 1)
        return var, mean

    def forward(self, x: torch.Tensor) -> torch.Tensor:  # pragma: no cover
        """Normalize ``x`` per channel using globally consistent statistics."""
        # statistics are accumulated in fp32 regardless of autocast state
        with amp.autocast(enabled=False):
            dtype = x.dtype
            x = x.float()
            # start by computing std and mean
            if self.gather_mode == "naive":
                var, mean = self._stats_naive(x)
            elif self.gather_mode == "welford":
                var, mean = self._stats_welford(x)
            else:
                raise ValueError(f"Unknown gather mode {self.gather_mode}")
        # this is absolutely necessary to get the correct graph in the backward pass
        mean = copy_to_parallel_region(mean, "spatial")
        var = copy_to_parallel_region(var, "spatial")
        x = x.to(dtype)
        mean = mean.to(dtype)
        var = var.to(dtype)
        # apply the normalization
        x = (x - mean) / torch.sqrt(var + self.eps)
        # affine transform if we use it
        if self.affine:
            x = self.weight.reshape(-1, 1, 1) * x + self.bias.reshape(-1, 1, 1)
        return x
|
modulus-main
|
modulus/utils/sfno/distributed/layer_norm.py
|
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import torch
import json
import numpy as np
from torch import Tensor
from sklearn.neighbors import NearestNeighbors
import logging
from .graph_utils import (
cell_to_adj,
create_graph,
create_heterograph,
add_edge_features,
add_node_features,
latlon2xyz,
get_edge_len,
)
logger = logging.getLogger(__name__)
class Graph:
    """Graph class for creating the graph2mesh, multimesh, and mesh2graph graphs.

    Parameters
    ----------
    icospheres_path : str
        Path to the icospheres json file.
        If the file does not exist, it will try to generate it using PyMesh.
    lat_lon_grid : Tensor
        Tensor with shape (lat, lon, 2) that includes the latitudes and longitudes
        meshgrid.
    dtype : torch.dtype, optional
        Data type of the graph, by default torch.float
    """

    def __init__(
        self, icospheres_path: str, lat_lon_grid: Tensor, dtype=torch.float
    ) -> None:
        self.dtype = dtype
        # Get or generate the icospheres
        try:
            icospheres = self._load_icospheres(icospheres_path)
            logger.info(f"Opened pre-computed graph at {icospheres_path}.")
        except (FileNotFoundError, json.JSONDecodeError):
            from modulus.utils.graphcast.icospheres import (
                generate_and_save_icospheres,
            )  # requires PyMesh

            logger.info(
                f"Could not open {icospheres_path}...generating mesh from scratch."
            )
            generate_and_save_icospheres()
            # BUG FIX: the original code never assigned `icospheres` on this
            # path, which raised NameError below. Reload the generated file.
            # NOTE(review): assumes generate_and_save_icospheres() writes to
            # `icospheres_path` — confirm its default output location.
            icospheres = self._load_icospheres(icospheres_path)
        self.icospheres = icospheres
        # NOTE(review): -2 presumably accounts for extra "faces"-matching keys
        # in the json — confirm against the icospheres file schema.
        self.max_order = (
            len([key for key in self.icospheres.keys() if "faces" in key]) - 2
        )

        # flatten lat/lon grid to shape (lat*lon, 2)
        self.lat_lon_grid_flat = lat_lon_grid.permute(2, 0, 1).view(2, -1).permute(1, 0)

    @staticmethod
    def _load_icospheres(path: str) -> dict:
        """Load the icospheres json and convert list entries to numpy arrays."""
        with open(path, "r") as f:
            loaded_dict = json.load(f)
        return {
            key: (np.array(value) if isinstance(value, list) else value)
            for key, value in loaded_dict.items()
        }

    def create_mesh_graph(self, verbose: bool = True) -> Tensor:
        """Create the multimesh graph.

        Parameters
        ----------
        verbose : bool, optional
            verbosity, by default True

        Returns
        -------
        DGLGraph
            Multimesh graph.
        """
        # create the bi-directional mesh graph by stacking the faces of all
        # icosphere refinement orders
        multimesh_faces = self.icospheres["order_0_faces"]
        for i in range(1, self.max_order + 1):
            multimesh_faces = np.concatenate(
                (multimesh_faces, self.icospheres["order_" + str(i) + "_faces"])
            )

        src, dst = cell_to_adj(multimesh_faces)
        mesh_graph = create_graph(
            src, dst, to_bidirected=True, add_self_loop=False, dtype=torch.int32
        )
        mesh_pos = torch.tensor(
            self.icospheres["order_" + str(self.max_order) + "_vertices"],
            dtype=torch.float32,
        )
        mesh_graph = add_edge_features(mesh_graph, mesh_pos)
        mesh_graph = add_node_features(mesh_graph, mesh_pos)
        # ensure fields set to dtype to avoid later conversions
        mesh_graph.ndata["x"] = mesh_graph.ndata["x"].to(dtype=self.dtype)
        mesh_graph.edata["x"] = mesh_graph.edata["x"].to(dtype=self.dtype)
        if verbose:
            print("mesh graph:", mesh_graph)
        return mesh_graph

    def create_g2m_graph(self, verbose: bool = True) -> Tensor:
        """Create the graph2mesh graph.

        Parameters
        ----------
        verbose : bool, optional
            verbosity, by default True

        Returns
        -------
        DGLGraph
            Graph2mesh graph.
        """
        # get the max edge length of icosphere with max order, checking all
        # three edges of each triangular face
        edge_src = self.icospheres["order_" + str(self.max_order) + "_vertices"][
            self.icospheres["order_" + str(self.max_order) + "_faces"][:, 0]
        ]
        edge_dst = self.icospheres["order_" + str(self.max_order) + "_vertices"][
            self.icospheres["order_" + str(self.max_order) + "_faces"][:, 1]
        ]
        edge_len_1 = np.max(get_edge_len(edge_src, edge_dst))
        edge_src = self.icospheres["order_" + str(self.max_order) + "_vertices"][
            self.icospheres["order_" + str(self.max_order) + "_faces"][:, 0]
        ]
        edge_dst = self.icospheres["order_" + str(self.max_order) + "_vertices"][
            self.icospheres["order_" + str(self.max_order) + "_faces"][:, 2]
        ]
        edge_len_2 = np.max(get_edge_len(edge_src, edge_dst))
        edge_src = self.icospheres["order_" + str(self.max_order) + "_vertices"][
            self.icospheres["order_" + str(self.max_order) + "_faces"][:, 1]
        ]
        edge_dst = self.icospheres["order_" + str(self.max_order) + "_vertices"][
            self.icospheres["order_" + str(self.max_order) + "_faces"][:, 2]
        ]
        edge_len_3 = np.max(get_edge_len(edge_src, edge_dst))
        edge_len = max([edge_len_1, edge_len_2, edge_len_3])

        # create the grid2mesh bipartite graph: connect each grid point to its
        # mesh-vertex neighbors within 0.6 * max edge length
        cartesian_grid = latlon2xyz(self.lat_lon_grid_flat)
        n_nbrs = 4
        neighbors = NearestNeighbors(n_neighbors=n_nbrs).fit(
            self.icospheres["order_" + str(self.max_order) + "_vertices"]
        )
        distances, indices = neighbors.kneighbors(cartesian_grid)

        src, dst = [], []
        for i in range(len(cartesian_grid)):
            for j in range(n_nbrs):
                if distances[i][j] <= 0.6 * edge_len:
                    src.append(i)
                    dst.append(indices[i][j])

        # NOTE this gives 1,624,344 edges, in the paper it is 1,618,746
        # this number is very sensitive to the chosen edge_len, not clear
        # in the paper what they use.
        g2m_graph = create_heterograph(
            src, dst, ("grid", "g2m", "mesh"), dtype=torch.int32
        )
        g2m_graph.srcdata["pos"] = cartesian_grid.to(torch.float32)
        g2m_graph.dstdata["pos"] = torch.tensor(
            self.icospheres["order_" + str(self.max_order) + "_vertices"],
            dtype=torch.float32,
        )
        g2m_graph = add_edge_features(
            g2m_graph, (g2m_graph.srcdata["pos"], g2m_graph.dstdata["pos"])
        )
        # avoid potential conversions at later points
        g2m_graph.srcdata["pos"] = g2m_graph.srcdata["pos"].to(dtype=self.dtype)
        g2m_graph.dstdata["pos"] = g2m_graph.dstdata["pos"].to(dtype=self.dtype)
        g2m_graph.ndata["pos"]["grid"] = g2m_graph.ndata["pos"]["grid"].to(
            dtype=self.dtype
        )
        g2m_graph.ndata["pos"]["mesh"] = g2m_graph.ndata["pos"]["mesh"].to(
            dtype=self.dtype
        )
        g2m_graph.edata["x"] = g2m_graph.edata["x"].to(dtype=self.dtype)
        if verbose:
            print("g2m graph:", g2m_graph)
        return g2m_graph

    def create_m2g_graph(self, verbose: bool = True) -> Tensor:
        """Create the mesh2grid graph.

        Parameters
        ----------
        verbose : bool, optional
            verbosity, by default True

        Returns
        -------
        DGLGraph
            Mesh2grid graph.
        """
        # create the mesh2grid bipartite graph: connect each grid point to the
        # three vertices of its containing (nearest-centroid) mesh face
        cartesian_grid = latlon2xyz(self.lat_lon_grid_flat)
        n_nbrs = 1
        neighbors = NearestNeighbors(n_neighbors=n_nbrs).fit(
            self.icospheres["order_" + str(self.max_order) + "_face_centroid"]
        )
        _, indices = neighbors.kneighbors(cartesian_grid)
        indices = indices.flatten()

        src = [
            p
            for i in indices
            for p in self.icospheres["order_" + str(self.max_order) + "_faces"][i]
        ]
        dst = [i for i in range(len(cartesian_grid)) for _ in range(3)]
        m2g_graph = create_heterograph(
            src, dst, ("mesh", "m2g", "grid"), dtype=torch.int32
        )  # number of edges is 3,114,720, exactly matches with the paper
        m2g_graph.srcdata["pos"] = torch.tensor(
            self.icospheres["order_" + str(self.max_order) + "_vertices"],
            dtype=torch.float32,
        )
        m2g_graph.dstdata["pos"] = cartesian_grid.to(dtype=torch.float32)
        m2g_graph = add_edge_features(
            m2g_graph, (m2g_graph.srcdata["pos"], m2g_graph.dstdata["pos"])
        )
        # avoid potential conversions at later points
        m2g_graph.srcdata["pos"] = m2g_graph.srcdata["pos"].to(dtype=self.dtype)
        m2g_graph.dstdata["pos"] = m2g_graph.dstdata["pos"].to(dtype=self.dtype)
        m2g_graph.ndata["pos"]["grid"] = m2g_graph.ndata["pos"]["grid"].to(
            dtype=self.dtype
        )
        m2g_graph.ndata["pos"]["mesh"] = m2g_graph.ndata["pos"]["mesh"].to(
            dtype=self.dtype
        )
        m2g_graph.edata["x"] = m2g_graph.edata["x"].to(dtype=self.dtype)
        if verbose:
            print("m2g graph:", m2g_graph)
        return m2g_graph
|
modulus-main
|
modulus/utils/graphcast/graph.py
|
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import dgl
from dgl import DGLGraph
import torch
from torch import Tensor, testing
import numpy as np
from torch.nn import functional as F
from typing import List, Tuple, Union
def create_graph(
    src: List,
    dst: List,
    to_bidirected: bool = True,
    add_self_loop: bool = False,
    dtype: torch.dtype = torch.int32,
) -> DGLGraph:
    """
    Build a DGL graph from an adjacency matrix given in COO format.

    Parameters
    ----------
    src : List
        List of source nodes
    dst : List
        List of destination nodes
    to_bidirected : bool, optional
        Whether to make the graph bidirectional, by default True
    add_self_loop : bool, optional
        Whether to add self loop to the graph, by default False
    dtype : torch.dtype, optional
        Graph index data type, by default torch.int32

    Returns
    -------
    DGLGraph
        The dgl Graph.
    """
    g = dgl.graph((src, dst), idtype=dtype)
    if to_bidirected:
        g = dgl.to_bidirected(g)
    if add_self_loop:
        g = dgl.add_self_loop(g)
    return g
def create_heterograph(
    src: List, dst: List, labels: str, dtype: torch.dtype = torch.int32
) -> DGLGraph:
    """Build a heterogeneous DGL graph from an adjacency matrix in COO format.

    Parameters
    ----------
    src : List
        List of source nodes
    dst : List
        List of destination nodes
    labels : str
        Label of the edge type (a (src_type, edge_type, dst_type) triple)
    dtype : torch.dtype, optional
        Graph index data type, by default torch.int32

    Returns
    -------
    DGLGraph
        The dgl Graph.
    """
    coo_spec = {labels: ("coo", (src, dst))}
    return dgl.heterograph(coo_spec, idtype=dtype)
def add_edge_features(graph: DGLGraph, pos: Tensor, normalize: bool = True) -> DGLGraph:
    """Adds edge features to the graph.

    Each edge receives a 4-vector: the source→destination displacement in the
    destination node's local coordinate frame, plus the displacement norm.

    Parameters
    ----------
    graph : DGLGraph
        The graph to add edge features to.
    pos : Tensor
        The node positions; may be a (src_pos, dst_pos) tuple for bipartite
        graphs.
    normalize : bool, optional
        Whether to normalize the edge features by the longest edge, by default True

    Returns
    -------
    DGLGraph
        The graph with edge features.

    Raises
    ------
    ValueError
        If the rotation into the local coordinate system fails verification.
    """
    if isinstance(pos, tuple):
        src_pos, dst_pos = pos
    else:
        src_pos = dst_pos = pos
    src, dst = graph.edges()
    src_pos, dst_pos = src_pos[src.long()], dst_pos[dst.long()]
    dst_latlon = xyz2latlon(dst_pos, unit="rad")
    dst_lat, dst_lon = dst_latlon[:, 0], dst_latlon[:, 1]
    # azimuthal & polar rotation
    theta_azimuthal = azimuthal_angle(dst_lon)
    theta_polar = polar_angle(dst_lat)
    src_pos = geospatial_rotation(src_pos, theta=theta_azimuthal, axis="z", unit="rad")
    dst_pos = geospatial_rotation(dst_pos, theta=theta_azimuthal, axis="z", unit="rad")
    # y values should be zero after the azimuthal rotation
    try:
        testing.assert_close(dst_pos[:, 1], torch.zeros_like(dst_pos[:, 1]))
    except AssertionError as err:
        # catch only the comparison failure and chain the mismatch details
        # instead of swallowing them (was a bare `except:`)
        raise ValueError(
            "Invalid projection of edge nodes to local coordinate system"
        ) from err
    src_pos = geospatial_rotation(src_pos, theta=theta_polar, axis="y", unit="rad")
    dst_pos = geospatial_rotation(dst_pos, theta=theta_polar, axis="y", unit="rad")
    # x values should be one, y & z values should be zero
    try:
        testing.assert_close(dst_pos[:, 0], torch.ones_like(dst_pos[:, 0]))
        testing.assert_close(dst_pos[:, 1], torch.zeros_like(dst_pos[:, 1]))
        testing.assert_close(dst_pos[:, 2], torch.zeros_like(dst_pos[:, 2]))
    except AssertionError as err:
        raise ValueError(
            "Invalid projection of edge nodes to local coordinate system"
        ) from err
    # prepare edge features
    disp = src_pos - dst_pos
    disp_norm = torch.linalg.norm(disp, dim=-1, keepdim=True)
    # normalize using the longest edge
    if normalize:
        max_disp_norm = torch.max(disp_norm)
        graph.edata["x"] = torch.cat(
            (disp / max_disp_norm, disp_norm / max_disp_norm), dim=-1
        )
    else:
        graph.edata["x"] = torch.cat((disp, disp_norm), dim=-1)
    return graph
def add_node_features(graph: DGLGraph, pos: Tensor) -> DGLGraph:
    """Add cosine of latitude and sine/cosine of longitude as node features.

    Parameters
    ----------
    graph : DGLGraph
        The graph to add node features to.
    pos : Tensor
        The node positions.

    Returns
    -------
    graph : DGLGraph
        The graph with node features.
    """
    latlon = xyz2latlon(pos)
    lat = latlon[:, 0]
    lon = latlon[:, 1]
    features = (torch.cos(lat), torch.sin(lon), torch.cos(lon))
    graph.ndata["x"] = torch.stack(features, dim=-1)
    return graph
def latlon2xyz(latlon: Tensor, radius: float = 1, unit: str = "deg") -> Tensor:
    """
    Convert latitude/longitude pairs to Cartesian xyz on a sphere.

    Based on: https://stackoverflow.com/questions/1185408
    - The x-axis goes through long,lat (0,0);
    - The y-axis goes through (0,90);
    - The z-axis goes through the poles.

    Parameters
    ----------
    latlon : Tensor
        Tensor of shape (N, 2) containing latitudes and longitudes
    radius : float, optional
        Radius of the sphere, by default 1
    unit : str, optional
        Unit of the latlon, by default "deg"

    Returns
    -------
    Tensor
        Tensor of shape (N, 3) containing x, y, z coordinates
    """
    if unit == "deg":
        latlon = deg2rad(latlon)
    elif unit != "rad":
        raise ValueError("Not a valid unit")
    lat = latlon[:, 0]
    lon = latlon[:, 1]
    cos_lat = torch.cos(lat)
    return torch.stack(
        (
            radius * cos_lat * torch.cos(lon),
            radius * cos_lat * torch.sin(lon),
            radius * torch.sin(lat),
        ),
        dim=1,
    )
def xyz2latlon(xyz: Tensor, radius: float = 1, unit: str = "deg") -> Tensor:
    """
    Convert Cartesian xyz coordinates on a sphere to latitude/longitude.

    Based on: https://stackoverflow.com/questions/1185408
    - The x-axis goes through long,lat (0,0);
    - The y-axis goes through (0,90);
    - The z-axis goes through the poles.

    Parameters
    ----------
    xyz : Tensor
        Tensor of shape (N, 3) containing x, y, z coordinates
    radius : float, optional
        Radius of the sphere, by default 1
    unit : str, optional
        Unit of the latlon, by default "deg"

    Returns
    -------
    Tensor
        Tensor of shape (N, 2) containing latitudes and longitudes
    """
    lat = torch.arcsin(xyz[:, 2] / radius)
    lon = torch.arctan2(xyz[:, 1], xyz[:, 0])
    if unit == "rad":
        return torch.stack((lat, lon), dim=1)
    if unit == "deg":
        return torch.stack((rad2deg(lat), rad2deg(lon)), dim=1)
    raise ValueError("Not a valid unit")
def geospatial_rotation(
    invar: Tensor, theta: Tensor, axis: str, unit: str = "rad"
) -> Tensor:
    """Rotate points about a coordinate axis using the right hand rule.

    Parameters
    ----------
    invar : Tensor
        Tensor of shape (N, 3) containing x, y, z coordinates
    theta : Tensor
        Tensor of shape (N, ) containing the rotation angle
    axis : str
        Axis of rotation ("x", "y", or "z")
    unit : str, optional
        Unit of the theta, by default "rad"

    Returns
    -------
    Tensor
        Tensor of shape (N, 3) containing the rotated x, y, z coordinates
        (squeezed, so a single point comes back with shape (3,))

    Raises
    ------
    ValueError
        If ``unit`` or ``axis`` is not recognized.
    """
    # BUG FIX: the original converted `invar` (the coordinates!) with rad2deg
    # here; the angle `theta` must instead be converted from degrees to
    # radians, since torch.cos/torch.sin expect radians.
    if unit == "deg":
        theta = torch.deg2rad(theta)
    elif unit != "rad":
        raise ValueError("Not a valid unit")
    invar = torch.unsqueeze(invar, -1)
    rotation = torch.zeros((theta.size(0), 3, 3))
    cos = torch.cos(theta)
    sin = torch.sin(theta)
    if axis == "x":
        rotation[:, 0, 0] += 1.0
        rotation[:, 1, 1] += cos
        rotation[:, 1, 2] -= sin
        rotation[:, 2, 1] += sin
        rotation[:, 2, 2] += cos
    elif axis == "y":
        rotation[:, 0, 0] += cos
        rotation[:, 0, 2] += sin
        rotation[:, 1, 1] += 1.0
        rotation[:, 2, 0] -= sin
        rotation[:, 2, 2] += cos
    elif axis == "z":
        rotation[:, 0, 0] += cos
        rotation[:, 0, 1] -= sin
        rotation[:, 1, 0] += sin
        rotation[:, 1, 1] += cos
        rotation[:, 2, 2] += 1.0
    else:
        raise ValueError("Invalid axis")
    outvar = torch.matmul(rotation, invar)
    outvar = outvar.squeeze()
    return outvar
def azimuthal_angle(lon: Tensor) -> Tensor:
    """
    Gives the azimuthal angle of a point on the sphere

    Parameters
    ----------
    lon : Tensor
        Tensor of shape (N, ) containing the longitude of the point

    Returns
    -------
    Tensor
        Tensor of shape (N, ) containing the azimuthal angle
    """
    # negative longitudes map to -lon, non-negative ones to 2*pi - lon
    return torch.where(lon < 0.0, -lon, 2 * np.pi - lon)
def polar_angle(lat: Tensor) -> Tensor:
    """
    Gives the polar angle of a point on the sphere

    Parameters
    ----------
    lat : Tensor
        Tensor of shape (N, ) containing the latitude of the point

    Returns
    -------
    Tensor
        Tensor of shape (N, ) containing the polar angle
    """
    # negative latitudes wrap around by 2*pi, non-negative ones pass through
    return torch.where(lat < 0.0, 2 * np.pi + lat, lat)
def deg2rad(deg: Tensor) -> Tensor:
    """Converts degrees to radians

    Parameters
    ----------
    deg :
        Tensor of shape (N, ) containing the degrees

    Returns
    -------
    Tensor
        Tensor of shape (N, ) containing the radians
    """
    scaled = deg * np.pi
    return scaled / 180
def rad2deg(rad):
    """Converts radians to degrees

    Parameters
    ----------
    rad :
        Tensor of shape (N, ) containing the radians

    Returns
    -------
    Tensor
        Tensor of shape (N, ) containing the degrees
    """
    scaled = rad * 180
    return scaled / np.pi
def get_edge_len(edge_src: Tensor, edge_dst: Tensor, axis: int = 1):
    """Euclidean length of each edge.

    Parameters
    ----------
    edge_src : Tensor
        Tensor of shape (N, 3) containing the source of the edge
    edge_dst : Tensor
        Tensor of shape (N, 3) containing the destination of the edge
    axis : int, optional
        Axis along which the norm is computed, by default 1

    Returns
    -------
    Tensor
        Tensor of shape (N, ) containing the length of the edge
    """
    displacement = edge_src - edge_dst
    return np.linalg.norm(displacement, axis=axis)
def cell_to_adj(cells: List[List[int]]):
    """Create an adjacency matrix in COO format from triangular mesh cells.

    For each cell (a, b, c) the directed edges a→b, b→c, c→a are emitted.

    Parameters
    ----------
    cells : List[List[int]]
        List of cells, each cell is a list of 3 vertices

    Returns
    -------
    src, dst : List[int], List[int]
        List of source and destination vertices
    """
    src, dst = [], []
    for cell in cells:
        a, b, c = cell[0], cell[1], cell[2]
        src.extend((a, b, c))
        dst.extend((b, c, a))
    return src, dst
|
modulus-main
|
modulus/utils/graphcast/graph_utils.py
|
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
|
modulus-main
|
modulus/utils/graphcast/__init__.py
|
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math
import torch
import torch.nn as nn
from torch.autograd.function import once_differentiable
class CellAreaWeightedLossFunction(nn.Module):
    """Mean-squared-error loss weighted by per-cell area.

    Parameters
    ----------
    area : torch.Tensor
        Cell area with shape [H, W].
    """

    def __init__(self, area):
        super().__init__()
        self.area = area

    def forward(self, invar, outvar):
        """
        Implicit forward function which computes the loss given
        a prediction and the corresponding targets.

        Parameters
        ----------
        invar : torch.Tensor
            prediction of shape [T, C, H, W].
        outvar : torch.Tensor
            target values of shape [T, C, H, W].
        """
        squared_err = (invar - outvar) ** 2
        # average over time and channel, weight each cell by its area
        per_cell = squared_err.mean(dim=(0, 1))
        weighted = torch.mul(per_cell, self.area)
        return weighted.mean()
class CustomCellAreaWeightedLossAutogradFunction(torch.autograd.Function):
    """Autograd function for the custom loss with cell area weighting.

    Computes the loss in ``forward`` and caches the analytic gradient with
    respect to ``invar`` so that ``backward`` is a single multiply.
    """

    @staticmethod
    def forward(ctx, invar: torch.Tensor, outvar: torch.Tensor, area: torch.Tensor):
        """Forward of custom loss function with cell area weighting.

        Parameters
        ----------
        invar : torch.Tensor
            prediction of shape [T, C, H, W].
        outvar : torch.Tensor
            target values of shape [T, C, H, W].
        area : torch.Tensor
            cell area of shape [H, W], broadcast over T and C.
        """
        diff = invar - outvar  # T x C x H x W
        loss = diff**2
        loss = loss.mean(dim=(0, 1))
        loss = torch.mul(loss, area)
        loss = loss.mean()
        # analytic gradient: d(loss)/d(invar) = 2 * diff * area / (T*C*H*W);
        # precompute it here so backward does not need to re-derive anything
        loss_grad = diff * (2.0 / (math.prod(invar.shape)))
        loss_grad *= area.unsqueeze(0).unsqueeze(0)
        ctx.save_for_backward(loss_grad)
        return loss

    @staticmethod
    @once_differentiable
    def backward(ctx, grad_loss: torch.Tensor):
        """Backward method of custom loss function with cell area weighting."""
        # grad_loss should be 1, multiply nevertheless
        # to avoid issues with cases where this isn't the case
        (grad_invar,) = ctx.saved_tensors
        return grad_invar * grad_loss, None, None
class CustomCellAreaWeightedLossFunction(CellAreaWeightedLossFunction):
    """Cell-area-weighted loss backed by a custom autograd function.

    Parameters
    ----------
    area : torch.Tensor
        Cell area with shape [H, W].
    """

    def __init__(self, area: torch.Tensor):
        super().__init__(area)

    def forward(self, invar: torch.Tensor, outvar: torch.Tensor) -> torch.Tensor:
        """
        Implicit forward function which computes the loss given
        a prediction and the corresponding targets.

        Parameters
        ----------
        invar : torch.Tensor
            prediction of shape [T, C, H, W].
        outvar : torch.Tensor
            target values of shape [T, C, H, W].
        """
        loss_fn = CustomCellAreaWeightedLossAutogradFunction.apply
        return loss_fn(invar, outvar, self.area)
|
modulus-main
|
modulus/utils/graphcast/loss.py
|
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import netCDF4 as nc
import numpy as np
import torch
from torch import Tensor
import os
from .graph_utils import deg2rad
class StaticData:
    """Loader for static netCDF inputs: land-sea mask, geopotential, and
    trigonometric latitude-longitude coordinate features.

    Parameters
    ----------
    static_dataset_path : str
        Path to directory containing static data.
    latitudes : Tensor
        Tensor with shape (lat,) that includes latitudes.
    longitudes : Tensor
        Tensor with shape (lon,) that includes longitudes.
    """

    def __init__(
        self,
        static_dataset_path: str,
        latitudes: Tensor,
        longitudes: Tensor,
    ) -> None:  # pragma: no cover
        self.lsm_path = os.path.join(static_dataset_path, "land_sea_mask.nc")
        self.geop_path = os.path.join(static_dataset_path, "geopotential.nc")
        self.lat = latitudes
        self.lon = longitudes

    def get_lsm(self) -> Tensor:  # pragma: no cover
        """Read the land-sea mask from its netCDF file.

        Returns
        -------
        Tensor
            Land-sea mask with shape (1, 1, lat, lon).
        """
        dataset = nc.Dataset(self.lsm_path)
        mask = np.expand_dims(dataset["lsm"], axis=0)
        return torch.tensor(mask, dtype=torch.float32)

    def get_geop(self, normalize: bool = True) -> Tensor:  # pragma: no cover
        """Read the geopotential from its netCDF file.

        Parameters
        ----------
        normalize : bool, optional
            Whether to normalize the geopotential, by default True

        Returns
        -------
        Tensor
            Geopotential with shape (1, 1, lat, lon).
        """
        dataset = nc.Dataset(self.geop_path)
        geop = np.expand_dims(dataset["z"], axis=0)
        if normalize:
            # standardize to zero mean / unit variance
            geop = (geop - geop.mean()) / geop.std()
        return torch.tensor(geop, dtype=torch.float32)

    def get_lat_lon(self) -> Tensor:  # pragma: no cover
        """Compute cosine of latitudes and sine and cosine of longitudes.

        Returns
        -------
        Tensor
            Tensor with shape (1, 3, lat, lon) containing cos(lat), sin(lon)
            and cos(lon), each broadcast over the full grid.
        """
        n_lat, n_lon = self.lat.size(0), self.lon.size(0)
        grid = (1, 1, n_lat, n_lon)
        # each 1-D feature is shaped for broadcasting, then expanded to the grid
        cos_lat = torch.cos(deg2rad(self.lat)).view(1, 1, n_lat, 1).expand(*grid)
        sin_lon = torch.sin(deg2rad(self.lon)).view(1, 1, 1, n_lon).expand(*grid)
        cos_lon = torch.cos(deg2rad(self.lon)).view(1, 1, 1, n_lon).expand(*grid)
        return torch.cat((cos_lat, sin_lon, cos_lon), dim=1)

    def get(self) -> Tensor:  # pragma: no cover
        """Assemble all static data.

        Returns
        -------
        Tensor
            Tensor with shape (1, 5, lat, lon) that includes land-sea mask,
            geopotential, cosine of latitudes, sine and cosine of longitudes.
        """
        return torch.concat(
            (self.get_lsm(), self.get_geop(), self.get_lat_lon()), dim=1
        )
|
modulus-main
|
modulus/utils/graphcast/data_utils.py
|
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import json
try:
import pymesh
except ImportError:
Warning("pymesh is not installed. Please install it to use icosphere.")
# TODO apply a transformation to make faces parallel to poles
def generate_and_save_icospheres(
    save_path: str = "icospheres.json", level: int = 6
) -> None:  # pragma: no cover
    """Generate icospheres from level 0 to `level` (inclusive) and save them to a json file.

    For each refinement order ``k`` the vertices, faces and face centroids are
    stored under the keys ``order_<k>_vertices``, ``order_<k>_faces`` and
    ``order_<k>_face_centroid``.

    Parameters
    ----------
    save_path : str
        Path to save the json file.
    level : int
        Maximum refinement order (inclusive), by default 6.
    """
    radius = 1
    center = np.array((0, 0, 0))
    icospheres = {"vertices": [], "faces": []}

    # Generate icospheres from level 0 to `level` (inclusive)
    for order in range(level + 1):
        icosphere = pymesh.generate_icosphere(radius, center, refinement_order=order)
        icospheres["order_" + str(order) + "_vertices"] = icosphere.vertices
        icospheres["order_" + str(order) + "_faces"] = icosphere.faces
        icosphere.add_attribute("face_centroid")
        icospheres[
            "order_" + str(order) + "_face_centroid"
        ] = icosphere.get_face_attribute("face_centroid")

    # save icosphere vertices and faces to a json file
    # numpy arrays are not json-serializable, so convert them to lists first
    icospheres_dict = {
        key: (value.tolist() if isinstance(value, np.ndarray) else value)
        for key, value in icospheres.items()
    }
    with open(save_path, "w") as f:
        json.dump(icospheres_dict, f)


if __name__ == "__main__":
    generate_and_save_icospheres(level=6)
|
modulus-main
|
modulus/utils/graphcast/icospheres.py
|
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .module import Module
|
modulus-main
|
modulus/models/__init__.py
|
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import numpy as np
import torch
import xarray
import datetime
from urllib.parse import urlparse
import glob
import modulus
from modulus.models.sfno import sfnonet
from modulus.utils import filesystem
from modulus.utils.sfno.zenith_angle import cos_zenith_angle
from modulus.utils.sfno.YParams import ParamsBase
from modulus.models.graphcast.graph_cast_net import GraphCastNet
from modulus.models.dlwp import DLWP
import logging
logger = logging.getLogger(__name__)
class _DummyModule(torch.nn.Module):
"""Hack to handle that checkpoint parameter names begin with "module." """
def __init__(self, model):
super().__init__()
self.module = model
class _CosZenWrapper(torch.nn.Module):
    """Append the cosine of the solar zenith angle as an extra input channel."""

    def __init__(self, model, lon, lat):
        super().__init__()
        self.model = model
        self.lon = lon
        self.lat = lat

    def forward(self, x, time):
        # Evaluate cos(zenith) over the full lat/lon grid at the given time.
        lon_grid, lat_grid = np.meshgrid(self.lon, self.lat)
        cosz = cos_zenith_angle(time, lon_grid, lat_grid).astype(np.float32)
        zenith = torch.from_numpy(cosz).to(device=x.device)
        # Broadcast to x's batch shape and append as an extra channel.
        x, zenith = torch.broadcast_tensors(x, zenith)
        return self.model(torch.cat([x, zenith], dim=1))
def sfno(package: filesystem.Package, pretrained: bool = True) -> torch.nn.Module:
    """Load SFNO model from checkpoints trained with era5_wind

    Parameters
    ----------
    package : filesystem.Package
        Package containing ``config.json`` and, when ``pretrained``, ``weights.tar``.
    pretrained : bool, optional
        Whether to load checkpointed weights, by default True

    Returns
    -------
    torch.nn.Module
        The SFNO model, wrapped to append a cosine-zenith channel when the
        config requests it.
    """
    path = package.get("config.json")
    params = ParamsBase.from_json(path)
    model = sfnonet.SphericalFourierNeuralOperatorNet(params)
    logger.info(str(params.to_dict()))
    if pretrained:
        weights = package.get("weights.tar")
        checkpoint = torch.load(weights)
        # Checkpoint keys begin with "module." (saved from a DDP wrapper), so
        # load through a dummy wrapper whose attribute provides that prefix.
        load_me = _DummyModule(model)
        state = checkpoint["model_state"]
        # device_buffer is not part of the checkpoint; splice in the model's own.
        state = {"module.device_buffer": model.device_buffer, **state}
        load_me.load_state_dict(state)
    if params.add_zenith:
        # Reconstruct the 0.25-degree lat/lon grid used during training.
        # NOTE(review): img_shape_x is treated as the latitude count and
        # img_shape_y as the longitude count — confirm against the config.
        nlat = params.img_shape_x
        nlon = params.img_shape_y
        lat = 90 - np.arange(nlat) * 0.25
        lon = np.arange(nlon) * 0.25
        model = _CosZenWrapper(model, lon, lat)
    return model
class _GraphCastWrapper(torch.nn.Module):
def __init__(self, model, dtype):
super().__init__()
self.model = model
self.dtype = dtype
def forward(self, x):
x = x.to(self.dtype)
y = self.model(x)
return y
def graphcast_34ch(
    package: filesystem.Package, pretrained: bool = True
) -> torch.nn.Module:
    """Load Graphcast 34 channel model from a checkpoint

    Parameters
    ----------
    package : filesystem.Package
        Package containing ``icospheres.json``, the ``static`` data directory
        and, when ``pretrained``, ``weights.tar``.
    pretrained : bool, optional
        Whether to load checkpointed weights, by default True

    Returns
    -------
    torch.nn.Module
        GraphCast model (in eval mode) wrapped to cast inputs to bfloat16.
    """
    num_channels = 34
    icospheres_path = package.get("icospheres.json")
    static_data_path = package.get("static", recursive=True)
    # instantiate the model, set dtype
    base_model = (
        GraphCastNet(
            meshgraph_path=icospheres_path,
            static_dataset_path=static_data_path,
            input_dim_grid_nodes=num_channels,
            input_dim_mesh_nodes=3,
            input_dim_edges=4,
            output_dim_grid_nodes=num_channels,
            processor_layers=16,
            hidden_dim=512,
            do_concat_trick=True,
        )
        .to(dtype=torch.bfloat16)
        .to("cuda")  # TODO hardcoded
    )
    # set model to inference mode
    base_model.eval()
    model = _GraphCastWrapper(base_model, torch.bfloat16)
    if pretrained:
        path = package.get("weights.tar")
        checkpoint = torch.load(path)
        weights = checkpoint["model_state_dict"]
        # Checkpoint keys may carry a "module." prefix from DDP training.
        weights = _fix_state_dict_keys(weights, add_module=False)
        model.model.load_state_dict(weights, strict=True)
    return model
class _DLWPWrapper(torch.nn.Module):
    """Wrap the DLWP core model with lat-lon <-> cubed-sphere regridding and
    solar/static input channels.

    Parameters
    ----------
    model : torch.nn.Module
        Core DLWP network operating on cubed-sphere faces.
    lsm : array-like
        Land-sea mask on the cubed-sphere grid.
    longrid, latgrid : array-like
        Longitude/latitude grids used to evaluate the solar zenith angle.
    topographic_height : array-like
        Topographic height field; normalized with hardcoded mean/std below.
    ll_to_cs_mapfile_path, cs_to_ll_mapfile_path : str
        Paths to TempestRemap sparse map files for the two regrid directions.
    """

    def __init__(
        self,
        model,
        lsm,
        longrid,
        latgrid,
        topographic_height,
        ll_to_cs_mapfile_path,
        cs_to_ll_mapfile_path,
    ):
        super(_DLWPWrapper, self).__init__()
        self.model = model
        self.lsm = lsm
        self.longrid = longrid
        self.latgrid = latgrid
        self.topographic_height = topographic_height
        # load map weights
        # Note: these map files are created using TempestRemap library
        # https://github.com/ClimateGlobalChange/tempestremap
        # To generate the maps, the below sequence of commands can be
        # executed once TempestRemap is installed.
        # GenerateRLLMesh --lat 721 --lon 1440 --file out_latlon.g --lat_begin 90 --lat_end -90 --out_format Netcdf4
        # GenerateCSMesh --res <desired-res> --file out_cubedsphere.g --out_format Netcdf4
        # GenerateOverlapMesh --a out_latlon.g --b out_cubedsphere.g --out overlap_latlon_cubedsphere.g --out_format Netcdf4
        # GenerateOfflineMap --in_mesh out_latlon.g --out_mesh out_cubedsphere.g --ov_mesh overlap_latlon_cubedsphere.g --in_np 1 --in_type FV --out_type FV --out_map map_LL_CS.nc --out_format Netcdf4
        # GenerateOverlapMesh --a out_cubedsphere.g --b out_latlon.g --out overlap_cubedsphere_latlon.g --out_format Netcdf4
        # GenerateOfflineMap --in_mesh out_cubedsphere.g --out_mesh out_latlon.g --ov_mesh overlap_cubedsphere_latlon.g --in_np 1 --in_type FV --out_type FV --out_map map_CS_LL.nc --out_format Netcdf4
        self.input_map_wts = xarray.open_dataset(ll_to_cs_mapfile_path)
        self.output_map_wts = xarray.open_dataset(cs_to_ll_mapfile_path)

    def prepare_input(self, input, time):
        """Regrid the lat-lon input to the cubed sphere and append solar-zenith,
        land-sea-mask and topography channels.

        Parameters
        ----------
        input : torch.Tensor
            Input with leading dims (N, T, C); trailing dims are flattened and
            multiplied through the sparse LL->CS map.
        time : datetime.datetime
            Valid time of the last input step.
            # NOTE(review): steps are assumed 6-hourly apart (hardcoded below).
        """
        device = input.device
        dtype = input.dtype
        # TempestRemap indices are 1-based; shift to 0-based for the COO tensor.
        i = self.input_map_wts.row.values - 1
        j = self.input_map_wts.col.values - 1
        data = self.input_map_wts.S.values
        M = torch.sparse_coo_tensor(np.array((i, j)), data).type(dtype).to(device)
        N, T, C = input.shape[0], input.shape[1], input.shape[2]
        # apply the sparse regrid to every (sample, step, channel) slice at once
        input = (M @ input.reshape(N * T * C, -1).T).T
        # each of the 6 cube faces is an S x S grid
        S = int((M.shape[0] / 6) ** 0.5)
        input = input.reshape(N, T, C, 6, S, S)
        input_list = list(torch.split(input, 1, dim=1))
        input_list = [tensor.squeeze(1) for tensor in input_list]
        repeat_vals = (input.shape[0], -1, -1, -1, -1)  # repeat along batch dimension
        for i in range(len(input_list)):
            # top-of-atmosphere incident solar radiation proxy per input step
            tisr = np.maximum(
                cos_zenith_angle(
                    time
                    - datetime.timedelta(hours=6 * (input.shape[1] - 1))
                    + datetime.timedelta(hours=6 * i),
                    self.longrid,
                    self.latgrid,
                ),
                0,
            ) - (
                1 / np.pi
            )  # subtract mean value
            tisr = (
                torch.tensor(tisr, dtype=dtype)
                .to(device)
                .unsqueeze(dim=0)
                .unsqueeze(dim=0)
            )  # add channel and batch size dimension
            tisr = tisr.expand(*repeat_vals)  # TODO - find better way to batch TISR
            input_list[i] = torch.cat(
                (input_list[i], tisr), dim=1
            )  # concat along channel dim
        input_model = torch.cat(
            input_list, dim=1
        )  # concat the time dimension into channels
        lsm_tensor = torch.tensor(self.lsm, dtype=dtype).to(device).unsqueeze(dim=0)
        lsm_tensor = lsm_tensor.expand(*repeat_vals)
        # normalize topography with hardcoded mean (3724 m) and std (8349 m)
        topographic_height_tensor = (
            torch.tensor((self.topographic_height - 3.724e03) / 8.349e03, dtype=dtype)
            .to(device)
            .unsqueeze(dim=0)
        )
        topographic_height_tensor = topographic_height_tensor.expand(*repeat_vals)
        input_model = torch.cat(
            (input_model, lsm_tensor, topographic_height_tensor), dim=1
        )
        return input_model

    def prepare_output(self, output):
        """Split the channel-stacked prediction into two time steps and regrid
        from the cubed sphere back to the 721x1440 lat-lon grid."""
        device = output.device
        dtype = output.dtype
        output = torch.split(output, output.shape[1] // 2, dim=1)
        output = torch.stack(output, dim=1)  # add time dimension back in
        # TempestRemap indices are 1-based; shift to 0-based for the COO tensor.
        i = self.output_map_wts.row.values - 1
        j = self.output_map_wts.col.values - 1
        data = self.output_map_wts.S.values
        M = torch.sparse_coo_tensor(np.array((i, j)), data).type(dtype).to(device)
        N, T, C = output.shape[0], 2, output.shape[2]
        output = (M @ output.reshape(N * T * C, -1).T).T
        output = output.reshape(N, T, C, 721, 1440)
        return output

    def forward(self, x, time):
        """Regrid input, run the core model, and regrid the prediction back."""
        x = self.prepare_input(x, time)
        y = self.model(x)
        return self.prepare_output(y)
def dlwp(package, pretrained=True):
    """Load a DLWP cubed-sphere model from a checkpoint package.

    Parameters
    ----------
    package : filesystem.Package
        Package with static datasets, TempestRemap map files, ``config.json``
        and, when ``pretrained``, ``weights.pt``.
    pretrained : bool, optional
        Whether to load checkpointed weights, by default True

    Returns
    -------
    torch.nn.Module
        DLWP model (in eval mode) wrapped with input/output regridding.

    Raises
    ------
    FileNotFoundError
        If the lat-lon <-> cubed-sphere map files are not found in the package.
    """
    # load static datasets
    lsm = xarray.open_dataset(package.get("land_sea_mask_rs_cs.nc"))["lsm"].values
    topographic_height = xarray.open_dataset(package.get("geopotential_rs_cs.nc"))[
        "z"
    ].values
    latlon_grids = xarray.open_dataset(package.get("latlon_grid_field_rs_cs.nc"))
    latgrid, longrid = latlon_grids["latgrid"].values, latlon_grids["longrid"].values

    # load maps
    parsed_uri = urlparse(package.root)
    if parsed_uri.scheme == "file":
        root_path = parsed_uri.path
    else:
        root_path = package.root
    ll_to_cs_file = glob.glob(root_path + package.seperator + "map_LL*_CS*.nc")
    cs_to_ll_file = glob.glob(root_path + package.seperator + "map_CS*_LL*.nc")
    # Fail fast with a clear error; previously a missing map file left the
    # *_relative_path locals unbound and caused a confusing UnboundLocalError.
    if not ll_to_cs_file:
        raise FileNotFoundError(
            f"No lat-lon to cubed-sphere map file (map_LL*_CS*.nc) found in {root_path}"
        )
    if not cs_to_ll_file:
        raise FileNotFoundError(
            f"No cubed-sphere to lat-lon map file (map_CS*_LL*.nc) found in {root_path}"
        )

    file_path = ll_to_cs_file[0]  # take the first match
    ll_to_cs_relative_path = file_path[len(root_path) :]
    if parsed_uri.scheme == "file":
        # strip the leading separator so the path is relative to the package
        ll_to_cs_relative_path = ll_to_cs_relative_path.lstrip(package.seperator)

    file_path = cs_to_ll_file[0]  # take the first match
    cs_to_ll_relative_path = file_path[len(root_path) :]
    if parsed_uri.scheme == "file":
        cs_to_ll_relative_path = cs_to_ll_relative_path.lstrip(package.seperator)

    ll_to_cs_mapfile_path = package.get(ll_to_cs_relative_path)
    cs_to_ll_mapfile_path = package.get(cs_to_ll_relative_path)
    with open(package.get("config.json")) as json_file:
        config = json.load(json_file)
        core_model = DLWP(
            nr_input_channels=config["nr_input_channels"],
            nr_output_channels=config["nr_output_channels"],
        )

        if pretrained:
            weights_path = package.get("weights.pt")
            weights = torch.load(weights_path)
            fixed_weights = _fix_state_dict_keys(weights, add_module=False)
            core_model.load_state_dict(fixed_weights)

        model = _DLWPWrapper(
            core_model,
            lsm,
            longrid,
            latgrid,
            topographic_height,
            ll_to_cs_mapfile_path,
            cs_to_ll_mapfile_path,
        )

        model.eval()

    return model
def _fix_state_dict_keys(state_dict, add_module=False):
"""Add or remove 'module.' from state_dict keys
Parameters
----------
state_dict : Dict
Model state_dict
add_module : bool, optional
If True, will add 'module.' to keys, by default False
Returns
-------
Dict
Model state_dict with fixed keys
"""
fixed_state_dict = {}
for key, value in state_dict.items():
if add_module:
new_key = "module." + key
else:
new_key = key.replace("module.", "")
fixed_state_dict[new_key] = value
return fixed_state_dict
|
modulus-main
|
modulus/models/fcn_mip_plugin.py
|
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import json
import torch
import logging
import inspect
import importlib
import tempfile
import tarfile
import pkg_resources
from typing import Union, List, Dict, Any
from pathlib import Path
import torch.nn as nn
import modulus
from modulus.models.meta import ModelMetaData
from modulus.registry import ModelRegistry
from modulus.utils.filesystem import _get_fs, _download_cached
class Module(torch.nn.Module):
    """The base class for all network models in Modulus.

    This should be used as a direct replacement for torch.nn.module and provides
    additional functionality for saving and loading models, as well as
    handling file system abstractions.

    There is one important requirement for all models in Modulus. They must
    have json serializable arguments in their __init__ function. This is
    required for saving and loading models and allow models to be instantiated
    from a checkpoint.

    Parameters
    ----------
    meta : ModelMetaData, optional
        Meta data class for storing info regarding model, by default None
    """

    _file_extension = ".mdlus"  # Set file extension for saving and loading
    __model_checkpoint_version__ = (
        "0.1.0"  # Used for file versioning and is not the same as modulus version
    )

    def __new__(cls, *args, **kwargs):
        out = super().__new__(cls)

        # Capture the constructor arguments so the model can be re-created
        # from a checkpoint later (see `instantiate`). Requires all args to
        # be json serializable.
        sig = inspect.signature(cls.__init__)
        bound_args = sig.bind_partial(
            *([None] + list(args)), **kwargs
        )  # Add None to account for self
        bound_args.apply_defaults()
        bound_args.arguments.pop("self", None)

        out._args = {
            "__name__": cls.__name__,
            "__module__": cls.__module__,
            "__args__": {k: v for k, v in bound_args.arguments.items()},
        }
        return out

    def __init__(self, meta: Union[ModelMetaData, None] = None):
        super().__init__()
        self.meta = meta
        # Empty buffer whose only purpose is to track the module's device.
        self.register_buffer("device_buffer", torch.empty(0))
        self._setup_logger()

    def _setup_logger(self):
        """Attach a stream handler with a timestamped format (WARNING level)."""
        self.logger = logging.getLogger("core.module")
        handler = logging.StreamHandler()
        formatter = logging.Formatter(
            "[%(asctime)s - %(levelname)s] %(message)s", datefmt="%H:%M:%S"
        )
        handler.setFormatter(formatter)
        self.logger.addHandler(handler)
        self.logger.setLevel(logging.WARNING)

    @classmethod
    def instantiate(cls, arg_dict: Dict[str, Any]) -> "Module":
        """Instantiate a model from a dictionary of arguments

        Parameters
        ----------
        arg_dict : Dict[str, Any]
            Dictionary of arguments to instantiate model with. This should be
            have three keys: '__name__', '__module__', and '__args__'. The first two
            are used to import the class and the last is used to instantiate
            the class. The '__args__' key should be a dictionary of arguments
            to pass to the class's __init__ function.

        Returns
        -------
        Module

        Examples
        --------
        >>> from modulus.models import Module
        >>> fcn = Module.instantiate({'__name__': 'FullyConnected', '__module__': 'modulus.models.mlp', '__args__': {'in_features': 10}})
        >>> fcn
        FullyConnected(
          (layers): ModuleList(
            (0): FCLayer(
              (activation_fn): SiLU()
              (linear): Linear(in_features=10, out_features=512, bias=True)
            )
            (1-5): 5 x FCLayer(
              (activation_fn): SiLU()
              (linear): Linear(in_features=512, out_features=512, bias=True)
            )
          )
          (final_layer): FCLayer(
            (activation_fn): Identity()
            (linear): Linear(in_features=512, out_features=512, bias=True)
          )
        )
        """
        # Add a check if the class is one in the model registry
        _cls_name = arg_dict["__name__"]
        registry = ModelRegistry()
        if _cls_name in registry.list_models():
            _cls = registry.factory(_cls_name)
        else:  # Otherwise, try to import the class
            _mod = importlib.import_module(arg_dict["__module__"])
            _cls = getattr(_mod, arg_dict["__name__"])
        return _cls(**arg_dict["__args__"])

    def debug(self):
        """Turn on debug logging"""
        self.logger.handlers.clear()
        handler = logging.StreamHandler()
        formatter = logging.Formatter(
            f"[%(asctime)s - %(levelname)s - {self.meta.name}] %(message)s",
            datefmt="%Y-%m-%d %H:%M:%S",
        )
        handler.setFormatter(formatter)
        self.logger.addHandler(handler)
        self.logger.setLevel(logging.DEBUG)
        # TODO: set up debug log
        # fh = logging.FileHandler(f'modulus-core-{self.meta.name}.log')

    def save(self, file_name: Union[str, None] = None, verbose: bool = False) -> None:
        """Simple utility for saving just the model

        Parameters
        ----------
        file_name : Union[str,None], optional
            File name to save model weight to. When none is provide it will default to
            the model's name set in the meta data, by default None
        verbose : bool, optional
            Whether to save the model in verbose mode which will include git hash, etc, by default False

        Raises
        ------
        ValueError
            If file_name does not end with .mdlus extension
        """
        if file_name is not None and not file_name.endswith(self._file_extension):
            raise ValueError(
                f"File name must end with {self._file_extension} extension"
            )

        with tempfile.TemporaryDirectory() as temp_dir:
            local_path = Path(temp_dir)

            torch.save(self.state_dict(), local_path / "model.pt")

            with open(local_path / "args.json", "w") as f:
                json.dump(self._args, f)

            # Save the modulus version and git hash (if available)
            metadata_info = {
                "modulus_version": modulus.__version__,
                "mdlus_file_version": self.__model_checkpoint_version__,
            }

            if verbose:
                import git

                # BUGFIX: git.Repo itself raises InvalidGitRepositoryError when
                # not inside a repository, so it must be inside the try block
                # (it was previously outside and the except could never fire).
                try:
                    repo = git.Repo(search_parent_directories=True)
                    metadata_info["git_hash"] = repo.head.object.hexsha
                except git.InvalidGitRepositoryError:
                    metadata_info["git_hash"] = None

            with open(local_path / "metadata.json", "w") as f:
                json.dump(metadata_info, f)

            # Once all files are saved, package them into a tar file
            with tarfile.open(local_path / "model.tar", "w") as tar:
                for file in local_path.iterdir():
                    tar.add(str(file), arcname=file.name)

            if file_name is None:
                # use the shared extension constant for consistency
                file_name = self.meta.name + self._file_extension

            # Save files to remote destination
            fs = _get_fs(file_name)
            fs.put(str(local_path / "model.tar"), file_name)

    @staticmethod
    def _check_checkpoint(local_path: str) -> bool:
        """Validate an extracted checkpoint directory.

        Raises
        ------
        IOError
            If a required file is missing or the checkpoint file version is
            incompatible with this class.
        """
        if not local_path.joinpath("args.json").exists():
            raise IOError("File 'args.json' not found in checkpoint")

        if not local_path.joinpath("metadata.json").exists():
            raise IOError("File 'metadata.json' not found in checkpoint")

        if not local_path.joinpath("model.pt").exists():
            raise IOError("Model weights 'model.pt' not found in checkpoint")

        # Check if the checkpoint version is compatible with the current version
        with open(local_path.joinpath("metadata.json"), "r") as f:
            metadata_info = json.load(f)
            if (
                metadata_info["mdlus_file_version"]
                != Module.__model_checkpoint_version__
            ):
                # BUGFIX: formerly referenced the non-existent Module.__version__,
                # which raised AttributeError instead of this IOError.
                raise IOError(
                    f"Model checkpoint version {metadata_info['mdlus_file_version']} is not compatible with current version {Module.__model_checkpoint_version__}"
                )

    def load(
        self, file_name: str, map_location: Union[None, str, torch.device] = None
    ) -> None:
        """Simple utility for loading the model weights from checkpoint

        Parameters
        ----------
        file_name : str
            Checkpoint file name
        map_location : Union[None, str, torch.device], optional
            Map location for loading the model weights, by default None will use model's device

        Raises
        ------
        IOError
            If file_name provided does not exist or is not a valid checkpoint
        """
        # Download and cache the checkpoint file if needed
        cached_file_name = _download_cached(file_name)

        # Use a temporary directory to extract the tar file
        with tempfile.TemporaryDirectory() as temp_dir:
            local_path = Path(temp_dir)

            # Open the tar file and extract its contents to the temporary directory
            with tarfile.open(cached_file_name, "r") as tar:
                tar.extractall(path=local_path)

            # Check if the checkpoint is valid
            Module._check_checkpoint(local_path)

            # Load the model weights
            device = map_location if map_location is not None else self.device
            model_dict = torch.load(
                local_path.joinpath("model.pt"), map_location=device
            )
            self.load_state_dict(model_dict)

    @classmethod
    def from_checkpoint(cls, file_name: str) -> "Module":
        """Simple utility for constructing a model from a checkpoint

        Parameters
        ----------
        file_name : str
            Checkpoint file name

        Returns
        -------
        Module

        Raises
        ------
        IOError
            If file_name provided does not exist or is not a valid checkpoint
        """
        # Download and cache the checkpoint file if needed
        cached_file_name = _download_cached(file_name)

        # Use a temporary directory to extract the tar file
        with tempfile.TemporaryDirectory() as temp_dir:
            local_path = Path(temp_dir)

            # Open the tar file and extract its contents to the temporary directory
            with tarfile.open(cached_file_name, "r") as tar:
                tar.extractall(path=local_path)

            # Check if the checkpoint is valid
            Module._check_checkpoint(local_path)

            # Load model arguments and instantiate the model
            with open(local_path.joinpath("args.json"), "r") as f:
                args = json.load(f)
            model = cls.instantiate(args)

            # Load the model weights
            model_dict = torch.load(
                local_path.joinpath("model.pt"), map_location=model.device
            )
            model.load_state_dict(model_dict)
        return model

    @staticmethod
    def from_torch(
        torch_model_class: torch.nn.Module, meta: ModelMetaData = None
    ) -> "Module":
        """Construct a Modulus module from a PyTorch module

        Parameters
        ----------
        torch_model_class : torch.nn.Module
            PyTorch module class
        meta : ModelMetaData, optional
            Meta data for the model, by default None

        Returns
        -------
        Module
        """

        # Define an internal class as before
        class ModulusModel(Module):
            def __init__(self, *args, **kwargs):
                super().__init__(meta=meta)
                self.inner_model = torch_model_class(*args, **kwargs)

            def forward(self, x):
                return self.inner_model(x)

        # Get the argument names and default values of the PyTorch model's init method
        init_argspec = inspect.getfullargspec(torch_model_class.__init__)
        model_argnames = init_argspec.args[1:]  # Exclude 'self'
        model_defaults = init_argspec.defaults or []
        defaults_dict = dict(
            zip(model_argnames[-len(model_defaults) :], model_defaults)
        )

        # Define the signature of new init
        params = [inspect.Parameter("self", inspect.Parameter.POSITIONAL_OR_KEYWORD)]
        params += [
            inspect.Parameter(
                argname,
                inspect.Parameter.POSITIONAL_OR_KEYWORD,
                default=defaults_dict.get(argname, inspect.Parameter.empty),
            )
            for argname in model_argnames
        ]
        init_signature = inspect.Signature(params)

        # Replace ModulusModel.__init__ signature with new init signature
        ModulusModel.__init__.__signature__ = init_signature

        # Generate a unique name for the created class
        new_class_name = f"{torch_model_class.__name__}ModulusModel"
        ModulusModel.__name__ = new_class_name

        # Add this class to the dict of models classes
        registry = ModelRegistry()
        registry.register(ModulusModel, new_class_name)

        return ModulusModel

    @property
    def device(self) -> torch.device:
        """Get device model is on

        Returns
        -------
        torch.device
            PyTorch device
        """
        return self.device_buffer.device

    def num_parameters(self) -> int:
        """Gets the number of learnable parameters"""
        count = 0
        for name, param in self.named_parameters():
            count += param.numel()
        return count
|
modulus-main
|
modulus/models/module.py
|
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from dataclasses import dataclass
@dataclass
class ModelMetaData:
    """Data class for storing essential meta data needed for all Modulus Models"""

    # Model info
    name: str = "ModulusModule"
    # Optimization
    jit: bool = False
    cuda_graphs: bool = False
    amp: bool = False
    amp_cpu: bool = None
    amp_gpu: bool = None
    torch_fx: bool = False
    # Inference
    onnx: bool = False
    onnx_gpu: bool = None
    onnx_cpu: bool = None
    onnx_runtime: bool = False
    trt: bool = False
    # Physics informed
    var_dim: int = -1
    func_torch: bool = False
    auto_grad: bool = False

    def __post_init__(self):
        # Device-specific flags fall back to the generic flag unless the
        # caller set them explicitly.
        for specific, generic in (
            ("amp_cpu", "amp"),
            ("amp_gpu", "amp"),
            ("onnx_cpu", "onnx"),
            ("onnx_gpu", "onnx"),
        ):
            if getattr(self, specific) is None:
                setattr(self, specific, getattr(self, generic))
|
modulus-main
|
modulus/models/meta.py
|
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math
import torch
import torch.nn as nn
from dataclasses import dataclass
import modulus
from modulus.models.layers import get_activation
from modulus.models.meta import ModelMetaData
from modulus.models.module import Module
from typing import Tuple, Union
Tensor = torch.Tensor
def _get_same_padding(x: int, k: int, s: int) -> int:
"""Function to compute "same" padding. Inspired from:
https://github.com/huggingface/pytorch-image-models/blob/0.5.x/timm/models/layers/padding.py
"""
return max(s * math.ceil(x / s) - s - x + k, 0)
def _pad_periodically_equatorial(
main_face, left_face, right_face, top_face, bottom_face, nr_rot, size=2
):
if nr_rot != 0:
top_face = torch.rot90(top_face, k=nr_rot, dims=(-2, -1))
bottom_face = torch.rot90(bottom_face, k=nr_rot, dims=(-1, -2))
padded_data_temp = torch.cat(
(left_face[..., :, -size:], main_face, right_face[..., :, :size]), dim=-1
)
top_pad = torch.cat(
(top_face[..., :, :size], top_face, top_face[..., :, -size:]), dim=-1
) # hacky - extend on the left and right side
bottom_pad = torch.cat(
(bottom_face[..., :, :size], bottom_face, bottom_face[..., :, -size:]), dim=-1
) # hacky - extend on the left and right side
padded_data = torch.cat(
(bottom_pad[..., -size:, :], padded_data_temp, top_pad[..., :size, :]), dim=-2
)
return padded_data
def _pad_periodically_polar(
main_face,
left_face,
right_face,
top_face,
bottom_face,
rot_axis_left,
rot_axis_right,
size=2,
):
left_face = torch.rot90(left_face, dims=rot_axis_left)
right_face = torch.rot90(right_face, dims=rot_axis_right)
padded_data_temp = torch.cat(
(bottom_face[..., -size:, :], main_face, top_face[..., :size, :]), dim=-2
)
left_pad = torch.cat(
(left_face[..., :size, :], left_face, left_face[..., -size:, :]), dim=-2
) # hacky - extend the left and right
right_pad = torch.cat(
(right_face[..., :size, :], right_face, right_face[..., -size:, :]), dim=-2
) # hacky - extend the left and right
padded_data = torch.cat(
(left_pad[..., :, -size:], padded_data_temp, right_pad[..., :, :size]), dim=-1
)
return padded_data
def _cubed_conv_wrapper(faces, equator_conv, polar_conv):
    """Apply per-face convolutions across all six cubed-sphere faces.

    Faces 0-3 are convolved with ``equator_conv`` and faces 4-5 with
    ``polar_conv``. When the convolution needs "same" padding, each face is
    first padded with data from its neighbouring faces (rotated into a
    consistent orientation) so the kernels see continuous data across face
    boundaries.

    NOTE(review): the neighbour indices and rotation counts below encode the
    assumed cubed-sphere face adjacency (faces 0-3 around the equator, face 5
    used as "top" and face 4 as "bottom" in the equatorial padding calls);
    they are taken as given here, not re-derived.
    """
    # compute the required padding
    padding_size = _get_same_padding(
        x=faces[0].size(-1), k=equator_conv.kernel_size[0], s=equator_conv.stride[0]
    )
    padding_size = padding_size // 2  # symmetric padding per side
    output = []
    if padding_size != 0:
        for i in range(6):
            if i == 0:
                # equatorial face 0: left=3, right=1, top=5, bottom=4, no rotation
                x = _pad_periodically_equatorial(
                    faces[0],
                    faces[3],
                    faces[1],
                    faces[5],
                    faces[4],
                    nr_rot=0,
                    size=padding_size,
                )
                output.append(equator_conv(x))
            elif i == 1:
                # equatorial face 1: left=0, right=2; polar faces rotated once
                x = _pad_periodically_equatorial(
                    faces[1],
                    faces[0],
                    faces[2],
                    faces[5],
                    faces[4],
                    nr_rot=1,
                    size=padding_size,
                )
                output.append(equator_conv(x))
            elif i == 2:
                # equatorial face 2: left=1, right=3; polar faces rotated twice
                x = _pad_periodically_equatorial(
                    faces[2],
                    faces[1],
                    faces[3],
                    faces[5],
                    faces[4],
                    nr_rot=2,
                    size=padding_size,
                )
                output.append(equator_conv(x))
            elif i == 3:
                # equatorial face 3: left=2, right=0; polar faces rotated three times
                x = _pad_periodically_equatorial(
                    faces[3],
                    faces[2],
                    faces[0],
                    faces[5],
                    faces[4],
                    nr_rot=3,
                    size=padding_size,
                )
                output.append(equator_conv(x))
            elif i == 4:
                # polar face 4: left=3, right=1, top=0, bottom=5
                x = _pad_periodically_polar(
                    faces[4],
                    faces[3],
                    faces[1],
                    faces[0],
                    faces[5],
                    rot_axis_left=(-1, -2),
                    rot_axis_right=(-2, -1),
                    size=padding_size,
                )
                output.append(polar_conv(x))
            else:  # i=5
                # polar face 5 is mirrored before/after the convolution --
                # presumably so both polar faces present the same orientation
                # to the shared polar kernel; TODO(review): confirm intent
                x = _pad_periodically_polar(
                    faces[5],
                    faces[3],
                    faces[1],
                    faces[4],
                    faces[0],
                    rot_axis_left=(-2, -1),
                    rot_axis_right=(-1, -2),
                    size=padding_size,
                )
                x = torch.flip(x, [-1])
                x = polar_conv(x)
                output.append(torch.flip(x, [-1]))
    else:
        # No cross-face padding required; apply the convolutions directly.
        for i in range(6):
            if i in [0, 1, 2, 3]:
                output.append(equator_conv(faces[i]))
            elif i == 4:
                output.append(polar_conv(faces[i]))
            else:  # i=5
                # Same mirror treatment for face 5 as in the padded branch.
                x = torch.flip(faces[i], [-1])
                x = polar_conv(x)
                output.append(torch.flip(x, [-1]))
    return output
def _cubed_non_conv_wrapper(faces, layer):
output = []
for i in range(6):
output.append(layer(faces[i]))
return output
@dataclass
class MetaData(ModelMetaData):
    """Model meta data (optimization/inference capability flags) for DLWP."""

    name: str = "DLWP"
    # Optimization
    jit: bool = False
    cuda_graphs: bool = True
    amp_cpu: bool = True
    amp_gpu: bool = True
    # Inference
    onnx: bool = False
    # Physics informed
    var_dim: int = 1
    func_torch: bool = False
    auto_grad: bool = False
class DLWP(Module):
    """A Convolutional model for Deep Learning Weather Prediction that
    works on Cubed-sphere grids.

    This model expects the input to be of shape [N, C, 6, Res, Res]

    Parameters
    ----------
    nr_input_channels : int
        Number of channels in the input
    nr_output_channels : int
        Number of channels in the output
    nr_initial_channels : int
        Number of channels in the initial convolution. This governs the overall channels
        in the model.
    activation_fn : str
        Activation function for the convolutions
    depth : int
        Depth for the U-Net
    clamp_activation : Tuple of ints, floats or None
        The min and max value used for torch.clamp()

    Example
    -------
    >>> model = modulus.models.dlwp.DLWP(
    ... nr_input_channels=2,
    ... nr_output_channels=4,
    ... )
    >>> input = torch.randn(4, 2, 6, 64, 64) # [N, C, F, Res, Res]
    >>> output = model(input)
    >>> output.size()
    torch.Size([4, 4, 6, 64, 64])

    Note
    ----
    Reference: Weyn, Jonathan A., et al. "Sub-seasonal forecasting with a large ensemble
    of deep-learning weather prediction models." Journal of Advances in Modeling Earth
    Systems 13.7 (2021): e2021MS002502.
    """

    def __init__(
        self,
        nr_input_channels: int,
        nr_output_channels: int,
        nr_initial_channels: int = 64,
        activation_fn: str = "leaky_relu",
        depth: int = 2,
        clamp_activation: Tuple[Union[float, int, None], Union[float, int, None]] = (
            None,
            10.0,
        ),
    ):
        super().__init__(meta=MetaData())
        self.nr_input_channels = nr_input_channels
        self.nr_output_channels = nr_output_channels
        self.nr_initial_channels = nr_initial_channels
        self.activation_fn = get_activation(activation_fn)
        self.depth = depth
        self.clamp_activation = clamp_activation

        # Resolution-changing layers shared by every face.
        self.avg_pool = nn.AvgPool2d(2)
        self.upsample_layer = nn.Upsample(scale_factor=2)

        # Separate convolution stacks for the equatorial faces (0-3) and the
        # polar faces (4-5); each U-Net level applies two 3x3 convolutions.
        self.equatorial_downsample = []
        self.equatorial_upsample = []
        self.equatorial_mid_layers = []
        self.polar_downsample = []
        self.polar_upsample = []
        self.polar_mid_layers = []

        # Encoder: channel count doubles at every level.
        for i in range(depth):
            if i == 0:
                ins = self.nr_input_channels
            else:
                ins = self.nr_initial_channels * (2 ** (i - 1))
            outs = self.nr_initial_channels * (2 ** (i))
            self.equatorial_downsample.append(nn.Conv2d(ins, outs, kernel_size=3))
            self.polar_downsample.append(nn.Conv2d(ins, outs, kernel_size=3))
            self.equatorial_downsample.append(nn.Conv2d(outs, outs, kernel_size=3))
            self.polar_downsample.append(nn.Conv2d(outs, outs, kernel_size=3))

        # Bottleneck: expand the channels once, then contract back.
        for i in range(2):
            if i == 0:
                ins = outs
                outs = ins * 2
            else:
                ins = outs
                outs = ins // 2
            self.equatorial_mid_layers.append(nn.Conv2d(ins, outs, kernel_size=3))
            self.polar_mid_layers.append(nn.Conv2d(ins, outs, kernel_size=3))

        # Decoder: input channels double because skip connections are
        # concatenated before every level's first convolution.
        for i in range(depth - 1, -1, -1):
            if i == 0:
                outs = self.nr_initial_channels
                outs_final = outs
            else:
                outs = self.nr_initial_channels * (2 ** (i))
                outs_final = outs // 2
            ins = outs * 2
            self.equatorial_upsample.append(nn.Conv2d(ins, outs, kernel_size=3))
            self.polar_upsample.append(nn.Conv2d(ins, outs, kernel_size=3))
            self.equatorial_upsample.append(nn.Conv2d(outs, outs_final, kernel_size=3))
            self.polar_upsample.append(nn.Conv2d(outs, outs_final, kernel_size=3))

        # Register the collected layers as proper sub-modules.
        self.equatorial_downsample = nn.ModuleList(self.equatorial_downsample)
        self.polar_downsample = nn.ModuleList(self.polar_downsample)
        self.equatorial_mid_layers = nn.ModuleList(self.equatorial_mid_layers)
        self.polar_mid_layers = nn.ModuleList(self.polar_mid_layers)
        self.equatorial_upsample = nn.ModuleList(self.equatorial_upsample)
        self.polar_upsample = nn.ModuleList(self.polar_upsample)

        # Final 1x1 projections to the requested number of output channels.
        self.equatorial_last = nn.Conv2d(outs, self.nr_output_channels, kernel_size=1)
        self.polar_last = nn.Conv2d(outs, self.nr_output_channels, kernel_size=1)

    def activation(self, x: Tensor) -> Tensor:
        """Apply the configured activation followed by an optional clamp."""
        x = self.activation_fn(x)
        # Clamp only when at least one numeric bound was provided; a None
        # bound leaves that side unbounded.
        if any(isinstance(c, (float, int)) for c in self.clamp_activation):
            x = torch.clamp(
                x, min=self.clamp_activation[0], max=self.clamp_activation[1]
            )
        return x

    def forward(self, cubed_sphere_input):
        """Run the U-Net over a [N, C, 6, Res, Res] cubed-sphere tensor."""
        # Validate inputs with explicit raises; `assert` statements are
        # stripped when Python runs with optimizations (-O), which would
        # silently skip these checks.
        if cubed_sphere_input.size(2) != 6:
            raise ValueError("The input must have 6 faces.")
        if cubed_sphere_input.size(3) != cubed_sphere_input.size(4):
            raise ValueError("The input must have equal height and width")
        # split the cubed_sphere_input into individual faces
        faces = torch.split(
            cubed_sphere_input, split_size_or_sections=1, dim=2
        )  # split along face dim
        faces = [torch.squeeze(face, dim=2) for face in faces]

        # Encoder: after every second convolution, save the activations for
        # the skip connection and downsample.
        encoder_states = []
        for i, (equatorial_layer, polar_layer) in enumerate(
            zip(self.equatorial_downsample, self.polar_downsample)
        ):
            faces = _cubed_conv_wrapper(faces, equatorial_layer, polar_layer)
            faces = _cubed_non_conv_wrapper(faces, self.activation)
            if i % 2 != 0:
                encoder_states.append(faces)
                faces = _cubed_non_conv_wrapper(faces, self.avg_pool)

        # Bottleneck.
        for i, (equatorial_layer, polar_layer) in enumerate(
            zip(self.equatorial_mid_layers, self.polar_mid_layers)
        ):
            faces = _cubed_conv_wrapper(faces, equatorial_layer, polar_layer)
            faces = _cubed_non_conv_wrapper(faces, self.activation)

        # Decoder: before every second convolution pair, upsample and
        # concatenate the matching encoder activations along channels.
        j = 0
        for i, (equatorial_layer, polar_layer) in enumerate(
            zip(self.equatorial_upsample, self.polar_upsample)
        ):
            if i % 2 == 0:
                encoder_faces = encoder_states[len(encoder_states) - j - 1]
                faces = _cubed_non_conv_wrapper(faces, self.upsample_layer)
                faces = [
                    torch.cat((face_1, face_2), dim=1)
                    for face_1, face_2 in zip(faces, encoder_faces)
                ]
                j += 1
            faces = _cubed_conv_wrapper(faces, equatorial_layer, polar_layer)
            faces = _cubed_non_conv_wrapper(faces, self.activation)

        # Project to the output channels and re-stack the faces.
        faces = _cubed_conv_wrapper(faces, self.equatorial_last, self.polar_last)
        output = torch.stack(faces, dim=2)
        return output
|
modulus-main
|
modulus/models/dlwp/dlwp.py
|
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .dlwp import DLWP
|
modulus-main
|
modulus/models/dlwp/__init__.py
|
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
import torch.nn as nn
import modulus
Tensor = torch.Tensor
class Identity(nn.Module):
    """No-op activation that returns its input unchanged.

    Serves as a drop-in placeholder wherever an activation slot must be
    filled but no nonlinearity is wanted.

    Example
    -------
    >>> idnt_func = modulus.models.layers.Identity()
    >>> input = torch.randn(2, 2)
    >>> output = idnt_func(input)
    >>> torch.allclose(input, output)
    True
    """

    def forward(self, x: Tensor) -> Tensor:
        # Pass the tensor through untouched.
        return x
class Stan(nn.Module):
    """Self-scalable Tanh (Stan) for 1D Tensors

    Computes ``tanh(x) * (1 + beta * x)`` with one learnable ``beta`` per
    feature (initialized to one).

    Parameters
    ----------
    out_features : int, optional
        Number of features, by default 1

    Note
    ----
    References: Gnanasambandam, Raghav and Shen, Bo and Chung, Jihoon and Yue, Xubo and others.
    Self-scalable Tanh (Stan): Faster Convergence and Better Generalization
    in Physics-informed Neural Networks. arXiv preprint arXiv:2204.12589, 2022.

    Example
    -------
    >>> stan_func = modulus.models.layers.Stan(out_features=1)
    >>> input = torch.Tensor([[0],[1],[2]])
    >>> stan_func(input)
    tensor([[0.0000],
            [1.5232],
            [2.8921]], grad_fn=<MulBackward0>)
    """

    def __init__(self, out_features: int = 1):
        super().__init__()
        # One learnable scale per feature, starting at 1.
        self.beta = nn.Parameter(torch.ones(out_features))

    def forward(self, x: Tensor) -> Tensor:
        # The per-feature betas must line up with the trailing feature dim.
        if x.shape[-1] != self.beta.shape[-1]:
            raise ValueError(
                f"The last dimension of the input must be equal to the dimension of Stan parameters. Got inputs: {x.shape}, params: {self.beta.shape}"
            )
        scaling = 1.0 + self.beta * x
        return torch.tanh(x) * scaling
class SquarePlus(nn.Module):
    """Squareplus activation

    A smooth ReLU-like activation: ``0.5 * (x + sqrt(x^2 + b))``.

    Parameters
    ----------
    b : float, optional
        Smoothness hyperparameter of the squareplus function; larger values
        give a smoother transition around zero, by default 4.0 (the value
        previously hard-coded, so default behavior is unchanged).

    Note
    ----
    Reference: arXiv preprint arXiv:2112.11687

    Example
    -------
    >>> sqr_func = modulus.models.layers.SquarePlus()
    >>> input = torch.Tensor([[1,2],[3,4]])
    >>> sqr_func(input)
    tensor([[1.6180, 2.4142],
            [3.3028, 4.2361]])
    """

    def __init__(self, b: float = 4.0):
        super().__init__()
        self.b = b

    def forward(self, x: Tensor) -> Tensor:
        return 0.5 * (x + torch.sqrt(x * x + self.b))
# Dictionary of activation functions
# Maps a lowercase string identifier to either an nn.Module subclass, or a
# (class, kwargs) tuple when the activation needs non-default constructor
# arguments; consumed by get_activation below.
ACT2FN = {
    "relu": nn.ReLU,
    "leaky_relu": (nn.LeakyReLU, {"negative_slope": 0.1}),
    "prelu": nn.PReLU,
    "relu6": nn.ReLU6,
    "elu": nn.ELU,
    "selu": nn.SELU,
    "silu": nn.SiLU,
    "gelu": nn.GELU,
    "sigmoid": nn.Sigmoid,
    "logsigmoid": nn.LogSigmoid,
    "softplus": nn.Softplus,
    "softshrink": nn.Softshrink,
    "softsign": nn.Softsign,
    "tanh": nn.Tanh,
    "tanhshrink": nn.Tanhshrink,
    "threshold": (nn.Threshold, {"threshold": 1.0, "value": 1.0}),
    "hardtanh": nn.Hardtanh,
    "identity": Identity,
    "stan": Stan,
    "squareplus": SquarePlus,
}
def get_activation(activation: str) -> nn.Module:
    """Returns an activation function given a string

    Parameters
    ----------
    activation : str
        String identifier for the desired activation function

    Returns
    -------
    Activation function

    Raises
    ------
    KeyError
        If the specified activation function is not found in the dictionary
    """
    activation = activation.lower()
    # Keep the try body to the lookup only: the previous version also wrapped
    # the activation's construction, so a KeyError raised *inside* a
    # constructor would have been misreported as an unknown activation.
    try:
        module = ACT2FN[activation]
    except KeyError:
        raise KeyError(
            f"Activation function {activation} not found. Available options are: {list(ACT2FN.keys())}"
        ) from None
    # Entries are either a module class, or a (class, kwargs) tuple carrying
    # non-default constructor arguments.
    if isinstance(module, tuple):
        return module[0](**module[1])
    return module()
|
modulus-main
|
modulus/models/layers/activations.py
|
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import List
from typing import Tuple
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch import Tensor
class SpectralConv1d(nn.Module):
    """1D Fourier layer: FFT, learned complex linear transform, inverse FFT.

    Parameters
    ----------
    in_channels : int
        Number of input channels
    out_channels : int
        Number of output channels
    modes1 : int
        Number of Fourier modes to multiply, at most floor(N/2) + 1
    """

    def __init__(self, in_channels: int, out_channels: int, modes1: int):
        super().__init__()
        self.in_channels = in_channels
        self.out_channels = out_channels
        # Number of retained (lowest) Fourier modes, at most floor(N/2) + 1.
        self.modes1 = modes1
        self.scale = 1 / (in_channels * out_channels)
        # Complex weights stored as real pairs: last dim holds (real, imag).
        self.weights1 = nn.Parameter(
            torch.empty(in_channels, out_channels, self.modes1, 2)
        )
        self.reset_parameters()

    def compl_mul1d(self, input: Tensor, weights: Tensor) -> Tensor:
        """Complex multiplication

        Parameters
        ----------
        input : Tensor
            Input tensor
        weights : Tensor
            Weights tensor

        Returns
        -------
        Tensor
            Product of complex multiplication
        """
        # (batch, in_channel, x), (in_channel, out_channel, x) -> (batch, out_channel, x)
        return torch.einsum("bix,iox->box", input, torch.view_as_complex(weights))

    def forward(self, x: Tensor) -> Tensor:
        n_points = x.size(-1)
        # Real FFT along the spatial dimension.
        spectrum = torch.fft.rfft(x)
        # Allocate the output spectrum and transform only the retained modes;
        # all higher modes stay zero.
        out_spectrum = torch.zeros(
            x.shape[0],
            self.out_channels,
            n_points // 2 + 1,
            device=x.device,
            dtype=torch.cfloat,
        )
        out_spectrum[:, :, : self.modes1] = self.compl_mul1d(
            spectrum[:, :, : self.modes1], self.weights1
        )
        # Back to physical space at the original resolution.
        return torch.fft.irfft(out_spectrum, n=n_points)

    def reset_parameters(self):
        """Reset spectral weights with distribution scale*U(0,1)"""
        self.weights1.data = self.scale * torch.rand(self.weights1.data.shape)
class SpectralConv2d(nn.Module):
    """2D Fourier layer: FFT, learned complex linear transform, inverse FFT.

    Parameters
    ----------
    in_channels : int
        Number of input channels
    out_channels : int
        Number of output channels
    modes1 : int
        Number of Fourier modes to multiply in first dimension, at most floor(N/2) + 1
    modes2 : int
        Number of Fourier modes to multiply in second dimension, at most floor(N/2) + 1
    """

    def __init__(self, in_channels: int, out_channels: int, modes1: int, modes2: int):
        super().__init__()
        self.in_channels = in_channels
        self.out_channels = out_channels
        # Retained mode counts, each at most floor(N/2) + 1 along its axis.
        self.modes1 = modes1
        self.modes2 = modes2
        self.scale = 1 / (in_channels * out_channels)
        # One complex weight tensor per band (low/high) of the first frequency
        # axis; stored as real pairs in the last dim.
        self.weights1 = nn.Parameter(
            torch.empty(in_channels, out_channels, self.modes1, self.modes2, 2)
        )
        self.weights2 = nn.Parameter(
            torch.empty(in_channels, out_channels, self.modes1, self.modes2, 2)
        )
        self.reset_parameters()

    def compl_mul2d(self, input: Tensor, weights: Tensor) -> Tensor:
        """Complex multiplication

        Parameters
        ----------
        input : Tensor
            Input tensor
        weights : Tensor
            Weights tensor

        Returns
        -------
        Tensor
            Product of complex multiplication
        """
        # (batch, in_channel, x, y), (in_channel, out_channel, x, y) -> (batch, out_channel, x, y)
        return torch.einsum("bixy,ioxy->boxy", input, torch.view_as_complex(weights))

    def forward(self, x: Tensor) -> Tensor:
        height, width = x.size(-2), x.size(-1)
        spectrum = torch.fft.rfft2(x)
        out_spectrum = torch.zeros(
            x.shape[0],
            self.out_channels,
            height,
            width // 2 + 1,
            dtype=torch.cfloat,
            device=x.device,
        )
        # Transform both ends of the first (full-length) frequency axis and
        # only the low end of the half-spectrum second axis.
        bands = (
            (slice(None, self.modes1), self.weights1),
            (slice(-self.modes1, None), self.weights2),
        )
        for rows, weight in bands:
            out_spectrum[:, :, rows, : self.modes2] = self.compl_mul2d(
                spectrum[:, :, rows, : self.modes2], weight
            )
        return torch.fft.irfft2(out_spectrum, s=(height, width))

    def reset_parameters(self):
        """Reset spectral weights with distribution scale*U(0,1)"""
        for weight in (self.weights1, self.weights2):
            weight.data = self.scale * torch.rand(weight.data.shape)
class SpectralConv3d(nn.Module):
    """3D Fourier layer: FFT, learned complex linear transform, inverse FFT.

    Parameters
    ----------
    in_channels : int
        Number of input channels
    out_channels : int
        Number of output channels
    modes1 : int
        Number of Fourier modes to multiply in first dimension, at most floor(N/2) + 1
    modes2 : int
        Number of Fourier modes to multiply in second dimension, at most floor(N/2) + 1
    modes3 : int
        Number of Fourier modes to multiply in third dimension, at most floor(N/2) + 1
    """

    def __init__(
        self, in_channels: int, out_channels: int, modes1: int, modes2: int, modes3: int
    ):
        super().__init__()
        self.in_channels = in_channels
        self.out_channels = out_channels
        # Retained mode counts, each at most floor(N/2) + 1 along its axis.
        self.modes1 = modes1
        self.modes2 = modes2
        self.modes3 = modes3
        self.scale = 1 / (in_channels * out_channels)
        # One complex weight tensor per (low/high, low/high) band combination
        # of the first two frequency axes; real pairs in the last dim.
        weight_shape = (in_channels, out_channels, modes1, modes2, modes3, 2)
        self.weights1 = nn.Parameter(torch.empty(*weight_shape))
        self.weights2 = nn.Parameter(torch.empty(*weight_shape))
        self.weights3 = nn.Parameter(torch.empty(*weight_shape))
        self.weights4 = nn.Parameter(torch.empty(*weight_shape))
        self.reset_parameters()

    def compl_mul3d(self, input: Tensor, weights: Tensor) -> Tensor:
        """Complex multiplication

        Parameters
        ----------
        input : Tensor
            Input tensor
        weights : Tensor
            Weights tensor

        Returns
        -------
        Tensor
            Product of complex multiplication
        """
        # (batch, in_channel, x, y, z), (in_channel, out_channel, x, y, z) -> (batch, out_channel, x, y, z)
        return torch.einsum(
            "bixyz,ioxyz->boxyz", input, torch.view_as_complex(weights)
        )

    def forward(self, x: Tensor) -> Tensor:
        d1, d2, d3 = x.size(-3), x.size(-2), x.size(-1)
        spectrum = torch.fft.rfftn(x, dim=[-3, -2, -1])
        out_spectrum = torch.zeros(
            x.shape[0],
            self.out_channels,
            d1,
            d2,
            d3 // 2 + 1,
            dtype=torch.cfloat,
            device=x.device,
        )
        # Transform each retained corner of the spectrum: the two full-length
        # axes contribute both their low and high ends; the last (half-
        # spectrum) axis only its low end.
        low1, high1 = slice(None, self.modes1), slice(-self.modes1, None)
        low2, high2 = slice(None, self.modes2), slice(-self.modes2, None)
        corners = (
            (low1, low2, self.weights1),
            (high1, low2, self.weights2),
            (low1, high2, self.weights3),
            (high1, high2, self.weights4),
        )
        for s1, s2, weight in corners:
            out_spectrum[:, :, s1, s2, : self.modes3] = self.compl_mul3d(
                spectrum[:, :, s1, s2, : self.modes3], weight
            )
        return torch.fft.irfftn(out_spectrum, s=(d1, d2, d3))

    def reset_parameters(self):
        """Reset spectral weights with distribution scale*U(0,1)"""
        for weight in (self.weights1, self.weights2, self.weights3, self.weights4):
            weight.data = self.scale * torch.rand(weight.data.shape)
class SpectralConv4d(nn.Module):
    """4D Fourier layer. It does FFT, linear transform, and Inverse FFT.

    Parameters
    ----------
    in_channels : int
        Number of input channels
    out_channels : int
        Number of output channels
    modes1 : int
        Number of Fourier modes to multiply in first dimension, at most floor(N/2) + 1
    modes2 : int
        Number of Fourier modes to multiply in second dimension, at most floor(N/2) + 1
    modes3 : int
        Number of Fourier modes to multiply in third dimension, at most floor(N/2) + 1
    modes4 : int
        Number of Fourier modes to multiply in fourth dimension, at most floor(N/2) + 1
    """

    def __init__(
        self,
        in_channels: int,
        out_channels: int,
        modes1: int,
        modes2: int,
        modes3: int,
        modes4: int,
    ):
        super().__init__()
        self.in_channels = in_channels
        self.out_channels = out_channels
        # Number of Fourier modes to multiply, at most floor(N/2) + 1
        self.modes1 = modes1
        self.modes2 = modes2
        self.modes3 = modes3
        self.modes4 = modes4
        self.scale = 1 / (in_channels * out_channels)
        # Eight complex weight tensors, one per low/high band combination of
        # the first three frequency axes (the fourth, half-spectrum axis only
        # uses its low band); the trailing dim of 2 stores (real, imag).
        self.weights1 = nn.Parameter(
            torch.empty(
                in_channels,
                out_channels,
                self.modes1,
                self.modes2,
                self.modes3,
                self.modes4,
                2,
            )
        )
        self.weights2 = nn.Parameter(
            torch.empty(
                in_channels,
                out_channels,
                self.modes1,
                self.modes2,
                self.modes3,
                self.modes4,
                2,
            )
        )
        self.weights3 = nn.Parameter(
            torch.empty(
                in_channels,
                out_channels,
                self.modes1,
                self.modes2,
                self.modes3,
                self.modes4,
                2,
            )
        )
        self.weights4 = nn.Parameter(
            torch.empty(
                in_channels,
                out_channels,
                self.modes1,
                self.modes2,
                self.modes3,
                self.modes4,
                2,
            )
        )
        self.weights5 = nn.Parameter(
            torch.empty(
                in_channels,
                out_channels,
                self.modes1,
                self.modes2,
                self.modes3,
                self.modes4,
                2,
            )
        )
        self.weights6 = nn.Parameter(
            torch.empty(
                in_channels,
                out_channels,
                self.modes1,
                self.modes2,
                self.modes3,
                self.modes4,
                2,
            )
        )
        self.weights7 = nn.Parameter(
            torch.empty(
                in_channels,
                out_channels,
                self.modes1,
                self.modes2,
                self.modes3,
                self.modes4,
                2,
            )
        )
        self.weights8 = nn.Parameter(
            torch.empty(
                in_channels,
                out_channels,
                self.modes1,
                self.modes2,
                self.modes3,
                self.modes4,
                2,
            )
        )
        self.reset_parameters()

    def compl_mul4d(
        self,
        input: Tensor,
        weights: Tensor,
    ) -> Tensor:
        """Complex multiplication

        Parameters
        ----------
        input : Tensor
            Input tensor
        weights : Tensor
            Weights tensor

        Returns
        -------
        Tensor
            Product of complex multiplication
        """
        # (batch, in_channel, x, y, z, t), (in_channel, out_channel, x, y, z, t) -> (batch, out_channel, x, y, z, t)
        cweights = torch.view_as_complex(weights)
        return torch.einsum("bixyzt,ioxyzt->boxyzt", input, cweights)

    def forward(self, x: Tensor) -> Tensor:
        batchsize = x.shape[0]
        # Compute Fourier coefficients up to factor of e^(- something constant)
        x_ft = torch.fft.rfftn(x, dim=[-4, -3, -2, -1])
        # Multiply relevant Fourier modes; one assignment per retained corner
        # of the spectrum, labelled (band1, band2, band3) for the first three
        # frequency axes. The fourth axis always takes its low band.
        out_ft = torch.zeros(
            batchsize,
            self.out_channels,
            x.size(-4),
            x.size(-3),
            x.size(-2),
            x.size(-1) // 2 + 1,
            dtype=torch.cfloat,
            device=x.device,
        )
        # (low, low, low)
        out_ft[
            :, :, : self.modes1, : self.modes2, : self.modes3, : self.modes4
        ] = self.compl_mul4d(
            x_ft[:, :, : self.modes1, : self.modes2, : self.modes3, : self.modes4],
            self.weights1,
        )
        # (high, low, low)
        out_ft[
            :, :, -self.modes1 :, : self.modes2, : self.modes3, : self.modes4
        ] = self.compl_mul4d(
            x_ft[:, :, -self.modes1 :, : self.modes2, : self.modes3, : self.modes4],
            self.weights2,
        )
        # (low, high, low)
        out_ft[
            :, :, : self.modes1, -self.modes2 :, : self.modes3, : self.modes4
        ] = self.compl_mul4d(
            x_ft[:, :, : self.modes1, -self.modes2 :, : self.modes3, : self.modes4],
            self.weights3,
        )
        # (low, low, high)
        out_ft[
            :, :, : self.modes1, : self.modes2, -self.modes3 :, : self.modes4
        ] = self.compl_mul4d(
            x_ft[:, :, : self.modes1, : self.modes2, -self.modes3 :, : self.modes4],
            self.weights4,
        )
        # (high, high, low)
        out_ft[
            :, :, -self.modes1 :, -self.modes2 :, : self.modes3, : self.modes4
        ] = self.compl_mul4d(
            x_ft[:, :, -self.modes1 :, -self.modes2 :, : self.modes3, : self.modes4],
            self.weights5,
        )
        # (high, low, high)
        out_ft[
            :, :, -self.modes1 :, : self.modes2, -self.modes3 :, : self.modes4
        ] = self.compl_mul4d(
            x_ft[:, :, -self.modes1 :, : self.modes2, -self.modes3 :, : self.modes4],
            self.weights6,
        )
        # (low, high, high)
        out_ft[
            :, :, : self.modes1, -self.modes2 :, -self.modes3 :, : self.modes4
        ] = self.compl_mul4d(
            x_ft[:, :, : self.modes1, -self.modes2 :, -self.modes3 :, : self.modes4],
            self.weights7,
        )
        # (high, high, high)
        out_ft[
            :, :, -self.modes1 :, -self.modes2 :, -self.modes3 :, : self.modes4
        ] = self.compl_mul4d(
            x_ft[:, :, -self.modes1 :, -self.modes2 :, -self.modes3 :, : self.modes4],
            self.weights8,
        )
        # Return to physical space
        x = torch.fft.irfftn(out_ft, s=(x.size(-4), x.size(-3), x.size(-2), x.size(-1)))
        return x

    def reset_parameters(self):
        """Reset spectral weights with distribution scale*U(0,1)"""
        self.weights1.data = self.scale * torch.rand(self.weights1.data.shape)
        self.weights2.data = self.scale * torch.rand(self.weights2.data.shape)
        self.weights3.data = self.scale * torch.rand(self.weights3.data.shape)
        self.weights4.data = self.scale * torch.rand(self.weights4.data.shape)
        self.weights5.data = self.scale * torch.rand(self.weights5.data.shape)
        self.weights6.data = self.scale * torch.rand(self.weights6.data.shape)
        self.weights7.data = self.scale * torch.rand(self.weights7.data.shape)
        self.weights8.data = self.scale * torch.rand(self.weights8.data.shape)
|
modulus-main
|
modulus/models/layers/spectral_layers.py
|
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .activations import Identity, Stan, SquarePlus, get_activation
from .weight_norm import WeightNormLinear
from .spectral_layers import (
SpectralConv1d,
SpectralConv2d,
SpectralConv3d,
SpectralConv4d,
)
from .fully_connected_layers import (
FCLayer,
Conv1dFCLayer,
Conv2dFCLayer,
Conv3dFCLayer,
ConvNdFCLayer,
ConvNdKernel1Layer,
)
|
modulus-main
|
modulus/models/layers/__init__.py
|
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import functools
import torch
from torch.autograd import Function
from nvfuser._C import Fusion, FusionDefinition, DataType
# Mapping from torch dtypes to the corresponding nvfuser DataType, used when
# building fusion definitions for a given input precision.
_torch_dtype_to_nvfuser = {
    torch.double: DataType.Double,
    torch.float: DataType.Float,
    torch.half: DataType.Half,
    torch.int: DataType.Int,
    torch.int32: DataType.Int32,
    torch.bool: DataType.Bool,
    torch.bfloat16: DataType.BFloat16,
    torch.cfloat: DataType.ComplexFloat,
    torch.cdouble: DataType.ComplexDouble,
}
@functools.lru_cache(maxsize=None)
def silu_backward_for(dtype: torch.dtype, dim: int):  # pragma: no cover
    """
    nvfuser frontend implementation of SiLU backward as a fused kernel and with
    activations recomputation

    The fusion is cached (lru_cache) per (dtype, dim) combination.

    Parameters
    ----------
    dtype : torch.dtype
        Data type to use for the implementation
    dim : int
        Dimension of the input tensor

    Returns
    -------
    fusion :
        An nvfuser fused executor for SiLU backward
    """
    try:
        dtype = _torch_dtype_to_nvfuser[dtype]
    except KeyError:
        raise TypeError("Unsupported dtype")
    fusion = Fusion()
    with FusionDefinition(fusion) as fd:
        x = fd.define_tensor(dim, dtype)
        one = fd.define_constant(1.0)
        # y = sigmoid(x)
        y = fd.ops.sigmoid(x)
        # silu'(x) = y * (1 + x * (1 - y))
        grad_input = fd.ops.mul(y, fd.ops.add(one, fd.ops.mul(x, fd.ops.sub(one, y))))
        grad_input = fd.ops.cast(grad_input, dtype)
        fd.add_output(grad_input)
    return fusion
@functools.lru_cache(maxsize=None)
def silu_double_backward_for(dtype: torch.dtype, dim: int):  # pragma: no cover
    """
    nvfuser frontend implementation of SiLU double backward as a fused kernel and with
    activations recomputation

    The fusion is cached (lru_cache) per (dtype, dim) combination.

    Parameters
    ----------
    dtype : torch.dtype
        Data type to use for the implementation
    dim : int
        Dimension of the input tensor

    Returns
    -------
    fusion :
        An nvfuser fused executor for SiLU double backward
    """
    try:
        dtype = _torch_dtype_to_nvfuser[dtype]
    except KeyError:
        raise TypeError("Unsupported dtype")
    fusion = Fusion()
    with FusionDefinition(fusion) as fd:
        x = fd.define_tensor(dim, dtype)
        one = fd.define_constant(1.0)
        # y = sigmoid(x)
        y = fd.ops.sigmoid(x)
        # dy = y * (1 - y)
        dy = fd.ops.mul(y, fd.ops.sub(one, y))
        # z = 1 + x * (1 - y)
        z = fd.ops.add(one, fd.ops.mul(x, fd.ops.sub(one, y)))
        # term1 = dy * z
        term1 = fd.ops.mul(dy, z)
        # term2 = y * ((1 - y) - x * dy)
        term2 = fd.ops.mul(y, fd.ops.sub(fd.ops.sub(one, y), fd.ops.mul(x, dy)))
        # silu''(x) = term1 + term2
        grad_input = fd.ops.add(term1, term2)
        grad_input = fd.ops.cast(grad_input, dtype)
        fd.add_output(grad_input)
    return fusion
@functools.lru_cache(maxsize=None)
def silu_triple_backward_for(dtype: torch.dtype, dim: int):  # pragma: no cover
    """
    nvfuser frontend implementation of SiLU triple backward as a fused kernel and with
    activations recomputation

    The fusion is cached (lru_cache) per (dtype, dim) combination.

    Parameters
    ----------
    dtype : torch.dtype
        Data type to use for the implementation
    dim : int
        Dimension of the input tensor

    Returns
    -------
    fusion :
        An nvfuser fused executor for SiLU triple backward
    """
    try:
        dtype = _torch_dtype_to_nvfuser[dtype]
    except KeyError:
        raise TypeError("Unsupported dtype")
    fusion = Fusion()
    with FusionDefinition(fusion) as fd:
        x = fd.define_tensor(dim, dtype)
        one = fd.define_constant(1.0)
        two = fd.define_constant(2.0)
        # y = sigmoid(x)
        y = fd.ops.sigmoid(x)
        # dy = y * (1 - y)
        dy = fd.ops.mul(y, fd.ops.sub(one, y))
        # ddy = (1 - 2y) * dy
        ddy = fd.ops.mul(fd.ops.sub(one, fd.ops.mul(two, y)), dy)
        # term1 = ddy * (2 + x - 2xy)
        term1 = fd.ops.mul(
            ddy, fd.ops.sub(fd.ops.add(two, x), fd.ops.mul(two, fd.ops.mul(x, y)))
        )
        # term2 = dy * (1 - 2 (y + x * dy))
        term2 = fd.ops.mul(
            dy, fd.ops.sub(one, fd.ops.mul(two, fd.ops.add(y, fd.ops.mul(x, dy))))
        )
        # silu'''(x) = term1 + term2
        grad_input = fd.ops.add(term1, term2)
        grad_input = fd.ops.cast(grad_input, dtype)
        fd.add_output(grad_input)
    return fusion
class FusedSiLU(Function):
    """
    Fused SiLU activation implementation using nvfuser for a custom fused backward
    with activation recomputation
    """

    @staticmethod
    def forward(ctx, x):
        """
        Forward method for SiLU activation

        Parameters
        ----------
        ctx :
            torch context
        x :
            input tensor

        Returns
        -------
        output activation
        """
        # Save only the raw input; the backward recomputes sigmoid(x)
        # instead of storing intermediate activations.
        ctx.save_for_backward(x)
        return torch.nn.functional.silu(x)

    @staticmethod
    def backward(ctx, grad_output):  # pragma: no cover
        """
        Backward method for SiLU activation

        Parameters
        ----------
        ctx :
            torch context
        grad_output :
            output gradients

        Returns
        -------
        input gradients
        """
        (x,) = ctx.saved_tensors
        # Chain rule: dL/dx = silu'(x) * dL/dy; silu'(x) is evaluated by the
        # fused first-derivative Function, which itself supports higher-order
        # backward passes.
        return FusedSiLU_deriv_1.apply(x) * grad_output
class FusedSiLU_deriv_1(Function):
    """
    Fused SiLU first derivative implementation using nvfuser
    with activation recomputation
    """

    @staticmethod
    def forward(ctx, x):
        """Evaluate silu'(x) with a fused kernel; saves x for double backward."""
        ctx.save_for_backward(x)
        silu_backward = silu_backward_for(x.dtype, x.dim())
        return silu_backward.execute([x])[0]

    @staticmethod
    def backward(ctx, grad_output):  # pragma: no cover
        """Gradient of silu'(x): delegates to the second-derivative Function."""
        (x,) = ctx.saved_tensors
        return FusedSiLU_deriv_2.apply(x) * grad_output
class FusedSiLU_deriv_2(Function):
    """
    Fused SiLU second derivative implementation using nvfuser
    with activation recomputation
    """

    @staticmethod
    def forward(ctx, x):
        """Evaluate silu''(x) with a fused kernel; saves x for triple backward."""
        ctx.save_for_backward(x)
        silu_double_backward = silu_double_backward_for(x.dtype, x.dim())
        return silu_double_backward.execute([x])[0]

    @staticmethod
    def backward(ctx, grad_output):  # pragma: no cover
        """Gradient of silu''(x): delegates to the third-derivative Function."""
        (x,) = ctx.saved_tensors
        return FusedSiLU_deriv_3.apply(x) * grad_output
class FusedSiLU_deriv_3(Function):
    """
    Fused SiLU third derivative implementation using nvfuser
    with activation recomputation
    """

    @staticmethod
    def forward(ctx, x):
        """Evaluate silu'''(x) with a fused kernel; saves x for backward."""
        ctx.save_for_backward(x)
        silu_triple_backward = silu_triple_backward_for(x.dtype, x.dim())
        return silu_triple_backward.execute([x])[0]

    @staticmethod
    def backward(ctx, grad_output):  # pragma: no cover
        """Gradient of silu'''(x) (i.e. the fourth derivative), in plain torch.

        Recomputes sigmoid(x) and its derivatives from the saved input rather
        than storing them.
        """
        (x,) = ctx.saved_tensors
        # y = sigmoid(x); dy/ddy/dddy are its first/second/third derivatives.
        y = torch.sigmoid(x)
        dy = y * (1 - y)
        ddy = (1 - 2 * y) * dy
        dddy = (1 - 2 * y) * ddy - 2 * dy * dy
        z = 1 - 2 * (y + x * dy)
        term1 = dddy * (2 + x - 2 * x * y)
        term2 = 2 * ddy * z
        term3 = dy * (-2) * (2 * dy + x * ddy)
        return (term1 + term2 + term3) * grad_output
|
modulus-main
|
modulus/models/layers/fused_silu.py
|
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
import torch.nn as nn
import torch.nn.functional as F
import modulus
Tensor = torch.Tensor
class WeightNormLinear(nn.Module):
    """Linear layer with explicit weight normalization for 1D tensors.

    The effective weight is ``weight_g * weight / ||weight||`` where the L2
    norm is taken per output row.

    Parameters
    ----------
    in_features : int
        Size of the input features
    out_features : int
        Size of the output features
    bias : bool, optional
        Apply an additive bias to the output, by default True

    Example
    -------
    >>> wnorm = modulus.models.layers.WeightNormLinear(2,4)
    >>> input = torch.rand(2,2)
    >>> output = wnorm(input)
    >>> output.size()
    torch.Size([2, 4])
    """

    def __init__(self, in_features: int, out_features: int, bias: bool = True) -> None:
        super().__init__()
        self.in_features = in_features
        self.out_features = out_features
        # Direction parameter (one row per output) and per-row gain.
        self.weight = nn.Parameter(torch.empty((out_features, in_features)))
        self.weight_g = nn.Parameter(torch.empty((out_features, 1)))
        if bias:
            self.bias = nn.Parameter(torch.empty(out_features))
        else:
            self.register_parameter("bias", None)
        self.reset_parameters()

    def reset_parameters(self) -> None:
        """Re-initialize: xavier weights, unit gains, zero bias."""
        nn.init.xavier_uniform_(self.weight)
        nn.init.constant_(self.weight_g, 1.0)
        if self.bias is not None:
            nn.init.constant_(self.bias, 0.0)

    def forward(self, input: Tensor) -> Tensor:
        row_norm = self.weight.norm(p=2, dim=1, keepdim=True)
        effective_weight = self.weight_g * self.weight / row_norm
        return F.linear(input, effective_weight, self.bias)

    def extra_repr(self) -> str:
        """Summary string for the module repr."""
        return (
            f"in_features={self.in_features}, "
            f"out_features={self.out_features}, bias={self.bias is not None}"
        )
|
modulus-main
|
modulus/models/layers/weight_norm.py
|
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Callable
from typing import Optional
from typing import Union
import torch.nn as nn
from torch import Tensor
from .activations import Identity
from .weight_norm import WeightNormLinear
class FCLayer(nn.Module):
    """Densely connected NN layer

    Parameters
    ----------
    in_features : int
        Size of input features
    out_features : int
        Size of output features
    activation_fn : Union[nn.Module, None], optional
        Activation function; ``None`` means identity, by default None
    weight_norm : bool, optional
        Use a weight-normalized linear layer, by default False
    activation_par : Union[nn.Parameter, None], optional
        Learnable pre-activation scale, by default None
    """

    def __init__(
        self,
        in_features: int,
        out_features: int,
        activation_fn: Union[nn.Module, None] = None,
        weight_norm: bool = False,
        activation_par: Union[nn.Parameter, None] = None,
    ) -> None:
        super().__init__()
        self.activation_fn = Identity() if activation_fn is None else activation_fn
        self.weight_norm = weight_norm
        self.activation_par = activation_par
        linear_cls = WeightNormLinear if weight_norm else nn.Linear
        self.linear = linear_cls(in_features, out_features, bias=True)
        self.reset_parameters()

    def reset_parameters(self) -> None:
        """Reset fully connected weights"""
        nn.init.constant_(self.linear.bias, 0)
        nn.init.xavier_uniform_(self.linear.weight)
        if self.weight_norm:
            # gains of the weight-normalized layer start at 1
            nn.init.constant_(self.linear.weight_g, 1.0)

    def forward(self, x: Tensor) -> Tensor:
        h = self.linear(x)
        if self.activation_par is not None:
            h = self.activation_par * h
        return self.activation_fn(h)
class ConvFCLayer(nn.Module):
    """Base class for 1x1 Conv layers operating on image channels

    Parameters
    ----------
    activation_fn : Union[nn.Module, None], optional
        Activation function; ``None`` means identity, by default None
    activation_par : Union[nn.Parameter, None], optional
        Learnable pre-activation scale, by default None
    """

    def __init__(
        self,
        activation_fn: Union[nn.Module, None] = None,
        activation_par: Union[nn.Parameter, None] = None,
    ) -> None:
        super().__init__()
        self.activation_fn = Identity() if activation_fn is None else activation_fn
        self.activation_par = activation_par

    def apply_activation(self, x: Tensor) -> Tensor:
        """Apply the (optionally scaled) activation to ``x``.

        Parameters
        ----------
        x : Tensor
            Input tensor
        """
        if self.activation_par is not None:
            x = self.activation_par * x
        return self.activation_fn(x)
class Conv1dFCLayer(ConvFCLayer):
    """Channel-wise FC-like layer implemented with a kernel-size-1 1d conv

    Parameters
    ----------
    in_features : int
        Size of input features
    out_features : int
        Size of output features
    activation_fn : Union[nn.Module, None], optional
        Activation function; ``None`` means identity, by default None
    activation_par : Union[nn.Parameter, None], optional
        Learnable pre-activation scale, by default None
    """

    def __init__(
        self,
        in_features: int,
        out_features: int,
        activation_fn: Union[nn.Module, None] = None,
        activation_par: Union[nn.Parameter, None] = None,
    ) -> None:
        super().__init__(activation_fn, activation_par)
        self.in_channels = in_features
        self.out_channels = out_features
        # 1x1 conv acts like a per-position fully connected layer over channels
        self.conv = nn.Conv1d(in_features, out_features, kernel_size=1, bias=True)
        self.reset_parameters()

    def reset_parameters(self) -> None:
        """Re-initialize conv weights (xavier) and bias (zero)."""
        nn.init.xavier_uniform_(self.conv.weight)
        nn.init.constant_(self.conv.bias, 0)

    def forward(self, x: Tensor) -> Tensor:
        return self.apply_activation(self.conv(x))
class Conv2dFCLayer(ConvFCLayer):
    """Channel-wise FC like layer with 2d convolutions

    Parameters
    ----------
    in_channels : int
        Number of input channels
    out_channels : int
        Number of output channels
    activation_fn : Union[nn.Module, None], optional
        Activation function to use. Can be None for no activation, by default None
    activation_par : Union[nn.Parameter, None], optional
        Additional parameters for the activation function, by default None
    """
    def __init__(
        self,
        in_channels: int,
        out_channels: int,
        activation_fn: Union[nn.Module, None] = None,
        activation_par: Union[nn.Parameter, None] = None,
    ) -> None:
        super().__init__(activation_fn, activation_par)
        self.in_channels = in_channels
        self.out_channels = out_channels
        # 1x1 conv acts as a per-pixel fully connected layer over channels
        self.conv = nn.Conv2d(in_channels, out_channels, kernel_size=1, bias=True)
        self.reset_parameters()
    def reset_parameters(self) -> None:
        """Reset layer weights"""
        nn.init.constant_(self.conv.bias, 0)
        # NOTE(review): the bias is frozen here, unlike Conv1dFCLayer and
        # Conv3dFCLayer which leave it trainable — confirm this asymmetry
        # is intentional before relying on it.
        self.conv.bias.requires_grad = False
        nn.init.xavier_uniform_(self.conv.weight)
    def forward(self, x: Tensor) -> Tensor:
        x = self.conv(x)
        x = self.apply_activation(x)
        return x
class Conv3dFCLayer(ConvFCLayer):
    """Channel-wise FC-like layer implemented with a kernel-size-1 3d conv

    Parameters
    ----------
    in_channels : int
        Number of input channels
    out_channels : int
        Number of output channels
    activation_fn : Union[nn.Module, None], optional
        Activation function; ``None`` means identity, by default None
    activation_par : Union[nn.Parameter, None], optional
        Learnable pre-activation scale, by default None
    """

    def __init__(
        self,
        in_channels: int,
        out_channels: int,
        activation_fn: Union[nn.Module, None] = None,
        activation_par: Union[nn.Parameter, None] = None,
    ) -> None:
        super().__init__(activation_fn, activation_par)
        self.in_channels = in_channels
        self.out_channels = out_channels
        # 1x1x1 conv acts like a per-voxel fully connected layer over channels
        self.conv = nn.Conv3d(in_channels, out_channels, kernel_size=1, bias=True)
        self.reset_parameters()

    def reset_parameters(self) -> None:
        """Re-initialize conv weights (xavier) and bias (zero)."""
        nn.init.xavier_uniform_(self.conv.weight)
        nn.init.constant_(self.conv.bias, 0)

    def forward(self, x: Tensor) -> Tensor:
        return self.apply_activation(self.conv(x))
class ConvNdFCLayer(ConvFCLayer):
    """Channel-wise FC like layer with convolutions of arbitrary dimensions

    CAUTION: if n_dims <= 3, use specific version for that n_dims instead

    Parameters
    ----------
    in_channels : int
        Number of input channels
    out_channels : int
        Number of output channels
    activation_fn : Union[nn.Module, None], optional
        Activation function to use. Can be None for no activation, by default None
    activation_par : Union[nn.Parameter, None], optional
        Additional parameters for the activation function, by default None
    """

    def __init__(
        self,
        in_channels: int,
        out_channels: int,
        activation_fn: Union[nn.Module, None] = None,
        activation_par: Union[nn.Parameter, None] = None,
    ) -> None:
        super().__init__(activation_fn, activation_par)
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.conv = ConvNdKernel1Layer(in_channels, out_channels)
        self.reset_parameters()

    def reset_parameters(self):
        """Recursively re-initialize parameters of all submodules."""
        self.conv.apply(self.initialise_parameters)

    def initialise_parameters(self, model):
        """Initialize ``model``'s weights/bias if it defines them.

        Uses ``getattr(...) is not None`` rather than ``hasattr`` so that
        modules whose ``bias``/``weight`` attribute exists but is None
        (e.g. convolutions built with ``bias=False``) do not crash nn.init.
        """
        if getattr(model, "bias", None) is not None:
            nn.init.constant_(model.bias, 0)
        if getattr(model, "weight", None) is not None:
            nn.init.xavier_uniform_(model.weight)

    def forward(self, x: Tensor) -> Tensor:
        x = self.conv(x)
        x = self.apply_activation(x)
        return x
class ConvNdKernel1Layer(nn.Module):
    """Channel-wise FC like layer for convolutions of arbitrary dimensions

    Flattens all spatial dimensions, applies a kernel-size-1 Conv1d over
    the channel axis, then restores the original spatial shape.

    CAUTION: if n_dims <= 3, use specific version for that n_dims instead

    Parameters
    ----------
    in_channels : int
        Number of input channels
    out_channels : int
        Number of output channels
    """

    def __init__(
        self,
        in_channels: int,
        out_channels: int,
    ) -> None:
        super().__init__()
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.conv = nn.Conv1d(in_channels, out_channels, kernel_size=1, bias=True)

    def forward(self, x: Tensor) -> Tensor:
        out_shape = list(x.size())
        out_shape[1] = self.out_channels
        flat = x.view(out_shape[0], self.in_channels, -1)
        return self.conv(flat).view(out_shape)
|
modulus-main
|
modulus/models/layers/fully_connected_layers.py
|
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math
from typing import List, Optional, Tuple, Union
import torch
from torch import Tensor
from torch.autograd import Function
import torch.fft
import torch.onnx
# Note 1: for DFT operators, the less verbose way of registering an operator is via
# `register_custom_op_symbolic`. However, it does not currently work due to
# torch.fft.rfft* functions returning Complex type which is not yet supported in ONNX.
# Note 2:
# - current ONNX Contrib implementation does not support configurable normalization, so
# "normalized" must be 0, the normalization is done outside of Contrib ops.
# See also comments in `_scale_output_backward` function for more details.
# - "onesided" is not configurable either - must be set to 1.
# - Contrib implementation requires DFT dimensions to be the last ones,
# otherwise axes permutation is required.
# See:
# https://github.com/microsoft/onnxruntime/blob/main/onnxruntime/contrib_ops/cuda/math/fft_ops.h#L19
def rfft(
    input: Tensor,
    n: Optional[int] = None,
    dim: int = -1,
    norm: Optional[str] = None,
) -> Tensor:
    """ONNX compatable method to compute the 1d Fourier transform of real-valued input.

    Parameters
    ----------
    input : Tensor
        Real input tensor
    n : Optional[int], optional
        Signal strength, by default None
    dim : int, optional
        Dimension along which to take the real FFT, by default -1
    norm : Optional[str], optional
        Normalization mode ("forward", "backward" or "ortho"); None selects
        "backward" (no normalization), by default None

    Note
    ----
    The function is equivalent to `torch.fft.rfft` when not running in ONNX export mode
    """
    if torch.onnx.is_in_onnx_export():
        assert isinstance(dim, int)
        return _rfft_onnx(input, (n,), (dim,), norm)
    return torch.fft.rfft(input, n=n, dim=dim, norm=norm)
def rfft2(
    input: Tensor,
    s: Optional[Tuple[int]] = None,
    dim: Tuple[int] = (-2, -1),
    norm: Optional[str] = None,
) -> Tensor:
    """ONNX compatable method to compute the 2d Fourier transform of real-valued input.

    Parameters
    ----------
    input : Tensor
        Real input tensor
    s : Optional[Tuple[int]], optional
        Signal size in the transformed dimensions, by default None
    dim : Tuple[int], optional
        Dimensions along which to take the real 2D FFT, by default (-2, -1)
    norm : Optional[str], optional
        Normalization mode ("forward", "backward" or "ortho"); None selects
        "backward" (normalize by 1/n), by default None

    Note
    ----
    The function is equivalent to `torch.fft.rfft2` when not running in ONNX export mode
    """
    if torch.onnx.is_in_onnx_export():
        assert isinstance(dim, tuple) and len(dim) == 2
        return _rfft_onnx(input, s, dim, norm)
    return torch.fft.rfft2(input, s=s, dim=dim, norm=norm)
def irfft(
    input: Tensor,
    n: Optional[int] = None,
    dim: int = -1,
    norm: Optional[str] = None,
) -> Tensor:
    """ONNX compatable method to compute the inverse of `rfft`.

    Parameters
    ----------
    input : Tensor
        Complex (one-sided spectrum) input tensor
    n : Optional[int], optional
        Signal strength, by default None
    dim : int, optional
        Dimension along which to take the real IFFT, by default -1
    norm : Optional[str], optional
        Normalization mode ("forward", "backward" or "ortho"); None selects
        "backward" (no normalization), by default None

    Note
    ----
    The function is equivalent to `torch.fft.irfft` when not running in ONNX export mode
    """
    if torch.onnx.is_in_onnx_export():
        assert isinstance(dim, int)
        return _irfft_onnx(input, (n,), (dim,), norm)
    return torch.fft.irfft(input, n=n, dim=dim, norm=norm)
def irfft2(
    input: Tensor,
    s: Optional[Tuple[int]] = None,
    dim: Tuple[int] = (-2, -1),
    norm: Optional[str] = None,
) -> Tensor:
    """ONNX compatable method to compute the inverse of `rfft2`.

    Parameters
    ----------
    input : Tensor
        Complex (one-sided spectrum) input tensor
    s : Optional[Tuple[int]], optional
        Signal size in the transformed dimensions, by default None
    dim : Tuple[int], optional
        Dimensions along which to take the real 2D IFFT, by default (-2, -1)
    norm : Optional[str], optional
        Normalization mode ("forward", "backward" or "ortho"); None selects
        "backward" (normalize by 1/n), by default None

    Note
    ----
    The function is equivalent to `torch.fft.irfft2` when not running in ONNX export mode
    """
    if torch.onnx.is_in_onnx_export():
        assert isinstance(dim, tuple) and len(dim) == 2
        return _irfft_onnx(input, s, dim, norm)
    return torch.fft.irfft2(input, s=s, dim=dim, norm=norm)
def view_as_complex(input: Tensor) -> Tensor:
    """ONNX compatable method to view input as complex tensor

    Parameters
    ----------
    input : Tensor
        The input Tensor

    Note
    ----
    The function is equivalent to `torch.view_as_complex` when not running in ONNX export mode

    Raises
    ------
    AssertionError
        If input tensor shape is not [...,2] during ONNX runtime where the last dimension
        denotes the real / imaginary tensors
    """
    if torch.onnx.is_in_onnx_export():
        # ONNX has no complex dtype: keep the (..., 2) real/imag layout as-is.
        assert input.size(-1) == 2
        return input
    return torch.view_as_complex(input)
def real(input: Tensor) -> Tensor:
    """ONNX compatable method to view input as real tensor

    Parameters
    ----------
    input : Tensor
        The input Tensor

    Note
    ----
    The function is equivalent to `input.real` when not running in ONNX export mode

    Raises
    ------
    AssertionError
        If input tensor shape is not [...,2] during ONNX runtime where the last dimension
        denotes the real / imaginary tensors
    """
    if torch.onnx.is_in_onnx_export():
        # Complex numbers are represented as if after `view_as_real`:
        # channel 0 of the trailing dimension is the real part.
        assert input.size(-1) == 2
        return input[..., 0]
    return input.real
def imag(input: Tensor) -> Tensor:
    """ONNX compatable method to view input as imaginary tensor

    Parameters
    ----------
    input : Tensor
        The input Tensor

    Note
    ----
    The function is equivalent to `input.imag` when not running in ONNX export mode

    Raises
    ------
    AssertionError
        If input tensor shape is not [...,2] during ONNX runtime where the last dimension
        denotes the real / imaginary tensors
    """
    if torch.onnx.is_in_onnx_export():
        # Complex numbers are represented as if after `view_as_real`:
        # channel 1 of the trailing dimension is the imaginary part.
        assert input.size(-1) == 2
        return input[..., 1]
    return input.imag
def _rfft_onnx(
    input: Tensor, s: Optional[Tuple[Optional[int]]], dim: Tuple[int], norm: str
) -> Tensor:
    """Export-mode RFFT: validate sizes, move DFT dims last if needed, run the
    Contrib-backed op, then apply the normalization the Contrib op omits."""
    if s is not None:
        # Contrib Rfft cannot pad/trim, so requested sizes must match the input.
        _check_padding_rfft(s, dim, input.size())
    ndim = len(dim)
    assert ndim in [1, 2], ndim
    # Contrib ops require the DFT dims to be the innermost ones.
    perm = not _is_last_dims(dim, input.ndim)
    if perm:
        perm_in, perm_out = _create_axes_perm(input.ndim, dim)
        # Add a dimension to account for complex output.
        perm_out.append(len(perm_out))
        # Transpose -> RFFT -> Transpose (inverse).
        input = input.permute(perm_in)
    rfft_func = OnnxRfft if ndim == 1 else OnnxRfft2
    output = rfft_func.apply(input)
    # Contrib op is unnormalized; "forward"/"ortho" scaling is applied here.
    output = _scale_output_forward(output, norm, input.size(), ndim)
    if perm:
        output = output.permute(perm_out)
    return output
def _irfft_onnx(
    input: Tensor, s: Optional[Tuple[Optional[int]]], dim: Tuple[int], norm: str
) -> Tensor:
    """Export-mode IRFFT: validate sizes, move DFT dims last if needed, run the
    Contrib-backed op, then adjust for the op's built-in 1/n normalization."""
    if s is not None:
        # Contrib Irfft cannot pad/trim, so requested sizes must match the input.
        _check_padding_irfft(s, dim, input.size())
    ndim = len(dim)
    assert ndim in [1, 2], ndim
    # Whether to permute axes when DFT axis is not the last.
    perm = not _is_last_dims(dim, input.ndim)
    if perm:
        # Do not include last dimension (input is complex).
        perm_in, perm_out = _create_axes_perm(input.ndim - 1, dim)
        # Add a dimension to account for complex input.
        perm_in.append(len(perm_in))
        # Transpose -> IRFFT -> Transpose (inverse).
        input = input.permute(perm_in)
    irfft_func = OnnxIrfft if ndim == 1 else OnnxIrfft2
    output = irfft_func.apply(input)
    # Contrib op always divides by N; cancel/adjust for "forward"/"ortho".
    output = _scale_output_backward(output, norm, input.size(), ndim)
    if perm:
        output = output.permute(perm_out)
    return output
def _contrib_rfft(g: torch.Graph, input: torch.Value, ndim: int) -> torch.Value:
    """Emit a com.microsoft::Rfft node (one-sided, unnormalized) into graph ``g``."""
    assert ndim in [1, 2], ndim
    # See https://github.com/microsoft/onnxruntime/blob/main/docs/ContribOperators.md#com.microsoft.Rfft
    # normalized/onesided are fixed: the Contrib op supports no other values.
    output = g.op(
        "com.microsoft::Rfft",
        input,
        normalized_i=0,
        onesided_i=1,
        signal_ndim_i=ndim,
    )
    return output
def _contrib_irfft(g: torch.Graph, input: torch.Value, ndim: int) -> torch.Value:
    """Emit a com.microsoft::Irfft node (one-sided, unnormalized) into graph ``g``."""
    assert ndim in [1, 2], ndim
    # See https://github.com/microsoft/onnxruntime/blob/main/docs/ContribOperators.md#com.microsoft.Irfft
    # normalized/onesided are fixed: the Contrib op supports no other values.
    output = g.op(
        "com.microsoft::Irfft",
        input,
        normalized_i=0,
        onesided_i=1,
        signal_ndim_i=ndim,
    )
    return output
def _is_last_dims(dim: Tuple[int], inp_ndim: int) -> bool:
ndim = len(dim)
for i, idim in enumerate(dim):
# This takes care of both positive and negative axis indices.
if idim % inp_ndim != inp_ndim - ndim + i:
return False
return True
def _check_padding_rfft(
sizes: Tuple[Optional[int]], dim: Tuple[int], inp_sizes: Tuple[int]
) -> None:
assert len(sizes) == len(dim), f"{sizes}, {dim}"
for i, s in enumerate(sizes):
if s is None or s < 0:
continue
# Current Contrib RFFT does not support pad/trim yet.
if s != inp_sizes[dim[i]]:
raise RuntimeError(
f"Padding/trimming is not yet supported, "
f"got sizes {sizes}, DFT dims {dim}, "
f"input dims {inp_sizes}."
)
def _check_padding_irfft(
sizes: Tuple[Optional[int]], dim: Tuple[int], inp_sizes: Tuple[int]
) -> None:
assert len(sizes) == len(dim), f"{sizes}, {dim}"
# All but last dims must be equal to input dims.
for i, s in enumerate(sizes[:-1]):
if s is None or s < 0:
continue
# Current Contrib RFFT does not support pad/trim yet.
if s != inp_sizes[dim[i]]:
raise RuntimeError(
f"Padding/trimming is not yet supported, "
f"got sizes {sizes}, DFT dims {dim}, "
f"input dims {inp_sizes}."
)
# Check last dim.
s = sizes[-1]
if s is not None and s > 0:
expected_size = 2 * (inp_sizes[dim[-1]] - 1)
if s != expected_size:
raise RuntimeError(
f"Padding/trimming is not yet supported, got sizes {sizes}"
f", DFT dims {dim}, input dims {inp_sizes}"
f", expected last size {expected_size}."
)
def _create_axes_perm(ndim: int, dims: Tuple[int]) -> Tuple[List[int], List[int]]:
"""Creates permuted axes indices for RFFT/IRFFT operators."""
perm_in = list(range(ndim))
perm_out = list(perm_in)
# Move indices to the right to make 'dims' as innermost dimensions.
for i in range(-1, -(len(dims) + 1), -1):
perm_in[dims[i]], perm_in[i] = perm_in[i], perm_in[dims[i]]
# Move indices to the left to restore original shape.
for i in range(-len(dims), 0):
perm_out[dims[i]], perm_out[i] = perm_out[i], perm_out[dims[i]]
return perm_in, perm_out
def _scale_output_forward(
output: Tensor, norm: str, sizes: torch.Size, ndim: int
) -> Tensor:
"""Scales the RFFT output according to norm parameter."""
norm = "backward" if norm is None else norm
assert norm in ["forward", "backward", "ortho"], norm
# No normalization for "backward" in RFFT ops.
if norm in ["forward", "ortho"]:
# Assuming DFT dimensions are the last. This is required by the current Contrib ops,
# so the axes permutation of the input is done accordingly.
dft_size = math.prod(sizes[-ndim:]).float()
denom = torch.sqrt(dft_size) if norm == "ortho" else dft_size
output = output / denom
return output
def _scale_output_backward(
output: Tensor, norm: str, sizes: torch.Size, ndim: int
) -> Tensor:
"""Scales the IRFFT output according to norm parameter."""
norm = "backward" if norm is None else norm
assert norm in ["forward", "backward", "ortho"], norm
# Things get interesting here: Contrib IRFFT op uses cuFFT cufftXtExec
# followed by a custom CUDA kernel (`_Normalize`) which always performs
# normalization (division by N) which means "norm" is essentially
# always "backward" here. So we need to cancel this normalization
# when norm is "forward" or "ortho".
if norm in ["forward", "ortho"]:
# Last dimension is complex numbers representation.
# Second-to-last dim corresponds to last dim in RFFT transform.
# This is required by the current Contrib ops,
# so the axes permutation of the input is done previously.
assert len(sizes) >= ndim + 1
dft_size = math.prod(sizes[-(ndim + 1) : -2])
dft_size *= 2 * (sizes[-2] - 1)
dft_size = dft_size.float()
# Since cuFFT scales by 1/dft_size, replace this scale with appropriate one.
scale = dft_size if norm == "forward" else torch.sqrt(dft_size)
output = scale * output
return output
class OnnxRfft(Function):
    """
    Auto-grad function to mimic rfft for ONNX exporting

    Note
    ----
    Should only be called during an ONNX export
    """
    @staticmethod
    def forward(ctx, input: Tensor) -> Tensor:
        """Trace-time 1d RFFT matching Contrib-op semantics (last dim, no norm)."""
        assert torch.onnx.is_in_onnx_export(), "Must be called only during ONNX export."
        # We need to mimic the behavior of Contrib RFFT which assumes
        # DFT of last dim and no normalization.
        y = torch.fft.rfft(input, dim=-1, norm="backward")
        return torch.view_as_real(y)
    @staticmethod
    def symbolic(g: torch.Graph, input: torch.Value) -> torch.Value:
        """Symbolic representation for onnx graph"""
        return _contrib_rfft(g, input, ndim=1)
class OnnxRfft2(Function):
    """
    Auto-grad function to mimic rfft2 for ONNX exporting

    Note
    ----
    Should only be called during an ONNX export
    """
    @staticmethod
    def forward(ctx, input: Tensor) -> Tensor:
        """Trace-time 2d RFFT matching Contrib-op semantics (last dims, no norm)."""
        assert torch.onnx.is_in_onnx_export(), "Must be called only during ONNX export."
        # We need to mimic the behavior of Contrib RFFT which assumes
        # DFT of last dims and no normalization.
        y = torch.fft.rfft2(input, dim=(-2, -1), norm="backward")
        return torch.view_as_real(y)
    @staticmethod
    def symbolic(g: torch.Graph, input: torch.Value) -> torch.Value:
        """Symbolic representation for onnx graph"""
        return _contrib_rfft(g, input, ndim=2)
class OnnxIrfft(Function):
    """
    Auto-grad function to mimic irfft for ONNX exporting

    Note
    ----
    Should only be called during an ONNX export
    """
    @staticmethod
    def forward(ctx, input: Tensor) -> Tensor:
        """Trace-time inverse 1d RFFT matching Contrib-op semantics (1/n norm)."""
        assert torch.onnx.is_in_onnx_export(), "Must be called only during ONNX export."
        # We need to mimic the behavior of Contrib IRFFT which assumes
        # DFT of last dim and 1/n normalization.
        return torch.fft.irfft(torch.view_as_complex(input), dim=-1, norm="backward")
    @staticmethod
    def symbolic(g: torch.Graph, input: torch.Value) -> torch.Value:
        """Symbolic representation for onnx graph"""
        return _contrib_irfft(g, input, ndim=1)
class OnnxIrfft2(Function):
    """
    Auto-grad function to mimic irfft2 for ONNX exporting.

    Note
    ----
    Should only be called during an ONNX export
    """
    @staticmethod
    def forward(ctx, input: Tensor) -> Tensor:
        """Trace-time inverse 2d RFFT matching Contrib-op semantics (1/n norm)."""
        assert torch.onnx.is_in_onnx_export(), "Must be called only during ONNX export."
        # We need to mimic the behavior of Contrib IRFFT which assumes
        # DFT of last dims and 1/n normalization.
        return torch.fft.irfft2(
            torch.view_as_complex(input), dim=(-2, -1), norm="backward"
        )
    @staticmethod
    def symbolic(g: torch.Graph, input: torch.Value) -> torch.Value:
        """Symbolic representation for onnx graph"""
        return _contrib_irfft(g, input, ndim=2)
|
modulus-main
|
modulus/models/layers/fft.py
|
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from torch import nn
class ComplexReLU(nn.Module):
    """
    Complex-valued variants of the ReLU activation function
    """
    def __init__(
        self, negative_slope=0.0, mode="real", bias_shape=None, scale=1.0
    ):  # pragma: no cover
        # mode selects how the complex input is treated:
        #   "real"      - LeakyReLU on the real part only
        #   "cartesian" - LeakyReLU on real and imaginary parts independently
        #   "modulus"   - thresholds the magnitude with a learnable bias
        #   "halfplane" - passes values whose bias-shifted phase lies in
        #                 [0, pi/2); others are scaled by negative_slope
        super(ComplexReLU, self).__init__()
        # store parameters
        self.mode = mode
        if self.mode in ["modulus", "halfplane"]:
            # learnable threshold: magnitude offset ("modulus") or angle ("halfplane")
            if bias_shape is not None:
                self.bias = nn.Parameter(
                    scale * torch.ones(bias_shape, dtype=torch.float32)
                )
            else:
                self.bias = nn.Parameter(scale * torch.ones((1), dtype=torch.float32))
        else:
            # plain attribute (not a Parameter) so it is excluded from training
            self.bias = 0
        self.negative_slope = negative_slope
        self.act = nn.LeakyReLU(negative_slope=negative_slope)
    def forward(self, z: torch.Tensor) -> torch.Tensor:  # pragma: no cover
        if self.mode == "cartesian":
            zr = torch.view_as_real(z)
            za = self.act(zr)
            out = torch.view_as_complex(za)
        elif self.mode == "modulus":
            zabs = torch.sqrt(torch.square(z.real) + torch.square(z.imag))
            # NOTE(review): torch.where evaluates both branches, so z / zabs
            # yields NaN/inf (and NaN gradients) when zabs == 0 — confirm
            # inputs are guaranteed nonzero or add an epsilon.
            out = torch.where(zabs + self.bias > 0, (zabs + self.bias) * z / zabs, 0.0)
            # out = self.act(zabs - self.bias) * torch.exp(1.j * z.angle())
        elif self.mode == "halfplane":
            # bias is an angle parameter in this case
            modified_angle = torch.angle(z) - self.bias
            condition = torch.logical_and(
                (0.0 <= modified_angle), (modified_angle < torch.pi / 2.0)
            )
            out = torch.where(condition, z, self.negative_slope * z)
        elif self.mode == "real":
            zr = torch.view_as_real(z)
            outr = zr.clone()
            # activation only touches channel 0 (the real part)
            outr[..., 0] = self.act(zr[..., 0])
            out = torch.view_as_complex(outr)
        else:
            raise NotImplementedError
        return out
class ComplexActivation(nn.Module):
    """
    A module implementing complex-valued activation functions.

    The module supports different modes of operation, depending on how
    the complex numbers are treated for the activation function:

    - "cartesian": the activation function is applied separately to the
      real and imaginary parts of the complex input.
    - "modulus": the activation function is applied to the modulus of
      the complex input, after adding a learnable bias.
    - any other mode: the complex input is returned as-is (identity operation).
    """

    def __init__(
        self, activation, mode="cartesian", bias_shape=None
    ):  # pragma: no cover
        super(ComplexActivation, self).__init__()
        self.mode = mode
        if self.mode == "modulus":
            # learnable magnitude offset, scalar unless a shape is given
            shape = bias_shape if bias_shape is not None else (1)
            self.bias = nn.Parameter(torch.zeros(shape, dtype=torch.float32))
        else:
            # non-trainable placeholder so self.bias always exists
            self.register_buffer("bias", torch.zeros((1), dtype=torch.float32))
        # real-valued activation applied inside each mode
        self.act = activation

    def forward(self, z: torch.Tensor) -> torch.Tensor:  # pragma: no cover
        if self.mode == "cartesian":
            # act on the real/imag channels independently
            return torch.view_as_complex(self.act(torch.view_as_real(z)))
        if self.mode == "modulus":
            magnitude = torch.sqrt(torch.square(z.real) + torch.square(z.imag))
            return self.act(magnitude + self.bias) * torch.exp(1.0j * z.angle())
        # any other mode: identity
        return z
|
modulus-main
|
modulus/models/sfno/activations.py
|
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
import tensorly as tl
tl.set_backend("pytorch")
from functools import partial
from modulus.models.sfno.contractions import (
_contract_diagonal,
_contract_dhconv,
_contract_sep_diagonal,
_contract_sep_dhconv,
_contract_diagonal_real,
_contract_dhconv_real,
_contract_sep_diagonal_real,
_contract_sep_dhconv_real,
)
from tltorch.factorized_tensors.core import FactorizedTensor
einsum_symbols = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ"
def _contract_dense(
    x, weight, separable=False, operator_type="diagonal"
):  # pragma: no cover
    """Einsum contraction of ``x`` with a dense (possibly factorized) weight.

    The einsum equation is built from the tensor order: the batch axis is
    kept, channels are contracted unless ``separable``, and the spatial axes
    follow ``operator_type`` ("diagonal", "block-diagonal" or "dhconv", the
    latter sharing the weight over the last spatial axis).

    Raises
    ------
    ValueError
        If ``operator_type`` is not one of the supported values.
    """
    order = tl.ndim(x)
    # batch-size, in_channels, x, y...
    x_syms = list(einsum_symbols[:order])
    # in_channels, out_channels, x, y...
    weight_syms = list(x_syms[1:])  # no batch-size
    # batch-size, out_channels, x, y...
    if separable:
        out_syms = [x_syms[0]] + list(weight_syms)
    else:
        weight_syms.insert(1, einsum_symbols[order])  # outputs
        out_syms = list(weight_syms)
        out_syms[0] = x_syms[0]
    if operator_type == "diagonal":
        pass
    elif operator_type == "block-diagonal":
        weight_syms.insert(-1, einsum_symbols[order + 1])
        out_syms[-1] = weight_syms[-2]
    elif operator_type == "dhconv":
        # weight carries no symbol for the last (shared) axis
        weight_syms.pop()
    else:
        # typo fix: was "Unkonw"
        raise ValueError(f"Unknown operator type {operator_type}")
    eq = "".join(x_syms) + "," + "".join(weight_syms) + "->" + "".join(out_syms)
    if not torch.is_tensor(weight):
        # FactorizedTensor -> materialize before the contraction
        weight = weight.to_tensor()
    return tl.einsum(eq, x, weight)
def _contract_cp(
    x, cp_weight, separable=False, operator_type="diagonal"
):  # pragma: no cover
    """Contract the input with a CP-factorized weight without reconstruction.

    Contracts each CP factor (and the rank weights) directly with the input
    via a single einsum.

    Parameters
    ----------
    x : torch.Tensor
        Input of shape (batch, in_channels, x, y, ...).
    cp_weight : tltorch CPTensor
        CP-factorized weight exposing ``weights`` and ``factors``.
    separable : bool, optional
        If True, no output-channel dimension is introduced.
    operator_type : str, optional
        One of "diagonal", "block-diagonal" or "dhconv".
    """
    order = tl.ndim(x)
    x_syms = str(einsum_symbols[:order])
    rank_sym = einsum_symbols[order]
    out_sym = einsum_symbols[order + 1]
    out_syms = list(x_syms)
    if separable:
        factor_syms = [einsum_symbols[1] + rank_sym]  # in only
    else:
        out_syms[1] = out_sym
        factor_syms = [einsum_symbols[1] + rank_sym, out_sym + rank_sym]  # in, out
    factor_syms += [xs + rank_sym for xs in x_syms[2:]]  # x, y, ...
    if operator_type == "diagonal":
        pass
    elif operator_type == "block-diagonal":
        out_syms[-1] = einsum_symbols[order + 2]
        factor_syms += [out_syms[-1] + rank_sym]
    elif operator_type == "dhconv":
        # dhconv: drop the factor over the last (longitudinal) mode dimension
        factor_syms.pop()
    else:
        raise ValueError(f"Unknown operator type {operator_type}")
    eq = (
        x_syms + "," + rank_sym + "," + ",".join(factor_syms) + "->" + "".join(out_syms)
    )
    return tl.einsum(eq, x, cp_weight.weights, *cp_weight.factors)
def _contract_tucker(
    x, tucker_weight, separable=False, operator_type="diagonal"
):  # pragma: no cover
    """Contract the input with a Tucker-factorized weight without reconstruction.

    Contracts the Tucker core and each factor directly with the input via a
    single einsum.

    Parameters
    ----------
    x : torch.Tensor
        Input of shape (batch, in_channels, x, y, ...).
    tucker_weight : tltorch TuckerTensor
        Tucker-factorized weight exposing ``core`` and ``factors``.
    separable : bool, optional
        If True, no output-channel dimension is introduced.
    operator_type : str, optional
        Only "diagonal" is supported; "block-diagonal" raises
        NotImplementedError.
    """
    order = tl.ndim(x)
    x_syms = str(einsum_symbols[:order])
    out_sym = einsum_symbols[order]
    out_syms = list(x_syms)
    if separable:
        core_syms = einsum_symbols[order + 1 : 2 * order]
        # factor_syms = [einsum_symbols[1]+core_syms[0]] #in only
        factor_syms = [xs + rs for (xs, rs) in zip(x_syms[1:], core_syms)]  # x, y, ...
    else:
        core_syms = einsum_symbols[order + 1 : 2 * order + 1]
        out_syms[1] = out_sym
        factor_syms = [
            einsum_symbols[1] + core_syms[0],
            out_sym + core_syms[1],
        ]  # in, out
        factor_syms += [
            xs + rs for (xs, rs) in zip(x_syms[2:], core_syms[2:])
        ]  # x, y, ...
    if operator_type == "diagonal":
        pass
    elif operator_type == "block-diagonal":
        raise NotImplementedError(
            f"Operator type {operator_type} not implemented for Tucker"
        )
    else:
        raise ValueError(f"Unknown operator type {operator_type}")
    eq = (
        x_syms
        + ","
        + core_syms
        + ","
        + ",".join(factor_syms)
        + "->"
        + "".join(out_syms)
    )
    return tl.einsum(eq, x, tucker_weight.core, *tucker_weight.factors)
def _contract_tt(
    x, tt_weight, separable=False, operator_type="diagonal"
):  # pragma: no cover
    """Contract the input with a tensor-train factorized weight.

    Each TT core carries a (left-rank, mode, right-rank) index triple; all
    cores are contracted with the input in a single einsum.

    Parameters
    ----------
    x : torch.Tensor
        Input of shape (batch, in_channels, x, y, ...).
    tt_weight : tltorch TTTensor
        Tensor-train factorized weight exposing ``factors``.
    separable : bool, optional
        If True, no output-channel dimension is introduced.
    operator_type : str, optional
        One of "diagonal", "block-diagonal" or "dhconv".
    """
    order = tl.ndim(x)
    x_syms = list(einsum_symbols[:order])
    weight_syms = list(x_syms[1:])  # no batch-size
    if not separable:
        weight_syms.insert(1, einsum_symbols[order])  # outputs
        out_syms = list(weight_syms)
        out_syms[0] = x_syms[0]
    else:
        out_syms = list(x_syms)
    if operator_type == "diagonal":
        pass
    elif operator_type == "block-diagonal":
        weight_syms.insert(-1, einsum_symbols[order + 1])
        out_syms[-1] = weight_syms[-2]
    elif operator_type == "dhconv":
        # dhconv: weight has no index over the last (longitudinal) dimension
        weight_syms.pop()
    else:
        raise ValueError(f"Unknown operator type {operator_type}")
    rank_syms = list(einsum_symbols[order + 2 :])
    tt_syms = []
    for i, s in enumerate(weight_syms):
        # each TT core: (left rank, mode, right rank)
        tt_syms.append([rank_syms[i], s, rank_syms[i + 1]])
    eq = (
        "".join(x_syms)
        + ","
        + ",".join("".join(f) for f in tt_syms)
        + "->"
        + "".join(out_syms)
    )
    return tl.einsum(eq, x, *tt_weight.factors)
# jitted PyTorch contractions:
def _contract_dense_pytorch(
    x, weight, separable=False, operator_type="diagonal", complex=True
):  # pragma: no cover
    """Dispatch to the jitted PyTorch contraction kernels.

    The complex input is viewed as a real tensor (trailing dim of size 2) so
    that fused optimizers can operate on it, contracted with the weight via
    the specialized kernels, and finally viewed back as complex.

    Parameters
    ----------
    x : torch.Tensor
        Complex input tensor.
    weight : torch.Tensor
        Weight tensor; complex-valued layout when ``complex=True``, real
        otherwise.
    separable : bool, optional
        Whether to use the depthwise (separable) kernels.
    operator_type : str, optional
        "diagonal" or "dhconv".
    complex : bool, optional
        Whether the weight is complex-valued.
    """
    # to cheat the fused optimizers convert to real here
    x = torch.view_as_real(x)
    if separable:
        if operator_type == "diagonal":
            if complex:
                x = _contract_sep_diagonal(x, weight)
            else:
                x = _contract_sep_diagonal_real(x, weight)
        elif operator_type == "dhconv":
            if complex:
                x = _contract_sep_dhconv(x, weight)
            else:
                x = _contract_sep_dhconv_real(x, weight)
        else:
            raise ValueError(f"Unknown operator type {operator_type}")
    else:
        if operator_type == "diagonal":
            if complex:
                x = _contract_diagonal(x, weight)
            else:
                x = _contract_diagonal_real(x, weight)
        elif operator_type == "dhconv":
            if complex:
                x = _contract_dhconv(x, weight)
            else:
                x = _contract_dhconv_real(x, weight)
        else:
            raise ValueError(f"Unknown operator type {operator_type}")
    # convert back from the real view to a complex tensor
    x = torch.view_as_complex(x)
    return x
def get_contract_fun(
    weight,
    implementation="reconstructed",
    separable=False,
    operator_type="diagonal",
    complex=True,
):  # pragma: no cover
    """Generic ND implementation of Fourier Spectral Conv contraction

    Parameters
    ----------
    weight : tensorly-torch's FactorizedTensor or torch.Tensor
        The (possibly factorized) weight the returned handle will contract
        with.
    implementation : {'reconstructed', 'factorized'}, default is 'reconstructed'
        whether to reconstruct the weight and do a forward pass (reconstructed)
        or contract directly the factors of the factorized weight with the input
        (factorized)
    separable : bool, optional
        Whether the contraction is depthwise (no output-channel dimension).
        Only bound into the handle for the plain-tensor factorized path.
    operator_type : str, optional
        Operator type, e.g. "diagonal" or "dhconv".
    complex : bool, optional
        Whether a plain tensor weight is interpreted as complex-valued.

    Returns
    -------
    function : (x, weight) -> x * weight in Fourier space

    Raises
    ------
    ValueError
        For unknown implementations or unsupported weight types.
    """
    if implementation == "reconstructed":
        # NOTE: separable/operator_type are not bound here; callers pass them
        # as keyword arguments on every invocation of the returned handle.
        return _contract_dense
    elif implementation == "factorized":
        if torch.is_tensor(weight):
            # plain tensor: bind the configuration into the jitted kernel
            handle = partial(
                _contract_dense_pytorch,
                separable=separable,
                complex=complex,
                operator_type=operator_type,
            )
            return handle
        elif isinstance(weight, FactorizedTensor):
            # dispatch on the tltorch factorization name
            if weight.name.lower() == "complexdense" or weight.name.lower() == "dense":
                return _contract_dense
            elif weight.name.lower() == "complextucker":
                return _contract_tucker
            elif weight.name.lower() == "complextt":
                return _contract_tt
            elif weight.name.lower() == "complexcp":
                return _contract_cp
            else:
                raise ValueError(f"Got unexpected factorized weight type {weight.name}")
        else:
            raise ValueError(
                f"Got unexpected weight type of class {weight.__class__.__name__}"
            )
    else:
        raise ValueError(
            f'Got {implementation}, expected "reconstructed" or "factorized"'
        )
|
modulus-main
|
modulus/models/sfno/factorizations.py
|
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
import torch.nn as nn
import torch.nn.functional as F
from modulus.utils.sfno.distributed import comm
from modulus.utils.sfno.distributed.mappings import (
reduce_from_parallel_region,
copy_to_parallel_region,
)
class Preprocessor2D(nn.Module):
    """
    Preprocessing methods to flatten image history, add static features, and
    convert the data format from NCHW to NHWC.
    """

    def __init__(self, params):  # pragma: no cover
        super(Preprocessor2D, self).__init__()

        self.n_history = params.n_history
        self.transform_to_nhwc = params.enable_nhwc
        self.history_normalization_mode = params.history_normalization_mode
        if self.history_normalization_mode == "exponential":
            self.history_normalization_decay = params.history_normalization_decay
            # inverse ordering, since first element is oldest
            history_normalization_weights = torch.exp(
                (-self.history_normalization_decay)
                * torch.arange(
                    start=self.n_history, end=-1, step=-1, dtype=torch.float32
                )
            )
            history_normalization_weights = history_normalization_weights / torch.sum(
                history_normalization_weights
            )
            history_normalization_weights = torch.reshape(
                history_normalization_weights, (1, -1, 1, 1, 1)
            )
        elif self.history_normalization_mode == "mean":
            # use the torch.tensor factory here: torch.Tensor(...) does not
            # accept a python float together with a dtype kwarg and raises a
            # TypeError at construction time
            history_normalization_weights = torch.tensor(
                1.0 / float(self.n_history + 1), dtype=torch.float32
            )
            history_normalization_weights = torch.reshape(
                history_normalization_weights, (1, -1, 1, 1, 1)
            )
        else:
            history_normalization_weights = torch.ones(
                self.n_history + 1, dtype=torch.float32
            )
        self.register_buffer(
            "history_normalization_weights",
            history_normalization_weights,
            persistent=False,
        )
        # running stats, filled lazily by history_compute_stats
        self.history_mean = None
        self.history_std = None
        self.history_diff_mean = None
        self.history_diff_var = None
        self.history_eps = 1e-6

        self.img_shape = [params.img_shape_x, params.img_shape_y]

        # unpredicted input channels:
        self.unpredicted_inp_train = None
        self.unpredicted_tar_train = None
        self.unpredicted_inp_eval = None
        self.unpredicted_tar_eval = None

        # process static features
        static_features = None
        # needed for sharding
        start_x = params.img_local_offset_x
        end_x = min(start_x + params.img_local_shape_x, params.img_shape_x)
        pad_x = params.img_local_shape_x - (end_x - start_x)
        start_y = params.img_local_offset_y
        end_y = min(start_y + params.img_local_shape_y, params.img_shape_y)
        pad_y = params.img_local_shape_y - (end_y - start_y)

        # set up grid
        if params.add_grid:
            with torch.no_grad():
                tx = torch.linspace(0, 1, params.img_shape_x + 1, dtype=torch.float32)[
                    0:-1
                ]
                ty = torch.linspace(0, 1, params.img_shape_y + 1, dtype=torch.float32)[
                    0:-1
                ]

                x_grid, y_grid = torch.meshgrid(tx, ty, indexing="ij")
                x_grid, y_grid = x_grid.unsqueeze(0).unsqueeze(0), y_grid.unsqueeze(
                    0
                ).unsqueeze(0)
                grid = torch.cat([x_grid, y_grid], dim=1)

                # shard spatially:
                grid = grid[:, :, start_x:end_x, start_y:end_y]

                # pad if needed
                grid = F.pad(grid, [0, pad_y, 0, pad_x])

                # transform if requested
                if params.gridtype == "sinusoidal":
                    num_freq = 1
                    if hasattr(params, "grid_num_frequencies"):
                        num_freq = int(params.grid_num_frequencies)

                    singrid = None
                    for freq in range(1, num_freq + 1):
                        if singrid is None:
                            singrid = torch.sin(grid)
                        else:
                            singrid = torch.cat(
                                [singrid, torch.sin(freq * grid)], dim=1
                            )

                    static_features = singrid
                else:
                    static_features = grid

        if params.add_orography:
            from utils.conditioning_inputs import get_orography

            oro = torch.tensor(
                get_orography(params.orography_path), dtype=torch.float32
            )
            oro = torch.reshape(oro, (1, 1, oro.shape[0], oro.shape[1]))

            # shard
            oro = oro[:, :, start_x:end_x, start_y:end_y]

            # pad if needed
            oro = F.pad(oro, [0, pad_y, 0, pad_x])

            if static_features is None:
                static_features = oro
            else:
                static_features = torch.cat([static_features, oro], dim=1)

        if params.add_landmask:
            from utils.conditioning_inputs import get_land_mask

            lsm = torch.tensor(get_land_mask(params.landmask_path), dtype=torch.long)
            # one hot encode and move channels to front:
            lsm = torch.permute(torch.nn.functional.one_hot(lsm), (2, 0, 1)).to(
                torch.float32
            )
            lsm = torch.reshape(lsm, (1, lsm.shape[0], lsm.shape[1], lsm.shape[2]))

            # shard
            lsm = lsm[:, :, start_x:end_x, start_y:end_y]

            # pad if needed
            lsm = F.pad(lsm, [0, pad_y, 0, pad_x])

            if static_features is None:
                static_features = lsm
            else:
                static_features = torch.cat([static_features, lsm], dim=1)

        self.do_add_static_features = False
        if static_features is not None:
            self.do_add_static_features = True
            self.register_buffer("static_features", static_features, persistent=False)

    def flatten_history(self, x):  # pragma: no cover
        """Flatten input so that history is included as part of channels"""
        if x.dim() == 5:
            b_, t_, c_, h_, w_ = x.shape
            x = torch.reshape(x, (b_, t_ * c_, h_, w_))
        return x

    def expand_history(self, x, nhist):  # pragma: no cover
        """Expand history from flattened data"""
        if x.dim() == 4:
            b_, ct_, h_, w_ = x.shape
            x = torch.reshape(x, (b_, nhist, ct_ // nhist, h_, w_))
        return x

    def add_static_features(self, x):  # pragma: no cover
        """Adds static features to the input"""
        if self.do_add_static_features:
            # we need to replicate the grid for each batch:
            static = torch.tile(self.static_features, dims=(x.shape[0], 1, 1, 1))
            x = torch.cat([x, static], dim=1)
        return x

    def remove_static_features(self, x):  # pragma: no cover
        """
        Removes static features from the input
        only remove if something was added in the first place
        """
        if self.do_add_static_features:
            nfeat = self.static_features.shape[1]
            x = x[:, : x.shape[1] - nfeat, :, :]
        return x

    def append_history(self, x1, x2, step):  # pragma: no cover
        """
        Appends history to the main input.
        Without history, just returns the second tensor (x2).
        """
        # take care of unpredicted features first
        # this is necessary in order to copy the targets unpredicted features
        # (such as zenith angle) into the inputs unpredicted features,
        # such that they can be forward in the next autoregressive step
        # update the unpredicted input
        if self.training:
            if (self.unpredicted_tar_train is not None) and (
                step < self.unpredicted_tar_train.shape[1]
            ):
                utar = self.unpredicted_tar_train[:, step : (step + 1), :, :, :]
                if self.n_history == 0:
                    self.unpredicted_inp_train.copy_(utar)
                else:
                    # roll the history window forward by one step
                    self.unpredicted_inp_train.copy_(
                        torch.cat(
                            [self.unpredicted_inp_train[:, 1:, :, :, :], utar], dim=1
                        )
                    )
        else:
            if (self.unpredicted_tar_eval is not None) and (
                step < self.unpredicted_tar_eval.shape[1]
            ):
                utar = self.unpredicted_tar_eval[:, step : (step + 1), :, :, :]
                if self.n_history == 0:
                    self.unpredicted_inp_eval.copy_(utar)
                else:
                    self.unpredicted_inp_eval.copy_(
                        torch.cat(
                            [self.unpredicted_inp_eval[:, 1:, :, :, :], utar], dim=1
                        )
                    )

        # without history, just return the second tensor
        if self.n_history > 0:
            # this is more complicated
            x1 = self.expand_history(x1, nhist=self.n_history + 1)
            x2 = self.expand_history(x2, nhist=1)
            # append
            res = torch.cat([x1[:, 1:, :, :, :], x2], dim=1)
            # flatten again
            res = self.flatten_history(res)
        else:
            res = x2
        return res

    def append_channels(self, x, xc):  # pragma: no cover
        """Appends channels"""
        xdim = x.dim()
        x = self.expand_history(x, self.n_history + 1)
        xc = self.expand_history(xc, self.n_history + 1)
        # concatenate
        xo = torch.cat([x, xc], dim=2)
        # flatten if requested
        if xdim == 4:
            xo = self.flatten_history(xo)
        return xo

    def history_compute_stats(self, x):  # pragma: no cover
        """Compute stats from history timesteps"""
        if self.history_normalization_mode == "none":
            self.history_mean = torch.zeros(
                (1, 1, 1, 1), dtype=torch.float32, device=x.device
            )
            self.history_std = torch.ones(
                (1, 1, 1, 1), dtype=torch.float32, device=x.device
            )
        elif self.history_normalization_mode == "timediff":
            # NOTE(review): xr is 5-D here, so dim=(4, 5) is out of range and
            # the subsequent broadcasts look inconsistent — this mode appears
            # broken as written; confirm intended reduction dims (e.g. (3, 4)).
            # reshaping
            xdim = x.dim()
            if xdim == 4:
                b_, c_, h_, w_ = x.shape
                xr = torch.reshape(
                    x, (b_, (self.n_history + 1), c_ // (self.n_history + 1), h_, w_)
                )
            else:
                xr = x
            # time difference mean:
            self.history_diff_mean = torch.mean(
                torch.sum(xr[:, 1:, ...] - xr[:, 0:-1, ...], dim=(4, 5)), dim=(1, 2)
            )
            # reduce across gpus
            if comm.get_size("spatial") > 1:
                self.history_diff_mean = reduce_from_parallel_region(
                    self.history_diff_mean, "spatial"
                )
            self.history_diff_mean = self.history_diff_mean / float(
                self.img_shape[0] * self.img_shape[1]
            )
            # time difference std
            self.history_diff_var = torch.mean(
                torch.sum(
                    torch.square(
                        (xr[:, 1:, ...] - xr[:, 0:-1, ...]) - self.history_diff_mean
                    ),
                    dim=(4, 5),
                ),
                dim=(1, 2),
            )
            # reduce across gpus
            if comm.get_size("spatial") > 1:
                self.history_diff_var = reduce_from_parallel_region(
                    self.history_diff_var, "spatial"
                )
            self.history_diff_var = self.history_diff_var / float(
                self.img_shape[0] * self.img_shape[1]
            )
            # time difference stds
            self.history_diff_mean = copy_to_parallel_region(
                self.history_diff_mean, "spatial"
            )
            self.history_diff_var = copy_to_parallel_region(
                self.history_diff_var, "spatial"
            )
        else:
            xdim = x.dim()
            if xdim == 4:
                b_, c_, h_, w_ = x.shape
                xr = torch.reshape(
                    x, (b_, (self.n_history + 1), c_ // (self.n_history + 1), h_, w_)
                )
            else:
                xr = x
            # mean
            # compute weighted mean over dim 1, but sum over dim=3,4
            self.history_mean = torch.sum(
                xr * self.history_normalization_weights, dim=(1, 3, 4), keepdim=True
            )
            # reduce across gpus
            if comm.get_size("spatial") > 1:
                self.history_mean = reduce_from_parallel_region(
                    self.history_mean, "spatial"
                )
            self.history_mean = self.history_mean / float(
                self.img_shape[0] * self.img_shape[1]
            )
            # compute std
            self.history_std = torch.sum(
                torch.square(xr - self.history_mean)
                * self.history_normalization_weights,
                dim=(1, 3, 4),
                keepdim=True,
            )
            # reduce across gpus
            if comm.get_size("spatial") > 1:
                self.history_std = reduce_from_parallel_region(
                    self.history_std, "spatial"
                )
            self.history_std = torch.sqrt(
                self.history_std / float(self.img_shape[0] * self.img_shape[1])
            )
            # squeeze
            self.history_mean = torch.squeeze(self.history_mean, dim=1)
            self.history_std = torch.squeeze(self.history_std, dim=1)
        # copy to parallel region
        self.history_mean = copy_to_parallel_region(self.history_mean, "spatial")
        self.history_std = copy_to_parallel_region(self.history_std, "spatial")
        return

    def history_normalize(self, x, target=False):  # pragma: no cover
        """Normalize history"""
        if self.history_normalization_mode in ["none", "timediff"]:
            return x

        xdim = x.dim()
        if xdim == 5:
            xshape = x.shape
            x = self.flatten_history(x)

        # normalize
        if target:
            # strip off the unpredicted channels
            xn = (x - self.history_mean[:, : x.shape[1], :, :]) / self.history_std[
                :, : x.shape[1], :, :
            ]
        else:
            # tile to include history
            hm = torch.tile(self.history_mean, (1, self.n_history + 1, 1, 1))
            hs = torch.tile(self.history_std, (1, self.n_history + 1, 1, 1))
            xn = (x - hm) / hs

        if xdim == 5:
            xn = torch.reshape(xn, xshape)

        return xn

    def history_denormalize(self, xn, target=False):  # pragma: no cover
        """Denormalize history"""
        if self.history_normalization_mode in ["none", "timediff"]:
            return xn

        assert self.history_mean is not None
        assert self.history_std is not None

        xndim = xn.dim()
        if xndim == 5:
            xnshape = xn.shape
            xn = self.flatten_history(xn)

        # de-normalize
        if target:
            # strip off the unpredicted channels
            x = (
                xn * self.history_std[:, : xn.shape[1], :, :]
                + self.history_mean[:, : xn.shape[1], :, :]
            )
        else:
            # tile to include history
            hm = torch.tile(self.history_mean, (1, self.n_history + 1, 1, 1))
            hs = torch.tile(self.history_std, (1, self.n_history + 1, 1, 1))
            x = xn * hs + hm

        if xndim == 5:
            x = torch.reshape(x, xnshape)

        return x

    def cache_unpredicted_features(
        self, x, y=None, xz=None, yz=None
    ):  # pragma: no cover
        """Caches features not predicted by the model (such as zenith angle)"""
        if self.training:
            if (self.unpredicted_inp_train is not None) and (xz is not None):
                self.unpredicted_inp_train.copy_(xz)
            else:
                self.unpredicted_inp_train = xz
            if (self.unpredicted_tar_train is not None) and (yz is not None):
                self.unpredicted_tar_train.copy_(yz)
            else:
                self.unpredicted_tar_train = yz
        else:
            if (self.unpredicted_inp_eval is not None) and (xz is not None):
                self.unpredicted_inp_eval.copy_(xz)
            else:
                self.unpredicted_inp_eval = xz
            if (self.unpredicted_tar_eval is not None) and (yz is not None):
                self.unpredicted_tar_eval.copy_(yz)
            else:
                self.unpredicted_tar_eval = yz
        return x, y

    def append_unpredicted_features(self, inp):  # pragma: no cover
        """Appends features not predicted by the model (such as zenith angle) from the input"""
        if self.training:
            if self.unpredicted_inp_train is not None:
                inp = self.append_channels(inp, self.unpredicted_inp_train)
        else:
            if self.unpredicted_inp_eval is not None:
                inp = self.append_channels(inp, self.unpredicted_inp_eval)
        return inp

    def remove_unpredicted_features(self, inp):  # pragma: no cover
        """Removes features not predicted by the model (such as zenith angle) from the input"""
        if self.training:
            if self.unpredicted_inp_train is not None:
                inpf = self.expand_history(inp, nhist=self.n_history + 1)
                inpc = inpf[
                    :, :, : inpf.shape[2] - self.unpredicted_inp_train.shape[2], :, :
                ]
                inp = self.flatten_history(inpc)
        else:
            if self.unpredicted_inp_eval is not None:
                inpf = self.expand_history(inp, nhist=self.n_history + 1)
                inpc = inpf[
                    :, :, : inpf.shape[2] - self.unpredicted_inp_eval.shape[2], :, :
                ]
                inp = self.flatten_history(inpc)
        return inp
def get_preprocessor(params):  # pragma: no cover
    """Factory returning a :class:`Preprocessor2D` built from ``params``.

    ``params`` must expose the attributes read by ``Preprocessor2D.__init__``
    (``n_history``, ``img_shape_x``, ``add_grid`` etc.).
    """
    return Preprocessor2D(params)
|
modulus-main
|
modulus/models/sfno/preprocessor.py
|
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math
import torch
import warnings
def _no_grad_trunc_normal_(tensor, mean, std, a, b):
# Cut & paste from PyTorch official master until it's in a few official releases - RW
# Method based on https://people.sc.fsu.edu/~jburkardt/presentations/truncated_normal.pdf
def norm_cdf(x): # pragma: no cover
# Computes standard normal cumulative distribution function
return (1.0 + math.erf(x / math.sqrt(2.0))) / 2.0
if (mean < a - 2 * std) or (mean > b + 2 * std):
warnings.warn(
"mean is more than 2 std from [a, b] in nn.init.trunc_normal_. "
"The distribution of values may be incorrect.",
stacklevel=2,
)
with torch.no_grad():
# Values are generated by using a truncated uniform distribution and
# then using the inverse CDF for the normal distribution.
# Get upper and lower cdf values
l = norm_cdf((a - mean) / std)
u = norm_cdf((b - mean) / std)
# Uniformly fill tensor with values from [l, u], then translate to
# [2l-1, 2u-1].
tensor.uniform_(2 * l - 1, 2 * u - 1)
# Use inverse cdf transform for normal distribution to get truncated
# standard normal
tensor.erfinv_()
# Transform to proper mean, std
tensor.mul_(std * math.sqrt(2.0))
tensor.add_(mean)
# Clamp to ensure it's in the proper range
tensor.clamp_(min=a, max=b)
return tensor
def trunc_normal_(tensor, mean=0.0, std=1.0, a=-2.0, b=2.0):  # pragma: no cover
    r"""Fills the input Tensor with values drawn from a truncated
    normal distribution. The values are effectively drawn from the
    normal distribution :math:`\mathcal{N}(\text{mean}, \text{std}^2)`
    with values outside :math:`[a, b]` redrawn until they are within
    the bounds. The method used for generating the random values works
    best when :math:`a \leq \text{mean} \leq b`.

    Args:
        tensor: an n-dimensional `torch.Tensor`
        mean: the mean of the normal distribution
        std: the standard deviation of the normal distribution
        a: the minimum cutoff value
        b: the maximum cutoff value

    Returns:
        The input ``tensor``, filled in-place.
    """
    return _no_grad_trunc_normal_(tensor, mean, std, a, b)
|
modulus-main
|
modulus/models/sfno/initialization.py
|
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.cuda import amp
# import FactorizedTensor from tensorly for tensorized operations
import tensorly as tl
tl.set_backend("pytorch")
# from tensorly.plugins import use_opt_einsum
# use_opt_einsum('optimal')
from tltorch.factorized_tensors.core import FactorizedTensor
# import convenience functions for factorized tensors
from modulus.models.sfno.activations import ComplexReLU
from modulus.models.sfno.contractions import compl_muladd2d_fwd, compl_mul2d_fwd
from modulus.models.sfno.contractions import _contract_localconv_fwd
from modulus.models.sfno.contractions import (
_contract_blockconv_fwd,
_contractadd_blockconv_fwd,
)
from modulus.models.sfno.factorizations import get_contract_fun
# for the experimental module
from modulus.models.sfno.contractions import (
compl_exp_muladd2d_fwd,
compl_exp_mul2d_fwd,
real_mul2d_fwd,
real_muladd2d_fwd,
)
import torch_harmonics as th
import torch_harmonics.distributed as thd
class SpectralConvS2(nn.Module):
    """
    Spectral Convolution according to Driscoll & Healy. Designed for convolutions on
    the two-sphere S2 using the Spherical Harmonic Transforms in torch-harmonics, but
    supports convolutions on the periodic domain via the RealFFT2 and InverseRealFFT2
    wrappers.
    """

    def __init__(
        self,
        forward_transform,
        inverse_transform,
        in_channels,
        out_channels,
        scale="auto",
        operator_type="diagonal",
        rank=0.2,
        factorization=None,
        separable=False,
        decomposition_kwargs=dict(),
        bias=False,
        use_tensorly=True,
    ):  # pragma: no cover
        """
        Parameters
        ----------
        forward_transform, inverse_transform : nn.Module
            Spectral transform pair (SHT or FFT wrappers) exposing
            ``lmax``/``mmax`` and grid shape attributes.
        in_channels, out_channels : int
            Channel counts of the convolution.
        scale : float or "auto"
            Weight init scale; "auto" uses 2 / (in + out).
        operator_type : str
            "diagonal" or "dhconv".
        rank : float
            Rank for factorized weights.
        factorization : str or None
            tltorch factorization name; None means dense complex weights.
        separable : bool
            If True, no output-channel dimension in the weight.
        decomposition_kwargs : dict
            Extra kwargs forwarded to ``FactorizedTensor.new``.
        bias : bool
            Whether to add a learned per-channel bias.
        use_tensorly : bool
            If True use tltorch factorized weights, else plain nn.Parameter.
        """
        super(SpectralConvS2, self).__init__()

        if scale == "auto":
            # heuristic
            scale = 2 / (in_channels + out_channels)

        self.forward_transform = forward_transform
        self.inverse_transform = inverse_transform

        self.modes_lat = self.inverse_transform.lmax
        self.modes_lon = self.inverse_transform.mmax

        # a residual rescale is needed whenever the forward and inverse
        # transforms live on different grids/resolutions
        self.scale_residual = (
            self.forward_transform.nlat != self.inverse_transform.nlat
        ) or (self.forward_transform.nlon != self.inverse_transform.nlon)
        if hasattr(self.forward_transform, "grid"):
            self.scale_residual = self.scale_residual or (
                self.forward_transform.grid != self.inverse_transform.grid
            )

        # Make sure we are using a Complex Factorized Tensor
        if factorization is None:
            factorization = "ComplexDense"  # No factorization
        complex_weight = factorization[:7].lower() == "complex"

        # remember factorization details
        self.operator_type = operator_type
        self.rank = rank
        self.factorization = factorization
        self.separable = separable

        assert self.inverse_transform.lmax == self.modes_lat
        assert self.inverse_transform.mmax == self.modes_lon

        weight_shape = [in_channels]

        if not self.separable:
            weight_shape += [out_channels]

        if isinstance(self.inverse_transform, thd.DistributedInverseRealSHT):
            self.modes_lat_local = self.inverse_transform.lmax_local
            self.modes_lon_local = self.inverse_transform.mmax_local
            self.lpad_local = self.inverse_transform.lpad_local
            self.mpad_local = self.inverse_transform.mpad_local
        else:
            self.modes_lat_local = self.modes_lat
            self.modes_lon_local = self.modes_lon
            # NOTE(review): the distributed branch above sets lpad_local /
            # mpad_local, while this branch sets lpad / mpad — confirm which
            # attribute names downstream code actually reads.
            self.lpad = 0
            self.mpad = 0

        # unpadded weights
        if self.operator_type == "diagonal":
            weight_shape += [self.modes_lat_local, self.modes_lon_local]
        elif self.operator_type == "dhconv":
            # dhconv weights only depend on the latitudinal modes
            weight_shape += [self.modes_lat_local]
        else:
            raise ValueError(f"Unsupported operator type {self.operator_type}")

        if use_tensorly:
            # form weight tensors
            self.weight = FactorizedTensor.new(
                weight_shape,
                rank=self.rank,
                factorization=factorization,
                fixed_rank_modes=False,
                **decomposition_kwargs,
            )
            # initialization of weights
            self.weight.normal_(0, scale)
        else:
            # plain parameter; trailing dim of 2 emulates complex values
            if complex_weight:
                init = scale * torch.randn(*weight_shape, 2)
                self.weight = nn.Parameter(init)
            else:
                init = scale * torch.randn(*weight_shape)
                self.weight = nn.Parameter(init)

            # annotate sharding metadata for the model-parallel machinery
            if self.operator_type == "dhconv":
                self.weight.is_shared_mp = ["matmul", "w"]
                self.weight.sharded_dims_mp = [None for _ in weight_shape]
                self.weight.sharded_dims_mp[-1] = "h"
            else:
                self.weight.is_shared_mp = ["matmul"]
                self.weight.sharded_dims_mp = [None for _ in weight_shape]
                self.weight.sharded_dims_mp[-1] = "w"
                self.weight.sharded_dims_mp[-2] = "h"

        # get the contraction handle
        self._contract = get_contract_fun(
            self.weight,
            implementation="factorized",
            separable=separable,
            complex=complex_weight,
            operator_type=operator_type,
        )

        if bias:
            self.bias = nn.Parameter(scale * torch.zeros(1, out_channels, 1, 1))

    def forward(self, x):  # pragma: no cover
        """Apply the spectral convolution; returns (output, residual)."""
        dtype = x.dtype
        residual = x
        # transforms run in float32 regardless of autocast
        x = x.float()
        B, C, H, W = x.shape

        with amp.autocast(enabled=False):
            x = self.forward_transform(x)
            if self.scale_residual:
                x = x.contiguous()
                residual = self.inverse_transform(x)
                residual = residual.to(dtype)

        # approach with unpadded weights
        xp = torch.zeros_like(x)
        xp[..., : self.modes_lat_local, : self.modes_lon_local] = self._contract(
            x[..., : self.modes_lat_local, : self.modes_lon_local],
            self.weight,
            separable=self.separable,
            operator_type=self.operator_type,
        )
        x = xp.contiguous()

        with amp.autocast(enabled=False):
            x = self.inverse_transform(x)

        if hasattr(self, "bias"):
            x = x + self.bias

        x = x.type(dtype)

        return x, residual
class SpectralAttentionS2(nn.Module):
    """
    Spherical non-linear FNO layer
    """

    def __init__(
        self,
        forward_transform,
        inverse_transform,
        embed_dim,
        operator_type="diagonal",
        sparsity_threshold=0.0,
        hidden_size_factor=2,
        complex_activation="real",
        scale="auto",
        bias=False,
        spectral_layers=1,
        drop_rate=0.0,
    ):  # pragma: no cover
        """
        Parameters
        ----------
        forward_transform, inverse_transform : nn.Module
            Spectral transform pair exposing ``lmax``/``mmax``, ``nlat``/
            ``nlon`` and ``grid``.
        embed_dim : int
            Embedding (channel) dimension.
        operator_type : str
            "diagonal" or "l-dependant".
        sparsity_threshold : float
            Stored for reference; not used in this module.
        hidden_size_factor : int
            Hidden width of the spectral MLP relative to embed_dim.
        complex_activation : str
            Mode forwarded to ComplexReLU.
        scale : float or "auto"
            Weight init scale; "auto" uses 1 / embed_dim**2.
        bias : bool
            Whether to use bias terms in the spectral MLP.
        spectral_layers : int
            Number of hidden layers in the spectral MLP.
        drop_rate : float
            Dropout rate applied between spectral layers.
        """
        super(SpectralAttentionS2, self).__init__()

        self.embed_dim = embed_dim
        self.sparsity_threshold = sparsity_threshold
        self.operator_type = operator_type
        self.spectral_layers = spectral_layers

        # BUGFIX: previously a non-"auto" scale left self.scale unset,
        # causing an AttributeError below; honor an explicit numeric scale.
        if scale == "auto":
            scale = 1 / (embed_dim * embed_dim)
        self.scale = scale

        self.modes_lat = forward_transform.lmax
        self.modes_lon = forward_transform.mmax

        # only storing the forward handle to be able to call it
        self.forward_transform = forward_transform
        self.inverse_transform = inverse_transform

        self.scale_residual = (
            (self.forward_transform.nlat != self.inverse_transform.nlat)
            or (self.forward_transform.nlon != self.inverse_transform.nlon)
            or (self.forward_transform.grid != self.inverse_transform.grid)
        )

        assert inverse_transform.lmax == self.modes_lat
        assert inverse_transform.mmax == self.modes_lon

        hidden_size = int(hidden_size_factor * self.embed_dim)

        if operator_type == "diagonal":
            self.mul_add_handle = compl_muladd2d_fwd
            self.mul_handle = compl_mul2d_fwd

            # weights (trailing dim of 2 holds real/imag parts)
            w = [self.scale * torch.randn(self.embed_dim, hidden_size, 2)]
            for lyr in range(1, self.spectral_layers):
                w.append(self.scale * torch.randn(hidden_size, hidden_size, 2))
            self.w = nn.ParameterList(w)

            self.wout = nn.Parameter(
                self.scale * torch.randn(hidden_size, self.embed_dim, 2)
            )

            if bias:
                self.b = nn.ParameterList(
                    [
                        self.scale * torch.randn(hidden_size, 1, 1, 2)
                        for _ in range(self.spectral_layers)
                    ]
                )

            self.activations = nn.ModuleList([])
            for lyr in range(0, self.spectral_layers):
                self.activations.append(
                    ComplexReLU(
                        mode=complex_activation,
                        bias_shape=(hidden_size, 1, 1),
                        scale=self.scale,
                    )
                )

        elif operator_type == "l-dependant":
            # weights additionally depend on the latitudinal mode index l
            self.mul_add_handle = compl_exp_muladd2d_fwd
            self.mul_handle = compl_exp_mul2d_fwd

            # weights
            w = [
                self.scale * torch.randn(self.modes_lat, self.embed_dim, hidden_size, 2)
            ]
            for lyr in range(1, self.spectral_layers):
                w.append(
                    self.scale
                    * torch.randn(self.modes_lat, hidden_size, hidden_size, 2)
                )
            self.w = nn.ParameterList(w)

            if bias:
                self.b = nn.ParameterList(
                    [
                        self.scale * torch.randn(hidden_size, 1, 1, 2)
                        for _ in range(self.spectral_layers)
                    ]
                )

            self.wout = nn.Parameter(
                self.scale * torch.randn(self.modes_lat, hidden_size, self.embed_dim, 2)
            )

            self.activations = nn.ModuleList([])
            for lyr in range(0, self.spectral_layers):
                self.activations.append(
                    ComplexReLU(
                        mode=complex_activation,
                        bias_shape=(hidden_size, 1, 1),
                        scale=self.scale,
                    )
                )

        else:
            raise ValueError("Unknown operator type")

        self.drop = nn.Dropout(drop_rate) if drop_rate > 0.0 else nn.Identity()

    def forward_mlp(self, x):  # pragma: no cover
        """forward pass of the MLP"""
        B, C, H, W = x.shape

        # operate on the real view; the handles expect a trailing dim of 2
        xr = torch.view_as_real(x)

        for lyr in range(self.spectral_layers):
            if hasattr(self, "b"):
                xr = self.mul_add_handle(xr, self.w[lyr], self.b[lyr])
            else:
                xr = self.mul_handle(xr, self.w[lyr])
            xr = torch.view_as_complex(xr)
            xr = self.activations[lyr](xr)
            xr = self.drop(xr)
            xr = torch.view_as_real(xr)

        # final MLP
        x = self.mul_handle(xr, self.wout)

        x = torch.view_as_complex(x)

        return x

    def forward(self, x):  # pragma: no cover
        """Apply the spectral attention layer; returns (output, residual)."""
        dtype = x.dtype
        residual = x
        # transforms run in float32 regardless of autocast
        x = x.to(torch.float32)

        # FWD transform
        with amp.autocast(enabled=False):
            x = self.forward_transform(x)
            if self.scale_residual:
                x = x.contiguous()
                residual = self.inverse_transform(x)
                residual = residual.to(dtype)

        # MLP
        x = self.forward_mlp(x)

        # BWD transform
        x = x.contiguous()
        with amp.autocast(enabled=False):
            x = self.inverse_transform(x)

        # cast back to initial precision
        x = x.to(dtype)

        return x, residual
|
modulus-main
|
modulus/models/sfno/s2convolutions.py
|
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.fft
from torch.nn.modules.container import Sequential
from torch.utils.checkpoint import checkpoint
from torch.cuda import amp
import math
from torch_harmonics import *
from modulus.models.sfno.contractions import *
from modulus.models.sfno.activations import *
from modulus.models.sfno.initialization import trunc_normal_
from modulus.models.layers import get_activation
@torch.jit.script
def drop_path(
    x: torch.Tensor, drop_prob: float = 0.0, training: bool = False
) -> torch.Tensor:  # pragma: no cover
    """Stochastic depth: randomly zero whole samples and rescale survivors.

    Equivalent to the DropConnect used in EfficientNet-style networks; the
    name 'drop path' follows the discussion in
    https://github.com/tensorflow/tpu/issues/494#issuecomment-532968956.
    Applied per sample in the main path of residual blocks.
    """
    # no-op at inference time or when the drop probability is zero
    if drop_prob == 0.0 or not training:
        return x
    keep_prob = 1.0 - drop_prob
    # one Bernoulli draw per sample, broadcast across every remaining dim
    mask_shape = (x.shape[0],) + (1,) * (x.ndim - 1)
    mask = torch.rand(mask_shape, dtype=x.dtype, device=x.device)
    mask = (mask + keep_prob).floor_()  # binarize: 1 with probability keep_prob
    # rescale survivors so the expected activation is unchanged
    return x.div(keep_prob) * mask
class DropPath(nn.Module):
    """Module wrapper around :func:`drop_path` (per-sample stochastic depth).

    The drop is only active while the module is in training mode.
    """

    def __init__(self, drop_prob=None):  # pragma: no cover
        super().__init__()
        self.drop_prob = drop_prob

    def forward(self, x):  # pragma: no cover
        # delegate to the scripted functional form
        return drop_path(x, self.drop_prob, self.training)
class PatchEmbed(nn.Module):
    """Split an image into non-overlapping patches and embed them.

    The embedding is a strided convolution whose kernel size and stride both
    equal the patch size; the spatial grid of patches is flattened so the
    output has shape (B, embed_dim, num_patches).
    """

    def __init__(
        self, img_size=(224, 224), patch_size=(16, 16), in_chans=3, embed_dim=768
    ):  # pragma: no cover
        super().__init__()
        self.img_size = img_size
        self.patch_size = patch_size
        # total patch count: patches per row times patches per column
        self.num_patches = (img_size[0] // patch_size[0]) * (
            img_size[1] // patch_size[1]
        )
        self.proj = nn.Conv2d(
            in_chans, embed_dim, kernel_size=patch_size, stride=patch_size
        )

    def forward(self, x):  # pragma: no cover
        # gather input
        B, C, H, W = x.shape
        assert (
            H == self.img_size[0] and W == self.img_size[1]
        ), f"Input image size ({H}*{W}) doesn't match model ({self.img_size[0]}*{self.img_size[1]})."
        # project to embed_dim and flatten the patch grid: B, C, H*W
        return self.proj(x).flatten(2)
class EncoderDecoder(nn.Module):
    """Simple point-wise encoder/decoder: a stack of 1x1 convolutions.

    Each hidden layer is a biased 1x1 conv followed by the chosen activation;
    the final projection to ``output_dim`` carries no bias.
    """

    def __init__(
        self,
        num_layers,
        input_dim,
        output_dim,
        hidden_dim,
        act,
    ):  # pragma: no cover
        super().__init__()

        layers = []
        dim = input_dim
        for _ in range(num_layers):
            layers.append(nn.Conv2d(dim, hidden_dim, 1, bias=True))
            layers.append(get_activation(act))
            dim = hidden_dim
        # final projection, intentionally bias-free
        layers.append(nn.Conv2d(dim, output_dim, 1, bias=False))

        self.fwd = nn.Sequential(*layers)

    def forward(self, x):
        return self.fwd(x)
class MLP(nn.Module):
    """Point-wise two-layer CNN MLP with optional dropout and checkpointing.

    checkpointing >= 2 routes the forward pass through gradient
    checkpointing to trade compute for activation memory.
    """

    def __init__(
        self,
        in_features,
        hidden_features=None,
        out_features=None,
        act_layer="gelu",
        output_bias=True,
        drop_rate=0.0,
        checkpointing=0,
        **kwargs,
    ):  # pragma: no cover
        super().__init__()
        self.checkpointing = checkpointing
        out_features = out_features or in_features
        hidden_features = hidden_features or in_features

        layers = [
            nn.Conv2d(in_features, hidden_features, 1, bias=True),
            get_activation(act_layer),
        ]
        fc2 = nn.Conv2d(hidden_features, out_features, 1, bias=output_bias)
        if drop_rate > 0.0:
            # the same (stateless) dropout module is applied after both convs
            drop = nn.Dropout(drop_rate)
            layers.extend([drop, fc2, drop])
        else:
            layers.append(fc2)
        self.fwd = nn.Sequential(*layers)

    @torch.jit.ignore
    def checkpoint_forward(self, x):  # pragma: no cover
        """Forward method with support for gradient checkpointing"""
        return checkpoint(self.fwd, x)

    def forward(self, x):  # pragma: no cover
        if self.checkpointing >= 2:
            return self.checkpoint_forward(x)
        return self.fwd(x)
class RealFFT2(nn.Module):
    """
    Helper routine to wrap FFT similarly to the SHT.

    Computes an ortho-normalized 2d real FFT and, if requested, truncates the
    spectrum to ``lmax`` rows and ``mmax`` columns. The kept rows are split
    between the first ceil(lmax/2) (non-negative frequencies) and the last
    floor(lmax/2) (negative frequencies), which is why ``lmax`` must be even.

    Parameters
    ----------
    nlat, nlon : int
        Spatial input dimensions.
    lmax : int, optional
        Number of latitudinal modes to keep (even). Defaults to ``nlat``.
    mmax : int, optional
        Number of longitudinal modes to keep. Defaults to ``nlon // 2 + 1``.

    Raises
    ------
    ValueError
        If ``lmax`` is odd.
    """

    def __init__(self, nlat, nlon, lmax=None, mmax=None):  # pragma: no cover
        super(RealFFT2, self).__init__()

        # use local FFT here
        self.fft_handle = torch.fft.rfft2

        self.nlat = nlat
        self.nlon = nlon
        self.lmax = lmax or self.nlat
        self.mmax = mmax or self.nlon // 2 + 1

        # truncation is only needed when the spectrum is actually reduced
        self.truncate = True
        if (self.lmax == self.nlat) and (self.mmax == (self.nlon // 2 + 1)):
            self.truncate = False

        # was `assert`, which is silently stripped under `python -O`
        if self.lmax % 2 != 0:
            raise ValueError(f"lmax must be even, got {self.lmax}")

    def forward(self, x):  # pragma: no cover
        """Return the (possibly truncated) ortho-normalized rfft2 of ``x``."""
        y = self.fft_handle(x, (self.nlat, self.nlon), (-2, -1), "ortho")

        if self.truncate:
            # keep low positive and negative frequency rows, mmax columns
            y = torch.cat(
                (
                    y[..., : math.ceil(self.lmax / 2), : self.mmax],
                    y[..., -math.floor(self.lmax / 2) :, : self.mmax],
                ),
                dim=-2,
            )

        return y
class InverseRealFFT2(nn.Module):
    """Inverse counterpart of :class:`RealFFT2`.

    Wraps the ortho-normalized 2d inverse real FFT so it exposes the same
    attribute interface (nlat/nlon/lmax/mmax) as the SHT helpers.
    """

    def __init__(self, nlat, nlon, lmax=None, mmax=None):  # pragma: no cover
        super().__init__()

        # local inverse FFT handle
        self.ifft_handle = torch.fft.irfft2

        self.nlat = nlat
        self.nlon = nlon
        self.lmax = lmax or nlat
        self.mmax = mmax or nlon // 2 + 1

    def forward(self, x):  # pragma: no cover
        # reconstruct the (nlat, nlon) spatial signal from spectral modes
        return self.ifft_handle(x, (self.nlat, self.nlon), (-2, -1), "ortho")
class SpectralConv2d(nn.Module):
    """
    Spectral convolution (FNO-style): forward transform, learned per-mode
    complex multiplication, inverse transform.

    Returns ``(x, residual)`` where the residual is the input mapped onto the
    output grid (round-tripped through the transforms when the grids differ).

    Parameters
    ----------
    forward_transform, inverse_transform : nn.Module
        Spectral transform pair (SHT/FFT wrappers exposing nlat/nlon/lmax/mmax).
    in_channels, out_channels : int
        Channel counts of the learned contraction.
    scale : float or "auto"
        Weight init scale; "auto" uses 1 / (in_channels * out_channels).
    hard_thresholding_fraction : float
        Fraction of the available modes to retain.
    bias : bool
        Whether to add a learned spatial bias after the inverse transform.
    """

    def __init__(
        self,
        forward_transform,
        inverse_transform,
        in_channels,
        out_channels,
        scale="auto",
        hard_thresholding_fraction=1,
        compression=None,
        rank=0,
        bias=False,
    ):  # pragma: no cover
        super(SpectralConv2d, self).__init__()

        if scale == "auto":
            scale = 1 / (in_channels * out_channels)

        self.hard_thresholding_fraction = hard_thresholding_fraction
        # mode-diagonal complex contraction
        self.contract_handle = _contract_diagonal

        self.forward_transform = forward_transform
        self.inverse_transform = inverse_transform

        self.output_dims = (self.inverse_transform.nlat, self.inverse_transform.nlon)

        modes_lat = self.inverse_transform.lmax
        modes_lon = self.inverse_transform.mmax

        self.modes_lat = int(modes_lat * self.hard_thresholding_fraction)
        self.modes_lon = int(modes_lon * self.hard_thresholding_fraction)

        # residual must be re-gridded when input and output grids differ
        self.scale_residual = (
            self.forward_transform.nlat != self.inverse_transform.nlat
        ) or (self.forward_transform.nlon != self.inverse_transform.nlon)

        # complex weights stored as interleaved real/imag pairs (last dim = 2)
        self.w = nn.Parameter(
            scale
            * torch.randn(in_channels, out_channels, self.modes_lat, self.modes_lon, 2)
        )

        # optional bias
        if bias:
            self.b = nn.Parameter(
                scale * torch.randn(1, out_channels, *self.output_dims)
            )

    def forward(self, x):  # pragma: no cover
        """Apply the spectral convolution; returns ``(output, residual)``."""
        dtype = x.dtype
        B, C, H, W = x.shape

        if not self.scale_residual:
            residual = x

        with amp.autocast(enabled=False):
            x = x.to(torch.float32)
            x = self.forward_transform(x)
            if self.scale_residual:
                x = x.contiguous()
                residual = self.inverse_transform(x)
                residual = residual.to(dtype)
            x = torch.view_as_real(x)
            x = x.to(dtype)

        # do spectral conv
        # BUGFIX: the contraction result was previously assigned to an unused
        # local (`modes`), so the learned weights were never applied and the
        # layer degenerated to a transform round-trip
        x = self.contract_handle(x, self.w)

        with amp.autocast(enabled=False):
            x = x.to(torch.float32)
            x = torch.view_as_complex(x)
            x = x.contiguous()
            x = self.inverse_transform(x)
            x = x.to(dtype)

        if hasattr(self, "b"):
            x = x + self.b

        return x, residual
class SpectralAttention2d(nn.Module):
    """
    2d Spectral Attention layer.

    Applies a small complex-valued MLP to the FFT coefficients of the input
    and transforms back. Returns ``(x, residual)`` like the other spectral
    filter layers.
    """

    def __init__(
        self,
        forward_transform,
        inverse_transform,
        embed_dim,
        sparsity_threshold=0.0,
        hidden_size_factor=2,
        use_complex_network=True,
        use_complex_kernels=False,
        complex_activation="real",
        bias=False,
        spectral_layers=1,
        drop_rate=0.0,
    ): # pragma: no cover
        super(SpectralAttention2d, self).__init__()

        self.embed_dim = embed_dim
        self.sparsity_threshold = sparsity_threshold
        self.hidden_size = int(hidden_size_factor * self.embed_dim)
        # fixed init scale for all spectral weights
        self.scale = 0.02
        self.spectral_layers = spectral_layers

        # pick complex-kernel or plain real/imag contraction implementations
        self.mul_add_handle = (
            compl_muladd2d_fwd_c if use_complex_kernels else compl_muladd2d_fwd
        )
        self.mul_handle = compl_mul2d_fwd_c if use_complex_kernels else compl_mul2d_fwd

        self.modes_lat = forward_transform.lmax
        self.modes_lon = forward_transform.mmax

        # only storing the forward handle to be able to call it
        self.forward_transform = forward_transform
        self.inverse_transform = inverse_transform

        # both transforms must operate on the same spectral resolution
        assert inverse_transform.lmax == self.modes_lat
        assert inverse_transform.mmax == self.modes_lon

        # residual needs re-gridding when input/output spatial grids differ
        self.scale_residual = (
            self.forward_transform.nlat != self.inverse_transform.nlat
        ) or (self.forward_transform.nlon != self.inverse_transform.nlon)

        # weights: complex tensors stored as (..., 2) real/imag pairs
        w = [self.scale * torch.randn(self.embed_dim, self.hidden_size, 2)]
        # w = [self.scale * torch.randn(self.embed_dim + 2*self.embed_freqs, self.hidden_size, 2)]
        # w = [self.scale * torch.randn(self.embed_dim + 4*self.embed_freqs, self.hidden_size, 2)]
        for l in range(1, self.spectral_layers):
            w.append(self.scale * torch.randn(self.hidden_size, self.hidden_size, 2))
        self.w = nn.ParameterList(w)

        if bias:
            self.b = nn.ParameterList(
                [
                    self.scale * torch.randn(self.hidden_size, 1, 2)
                    for _ in range(self.spectral_layers)
                ]
            )

        self.wout = nn.Parameter(
            self.scale * torch.randn(self.hidden_size, self.embed_dim, 2)
        )

        self.drop = nn.Dropout(drop_rate) if drop_rate > 0.0 else nn.Identity()

        # single activation shared by all spectral layers (unlike the
        # per-layer activation lists used elsewhere in this file)
        self.activation = ComplexReLU(
            mode=complex_activation, bias_shape=(self.hidden_size, 1, 1)
        )

    def forward_mlp(self, xr): # pragma: no cover
        """forward method for the MLP part of the network

        ``xr`` is the real/imag view (last dim = 2) of the spectral
        coefficients; the return value is in the same layout.
        """
        for l in range(self.spectral_layers):
            # bias parameters exist only when constructed with bias=True
            if hasattr(self, "b"):
                xr = self.mul_add_handle(
                    xr, self.w[l].to(xr.dtype), self.b[l].to(xr.dtype)
                )
            else:
                xr = self.mul_handle(xr, self.w[l].to(xr.dtype))
            # activation/dropout work on the complex view
            xr = torch.view_as_complex(xr)
            xr = self.activation(xr)
            xr = self.drop(xr)
            xr = torch.view_as_real(xr)

        # final projection back to embed_dim
        xr = self.mul_handle(xr, self.wout)

        return xr

    def forward(self, x): # pragma: no cover
        """FFT -> spectral MLP -> inverse FFT; returns ``(x, residual)``."""
        dtype = x.dtype
        if not self.scale_residual:
            residual = x

        # FWD transform (fp32, autocast disabled)
        with amp.autocast(enabled=False):
            x = x.to(torch.float32)
            x = x.contiguous()
            x = self.forward_transform(x)
            if self.scale_residual:
                # residual computed on the output grid via the inverse transform
                x = x.contiguous()
                residual = self.inverse_transform(x)
                residual = residual.to(dtype)

        x = torch.view_as_real(x)

        # MLP
        x = self.forward_mlp(x)

        # BWD transform
        with amp.autocast(enabled=False):
            x = torch.view_as_complex(x)
            x = x.contiguous()
            x = self.inverse_transform(x)

        # cast back to the caller's precision
        x = x.to(dtype)

        return x, residual
class SpectralAttentionS2(nn.Module):
    """
    Geometrical (spherical) Spectral Attention layer.

    Same structure as :class:`SpectralAttention2d` but intended for spherical
    harmonic transforms; the residual check additionally compares the grid
    type of the two transforms.
    """

    def __init__(
        self,
        forward_transform,
        inverse_transform,
        embed_dim,
        sparsity_threshold=0.0,
        hidden_size_factor=2,
        use_complex_network=True,
        complex_activation="real",
        bias=False,
        spectral_layers=1,
        drop_rate=0.0,
    ): # pragma: no cover
        super(SpectralAttentionS2, self).__init__()

        self.embed_dim = embed_dim
        self.sparsity_threshold = sparsity_threshold
        self.hidden_size = int(hidden_size_factor * self.embed_dim)
        # fixed init scale for all spectral weights
        self.scale = 0.02
        # self.mul_add_handle = compl_muladd1d_fwd_c if use_complex_kernels else compl_muladd1d_fwd
        self.mul_add_handle = compl_muladd2d_fwd
        # self.mul_handle = compl_mul1d_fwd_c if use_complex_kernels else compl_mul1d_fwd
        self.mul_handle = compl_mul2d_fwd
        self.spectral_layers = spectral_layers

        self.modes_lat = forward_transform.lmax
        self.modes_lon = forward_transform.mmax

        # only storing the forward handle to be able to call it
        self.forward_transform = forward_transform
        self.inverse_transform = inverse_transform

        # both transforms must share the same spectral resolution
        assert inverse_transform.lmax == self.modes_lat
        assert inverse_transform.mmax == self.modes_lon

        # residual must be re-gridded when spatial resolution OR grid type differ
        self.scale_residual = (
            (self.forward_transform.nlat != self.inverse_transform.nlat)
            or (self.forward_transform.nlon != self.inverse_transform.nlon)
            or (self.forward_transform.grid != self.inverse_transform.grid)
        )

        # weights: complex tensors stored as (..., 2) real/imag pairs
        w = [self.scale * torch.randn(self.embed_dim, self.hidden_size, 2)]
        for l in range(1, self.spectral_layers):
            w.append(self.scale * torch.randn(self.hidden_size, self.hidden_size, 2))
        self.w = nn.ParameterList(w)

        if bias:
            # NOTE(review): bias leading dim is 2*hidden_size here, unlike the
            # hidden_size used in SpectralAttention2d — confirm intended
            self.b = nn.ParameterList(
                [
                    self.scale * torch.randn(2 * self.hidden_size, 1, 1, 2)
                    for _ in range(self.spectral_layers)
                ]
            )

        self.wout = nn.Parameter(
            self.scale * torch.randn(self.hidden_size, self.embed_dim, 2)
        )

        self.drop = nn.Dropout(drop_rate) if drop_rate > 0.0 else nn.Identity()

        # single activation shared by all spectral layers
        self.activation = ComplexReLU(
            mode=complex_activation, bias_shape=(self.hidden_size, 1, 1)
        )

    def forward_mlp(self, xr): # pragma: no cover
        """forward method for the MLP part of the network

        ``xr`` is the real/imag view (last dim = 2) of the spherical-harmonic
        coefficients; the return value is in the same layout.
        """
        for l in range(self.spectral_layers):
            # bias parameters exist only when constructed with bias=True
            if hasattr(self, "b"):
                xr = self.mul_add_handle(
                    xr, self.w[l].to(xr.dtype), self.b[l].to(xr.dtype)
                )
            else:
                xr = self.mul_handle(xr, self.w[l].to(xr.dtype))
            # activation/dropout work on the complex view
            xr = torch.view_as_complex(xr)
            xr = self.activation(xr)
            xr = self.drop(xr)
            xr = torch.view_as_real(xr)

        # final MLP
        xr = self.mul_handle(xr, self.wout)

        return xr

    def forward(self, x): # pragma: no cover
        """SHT -> spectral MLP -> inverse SHT; returns ``(x, residual)``."""
        dtype = x.dtype
        if not self.scale_residual:
            residual = x

        # FWD transform (fp32, autocast disabled)
        with amp.autocast(enabled=False):
            x = x.to(torch.float32)
            x = x.contiguous()
            x = self.forward_transform(x)
            if self.scale_residual:
                # residual computed on the output grid via the inverse transform
                x = x.contiguous()
                residual = self.inverse_transform(x)
                residual = residual.to(dtype)

        x = torch.view_as_real(x)

        # MLP
        x = self.forward_mlp(x)

        # BWD transform
        with amp.autocast(enabled=False):
            x = torch.view_as_complex(x)
            x = x.contiguous()
            x = self.inverse_transform(x)

        # cast back to the caller's precision
        x = x.to(dtype)

        return x, residual
|
modulus-main
|
modulus/models/sfno/layers.py
|
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from functools import partial
import torch
import torch.nn as nn
from torch.utils.checkpoint import checkpoint
from torch.cuda import amp
from dataclasses import dataclass
from typing import Any, Tuple
# import contractions
from modulus.models.sfno.factorizations import get_contract_fun, _contract_dense
# helpers
from modulus.models.sfno.layers import (
trunc_normal_,
DropPath,
MLP,
EncoderDecoder,
)
# import global convolution and non-linear spectral layers
from modulus.models.sfno.layers import (
SpectralConv2d,
SpectralAttention2d,
SpectralAttentionS2,
)
from modulus.models.sfno.s2convolutions import SpectralConvS2
# get spectral transforms from torch_harmonics
import torch_harmonics as th
import torch_harmonics.distributed as thd
# wrap fft, to unify interface to spectral transforms
from modulus.models.sfno.layers import RealFFT2, InverseRealFFT2
from modulus.utils.sfno.distributed.layers import (
DistributedRealFFT2,
DistributedInverseRealFFT2,
DistributedMLP,
DistributedEncoderDecoder,
)
# more distributed stuff
from modulus.utils.sfno.distributed import comm
# layer normalization
from apex.normalization import FusedLayerNorm
from modulus.utils.sfno.distributed.layer_norm import DistributedInstanceNorm2d
from modulus.models.module import Module
from modulus.models.meta import ModelMetaData
from modulus.models.layers import get_activation
@dataclass
class MetaData(ModelMetaData):
    """Capability/optimization flags for the SFNO model (see ModelMetaData)."""

    name: str = "SFNO"
    # Optimization: TorchScript is off, CUDA graphs and AMP (CPU+GPU) are supported
    jit: bool = False
    cuda_graphs: bool = True
    amp_cpu: bool = True
    amp_gpu: bool = True
    torch_fx: bool = False
    # Inference: no ONNX export support
    onnx: bool = False
    # Physics informed: no functorch / autograd-based physics support
    func_torch: bool = False
    auto_grad: bool = False
class SpectralFilterLayer(nn.Module):
    """Spectral filter layer.

    Dispatches to the concrete filter implementation based on ``filter_type``
    and the kind of spectral transform passed in:

    - "non-linear" + SHT  -> :class:`SpectralAttentionS2`
    - "non-linear" + FFT  -> :class:`SpectralAttention2d`
    - "linear"            -> :class:`SpectralConvS2`

    Raises
    ------
    NotImplementedError
        For any other ``filter_type`` / transform combination.
    """

    def __init__(
        self,
        forward_transform,
        inverse_transform,
        embed_dim,
        filter_type="linear",
        operator_type="diagonal",
        sparsity_threshold=0.0,
        use_complex_kernels=True,
        hidden_size_factor=1,
        rank=1.0,
        factorization=None,
        separable=False,
        complex_network=True,
        complex_activation="real",
        spectral_layers=1,
        drop_rate=0.0,
    ):  # pragma: no cover
        super(SpectralFilterLayer, self).__init__()

        # non-linear filter on the sphere (spherical harmonic transform)
        if filter_type == "non-linear" and (
            isinstance(forward_transform, th.RealSHT)
            or isinstance(forward_transform, thd.DistributedRealSHT)
        ):
            self.filter = SpectralAttentionS2(
                forward_transform,
                inverse_transform,
                embed_dim,
                sparsity_threshold=sparsity_threshold,
                hidden_size_factor=hidden_size_factor,
                complex_activation=complex_activation,
                spectral_layers=spectral_layers,
                drop_rate=drop_rate,
                bias=False,
            )

        # non-linear filter on the plane (FFT)
        elif filter_type == "non-linear" and (
            isinstance(forward_transform, RealFFT2)
            or isinstance(forward_transform, DistributedRealFFT2)
        ):
            self.filter = SpectralAttention2d(
                forward_transform,
                inverse_transform,
                embed_dim,
                sparsity_threshold=sparsity_threshold,
                hidden_size_factor=hidden_size_factor,
                complex_activation=complex_activation,
                spectral_layers=spectral_layers,
                drop_rate=drop_rate,
                bias=False,
            )

        # spectral transform is passed to the module
        elif filter_type == "linear":
            self.filter = SpectralConvS2(
                forward_transform,
                inverse_transform,
                embed_dim,
                embed_dim,
                operator_type=operator_type,
                rank=rank,
                factorization=factorization,
                separable=separable,
                bias=False,
                use_tensorly=False if factorization is None else True,
            )

        else:
            # previously `raise (NotImplementedError)` — raise an instance
            # with a diagnostic message instead of the bare class
            raise NotImplementedError(
                f"Unknown filter type {filter_type} for the given transforms"
            )

    def forward(self, x):  # pragma: no cover
        return self.filter(x)
class FourierNeuralOperatorBlock(nn.Module):
    """Fourier Neural Operator Block.

    norm -> spectral filter -> (inner skip) -> (activation) -> norm ->
    (MLP) -> drop path -> (outer skip on the filter residual).

    Norms are applied only to the valid (unpadded) local spatial region; the
    rest of the tensor is zeroed, which matters when running spatially
    distributed with padding.
    """

    def __init__(
        self,
        forward_transform,
        inverse_transform,
        embed_dim,
        filter_type="linear",
        operator_type="diagonal",
        mlp_ratio=2.0,
        drop_rate=0.0,
        drop_path=0.0,
        act_layer="gelu",
        norm_layer=(nn.LayerNorm, nn.LayerNorm),
        sparsity_threshold=0.0,
        use_complex_kernels=True,
        rank=1.0,
        factorization=None,
        separable=False,
        inner_skip="linear",
        outer_skip=None, # None, nn.linear or nn.Identity
        use_mlp=False,
        comm_feature_inp_name=None,
        comm_feature_hidden_name=None,
        complex_network=True,
        complex_activation="real",
        spectral_layers=1,
        checkpointing=0,
    ): # pragma: no cover
        super(FourierNeuralOperatorBlock, self).__init__()

        # local (per-rank) spatial shapes; differ from global when running
        # with spatial model parallelism
        if (comm.get_size("h") > 1) or (comm.get_size("w") > 1):
            self.input_shape_loc = (
                forward_transform.nlat_local,
                forward_transform.nlon_local,
            )
            self.output_shape_loc = (
                inverse_transform.nlat_local,
                inverse_transform.nlon_local,
            )
        else:
            self.input_shape_loc = (forward_transform.nlat, forward_transform.nlon)
            self.output_shape_loc = (inverse_transform.nlat, inverse_transform.nlon)

        # norm layer
        self.norm0 = norm_layer[0]()

        # convolution layer
        self.filter = SpectralFilterLayer(
            forward_transform,
            inverse_transform,
            embed_dim,
            filter_type,
            operator_type,
            sparsity_threshold,
            use_complex_kernels=use_complex_kernels,
            hidden_size_factor=mlp_ratio,
            rank=rank,
            factorization=factorization,
            separable=separable,
            complex_network=complex_network,
            complex_activation=complex_activation,
            spectral_layers=spectral_layers,
            drop_rate=drop_rate,
        )

        # inner skip: applied to the filter's residual right after the filter
        if inner_skip == "linear":
            self.inner_skip = nn.Conv2d(embed_dim, embed_dim, 1, 1)
        elif inner_skip == "identity":
            self.inner_skip = nn.Identity()

        # linear filters need an explicit point-wise nonlinearity here
        if filter_type == "linear" or filter_type == "real linear":
            self.act_layer = get_activation(act_layer)

        # dropout
        self.drop_path = DropPath(drop_path) if drop_path > 0.0 else nn.Identity()

        # norm layer
        self.norm1 = norm_layer[1]()

        # NOTE(review): `use_mlp == True` differs from truthiness for
        # non-bool values such as 1 or "yes"
        if use_mlp == True:
            MLPH = DistributedMLP if (comm.get_size("matmul") > 1) else MLP
            mlp_hidden_dim = int(embed_dim * mlp_ratio)
            self.mlp = MLPH(
                in_features=embed_dim,
                hidden_features=mlp_hidden_dim,
                act_layer=act_layer,
                drop_rate=drop_rate,
                comm_inp_name=comm_feature_inp_name,
                comm_hidden_name=comm_feature_hidden_name,
                checkpointing=checkpointing,
            )

        # outer skip: applied to the filter residual at the end of the block
        if outer_skip == "linear":
            self.outer_skip = nn.Conv2d(embed_dim, embed_dim, 1, 1)
        elif outer_skip == "identity":
            self.outer_skip = nn.Identity()

    def forward(self, x): # pragma: no cover
        """Run the block; skips are taken from the filter's residual output."""
        # normalize only the valid input region, zero the (padding) rest
        x_norm = torch.zeros_like(x)
        x_norm[..., : self.input_shape_loc[0], : self.input_shape_loc[1]] = self.norm0(
            x[..., : self.input_shape_loc[0], : self.input_shape_loc[1]]
        )
        x, residual = self.filter(x_norm)

        if hasattr(self, "inner_skip"):
            x = x + self.inner_skip(residual)

        if hasattr(self, "act_layer"):
            x = self.act_layer(x)

        # normalize only the valid output region, zero the rest
        x_norm = torch.zeros_like(x)
        x_norm[
            ..., : self.output_shape_loc[0], : self.output_shape_loc[1]
        ] = self.norm1(x[..., : self.output_shape_loc[0], : self.output_shape_loc[1]])
        x = x_norm

        if hasattr(self, "mlp"):
            x = self.mlp(x)

        x = self.drop_path(x)

        if hasattr(self, "outer_skip"):
            x = x + self.outer_skip(residual)

        return x
class SphericalFourierNeuralOperatorNet(Module):
"""
Spherical Fourier Neural Operator Network
Parameters
----------
params : dict
Dictionary of parameters
spectral_transform : str, optional
Type of spectral transformation to use, by default "sht"
grid : str, optional
Type of grid to use, by default "legendre-gauss"
filter_type : str, optional
Type of filter to use ('linear', 'non-linear'), by default "non-linear"
operator_type : str, optional
Type of operator to use ('diaginal', 'dhconv'), by default "diagonal"
inp_shape : tuple, optional
Shape of the input channels, by default (721, 1440)
scale_factor : int, optional
Scale factor to use, by default 16
in_chans : int, optional
Number of input channels, by default 2
out_chans : int, optional
Number of output channels, by default 2
embed_dim : int, optional
Dimension of the embeddings, by default 256
num_layers : int, optional
Number of layers in the network, by default 12
repeat_layers : int, optional
Number of times to repeat the layers, by default 1
use_mlp : int, optional
Whether to use MLP, by default True
mlp_ratio : int, optional
Ratio of MLP to use, by default 2.0
activation_function : str, optional
Activation function to use, by default "gelu"
encoder_layers : int, optional
Number of layers in the encoder, by default 1
pos_embed : str, optional
Type of positional embedding to use, by default "direct"
drop_rate : float, optional
Dropout rate, by default 0.0
drop_path_rate : float, optional
Dropout path rate, by default 0.0
sparsity_threshold : float, optional
Threshold for sparsity, by default 0.0
normalization_layer : str, optional
Type of normalization layer to use ("layer_norm", "instance_norm", "none"), by default "instance_norm"
max_modes : Any, optional
Maximum modes to use, by default None
hard_thresholding_fraction : float, optional
Fraction of hard thresholding to apply, by default 1.0
use_complex_kernels : bool, optional
Whether to use complex kernels, by default True
big_skip : bool, optional
Whether to use big skip connections, by default True
rank : float, optional
Rank of the approximation, by default 1.0
factorization : Any, optional
Type of factorization to use, by default None
separable : bool, optional
Whether to use separable convolutions, by default False
complex_network : bool, optional
Whether to use a complex network architecture, by default True
complex_activation : str, optional
Type of complex activation function to use, by default "real"
spectral_layers : int, optional
Number of spectral layers, by default 3
output_transform : bool, optional
Whether to use an output transform, by default False
checkpointing : int, optional
Number of checkpointing segments, by default 0
Example:
--------
>>> from modulus.models.sfno.sfnonet import SphericalFourierNeuralOperatorNet as SFNO
>>> model = SFNO(
... params={},
... inp_shape=(8, 16),
... scale_factor=4,
... in_chans=2,
... out_chans=2,
... embed_dim=16,
... num_layers=2,
... encoder_layers=1,
... spectral_layers=2,
... use_mlp=True,)
>>> model(torch.randn(1, 2, 8, 16)).shape
torch.Size([1, 2, 8, 16])
"""
def __init__(
self,
params: dict,
spectral_transform: str = "sht",
grid="legendre-gauss",
filter_type: str = "non-linear",
operator_type: str = "diagonal",
inp_shape: Tuple[int] = (721, 1440),
scale_factor: int = 16,
in_chans: int = 2,
out_chans: int = 2,
embed_dim: int = 256,
num_layers: int = 12,
repeat_layers=1,
use_mlp: int = True,
mlp_ratio: int = 2.0,
activation_function: str = "gelu",
encoder_layers: int = 1,
pos_embed: str = "direct",
drop_rate: float = 0.0,
drop_path_rate: float = 0.0,
sparsity_threshold: float = 0.0,
normalization_layer: str = "instance_norm",
max_modes: Any = None,
hard_thresholding_fraction: float = 1.0,
use_complex_kernels: bool = True,
big_skip: bool = True,
rank: float = 1.0,
factorization: Any = None,
separable: bool = False,
complex_network: bool = True,
complex_activation: str = "real",
spectral_layers: int = 3,
output_transform: bool = False,
checkpointing: int = 0,
): # pragma: no cover
super(SphericalFourierNeuralOperatorNet, self).__init__(meta=MetaData())
self.params = params
self.spectral_transform = (
params.spectral_transform
if hasattr(params, "spectral_transform")
else spectral_transform
)
self.grid = params.grid if hasattr(params, "grid") else grid
self.filter_type = (
params.filter_type if hasattr(params, "filter_type") else filter_type
)
self.operator_type = (
params.operator_type if hasattr(params, "operator_type") else operator_type
)
self.inp_shape = (
(params.img_shape_x, params.img_shape_y)
if hasattr(params, "img_shape_x") and hasattr(params, "img_shape_y")
else inp_shape
)
self.out_shape = (
(params.out_shape_x, params.out_shape_y)
if hasattr(params, "out_shape_x") and hasattr(params, "out_shape_y")
else self.inp_shape
)
self.scale_factor = (
params.scale_factor if hasattr(params, "scale_factor") else scale_factor
)
self.in_chans = (
params.N_in_channels if hasattr(params, "N_in_channels") else in_chans
)
self.out_chans = (
params.N_out_channels if hasattr(params, "N_out_channels") else out_chans
)
self.embed_dim = self.num_features = (
params.embed_dim if hasattr(params, "embed_dim") else embed_dim
)
self.num_layers = (
params.num_layers if hasattr(params, "num_layers") else num_layers
)
self.repeat_layers = (
params.repeat_layers if hasattr(params, "repeat_layers") else repeat_layers
)
self.max_modes = (
(params.lmax, params.mmax)
if hasattr(params, "lmax") and hasattr(params, "mmax")
else max_modes
)
self.hard_thresholding_fraction = (
params.hard_thresholding_fraction
if hasattr(params, "hard_thresholding_fraction")
else hard_thresholding_fraction
)
self.normalization_layer = (
params.normalization_layer
if hasattr(params, "normalization_layer")
else normalization_layer
)
self.use_mlp = params.use_mlp if hasattr(params, "use_mlp") else use_mlp
self.mlp_ratio = params.mlp_ratio if hasattr(params, "mlp_ratio") else mlp_ratio
self.activation_function = (
params.activation_function
if hasattr(params, "activation_function")
else activation_function
)
self.encoder_layers = (
params.encoder_layers
if hasattr(params, "encoder_layers")
else encoder_layers
)
self.pos_embed = params.pos_embed if hasattr(params, "pos_embed") else pos_embed
self.big_skip = params.big_skip if hasattr(params, "big_skip") else big_skip
self.rank = params.rank if hasattr(params, "rank") else rank
self.factorization = (
params.factorization if hasattr(params, "factorization") else factorization
)
self.separable = params.separable if hasattr(params, "separable") else separable
self.complex_network = (
params.complex_network
if hasattr(params, "complex_network")
else complex_network
)
self.complex_activation = (
params.complex_activation
if hasattr(params, "complex_activation")
else complex_activation
)
self.spectral_layers = (
params.spectral_layers
if hasattr(params, "spectral_layers")
else spectral_layers
)
self.output_transform = (
params.output_transform
if hasattr(params, "output_transform")
else output_transform
)
self.checkpointing = (
params.checkpointing if hasattr(params, "checkpointing") else checkpointing
)
# self.pretrain_encoding = params.pretrain_encoding if hasattr(params, "pretrain_encoding") else False
# compute the downscaled image size
self.h = int(self.inp_shape[0] // self.scale_factor)
self.w = int(self.inp_shape[1] // self.scale_factor)
# Compute the maximum frequencies in h and in w
if self.max_modes is not None:
modes_lat, modes_lon = self.max_modes
else:
modes_lat = int(self.h * self.hard_thresholding_fraction)
modes_lon = int((self.w // 2 + 1) * self.hard_thresholding_fraction)
# prepare the spectral transforms
if self.spectral_transform == "sht":
sht_handle = th.RealSHT
isht_handle = th.InverseRealSHT
# parallelism
if (comm.get_size("h") > 1) or (comm.get_size("w") > 1):
polar_group = None if (comm.get_size("h") == 1) else comm.get_group("h")
azimuth_group = (
None if (comm.get_size("w") == 1) else comm.get_group("w")
)
thd.init(polar_group, azimuth_group)
sht_handle = thd.DistributedRealSHT
isht_handle = thd.DistributedInverseRealSHT
# set up
self.trans_down = sht_handle(
*self.inp_shape, lmax=modes_lat, mmax=modes_lon, grid="equiangular"
).float()
self.itrans_up = isht_handle(
*self.out_shape, lmax=modes_lat, mmax=modes_lon, grid="equiangular"
).float()
self.trans = sht_handle(
self.h, self.w, lmax=modes_lat, mmax=modes_lon, grid=self.grid
).float()
self.itrans = isht_handle(
self.h, self.w, lmax=modes_lat, mmax=modes_lon, grid=self.grid
).float()
elif self.spectral_transform == "fft":
fft_handle = th.RealFFT2
ifft_handle = th.InverseRealFFT2
# determine the global padding
inp_dist_h = (
(self.inp_shape[0] + comm.get_size("h")) - 1
) // comm.get_size("h")
inp_dist_w = (
(self.inp_shape[1] + comm.get_size("w")) - 1
) // comm.get_size("w")
self.inp_padding = (
inp_dist_h * comm.get_size("h") - self.inp_shape[0],
inp_dist_w * comm.get_size("w") - self.inp_shape[1],
)
out_dist_h = (
(self.out_shape[0] + comm.get_size("h")) - 1
) // comm.get_size("h")
out_dist_w = (
(self.out_shape[1] + comm.get_size("w")) - 1
) // comm.get_size("w")
self.out_padding = (
out_dist_h * comm.get_size("h") - self.out_shape[0],
out_dist_w * comm.get_size("w") - self.out_shape[1],
)
# effective image size:
self.inp_shape_eff = [
self.inp_shape[0] + self.inp_padding[0],
self.inp_shape[1] + self.inp_padding[1],
]
self.inp_shape_loc = [
self.inp_shape_eff[0] // comm.get_size("h"),
self.inp_shape_eff[1] // comm.get_size("w"),
]
self.out_shape_eff = [
self.out_shape[0] + self.out_padding[0],
self.out_shape[1] + self.out_padding[1],
]
self.out_shape_loc = [
self.out_shape_eff[0] // comm.get_size("h"),
self.out_shape_eff[1] // comm.get_size("w"),
]
if (comm.get_size("h") > 1) or (comm.get_size("w") > 1):
fft_handle = DistributedRealFFT2
ifft_handle = DistributedInverseRealFFT2
self.trans_down = fft_handle(
*self.inp_shape_eff, lmax=modes_lat, mmax=modes_lon
).float()
self.itrans_up = ifft_handle(
*self.out_shape_eff, lmax=modes_lat, mmax=modes_lon
).float()
self.trans = fft_handle(
self.h, self.w, lmax=modes_lat, mmax=modes_lon
).float()
self.itrans = ifft_handle(
self.h, self.w, lmax=modes_lat, mmax=modes_lon
).float()
else:
raise (ValueError("Unknown spectral transform"))
# use the SHT/FFT to compute the local, downscaled grid dimensions
if (comm.get_size("h") > 1) or (comm.get_size("w") > 1):
self.inp_shape_loc = (
self.trans_down.nlat_local,
self.trans_down.nlon_local,
)
self.inp_shape_eff = [
self.trans_down.nlat_local + self.trans_down.nlatpad_local,
self.trans_down.nlon_local + self.trans_down.nlonpad_local,
]
self.h_loc = self.itrans.nlat_local
self.w_loc = self.itrans.nlon_local
else:
self.inp_shape_loc = (self.trans_down.nlat, self.trans_down.nlon)
self.inp_shape_eff = [self.trans_down.nlat, self.trans_down.nlon]
self.h_loc = self.itrans.nlat
self.w_loc = self.itrans.nlon
# encoder
if comm.get_size("matmul") > 1:
self.encoder = DistributedEncoderDecoder(
num_layers=self.encoder_layers,
input_dim=self.in_chans,
output_dim=self.embed_dim,
hidden_dim=int(1 * self.embed_dim),
act=self.activation_function,
comm_inp_name="fin",
comm_out_name="fout",
)
fblock_mlp_inp_name = self.encoder.comm_out_name
fblock_mlp_hidden_name = (
"fout" if (self.encoder.comm_out_name == "fin") else "fin"
)
else:
self.encoder = EncoderDecoder(
num_layers=self.encoder_layers,
input_dim=self.in_chans,
output_dim=self.embed_dim,
hidden_dim=int(1 * self.embed_dim),
act=self.activation_function,
)
fblock_mlp_inp_name = "fin"
fblock_mlp_hidden_name = "fout"
# dropout
self.pos_drop = nn.Dropout(p=drop_rate) if drop_rate > 0.0 else nn.Identity()
dpr = [x.item() for x in torch.linspace(0, drop_path_rate, self.num_layers)]
# pick norm layer
if self.normalization_layer == "layer_norm":
norm_layer_inp = partial(
nn.LayerNorm,
normalized_shape=(self.inp_shape_loc[0], self.inp_shape_loc[1]),
eps=1e-6,
)
norm_layer_mid = partial(
nn.LayerNorm, normalized_shape=(self.h_loc, self.w_loc), eps=1e-6
)
norm_layer_out = partial(
nn.LayerNorm,
normalized_shape=(self.out_shape_loc[0], self.out_shape_loc[1]),
eps=1e-6,
)
elif self.normalization_layer == "instance_norm":
if comm.get_size("spatial") > 1:
norm_layer_inp = partial(
DistributedInstanceNorm2d,
num_features=self.embed_dim,
eps=1e-6,
affine=True,
)
else:
norm_layer_inp = partial(
nn.InstanceNorm2d,
num_features=self.embed_dim,
eps=1e-6,
affine=True,
track_running_stats=False,
)
norm_layer_out = norm_layer_mid = norm_layer_inp
elif self.normalization_layer == "none":
norm_layer_out = norm_layer_mid = norm_layer_inp = nn.Identity
else:
raise NotImplementedError(
f"Error, normalization {self.normalization_layer} not implemented."
)
# FNO blocks
self.blocks = nn.ModuleList([])
for i in range(self.num_layers):
first_layer = i == 0
last_layer = i == self.num_layers - 1
forward_transform = self.trans_down if first_layer else self.trans
inverse_transform = self.itrans_up if last_layer else self.itrans
inner_skip = "linear"
outer_skip = "identity"
if first_layer and last_layer:
norm_layer = (norm_layer_inp, norm_layer_out)
elif first_layer:
norm_layer = (norm_layer_inp, norm_layer_mid)
elif last_layer:
norm_layer = (norm_layer_mid, norm_layer_out)
else:
norm_layer = (norm_layer_mid, norm_layer_mid)
filter_type = self.filter_type
operator_type = self.operator_type
block = FourierNeuralOperatorBlock(
forward_transform,
inverse_transform,
self.embed_dim,
filter_type=filter_type,
operator_type=operator_type,
mlp_ratio=self.mlp_ratio,
drop_rate=drop_rate,
drop_path=dpr[i],
act_layer=self.activation_function,
norm_layer=norm_layer,
sparsity_threshold=sparsity_threshold,
use_complex_kernels=use_complex_kernels,
inner_skip=inner_skip,
outer_skip=outer_skip,
use_mlp=self.use_mlp,
comm_feature_inp_name=fblock_mlp_inp_name,
comm_feature_hidden_name=fblock_mlp_hidden_name,
rank=self.rank,
factorization=self.factorization,
separable=self.separable,
complex_network=self.complex_network,
complex_activation=self.complex_activation,
spectral_layers=self.spectral_layers,
checkpointing=self.checkpointing,
)
self.blocks.append(block)
# decoder
if comm.get_size("matmul") > 1:
comm_inp_name = fblock_mlp_inp_name
comm_out_name = fblock_mlp_hidden_name
self.decoder = DistributedEncoderDecoder(
num_layers=self.encoder_layers,
input_dim=self.embed_dim,
output_dim=self.out_chans,
hidden_dim=int(1 * self.embed_dim),
act=self.activation_function,
comm_inp_name=comm_inp_name,
comm_out_name=comm_out_name,
)
else:
self.decoder = EncoderDecoder(
num_layers=self.encoder_layers,
input_dim=self.embed_dim + self.big_skip * self.out_chans,
output_dim=self.out_chans,
hidden_dim=int(1 * self.embed_dim),
act=self.activation_function,
)
# output transform
if self.big_skip:
self.residual_transform = nn.Conv2d(
self.out_chans, self.out_chans, 1, bias=False
)
# learned position embedding
if self.pos_embed == "direct":
# currently using deliberately a differently shape position embedding
self.pos_embed = nn.Parameter(
torch.zeros(
1, self.embed_dim, self.inp_shape_loc[0], self.inp_shape_loc[1]
)
)
self.pos_embed.is_shared_mp = ["matmul"]
self.pos_embed.sharded_dims_mp = [None, None, "h", "w"]
self.pos_embed.type = "direct"
trunc_normal_(self.pos_embed, std=0.02)
elif self.pos_embed == "frequency":
if (comm.get_size("h") > 1) or (comm.get_size("w") > 1):
lmax_loc = self.itrans_up.lmax_local
mmax_loc = self.itrans_up.mmax_local
else:
lmax_loc = self.itrans_up.lmax
mmax_loc = self.itrans_up.mmax
rcoeffs = nn.Parameter(
torch.tril(
torch.randn(1, self.embed_dim, lmax_loc, mmax_loc), diagonal=0
)
)
ccoeffs = nn.Parameter(
torch.tril(
torch.randn(1, self.embed_dim, lmax_loc, mmax_loc - 1), diagonal=-1
)
)
trunc_normal_(rcoeffs, std=0.02)
trunc_normal_(ccoeffs, std=0.02)
self.pos_embed = nn.ParameterList([rcoeffs, ccoeffs])
self.pos_embed.type = "frequency"
elif self.pos_embed == "none" or self.pos_embed == "None":
delattr(self, "pos_embed")
else:
raise ValueError("Unknown position embedding type")
if self.output_transform:
minmax_channels = []
for o, c in enumerate(params.out_channels):
if params.channel_names[c][0] == "r":
minmax_channels.append(o)
self.register_buffer(
"minmax_channels",
torch.Tensor(minmax_channels).to(torch.long),
persistent=False,
)
self.apply(self._init_weights)
def _init_weights(self, m): # pragma: no cover
"""Helper routine for weight initialization"""
if isinstance(m, nn.Linear) or isinstance(m, nn.Conv2d):
trunc_normal_(m.weight, std=0.02)
if m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.LayerNorm) or isinstance(m, FusedLayerNorm):
nn.init.constant_(m.bias, 0)
nn.init.constant_(m.weight, 1.0)
@torch.jit.ignore
def no_weight_decay(self): # pragma: no cover
"""Helper"""
return {"pos_embed", "cls_token"}
def _forward_features(self, x): # pragma: no cover
for r in range(self.repeat_layers):
for blk in self.blocks:
if self.checkpointing >= 3:
x = checkpoint(blk, x)
else:
x = blk(x)
return x
    def forward(self, x):  # pragma: no cover
        """Forward pass of the model.

        Pipeline: (optional channel scatter) -> big-skip residual capture ->
        encoder -> positional embedding -> FNO blocks -> decoder ->
        (optional channel gather) -> big-skip add -> optional sigmoid on
        selected output channels.
        """
        # shard the channel dim across the "fin" model-parallel group if used
        if comm.get_size("fin") > 1:
            x = scatter_to_parallel_region(x, "fin", 1)
        # save big skip
        if self.big_skip:
            # if output shape differs, use the spectral transforms to change resolution
            if self.out_shape != self.inp_shape:
                xtype = x.dtype
                # only take the predicted channels as residual
                residual = x[..., : self.out_chans, :, :].to(torch.float32)
                # spectral transforms are run in fp32 regardless of AMP state
                with amp.autocast(enabled=False):
                    residual = self.trans_down(residual)
                    residual = residual.contiguous()
                    # residual = self.inverse_transform(residual)
                    residual = self.itrans_up(residual)
                    residual = residual.to(dtype=xtype)
            else:
                # only take the predicted channels
                residual = x[..., : self.out_chans, :, :]
        # point-wise encoder lifting in_chans -> embed_dim (optionally checkpointed)
        if self.checkpointing >= 1:
            x = checkpoint(self.encoder, x)
        else:
            x = self.encoder(x)
        if hasattr(self, "pos_embed"):
            if self.pos_embed.type == "frequency":
                # assemble complex coefficients; the imaginary part is padded
                # with one zero column so the first mode stays purely real
                pos_embed = torch.stack(
                    [
                        self.pos_embed[0],
                        nn.functional.pad(self.pos_embed[1], (1, 0), "constant", 0),
                    ],
                    dim=-1,
                )
                with amp.autocast(enabled=False):
                    # synthesize the spatial embedding from its spectral coefficients
                    pos_embed = self.itrans_up(torch.view_as_complex(pos_embed))
            else:
                pos_embed = self.pos_embed
            # old way of treating unequally shaped weights: add the embedding
            # only on the unpadded local region and zero out the padding
            if (
                self.pos_embed.type == "direct"
                and self.inp_shape_loc != self.inp_shape_eff
            ):
                xp = torch.zeros_like(x)
                xp[..., : self.inp_shape_loc[0], : self.inp_shape_loc[1]] = (
                    x[..., : self.inp_shape_loc[0], : self.inp_shape_loc[1]] + pos_embed
                )
                x = xp
            else:
                x = x + pos_embed
        # maybe clean the padding just in case
        x = self.pos_drop(x)
        # do the feature extraction
        x = self._forward_features(x)
        # concatenate the big-skip residual along channels before decoding
        if self.big_skip:
            x = torch.cat((x, residual), dim=1)
        # point-wise decoder projecting back to out_chans (optionally checkpointed)
        if self.checkpointing >= 1:
            x = checkpoint(self.decoder, x)
        else:
            x = self.decoder(x)
        # gather channels if the decoder output is still model-parallel
        if hasattr(self.decoder, "comm_out_name") and (
            comm.get_size(self.decoder.comm_out_name) > 1
        ):
            x = gather_from_parallel_region(x, self.decoder.comm_out_name, 1)
        # 1x1-conv-transformed residual added to the prediction
        if self.big_skip:
            x = x + self.residual_transform(residual)
        # sigmoid on minmax_channels (channels whose names start with "r" —
        # presumably quantities bounded in [0, 1]; see __init__)
        if self.output_transform:
            x[:, self.minmax_channels] = torch.sigmoid(x[:, self.minmax_channels])
        return x
|
modulus-main
|
modulus/models/sfno/sfnonet.py
|
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
@torch.jit.script
def compl_mul1d_fwd(
    a: torch.Tensor, b: torch.Tensor
) -> torch.Tensor:  # pragma: no cover
    """Complex 1D channel-mixing contraction ``"bix,io->box"``.

    Both operands are real-valued views of complex tensors (trailing dim of
    size 2); the result is returned in the same real-view layout.
    """
    a_cplx = torch.view_as_complex(a)
    b_cplx = torch.view_as_complex(b)
    return torch.view_as_real(torch.einsum("bix,io->box", a_cplx, b_cplx))
@torch.jit.script
def compl_muladd1d_fwd(
    a: torch.Tensor, b: torch.Tensor, c: torch.Tensor
) -> torch.Tensor:  # pragma: no cover
    """Fused complex multiply-add: ``einsum("bix,io->box", a, b) + c``.

    Equivalent to ``compl_mul1d_fwd(a, b)`` followed by adding ``c``; the
    helper is inlined so the product is never round-tripped through a real view.
    """
    prod = torch.einsum(
        "bix,io->box", torch.view_as_complex(a), torch.view_as_complex(b)
    )
    return torch.view_as_real(prod + torch.view_as_complex(c))
@torch.jit.script
def compl_mul2d_fwd(
    a: torch.Tensor, b: torch.Tensor
) -> torch.Tensor:  # pragma: no cover
    """Complex 2D channel-mixing contraction ``"bixy,io->boxy"``.

    Operands and result are real-valued views of complex tensors
    (trailing dim of size 2).
    """
    a_cplx = torch.view_as_complex(a)
    b_cplx = torch.view_as_complex(b)
    return torch.view_as_real(torch.einsum("bixy,io->boxy", a_cplx, b_cplx))
@torch.jit.script
def compl_muladd2d_fwd(
    a: torch.Tensor, b: torch.Tensor, c: torch.Tensor
) -> torch.Tensor:  # pragma: no cover
    """Fused complex 2D multiply-add: ``einsum("bixy,io->boxy", a, b) + c``.

    Equivalent to ``compl_mul2d_fwd(a, b)`` plus ``c`` (inlined helper).
    """
    prod = torch.einsum(
        "bixy,io->boxy", torch.view_as_complex(a), torch.view_as_complex(b)
    )
    return torch.view_as_real(prod + torch.view_as_complex(c))
@torch.jit.script # TODO remove
def _contract_localconv_fwd(
a: torch.Tensor, b: torch.Tensor
) -> torch.Tensor: # pragma: no cover
"""
Performs a complex local convolution operation between two tensors 'a' and 'b'.
"""
ac = torch.view_as_complex(a)
bc = torch.view_as_complex(b)
resc = torch.einsum("bixy,iox->boxy", ac, bc)
res = torch.view_as_real(resc)
return res
@torch.jit.script # TODO remove
def _contract_blockconv_fwd(
a: torch.Tensor, b: torch.Tensor
) -> torch.Tensor: # pragma: no cover
"""
Performs a complex block convolution operation between two tensors 'a' and 'b'.
"""
ac = torch.view_as_complex(a)
bc = torch.view_as_complex(b)
resc = torch.einsum("bim,imn->bin", ac, bc)
res = torch.view_as_real(resc)
return res
@torch.jit.script # TODO remove
def _contractadd_blockconv_fwd(
a: torch.Tensor, b: torch.Tensor, c: torch.Tensor
) -> torch.Tensor: # pragma: no cover
"""
Performs a complex block convolution operation between two tensors 'a' and 'b', and
then adds a third tensor 'c'.
"""
tmpcc = torch.view_as_complex(_contract_blockconv_fwd(a, b))
cc = torch.view_as_complex(c)
return torch.view_as_real(tmpcc + cc)
# for the experimental layer
@torch.jit.script  # TODO remove
def compl_exp_mul2d_fwd(
    a: torch.Tensor, b: torch.Tensor
) -> torch.Tensor:  # pragma: no cover
    """Complex 2D contraction ``"bixy,xio->boxy"`` with x-dependent weights.

    Used by the experimental layer; operands and result are real-valued
    views of complex tensors.
    """
    a_cplx = torch.view_as_complex(a)
    b_cplx = torch.view_as_complex(b)
    return torch.view_as_real(torch.einsum("bixy,xio->boxy", a_cplx, b_cplx))
@torch.jit.script
def compl_exp_muladd2d_fwd(  # TODO remove
    a: torch.Tensor, b: torch.Tensor, c: torch.Tensor
) -> torch.Tensor:  # pragma: no cover
    """Fused complex multiply-add variant of ``compl_exp_mul2d_fwd``.

    Computes ``einsum("bixy,xio->boxy", a, b) + c`` (inlined helper).
    """
    prod = torch.einsum(
        "bixy,xio->boxy", torch.view_as_complex(a), torch.view_as_complex(b)
    )
    return torch.view_as_real(prod + torch.view_as_complex(c))
@torch.jit.script
def real_mul2d_fwd(
    a: torch.Tensor, b: torch.Tensor
) -> torch.Tensor:  # pragma: no cover
    """Real-valued 2D channel-mixing contraction ``"bixy,io->boxy"``."""
    return torch.einsum("bixy,io->boxy", a, b)
@torch.jit.script
def real_muladd2d_fwd(
    a: torch.Tensor, b: torch.Tensor, c: torch.Tensor
) -> torch.Tensor:  # pragma: no cover
    """Fused real 2D multiply-add: ``einsum("bixy,io->boxy", a, b) + c``.

    Equivalent to ``real_mul2d_fwd(a, b) + c`` (inlined helper).
    """
    return torch.einsum("bixy,io->boxy", a, b) + c
# new contractions set to replace older ones. We use complex
@torch.jit.script
def _contract_diagonal(
a: torch.Tensor, b: torch.Tensor
) -> torch.Tensor: # pragma: no cover
"""
Performs a complex diagonal operation between two tensors 'a' and 'b'.
"""
ac = torch.view_as_complex(a)
bc = torch.view_as_complex(b)
resc = torch.einsum("bixy,ioxy->boxy", ac, bc)
res = torch.view_as_real(resc)
return res
@torch.jit.script
def _contract_dhconv(
a: torch.Tensor, b: torch.Tensor
) -> torch.Tensor: # pragma: no cover
"""
Performs a complex Driscoll-Healy style convolution operation between two tensors
'a' and 'b'.
"""
ac = torch.view_as_complex(a)
bc = torch.view_as_complex(b)
resc = torch.einsum("bixy,iox->boxy", ac, bc)
res = torch.view_as_real(resc)
return res
@torch.jit.script
def _contract_sep_diagonal(
a: torch.Tensor, b: torch.Tensor
) -> torch.Tensor: # pragma: no cover
"""
Performs a complex convolution operation between two tensors 'a' and 'b'
"""
ac = torch.view_as_complex(a)
bc = torch.view_as_complex(b)
resc = torch.einsum("bixy,ixy->boxy", ac, bc)
res = torch.view_as_real(resc)
return res
@torch.jit.script
def _contract_sep_dhconv(
a: torch.Tensor, b: torch.Tensor
) -> torch.Tensor: # pragma: no cover
"""
Performs a complex convolution operation between two tensors 'a' and 'b'
"""
ac = torch.view_as_complex(a)
bc = torch.view_as_complex(b)
resc = torch.einsum("bixy,ix->boxy", ac, bc)
res = torch.view_as_real(resc)
return res
@torch.jit.script
def _contract_diagonal_real(
a: torch.Tensor, b: torch.Tensor
) -> torch.Tensor: # pragma: no cover
"""
Performs a complex convolution operation between two tensors 'a' and 'b'
"""
res = torch.einsum("bixys,ioxy->boxys", a, b).contiguous()
return res
@torch.jit.script
def _contract_dhconv_real(
a: torch.Tensor, b: torch.Tensor
) -> torch.Tensor: # pragma: no cover
"""
Performs a complex convolution operation between two tensors 'a' and 'b'
"""
res = torch.einsum("bixys,iox->boxys", a, b).contiguous()
return res
@torch.jit.script
def _contract_sep_diagonal_real(
a: torch.Tensor, b: torch.Tensor
) -> torch.Tensor: # pragma: no cover
"""
Performs a complex convolution operation between two tensors 'a' and 'b'
"""
res = torch.einsum("bixys,ixy->boxys", a, b).contiguous()
return res
@torch.jit.script
def _contract_sep_dhconv_real(
a: torch.Tensor, b: torch.Tensor
) -> torch.Tensor: # pragma: no cover
"""
Performs a complex convolution operation between two tensors 'a' and 'b'
"""
res = torch.einsum("bixys,ix->boxys", a, b).contiguous()
return res
|
modulus-main
|
modulus/models/sfno/contractions.py
|
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
import torch.nn as nn
from torch import Tensor
from dgl import DGLGraph
from typing import Tuple, Union
from .mesh_graph_mlp import MeshGraphMLP
from .utils import aggregate_and_concat, CuGraphCSC
class MeshNodeBlock(nn.Module):
    """Node block used e.g. in GraphCast or MeshGraphNet
    operating on a latent space represented by a mesh.

    Parameters
    ----------
    aggregation : str, optional
        Aggregation method ("sum", "mean"), by default "sum"
    input_dim_nodes : int, optional
        Input dimensionality of the node features, by default 512
    input_dim_edges : int, optional
        Input dimensionality of the edge features, by default 512
    output_dim : int, optional
        Output dimensionality of the node features, by default 512
    hidden_dim : int, optional
        Number of neurons in each hidden layer, by default 512
    hidden_layers : int, optional
        Number of hidden layers, by default 1
    activation_fn : nn.Module, optional
        Type of activation function, by default nn.SiLU()
    norm_type : str, optional
        Normalization type, by default "LayerNorm"
    recompute_activation : bool, optional
        Flag for recomputing activation in backward to save memory, by default False.
        Currently, only SiLU is supported.
    """

    def __init__(
        self,
        aggregation: str = "sum",
        input_dim_nodes: int = 512,
        input_dim_edges: int = 512,
        output_dim: int = 512,
        hidden_dim: int = 512,
        hidden_layers: int = 1,
        activation_fn: nn.Module = nn.SiLU(),
        norm_type: str = "LayerNorm",
        recompute_activation: bool = False,
    ):
        super().__init__()
        self.aggregation = aggregation
        # node-update MLP consumes [node features | aggregated edge features]
        self.node_mlp = MeshGraphMLP(
            input_dim=input_dim_nodes + input_dim_edges,
            output_dim=output_dim,
            hidden_dim=hidden_dim,
            hidden_layers=hidden_layers,
            activation_fn=activation_fn,
            norm_type=norm_type,
            recompute_activation=recompute_activation,
        )

    @torch.jit.ignore()
    def forward(
        self,
        efeat: Tensor,
        nfeat: Tensor,
        graph: Union[DGLGraph, CuGraphCSC],
    ) -> Tuple[Tensor, Tensor]:
        # aggregate incident edge features and concatenate them to node features
        aggregated = aggregate_and_concat(efeat, nfeat, graph, self.aggregation)
        # node MLP with residual connection; edge features pass through unchanged
        return efeat, self.node_mlp(aggregated) + nfeat
|
modulus-main
|
modulus/models/gnn_layers/mesh_node_block.py
|
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
|
modulus-main
|
modulus/models/gnn_layers/__init__.py
|
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
import torch.nn as nn
from typing import Tuple, Union
from torch import Tensor
from dgl import DGLGraph
from .mesh_graph_mlp import MeshGraphEdgeMLPConcat, MeshGraphEdgeMLPSum
from .utils import CuGraphCSC
class MeshEdgeBlock(nn.Module):
    """Edge block used e.g. in GraphCast or MeshGraphNet
    operating on a latent space represented by a mesh.

    Parameters
    ----------
    input_dim_nodes : int, optional
        Input dimensionality of the node features, by default 512
    input_dim_edges : int, optional
        Input dimensionality of the edge features, by default 512
    output_dim : int, optional
        Output dimensionality of the edge features, by default 512
    hidden_dim : int, optional
        Number of neurons in each hidden layer, by default 512
    hidden_layers : int, optional
        Number of hidden layers, by default 1
    activation_fn : nn.Module, optional
        Type of activation function, by default nn.SiLU()
    norm_type : str, optional
        Normalization type, by default "LayerNorm"
    do_concat_trick : bool, default=False
        Whether to replace concat+MLP with MLP+idx+sum
    recompute_activation : bool, optional
        Flag for recomputing activation in backward to save memory, by default False.
        Currently, only SiLU is supported.
    """

    def __init__(
        self,
        input_dim_nodes: int = 512,
        input_dim_edges: int = 512,
        output_dim: int = 512,
        hidden_dim: int = 512,
        hidden_layers: int = 1,
        activation_fn: nn.Module = nn.SiLU(),
        norm_type: str = "LayerNorm",
        do_concat_trick: bool = False,
        recompute_activation: bool = False,
    ):
        super().__init__()
        # the concat trick replaces the explicit [edge|src|dst] concat + MLP
        # by per-input MLPs whose outputs are summed per edge
        MLP = MeshGraphEdgeMLPSum if do_concat_trick else MeshGraphEdgeMLPConcat
        self.edge_mlp = MLP(
            efeat_dim=input_dim_edges,
            src_dim=input_dim_nodes,
            dst_dim=input_dim_nodes,
            output_dim=output_dim,
            hidden_dim=hidden_dim,
            hidden_layers=hidden_layers,
            activation_fn=activation_fn,
            norm_type=norm_type,
            recompute_activation=recompute_activation,
        )

    @torch.jit.ignore()
    def forward(
        self,
        efeat: Tensor,
        nfeat: Tensor,
        graph: Union[DGLGraph, CuGraphCSC],
    ) -> Tuple[Tensor, Tensor]:
        # fix: the return annotation previously claimed a single Tensor, but a
        # (edge, node) tuple is returned so blocks can be chained uniformly
        # update edge features + residual connection; node features pass through
        efeat_new = self.edge_mlp(efeat, nfeat, graph)
        efeat_new = efeat_new + efeat
        return efeat_new, nfeat
|
modulus-main
|
modulus/models/gnn_layers/mesh_edge_block.py
|
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
import torch.nn as nn
from torch import Tensor
from dgl import DGLGraph
from typing import Tuple, Union
from .mesh_graph_mlp import MeshGraphMLP, MeshGraphEdgeMLPConcat, MeshGraphEdgeMLPSum
from .utils import aggregate_and_concat, CuGraphCSC
class MeshGraphEncoder(nn.Module):
    """Encoder used e.g. in GraphCast
    which acts on the bipartite graph connecting a mostly
    regular grid (e.g. representing the input domain) to a mesh
    (e.g. representing a latent space).

    Parameters
    ----------
    aggregation : str, optional
        Message passing aggregation method ("sum", "mean"), by default "sum"
    input_dim_src_nodes : int, optional
        Input dimensionality of the source node features, by default 512
    input_dim_dst_nodes : int, optional
        Input dimensionality of the destination node features, by default 512
    input_dim_edges : int, optional
        Input dimensionality of the edge features, by default 512
    output_dim_src_nodes : int, optional
        Output dimensionality of the source node features, by default 512
    output_dim_dst_nodes : int, optional
        Output dimensionality of the destination node features, by default 512
    output_dim_edges : int, optional
        Output dimensionality of the edge features, by default 512
    hidden_dim : int, optional
        Number of neurons in each hidden layer, by default 512
    hidden_layers : int, optional
        Number of hidden layers, by default 1
    activation_fn : nn.Module, optional
        Type of activation function, by default nn.SiLU()
    norm_type : str, optional
        Normalization type, by default "LayerNorm"
    do_concat_trick : bool, default=False
        Whether to replace concat+MLP with MLP+idx+sum
    recompute_activation : bool, optional
        Flag for recomputing activation in backward to save memory, by default False.
        Currently, only SiLU is supported.
    """

    def __init__(
        self,
        aggregation: str = "sum",
        input_dim_src_nodes: int = 512,
        input_dim_dst_nodes: int = 512,
        input_dim_edges: int = 512,
        output_dim_src_nodes: int = 512,
        output_dim_dst_nodes: int = 512,
        output_dim_edges: int = 512,
        hidden_dim: int = 512,
        hidden_layers: int = 1,
        # annotation fixed: default is an nn.Module instance, not an int
        activation_fn: nn.Module = nn.SiLU(),
        norm_type: str = "LayerNorm",
        do_concat_trick: bool = False,
        recompute_activation: bool = False,
    ):
        super().__init__()
        self.aggregation = aggregation
        MLP = MeshGraphEdgeMLPSum if do_concat_trick else MeshGraphEdgeMLPConcat
        # edge MLP
        self.edge_mlp = MLP(
            efeat_dim=input_dim_edges,
            src_dim=input_dim_src_nodes,
            dst_dim=input_dim_dst_nodes,
            output_dim=output_dim_edges,
            hidden_dim=hidden_dim,
            hidden_layers=hidden_layers,
            activation_fn=activation_fn,
            norm_type=norm_type,
            recompute_activation=recompute_activation,
        )
        # src node MLP
        self.src_node_mlp = MeshGraphMLP(
            input_dim=input_dim_src_nodes,
            output_dim=output_dim_src_nodes,
            hidden_dim=hidden_dim,
            hidden_layers=hidden_layers,
            activation_fn=activation_fn,
            norm_type=norm_type,
            recompute_activation=recompute_activation,
        )
        # dst node MLP: consumes [dst node features | aggregated edge features]
        self.dst_node_mlp = MeshGraphMLP(
            input_dim=input_dim_dst_nodes + output_dim_edges,
            output_dim=output_dim_dst_nodes,
            hidden_dim=hidden_dim,
            hidden_layers=hidden_layers,
            activation_fn=activation_fn,
            norm_type=norm_type,
            recompute_activation=recompute_activation,
        )

    @torch.jit.ignore()
    def forward(
        self,
        g2m_efeat: Tensor,
        grid_nfeat: Tensor,
        mesh_nfeat: Tensor,
        graph: Union[DGLGraph, CuGraphCSC],
    ) -> Tuple[Tensor, Tensor]:
        # update edge features by concatenating node features (both mesh and grid)
        # and existing edge features (or applying the concat trick instead)
        efeat = self.edge_mlp(g2m_efeat, (grid_nfeat, mesh_nfeat), graph)
        # aggregate messages (edge features) to obtain updated node features
        cat_feat = aggregate_and_concat(efeat, mesh_nfeat, graph, self.aggregation)
        # update src, dst node features + residual connections
        mesh_nfeat = mesh_nfeat + self.dst_node_mlp(cat_feat)
        grid_nfeat = grid_nfeat + self.src_node_mlp(grid_nfeat)
        return grid_nfeat, mesh_nfeat
|
modulus-main
|
modulus/models/gnn_layers/mesh_graph_encoder.py
|
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Tuple, Optional, Union
import torch
import torch.nn as nn
import torch.nn.functional as F
from dgl import DGLGraph
from torch import Tensor
from torch.autograd.function import once_differentiable
from .utils import concat_efeat, sum_efeat, CuGraphCSC
from modulus.models.layers.fused_silu import silu_backward_for
# apex is an optional dependency: use its fused LayerNorm when present.
# Fix: catch only ImportError instead of a bare `except`, which also
# swallowed KeyboardInterrupt/SystemExit and unrelated errors.
try:
    from apex.normalization import FusedLayerNorm

    apex_imported = True
except ImportError:
    apex_imported = False
class CustomSiLuLinearAutogradFunction(torch.autograd.Function):
    """Fused SiLU + Linear autograd function.

    Saves only the raw ``features`` tensor for backward and recomputes the
    SiLU activation there, trading a little compute for activation memory.
    """

    @staticmethod
    def forward(
        ctx,
        features: torch.Tensor,
        weight: torch.Tensor,
        bias: torch.Tensor,
    ) -> torch.Tensor:
        """Compute ``linear(silu(features), weight, bias)``."""
        # save the pre-activation input (not silu(features)) so the
        # activation can be recomputed during backward
        ctx.save_for_backward(features, weight)
        activated = F.silu(features)
        return F.linear(activated, weight, bias)

    @staticmethod
    @once_differentiable
    def backward(
        ctx, grad_output: torch.Tensor
    ) -> Tuple[Optional[torch.Tensor], Optional[torch.Tensor], Optional[torch.Tensor],]:
        """Backward pass of the SiLU + Linear function."""
        need_dgrad, need_wgrad, need_bgrad = ctx.needs_input_grad
        features, weight = ctx.saved_tensors

        grad_features: Optional[torch.Tensor] = None
        grad_weight: Optional[torch.Tensor] = None
        grad_bias: Optional[torch.Tensor] = None

        if need_bgrad:
            grad_bias = grad_output.sum(dim=0)
        if need_wgrad:
            # recompute the activation rather than having stored it in forward
            grad_weight = grad_output.T @ F.silu(features)
        if need_dgrad:
            grad_features = grad_output @ weight
            # d(silu)/d(features), computed by the project-provided kernel
            silu_backward = silu_backward_for(features.dtype, features.dim())
            grad_features = grad_features * silu_backward.execute([features])[0]
        return grad_features, grad_weight, grad_bias
class MeshGraphMLP(nn.Module):
    """MLP used by the mesh/grid graph building blocks.

    Network layout: ``Linear -> act -> [Linear -> act] * (hidden_layers - 1)
    -> Linear [-> norm]``. The fixed layout (linears at even indices of the
    Sequential, optional norm last) is relied upon by
    ``custom_silu_linear_forward``.

    Parameters
    ----------
    input_dim : int
        Dimensionality of the input features.
    output_dim : int, optional
        Dimensionality of the output features, by default 512.
    hidden_dim : int, optional
        Number of neurons in each hidden layer, by default 512.
    hidden_layers : int, optional
        Number of hidden layers, by default 1.
    activation_fn : nn.Module, optional
        Activation after every hidden linear layer, by default nn.SiLU().
    norm_type : str, optional
        Normalization applied after the output layer, by default "LayerNorm".
    recompute_activation : bool, optional
        Recompute SiLU in the backward pass to save activation memory,
        by default False. Only supported for nn.SiLU.
    """

    def __init__(
        self,
        input_dim: int,
        output_dim: int = 512,
        hidden_dim: int = 512,
        hidden_layers: int = 1,
        activation_fn: nn.Module = nn.SiLU(),
        norm_type: str = "LayerNorm",
        recompute_activation: bool = False,
    ):
        super().__init__()
        self.hidden_layers = hidden_layers

        # input layer, (hidden_layers - 1) hidden layers, output projection
        modules = [nn.Linear(input_dim, hidden_dim), activation_fn]
        for _ in range(hidden_layers - 1):
            modules += [nn.Linear(hidden_dim, hidden_dim), activation_fn]
        modules.append(nn.Linear(hidden_dim, output_dim))

        self.norm_type = norm_type
        if norm_type is not None:
            assert norm_type in [
                "LayerNorm",
                "GraphNorm",
                "InstanceNorm",
                "BatchNorm",
                "MessageNorm",
            ]
            # prefer apex's fused LayerNorm when it is available
            if norm_type == "LayerNorm" and apex_imported:
                norm_layer = FusedLayerNorm
            else:
                norm_layer = getattr(nn, norm_type)
            modules.append(norm_layer(output_dim))
        self.model = nn.Sequential(*modules)

        if recompute_activation:
            # the fused recompute path only implements SiLU
            assert isinstance(activation_fn, nn.SiLU)
            self.recompute_activation = True
        else:
            self.recompute_activation = False

    def default_forward(self, x: Tensor) -> Tensor:
        """Plain forward through the Sequential model."""
        return self.model(x)

    @torch.jit.ignore()
    def custom_silu_linear_forward(self, x: Tensor) -> Tensor:
        """Forward pass where each SiLU is recomputed during backward."""
        hidden = self.model[0](x)
        for idx in range(1, self.hidden_layers + 1):
            # linears sit at even Sequential indices; fuse SiLU + Linear
            linear = self.model[2 * idx]
            hidden = CustomSiLuLinearAutogradFunction.apply(
                hidden, linear.weight, linear.bias
            )
        if self.norm_type is not None:
            hidden = self.model[2 * self.hidden_layers + 1](hidden)
        return hidden

    def forward(self, x: Tensor) -> Tensor:
        if self.recompute_activation:
            return self.custom_silu_linear_forward(x)
        return self.default_forward(x)
class MeshGraphEdgeMLPConcat(MeshGraphMLP):
    """Edge-feature MLP that concatenates the edge features with the node
    features of the edge's source and destination nodes, then transforms the
    concatenation through the shared MeshGraphMLP stack.

    Parameters
    ----------
    efeat_dim : int, optional
        Dimension of the input edge features, by default 512.
    src_dim : int, optional
        Dimension of the input src-node features, by default 512.
    dst_dim : int, optional
        Dimension of the input dst-node features, by default 512.
    output_dim : int, optional
        Dimensionality of the output features, by default 512.
    hidden_dim : int, optional
        Number of neurons in each hidden layer, by default 512.
    hidden_layers : int, optional
        Number of hidden layers, by default 2.
    activation_fn : nn.Module, optional
        Activation function, by default nn.SiLU().
    norm_type : str, optional
        Normalization type, by default "LayerNorm".
    bias : bool, optional
        Accepted for interface parity with MeshGraphEdgeMLPSum; currently not
        forwarded to the underlying linear layers. By default True.
    recompute_activation : bool, optional
        Recompute SiLU in backward to save memory, by default False.
        Only nn.SiLU is supported.
    """

    def __init__(
        self,
        efeat_dim: int = 512,
        src_dim: int = 512,
        dst_dim: int = 512,
        output_dim: int = 512,
        hidden_dim: int = 512,
        hidden_layers: int = 2,
        activation_fn: nn.Module = nn.SiLU(),
        norm_type: str = "LayerNorm",
        bias: bool = True,
        recompute_activation: bool = False,
    ):
        # the MLP input is the concatenation [efeat | src nfeat | dst nfeat]
        super().__init__(
            efeat_dim + src_dim + dst_dim,
            output_dim,
            hidden_dim,
            hidden_layers,
            activation_fn,
            norm_type,
            recompute_activation,
        )

    def forward(
        self,
        efeat: Tensor,
        nfeat: Union[Tensor, Tuple[Tensor]],
        graph: Union[DGLGraph, CuGraphCSC],
    ) -> Tensor:
        # build per-edge [edge | src | dst] features, then apply the MLP stack
        return self.model(concat_efeat(efeat, nfeat, graph))
class MeshGraphEdgeMLPSum(nn.Module):
    """MLP layer which is commonly used in building blocks
    of models operating on the union of grids and meshes. It
    consists of a number of linear layers followed by an activation
    and a norm layer following the last linear layer. It transforms
    edge features - which originally are intended to be a concatenation
    of previous edge features, and the node features of the corresponding
    source and destination nodes - by transforming these three features
    individually through separate linear transformations and then sums
    them for each edge accordingly. The result of this is transformed
    through the remaining linear layers and activation or norm functions.

    This is mathematically equivalent to concatenating the three inputs and
    applying one large first linear layer (cf. ``MeshGraphEdgeMLPConcat``),
    but avoids materializing the concatenated per-edge tensor.

    Parameters
    ----------
    efeat_dim: int
        dimension of the input edge features
    src_dim: int
        dimension of the input src-node features
    dst_dim: int
        dimension of the input dst-node features
    output_dim : int, optional
        dimensionality of the output features, by default 512
    hidden_dim : int, optional
        number of neurons in each hidden layer, by default 512
    hidden_layers : int, optional
        number of hidden layers, by default 1
    activation_fn : nn.Module, optional
        type of activation function, by default nn.SiLU()
    norm_type : str, optional
        normalization type, by default "LayerNorm"
    bias : bool, optional
        whether to use bias in the first (split) linear layer, by default True.
        NOTE(review): the hidden/output ``nn.Linear`` layers below always use
        their default bias regardless of this flag — confirm intended.
    recompute_activation : bool, optional
        Flag for recomputing activation in backward to save memory, by default False.
        Currently, only SiLU is supported.
    """

    def __init__(
        self,
        efeat_dim: int,
        src_dim: int,
        dst_dim: int,
        output_dim: int = 512,
        hidden_dim: int = 512,
        hidden_layers: int = 1,
        activation_fn: nn.Module = nn.SiLU(),
        norm_type: str = "LayerNorm",
        bias: bool = True,
        recompute_activation: bool = False,
    ):
        super().__init__()
        self.efeat_dim = efeat_dim
        self.src_dim = src_dim
        self.dst_dim = dst_dim

        # Initialize a single "concat-style" linear layer and split its weight
        # matrix column-wise into three per-input blocks.
        # this should ensure the same sequence of initializations
        # as the original MLP-Layer in combination with a concat operation
        tmp_lin = nn.Linear(efeat_dim + src_dim + dst_dim, hidden_dim, bias=bias)
        # orig_weight has shape (hidden_dim, efeat_dim + src_dim + dst_dim)
        orig_weight = tmp_lin.weight
        w_efeat, w_src, w_dst = torch.split(
            orig_weight, [efeat_dim, src_dim, dst_dim], dim=1
        )
        self.lin_efeat = nn.Parameter(w_efeat)
        self.lin_src = nn.Parameter(w_src)
        self.lin_dst = nn.Parameter(w_dst)
        # keep a single bias tensor: it is added once to the summed result
        # (see forward_truncated_sum), not once per input block
        if bias:
            self.bias = tmp_lin.bias
        else:
            self.bias = None

        # The first linear layer is applied manually via F.linear in
        # forward_truncated_sum, so self.model starts with the activation
        # that follows it: [act, lin, act, lin, ..., lin(, norm)].
        layers = [activation_fn]
        self.hidden_layers = hidden_layers
        for _ in range(hidden_layers - 1):
            layers += [nn.Linear(hidden_dim, hidden_dim), activation_fn]
        layers.append(nn.Linear(hidden_dim, output_dim))

        self.norm_type = norm_type
        if norm_type is not None:
            assert norm_type in [
                "LayerNorm",
                "GraphNorm",
                "InstanceNorm",
                "BatchNorm",
                "MessageNorm",
            ]
            # prefer apex's fused LayerNorm implementation when available
            if norm_type == "LayerNorm" and apex_imported:
                norm_layer = FusedLayerNorm
            else:
                norm_layer = getattr(nn, norm_type)
            layers.append(norm_layer(output_dim))

        self.model = nn.Sequential(*layers)

        # optional memory-saving path; only valid for SiLU activations
        if recompute_activation:
            assert isinstance(activation_fn, nn.SiLU)
            self.recompute_activation = True
        else:
            self.recompute_activation = False

    def forward_truncated_sum(
        self,
        efeat: Tensor,
        nfeat: Union[Tensor, Tuple[Tensor]],
        graph: Union[DGLGraph, CuGraphCSC],
    ) -> Tensor:
        """forward pass of the truncated MLP. This uses separate linear layers without
        bias. Bias is added to one MLP, as we sum afterwards. This adds the bias to the
        total sum, too. Having it in one F.linear should allow a fusion of the bias
        addition while avoiding adding the bias to the "edge-level" result.
        """
        # homogeneous setting: the same node features act as src and dst
        if isinstance(nfeat, Tensor):
            src_feat, dst_feat = nfeat, nfeat
        else:
            src_feat, dst_feat = nfeat
        mlp_efeat = F.linear(efeat, self.lin_efeat, None)
        mlp_src = F.linear(src_feat, self.lin_src, None)
        # bias is attached to exactly one of the three terms (see docstring)
        mlp_dst = F.linear(dst_feat, self.lin_dst, self.bias)
        # scatter the node results onto edges and sum with the edge term
        mlp_sum = sum_efeat(mlp_efeat, (mlp_src, mlp_dst), graph)
        return mlp_sum

    def default_forward(
        self,
        efeat: Tensor,
        nfeat: Union[Tensor, Tuple[Tensor]],
        graph: Union[DGLGraph, CuGraphCSC],
    ) -> Tensor:
        """Default forward pass of the truncated MLP."""
        mlp_sum = self.forward_truncated_sum(
            efeat,
            nfeat,
            graph,
        )
        return self.model(mlp_sum)

    def custom_silu_linear_forward(
        self,
        efeat: Tensor,
        nfeat: Union[Tensor, Tuple[Tensor]],
        graph: Union[DGLGraph, CuGraphCSC],
    ) -> Tensor:
        """Forward pass of the truncated MLP with custom SiLU function."""
        mlp_sum = self.forward_truncated_sum(
            efeat,
            nfeat,
            graph,
        )
        # self.model layout is [act, lin, act, lin, ..., lin(, norm)];
        # model[2*i - 1] is the i-th linear layer, fused here with the
        # preceding SiLU via the custom autograd function.
        lin = self.model[1]
        hidden = CustomSiLuLinearAutogradFunction.apply(mlp_sum, lin.weight, lin.bias)
        for i in range(2, self.hidden_layers + 1):
            lin = self.model[2 * i - 1]
            hidden = CustomSiLuLinearAutogradFunction.apply(
                hidden, lin.weight, lin.bias
            )

        # the final norm layer (if any) sits at index 2 * hidden_layers
        if self.norm_type is not None:
            norm = self.model[2 * self.hidden_layers]
            hidden = norm(hidden)
        return hidden

    def forward(
        self,
        efeat: Tensor,
        nfeat: Union[Tensor, Tuple[Tensor]],
        graph: Union[DGLGraph, CuGraphCSC],
    ) -> Tensor:
        # dispatch to the memory-saving fused-SiLU path when enabled
        if self.recompute_activation:
            return self.custom_silu_linear_forward(efeat, nfeat, graph)
        return self.default_forward(efeat, nfeat, graph)
|
modulus-main
|
modulus/models/gnn_layers/mesh_graph_mlp.py
|
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from torch import Tensor
from dgl import DGLGraph
import dgl.function as fn
from typing import Any, Callable, Dict, Optional, Union, Tuple
from torch.utils.checkpoint import checkpoint
# cugraph-ops is an optional, GPU-only dependency; fall back to the
# DGL-based code paths when it is not installed. Catch only ImportError
# (a bare `except:` would also hide typos and KeyboardInterrupt), and
# define the graph types as None too so module attributes always exist.
try:
    from pylibcugraphops.pytorch import StaticCSC, BipartiteCSC
    from pylibcugraphops.pytorch.operators import (
        update_efeat_bipartite_e2e,
        update_efeat_static_e2e,
        agg_concat_e2n,
    )
except ImportError:
    StaticCSC = None
    BipartiteCSC = None
    update_efeat_bipartite_e2e = None
    update_efeat_static_e2e = None
    agg_concat_e2n = None
class CuGraphCSC:
    """Constructs a CuGraphCSC object.

    Thin wrapper around a CSC (compressed sparse column) graph description
    that lazily converts to and caches the corresponding cugraph-ops graph
    types (``StaticCSC`` / ``BipartiteCSC``).

    Parameters
    ----------
    offsets : Tensor
        The offsets tensor.
    indices : Tensor
        The indices tensor.
    num_src_nodes : int
        The number of source nodes.
    num_dst_nodes : int
        The number of destination nodes.
    ef_indices : Optional[Tensor], optional
        The edge feature indices tensor, by default None
    reverse_graph_bwd : bool, optional
        Whether to reverse the graph for the backward pass, by default True
    cache_graph : bool, optional
        Whether to cache graph structures when wrapping offsets and indices
        to the corresponding cugraph-ops graph types. If graphs change in each
        iteration, set to False, by default True.
    """

    def __init__(
        self,
        offsets: Tensor,
        indices: Tensor,
        num_src_nodes: int,
        num_dst_nodes: int,
        ef_indices: Optional[Tensor] = None,
        reverse_graph_bwd: bool = True,
        cache_graph: bool = True,
    ) -> None:
        self.offsets = offsets
        self.indices = indices
        self.num_src_nodes = num_src_nodes
        self.num_dst_nodes = num_dst_nodes
        self.ef_indices = ef_indices
        self.reverse_graph_bwd = reverse_graph_bwd
        self.cache_graph = cache_graph
        # lazily-built cugraph-ops graph wrappers, populated on first use
        # by to_bipartite_csc() / to_static_csc()
        self.bipartite_csc = None
        self.static_csc = None

    def to(self, *args: Any, **kwargs: Any) -> "CuGraphCSC":
        """Moves the object to the specified device, dtype, or format and returns the
        updated object.

        Parameters
        ----------
        *args : Any
            Positional arguments to be passed to the `torch._C._nn._parse_to` function.
        **kwargs : Any
            Keyword arguments to be passed to the `torch._C._nn._parse_to` function.

        Returns
        -------
        CuGraphCSC
            The updated object after moving to the specified device, dtype, or format.
        """
        # NOTE: torch._C._nn._parse_to is a private torch API; it parses the
        # same argument forms that torch.Tensor.to accepts.
        device, dtype, _, _ = torch._C._nn._parse_to(*args, **kwargs)
        # the tensors hold graph indices, so only integer dtypes make sense
        assert dtype in (
            None,
            torch.int32,
            torch.int64,
        ), f"Invalid dtype, expected torch.int32 or torch.int64, got {dtype}."
        self.offsets = self.offsets.to(device=device, dtype=dtype)
        self.indices = self.indices.to(device=device, dtype=dtype)
        if self.ef_indices is not None:
            self.ef_indices = self.ef_indices.to(device=device, dtype=dtype)
        return self

    def to_bipartite_csc(self, dtype=None):
        """Converts the graph to a bipartite CSC graph.

        Parameters
        ----------
        dtype : torch.dtype, optional
            The dtype of the graph, by default None

        Returns
        -------
        BipartiteCSC
            The bipartite CSC graph.
        """
        assert self.offsets.is_cuda, "Expected the graph structures to reside on GPU."
        # rebuild when no cached object exists or caching is disabled
        if self.bipartite_csc is None or not self.cache_graph:
            # Occasionally, we have to watch out for the IdxT type
            # of offsets and indices. Technically, they are only relevant
            # for storing node and edge indices. However, they are also used
            # to index pointers in the underlying kernels (for now). This means
            # that depending on the data dimension, one has to rely on int64
            # for the indices despite int32 technically being enough to store the
            # graph. This will be improved in cugraph-ops-23.06. Until then, allow
            # the change of dtype.
            graph_offsets = self.offsets
            graph_indices = self.indices
            graph_ef_indices = self.ef_indices
            if dtype is not None:
                graph_offsets = self.offsets.to(dtype=dtype)
                graph_indices = self.indices.to(dtype=dtype)
                if self.ef_indices is not None:
                    graph_ef_indices = self.ef_indices.to(dtype=dtype)
            graph = BipartiteCSC(
                graph_offsets,
                graph_indices,
                self.num_src_nodes,
                graph_ef_indices,
                reverse_graph_bwd=self.reverse_graph_bwd,
            )
            self.bipartite_csc = graph
        return self.bipartite_csc

    def to_static_csc(self, dtype=None):
        """Converts the graph to a static CSC graph.

        Parameters
        ----------
        dtype : torch.dtype, optional
            The dtype of the graph, by default None

        Returns
        -------
        StaticCSC
            The static CSC graph.
        """
        # NOTE(review): unlike to_bipartite_csc, there is no CUDA-residency
        # assert here — confirm whether one is needed.
        if self.static_csc is None or not self.cache_graph:
            # Occasionally, we have to watch out for the IdxT type
            # of offsets and indices. Technically, they are only relevant
            # for storing node and edge indices. However, they are also used
            # to index pointers in the underlying kernels (for now). This means
            # that depending on the data dimension, one has to rely on int64
            # for the indices despite int32 technically being enough to store the
            # graph. This will be improved in cugraph-ops-23.06. Until then, allow
            # the change of dtype.
            graph_offsets = self.offsets
            graph_indices = self.indices
            graph_ef_indices = self.ef_indices
            if dtype is not None:
                graph_offsets = self.offsets.to(dtype=dtype)
                graph_indices = self.indices.to(dtype=dtype)
                if self.ef_indices is not None:
                    graph_ef_indices = self.ef_indices.to(dtype=dtype)
            graph = StaticCSC(
                graph_offsets,
                graph_indices,
                graph_ef_indices,
            )
            self.static_csc = graph
        return self.static_csc
def checkpoint_identity(layer: Callable, *args: Any, **kwargs: Any) -> Any:
    """Applies the identity function for checkpointing.

    Drop-in stand-in used when gradient checkpointing is disabled: it
    simply invokes ``layer`` on the positional arguments and returns
    the result.

    Parameters
    ----------
    layer : Callable
        The model layer or function to apply to the input arguments.
    *args
        Positional arguments to be passed to the layer.
    **kwargs
        Keyword arguments accepted for signature compatibility with
        ``torch.utils.checkpoint.checkpoint``.
        NOTE(review): they are not forwarded to ``layer`` — confirm
        this is intentional.

    Returns
    -------
    Any
        The output of the specified layer after processing the input arguments.
    """
    result = layer(*args)
    return result
def set_checkpoint_fn(do_checkpointing: bool) -> Callable:
    """Sets checkpoint function.

    Selects between real gradient checkpointing and a plain pass-through:
    ``torch.utils.checkpoint.checkpoint`` when ``do_checkpointing`` is True,
    otherwise ``checkpoint_identity``, which simply calls the layer.

    Parameters
    ----------
    do_checkpointing : bool
        Whether to use checkpointing for gradient computation. Checkpointing
        can reduce memory usage during backpropagation at the cost of
        increased computation time.

    Returns
    -------
    Callable
        The selected checkpoint function to use for gradient computation.
    """
    return checkpoint if do_checkpointing else checkpoint_identity
def concat_message_function(edges: Tensor) -> Dict[str, Tensor]:
    """Edge-wise UDF concatenating edge, source-node, and destination-node features.

    Parameters
    ----------
    edges : Tensor
        DGL edge batch; ``edges.data``, ``edges.src`` and ``edges.dst``
        each expose an ``"x"`` feature tensor.

    Returns
    -------
    Dict[str, Tensor]
        Mapping with key ``"cat_feat"`` holding the concatenated features.
    """
    features = (edges.data["x"], edges.src["x"], edges.dst["x"])
    return {"cat_feat": torch.cat(features, dim=1)}
@torch.jit.ignore()
def concat_efeat_dgl(
    efeat: Tensor,
    nfeat: Union[Tensor, Tuple[torch.Tensor, torch.Tensor]],
    graph: DGLGraph,
) -> Tensor:
    """Concatenates edge features with source and destination node features.
    Use for homogeneous graphs.

    Parameters
    ----------
    efeat : Tensor
        Edge features.
    nfeat : Tensor | Tuple[Tensor, Tensor]
        Node features; a single tensor for homogeneous graphs, or a
        (src_feat, dst_feat) pair for bipartite graphs.
    graph : DGLGraph
        Graph.

    Returns
    -------
    Tensor
        Concatenated edge features with source and destination node features.
    """
    # use the builtin `tuple` for the runtime check (isinstance against
    # typing.Tuple is deprecated and non-idiomatic)
    if isinstance(nfeat, tuple):
        src_feat, dst_feat = nfeat
        # bipartite case: separate feature stores for src and dst nodes
        with graph.local_scope():
            graph.srcdata["x"] = src_feat
            graph.dstdata["x"] = dst_feat
            graph.edata["x"] = efeat
            graph.apply_edges(concat_message_function)
            return graph.edata["cat_feat"]

    # homogeneous case: one shared node feature store
    with graph.local_scope():
        graph.ndata["x"] = nfeat
        graph.edata["x"] = efeat
        graph.apply_edges(concat_message_function)
        return graph.edata["cat_feat"]
def concat_efeat(
    efeat: Tensor,
    nfeat: Union[Tensor, Tuple[Tensor]],
    graph: Union[DGLGraph, CuGraphCSC],
) -> Tensor:
    """Concatenates edge features with source and destination node features.
    Use for homogeneous graphs.

    Parameters
    ----------
    efeat : Tensor
        Edge features.
    nfeat : Tensor | Tuple[Tensor]
        Node features.
    graph : DGLGraph | CuGraphCSC
        Graph.

    Returns
    -------
    Tensor
        Concatenated edge features with source and destination node features.
    """
    use_cugraph = isinstance(graph, CuGraphCSC)

    if isinstance(nfeat, Tensor):
        # homogeneous setting: the same features serve src and dst nodes
        if use_cugraph:
            static_graph = graph.to_static_csc()
            return update_efeat_static_e2e(
                efeat,
                nfeat,
                static_graph,
                mode="concat",
                use_source_emb=True,
                use_target_emb=True,
            )
        return concat_efeat_dgl(efeat, nfeat, graph)

    # bipartite setting: distinct src- and dst-node features
    src_feat, dst_feat = nfeat
    if use_cugraph:
        # torch.int64 to avoid indexing overflows due to current behavior of cugraph-ops
        bipartite_graph = graph.to_bipartite_csc(dtype=torch.int64)
        return update_efeat_bipartite_e2e(
            efeat, src_feat, dst_feat, bipartite_graph, "concat"
        )
    return concat_efeat_dgl(efeat, (src_feat, dst_feat), graph)
@torch.jit.script
def sum_efeat_dgl(
    efeat: Tensor, src_feat: Tensor, dst_feat: Tensor, src_idx: Tensor, dst_idx: Tensor
) -> Tensor:
    """Sums edge features with source and destination node features.

    Parameters
    ----------
    efeat : Tensor
        Edge features.
    src_feat : Tensor
        Source node features.
    dst_feat : Tensor
        Destination node features.
    src_idx : Tensor
        Source node indices (one per edge).
    dst_idx : Tensor
        Destination node indices (one per edge).

    Returns
    -------
    Tensor
        Sum of edge features with source and destination node features.
    """
    # gather per-edge node features, then add all three terms
    gathered_src = src_feat[src_idx]
    gathered_dst = dst_feat[dst_idx]
    return efeat + gathered_src + gathered_dst
def sum_efeat(
    efeat: Tensor,
    nfeat: Union[Tensor, Tuple[Tensor]],
    graph: Union[DGLGraph, CuGraphCSC],
) -> Tensor:
    """Sums edge features with source and destination node features.

    Parameters
    ----------
    efeat : Tensor
        Edge features.
    nfeat : Tensor | Tuple[Tensor]
        Node features (static setting) or tuple of node features of
        source and destination nodes (bipartite setting).
    graph : DGLGraph | CuGraphCSC
        The underlying graph.

    Returns
    -------
    Tensor
        Sum of edge features with source and destination node features.
    """
    if isinstance(nfeat, Tensor):
        if isinstance(graph, CuGraphCSC):
            static_graph = graph.to_static_csc()
            # BUGFIX: use the static-graph operator here. The original code
            # passed a StaticCSC graph to update_efeat_bipartite_e2e, which
            # expects separate src/dst embeddings and a bipartite graph;
            # this mirrors the "concat" path in concat_efeat above.
            summed = update_efeat_static_e2e(
                efeat,
                nfeat,
                static_graph,
                mode="sum",
                use_source_emb=True,
                use_target_emb=True,
            )
        else:
            src_feat, dst_feat = nfeat, nfeat
            src, dst = (item.long() for item in graph.edges())
            summed = sum_efeat_dgl(efeat, src_feat, dst_feat, src, dst)
    else:
        src_feat, dst_feat = nfeat
        if isinstance(graph, CuGraphCSC):
            bipartite_graph = graph.to_bipartite_csc()
            summed = update_efeat_bipartite_e2e(
                efeat, src_feat, dst_feat, bipartite_graph, mode="sum"
            )
        else:
            src, dst = (item.long() for item in graph.edges())
            summed = sum_efeat_dgl(efeat, src_feat, dst_feat, src, dst)
    # local renamed from `sum_efeat` to avoid shadowing the function name
    return summed
@torch.jit.ignore()
def agg_concat_dgl(
    efeat: Tensor, dst_nfeat: Tensor, graph: DGLGraph, aggregation: str
) -> Tensor:
    """Aggregates edge features and concatenates result with destination node features.

    Parameters
    ----------
    efeat : Tensor
        Edge features.
    dst_nfeat : Tensor
        Node features (destination nodes).
    graph : DGLGraph
        Graph.
    aggregation : str
        Aggregation method (sum or mean).

    Returns
    -------
    Tensor
        Aggregated edge features concatenated with destination node features.

    Raises
    ------
    RuntimeError
        If aggregation method is not sum or mean.
    """
    # map aggregation names onto DGL's built-in reducers
    reducers = {"sum": fn.sum, "mean": fn.mean}
    if aggregation not in reducers:
        raise RuntimeError("Not a valid aggregation!")

    with graph.local_scope():
        graph.edata["x"] = efeat
        # aggregate edge features onto destination nodes
        graph.update_all(fn.copy_e("x", "m"), reducers[aggregation]("m", "h_dest"))
        # append the original dst-node features to the aggregate
        return torch.cat((graph.dstdata["h_dest"], dst_nfeat), -1)
def aggregate_and_concat(
    efeat: Tensor,
    nfeat: Tensor,
    graph: Union[DGLGraph, CuGraphCSC],
    aggregation: str,
):
    """
    Aggregates edge features and concatenates result with destination node features.

    Parameters
    ----------
    efeat : Tensor
        Edge features.
    nfeat : Tensor
        Node features (destination nodes).
    graph : DGLGraph | CuGraphCSC
        Graph.
    aggregation : str
        Aggregation method (sum or mean).

    Returns
    -------
    Tensor
        Aggregated edge features concatenated with destination node features.

    Raises
    ------
    RuntimeError
        If aggregation method is not sum or mean.
    """
    if isinstance(graph, CuGraphCSC):
        # fused aggregate+concat kernel from cugraph-ops
        return agg_concat_e2n(nfeat, efeat, graph.to_static_csc(), aggregation)
    # DGL fallback path
    return agg_concat_dgl(efeat, nfeat, graph, aggregation)
|
modulus-main
|
modulus/models/gnn_layers/utils.py
|
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch.nn as nn
from typing import Tuple
from torch import Tensor
from .mesh_graph_mlp import MeshGraphMLP
class GraphCastEncoderEmbedder(nn.Module):
    """GraphCast feature embedder for grid node features, multimesh node features,
    grid2mesh edge features, and multimesh edge features.

    Parameters
    ----------
    input_dim_grid_nodes : int, optional
        Input dimensionality of the grid node features, by default 474
    input_dim_mesh_nodes : int, optional
        Input dimensionality of the mesh node features, by default 3
    input_dim_edges : int, optional
        Input dimensionality of the edge features, by default 4
    output_dim : int, optional
        Dimensionality of the embedded features, by default 512
    hidden_dim : int, optional
        Number of neurons in each hidden layer, by default 512
    hidden_layers : int, optional
        Number of hidden layers, by default 1
    activation_fn : nn.Module, optional
        Type of activation function, by default nn.SiLU()
    norm_type : str, optional
        Normalization type, by default "LayerNorm"
    recompute_activation : bool, optional
        Flag for recomputing activation in backward to save memory, by default False.
        Currently, only SiLU is supported.
    """

    def __init__(
        self,
        input_dim_grid_nodes: int = 474,
        input_dim_mesh_nodes: int = 3,
        input_dim_edges: int = 4,
        output_dim: int = 512,
        hidden_dim: int = 512,
        hidden_layers: int = 1,
        activation_fn: nn.Module = nn.SiLU(),
        norm_type: str = "LayerNorm",
        recompute_activation: bool = False,
    ):
        super().__init__()

        # all four embedders share every hyper-parameter except input width
        shared = dict(
            output_dim=output_dim,
            hidden_dim=hidden_dim,
            hidden_layers=hidden_layers,
            activation_fn=activation_fn,
            norm_type=norm_type,
            recompute_activation=recompute_activation,
        )
        # MLP for grid node embedding
        self.grid_node_mlp = MeshGraphMLP(input_dim=input_dim_grid_nodes, **shared)
        # MLP for mesh node embedding
        self.mesh_node_mlp = MeshGraphMLP(input_dim=input_dim_mesh_nodes, **shared)
        # MLP for mesh edge embedding
        self.mesh_edge_mlp = MeshGraphMLP(input_dim=input_dim_edges, **shared)
        # MLP for grid2mesh edge embedding
        self.grid2mesh_edge_mlp = MeshGraphMLP(input_dim=input_dim_edges, **shared)

    def forward(
        self,
        grid_nfeat: Tensor,
        mesh_nfeat: Tensor,
        g2m_efeat: Tensor,
        mesh_efeat: Tensor,
    ) -> Tuple[Tensor, Tensor, Tensor, Tensor]:
        # embed node features, then edge features
        return (
            self.grid_node_mlp(grid_nfeat),
            self.mesh_node_mlp(mesh_nfeat),
            self.grid2mesh_edge_mlp(g2m_efeat),
            self.mesh_edge_mlp(mesh_efeat),
        )
class GraphCastDecoderEmbedder(nn.Module):
    """GraphCast feature embedder for mesh2grid edge features

    Parameters
    ----------
    input_dim_edges : int, optional
        Input dimensionality of the edge features, by default 4
    output_dim : int, optional
        Dimensionality of the embedded features, by default 512
    hidden_dim : int, optional
        Number of neurons in each hidden layer, by default 512
    hidden_layers : int, optional
        Number of hidden layers, by default 1
    activation_fn : nn.Module, optional
        Type of activation function, by default nn.SiLU()
    norm_type : str, optional
        Normalization type, by default "LayerNorm"
    recompute_activation : bool, optional
        Flag for recomputing activation in backward to save memory, by default False.
        Currently, only SiLU is supported.
    """

    def __init__(
        self,
        input_dim_edges: int = 4,
        output_dim: int = 512,
        hidden_dim: int = 512,
        hidden_layers: int = 1,
        activation_fn: nn.Module = nn.SiLU(),
        norm_type: str = "LayerNorm",
        recompute_activation: bool = False,
    ):
        super().__init__()
        # single MLP embedding the mesh2grid edge features
        self.mesh2grid_edge_mlp = MeshGraphMLP(
            input_dim=input_dim_edges,
            output_dim=output_dim,
            hidden_dim=hidden_dim,
            hidden_layers=hidden_layers,
            activation_fn=activation_fn,
            norm_type=norm_type,
            recompute_activation=recompute_activation,
        )

    def forward(
        self,
        m2g_efeat: Tensor,
    ) -> Tensor:
        # embed mesh2grid edge features
        return self.mesh2grid_edge_mlp(m2g_efeat)
|
modulus-main
|
modulus/models/gnn_layers/embedder.py
|
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
import torch.nn as nn
from typing import Union
from torch import Tensor
from dgl import DGLGraph
from .mesh_graph_mlp import MeshGraphMLP, MeshGraphEdgeMLPConcat, MeshGraphEdgeMLPSum
from .utils import aggregate_and_concat, CuGraphCSC
class MeshGraphDecoder(nn.Module):
    """Decoder used e.g. in GraphCast
    which acts on the bipartite graph connecting a mesh
    (e.g. representing a latent space) to a mostly regular
    grid (e.g. representing the output domain).

    Parameters
    ----------
    aggregation : str, optional
        Message passing aggregation method ("sum", "mean"), by default "sum"
    input_dim_src_nodes : int, optional
        Input dimensionality of the source node features, by default 512
    input_dim_dst_nodes : int, optional
        Input dimensionality of the destination node features, by default 512
    input_dim_edges : int, optional
        Input dimensionality of the edge features, by default 512
    output_dim_dst_nodes : int, optional
        Output dimensionality of the destination node features, by default 512
    output_dim_edges : int, optional
        Output dimensionality of the edge features, by default 512
    hidden_dim : int, optional
        Number of neurons in each hidden layer, by default 512
    hidden_layers : int, optional
        Number of hidden layers, by default 1
    activation_fn : nn.Module, optional
        Type of activation function, by default nn.SiLU()
    norm_type : str, optional
        Normalization type, by default "LayerNorm"
    do_concat_trick : bool, default=False
        Whether to replace concat+MLP with MLP+idx+sum
    recompute_activation : bool, optional
        Flag for recomputing activation in backward to save memory, by default False.
        Currently, only SiLU is supported.
    """

    def __init__(
        self,
        aggregation: str = "sum",
        input_dim_src_nodes: int = 512,
        input_dim_dst_nodes: int = 512,
        input_dim_edges: int = 512,
        output_dim_dst_nodes: int = 512,
        output_dim_edges: int = 512,
        hidden_dim: int = 512,
        hidden_layers: int = 1,
        activation_fn: nn.Module = nn.SiLU(),
        norm_type: str = "LayerNorm",
        do_concat_trick: bool = False,
        recompute_activation: bool = False,
    ):
        super().__init__()
        self.aggregation = aggregation

        # choose the edge-MLP flavour: the "concat trick" replaces
        # concat+MLP with per-input linear layers followed by a sum
        if do_concat_trick:
            edge_mlp_cls = MeshGraphEdgeMLPSum
        else:
            edge_mlp_cls = MeshGraphEdgeMLPConcat

        # edge MLP
        self.edge_mlp = edge_mlp_cls(
            efeat_dim=input_dim_edges,
            src_dim=input_dim_src_nodes,
            dst_dim=input_dim_dst_nodes,
            output_dim=output_dim_edges,
            hidden_dim=hidden_dim,
            hidden_layers=hidden_layers,
            activation_fn=activation_fn,
            norm_type=norm_type,
            recompute_activation=recompute_activation,
        )

        # dst node MLP
        self.node_mlp = MeshGraphMLP(
            input_dim=input_dim_dst_nodes + output_dim_edges,
            output_dim=output_dim_dst_nodes,
            hidden_dim=hidden_dim,
            hidden_layers=hidden_layers,
            activation_fn=activation_fn,
            norm_type=norm_type,
            recompute_activation=recompute_activation,
        )

    @torch.jit.ignore()
    def forward(
        self,
        m2g_efeat: Tensor,
        grid_nfeat: Tensor,
        mesh_nfeat: Tensor,
        graph: Union[DGLGraph, CuGraphCSC],
    ) -> Tensor:
        # update edge features based on (mesh -> grid) connectivity
        efeat = self.edge_mlp(m2g_efeat, (mesh_nfeat, grid_nfeat), graph)
        # aggregate messages (edge features) onto grid nodes and append
        # the current grid-node features
        cat_feat = aggregate_and_concat(efeat, grid_nfeat, graph, self.aggregation)
        # MLP transformation plus residual connection on the grid nodes
        return self.node_mlp(cat_feat) + grid_nfeat
|
modulus-main
|
modulus/models/gnn_layers/mesh_graph_decoder.py
|
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .fno import FNO
|
modulus-main
|
modulus/models/fno/__init__.py
|
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
import torch.nn as nn
import torch.nn.functional as F
import modulus.models.layers as layers
import modulus
from typing import Dict, List, Union, Tuple
from torch import Tensor
from dataclasses import dataclass
from ..meta import ModelMetaData
from ..module import Module
from ..mlp import FullyConnected
# ===================================================================
# ===================================================================
# 1D FNO
# ===================================================================
# ===================================================================
class FNO1DEncoder(nn.Module):
    """1D Spectral encoder for FNO

    Lifts the input channels to a latent width with a small pointwise
    network (optionally appending a normalized coordinate channel first),
    then applies a stack of spectral-convolution + pointwise-convolution
    layers with activations in between (no activation after the last).

    Parameters
    ----------
    in_channels : int, optional
        Number of input channels, by default 1
    num_fno_layers : int, optional
        Number of spectral convolutional layers, by default 4
    fno_layer_size : int, optional
        Latent features size in spectral convolutions, by default 32
    num_fno_modes : Union[int, List[int]], optional
        Number of Fourier modes kept in spectral convolutions, by default 16
    padding : Union[int, List[int]], optional
        Domain padding for spectral convolutions, by default 8
    padding_type : str, optional
        Type of padding for spectral convolutions, by default "constant"
    activation_fn : nn.Module, optional
        Activation function, by default nn.GELU
    coord_features : bool, optional
        Use coordinate grid as additional feature map, by default True
    """

    def __init__(
        self,
        in_channels: int = 1,
        num_fno_layers: int = 4,
        fno_layer_size: int = 32,
        num_fno_modes: Union[int, List[int]] = 16,
        padding: Union[int, List[int]] = 8,
        padding_type: str = "constant",
        activation_fn: nn.Module = nn.GELU(),
        coord_features: bool = True,
    ) -> None:
        super().__init__()
        self.in_channels = in_channels
        self.num_fno_layers = num_fno_layers
        self.fno_width = fno_layer_size
        self.coord_features = coord_features

        # Spectral modes to have weights; normalize to a list for uniform handling
        if isinstance(num_fno_modes, int):
            num_fno_modes = [num_fno_modes]

        # Add relative coordinate feature as one extra input channel
        if self.coord_features:
            self.in_channels = self.in_channels + 1
        self.activation_fn = activation_fn

        self.spconv_layers = nn.ModuleList()
        self.conv_layers = nn.ModuleList()

        # Initial lift network: pointwise MLP from input channels to latent width
        # (idiom: use integer floor division instead of int(x / 2))
        self.lift_network = torch.nn.Sequential()
        self.lift_network.append(
            layers.Conv1dFCLayer(self.in_channels, self.fno_width // 2)
        )
        self.lift_network.append(self.activation_fn)
        self.lift_network.append(
            layers.Conv1dFCLayer(self.fno_width // 2, self.fno_width)
        )

        # Build Neural Fourier Operators
        for _ in range(self.num_fno_layers):
            self.spconv_layers.append(
                layers.SpectralConv1d(self.fno_width, self.fno_width, num_fno_modes[0])
            )
            self.conv_layers.append(nn.Conv1d(self.fno_width, self.fno_width, 1))

        # Padding values for spectral conv; ipad holds the matching negative
        # slice bound used to strip the padding again (None when pad == 0)
        if isinstance(padding, int):
            padding = [padding]
        self.pad = padding[:1]
        self.ipad = [-pad if pad > 0 else None for pad in self.pad]
        self.padding_type = padding_type

    def forward(self, x: Tensor) -> Tensor:
        # assumes x is (batch, channels, length) — see meshgrid
        if self.coord_features:
            coord_feat = self.meshgrid(list(x.shape), x.device)
            x = torch.cat((x, coord_feat), dim=1)

        x = self.lift_network(x)
        # pad the right side of the domain only: (left, right)
        x = F.pad(x, (0, self.pad[0]), mode=self.padding_type)

        # Spectral layers; activation after every layer except the last
        # (idiom: unpack the zipped pair directly in the loop header)
        for k, (conv, w) in enumerate(zip(self.conv_layers, self.spconv_layers)):
            if k < len(self.conv_layers) - 1:
                x = self.activation_fn(conv(x) + w(x))
            else:
                x = conv(x) + w(x)

        # strip the domain padding again
        x = x[..., : self.ipad[0]]
        return x

    def meshgrid(self, shape: List[int], device: torch.device) -> Tensor:
        """Creates 1D meshgrid feature

        Parameters
        ----------
        shape : List[int]
            Tensor shape, assumed (batch, channels, length)
        device : torch.device
            Device model is on

        Returns
        -------
        Tensor
            Meshgrid tensor of shape (batch, 1, length) with values in [0, 1]
        """
        bsize, size_x = shape[0], shape[2]
        grid_x = torch.linspace(0, 1, size_x, dtype=torch.float32, device=device)
        grid_x = grid_x.unsqueeze(0).unsqueeze(0).repeat(bsize, 1, 1)
        return grid_x
# ===================================================================
# ===================================================================
# 2D FNO
# ===================================================================
# ===================================================================
class FNO2DEncoder(nn.Module):
    """2D spectral (Fourier) encoder for FNO.

    Lifts the input channels into a latent width, then applies a stack of
    paired spectral-convolution / pointwise-convolution layers on an
    (optionally padded) 2D grid.

    Parameters
    ----------
    in_channels : int, optional
        Number of input channels, by default 1
    num_fno_layers : int, optional
        Number of spectral convolutional layers, by default 4
    fno_layer_size : int, optional
        Latent features size in spectral convolutions, by default 32
    num_fno_modes : Union[int, List[int]], optional
        Number of Fourier modes kept in spectral convolutions, by default 16
    padding : Union[int, List[int]], optional
        Domain padding for spectral convolutions, by default 8
    padding_type : str, optional
        Type of padding for spectral convolutions, by default "constant"
    activation_fn : nn.Module, optional
        Activation function, by default nn.GELU
    coord_features : bool, optional
        Use coordinate grid as additional feature map, by default True
    """

    def __init__(
        self,
        in_channels: int = 1,
        num_fno_layers: int = 4,
        fno_layer_size: int = 32,
        num_fno_modes: Union[int, List[int]] = 16,
        padding: Union[int, List[int]] = 8,
        padding_type: str = "constant",
        activation_fn: nn.Module = nn.GELU(),
        coord_features: bool = True,
    ) -> None:
        super().__init__()
        self.num_fno_layers = num_fno_layers
        self.fno_width = fno_layer_size
        self.coord_features = coord_features
        # Two extra input channels carry the normalized (x, y) coordinate grid
        self.in_channels = in_channels + 2 if coord_features else in_channels
        self.activation_fn = activation_fn

        # Normalize the kept-mode count to one value per spatial dimension
        if isinstance(num_fno_modes, int):
            num_fno_modes = [num_fno_modes, num_fno_modes]

        # Pointwise lift network: in_channels -> width/2 -> width
        half_width = int(self.fno_width / 2)
        self.lift_network = torch.nn.Sequential()
        self.lift_network.append(layers.Conv2dFCLayer(self.in_channels, half_width))
        self.lift_network.append(self.activation_fn)
        self.lift_network.append(layers.Conv2dFCLayer(half_width, self.fno_width))

        # One (spectral conv, 1x1 conv) pair per FNO layer; modules are created
        # interleaved so the parameter-initialization order is unchanged
        self.spconv_layers = nn.ModuleList()
        self.conv_layers = nn.ModuleList()
        for _ in range(self.num_fno_layers):
            self.spconv_layers.append(
                layers.SpectralConv2d(
                    self.fno_width, self.fno_width, num_fno_modes[0], num_fno_modes[1]
                )
            )
            self.conv_layers.append(nn.Conv2d(self.fno_width, self.fno_width, 1))

        # Normalize padding to exactly [pad_x, pad_y]
        if isinstance(padding, int):
            padding = [padding, padding]
        self.pad = (padding + [0, 0])[:2]
        # Negative slice bounds used to strip the padding off again
        # (None keeps the full extent when no padding was applied)
        self.ipad = [-amount if amount > 0 else None for amount in self.pad]
        self.padding_type = padding_type

    def forward(self, x: Tensor) -> Tensor:
        assert (
            x.dim() == 4
        ), "Only 4D tensors [batch, in_channels, grid_x, grid_y] accepted for 2D FNO"
        # Append normalized (x, y) coordinates as extra input channels
        if self.coord_features:
            x = torch.cat((x, self.meshgrid(list(x.shape), x.device)), dim=1)
        x = self.lift_network(x)
        # F.pad pairs are (left, right, top, bottom); only right/bottom padded
        x = F.pad(x, (0, self.pad[1], 0, self.pad[0]), mode=self.padding_type)
        num_layers = len(self.conv_layers)
        # Spectral layers: no activation after the final layer
        for idx, pair in enumerate(zip(self.conv_layers, self.spconv_layers)):
            conv, spconv = pair
            x = conv(x) + spconv(x)
            if idx != num_layers - 1:
                x = self.activation_fn(x)
        # Strip the padding back off
        return x[..., : self.ipad[0], : self.ipad[1]]

    def meshgrid(self, shape: List[int], device: torch.device) -> Tensor:
        """Creates 2D meshgrid feature

        Parameters
        ----------
        shape : List[int]
            Tensor shape
        device : torch.device
            Device model is on

        Returns
        -------
        Tensor
            Meshgrid tensor
        """
        batch_size, nx, ny = shape[0], shape[2], shape[3]
        gx = torch.linspace(0, 1, nx, dtype=torch.float32, device=device)
        gy = torch.linspace(0, 1, ny, dtype=torch.float32, device=device)
        gx, gy = torch.meshgrid(gx, gy, indexing="ij")
        # Stack as two feature channels and broadcast over the batch
        grid = torch.stack((gx, gy), dim=0).unsqueeze(0)
        return grid.repeat(batch_size, 1, 1, 1)
# ===================================================================
# ===================================================================
# 3D FNO
# ===================================================================
# ===================================================================
class FNO3DEncoder(nn.Module):
    """3D Spectral encoder for FNO

    Parameters
    ----------
    in_channels : int, optional
        Number of input channels, by default 1
    num_fno_layers : int, optional
        Number of spectral convolutional layers, by default 4
    fno_layer_size : int, optional
        Latent features size in spectral convolutions, by default 32
    num_fno_modes : Union[int, List[int]], optional
        Number of Fourier modes kept in spectral convolutions, by default 16
    padding : Union[int, List[int]], optional
        Domain padding for spectral convolutions, by default 8
    padding_type : str, optional
        Type of padding for spectral convolutions, by default "constant"
    activation_fn : nn.Module, optional
        Activation function, by default nn.GELU
    coord_features : bool, optional
        Use coordinate grid as additional feature map, by default True
    """

    def __init__(
        self,
        in_channels: int = 1,
        num_fno_layers: int = 4,
        fno_layer_size: int = 32,
        num_fno_modes: Union[int, List[int]] = 16,
        padding: Union[int, List[int]] = 8,
        padding_type: str = "constant",
        activation_fn: nn.Module = nn.GELU(),
        coord_features: bool = True,
    ) -> None:
        super().__init__()
        self.in_channels = in_channels
        self.num_fno_layers = num_fno_layers
        self.fno_width = fno_layer_size
        self.coord_features = coord_features
        # Spectral modes to have weights: one count per spatial dimension
        if isinstance(num_fno_modes, int):
            num_fno_modes = [num_fno_modes, num_fno_modes, num_fno_modes]
        # Add relative coordinate feature (x, y, z channels)
        if self.coord_features:
            self.in_channels = self.in_channels + 3
        self.activation_fn = activation_fn

        self.spconv_layers = nn.ModuleList()
        self.conv_layers = nn.ModuleList()

        # Initial lift network: in_channels -> width/2 -> width
        self.lift_network = torch.nn.Sequential()
        self.lift_network.append(
            layers.Conv3dFCLayer(self.in_channels, int(self.fno_width / 2))
        )
        self.lift_network.append(self.activation_fn)
        self.lift_network.append(
            layers.Conv3dFCLayer(int(self.fno_width / 2), self.fno_width)
        )

        # Build Neural Fourier Operators: one (spectral, 1x1 conv) pair per layer
        for _ in range(self.num_fno_layers):
            self.spconv_layers.append(
                layers.SpectralConv3d(
                    self.fno_width,
                    self.fno_width,
                    num_fno_modes[0],
                    num_fno_modes[1],
                    num_fno_modes[2],
                )
            )
            self.conv_layers.append(nn.Conv3d(self.fno_width, self.fno_width, 1))

        # Padding values for spectral conv, normalized to [pad_x, pad_y, pad_z]
        if isinstance(padding, int):
            padding = [padding, padding, padding]
        padding = padding + [0, 0, 0]  # Pad with zeros for smaller lists
        self.pad = padding[:3]
        # Negative slice bounds used to strip padding again (None = keep all)
        self.ipad = [-pad if pad > 0 else None for pad in self.pad]
        self.padding_type = padding_type

    def forward(self, x: Tensor) -> Tensor:
        """Forward pass.

        Parameters
        ----------
        x : Tensor
            Input of shape [batch, in_channels, grid_x, grid_y, grid_z]

        Returns
        -------
        Tensor
            Latent tensor of shape [batch, fno_width, grid_x, grid_y, grid_z]
        """
        # Fail fast on wrongly-shaped input, consistent with FNO2DEncoder
        assert (
            x.dim() == 5
        ), "Only 5D tensors [batch, in_channels, grid_x, grid_y, grid_z] accepted for 3D FNO"
        if self.coord_features:
            coord_feat = self.meshgrid(list(x.shape), x.device)
            x = torch.cat((x, coord_feat), dim=1)
        x = self.lift_network(x)
        # F.pad pairs run from the LAST dimension backwards:
        # (left, right, top, bottom, front, back)
        x = F.pad(
            x,
            (0, self.pad[2], 0, self.pad[1], 0, self.pad[0]),
            mode=self.padding_type,
        )
        # Spectral layers; no activation after the final layer
        for k, conv_w in enumerate(zip(self.conv_layers, self.spconv_layers)):
            conv, w = conv_w
            if k < len(self.conv_layers) - 1:
                x = self.activation_fn(conv(x) + w(x))
            else:
                x = conv(x) + w(x)
        # Remove padding
        x = x[..., : self.ipad[0], : self.ipad[1], : self.ipad[2]]
        return x

    def meshgrid(self, shape: List[int], device: torch.device) -> Tensor:
        """Creates 3D meshgrid feature

        Parameters
        ----------
        shape : List[int]
            Tensor shape
        device : torch.device
            Device model is on

        Returns
        -------
        Tensor
            Meshgrid tensor
        """
        bsize, size_x, size_y, size_z = shape[0], shape[2], shape[3], shape[4]
        grid_x = torch.linspace(0, 1, size_x, dtype=torch.float32, device=device)
        grid_y = torch.linspace(0, 1, size_y, dtype=torch.float32, device=device)
        grid_z = torch.linspace(0, 1, size_z, dtype=torch.float32, device=device)
        grid_x, grid_y, grid_z = torch.meshgrid(grid_x, grid_y, grid_z, indexing="ij")
        # Broadcast each coordinate over the batch as its own feature channel
        grid_x = grid_x.unsqueeze(0).unsqueeze(0).repeat(bsize, 1, 1, 1, 1)
        grid_y = grid_y.unsqueeze(0).unsqueeze(0).repeat(bsize, 1, 1, 1, 1)
        grid_z = grid_z.unsqueeze(0).unsqueeze(0).repeat(bsize, 1, 1, 1, 1)
        return torch.cat((grid_x, grid_y, grid_z), dim=1)
# ===================================================================
# ===================================================================
# 4D FNO
# ===================================================================
# ===================================================================
class FNO4DEncoder(nn.Module):
    """4D Spectral encoder for FNO

    Parameters
    ----------
    in_channels : int, optional
        Number of input channels, by default 1
    num_fno_layers : int, optional
        Number of spectral convolutional layers, by default 4
    fno_layer_size : int, optional
        Latent features size in spectral convolutions, by default 32
    num_fno_modes : Union[int, List[int]], optional
        Number of Fourier modes kept in spectral convolutions, by default 16
    padding : Union[int, List[int]], optional
        Domain padding for spectral convolutions, by default 8
    padding_type : str, optional
        Type of padding for spectral convolutions, by default "constant"
    activation_fn : nn.Module, optional
        Activation function, by default nn.GELU
    coord_features : bool, optional
        Use coordinate grid as additional feature map, by default True
    """

    def __init__(
        self,
        in_channels: int = 1,
        num_fno_layers: int = 4,
        fno_layer_size: int = 32,
        num_fno_modes: Union[int, List[int]] = 16,
        padding: Union[int, List[int]] = 8,
        padding_type: str = "constant",
        activation_fn: nn.Module = nn.GELU(),
        coord_features: bool = True,
    ) -> None:
        super().__init__()
        self.in_channels = in_channels
        self.num_fno_layers = num_fno_layers
        self.fno_width = fno_layer_size
        self.coord_features = coord_features
        # Spectral modes to have weights: one count per spatial/temporal dim
        if isinstance(num_fno_modes, int):
            num_fno_modes = [num_fno_modes, num_fno_modes, num_fno_modes, num_fno_modes]
        # Add relative coordinate feature (x, y, z, t channels)
        if self.coord_features:
            self.in_channels = self.in_channels + 4
        self.activation_fn = activation_fn

        self.spconv_layers = nn.ModuleList()
        self.conv_layers = nn.ModuleList()

        # Initial lift network: in_channels -> width/2 -> width
        self.lift_network = torch.nn.Sequential()
        self.lift_network.append(
            layers.ConvNdFCLayer(self.in_channels, int(self.fno_width / 2))
        )
        self.lift_network.append(self.activation_fn)
        self.lift_network.append(
            layers.ConvNdFCLayer(int(self.fno_width / 2), self.fno_width)
        )

        # Build Neural Fourier Operators: one (spectral, kernel-1 conv) pair per layer
        for _ in range(self.num_fno_layers):
            self.spconv_layers.append(
                layers.SpectralConv4d(
                    self.fno_width,
                    self.fno_width,
                    num_fno_modes[0],
                    num_fno_modes[1],
                    num_fno_modes[2],
                    num_fno_modes[3],
                )
            )
            self.conv_layers.append(
                layers.ConvNdKernel1Layer(self.fno_width, self.fno_width)
            )

        # Padding values for spectral conv, normalized to [pad_x, pad_y, pad_z, pad_t]
        if isinstance(padding, int):
            padding = [padding, padding, padding, padding]
        padding = padding + [0, 0, 0, 0]  # Pad with zeros for smaller lists
        self.pad = padding[:4]
        # Negative slice bounds used to strip padding again (None = keep all)
        self.ipad = [-pad if pad > 0 else None for pad in self.pad]
        self.padding_type = padding_type

    def forward(self, x: Tensor) -> Tensor:
        """Forward pass.

        Parameters
        ----------
        x : Tensor
            Input of shape [batch, in_channels, grid_x, grid_y, grid_z, grid_t]

        Returns
        -------
        Tensor
            Latent tensor of shape [batch, fno_width, grid_x, grid_y, grid_z, grid_t]
        """
        # Fail fast on wrongly-shaped input, consistent with FNO2DEncoder
        assert (
            x.dim() == 6
        ), "Only 6D tensors [batch, in_channels, grid_x, grid_y, grid_z, grid_t] accepted for 4D FNO"
        if self.coord_features:
            coord_feat = self.meshgrid(list(x.shape), x.device)
            x = torch.cat((x, coord_feat), dim=1)
        x = self.lift_network(x)
        # F.pad pairs run from the LAST dimension backwards:
        # (left, right, top, bottom, front, back, past, future)
        x = F.pad(
            x,
            (0, self.pad[3], 0, self.pad[2], 0, self.pad[1], 0, self.pad[0]),
            mode=self.padding_type,
        )
        # Spectral layers; no activation after the final layer
        for k, conv_w in enumerate(zip(self.conv_layers, self.spconv_layers)):
            conv, w = conv_w
            if k < len(self.conv_layers) - 1:
                x = self.activation_fn(conv(x) + w(x))
            else:
                x = conv(x) + w(x)
        # Remove padding
        x = x[..., : self.ipad[0], : self.ipad[1], : self.ipad[2], : self.ipad[3]]
        return x

    def meshgrid(self, shape: List[int], device: torch.device) -> Tensor:
        """Creates 4D meshgrid feature

        Parameters
        ----------
        shape : List[int]
            Tensor shape
        device : torch.device
            Device model is on

        Returns
        -------
        Tensor
            Meshgrid tensor
        """
        bsize, size_x, size_y, size_z, size_t = (
            shape[0],
            shape[2],
            shape[3],
            shape[4],
            shape[5],
        )
        grid_x = torch.linspace(0, 1, size_x, dtype=torch.float32, device=device)
        grid_y = torch.linspace(0, 1, size_y, dtype=torch.float32, device=device)
        grid_z = torch.linspace(0, 1, size_z, dtype=torch.float32, device=device)
        grid_t = torch.linspace(0, 1, size_t, dtype=torch.float32, device=device)
        grid_x, grid_y, grid_z, grid_t = torch.meshgrid(
            grid_x, grid_y, grid_z, grid_t, indexing="ij"
        )
        # Broadcast each coordinate over the batch as its own feature channel
        grid_x = grid_x.unsqueeze(0).unsqueeze(0).repeat(bsize, 1, 1, 1, 1, 1)
        grid_y = grid_y.unsqueeze(0).unsqueeze(0).repeat(bsize, 1, 1, 1, 1, 1)
        grid_z = grid_z.unsqueeze(0).unsqueeze(0).repeat(bsize, 1, 1, 1, 1, 1)
        grid_t = grid_t.unsqueeze(0).unsqueeze(0).repeat(bsize, 1, 1, 1, 1, 1)
        return torch.cat((grid_x, grid_y, grid_z, grid_t), dim=1)
# Functions for converting between point based and grid (image) representations
def _grid_to_points1d(value: Tensor) -> Tuple[Tensor, List[int]]:
y_shape = list(value.size())
output = torch.permute(value, (0, 2, 1))
return output.reshape(-1, output.size(-1)), y_shape
def _points_to_grid1d(value: Tensor, shape: List[int]) -> Tensor:
output = value.reshape(shape[0], shape[2], value.size(-1))
return torch.permute(output, (0, 2, 1))
def _grid_to_points2d(value: Tensor) -> Tuple[Tensor, List[int]]:
y_shape = list(value.size())
output = torch.permute(value, (0, 2, 3, 1))
return output.reshape(-1, output.size(-1)), y_shape
def _points_to_grid2d(value: Tensor, shape: List[int]) -> Tensor:
output = value.reshape(shape[0], shape[2], shape[3], value.size(-1))
return torch.permute(output, (0, 3, 1, 2))
def _grid_to_points3d(value: Tensor) -> Tuple[Tensor, List[int]]:
y_shape = list(value.size())
output = torch.permute(value, (0, 2, 3, 4, 1))
return output.reshape(-1, output.size(-1)), y_shape
def _points_to_grid3d(value: Tensor, shape: List[int]) -> Tensor:
output = value.reshape(shape[0], shape[2], shape[3], shape[4], value.size(-1))
return torch.permute(output, (0, 4, 1, 2, 3))
def _grid_to_points4d(value: Tensor) -> Tuple[Tensor, List[int]]:
y_shape = list(value.size())
output = torch.permute(value, (0, 2, 3, 4, 5, 1))
return output.reshape(-1, output.size(-1)), y_shape
def _points_to_grid4d(value: Tensor, shape: List[int]) -> Tensor:
output = value.reshape(
shape[0], shape[2], shape[3], shape[4], shape[5], value.size(-1)
)
return torch.permute(output, (0, 5, 1, 2, 3, 4))
# ===================================================================
# ===================================================================
# General FNO Model
# ===================================================================
# ===================================================================
@dataclass
class MetaData(ModelMetaData):
    """Capability flags for the FNO model; passed to Module via ``meta``
    in FNO.__init__ (see ModelMetaData for field semantics)."""

    name: str = "FourierNeuralOperator"
    # Optimization
    jit: bool = True
    cuda_graphs: bool = True
    amp: bool = False
    # Inference
    onnx_cpu: bool = False
    onnx_gpu: bool = False
    onnx_runtime: bool = False
    # Physics informed
    var_dim: int = 1
    func_torch: bool = False
    auto_grad: bool = False
class FNO(Module):
    """Fourier neural operator (FNO) model.

    Note
    ----
    The FNO architecture supports options for 1D, 2D, 3D and 4D fields which can
    be controlled using the `dimension` parameter.

    Parameters
    ----------
    in_channels : int
        Number of input channels
    out_channels : int
        Number of output channels
    decoder_layers : int, optional
        Number of decoder layers, by default 1
    decoder_layer_size : int, optional
        Number of neurons in decoder layers, by default 32
    decoder_activation_fn : str, optional
        Activation function for decoder, by default "silu"
    dimension : int
        Model dimensionality (supports 1, 2, 3 and 4).
    latent_channels : int, optional
        Latent features size in spectral convolutions, by default 32
    num_fno_layers : int, optional
        Number of spectral convolutional layers, by default 4
    num_fno_modes : Union[int, List[int]], optional
        Number of Fourier modes kept in spectral convolutions, by default 16
    padding : int, optional
        Domain padding for spectral convolutions, by default 8
    padding_type : str, optional
        Type of padding for spectral convolutions, by default "constant"
    activation_fn : str, optional
        Activation function, by default "gelu"
    coord_features : bool, optional
        Use coordinate grid as additional feature map, by default True

    Example
    -------
    >>> # define the 2d FNO model
    >>> model = modulus.models.fno.FNO(
    ...     in_channels=4,
    ...     out_channels=3,
    ...     decoder_layers=2,
    ...     decoder_layer_size=32,
    ...     dimension=2,
    ...     latent_channels=32,
    ...     num_fno_layers=2,
    ...     padding=0,
    ... )
    >>> input = torch.randn(32, 4, 32, 32) #(N, C, H, W)
    >>> output = model(input)
    >>> output.size()
    torch.Size([32, 3, 32, 32])

    Note
    ----
    Reference: Li, Zongyi, et al. "Fourier neural operator for parametric
    partial differential equations." arXiv preprint arXiv:2010.08895 (2020).
    """

    def __init__(
        self,
        in_channels: int,
        out_channels: int,
        decoder_layers: int = 1,
        decoder_layer_size: int = 32,
        decoder_activation_fn: str = "silu",
        dimension: int = 2,
        latent_channels: int = 32,
        num_fno_layers: int = 4,
        num_fno_modes: Union[int, List[int]] = 16,
        padding: int = 8,
        padding_type: str = "constant",
        activation_fn: str = "gelu",
        coord_features: bool = True,
    ) -> None:
        super().__init__(meta=MetaData())
        self.num_fno_layers = num_fno_layers
        self.num_fno_modes = num_fno_modes
        self.padding = padding
        self.padding_type = padding_type
        self.activation_fn = layers.get_activation(activation_fn)
        self.coord_features = coord_features

        # Pointwise decoder net: latent channels -> output channels
        self.decoder_net = FullyConnected(
            in_features=latent_channels,
            layer_size=decoder_layer_size,
            out_features=out_channels,
            num_layers=decoder_layers,
            activation_fn=decoder_activation_fn,
        )

        # Select the spectral encoder and the grid<->point converters
        # (free functions are used so the model remains TorchScript-able)
        if dimension == 1:
            FNOModel = FNO1DEncoder
            self.grid_to_points = _grid_to_points1d  # For JIT
            self.points_to_grid = _points_to_grid1d  # For JIT
        elif dimension == 2:
            FNOModel = FNO2DEncoder
            self.grid_to_points = _grid_to_points2d  # For JIT
            self.points_to_grid = _points_to_grid2d  # For JIT
        elif dimension == 3:
            FNOModel = FNO3DEncoder
            self.grid_to_points = _grid_to_points3d  # For JIT
            self.points_to_grid = _points_to_grid3d  # For JIT
        elif dimension == 4:
            FNOModel = FNO4DEncoder
            self.grid_to_points = _grid_to_points4d  # For JIT
            self.points_to_grid = _points_to_grid4d  # For JIT
        else:
            raise NotImplementedError(
                "Invalid dimensionality. Only 1D, 2D, 3D and 4D FNO implemented"
            )

        self.spec_encoder = FNOModel(
            in_channels,
            num_fno_layers=self.num_fno_layers,
            fno_layer_size=latent_channels,
            num_fno_modes=self.num_fno_modes,
            padding=self.padding,
            padding_type=self.padding_type,
            activation_fn=self.activation_fn,
            coord_features=self.coord_features,
        )

    def forward(self, x: Tensor) -> Tensor:
        # Fourier encoder
        y_latent = self.spec_encoder(x)

        # Reshape latent grid to pointwise inputs for the decoder.
        # (A dead pre-assignment of y_shape was removed here; the shape is
        # returned by grid_to_points.)
        y_latent, y_shape = self.grid_to_points(y_latent)

        # Decoder
        y = self.decoder_net(y_latent)
        # Convert back into grid
        y = self.points_to_grid(y, y_shape)
        return y
|
modulus-main
|
modulus/models/fno/fno.py
|
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
import torch.nn as nn
import torch.nn.functional as F
import modulus
import modulus.models.layers.fft as fft
from functools import partial
from typing import Tuple, Any
from dataclasses import dataclass
from ..meta import ModelMetaData
from ..module import Module
Tensor = torch.Tensor
class AFNOMlp(nn.Module):
    """Fully-connected Multi-layer perception used inside AFNO

    Two linear layers with an activation in between; dropout is applied
    after each linear layer.

    Parameters
    ----------
    in_features : int
        Input feature size
    latent_features : int
        Latent feature size
    out_features : int
        Output feature size
    activation_fn : nn.Module, optional
        Activation function, by default nn.GELU
    drop : float, optional
        Drop out rate, by default 0.0
    """

    def __init__(
        self,
        in_features: int,
        latent_features: int,
        out_features: int,
        activation_fn: nn.Module = nn.GELU(),
        drop: float = 0.0,
    ):
        super().__init__()
        self.fc1 = nn.Linear(in_features, latent_features)
        self.act = activation_fn
        self.fc2 = nn.Linear(latent_features, out_features)
        self.drop = nn.Dropout(drop)

    def forward(self, x: Tensor) -> Tensor:
        # fc1 -> activation -> dropout -> fc2 -> dropout
        hidden = self.drop(self.act(self.fc1(x)))
        return self.drop(self.fc2(hidden))
class AFNO2DLayer(nn.Module):
    """AFNO spectral convolution layer

    Token mixing in the Fourier domain: the (H, W) grid is transformed with a
    real 2D FFT, the kept low-frequency modes are passed through a two-layer
    block-diagonal complex MLP, soft-shrunk for sparsity, and transformed back.
    The input is added back as a residual.

    Parameters
    ----------
    hidden_size : int
        Feature dimensionality
    num_blocks : int, optional
        Number of blocks used in the block diagonal weight matrix, by default 8
    sparsity_threshold : float, optional
        Sparsity threshold (softshrink) of spectral features, by default 0.01
    hard_thresholding_fraction : float, optional
        Threshold for limiting number of modes used [0,1], by default 1
    hidden_size_factor : int, optional
        Factor to increase spectral features by after weight multiplication, by default 1
    """

    def __init__(
        self,
        hidden_size: int,
        num_blocks: int = 8,
        sparsity_threshold: float = 0.01,
        hard_thresholding_fraction: float = 1,
        hidden_size_factor: int = 1,
    ):
        super().__init__()
        assert (
            hidden_size % num_blocks == 0
        ), f"hidden_size {hidden_size} should be divisible by num_blocks {num_blocks}"

        self.hidden_size = hidden_size
        self.sparsity_threshold = sparsity_threshold
        self.num_blocks = num_blocks
        # Channels are split into num_blocks groups of block_size each
        self.block_size = self.hidden_size // self.num_blocks
        self.hard_thresholding_fraction = hard_thresholding_fraction
        self.hidden_size_factor = hidden_size_factor
        # Initialization scale for the spectral weights
        self.scale = 0.02

        # w1/b1: first complex block-diagonal layer. Leading dim 2 holds the
        # (real, imaginary) parts; expands block_size by hidden_size_factor.
        self.w1 = nn.Parameter(
            self.scale
            * torch.randn(
                2,
                self.num_blocks,
                self.block_size,
                self.block_size * self.hidden_size_factor,
            )
        )
        self.b1 = nn.Parameter(
            self.scale
            * torch.randn(2, self.num_blocks, self.block_size * self.hidden_size_factor)
        )
        # w2/b2: second complex layer, projecting back down to block_size
        self.w2 = nn.Parameter(
            self.scale
            * torch.randn(
                2,
                self.num_blocks,
                self.block_size * self.hidden_size_factor,
                self.block_size,
            )
        )
        self.b2 = nn.Parameter(
            self.scale * torch.randn(2, self.num_blocks, self.block_size)
        )

    def forward(self, x: Tensor) -> Tensor:
        # Input is channels-last: [B, H, W, C]
        bias = x  # residual, added back after the inverse FFT

        dtype = x.dtype
        x = x.float()  # FFT arithmetic in fp32 (e.g. under AMP)
        B, H, W, C = x.shape

        # Using ONNX friendly FFT functions
        x = fft.rfft2(x, dim=(1, 2), norm="ortho")
        x_real, x_imag = fft.real(x), fft.imag(x)
        # Split channels into [num_blocks, block_size] for the block-diagonal
        # multiply; rfft2 reduces the last transformed dim to W // 2 + 1
        x_real = x_real.reshape(B, H, W // 2 + 1, self.num_blocks, self.block_size)
        x_imag = x_imag.reshape(B, H, W // 2 + 1, self.num_blocks, self.block_size)

        # First-layer outputs (real/imag); entries outside the kept modes stay zero
        o1_real = torch.zeros(
            [
                B,
                H,
                W // 2 + 1,
                self.num_blocks,
                self.block_size * self.hidden_size_factor,
            ],
            device=x.device,
        )
        o1_imag = torch.zeros(
            [
                B,
                H,
                W // 2 + 1,
                self.num_blocks,
                self.block_size * self.hidden_size_factor,
            ],
            device=x.device,
        )
        # Second-layer output; trailing dim of size 2 holds (real, imag)
        o2 = torch.zeros(x_real.shape + (2,), device=x.device)

        # Hard thresholding: only a centered band of H-modes and the lowest
        # kept_modes W-modes are processed; the rest remain zero
        total_modes = H // 2 + 1
        kept_modes = int(total_modes * self.hard_thresholding_fraction)

        # Complex multiply, real part: Re(xw) = x_r*w_r - x_i*w_i, then ReLU
        o1_real[
            :, total_modes - kept_modes : total_modes + kept_modes, :kept_modes
        ] = F.relu(
            torch.einsum(
                "nyxbi,bio->nyxbo",
                x_real[
                    :, total_modes - kept_modes : total_modes + kept_modes, :kept_modes
                ],
                self.w1[0],
            )
            - torch.einsum(
                "nyxbi,bio->nyxbo",
                x_imag[
                    :, total_modes - kept_modes : total_modes + kept_modes, :kept_modes
                ],
                self.w1[1],
            )
            + self.b1[0]
        )
        # Complex multiply, imaginary part: Im(xw) = x_i*w_r + x_r*w_i
        o1_imag[
            :, total_modes - kept_modes : total_modes + kept_modes, :kept_modes
        ] = F.relu(
            torch.einsum(
                "nyxbi,bio->nyxbo",
                x_imag[
                    :, total_modes - kept_modes : total_modes + kept_modes, :kept_modes
                ],
                self.w1[0],
            )
            + torch.einsum(
                "nyxbi,bio->nyxbo",
                x_real[
                    :, total_modes - kept_modes : total_modes + kept_modes, :kept_modes
                ],
                self.w1[1],
            )
            + self.b1[1]
        )
        # Second complex layer (no activation); real part into channel 0
        o2[
            :, total_modes - kept_modes : total_modes + kept_modes, :kept_modes, ..., 0
        ] = (
            torch.einsum(
                "nyxbi,bio->nyxbo",
                o1_real[
                    :, total_modes - kept_modes : total_modes + kept_modes, :kept_modes
                ],
                self.w2[0],
            )
            - torch.einsum(
                "nyxbi,bio->nyxbo",
                o1_imag[
                    :, total_modes - kept_modes : total_modes + kept_modes, :kept_modes
                ],
                self.w2[1],
            )
            + self.b2[0]
        )
        # ... and imaginary part into channel 1
        o2[
            :, total_modes - kept_modes : total_modes + kept_modes, :kept_modes, ..., 1
        ] = (
            torch.einsum(
                "nyxbi,bio->nyxbo",
                o1_imag[
                    :, total_modes - kept_modes : total_modes + kept_modes, :kept_modes
                ],
                self.w2[0],
            )
            + torch.einsum(
                "nyxbi,bio->nyxbo",
                o1_real[
                    :, total_modes - kept_modes : total_modes + kept_modes, :kept_modes
                ],
                self.w2[1],
            )
            + self.b2[1]
        )

        # Soft shrinkage drives small spectral coefficients to exactly zero
        x = F.softshrink(o2, lambd=self.sparsity_threshold)
        x = fft.view_as_complex(x)
        # TODO(akamenev): replace the following branching with
        # a one-liner, something like x.reshape(..., -1).squeeze(-1),
        # but this currently fails during ONNX export.
        # NOTE(review): the ONNX branch keeps the trailing (real, imag) pair —
        # presumably fft.view_as_complex stays real-valued when exporting; confirm.
        if torch.onnx.is_in_onnx_export():
            x = x.reshape(B, H, W // 2 + 1, C, 2)
        else:
            x = x.reshape(B, H, W // 2 + 1, C)

        # Using ONNX friendly FFT functions
        x = fft.irfft2(x, s=(H, W), dim=(1, 2), norm="ortho")
        x = x.type(dtype)  # restore the caller's dtype
        return x + bias
class Block(nn.Module):
    """AFNO block, spectral convolution and MLP

    Parameters
    ----------
    embed_dim : int
        Embedded feature dimensionality
    num_blocks : int, optional
        Number of blocks used in the block diagonal weight matrix, by default 8
    mlp_ratio : float, optional
        Ratio of MLP latent variable size to input feature size, by default 4.0
    drop : float, optional
        Drop out rate in MLP, by default 0.0
    activation_fn: nn.Module, optional
        Activation function used in MLP, by default nn.GELU
    norm_layer : nn.Module, optional
        Normalization function, by default nn.LayerNorm
    double_skip : bool, optional
        Residual, by default True
    sparsity_threshold : float, optional
        Sparsity threshold (softshrink) of spectral features, by default 0.01
    hard_thresholding_fraction : float, optional
        Threshold for limiting number of modes used [0,1], by default 1
    """

    def __init__(
        self,
        embed_dim: int,
        num_blocks: int = 8,
        mlp_ratio: float = 4.0,
        drop: float = 0.0,
        activation_fn: nn.Module = nn.GELU(),
        norm_layer: nn.Module = nn.LayerNorm,
        double_skip: bool = True,
        sparsity_threshold: float = 0.01,
        hard_thresholding_fraction: float = 1.0,
    ):
        super().__init__()
        self.norm1 = norm_layer(embed_dim)
        # Spectral token-mixing filter
        self.filter = AFNO2DLayer(
            embed_dim, num_blocks, sparsity_threshold, hard_thresholding_fraction
        )
        self.norm2 = norm_layer(embed_dim)
        # Channel-mixing MLP, widened by mlp_ratio
        self.mlp = AFNOMlp(
            in_features=embed_dim,
            latent_features=int(embed_dim * mlp_ratio),
            out_features=embed_dim,
            activation_fn=activation_fn,
            drop=drop,
        )
        self.double_skip = double_skip

    def forward(self, x: Tensor) -> Tensor:
        residual = x
        # Spectral filter branch
        x = self.filter(self.norm1(x))
        # Optionally add a skip after the filter and restart the residual
        if self.double_skip:
            x = x + residual
            residual = x
        # MLP branch
        x = self.mlp(self.norm2(x))
        return x + residual
class PatchEmbed(nn.Module):
    """Patch embedding layer

    Converts 2D patch into a 1D vector for input to AFNO

    Parameters
    ----------
    img_size : Tuple[int, int]
        Input image dimensions (height, width)
    in_channels : int
        Number of input channels
    patch_size : Tuple[int, int], optional
        Size of image patches, by default (16, 16)
    embed_dim : int, optional
        Embedded channel size, by default 256
    """

    def __init__(
        self,
        img_size: Tuple[int, int],
        in_channels: int,
        patch_size: Tuple[int, int] = (16, 16),
        embed_dim: int = 256,
    ):
        super().__init__()
        self.img_size = img_size
        self.patch_size = patch_size
        # Number of non-overlapping patches tiling the image
        self.num_patches = (img_size[0] // patch_size[0]) * (
            img_size[1] // patch_size[1]
        )
        # A strided convolution both extracts and embeds each patch
        self.proj = nn.Conv2d(
            in_channels, embed_dim, kernel_size=patch_size, stride=patch_size
        )

    def forward(self, x: Tensor) -> Tensor:
        B, C, H, W = x.shape
        assert (
            H == self.img_size[0] and W == self.img_size[1]
        ), f"Input image size ({H}*{W}) doesn't match model ({self.img_size[0]}*{self.img_size[1]})."
        # [B, C, H, W] -> [B, embed_dim, h, w] -> [B, h*w, embed_dim]
        embedded = self.proj(x)
        return embedded.flatten(2).transpose(1, 2)
@dataclass
class MetaData(ModelMetaData):
    """Capability flags for the AFNO model; passed to Module via ``meta``
    in AFNO.__init__ (see ModelMetaData for field semantics)."""

    name: str = "AFNO"
    # Optimization
    jit: bool = False  # ONNX Ops Conflict
    cuda_graphs: bool = True
    amp: bool = True
    # Inference
    onnx_cpu: bool = False  # No FFT op on CPU
    onnx_gpu: bool = True
    onnx_runtime: bool = True
    # Physics informed
    var_dim: int = 1
    func_torch: bool = False
    auto_grad: bool = False
class AFNO(Module):
    """Adaptive Fourier neural operator (AFNO) model.

    Note
    ----
    AFNO is a model that is designed for 2D images only.

    Parameters
    ----------
    img_size : Tuple[int, int]
        Input image dimensions (height, width)
    in_channels : int
        Number of input channels
    out_channels: int
        Number of output channels
    patch_size : Tuple[int, int], optional
        Size of image patches, by default (16, 16)
    embed_dim : int, optional
        Embedded channel size, by default 256
    depth : int, optional
        Number of AFNO layers, by default 4
    mlp_ratio : float, optional
        Ratio of layer MLP latent variable size to input feature size, by default 4.0
    drop_rate : float, optional
        Drop out rate in layer MLPs, by default 0.0
    num_blocks : int, optional
        Number of blocks in the block-diag frequency weight matrices, by default 16
    sparsity_threshold : float, optional
        Sparsity threshold (softshrink) of spectral features, by default 0.01
    hard_thresholding_fraction : float, optional
        Threshold for limiting number of modes used [0,1], by default 1

    Example
    -------
    >>> model = modulus.models.afno.AFNO(
    ...     img_size=(32, 32),
    ...     in_channels=2,
    ...     out_channels=1,
    ...     patch_size=(8, 8),
    ...     embed_dim=16,
    ...     depth=2,
    ...     num_blocks=2,
    ... )
    >>> input = torch.randn(32, 2, 32, 32) #(N, C, H, W)
    >>> output = model(input)
    >>> output.size()
    torch.Size([32, 1, 32, 32])

    Note
    ----
    Reference: Guibas, John, et al. "Adaptive fourier neural operators:
    Efficient token mixers for transformers." arXiv preprint arXiv:2111.13587 (2021).
    """

    def __init__(
        self,
        img_size: Tuple[int, int],
        in_channels: int,
        out_channels: int,
        patch_size: Tuple[int, int] = (16, 16),
        embed_dim: int = 256,
        depth: int = 4,
        mlp_ratio: float = 4.0,
        drop_rate: float = 0.0,
        num_blocks: int = 16,
        sparsity_threshold: float = 0.01,
        hard_thresholding_fraction: float = 1.0,
    ) -> None:
        super().__init__(meta=MetaData())
        assert (
            img_size[0] % patch_size[0] == 0 and img_size[1] % patch_size[1] == 0
        ), f"img_size {img_size} should be divisible by patch_size {patch_size}"
        self.in_chans = in_channels
        self.out_chans = out_channels
        self.img_size = img_size
        self.patch_size = patch_size
        self.num_features = self.embed_dim = embed_dim
        self.num_blocks = num_blocks
        norm_layer = partial(nn.LayerNorm, eps=1e-6)

        # Patchify + embed the image into a token sequence
        self.patch_embed = PatchEmbed(
            img_size=img_size,
            in_channels=self.in_chans,
            patch_size=self.patch_size,
            embed_dim=embed_dim,
        )
        num_patches = self.patch_embed.num_patches

        # Learned absolute position embedding, one vector per patch
        self.pos_embed = nn.Parameter(torch.zeros(1, num_patches, embed_dim))
        self.pos_drop = nn.Dropout(p=drop_rate)

        # Patch-grid height/width used to fold tokens back onto a 2D grid
        self.h = img_size[0] // self.patch_size[0]
        self.w = img_size[1] // self.patch_size[1]

        self.blocks = nn.ModuleList(
            [
                Block(
                    embed_dim=embed_dim,
                    num_blocks=self.num_blocks,
                    mlp_ratio=mlp_ratio,
                    drop=drop_rate,
                    norm_layer=norm_layer,
                    sparsity_threshold=sparsity_threshold,
                    hard_thresholding_fraction=hard_thresholding_fraction,
                )
                for i in range(depth)
            ]
        )

        # Linear head maps each token to one full output patch (no bias)
        self.head = nn.Linear(
            embed_dim,
            self.out_chans * self.patch_size[0] * self.patch_size[1],
            bias=False,
        )

        torch.nn.init.trunc_normal_(self.pos_embed, std=0.02)
        self.apply(self._init_weights)

    def _init_weights(self, m):
        """Init model weights"""
        if isinstance(m, nn.Linear):
            torch.nn.init.trunc_normal_(m.weight, std=0.02)
            if isinstance(m, nn.Linear) and m.bias is not None:
                nn.init.constant_(m.bias, 0)
        elif isinstance(m, nn.LayerNorm):
            nn.init.constant_(m.bias, 0)
            nn.init.constant_(m.weight, 1.0)

    # NOTE(review): likely a timm-style hook excluding pos_embed from weight
    # decay; disabled here — confirm intent before deleting.
    # @torch.jit.ignore
    # def no_weight_decay(self):
    #     return {"pos_embed", "cls_token"}

    def forward_features(self, x: Tensor) -> Tensor:
        """Forward pass of core AFNO"""
        B = x.shape[0]
        x = self.patch_embed(x)
        x = x + self.pos_embed
        x = self.pos_drop(x)

        # Fold tokens back onto the 2D patch grid: [B, h, w, embed_dim]
        x = x.reshape(B, self.h, self.w, self.embed_dim)

        for blk in self.blocks:
            x = blk(x)

        return x

    def forward(self, x: Tensor) -> Tensor:
        x = self.forward_features(x)
        # Head output is [b, h, w, p1*p2*c_out]
        x = self.head(x)

        # Correct tensor shape back into [B, C, H, W]
        out = x.view(list(x.shape[:-1]) + [self.patch_size[0], self.patch_size[1], -1])
        # Now [b, h, w, p1, p2, c_out]
        out = torch.permute(out, (0, 5, 1, 3, 2, 4))
        # Now [b, c_out, h, p1, w, p2]
        out = out.reshape(list(out.shape[:2]) + [self.img_size[0], self.img_size[1]])
        # Now [b, c_out, (h*p1), (w*p2)]
        return out
|
modulus-main
|
modulus/models/afno/afno.py
|
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .afno import AFNO
from .distributed import DistributedAFNO
|
modulus-main
|
modulus/models/afno/__init__.py
|
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from functools import partial
import torch
import torch.nn as nn
import torch.fft
from torch import Tensor
from typing import Tuple, Union, Any
# distributed stuff
import torch.distributed as dist
import modulus
from modulus.distributed.manager import DistributedManager
from modulus.models.afno.distributed.mappings import copy_to_matmul_parallel_region
from modulus.models.afno.distributed.mappings import (
scatter_to_matmul_parallel_region,
gather_from_matmul_parallel_region,
)
from modulus.models.afno.distributed.layers import trunc_normal_, DropPath
from modulus.models.afno.distributed.layers import (
DistributedPatchEmbed,
DistributedMLP,
DistributedAFNO2D,
)
import logging
logger = logging.getLogger(__name__)
class DistributedBlock(nn.Module):
    """Model-parallel AFNO transformer block.

    One block = spectral filter (token mixing via DistributedAFNO2D) + MLP,
    each preceded by LayerNorm, wrapped in residual connections. Activations
    inside the block are always channel-sharded ("matmul parallel") across the
    model-parallel group; ``input_is_matmul_parallel`` /
    ``output_is_matmul_parallel`` declare the layout at the block edges so
    forward() can scatter/gather along the channel dim as needed.

    Parameters
    ----------
    h, w : int
        Patch-grid height/width the LayerNorms are shaped for.
    dim : int
        Embedding (channel) dimension.
    mlp_ratio : float, optional
        Hidden/embedding width ratio of the MLP, by default 4.0.
    drop : float, optional
        Dropout rate inside the MLP, by default 0.0.
    drop_path : float, optional
        Stochastic-depth rate, by default 0.0.
    """
    def __init__(
        self,
        h,
        w,
        dim,
        mlp_ratio=4.0,
        drop=0.0,
        drop_path=0.0,
        act_layer=nn.GELU,
        norm_layer=nn.LayerNorm,
        double_skip=True,
        num_blocks=8,
        sparsity_threshold=0.01,
        hard_thresholding_fraction=1.0,
        input_is_matmul_parallel=False,
        output_is_matmul_parallel=False,
    ):
        super(DistributedBlock, self).__init__()
        # model parallelism
        # matmul parallelism
        self.input_is_matmul_parallel = input_is_matmul_parallel
        self.output_is_matmul_parallel = output_is_matmul_parallel
        # norm layer
        self.norm1 = norm_layer((h, w))
        # filter
        # NOTE: inner sub-layers always run channel-sharded; the edges of
        # forward() adapt to the declared input/output layout instead.
        self.filter = DistributedAFNO2D(
            dim,
            num_blocks,
            sparsity_threshold,
            hard_thresholding_fraction,
            input_is_matmul_parallel=True,
            output_is_matmul_parallel=True,
        )
        self.drop_path = DropPath(drop_path) if drop_path > 0.0 else nn.Identity()
        # norm layer
        self.norm2 = norm_layer((h, w))
        mlp_hidden_dim = int(dim * mlp_ratio)
        self.mlp = DistributedMLP(
            in_features=dim,
            hidden_features=mlp_hidden_dim,
            act_layer=act_layer,
            drop=drop,
            input_is_matmul_parallel=True,
            output_is_matmul_parallel=True,
        )
        self.double_skip = double_skip
    def forward(self, x):
        """Apply filter + MLP with residual connections; scatter/gather the
        channel dim at the edges depending on the declared parallel layout."""
        if not self.input_is_matmul_parallel:
            # shard channels across the model-parallel group
            x = scatter_to_matmul_parallel_region(x, dim=1)
        residual = x
        x = self.norm1(x)
        x = self.filter(x)
        # optional extra residual around the filter alone
        if self.double_skip:
            x = x + residual
            residual = x
        x = self.norm2(x)
        x = self.mlp(x)
        x = self.drop_path(x)
        x = x + residual
        if not self.output_is_matmul_parallel:
            # reassemble full channel dim for non-parallel consumers
            x = gather_from_matmul_parallel_region(x, dim=1)
        return x
class DistributedAFNONet(nn.Module):
    """Core distributed AFNO network: patch embed -> positional embed ->
    stack of DistributedBlocks -> 1x1 conv head -> patch reassembly.

    Channel ("matmul") model parallelism is used throughout: the embedding
    dim is split evenly across the "model_parallel" process group, and the
    first/last blocks adapt to the declared input/output layouts.

    Parameters
    ----------
    img_size : tuple, optional
        (height, width) of the input, by default (720, 1440).
    patch_size : tuple, optional
        Patch (p1, p2), must divide img_size, by default (16, 16).
    in_chans, out_chans : int, optional
        Input/output channel counts, by default 2.
    embed_dim : int, optional
        Embedding size (must be divisible by the model-parallel size).
    depth : int, optional
        Number of AFNO blocks, by default 12.
    input_is_matmul_parallel, output_is_matmul_parallel : bool, optional
        Whether model inputs/outputs are channel-sharded, by default False.
    """
    def __init__(
        self,
        img_size=(720, 1440),
        patch_size=(16, 16),
        in_chans=2,
        out_chans=2,
        embed_dim=768,
        depth=12,
        mlp_ratio=4.0,
        drop_rate=0.0,
        drop_path_rate=0.0,
        num_blocks=16,
        sparsity_threshold=0.01,
        hard_thresholding_fraction=1.0,
        input_is_matmul_parallel=False,
        output_is_matmul_parallel=False,
    ):
        super().__init__()
        # comm sizes
        matmul_comm_size = DistributedManager().group_size("model_parallel")
        self.img_size = img_size
        self.patch_size = patch_size
        self.in_chans = in_chans
        self.out_chans = out_chans
        self.num_features = self.embed_dim = embed_dim
        self.num_blocks = num_blocks
        self.input_is_matmul_parallel = input_is_matmul_parallel
        self.output_is_matmul_parallel = output_is_matmul_parallel
        norm_layer = partial(nn.LayerNorm, eps=1e-6)
        # patch embedding always emits channel-sharded activations
        self.patch_embed = DistributedPatchEmbed(
            img_size=img_size,
            patch_size=self.patch_size,
            in_chans=self.in_chans,
            embed_dim=embed_dim,
            input_is_matmul_parallel=self.input_is_matmul_parallel,
            output_is_matmul_parallel=True,
        )
        num_patches = self.patch_embed.num_patches
        # original: x = B, H*W, C
        # self.pos_embed = nn.Parameter(torch.zeros(1, num_patches, embed_dim))
        # new: x = B, C, H*W
        # each rank holds only its local slice of the embedding channels
        self.embed_dim_local = self.embed_dim // matmul_comm_size
        self.pos_embed = nn.Parameter(torch.zeros(1, self.embed_dim_local, num_patches))
        self.pos_drop = nn.Dropout(p=drop_rate)
        # stochastic-depth rates increase linearly with block index
        dpr = [x.item() for x in torch.linspace(0, drop_path_rate, depth)]
        self.h = img_size[0] // self.patch_size[0]
        self.w = img_size[1] // self.patch_size[1]
        # add blocks
        blks = []
        for i in range(0, depth):
            # interior blocks stay channel-sharded; only the last block
            # gathers back to the full channel dim for the head
            input_is_matmul_parallel = True  # if i > 0 else False
            output_is_matmul_parallel = True if i < (depth - 1) else False
            blks.append(
                DistributedBlock(
                    h=self.h,
                    w=self.w,
                    dim=embed_dim,
                    mlp_ratio=mlp_ratio,
                    drop=drop_rate,
                    drop_path=dpr[i],
                    norm_layer=norm_layer,
                    num_blocks=self.num_blocks,
                    sparsity_threshold=sparsity_threshold,
                    hard_thresholding_fraction=hard_thresholding_fraction,
                    input_is_matmul_parallel=input_is_matmul_parallel,
                    output_is_matmul_parallel=output_is_matmul_parallel,
                )
            )
        self.blocks = nn.ModuleList(blks)
        # head
        if self.output_is_matmul_parallel:
            # ceil-division so ranks cover out_chans even when not divisible
            self.out_chans_local = (
                self.out_chans + matmul_comm_size - 1
            ) // matmul_comm_size
        else:
            self.out_chans_local = self.out_chans
        # 1x1 conv: embed_dim -> per-patch pixels (out_chans_local * p1 * p2)
        self.head = nn.Conv2d(
            self.embed_dim,
            self.out_chans_local * self.patch_size[0] * self.patch_size[1],
            1,
            bias=False,
        )
        self.synchronized_head = False
        # init weights
        trunc_normal_(self.pos_embed, std=0.02)
        self.apply(self._init_weights)
    def _init_weights(self, m):
        """Init one submodule: trunc-normal Linear/Conv2d, identity LayerNorm."""
        if isinstance(m, nn.Linear) or isinstance(m, nn.Conv2d):
            trunc_normal_(m.weight, std=0.02)
            if m.bias is not None:
                nn.init.constant_(m.bias, 0)
        elif isinstance(m, nn.LayerNorm):
            nn.init.constant_(m.bias, 0)
            nn.init.constant_(m.weight, 1.0)
    @torch.jit.ignore
    def no_weight_decay(self):
        """Parameter names excluded from weight decay by supporting trainers."""
        return {"pos_embed", "cls_token"}
    def forward_features(self, x):
        """Patch embed + positional embed + AFNO blocks on the patch grid."""
        B = x.shape[0]
        x = self.patch_embed(x)
        x = x + self.pos_embed
        x = self.pos_drop(x)
        # reshape
        x = x.reshape(B, self.embed_dim_local, self.h, self.w)
        for blk in self.blocks:
            x = blk(x)
        return x
    def forward(self, x):
        # fw pass on features
        x = self.forward_features(x)
        # be careful if head is distributed
        if self.output_is_matmul_parallel:
            x = copy_to_matmul_parallel_region(x)
        else:
            if not self.synchronized_head:
                # If output is not model parallel, synchronize all GPUs params for head
                for param in self.head.parameters():
                    dist.broadcast(
                        param, 0, group=DistributedManager().group("model_parallel")
                    )
                self.synchronized_head = True
        x = self.head(x)
        # new: B, C, H, W
        # reassemble per-patch pixels into the full-resolution image
        b = x.shape[0]
        xv = x.view(b, self.patch_size[0], self.patch_size[1], -1, self.h, self.w)
        xvt = torch.permute(xv, (0, 3, 4, 1, 5, 2)).contiguous()
        x = xvt.view(
            b, -1, (self.h * self.patch_size[0]), (self.w * self.patch_size[1])
        )
        return x
class DistributedAFNO(modulus.Module):
    """Distributed Adaptive Fourier neural operator (AFNO) model.

    Note
    ----
    AFNO is a model that is designed for 2D images only.

    Parameters
    ----------
    img_shape : Tuple[int, int]
        Input image dimensions (height, width)
    in_channels : int
        Number of input channels
    out_channels: Union[int, Any], optional
        Number of output channels, by default in_channels
    patch_size : int, optional
        Size of image patches, by default 16
    embed_dim : int, optional
        Embedded channel size, by default 256
    depth : int, optional
        Number of AFNO layers, by default 4
    num_blocks : int, optional
        Number of blocks in the frequency weight matrices, by default 4
    channel_parallel_inputs : bool, optional
        Are the inputs sharded along the channel dimension, by default False
    channel_parallel_outputs : bool, optional
        Should the outputs be sharded along the channel dimension, by default False

    Variable Shape
    --------------
    - Input variable tensor shape: :math:`[N, size, H, W]`
    - Output variable tensor shape: :math:`[N, size, H, W]`

    Example
    -------
    >>> # from modulus.distributed import DistributedManager
    >>> # DistributedManager.initialize()
    >>> # model = modulus.models.afno.DistributedAFNO((64, 64), 2)
    >>> # input = torch.randn(20, 2, 64, 64)
    >>> # output = model(input)
    """

    def __init__(
        self,
        img_shape: Tuple[int, int],
        in_channels: int,
        out_channels: Union[int, Any] = None,
        patch_size: int = 16,
        embed_dim: int = 256,
        depth: int = 4,
        num_blocks: int = 4,
        channel_parallel_inputs: bool = False,
        channel_parallel_outputs: bool = False,
    ) -> None:
        super().__init__()
        out_channels = out_channels or in_channels
        # a model-parallel process group must exist before construction
        if DistributedManager().group("model_parallel") is None:
            raise RuntimeError(
                "Distributed AFNO needs to have model parallel group created first. "
                "Check the MODEL_PARALLEL_SIZE environment variable"
            )
        comm_size = DistributedManager().group_size("model_parallel")
        if channel_parallel_inputs:
            assert (
                in_channels % comm_size == 0
            ), "Error, in_channels needs to be divisible by model_parallel size"
        # BUGFIX: channel_parallel_inputs/channel_parallel_outputs were
        # previously accepted (and validated) but then ignored — both flags
        # were hard-coded to False below, so sharded inputs/outputs silently
        # fell back to the replicated path. Forward them to the implementation.
        self._impl = DistributedAFNONet(
            img_size=img_shape,
            patch_size=(patch_size, patch_size),
            in_chans=in_channels,
            out_chans=out_channels,
            embed_dim=embed_dim,
            depth=depth,
            num_blocks=num_blocks,
            input_is_matmul_parallel=channel_parallel_inputs,
            output_is_matmul_parallel=channel_parallel_outputs,
        )

    def forward(self, in_vars: Tensor) -> Tensor:
        """Run the distributed AFNO network on ``in_vars``."""
        return self._impl(in_vars)
|
modulus-main
|
modulus/models/afno/distributed/afno.py
|
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .afno import DistributedAFNO
|
modulus-main
|
modulus/models/afno/distributed/__init__.py
|
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math
import warnings
import torch
import torch.nn as nn
import torch.nn.functional as F
from modulus.distributed.manager import DistributedManager
from modulus.models.afno.distributed.mappings import copy_to_matmul_parallel_region
from modulus.models.afno.distributed.mappings import reduce_from_matmul_parallel_region
from modulus.models.afno.distributed.mappings import scatter_to_matmul_parallel_region
from modulus.models.afno.distributed.mappings import gather_from_matmul_parallel_region
def _no_grad_trunc_normal_(tensor, mean, std, a, b):
# Cut & paste from PyTorch official master until it's in a few official releases
# Method based on
# https://people.sc.fsu.edu/~jburkardt/presentations/truncated_normal.pdf
def norm_cdf(x):
# Computes standard normal cumulative distribution function
return (1.0 + math.erf(x / math.sqrt(2.0))) / 2.0
if (mean < a - 2 * std) or (mean > b + 2 * std):
warnings.warn(
"mean is more than 2 std from [a, b] in nn.init.trunc_normal_. "
"The distribution of values may be incorrect.",
stacklevel=2,
)
with torch.no_grad():
# Values are generated by using a truncated uniform distribution and
# then using the inverse CDF for the normal distribution.
# Get upper and lower cdf values
l = norm_cdf((a - mean) / std)
u = norm_cdf((b - mean) / std)
# Uniformly fill tensor with values from [l, u], then translate to
# [2l-1, 2u-1].
tensor.uniform_(2 * l - 1, 2 * u - 1)
# Use inverse cdf transform for normal distribution to get truncated
# standard normal
tensor.erfinv_()
# Transform to proper mean, std
tensor.mul_(std * math.sqrt(2.0))
tensor.add_(mean)
# Clamp to ensure it's in the proper range
tensor.clamp_(min=a, max=b)
return tensor
def trunc_normal_(tensor, mean=0.0, std=1.0, a=-2.0, b=2.0):
    r"""In-place fill of *tensor* with a truncated normal distribution.

    Values are effectively drawn from :math:`\mathcal{N}(\text{mean},
    \text{std}^2)` with samples outside :math:`[a, b]` redrawn until they
    land inside the bounds. Works best when
    :math:`a \leq \text{mean} \leq b`.

    Args:
        tensor: an n-dimensional `torch.Tensor`
        mean: the mean of the normal distribution
        std: the standard deviation of the normal distribution
        a: the minimum cutoff value
        b: the maximum cutoff value

    Examples:
        >>> w = torch.empty(3, 5)
        >>> o = nn.init.trunc_normal_(w)
    """
    # thin convenience wrapper around the gradient-free implementation
    return _no_grad_trunc_normal_(tensor, mean=mean, std=std, a=a, b=b)
@torch.jit.script
def drop_path(
    x: torch.Tensor, drop_prob: float = 0.0, training: bool = False
) -> torch.Tensor:
    """Stochastic depth: randomly zero entire samples in a residual branch.

    Each sample in the batch is kept with probability ``1 - drop_prob``;
    kept samples are rescaled by ``1 / (1 - drop_prob)`` so the expected
    activation is unchanged. A no-op at inference or when ``drop_prob`` is 0.
    (Sometimes called "DropConnect" elsewhere; see
    https://github.com/tensorflow/tpu/issues/494#issuecomment-532968956 for
    the naming discussion.)
    """
    if drop_prob == 0.0 or not training:
        return x
    survival = 1.0 - drop_prob
    # one Bernoulli draw per sample; singleton trailing dims broadcast over
    # the remaining axes, so tensors of any rank are supported
    mask_shape = [x.shape[0]] + [1] * (x.dim() - 1)
    mask = survival + torch.rand(mask_shape, dtype=x.dtype, device=x.device)
    mask.floor_()  # binarize
    return x.div(survival) * mask
class DropPath(nn.Module):
    """Module wrapper around :func:`drop_path` (stochastic depth).

    Randomly zeroes whole samples in the main path of residual blocks while
    training; identity in eval mode.
    """

    def __init__(self, drop_prob=None):
        super().__init__()
        self.drop_prob = drop_prob

    def forward(self, x):
        return drop_path(x, self.drop_prob, self.training)
class DistributedMLP(nn.Module):
    """Two-layer MLP (implemented as 1x1 convolutions) with the hidden
    dimension sharded across the model-parallel process group.

    Each rank holds ``hidden_features // comm_size`` hidden units; the second
    matmul's partial results are all-reduced before the (replicated) output
    bias is added.
    """
    def __init__(
        self,
        in_features,
        hidden_features=None,
        out_features=None,
        act_layer=nn.GELU,
        drop=0.0,
        input_is_matmul_parallel=False,
        output_is_matmul_parallel=False,
    ):
        super(DistributedMLP, self).__init__()
        out_features = out_features or in_features
        hidden_features = hidden_features or in_features
        self.input_is_matmul_parallel = input_is_matmul_parallel
        self.output_is_matmul_parallel = output_is_matmul_parallel
        # get effective embedding size:
        comm_size = DistributedManager().group_size("model_parallel")
        assert (
            hidden_features % comm_size == 0
        ), "Error, hidden_features needs to be divisible by matmul_parallel_size"
        hidden_features_local = hidden_features // comm_size
        # first set of hp
        # 1x1-conv weights act as a per-pixel linear layer on the channel dim
        self.w1 = nn.Parameter(torch.ones(hidden_features_local, in_features, 1, 1))
        self.b1 = nn.Parameter(torch.zeros(hidden_features_local))
        # second set of hp
        self.w2 = nn.Parameter(torch.ones(out_features, hidden_features_local, 1, 1))
        self.b2 = nn.Parameter(torch.zeros(out_features))
        self.act = act_layer()
        self.drop = nn.Dropout(drop) if drop > 0.0 else nn.Identity()
        # init weights
        self._init_weights()
    def _init_weights(self):
        """Truncated-normal weights (std=0.02), zero biases."""
        trunc_normal_(self.w1, std=0.02)
        nn.init.constant_(self.b1, 0.0)
        trunc_normal_(self.w2, std=0.02)
        nn.init.constant_(self.b2, 0.0)
    def forward(self, x):
        # gather if input is MP
        if self.input_is_matmul_parallel:
            x = gather_from_matmul_parallel_region(x, dim=1)
        # identity forward; gradients all-reduced in backward
        x = copy_to_matmul_parallel_region(x)
        x = F.conv2d(x, self.w1, bias=self.b1)
        x = self.act(x)
        x = self.drop(x)
        # second matmul on the local hidden slice only (bias added after
        # the reduction so it is applied exactly once)
        x = F.conv2d(x, self.w2, bias=None)
        x = reduce_from_matmul_parallel_region(x)
        x = x + torch.reshape(self.b2, (1, -1, 1, 1))
        x = self.drop(x)
        # scatter if output is MP
        if self.output_is_matmul_parallel:
            x = scatter_to_matmul_parallel_region(x, dim=1)
        return x
class DistributedPatchEmbed(nn.Module):
    """Patch embedding with channel ("matmul") model parallelism.

    Splits the image into non-overlapping patches and projects each patch to
    an embedding vector via a strided Conv2d. When ``output_is_matmul_parallel``
    each rank produces only its local slice of the embedding channels.
    """
    def __init__(
        self,
        img_size=(224, 224),
        patch_size=(16, 16),
        in_chans=3,
        embed_dim=768,
        input_is_matmul_parallel=False,
        output_is_matmul_parallel=True,
    ):
        super(DistributedPatchEmbed, self).__init__()
        # store params
        self.input_parallel = input_is_matmul_parallel
        self.output_parallel = output_is_matmul_parallel
        # get comm sizes:
        matmul_comm_size = DistributedManager().group_size("model_parallel")
        # compute parameters
        num_patches = (img_size[1] // patch_size[1]) * (img_size[0] // patch_size[0])
        self.img_size = (img_size[0], img_size[1])
        self.patch_size = patch_size
        self.num_patches = num_patches
        if self.input_parallel:
            assert (
                in_chans % matmul_comm_size == 0
            ), "Error, the in_chans needs to be divisible by matmul_parallel_size"
        # get effective embedding size:
        if self.output_parallel:
            assert (
                embed_dim % matmul_comm_size == 0
            ), "Error, the embed_dim needs to be divisible by matmul_parallel_size"
            out_chans_local = embed_dim // matmul_comm_size
        else:
            out_chans_local = embed_dim
        # the weights of this layer is shared across spatial parallel ranks
        # (conv with kernel == stride == patch_size: one projection per patch)
        self.proj = nn.Conv2d(
            in_chans, out_chans_local, kernel_size=patch_size, stride=patch_size
        )
        # make sure we reduce them across rank
        self.proj.weight.is_shared_spatial = True
        self.proj.bias.is_shared_spatial = True
    def forward(self, x):
        # rebuild the full channel dim if the input arrives sharded
        if self.input_parallel:
            x = gather_from_matmul_parallel_region(x, dim=1)
        if self.output_parallel:
            # identity forward; gradients all-reduced in backward
            x = copy_to_matmul_parallel_region(x)
        B, C, H, W = x.shape
        assert (
            H == self.img_size[0] and W == self.img_size[1]
        ), f"Input image size ({H}*{W}) doesn't match model ({self.img_size[0]}*{self.img_size[1]})."
        # new: B, C, H*W
        x = self.proj(x).flatten(2)
        return x
@torch.jit.script
def compl_mul_add_fwd(
    a: torch.Tensor, b: torch.Tensor, c: torch.Tensor
) -> torch.Tensor:
    """Block-wise complex multiply-accumulate on real-valued tensors.

    The trailing dimension of size 2 holds (real, imag). Computes the
    per-block complex contraction of ``a`` (bkixy) with ``b`` (kio) and adds
    the bias ``c`` — all in real arithmetic, without complex dtypes.
    """
    prod = torch.einsum("bkixys,kiot->stbkoxy", a, b)
    # (ar + i*ai)(br + i*bi): real = ar*br - ai*bi, imag = ai*br + ar*bi
    real_part = prod[0, 0, ...] - prod[1, 1, ...]
    imag_part = prod[1, 0, ...] + prod[0, 1, ...]
    return torch.stack([real_part, imag_part], dim=-1) + c
@torch.jit.script
def compl_mul_add_fwd_c(
    a: torch.Tensor, b: torch.Tensor, c: torch.Tensor
) -> torch.Tensor:
    """Block-wise complex multiply-accumulate using native complex dtypes.

    Same contraction as :func:`compl_mul_add_fwd`, but views the trailing
    (real, imag) pairs as complex tensors and lets einsum do the arithmetic.
    """
    result = (
        torch.einsum(
            "bkixy,kio->bkoxy", torch.view_as_complex(a), torch.view_as_complex(b)
        )
        + torch.view_as_complex(c)
    )
    return torch.view_as_real(result)
class DistributedAFNO2D(nn.Module):
    """Model-parallel 2D Adaptive Fourier Neural Operator filter.

    Token mixing is done in the Fourier domain: an rFFT2, a block-diagonal
    two-layer MLP applied per frequency mode (optionally restricted to a
    fraction of the modes), soft-shrinkage sparsification, and an inverse
    FFT with a residual add. The channel blocks are sharded across the
    "model_parallel" process group.
    """
    def __init__(
        self,
        hidden_size,
        num_blocks=8,
        sparsity_threshold=0.01,
        hard_thresholding_fraction=1,
        hidden_size_factor=1,
        input_is_matmul_parallel=False,
        output_is_matmul_parallel=False,
    ):
        super(DistributedAFNO2D, self).__init__()
        assert (
            hidden_size % num_blocks == 0
        ), f"hidden_size {hidden_size} should be divisible by num_blocks {num_blocks}"
        # get comm sizes:
        matmul_comm_size = DistributedManager().group_size("model_parallel")
        self.fft_handle = torch.fft.rfft2
        self.ifft_handle = torch.fft.irfft2
        self.hidden_size = hidden_size
        self.sparsity_threshold = sparsity_threshold
        self.num_blocks = num_blocks
        assert (
            self.num_blocks % matmul_comm_size == 0
        ), "Error, num_blocks needs to be divisible by matmul_parallel_size"
        # each rank owns num_blocks_local of the block-diagonal weight blocks
        self.num_blocks_local = self.num_blocks // matmul_comm_size
        self.block_size = self.hidden_size // self.num_blocks
        self.hard_thresholding_fraction = hard_thresholding_fraction
        self.hidden_size_factor = hidden_size_factor
        self.scale = 0.02
        # real-arithmetic path by default; complex path kept as alternative
        use_complex_mult = False
        self.mult_handle = (
            compl_mul_add_fwd_c if use_complex_mult else compl_mul_add_fwd
        )
        # model parallelism
        self.input_is_matmul_parallel = input_is_matmul_parallel
        self.output_is_matmul_parallel = output_is_matmul_parallel
        # new
        # these weights need to be synced across all spatial ranks!
        # trailing dim of 2 stores (real, imag) components
        self.w1 = nn.Parameter(
            self.scale
            * torch.randn(
                self.num_blocks_local,
                self.block_size,
                self.block_size * self.hidden_size_factor,
                2,
            )
        )
        self.b1 = nn.Parameter(
            self.scale
            * torch.randn(
                self.num_blocks_local,
                self.block_size * self.hidden_size_factor,
                1,
                1,
                2,
            )
        )
        self.w2 = nn.Parameter(
            self.scale
            * torch.randn(
                self.num_blocks_local,
                self.block_size * self.hidden_size_factor,
                self.block_size,
                2,
            )
        )
        self.b2 = nn.Parameter(
            self.scale * torch.randn(self.num_blocks_local, self.block_size, 1, 1, 2)
        )
        # make sure we reduce them across rank
        self.w1.is_shared_spatial = True
        self.b1.is_shared_spatial = True
        self.w2.is_shared_spatial = True
        self.b2.is_shared_spatial = True
    def forward(self, x):
        if not self.input_is_matmul_parallel:
            # distribute data
            x = scatter_to_matmul_parallel_region(x, dim=1)
        # bias
        # the untouched input doubles as the residual term added at the end
        bias = x
        dtype = x.dtype
        # FFT in fp32 for numerical stability under AMP
        x = x.float()
        B, C, H, W = x.shape
        total_modes = H // 2 + 1
        kept_modes = int(total_modes * self.hard_thresholding_fraction)
        x = self.fft_handle(x, (H, W), (-2, -1), "ortho")
        # split channels into (local blocks, block_size)
        x = x.view(B, self.num_blocks_local, self.block_size, H, W // 2 + 1)
        # new
        # complex -> (..., 2) real view so the real-arithmetic multiply works
        x = torch.view_as_real(x)
        o2 = torch.zeros(x.shape, device=x.device)
        # two-layer per-mode MLP on the kept frequency band only; the slicing
        # pattern matches the reference (non-distributed) AFNO implementation
        o1 = F.relu(
            self.mult_handle(
                x[
                    :,
                    :,
                    :,
                    total_modes - kept_modes : total_modes + kept_modes,
                    :kept_modes,
                    :,
                ],
                self.w1,
                self.b1,
            )
        )
        o2[
            :, :, :, total_modes - kept_modes : total_modes + kept_modes, :kept_modes, :
        ] = self.mult_handle(o1, self.w2, self.b2)
        # finalize
        # soft-shrinkage promotes sparsity in the spectral domain
        x = F.softshrink(o2, lambd=self.sparsity_threshold)
        x = torch.view_as_complex(x)
        x = x.reshape(B, C, H, W // 2 + 1)
        x = self.ifft_handle(x, (H, W), (-2, -1), "ortho")
        # restore the original dtype and apply the residual connection
        x = x.type(dtype) + bias
        # gather
        if not self.output_is_matmul_parallel:
            x = gather_from_matmul_parallel_region(x, dim=1)
        return x
|
modulus-main
|
modulus/models/afno/distributed/layers.py
|
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from modulus.distributed.manager import DistributedManager
from modulus.distributed.utils import _reduce
from modulus.distributed.utils import _split
from modulus.distributed.utils import _gather
# matmul parallel
class _CopyToMatmulParallelRegion(torch.autograd.Function):
    """Pass the input to the matmul parallel region.

    Identity in the forward pass; the backward pass all-reduces gradients
    over the "model_parallel" group (the conjugate of a forward reduction).
    """
    @staticmethod
    def symbolic(graph, input_):
        return input_
    @staticmethod
    def forward(ctx, input_):
        return input_
    @staticmethod
    def backward(ctx, grad_output):
        return _reduce(grad_output, group=DistributedManager().group("model_parallel"))
class _ReduceFromMatmulParallelRegion(torch.autograd.Function):
    """All-reduce the input from the matmul parallel region.

    Sum-reduces partial results in the forward pass; the backward pass is
    the identity (each rank receives the full gradient).
    """
    @staticmethod
    def symbolic(graph, input_):
        return _reduce(input_, group=DistributedManager().group("model_parallel"))
    @staticmethod
    def forward(ctx, input_):
        return _reduce(input_, group=DistributedManager().group("model_parallel"))
    @staticmethod
    def backward(ctx, grad_output):
        return grad_output
class _ScatterToMatmulParallelRegion(torch.autograd.Function):
    """Split the input and keep only the corresponding chunk for this rank.

    Forward splits along ``dim_``; backward gathers gradients back along the
    same dimension. The second backward return (None) matches the ``dim_``
    input, which needs no gradient.
    """
    @staticmethod
    def symbolic(graph, input_, dim_):
        return _split(input_, dim_, group=DistributedManager().group("model_parallel"))
    @staticmethod
    def forward(ctx, input_, dim_):
        # remember the split dim for the backward gather
        ctx.dim = dim_
        return _split(input_, dim_, group=DistributedManager().group("model_parallel"))
    @staticmethod
    def backward(ctx, grad_output):
        return (
            _gather(
                grad_output, ctx.dim, group=DistributedManager().group("model_parallel")
            ),
            None,
        )
class _GatherFromMatmulParallelRegion(torch.autograd.Function):
    """Gather the input from the matmul parallel region and concatenate.

    Forward gathers rank-local chunks along ``dim_``; backward splits the
    gradient so each rank receives only its own slice.
    """
    @staticmethod
    def symbolic(graph, input_, dim_):
        return _gather(input_, dim_, group=DistributedManager().group("model_parallel"))
    @staticmethod
    def forward(ctx, input_, dim_):
        # remember the gather dim for the backward split
        ctx.dim = dim_
        return _gather(input_, dim_, group=DistributedManager().group("model_parallel"))
    @staticmethod
    def backward(ctx, grad_output):
        return (
            _split(
                grad_output, ctx.dim, group=DistributedManager().group("model_parallel")
            ),
            None,
        )
class _GatherWithinMatmulParallelRegion(torch.autograd.Function):
    """Gather the input from the matmul parallel region and concatenate.

    Like :class:`_GatherFromMatmulParallelRegion`, but the backward pass
    additionally all-reduces the gradient before splitting — for results
    consumed again *within* the parallel region.
    """
    @staticmethod
    def symbolic(graph, input_, dim_):
        return _gather(input_, dim_, group=DistributedManager().group("model_parallel"))
    @staticmethod
    def forward(ctx, input_, dim_):
        # remember the gather dim for the backward split
        ctx.dim = dim_
        return _gather(input_, dim_, group=DistributedManager().group("model_parallel"))
    @staticmethod
    def backward(ctx, grad_output):
        # reduce first, then hand each rank its own slice
        red = _reduce(grad_output, group=DistributedManager().group("model_parallel"))
        return (
            _split(red, ctx.dim, group=DistributedManager().group("model_parallel")),
            None,
        )
# -----------------
# Helper functions.
# -----------------
# matmul parallel
# Thin functional wrappers so callers don't invoke Function.apply directly.
def copy_to_matmul_parallel_region(input_):
    """Copy input (identity forward; gradients all-reduced in backward)."""
    return _CopyToMatmulParallelRegion.apply(input_)
def reduce_from_matmul_parallel_region(input_):
    """All-reduce the input from the matmul parallel region."""
    return _ReduceFromMatmulParallelRegion.apply(input_)
def scatter_to_matmul_parallel_region(input_, dim):
    """Split the input along ``dim`` and keep only this rank's chunk."""
    return _ScatterToMatmulParallelRegion.apply(input_, dim)
def gather_from_matmul_parallel_region(input_, dim):
    """Gather the input from matmul parallel region and concatenate."""
    return _GatherFromMatmulParallelRegion.apply(input_, dim)
def gather_within_matmul_parallel_region(input_, dim):
    """Gather and concatenate; backward also all-reduces the gradient."""
    return _GatherWithinMatmulParallelRegion.apply(input_, dim)
|
modulus-main
|
modulus/models/afno/distributed/mappings.py
|
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .graph_cast_net import GraphCastNet
|
modulus-main
|
modulus/models/graphcast/__init__.py
|
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
import torch.nn as nn
from typing import Union
from torch import Tensor
from dgl import DGLGraph
from modulus.models.gnn_layers.utils import set_checkpoint_fn, CuGraphCSC
from modulus.models.gnn_layers.mesh_node_block import MeshNodeBlock
from modulus.models.gnn_layers.mesh_edge_block import MeshEdgeBlock
class GraphCastProcessor(nn.Module):
    """Processor block used in GraphCast operating on a latent space
    represented by hierarchy of icosahedral meshes.
    Parameters
    ----------
    aggregation : str, optional
        message passing aggregation method ("sum", "mean"), by default "sum"
    processor_layers : int, optional
        number of processor layers, by default 16
    input_dim_nodes : int, optional
        input dimensionality of the node features, by default 512
    input_dim_edges : int, optional
        input dimensionality of the edge features, by default 512
    hidden_dim : int, optional
        number of neurons in each hidden layer, by default 512
    hidden_layers : int, optional
        number of hidden layers, by default 1
    activation_fn : nn.Module, optional
        type of activation function, by default nn.SiLU()
    norm_type : str, optional
        normalization type, by default "LayerNorm"
    do_concat_trick: : bool, default=False
        whether to replace concat+MLP with MLP+idx+sum
    recompute_activation : bool, optional
        Flag for recomputing activation in backward to save memory, by default False.
        Currently, only SiLU is supported.
    """
    def __init__(
        self,
        aggregation: str = "sum",
        processor_layers: int = 16,
        input_dim_nodes: int = 512,
        input_dim_edges: int = 512,
        hidden_dim: int = 512,
        hidden_layers: int = 1,
        activation_fn: nn.Module = nn.SiLU(),
        norm_type: str = "LayerNorm",
        do_concat_trick: bool = False,
        recompute_activation: bool = False,
    ):
        super().__init__()
        # shared constructor arguments for every edge block
        edge_block_invars = (
            input_dim_nodes,
            input_dim_edges,
            input_dim_edges,
            hidden_dim,
            hidden_layers,
            activation_fn,
            norm_type,
            do_concat_trick,
            recompute_activation,
        )
        # shared constructor arguments for every node block
        node_block_invars = (
            aggregation,
            input_dim_nodes,
            input_dim_edges,
            input_dim_nodes,
            hidden_dim,
            hidden_layers,
            activation_fn,
            norm_type,
            recompute_activation,
        )
        # one processor layer = an edge update followed by a node update
        layers = []
        for _ in range(processor_layers):
            layers.append(MeshEdgeBlock(*edge_block_invars))
            layers.append(MeshNodeBlock(*node_block_invars))
        self.processor_layers = nn.ModuleList(layers)
        self.num_processor_layers = len(self.processor_layers)
        # per default, no checkpointing
        # one segment for compatibility
        self.checkpoint_segments = [(0, self.num_processor_layers)]
        self.checkpoint_fn = set_checkpoint_fn(False)
    def set_checkpoint_segments(self, checkpoint_segments: int):
        """
        Set the number of checkpoint segments
        Parameters
        ----------
        checkpoint_segments : int
            number of checkpoint segments; a non-positive value disables
            gradient checkpointing
        Raises
        ------
        ValueError
            if the number of processor layers is not a multiple of the number of
            checkpoint segments
        """
        if checkpoint_segments > 0:
            if self.num_processor_layers % checkpoint_segments != 0:
                raise ValueError(
                    "Processor layers must be a multiple of checkpoint_segments"
                )
            segment_size = self.num_processor_layers // checkpoint_segments
            self.checkpoint_segments = []
            # partition the layer list into equal [start, end) index ranges
            for i in range(0, self.num_processor_layers, segment_size):
                self.checkpoint_segments.append((i, i + segment_size))
            self.checkpoint_fn = set_checkpoint_fn(True)
        else:
            # disable checkpointing: run everything as one segment
            self.checkpoint_fn = set_checkpoint_fn(False)
            self.checkpoint_segments = [(0, self.num_processor_layers)]
    def run_function(self, segment_start: int, segment_end: int):
        """Custom forward for gradient checkpointing
        Parameters
        ----------
        segment_start : int
            Layer index as start of the segment
        segment_end : int
            Layer index as end of the segment
        Returns
        -------
        function
            Custom forward function
        """
        segment = self.processor_layers[segment_start:segment_end]
        def custom_forward(efeat, nfeat, graph):
            """Custom forward function"""
            for module in segment:
                efeat, nfeat = module(efeat, nfeat, graph)
            return efeat, nfeat
        return custom_forward
    def forward(
        self,
        efeat: Tensor,
        nfeat: Tensor,
        graph: Union[DGLGraph, CuGraphCSC],
    ) -> "tuple[Tensor, Tensor]":
        """Run all processor layers segment by segment, optionally under
        gradient checkpointing (see ``set_checkpoint_segments``); returns the
        updated (edge features, node features) pair.
        """
        for segment_start, segment_end in self.checkpoint_segments:
            efeat, nfeat = self.checkpoint_fn(
                self.run_function(segment_start, segment_end),
                efeat,
                nfeat,
                graph,
                use_reentrant=False,
                preserve_rng_state=False,
            )
        return efeat, nfeat
|
modulus-main
|
modulus/models/graphcast/graph_cast_processor.py
|
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
import torch.nn as nn
from torch import Tensor
from typing import Any
from dataclasses import dataclass
from modulus.models.layers import get_activation
from modulus.models.gnn_layers.utils import set_checkpoint_fn, CuGraphCSC
from modulus.models.gnn_layers.embedder import (
GraphCastEncoderEmbedder,
GraphCastDecoderEmbedder,
)
from modulus.models.gnn_layers.mesh_graph_encoder import MeshGraphEncoder
from modulus.models.gnn_layers.mesh_graph_decoder import MeshGraphDecoder
from modulus.models.gnn_layers.mesh_graph_mlp import MeshGraphMLP
from modulus.models.module import Module
from modulus.models.meta import ModelMetaData
from modulus.utils.graphcast.graph import Graph
from modulus.utils.graphcast.data_utils import StaticData
from .graph_cast_processor import GraphCastProcessor
@dataclass
class MetaData(ModelMetaData):
    """Model metadata for GraphCastNet: capability flags consumed by the
    Modulus ``Module`` machinery to decide which optimization / inference /
    physics-informed features may be enabled for this architecture."""

    name: str = "GraphCastNet"
    # Optimization
    jit: bool = False  # TorchScript disabled for this model
    cuda_graphs: bool = False
    amp_cpu: bool = False
    amp_gpu: bool = True  # autocast supported on GPU only
    torch_fx: bool = False
    # Inference
    onnx: bool = False  # ONNX export not supported
    # Physics informed
    func_torch: bool = False
    auto_grad: bool = False
class GraphCastNet(Module):
    """GraphCast network architecture

    Encoder-processor-decoder GNN operating on a latitude-longitude grid
    coupled to an icosahedral multimesh: grid features are encoded onto the
    mesh (g2m), processed on the mesh, and decoded back to the grid (m2g).

    Parameters
    ----------
    meshgraph_path : str
        Path to the meshgraph file. If not provided, the meshgraph will be created
        using PyMesh.
    static_dataset_path : str
        Path to the static dataset file.
    input_res: Tuple[int, int]
        Input resolution of the latitude-longitude grid
    input_dim_grid_nodes : int, optional
        Input dimensionality of the grid node features, by default 474
    input_dim_mesh_nodes : int, optional
        Input dimensionality of the mesh node features, by default 3
    input_dim_edges : int, optional
        Input dimensionality of the edge features, by default 4
    output_dim_grid_nodes : int, optional
        Final output dimensionality of the grid node features, by default 227
    processor_layers : int, optional
        Number of processor layers, by default 16
    hidden_layers : int, optional
        Number of hidden layers, by default 1
    hidden_dim : int, optional
        Number of neurons in each hidden layer, by default 512
    aggregation : str, optional
        Message passing aggregation method ("sum", "mean"), by default "sum"
    activation_fn : str, optional
        Type of activation function, by default "silu"
    norm_type : str, optional
        Normalization type, by default "LayerNorm"
    use_cugraphops_encoder : bool, default=False
        Flag to select cugraphops kernels in encoder
    use_cugraphops_processor : bool, default=False
        Flag to select cugraphops kernels in the processor
    use_cugraphops_decoder : bool, default=False
        Flag to select cugraphops kernels in the decoder
    do_concat_trick : bool, default=False
        Whether to replace concat+MLP with MLP+idx+sum
    recompute_activation : bool, optional
        Flag for recomputing activation in backward to save memory, by default False.
        Currently, only SiLU is supported.

    Note
    ----
    Based on these papers:
    - "GraphCast: Learning skillful medium-range global weather forecasting"
        https://arxiv.org/abs/2212.12794
    - "Forecasting Global Weather with Graph Neural Networks"
        https://arxiv.org/abs/2202.07575
    - "Learning Mesh-Based Simulation with Graph Networks"
        https://arxiv.org/abs/2010.03409
    - "MultiScale MeshGraphNets"
        https://arxiv.org/abs/2210.00612
    """

    def __init__(
        self,
        meshgraph_path: str,
        static_dataset_path: str,
        input_res: tuple = (721, 1440),
        input_dim_grid_nodes: int = 474,
        input_dim_mesh_nodes: int = 3,
        input_dim_edges: int = 4,
        output_dim_grid_nodes: int = 227,
        processor_layers: int = 16,
        hidden_layers: int = 1,
        hidden_dim: int = 512,
        aggregation: str = "sum",
        activation_fn: str = "silu",
        norm_type: str = "LayerNorm",
        use_cugraphops_encoder: bool = False,
        use_cugraphops_processor: bool = False,
        use_cugraphops_decoder: bool = False,
        do_concat_trick: bool = False,
        recompute_activation: bool = False,
    ):
        super().__init__(meta=MetaData())

        # create the lat_lon_grid; longitude drops the duplicate -180/180 point
        self.latitudes = torch.linspace(-90, 90, steps=input_res[0])
        self.longitudes = torch.linspace(-180, 180, steps=input_res[1] + 1)[1:]
        self.lat_lon_grid = torch.stack(
            torch.meshgrid(self.latitudes, self.longitudes, indexing="ij"), dim=-1
        )
        self.has_static_data = static_dataset_path is not None

        # Set activation function (resolve name -> module)
        activation_fn = get_activation(activation_fn)

        # Get the static data; static channels are appended to the grid input,
        # so the effective grid-node input dimension grows accordingly
        if self.has_static_data:
            self.static_data = StaticData(
                static_dataset_path, self.latitudes, self.longitudes
            ).get()
            num_static_feat = self.static_data.size(1)
            input_dim_grid_nodes += num_static_feat
        else:
            self.static_data = None

        self.input_dim_grid_nodes = input_dim_grid_nodes
        self.output_dim_grid_nodes = output_dim_grid_nodes
        self.input_res = input_res

        # construct the graph
        try:
            self.graph = Graph(meshgraph_path, self.lat_lon_grid)
        except Exception as e:
            # chain the original error so the real cause is not hidden
            raise FileNotFoundError(
                "The icospheres_path is corrupted. "
                "Tried using pymesh to generate the graph but could not find pymesh"
            ) from e
        self.mesh_graph = self.graph.create_mesh_graph(verbose=False)
        self.g2m_graph = self.graph.create_g2m_graph(verbose=False)
        self.m2g_graph = self.graph.create_m2g_graph(verbose=False)

        # cache edge/node features of the three graphs
        self.g2m_edata = self.g2m_graph.edata["x"]
        self.m2g_edata = self.m2g_graph.edata["x"]
        self.mesh_edata = self.mesh_graph.edata["x"]
        self.mesh_ndata = self.mesh_graph.ndata["x"]

        # optionally convert DGL graphs to cugraph-ops CSC representations
        if use_cugraphops_encoder:
            offsets, indices, edge_ids = self.g2m_graph.adj_tensors("csc")
            n_in_nodes, n_out_nodes = (
                self.g2m_graph.num_src_nodes(),
                self.g2m_graph.num_dst_nodes(),
            )
            self.g2m_graph = CuGraphCSC(
                offsets, indices, n_in_nodes, n_out_nodes, edge_ids
            )

        if use_cugraphops_decoder:
            offsets, indices, edge_ids = self.m2g_graph.adj_tensors("csc")
            n_in_nodes, n_out_nodes = (
                self.m2g_graph.num_src_nodes(),
                self.m2g_graph.num_dst_nodes(),
            )
            self.m2g_graph = CuGraphCSC(
                offsets, indices, n_in_nodes, n_out_nodes, edge_ids
            )

        if use_cugraphops_processor:
            offsets, indices, edge_ids = self.mesh_graph.adj_tensors("csc")
            n_in_nodes, n_out_nodes = (
                self.mesh_graph.num_src_nodes(),
                self.mesh_graph.num_dst_nodes(),
            )
            self.mesh_graph = CuGraphCSC(
                offsets, indices, n_in_nodes, n_out_nodes, edge_ids
            )

        # by default: don't checkpoint at all
        self.model_checkpoint_fn = set_checkpoint_fn(False)
        self.encoder_checkpoint_fn = set_checkpoint_fn(False)
        self.decoder_checkpoint_fn = set_checkpoint_fn(False)

        # initial feature embedder
        self.encoder_embedder = GraphCastEncoderEmbedder(
            input_dim_grid_nodes=input_dim_grid_nodes,
            input_dim_mesh_nodes=input_dim_mesh_nodes,
            input_dim_edges=input_dim_edges,
            output_dim=hidden_dim,
            hidden_dim=hidden_dim,
            hidden_layers=hidden_layers,
            activation_fn=activation_fn,
            norm_type=norm_type,
            recompute_activation=recompute_activation,
        )
        self.decoder_embedder = GraphCastDecoderEmbedder(
            input_dim_edges=input_dim_edges,
            output_dim=hidden_dim,
            hidden_dim=hidden_dim,
            hidden_layers=hidden_layers,
            activation_fn=activation_fn,
            norm_type=norm_type,
            recompute_activation=recompute_activation,
        )

        # grid2mesh encoder
        self.encoder = MeshGraphEncoder(
            aggregation=aggregation,
            input_dim_src_nodes=hidden_dim,
            input_dim_dst_nodes=hidden_dim,
            input_dim_edges=hidden_dim,
            output_dim_src_nodes=hidden_dim,
            output_dim_dst_nodes=hidden_dim,
            output_dim_edges=hidden_dim,
            hidden_dim=hidden_dim,
            hidden_layers=hidden_layers,
            activation_fn=activation_fn,
            norm_type=norm_type,
            do_concat_trick=do_concat_trick,
            recompute_activation=recompute_activation,
        )

        # icosahedron processor: first and last layers are split out so that
        # encoder/decoder checkpointing can include them
        if processor_layers <= 2:
            # explicit raise instead of assert: asserts are stripped under -O
            raise ValueError("Expected at least 3 processor layers")
        self.processor_encoder = GraphCastProcessor(
            aggregation=aggregation,
            processor_layers=1,
            input_dim_nodes=hidden_dim,
            input_dim_edges=hidden_dim,
            hidden_dim=hidden_dim,
            hidden_layers=hidden_layers,
            activation_fn=activation_fn,
            norm_type=norm_type,
            do_concat_trick=do_concat_trick,
            recompute_activation=recompute_activation,
        )
        self.processor = GraphCastProcessor(
            aggregation=aggregation,
            processor_layers=processor_layers - 2,
            input_dim_nodes=hidden_dim,
            input_dim_edges=hidden_dim,
            hidden_dim=hidden_dim,
            hidden_layers=hidden_layers,
            activation_fn=activation_fn,
            norm_type=norm_type,
            do_concat_trick=do_concat_trick,
            recompute_activation=recompute_activation,
        )
        self.processor_decoder = GraphCastProcessor(
            aggregation=aggregation,
            processor_layers=1,
            input_dim_nodes=hidden_dim,
            input_dim_edges=hidden_dim,
            hidden_dim=hidden_dim,
            hidden_layers=hidden_layers,
            activation_fn=activation_fn,
            norm_type=norm_type,
            do_concat_trick=do_concat_trick,
            recompute_activation=recompute_activation,
        )

        # mesh2grid decoder
        self.decoder = MeshGraphDecoder(
            aggregation=aggregation,
            input_dim_src_nodes=hidden_dim,
            input_dim_dst_nodes=hidden_dim,
            input_dim_edges=hidden_dim,
            output_dim_dst_nodes=hidden_dim,
            output_dim_edges=hidden_dim,
            hidden_dim=hidden_dim,
            hidden_layers=hidden_layers,
            activation_fn=activation_fn,
            norm_type=norm_type,
            do_concat_trick=do_concat_trick,
            recompute_activation=recompute_activation,
        )

        # final MLP (no normalization on the output layer)
        self.finale = MeshGraphMLP(
            input_dim=hidden_dim,
            output_dim=output_dim_grid_nodes,
            hidden_dim=hidden_dim,
            hidden_layers=hidden_layers,
            activation_fn=activation_fn,
            norm_type=None,
            recompute_activation=recompute_activation,
        )

    def set_checkpoint_model(self, checkpoint_flag: bool):
        """Sets the checkpoint function for the entire model.

        If `checkpoint_flag` is True, the whole forward pass is wrapped in a
        single checkpoint from PyTorch's `torch.utils.checkpoint`, and all the
        other (finer-grained) gradient checkpointings are disabled. Otherwise,
        an identity wrapper is used that simply calls the forward function.

        Parameters
        ----------
        checkpoint_flag : bool
            Whether to use checkpointing for gradient computation. Checkpointing
            can reduce memory usage during backpropagation at the cost of
            increased computation time.
        """
        # force a single checkpoint for the whole model
        self.model_checkpoint_fn = set_checkpoint_fn(checkpoint_flag)
        if checkpoint_flag:
            self.processor.set_checkpoint_segments(-1)
            self.encoder_checkpoint_fn = set_checkpoint_fn(False)
            self.decoder_checkpoint_fn = set_checkpoint_fn(False)

    def set_checkpoint_processor(self, checkpoint_segments: int):
        """Sets the checkpoint configuration for the processor, excluding its
        first and last layers.

        If `checkpoint_segments` is positive, the processor uses gradient
        checkpointing from PyTorch's `torch.utils.checkpoint`, with the number
        of checkpointing segments equal to `checkpoint_segments`. Otherwise,
        the processor runs without checkpointing.

        Parameters
        ----------
        checkpoint_segments : int
            Number of checkpointing segments for gradient computation. Checkpointing
            can reduce memory usage during backpropagation at the cost of
            increased computation time.
        """
        self.processor.set_checkpoint_segments(checkpoint_segments)

    def set_checkpoint_encoder(self, checkpoint_flag: bool):
        """Sets the checkpoint function for the embedder, encoder, and the
        first layer of the processor.

        If `checkpoint_flag` is True, this part of the forward pass is wrapped
        in a checkpoint from PyTorch's `torch.utils.checkpoint`. Otherwise, an
        identity wrapper is used that simply calls the forward function.

        Parameters
        ----------
        checkpoint_flag : bool
            Whether to use checkpointing for gradient computation. Checkpointing
            can reduce memory usage during backpropagation at the cost of
            increased computation time.
        """
        self.encoder_checkpoint_fn = set_checkpoint_fn(checkpoint_flag)

    def set_checkpoint_decoder(self, checkpoint_flag: bool):
        """Sets the checkpoint function for the last layer of the processor,
        the decoder, and the final MLP.

        If `checkpoint_flag` is True, this part of the forward pass is wrapped
        in a checkpoint from PyTorch's `torch.utils.checkpoint`. Otherwise, an
        identity wrapper is used that simply calls the forward function.

        Parameters
        ----------
        checkpoint_flag : bool
            Whether to use checkpointing for gradient computation. Checkpointing
            can reduce memory usage during backpropagation at the cost of
            increased computation time.
        """
        self.decoder_checkpoint_fn = set_checkpoint_fn(checkpoint_flag)

    def encoder_forward(
        self,
        grid_nfeat: Tensor,
    ) -> Tensor:
        """Forward method for the embedder, encoder, and the first layer of the
        processor.

        Parameters
        ----------
        grid_nfeat : Tensor
            Node features for the latitude-longitude grid.

        Returns
        -------
        mesh_efeat_processed: Tensor
            Processed edge features for the multimesh.
        mesh_nfeat_processed: Tensor
            Processed node features for the multimesh.
        grid_nfeat_encoded: Tensor
            Encoded node features for the latitude-longitude grid.
        """
        # embed graph features
        (
            grid_nfeat_embedded,
            mesh_nfeat_embedded,
            g2m_efeat_embedded,
            mesh_efeat_embedded,
        ) = self.encoder_embedder(
            grid_nfeat,
            self.mesh_ndata,
            self.g2m_edata,
            self.mesh_edata,
        )

        # encode lat/lon to multimesh
        grid_nfeat_encoded, mesh_nfeat_encoded = self.encoder(
            g2m_efeat_embedded,
            grid_nfeat_embedded,
            mesh_nfeat_embedded,
            self.g2m_graph,
        )

        # process multimesh graph (first processor layer)
        mesh_efeat_processed, mesh_nfeat_processed = self.processor_encoder(
            mesh_efeat_embedded,
            mesh_nfeat_encoded,
            self.mesh_graph,
        )

        return mesh_efeat_processed, mesh_nfeat_processed, grid_nfeat_encoded

    def decoder_forward(
        self,
        mesh_efeat_processed: Tensor,
        mesh_nfeat_processed: Tensor,
        grid_nfeat_encoded: Tensor,
    ) -> Tensor:
        """Forward method for the last layer of the processor, the decoder,
        and the final MLP.

        Parameters
        ----------
        mesh_efeat_processed : Tensor
            Multimesh edge features processed by the processor.
        mesh_nfeat_processed : Tensor
            Multi-mesh node features processed by the processor.
        grid_nfeat_encoded : Tensor
            The encoded node features for the latitude-longitude grid.

        Returns
        -------
        grid_nfeat_finale: Tensor
            The final node features for the latitude-longitude grid.
        """
        # process multimesh graph (last processor layer; edge features dropped)
        _, mesh_nfeat_processed = self.processor_decoder(
            mesh_efeat_processed,
            mesh_nfeat_processed,
            self.mesh_graph,
        )

        m2g_efeat_embedded = self.decoder_embedder(self.m2g_edata)

        # decode multimesh to lat/lon
        grid_nfeat_decoded = self.decoder(
            m2g_efeat_embedded, grid_nfeat_encoded, mesh_nfeat_processed, self.m2g_graph
        )

        # map to the target output dimension
        grid_nfeat_finale = self.finale(
            grid_nfeat_decoded,
        )

        return grid_nfeat_finale

    def custom_forward(self, grid_nfeat: Tensor) -> Tensor:
        """GraphCast forward method with support for gradient checkpointing.

        Parameters
        ----------
        grid_nfeat : Tensor
            Node features of the latitude-longitude graph.

        Returns
        -------
        grid_nfeat_finale: Tensor
            Predicted node features of the latitude-longitude graph.
        """
        (
            mesh_efeat_processed,
            mesh_nfeat_processed,
            grid_nfeat_encoded,
        ) = self.encoder_checkpoint_fn(
            self.encoder_forward,
            grid_nfeat,
            use_reentrant=False,
            preserve_rng_state=False,
        )

        # checkpoint of processor is done in the processor itself
        mesh_efeat_processed, mesh_nfeat_processed = self.processor(
            mesh_efeat_processed,
            mesh_nfeat_processed,
            self.mesh_graph,
        )

        grid_nfeat_finale = self.decoder_checkpoint_fn(
            self.decoder_forward,
            mesh_efeat_processed,
            mesh_nfeat_processed,
            grid_nfeat_encoded,
            use_reentrant=False,
            preserve_rng_state=False,
        )

        return grid_nfeat_finale

    def forward(
        self,
        grid_nfeat: Tensor,
    ) -> Tensor:
        """Reshape the input, run the (optionally checkpointed) model, and
        reshape the output back to [N, C, H, W]."""
        invar = self.prepare_input(grid_nfeat)
        outvar = self.model_checkpoint_fn(
            self.custom_forward,
            invar,
            use_reentrant=False,
            preserve_rng_state=False,
        )
        return self.prepare_output(outvar)

    def prepare_input(self, invar: Tensor) -> Tensor:
        """Prepares the input to the model in the required shape.

        Parameters
        ----------
        invar : Tensor
            Input in the shape [N, C, H, W].

        Returns
        -------
        Tensor
            Reshaped input of shape [H*W, C(+static)].

        Raises
        ------
        ValueError
            If the batch dimension is not 1.
        """
        # explicit raise instead of assert: asserts are stripped under -O
        if invar.size(0) != 1:
            raise ValueError("GraphCast does not support batch size > 1")
        # concat static data
        if self.has_static_data:
            invar = torch.concat((invar, self.static_data), dim=1)
        # flatten spatial dims and move channels last: [C, H*W] -> [H*W, C]
        invar = invar[0].view(self.input_dim_grid_nodes, -1).permute(1, 0)
        return invar

    def prepare_output(self, outvar: Tensor) -> Tensor:
        """Prepares the output of the model in the shape [N, C, H, W].

        Parameters
        ----------
        outvar : Tensor
            Output of the final MLP of the model, shape [H*W, C].

        Returns
        -------
        Tensor
            The reshaped output of the model, shape [1, C, H, W].
        """
        outvar = outvar.permute(1, 0)
        outvar = outvar.view(self.output_dim_grid_nodes, *self.input_res)
        outvar = torch.unsqueeze(outvar, dim=0)
        return outvar

    def to(self, *args: Any, **kwargs: Any) -> "GraphCastNet":
        """Moves the object to the specified device, dtype, or format.

        This method moves the object and its underlying graph and graph features
        to the specified device, dtype, or format, and returns the updated object.

        Parameters
        ----------
        *args : Any
            Positional arguments to be passed to the `torch._C._nn._parse_to` function.
        **kwargs : Any
            Keyword arguments to be passed to the `torch._C._nn._parse_to` function.

        Returns
        -------
        GraphCastNet
            The updated object after moving to the specified device, dtype, or format.
        """
        self = super(GraphCastNet, self).to(*args, **kwargs)

        # graph features are plain tensors, not registered buffers, so they
        # must be moved explicitly
        self.g2m_edata = self.g2m_edata.to(*args, **kwargs)
        self.m2g_edata = self.m2g_edata.to(*args, **kwargs)
        self.mesh_ndata = self.mesh_ndata.to(*args, **kwargs)
        self.mesh_edata = self.mesh_edata.to(*args, **kwargs)
        if self.has_static_data:
            self.static_data = self.static_data.to(*args, **kwargs)

        # graphs only move across devices, never change dtype
        device, _, _, _ = torch._C._nn._parse_to(*args, **kwargs)
        self.g2m_graph = self.g2m_graph.to(device)
        self.mesh_graph = self.mesh_graph.to(device)
        self.m2g_graph = self.m2g_graph.to(device)

        return self
|
modulus-main
|
modulus/models/graphcast/graph_cast_net.py
|
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .meshgraphnet import MeshGraphNet
|
modulus-main
|
modulus/models/meshgraphnet/__init__.py
|
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from torch import Tensor
import torch.nn as nn
import dgl
# Fail fast with an actionable message when DGL is missing; catch only
# ImportError (a bare except would also swallow KeyboardInterrupt/SystemExit)
# and chain the original error so the real cause stays visible.
try:
    from dgl import DGLGraph
except ImportError as err:
    raise ImportError(
        "Mesh Graph Net requires the DGL library. Install the "
        + "desired CUDA version at: \n https://www.dgl.ai/pages/start.html"
    ) from err
from typing import Callable, Tuple, List, Union
from dataclasses import dataclass
import modulus
from modulus.models.meta import ModelMetaData
from modulus.models.module import Module
from modulus.models.gnn_layers.utils import set_checkpoint_fn, CuGraphCSC
from modulus.models.gnn_layers.mesh_graph_mlp import MeshGraphMLP
from modulus.models.gnn_layers.mesh_edge_block import MeshEdgeBlock
from modulus.models.gnn_layers.mesh_node_block import MeshNodeBlock
@dataclass
class MetaData(ModelMetaData):
    """Model metadata for MeshGraphNet: capability flags consumed by the
    Modulus ``Module`` machinery."""

    name: str = "MeshGraphNet"
    # Optimization, no JIT as DGLGraph causes trouble
    jit: bool = False
    cuda_graphs: bool = False
    amp_cpu: bool = False
    amp_gpu: bool = True  # autocast supported on GPU only
    torch_fx: bool = False
    # Inference
    onnx: bool = False  # ONNX export not supported
    # Physics informed
    func_torch: bool = True
    auto_grad: bool = True
class MeshGraphNet(Module):
    """MeshGraphNet network architecture

    Encode-process-decode GNN: node and edge features are lifted into a shared
    latent width, run through a stack of message-passing blocks, and the node
    features are projected to the output dimension.

    Parameters
    ----------
    input_dim_nodes : int
        Number of node features
    input_dim_edges : int
        Number of edge features
    output_dim : int
        Number of outputs
    processor_size : int, optional
        Number of message passing blocks, by default 15
    num_layers_node_processor : int, optional
        Number of MLP layers for processing nodes in each message passing block, by default 2
    num_layers_edge_processor : int, optional
        Number of MLP layers for processing edge features in each message passing block, by default 2
    hidden_dim_processor : int, optional
        Hidden layer size for the message passing blocks, by default 128
    hidden_dim_node_encoder : int, optional
        Hidden layer size for the node feature encoder, by default 128
    num_layers_node_encoder : int, optional
        Number of MLP layers for the node feature encoder, by default 2
    hidden_dim_edge_encoder : int, optional
        Hidden layer size for the edge feature encoder, by default 128
    num_layers_edge_encoder : int, optional
        Number of MLP layers for the edge feature encoder, by default 2
    hidden_dim_node_decoder : int, optional
        Hidden layer size for the node feature decoder, by default 128
    num_layers_node_decoder : int, optional
        Number of MLP layers for the node feature decoder, by default 2
    aggregation: str, optional
        Message aggregation type, by default "sum"
    do_concat_trick : bool, default=False
        Whether to replace concat+MLP with MLP+idx+sum
    num_processor_checkpoint_segments: int, optional
        Number of processor segments for gradient checkpointing, by default 0 (checkpointing disabled)

    Example
    -------
    >>> model = modulus.models.meshgraphnet.MeshGraphNet(
    ...     input_dim_nodes=4,
    ...     input_dim_edges=3,
    ...     output_dim=2,
    ... )
    >>> graph = dgl.rand_graph(10, 5)
    >>> node_features = torch.randn(10, 4)
    >>> edge_features = torch.randn(5, 3)
    >>> output = model(node_features, edge_features, graph)
    >>> output.size()
    torch.Size([10, 2])

    Note
    ----
    Reference: Pfaff, Tobias, et al. "Learning mesh-based simulation with graph networks."
    arXiv preprint arXiv:2010.03409 (2020).
    """

    def __init__(
        self,
        input_dim_nodes: int,
        input_dim_edges: int,
        output_dim: int,
        processor_size: int = 15,
        num_layers_node_processor: int = 2,
        num_layers_edge_processor: int = 2,
        hidden_dim_processor: int = 128,
        hidden_dim_node_encoder: int = 128,
        num_layers_node_encoder: int = 2,
        hidden_dim_edge_encoder: int = 128,
        num_layers_edge_encoder: int = 2,
        hidden_dim_node_decoder: int = 128,
        num_layers_node_decoder: int = 2,
        aggregation: str = "sum",
        do_concat_trick: bool = False,
        num_processor_checkpoint_segments: int = 0,
    ):
        super().__init__(meta=MetaData())

        # encoder: lift raw edge features to the latent processor width
        self.edge_encoder = MeshGraphMLP(
            input_dim_edges,
            output_dim=hidden_dim_processor,
            hidden_dim=hidden_dim_edge_encoder,
            hidden_layers=num_layers_edge_encoder,
            activation_fn=nn.ReLU(),
            norm_type="LayerNorm",
            recompute_activation=False,
        )
        # encoder: lift raw node features to the latent processor width
        self.node_encoder = MeshGraphMLP(
            input_dim_nodes,
            output_dim=hidden_dim_processor,
            hidden_dim=hidden_dim_node_encoder,
            hidden_layers=num_layers_node_encoder,
            activation_fn=nn.ReLU(),
            norm_type="LayerNorm",
            recompute_activation=False,
        )
        # decoder: project processed node features to the output dimension
        # (no normalization on the final layer)
        self.node_decoder = MeshGraphMLP(
            hidden_dim_processor,
            output_dim=output_dim,
            hidden_dim=hidden_dim_node_decoder,
            hidden_layers=num_layers_node_decoder,
            activation_fn=nn.ReLU(),
            norm_type=None,
            recompute_activation=False,
        )
        # message-passing core: alternating edge/node update blocks
        self.processor = MeshGraphNetProcessor(
            processor_size=processor_size,
            input_dim_node=hidden_dim_processor,
            input_dim_edge=hidden_dim_processor,
            num_layers_node=num_layers_node_processor,
            num_layers_edge=num_layers_edge_processor,
            aggregation=aggregation,
            norm_type="LayerNorm",
            activation_fn=nn.ReLU(),
            do_concat_trick=do_concat_trick,
            num_processor_checkpoint_segments=num_processor_checkpoint_segments,
        )

    def forward(
        self,
        node_features: Tensor,
        edge_features: Tensor,
        graph: Union[DGLGraph, List[DGLGraph]],
    ) -> Tensor:
        """Encode node/edge features, run the processor, and decode the node
        features to the output dimension. Edge features are only used inside
        the processor and are not returned."""
        edge_features = self.edge_encoder(edge_features)
        node_features = self.node_encoder(node_features)
        x = self.processor(node_features, edge_features, graph)
        x = self.node_decoder(x)
        return x
class MeshGraphNetProcessor(nn.Module):
    """MeshGraphNet processor block

    Stack of ``processor_size`` message-passing steps, each consisting of an
    edge-update block followed by a node-update block, with optional gradient
    checkpointing over contiguous segments of layers.

    Parameters
    ----------
    processor_size : int, optional
        Number of message passing steps (edge+node block pairs), by default 15
    input_dim_node : int, optional
        Latent node feature dimension, by default 128
    input_dim_edge : int, optional
        Latent edge feature dimension, by default 128
    num_layers_node : int, optional
        Number of MLP layers in each node block, by default 2
    num_layers_edge : int, optional
        Number of MLP layers in each edge block, by default 2
    aggregation : str, optional
        Message aggregation type, by default "sum"
    norm_type : str, optional
        Normalization type, by default "LayerNorm"
    activation_fn : nn.Module, optional
        Activation module, by default ``nn.ReLU()`` (stateless, so the shared
        default instance is safe)
    do_concat_trick : bool, optional
        Whether to replace concat+MLP with MLP+idx+sum, by default False
    num_processor_checkpoint_segments : int, optional
        Number of checkpointing segments, by default 0 (disabled)
    """

    def __init__(
        self,
        processor_size: int = 15,
        input_dim_node: int = 128,
        input_dim_edge: int = 128,
        num_layers_node: int = 2,
        num_layers_edge: int = 2,
        aggregation: str = "sum",
        norm_type: str = "LayerNorm",
        activation_fn: nn.Module = nn.ReLU(),
        do_concat_trick: bool = False,
        num_processor_checkpoint_segments: int = 0,
    ):
        super().__init__()
        self.processor_size = processor_size
        self.num_processor_checkpoint_segments = num_processor_checkpoint_segments

        edge_block_invars = (
            input_dim_node,
            input_dim_edge,
            input_dim_edge,
            input_dim_edge,
            num_layers_edge,
            activation_fn,
            norm_type,
            do_concat_trick,
            False,
        )
        node_block_invars = (
            aggregation,
            input_dim_node,
            input_dim_edge,
            input_dim_edge,
            input_dim_edge,
            num_layers_node,
            activation_fn,
            norm_type,
            False,
        )

        # NOTE: all edge blocks are constructed before all node blocks and
        # only interleaved afterwards, preserving the original parameter
        # initialization order (which is RNG-sensitive).
        edge_blocks = [
            MeshEdgeBlock(*edge_block_invars) for _ in range(self.processor_size)
        ]
        node_blocks = [
            MeshNodeBlock(*node_block_invars) for _ in range(self.processor_size)
        ]
        # interleave: edge_0, node_0, edge_1, node_1, ...
        layers = [block for pair in zip(edge_blocks, node_blocks) for block in pair]

        self.processor_layers = nn.ModuleList(layers)
        self.num_processor_layers = len(self.processor_layers)
        self.set_checkpoint_segments(self.num_processor_checkpoint_segments)

    def set_checkpoint_segments(self, checkpoint_segments: int):
        """
        Set the number of checkpoint segments

        Parameters
        ----------
        checkpoint_segments : int
            number of checkpoint segments; non-positive disables checkpointing

        Raises
        ------
        ValueError
            if the number of processor layers is not a multiple of the number of
            checkpoint segments
        """
        if checkpoint_segments > 0:
            if self.num_processor_layers % checkpoint_segments != 0:
                raise ValueError(
                    "Processor layers must be a multiple of checkpoint_segments"
                )
            segment_size = self.num_processor_layers // checkpoint_segments
            # half-open (start, end) index pairs covering all layers
            self.checkpoint_segments = [
                (i, i + segment_size)
                for i in range(0, self.num_processor_layers, segment_size)
            ]
            self.checkpoint_fn = set_checkpoint_fn(True)
        else:
            # single segment covering everything, identity wrapper (no checkpoint)
            self.checkpoint_fn = set_checkpoint_fn(False)
            self.checkpoint_segments = [(0, self.num_processor_layers)]

    def run_function(
        self, segment_start: int, segment_end: int
    ) -> Callable[
        [Tensor, Tensor, Union[DGLGraph, List[DGLGraph]]], Tuple[Tensor, Tensor]
    ]:
        """Custom forward for gradient checkpointing

        Parameters
        ----------
        segment_start : int
            Layer index as start of the segment
        segment_end : int
            Layer index as end of the segment (exclusive)

        Returns
        -------
        Callable
            Custom forward function applying the segment's layers in order
        """
        segment = self.processor_layers[segment_start:segment_end]

        def custom_forward(
            node_features: Tensor,
            edge_features: Tensor,
            graph: Union[DGLGraph, List[DGLGraph]],
        ) -> Tuple[Tensor, Tensor]:
            """Custom forward function"""
            for module in segment:
                edge_features, node_features = module(
                    edge_features, node_features, graph
                )
            return edge_features, node_features

        return custom_forward

    @torch.jit.unused
    def forward(
        self,
        node_features: Tensor,
        edge_features: Tensor,
        graph: Union[DGLGraph, List[DGLGraph], CuGraphCSC],
    ) -> Tensor:
        """Run all checkpoint segments in order; only the final node features
        are returned (edge features are internal to the processor)."""
        for segment_start, segment_end in self.checkpoint_segments:
            edge_features, node_features = self.checkpoint_fn(
                self.run_function(segment_start, segment_end),
                node_features,
                edge_features,
                graph,
                use_reentrant=False,
                preserve_rng_state=False,
            )
        return node_features
|
modulus-main
|
modulus/models/meshgraphnet/meshgraphnet.py
|
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .pix2pix import Pix2Pix
|
modulus-main
|
modulus/models/pix2pix/__init__.py
|
# ignore_header_test
""""""
"""
Pix2Pix model. This code was modified from, https://github.com/NVIDIA/pix2pixHD
The following license is provided from their source,
Copyright (C) 2019 NVIDIA Corporation. Ting-Chun Wang, Ming-Yu Liu, Jun-Yan Zhu.
BSD License. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
THE AUTHOR DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, INCLUDING ALL
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR ANY PARTICULAR PURPOSE.
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL
DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING
OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
--------------------------- LICENSE FOR pytorch-CycleGAN-and-pix2pix ----------------
Copyright (c) 2017, Jun-Yan Zhu and Taesung Park
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
import torch
import torch.nn as nn
from typing import Union, List
from dataclasses import dataclass
import modulus
from modulus.models.layers import get_activation
from ..meta import ModelMetaData
from ..module import Module
Tensor = torch.Tensor
@dataclass
class MetaData(ModelMetaData):
    """Model metadata for Pix2Pix: capability flags consumed by the Modulus
    ``Module`` machinery."""

    name: str = "Pix2Pix"
    # Optimization
    jit: bool = True
    cuda_graphs: bool = True
    amp_cpu: bool = False  # Reflect padding not supported in bfloat16
    amp_gpu: bool = True
    # Inference
    onnx: bool = True  # ONNX export supported
    # Physics informed
    var_dim: int = 1  # variable dimension is the channel axis
    func_torch: bool = True
    auto_grad: bool = True
class Pix2Pix(Module):
    """Convolutional encoder-decoder based on pix2pix generator models.

    Note
    ----
    The pix2pix architecture supports options for 1D, 2D and 3D fields which can
    be controlled using the `dimension` parameter.

    Parameters
    ----------
    in_channels : int
        Number of input channels
    out_channels : int
        Number of output channels
    dimension : int
        Model dimensionality (supports 1, 2, 3).
    conv_layer_size : int, optional
        Latent channel size after first convolution, by default 64
    n_downsampling : int, optional
        Number of downsampling blocks, by default 3
    n_upsampling : int, optional
        Number of upsampling blocks, by default 3
    n_blocks : int, optional
        Number of residual blocks in middle of model, by default 3
    activation_fn : str, optional
        Activation function, by default "relu"
    batch_norm : bool, optional
        Batch normalization, by default False
    padding_type : str, optional
        Padding type ('reflect', 'replicate' or 'zero'), by default "reflect".
        Applies to the residual blocks only; the first/last 7x7 convolutions
        always use reflection padding (see `padding` below).

    Example
    -------
    >>> #2D convolutional encoder decoder
    >>> model = modulus.models.pix2pix.Pix2Pix(
    ... in_channels=1,
    ... out_channels=2,
    ... dimension=2,
    ... conv_layer_size=4)
    >>> input = torch.randn(4, 1, 32, 32) #(N, C, H, W)
    >>> output = model(input)
    >>> output.size()
    torch.Size([4, 2, 32, 32])

    Note
    ----
    Reference: Isola, Phillip, et al. "Image-To-Image translation with conditional
    adversarial networks" Conference on Computer Vision and Pattern Recognition, 2017.
    https://arxiv.org/abs/1611.07004
    Reference: Wang, Ting-Chun, et al. "High-Resolution image synthesis and semantic
    manipulation with conditional GANs" Conference on Computer Vision and Pattern
    Recognition, 2018. https://arxiv.org/abs/1711.11585

    Note
    ----
    Based on the implementation: https://github.com/NVIDIA/pix2pixHD
    """
    def __init__(
        self,
        in_channels: int,
        out_channels: int,
        dimension: int,
        conv_layer_size: int = 64,
        n_downsampling: int = 3,
        n_upsampling: int = 3,
        n_blocks: int = 3,
        activation_fn: str = "relu",
        batch_norm: bool = False,
        padding_type: str = "reflect",
    ):
        assert (
            n_blocks >= 0 and n_downsampling >= 0 and n_upsampling >= 0
        ), "Invalid arch params"
        assert padding_type in ["reflect", "zero", "replicate"], "Invalid padding type"
        super().__init__(meta=MetaData())
        # NOTE(review): a single activation module instance is appended at every
        # stage below. Fine for stateless activations (relu etc.), but a
        # parametric activation (e.g. PReLU) would share its parameters across
        # all stages -- confirm this is intended.
        activation = get_activation(activation_fn)
        # set dimension-appropriate padding / conv / transpose-conv / norm ops
        if dimension == 1:
            padding = nn.ReflectionPad1d(3)
            conv = nn.Conv1d
            trans_conv = nn.ConvTranspose1d
            norm = nn.BatchNorm1d
        elif dimension == 2:
            padding = nn.ReflectionPad2d(3)
            conv = nn.Conv2d
            trans_conv = nn.ConvTranspose2d
            norm = nn.BatchNorm2d
        elif dimension == 3:
            padding = nn.ReflectionPad3d(3)
            conv = nn.Conv3d
            trans_conv = nn.ConvTranspose3d
            norm = nn.BatchNorm3d
        else:
            raise NotImplementedError(
                f"Pix2Pix only supported dimensions 1, 2, 3. Got {dimension}"
            )
        # Stem: reflection-pad 3 + 7x7 conv keeps the spatial size unchanged.
        model = [
            padding,
            conv(in_channels, conv_layer_size, kernel_size=7, padding=0),
        ]
        if batch_norm:
            model.append(norm(conv_layer_size))
        model.append(activation)
        ### downsample: each stage halves spatial size and doubles channels
        for i in range(n_downsampling):
            mult = 2**i
            model.append(
                conv(
                    conv_layer_size * mult,
                    conv_layer_size * mult * 2,
                    kernel_size=3,
                    stride=2,
                    padding=1,
                )
            )
            if batch_norm:
                model.append(norm(conv_layer_size * mult * 2))
            model.append(activation)
        ### resnet blocks at the coarsest resolution
        mult = 2**n_downsampling
        for i in range(n_blocks):
            model += [
                ResnetBlock(
                    dimension,
                    conv_layer_size * mult,
                    padding_type=padding_type,
                    activation=activation,
                    use_batch_norm=batch_norm,
                )
            ]
        ### upsample: mirror of the downsampling path (halves channels per stage)
        for i in range(n_downsampling):
            mult = 2 ** (n_downsampling - i)
            model.append(
                trans_conv(
                    int(conv_layer_size * mult),
                    int(conv_layer_size * mult / 2),
                    kernel_size=3,
                    stride=2,
                    padding=1,
                    output_padding=1,
                )
            )
            if batch_norm:
                model.append(norm(int(conv_layer_size * mult / 2)))
            model.append(activation)
        # super-resolution layers: extra upsampling beyond the downsample count
        # (each doubles spatial size at constant channel width)
        for i in range(max([0, n_upsampling - n_downsampling])):
            model.append(
                trans_conv(
                    int(conv_layer_size),
                    int(conv_layer_size),
                    kernel_size=3,
                    stride=2,
                    padding=1,
                    output_padding=1,
                )
            )
            if batch_norm:
                model.append(norm(conv_layer_size))
            model.append(activation)
        # Head: reflection-pad 3 + 7x7 conv maps back to out_channels.
        model += [
            padding,
            conv(conv_layer_size, out_channels, kernel_size=7, padding=0),
        ]
        self.model = nn.Sequential(*model)
    def forward(self, input: Tensor) -> Tensor:
        """Run the encoder-decoder over `input` (N, C, ...spatial)."""
        y = self.model(input)
        return y
class ResnetBlock(nn.Module):
    """A simple ResNet block: ``out = x + conv_block(x)``.

    The inner ``conv_block`` is two 3x3 convolutions (each "same"-padded via the
    requested padding mode) with the activation between them and optional batch
    normalization after each convolution.

    Parameters
    ----------
    dimension : int
        Model dimensionality (supports 1, 2, 3).
    channels : int
        Number of feature channels (input and output of the block).
    padding_type : str, optional
        Padding type ('reflect', 'replicate' or 'zero'), by default "reflect"
    activation : nn.Module, optional
        Activation function, by default nn.ReLU()
    use_batch_norm : bool, optional
        Batch normalization, by default False
    use_dropout : bool, optional
        Accepted for interface compatibility; currently unused.
    """
    def __init__(
        self,
        dimension: int,
        channels: int,
        padding_type: str = "reflect",
        activation: nn.Module = nn.ReLU(),
        use_batch_norm: bool = False,
        use_dropout: bool = False,
    ):
        super().__init__()
        assert padding_type in [
            "reflect",
            "zero",
            "replicate",
        ], f"Invalid padding type {padding_type}"
        if dimension not in (1, 2, 3):
            raise NotImplementedError(
                f"Pix2Pix ResnetBlock only supported dimensions 1, 2, 3. Got {dimension}"
            )
        # Dimension-indexed lookup tables replace the if/elif ladder.
        conv = {1: nn.Conv1d, 2: nn.Conv2d, 3: nn.Conv3d}[dimension]
        norm = {1: nn.BatchNorm1d, 2: nn.BatchNorm2d, 3: nn.BatchNorm3d}[dimension]
        if padding_type == "reflect":
            pad_cls = {
                1: nn.ReflectionPad1d,
                2: nn.ReflectionPad2d,
                3: nn.ReflectionPad3d,
            }[dimension]
            pad_layer = pad_cls(1)
        elif padding_type == "replicate":
            pad_cls = {
                1: nn.ReplicationPad1d,
                2: nn.ReplicationPad2d,
                3: nn.ReplicationPad3d,
            }[dimension]
            pad_layer = pad_cls(1)
        else:
            pad_layer = None  # zero padding handled by the conv itself
        conv_padding = 1 if pad_layer is None else 0
        layers = []
        # conv -> (bn) -> activation
        if pad_layer is not None:
            layers.append(pad_layer)
        layers.append(conv(channels, channels, kernel_size=3, padding=conv_padding))
        if use_batch_norm:
            layers.append(norm(channels))
        layers.append(activation)
        # conv -> (bn), no trailing activation
        if pad_layer is not None:
            layers.append(pad_layer)
        layers.append(conv(channels, channels, kernel_size=3, padding=conv_padding))
        if use_batch_norm:
            layers.append(norm(channels))
        self.conv_block = nn.Sequential(*layers)
    def forward(self, x: Tensor) -> Tensor:
        """Apply the block with a skip connection."""
        return x + self.conv_block(x)
|
modulus-main
|
modulus/models/pix2pix/pix2pix.py
|
# ignore_header_test
""""""
"""
SRResNet model. This code was modified from,
https://github.com/sgrvinod/a-PyTorch-Tutorial-to-Super-Resolution
The following license is provided from their source,
MIT License
Copyright (c) 2020 Sagar Vinodababu
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import torch
from torch import nn
import math
from typing import Union, List, Any
from dataclasses import dataclass
import modulus
from modulus.models.layers import get_activation
from ..meta import ModelMetaData
from ..module import Module
Tensor = torch.Tensor
@dataclass
class MetaData(ModelMetaData):
    """Capability/optimization flags for :class:`SRResNet`, consumed by the
    Modulus ``Module`` base class (passed via ``super().__init__(meta=...)``)."""
    name: str = "SuperResolution"
    # Optimization
    jit: bool = True
    cuda_graphs: bool = False  # TODO: Investigate this
    amp_cpu: bool = False
    amp_gpu: bool = False
    # Inference
    onnx: bool = True
    # Physics informed
    var_dim: int = 1
    func_torch: bool = True
    auto_grad: bool = True
class SRResNet(Module):
    """3D convolutional super-resolution network

    Parameters
    ----------
    in_channels : int
        Number of input channels
    out_channels: int
        Number of output channels
    large_kernel_size : int, optional
        convolutional kernel size for first and last convolution, by default 7
    small_kernel_size : int, optional
        convolutional kernel size for internal convolutions, by default 3
    conv_layer_size : int, optional
        Latent channel size, by default 32
    n_resid_blocks : int, optional
        Number of residual blocks before , by default 8
    scaling_factor : int, optional
        Scaling factor to increase the output feature size
        compared to the input (2, 4, or 8), by default 8
    activation_fn : str, optional
        Activation function, by default "prelu"

    Example
    -------
    >>> #3D convolutional encoder decoder
    >>> model = modulus.models.srrn.SRResNet(
    ... in_channels=1,
    ... out_channels=2,
    ... conv_layer_size=4,
    ... scaling_factor=2)
    >>> input = torch.randn(4, 1, 8, 8, 8) #(N, C, D, H, W)
    >>> output = model(input)
    >>> output.size()
    torch.Size([4, 2, 16, 16, 16])

    Note
    ----
    Based on the implementation:
    https://github.com/sgrvinod/a-PyTorch-Tutorial-to-Super-Resolution
    """
    def __init__(
        self,
        in_channels: int,
        out_channels: int,
        large_kernel_size: int = 7,
        small_kernel_size: int = 3,
        conv_layer_size: int = 32,
        n_resid_blocks: int = 8,
        scaling_factor: int = 8,
        activation_fn: str = "prelu",
    ):
        super().__init__(meta=MetaData())
        self.var_dim = 1
        # Resolve the activation name to an nn.Module (rebinds the parameter).
        activation_fn = get_activation(activation_fn)
        # Scaling factor must be 2, 4, or 8
        scaling_factor = int(scaling_factor)
        assert scaling_factor in {2, 4, 8}, "The scaling factor must be 2, 4, or 8!"
        # The first convolutional block
        self.conv_block1 = ConvolutionalBlock3d(
            in_channels=in_channels,
            out_channels=conv_layer_size,
            kernel_size=large_kernel_size,
            batch_norm=False,
            activation_fn=activation_fn,
        )
        # A sequence of n_resid_blocks residual blocks,
        # each containing a skip-connection across the block
        self.residual_blocks = nn.Sequential(
            *[
                ResidualConvBlock3d(
                    n_layers=2,
                    kernel_size=small_kernel_size,
                    conv_layer_size=conv_layer_size,
                    activation_fn=activation_fn,
                )
                for i in range(n_resid_blocks)
            ]
        )
        # Another convolutional block (no activation; feeds the long skip)
        self.conv_block2 = ConvolutionalBlock3d(
            in_channels=conv_layer_size,
            out_channels=conv_layer_size,
            kernel_size=small_kernel_size,
            batch_norm=True,
        )
        # Upscaling is done by sub-pixel convolution,
        # with each such block upscaling by a factor of 2
        n_subpixel_convolution_blocks = int(math.log2(scaling_factor))
        self.subpixel_convolutional_blocks = nn.Sequential(
            *[
                SubPixel_ConvolutionalBlock3d(
                    kernel_size=small_kernel_size,
                    conv_layer_size=conv_layer_size,
                    scaling_factor=2,
                )
                for i in range(n_subpixel_convolution_blocks)
            ]
        )
        # The last convolutional block
        self.conv_block3 = ConvolutionalBlock3d(
            in_channels=conv_layer_size,
            out_channels=out_channels,
            kernel_size=large_kernel_size,
            batch_norm=False,
        )
    def forward(self, in_vars: Tensor) -> Tensor:
        """Super-resolve `in_vars` of shape (N, C_in, D, H, W)."""
        output = self.conv_block1(in_vars)  # (N, n_channels, d, h, w)
        residual = output  # saved for the long skip connection
        output = self.residual_blocks(output)  # (N, n_channels, d, h, w)
        output = self.conv_block2(output)  # (N, n_channels, d, h, w)
        output = output + residual  # long skip around all residual blocks
        output = self.subpixel_convolutional_blocks(
            output
        )  # (N, n_channels, d * scaling factor, h * scaling factor, w * scaling factor)
        output = self.conv_block3(
            output
        )  # (N, out_channels, d * scaling factor, h * scaling factor, w * scaling factor)
        return output
class ConvolutionalBlock3d(nn.Module):
    """3D convolutional block: conv -> optional batch norm -> activation.

    The convolution uses ``padding=kernel_size // 2`` so odd kernel sizes keep
    the spatial size unchanged (for stride 1).

    Parameters
    ----------
    in_channels : int
        Input channels
    out_channels : int
        Output channels
    kernel_size : int
        Kernel size
    stride : int, optional
        Convolutional stride, by default 1
    batch_norm : bool, optional
        Use batchnorm, by default False
    activation_fn : nn.Module, optional
        Activation applied after the conv (and batch norm), by default
        nn.Identity()
    """
    def __init__(
        self,
        in_channels: int,
        out_channels: int,
        kernel_size: int,
        stride: int = 1,
        batch_norm: bool = False,  # TODO set the train/eval model context
        activation_fn: nn.Module = nn.Identity(),
    ):
        super().__init__()
        # A container that will hold the layers in this convolutional block
        layers = list()
        # A convolutional layer
        layers.append(
            nn.Conv3d(
                in_channels=in_channels,
                out_channels=out_channels,
                kernel_size=kernel_size,
                stride=stride,
                padding=kernel_size // 2,
            )
        )
        # A batch normalization (BN) layer, if wanted.
        # FIX: was `if batch_norm is True:` -- identity comparison to True is
        # discouraged (PEP 8) and silently skipped BN for truthy non-bool values.
        if batch_norm:
            layers.append(nn.BatchNorm3d(num_features=out_channels))
        self.activation_fn = activation_fn
        # Put together the convolutional block as a sequence of the layers
        self.conv_block = nn.Sequential(*layers)
    def forward(self, input: Tensor) -> Tensor:
        """Apply conv (+BN) then the activation to `input` (N, C, D, H, W)."""
        output = self.activation_fn(self.conv_block(input))
        return output  # (N, out_channels, d, h, w)
class PixelShuffle3d(nn.Module):
    """3D pixel-shuffle operation.

    Rearranges a tensor of shape ``(B, C * s^3, D, H, W)`` into
    ``(B, C, D*s, H*s, W*s)``, the 3D analogue of ``nn.PixelShuffle``.

    Parameters
    ----------
    scale : int
        Factor to downscale channel count by

    Note
    ----
    Reference: http://www.multisilicon.com/blog/a25332339.html
    """
    def __init__(self, scale: int):
        super().__init__()
        self.scale = scale
    def forward(self, input: Tensor) -> Tensor:
        """Shuffle channel groups of `input` into the three spatial dims."""
        s = self.scale
        batch, channels, depth, height, width = input.size()
        out_channels = int(channels // s**3)
        # Split the channel axis into (out_channels, s, s, s) ...
        expanded = input.contiguous().view(
            batch, out_channels, s, s, s, depth, height, width
        )
        # ... then interleave each scale axis behind its spatial axis.
        shuffled = expanded.permute(0, 1, 5, 2, 6, 3, 7, 4).contiguous()
        return shuffled.view(
            batch, out_channels, depth * s, height * s, width * s
        )
class SubPixel_ConvolutionalBlock3d(nn.Module):
    """Convolutional block with a 3D pixel-shuffle upsampling step.

    A conv expands channels by ``scaling_factor**3``, a pixel shuffle trades
    those channels for spatial resolution, and a PReLU activates the result.

    Parameters
    ----------
    kernel_size : int, optional
        Kernel size, by default 3
    conv_layer_size : int, optional
        Latent channel size, by default 64
    scaling_factor : int, optional
        Pixel shuffle scaling factor, by default 2
    """
    def __init__(
        self, kernel_size: int = 3, conv_layer_size: int = 64, scaling_factor: int = 2
    ):
        super().__init__()
        # Channel-expanding conv: out channels = conv_layer_size * s^3 so the
        # shuffle can fold them into a factor-s upscale of each spatial dim.
        self.conv = nn.Conv3d(
            in_channels=conv_layer_size,
            out_channels=conv_layer_size * (scaling_factor**3),
            kernel_size=kernel_size,
            padding=kernel_size // 2,
        )
        self.pixel_shuffle = PixelShuffle3d(scaling_factor)
        self.prelu = nn.PReLU()
    def forward(self, input: Tensor) -> Tensor:
        """Upscale `input` (N, C, D, H, W) by the block's scaling factor."""
        expanded = self.conv(input)  # (N, C * s^3, d, h, w)
        upscaled = self.pixel_shuffle(expanded)  # (N, C, d*s, h*s, w*s)
        return self.prelu(upscaled)
class ResidualConvBlock3d(nn.Module):
    """3D ResNet block: ``out = input + conv_layers(input)``.

    Composed of ``n_layers - 1`` activated conv blocks followed by one final
    conv block with no activation; every conv block uses batch norm.

    Parameters
    ----------
    n_layers : int, optional
        Number of convolutional layers, by default 1
    kernel_size : int, optional
        Kernel size, by default 3
    conv_layer_size : int, optional
        Latent channel size, by default 64
    activation_fn : nn.Module, optional
        Activation function, by default nn.Identity()
    """
    def __init__(
        self,
        n_layers: int = 1,
        kernel_size: int = 3,
        conv_layer_size: int = 64,
        activation_fn: nn.Module = nn.Identity(),
    ):
        super().__init__()
        # All but the last block carry the activation.
        blocks = [
            ConvolutionalBlock3d(
                in_channels=conv_layer_size,
                out_channels=conv_layer_size,
                kernel_size=kernel_size,
                batch_norm=True,
                activation_fn=activation_fn,
            )
            for _ in range(n_layers - 1)
        ]
        # The final convolutional block with no activation
        blocks.append(
            ConvolutionalBlock3d(
                in_channels=conv_layer_size,
                out_channels=conv_layer_size,
                kernel_size=kernel_size,
                batch_norm=True,
            )
        )
        self.conv_layers = nn.Sequential(*blocks)
    def forward(self, input: Tensor) -> Tensor:
        """Apply the conv stack with a skip connection; shape is preserved."""
        return input + self.conv_layers(input)
|
modulus-main
|
modulus/models/srrn/super_res_net.py
|
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .super_res_net import SRResNet
|
modulus-main
|
modulus/models/srrn/__init__.py
|
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .rnn_one2many import One2ManyRNN
from .rnn_seq2seq import Seq2SeqRNN
|
modulus-main
|
modulus/models/rnn/__init__.py
|
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
import torch.nn as nn
import modulus
from torch import Tensor
from dataclasses import dataclass
from typing import Union, List
from modulus.models.meta import ModelMetaData
from modulus.models.module import Module
from modulus.models.rnn.layers import (
_ConvLayer,
_TransposeConvLayer,
_ConvGRULayer,
_ConvResidualBlock,
)
from modulus.models.layers import get_activation
@dataclass
class MetaData(ModelMetaData):
    """Capability/optimization flags for :class:`One2ManyRNN`, consumed by the
    Modulus ``Module`` base class (passed via ``super().__init__(meta=...)``)."""
    name: str = "One2ManyRNN"
    # Optimization
    jit: bool = False
    cuda_graphs: bool = False
    amp: bool = True
    torch_fx: bool = True
    # Inference
    onnx: bool = False
    onnx_runtime: bool = False
    # Physics informed
    func_torch: bool = False
    auto_grad: bool = False
class One2ManyRNN(Module):
    """A RNN model with encoder/decoder for 2d/3d problems that provides predictions
    based on single initial condition.

    Parameters
    ----------
    input_channels : int
        Number of channels in the input
    dimension : int, optional
        Spatial dimension of the input. Only 2d and 3d are supported, by default 2
    nr_latent_channels : int, optional
        Channels for encoding/decoding, by default 512
    nr_residual_blocks : int, optional
        Number of residual blocks, by default 2
    activation_fn : str, optional
        Activation function to use, by default "relu"
    nr_downsamples : int, optional
        Number of downsamples, by default 2
    nr_tsteps : int, optional
        Time steps to predict, by default 32

    Raises
    ------
    ValueError
        If `dimension` is not 2 or 3.

    Example
    -------
    >>> model = modulus.models.rnn.One2ManyRNN(
    ... input_channels=6,
    ... dimension=2,
    ... nr_latent_channels=32,
    ... activation_fn="relu",
    ... nr_downsamples=2,
    ... nr_tsteps=16,
    ... )
    >>> input = invar = torch.randn(4, 6, 1, 16, 16) # [N, C, T, H, W]
    >>> output = model(input)
    >>> output.size()
    torch.Size([4, 6, 16, 16, 16])
    """
    def __init__(
        self,
        input_channels: int,
        dimension: int = 2,
        nr_latent_channels: int = 512,
        nr_residual_blocks: int = 2,
        activation_fn: str = "relu",
        nr_downsamples: int = 2,
        nr_tsteps: int = 32,
    ) -> None:
        super().__init__(meta=MetaData())
        self.nr_tsteps = nr_tsteps
        self.nr_residual_blocks = nr_residual_blocks
        self.nr_downsamples = nr_downsamples
        self.encoder_layers = nn.ModuleList()
        channels_out = nr_latent_channels
        # Resolve the activation name to an nn.Module (rebinds the parameter).
        activation_fn = get_activation(activation_fn)
        # check valid dimensions
        if dimension not in [2, 3]:
            raise ValueError("Only 2D and 3D spatial dimensions are supported")
        # Encoder: residual blocks; the last block of each downsample level
        # (except the final level) doubles channels and strides by 2.
        for i in range(nr_downsamples):
            for j in range(nr_residual_blocks):
                stride = 1
                if i == 0 and j == 0:
                    channels_in = input_channels
                else:
                    channels_in = channels_out
                if (j == nr_residual_blocks - 1) and (i < nr_downsamples - 1):
                    channels_out = channels_out * 2
                    stride = 2
                self.encoder_layers.append(
                    _ConvResidualBlock(
                        in_channels=channels_in,
                        out_channels=channels_out,
                        stride=stride,
                        dimension=dimension,
                        gated=True,
                        layer_normalization=False,
                        begin_activation_fn=not ((i == 0) and (j == 0)),
                        activation_fn=activation_fn,
                    )
                )
        # Convolutional GRU cell operating at the encoded resolution.
        self.rnn_layer = _ConvGRULayer(
            in_features=channels_out, hidden_size=channels_out, dimension=dimension
        )
        self.conv_layers = nn.ModuleList()
        self.decoder_layers = nn.ModuleList()
        # Decoder: per level, one transpose conv (upsample x2, halve channels)
        # followed by residual blocks; conv_layers produce per-level 1x1
        # latent-context projections.
        # NOTE(review): self.upsampling_layers is re-created each iteration and
        # left bound to the last level's list; the per-level ModuleLists are
        # retained via decoder_layers -- confirm the attribute is not relied on
        # elsewhere.
        for i in range(nr_downsamples):
            self.upsampling_layers = nn.ModuleList()
            channels_in = channels_out
            channels_out = channels_out // 2
            self.upsampling_layers.append(
                _TransposeConvLayer(
                    in_channels=channels_in,
                    out_channels=channels_out,
                    kernel_size=4,
                    stride=2,
                    dimension=dimension,
                )
            )
            for j in range(nr_residual_blocks):
                self.upsampling_layers.append(
                    _ConvResidualBlock(
                        in_channels=channels_out,
                        out_channels=channels_out,
                        stride=1,
                        dimension=dimension,
                        gated=True,
                        layer_normalization=False,
                        begin_activation_fn=not ((i == 0) and (j == 0)),
                        activation_fn=activation_fn,
                    )
                )
            self.conv_layers.append(
                _ConvLayer(
                    in_channels=channels_in,
                    out_channels=nr_latent_channels,
                    kernel_size=1,
                    stride=1,
                    dimension=dimension,
                )
            )
            self.decoder_layers.append(self.upsampling_layers)
        # 1x1 head mapping latent channels back to the input channel count.
        if dimension == 2:
            self.final_conv = nn.Conv2d(
                nr_latent_channels, input_channels, (1, 1), (1, 1), padding="valid"
            )
        else:
            # dimension is 3
            self.final_conv = nn.Conv3d(
                nr_latent_channels,
                input_channels,
                (1, 1, 1),
                (1, 1, 1),
                padding="valid",
            )
    def forward(self, x: Tensor) -> Tensor:
        """Forward pass

        Parameters
        ----------
        x : Tensor
            Expects a tensor of size [N, C, 1, H, W] for 2D or [N, C, 1, D, H, W] for 3D
            Where, N is the batch size, C is the number of channels, 1 is the number of
            input timesteps and D, H, W are spatial dimensions.

        Returns
        -------
        Tensor
            Size [N, C, T, H, W] for 2D or [N, C, T, D, H, W] for 3D.
            Where, T is the number of timesteps being predicted.
        """
        # Encoding step: only the single input timestep is encoded.
        encoded_inputs = []
        for t in range(1):
            x_in = x[:, :, t, ...]
            for layer in self.encoder_layers:
                x_in = layer(x_in)
            encoded_inputs.append(x_in)
        # RNN step: roll the GRU forward, feeding its own hidden state back in.
        rnn_output = []
        for t in range(self.nr_tsteps):
            if t == 0:
                # Zero initial hidden state.
                # NOTE(review): created with the default dtype (float32) --
                # confirm this matches x under amp/mixed precision.
                h = torch.zeros(list(x_in.size())).to(x.device)
                x_in_rnn = encoded_inputs[0]
            h = self.rnn_layer(x_in_rnn, h)
            x_in_rnn = h
            rnn_output.append(h)
        decoded_output = []
        for t in range(self.nr_tsteps):
            x_out = rnn_output[t]
            # Decoding step
            latent_context_grid = []
            for conv_layer, decoder in zip(self.conv_layers, self.decoder_layers):
                latent_context_grid.append(conv_layer(x_out))
                upsampling_layers = decoder
                for upsampling_layer in upsampling_layers:
                    x_out = upsampling_layer(x_out)
            # Add a convolution here to make the channel dimensions same as output
            # Only last latent context grid is used, but mult-resolution is available
            out = self.final_conv(latent_context_grid[-1])
            decoded_output.append(out)
        # Stack along a new time axis -> [N, C, T, ...spatial].
        decoded_output = torch.stack(decoded_output, dim=2)
        return decoded_output
|
modulus-main
|
modulus/models/rnn/rnn_one2many.py
|
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
import torch.nn as nn
import modulus
from torch import Tensor
from dataclasses import dataclass
from typing import Union, List
from modulus.models.meta import ModelMetaData
from modulus.models.module import Module
from modulus.models.rnn.layers import (
_ConvLayer,
_TransposeConvLayer,
_ConvGRULayer,
_ConvResidualBlock,
)
from modulus.models.layers import get_activation
@dataclass
class MetaData(ModelMetaData):
    """Capability/optimization flags for :class:`Seq2SeqRNN`, consumed by the
    Modulus ``Module`` base class (passed via ``super().__init__(meta=...)``)."""
    name: str = "Seq2SeqRNN"
    # Optimization
    jit: bool = False
    cuda_graphs: bool = False
    amp: bool = True
    torch_fx: bool = True
    # Inference
    onnx: bool = False
    onnx_runtime: bool = False
    # Physics informed
    func_torch: bool = False
    auto_grad: bool = False
class Seq2SeqRNN(Module):
    """A RNN model with encoder/decoder for 2d/3d problems. Given input 0 to t-1,
    predicts signal t to t + nr_tsteps

    Parameters
    ----------
    input_channels : int
        Number of channels in the input
    dimension : int, optional
        Spatial dimension of the input. Only 2d and 3d are supported, by default 2
    nr_latent_channels : int, optional
        Channels for encoding/decoding, by default 512
    nr_residual_blocks : int, optional
        Number of residual blocks, by default 2
    activation_fn : str, optional
        Activation function to use, by default "relu"
    nr_downsamples : int, optional
        Number of downsamples, by default 2
    nr_tsteps : int, optional
        Time steps to predict, by default 32

    Raises
    ------
    ValueError
        If `dimension` is not 2 or 3.

    Example
    -------
    >>> model = modulus.models.rnn.Seq2SeqRNN(
    ... input_channels=6,
    ... dimension=2,
    ... nr_latent_channels=32,
    ... activation_fn="relu",
    ... nr_downsamples=2,
    ... nr_tsteps=16,
    ... )
    >>> input = invar = torch.randn(4, 6, 16, 16, 16) # [N, C, T, H, W]
    >>> output = model(input)
    >>> output.size()
    torch.Size([4, 6, 16, 16, 16])
    """
    def __init__(
        self,
        input_channels: int,
        dimension: int = 2,
        nr_latent_channels: int = 512,
        nr_residual_blocks: int = 2,
        activation_fn: str = "relu",
        nr_downsamples: int = 2,
        nr_tsteps: int = 32,
    ) -> None:
        super().__init__(meta=MetaData())
        self.nr_tsteps = nr_tsteps
        self.nr_residual_blocks = nr_residual_blocks
        self.nr_downsamples = nr_downsamples
        self.encoder_layers = nn.ModuleList()
        channels_out = nr_latent_channels
        # Resolve the activation name to an nn.Module (rebinds the parameter).
        activation_fn = get_activation(activation_fn)
        # check valid dimensions
        if dimension not in [2, 3]:
            raise ValueError("Only 2D and 3D spatial dimensions are supported")
        # Encoder: residual blocks; the last block of each downsample level
        # (except the final level) doubles channels and strides by 2.
        for i in range(nr_downsamples):
            for j in range(nr_residual_blocks):
                stride = 1
                if i == 0 and j == 0:
                    channels_in = input_channels
                else:
                    channels_in = channels_out
                if (j == nr_residual_blocks - 1) and (i < nr_downsamples - 1):
                    channels_out = channels_out * 2
                    stride = 2
                self.encoder_layers.append(
                    _ConvResidualBlock(
                        in_channels=channels_in,
                        out_channels=channels_out,
                        stride=stride,
                        dimension=dimension,
                        gated=True,
                        layer_normalization=False,
                        begin_activation_fn=not ((i == 0) and (j == 0)),
                        activation_fn=activation_fn,
                    )
                )
        # Convolutional GRU cell operating at the encoded resolution.
        self.rnn_layer = _ConvGRULayer(
            in_features=channels_out, hidden_size=channels_out, dimension=dimension
        )
        self.conv_layers = nn.ModuleList()
        self.decoder_layers = nn.ModuleList()
        # Decoder: per level, one transpose conv (upsample x2, halve channels)
        # followed by residual blocks; conv_layers produce per-level 1x1
        # latent-context projections.
        # NOTE(review): self.upsampling_layers is re-created each iteration and
        # left bound to the last level's list; the per-level ModuleLists are
        # retained via decoder_layers -- confirm the attribute is not relied on
        # elsewhere.
        for i in range(nr_downsamples):
            self.upsampling_layers = nn.ModuleList()
            channels_in = channels_out
            channels_out = channels_out // 2
            self.upsampling_layers.append(
                _TransposeConvLayer(
                    in_channels=channels_in,
                    out_channels=channels_out,
                    kernel_size=4,
                    stride=2,
                    dimension=dimension,
                )
            )
            for j in range(nr_residual_blocks):
                self.upsampling_layers.append(
                    _ConvResidualBlock(
                        in_channels=channels_out,
                        out_channels=channels_out,
                        stride=1,
                        dimension=dimension,
                        gated=True,
                        layer_normalization=False,
                        begin_activation_fn=not ((i == 0) and (j == 0)),
                        activation_fn=activation_fn,
                    )
                )
            self.conv_layers.append(
                _ConvLayer(
                    in_channels=channels_in,
                    out_channels=nr_latent_channels,
                    kernel_size=1,
                    stride=1,
                    dimension=dimension,
                )
            )
            self.decoder_layers.append(self.upsampling_layers)
        # 1x1 head mapping latent channels back to the input channel count.
        if dimension == 2:
            self.final_conv = nn.Conv2d(
                nr_latent_channels, input_channels, (1, 1), (1, 1), padding="valid"
            )
        else:
            # dimension is 3
            self.final_conv = nn.Conv3d(
                nr_latent_channels,
                input_channels,
                (1, 1, 1),
                (1, 1, 1),
                padding="valid",
            )
    def forward(self, x: Tensor) -> Tensor:
        """Forward pass

        Parameters
        ----------
        x : Tensor
            Expects a tensor of size [N, C, T, H, W] for 2D or [N, C, T, D, H, W] for 3D
            Where, N is the batch size, C is the number of channels, T is the number of
            input timesteps and D, H, W are spatial dimensions. Currently, this
            requires input time steps to be same as predicted time steps.

        Returns
        -------
        Tensor
            Size [N, C, T, H, W] for 2D or [N, C, T, D, H, W] for 3D.
            Where, T is the number of timesteps being predicted.
        """
        # Encoding step: encode every input timestep independently.
        encoded_inputs = []
        for t in range(self.nr_tsteps):
            x_in = x[:, :, t, ...]
            for layer in self.encoder_layers:
                x_in = layer(x_in)
            encoded_inputs.append(x_in)
        # RNN step
        # encode: consume the input sequence to build up the hidden state.
        for t in range(x.size(2)):  # time dimension of the input signal
            if t == 0:
                # Zero initial hidden state.
                # NOTE(review): created with the default dtype (float32) --
                # confirm this matches x under amp/mixed precision.
                h = torch.zeros(list(x_in.size())).to(x.device)
            x_in_rnn = encoded_inputs[t]
            h = self.rnn_layer(x_in_rnn, h)
        # decode: seed with the last encoded input, then feed hidden state back.
        rnn_output = []
        for t in range(self.nr_tsteps):
            if t == 0:
                x_in_rnn = encoded_inputs[-1]
            h = self.rnn_layer(x_in_rnn, h)
            x_in_rnn = h
            rnn_output.append(h)
        decoded_output = []
        for t in range(self.nr_tsteps):
            x_out = rnn_output[t]
            # Decoding step
            latent_context_grid = []
            for conv_layer, decoder in zip(self.conv_layers, self.decoder_layers):
                latent_context_grid.append(conv_layer(x_out))
                upsampling_layers = decoder
                for upsampling_layer in upsampling_layers:
                    x_out = upsampling_layer(x_out)
            # Add a convolution here to make the channel dimensions same as output
            # Only last latent context grid is used, but mult-resolution is available
            out = self.final_conv(latent_context_grid[-1])
            decoded_output.append(out)
        # Stack along a new time axis -> [N, C, T, ...spatial].
        decoded_output = torch.stack(decoded_output, dim=2)
        return decoded_output
|
modulus-main
|
modulus/models/rnn/rnn_seq2seq.py
|
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math
import torch
import torch.nn as nn
from torch import Tensor
import torch.nn.functional as F
def _get_same_padding(x: int, k: int, s: int) -> int:
"""Function to compute "same" padding. Inspired from:
https://github.com/huggingface/pytorch-image-models/blob/0.5.x/timm/models/layers/padding.py
"""
return max(s * math.ceil(x / s) - s - x + k, 0)
class _ConvLayer(nn.Module):
    """Generalized Convolution Block with "same" padding.

    Pads the input so that the spatial output size is ``ceil(in_size / stride)``,
    then applies an N-d convolution followed by an optional activation.

    Parameters
    ----------
    in_channels : int
        Number of input channels
    out_channels : int
        Number of output channels
    dimension : int
        Dimensionality of the input, 1, 2 or 3
    kernel_size : int
        Kernel size for the convolution
    stride : int
        Stride for the convolution, by default 1
    activation_fn : nn.Module, optional
        Activation function to use, by default nn.Identity()

    Raises
    ------
    ValueError
        If ``dimension`` is not 1, 2 or 3.
    """

    def __init__(
        self,
        in_channels: int,
        out_channels: int,
        dimension: int,  # TODO check if there are ways to infer this
        kernel_size: int,
        stride: int = 1,
        activation_fn: nn.Module = nn.Identity(),
    ) -> None:
        super().__init__()
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.kernel_size = kernel_size
        self.stride = stride
        self.dimension = dimension
        self.activation_fn = activation_fn

        # Dispatch on dimensionality instead of an if/elif ladder.
        conv_types = {1: nn.Conv1d, 2: nn.Conv2d, 3: nn.Conv3d}
        if self.dimension not in conv_types:
            raise ValueError("Only 1D, 2D and 3D dimensions are supported")
        self.conv = conv_types[self.dimension](
            self.in_channels,
            self.out_channels,
            self.kernel_size,
            self.stride,
            bias=True,
        )
        self.reset_parameters()

    def exec_activation_fn(self, x: Tensor) -> Tensor:
        """Executes activation function on the input"""
        return self.activation_fn(x)

    def reset_parameters(self) -> None:
        """Initialization for network parameters"""
        nn.init.constant_(self.conv.bias, 0)
        nn.init.xavier_uniform_(self.conv.weight)

    def forward(self, x: Tensor) -> Tensor:
        input_length = len(x.size()) - 2  # exclude channel and batch dims
        assert input_length == self.dimension, "Input dimension not compatible"
        # F.pad expects pad amounts ordered from the LAST dimension forward:
        # (w_left, w_right, h_left, h_right, d_left, d_right). The previous
        # code passed (h, w) / (d, h, w) order, swapping the padding between
        # axes for non-square inputs.
        if input_length == 1:
            iw = x.size(-1)
            pad_w = _get_same_padding(iw, self.kernel_size, self.stride)
            x = F.pad(x, [pad_w // 2, pad_w - pad_w // 2], mode="constant", value=0.0)
        elif input_length == 2:
            ih, iw = x.size()[-2:]
            pad_h = _get_same_padding(ih, self.kernel_size, self.stride)
            pad_w = _get_same_padding(iw, self.kernel_size, self.stride)
            x = F.pad(
                x,
                [pad_w // 2, pad_w - pad_w // 2, pad_h // 2, pad_h - pad_h // 2],
                mode="constant",
                value=0.0,
            )
        else:
            _id, ih, iw = x.size()[-3:]
            pad_d = _get_same_padding(_id, self.kernel_size, self.stride)
            pad_h = _get_same_padding(ih, self.kernel_size, self.stride)
            pad_w = _get_same_padding(iw, self.kernel_size, self.stride)
            x = F.pad(
                x,
                [
                    pad_w // 2,
                    pad_w - pad_w // 2,
                    pad_h // 2,
                    pad_h - pad_h // 2,
                    pad_d // 2,
                    pad_d - pad_d // 2,
                ],
                mode="constant",
                value=0.0,
            )
        x = self.conv(x)
        # The original `is not nn.Identity()` compared against a fresh
        # instance and was always True; test the type instead so the
        # no-op identity call is actually skipped.
        if not isinstance(self.activation_fn, nn.Identity):
            x = self.exec_activation_fn(x)
        return x
class _TransposeConvLayer(nn.Module):
    """Generalized Transposed Convolution Block.

    Applies an N-d transposed convolution and crops the result so the output
    size mirrors the "same"-padded forward convolution with the same kernel
    size and stride.

    Parameters
    ----------
    in_channels : int
        Number of input channels
    out_channels : int
        Number of output channels
    dimension : int
        Dimensionality of the input, 1, 2 or 3
    kernel_size : int
        Kernel size for the convolution
    stride : int
        Stride for the convolution, by default 1
    activation_fn : nn.Module, optional
        Activation function to use, by default nn.Identity()

    Raises
    ------
    ValueError
        If ``dimension`` is not 1, 2 or 3.
    """

    def __init__(
        self,
        in_channels: int,
        out_channels: int,
        dimension: int,
        kernel_size: int,
        stride: int = 1,
        activation_fn: nn.Module = nn.Identity(),
    ) -> None:
        super().__init__()
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.kernel_size = kernel_size
        self.stride = stride
        self.dimension = dimension
        self.activation_fn = activation_fn

        # Dispatch on dimensionality instead of an if/elif ladder.
        trans_conv_types = {
            1: nn.ConvTranspose1d,
            2: nn.ConvTranspose2d,
            3: nn.ConvTranspose3d,
        }
        if dimension not in trans_conv_types:
            raise ValueError("Only 1D, 2D and 3D dimensions are supported")
        self.trans_conv = trans_conv_types[dimension](
            self.in_channels,
            self.out_channels,
            self.kernel_size,
            self.stride,
            bias=True,
        )
        self.reset_parameters()

    def exec_activation_fn(self, x: Tensor) -> Tensor:
        """Executes activation function on the input"""
        return self.activation_fn(x)

    def reset_parameters(self) -> None:
        """Initialization for network parameters"""
        nn.init.constant_(self.trans_conv.bias, 0)
        nn.init.xavier_uniform_(self.trans_conv.weight)

    def forward(self, x: Tensor) -> Tensor:
        orig_x = x
        input_length = len(orig_x.size()) - 2  # exclude channel and batch dims
        assert input_length == self.dimension, "Input dimension not compatible"
        x = self.trans_conv(x)
        # Crop the transposed-conv output by the same amount a "same"-padded
        # forward convolution would have padded, per spatial dimension.
        if input_length == 1:
            iw = orig_x.size()[-1:][0]
            pad_w = _get_same_padding(iw, self.kernel_size, self.stride)
            x = x[
                :,
                :,
                pad_w // 2 : x.size(-1) - (pad_w - pad_w // 2),
            ]
        elif input_length == 2:
            ih, iw = orig_x.size()[-2:]
            pad_h, pad_w = _get_same_padding(
                ih,
                self.kernel_size,
                self.stride,
            ), _get_same_padding(iw, self.kernel_size, self.stride)
            x = x[
                :,
                :,
                pad_h // 2 : x.size(-2) - (pad_h - pad_h // 2),
                pad_w // 2 : x.size(-1) - (pad_w - pad_w // 2),
            ]
        else:
            _id, ih, iw = orig_x.size()[-3:]
            pad_d, pad_h, pad_w = (
                _get_same_padding(_id, self.kernel_size, self.stride),
                _get_same_padding(ih, self.kernel_size, self.stride),
                _get_same_padding(iw, self.kernel_size, self.stride),
            )
            x = x[
                :,
                :,
                pad_d // 2 : x.size(-3) - (pad_d - pad_d // 2),
                pad_h // 2 : x.size(-2) - (pad_h - pad_h // 2),
                pad_w // 2 : x.size(-1) - (pad_w - pad_w // 2),
            ]
        # The original `is not nn.Identity()` compared against a fresh
        # instance and was always True; test the type instead.
        if not isinstance(self.activation_fn, nn.Identity):
            x = self.exec_activation_fn(x)
        return x
class _ConvGRULayer(nn.Module):
    """Convolutional GRU layer.

    Parameters
    ----------
    in_features : int
        Input features/channels
    hidden_size : int
        Hidden layer features/channels
    dimension : int
        Spatial dimension of the input
    activation_fn : nn.Module, optional
        Activation Function to use, by default nn.ReLU()
    """

    def __init__(
        self,
        in_features: int,
        hidden_size: int,
        dimension: int,
        activation_fn: nn.Module = nn.ReLU(),
    ) -> None:
        super().__init__()
        self.in_features = in_features
        self.hidden_size = hidden_size
        self.activation_fn = activation_fn
        # Joint convolution producing reset- and update-gate pre-activations.
        self.conv_1 = _ConvLayer(
            in_channels=in_features + hidden_size,
            out_channels=2 * hidden_size,
            kernel_size=3,
            stride=1,
            dimension=dimension,
        )
        # Convolution producing the candidate hidden state.
        self.conv_2 = _ConvLayer(
            in_channels=in_features + hidden_size,
            out_channels=hidden_size,
            kernel_size=3,
            stride=1,
            dimension=dimension,
        )

    def exec_activation_fn(self, x: Tensor) -> Tensor:
        """Executes activation function on the input"""
        return self.activation_fn(x)

    def forward(self, x: Tensor, hidden: Tensor) -> Tensor:
        # Gate pre-activations from the concatenated input and hidden state.
        gates = self.conv_1(torch.cat((x, hidden), dim=1))
        r_pre, z_pre = torch.split(gates, self.hidden_size, 1)
        reset_gate = torch.special.expit(r_pre)
        update_gate = torch.special.expit(z_pre)
        # Candidate state uses the reset-gated hidden state.
        candidate_in = torch.cat((x, hidden * reset_gate), dim=1)
        candidate = self.exec_activation_fn(self.conv_2(candidate_in))
        # Convex blend of candidate and previous hidden state.
        return (1 - update_gate) * candidate + update_gate * hidden
class _ConvResidualBlock(nn.Module):
    """Convolutional ResNet Block
    Parameters
    ----------
    in_channels : int
        Number of input channels
    out_channels : int
        Number of output channels
    dimension : int
        Dimensionality of the input
    stride : int
        Stride of the convolutions, by default 1
    gated : bool, optional
        Residual Gate, by default False
    layer_normalization : bool, optional
        Layer Normalization, by default False
    begin_activation_fn : bool, optional
        Whether to use activation function in the beginning, by default True
    activation_fn : nn.Module, optional
        Activation function to use, by default nn.ReLU()
    Raises
    ------
    ValueError
        Stride not supported
    """
    def __init__(
        self,
        in_channels: int,
        out_channels: int,
        dimension: int,
        stride: int = 1,
        gated: bool = False,
        layer_normalization: bool = False,
        begin_activation_fn: bool = True,
        activation_fn: nn.Module = nn.ReLU(),
    ) -> None:
        super().__init__()
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.stride = stride
        self.dimension = dimension
        self.gated = gated
        self.layer_normalization = layer_normalization
        self.begin_activation_fn = begin_activation_fn
        self.activation_fn = activation_fn
        # kernel 3 (stride 1) vs kernel 4 (stride 2): both pair with the
        # "same"-padding scheme inside _ConvLayer
        if self.stride == 1:
            self.conv_1 = _ConvLayer(
                in_channels=self.in_channels,
                out_channels=self.out_channels,
                kernel_size=3,
                stride=self.stride,
                dimension=self.dimension,
            )
        elif self.stride == 2:
            self.conv_1 = _ConvLayer(
                in_channels=self.in_channels,
                out_channels=self.out_channels,
                kernel_size=4,
                stride=self.stride,
                dimension=self.dimension,
            )
        else:
            raise ValueError("stride > 2 is not supported")
        # gated variant doubles the second conv's channels; the two halves
        # are recombined as value * sigmoid(gate) in forward()
        if not self.gated:
            self.conv_2 = _ConvLayer(
                in_channels=self.out_channels,
                out_channels=self.out_channels,
                kernel_size=3,
                stride=1,
                dimension=self.dimension,
            )
        else:
            self.conv_2 = _ConvLayer(
                in_channels=self.out_channels,
                out_channels=2 * self.out_channels,
                kernel_size=3,
                stride=1,
                dimension=self.dimension,
            )
    def exec_activation_fn(self, x: Tensor) -> Tensor:
        """Executes activation function on the input"""
        return self.activation_fn(x)
    def forward(self, x: Tensor) -> Tensor:
        # Pre-activation residual block: (norm -> act ->) conv -> (norm ->)
        # act -> conv, then add a (possibly pooled / channel-padded) skip.
        orig_x = x
        if self.begin_activation_fn:
            if self.layer_normalization:
                # NOTE(review): a fresh LayerNorm is constructed on every
                # forward call; elementwise_affine=False means it has no
                # parameters, so this is functionally fine but allocates
                # per call
                layer_norm = nn.LayerNorm(x.size()[1:], elementwise_affine=False)
                x = layer_norm(x)
            x = self.exec_activation_fn(x)
        # first convolutional layer
        x = self.conv_1(x)
        # add layer normalization
        if self.layer_normalization:
            layer_norm = nn.LayerNorm(x.size()[1:], elementwise_affine=False)
            x = layer_norm(x)
        # second activation
        x = self.exec_activation_fn(x)
        # second convolutional layer
        x = self.conv_2(x)
        if self.gated:
            # split into value / gate halves and apply a sigmoid gate
            x_1, x_2 = torch.split(x, x.size(1) // 2, 1)
            x = x_1 * torch.special.expit(x_2)
        # possibly reshape skip connection: if the branch was strided, pool
        # the skip so spatial sizes match (width compared as a proxy)
        if orig_x.size(-1) > x.size(-1):  # Check if widths are the same
            if len(orig_x.size()) - 2 == 1:
                iw = orig_x.size()[-1:][0]
                pad_w = _get_same_padding(iw, 2, 2)
                pool = torch.nn.AvgPool1d(
                    2, 2, padding=pad_w // 2, count_include_pad=False
                )
            elif len(orig_x.size()) - 2 == 2:
                ih, iw = orig_x.size()[-2:]
                pad_h, pad_w = _get_same_padding(
                    ih,
                    2,
                    2,
                ), _get_same_padding(iw, 2, 2)
                pool = torch.nn.AvgPool2d(
                    2, 2, padding=(pad_h // 2, pad_w // 2), count_include_pad=False
                )
            elif len(orig_x.size()) - 2 == 3:
                _id, ih, iw = orig_x.size()[-3:]
                pad_d, pad_h, pad_w = (
                    _get_same_padding(_id, 2, 2),
                    _get_same_padding(ih, 2, 2),
                    _get_same_padding(iw, 2, 2),
                )
                pool = torch.nn.AvgPool3d(
                    2,
                    2,
                    padding=(pad_d // 2, pad_h // 2, pad_w // 2),
                    count_include_pad=False,
                )
            else:
                raise ValueError("Only 1D, 2D and 3D dimensions are supported")
            orig_x = pool(orig_x)
        # possibly change the channels for skip connection
        in_channels = int(orig_x.size(1))
        if self.out_channels > in_channels:
            # zero-pad extra channels at the front of the channel dim
            # NOTE(review): the pad amount uses self.in_channels while the
            # check uses the measured in_channels; these agree only when the
            # input matches the declared channel count — confirm
            orig_x = F.pad(
                orig_x,
                (len(orig_x.size()) - 2) * (0, 0)
                + (self.out_channels - self.in_channels, 0),
            )
        elif self.out_channels < in_channels:
            # NOTE(review): shrinking the skip is silently skipped here, so
            # the residual addition below would fail on a channel mismatch;
            # presumably this configuration is never exercised — verify
            pass
        return orig_x + x
|
modulus-main
|
modulus/models/rnn/layers.py
|
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .fully_connected import FullyConnected
|
modulus-main
|
modulus/models/mlp/__init__.py
|
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
import torch.nn as nn
import modulus
from modulus.models.layers import FCLayer, get_activation
from torch import Tensor
from dataclasses import dataclass
from typing import Optional, Union, List
from ..meta import ModelMetaData
from ..module import Module
@dataclass
class MetaData(ModelMetaData):
    """Capability and optimization flags for the FullyConnected model."""

    name: str = "FullyConnected"
    # Optimization
    jit: bool = True
    cuda_graphs: bool = True
    amp: bool = True
    torch_fx: bool = True
    # Inference
    onnx: bool = True
    onnx_runtime: bool = True
    # Physics informed
    func_torch: bool = True
    auto_grad: bool = True
class FullyConnected(Module):
    """A densely-connected MLP architecture
    Parameters
    ----------
    in_features : int, optional
        Size of input features, by default 512
    layer_size : int, optional
        Size of every hidden layer, by default 512
    out_features : int, optional
        Size of output features, by default 512
    num_layers : int, optional
        Number of hidden layers, by default 6
    activation_fn : Union[str, List[str]], optional
        Activation function to use, by default 'silu'
    skip_connections : bool, optional
        Add skip connections every 2 hidden layers, by default False
    adaptive_activations : bool, optional
        Use an adaptive activation function, by default False
    weight_norm : bool, optional
        Use weight norm on fully connected layers, by default False
    Example
    -------
    >>> model = modulus.models.mlp.FullyConnected(in_features=32, out_features=64)
    >>> input = torch.randn(128, 32)
    >>> output = model(input)
    >>> output.size()
    torch.Size([128, 64])
    """

    def __init__(
        self,
        in_features: int = 512,
        layer_size: int = 512,
        out_features: int = 512,
        num_layers: int = 6,
        activation_fn: Union[str, List[str]] = "silu",
        skip_connections: bool = False,
        adaptive_activations: bool = False,
        weight_norm: bool = False,
    ) -> None:
        super().__init__(meta=MetaData())
        self.skip_connections = skip_connections

        # Single trainable scaling parameter shared by all adaptive activations.
        activation_par = nn.Parameter(torch.ones(1)) if adaptive_activations else None

        # Normalize the activation spec into one entry per hidden layer,
        # repeating the last entry when the provided list is too short.
        if not isinstance(activation_fn, list):
            activation_fn = [activation_fn] * num_layers
        if len(activation_fn) < num_layers:
            activation_fn = activation_fn + [activation_fn[-1]] * (
                num_layers - len(activation_fn)
            )
        activations = [get_activation(name) for name in activation_fn]

        # Hidden stack: first layer maps in_features -> layer_size, the
        # remaining layers are layer_size -> layer_size.
        self.layers = nn.ModuleList()
        width = in_features
        for act in activations[:num_layers]:
            self.layers.append(
                FCLayer(width, layer_size, act, weight_norm, activation_par)
            )
            width = layer_size
        # Output projection: plain linear layer, no activation or weight norm.
        self.final_layer = FCLayer(
            in_features=layer_size,
            out_features=out_features,
            activation_fn=None,
            weight_norm=False,
            activation_par=None,
        )

    def forward(self, x: Tensor) -> Tensor:
        x_skip: Optional[Tensor] = None
        for index, layer in enumerate(self.layers):
            x = layer(x)
            # On every even hidden layer, fold in and refresh the skip tensor.
            if self.skip_connections and index % 2 == 0:
                if x_skip is None:
                    x_skip = x
                else:
                    x, x_skip = x + x_skip, x
        return self.final_layer(x)
|
modulus-main
|
modulus/models/mlp/fully_connected.py
|
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .model_registry import ModelRegistry
|
modulus-main
|
modulus/registry/__init__.py
|
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pkg_resources
from typing import List, Union
import modulus
# This model registry follows conventions similar to fsspec,
# https://github.com/fsspec/filesystem_spec/blob/master/fsspec/registry.py#L62C2-L62C2
# Tutorial on entrypoints: https://amir.rachum.com/blog/2017/07/28/python-entry-points/
# Borg singleton pattern: https://stackoverflow.com/questions/1318406/why-is-the-borg-pattern-better-than-the-singleton-pattern-in-python
class ModelRegistry:
    # Borg pattern: every instance shares this one __dict__, so the registry
    # behaves like a singleton without forbidding instantiation.
    _shared_state = {"_model_registry": None}

    def __new__(cls, *args, **kwargs):
        obj = super(ModelRegistry, cls).__new__(cls)
        obj.__dict__ = cls._shared_state
        # Populate the shared registry lazily, on first construction.
        if cls._shared_state["_model_registry"] is None:
            cls._shared_state["_model_registry"] = cls._construct_registry()
        return obj

    @staticmethod
    def _construct_registry() -> dict:
        """Build the initial registry from 'modulus.models' entry points."""
        return {
            entry_point.name: entry_point
            for entry_point in pkg_resources.iter_entry_points("modulus.models")
        }

    def register(self, model: "modulus.Module", name: Union[str, None] = None) -> None:
        """
        Registers a modulus model in the model registry under the provided name. If no name
        is provided, the model's name (from its `__name__` attribute) is used. If the
        name is already in use, raises a ValueError.
        Parameters
        ----------
        model : modulus.Module
            The model to be registered. Can be an instance of any class.
        name : str, optional
            The name to register the model under. If None, the model's name is used.
        Raises
        ------
        ValueError
            If the provided name is already in use in the registry.
        """
        # Only modulus model classes may be registered.
        if not issubclass(model, modulus.Module):
            raise ValueError(
                f"Only subclasses of modulus.Module can be registered. "
                f"Provided model is of type {type(model)}"
            )
        if name is None:
            name = model.__name__
        if name in self._model_registry:
            raise ValueError(f"Name {name} already in use")
        self._model_registry[name] = model

    def factory(self, name: str) -> "modulus.Module":
        """
        Returns a registered model given its name.
        Parameters
        ----------
        name : str
            The name of the registered model.
        Returns
        -------
        model : modulus.Module
            The registered model.
        Raises
        ------
        KeyError
            If no model is registered under the provided name.
        """
        try:
            model = self._model_registry[name]
        except KeyError:
            raise KeyError(f"No model is registered under the name {name}") from None
        # Entry points discovered at construction are loaded lazily here.
        if isinstance(model, pkg_resources.EntryPoint):
            model = model.load()
        return model

    def list_models(self) -> List[str]:
        """
        Returns a list of the names of all models currently registered in the registry.
        Returns
        -------
        List[str]
            A list of the names of all registered models. The order of the names is not
            guaranteed to be consistent.
        """
        return [*self._model_registry]

    def __clear_registry__(self):
        # NOTE: This is only used for testing purposes
        self._model_registry = {}

    def __restore_registry__(self):
        # NOTE: This is only used for testing purposes
        self._model_registry = self._construct_registry()
|
modulus-main
|
modulus/registry/model_registry.py
|
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
import logging
from typing import Union
from pathlib import Path
from modulus.datapipes.meta import DatapipeMetaData
class Datapipe:
    """The base class for all datapipes in Modulus.

    Parameters
    ----------
    meta : DatapipeMetaData, optional
        Meta data class for storing info regarding model, by default None
    """

    def __init__(self, meta: DatapipeMetaData = None):
        super().__init__()
        # Fall back to default metadata when none (or a wrong type) is given.
        if not meta or not isinstance(meta, DatapipeMetaData):
            self.meta = DatapipeMetaData()
        else:
            self.meta = meta

        self.logger = logging.getLogger("core.datapipe")
        # `getLogger` returns a process-wide shared logger; attach the
        # stream handler only once, otherwise every Datapipe instantiation
        # would add another handler and duplicate every log line.
        if not self.logger.handlers:
            handler = logging.StreamHandler()
            formatter = logging.Formatter(
                "[%(asctime)s - %(levelname)s] %(message)s", datefmt="%H:%M:%S"
            )
            handler.setFormatter(formatter)
            self.logger.addHandler(handler)
        self.logger.setLevel(logging.WARNING)

    def debug(self):
        """Turn on debug logging"""
        # Replace the default handler with one that includes the pipe name.
        self.logger.handlers.clear()
        handler = logging.StreamHandler()
        formatter = logging.Formatter(
            f"[%(asctime)s - %(levelname)s - {self.meta.name}] %(message)s",
            datefmt="%Y-%m-%d %H:%M:%S",
        )
        handler.setFormatter(formatter)
        self.logger.addHandler(handler)
        self.logger.setLevel(logging.DEBUG)
        # TODO: set up debug log
        # fh = logging.FileHandler(f'modulus-core-{self.meta.name}.log')
|
modulus-main
|
modulus/datapipes/datapipe.py
|
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
|
modulus-main
|
modulus/datapipes/__init__.py
|
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from dataclasses import dataclass
@dataclass
class DatapipeMetaData:
    """Data class for storing essential meta data needed for all Modulus datapipes"""

    # Datapipe info
    name: str = "ModulusDatapipe"
    # Optimizations
    auto_device: bool = False
    cuda_graphs: bool = False
    # Parallel
    ddp_sharding: bool = False
|
modulus-main
|
modulus/datapipes/meta.py
|
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
|
modulus-main
|
modulus/datapipes/gnn/__init__.py
|
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import json
import torch
from typing import Dict, Any
try:
    import vtk
except ImportError:
    # Narrow to ImportError so unrelated failures inside vtk's own import
    # are not silently re-labelled as a missing dependency.
    raise ImportError("vtk package is required. Install with pip install vtk.")
def read_vtp_file(file_path: str) -> Any:
    """
    Read a VTP file and return the polydata.

    Parameters
    ----------
    file_path : str
        Path to the VTP file.

    Returns
    -------
    vtkPolyData
        The polydata read from the VTP file.
    """
    # Validate the path before handing it to VTK.
    if not os.path.exists(file_path):
        raise FileNotFoundError(f"{file_path} does not exist.")
    if not file_path.endswith(".vtp"):
        raise ValueError(f"Expected a .vtp file, got {file_path}")

    vtp_reader = vtk.vtkXMLPolyDataReader()
    vtp_reader.SetFileName(file_path)
    vtp_reader.Update()

    poly_data = vtp_reader.GetOutput()
    # VTK signals a failed read by returning no output.
    if poly_data is None:
        raise ValueError(f"Failed to read polydata from {file_path}")
    return poly_data
def save_json(var: Dict[str, torch.Tensor], file: str) -> None:
    """
    Saves a dictionary of tensors to a JSON file.

    Parameters
    ----------
    var : Dict[str, torch.Tensor]
        Dictionary where each value is a PyTorch tensor.
    file : str
        Path to the output JSON file.
    """
    # detach() + cpu() make this robust to tensors that live on an
    # accelerator or still require grad; a bare .numpy() raises for both.
    var_list = {k: v.detach().cpu().numpy().tolist() for k, v in var.items()}
    with open(file, "w") as f:
        json.dump(var_list, f)
def load_json(file: str) -> Dict[str, torch.Tensor]:
    """
    Loads a JSON file into a dictionary of PyTorch tensors.

    Parameters
    ----------
    file : str
        Path to the JSON file.

    Returns
    -------
    Dict[str, torch.Tensor]
        Dictionary where each value is a PyTorch tensor.
    """
    with open(file, "r") as stream:
        raw = json.load(stream)
    # Every value becomes a float32 tensor.
    return {key: torch.tensor(value, dtype=torch.float) for key, value in raw.items()}
|
modulus-main
|
modulus/datapipes/gnn/utils.py
|
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import re
from typing import List, Tuple, Dict, Union, Optional, Any
import numpy as np
import torch
from torch import Tensor
from torch.nn import functional as F
from dataclasses import dataclass
from .utils import read_vtp_file, save_json, load_json
from modulus.datapipes.datapipe import Datapipe
from modulus.datapipes.meta import DatapipeMetaData
try:
    import dgl
    from dgl.data import DGLDataset
except ImportError:
    # Narrow to ImportError so unrelated failures inside dgl's import are
    # not re-labelled as a missing dependency.
    raise ImportError(
        "Ahmed Body Dataset requires the DGL library. Install the "
        + "desired CUDA version at: \n https://www.dgl.ai/pages/start.html"
    )

try:
    import vtk
    import pyvista as pv
except ImportError:
    # Same narrowing for the mesh-processing dependencies.
    raise ImportError(
        "Ahmed Body Dataset requires the vtk and pyvista libraries. Install with "
        + "pip install vtk pyvista"
    )
@dataclass
class MetaData(DatapipeMetaData):
    """Datapipe metadata flags for the Ahmed body dataset."""

    name: str = "AhmedBody"
    # Optimization
    auto_device: bool = True
    cuda_graphs: bool = False
    # Parallel
    ddp_sharding: bool = True
class AhmedBodyDataset(DGLDataset, Datapipe):
"""
In-memory Ahmed body Dataset
Parameters
----------
data_dir: str
The directory where the data is stored.
split: str, optional
The dataset split. Can be 'train', 'validation', or 'test', by default 'train'.
num_samples: int, optional
The number of samples to use, by default 10.
invar_keys: List[str], optional
The input node features to consider. Default includes 'pos', 'velocity', 'reynolds_number', 'length', 'width', 'height', 'ground_clearance', 'slant_angle', and 'fillet_radius'.
outvar_keys: List[str], optional
The output features to consider. Default includes 'p' and 'wallShearStress'.
normalize_keys List[str], optional
The features to normalize. Default includes 'p', 'wallShearStress', 'velocity', 'length', 'width', 'height', 'ground_clearance', 'slant_angle', and 'fillet_radius'.
normalization_bound: Tuple[float, float], optional
The lower and upper bounds for normalization. Default is (-1, 1).
force_reload: bool, optional
If True, forces a reload of the data, by default False.
name: str, optional
The name of the dataset, by default 'dataset'.
verbose: bool, optional
If True, enables verbose mode, by default False.
compute_drag: bool, optional
If True, also returns the coefficient and mesh area and normals that are required for computing the drag coefficient.
"""
def __init__(
self,
data_dir: str,
split: str = "train",
num_samples: int = 10,
invar_keys: List[str] = [
"pos",
"velocity",
"reynolds_number",
"length",
"width",
"height",
"ground_clearance",
"slant_angle",
"fillet_radius",
],
outvar_keys: List[str] = ["p", "wallShearStress"],
normalize_keys: List[str] = [
"p",
"wallShearStress",
"velocity",
"reynolds_number",
"length",
"width",
"height",
"ground_clearance",
"slant_angle",
"fillet_radius",
],
normalization_bound: Tuple[float, float] = (-1.0, 1.0),
force_reload: bool = False,
name: str = "dataset",
verbose: bool = False,
compute_drag: bool = False,
):
DGLDataset.__init__(
self,
name=name,
force_reload=force_reload,
verbose=verbose,
)
Datapipe.__init__(
self,
meta=MetaData(),
)
self.split = split
self.num_samples = num_samples
self.data_dir = os.path.join(data_dir, self.split)
self.info_dir = os.path.join(data_dir, self.split + "_info")
self.input_keys = invar_keys
self.output_keys = outvar_keys
self.normalization_bound = normalization_bound
self.compute_drag = compute_drag
# get the list of all files in the data_dir
all_entries = os.listdir(self.data_dir)
all_info = os.listdir(self.info_dir)
data_list = [
os.path.join(self.data_dir, entry)
for entry in all_entries
if os.path.isfile(os.path.join(self.data_dir, entry))
]
info_list = [
os.path.join(self.info_dir, entry)
for entry in all_info
if os.path.isfile(os.path.join(self.info_dir, entry))
]
numbers = []
for directory in data_list:
match = re.search(r"\d+", directory)
if match:
numbers.append(int(match.group()))
numbers_info = []
for directory in info_list:
match = re.search(r"\d+", directory)
if match:
numbers_info.append(int(match.group()))
numbers = [int(n) for n in numbers]
numbers_info = [int(n) for n in numbers_info]
# sort the data_list and info_list according to the numbers
args = np.argsort(numbers)
data_list = [data_list[index] for index in args]
numbers = [numbers[index] for index in args]
args = np.argsort(numbers_info)
info_list = [info_list[index] for index in args]
numbers_info = [numbers_info[index] for index in args]
assert sorted(numbers) == sorted(numbers_info)
self.numbers = numbers
# create the graphs and add the node and features
self.length = min(len(data_list), self.num_samples)
if self.num_samples > self.length:
raise ValueError(
f"Number of available {self.split} dataset entries "
f"({self.length}) is less than the number of samples "
f"({self.num_samples})"
)
self.graphs = []
if self.compute_drag:
self.normals = []
self.areas = []
self.coeff = []
for i in range(self.length):
file_path = data_list[i]
info_path = info_list[i]
polydata = read_vtp_file(file_path)
graph = self._create_dgl_graph(polydata, outvar_keys, dtype=torch.int32)
(
velocity,
reynolds_number,
length,
width,
height,
ground_clearance,
slant_angle,
fillet_radius,
) = self._read_info_file(info_path)
if "velocity" in invar_keys:
graph.ndata["velocity"] = velocity * torch.ones_like(
graph.ndata["pos"][:, [0]]
)
if "reynolds_number" in invar_keys:
graph.ndata["reynolds_number"] = reynolds_number * torch.ones_like(
graph.ndata["pos"][:, [0]]
)
if "length" in invar_keys:
graph.ndata["length"] = length * torch.ones_like(
graph.ndata["pos"][:, [0]]
)
if "width" in invar_keys:
graph.ndata["width"] = width * torch.ones_like(
graph.ndata["pos"][:, [0]]
)
if "height" in invar_keys:
graph.ndata["height"] = height * torch.ones_like(
graph.ndata["pos"][:, [0]]
)
if "ground_clearance" in invar_keys:
graph.ndata["ground_clearance"] = ground_clearance * torch.ones_like(
graph.ndata["pos"][:, [0]]
)
if "slant_angle" in invar_keys:
graph.ndata["slant_angle"] = slant_angle * torch.ones_like(
graph.ndata["pos"][:, [0]]
)
if "fillet_radius" in invar_keys:
graph.ndata["fillet_radius"] = fillet_radius * torch.ones_like(
graph.ndata["pos"][:, [0]]
)
if "normals" in invar_keys or self.compute_drag:
mesh = pv.read(file_path)
mesh.compute_normals(
cell_normals=True, point_normals=False, inplace=True
)
if "normals" in invar_keys:
graph.ndata["normals"] = torch.from_numpy(
mesh.cell_data_to_point_data()["Normals"]
)
if self.compute_drag:
mesh = mesh.compute_cell_sizes()
mesh = mesh.cell_data_to_point_data()
frontal_area = width * height / 2 * (10 ** (-6))
self.coeff.append(2.0 / ((velocity**2) * frontal_area))
self.normals.append(torch.from_numpy(mesh["Normals"]))
self.areas.append(torch.from_numpy(mesh["Area"]))
self.graphs.append(graph)
# add the edge features
self.graphs = self.add_edge_features()
# normalize the node and edge features
if self.split == "train":
self.node_stats = self._get_node_stats(keys=normalize_keys)
self.edge_stats = self._get_edge_stats()
else:
self.node_stats = load_json("node_stats.json")
self.edge_stats = load_json("edge_stats.json")
self.graphs = self.normalize_node()
self.graphs = self.normalize_edge()
def __getitem__(self, idx):
    """Return the idx-th sample.

    When drag computation is enabled, the graph is accompanied by the
    sample id, surface normals, cell areas, and the drag coefficient
    prefactor; otherwise only the graph is returned.
    """
    sample = self.graphs[idx]
    if not self.compute_drag:
        return sample
    return (
        sample,
        self.numbers[idx],
        self.normals[idx],
        self.areas[idx],
        self.coeff[idx],
    )
def __len__(self):
    """Return the number of graphs (samples) in the dataset."""
    return self.length
def add_edge_features(self) -> "List[dgl.DGLGraph]":
    """Attach geometric edge features to every stored graph.

    For each edge (u, v), the relative displacement ``pos[u] - pos[v]`` and
    its Euclidean norm are concatenated and stored in ``graph.edata["x"]``.
    The graphs are modified in place.

    Returns
    -------
    List[dgl.DGLGraph]
        The list of graphs with updated edge features.

    Raises
    ------
    ValueError
        If the graph list is empty or a graph has no 'pos' node data.
    """
    if not getattr(self, "graphs", None):
        raise ValueError("The list 'graphs' is empty.")
    for graph in self.graphs:
        coords = graph.ndata.get("pos")
        if coords is None:
            raise ValueError(
                "'pos' does not exist in the node data of one or more graphs."
            )
        src, dst = graph.edges()
        src, dst = src.long(), dst.long()
        rel_disp = coords[src] - coords[dst]
        rel_norm = torch.linalg.norm(rel_disp, dim=-1, keepdim=True)
        graph.edata["x"] = torch.cat((rel_disp, rel_norm), dim=-1)
    return self.graphs
def normalize_node(self) -> "List[dgl.DGLGraph]":
    """Min-max normalize node data and assemble model input/output tensors.

    Every node field that has matching ``<key>_min`` / ``<key>_max`` entries
    in ``self.node_stats`` is rescaled in place via

        2.0 * bound[1] * (data - min) / (max - min) + bound[0]

    which maps the data into ``[bound[0], bound[1]]`` where ``bound`` is
    ``self.normalization_bound``. The normalized fields listed in
    ``self.input_keys`` and ``self.output_keys`` are then concatenated into
    the node tensors ``x`` and ``y`` respectively.

    Returns
    -------
    List[dgl.DGLGraph]
        The list of graphs with normalized and concatenated node data.

    Raises
    ------
    ValueError
        If the graph list is empty, stats are missing/non-dict, a stat key
        is absent, min equals max, or a field is missing from a graph.
    """
    if not getattr(self, "graphs", None):
        raise ValueError("The list 'graphs' is empty.")
    if not hasattr(self, "node_stats") or not isinstance(self.node_stats, dict):
        raise ValueError(
            "The 'node_stats' attribute does not exist or is not a dictionary."
        )
    # Recover the base field names from the '<key>_min'/'<key>_max' stat keys.
    field_names = {
        stat_key.replace("_min", "").replace("_max", "")
        for stat_key in self.node_stats.keys()
    }
    lower, upper = self.normalization_bound[0], self.normalization_bound[1]
    for graph in self.graphs:
        for field in field_names:
            field_min = self.node_stats.get(field + "_min")
            field_max = self.node_stats.get(field + "_max")
            if field_min is None or field_max is None:
                raise ValueError(
                    f"The keys '{field}_min' and/or '{field}_max' do not exist in 'node_stats'."
                )
            if field_max.equal(field_min):
                raise ValueError(
                    f"The values of '{field}_max' and '{field}_min' are equal, causing a division by zero."
                )
            data = graph.ndata.get(field)
            if data is None:
                raise ValueError(
                    f"The key '{field}' does not exist in the node data of one or more graphs."
                )
            graph.ndata[field] = (
                2.0 * upper * (data - field_min) / (field_max - field_min) + lower
            )
        graph.ndata["x"] = torch.cat(
            [graph.ndata.get(key) for key in self.input_keys], dim=-1
        )
        graph.ndata["y"] = torch.cat(
            [graph.ndata.get(key) for key in self.output_keys], dim=-1
        )
    return self.graphs
def normalize_edge(self) -> "List[dgl.DGLGraph]":
    """Min-max normalize the edge feature 'x' of every stored graph.

    Each graph's ``edata['x']`` is rescaled in place via

        2.0 * bound[1] * (x - edge_min) / (edge_max - edge_min) + bound[0]

    mapping it into ``[bound[0], bound[1]]`` where ``bound`` is
    ``self.normalization_bound``.

    Returns
    -------
    List[dgl.DGLGraph]
        The list of graphs with normalized edge data 'x'.

    Raises
    ------
    ValueError
        If the graph list is empty, edge stats are missing/non-dict,
        min equals max, or a graph has no 'x' edge data.
    """
    if not getattr(self, "graphs", None):
        raise ValueError("The list 'graphs' is empty.")
    if not hasattr(self, "edge_stats") or not isinstance(self.edge_stats, dict):
        raise ValueError(
            "The 'edge_stats' attribute does not exist or is not a dictionary."
        )
    e_min = self.edge_stats.get("edge_min")
    e_max = self.edge_stats.get("edge_max")
    if e_min is None or e_max is None:
        raise ValueError(
            "The keys 'edge_min' and/or 'edge_max' do not exist in 'edge_stats'."
        )
    if e_max.equal(e_min):
        raise ValueError(
            "The values of 'edge_max' and 'edge_min' are equal, causing a division by zero."
        )
    lower, upper = self.normalization_bound[0], self.normalization_bound[1]
    for graph in self.graphs:
        feats = graph.edata.get("x")
        if feats is None:
            raise ValueError(
                "The key 'x' does not exist in the edge data of one or more graphs."
            )
        graph.edata["x"] = 2.0 * upper * (feats - e_min) / (e_max - e_min) + lower
    return self.graphs
def denormalize(self, pred, gt, device) -> Tuple[Tensor, Tensor]:
    """
    Denormalize the graph node data.

    Channel 0 of both tensors is pressure; the remaining channels are
    wall shear stress. Each group is mapped back through the inverse of
    the min-max normalization applied in `normalize_node`.

    Parameters:
    -----------
    pred: Tensor
        Normalized prediction
    gt: Tensor
        Normalized ground truth
    device: Any
        The device to move the stored statistics to

    Returns:
    --------
    Tuple(Tensor, Tensor)
        Denormalized prediction and ground truth
    """
    # Fix: the original had a dead `p_pred = pred` immediately overwritten;
    # the four copy-pasted inverse expressions are factored into one helper.
    stats = {key: val.to(device) for key, val in self.node_stats.items()}
    lower = self.normalization_bound[0]
    upper = self.normalization_bound[1]

    def _invert(data, vmin, vmax):
        # Inverse of: 2 * upper * (x - vmin) / (vmax - vmin) + lower
        return (data - lower) * (vmax - vmin) / (2 * upper) + vmin

    p_pred = _invert(pred[:, [0]], stats["p_min"], stats["p_max"])
    s_pred = _invert(
        pred[:, 1:], stats["wallShearStress_min"], stats["wallShearStress_max"]
    )
    p_gt = _invert(gt[:, [0]], stats["p_min"], stats["p_max"])
    s_gt = _invert(
        gt[:, 1:], stats["wallShearStress_min"], stats["wallShearStress_max"]
    )
    pred = torch.cat((p_pred, s_pred), dim=-1)
    gt = torch.cat((p_gt, s_gt), dim=-1)
    return pred, gt
def _get_edge_stats(self) -> Dict[str, Any]:
    """
    Compute the per-dimension min and max of the edge feature 'x' across
    all graphs, and persist them so eval/test splits can reuse them.

    Returns
    -------
    dict
        Keys 'edge_min' and 'edge_max' mapping to 1-D tensors with one
        entry per dimension of the edge feature 'x'.

    Raises
    ------
    ValueError
        If the graph list is empty or any graph lacks edge data 'x'.
    """
    if not self.graphs:
        raise ValueError("The list 'graphs' is empty.")
    template = self.graphs[0].edata.get("x")
    if template is None:
        raise ValueError(
            "The key 'x' does not exist in the edge data of the first graph."
        )
    # Running extrema, seeded at +/- infinity with the feature's shape.
    running_min = torch.full_like(template[0, :], float("inf"))
    running_max = torch.full_like(template[0, :], float("-inf"))
    for i, graph in enumerate(self.graphs):
        edge_feats = graph.edata.get("x")
        if edge_feats is None:
            raise ValueError(
                f"The key 'x' does not exist in the edge data of the {i}-th graph."
            )
        running_min = torch.minimum(running_min, edge_feats.min(dim=0)[0].reshape(-1))
        running_max = torch.maximum(running_max, edge_feats.max(dim=0)[0].reshape(-1))
    stats = {"edge_min": running_min, "edge_max": running_max}
    # Save to file
    save_json(stats, "edge_stats.json")
    return stats
def _get_node_stats(self, keys: List[str]) -> Dict[str, Any]:
    """
    Compute the per-dimension min and max of the listed node attributes
    across all graphs, and persist them so eval/test splits can reuse them.

    Parameters
    ----------
    keys : list of str
        Names of the node attributes to aggregate.

    Returns
    -------
    dict
        Keys of the form '<key>_min' / '<key>_max' mapping to 1-D tensors
        with one entry per dimension of the corresponding node attribute.

    Raises
    ------
    ValueError
        If the graph list is empty or a graph lacks one of the attributes.
    """
    if not self.graphs:
        raise ValueError("The list 'graphs' is empty.")
    stats = {}
    # Seed the running extrema from the first graph's feature shapes.
    for key in keys:
        seed = self.graphs[0].ndata.get(key)
        if seed is None:
            raise ValueError(
                f"The key '{key}' does not exist in the node data of the first graph."
            )
        stats[key + "_min"] = torch.full_like(seed[0, :], float("inf"))
        stats[key + "_max"] = torch.full_like(seed[0, :], float("-inf"))
    for i, graph in enumerate(self.graphs):
        for key in keys:
            node_feats = graph.ndata.get(key)
            if node_feats is None:
                raise ValueError(
                    f"The key '{key}' does not exist in the node data of the {i}-th graph."
                )
            stats[key + "_min"] = torch.minimum(
                stats[key + "_min"], node_feats.min(dim=0)[0].reshape(-1)
            )
            stats[key + "_max"] = torch.maximum(
                stats[key + "_max"], node_feats.max(dim=0)[0].reshape(-1)
            )
    # Save to file
    save_json(stats, "node_stats.json")
    return stats
@staticmethod
def _read_info_file(
    file_path: str,
) -> Tuple[float, float, float, float, float, float, float, float]:
    """
    Parse simulation parameters from a 'key: value' style info text file.

    Parameters
    ----------
    file_path : str
        Path to the text file.

    Returns
    -------
    tuple
        Values of velocity, reynolds number, length, width, height,
        ground clearance, slant angle, and fillet radius (0.0 for any
        parameter not found in the file).
    """
    # Substring tag -> parsed value. Insertion order matters: it mirrors
    # the first-match-wins semantics of the original if/elif chain.
    params = {
        "Velocity": 0.0,
        "Re": 0.0,
        "Length": 0.0,
        "Width": 0.0,
        "Height": 0.0,
        "GroundClearance": 0.0,
        "SlantAngle": 0.0,
        "FilletRadius": 0.0,
    }
    with open(file_path, "r") as info_file:
        for line in info_file:
            for tag in params:
                if tag in line:
                    params[tag] = float(line.split(":")[1].strip())
                    break
    return (
        params["Velocity"],
        params["Re"],
        params["Length"],
        params["Width"],
        params["Height"],
        params["GroundClearance"],
        params["SlantAngle"],
        params["FilletRadius"],
    )
@staticmethod
def _create_dgl_graph(
    polydata: Any,
    outvar_keys: List[str],
    to_bidirected: bool = True,
    add_self_loop: bool = False,
    dtype: Union[torch.dtype, str] = torch.int32,
) -> "dgl.DGLGraph":
    """
    Create a DGL graph from vtkPolyData.

    Edges connect consecutive point ids within each polygon cell; node
    positions are stored under 'pos' and any point-data arrays whose names
    appear in ``outvar_keys`` are copied into the node data.

    Parameters
    ----------
    polydata : vtkPolyData
        vtkPolyData from which the DGL graph is created.
    outvar_keys : list of str
        Names of the point-data arrays to attach as node attributes.
    to_bidirected : bool, optional
        Whether to make the graph bidirected. Default is True.
    add_self_loop : bool, optional
        Whether to add self-loops in the graph. Default is False.
    dtype : torch.dtype or str, optional
        Data type for the graph. Default is torch.int32.

    Returns
    -------
    dgl.DGLGraph
        The DGL graph created from the vtkPolyData.
    """
    points = polydata.GetPoints()
    if points is None:
        raise ValueError("Failed to get points from the polydata.")
    n_points = points.GetNumberOfPoints()
    vertices = np.array([points.GetPoint(i) for i in range(n_points)])

    polys = polydata.GetPolys()
    if polys is None:
        raise ValueError("Failed to get polygons from the polydata.")
    polys.InitTraversal()
    # One directed edge per consecutive id pair inside each cell.
    edge_list = []
    for _ in range(polys.GetNumberOfCells()):
        ids = vtk.vtkIdList()
        polys.GetNextCell(ids)
        edge_list.extend(
            (ids.GetId(k), ids.GetId(k + 1))
            for k in range(ids.GetNumberOfIds() - 1)
        )

    graph = dgl.graph(edge_list, idtype=dtype)
    if to_bidirected:
        graph = dgl.to_bidirected(graph)
    if add_self_loop:
        graph = dgl.add_self_loop(graph)
    graph.ndata["pos"] = torch.tensor(vertices, dtype=torch.float32)

    point_data = polydata.GetPointData()
    if point_data is None:
        raise ValueError("Failed to get point data from the polydata.")
    for array_idx in range(point_data.GetNumberOfArrays()):
        array = point_data.GetArray(array_idx)
        array_name = array.GetName()
        if array_name not in outvar_keys:
            continue
        buffer = np.zeros((n_points, array.GetNumberOfComponents()))
        for j in range(n_points):
            array.GetTuple(j, buffer[j])
        graph.ndata[array_name] = torch.tensor(buffer, dtype=torch.float32)
    return graph
|
modulus-main
|
modulus/datapipes/gnn/ahmed_body_dataset.py
|
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import functools
import json
import os

import numpy as np
import torch
from torch.nn import functional as F

# Narrowed from bare `except:` — a bare clause would also swallow
# KeyboardInterrupt/SystemExit; chain the cause for easier debugging.
try:
    import tensorflow.compat.v1 as tf
except ImportError as err:
    raise ImportError(
        "Mesh Graph Net Datapipe requires the Tensorflow library. Install the "
        + "package at: https://www.tensorflow.org/install"
    ) from err

try:
    import dgl
    from dgl.data import DGLDataset
except ImportError as err:
    raise ImportError(
        "Mesh Graph Net Datapipe requires the DGL library. Install the "
        + "desired CUDA version at: https://www.dgl.ai/pages/start.html"
    ) from err

from .utils import read_vtp_file, save_json, load_json

# Hide GPU from visible devices for TF
tf.config.set_visible_devices([], "GPU")
class VortexSheddingDataset(DGLDataset):
    """In-memory MeshGraphNet Dataset for stationary mesh

    Notes:
        - This dataset prepares and processes the data available in MeshGraphNet's repo:
          https://github.com/deepmind/deepmind-research/tree/master/meshgraphnets
        - A single adj matrix is used for each transient simulation.
          Do not use with adaptive mesh or remeshing

    Parameters
    ----------
    name : str, optional
        Name of the dataset, by default "dataset"
    data_dir : _type_, optional
        Specifying the directory that stores the raw data in .TFRecord format., by default None
    split : str, optional
        Dataset split ["train", "eval", "test"], by default "train"
    num_samples : int, optional
        Number of samples, by default 1000
    num_steps : int, optional
        Number of time steps in each sample, by default 600
    noise_std : float, optional
        The standard deviation of the noise added to the "train" split, by default 0.02
    force_reload : bool, optional
        force reload, by default False
    verbose : bool, optional
        verbose, by default False
    """

    def __init__(
        self,
        name="dataset",
        data_dir=None,
        split="train",
        num_samples=1000,
        num_steps=600,
        noise_std=0.02,
        force_reload=False,
        verbose=False,
    ):
        super().__init__(
            name=name,
            force_reload=force_reload,
            verbose=verbose,
        )
        self.data_dir = data_dir
        self.split = split
        self.num_samples = num_samples
        self.num_steps = num_steps
        self.noise_std = noise_std
        # Each trajectory contributes (num_steps - 1) one-step transitions.
        self.length = num_samples * (num_steps - 1)
        print(f"Preparing the {split} dataset...")
        # create the graphs with edge features
        dataset_iterator = self._load_tf_data(self.data_dir, self.split)
        self.graphs, self.cells, self.node_type = [], [], []
        noise_mask, self.rollout_mask = [], []
        self.mesh_pos = []
        for i in range(self.num_samples):
            data_np = dataset_iterator.get_next()
            data_np = {key: arr[:num_steps].numpy() for key, arr in data_np.items()}
            src, dst = self.cell_to_adj(data_np["cells"][0])  # assuming stationary mesh
            graph = self.create_graph(src, dst, dtype=torch.int32)
            graph = self.add_edge_features(graph, data_np["mesh_pos"][0])
            self.graphs.append(graph)
            node_type = torch.tensor(data_np["node_type"][0], dtype=torch.uint8)
            self.node_type.append(self._one_hot_encode(node_type))
            # Noise is only injected at nodes whose raw type is 0.
            noise_mask.append(torch.eq(node_type, torch.zeros_like(node_type)))
            if self.split != "train":
                # Rollout evaluation also needs mesh geometry and masks.
                self.mesh_pos.append(torch.tensor(data_np["mesh_pos"][0]))
                self.cells.append(data_np["cells"][0])
                self.rollout_mask.append(self._get_rollout_mask(node_type))
        # compute or load edge data stats
        if self.split == "train":
            self.edge_stats = self._get_edge_stats()
        else:
            # Non-train splits reuse the statistics saved by the train split.
            self.edge_stats = load_json("edge_stats.json")
        # normalize edge features
        for i in range(num_samples):
            self.graphs[i].edata["x"] = self.normalize_edge(
                self.graphs[i],
                self.edge_stats["edge_mean"],
                self.edge_stats["edge_std"],
            )
        # create the node features
        # NOTE: the one-shot iterator is exhausted above, so a fresh one is
        # created to traverse the data a second time.
        dataset_iterator = self._load_tf_data(self.data_dir, self.split)
        self.node_features, self.node_targets = [], []
        for i in range(self.num_samples):
            data_np = dataset_iterator.get_next()
            data_np = {key: arr[:num_steps].numpy() for key, arr in data_np.items()}
            features, targets = {}, {}
            # Input: velocity at step t. Targets: velocity increment t -> t+1
            # and pressure at step t+1.
            features["velocity"] = self._drop_last(data_np["velocity"])
            targets["velocity"] = self._push_forward_diff(data_np["velocity"])
            targets["pressure"] = self._push_forward(data_np["pressure"])
            # add noise
            if split == "train":
                features["velocity"], targets["velocity"] = self._add_noise(
                    features["velocity"],
                    targets["velocity"],
                    self.noise_std,
                    noise_mask[i],
                )
            self.node_features.append(features)
            self.node_targets.append(targets)
        # compute or load node data stats
        if self.split == "train":
            self.node_stats = self._get_node_stats()
        else:
            self.node_stats = load_json("node_stats.json")
        # normalize node features
        for i in range(num_samples):
            self.node_features[i]["velocity"] = self.normalize_node(
                self.node_features[i]["velocity"],
                self.node_stats["velocity_mean"],
                self.node_stats["velocity_std"],
            )
            self.node_targets[i]["velocity"] = self.normalize_node(
                self.node_targets[i]["velocity"],
                self.node_stats["velocity_diff_mean"],
                self.node_stats["velocity_diff_std"],
            )
            self.node_targets[i]["pressure"] = self.normalize_node(
                self.node_targets[i]["pressure"],
                self.node_stats["pressure_mean"],
                self.node_stats["pressure_std"],
            )

    def __getitem__(self, idx):
        """Return the graph for one (trajectory, time-step) pair.

        For the "train" split only the graph is returned; other splits
        additionally return the mesh cells and the rollout mask.
        """
        gidx = idx // (self.num_steps - 1)  # graph index
        tidx = idx % (self.num_steps - 1)  # time step index
        graph = self.graphs[gidx]
        node_features = torch.cat(
            (self.node_features[gidx]["velocity"][tidx], self.node_type[gidx]), dim=-1
        )
        node_targets = torch.cat(
            (
                self.node_targets[gidx]["velocity"][tidx],
                self.node_targets[gidx]["pressure"][tidx],
            ),
            dim=-1,
        )
        graph.ndata["x"] = node_features
        graph.ndata["y"] = node_targets
        if self.split == "train":
            return graph
        else:
            graph.ndata["mesh_pos"] = self.mesh_pos[gidx]
            cells = self.cells[gidx]
            rollout_mask = self.rollout_mask[gidx]
            return graph, cells, rollout_mask

    def __len__(self):
        """Total number of one-step samples across all trajectories."""
        return self.length

    def _get_edge_stats(self):
        """Compute mean/std of edge features over all graphs and save them
        to 'edge_stats.json' for reuse by the eval/test splits."""
        stats = {
            "edge_mean": 0,
            "edge_meansqr": 0,
        }
        for i in range(self.num_samples):
            stats["edge_mean"] += (
                torch.mean(self.graphs[i].edata["x"], dim=0) / self.num_samples
            )
            stats["edge_meansqr"] += (
                torch.mean(torch.square(self.graphs[i].edata["x"]), dim=0)
                / self.num_samples
            )
        # std = sqrt(E[x^2] - E[x]^2)
        stats["edge_std"] = torch.sqrt(
            stats["edge_meansqr"] - torch.square(stats["edge_mean"])
        )
        stats.pop("edge_meansqr")
        # save to file
        save_json(stats, "edge_stats.json")
        return stats

    def _get_node_stats(self):
        """Compute mean/std of node features and targets over all samples
        and save them to 'node_stats.json' for reuse by eval/test splits."""
        stats = {
            "velocity_mean": 0,
            "velocity_meansqr": 0,
            "velocity_diff_mean": 0,
            "velocity_diff_meansqr": 0,
            "pressure_mean": 0,
            "pressure_meansqr": 0,
        }
        for i in range(self.num_samples):
            stats["velocity_mean"] += (
                torch.mean(self.node_features[i]["velocity"], dim=(0, 1))
                / self.num_samples
            )
            stats["velocity_meansqr"] += (
                torch.mean(torch.square(self.node_features[i]["velocity"]), dim=(0, 1))
                / self.num_samples
            )
            stats["pressure_mean"] += (
                torch.mean(self.node_targets[i]["pressure"], dim=(0, 1))
                / self.num_samples
            )
            stats["pressure_meansqr"] += (
                torch.mean(torch.square(self.node_targets[i]["pressure"]), dim=(0, 1))
                / self.num_samples
            )
            stats["velocity_diff_mean"] += (
                torch.mean(
                    self.node_targets[i]["velocity"],
                    dim=(0, 1),
                )
                / self.num_samples
            )
            stats["velocity_diff_meansqr"] += (
                torch.mean(
                    torch.square(self.node_targets[i]["velocity"]),
                    dim=(0, 1),
                )
                / self.num_samples
            )
        # std = sqrt(E[x^2] - E[x]^2)
        stats["velocity_std"] = torch.sqrt(
            stats["velocity_meansqr"] - torch.square(stats["velocity_mean"])
        )
        stats["pressure_std"] = torch.sqrt(
            stats["pressure_meansqr"] - torch.square(stats["pressure_mean"])
        )
        stats["velocity_diff_std"] = torch.sqrt(
            stats["velocity_diff_meansqr"] - torch.square(stats["velocity_diff_mean"])
        )
        stats.pop("velocity_meansqr")
        stats.pop("pressure_meansqr")
        stats.pop("velocity_diff_meansqr")
        # save to file
        save_json(stats, "node_stats.json")
        return stats

    def _load_tf_data(self, path, split):
        """
        Utility for loading the .tfrecord dataset in DeepMind's MeshGraphNet repo:
        https://github.com/deepmind/deepmind-research/tree/master/meshgraphnets
        Follow the instructions provided in that repo to download the .tfrecord files.
        """
        dataset = self._load_dataset(path, split)
        dataset_iterator = tf.data.make_one_shot_iterator(dataset)
        return dataset_iterator

    def _load_dataset(self, path, split):
        """Build the tf.data pipeline for '<split>.tfrecord' using the field
        specs stored in 'meta.json'."""
        with open(os.path.join(path, "meta.json"), "r") as fp:
            meta = json.loads(fp.read())
        dataset = tf.data.TFRecordDataset(os.path.join(path, split + ".tfrecord"))
        return dataset.map(
            functools.partial(self._parse_data, meta=meta), num_parallel_calls=8
        ).prefetch(tf.data.AUTOTUNE)

    @staticmethod
    def cell_to_adj(cells):
        """creates adjacency matrix in COO format from mesh cells"""
        num_cells = np.shape(cells)[0]
        # Each triangle (a, b, c) contributes directed edges a->b, b->c, c->a.
        src = [cells[i][indx] for i in range(num_cells) for indx in [0, 1, 2]]
        dst = [cells[i][indx] for i in range(num_cells) for indx in [1, 2, 0]]
        return src, dst

    @staticmethod
    def create_graph(src, dst, dtype=torch.int32):
        """
        creates a DGL graph from an adj matrix in COO format.
        torch.int32 can handle graphs with up to 2**31-1 nodes or edges.
        """
        graph = dgl.to_bidirected(dgl.graph((src, dst), idtype=dtype))
        return graph

    @staticmethod
    def add_edge_features(graph, pos):
        """
        adds relative displacement & displacement norm as edge features
        """
        row, col = graph.edges()
        disp = torch.tensor(pos[row.long()] - pos[col.long()])
        disp_norm = torch.linalg.norm(disp, dim=-1, keepdim=True)
        graph.edata["x"] = torch.cat((disp, disp_norm), dim=1)
        return graph

    @staticmethod
    def normalize_node(invar, mu, std):
        """normalizes a tensor to zero mean and unit variance (per channel)"""
        assert invar.size()[-1] == mu.size()[-1]
        assert invar.size()[-1] == std.size()[-1]
        return (invar - mu.expand(invar.size())) / std.expand(invar.size())

    @staticmethod
    def normalize_edge(graph, mu, std):
        """normalizes the edge feature 'x' of a graph (per channel)"""
        assert graph.edata["x"].size()[-1] == mu.size()[-1]
        assert graph.edata["x"].size()[-1] == std.size()[-1]
        return (graph.edata["x"] - mu) / std

    @staticmethod
    def denormalize(invar, mu, std):
        """denormalizes a tensor"""
        # assert invar.size()[-1] == mu.size()[-1]
        # assert invar.size()[-1] == std.size()[-1]
        denormalized_invar = invar * std + mu
        return denormalized_invar

    @staticmethod
    def _one_hot_encode(node_type):  # TODO generalize
        """One-hot encode raw node types into 4 classes.

        NOTE(review): the remapping assumes raw types take values in
        {0, 4, 5, 6} (0 stays 0; the rest shift down by 3) — confirm
        against the dataset's node-type convention.
        """
        node_type = torch.squeeze(node_type, dim=-1)
        node_type = torch.where(
            node_type == 0,
            torch.zeros_like(node_type),
            node_type - 3,
        )
        node_type = F.one_hot(node_type.long(), num_classes=4)
        return node_type

    @staticmethod
    def _drop_last(invar):
        """Drop the last time step (keeps steps 0..T-2)."""
        return torch.tensor(invar[0:-1], dtype=torch.float)

    @staticmethod
    def _push_forward(invar):
        """Shift by one time step (keeps steps 1..T-1)."""
        return torch.tensor(invar[1:], dtype=torch.float)

    @staticmethod
    def _push_forward_diff(invar):
        """One-step forward difference: invar[t+1] - invar[t]."""
        return torch.tensor(invar[1:] - invar[0:-1], dtype=torch.float)

    @staticmethod
    def _get_rollout_mask(node_type):
        """Boolean mask selecting nodes with raw type 0 or 5 (used to
        restrict which nodes are evaluated during rollout)."""
        mask = torch.logical_or(
            torch.eq(node_type, torch.zeros_like(node_type)),
            torch.eq(
                node_type,
                torch.zeros_like(node_type) + 5,
            ),
        )
        return mask

    @staticmethod
    def _add_noise(features, targets, noise_std, noise_mask):
        """Add zero-mean Gaussian noise to the masked feature nodes and
        subtract the same noise from the targets, so the noisy input still
        maps to the true next state."""
        noise = torch.normal(mean=0, std=noise_std, size=features.size())
        noise_mask = noise_mask.expand(features.size()[0], -1, 2)
        noise = torch.where(noise_mask, noise, torch.zeros_like(noise))
        features += noise
        targets -= noise
        return features, targets

    @staticmethod
    def _parse_data(p, meta):
        """Decode one serialized tf.train.Example into a dict of tensors,
        following the per-field dtype/shape/type specs in 'meta'."""
        outvar = {}
        feature_dict = {k: tf.io.VarLenFeature(tf.string) for k in meta["field_names"]}
        features = tf.io.parse_single_example(p, feature_dict)
        for k, v in meta["features"].items():
            data = tf.reshape(
                tf.io.decode_raw(features[k].values, getattr(tf, v["dtype"])),
                v["shape"],
            )
            if v["type"] == "static":
                # Static fields are stored once and tiled over the trajectory.
                data = tf.tile(data, [meta["trajectory_length"], 1, 1])
            elif v["type"] == "dynamic_varlen":
                row_len = tf.reshape(
                    tf.io.decode_raw(features["length_" + k].values, tf.int32), [-1]
                )
                data = tf.RaggedTensor.from_row_lengths(data, row_lengths=row_len)
            outvar[k] = data
        return outvar
|
modulus-main
|
modulus/datapipes/gnn/vortex_shedding_dataset.py
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.